)
from ...models import ConnectionCredentials, RevisionItem
from ...models.job_item import JobItem
-from ...models import ConnectionCredentials

-io_types = (io.BytesIO, io.BufferedReader)
-
-from pathlib import Path
-from typing import (
-    List,
-    Mapping,
-    Optional,
-    Sequence,
-    Tuple,
-    TYPE_CHECKING,
-    Union,
-)
-
-io_types = (io.BytesIO, io.BufferedReader)
+io_types_r = (io.BytesIO, io.BufferedReader)
+io_types_w = (io.BytesIO, io.BufferedWriter)

# The maximum size of a file that can be published in a single request is 64MB
FILESIZE_LIMIT = 1024 * 1024 * 64  # 64MB
    from .schedules_endpoint import AddResponse

FilePath = Union[str, os.PathLike]
-FileObject = Union[io.BufferedReader, io.BytesIO]
-PathOrFile = Union[FilePath, FileObject]
+FileObjectR = Union[io.BufferedReader, io.BytesIO]
+FileObjectW = Union[io.BufferedWriter, io.BytesIO]
+PathOrFileR = Union[FilePath, FileObjectR]
+PathOrFileW = Union[FilePath, FileObjectW]


class Datasources(QuerysetEndpoint):
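A quick sketch (not part of the diff) of what the split tuples accept, using only the standard library: an io.BytesIO satisfies both sides, while a file opened "rb" is read-only.

    import io

    # mirrors the aliases introduced above
    io_types_r = (io.BytesIO, io.BufferedReader)   # sources publish() can read from
    io_types_w = (io.BytesIO, io.BufferedWriter)   # sinks download()/download_revision() can write to

    buf = io.BytesIO()
    print(isinstance(buf, io_types_r), isinstance(buf, io_types_w))   # True True

    with open(__file__, "rb") as f:                # BufferedReader: read side only
        print(isinstance(f, io_types_r), isinstance(f, io_types_w))   # True False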
@@ -80,7 +69,7 @@ def baseurl(self) -> str:

    # Get all datasources
    @api(version="2.0")
-    def get(self, req_options: RequestOptions = None) -> Tuple[List[DatasourceItem], PaginationItem]:
+    def get(self, req_options: Optional[RequestOptions] = None) -> Tuple[List[DatasourceItem], PaginationItem]:
        logger.info("Querying all datasources on site")
        url = self.baseurl
        server_response = self.get_request(url, req_options)
@@ -135,39 +124,11 @@ def delete(self, datasource_id: str) -> None:
    def download(
        self,
        datasource_id: str,
-        filepath: FilePath = None,
+        filepath: Optional[PathOrFileW] = None,
        include_extract: bool = True,
        no_extract: Optional[bool] = None,
    ) -> str:
-        if not datasource_id:
-            error = "Datasource ID undefined."
-            raise ValueError(error)
-        url = "{0}/{1}/content".format(self.baseurl, datasource_id)
-
-        if no_extract is False or no_extract is True:
-            import warnings
-
-            warnings.warn(
-                "no_extract is deprecated, use include_extract instead.",
-                DeprecationWarning,
-            )
-            include_extract = not no_extract
-
-        if not include_extract:
-            url += "?includeExtract=False"
-
-        with closing(self.get_request(url, parameters={"stream": True})) as server_response:
-            _, params = cgi.parse_header(server_response.headers["Content-Disposition"])
-            filename = to_filename(os.path.basename(params["filename"]))
-
-            download_path = make_download_path(filepath, filename)
-
-            with open(download_path, "wb") as f:
-                for chunk in server_response.iter_content(1024):  # 1KB
-                    f.write(chunk)
-
-        logger.info("Downloaded datasource to {0} (ID: {1})".format(download_path, datasource_id))
-        return os.path.abspath(download_path)
+        return self.download_revision(datasource_id, None, filepath, include_extract, no_extract)

    # Update datasource
    @api(version="2.0")
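With download() now delegating to download_revision(), a caller can stream a datasource into memory rather than onto disk. A hedged usage sketch; the server URL, token values, and datasource ID are placeholders, and sign-in follows the usual tableauserverclient pattern.

    import io
    import tableauserverclient as TSC

    server = TSC.Server("https://tableau.example.com", use_server_version=True)
    auth = TSC.PersonalAccessTokenAuth("token-name", "token-secret", site_id="")

    with server.auth.sign_in(auth):
        buffer = io.BytesIO()
        # Passing a writable object streams the content into it instead of a file on disk;
        # download_revision() then hands the same object back.
        server.datasources.download("datasource-luid", filepath=buffer)
        print("downloaded", len(buffer.getvalue()), "bytes")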
@@ -232,10 +193,10 @@ def delete_extract(self, datasource_item: DatasourceItem) -> None:
    def publish(
        self,
        datasource_item: DatasourceItem,
-        file: PathOrFile,
+        file: PathOrFileR,
        mode: str,
-        connection_credentials: ConnectionCredentials = None,
-        connections: Sequence[ConnectionItem] = None,
+        connection_credentials: Optional[ConnectionCredentials] = None,
+        connections: Optional[Sequence[ConnectionItem]] = None,
        as_job: bool = False,
    ) -> Union[DatasourceItem, JobItem]:

@@ -255,8 +216,7 @@ def publish(
            error = "Only {} files can be published as datasources.".format(", ".join(ALLOWED_FILE_EXTENSIONS))
            raise ValueError(error)

-        elif isinstance(file, io_types):
-
+        elif isinstance(file, io_types_r):
            if not datasource_item.name:
                error = "Datasource item must have a name when passing a file object"
                raise ValueError(error)
@@ -302,7 +262,7 @@ def publish(
        if isinstance(file, (Path, str)):
            with open(file, "rb") as f:
                file_contents = f.read()
-        elif isinstance(file, io_types):
+        elif isinstance(file, io_types_r):
            file_contents = file.read()
        else:
            raise TypeError("file should be a filepath or file object.")
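On the publish side the same kind of buffer passes the io_types_r check, so an in-memory payload can be published as long as the DatasourceItem carries a name (there is no filename to derive one from). A sketch only; the project ID is a placeholder and the payload bytes would have to be a real .tdsx/.hyper file for the call to succeed.

    import io
    import tableauserverclient as TSC

    item = TSC.DatasourceItem(project_id="project-luid", name="in_memory_source")
    payload = io.BytesIO(b"...bytes of a .tdsx previously read or built elsewhere...")

    # With a signed-in `server` as in the sketch above:
    # server.datasources.publish(item, payload, mode=TSC.Server.PublishMode.CreateNew)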
@@ -433,14 +393,17 @@ def download_revision(
        self,
        datasource_id: str,
        revision_number: str,
-        filepath: Optional[PathOrFile] = None,
+        filepath: Optional[PathOrFileW] = None,
        include_extract: bool = True,
        no_extract: Optional[bool] = None,
-    ) -> str:
+    ) -> PathOrFileW:
        if not datasource_id:
            error = "Datasource ID undefined."
            raise ValueError(error)
-        url = "{0}/{1}/revisions/{2}/content".format(self.baseurl, datasource_id, revision_number)
+        if revision_number is None:
+            url = "{0}/{1}/content".format(self.baseurl, datasource_id)
+        else:
+            url = "{0}/{1}/revisions/{2}/content".format(self.baseurl, datasource_id, revision_number)
        if no_extract is False or no_extract is True:
            import warnings

@@ -455,18 +418,22 @@ def download_revision(

        with closing(self.get_request(url, parameters={"stream": True})) as server_response:
            _, params = cgi.parse_header(server_response.headers["Content-Disposition"])
-            filename = to_filename(os.path.basename(params["filename"]))
-
-            download_path = make_download_path(filepath, filename)
-
-            with open(download_path, "wb") as f:
+            if isinstance(filepath, io_types_w):
                for chunk in server_response.iter_content(1024):  # 1KB
-                    f.write(chunk)
+                    filepath.write(chunk)
+                return_path = filepath
+            else:
+                filename = to_filename(os.path.basename(params["filename"]))
+                download_path = make_download_path(filepath, filename)
+                with open(download_path, "wb") as f:
+                    for chunk in server_response.iter_content(1024):  # 1KB
+                        f.write(chunk)
+                return_path = os.path.abspath(download_path)

        logger.info(
-            "Downloaded datasource revision {0} to {1} (ID: {2})".format(revision_number, download_path, datasource_id)
+            "Downloaded datasource revision {0} to {1} (ID: {2})".format(revision_number, return_path, datasource_id)
        )
-        return os.path.abspath(download_path)
+        return return_path

    @api(version="2.3")
    def delete_revision(self, datasource_id: str, revision_number: str) -> None:
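The reworked download_revision() therefore has two return shapes: an absolute path string when it writes to disk, or the very object it was given when that object is writable. A short sketch under the same placeholder server and ID assumptions as above.

    import io

    # 1) Path (or nothing) passed: bytes land on disk, an absolute path string comes back.
    # path = server.datasources.download_revision("datasource-luid", "2")

    # 2) Writable object passed: chunks are written into it and the same object is returned.
    # buf = io.BytesIO()
    # assert server.datasources.download_revision("datasource-luid", "2", filepath=buf) is buf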