@@ -30,9 +30,9 @@ def _compare_records(
    This handles normalizing record names, which will be relative to workflow
    step, so that they can be compared.
    """
-    srcfields = {cwl.shortname(field.name): field.type for field in (src.fields or {})}
+    srcfields = {cwl.shortname(field.name): field.type_ for field in (src.fields or {})}
    sinkfields = {
-        cwl.shortname(field.name): field.type for field in (sink.fields or {})
+        cwl.shortname(field.name): field.type_ for field in (sink.fields or {})
    }
    for key in sinkfields.keys():
        if (
@@ -63,10 +63,10 @@ def _compare_type(type1: Any, type2: Any) -> bool:
        return _compare_type(type1.items, type2.items)
    elif isinstance(type1, cwl.RecordSchema) and isinstance(type2, cwl.RecordSchema):
        fields1 = {
-            cwl.shortname(field.name): field.type for field in (type1.fields or {})
+            cwl.shortname(field.name): field.type_ for field in (type1.fields or {})
        }
        fields2 = {
-            cwl.shortname(field.name): field.type for field in (type2.fields or {})
+            cwl.shortname(field.name): field.type_ for field in (type2.fields or {})
        }
        if fields1.keys() != fields2.keys():
            return False
@@ -184,7 +184,7 @@ def check_types(
        return "exception"
    if linkMerge == "merge_nested":
        return check_types(
-            cwl.ArraySchema(items=srctype, type="array"), sinktype, None, None
+            cwl.ArraySchema(items=srctype, type_="array"), sinktype, None, None
        )
    if linkMerge == "merge_flattened":
        return check_types(merge_flatten_type(srctype), sinktype, None, None)
@@ -212,7 +212,7 @@ def content_limit_respected_read(f: IO[bytes]) -> str:
def convert_stdstreams_to_files(clt: cwl.CommandLineTool) -> None:
    """Convert stdin, stdout and stderr type shortcuts to files."""
    for out in clt.outputs:
-        if out.type == "stdout":
+        if out.type_ == "stdout":
            if out.outputBinding is not None:
                raise ValidationException(
                    "Not allowed to specify outputBinding when using stdout shortcut."
@@ -223,9 +223,9 @@ def convert_stdstreams_to_files(clt: cwl.CommandLineTool) -> None:
                        json_dumps(clt.save(), sort_keys=True).encode("utf-8")
                    ).hexdigest()
                )
-            out.type = "File"
+            out.type_ = "File"
            out.outputBinding = cwl.CommandOutputBinding(glob=clt.stdout)
-        elif out.type == "stderr":
+        elif out.type_ == "stderr":
            if out.outputBinding is not None:
                raise ValidationException(
                    "Not allowed to specify outputBinding when using stderr shortcut."
@@ -236,10 +236,10 @@ def convert_stdstreams_to_files(clt: cwl.CommandLineTool) -> None:
                        json_dumps(clt.save(), sort_keys=True).encode("utf-8")
                    ).hexdigest()
                )
-            out.type = "File"
+            out.type_ = "File"
            out.outputBinding = cwl.CommandOutputBinding(glob=clt.stderr)
    for inp in clt.inputs:
-        if inp.type == "stdin":
+        if inp.type_ == "stdin":
            if inp.inputBinding is not None:
                raise ValidationException(
                    "Not allowed to specify unputBinding when using stdin shortcut."
@@ -253,7 +253,7 @@ def convert_stdstreams_to_files(clt: cwl.CommandLineTool) -> None:
                    "$(inputs.%s.path)"
                    % cast(str, inp.id).rpartition("#")[2].split("/")[-1]
                )
-                inp.type = "File"
+                inp.type_ = "File"


def merge_flatten_type(src: Any) -> Any:
@@ -262,7 +262,7 @@ def merge_flatten_type(src: Any) -> Any:
        return [merge_flatten_type(t) for t in src]
    if isinstance(src, cwl.ArraySchema):
        return src
-    return cwl.ArraySchema(type="array", items=src)
+    return cwl.ArraySchema(type_="array", items=src)


def type_for_step_input(
@@ -280,9 +280,9 @@ def type_for_step_input(
                cast(str, step_input.id).split("#")[-1]
                == cast(str, in_.id).split("#")[-1]
            ):
-                input_type = step_input.type
+                input_type = step_input.type_
                if step.scatter is not None and in_.id in aslist(step.scatter):
-                    input_type = cwl.ArraySchema(items=input_type, type="array")
+                    input_type = cwl.ArraySchema(items=input_type, type_="array")
                return input_type
    return "Any"

@@ -300,15 +300,15 @@ def type_for_step_output(
                output.id.split("#")[-1].split("/")[-1]
                == sourcename.split("#")[-1].split("/")[-1]
            ):
-                output_type = output.type
+                output_type = output.type_
                if step.scatter is not None:
                    if step.scatterMethod == "nested_crossproduct":
                        for _ in range(len(aslist(step.scatter))):
                            output_type = cwl.ArraySchema(
-                                items=output_type, type="array"
+                                items=output_type, type_="array"
                            )
                    else:
-                        output_type = cwl.ArraySchema(items=output_type, type="array")
+                        output_type = cwl.ArraySchema(items=output_type, type_="array")
                return output_type
    raise ValidationException(
        "param {} not found in {}.".format(
@@ -328,42 +328,44 @@ def type_for_source(
    scatter_context: List[Optional[Tuple[int, str]]] = []
    params = param_for_source_id(process, sourcenames, parent, scatter_context)
    if not isinstance(params, list):
-        new_type = params.type
+        new_type = params.type_
        if scatter_context[0] is not None:
            if scatter_context[0][1] == "nested_crossproduct":
                for _ in range(scatter_context[0][0]):
-                    new_type = cwl.ArraySchema(items=new_type, type="array")
+                    new_type = cwl.ArraySchema(items=new_type, type_="array")
            else:
-                new_type = cwl.ArraySchema(items=new_type, type="array")
+                new_type = cwl.ArraySchema(items=new_type, type_="array")
        if linkMerge == "merge_nested":
-            new_type = cwl.ArraySchema(items=new_type, type="array")
+            new_type = cwl.ArraySchema(items=new_type, type_="array")
        elif linkMerge == "merge_flattened":
            new_type = merge_flatten_type(new_type)
        return new_type
    new_type = []
    for p, sc in zip(params, scatter_context):
        if isinstance(p, str) and not any(_compare_type(t, p) for t in new_type):
            cur_type = p
-        elif hasattr(p, "type") and not any(_compare_type(t, p.type) for t in new_type):
-            cur_type = p.type
+        elif hasattr(p, "type_") and not any(
+            _compare_type(t, p.type_) for t in new_type
+        ):
+            cur_type = p.type_
        else:
            cur_type = None
        if cur_type is not None:
            if sc is not None:
                if sc[1] == "nested_crossproduct":
                    for _ in range(sc[0]):
-                        cur_type = cwl.ArraySchema(items=cur_type, type="array")
+                        cur_type = cwl.ArraySchema(items=cur_type, type_="array")
                else:
-                    cur_type = cwl.ArraySchema(items=cur_type, type="array")
+                    cur_type = cwl.ArraySchema(items=cur_type, type_="array")
            new_type.append(cur_type)
    if len(new_type) == 1:
        new_type = new_type[0]
    if linkMerge == "merge_nested":
-        return cwl.ArraySchema(items=new_type, type="array")
+        return cwl.ArraySchema(items=new_type, type_="array")
    elif linkMerge == "merge_flattened":
        return merge_flatten_type(new_type)
    elif isinstance(sourcenames, List) and len(sourcenames) > 1:
-        return cwl.ArraySchema(items=new_type, type="array")
+        return cwl.ArraySchema(items=new_type, type_="array")
    else:
        return new_type
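For orientation, the hunks above consistently replace the generated parser attribute type with type_ (both when reading it and when constructing objects such as ArraySchema). A minimal sketch of the renamed attribute in use follows; it is not part of the commit, and the import path is an assumption that depends on the cwl-utils release (older releases ship top-level modules such as cwl_utils.parser_v1_2 instead).

# Illustration only, not part of the diff above. The import path is an
# assumption; pick the parser module that matches your cwl-utils release.
from cwl_utils.parser import cwl_v1_2 as cwl

# The generated classes now take ``type_`` as the keyword argument and expose
# it as the ``type_`` attribute, mirroring the replacements in the hunks above.
file_array = cwl.ArraySchema(items="File", type_="array")
assert file_array.type_ == "array"
assert file_array.items == "File"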