"""
Model of information submitted to a scheduler.
"""
from __future__ import annotations
from collections import defaultdict
import os
import shutil
import socket
import subprocess
from textwrap import dedent, indent
from typing import TextIO, cast, overload, TYPE_CHECKING
from typing_extensions import override
import numpy as np
from hpcflow.sdk.core import SKIPPED_EXIT_CODE
from hpcflow.sdk.core.enums import EARStatus
from hpcflow.sdk.core.errors import (
JobscriptSubmissionFailure,
NotSubmitMachineError,
)
from hpcflow.sdk.typing import hydrate
from hpcflow.sdk.core.json_like import ChildObjectSpec, JSONLike
from hpcflow.sdk.core.utils import nth_value, parse_timestamp, current_timestamp
from hpcflow.sdk.log import TimeIt
from hpcflow.sdk.submission.schedulers import QueuedScheduler
from hpcflow.sdk.submission.schedulers.direct import DirectScheduler
from hpcflow.sdk.submission.shells import get_shell, DEFAULT_SHELL_NAMES
if TYPE_CHECKING:
from collections.abc import Iterable, Iterator, Mapping, Sequence
from datetime import datetime
from pathlib import Path
from typing import Any, ClassVar, Literal
from typing_extensions import TypeIs
from numpy.typing import NDArray, ArrayLike
from ..core.actions import ElementActionRun
from ..core.element import ElementResources
from ..core.loop_cache import LoopIndex
from ..core.types import JobscriptSubmissionFailureArgs, BlockActionKey
from ..core.workflow import WorkflowTask, Workflow
from ..persistence.base import PersistentStore
from .submission import Submission
from .shells.base import Shell
from .schedulers import Scheduler
from .enums import JobscriptElementState
from .types import (
JobScriptCreationArguments,
JobScriptDescriptor,
ResolvedJobscriptBlockDependencies,
SchedulerRef,
VersionInfo,
)
from ..core.cache import ObjectCache
from hpcflow.sdk.submission.submission import JOBSCRIPT_SUBMIT_TIME_KEYS
def is_jobscript_array(
resources: ElementResources, num_elements: int, store: PersistentStore
) -> bool:
"""Return True if a job array should be used for the specified `ElementResources`."""
if resources.scheduler in ("direct", "direct_posix"):
if resources.use_job_array:
raise ValueError(
f"`use_job_array` not supported by scheduler: {resources.scheduler!r}"
)
return False
if resources.combine_scripts:
return False
run_parallelism = store._features.EAR_parallelism
if resources.use_job_array is None:
if num_elements > 1 and run_parallelism:
return True
else:
return False
else:
if resources.use_job_array and not run_parallelism:
raise ValueError(
f"Store type {store!r} does not support element parallelism, so jobs "
f"cannot be submitted as scheduler arrays."
)
return resources.use_job_array
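# A minimal sketch of the decision above (hypothetical `res` and `store`
# objects; with `use_job_array=None` the choice defers to the store's
# run-parallelism support):
#
#     >>> res.scheduler, res.use_job_array = "slurm", None
#     >>> store._features.EAR_parallelism
#     True
#     >>> is_jobscript_array(res, num_elements=8, store=store)
#     True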
@TimeIt.decorator
def generate_EAR_resource_map(
task: WorkflowTask,
loop_idx: LoopIndex[str, int],
cache: ObjectCache,
) -> tuple[Sequence[ElementResources], Sequence[int], NDArray, NDArray]:
"""
Generate an integer array whose rows represent actions and columns represent task
elements and whose values index unique resources.
"""
none_val = -1
resources: list[ElementResources] = []
resource_hashes: list[int] = []
arr_shape = (task.num_actions, task.num_elements)
resource_map = np.empty(arr_shape, dtype=int)
EAR_ID_map = np.empty(arr_shape, dtype=int)
resource_map[:] = none_val
EAR_ID_map[:] = none_val
assert cache.elements is not None
assert cache.iterations is not None
for elem_id in task.element_IDs:
element = cache.elements[elem_id]
for iter_ID_i in element.iteration_IDs:
iter_i = cache.iterations[iter_ID_i]
if iter_i.loop_idx != loop_idx:
continue
if iter_i.EARs_initialised: # not strictly needed (actions will be empty)
for act_idx, action in iter_i.actions.items():
for run in action.runs:
if run.status == EARStatus.pending:
# TODO: consider `time_limit`s
res_hash = run.resources.get_jobscript_hash()
if res_hash not in resource_hashes:
resource_hashes.append(res_hash)
resources.append(run.resources)
resource_map[act_idx][element.index] = resource_hashes.index(
res_hash
)
EAR_ID_map[act_idx, element.index] = run.id_
# set defaults for and validate unique resources:
for res in resources:
res.set_defaults()
res.validate_against_machine()
return (
resources,
resource_hashes,
resource_map,
EAR_ID_map,
)
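# For illustration (hypothetical values): for a task with 2 actions and 3
# elements, where elements 0-1 share one resource set (index 0) and element 2
# uses another (index 1), the returned maps might look like:
#
#     resource_map = [[0, 0, 1],     EAR_ID_map = [[12, 15, 18],
#                     [0, 0, 1]]                   [13, 16, 19]]
#
# with -1 marking positions where no pending run exists.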
@TimeIt.decorator
def group_resource_map_into_jobscripts(
resource_map: ArrayLike,
none_val: Any = -1,
) -> tuple[list[JobScriptDescriptor], NDArray]:
"""
Convert a resource map into a plan for what elements to group together into jobscripts.
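
    Examples
    --------
    A minimal sketch (hypothetical map: two actions, three elements, and two
    distinct resource sets indexed 0 and 1). Contiguous elements that share a
    resource index are grouped into one jobscript; each ``elements`` dict maps
    an element index to the action indices it runs:

    >>> resource_map = np.array([[0, 0, 1],
    ...                          [0, 0, 1]])
    >>> jobscripts, js_map = group_resource_map_into_jobscripts(resource_map)
    >>> [js["elements"] for js in jobscripts]
    [{0: [0, 1], 1: [0, 1]}, {2: [0, 1]}]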
"""
resource_map_ = np.asanyarray(resource_map)
resource_idx = np.unique(resource_map_)
jobscripts: list[JobScriptDescriptor] = []
allocated = np.zeros_like(resource_map_)
js_map = np.ones_like(resource_map_, dtype=float) * np.nan
nones_bool: NDArray = resource_map_ == none_val
stop = False
for act_idx in range(resource_map_.shape[0]):
for res_i in resource_idx:
if res_i == none_val:
continue
if res_i not in resource_map_[act_idx]:
continue
resource_map_[nones_bool] = res_i
diff = np.cumsum(np.abs(np.diff(resource_map_[act_idx:], axis=0)), axis=0)
elem_bool = np.logical_and(
resource_map_[act_idx] == res_i, allocated[act_idx] == False
)
elem_idx = np.where(elem_bool)[0]
act_elem_bool = np.logical_and(elem_bool, nones_bool[act_idx] == False)
act_elem_idx: tuple[NDArray, ...] = np.where(act_elem_bool)
# add elements from downstream actions:
ds_bool = np.logical_and(
diff[:, elem_idx] == 0,
nones_bool[act_idx + 1 :, elem_idx] == False,
)
ds_act_idx: NDArray
ds_elem_idx: NDArray
ds_act_idx, ds_elem_idx = np.where(ds_bool)
ds_act_idx += act_idx + 1
ds_elem_idx = elem_idx[ds_elem_idx]
EARs_by_elem: dict[int, list[int]] = {
k.item(): [act_idx] for k in act_elem_idx[0]
}
for ds_a, ds_e in zip(ds_act_idx, ds_elem_idx):
EARs_by_elem.setdefault(ds_e.item(), []).append(ds_a.item())
EARs = np.vstack([np.ones_like(act_elem_idx) * act_idx, act_elem_idx])
EARs = np.hstack([EARs, np.array([ds_act_idx, ds_elem_idx])])
if not EARs.size:
continue
js: JobScriptDescriptor = {
"resources": res_i,
"elements": dict(sorted(EARs_by_elem.items(), key=lambda x: x[0])),
}
allocated[EARs[0], EARs[1]] = True
js_map[EARs[0], EARs[1]] = len(jobscripts)
jobscripts.append(js)
if np.all(allocated[~nones_bool]):
stop = True
break
if stop:
break
resource_map_[nones_bool] = none_val
return jobscripts, js_map
@TimeIt.decorator
def resolve_jobscript_dependencies(
jobscripts: Mapping[int, JobScriptCreationArguments],
element_deps: Mapping[int, Mapping[int, Sequence[int]]],
) -> Mapping[int, dict[int, ResolvedJobscriptBlockDependencies]]:
"""
Discover concrete dependencies between jobscripts.
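
    Examples
    --------
    A minimal sketch (hypothetical EAR IDs): each element of jobscript 1 depends
    on the corresponding EAR of jobscript 0, which is detected as a one-to-one
    ("array") dependency:

    >>> jobscripts = {
    ...     0: {"EAR_ID": np.array([[10, 11]])},
    ...     1: {"EAR_ID": np.array([[20, 21]])},
    ... }
    >>> element_deps = {1: {0: [10], 1: [11]}}
    >>> deps = resolve_jobscript_dependencies(jobscripts, element_deps)
    >>> deps[1][0]
    {'js_element_mapping': {0: [0], 1: [1]}, 'is_array': True}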
"""
# first pass is to find the mappings between jobscript elements:
jobscript_deps: dict[int, dict[int, ResolvedJobscriptBlockDependencies]] = {}
for js_idx, elem_deps in element_deps.items():
# keys of new dict are other jobscript indices on which this jobscript (js_idx)
# depends:
jobscript_deps[js_idx] = {}
for js_elem_idx_i, EAR_deps_i in elem_deps.items():
# locate which jobscript elements this jobscript element depends on:
for EAR_dep_j in EAR_deps_i:
for js_k_idx, js_k in jobscripts.items():
if js_k_idx == js_idx:
break
if EAR_dep_j in js_k["EAR_ID"]:
if js_k_idx not in jobscript_deps[js_idx]:
jobscript_deps[js_idx][js_k_idx] = {"js_element_mapping": {}}
jobscript_deps[js_idx][js_k_idx]["js_element_mapping"].setdefault(
js_elem_idx_i, []
)
# retrieve column index, which is the JS-element index:
js_elem_idx_k: int = np.where(
np.any(js_k["EAR_ID"] == EAR_dep_j, axis=0)
)[0][0].item()
# add js dependency element-mapping:
if (
js_elem_idx_k
not in jobscript_deps[js_idx][js_k_idx]["js_element_mapping"][
js_elem_idx_i
]
):
jobscript_deps[js_idx][js_k_idx]["js_element_mapping"][
js_elem_idx_i
].append(js_elem_idx_k)
# next we can determine if two jobscripts have a one-to-one element mapping, which
# means they can be submitted with a "job array" dependency relationship:
for js_i_idx, deps_i in jobscript_deps.items():
for js_k_idx, deps_j in deps_i.items():
# is this an array dependency?
js_i_num_js_elements = jobscripts[js_i_idx]["EAR_ID"].shape[1]
js_k_num_js_elements = jobscripts[js_k_idx]["EAR_ID"].shape[1]
is_all_i_elems = sorted(set(deps_j["js_element_mapping"])) == list(
range(js_i_num_js_elements)
)
is_all_k_single = set(
len(i) for i in deps_j["js_element_mapping"].values()
) == {1}
is_all_k_elems = sorted(
i[0] for i in deps_j["js_element_mapping"].values()
) == list(range(js_k_num_js_elements))
is_arr = is_all_i_elems and is_all_k_single and is_all_k_elems
jobscript_deps[js_i_idx][js_k_idx]["is_array"] = is_arr
return jobscript_deps
def _reindex_dependencies(
jobscripts: Mapping[int, JobScriptCreationArguments],
from_idx: int,
to_idx: int,
):
for ds_js_idx, ds_js in jobscripts.items():
if ds_js_idx <= from_idx:
continue
deps = ds_js["dependencies"]
if from_idx in deps:
deps[to_idx] = deps.pop(from_idx)
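# For illustration: if jobscript 4 was merged into jobscript 2, calling
# `_reindex_dependencies(jobscripts, 4, 2)` re-keys any downstream dependency on
# jobscript 4 so that it refers to jobscript 2 instead (hypothetical indices).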
@TimeIt.decorator
def merge_jobscripts_across_tasks(
jobscripts: Mapping[int, JobScriptCreationArguments],
) -> Mapping[int, JobScriptCreationArguments]:
"""Try to merge jobscripts between tasks.
This is possible if two jobscripts share the same resources and have an array
dependency (i.e. one-to-one element dependency mapping).
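
    Examples
    --------
    A minimal sketch (hypothetical arguments; only the keys this function
    touches are shown). Jobscript 1 has an array dependency on jobscript 0 and
    the same resource hash, so it is absorbed into jobscript 0:

    >>> js_0 = {"dependencies": {}, "resource_hash": "abc",
    ...         "task_insert_IDs": [0], "task_loop_idx": [{}],
    ...         "task_actions": [(0, 0, 0)], "task_elements": {0: [0, 1]},
    ...         "EAR_ID": np.array([[10, 11]])}
    >>> js_1 = {"dependencies": {0: {"js_element_mapping": {0: [0], 1: [1]},
    ...                              "is_array": True}},
    ...         "resource_hash": "abc", "task_insert_IDs": [1],
    ...         "task_loop_idx": [{}], "task_actions": [(1, 0, 0)],
    ...         "task_elements": {0: [0, 1]}, "EAR_ID": np.array([[20, 21]])}
    >>> merged = merge_jobscripts_across_tasks({0: js_0, 1: js_1})
    >>> list(merged), merged[0]["task_insert_IDs"]
    ([0], [0, 1])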
"""
# The set of IDs of dicts that we've merged, allowing us to not keep that info in
# the dicts themselves.
merged: set[int] = set()
for js_idx, js in jobscripts.items():
if not js["dependencies"]:
continue
closest_idx = cast("int", max(js["dependencies"]))
closest_js = jobscripts[closest_idx]
other_deps = {k: v for k, v in js["dependencies"].items() if k != closest_idx}
# if all `other_deps` are also found within `closest_js`'s dependencies, then we
# can merge `js` into `closest_js`:
merge = True
for dep_idx, dep_i in other_deps.items():
try:
if closest_js["dependencies"][dep_idx] != dep_i:
merge = False
except KeyError:
merge = False
if merge:
js_j = closest_js # the jobscript we are merging `js` into
js_j_idx = closest_idx
dep_info = js["dependencies"][js_j_idx]
# can only merge if resources are the same and is array dependency:
if js["resource_hash"] == js_j["resource_hash"] and dep_info["is_array"]:
num_loop_idx = len(
js_j["task_loop_idx"]
) # TODO: should this be: `js_j["task_loop_idx"][0]`?
# append task_insert_IDs
js_j["task_insert_IDs"].append(js["task_insert_IDs"][0])
js_j["task_loop_idx"].append(js["task_loop_idx"][0])
add_acts = [(a, b, num_loop_idx) for a, b, _ in js["task_actions"]]
js_j["task_actions"].extend(add_acts)
for k, v in js["task_elements"].items():
js_j["task_elements"][k].extend(v)
# append to elements and elements_idx list
js_j["EAR_ID"] = np.vstack((js_j["EAR_ID"], js["EAR_ID"]))
# mark this js as defunct
merged.add(id(js))
# update dependencies of any downstream jobscripts that refer to this js
_reindex_dependencies(jobscripts, js_idx, js_j_idx)
    # remove merged jobscripts:
return {k: v for k, v in jobscripts.items() if id(v) not in merged}
@TimeIt.decorator
def resolve_jobscript_blocks(
jobscripts: Mapping[int, JobScriptCreationArguments],
) -> list[dict[str, Any]]:
"""For contiguous, dependent, non-array jobscripts with identical resource
requirements, combine into multi-block jobscripts.
Parameters
----------
jobscripts
Dict whose values must be dicts with keys "is_array", "resource_hash" and
"dependencies".
"""
js_new: list[
list[JobScriptCreationArguments]
] = [] # TODO: not the same type, e.g. dependencies have tuple keys,
new_idx: dict[
int, tuple[int, int]
] = {} # track new positions by new jobscript index and block index
new_idx_inv: dict[int, list[int]] = defaultdict(list)
prev_hash = None
blocks: list[JobScriptCreationArguments] = []
js_deps_rec: dict[int, set[int]] = {} # recursive
for js_idx, js_i in jobscripts.items():
cur_js_idx = len(js_new)
new_deps_js_j = {
new_idx[i][0] for i in cast("Sequence[int]", js_i["dependencies"])
}
new_deps_js_j_rec = [
k for i in new_deps_js_j for j in new_idx_inv[i] for k in js_deps_rec[j]
]
js_deps_rec[js_idx] = new_deps_js_j.union(new_deps_js_j_rec)
# recursive dependencies of js_i (which we're looking to merge), excluding the
# dependency on the current jobscript:
js_j_deps_rec_no_cur = js_deps_rec[js_idx] - set([cur_js_idx])
# recursive dependencies of the current jobscript:
cur_deps_rec = {
j for i in new_idx_inv[cur_js_idx] for j in js_deps_rec[i] if j != cur_js_idx
}
        # can we merge js_i into the current jobscript, as far as dependencies are
        # concerned?
deps_mergable = cur_js_idx in new_deps_js_j
if deps_mergable and js_j_deps_rec_no_cur:
deps_mergable = js_j_deps_rec_no_cur == cur_deps_rec
if js_i["is_array"]:
# array jobs cannot be merged into the same jobscript
# append existing block:
if blocks:
js_new.append(blocks)
prev_hash = None
blocks = []
new_idx[js_idx] = (len(js_new), 0)
new_idx_inv[len(js_new)].append(js_idx)
js_new.append([js_i])
continue
if js_idx == 0 or prev_hash is None:
# (note: zeroth index will always exist)
# start a new block:
blocks.append(js_i)
new_idx[js_idx] = (len(js_new), len(blocks) - 1)
new_idx_inv[len(js_new)].append(js_idx)
# set resource hash to compare with the next jobscript
prev_hash = js_i["resource_hash"]
elif js_i["resource_hash"] == prev_hash and deps_mergable:
# merge with previous jobscript by adding another block
# only merge if this jobscript's dependencies include the current jobscript,
# and any other dependencies are included in the current jobscript's
# dependencies
blocks.append(js_i)
new_idx[js_idx] = (len(js_new), len(blocks) - 1)
new_idx_inv[len(js_new)].append(js_idx)
else:
# cannot merge, append the new jobscript data:
js_new.append(blocks)
# start a new block:
blocks = [js_i]
new_idx[js_idx] = (len(js_new), len(blocks) - 1)
new_idx_inv[len(js_new)].append(js_idx)
# set resource hash to compare with the next jobscript
prev_hash = js_i["resource_hash"]
# append remaining blocks:
if blocks:
js_new.append(blocks)
prev_hash = None
blocks = []
# re-index dependencies:
js_new_: list[dict[str, Any]] = []
for js_i_idx, js_new_i in enumerate(js_new):
resources = None
is_array = None
for block_j in js_new_i:
for k, v in new_idx.items():
dep_data = block_j["dependencies"].pop(k, None)
if dep_data:
block_j["dependencies"][v] = dep_data
del block_j["resource_hash"]
resources = block_j.pop("resources", None)
is_array = block_j.pop("is_array")
js_new_.append(
{
"resources": resources,
"is_array": is_array,
"blocks": js_new[js_i_idx],
}
)
return js_new_
@hydrate
class JobscriptBlock(JSONLike):
"""A rectangular block of element-actions to run within a jobscript.
Parameters
----------
task_insert_IDs: list[int]
The task insertion IDs.
task_actions: list[tuple]
The actions of the tasks.
``task insert ID, action_idx, index into task_loop_idx`` for each ``JS_ACTION_IDX``
task_elements: dict[int, list[int]]
The elements of the tasks.
Maps ``JS_ELEMENT_IDX`` to list of ``TASK_ELEMENT_IDX`` for each ``TASK_INSERT_ID``
EAR_ID:
Element action run information.
task_loop_idx: list[dict]
Description of what loops are in play.
dependencies: dict[tuple[int, int], dict]
Description of dependencies. Keys are tuples of (jobscript index,
jobscript-block index) of the dependency.
index: int
The index of the block within the parent jobscript.
jobscript: ~hpcflow.app.Jobscript
The parent jobscript.
"""
def __init__(
self,
index: int,
task_insert_IDs: list[int],
task_loop_idx: list[dict[str, int]],
task_actions: list[tuple[int, int, int]] | None = None,
task_elements: dict[int, list[int]] | None = None,
EAR_ID: NDArray | None = None,
dependencies: (
dict[tuple[int, int], ResolvedJobscriptBlockDependencies] | None
) = None,
jobscript: Jobscript | None = None,
):
self.jobscript = jobscript
self._index = index
self._task_insert_IDs = task_insert_IDs
self._task_actions = task_actions
self._task_elements = task_elements
self._task_loop_idx = task_loop_idx
self._EAR_ID = EAR_ID
self._dependencies = dependencies
self._all_EARs = None # assigned on first access to `all_EARs` property
@property
def index(self) -> int:
return self._index
@property
def submission(self) -> Submission:
assert self.jobscript is not None
return self.jobscript.submission
@property
def task_insert_IDs(self) -> Sequence[int]:
"""
The insertion IDs of tasks in this jobscript-block.
"""
return self._task_insert_IDs
@property
@TimeIt.decorator
def task_actions(self) -> NDArray:
"""
The IDs of actions of each task in this jobscript-block.
"""
assert self.jobscript is not None
return self.workflow._store.get_jobscript_block_task_actions_array(
sub_idx=self.submission.index,
js_idx=self.jobscript.index,
blk_idx=self.index,
task_actions_arr=self._task_actions,
)
@property
@TimeIt.decorator
def task_elements(self) -> Mapping[int, Sequence[int]]:
"""
The IDs of elements of each task in this jobscript-block.
"""
assert self.jobscript is not None
return self.workflow._store.get_jobscript_block_task_elements_map(
sub_idx=self.submission.index,
js_idx=self.jobscript.index,
blk_idx=self.index,
task_elems_map=self._task_elements,
)
@property
@TimeIt.decorator
def EAR_ID(self) -> NDArray:
"""
The array of EAR IDs in this jobscript-block.
"""
assert self.jobscript is not None
return self.workflow._store.get_jobscript_block_run_ID_array(
sub_idx=self.submission.index,
js_idx=self.jobscript.index,
blk_idx=self.index,
run_ID_arr=self._EAR_ID,
)
@property
@TimeIt.decorator
def dependencies(
self,
) -> Mapping[tuple[int, int], ResolvedJobscriptBlockDependencies]:
"""
The dependency descriptor.
"""
assert self.jobscript is not None
return self.workflow._store.get_jobscript_block_dependencies(
sub_idx=self.submission.index,
js_idx=self.jobscript.index,
blk_idx=self.index,
js_dependencies=self._dependencies,
)
@property
def task_loop_idx(self) -> Sequence[Mapping[str, int]]:
"""
The description of where various task loops are.
"""
return self._task_loop_idx
@property
@TimeIt.decorator
def num_actions(self) -> int:
"""
The maximal number of actions in the jobscript-block.
"""
return self.EAR_ID.shape[0]
@property
@TimeIt.decorator
def num_elements(self) -> int:
"""
The maximal number of elements in the jobscript-block.
"""
return self.EAR_ID.shape[1]
@property
def workflow(self) -> Workflow:
"""
The associated workflow.
"""
assert self.jobscript is not None
return self.jobscript.workflow
@property
@TimeIt.decorator
def all_EARs(self) -> Sequence[ElementActionRun]:
"""
Description of EAR information for this jobscript-block.
"""
assert self.jobscript is not None
return [i for i in self.jobscript.all_EARs if i.id_ in self.EAR_ID]
@override
def _postprocess_to_dict(self, d: dict[str, Any]) -> dict[str, Any]:
dct = super()._postprocess_to_dict(d)
del dct["_all_EARs"]
dct["_dependencies"] = [[list(k), v] for k, v in self.dependencies.items()]
dct = {k.lstrip("_"): v for k, v in dct.items()}
dct["EAR_ID"] = cast("NDArray", dct["EAR_ID"]).tolist()
return dct
@classmethod
def from_json_like(cls, json_like, shared_data=None):
json_like["EAR_ID"] = (
np.array(json_like["EAR_ID"]) if json_like["EAR_ID"] is not None else None
)
if json_like["dependencies"] is not None:
# transform list to dict with tuple keys, and transform string keys in
# `js_element_mapping` to integers:
deps_processed = {}
for i in json_like["dependencies"]:
deps_processed_i = {
"js_element_mapping": {
int(k): v for k, v in i[1]["js_element_mapping"].items()
},
"is_array": i[1]["is_array"],
}
deps_processed[tuple(i[0])] = deps_processed_i
json_like["dependencies"] = deps_processed
return super().from_json_like(json_like, shared_data)
def _get_EARs_arr(self) -> NDArray:
"""
Get all associated EAR objects as a 2D array.
"""
return np.array(self.all_EARs).reshape(self.EAR_ID.shape)
def get_task_loop_idx_array(self) -> NDArray:
"""
Get an array of task loop indices.
"""
loop_idx = np.empty_like(self.EAR_ID)
loop_idx[:] = np.array([i[2] for i in self.task_actions]).reshape(
(len(self.task_actions), 1)
)
return loop_idx
@TimeIt.decorator
def write_EAR_ID_file(self, fp: TextIO):
"""Write a text file with `num_elements` lines and `num_actions` delimited tokens
per line, representing whether a given EAR must be executed."""
assert self.jobscript is not None
        # we can't specify the "open" newline argument if we pass only the file
        # name, so pass the handle instead:
np.savetxt(
fname=fp,
X=(self.EAR_ID).T,
fmt="%.0f",
delimiter=self.jobscript._EAR_files_delimiter,
)
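# For illustration (hypothetical IDs): a block with 2 actions and 3 elements and
# the default ":" delimiter produces an EAR ID file with one line per element
# and one token per action, where -1 marks a run that must not be executed:
#
#     10:13
#     11:14
#     -1:15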
@hydrate
class Jobscript(JSONLike):
"""
A group of actions that are submitted together to be executed by the underlying job
management system as a single unit.
Parameters
----------
task_insert_IDs: list[int]
The task insertion IDs.
task_actions: list[tuple]
The actions of the tasks.
``task insert ID, action_idx, index into task_loop_idx`` for each ``JS_ACTION_IDX``
task_elements: dict[int, list[int]]
The elements of the tasks.
Maps ``JS_ELEMENT_IDX`` to list of ``TASK_ELEMENT_IDX`` for each ``TASK_INSERT_ID``
EAR_ID:
Element action run information.
resources: ~hpcflow.app.ElementResources
Resources to use
task_loop_idx: list[dict]
Description of what loops are in play.
dependencies: dict[int, dict]
Description of dependencies.
submit_time: datetime
When the jobscript was submitted, if known.
submit_hostname: str
Where the jobscript was submitted, if known.
submit_machine: str
Description of what the jobscript was submitted to, if known.
    submit_cmdline: list[str]
        The command line used to submit the jobscript, if known.
scheduler_job_ID: str
The job ID from the scheduler, if known.
process_ID: int
The process ID of the subprocess, if known.
version_info: dict[str, ...]
Version info about the target system.
os_name: str
The name of the OS.
shell_name: str
The name of the shell.
scheduler_name: str
The scheduler used.
running: bool
Whether the jobscript is currently running.
"""
_EAR_files_delimiter: ClassVar[str] = ":"
_workflow_app_alias: ClassVar[str] = "wkflow_app"
_child_objects: ClassVar[tuple[ChildObjectSpec, ...]] = (
ChildObjectSpec(
name="resources",
class_name="ElementResources",
),
ChildObjectSpec(
name="blocks",
class_name="JobscriptBlock",
is_multiple=True,
parent_ref="jobscript",
),
)
def __init__(
self,
index: int,
is_array: bool,
resources: ElementResources,
blocks: list[JobscriptBlock],
at_submit_metadata: dict[str, Any] | None = None,
submit_hostname: str | None = None,
submit_machine: str | None = None,
shell_idx: int | None = None,
version_info: VersionInfo | None = None,
resource_hash: str | None = None,
elements: dict[int, list[int]] | None = None,
):
if resource_hash is not None:
raise AttributeError("resource_hash must not be supplied")
if elements is not None:
raise AttributeError("elements must not be supplied")
if not isinstance(blocks[0], JobscriptBlock):
blocks = [
JobscriptBlock(**i, index=idx, jobscript=self)
for idx, i in enumerate(blocks)
]
self._index = index
self._blocks = blocks
self._at_submit_metadata = at_submit_metadata or {
k: None for k in JOBSCRIPT_SUBMIT_TIME_KEYS
}
self._is_array = is_array
self._resources = resources
        # assigned on parent `Submission.submit` (or retrieved from the persistent store):
self._submit_hostname = submit_hostname
self._submit_machine = submit_machine
self._shell_idx = shell_idx
self._version_info = version_info
# assigned by parent Submission
self._submission: Submission | None = None
# assigned on first access to `scheduler` property
self._scheduler_obj: Scheduler | None = None
# assigned on first access to `shell` property
self._shell_obj: Shell | None = None
# assigned on first access to `submit_time` property
self._submit_time_obj: datetime | None = None
# assigned on first access to `all_EARs` property
self._all_EARs: list[ElementActionRun] | None = None
self._set_parent_refs()
def __repr__(self) -> str:
return (
f"{self.__class__.__name__}("
f"index={self.index!r}, "
f"blocks={self.blocks!r}, "
f"resources={self.resources!r}, "
f")"
)
@override
def _postprocess_to_dict(self, d: dict[str, Any]) -> dict[str, Any]:
dct = super()._postprocess_to_dict(d)
del dct["_scheduler_obj"]
del dct["_shell_obj"]
del dct["_submit_time_obj"]
del dct["_all_EARs"]
dct = {k.lstrip("_"): v for k, v in dct.items()}
return dct
@classmethod
def from_json_like(cls, json_like, shared_data=None):
return super().from_json_like(json_like, shared_data)
@property
def workflow_app_alias(self) -> str:
"""
Alias for the workflow app in job scripts.
"""
return self.submission.WORKFLOW_APP_ALIAS
def get_commands_file_name(
self, block_act_key: BlockActionKey, shell: Shell | None = None
) -> str:
"""
Get the name of a file containing commands for a particular jobscript action.
"""
return self._app.RunDirAppFiles.get_commands_file_name(
block_act_key,
shell=shell or self.shell,
)
@property
def blocks(self) -> Sequence[JobscriptBlock]:
return self._blocks
@property
def at_submit_metadata(self) -> dict[str, Any]:
return self.workflow._store.get_jobscript_at_submit_metadata(
sub_idx=self.submission.index,
js_idx=self.index,
metadata_attr=self._at_submit_metadata,
)
@property
def all_EAR_IDs(self) -> NDArray:
"""Return all run IDs of this jobscripts (across all blocks), removing missing
run IDs (i.e. -1 values)"""
return np.concatenate([i.EAR_ID[i.EAR_ID >= 0] for i in self.blocks])
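    # For illustration: blocks with EAR_ID arrays [[10, -1]] and [[11, 12]]
    # yield array([10, 11, 12]) (hypothetical IDs; -1 entries are dropped).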
@property
@TimeIt.decorator
def all_EARs(self) -> Sequence[ElementActionRun]:
"""
Description of EAR information for this jobscript.
"""
return self.workflow.get_EARs_from_IDs(self.all_EAR_IDs)
@property
@TimeIt.decorator
def resources(self) -> ElementResources:
"""
The common resources that this jobscript requires.
"""
return self._resources
@property
@TimeIt.decorator
def dependencies(self) -> Mapping[tuple[int, int], dict[str, bool]]:
"""
The dependency descriptor, accounting for all blocks within this jobscript.
"""
deps = {}
for block in self.blocks:
for (js_idx, blk_idx), v in block.dependencies.items():
if js_idx == self.index:
# block dependency is internal to this jobscript
continue
else:
deps[js_idx, blk_idx] = {"is_array": v["is_array"]}
return deps
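    # For illustration: {(0, 1): {"is_array": True}} means this jobscript
    # depends on block 1 of jobscript 0 via a one-to-one element mapping
    # (hypothetical indices).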
@property
@TimeIt.decorator
def start_time(self) -> None | datetime:
"""The first known start time of any EAR in this jobscript."""
if not self.is_submitted:
return None
return min(
(ear.start_time for ear in self.all_EARs if ear.start_time), default=None
)
@property
@TimeIt.decorator
def end_time(self) -> None | datetime:
"""The last known end time of any EAR in this jobscript."""
if not self.is_submitted:
return None
return max((ear.end_time for ear in self.all_EARs if ear.end_time), default=None)
@property
def submit_time(self):
"""
When the jobscript was submitted, if known.
"""
if self._submit_time_obj is None:
if _submit_time := self.at_submit_metadata["submit_time"]:
self._submit_time_obj = parse_timestamp(
_submit_time, self.workflow.ts_fmt
)
return self._submit_time_obj
@property
def submit_hostname(self) -> str | None:
"""
Where the jobscript was submitted, if known.
"""
return self._submit_hostname
@property
def submit_machine(self) -> str | None:
"""
Description of what the jobscript was submitted to, if known.
"""
return self._submit_machine
@property
def shell_idx(self):
return self._shell_idx
@property
def submit_cmdline(self) -> list[str] | None:
"""
The command line used to submit the jobscript, if known.
"""
return self.at_submit_metadata["submit_cmdline"]
@property
def scheduler_job_ID(self) -> str | None:
"""
The job ID from the scheduler, if known.
"""
return self.at_submit_metadata["scheduler_job_ID"]