Skip to content

Commit 9586d44

Browse files
authored
Add default behaviour for jobs that don't have some keys and handle the non-existence of some json_vals (#1017)
* Add default behaviour for jobs that don't have some keys and handle the non-existence of some json_vals * Remove get from command * cap add and drop should have values * removing the try/except since it was added in a separate PR * maybe not all gets are necessary * Adding comments
1 parent a7407f8 commit 9586d44

File tree

2 files changed

+36
-19
lines changed

2 files changed

+36
-19
lines changed

tron/core/action.py

+27-15
Original file line numberDiff line numberDiff line change
@@ -115,6 +115,13 @@ def serialize_namedtuple(obj):
115115
return obj
116116

117117
try:
118+
# NOTE: you'll notice that there's a lot of get() accesses of state_data for
119+
# pretty common fields - this is because ActionCommandConfig is used by more
120+
# than one type of ActionRun (Kubernetes, Mesos, SSH) and these generally look
121+
# different. Alternatively, some of these fields are used by KubernetesActionRun
122+
# but are relatively new and older runs do not have data for them.
123+
# Once we get rid of the SSH and Mesos code as well as older runs in DynamoDB,
124+
# we'll likely be able to clean this up.
118125
return json.dumps(
119126
{
120127
"command": state_data["command"],
@@ -124,30 +131,35 @@ def serialize_namedtuple(obj):
124131
"cap_add": state_data["cap_add"],
125132
"cap_drop": state_data["cap_drop"],
126133
"constraints": [
127-
serialize_namedtuple(constraint) for constraint in state_data["constraints"]
134+
serialize_namedtuple(constraint) for constraint in state_data.get("constraints", [])
128135
], # convert each ConfigConstraint to dictionary, so it would be a list of dicts
129136
"docker_image": state_data["docker_image"],
130137
"docker_parameters": [
131-
serialize_namedtuple(parameter) for parameter in state_data["docker_parameters"]
138+
serialize_namedtuple(parameter) for parameter in state_data.get("docker_parameters", [])
132139
],
133-
"env": state_data["env"],
134-
"secret_env": {key: serialize_namedtuple(val) for key, val in state_data["secret_env"].items()},
135-
"secret_volumes": [serialize_namedtuple(volume) for volume in state_data["secret_volumes"]],
140+
"env": state_data.get("env", {}),
141+
"secret_env": {
142+
key: serialize_namedtuple(val) for key, val in state_data.get("secret_env", {}).items()
143+
},
144+
"secret_volumes": [serialize_namedtuple(volume) for volume in state_data.get("secret_volumes", [])],
136145
"projected_sa_volumes": [
137-
serialize_namedtuple(volume) for volume in state_data["projected_sa_volumes"]
146+
serialize_namedtuple(volume) for volume in state_data.get("projected_sa_volumes", [])
138147
],
139148
"field_selector_env": {
140-
key: serialize_namedtuple(val) for key, val in state_data["field_selector_env"].items()
149+
key: serialize_namedtuple(val) for key, val in state_data.get("field_selector_env", {}).items()
141150
},
142-
"extra_volumes": [serialize_namedtuple(volume) for volume in state_data["extra_volumes"]],
143-
"node_selectors": state_data["node_selectors"],
144-
"node_affinities": [serialize_namedtuple(affinity) for affinity in state_data["node_affinities"]],
145-
"labels": state_data["labels"],
146-
"annotations": state_data["annotations"],
147-
"service_account_name": state_data["service_account_name"],
148-
"ports": state_data["ports"],
151+
"extra_volumes": [serialize_namedtuple(volume) for volume in state_data.get("extra_volumes", [])],
152+
"node_selectors": state_data.get("node_selectors", {}),
153+
"node_affinities": [
154+
serialize_namedtuple(affinity) for affinity in state_data.get("node_affinities", [])
155+
],
156+
"labels": state_data.get("labels", {}),
157+
"annotations": state_data.get("annotations", {}),
158+
"service_account_name": state_data.get("service_account_name"),
159+
"ports": state_data.get("ports", []),
149160
"topology_spread_constraints": [
150-
serialize_namedtuple(constraint) for constraint in state_data["topology_spread_constraints"]
161+
serialize_namedtuple(constraint)
162+
for constraint in state_data.get("topology_spread_constraints", [])
151163
],
152164
}
153165
)

tron/core/actionrun.py

+9-4
Original file line numberDiff line numberDiff line change
@@ -185,8 +185,13 @@ def to_json(state_data: dict) -> Optional[str]:
185185
"end_time": state_data["end_time"].isoformat() if state_data["end_time"] else None,
186186
"rendered_command": state_data["rendered_command"],
187187
"exit_status": state_data["exit_status"],
188-
"mesos_task_id": state_data["mesos_task_id"],
189-
"kubernetes_task_id": state_data["kubernetes_task_id"],
188+
# NOTE: mesos_task_id can be deleted once we delete all Mesos
189+
# code and run data - and kubernetes_task_id can then be
190+
# accessed unconditionally :)
191+
# (see note in ActionCommandConfig::to_json() for more
192+
# information about why we do this)
193+
"mesos_task_id": state_data.get("mesos_task_id"),
194+
"kubernetes_task_id": state_data.get("kubernetes_task_id"),
190195
}
191196
)
192197
except KeyError:
@@ -811,12 +816,12 @@ def to_json(state_data: dict) -> Optional[str]:
811816
"job_run_id": state_data["job_run_id"],
812817
"action_name": state_data["action_name"],
813818
"state": state_data["state"],
814-
"original_command": state_data["original_command"],
819+
"original_command": state_data.get("original_command"),
815820
"start_time": state_data["start_time"].isoformat() if state_data["start_time"] else None,
816821
"end_time": state_data["end_time"].isoformat() if state_data["end_time"] else None,
817822
"node_name": state_data["node_name"],
818823
"exit_status": state_data["exit_status"],
819-
"attempts": [ActionRunAttempt.to_json(attempt) for attempt in state_data["attempts"]],
824+
"attempts": [ActionRunAttempt.to_json(attempt) for attempt in state_data.get("attempts", [])],
820825
"retries_remaining": state_data["retries_remaining"],
821826
"retries_delay": (
822827
state_data["retries_delay"].total_seconds() if state_data["retries_delay"] is not None else None

0 commit comments

Comments
 (0)