     CyclePoint,
     GenericResponse,
     ID,
+    SortArgs,
+    Task,
     Mutations,
     Queries,
+    process_resolver_info,
+    STRIP_NULL_DEFAULT,
     Subscriptions,
     WorkflowID,
     _mut_field,
-    sstrip
+    sstrip,
+    get_nodes_all
 )
 from cylc.uiserver.resolvers import Resolvers
 
@@ -254,6 +259,227 @@ class Arguments:
     result = GenericScalar()
 
 
+async def get_jobs(root, info, **kwargs):
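+    """Resolver for job queries.
+
+    If 'live' is true, return job nodes from the in-memory data store;
+    otherwise, read job statistics back from the workflow databases.
+    """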
+    if kwargs['live']:
+        return await get_nodes_all(root, info, **kwargs)
+
+    _, field_ids = process_resolver_info(root, info, kwargs)
+
+    # NOTE: kwargs is a dict, so test key membership (hasattr() on a
+    # dict always returns False for its keys)
+    if 'id' in kwargs:
+        kwargs['ids'] = [kwargs.get('id')]
+    if field_ids:
+        if isinstance(field_ids, str):
+            field_ids = [field_ids]
+        elif isinstance(field_ids, dict):
+            field_ids = list(field_ids)
+        kwargs['ids'] = field_ids
+    elif field_ids == []:
+        return []
+
+    for arg in ('ids', 'exids'):
+        # live objects can be represented by a universal ID
+        kwargs[arg] = [Tokens(n_id, relative=True) for n_id in kwargs[arg]]
+    kwargs['workflows'] = [
+        Tokens(w_id) for w_id in kwargs['workflows']]
+    kwargs['exworkflows'] = [
+        Tokens(w_id) for w_id in kwargs['exworkflows']]
+
+    return await list_jobs(kwargs)
+
+
+async def list_jobs(args):
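+    """Fetch job data from each workflow's public database."""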
+    if not args['workflows']:
+        raise Exception('At least one workflow must be provided.')
+    from cylc.flow.rundb import CylcWorkflowDAO
+    from cylc.flow.pathutil import get_workflow_run_dir
+    from cylc.flow.workflow_files import WorkflowFiles
+    jobs = []
+    for workflow in args['workflows']:
+        db_file = get_workflow_run_dir(
+            workflow['workflow'],
+            WorkflowFiles.LogDir.DIRNAME,
+            "db"
+        )
+        with CylcWorkflowDAO(db_file, is_public=True) as dao:
+            conn = dao.connect()
+            jobs.extend(make_query(conn, workflow))
+    return jobs
+
+
+def make_query(conn, workflow):
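+    """Compute per-task job statistics from the workflow database.
+
+    For each task name this aggregates queue, run and total times of
+    successful jobs: min/mean/max, quartiles (via NTILE) and the mean
+    of squares, from which the standard deviation is derived.
+    """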
+
+    # TODO: support all arguments including states
+    # https://github.com/cylc/cylc-uiserver/issues/440
+    tasks = []
+    for row in conn.execute('''
+SELECT
+    name,
+    cycle,
+    submit_num,
+    submit_status,
+    time_run,
+    time_run_exit,
+    job_id,
+    platform_name,
+    time_submit,
+
+    -- Calculate Queue time stats
+    MIN(queue_time) AS min_queue_time,
+    AVG(queue_time) AS mean_queue_time,
+    MAX(queue_time) AS max_queue_time,
+    AVG(queue_time * queue_time) AS mean_squares_queue_time,
+    MAX(CASE WHEN queue_time_quartile = 1 THEN queue_time END)
+        queue_quartile_1,
+    MAX(CASE WHEN queue_time_quartile = 2 THEN queue_time END)
+        queue_quartile_2,
+    MAX(CASE WHEN queue_time_quartile = 3 THEN queue_time END)
+        queue_quartile_3,
+
+    -- Calculate Run time stats
+    MIN(run_time) AS min_run_time,
+    AVG(run_time) AS mean_run_time,
+    MAX(run_time) AS max_run_time,
+    AVG(run_time * run_time) AS mean_squares_run_time,
+    MAX(CASE WHEN run_time_quartile = 1 THEN run_time END) run_quartile_1,
+    MAX(CASE WHEN run_time_quartile = 2 THEN run_time END) run_quartile_2,
+    MAX(CASE WHEN run_time_quartile = 3 THEN run_time END) run_quartile_3,
+
+    -- Calculate Total time stats
+    MIN(total_time) AS min_total_time,
+    AVG(total_time) AS mean_total_time,
+    MAX(total_time) AS max_total_time,
+    AVG(total_time * total_time) AS mean_squares_total_time,
+    MAX(CASE WHEN total_time_quartile = 1 THEN total_time END)
+        total_quartile_1,
+    MAX(CASE WHEN total_time_quartile = 2 THEN total_time END)
+        total_quartile_2,
+    MAX(CASE WHEN total_time_quartile = 3 THEN total_time END)
+        total_quartile_3,
+
+    COUNT(*) AS n
+
+FROM
+    (SELECT
+        *,
+        NTILE (4) OVER (PARTITION BY name ORDER BY queue_time)
+            queue_time_quartile,
+        NTILE (4) OVER (PARTITION BY name ORDER BY run_time)
+            run_time_quartile,
+        NTILE (4) OVER (PARTITION BY name ORDER BY total_time)
+            total_time_quartile
+    FROM
+        (SELECT
+            *,
+            STRFTIME('%s', time_run_exit) -
+                STRFTIME('%s', time_submit) AS total_time,
+            STRFTIME('%s', time_run_exit) -
+                STRFTIME('%s', time_run) AS run_time,
+            STRFTIME('%s', time_run) -
+                STRFTIME('%s', time_submit) AS queue_time
+        FROM
+            task_jobs))
+WHERE
+    run_status = 0
+GROUP BY
+    name;
+'''):
+        tasks.append({
+            'id': workflow.duplicate(
+                cycle=row[1],
+                task=row[0],
+                job=row[2]
+            ),
+            'name': row[0],
+            'cycle_point': row[1],
+            'submit_num': row[2],
+            'state': row[3],
+            'started_time': row[4],
+            'finished_time': row[5],
+            'job_ID': row[6],
+            'platform': row[7],
+            'submitted_time': row[8],
+            # Queue time stats
+            'min_queue_time': row[9],
+            'mean_queue_time': row[10],
+            'max_queue_time': row[11],
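+            # standard deviation via Var(X) = E[X^2] - (E[X])^2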
+            'std_dev_queue_time': (row[12] - row[10]**2)**0.5,
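+            # NTILE leaves the upper quartiles NULL for small samples;
+            # fall back to the first quartile value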
+            'queue_quartiles': [row[13],
+                                row[13] if row[14] is None else row[14],
+                                row[13] if row[15] is None else row[15]],
+            # Run time stats
+            'min_run_time': row[16],
+            'mean_run_time': row[17],
+            'max_run_time': row[18],
+            'std_dev_run_time': (row[19] - row[17]**2)**0.5,
+            'run_quartiles': [row[20],
+                              row[20] if row[21] is None else row[21],
+                              row[20] if row[22] is None else row[22]],
+            # Total time stats
+            'min_total_time': row[23],
+            'mean_total_time': row[24],
+            'max_total_time': row[25],
+            'std_dev_total_time': (row[26] - row[24]**2)**0.5,
+            'total_quartiles': [row[27],
+                                row[27] if row[28] is None else row[28],
+                                row[27] if row[29] is None else row[29]],
+
+            'count': row[30]
+        })
+
+    return tasks
+
+
+class UISTask(Task):
+
+    platform = graphene.String()
+    min_total_time = graphene.Int()
+    mean_total_time = graphene.Int()
+    max_total_time = graphene.Int()
+    std_dev_total_time = graphene.Int()
+    queue_quartiles = graphene.List(
+        graphene.Int,
+        description=sstrip('''
+            List containing the first, second,
+            third and fourth quartile queue times.'''))
+    min_queue_time = graphene.Int()
+    mean_queue_time = graphene.Int()
+    max_queue_time = graphene.Int()
+    std_dev_queue_time = graphene.Int()
+    run_quartiles = graphene.List(
+        graphene.Int,
+        description=sstrip('''
+            List containing the first, second,
+            third and fourth quartile run times.'''))
+    min_run_time = graphene.Int()
+    mean_run_time = graphene.Int()
+    max_run_time = graphene.Int()
+    std_dev_run_time = graphene.Int()
+    total_quartiles = graphene.List(
+        graphene.Int,
+        description=sstrip('''
+            List containing the first, second,
+            third and fourth quartile total times.'''))
+    count = graphene.Int()
+
+
+class UISQueries(Queries):
+
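+    # Example graphiql task stats query (graphene camel-cases the
+    # field names; 'live: false' reads from the workflow database):
+    # query {
+    #   tasks(live: false, workflows: ["my_workflow"]) {
+    #     name
+    #     meanRunTime
+    #     stdDevRunTime
+    #     count
+    #   }
+    # }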
+    tasks = graphene.List(
+        UISTask,
+        description=Task._meta.description,
+        live=graphene.Boolean(default_value=True),
+        strip_null=STRIP_NULL_DEFAULT,
+        resolver=get_jobs,
+        workflows=graphene.List(ID, default_value=[]),
+        exworkflows=graphene.List(ID, default_value=[]),
+        ids=graphene.List(ID, default_value=[]),
+        exids=graphene.List(ID, default_value=[]),
+        mindepth=graphene.Int(default_value=-1),
+        maxdepth=graphene.Int(default_value=-1),
+        sort=SortArgs(default_value=None),
+    )
+
+
 class UISSubscriptions(Subscriptions):
     # Example graphiql workflow log subscription:
     # subscription {
@@ -320,7 +546,7 @@ class UISMutations(Mutations):
 
 
 schema = graphene.Schema(
-    query=Queries,
+    query=UISQueries,
     subscription=UISSubscriptions,
     mutation=UISMutations
 )
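A standalone sketch of the NTILE/variance pattern used by make_query above,
runnable against an in-memory SQLite database (assumptions: a toy single-stat
task_jobs table, not the real cylc schema; SQLite >= 3.25 for window
functions):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE task_jobs (name TEXT, queue_time REAL)')
conn.executemany(
    'INSERT INTO task_jobs VALUES (?, ?)',
    [('a', t) for t in (1, 2, 3, 4, 10)],
)
mn, mean, mx, mean_sq, q2 = conn.execute('''
SELECT
    MIN(queue_time),
    AVG(queue_time),
    MAX(queue_time),
    AVG(queue_time * queue_time),
    -- second quartile boundary, as in make_query
    MAX(CASE WHEN queue_time_quartile = 2 THEN queue_time END)
FROM
    (SELECT
        *,
        NTILE (4) OVER (PARTITION BY name ORDER BY queue_time)
            queue_time_quartile
    FROM task_jobs)
GROUP BY name;
''').fetchone()

# standard deviation recovered from Var(X) = E[X^2] - (E[X])^2,
# exactly as in the resolver: (mean_squares - mean**2) ** 0.5
std = (mean_sq - mean ** 2) ** 0.5
print(mn, mean, mx, q2, round(std, 3))  # -> 1.0 4.0 10.0 3.0 3.162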