#
# Copyright (c) Granulate. All rights reserved.
# Licensed under the AGPL3 License. See LICENSE.md in the project root for license information.
#

import json
import logging
import os
import time
from typing import Dict, Optional

import requests

from granulate_utils.exceptions import DatabricksJobNameDiscoverException

HOST_KEY_NAME = "*.sink.ganglia.host"
DATABRICKS_METRICS_PROP_PATH = "/databricks/spark/conf/metrics.properties"
CLUSTER_TAGS_KEY = "spark.databricks.clusterUsageTags.clusterAllTags"
SPARKUI_APPS_URL = "http://{}/api/v1/applications"
REQUEST_TIMEOUT = 5
JOB_NAME_KEY = "RunName"
DEFAULT_WEBUI_PORT = 40001
DATABRICKS_JOBNAME_TIMEOUT_S = 2 * 60
RETRY_INTERVAL_S = 1
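# DATABRICKS_JOBNAME_TIMEOUT_S and RETRY_INTERVAL_S bound the discovery loop below:
# poll every second, giving up after two minutes (the cluster may still be provisioning).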


class DatabricksClient:
    def __init__(self, logger: logging.LoggerAdapter) -> None:
        self.logger = logger
        self.logger.debug("Getting Databricks job name")
        self.job_name = self.get_job_name()
        if self.job_name is None:
            self.logger.warning(
                "Failed initializing Databricks client. Databricks job name will not be included in ephemeral clusters."
            )
        else:
            self.logger.debug(f"Got Databricks job name: {self.job_name}")

    def _request_get(self, url: str) -> requests.Response:
        resp = requests.get(url, timeout=REQUEST_TIMEOUT)
        resp.raise_for_status()
        return resp

    @staticmethod
    def get_webui_address() -> Optional[str]:
        with open(DATABRICKS_METRICS_PROP_PATH) as f:
            properties = f.read()
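        # metrics.properties is a Java-style properties file (key=value per line); parse it
        # and look up the Ganglia sink host, which doubles as the Spark web UI host here.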
        try:
            host = dict([line.split("=", 1) for line in properties.splitlines()])[HOST_KEY_NAME]
        except KeyError as e:
            if e.args[0] == HOST_KEY_NAME:
                # Might happen while provisioning the cluster, retry.
                return None
            raise DatabricksJobNameDiscoverException(f"Failed to get Databricks webui address {properties=}") from e
        except Exception as e:
            raise DatabricksJobNameDiscoverException(f"Failed to get Databricks webui address {properties=}") from e
        return f"{host}:{DEFAULT_WEBUI_PORT}"

    def get_job_name(self) -> Optional[str]:
        # Retry in case of a connection error, as the metrics server might not be up yet.
        start_time = time.monotonic()
        while time.monotonic() - start_time < DATABRICKS_JOBNAME_TIMEOUT_S:
            try:
                if cluster_metadata := self._cluster_all_tags_metadata():
                    name = self._get_name_from_metadata(cluster_metadata)
                    if name:
                        self.logger.debug("Found name in metadata", job_name=name, cluster_metadata=cluster_metadata)
                        return name
                    else:
                        self.logger.debug("Failed to extract name from metadata", cluster_metadata=cluster_metadata)
                        return None
                else:
                    # No job name yet, retry.
                    time.sleep(RETRY_INTERVAL_S)
            except DatabricksJobNameDiscoverException:
                self.logger.exception("Failed to get Databricks job name")
                return None
            except Exception:
                self.logger.exception("Generic exception was raised during Spark job name discovery")
                return None
        self.logger.info("Timed out getting the Databricks job name, continuing...")
        return None

    @staticmethod
    def _get_name_from_metadata(metadata: Dict[str, str]) -> Optional[str]:
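        # The RunName tag is free-form, so normalize it (spaces -> hyphens, lowercased).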
        if JOB_NAME_KEY in metadata:
            return str(metadata[JOB_NAME_KEY]).replace(" ", "-").lower()
        return None

    def _cluster_all_tags_metadata(self) -> Optional[Dict[str, str]]:
        """
        Returns the `spark.databricks.clusterUsageTags.clusterAllTags` tags as a `Dict`.
        """
        if not os.path.isfile(DATABRICKS_METRICS_PROP_PATH):
            # We want to retry in case the cluster is still initializing, and the file is not yet deployed.
            return None
        webui = self.get_webui_address()
        if webui is None:
            # retry
            return None
        # The API used: https://spark.apache.org/docs/latest/monitoring.html#rest-api
        apps_url = SPARKUI_APPS_URL.format(webui)
        self.logger.debug("Databricks SparkUI address", apps_url=apps_url)
        try:
            response = self._request_get(apps_url)
        except requests.exceptions.RequestException:
            # Request might fail in cases where the cluster is still initializing, retrying.
            return None
        try:
            apps = response.json()
        except Exception as e:
            if "Spark is starting up. Please wait a while until it's ready" in response.text:
                # Spark is still initializing, retrying.
                # https://github.com/apache/spark/blob/38c41c/core/src/main/scala/org/apache/spark/ui/SparkUI.scala#L64
                return None
            else:
                raise DatabricksJobNameDiscoverException(
                    f"Failed to parse the apps URL response, {response.text=}"
                ) from e
        if len(apps) == 0:
            # apps might be empty because of initialization, retrying.
            self.logger.debug("No apps yet, retrying.")
            return None

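        # A Databricks cluster is expected to run a single Spark application, so query the first app.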
        env_url = f"{apps_url}/{apps[0]['id']}/environment"
        try:
            response = self._request_get(env_url)
        except Exception as e:
            # No failure is expected here: the `environment` URI should be accessible once apps are running.
            raise DatabricksJobNameDiscoverException(f"Environment request failed {env_url=}") from e
        try:
            env = response.json()
        except Exception as e:
            raise DatabricksJobNameDiscoverException(f"Failed to parse environment response {response.text=}") from e
        props = env.get("sparkProperties")
        if props is None:
            raise DatabricksJobNameDiscoverException(f"sparkProperties was not found in {env=}")
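        # sparkProperties is a list of [key, value] pairs. Note the for/else below:
        # the else clause raises only if the loop completes without returning,
        # i.e. CLUSTER_TAGS_KEY was not present.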
        for prop in props:
            if prop[0] == CLUSTER_TAGS_KEY:
                try:
                    all_tags_value = json.loads(prop[1])
                except Exception as e:
                    raise DatabricksJobNameDiscoverException(f"Failed to parse {prop=}") from e
                return {cluster_all_tag["key"]: cluster_all_tag["value"] for cluster_all_tag in all_tags_value}
        else:
            raise DatabricksJobNameDiscoverException(f"Failed to find {CLUSTER_TAGS_KEY=} in {props=}")
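
# Minimal usage sketch (illustrative only; `get_logger_adapter` is a hypothetical
# stand-in for the caller's logger factory -- the debug calls above pass extra
# keyword fields, so a plain logging.LoggerAdapter would not accept them):
#
#     logger = get_logger_adapter(__name__)
#     client = DatabricksClient(logger)
#     if client.job_name is not None:
#         ...  # e.g., tag the profiling session with client.job_name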