From 3988d315270e5df970f82aecd0891ad2990bbb8e Mon Sep 17 00:00:00 2001
From: Ben Bettridge
Date: Thu, 7 Jul 2022 21:36:43 +1200
Subject: [PATCH 1/5] Add authKey secret, use initContainer to set sysctl

---
 chart/templates/deployment.yaml | 10 ++++++++++
 chart/templates/secret.yaml     | 11 +++++++++++
 chart/values.yaml               |  5 ++++-
 3 files changed, 25 insertions(+), 1 deletion(-)
 create mode 100644 chart/templates/secret.yaml

diff --git a/chart/templates/deployment.yaml b/chart/templates/deployment.yaml
index 94d92c7..74dc6e3 100644
--- a/chart/templates/deployment.yaml
+++ b/chart/templates/deployment.yaml
@@ -27,6 +27,16 @@ spec:
       serviceAccountName: {{ include "tailscale-svc-lb.serviceAccountName" . }}
       securityContext:
         {{- toYaml .Values.controller.podSecurityContext | nindent 8 }}
+      initContainers:
+        - name: {{ .Chart.Name }}-sysctl
+          securityContext:
+            {{- toYaml .Values.controller.securityContext | nindent 12 }}
+          image: "{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag | default .Chart.AppVersion }}"
+          imagePullPolicy: {{ .Values.controller.image.pullPolicy }}
+          command:
+            - sh
+            - -c
+            - sysctl -w net.ipv4.ip_forward=1
       containers:
         - name: {{ .Chart.Name }}
           securityContext:
diff --git a/chart/templates/secret.yaml b/chart/templates/secret.yaml
new file mode 100644
index 0000000..4f3c795
--- /dev/null
+++ b/chart/templates/secret.yaml
@@ -0,0 +1,11 @@
+{{- if .Values.tailscaleAuthKey }}
+apiVersion: v1
+kind: Secret
+type: Opaque
+metadata:
+  name: "tailscale-svc-lb"
+  labels:
+    {{- include "tailscale-svc-lb.labels" . | nindent 4 }}
+stringData:
+  ts-auth-key: "{{ .Values.tailscaleAuthKey }}"
+{{- end }}
diff --git a/chart/values.yaml b/chart/values.yaml
index 8ae4b05..bc98bca 100644
--- a/chart/values.yaml
+++ b/chart/values.yaml
@@ -5,6 +5,9 @@
 nameOverride: ""
 fullnameOverride: ""
 
+# (Optional) Auth Key to use when starting tailscale. Automates the login process.
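+# Example (hypothetical key shown; any reusable or ephemeral key from the
+# Tailscale admin console works here): tailscaleAuthKey: "tskey-abcdef1234567890"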
+tailscaleAuthKey: ""
+
 controller:
   image:
     repository: clrxbl/tailscale-svc-lb-controller
@@ -65,4 +68,4 @@ leaderElector:
   image:
     repository: gcr.io/google_containers/leader-elector
     pullPolicy: IfNotPresent
-    tag: "0.5"
\ No newline at end of file
+    tag: "0.5"

From e60ce7402dad28c9ac8dfb0e6680f53b71453857 Mon Sep 17 00:00:00 2001
From: Ben Bettridge
Date: Sun, 31 Jul 2022 00:06:36 +1200
Subject: [PATCH 2/5] Refactor chart, add reconciliation, additional
 environment configuration variables, optionally deploy as DaemonSet/Deployment

---
 Dockerfile                                    |  11 +-
 Makefile                                      |  20 ++
 README.md                                     |  47 ++-
 chart/service.yaml                            |  14 +
 chart/templates/deployment.yaml               |  62 +++-
 chart/templates/secret.yaml                   |  11 +
 chart/values.yaml                             |  87 +++++-
 runtime/run.sh                                |  39 ++-
 src/tailscale_svc_lb_controller/config.py     |  90 ++++++
 .../examples/pod.yaml                         |  11 +
 .../examples/service.yaml                     |  14 +
 src/tailscale_svc_lb_controller/helpers.py    |  76 +++++
 src/tailscale_svc_lb_controller/main.py       | 294 ++----------------
 .../resources/base.py                         | 136 ++++++++
 .../resources/daemonset.py                    |  82 +++++
 .../resources/deployment.py                   |  82 +++++
 .../resources/role.py                         |  92 ++++++
 .../resources/role_binding.py                 |  88 ++++++
 .../resources/secret.py                       |  75 +++++
 .../resources/service_account.py              |  73 +++++
 .../tailscale_proxy.py                        |  61 ++++
 .../test_tailscale_operator.py                |  89 ++++++
 22 files changed, 1253 insertions(+), 301 deletions(-)
 create mode 100644 Makefile
 create mode 100644 chart/service.yaml
 create mode 100644 chart/templates/secret.yaml
 create mode 100755 src/tailscale_svc_lb_controller/config.py
 create mode 100644 src/tailscale_svc_lb_controller/examples/pod.yaml
 create mode 100644 src/tailscale_svc_lb_controller/examples/service.yaml
 create mode 100644 src/tailscale_svc_lb_controller/helpers.py
 create mode 100644 src/tailscale_svc_lb_controller/resources/base.py
 create mode 100644 src/tailscale_svc_lb_controller/resources/daemonset.py
 create mode 100644 src/tailscale_svc_lb_controller/resources/deployment.py
 create mode 100644 src/tailscale_svc_lb_controller/resources/role.py
 create mode 100644 src/tailscale_svc_lb_controller/resources/role_binding.py
 create mode 100644 src/tailscale_svc_lb_controller/resources/secret.py
 create mode 100644 src/tailscale_svc_lb_controller/resources/service_account.py
 create mode 100644 src/tailscale_svc_lb_controller/tailscale_proxy.py
 create mode 100644 src/tailscale_svc_lb_controller/test_tailscale_operator.py

diff --git a/Dockerfile b/Dockerfile
index f11dc94..6dc7d69 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -14,14 +14,15 @@ RUN apk add --no-cache --virtual .python_deps build-base python3-dev libffi-dev
     mkdir -p /app/src /app && \
     poetry config virtualenvs.create false
 
-ADD src /app/src
 ADD pyproject.toml /app/pyproject.toml
-
-WORKDIR /app
-ENV PYTHONPATH=${PYTHONPATH}:/app
-
 RUN apk add --no-cache --virtual .build_deps gcc g++ && \
+    cd /app && \
     poetry install --no-dev && \
     apk del .build_deps
 
+ADD src /app/src
+
+WORKDIR /app
+ENV PYTHONPATH=${PYTHONPATH}:/app
+
 CMD ["kopf", "run", "--all-namespaces", "--liveness=http://0.0.0.0:8080/health", "/app/src/tailscale_svc_lb_controller/main.py"]
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..6b991e3
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,20 @@
+
+
+REPOSITORY=clrxbl/tailscale-svc-lb
+TAG=latest
+
+build: build-controller build-runtime
+
+push: push-controller push-runtime
+
+build-controller:
+	docker build . -t $(REPOSITORY)-controller:$(TAG)
+
+push-controller: build-controller
+	docker push $(REPOSITORY)-controller:$(TAG)
+
+build-runtime:
+	cd runtime && docker build . -t $(REPOSITORY):$(TAG)
+
+push-runtime: build-runtime
+	docker push $(REPOSITORY):$(TAG)
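+
+# Example usage (tag value is illustrative): `make push TAG=v0.2.0`
+# builds and pushes both the controller and runtime images.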
diff --git a/README.md b/README.md
index 4068686..09a253d 100644
--- a/README.md
+++ b/README.md
@@ -12,21 +12,49 @@ It deploys the controller & any svc-lb pods in the namespace where it's installe
 
 Once the controller is deployed, create a LoadBalancer service with the loadBalancerClass set to "svc-lb.tailscale.iptables.sh/lb".
 
-There should be a DaemonSet created in the controller's namespace for the newly-created LoadBalancer service. View the logs of the leader-elected pod and click the login.tailscale.com link to authenticate. You only have to do this once per service.
+There should be a Deployment (or DaemonSet) created in the controller's namespace for the newly-created LoadBalancer service. View the logs of the leader-elected pod and click the login.tailscale.com link to authenticate. You only have to do this once per service.
 
 This can be automated by creating a secret in the controller's namespace called `tailscale-svc-lb` with the key `ts-auth-key` and the value being your Tailscale registration token.
 
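+For example, a minimal Service the controller will pick up (this mirrors `examples/service.yaml` from this patch; the `kuard` app is purely illustrative):
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: kuard
+spec:
+  selector:
+    run: kuard
+  ports:
+    - protocol: TCP
+      port: 80
+      targetPort: 8080
+  type: LoadBalancer
+  allocateLoadBalancerNodePorts: false
+  loadBalancerClass: "svc-lb.tailscale.iptables.sh/lb"
+```
+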
+## Configuration Variables
+
+All configuration options are supplied using environment variables.
+
+| Variable                              | Description                                                                                                                         | Default                                       |
+|---------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------|
+| `RESOURCE_PREFIX`                     | Prefix to prepend to the service name when creating proxy resources                                                                | `ts-`                                         |
+| `SECRET_NAME`                         | Name of the secret containing the `ts-auth-key` value                                                                              | `tailscale-svc-lb`                            |
+| `LOAD_BALANCER_CLASS`                 | LoadBalancerClass that this controller will implement                                                                              | `svc-lb.tailscale.iptables.sh/lb`             |
+| `TS_PROXY_NODE_SELECTOR_LABEL`        | Label to use when selecting nodes to run Tailscale on. The value of this label should be `true`                                    | None                                          |
+| `IMAGE_PULL_SECRETS`                  | A semicolon-separated list of secret names to use as the `imagePullSecrets` for the Tailscale Proxy                                | None                                          |
+| `DEPLOYMENT_TYPE`                     | The type of deployment to use for the Tailscale Proxy. Can be one of: `Deployment`, `DaemonSet`                                    | `DaemonSet`                                   |
+| `TS_PROXY_NAMESPACE`                  | Namespace all of the Tailscale Proxies will be created in                                                                          | `default`                                     |
+| `TS_PROXY_REPLICA_COUNT`              | The number of replicas to deploy for each Tailscale Proxy instance. Only used if `DEPLOYMENT_TYPE` is `Deployment`                 | `2`                                           |
+| `TS_PROXY_RUNTIME_IMAGE`              | Image to use as the Tailscale Proxy Runtime container                                                                              | `clrxbl/tailscale-svc-lb-runtime:latest`      |
+| `TS_PROXY_RUNTIME_IMAGE_PULL_POLICY`  | ImagePullPolicy to use for the Tailscale Proxy Runtime container                                                                   | `IfNotPresent`                                |
+| `TS_PROXY_RUNTIME_REQUEST_CPU`        | CPU Request for the Tailscale Proxy Runtime container                                                                              | None                                          |
+| `TS_PROXY_RUNTIME_REQUEST_MEM`        | Memory Request for the Tailscale Proxy Runtime container                                                                           | None                                          |
+| `TS_PROXY_RUNTIME_LIMIT_CPU`          | CPU Limit for the Tailscale Proxy Runtime container                                                                                | None                                          |
+| `TS_PROXY_RUNTIME_LIMIT_MEM`          | Memory Limit for the Tailscale Proxy Runtime container                                                                             | None                                          |
+| `LEADER_ELECTOR_IMAGE`                | Image to use as the Leader Elector container                                                                                       | `gcr.io/google_containers/leader-elector:0.5` |
+| `LEADER_ELECTOR_IMAGE_PULL_POLICY`    | ImagePullPolicy to use for the Leader Elector container                                                                            | `IfNotPresent`                                |
+| `LEADER_ELECTOR_REQUEST_CPU`          | CPU Request for the Leader Elector container                                                                                       | None                                          |
+| `LEADER_ELECTOR_REQUEST_MEM`          | Memory Request for the Leader Elector container                                                                                    | None                                          |
+| `LEADER_ELECTOR_LIMIT_CPU`            | CPU Limit for the Leader Elector container                                                                                         | None                                          |
+| `LEADER_ELECTOR_LIMIT_MEM`            | Memory Limit for the Leader Elector container                                                                                      | None                                          |
+| `TS_HOSTNAME_FROM_SERVICE`            | If set to `true`, the hostname of the Tailscale Proxy will be generated from the namespace and service name of the proxied service | `false`                                       |
+| `TS_HOSTNAME_FROM_SERVICE_SUFFIX`     | An optional hostname suffix to add to automatically generated hostnames. Only applies if `TS_HOSTNAME_FROM_SERVICE` is `true`      | None                                          |
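+
+For example, to run each proxy as a three-replica Deployment in a dedicated namespace, the controller container could be configured with (illustrative values, not the defaults):
+
+```yaml
+env:
+  - name: DEPLOYMENT_TYPE
+    value: "Deployment"
+  - name: TS_PROXY_REPLICA_COUNT
+    value: "3"
+  - name: TS_PROXY_NAMESPACE
+    value: "tailscale"
+```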
+
 ## How it works
 
 **On new LoadBalancer service:**
-1. Look for LoadBalancer services with our loadbalancerclass
-2. Look for nodes with the label `svc-lb.tailscale.iptables.sh/deploy=true`
-3. Deploy a DaemonSet with the name: `ts-${SVC_NAME}` and our custom Docker image containing tailscaled.
-4. Let the DaemonSet container run tailscaled, once IP is acquired, update tailscaled's secret with the Tailscale IP.
+1. Look for LoadBalancer services with our loadBalancerClass (Default: `svc-lb.tailscale.iptables.sh/lb`)
+2. Look for nodes with our nodeSelectorLabel (Default: `svc-lb.tailscale.iptables.sh/deploy`) with the value `true`
+3. Deploy a Deployment or DaemonSet with the name: `${RESOURCE_PREFIX}${SVC_NAME}` and our custom Docker image containing tailscaled.
+4. Let the Deployment or DaemonSet run tailscaled, once IP is acquired, update tailscaled's secret with the Tailscale IP.
 5. Retrieve IP from secret/configmap, update LoadBalancer service with ingress IP (Tailscale IP)
 
-Each `tailscale-svc-lb-runtime` DaemonSet runs the `leader-elector` sidecar to automatically elect a leader using the Kubernetes leader election system. `tailscaled` only works properly when run on one pod at a time, hence this leader election system.
+Each `tailscale-svc-lb-runtime` DaemonSet/Deployment runs the `leader-elector` sidecar to automatically elect a leader using the Kubernetes leader election system. `tailscaled` only works properly when run on one pod at a time, hence this leader election system.
 
 iptables DNAT is used to redirect incoming traffic to the service ClusterIP address, so the `NET_ADMIN` capability and IPv4 forwarding are required.
 
@@ -34,3 +62,8 @@ iptables DNAT is used to redirect incoming traffic to the service ClusterIP addr
 1. Delete the DaemonSet
 2. Delete the Secret/ConfigMap
 3. Let Kubernetes delete the service
+
+**Every 10 seconds:**
+1. Iterate all LoadBalancer services with our loadBalancerClass (Default: `svc-lb.tailscale.iptables.sh/lb`)
+2. Reconcile the state of the relevant `${RESOURCE_PREFIX}${SVC_NAME}` resources
+3. If any resources are missing, create the Deployment/DaemonSet/Role/RoleBindings/ServiceAccount as necessary
\ No newline at end of file
diff --git a/chart/service.yaml b/chart/service.yaml
new file mode 100644
index 0000000..8916498
--- /dev/null
+++ b/chart/service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: kuard
+spec:
+  selector:
+    run: kuard
+  ports:
+    - protocol: TCP
+      port: 80
+      targetPort: 8080
+  type: LoadBalancer
+  allocateLoadBalancerNodePorts: false
+  loadBalancerClass: "svc-lb.tailscale.iptables.sh/lb"
diff --git a/chart/templates/deployment.yaml b/chart/templates/deployment.yaml
index 94d92c7..a7a9369 100644
--- a/chart/templates/deployment.yaml
+++ b/chart/templates/deployment.yaml
@@ -45,12 +45,66 @@ spec:
             path: /health
             port: 8080
           env:
-            - name: CONTROLLER_NAMESPACE
+            - name: TS_PROXY_NAMESPACE
               value: "{{ .Release.Namespace }}"
-            - name: TAILSCALE_RUNTIME_IMAGE
-              value: "{{ .Values.runtime.image.repository }}:{{ .Values.runtime.image.tag | default .Chart.AppVersion }}"
+            - name: LOAD_BALANCER_CLASS
+              value: "{{ .Values.loadBalancerClass }}"
+            - name: DEPLOYMENT_TYPE
+              value: "{{ .Values.proxy.type }}"
+            - name: TS_HOSTNAME_FROM_SERVICE
+              value: "{{ .Values.proxy.generateHostnameFromServiceName }}"
+            {{- if .Values.proxy.serviceDomainSuffix }}
+            - name: TS_HOSTNAME_FROM_SERVICE_SUFFIX
+              value: "{{ .Values.proxy.serviceDomainSuffix }}"
+            {{- end }}
+            {{- if .Values.proxy.nodeSelectorLabel }}
+            - name: TS_PROXY_NODE_SELECTOR_LABEL
+              value: "{{ .Values.proxy.nodeSelectorLabel }}"
+            {{- end }}
+            - name: IMAGE_PULL_SECRETS
+              value: "{{ join ";" .Values.proxy.runtime.imagePullSecrets }}"
+            - name: TS_PROXY_RUNTIME_IMAGE
+              value: "{{ .Values.proxy.runtime.image.repository }}:{{ .Values.proxy.runtime.image.tag | default .Chart.AppVersion }}"
+            - name: TS_PROXY_REPLICA_COUNT
+              value: "{{ .Values.proxy.deploymentReplicas }}"
+            {{- if ((.Values.proxy.runtime.resources).requests).cpu }}
+            - name: TS_PROXY_RUNTIME_REQUEST_CPU
+              value: "{{ ((.Values.proxy.runtime.resources).requests).cpu }}"
+            {{- end }}
+            {{- if ((.Values.proxy.runtime.resources).requests).memory }}
+            - name: TS_PROXY_RUNTIME_REQUEST_MEM
+              value: "{{ ((.Values.proxy.runtime.resources).requests).memory }}"
+            {{- end }}
+            {{- if ((.Values.proxy.runtime.resources).limits).cpu }}
+            - name: TS_PROXY_RUNTIME_LIMIT_CPU
+              value: "{{ ((.Values.proxy.runtime.resources).limits).cpu }}"
+            {{- end }}
+            {{- if ((.Values.proxy.runtime.resources).limits).memory }}
+            - name: TS_PROXY_RUNTIME_LIMIT_MEM
+              value: "{{ ((.Values.proxy.runtime.resources).limits).memory }}"
+            {{- end }}
+            - name: TS_PROXY_RUNTIME_IMAGE_PULL_POLICY
+              value: "{{ .Values.proxy.runtime.image.pullPolicy }}"
             - name: LEADER_ELECTOR_IMAGE
-              value: "{{ .Values.leaderElector.image.repository }}:{{ .Values.leaderElector.image.tag }}"
+              value: "{{ .Values.proxy.leaderElector.image.repository }}:{{ .Values.proxy.leaderElector.image.tag }}"
+            - name: LEADER_ELECTOR_IMAGE_PULL_POLICY
+              value: "{{ .Values.proxy.leaderElector.image.pullPolicy }}"
+            {{- if ((.Values.proxy.leaderElector.resources).requests).cpu }}
+            - name: LEADER_ELECTOR_REQUEST_CPU
+              value: "{{ ((.Values.proxy.leaderElector.resources).requests).cpu }}"
+            {{- end }}
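+            {{- /* Note: the parenthesised lookups used here, e.g.
+                   ((.Values.proxy.leaderElector.resources).requests).cpu, are nil-safe,
+                   so each conditional block only renders when its value is actually set. */}}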
+            {{- if ((.Values.proxy.leaderElector.resources).requests).memory }}
+            - name: LEADER_ELECTOR_REQUEST_MEM
+              value: "{{ ((.Values.proxy.leaderElector.resources).requests).memory }}"
+            {{- end }}
+            {{- if ((.Values.proxy.leaderElector.resources).limits).cpu }}
+            - name: LEADER_ELECTOR_LIMIT_CPU
+              value: "{{ ((.Values.proxy.leaderElector.resources).limits).cpu }}"
+            {{- end }}
+            {{- if ((.Values.proxy.leaderElector.resources).limits).memory }}
+            - name: LEADER_ELECTOR_LIMIT_MEM
+              value: "{{ ((.Values.proxy.leaderElector.resources).limits).memory }}"
+            {{- end }}
           resources:
             {{- toYaml .Values.controller.resources | nindent 12 }}
       {{- with .Values.controller.nodeSelector }}
diff --git a/chart/templates/secret.yaml b/chart/templates/secret.yaml
new file mode 100644
index 0000000..4f3c795
--- /dev/null
+++ b/chart/templates/secret.yaml
@@ -0,0 +1,11 @@
+{{- if .Values.tailscaleAuthKey }}
+apiVersion: v1
+kind: Secret
+type: Opaque
+metadata:
+  name: "tailscale-svc-lb"
+  labels:
+    {{- include "tailscale-svc-lb.labels" . | nindent 4 }}
+stringData:
+  ts-auth-key: "{{ .Values.tailscaleAuthKey }}"
+{{- end }}
diff --git a/chart/values.yaml b/chart/values.yaml
index 8ae4b05..ab822f6 100644
--- a/chart/values.yaml
+++ b/chart/values.yaml
@@ -5,6 +5,17 @@
 nameOverride: ""
 fullnameOverride: ""
 
+# Prefix to apply to the target service name when creating Tailscale Proxy resources
+# WARNING: Changing this after deploying the controller will result in orphaned resources.
+resourcePrefix: "ts-"
+
+# LoadBalancerClass this instance of the controller should monitor
+# WARNING: Changing this after deploying the controller will result in orphaned resources.
+loadBalancerClass: "svc-lb.tailscale.iptables.sh/lb"
+
+# (Optional) Auth Key to use when starting tailscale. Automates the login process.
+tailscaleAuthKey: ""
+
 controller:
   image:
     repository: clrxbl/tailscale-svc-lb-controller
@@ -54,15 +65,69 @@ controller:
 
   affinity: {}
 
-runtime:
-  image:
-    repository: clrxbl/tailscale-svc-lb-runtime
-    pullPolicy: IfNotPresent
-    # Overrides the image tag whose default is the chart appVersion.
-    tag: "latest"
+# Configure the behaviour of the deployed Tailscale Proxy resources
+proxy:
+
+  # How to deploy the Tailscale Proxy instances - valid options are 'DaemonSet', 'Deployment'
+  type: Deployment
+  # The number of Tailscale Proxy Replicas to run for each instance. Only applies if proxy.type is set to 'Deployment'
+  deploymentReplicas: 2
+
+  # NodeLabel to use when creating Tailscale Proxy deployment. The value of this label should be "true"
+  # Example: "svc-lb.tailscale.iptables.sh/deploy" will use nodeLabelSelector svc-lb.tailscale.iptables.sh/deploy=true
+  nodeSelectorLabel: "svc-lb.tailscale.iptables.sh/deploy"
+
+  # Generate a hostname based on the target service name and target service namespace
+  generateHostnameFromServiceName: false
+  # An optional domain suffix that will be appended to the generated hostname. Only used if
+  # generateHostnameFromServiceName: true
+  serviceDomainSuffix: ""
+
+  # Runtime image to deploy as the Tailscale Proxy
+  runtime:
+    image:
+      repository: clrxbl/tailscale-svc-lb-runtime
+      pullPolicy: IfNotPresent
+      # Overrides the image tag whose default is the chart appVersion.
+      tag: "latest"
+
+    # A list of imagePullSecret names that will be used when fetching the runtime images.
+    imagePullSecrets: [ ]
+    # - exampleSecretName
+    # - anotherSecretName
+
+    resources: { }
+    # We usually recommend not to specify default resources and to leave this as a conscious
+    # choice for the user. This also increases chances charts run on environments with little
+    # resources, such as Minikube. If you do want to specify resources, uncomment the following
+    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+    # limits:
+    #   cpu: 100m
+    #   memory: 128Mi
+    # requests:
+    #   cpu: 100m
+    #   memory: 128Mi
+
+  leaderElector:
+    image:
+      repository: gcr.io/google_containers/leader-elector
+      pullPolicy: IfNotPresent
+      tag: "0.5"
+
+    imagePullSecrets: [ ]
+    # - exampleSecretName
+    # - anotherSecretName
+
+    resources: { }
+    # We usually recommend not to specify default resources and to leave this as a conscious
+    # choice for the user. This also increases chances charts run on environments with little
+    # resources, such as Minikube. If you do want to specify resources, uncomment the following
+    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+    # limits:
+    #   cpu: 100m
+    #   memory: 128Mi
+    # requests:
+    #   cpu: 100m
+    #   memory: 128Mi
+
+
-leaderElector:
-  image:
-    repository: gcr.io/google_containers/leader-elector
-    pullPolicy: IfNotPresent
-    tag: "0.5"
\ No newline at end of file
diff --git a/runtime/run.sh b/runtime/run.sh
index 75eb6ea..6ead063 100755
--- a/runtime/run.sh
+++ b/runtime/run.sh
@@ -12,10 +12,17 @@ TS_DEST_IP="${TS_DEST_IP:-}"
 TS_EXTRA_ARGS="${TS_EXTRA_ARGS:-}"
 TS_ACCEPT_DNS="${TS_ACCEPT_DNS:-false}"
 TS_KUBE_SECRET="${TS_KUBE_SECRET:-tailscale}"
+TS_HOSTNAME="${TS_HOSTNAME:-}"
+TSD_EXTRA_ARGS="${TSD_EXTRA_ARGS:-}"
+
+
+# Set to 'true' to skip leadership election. Only use when testing against one node.
+# This is useful on non-x86_64 architectures, as the leader-elector image is only provided for that arch.
+DEBUG_SKIP_LEADER="${DEBUG_SKIP_LEADER:-false}"
 
 set -e
 
-TAILSCALED_ARGS="--state=kube:${TS_KUBE_SECRET} --socket=/tmp/tailscaled.sock"
+TAILSCALED_ARGS="--state=kube:${TS_KUBE_SECRET} --socket=/tmp/tailscaled.sock ${TSD_EXTRA_ARGS}"
 
 if [ $(cat /proc/sys/net/ipv4/ip_forward) != 1 ]; then
   echo "IPv4 forwarding (/proc/sys/net/ipv4/ip_forward) needs to be enabled, exiting..."
@@ -30,17 +37,21 @@ if [[ ! -c /dev/net/tun ]]; then
   mknod /dev/net/tun c 10 200
 fi
 
-echo "Waiting for leader election..."
-LEADER=false
-while [[ "${LEADER}" == "false" ]]; do
-  CURRENT_LEADER=$(curl http://127.0.0.1:4040 -s -m 2 | jq -r ".name")
-  if [[ "${CURRENT_LEADER}" == "$(hostname)" ]]; then
-    echo "I am the leader."
-    LEADER=true
-  else
-    sleep 1
-  fi
-done
+if [[ "${DEBUG_SKIP_LEADER}" == "true" ]]; then
+  echo "CAUTION: Skipping leader election due to DEBUG_SKIP_LEADER==true."
+else
+  echo "Waiting for leader election..."
+  LEADER=false
+  while [[ "${LEADER}" == "false" ]]; do
+    CURRENT_LEADER=$(curl http://127.0.0.1:4040 -s -m 2 | jq -r ".name")
+    if [[ "${CURRENT_LEADER}" == "$(hostname)" ]]; then
+      echo "I am the leader."
+      LEADER=true
+    else
+      sleep 1
+    fi
+  done
+fi
 
 echo "Starting tailscaled"
 tailscaled ${TAILSCALED_ARGS} &
@@ -53,6 +64,10 @@ fi
 if [[ ! -z "${TS_EXTRA_ARGS}" ]]; then
   UP_ARGS="${UP_ARGS} ${TS_EXTRA_ARGS:-}"
 fi
-z "${TS_HOSTNAME}" ]]; then + echo "Overriding system hostname using TS_HOSTNAME: ${TS_HOSTNAME}" + UP_ARGS="--hostname=${TS_HOSTNAME} ${UP_ARGS}" +fi echo "Running tailscale up" tailscale --socket=/tmp/tailscaled.sock up ${UP_ARGS} diff --git a/src/tailscale_svc_lb_controller/config.py b/src/tailscale_svc_lb_controller/config.py new file mode 100755 index 0000000..bf1d903 --- /dev/null +++ b/src/tailscale_svc_lb_controller/config.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python3 +import logging +import re +import sys +from os import environ as env + +# -- Constants +CONTROLLER_PREFIX = "svc-lb.tailscale.iptables.sh" +SERVICE_NAME_LABEL = CONTROLLER_PREFIX + "/svc-name" +SERVICE_NAMESPACE_LABEL = CONTROLLER_PREFIX + "/svc-namespace" + +# -- Proxy Deployment Configuration +RESOURCE_PREFIX = env.get("RESOURCE_PREFIX", "ts-") +SECRET_NAME = env.get("SECRET_NAME", "tailscale-svc-lb") +# LoadBalancerClass this instance of Tailscale Proxy will implement +LOAD_BALANCER_CLASS = env.get("LOAD_BALANCER_CLASS", CONTROLLER_PREFIX + "/lb") +# Label to use when selecting nodes for the Tailscale Proxy to run on. The value of this label should be 'true' +NODE_SELECTOR_LABEL = env.get("TS_PROXY_NODE_SELECTOR_LABEL", None) + +# A semi-colon seperated string containing the names of any secrets that should be used +# when pulling images. Secret must already exist and be present in the TS_PROXY_NAMESPACE +IMAGE_PULL_SECRETS = env.get("IMAGE_PULL_SECRETS", "") +if not re.match(r"^([a-z]|-|\d|;)*$", IMAGE_PULL_SECRETS): + logging.error("IMAGE_PULL_SECRETS invalid. Should be a semi-colon seperated list of" + "secret names.") + sys.exit(1) + +# Type of deployment to use for the Tailscale Proxy instances +DEPLOYMENT_TYPE = env.get("DEPLOYMENT_TYPE", "DaemonSet") +if DEPLOYMENT_TYPE not in ["DaemonSet", "Deployment"]: + logging.error("DEPLOYMENT_TYPE invalid. Valid options are 'DaemonSet', 'Deployment'") + sys.exit(1) + +# Tailscale Proxy Runtime Namepace. All Tailscale Proxies will be created in this namespace. +TS_PROXY_NAMESPACE = env.get("TS_PROXY_NAMESPACE", "default") + +# If TS_PROXY_DEPLOYMENT_TYPE is 'Deployment', this dictates the number of replicas. No effect otherwise. +try: + TS_PROXY_REPLICA_COUNT = int(env.get("TS_PROXY_REPLICA_COUNT", "2")) +except Exception: + logging.error("TS_PROXY_REPLICA_COUNT value invalid. Should be an integer above 0.") + sys.exit(1) + +# Tailscale Proxy Runtime Container Image +TS_PROXY_RUNTIME_IMAGE = env.get("TS_PROXY_RUNTIME_IMAGE", "clrxbl/tailscale-svc-lb-runtime:latest") + +# Tailscale Proxy Runtime Container ImagePullPolicy +TS_PROXY_RUNTIME_IMAGE_PULL_POLICY = env.get("TS_PROXY_RUNTIME_IMAGE_PULL_POLICY", "IfNotPresent") +if TS_PROXY_RUNTIME_IMAGE_PULL_POLICY not in ["Always", "IfNotPresent", "Never"]: + logging.error( + "TS_PROXY_RUNTIME_IMAGE_PULL_POLICY invalid. 
+
+# Tailscale Proxy Runtime Container Requests/Limits
+TS_PROXY_RUNTIME_REQUEST_CPU = env.get("TS_PROXY_RUNTIME_REQUEST_CPU", None)
+TS_PROXY_RUNTIME_REQUEST_MEM = env.get("TS_PROXY_RUNTIME_REQUEST_MEM", None)
+TS_PROXY_RUNTIME_LIMIT_CPU = env.get("TS_PROXY_RUNTIME_LIMIT_CPU", None)
+TS_PROXY_RUNTIME_LIMIT_MEM = env.get("TS_PROXY_RUNTIME_LIMIT_MEM", None)
+
+# The docker image that will be run as the Leader Elector
+LEADER_ELECTOR_IMAGE = env.get("LEADER_ELECTOR_IMAGE", "gcr.io/google_containers/leader-elector:0.5")
+
+# ImagePullPolicy to use when retrieving LEADER_ELECTOR_IMAGE
+LEADER_ELECTOR_IMAGE_PULL_POLICY = env.get("LEADER_ELECTOR_IMAGE_PULL_POLICY", "IfNotPresent")
+if LEADER_ELECTOR_IMAGE_PULL_POLICY not in ["Always", "IfNotPresent", "Never"]:
+    logging.error(
+        "LEADER_ELECTOR_IMAGE_PULL_POLICY invalid. Valid options are 'Always', 'IfNotPresent', and "
+        "'Never'")
+    sys.exit(1)
+
+# Leader Elector Container Requests/Limits
+LEADER_ELECTOR_REQUEST_CPU = env.get("LEADER_ELECTOR_REQUEST_CPU", None)
+LEADER_ELECTOR_REQUEST_MEM = env.get("LEADER_ELECTOR_REQUEST_MEM", None)
+LEADER_ELECTOR_LIMIT_CPU = env.get("LEADER_ELECTOR_LIMIT_CPU", None)
+LEADER_ELECTOR_LIMIT_MEM = env.get("LEADER_ELECTOR_LIMIT_MEM", None)
+
+# -- Tailscale Configuration
+#
+# Automatically generate a hostname based on the target service name and namespace.
+# Example: kuard-default
+TS_HOSTNAME_FROM_SERVICE = env.get("TS_HOSTNAME_FROM_SERVICE", "false")
+if TS_HOSTNAME_FROM_SERVICE not in ["true", "false"]:
+    logging.error("TS_HOSTNAME_FROM_SERVICE valid options are 'true', 'false'")
+    sys.exit(1)
+
+# An optional suffix to append to the automatically generated hostname. Only applies if TS_HOSTNAME_FROM_SERVICE
+# has been set to "true".
+# Example: kuard-default-suffix
+TS_HOSTNAME_FROM_SERVICE_SUFFIX = env.get("TS_HOSTNAME_FROM_SERVICE_SUFFIX", "")
diff --git a/src/tailscale_svc_lb_controller/examples/pod.yaml b/src/tailscale_svc_lb_controller/examples/pod.yaml
new file mode 100644
index 0000000..626c7b3
--- /dev/null
+++ b/src/tailscale_svc_lb_controller/examples/pod.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  labels:
+    run: kuard
+  name: kuard
+spec:
+  containers:
+    - image: gcr.io/kuar-demo/kuard-amd64:blue
+      imagePullPolicy: IfNotPresent
+      name: kuard
diff --git a/src/tailscale_svc_lb_controller/examples/service.yaml b/src/tailscale_svc_lb_controller/examples/service.yaml
new file mode 100644
index 0000000..45570ea
--- /dev/null
+++ b/src/tailscale_svc_lb_controller/examples/service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: kuard
+spec:
+  selector:
+    run: kuard
+  ports:
+    - protocol: TCP
+      port: 80
+      targetPort: 8080
+  type: LoadBalancer
+  allocateLoadBalancerNodePorts: false
+  loadBalancerClass: "svc-lb.tailscale.iptables.sh/lb"
\ No newline at end of file
diff --git a/src/tailscale_svc_lb_controller/helpers.py b/src/tailscale_svc_lb_controller/helpers.py
new file mode 100644
index 0000000..af7f611
--- /dev/null
+++ b/src/tailscale_svc_lb_controller/helpers.py
@@ -0,0 +1,76 @@
+import logging
+
+import kubernetes
+
+import config
+
+
+def get_common_labels(service, namespace):
+    """
+    Get the labels common to all resources managed by this operator.
+    """
+
+    return {
+        "app.kubernetes.io/name": "tailscale-svc-lb",
+        "app.kubernetes.io/managed-by": "tailscale-svc-lb-controller",
+        config.SERVICE_NAME_LABEL: service,
+        config.SERVICE_NAMESPACE_LABEL: namespace
+    }
+
+
+def update_service_status(namespace, service, ip):
+    """
+    Update the status of the service to reflect the service Tailscale IP.
+    """
+
+    try:
+        # Get the service
+        k8s = kubernetes.client.CoreV1Api()
+        service_object = k8s.read_namespaced_service(name=service, namespace=namespace)
+
+        # Update the status
+        service_object.status.load_balancer.ingress = [
+            kubernetes.client.V1LoadBalancerIngress(ip=ip)
+        ]
+
+        # Patch the service with the new status
+        k8s.patch_namespaced_service_status(name=service, namespace=namespace, body=service_object)
+    except Exception as e:
+        raise e
+
+
+def get_hostname(target_service_name: str, target_service_namespace: str) -> str:
+    """
+    Generates the hostname to use for the tailscale client.
+
+    If config.TS_HOSTNAME_FROM_SERVICE is set to "true", the hostname will be automatically generated based on the
+    supplied target service name, and namespace.
+
+    While using config.TS_HOSTNAME_FROM_SERVICE, an optional domain suffix can be supplied by setting the
+    config.TS_HOSTNAME_FROM_SERVICE_SUFFIX constant.
+
+    If no configuration values are set, this will be left unconfigured and the Tailscale hostname will default to
+    the pod name.
+    """
+    if config.TS_HOSTNAME_FROM_SERVICE == "true":
+        if config.TS_HOSTNAME_FROM_SERVICE_SUFFIX != "":
+            return f'{target_service_name}-{target_service_namespace}-{config.TS_HOSTNAME_FROM_SERVICE_SUFFIX}'
+        else:
+            return f'{target_service_name}-{target_service_namespace}'
+
+    return ""
+
+
+def get_image_pull_secrets() -> list[kubernetes.client.V1LocalObjectReference]:
+    """
+    Generates the imagePullSecrets to use, based on the semicolon-separated string
+    config.IMAGE_PULL_SECRETS.
+    """
+    if config.IMAGE_PULL_SECRETS:
+        logging.debug(f"Image Pull Secrets: {config.IMAGE_PULL_SECRETS}")
+        retval = []
+        secrets = config.IMAGE_PULL_SECRETS.split(";")
+        for secret in secrets:
+            retval.append(kubernetes.client.V1LocalObjectReference(name=secret))
+        return retval
+    return []
diff --git a/src/tailscale_svc_lb_controller/main.py b/src/tailscale_svc_lb_controller/main.py
index dddf4e5..3845c36 100755
--- a/src/tailscale_svc_lb_controller/main.py
+++ b/src/tailscale_svc_lb_controller/main.py
@@ -1,236 +1,22 @@
 #!/usr/bin/env python3
-import kopf
-import logging
-import kubernetes
 import base64
-import os
-
-# Constants
-CONTROLLER_PREFIX = "svc-lb.tailscale.iptables.sh"
-CONTROLLER_NAMESPACE = os.getenv("CONTROLLER_NAMESPACE")
-SECRET_NAME = "tailscale-svc-lb"
-LOAD_BALANCER_CLASS = CONTROLLER_PREFIX + "/lb"
-NODE_SELECTOR_LABEL = CONTROLLER_PREFIX + "/deploy"
-SERVICE_NAME_LABEL = CONTROLLER_PREFIX + "/svc-name"
-SERVICE_NAMESPACE_LABEL = CONTROLLER_PREFIX + "/svc-namespace"
-RESOURCE_PREFIX = "ts-"
-
-TAILSCALE_RUNTIME_IMAGE = os.getenv("TAILSCALE_RUNTIME_IMAGE")
-LEADER_ELECTOR_IMAGE = os.getenv("LEADER_ELECTOR_IMAGE")
-
-
-def get_common_labels(service, namespace):
-    """
-    Get the labels common to all Tailscale services.
-    """
-
-    return {
-        "app.kubernetes.io/name": "tailscale-svc-lb",
-        "app.kubernetes.io/managed-by": "tailscale-svc-lb-controller",
-        SERVICE_NAME_LABEL: service,
-        SERVICE_NAMESPACE_LABEL: namespace
-    }
-
-
-def update_service_status(namespace, service, ip):
-    """
-    Update the status of the service to reflect the service Tailscale IP.
-    """
-
-    try:
-        # Get the service
-        k8s = kubernetes.client.CoreV1Api()
-        service_object = k8s.read_namespaced_service(name=service, namespace=namespace)
+import logging
 
-        # Update the status
-        service_object.status.load_balancer.ingress = [
-            kubernetes.client.V1LoadBalancerIngress(ip=ip)
-        ]
+import kopf
 
-        # Patch the service with the new status
-        k8s.patch_namespaced_service_status(name=service, namespace=namespace, body=service_object)
-    except Exception as e:
-        logging.error(e)
+import config
+from src.tailscale_svc_lb_controller import helpers
+from tailscale_proxy import TailscaleProxyResource
 
 
 @kopf.on.startup()
 def configure(settings: kopf.OperatorSettings, **_):
     settings.persistence.diffbase_storage = kopf.AnnotationsDiffBaseStorage(
-        prefix=CONTROLLER_PREFIX,
+        prefix=config.CONTROLLER_PREFIX,
         key="last-handled-configuration",
     )
-    settings.persistence.finalizer = CONTROLLER_PREFIX + "/finalizer"
-    settings.persistence.progress_storage = kopf.AnnotationsProgressStorage(prefix=CONTROLLER_PREFIX)
-
-
-@kopf.on.create("services", field="spec.loadBalancerClass", value=LOAD_BALANCER_CLASS)
-def create_svc_lb(spec, body, name, logger, **kwargs):
-    """
-    Create a service load balancer instance.
-    """
-
-    namespace = CONTROLLER_NAMESPACE
-    service_namespace = body["metadata"]["namespace"]
-    logging.info(f"Creating svc-lb resources in namespace {namespace} for service {service_namespace}/{name}")
-
-    common_labels = get_common_labels(name, service_namespace)
-
-    # Create ServiceAccount
-    k8s = kubernetes.client.CoreV1Api()
-    k8s.create_namespaced_service_account(namespace=namespace, body=kubernetes.client.V1ServiceAccount(
-        metadata=kubernetes.client.V1ObjectMeta(
-            name=RESOURCE_PREFIX + name,
-            labels=common_labels,
-            namespace=namespace
-        )
-    ))
-
-    # Create Role to manage secrets
-    k8s = kubernetes.client.RbacAuthorizationV1Api()
-    role = kubernetes.client.V1Role(
-        metadata=kubernetes.client.V1ObjectMeta(
-            name=RESOURCE_PREFIX + name,
-            labels=common_labels,
-            namespace=namespace,
-        ),
-        rules=[
-            kubernetes.client.V1PolicyRule(
-                api_groups=[""],
-                resources=["secrets", "endpoints"],
-                verbs=["create"]
-            ),
-            kubernetes.client.V1PolicyRule(
-                api_groups=[""],
-                resource_names=[f"{RESOURCE_PREFIX}{name}"],
-                resources=["secrets", "endpoints"],
-                verbs=["get", "update", "patch"]
-            ),
-            kubernetes.client.V1PolicyRule(
-                api_groups=["coordination.k8s.io"],
-                resource_names=[f"{RESOURCE_PREFIX}{name}"],
-                resources=["leases"],
-                verbs=["*"]
-            )
-        ],
-    )
-    k8s.create_namespaced_role(namespace, role)
-
-    # Create RoleBinding
-    role_binding = kubernetes.client.V1RoleBinding(
-        metadata=kubernetes.client.V1ObjectMeta(
-            name=RESOURCE_PREFIX + name,
-            labels=common_labels,
-            namespace=namespace,
-        ),
-        role_ref=kubernetes.client.V1RoleRef(
-            api_group="rbac.authorization.k8s.io",
-            kind="Role",
-            name=RESOURCE_PREFIX + name,
-        ),
-        subjects=[
-            kubernetes.client.V1Subject(
-                kind="ServiceAccount",
-                name=RESOURCE_PREFIX + name,
-                namespace=namespace,
-            ),
-        ],
-    )
-    k8s.create_namespaced_role_binding(namespace, role_binding)
-
-    # Create Secret
-    k8s = kubernetes.client.CoreV1Api()
-    secret = kubernetes.client.V1Secret(
-        metadata=kubernetes.client.V1ObjectMeta(
-            name=RESOURCE_PREFIX + name,
-            labels=common_labels,
-            namespace=namespace,
-        ),
-        type="Opaque",
-        string_data={}
-    )
-    k8s.create_namespaced_secret(namespace, secret)
-
-    # Create the DaemonSet
-    k8s = kubernetes.client.AppsV1Api()
-    k8s.create_namespaced_daemon_set(
-        namespace=namespace,
-        body=kubernetes.client.V1DaemonSet(
-            metadata=kubernetes.client.V1ObjectMeta(
-                name=RESOURCE_PREFIX + name,
-                labels=common_labels
-            ),
-            spec=kubernetes.client.V1DaemonSetSpec(
-                selector=kubernetes.client.V1LabelSelector(
-                    match_labels=common_labels
-                ),
-                template=kubernetes.client.V1PodTemplateSpec(
-                    metadata=kubernetes.client.V1ObjectMeta(
-                        labels=common_labels
-                    ),
-                    spec=kubernetes.client.V1PodSpec(
-                        service_account=RESOURCE_PREFIX + name,
-                        service_account_name=RESOURCE_PREFIX + name,
-                        node_selector={NODE_SELECTOR_LABEL: "true"},
-                        containers=[
-                            kubernetes.client.V1Container(
-                                name="tailscale-svc-lb-runtime",
-                                image=TAILSCALE_RUNTIME_IMAGE,
-                                image_pull_policy="Always",  # TODO: Return to IfNotPresent
-                                env=[
-                                    kubernetes.client.V1EnvVar(
-                                        name="TS_KUBE_SECRET", value=RESOURCE_PREFIX + name
-                                    ),
-                                    kubernetes.client.V1EnvVar(
-                                        name="SVC_NAME", value=name
-                                    ),
-                                    kubernetes.client.V1EnvVar(
-                                        name="SVC_NAMESPACE", value=service_namespace
-                                    ),
-                                    kubernetes.client.V1EnvVar(
-                                        name="TS_AUTH_KEY", value_from=kubernetes.client.V1EnvVarSource(
-                                            secret_key_ref=kubernetes.client.V1SecretKeySelector(
-                                                name=SECRET_NAME,
-                                                key="ts-auth-key",
-                                                optional=True,
-                                            )
-                                        )
-                                    )
-                                ],
-                                lifecycle=kubernetes.client.V1Lifecycle(
-                                    pre_stop=kubernetes.client.V1LifecycleHandler(
-                                        _exec=kubernetes.client.V1ExecAction(
-                                            command=["/stop.sh"]
-                                        )
-                                    )
-                                ),
-                                security_context=kubernetes.client.V1SecurityContext(
-                                    privileged=True,
-                                    capabilities=kubernetes.client.V1Capabilities(
-                                        add=[
-                                            "NET_ADMIN"
-                                        ]
-                                    )
-                                )
-                            ),
-                            kubernetes.client.V1Container(
-                                name="leader-elector",
-                                image=LEADER_ELECTOR_IMAGE,
-                                image_pull_policy="IfNotPresent",
-                                args=[f"--election={RESOURCE_PREFIX}{name}", f"--election-namespace={namespace}", "--http=0.0.0.0:4040"],
-                                lifecycle=kubernetes.client.V1Lifecycle(
-                                    pre_stop=kubernetes.client.V1LifecycleHandler(
-                                        _exec=kubernetes.client.V1ExecAction(
-                                            command=["pkill", "-f", "server"]
-                                        )
-                                    )
-                                )
-                            )
-                        ],
-                    ),
-                ),
-            ),
-        ),
-    )
+    settings.persistence.finalizer = config.CONTROLLER_PREFIX + "/finalizer"
+    settings.persistence.progress_storage = kopf.AnnotationsProgressStorage(prefix=config.CONTROLLER_PREFIX)
 
 
 @kopf.on.field("secrets", field="data.ts-ip")
@@ -240,59 +26,43 @@ def update_svc(body, namespace, **kwargs):
     """
 
     # Get service name from svc-lb label
-    service = body["metadata"]["labels"][SERVICE_NAME_LABEL]
-    service_namespace = body["metadata"]["labels"][SERVICE_NAMESPACE_LABEL]
+    service = body["metadata"]["labels"][config.SERVICE_NAME_LABEL]
+    service_namespace = body["metadata"]["labels"][config.SERVICE_NAMESPACE_LABEL]
 
     # Get Tailscale IP from the service's secret
     ip = base64.b64decode(body["data"]["ts-ip"]).decode("utf-8")
 
-    logging.info(f"Updating LoadBalancer service in namespace {service_namespace} with Tailscale IP {ip}")
+    logging.info(f"Updating LoadBalancer service {service_namespace}/{service} with Tailscale IP {ip}")
 
-    update_service_status(service_namespace, service, ip)
+    helpers.update_service_status(service_namespace, service, ip)
 
 
-@kopf.on.delete("services", field="spec.loadBalancerClass", value=LOAD_BALANCER_CLASS)
+@kopf.on.delete("services", field="spec.loadBalancerClass", value=config.LOAD_BALANCER_CLASS)
 def delete_svc_lb(spec, name, logger, **kwargs):
     """
     Delete all created service load balancer resources.
""" - - namespace = CONTROLLER_NAMESPACE - logging.info(f"Deleting svc-lb resources in namespace {namespace} for service {name}") - - k8s = kubernetes.client.AppsV1Api() - # Delete all DaemonSets with svc-name label - k8s.delete_collection_namespaced_daemon_set( - namespace=namespace, - label_selector=f"{SERVICE_NAME_LABEL}={name}" - ) - - # Delete RoleBinding with svc-name label - k8s = kubernetes.client.RbacAuthorizationV1Api() - k8s.delete_collection_namespaced_role_binding( - namespace=namespace, - label_selector=f"{SERVICE_NAME_LABEL}={name}" + service_namespace = kwargs['meta']['namespace'] + ts = TailscaleProxyResource( + target_service_name=name, + target_service_namespace=service_namespace, + tailscale_proxy_namespace=config.TS_PROXY_NAMESPACE, + deployment_type=config.DEPLOYMENT_TYPE ) + logging.info(f"Deleting svc-lb resources in namespace {config.TS_PROXY_NAMESPACE}" + f" for service {service_namespace}/{name}") + ts.delete() - # Delete Role with svc-name label - k8s = kubernetes.client.RbacAuthorizationV1Api() - k8s.delete_collection_namespaced_role( - namespace=namespace, - label_selector=f"{SERVICE_NAME_LABEL}={name}" - ) + # TODO: Automatically remove device from tailnet + # In the meantime, using an Ephemeral key to register devices is a workaround - # Delete ServiceAccount with svc-name label - k8s = kubernetes.client.CoreV1Api() - k8s.delete_collection_namespaced_service_account( - namespace=namespace, - label_selector=f"{SERVICE_NAME_LABEL}={name}" - ) - # Delete Secret with svc-name label - k8s = kubernetes.client.CoreV1Api() - k8s.delete_collection_namespaced_secret( - namespace=namespace, - label_selector=f"{SERVICE_NAME_LABEL}={name}" +@kopf.timer('services', interval=10.0, field="spec.loadBalancerClass", value=config.LOAD_BALANCER_CLASS) +def create_svc_lb_timer(spec, **kwargs): + ts = TailscaleProxyResource( + target_service_name=kwargs['body']['metadata']['name'], + target_service_namespace=kwargs['body']['metadata']['namespace'], + tailscale_proxy_namespace=config.TS_PROXY_NAMESPACE, + deployment_type=config.DEPLOYMENT_TYPE ) - - # TODO: Automatically remove device from tailnet + ts.reconcile() diff --git a/src/tailscale_svc_lb_controller/resources/base.py b/src/tailscale_svc_lb_controller/resources/base.py new file mode 100644 index 0000000..77437e2 --- /dev/null +++ b/src/tailscale_svc_lb_controller/resources/base.py @@ -0,0 +1,136 @@ +import kubernetes + +from src.tailscale_svc_lb_controller import config +from src.tailscale_svc_lb_controller import helpers + + +class BaseResource: + target_service_name = "" + target_service_namespace = "" + tailscale_proxy_namespace = "" + + # All resources that inherit this class need to implement the following methods + def new(self): raise NotImplementedError + + def create(self): raise NotImplementedError + + def delete(self): raise NotImplementedError + + def get(self): raise NotImplementedError + + def reconcile(self): raise NotImplementedError + + def _generate_pod_template_spec(self) -> kubernetes.client.V1PodTemplateSpec: + node_selector = None + if config.NODE_SELECTOR_LABEL is not None: + node_selector = {config.NODE_SELECTOR_LABEL: "true"} + + return kubernetes.client.V1PodTemplateSpec( + metadata=kubernetes.client.V1ObjectMeta( + labels=helpers.get_common_labels(self.target_service_name, self.target_service_namespace) + ), + spec=kubernetes.client.V1PodSpec( + service_account=f"{config.RESOURCE_PREFIX}{self.target_service_name}", + service_account_name=f"{config.RESOURCE_PREFIX}{self.target_service_name}", + 
+                    kubernetes.client.V1Container(
+                        name="tailscale-svc-lb-init",
+                        image=config.TS_PROXY_RUNTIME_IMAGE,
+                        image_pull_policy=config.TS_PROXY_RUNTIME_IMAGE_PULL_POLICY,
+                        command=['sh', '-c', 'sysctl -w net.ipv4.ip_forward=1'],
+                        resources=kubernetes.client.V1ResourceRequirements(
+                            requests={"cpu": config.TS_PROXY_RUNTIME_REQUEST_CPU,
+                                      "memory": config.TS_PROXY_RUNTIME_REQUEST_MEM},
+                            limits={"cpu": config.TS_PROXY_RUNTIME_LIMIT_CPU,
+                                    "memory": config.TS_PROXY_RUNTIME_LIMIT_MEM}
+                        ),
+                        security_context=kubernetes.client.V1SecurityContext(
+                            privileged=True,
+                            capabilities=kubernetes.client.V1Capabilities(
+                                add=[
+                                    "NET_ADMIN"
+                                ]
+                            )
+                        )
+                    ),
+                ],
+                containers=[
+                    kubernetes.client.V1Container(
+                        name="tailscale-svc-lb-runtime",
+                        image=config.TS_PROXY_RUNTIME_IMAGE,
+                        image_pull_policy=config.TS_PROXY_RUNTIME_IMAGE_PULL_POLICY,
+                        resources=kubernetes.client.V1ResourceRequirements(
+                            requests={"cpu": config.TS_PROXY_RUNTIME_REQUEST_CPU,
+                                      "memory": config.TS_PROXY_RUNTIME_REQUEST_MEM},
+                            limits={"cpu": config.TS_PROXY_RUNTIME_LIMIT_CPU,
+                                    "memory": config.TS_PROXY_RUNTIME_LIMIT_MEM}
+                        ),
+                        env=[
+                            kubernetes.client.V1EnvVar(
+                                name="TS_KUBE_SECRET", value=config.RESOURCE_PREFIX + self.target_service_name
+                            ),
+                            kubernetes.client.V1EnvVar(
+                                name="SVC_NAME", value=self.target_service_name
+                            ),
+                            kubernetes.client.V1EnvVar(
+                                name="SVC_NAMESPACE", value=self.target_service_namespace
+                            ),
+                            kubernetes.client.V1EnvVar(
+                                name="TS_HOSTNAME",
+                                value=helpers.get_hostname(self.target_service_name,
+                                                           self.target_service_namespace)
+                            ),
+                            kubernetes.client.V1EnvVar(
+                                name="TS_AUTH_KEY", value_from=kubernetes.client.V1EnvVarSource(
+                                    secret_key_ref=kubernetes.client.V1SecretKeySelector(
+                                        name=config.SECRET_NAME,
+                                        key="ts-auth-key",
+                                        optional=True,
+                                    )
+                                )
+                            )
+                        ],
+                        lifecycle=kubernetes.client.V1Lifecycle(
+                            pre_stop=kubernetes.client.V1LifecycleHandler(
+                                _exec=kubernetes.client.V1ExecAction(
+                                    command=["/stop.sh"]
+                                )
+                            )
+                        ),
+                        security_context=kubernetes.client.V1SecurityContext(
+                            privileged=True,
+                            capabilities=kubernetes.client.V1Capabilities(
+                                add=[
+                                    "NET_ADMIN"
+                                ]
+                            )
+                        )
+                    ),
+                    kubernetes.client.V1Container(
+                        name="leader-elector",
+                        image=config.LEADER_ELECTOR_IMAGE,
+                        image_pull_policy=config.LEADER_ELECTOR_IMAGE_PULL_POLICY,
+                        resources=kubernetes.client.V1ResourceRequirements(
+                            requests={"cpu": config.LEADER_ELECTOR_REQUEST_CPU,
+                                      "memory": config.LEADER_ELECTOR_REQUEST_MEM},
+                            limits={"cpu": config.LEADER_ELECTOR_LIMIT_CPU,
+                                    "memory": config.LEADER_ELECTOR_LIMIT_MEM}
+                        ),
+                        args=[
+                            f"--election={config.RESOURCE_PREFIX}{self.target_service_name}",
+                            f"--election-namespace={self.tailscale_proxy_namespace}",
+                            "--http=0.0.0.0:4040"
+                        ],
+                        lifecycle=kubernetes.client.V1Lifecycle(
+                            pre_stop=kubernetes.client.V1LifecycleHandler(
+                                _exec=kubernetes.client.V1ExecAction(
+                                    command=["pkill", "-f", "server"]
+                                )
+                            )
+                        )
+                    )
+                ],
+            ),
+        )
diff --git a/src/tailscale_svc_lb_controller/resources/daemonset.py b/src/tailscale_svc_lb_controller/resources/daemonset.py
new file mode 100644
index 0000000..0919f3e
--- /dev/null
+++ b/src/tailscale_svc_lb_controller/resources/daemonset.py
@@ -0,0 +1,82 @@
+import kubernetes
+
+from src.tailscale_svc_lb_controller import helpers, config
+from src.tailscale_svc_lb_controller.resources.base import BaseResource
+
+
+class DaemonSet(BaseResource):
+
+    def __init__(self, target_service_name: str, target_service_namespace: str, namespace: str):
+        self.target_service_name = target_service_name
+        self.target_service_namespace = target_service_namespace
+        self.tailscale_proxy_namespace = namespace
+
+    def new(self) -> kubernetes.client.V1DaemonSet:
+        """
+        Returns the kubernetes.client.V1DaemonSet that runs the tailscale proxy instance
+        """
+        return kubernetes.client.V1DaemonSet(
+            api_version="apps/v1",
+            metadata=kubernetes.client.V1ObjectMeta(
+                name=f"{config.RESOURCE_PREFIX}{self.target_service_name}",
+                labels=helpers.get_common_labels(self.target_service_name, self.target_service_namespace)
+            ),
+            spec=kubernetes.client.V1DaemonSetSpec(
+                selector=kubernetes.client.V1LabelSelector(
+                    match_labels=helpers.get_common_labels(self.target_service_name, self.target_service_namespace)
+                ),
+                template=self._generate_pod_template_spec()
+            ),
+        )
+
+    def create(self) -> kubernetes.client.V1DaemonSet:
+        """
+        Creates the DaemonSet that runs the Tailscale Proxy
+        """
+        k8s = kubernetes.client.AppsV1Api()
+        daemon_set = self.new()
+
+        return k8s.create_namespaced_daemon_set(
+            namespace=self.tailscale_proxy_namespace,
+            body=daemon_set
+        )
+
+    def delete(self) -> None:
+        """
+        Delete the DaemonSet deployed as part of a proxy instance, if it exists.
+        """
+        k8s = kubernetes.client.AppsV1Api()
+        # Delete all DaemonSets with svc-name label
+        try:
+            k8s.delete_collection_namespaced_daemon_set(
+                namespace=self.tailscale_proxy_namespace,
+                label_selector=f"{config.SERVICE_NAME_LABEL}={self.target_service_name}"
+            )
+        except kubernetes.client.exceptions.ApiException as e:
+            if e.status == 404:
+                return None
+            raise e
+
+    def get(self) -> kubernetes.client.V1DaemonSet | None:
+        """
+        Fetches the current DaemonSet that should have been deployed as part of the proxy instance
+        """
+        k8s = kubernetes.client.AppsV1Api()
+        try:
+            return k8s.read_namespaced_daemon_set(
+                namespace=self.tailscale_proxy_namespace,
+                name=f"{config.RESOURCE_PREFIX}{self.target_service_name}"
+            )
+        except kubernetes.client.exceptions.ApiException as e:
+            if e.status == 404:
+                return None
+            else:
+                raise e
+
+    def reconcile(self):
+        """
+        Creates the resource if it doesn't already exist
+        """
+        existing = self.get()
+        if existing is None:
+            self.create()
diff --git a/src/tailscale_svc_lb_controller/resources/deployment.py b/src/tailscale_svc_lb_controller/resources/deployment.py
new file mode 100644
index 0000000..fde20aa
--- /dev/null
+++ b/src/tailscale_svc_lb_controller/resources/deployment.py
@@ -0,0 +1,82 @@
+import kubernetes
+
+from src.tailscale_svc_lb_controller import helpers, config
+from src.tailscale_svc_lb_controller.resources.base import BaseResource
+
+
+class Deployment(BaseResource):
+
+    def __init__(self, target_service_name: str, target_service_namespace: str, namespace: str):
+        self.target_service_name = target_service_name
+        self.target_service_namespace = target_service_namespace
+        self.tailscale_proxy_namespace = namespace
+
+    def new(self) -> kubernetes.client.V1Deployment:
+        """
+        Returns the kubernetes.client.V1Deployment that runs the tailscale proxy instance
+        """
+        return kubernetes.client.V1Deployment(
+            api_version="apps/v1",
+            metadata=kubernetes.client.V1ObjectMeta(
+                name=f"{config.RESOURCE_PREFIX}{self.target_service_name}",
+                labels=helpers.get_common_labels(self.target_service_name, self.target_service_namespace)
+            ),
+            spec=kubernetes.client.V1DeploymentSpec(
+                selector=kubernetes.client.V1LabelSelector(
+                    match_labels=helpers.get_common_labels(self.target_service_name, self.target_service_namespace)
+                ),
+                replicas=config.TS_PROXY_REPLICA_COUNT,
+                template=self._generate_pod_template_spec()
+            ),
+        )
+
+    def create(self) -> kubernetes.client.V1Deployment:
+        """
+        Creates the Deployment that runs the Tailscale Proxy
+        """
+        k8s = kubernetes.client.AppsV1Api()
+        deployment = self.new()
+        return k8s.create_namespaced_deployment(
+            namespace=self.tailscale_proxy_namespace,
+            body=deployment
+        )
+
+    def delete(self) -> None:
+        """
+        Delete the Deployment deployed as part of a proxy instance
+        """
+        k8s = kubernetes.client.AppsV1Api()
+        # Delete all Deployments with svc-name label
+        try:
+            k8s.delete_collection_namespaced_deployment(
+                namespace=self.tailscale_proxy_namespace,
+                label_selector=f"{config.SERVICE_NAME_LABEL}={self.target_service_name}"
+            )
+        except kubernetes.client.exceptions.ApiException as e:
+            if e.status == 404:
+                return None
+            raise e
+
+    def get(self) -> kubernetes.client.V1Deployment | None:
+        """
+        Fetches the current Deployment that should have been deployed as part of the proxy instance
+        """
+        k8s = kubernetes.client.AppsV1Api()
+        try:
+            return k8s.read_namespaced_deployment(
+                namespace=self.tailscale_proxy_namespace,
+                name=f"{config.RESOURCE_PREFIX}{self.target_service_name}"
+            )
+        except kubernetes.client.exceptions.ApiException as e:
+            if e.status == 404:
+                return None
+            else:
+                raise e
+
+    def reconcile(self):
+        """
+        Creates the resource if it doesn't already exist
+        """
+        existing = self.get()
+        if existing is None:
+            self.create()
diff --git a/src/tailscale_svc_lb_controller/resources/role.py b/src/tailscale_svc_lb_controller/resources/role.py
new file mode 100644
index 0000000..a6aee5a
--- /dev/null
+++ b/src/tailscale_svc_lb_controller/resources/role.py
@@ -0,0 +1,92 @@
+import kubernetes
+
+from src.tailscale_svc_lb_controller import helpers, config
+from src.tailscale_svc_lb_controller.resources.base import BaseResource
+
+
+class Role(BaseResource):
+
+    def __init__(self, target_service_name: str, target_service_namespace: str, namespace: str):
+        self.target_service_name = target_service_name
+        self.target_service_namespace = target_service_namespace
+        self.tailscale_proxy_namespace = namespace
+
+    def new(self) -> kubernetes.client.V1Role:
+        """
+        Returns the kubernetes.client.V1Role required by the Tailscale Proxy instance
+        """
+        return kubernetes.client.V1Role(
+            metadata=kubernetes.client.V1ObjectMeta(
+                name=f"{config.RESOURCE_PREFIX}{self.target_service_name}",
+                labels=helpers.get_common_labels(self.target_service_name, self.target_service_namespace),
+                namespace=self.tailscale_proxy_namespace,
+            ),
+            rules=[
+                kubernetes.client.V1PolicyRule(
+                    api_groups=[""],
+                    resources=["secrets", "endpoints"],
+                    verbs=["create"]
+                ),
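+                # Note: Kubernetes RBAC cannot restrict the "create" verb by resource name
+                # (the object's name is not known at authorization time), hence the unscoped
+                # create rule above and the name-scoped rules below.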
+                kubernetes.client.V1PolicyRule(
+                    api_groups=[""],
+                    resource_names=[f"{config.RESOURCE_PREFIX}{self.target_service_name}"],
+                    resources=["secrets", "endpoints"],
+                    verbs=["get", "update", "patch"]
+                ),
+                kubernetes.client.V1PolicyRule(
+                    api_groups=["coordination.k8s.io"],
+                    resource_names=[f"{config.RESOURCE_PREFIX}{self.target_service_name}"],
+                    resources=["leases"],
+                    verbs=["*"]
+                )
+            ],
+        )
+
+    def create(self) -> kubernetes.client.V1Role:
+        """
+        Creates the Role necessary to run the Tailscale Proxy
+        """
+        k8s = kubernetes.client.RbacAuthorizationV1Api()
+        return k8s.create_namespaced_role(
+            namespace=self.tailscale_proxy_namespace,
+            body=self.new()
+        )
+
+    def delete(self) -> None:
+        """
+        Delete the Role deployed as part of a proxy instance
+        """
+        k8s = kubernetes.client.RbacAuthorizationV1Api()
+        try:
+            k8s.delete_collection_namespaced_role(
+                namespace=self.tailscale_proxy_namespace,
+                label_selector=f"{config.SERVICE_NAME_LABEL}={self.target_service_name}"
+            )
+        except kubernetes.client.exceptions.ApiException as e:
+            if e.status == 404:
+                return None
+            raise e
+
+    def get(self) -> kubernetes.client.V1Role | None:
+        """
+        Fetches the current Role that should have been deployed as part of the proxy instance
+        """
+        k8s = kubernetes.client.RbacAuthorizationV1Api()
+        try:
+            return k8s.read_namespaced_role(
+                namespace=self.tailscale_proxy_namespace,
+                name=f"{config.RESOURCE_PREFIX}{self.target_service_name}"
+            )
+        except kubernetes.client.exceptions.ApiException as e:
+            if e.status == 404:
+                return None
+            else:
+                raise e
+
+    def reconcile(self):
+        """
+        Creates the resource if it doesn't already exist
+        """
+        existing = self.get()
+        if existing is None:
+            self.create()
diff --git a/src/tailscale_svc_lb_controller/resources/role_binding.py b/src/tailscale_svc_lb_controller/resources/role_binding.py
new file mode 100644
index 0000000..05ec840
--- /dev/null
+++ b/src/tailscale_svc_lb_controller/resources/role_binding.py
@@ -0,0 +1,88 @@
+import logging
+
+import kubernetes
+
+from src.tailscale_svc_lb_controller import helpers, config
+from src.tailscale_svc_lb_controller.resources.base import BaseResource
+
+
+class RoleBinding(BaseResource):
+
+    def __init__(self, target_service_name: str, target_service_namespace: str, namespace: str):
+        self.target_service_name = target_service_name
+        self.target_service_namespace = target_service_namespace
+        self.tailscale_proxy_namespace = namespace
+
+    def new(self) -> kubernetes.client.V1RoleBinding:
+        """
+        Returns the kubernetes.client.V1RoleBinding used for a Tailscale Proxy instance
+        """
+        return kubernetes.client.V1RoleBinding(
+            metadata=kubernetes.client.V1ObjectMeta(
+                name=f"{config.RESOURCE_PREFIX}{self.target_service_name}",
+                labels=helpers.get_common_labels(self.target_service_name, self.target_service_namespace),
+                namespace=self.tailscale_proxy_namespace,
+            ),
+            role_ref=kubernetes.client.V1RoleRef(
+                api_group="rbac.authorization.k8s.io",
+                kind="Role",
+                name=f"{config.RESOURCE_PREFIX}{self.target_service_name}",
+            ),
+            subjects=[
+                kubernetes.client.V1Subject(
+                    kind="ServiceAccount",
+                    name=f"{config.RESOURCE_PREFIX}{self.target_service_name}",
+                    namespace=self.tailscale_proxy_namespace,
+                ),
+            ],
+        )
+
+    def create(self) -> kubernetes.client.V1RoleBinding:
+        """
+        Creates the RoleBinding necessary to run the Tailscale Proxy
+        """
+        k8s = kubernetes.client.RbacAuthorizationV1Api()
+        return k8s.create_namespaced_role_binding(
+            namespace=self.tailscale_proxy_namespace,
+            body=self.new()
+        )
+
+    def delete(self) -> None:
+        """
+        Delete the RoleBinding deployed as part of a proxy instance
+        """
+        k8s = kubernetes.client.RbacAuthorizationV1Api()
+        try:
+            k8s.delete_collection_namespaced_role_binding(
+                namespace=self.tailscale_proxy_namespace,
+                label_selector=f"{config.SERVICE_NAME_LABEL}={self.target_service_name}"
+            )
+        except kubernetes.client.exceptions.ApiException as e:
+            if e.status == 404:
+                logging.error(e)
+                return None
+            raise e
+
+    def get(self) -> kubernetes.client.V1RoleBinding | None:
+        """
+        Fetches the current RoleBinding that should have been deployed as part of the proxy instance
+        """
+        k8s = kubernetes.client.RbacAuthorizationV1Api()
+        try:
+            return k8s.read_namespaced_role_binding(
+                namespace=self.tailscale_proxy_namespace,
+                name=f"{config.RESOURCE_PREFIX}{self.target_service_name}"
+            )
+        except kubernetes.client.exceptions.ApiException as e:
+            if e.status == 404:
+                return None
+            else:
+                raise e
+
+    def reconcile(self):
+        """
+        Creates the resource if it doesn't already exist
+        """
+        existing = self.get()
+        if existing is None:
+            self.create()
diff --git a/src/tailscale_svc_lb_controller/resources/secret.py b/src/tailscale_svc_lb_controller/resources/secret.py
new file mode 100644
index 0000000..d05db25
--- /dev/null
+++ b/src/tailscale_svc_lb_controller/resources/secret.py
@@ -0,0 +1,75 @@
+import kubernetes
+
+from src.tailscale_svc_lb_controller import helpers, config
+from src.tailscale_svc_lb_controller.resources.base import BaseResource
+
+
+class Secret(BaseResource):
+
+    def __init__(self, target_service_name: str, target_service_namespace: str, namespace: str):
+        self.target_service_name = target_service_name
+        self.target_service_namespace = target_service_namespace
+        self.tailscale_proxy_namespace = namespace
+
+    def new(self) -> kubernetes.client.V1Secret:
+        """
+        Returns the kubernetes.client.V1Secret required for the Tailscale Proxy
+        """
+        return kubernetes.client.V1Secret(
+            metadata=kubernetes.client.V1ObjectMeta(
+                name=f"{config.RESOURCE_PREFIX}{self.target_service_name}",
+                labels=helpers.get_common_labels(self.target_service_name, self.target_service_namespace),
+                namespace=self.tailscale_proxy_namespace,
+            ),
+            type="Opaque",
+            string_data={}
+        )
+
+    def create(self) -> kubernetes.client.V1Secret:
+        """
+        Creates the Secret necessary to run the Tailscale Proxy
+        """
+        k8s = kubernetes.client.CoreV1Api()
+        return k8s.create_namespaced_secret(
+            namespace=self.tailscale_proxy_namespace,
+            body=self.new()
+        )
+
+    def delete(self) -> None:
+        """
+        Delete the Secret deployed as part of a proxy instance
+        """
+        k8s = kubernetes.client.CoreV1Api()
+        try:
+            k8s.delete_collection_namespaced_secret(
+                namespace=self.tailscale_proxy_namespace,
+                label_selector=f"{config.SERVICE_NAME_LABEL}={self.target_service_name}"
+            )
+        except kubernetes.client.exceptions.ApiException as e:
+            if e.status == 404:
+                return None
+            raise e
+
+    def get(self) -> kubernetes.client.V1Secret | None:
+        """
+        Fetches the current Secret that should have been deployed as part of the proxy instance
+        """
+        k8s = kubernetes.client.CoreV1Api()
+        try:
+            return k8s.read_namespaced_secret(
+                namespace=self.tailscale_proxy_namespace,
+                name=f"{config.RESOURCE_PREFIX}{self.target_service_name}"
+            )
+        except kubernetes.client.exceptions.ApiException as e:
+            if e.status == 404:
+                return None
+            else:
+                raise e
+
+    def reconcile(self):
+        """
+        Creates the resource if it doesn't already exist
+        """
+        existing = self.get()
+        if existing is None:
+            self.create()
diff --git a/src/tailscale_svc_lb_controller/resources/service_account.py b/src/tailscale_svc_lb_controller/resources/service_account.py
new file mode 100644
index 0000000..a3fe849
--- /dev/null
+++ b/src/tailscale_svc_lb_controller/resources/service_account.py
@@ -0,0 +1,73 @@
+import kubernetes
+
+from src.tailscale_svc_lb_controller import helpers, config
+from src.tailscale_svc_lb_controller.resources.base import BaseResource
+
+
+class ServiceAccount(BaseResource):
+
+    def __init__(self, target_service_name: str, target_service_namespace: str, namespace: str):
+        self.target_service_name = target_service_name
+        self.target_service_namespace = target_service_namespace
+        self.tailscale_proxy_namespace = namespace
+
+    def new(self) -> kubernetes.client.V1ServiceAccount:
+        """
+        Returns the kubernetes.client.V1ServiceAccount required for the Tailscale Proxy
+        """
kubernetes.client.V1ServiceAccount( + metadata=kubernetes.client.V1ObjectMeta( + name=f"{config.RESOURCE_PREFIX}{self.target_service_name}", + labels=helpers.get_common_labels(self.target_service_name, self.target_service_namespace), + namespace=self.tailscale_proxy_namespace + ) + ) + + def create(self) -> kubernetes.client.V1ServiceAccount: + """ + Creates the ServiceAccount necessary to run the Tailscale Proxy + """ + k8s = kubernetes.client.CoreV1Api() + return k8s.create_namespaced_service_account( + namespace=self.tailscale_proxy_namespace, + body=self.new() + ) + + def delete(self) -> None: + """ + Delete the ServiceAccount deployed as part of a proxy instance + """ + k8s = kubernetes.client.CoreV1Api() + try: + k8s.delete_collection_namespaced_service_account( + namespace=self.tailscale_proxy_namespace, + label_selector=f"{config.SERVICE_NAME_LABEL}={self.target_service_name}" + ) + except kubernetes.client.exceptions.ApiException as e: + if e.status == 404: + return None + raise e + + def get(self) -> kubernetes.client.V1ServiceAccount | None: + """ + Fetches the current ServiceAccount that should have been deployed as part of the proxy instance + """ + k8s = kubernetes.client.CoreV1Api() + try: + return k8s.read_namespaced_service_account( + namespace=self.tailscale_proxy_namespace, + name=f"{config.RESOURCE_PREFIX}{self.target_service_name}" + ) + except kubernetes.client.exceptions.ApiException as e: + if e.status == 404: + return None + else: + raise e + + def reconcile(self): + """ + Creates the resource if it doesn't already exist + """ + existing = self.get() + if existing is None: + self.create() diff --git a/src/tailscale_svc_lb_controller/tailscale_proxy.py b/src/tailscale_svc_lb_controller/tailscale_proxy.py new file mode 100644 index 0000000..c016ae1 --- /dev/null +++ b/src/tailscale_svc_lb_controller/tailscale_proxy.py @@ -0,0 +1,61 @@ +from src.tailscale_svc_lb_controller.resources.daemonset import DaemonSet +from src.tailscale_svc_lb_controller.resources.deployment import Deployment +from src.tailscale_svc_lb_controller.resources.role import Role +from src.tailscale_svc_lb_controller.resources.role_binding import RoleBinding +from src.tailscale_svc_lb_controller.resources.secret import Secret +from src.tailscale_svc_lb_controller.resources.service_account import ServiceAccount + + +class TailscaleProxyResource: + """ + Class to handle adding/fetching/deleting TailScale proxy kube resources + """ + target_service_name = "" + tailscale_proxy_namespace = "" + target_service_namespace = "" + deployment_type = "" + + def __init__(self, target_service_name: str, target_service_namespace: str, + tailscale_proxy_namespace: str, deployment_type: str): + """ + target_service_name: Name of the target Service this Proxy Instance should direct traffic to + target_service_namespace: Namespace of the target Service this Proxy Instance should direct traffic to + tailscale_proxy_namespace: Namespace that the Tailscale Proxy resources will be created in + """ + self.target_service_name = target_service_name + self.target_service_namespace = target_service_namespace + self.tailscale_proxy_namespace = tailscale_proxy_namespace + self.deployment_type = deployment_type + + def create(self): + ServiceAccount(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).create() + Role(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).create() + RoleBinding(self.target_service_name, self.target_service_namespace, 
self.tailscale_proxy_namespace).create() + Secret(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).create() + if self.deployment_type.lower() == "daemonset": + DaemonSet(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).create() + elif self.deployment_type.lower() == "deployment": + Deployment(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).create() + + def delete(self): + if self.deployment_type.lower() == "daemonset": + DaemonSet(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).delete() + elif self.deployment_type.lower() == "deployment": + Deployment(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).delete() + Secret(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).delete() + RoleBinding(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).delete() + Role(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).delete() + ServiceAccount(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).delete() + + def reconcile(self): + ServiceAccount(self.target_service_name, self.target_service_namespace, + self.tailscale_proxy_namespace).reconcile() + Role(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).reconcile() + RoleBinding(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).reconcile() + Secret(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).reconcile() + if self.deployment_type.lower() == "daemonset": + DaemonSet(self.target_service_name, self.target_service_namespace, + self.tailscale_proxy_namespace).reconcile() + elif self.deployment_type.lower() == "deployment": + Deployment(self.target_service_name, self.target_service_namespace, + self.tailscale_proxy_namespace).reconcile() diff --git a/src/tailscale_svc_lb_controller/test_tailscale_operator.py b/src/tailscale_svc_lb_controller/test_tailscale_operator.py new file mode 100644 index 0000000..9dd3eba --- /dev/null +++ b/src/tailscale_svc_lb_controller/test_tailscale_operator.py @@ -0,0 +1,89 @@ +import logging +import os +import random +import string +import time + +import kubernetes.client +import yaml +from kopf.testing import KopfRunner +from kubernetes import client, config + +import config as operator_config + + +def test_operator(): + ts_auth_key = os.getenv("TS_AUTH_KEY") + if ts_auth_key is None: + raise Exception("A Tailscale Auth Key must be supplied via the TS_AUTH_KEY env variable") + + print("Configuring Tailscale Auth Key Secret...") + + # Create a secret in the namespace controller to automatically authenticate against Tailscale + config.load_config() + k8s_api = client.CoreV1Api() + rbac_api = client.RbacAuthorizationV1Api() + app_api = client.AppsV1Api() + + k8s_api.create_namespaced_secret(operator_config.TS_PROXY_NAMESPACE, kubernetes.client.V1Secret( + metadata=kubernetes.client.V1ObjectMeta( + name=operator_config.SECRET_NAME, + namespace=operator_config.TS_PROXY_NAMESPACE, + ), + type="Opaque", + string_data={ + 'ts-auth-key': ts_auth_key + } + )) + + print("Starting operator...") + with KopfRunner(['run', '-A', '--verbose', 'main.py']) as runner: + # Create a namespace to use for deploying example resources + testing_namespace_name = 
''.join(random.choices(string.ascii_lowercase + string.digits, k=8))
+        logging.info(f"Creating namespace {testing_namespace_name} for example resources")
+        k8s_api.create_namespace(kubernetes.client.V1Namespace(
+            metadata=kubernetes.client.V1ObjectMeta(
+                name=testing_namespace_name,
+            )
+        ))
+        try:
+            with open(os.path.join(os.path.dirname(__file__), 'examples', 'pod.yaml')) as file:
+                pod = yaml.load(file, Loader=yaml.FullLoader)
+                k8s_api.create_namespaced_pod(testing_namespace_name, pod)
+            with open(os.path.join(os.path.dirname(__file__), 'examples', 'service.yaml')) as file:
+                service = yaml.load(file, Loader=yaml.FullLoader)
+                k8s_api.create_namespaced_service(testing_namespace_name, service)
+
+            # Give it some time to create the tailscale proxy resources
+            time.sleep(20)
+
+            secret = k8s_api.read_namespaced_secret('ts-kuard', operator_config.TS_PROXY_NAMESPACE)
+            assert (secret is not None)
+            service_account = k8s_api.read_namespaced_service_account('ts-kuard', operator_config.TS_PROXY_NAMESPACE)
+            assert (service_account is not None)
+            role = rbac_api.read_namespaced_role('ts-kuard', operator_config.TS_PROXY_NAMESPACE)
+            assert (role is not None)
+            role_binding = rbac_api.read_namespaced_role_binding('ts-kuard', operator_config.TS_PROXY_NAMESPACE)
+            assert (role_binding is not None)
+            deployment = app_api.read_namespaced_deployment('ts-kuard', operator_config.TS_PROXY_NAMESPACE)
+            assert (deployment is not None)
+        finally:
+            # No `except` clause here: swallowing exceptions would hide assertion
+            # failures, so they propagate after cleanup and fail the test properly.
+            k8s_api.delete_namespaced_secret(
+                namespace=operator_config.TS_PROXY_NAMESPACE,
+                name=operator_config.SECRET_NAME
+            )
+            k8s_api.delete_namespace(testing_namespace_name)
+
+    # Give the operator time to cleanup any resources it created
+    time.sleep(15)
+
+    print(runner.output)
+    assert runner.exit_code == 0
+    assert runner.exception is None
+    assert "Exception" not in runner.output
+    assert "Error" not in runner.output
+
+
+test_operator()

From 7d9b65564a738a9985404cc9247ad4ce9f284293 Mon Sep 17 00:00:00 2001
From: Ben Bettridge
Date: Sun, 31 Jul 2022 21:45:27 +1200
Subject: [PATCH 3/5] Feature/helm actions (#2)

Use helm chart-testing to test Merge Requests (only occurs when chart
changes) and chart-releaser to publish our helm chart to gh-pages

Co-authored-by: Ben Bettridge
---
 .github/workflows/helm-lint.yaml              | 42 +++++++++++++++++++
 .github/workflows/helm-release.yaml           | 31 ++++++++++++++
 chart/Chart.yaml                              |  6 ---
 chart/service.yaml                            | 14 -------
 .../tailscale-svc-lb}/.helmignore             |  0
 charts/tailscale-svc-lb/Chart.yaml            | 13 ++++++
 .../tailscale-svc-lb}/templates/_helpers.tpl  |  0
 .../templates/deployment.yaml                 |  0
 .../tailscale-svc-lb}/templates/rbac.yaml     |  0
 .../tailscale-svc-lb}/templates/secret.yaml   |  0
 .../templates/serviceaccount.yaml             |  0
 .../tailscale-svc-lb}/values.yaml             | 14 +++----
 12 files changed, 92 insertions(+), 28 deletions(-)
 create mode 100644 .github/workflows/helm-lint.yaml
 create mode 100644 .github/workflows/helm-release.yaml
 delete mode 100644 chart/Chart.yaml
 delete mode 100644 chart/service.yaml
 rename {chart => charts/tailscale-svc-lb}/.helmignore (100%)
 create mode 100644 charts/tailscale-svc-lb/Chart.yaml
 rename {chart => charts/tailscale-svc-lb}/templates/_helpers.tpl (100%)
 rename {chart => charts/tailscale-svc-lb}/templates/deployment.yaml (100%)
 rename {chart => charts/tailscale-svc-lb}/templates/rbac.yaml (100%)
 rename {chart => charts/tailscale-svc-lb}/templates/secret.yaml (100%)
 rename {chart => charts/tailscale-svc-lb}/templates/serviceaccount.yaml (100%)
 rename {chart => 
charts/tailscale-svc-lb}/values.yaml (96%) diff --git a/.github/workflows/helm-lint.yaml b/.github/workflows/helm-lint.yaml new file mode 100644 index 0000000..c801fad --- /dev/null +++ b/.github/workflows/helm-lint.yaml @@ -0,0 +1,42 @@ +name: Lint and Test Charts + +on: pull_request + +jobs: + lint-test: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: Set up Helm + uses: azure/setup-helm@v1 + with: + version: v3.9.2 + + - uses: actions/setup-python@v2 + with: + python-version: 3.7 + + - name: Set up chart-testing + uses: helm/chart-testing-action@v2.2.1 + + - name: Run chart-testing (list-changed) + id: list-changed + run: | + changed=$(ct list-changed --target-branch ${{ github.event.repository.default_branch }}) + if [[ -n "$changed" ]]; then + echo "::set-output name=changed::true" + fi + + - name: Run chart-testing (lint) + run: ct lint --target-branch ${{ github.event.repository.default_branch }} + + - name: Create kind cluster + uses: helm/kind-action@v1.2.0 + if: steps.list-changed.outputs.changed == 'true' + + - name: Run chart-testing (install) + run: ct install --target-branch ${{ github.event.repository.default_branch }} diff --git a/.github/workflows/helm-release.yaml b/.github/workflows/helm-release.yaml new file mode 100644 index 0000000..122cf33 --- /dev/null +++ b/.github/workflows/helm-release.yaml @@ -0,0 +1,31 @@ +name: Helm Publish + +on: + push: + branches: + - main +jobs: + release: + permissions: + contents: write + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: Configure Git + run: | + git config user.name "$GITHUB_ACTOR" + git config user.email "$GITHUB_ACTOR@users.noreply.github.com" + + - name: Install Helm + uses: azure/setup-helm@v1 + with: + version: v3.8.1 + + - name: Run chart-releaser + uses: helm/chart-releaser-action@v1.4.0 + env: + CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" diff --git a/chart/Chart.yaml b/chart/Chart.yaml deleted file mode 100644 index 346ce43..0000000 --- a/chart/Chart.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v2 -name: tailscale-svc-lb -description: klipper-lb but Tailscale -type: application -version: 1.0.0 -appVersion: "1.0.0" diff --git a/chart/service.yaml b/chart/service.yaml deleted file mode 100644 index 8916498..0000000 --- a/chart/service.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: kuard -spec: - selector: - run: kuard - ports: - - protocol: TCP - port: 80 - targetPort: 8080 - type: LoadBalancer - allocateLoadBalancerNodePorts: false - loadBalancerClass: "svc-lb.tailscale.iptables.sh/lb" diff --git a/chart/.helmignore b/charts/tailscale-svc-lb/.helmignore similarity index 100% rename from chart/.helmignore rename to charts/tailscale-svc-lb/.helmignore diff --git a/charts/tailscale-svc-lb/Chart.yaml b/charts/tailscale-svc-lb/Chart.yaml new file mode 100644 index 0000000..88231fb --- /dev/null +++ b/charts/tailscale-svc-lb/Chart.yaml @@ -0,0 +1,13 @@ +apiVersion: v2 +name: tailscale-svc-lb +description: klipper-lb but Tailscale +type: application +version: 1.0.0 +appVersion: "1.0.0" +maintainers: + - name: Ben + email: ben@e720.io + url: https://github.com/bbetter173 + - name: clrxbl + email: michael@iptables.sh + url: https://github.com/clrxbl diff --git a/chart/templates/_helpers.tpl b/charts/tailscale-svc-lb/templates/_helpers.tpl similarity index 100% rename from chart/templates/_helpers.tpl rename to 
charts/tailscale-svc-lb/templates/_helpers.tpl diff --git a/chart/templates/deployment.yaml b/charts/tailscale-svc-lb/templates/deployment.yaml similarity index 100% rename from chart/templates/deployment.yaml rename to charts/tailscale-svc-lb/templates/deployment.yaml diff --git a/chart/templates/rbac.yaml b/charts/tailscale-svc-lb/templates/rbac.yaml similarity index 100% rename from chart/templates/rbac.yaml rename to charts/tailscale-svc-lb/templates/rbac.yaml diff --git a/chart/templates/secret.yaml b/charts/tailscale-svc-lb/templates/secret.yaml similarity index 100% rename from chart/templates/secret.yaml rename to charts/tailscale-svc-lb/templates/secret.yaml diff --git a/chart/templates/serviceaccount.yaml b/charts/tailscale-svc-lb/templates/serviceaccount.yaml similarity index 100% rename from chart/templates/serviceaccount.yaml rename to charts/tailscale-svc-lb/templates/serviceaccount.yaml diff --git a/chart/values.yaml b/charts/tailscale-svc-lb/values.yaml similarity index 96% rename from chart/values.yaml rename to charts/tailscale-svc-lb/values.yaml index ab822f6..eb4dbbb 100644 --- a/chart/values.yaml +++ b/charts/tailscale-svc-lb/values.yaml @@ -69,8 +69,8 @@ controller: proxy: # How to deploy the Tailscale Proxy instances - valid options are 'DaemonSet', 'Deployment' - type: Deployment - # The number of Tailscale Proxy Replicas to run for each instance. Only applies if proxy.type is set to 'Deployment' + type: Deployment + # The number of Tailscale Proxy Replicas to run for each instance. Only applies if proxy.type is set to 'Deployment' deploymentReplicas: 2 # NodeLabel to use when creating Tailscale Proxy deployment. The value of this label should be "true" @@ -92,11 +92,11 @@ proxy: tag: "latest" # A list of imagePullSecret names that will be used when fetching the runtime images. - imagePullSecrets: [ ] + imagePullSecrets: [] # - exampleSecretName # - anotherSecretName - resources: { } + resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following @@ -114,11 +114,11 @@ proxy: pullPolicy: IfNotPresent tag: "0.5" - imagePullSecrets: [ ] + imagePullSecrets: [] # - exampleSecretName # - anotherSecretName - resources: { } + resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following @@ -129,5 +129,3 @@ proxy: # requests: # cpu: 100m # memory: 128Mi - - From d01ef54ae6d0b06ff94a7f07cdc9bdd5579ae9da Mon Sep 17 00:00:00 2001 From: Ben Bettridge Date: Mon, 1 Aug 2022 10:48:48 +1200 Subject: [PATCH 4/5] Remove tailscale-controller initContainer --- charts/tailscale-svc-lb/templates/deployment.yaml | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/charts/tailscale-svc-lb/templates/deployment.yaml b/charts/tailscale-svc-lb/templates/deployment.yaml index bb2f374..a7a9369 100644 --- a/charts/tailscale-svc-lb/templates/deployment.yaml +++ b/charts/tailscale-svc-lb/templates/deployment.yaml @@ -27,16 +27,6 @@ spec: serviceAccountName: {{ include "tailscale-svc-lb.serviceAccountName" . 
}}
       securityContext:
         {{- toYaml .Values.controller.podSecurityContext | nindent 8 }}
-      initContainers:
-        - name: {{ .Chart.Name }}-sysctl
-          securityContext:
-            {{- toYaml .Values.controller.securityContext | nindent 12 }}
-          image: "{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag | default .Chart.AppVersion }}"
-          imagePullPolicy: {{ .Values.controller.image.pullPolicy }}
-          command:
-            - sh
-            - -c
-            - sysctl -w net.ipv4.ip_forward=1
       containers:
         - name: {{ .Chart.Name }}
           securityContext:

From ea15897c971f2063a3db87739ad2532dcd6a489d Mon Sep 17 00:00:00 2001
From: Ben Bettridge
Date: Wed, 3 Aug 2022 13:08:14 +1200
Subject: [PATCH 5/5] Make changes as suggested in review

---
 runtime/run.sh                             | 18 +++---
 src/tailscale_svc_lb_controller/config.py  | 10 +++-
 src/tailscale_svc_lb_controller/helpers.py | 45 +++++++--------
 .../resources/base.py                      | 37 +++++++++----
 .../resources/daemonset.py                 | 32 ++---------
 .../resources/deployment.py                | 28 +---------
 .../resources/role.py                      | 28 +---------
 .../resources/role_binding.py              | 29 +---------
 .../resources/secret.py                    | 28 +---------
 .../resources/service_account.py           | 26 +--------
 .../tailscale_proxy.py                     | 55 ++++++++-----------
 11 files changed, 108 insertions(+), 228 deletions(-)

diff --git a/runtime/run.sh b/runtime/run.sh
index 6ead063..a6c51a8 100755
--- a/runtime/run.sh
+++ b/runtime/run.sh
@@ -42,14 +42,13 @@ if [[ "${DEBUG_SKIP_LEADER}" == "true" ]]; then
 else
   echo "Waiting for leader election..."
   LEADER=false
-  while [[ "${LEADER}" == "false" ]]; do
+  while :; do
     CURRENT_LEADER=$(curl http://127.0.0.1:4040 -s -m 2 | jq -r ".name")
     if [[ "${CURRENT_LEADER}" == "$(hostname)" ]]; then
       echo "I am the leader."
-      LEADER=true
-    else
-      sleep 1
-    fi
+      break
+    fi
+    sleep 1
   done
 fi
@@ -58,13 +57,13 @@ tailscaled ${TAILSCALED_ARGS} &
 PID=$!
 
 UP_ARGS="--accept-dns=${TS_ACCEPT_DNS}"
-if [[ ! -z "${TS_AUTH_KEY}" ]]; then
+if [[ -n "${TS_AUTH_KEY}" ]]; then
   UP_ARGS="--authkey=${TS_AUTH_KEY} ${UP_ARGS}"
 fi
-if [[ ! -z "${TS_EXTRA_ARGS}" ]]; then
+if [[ -n "${TS_EXTRA_ARGS}" ]]; then
   UP_ARGS="${UP_ARGS} ${TS_EXTRA_ARGS:-}"
 fi
-if [[ ! -z "${TS_HOSTNAME}" ]]; then
+if [[ -n "${TS_HOSTNAME}" ]]; then
   echo "Overriding system hostname using TS_HOSTNAME: ${TS_HOSTNAME}"
   UP_ARGS="--hostname=${TS_HOSTNAME} ${UP_ARGS}"
 fi
@@ -82,7 +81,7 @@ echo "Trying to get the service ClusterIP..."
 SVC_IP_RETRIEVED=false
 while [[ "${SVC_IP_RETRIEVED}" == "false" ]]; do
   SVC_IP=$(getent hosts ${SVC_NAME}.${SVC_NAMESPACE}.svc | cut -d" " -f1)
-  if [[ ! -z "${SVC_IP}" ]]; then
+  if [[ -n "${SVC_IP}" ]]; then
     SVC_IP_RETRIEVED=true
   else
     sleep 1
@@ -100,4 +99,4 @@ echo "Updating secret with Tailscale IP"
 
 # patch secret with the tailscale ipv4 address
 kubectl patch secret "${TS_KUBE_SECRET}" --namespace "${PROXY_NAMESPACE}" --type=json --patch="[{\"op\":\"replace\",\"path\":\"/data/ts-ip\",\"value\":\"${TS_IP_B64}\"}]"
-wait ${PID}
+wait ${PID}
\ No newline at end of file
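For readers following the run.sh change: the leader-election gate simply polls the leader-elector sidecar until it reports this pod's hostname, then continues exactly once. Below is a minimal, illustrative Python sketch of the same gate, assuming (as the script does) that the sidecar serves JSON with a `name` field on 127.0.0.1:4040; the function name is invented for illustration only.

    import json
    import socket
    import time
    import urllib.request

    def wait_until_leader(endpoint: str = "http://127.0.0.1:4040") -> None:
        """Block until the leader-elector sidecar reports this pod as leader."""
        while True:
            try:
                # The sidecar answers with JSON such as {"name": "<pod hostname>"}
                with urllib.request.urlopen(endpoint, timeout=2) as response:
                    current_leader = json.load(response).get("name")
            except OSError:
                current_leader = None  # sidecar not reachable yet; keep polling
            if current_leader == socket.gethostname():
                return  # this pod is the leader; proceed with tailscaled startup
            time.sleep(1)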
diff --git a/src/tailscale_svc_lb_controller/config.py b/src/tailscale_svc_lb_controller/config.py
index bf1d903..7c589fe 100755
--- a/src/tailscale_svc_lb_controller/config.py
+++ b/src/tailscale_svc_lb_controller/config.py
@@ -20,7 +20,7 @@
 # A semi-colon seperated string containing the names of any secrets that should be used
 # when pulling images. Secret must already exist and be present in the TS_PROXY_NAMESPACE
 IMAGE_PULL_SECRETS = env.get("IMAGE_PULL_SECRETS", "")
-if not re.match(r"^([a-z]|-|\d|;)*$", IMAGE_PULL_SECRETS):
+if not re.match(r"^[a-z\d;-]*$", IMAGE_PULL_SECRETS):
     logging.error("IMAGE_PULL_SECRETS invalid. Should be a semi-colon seperated list of"
                   "secret names.")
     sys.exit(1)
@@ -37,8 +37,12 @@
 # If TS_PROXY_DEPLOYMENT_TYPE is 'Deployment', this dictates the number of replicas. No effect otherwise.
 try:
     TS_PROXY_REPLICA_COUNT = int(env.get("TS_PROXY_REPLICA_COUNT", "2"))
-except Exception:
-    logging.error("TS_PROXY_REPLICA_COUNT value invalid. Should be an integer above 0.")
+except ValueError:
+    logging.error("TS_PROXY_REPLICA_COUNT value invalid. Expected integer.")
+    sys.exit(1)
+
+if TS_PROXY_REPLICA_COUNT <= 0:
+    logging.error(f"TS_PROXY_REPLICA_COUNT value invalid. Needs to be an integer greater than 0. Received {TS_PROXY_REPLICA_COUNT}")
     sys.exit(1)
 
 # Tailscale Proxy Runtime Container Image
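As a quick illustration of the tightened IMAGE_PULL_SECRETS validation above (the secret names here are invented):

    import re

    # Same character class as in config.py: lowercase letters, digits,
    # semi-colons and dashes; the empty string (no secrets) also passes.
    pattern = re.compile(r"^[a-z\d;-]*$")

    assert pattern.match("regcred;ghcr-pull-secret")   # two secret names
    assert pattern.match("")                           # no secrets configured
    assert pattern.match("Registry_Cred") is None      # uppercase/underscore rejected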
""" - if config.IMAGE_PULL_SECRETS is not None: - logging.debug(f"Image Pull Secrets: {config.IMAGE_PULL_SECRETS}") - retval = [] - secrets = config.IMAGE_PULL_SECRETS.split(";") - for secret in secrets: - retval.append(kubernetes.client.V1LocalObjectReference(name=secret)) - return retval - return [] + if not config.IMAGE_PULL_SECRETS: + return [] + logging.debug(f"Image Pull Secrets: {config.IMAGE_PULL_SECRETS}") + secrets = config.IMAGE_PULL_SECRETS.split(";") + return [kubernetes.client.V1LocalObjectReference(name=secret) for secret in secrets] + +@contextlib.contextmanager +def ignore_k8s_statuses(*ignored_statuses): + try: + yield + except kubernetes.client.exceptions.ApiException as e: + if e.status not in ignored_statuses: + raise e \ No newline at end of file diff --git a/src/tailscale_svc_lb_controller/resources/base.py b/src/tailscale_svc_lb_controller/resources/base.py index 77437e2..595ce40 100644 --- a/src/tailscale_svc_lb_controller/resources/base.py +++ b/src/tailscale_svc_lb_controller/resources/base.py @@ -1,3 +1,5 @@ +import abc +import contextlib import kubernetes from src.tailscale_svc_lb_controller import config @@ -5,20 +7,35 @@ class BaseResource: - target_service_name = "" - target_service_namespace = "" - tailscale_proxy_namespace = "" - # All resources that inherit this class need to implement the following methods - def new(self): raise NotImplementedError + def __init__(self, target_service_name: str, target_service_namespace: str, namespace: str): + self.target_service_name = target_service_name + self.target_service_namespace = target_service_namespace + self.tailscale_proxy_namespace = namespace - def create(self): raise NotImplementedError + @abstractmethod + def new(self): + pass - def delete(self): raise NotImplementedError + @abstractmethod + def create(self): + pass - def get(self): raise NotImplementedError + @abstractmethod + def delete(self): + pass - def reconcile(self): raise NotImplementedError + @abstractmethod + def get(self): + pass + + def reconcile(self): + """ + Creates the resource if it doesn't already exist + """ + existing = self.get() + if existing is None: + self.create() def _generate_pod_template_spec(self) -> kubernetes.client.V1PodTemplateSpec: node_selector = None @@ -133,4 +150,4 @@ def _generate_pod_template_spec(self) -> kubernetes.client.V1PodTemplateSpec: ) ], ), - ) + ) \ No newline at end of file diff --git a/src/tailscale_svc_lb_controller/resources/daemonset.py b/src/tailscale_svc_lb_controller/resources/daemonset.py index 0919f3e..3daad66 100644 --- a/src/tailscale_svc_lb_controller/resources/daemonset.py +++ b/src/tailscale_svc_lb_controller/resources/daemonset.py @@ -6,11 +6,6 @@ class DaemonSet(BaseResource): - def __init__(self, target_service_name: str, target_service_namespace: str, namespace: str): - self.target_service_name = target_service_name - self.target_service_namespace = target_service_namespace - self.tailscale_proxy_namespace = namespace - def new(self) -> kubernetes.client.V1DaemonSet: """ Returns the kubernetes.client.V1DaemonSet that runs the tailscale proxy instance @@ -34,11 +29,11 @@ def create(self) -> kubernetes.client.V1DaemonSet: Creates the DaemonSet that runs the Tailscale Proxy """ k8s = kubernetes.client.AppsV1Api() - deployment = self.new() + daemonset = self.new() return k8s.create_namespaced_daemon_set( namespace=self.tailscale_proxy_namespace, - body=deployment + body=daemonset ) def delete(self) -> None: @@ -47,36 +42,19 @@ def delete(self) -> None: """ k8s = 
kubernetes.client.AppsV1Api() # Delete all DaemonSets with svc-name label - try: + with(helpers.ignore_k8s_statuses(404)): k8s.delete_collection_namespaced_daemon_set( namespace=self.tailscale_proxy_namespace, label_selector=f"{config.SERVICE_NAME_LABEL}={self.target_service_name}" ) - except kubernetes.client.exceptions.ApiException as e: - if e.status == 404: - return None - raise e def get(self) -> kubernetes.client.V1DaemonSet | None: """ Fetches the current DaemonSet that should have been deployed as part of the proxy instance """ k8s = kubernetes.client.AppsV1Api() - try: + with(helpers.ignore_k8s_statuses(404)): return k8s.read_namespaced_daemon_set( namespace=self.tailscale_proxy_namespace, name=f"{config.RESOURCE_PREFIX}{self.target_service_name}" - ) - except kubernetes.client.exceptions.ApiException as e: - if e.status == 404: - return None - else: - raise e - - def reconcile(self): - """ - Creates the resource if it doesn't already exist - """ - existing = self.get() - if existing is None: - self.create() + ) \ No newline at end of file diff --git a/src/tailscale_svc_lb_controller/resources/deployment.py b/src/tailscale_svc_lb_controller/resources/deployment.py index fde20aa..3f54cc0 100644 --- a/src/tailscale_svc_lb_controller/resources/deployment.py +++ b/src/tailscale_svc_lb_controller/resources/deployment.py @@ -6,11 +6,6 @@ class Deployment(BaseResource): - def __init__(self, target_service_name: str, target_service_namespace: str, namespace: str): - self.target_service_name = target_service_name - self.target_service_namespace = target_service_namespace - self.tailscale_proxy_namespace = namespace - def new(self) -> kubernetes.client.V1Deployment: """ Returns the kubernetes.client.V1Deployment that runs the tailscale proxy instance @@ -47,36 +42,19 @@ def delete(self) -> None: """ k8s = kubernetes.client.AppsV1Api() # Delete all Deployments with svc-name label - try: + with(helpers.ignore_k8s_statuses(404)): k8s.delete_collection_namespaced_deployment( namespace=self.tailscale_proxy_namespace, label_selector=f"{config.SERVICE_NAME_LABEL}={self.target_service_name}" ) - except kubernetes.client.exceptions.ApiException as e: - if e.status == 404: - return None - raise e def get(self) -> kubernetes.client.V1Deployment | None: """ Fetches the current Deployment that should have been deployed as part of the proxy instance """ k8s = kubernetes.client.AppsV1Api() - try: + with(helpers.ignore_k8s_statuses(404)): return k8s.read_namespaced_deployment( namespace=self.tailscale_proxy_namespace, name=f"{config.RESOURCE_PREFIX}{self.target_service_name}" - ) - except kubernetes.client.exceptions.ApiException as e: - if e.status == 404: - return None - else: - raise e - - def reconcile(self): - """ - Creates the resource if it doesn't already exist - """ - existing = self.get() - if existing is None: - self.create() + ) \ No newline at end of file diff --git a/src/tailscale_svc_lb_controller/resources/role.py b/src/tailscale_svc_lb_controller/resources/role.py index a6aee5a..dec8c88 100644 --- a/src/tailscale_svc_lb_controller/resources/role.py +++ b/src/tailscale_svc_lb_controller/resources/role.py @@ -6,11 +6,6 @@ class Role(BaseResource): - def __init__(self, target_service_name: str, target_service_namespace: str, namespace: str): - self.target_service_name = target_service_name - self.target_service_namespace = target_service_namespace - self.tailscale_proxy_namespace = namespace - def new(self) -> kubernetes.client.V1Role: """ Returns the kubernetes.client.V1Role that @@ -57,36 
+52,19 @@ def delete(self) -> None:
         Delete the Role deployed as part of a proxy instance
         """
         k8s = kubernetes.client.RbacAuthorizationV1Api()
-        try:
+        with(helpers.ignore_k8s_statuses(404)):
             k8s.delete_collection_namespaced_role(
                 namespace=self.tailscale_proxy_namespace,
                 label_selector=f"{config.SERVICE_NAME_LABEL}={self.target_service_name}"
             )
-        except kubernetes.client.exceptions.ApiException as e:
-            if e.status == 404:
-                return None
-            raise e
 
     def get(self) -> kubernetes.client.V1Role | None:
         """
         Fetches the current Role that should have been deployed as part of the proxy instance
         """
         k8s = kubernetes.client.RbacAuthorizationV1Api()
-        try:
+        with(helpers.ignore_k8s_statuses(404)):
             return k8s.read_namespaced_role(
                 namespace=self.tailscale_proxy_namespace,
                 name=f"{config.RESOURCE_PREFIX}{self.target_service_name}"
-            )
-        except kubernetes.client.exceptions.ApiException as e:
-            if e.status == 404:
-                return None
-            else:
-                raise e
-
-    def reconcile(self):
-        """
-        Creates the resource if it doesn't already exist
-        """
-        existing = self.get()
-        if existing is None:
-            self.create()
+            )
\ No newline at end of file
diff --git a/src/tailscale_svc_lb_controller/resources/role_binding.py b/src/tailscale_svc_lb_controller/resources/role_binding.py
index 05ec840..5ecda7c 100644
--- a/src/tailscale_svc_lb_controller/resources/role_binding.py
+++ b/src/tailscale_svc_lb_controller/resources/role_binding.py
@@ -8,11 +8,6 @@
 
 class RoleBinding(BaseResource):
 
-    def __init__(self, target_service_name: str, target_service_namespace: str, namespace: str):
-        self.target_service_name = target_service_name
-        self.target_service_namespace = target_service_namespace
-        self.tailscale_proxy_namespace = namespace
-
     def new(self) -> kubernetes.client.V1RoleBinding:
         """
         Returns the kubernetes.client.V1RoleBinding used for a Tailscale Proxy instance
@@ -52,37 +47,19 @@ def delete(self) -> None:
         Delete the RoleBinding deployed as part of a proxy instance
         """
         k8s = kubernetes.client.RbacAuthorizationV1Api()
-        try:
+        with(helpers.ignore_k8s_statuses(404)):
             k8s.delete_collection_namespaced_role_binding(
                 namespace=self.tailscale_proxy_namespace,
                 label_selector=f"{config.SERVICE_NAME_LABEL}={self.target_service_name}"
             )
-        except kubernetes.client.exceptions.ApiException as e:
-            if e.status == 404:
-                logging.error(e)
-                return None
-            raise e
 
     def get(self) -> kubernetes.client.V1RoleBinding | None:
         """
         Fetches the current RoleBinding that should have been deployed as part of the proxy instance
         """
         k8s = kubernetes.client.RbacAuthorizationV1Api()
-        try:
+        with(helpers.ignore_k8s_statuses(404)):
             return k8s.read_namespaced_role_binding(
-                namespace=self.target_service_namespace,
+                namespace=self.tailscale_proxy_namespace,
                 name=f"{config.RESOURCE_PREFIX}{self.target_service_name}"
-            )
-        except kubernetes.client.exceptions.ApiException as e:
-            if e.status == 404:
-                return None
-            else:
-                raise e
-
-    def reconcile(self):
-        """
-        Creates the resource if it doesn't already exist
-        """
-        existing = self.get()
-        if existing is None:
-            self.create()
+            )
\ No newline at end of file
diff --git a/src/tailscale_svc_lb_controller/resources/secret.py b/src/tailscale_svc_lb_controller/resources/secret.py
index d05db25..8c7fd62 100644
--- a/src/tailscale_svc_lb_controller/resources/secret.py
+++ b/src/tailscale_svc_lb_controller/resources/secret.py
@@ -6,11 +6,6 @@
 
 class Secret(BaseResource):
 
-    def __init__(self, target_service_name: str, target_service_namespace: str, namespace: str):
-        self.target_service_name = target_service_name
-        self.target_service_namespace = target_service_namespace
-        
self.tailscale_proxy_namespace = namespace - def new(self) -> kubernetes.client.V1Secret: """ Returns the kubernetes.client.V1Secret required for the Tailscale Proxy @@ -40,36 +35,19 @@ def delete(self) -> None: Delete the Secret deployed as part of a proxy instance """ k8s = kubernetes.client.CoreV1Api() - try: + with(helpers.ignore_k8s_statuses(404)): k8s.delete_collection_namespaced_secret( namespace=self.tailscale_proxy_namespace, label_selector=f"{config.SERVICE_NAME_LABEL}={self.target_service_name}" ) - except kubernetes.client.exceptions.ApiException as e: - if e.status == 404: - return None - raise e def get(self) -> kubernetes.client.V1Secret | None: """ Fetches the current Secret that should have been deployed as part of the proxy instance """ k8s = kubernetes.client.CoreV1Api() - try: + with(helpers.ignore_k8s_statuses(404)): return k8s.read_namespaced_secret( namespace=self.tailscale_proxy_namespace, name=f"{config.RESOURCE_PREFIX}{self.target_service_name}" - ) - except kubernetes.client.exceptions.ApiException as e: - if e.status == 404: - return None - else: - raise e - - def reconcile(self): - """ - Creates the resource if it doesn't already exist - """ - existing = self.get() - if existing is None: - self.create() + ) \ No newline at end of file diff --git a/src/tailscale_svc_lb_controller/resources/service_account.py b/src/tailscale_svc_lb_controller/resources/service_account.py index a3fe849..a289349 100644 --- a/src/tailscale_svc_lb_controller/resources/service_account.py +++ b/src/tailscale_svc_lb_controller/resources/service_account.py @@ -6,11 +6,6 @@ class ServiceAccount(BaseResource): - def __init__(self, target_service_name: str, target_service_namespace: str, namespace: str): - self.target_service_name = target_service_name - self.target_service_namespace = target_service_namespace - self.tailscale_proxy_namespace = namespace - def new(self) -> kubernetes.client.V1ServiceAccount: """ Returns the kubernetes.client.V1ServiceAccount required for the Tailscale Proxy @@ -38,36 +33,19 @@ def delete(self) -> None: Delete the ServiceAccount deployed as part of a proxy instance """ k8s = kubernetes.client.CoreV1Api() - try: + with(helpers.ignore_k8s_statuses(404)): k8s.delete_collection_namespaced_service_account( namespace=self.tailscale_proxy_namespace, label_selector=f"{config.SERVICE_NAME_LABEL}={self.target_service_name}" ) - except kubernetes.client.exceptions.ApiException as e: - if e.status == 404: - return None - raise e def get(self) -> kubernetes.client.V1ServiceAccount | None: """ Fetches the current ServiceAccount that should have been deployed as part of the proxy instance """ k8s = kubernetes.client.CoreV1Api() - try: + with(helpers.ignore_k8s_statuses(404)): return k8s.read_namespaced_service_account( namespace=self.tailscale_proxy_namespace, name=f"{config.RESOURCE_PREFIX}{self.target_service_name}" ) - except kubernetes.client.exceptions.ApiException as e: - if e.status == 404: - return None - else: - raise e - - def reconcile(self): - """ - Creates the resource if it doesn't already exist - """ - existing = self.get() - if existing is None: - self.create() diff --git a/src/tailscale_svc_lb_controller/tailscale_proxy.py b/src/tailscale_svc_lb_controller/tailscale_proxy.py index c016ae1..fe9b69b 100644 --- a/src/tailscale_svc_lb_controller/tailscale_proxy.py +++ b/src/tailscale_svc_lb_controller/tailscale_proxy.py @@ -22,40 +22,33 @@ def __init__(self, target_service_name: str, target_service_namespace: str, target_service_namespace: Namespace of the 
target Service this Proxy Instance should direct traffic to
         tailscale_proxy_namespace: Namespace that the Tailscale Proxy resources will be created in
         """
-        self.target_service_name = target_service_name
-        self.target_service_namespace = target_service_namespace
-        self.tailscale_proxy_namespace = tailscale_proxy_namespace
-        self.deployment_type = deployment_type
+        self.resources = [
+            ServiceAccount(target_service_name, target_service_namespace, tailscale_proxy_namespace),
+            Role(target_service_name, target_service_namespace, tailscale_proxy_namespace),
+            RoleBinding(target_service_name, target_service_namespace, tailscale_proxy_namespace),
+            Secret(target_service_name, target_service_namespace, tailscale_proxy_namespace),
+            self.__get_deployment_class(deployment_type)(target_service_name, target_service_namespace, tailscale_proxy_namespace),
+        ]
+
+    @staticmethod
+    def __get_deployment_class(deployment_type: str) -> type[Deployment] | type[DaemonSet]:
+        match deployment_type.lower():
+            case "daemonset":
+                return DaemonSet
+            case "deployment":
+                return Deployment
+            case _:
+                raise ValueError(f"Invalid value for {deployment_type=}")
 
     def create(self):
-        ServiceAccount(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).create()
-        Role(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).create()
-        RoleBinding(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).create()
-        Secret(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).create()
-        if self.deployment_type.lower() == "daemonset":
-            DaemonSet(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).create()
-        elif self.deployment_type.lower() == "deployment":
-            Deployment(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).create()
+        for resource in self.resources:
+            resource.create()
 
     def delete(self):
-        if self.deployment_type.lower() == "daemonset":
-            DaemonSet(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).delete()
-        elif self.deployment_type.lower() == "deployment":
-            Deployment(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).delete()
-        Secret(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).delete()
-        RoleBinding(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).delete()
-        Role(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).delete()
-        ServiceAccount(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).delete()
+        # Delete resources in reverse
+        for resource in self.resources[::-1]:
+            resource.delete()
 
     def reconcile(self):
-        ServiceAccount(self.target_service_name, self.target_service_namespace,
-                       self.tailscale_proxy_namespace).reconcile()
-        Role(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).reconcile()
-        RoleBinding(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).reconcile()
-        Secret(self.target_service_name, self.target_service_namespace, self.tailscale_proxy_namespace).reconcile()
-        if self.deployment_type.lower() == "daemonset":
-            DaemonSet(self.target_service_name, self.target_service_namespace,
-                      self.tailscale_proxy_namespace).reconcile()
-        elif self.deployment_type.lower() == "deployment":
-            
Deployment(self.target_service_name, self.target_service_namespace, - self.tailscale_proxy_namespace).reconcile() + for resource in self.resources: + resource.reconcile() \ No newline at end of file
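A subtlety of the ignore_k8s_statuses helper used throughout this patch: when it suppresses a 404, the surrounding get() simply falls off the end of the function and returns None implicitly, which is exactly what the shared reconcile() checks for. Below is a self-contained sketch of that pattern; read_or_none is a hypothetical stand-in for the resource get() methods, not code from this repository.

    import contextlib

    import kubernetes


    @contextlib.contextmanager
    def ignore_k8s_statuses(*ignored_statuses):
        # Same shape as the helper in helpers.py: swallow ApiExceptions whose
        # HTTP status is in ignored_statuses, re-raise everything else.
        try:
            yield
        except kubernetes.client.exceptions.ApiException as e:
            if e.status not in ignored_statuses:
                raise e


    def read_or_none(name: str, namespace: str) -> kubernetes.client.V1Secret | None:
        k8s = kubernetes.client.CoreV1Api()
        with ignore_k8s_statuses(404):
            return k8s.read_namespaced_secret(name=name, namespace=namespace)
        # Only reached when a 404 was suppressed: the function then returns
        # None implicitly, signalling "this resource does not exist yet".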
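With the resource list built once in __init__, the handler-facing surface of TailscaleProxyResource reduces to three verbs. A short usage sketch under stated assumptions: the service and namespace names below are placeholders, not values from this repository.

    from src.tailscale_svc_lb_controller.tailscale_proxy import TailscaleProxyResource

    proxy = TailscaleProxyResource(
        target_service_name="kuard",             # placeholder Service name
        target_service_namespace="default",      # placeholder Service namespace
        tailscale_proxy_namespace="tailscale",   # where the proxy resources are created
        deployment_type="Deployment",            # or "DaemonSet"
    )

    proxy.create()     # ServiceAccount -> Role -> RoleBinding -> Secret -> Deployment
    proxy.reconcile()  # idempotent: re-creates only whatever is missing
    proxy.delete()     # tears the same resources down in reverse order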