From 6281b31593a1ef80f3c27553ec4a7e346580a40e Mon Sep 17 00:00:00 2001 From: ARPIN Date: Mon, 30 Oct 2023 10:41:22 +0100 Subject: [PATCH] FEAT: e2e CI test and e2e executable --- .github/workflows/e2eCI.yaml | 107 +++++++++++++++++++++++ .github/workflows/test.yml | 2 +- e2e/KindConfig/badpod.yml | 15 ++++ e2e/KindConfig/kind-cluster-1.21.yaml | 11 +++ e2e/KindConfig/kind-cluster-1.23.yaml | 11 +++ e2e/KindConfig/kind-cluster-1.26.yaml | 11 +++ e2e/KindConfig/pod1.yml | 15 ++++ e2e/KindConfig/pod2.yml | 15 ++++ e2e/KindConfig/pod3.yml | 15 ++++ e2e/KindConfig/pod4.yml | 15 ++++ e2e/KindConfig/pod5.yml | 15 ++++ e2e/KotaryService/ConfigMap.yaml | 12 +++ e2e/KotaryService/QuotaClaim.yaml | 7 ++ e2e/KotaryService/QuotaClaimPending.yaml | 7 ++ e2e/KotaryService/QuotaClaimToBig.yaml | 7 ++ e2e/KotaryService/QuotaClaimUp.yaml | 7 ++ e2e/e2e.sh | 92 +++++++++++++++++++ go.mod | 8 +- go.sum | 16 ++-- internal/controller/controller.go | 1 - 20 files changed, 375 insertions(+), 14 deletions(-) create mode 100644 .github/workflows/e2eCI.yaml create mode 100644 e2e/KindConfig/badpod.yml create mode 100644 e2e/KindConfig/kind-cluster-1.21.yaml create mode 100644 e2e/KindConfig/kind-cluster-1.23.yaml create mode 100644 e2e/KindConfig/kind-cluster-1.26.yaml create mode 100644 e2e/KindConfig/pod1.yml create mode 100644 e2e/KindConfig/pod2.yml create mode 100644 e2e/KindConfig/pod3.yml create mode 100644 e2e/KindConfig/pod4.yml create mode 100644 e2e/KindConfig/pod5.yml create mode 100644 e2e/KotaryService/ConfigMap.yaml create mode 100644 e2e/KotaryService/QuotaClaim.yaml create mode 100644 e2e/KotaryService/QuotaClaimPending.yaml create mode 100644 e2e/KotaryService/QuotaClaimToBig.yaml create mode 100644 e2e/KotaryService/QuotaClaimUp.yaml create mode 100755 e2e/e2e.sh diff --git a/.github/workflows/e2eCI.yaml b/.github/workflows/e2eCI.yaml new file mode 100644 index 0000000..ab3f2c0 --- /dev/null +++ b/.github/workflows/e2eCI.yaml @@ -0,0 +1,107 @@ +--- +name: kotary e2e 
testing +on: push +jobs: + kind: + runs-on: ubuntu-latest + name: Test Kotary Operator on Kind cluster. Kube version + strategy: + fail-fast: false + matrix: + kubernetes: + - 1.21 + - 1.23 + - 1.26 + steps: + - name: checkout + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: '1.20' + + - name: Create cluster KinD + uses: helm/kind-action@v1.5.0 + with: + config: e2e/KindConfig/kind-cluster-${{ matrix.kubernetes }}.yaml + + - name: testing cluster kinD + run: | + kubectl cluster-info --context kind-chart-testing echo " current-context:" $(kubectl config current-context) + kubectl get all --all-namespaces + + - name: Set GOROOT + run: echo "export GOROOT=/opt/hostedtoolcache/go/1.20/x64" >> $GITHUB_ENV + + - name: Deploy CRD + run: kubectl apply -f artifacts/crd.yml + + - name: Deploy kotary operator + run: kubectl apply -f artifacts/deployment.yml + + - name: Create kotary ConfigMap + run: kubectl -n kube-system create -f e2e/KotaryService/ConfigMap.yaml + + - name: Golden test 0 - Create NS 'kotarytest' and add a ResourceQuota + run: | + kubectl create ns kotarytest + while ! 
kubectl get pods -n kube-system | grep kotary | grep Running > /dev/null ; do sleep 5; echo "Waiting for Kotary pod to be Running...."; done + kubectl apply -f e2e/KotaryService/QuotaClaim.yaml -n kotarytest + if kubectl get quotaclaim -n kotarytest | grep REJECTED ; then exit 1 ; fi + kubectl get resourcequota -n kotarytest + + - name: Golden test 1 - adding pods and verifying resources used + run: | + kubectl apply -f e2e/KindConfig/pod1.yml -n kotarytest + echo "<<<<<<<<< Ressource quota should be cpu: 100/660m memory: 0.25/1Gi >>>>>>>>>" + kubectl get resourcequota -n kotarytest + kubectl apply -f e2e/KindConfig/pod2.yml -n kotarytest + echo "<<<<<<<<< Ressource quota should be cpu: 200/660m memory: 0.5/1Gi >>>>>>>>>" + kubectl get resourcequota -n kotarytest + kubectl apply -f e2e/KindConfig/pod3.yml -n kotarytest + echo "<<<<<<<<< Ressource quota should be cpu: 350/660m memory: 0.75/1Gi >>>>>>>>>" + kubectl get resourcequota -n kotarytest + kubectl apply -f e2e/KindConfig/pod4.yml -n kotarytest + echo "<<<<<<<<< Ressource quota should be cpu: 500/660m memory: 1/1Gi >>>>>>>>>" + kubectl get resourcequota -n kotarytest + + - name: Golden test 2 - trying to add a pod but no ressource left in NS. (Should return error) + run: if kubectl apply -f e2e/KindConfig/pod5.yml -n kotarytest ; then exit 1 ; fi + + - name: Golden test 3 - Upscale + run: | + kubectl apply -f e2e/KotaryService/QuotaClaimUp.yaml -n kotarytest + if kubectl get quotaclaim -n kotarytest | grep REJECTED ; then exit 1 ; fi + kubectl get resourcequota -n kotarytest + + - name: Golden test 4 - Upscale (REJECTED) + run: | + kubectl apply -f e2e/KotaryService/QuotaClaimToBig.yaml -n kotarytest + if ! 
kubectl get quotaclaim -n kotarytest | grep REJECTED ; then exit 1 ; fi + kubectl get resourcequota -n kotarytest + + - name: Golden test 5 - Downscale + run: | + kubectl apply -f e2e/KotaryService/QuotaClaim.yaml -n kotarytest + if kubectl get quotaclaim -n kotarytest | grep REJECTED ; then exit 1 ; fi + kubectl get resourcequota -n kotarytest + + - name: Golden test 6 - Downscale (PENDING) + run: | + kubectl apply -f e2e/KotaryService/QuotaClaimPending.yaml -n kotarytest + kubectl get quotaclaim -n kotarytest + kubectl get resourcequota -n kotarytest + if ! kubectl get quotaclaim -n kotarytest | grep PENDING ; then exit 1 ; fi + kubectl get resourcequota -n kotarytest + kubectl delete pod -n kotarytest podtest-4 + echo "<<<<<<<<< Deleted a pod, the pending claim should now be accepted >>>>>>>>>" + if kubectl get quotaclaim -n kotarytest | grep PENDING ; then exit 1 ; fi + kubectl get resourcequota -n kotarytest + kubectl apply -f e2e/KotaryService/QuotaClaim.yaml -n kotarytest + + - name: Golden test 7 - Check RessourceQuota is well computed + run: | + kubectl apply -f e2e/KindConfig/badpod.yml -n kotarytest + echo "<<<<<<<<< Ressource quota should be cpu: 350/660m memory: 0.75/1Gi >>>>>>>>>" + kubectl get resourcequota -n kotarytest \ No newline at end of file diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 881d793..5eee57e 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -1,4 +1,4 @@ -name: test +name: testci on: push: diff --git a/e2e/KindConfig/badpod.yml b/e2e/KindConfig/badpod.yml new file mode 100644 index 0000000..2ac8028 --- /dev/null +++ b/e2e/KindConfig/badpod.yml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: badpod-test +spec: + containers: + - name: hw-container1 + image: BadImg.com + resources: + limits: + memory: "1Gi" + cpu: "660m" + requests: + memory: "250Mi" + cpu: "100m" \ No newline at end of file diff --git a/e2e/KindConfig/kind-cluster-1.21.yaml b/e2e/KindConfig/kind-cluster-1.21.yaml 
new file mode 100644 index 0000000..5013666 --- /dev/null +++ b/e2e/KindConfig/kind-cluster-1.21.yaml @@ -0,0 +1,11 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: + - role: control-plane + image: "kindest/node:v1.21.14" + - role: worker + image: "kindest/node:v1.21.14" + - role: worker + image: "kindest/node:v1.21.14" + - role: worker + image: "kindest/node:v1.21.14" \ No newline at end of file diff --git a/e2e/KindConfig/kind-cluster-1.23.yaml b/e2e/KindConfig/kind-cluster-1.23.yaml new file mode 100644 index 0000000..6c51052 --- /dev/null +++ b/e2e/KindConfig/kind-cluster-1.23.yaml @@ -0,0 +1,11 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: + - role: control-plane + image: "kindest/node:v1.23.13" + - role: worker + image: "kindest/node:v1.23.13" + - role: worker + image: "kindest/node:v1.23.13" + - role: worker + image: "kindest/node:v1.23.13" \ No newline at end of file diff --git a/e2e/KindConfig/kind-cluster-1.26.yaml b/e2e/KindConfig/kind-cluster-1.26.yaml new file mode 100644 index 0000000..01748bd --- /dev/null +++ b/e2e/KindConfig/kind-cluster-1.26.yaml @@ -0,0 +1,11 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: + - role: control-plane + image: "kindest/node:v1.26.6" + - role: worker + image: "kindest/node:v1.26.6" + - role: worker + image: "kindest/node:v1.26.6" + - role: worker + image: "kindest/node:v1.26.6" \ No newline at end of file diff --git a/e2e/KindConfig/pod1.yml b/e2e/KindConfig/pod1.yml new file mode 100644 index 0000000..a2dcef4 --- /dev/null +++ b/e2e/KindConfig/pod1.yml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: podtest-1 +spec: + containers: + - name: hw-container1 + image: nginx + resources: + limits: + memory: "1Gi" + cpu: "660m" + requests: + memory: "250Mi" + cpu: "100m" \ No newline at end of file diff --git a/e2e/KindConfig/pod2.yml b/e2e/KindConfig/pod2.yml new file mode 100644 index 0000000..7fe1a23 --- /dev/null +++ b/e2e/KindConfig/pod2.yml @@ -0,0 +1,15 @@ 
+apiVersion: v1 +kind: Pod +metadata: + name: podtest-2 +spec: + containers: + - name: hw-container1 + image: nginx + resources: + limits: + memory: "1Gi" + cpu: "660m" + requests: + memory: "250Mi" + cpu: "100m" \ No newline at end of file diff --git a/e2e/KindConfig/pod3.yml b/e2e/KindConfig/pod3.yml new file mode 100644 index 0000000..ad7aac9 --- /dev/null +++ b/e2e/KindConfig/pod3.yml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: podtest-3 +spec: + containers: + - name: hw-container1 + image: nginx + resources: + limits: + memory: "1Gi" + cpu: "660m" + requests: + memory: "250Mi" + cpu: "150m" \ No newline at end of file diff --git a/e2e/KindConfig/pod4.yml b/e2e/KindConfig/pod4.yml new file mode 100644 index 0000000..9e72b70 --- /dev/null +++ b/e2e/KindConfig/pod4.yml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: podtest-4 +spec: + containers: + - name: hw-container1 + image: nginx + resources: + limits: + memory: "1Gi" + cpu: "660m" + requests: + memory: "250Mi" + cpu: "150m" \ No newline at end of file diff --git a/e2e/KindConfig/pod5.yml b/e2e/KindConfig/pod5.yml new file mode 100644 index 0000000..08fd96c --- /dev/null +++ b/e2e/KindConfig/pod5.yml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: podtest-5 +spec: + containers: + - name: hw-container1 + image: nginx + resources: + limits: + memory: "1Gi" + cpu: "660m" + requests: + memory: "500Mi" + cpu: "100m" \ No newline at end of file diff --git a/e2e/KotaryService/ConfigMap.yaml b/e2e/KotaryService/ConfigMap.yaml new file mode 100644 index 0000000..97bf054 --- /dev/null +++ b/e2e/KotaryService/ConfigMap.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ConfigMap +data: + defaultClaimSpec: | + cpu: "5" + memory: "10Gi" + ratioMaxAllocationMemory: "0.33" + ratioMaxAllocationCPU: "0.33" + ratioOverCommitMemory: "1.3" + ratioOverCommitCPU: "1.3" +metadata: + name: kotary-config \ No newline at end of file diff --git a/e2e/KotaryService/QuotaClaim.yaml 
b/e2e/KotaryService/QuotaClaim.yaml new file mode 100644 index 0000000..0be0d67 --- /dev/null +++ b/e2e/KotaryService/QuotaClaim.yaml @@ -0,0 +1,7 @@ +apiVersion: cagip.github.com/v1 +kind: ResourceQuotaClaim +metadata: + name: demo +spec: + memory: 1Gi + cpu: 660m \ No newline at end of file diff --git a/e2e/KotaryService/QuotaClaimPending.yaml b/e2e/KotaryService/QuotaClaimPending.yaml new file mode 100644 index 0000000..191bc1c --- /dev/null +++ b/e2e/KotaryService/QuotaClaimPending.yaml @@ -0,0 +1,7 @@ +apiVersion: cagip.github.com/v1 +kind: ResourceQuotaClaim +metadata: + name: demo +spec: + memory: "800Mi" + cpu: 400m \ No newline at end of file diff --git a/e2e/KotaryService/QuotaClaimToBig.yaml b/e2e/KotaryService/QuotaClaimToBig.yaml new file mode 100644 index 0000000..000c7ac --- /dev/null +++ b/e2e/KotaryService/QuotaClaimToBig.yaml @@ -0,0 +1,7 @@ +apiVersion: cagip.github.com/v1 +kind: ResourceQuotaClaim +metadata: + name: demo +spec: + memory: "200000Mi" + cpu: 100000m \ No newline at end of file diff --git a/e2e/KotaryService/QuotaClaimUp.yaml b/e2e/KotaryService/QuotaClaimUp.yaml new file mode 100644 index 0000000..cdf5973 --- /dev/null +++ b/e2e/KotaryService/QuotaClaimUp.yaml @@ -0,0 +1,7 @@ +apiVersion: cagip.github.com/v1 +kind: ResourceQuotaClaim +metadata: + name: demo +spec: + memory: "2000Mi" + cpu: 1000m \ No newline at end of file diff --git a/e2e/e2e.sh b/e2e/e2e.sh new file mode 100755 index 0000000..c2fdafe --- /dev/null +++ b/e2e/e2e.sh @@ -0,0 +1,92 @@ +#!/bin/bash + +RED='\033[0;31m' +GREEN='\033[0;32m' +BLUE='\033[0;36m' +PURPLE='\033[0;35m' +NC='\033[0m' # No Color + +#YOU NEED TO BE INSIDE A K8S CLUSTER TO RUN THIS# + +NS=$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 12) #generate random namespace +ROOT=$(git rev-parse --show-toplevel) #get root of git repo + +CleanUp () { + echo -e "\\n${BLUE}Starting CleanUp ${NC}\\n" + kubectl delete ns $NS + #kubectl delete configmap -n kube-system kotary-config + #kubectl delete deployment 
-n kube-system kotary + #kubectl delete crd resourcequotaclaims.cagip.github.com +} + +echo -e "${BLUE}====== Starting SetUp ======${NC} \\n" + +if ! kubectl apply -f artifacts/crd.yml ; + then echo -e "\\n${RED}CONNECT TO A CLUSTER BEFORE RUNNING THIS EXECUTABLE${NC}\\n" && exit 1 ; fi + +kubectl apply -f artifacts/deployment.yml +kubectl -n kube-system create -f $ROOT/e2e/KotaryService/ConfigMap.yaml + +kubectl create ns $NS +while ! kubectl get pods -n kube-system | grep kotary | grep Running > /dev/null ; do echo -e "${BLUE}.... Waiting for Kotary pod to be Running ....${NC}" ; sleep 2; done + +echo -e "\\n${BLUE}====== Starting Tests ======${NC}\\n" +kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaim.yaml -n $NS +if kubectl get quotaclaim -n $NS | grep REJECTED ; + then echo -e "\\n${RED}FAILED! error during Claim test: the Claim is REJECTED. Should be accepted ${NC}" && CleanUp && exit 1 ; fi +kubectl get resourcequota -n $NS + +echo -e "\\n ${PURPLE}-- Applying pods in NS --${NC}" && sleep 1 +kubectl apply -f $ROOT/e2e/KindConfig/pod1.yml -n $NS +kubectl apply -f $ROOT/e2e/KindConfig/pod2.yml -n $NS +kubectl apply -f $ROOT/e2e/KindConfig/pod3.yml -n $NS +kubectl apply -f $ROOT/e2e/KindConfig/pod4.yml -n $NS +echo -e "\\n ${PURPLE}Should be 'cpu: 500m/660m, memory: 1000Mi/1Gi'${NC}" +kubectl get resourcequota -n $NS +echo -e "${GREEN} -- OK --${NC}\\n" + +echo -e "\\n ${PURPLE}-- Trying to add a pod over max resources (must be forbidden) --${NC}" && sleep 1 +if kubectl apply -f $ROOT/e2e/KindConfig/pod5.yml -n $NS ; + then echo -e "\\n${RED}FAILED! error during Pod test: The pod must not be accepted because it uses more resources than what's left to use.${NC}" && CleanUp && exit 1 ; fi + echo -e "${GREEN} -- OK --${NC}\\n" + + +echo -e "\\n ${PURPLE}-- Scale UP --${NC}" && sleep 1 +kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaimUp.yaml -n $NS +if kubectl get quotaclaim -n $NS | grep REJECTED ; + then echo -e "\\n${RED}FAILED! 
error during Scale UP: the Claim has been rejected${NC}\\n" && kubectl get quotaclaim -n $NS && CleanUp && exit 1 ; fi + echo -e "${GREEN} -- OK --${NC}\\n" + +echo -e "\\n ${PURPLE}-- Scale UP(too big) --${NC}" && sleep 1 +kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaimToBig.yaml -n $NS +if ! kubectl get quotaclaim -n $NS | grep REJECTED ; + then echo -e "\\n${RED}FAILED! error during Scale UP(too big): the Claim has not been rejected${NC}" && kubectl get quotaclaim -n $NS && CleanUp && exit 1 ; fi + echo -e "${GREEN} -- OK --${NC}\\n" + + +echo -e "\\n ${PURPLE}-- Scale Down (under what is currently used --> PENDING) --${NC}" && sleep 1 +kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaimPending.yaml -n $NS +if ! kubectl get quotaclaim -n $NS | grep PENDING ; + then echo -e "\\n${RED}FAILED! error during pending test: the Claim is not set to PENDING${NC}" && kubectl get resourcequota -n $NS && CleanUp && exit 1 ; fi + echo -e "${GREEN} -- OK --${NC}\\n" + +echo -e "\\n ${PURPLE}-- Delete pod-4: the pending claim should now be accepted --${NC}" && sleep 1 +kubectl delete pod -n $NS podtest-4 && sleep 1 + +if kubectl get quotaclaim -n $NS | grep PENDING ; + then echo -e "\\n${RED}FAILED! error during pending test: the PENDING Claim is not accepted after resources are updated${NC}" && kubectl get quotaclaim -n $NS && CleanUp && exit 1; fi +kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaim.yaml -n $NS +echo -e "${GREEN} -- OK --${NC}\\n" + +echo -e "\\n ${PURPLE}-- Adding a pod with bad image --> should not impact the resources used --${NC}" && sleep 1 +kubectl apply -f $ROOT/e2e/KindConfig/badpod.yml -n $NS +if ! kubectl get resourcequota -n $NS | grep "350m/660m" | grep "750Mi/1Gi" ; + then echo -e "\\n${RED}FAILED! error during resource test: Not RUNNING pod is not ignored when calculating the resourcequota${NC}" && CleanUp && exit 1; fi +echo -e "${GREEN} -- OK --${NC}\\n" + + +echo -e "\\n${GREEN} <<< ALL GOOD, Well done! 
:) >>>${NC}" + +CleanUp + +echo -e "\\n${BLUE}Done!${NC}" \ No newline at end of file diff --git a/go.mod b/go.mod index 3bdf5eb..4889365 100644 --- a/go.mod +++ b/go.mod @@ -52,11 +52,11 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/streadway/amqp v0.0.0-20200108173154-1c71cc93ed71 // indirect golang.org/x/mod v0.9.0 // indirect - golang.org/x/net v0.8.0 // indirect + golang.org/x/net v0.17.0 // indirect golang.org/x/oauth2 v0.5.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/term v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect golang.org/x/tools v0.7.0 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/go.sum b/go.sum index e6ab550..a0ece80 100644 --- a/go.sum +++ b/go.sum @@ -194,8 +194,8 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.5.0 h1:HuArIo48skDwlrvM3sEdHXElYslAMsf3KwRkkW4MC4s= golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= @@ -215,15 +215,15 @@ golang.org/x/sys 
v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/internal/controller/controller.go b/internal/controller/controller.go index 2701a54..a0c01ae 100644 --- 
a/internal/controller/controller.go +++ b/internal/controller/controller.go @@ -137,7 +137,6 @@ func NewController( controller.enqueueNamespace(new) }, }) - return controller }