FEAT: e2e CI test and e2e executable

ARPIN committed Nov 14, 2023
1 parent 8b3965e commit 6281b31
Showing 20 changed files with 375 additions and 14 deletions.
107 changes: 107 additions & 0 deletions .github/workflows/e2eCI.yaml
@@ -0,0 +1,107 @@
---
name: kotary e2e testing
on: push
jobs:
  kind:
    runs-on: ubuntu-latest
    name: Test Kotary Operator on Kind cluster. Kube version
    strategy:
      fail-fast: false
      matrix:
        kubernetes:
          - 1.21
          - 1.23
          - 1.26
    steps:
      - name: checkout
        uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: '1.20'

      - name: Create KinD cluster
        uses: helm/[email protected]
        with:
          config: e2e/KindConfig/kind-cluster-${{ matrix.kubernetes }}.yaml

      - name: Test KinD cluster
        run: |
          kubectl cluster-info --context kind-chart-testing
          echo " current-context:" $(kubectl config current-context)
          kubectl get all --all-namespaces

      - name: Set GOROOT
        run: echo "GOROOT=/opt/hostedtoolcache/go/1.20/x64" >> $GITHUB_ENV

      - name: Deploy CRD
        run: kubectl apply -f artifacts/crd.yml

      - name: Deploy kotary operator
        run: kubectl apply -f artifacts/deployment.yml

      - name: Create kotary ConfigMap
        run: kubectl -n kube-system create -f e2e/KotaryService/ConfigMap.yaml

      - name: Golden test 0 - Create NS 'kotarytest' and add a ResourceQuota
        run: |
          kubectl create ns kotarytest
          while ! kubectl get pods -n kube-system | grep kotary | grep Running > /dev/null ; do sleep 5; echo "Waiting for Kotary pod to be Running...."; done
          kubectl apply -f e2e/KotaryService/QuotaClaim.yaml -n kotarytest
          if kubectl get quotaclaim -n kotarytest | grep REJECTED ; then exit 1 ; fi
          kubectl get resourcequota -n kotarytest

      - name: Golden test 1 - Add pods and verify the resources used
        run: |
          kubectl apply -f e2e/KindConfig/pod1.yml -n kotarytest
          echo "<<<<<<<<< Resource quota should be cpu: 100/660m memory: 0.25/1Gi >>>>>>>>>"
          kubectl get resourcequota -n kotarytest
          kubectl apply -f e2e/KindConfig/pod2.yml -n kotarytest
          echo "<<<<<<<<< Resource quota should be cpu: 200/660m memory: 0.5/1Gi >>>>>>>>>"
          kubectl get resourcequota -n kotarytest
          kubectl apply -f e2e/KindConfig/pod3.yml -n kotarytest
          echo "<<<<<<<<< Resource quota should be cpu: 350/660m memory: 0.75/1Gi >>>>>>>>>"
          kubectl get resourcequota -n kotarytest
          kubectl apply -f e2e/KindConfig/pod4.yml -n kotarytest
          echo "<<<<<<<<< Resource quota should be cpu: 500/660m memory: 1/1Gi >>>>>>>>>"
          kubectl get resourcequota -n kotarytest

      - name: Golden test 2 - Try to add a pod with no resources left in the NS (should return an error)
        run: if kubectl apply -f e2e/KindConfig/pod5.yml -n kotarytest ; then exit 1 ; fi

      - name: Golden test 3 - Upscale
        run: |
          kubectl apply -f e2e/KotaryService/QuotaClaimUp.yaml -n kotarytest
          if kubectl get quotaclaim -n kotarytest | grep REJECTED ; then exit 1 ; fi
          kubectl get resourcequota -n kotarytest

      - name: Golden test 4 - Upscale (REJECTED)
        run: |
          kubectl apply -f e2e/KotaryService/QuotaClaimToBig.yaml -n kotarytest
          if ! kubectl get quotaclaim -n kotarytest | grep REJECTED ; then exit 1 ; fi
          kubectl get resourcequota -n kotarytest

      - name: Golden test 5 - Downscale
        run: |
          kubectl apply -f e2e/KotaryService/QuotaClaim.yaml -n kotarytest
          if kubectl get quotaclaim -n kotarytest | grep REJECTED ; then exit 1 ; fi
          kubectl get resourcequota -n kotarytest

      - name: Golden test 6 - Downscale (PENDING)
        run: |
          kubectl apply -f e2e/KotaryService/QuotaClaimPending.yaml -n kotarytest
          kubectl get quotaclaim -n kotarytest
          kubectl get resourcequota -n kotarytest
          if ! kubectl get quotaclaim -n kotarytest | grep PENDING ; then exit 1 ; fi
          kubectl get resourcequota -n kotarytest
          kubectl delete pod -n kotarytest podtest-4
          echo "<<<<<<<<< Deleted a pod, the pending claim should now be accepted >>>>>>>>>"
          if kubectl get quotaclaim -n kotarytest | grep PENDING ; then exit 1 ; fi
          kubectl get resourcequota -n kotarytest
          kubectl apply -f e2e/KotaryService/QuotaClaim.yaml -n kotarytest

      - name: Golden test 7 - Check that the ResourceQuota is correctly computed
        run: |
          kubectl apply -f e2e/KindConfig/badpod.yml -n kotarytest
          echo "<<<<<<<<< Resource quota should be cpu: 350/660m memory: 0.75/1Gi >>>>>>>>>"
          kubectl get resourcequota -n kotarytest
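To reproduce one matrix entry outside of CI, the setup steps can be run by hand (a sketch, not part of this commit: it assumes kind and kubectl are installed and the commands run from the repository root):

    kind create cluster --config e2e/KindConfig/kind-cluster-1.26.yaml
    kubectl apply -f artifacts/crd.yml
    kubectl apply -f artifacts/deployment.yml
    kubectl -n kube-system create -f e2e/KotaryService/ConfigMap.yaml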
2 changes: 1 addition & 1 deletion .github/workflows/test.yml
@@ -1,4 +1,4 @@
-name: test
+name: testci

 on:
   push:
15 changes: 15 additions & 0 deletions e2e/KindConfig/badpod.yml
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
  name: badpod-test
spec:
  containers:
  - name: hw-container1
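    # Intentionally unpullable image: the pod never reaches Running,
    # so it must not be counted in the ResourceQuota (golden test 7).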
    image: BadImg.com
    resources:
      limits:
        memory: "1Gi"
        cpu: "660m"
      requests:
        memory: "250Mi"
        cpu: "100m"
11 changes: 11 additions & 0 deletions e2e/KindConfig/kind-cluster-1.21.yaml
@@ -0,0 +1,11 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  image: "kindest/node:v1.21.14"
- role: worker
  image: "kindest/node:v1.21.14"
- role: worker
  image: "kindest/node:v1.21.14"
- role: worker
  image: "kindest/node:v1.21.14"
11 changes: 11 additions & 0 deletions e2e/KindConfig/kind-cluster-1.23.yaml
@@ -0,0 +1,11 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  image: "kindest/node:v1.23.13"
- role: worker
  image: "kindest/node:v1.23.13"
- role: worker
  image: "kindest/node:v1.23.13"
- role: worker
  image: "kindest/node:v1.23.13"
11 changes: 11 additions & 0 deletions e2e/KindConfig/kind-cluster-1.26.yaml
@@ -0,0 +1,11 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  image: "kindest/node:v1.26.6"
- role: worker
  image: "kindest/node:v1.26.6"
- role: worker
  image: "kindest/node:v1.26.6"
- role: worker
  image: "kindest/node:v1.26.6"
15 changes: 15 additions & 0 deletions e2e/KindConfig/pod1.yml
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
  name: podtest-1
spec:
  containers:
  - name: hw-container1
    image: nginx
    resources:
      limits:
        memory: "1Gi"
        cpu: "660m"
      requests:
        memory: "250Mi"
        cpu: "100m"
15 changes: 15 additions & 0 deletions e2e/KindConfig/pod2.yml
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
  name: podtest-2
spec:
  containers:
  - name: hw-container1
    image: nginx
    resources:
      limits:
        memory: "1Gi"
        cpu: "660m"
      requests:
        memory: "250Mi"
        cpu: "100m"
15 changes: 15 additions & 0 deletions e2e/KindConfig/pod3.yml
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
  name: podtest-3
spec:
  containers:
  - name: hw-container1
    image: nginx
    resources:
      limits:
        memory: "1Gi"
        cpu: "660m"
      requests:
        memory: "250Mi"
        cpu: "150m"
15 changes: 15 additions & 0 deletions e2e/KindConfig/pod4.yml
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
  name: podtest-4
spec:
  containers:
  - name: hw-container1
    image: nginx
    resources:
      limits:
        memory: "1Gi"
        cpu: "660m"
      requests:
        memory: "250Mi"
        cpu: "150m"
15 changes: 15 additions & 0 deletions e2e/KindConfig/pod5.yml
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
  name: podtest-5
spec:
  containers:
  - name: hw-container1
    image: nginx
    resources:
      limits:
        memory: "1Gi"
        cpu: "660m"
      requests:
        memory: "500Mi"
        cpu: "100m"
12 changes: 12 additions & 0 deletions e2e/KotaryService/ConfigMap.yaml
@@ -0,0 +1,12 @@
apiVersion: v1
kind: ConfigMap
data:
  defaultClaimSpec: |
    cpu: "5"
    memory: "10Gi"
  ratioMaxAllocationMemory: "0.33"
  ratioMaxAllocationCPU: "0.33"
  ratioOverCommitMemory: "1.3"
  ratioOverCommitCPU: "1.3"
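  # Assumed semantics, inferred from the field names rather than the operator code:
  # defaultClaimSpec is the quota granted to a namespace without an explicit claim,
  # ratioMaxAllocation* caps how much of the cluster a single claim may take,
  # and ratioOverCommit* lets the sum of accepted quotas exceed cluster capacity.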
metadata:
  name: kotary-config
7 changes: 7 additions & 0 deletions e2e/KotaryService/QuotaClaim.yaml
@@ -0,0 +1,7 @@
apiVersion: cagip.github.com/v1
kind: ResourceQuotaClaim
metadata:
  name: demo
spec:
  memory: 1Gi
  cpu: 660m
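# Baseline claim: yields the quota (660m CPU / 1Gi memory) that golden tests 0 and 5 expect.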
7 changes: 7 additions & 0 deletions e2e/KotaryService/QuotaClaimPending.yaml
@@ -0,0 +1,7 @@
apiVersion: cagip.github.com/v1
kind: ResourceQuotaClaim
metadata:
  name: demo
spec:
  memory: "800Mi"
  cpu: 400m
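# Downscale below current usage: pods 1-4 already request 1000Mi, so this claim
# stays PENDING until a pod is deleted (golden test 6).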
7 changes: 7 additions & 0 deletions e2e/KotaryService/QuotaClaimToBig.yaml
@@ -0,0 +1,7 @@
apiVersion: cagip.github.com/v1
kind: ResourceQuotaClaim
metadata:
  name: demo
spec:
  memory: "200000Mi"
  cpu: 100000m
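# Deliberately oversized claim: far more than the test cluster can offer, so it
# must be REJECTED (golden test 4).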
7 changes: 7 additions & 0 deletions e2e/KotaryService/QuotaClaimUp.yaml
@@ -0,0 +1,7 @@
apiVersion: cagip.github.com/v1
kind: ResourceQuotaClaim
metadata:
  name: demo
spec:
  memory: "2000Mi"
  cpu: 1000m
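# Feasible upscale from the baseline claim; expected to be accepted (golden test 3).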
92 changes: 92 additions & 0 deletions e2e/e2e.sh
@@ -0,0 +1,92 @@
#!/bin/bash

RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;36m'
PURPLE='\033[0;35m'
NC='\033[0m' # No Color

# YOU NEED TO BE CONNECTED TO A K8S CLUSTER TO RUN THIS #

NS=$(tr -cd 'a-f0-9' < /dev/urandom | head -c 12) # generate a random namespace name
ROOT=$(git rev-parse --show-toplevel)             # root of the git repo

CleanUp () {
  echo -e "\\n${BLUE}Starting CleanUp ${NC}\\n"
  kubectl delete ns $NS
  #kubectl delete configmap -n kube-system kotary-config
  #kubectl delete deployment -n kube-system kotary
  #kubectl delete crd resourcequotaclaims.cagip.github.com
}

echo -e "${BLUE}====== Starting SetUp ======${NC} \\n"

if ! kubectl apply -f artifacts/crd.yml ;
then echo -e "\\n${RED}CONNECT TO A CLUSTER BEFORE RUNNING THIS EXECUTABLE${NC}\\n" && exit 1 ; fi

kubectl apply -f artifacts/deployment.yml
kubectl -n kube-system create -f $ROOT/e2e/KotaryService/ConfigMap.yaml

kubectl create ns $NS
while ! kubectl get pods -n kube-system | grep kotary | grep Running > /dev/null ; do echo -e "${BLUE}.... Waiting for Kotary pod to be Running ....${NC}" ; sleep 2; done

echo -e "\\n${BLUE}====== Starting Tests ======${NC}\\n"
kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaim.yaml -n $NS
if kubectl get quotaclaim -n $NS | grep REJECTED ;
then echo -e "\\n${RED}FAILED! Error during claim test: the claim was REJECTED but should be accepted ${NC}" && CleanUp && exit 1 ; fi
kubectl get resourcequota -n $NS

echo -e "\\n ${PURPLE}-- Applying pods in NS --${NC}" && sleep 1
kubectl apply -f $ROOT/e2e/KindConfig/pod1.yml -n $NS
kubectl apply -f $ROOT/e2e/KindConfig/pod2.yml -n $NS
kubectl apply -f $ROOT/e2e/KindConfig/pod3.yml -n $NS
kubectl apply -f $ROOT/e2e/KindConfig/pod4.yml -n $NS
echo -e "\\n ${PURPLE}Should be 'cpu: 500m/660m, memory: 1000Mi/1Gi'${NC}"
kubectl get resourcequota -n $NS
echo -e "${GREEN} -- OK --${NC}\\n"

echo -e "\\n ${PURPLE}-- Trying to add a pod over the max resources (must be forbidden) --${NC}" && sleep 1
if kubectl apply -f $ROOT/e2e/KindConfig/pod5.yml -n $NS ;
then echo -e "\\n${RED}FAILED! Error during pod test: the pod must not be accepted because it requests more resources than what is left in the quota.${NC}" && CleanUp && exit 1 ; fi
echo -e "${GREEN} -- OK --${NC}\\n"


echo -e "\\n ${PURPLE}-- Scale UP --${NC}" && sleep 1
kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaimUp.yaml -n $NS
if kubectl get quotaclaim -n $NS | grep REJECTED ;
then echo -e "\\n${RED}FAILED! Error during scale up: the claim has been rejected${NC}\\n" && kubectl get quotaclaim -n $NS && CleanUp && exit 1 ; fi
echo -e "${GREEN} -- OK --${NC}\\n"

echo -e "\\n ${PURPLE}-- Scale UP (too big) --${NC}" && sleep 1
kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaimToBig.yaml -n $NS
if ! kubectl get quotaclaim -n $NS | grep REJECTED ;
then echo -e "\\n${RED}FAILED! Error during scale up (too big): the claim has not been rejected${NC}" && kubectl get quotaclaim -n $NS && CleanUp && exit 1 ; fi
echo -e "${GREEN} -- OK --${NC}\\n"


echo -e "\\n ${PURPLE}-- Scale down (under what is currently used --> PENDING) --${NC}" && sleep 1
kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaimPending.yaml -n $NS
if ! kubectl get quotaclaim -n $NS | grep PENDING ;
then echo -e "\\n${RED}FAILED! Error during pending test: the claim is not set to PENDING${NC}" && kubectl get resourcequota -n $NS && CleanUp && exit 1 ; fi
echo -e "${GREEN} -- OK --${NC}\\n"

echo -e "\\n ${PURPLE}-- Delete pod-4: the pending claim should now be accepted --${NC}" && sleep 1
kubectl delete pod -n $NS podtest-4 && sleep 1

if kubectl get quotaclaim -n $NS | grep PENDING ;
then echo -e "\\n${RED}FAILED! Error during pending test: the PENDING claim was not accepted after the resources were freed${NC}" && kubectl get quotaclaim -n $NS && CleanUp && exit 1; fi
kubectl apply -f $ROOT/e2e/KotaryService/QuotaClaim.yaml -n $NS
echo -e "${GREEN} -- OK --${NC}\\n"

echo -e "\\n ${PURPLE}-- Adding a pod with a bad image --> should not impact the resources used --${NC}" && sleep 1
kubectl apply -f $ROOT/e2e/KindConfig/badpod.yml -n $NS
if ! kubectl get resourcequota -n $NS | grep "350m/660m" | grep "750Mi/1Gi" ;
then echo -e "\\n${RED}FAILED! Error during resource test: a pod that is not RUNNING was not ignored when computing the resourcequota${NC}" && CleanUp && exit 1; fi
echo -e "${GREEN} -- OK --${NC}\\n"


echo -e "\\n${GREEN} <<< ALL GOOD, Well done! :) >>>${NC}"

CleanUp

echo -e "\\n${BLUE}Done!${NC}"
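To run the whole suite locally instead of in CI (a sketch under the same assumptions as above; it first creates a throwaway KinD cluster for the script to target):

    kind create cluster --config e2e/KindConfig/kind-cluster-1.26.yaml
    bash e2e/e2e.sh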