diff --git a/.github/workflows/e2e-matrix.yml b/.github/workflows/e2e-matrix.yml index c5449b8d17..447cc00b76 100644 --- a/.github/workflows/e2e-matrix.yml +++ b/.github/workflows/e2e-matrix.yml @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -name: E2E Matrix Tests (Skeleton) +name: E2E Storage Matrix on: push: @@ -26,15 +26,6 @@ on: schedule: - cron: "30 2 * * *" workflow_dispatch: - inputs: - profiles: - description: "Storage profiles (comma-separated): sds, cephrbd" - required: false - default: "sds,cephrbd" - timeout: - description: "Ginkgo timeout (e.g. 2h, 4h)" - required: false - default: "4h" permissions: contents: read @@ -44,49 +35,31 @@ env: jobs: setup: - name: Setup Profiles + name: Setup (${{ matrix.profile }}) runs-on: ubuntu-latest - outputs: - profiles: ${{ steps.profiles.outputs.profiles }} - steps: - - uses: actions/checkout@v4 - - - name: Load storage profiles - id: profiles - run: | - # Single profile: sds with storage class sds-replicated-volume - echo 'profiles=["sds"]' >> "$GITHUB_OUTPUT" - - - name: Print matrix - run: | - echo "Will test profiles: ${{ steps.profiles.outputs.profiles }}" - - prepare: - name: Matrix Setup (${{ matrix.profile }}) - needs: [setup] - runs-on: ubuntu-latest - timeout-minutes: 300 - concurrency: - group: prepare-${{ github.ref }}-${{ matrix.profile }} - cancel-in-progress: true strategy: - fail-fast: false matrix: - profile: ${{ fromJson(needs.setup.outputs.profiles) }} - + include: + - profile: sds-replicated-volume + storage_name: sds + storage_class: linstor-thin-r2 + parent_storage_class: linstor-thin-r1-immediate + image_storage_class: linstor-thin-r1-immediate + attach_disk_size: 10Gi + data_disk_count: 2 + concurrency: + group: setup-${{ github.head_ref || github.ref_name }}-${{ matrix.profile }} + cancel-in-progress: true env: - GO_VERSION: "1.24.6" - TMP_ROOT: ${{ github.workspace }}/ci/dvp-e2e/tmp - STORAGE_CLASS: sds-replicated-volume - - outputs: - run_id: ${{ steps.prep.outputs.run_id }} - storage_class: ${{ env.STORAGE_CLASS }} - + RUN_ID: nightly-nested-e2e-${{ matrix.storage_name }}-${{ github.run_number }} + PROFILE: ${{ matrix.profile }} + STORAGE_CLASS: ${{ matrix.storage_class }} + PARENT_STORAGE_CLASS: ${{ matrix.parent_storage_class }} + IMAGE_STORAGE_CLASS: ${{ matrix.image_storage_class }} + ATTACH_DISK_SIZE: ${{ matrix.attach_disk_size }} + DATA_DISK_COUNT: ${{ matrix.data_disk_count }} steps: - uses: actions/checkout@v4 - with: - fetch-depth: 0 - name: Install Task uses: arduino/setup-task@v2 @@ -102,20 +75,17 @@ jobs: - name: Install kubectl uses: azure/setup-kubectl@v4 with: - version: 'latest' + version: "latest" - - name: Install Deckhouse CLI - env: - D8_VERSION: v0.13.2 - run: | - set -euo pipefail - echo "Installing d8 ${D8_VERSION}..." 
- curl -fsSL -o d8.tgz "https://deckhouse.io/downloads/deckhouse-cli/${D8_VERSION}/d8-${D8_VERSION}-linux-amd64.tar.gz" - tar -xzf d8.tgz linux-amd64/bin/d8 - mv linux-amd64/bin/d8 /usr/local/bin/d8 - chmod +x /usr/local/bin/d8 - rm -rf d8.tgz linux-amd64 - d8 --version + - name: Setup d8 + uses: werf/trdl/actions/setup-app@v0.12.2 + with: + repo: d8 + url: https://deckhouse.ru/downloads/deckhouse-cli-trdl/ + root-version: 1 + root-sha512: 343bd5f0d8811254e5f0b6fe292372a7b7eda08d276ff255229200f84e58a8151ab2729df3515cb11372dc3899c70df172a4e54c8a596a73d67ae790466a0491 + group: 0 + channel: stable - name: Install yq run: | @@ -123,163 +93,56 @@ jobs: curl -L -o /usr/local/bin/yq https://github.com/mikefarah/yq/releases/download/v4.44.1/yq_linux_amd64 chmod +x /usr/local/bin/yq - - name: Prepare environment - id: prep - run: | - RUN_ID="nightly-nested-e2e-${{ matrix.profile }}-$(date +%H%M)" - echo "run_id=$RUN_ID" >> "$GITHUB_OUTPUT" - echo "RUN_ID=$RUN_ID" >> "$GITHUB_ENV" - echo "PROFILE=${{ matrix.profile }}" >> "$GITHUB_ENV" - echo "TMP_ROOT=${{ env.TMP_ROOT }}" >> "$GITHUB_ENV" - mkdir -p "${{ env.TMP_ROOT }}/shared" "${{ env.TMP_ROOT }}/matrix-logs" - - - name: Build parent kubeconfig from secret - shell: bash - run: | - set -euo pipefail - mkdir -p "$HOME/.kube" - cat > "$HOME/.kube/config" <> "$GITHUB_ENV" - - - name: Prepare run values.yaml - working-directory: ci/dvp-e2e - run: | - task run:values:prepare \ - RUN_ID="${{ env.RUN_ID }}" \ - RUN_NAMESPACE="${{ env.RUN_ID }}" \ - RUN_DIR="${{ env.TMP_ROOT }}/runs/${{ env.RUN_ID }}" - echo "VALUES_TEMPLATE_FILE=${{ env.TMP_ROOT }}/runs/${{ env.RUN_ID }}/values.yaml" >> $GITHUB_ENV - - - name: Configure storage profile + - name: Setup nested environment + env: + RUN_ID: ${{ env.RUN_ID }} + PROFILE: ${{ env.PROFILE }} + STORAGE_CLASS: ${{ env.STORAGE_CLASS }} + PARENT_STORAGE_CLASS: ${{ env.PARENT_STORAGE_CLASS }} + IMAGE_STORAGE_CLASS: ${{ env.IMAGE_STORAGE_CLASS }} + ATTACH_DISK_SIZE: ${{ env.ATTACH_DISK_SIZE }} + DATA_DISK_COUNT: ${{ matrix.data_disk_count }} + REGISTRY_DOCKER_CFG: ${{ secrets.DEV_REGISTRY_DOCKER_CFG }} + API_URL: ${{ env.E2E_K8S_URL }} + SA_TOKEN: ${{ secrets.E2E_NESTED_SA_SECRET }} working-directory: ci/dvp-e2e run: | - # Set storage profile to sds with storage class sds-replicated-volume - PROFILE='sds' yq eval --inplace '.storageProfile = strenv(PROFILE)' "${{ env.TMP_ROOT }}/runs/${{ env.RUN_ID }}/values.yaml" - echo "Configured storage profile: sds with storage class: ${STORAGE_CLASS}" - - - name: Upload run context - if: always() - uses: actions/upload-artifact@v4 - with: - name: run-context-${{ env.RUN_ID }} - path: | - ci/dvp-e2e/tmp/runs/${{ env.RUN_ID }} - ci/dvp-e2e/tmp/shared - if-no-files-found: warn - - run-e2e: - name: E2E (${{ matrix.profile }}) [skeleton] - needs: [setup, prepare] - runs-on: ubuntu-latest - concurrency: - group: e2e-${{ github.ref }}-${{ matrix.profile }} - cancel-in-progress: true - strategy: - fail-fast: false - matrix: - profile: ${{ fromJson(needs.setup.outputs.profiles) }} - steps: - - name: Echo run - run: | - echo "E2E stage for profile=${{ matrix.profile }} (skeleton - placeholder)" - report: - name: Report [skeleton] - needs: [setup, run-e2e] - if: always() - runs-on: ubuntu-latest - steps: - - name: Echo report - run: | - echo "Report stage (skeleton). Collecting results from matrix..." 
+ task ci:setup-nested-env cleanup: - name: Cleanup Resources - needs: report + name: Cleanup (${{ matrix.profile }}) + needs: setup if: always() runs-on: ubuntu-latest + strategy: + matrix: + include: + - profile: sds-replicated-volume + storage_name: sds + storage_class: linstor-thin-r2 + parent_storage_class: linstor-thin-r1-immediate + image_storage_class: linstor-thin-r1-immediate + attach_disk_size: 10Gi + data_disk_count: 2 + env: + CLEANUP_PREFIX: ${{ vars.CLEANUP_PREFIX || 'nightly-nested-e2e-' }} steps: - uses: actions/checkout@v4 - name: Install Task uses: arduino/setup-task@v2 + with: + version: 3.x - name: Install kubectl uses: azure/setup-kubectl@v4 with: - version: 'latest' - - - name: Build parent kubeconfig from secret (cleanup) - shell: bash - run: | - set -euo pipefail - mkdir -p "$HOME/.kube" - cat > "$HOME/.kube/config" <> "$GITHUB_ENV" + version: "latest" - name: Cleanup test namespaces + working-directory: ci/dvp-e2e run: | - set -euo pipefail - PREFIX="nightly-nested-e2e-" - echo "๐Ÿงน Cleaning up namespaces matching prefix '${PREFIX}'" - mapfile -t CANDIDATES < <(kubectl get ns -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep "^${PREFIX}" || true) - OURS=() - for ns in "${CANDIDATES[@]:-}"; do - [ -z "$ns" ] && continue - if kubectl -n "$ns" get deploy jump-host >/dev/null 2>&1; then - OURS+=("$ns") - fi - done - if [ "${#OURS[@]}" -eq 0 ]; then - echo "[INFO] No namespaces to delete." - else - echo "[INFO] Deleting namespaces:" - printf ' - %s\n' "${OURS[@]}" - for ns in "${OURS[@]}"; do - kubectl delete ns "$ns" --wait=false || true - done - fi - - - name: Report cleanup results - if: always() - run: | - echo "### Cleanup Results" >> $GITHUB_STEP_SUMMARY - echo "โœ… Cleanup job completed" >> $GITHUB_STEP_SUMMARY - echo "๐Ÿงน Attempted to clean up namespaces matching 'nightly-nested-e2e-*'" >> $GITHUB_STEP_SUMMARY + task cleanup:namespaces \ + PREFIX="${CLEANUP_PREFIX}" \ + API_URL="${E2E_K8S_URL}" \ + SA_TOKEN="${{ secrets.E2E_NESTED_SA_SECRET }}" diff --git a/ci/dvp-e2e/Taskfile.yaml b/ci/dvp-e2e/Taskfile.yaml new file mode 100644 index 0000000000..e4ff030c00 --- /dev/null +++ b/ci/dvp-e2e/Taskfile.yaml @@ -0,0 +1,622 @@ +version: "3" +dotenv: + - .env + +vars: + # Paths and defaults + TMP_ROOT: + sh: git rev-parse --show-toplevel 2>/dev/null | xargs -I{} printf "%s/ci/dvp-e2e/tmp" {} + VALUES_TEMPLATE_FILE: values.yaml + SSH_FILE_NAME: id_ed + + # Charts + INFRA_CHART_PATH: ./charts/infra + CLUSTER_CONFIG_CHART_PATH: ./charts/cluster-config + +tasks: + # ------------------------------------------------------------ + # Preflight + # ------------------------------------------------------------ + default: + silent: true + desc: Check required utilities + cmds: + - | + deps=("kubectl" "jq" "yq" "docker" "helm" "htpasswd" "curl" "openssl" "d8") + for dep in "${deps[@]}"; do + if ! command -v "$dep" >/dev/null 2>&1; then + echo "Required utility '$dep' not found!" >&2 + exit 1 + fi + done + echo "All dependencies are installed!" 
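+
+  # Example local invocation (illustrative values only; in CI these variables are
+  # supplied by the workflow matrix and repository secrets):
+  #   task ci:setup-nested-env \
+  #     RUN_ID=nightly-nested-e2e-sds-1 \
+  #     PROFILE=sds-replicated-volume \
+  #     STORAGE_CLASS=linstor-thin-r2 \
+  #     PARENT_STORAGE_CLASS=linstor-thin-r1-immediate \
+  #     API_URL=https://<parent-api> SA_TOKEN=<service-account-token>
+  #   task cleanup:namespaces PREFIX=nightly-nested-e2e-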
+ + password-gen: + desc: Generate password (openssl + bcrypt) + silent: true + vars: + TMP_DIR: '{{ .TMP_DIR | default (printf "%s/%s" .TMP_ROOT "default") }}' + PASSWORD_HASH_FILE: '{{ printf "%s/%s" .TMP_DIR "password-hash.txt" }}' + cmds: + - mkdir -p {{ .TMP_DIR }} + - | + pw="$(openssl rand -base64 20)" + htpasswd -BinC 10 "" <<< "$pw" | cut -d: -f2 | (base64 --wrap=0 2>/dev/null || base64 -w0 2>/dev/null || base64) > {{ .PASSWORD_HASH_FILE }} + status: + - test -f "{{ .PASSWORD_HASH_FILE }}" + + # ------------------------------------------------------------ + # Values per run (namespaces, domain, prefix) + # ------------------------------------------------------------ + run:values:prepare: + desc: Prepare values.yaml for the run + vars: + RUN_ID: "{{ .RUN_ID }}" + RUN_NAMESPACE: "{{ .RUN_NAMESPACE }}" + RUN_DIR: '{{ .RUN_DIR | default (printf "%s/runs/%s" .TMP_ROOT .RUN_ID) }}' + TARGET_VALUES_FILE: '{{ printf "%s/%s" .RUN_DIR "values.yaml" }}' + BASE_DOMAIN: + sh: yq eval '.domain // ""' {{ .VALUES_TEMPLATE_FILE }} + BASE_CLUSTER_PREFIX: + sh: yq eval '.clusterConfigurationPrefix // "cluster"' {{ .VALUES_TEMPLATE_FILE }} + cmds: + - mkdir -p {{ .RUN_DIR }} + - cp {{ .VALUES_TEMPLATE_FILE }} {{ .TARGET_VALUES_FILE }} + - yq eval --inplace '.namespace = "{{ .RUN_NAMESPACE }}"' {{ .TARGET_VALUES_FILE }} + - | + set -euo pipefail + DOMAIN_INPUT="{{ .BASE_DOMAIN }}" + if [ -n "$DOMAIN_INPUT" ]; then + DOMAIN_VAL="{{ .RUN_ID }}.$DOMAIN_INPUT" + else + DOMAIN_VAL="{{ .RUN_ID }}" + fi + export DOMAIN_VAL + yq eval --inplace '.domain = strenv(DOMAIN_VAL)' {{ .TARGET_VALUES_FILE }} + - | + set -euo pipefail + if command -v shasum >/dev/null 2>&1; then + RUN_ID_HASH=$(printf "%s" "{{ .RUN_ID }}" | shasum | awk '{print $1}' | cut -c1-6) + else + RUN_ID_HASH=$(printf "%s" "{{ .RUN_ID }}" | sha1sum 2>/dev/null | awk '{print $1}' | cut -c1-6) + fi + PREFIX_INPUT="{{ .BASE_CLUSTER_PREFIX }}-${RUN_ID_HASH}" + [ ${#PREFIX_INPUT} -gt 16 ] && PREFIX_INPUT="${PREFIX_INPUT:0:16}" + export PREFIX_INPUT + yq eval --inplace '.clusterConfigurationPrefix = strenv(PREFIX_INPUT)' {{ .TARGET_VALUES_FILE }} + + # ------------------------------------------------------------ + # CI: Setup nested environment (main entry point) + # ------------------------------------------------------------ + ci:setup-nested-env: + desc: Setup complete nested environment for CI (prepare + infra + bootstrap + storage) + vars: + RUN_ID: '{{ .RUN_ID | default (env "RUN_ID") | default "" }}' + PROFILE: '{{ .PROFILE | default (env "PROFILE") | default "" }}' + STORAGE_CLASS: '{{ .STORAGE_CLASS | default (env "STORAGE_CLASS") | default "" }}' + IMAGE_STORAGE_CLASS: '{{ .IMAGE_STORAGE_CLASS | default (env "IMAGE_STORAGE_CLASS") | default "" }}' + PARENT_STORAGE_CLASS: '{{ .PARENT_STORAGE_CLASS | default (env "PARENT_STORAGE_CLASS") | default "" }}' + ATTACH_DISK_SIZE: '{{ .ATTACH_DISK_SIZE | default (env "ATTACH_DISK_SIZE") | default "10Gi" }}' + DATA_DISK_COUNT: '{{ .DATA_DISK_COUNT | default (env "DATA_DISK_COUNT") | default "2" }}' + REGISTRY_DOCKER_CFG: '{{ .REGISTRY_DOCKER_CFG | default (env "REGISTRY_DOCKER_CFG") | default "" }}' + API_URL: '{{ .API_URL | default (env "API_URL") | default (env "E2E_K8S_URL") | default "" }}' + SA_TOKEN: '{{ .SA_TOKEN | default (env "SA_TOKEN") | default (env "E2E_NESTED_SA_SECRET") | default "" }}' + RUN_DIR: '{{ printf "%s/runs/%s" .TMP_ROOT .RUN_ID }}' + VALUES_FILE_PATH: '{{ printf "%s/values.yaml" .RUN_DIR }}' + PARENT_KUBECONFIG_PATH: '{{ printf "%s/parent.kubeconfig" .RUN_DIR }}' + 
NESTED_KUBECONFIG_PATH: '{{ printf "%s/nested/kubeconfig" .RUN_DIR }}' + EFFECTIVE_DISK_SC: "{{ if .IMAGE_STORAGE_CLASS }}{{ .IMAGE_STORAGE_CLASS }}{{ else }}{{ .STORAGE_CLASS }}{{ end }}" + cmds: + - task: ci:prepare-env + vars: + RUN_ID: "{{ .RUN_ID }}" + RUN_DIR: "{{ .RUN_DIR }}" + PROFILE: "{{ .PROFILE }}" + STORAGE_CLASS: "{{ .STORAGE_CLASS }}" + PARENT_STORAGE_CLASS: "{{ .PARENT_STORAGE_CLASS }}" + REGISTRY_DOCKER_CFG: "{{ .REGISTRY_DOCKER_CFG }}" + API_URL: "{{ .API_URL }}" + SA_TOKEN: "{{ .SA_TOKEN }}" + - task: install:nested:env + vars: + TMP_DIR: "{{ .RUN_DIR }}" + VALUES_FILE: "{{ .VALUES_FILE_PATH }}" + PARENT_KUBECONFIG: "{{ .PARENT_KUBECONFIG_PATH }}" + REGISTRY_DOCKER_CFG: "{{ .REGISTRY_DOCKER_CFG }}" + TARGET_STORAGE_CLASS: "{{ .PARENT_STORAGE_CLASS }}" + ATTACH_DISK_SIZE: "{{ .ATTACH_DISK_SIZE }}" + EFFECTIVE_DISK_SC: "{{ .EFFECTIVE_DISK_SC }}" + NAMESPACE: "{{ .RUN_ID }}" + NESTED_KUBECONFIG: "{{ .NESTED_KUBECONFIG_PATH }}" + SDS_SC_NAME: "{{ .STORAGE_CLASS }}" + DATA_DISK_COUNT: "{{ .DATA_DISK_COUNT }}" + + ci:prepare-env: + desc: Prepare environment (values, kubeconfig, infra manifests) + vars: + RUN_ID: '{{ .RUN_ID | default (env "RUN_ID") | default "" }}' + RUN_DIR: '{{ .RUN_DIR | default (printf "%s/runs/%s" .TMP_ROOT .RUN_ID) }}' + PROFILE: '{{ .PROFILE | default (env "PROFILE") | default "" }}' + STORAGE_CLASS: '{{ .STORAGE_CLASS | default (env "STORAGE_CLASS") | default "" }}' + PARENT_STORAGE_CLASS: '{{ .PARENT_STORAGE_CLASS | default (env "PARENT_STORAGE_CLASS") | default "" }}' + REGISTRY_DOCKER_CFG: '{{ .REGISTRY_DOCKER_CFG | default (env "REGISTRY_DOCKER_CFG") | default "" }}' + API_URL: '{{ .API_URL | default (env "API_URL") | default (env "E2E_K8S_URL") | default "" }}' + SA_TOKEN: '{{ .SA_TOKEN | default (env "SA_TOKEN") | default (env "E2E_NESTED_SA_SECRET") | default "" }}' + VALUES_FILE_PATH: '{{ printf "%s/values.yaml" .RUN_DIR }}' + PARENT_KUBECONFIG_PATH: '{{ printf "%s/parent.kubeconfig" .RUN_DIR }}' + cmds: + - | + set -euo pipefail + if [ -z "{{ .RUN_ID }}" ] || [ -z "{{ .STORAGE_CLASS }}" ] || [ -z "{{ .PARENT_STORAGE_CLASS }}" ]; then + echo "[ERR] RUN_ID/STORAGE_CLASS/PARENT_STORAGE_CLASS must be set" >&2 + exit 1 + fi + mkdir -p "{{ .RUN_DIR }}" + - task: run:values:prepare + vars: + RUN_ID: "{{ .RUN_ID }}" + RUN_NAMESPACE: "{{ .RUN_ID }}" + RUN_DIR: "{{ .RUN_DIR }}" + - | + set -euo pipefail + VALUES_FILE="{{ .VALUES_FILE_PATH }}" + if [ -n "{{ .REGISTRY_DOCKER_CFG }}" ]; then + REGISTRY_DOCKER_CFG='{{ .REGISTRY_DOCKER_CFG }}' yq eval --inplace '.deckhouse.registryDockerCfg = strenv(REGISTRY_DOCKER_CFG)' "$VALUES_FILE" + fi + yq eval --inplace '.storageProfile = "{{ .PROFILE }}"' "$VALUES_FILE" + - task: parent:kubeconfig + vars: + OUTPUT: "{{ .PARENT_KUBECONFIG_PATH }}" + API_URL: "{{ .API_URL }}" + SA_TOKEN: "{{ .SA_TOKEN }}" + - task: render-infra + vars: + TMP_DIR: "{{ .RUN_DIR }}" + VALUES_FILE: "{{ .VALUES_FILE_PATH }}" + PARENT_KUBECONFIG: "{{ .PARENT_KUBECONFIG_PATH }}" + + # ------------------------------------------------------------ + # Infra manifests and deployment + # ------------------------------------------------------------ + render-infra: + desc: Generate infra manifests + deps: + - task: ssh:ensure + vars: + TMP_DIR: "{{ .TMP_DIR }}" + SSH_FILE_NAME: "{{ .SSH_FILE_NAME }}" + vars: + TMP_DIR: '{{ .TMP_DIR | default (printf "%s/%s" .TMP_ROOT "default") }}' + VALUES_FILE: "{{ .VALUES_FILE | default .VALUES_TEMPLATE_FILE }}" + GENERATED_VALUES_FILE: '{{ printf "%s/%s" .TMP_DIR "generated-values.yaml" }}' + SSH_DIR: '{{ 
.SSH_DIR | default (printf "%s/%s" .TMP_DIR "ssh") }}' + SSH_PUB_KEY_FILE: '{{ printf "%s/%s.pub" .SSH_DIR .SSH_FILE_NAME }}' + DOMAIN: + sh: yq eval '.domain // ""' {{ .VALUES_FILE }} + sources: + - "./charts/infra/**/*" + - "{{ .VALUES_FILE }}" + generates: + - "{{ .TMP_DIR }}/infra.yaml" + env: + KUBECONFIG: '{{ .PARENT_KUBECONFIG | default (env "KUBECONFIG") | default "" }}' + cmds: + - mkdir -p {{ .TMP_DIR }} + - printf "" > {{ .GENERATED_VALUES_FILE }} + - | + export SSH_PUB_KEY="$(cat {{ .SSH_PUB_KEY_FILE }})" + yq eval --inplace '.sshPublicKey = env(SSH_PUB_KEY)' {{ .GENERATED_VALUES_FILE }} + - | + if [ -n "${REGISTRY_DOCKER_CFG:-}" ]; then + yq eval --inplace '.deckhouse.registryDockerCfg = env(REGISTRY_DOCKER_CFG)' {{ .GENERATED_VALUES_FILE }} + fi + - | + DOMAIN_VALUE="{{ .DOMAIN }}" + if [ -n "$DOMAIN_VALUE" ] && [ "$DOMAIN_VALUE" != "null" ]; then + export DOMAIN_VALUE + yq eval --inplace '.domain = env(DOMAIN_VALUE)' {{ .GENERATED_VALUES_FILE }} + fi + - helm template dvp-over-dvp-infra {{ .INFRA_CHART_PATH }} -f {{ .VALUES_FILE }} -f {{ .GENERATED_VALUES_FILE }} > {{ .TMP_DIR }}/infra.yaml + + infra-deploy: + desc: Deploy infra (Namespace/RBAC/Ingress) + deps: + - task: render-infra + vars: + TMP_DIR: "{{ .TMP_DIR }}" + VALUES_FILE: "{{ .VALUES_FILE }}" + PARENT_KUBECONFIG: "{{ .PARENT_KUBECONFIG }}" + SSH_FILE_NAME: "{{ .SSH_FILE_NAME }}" + vars: + TMP_DIR: '{{ .TMP_DIR | default (printf "%s/%s" .TMP_ROOT "default") }}' + VALUES_FILE: "{{ .VALUES_FILE | default .VALUES_TEMPLATE_FILE }}" + NAMESPACE: + sh: yq eval '.namespace' {{ .VALUES_FILE }} + SSH_DIR: '{{ .SSH_DIR | default (printf "%s/%s" .TMP_DIR "ssh") }}' + SSH_PRIV_KEY_FILE: '{{ printf "%s/%s" .SSH_DIR .SSH_FILE_NAME }}' + SSH_PUB_KEY_FILE: '{{ printf "%s/%s.pub" .SSH_DIR .SSH_FILE_NAME }}' + env: + KUBECONFIG: '{{ .PARENT_KUBECONFIG | default (env "KUBECONFIG") | default "" }}' + cmds: + - kubectl apply --server-side --force-conflicts --validate=false -f {{ .TMP_DIR }}/infra.yaml || kubectl apply --validate=false -f {{ .TMP_DIR }}/infra.yaml + - | + # Persist SSH keypair in parent cluster namespace for diagnostics tools (nested_diag.sh) + # Secret contains private and public parts; will be removed with namespace cleanup + kubectl -n {{ .NAMESPACE }} create secret generic e2e-ssh-key \ + --dry-run=client -o yaml \ + --from-file=id_ed={{ .SSH_PRIV_KEY_FILE }} \ + --from-file=id_ed.pub={{ .SSH_PUB_KEY_FILE }} \ + | kubectl apply -f - + + infra:attach-storage-disks-hotplug: + desc: Attach storage disks to worker VMs using hotplug (VirtualMachineBlockDeviceAttachment) + vars: + TMP_DIR: '{{ .TMP_DIR | default (printf "%s/%s" .TMP_ROOT "default") }}' + VALUES_FILE: "{{ .VALUES_FILE | default .VALUES_TEMPLATE_FILE }}" + NAMESPACE: + sh: yq eval '.namespace' {{ .VALUES_FILE }} + DISK_SIZE: '{{ .DISK_SIZE | default "10Gi" }}' + STORAGE_CLASS: '{{ .STORAGE_CLASS | default "linstor-thin-r2" }}' + DISK_COUNT: '{{ .DISK_COUNT | default "2" }}' + env: + KUBECONFIG: '{{ .PARENT_KUBECONFIG | default (env "KUBECONFIG") | default "" }}' + cmds: + - chmod +x scripts/attach_worker_disks.sh + - | + scripts/attach_worker_disks.sh \ + -n "{{ .NAMESPACE }}" \ + -s "{{ .STORAGE_CLASS }}" \ + -z "{{ .DISK_SIZE }}" \ + -c "{{ .DISK_COUNT }}" \ + -k "${KUBECONFIG}" + + # ------------------------------------------------------------ + # Kubeconfig for bootstrap and cluster config + # ------------------------------------------------------------ + render-kubeconfig: + desc: Generate kubeconfig for bootstrap (external parent API) + deps: + - 
password-gen + vars: + TMP_DIR: '{{ .TMP_DIR | default (printf "%s/%s" .TMP_ROOT "default") }}' + VALUES_FILE: "{{ .VALUES_FILE | default .VALUES_TEMPLATE_FILE }}" + PARENT_KUBECONFIG: '{{ .PARENT_KUBECONFIG | default (env "KUBECONFIG") | default "" }}' + NAMESPACE: + sh: yq eval '.namespace' {{ .VALUES_FILE }} + SERVER: + sh: | + # Use external parent cluster API (ingress host) so that both dhctl Job + # and components inside the nested cluster can reach the parent API. + export KUBECONFIG='{{ .PARENT_KUBECONFIG }}' + HOST=$(kubectl -n d8-user-authn get ingress kubernetes-api -o json | jq -r '.spec.rules[0].host') + [ -z "$HOST" -o "$HOST" = "null" ] && { echo "[ERR] kubernetes-api ingress host not found" >&2; exit 1; } + echo "https://$HOST" + TOKEN: + sh: | + export KUBECONFIG='{{ .PARENT_KUBECONFIG }}' + for i in $(seq 1 5); do + TOKEN=$(kubectl -n {{ .NAMESPACE }} create token dkp-sa --duration=10h 2>/dev/null) && break + echo "[WARN] Failed to issue SA token (attempt $i); retrying in 3s" >&2 + sleep 3 + done + [ -z "${TOKEN:-}" ] && { echo "[ERR] Unable to obtain token for dkp-sa" >&2; exit 1; } + echo "$TOKEN" + env: + KUBECONFIG: "{{ .PARENT_KUBECONFIG }}" + silent: true + cmds: + - mkdir -p {{ .TMP_DIR }} + - | + cat < {{ .TMP_DIR }}/kubeconfig.yaml + apiVersion: v1 + clusters: + - cluster: + server: {{ .SERVER }} + insecure-skip-tls-verify: true + name: dvp + contexts: + - context: + cluster: dvp + namespace: {{ .NAMESPACE }} + user: {{ .NAMESPACE }}@dvp + name: {{ .NAMESPACE }}@dvp + current-context: {{ .NAMESPACE }}@dvp + kind: Config + preferences: {} + users: + - name: {{ .NAMESPACE }}@dvp + user: + token: {{ .TOKEN }} + EOF + + render-cluster-config: + desc: Generate cluster config (helm template) + silent: true + deps: + - task: render-kubeconfig + vars: + TMP_DIR: "{{ .TMP_DIR }}" + VALUES_FILE: "{{ .VALUES_FILE }}" + PARENT_KUBECONFIG: "{{ .PARENT_KUBECONFIG }}" + - task: password-gen + vars: + TMP_DIR: "{{ .TMP_DIR }}" + vars: + TMP_DIR: '{{ .TMP_DIR | default (printf "%s/%s" .TMP_ROOT "default") }}' + VALUES_FILE: "{{ .VALUES_FILE | default .VALUES_TEMPLATE_FILE }}" + PARENT_KUBECONFIG: '{{ .PARENT_KUBECONFIG | default (env "KUBECONFIG") | default "" }}' + GENERATED_VALUES_FILE: '{{ printf "%s/%s" .TMP_DIR "generated-values.yaml" }}' + PASSWORD_HASH_FILE: '{{ printf "%s/%s" .TMP_DIR "password-hash.txt" }}' + SSH_DIR: '{{ .SSH_DIR | default (printf "%s/%s" .TMP_DIR "ssh") }}' + SSH_PUB_KEY_FILE: '{{ printf "%s/%s.pub" .SSH_DIR .SSH_FILE_NAME }}' + cmds: + - printf "" > {{ .GENERATED_VALUES_FILE }} + - | + export PASSWORD_HASH="$(cat {{ .PASSWORD_HASH_FILE }})" + yq eval --inplace '.passwordHash = env(PASSWORD_HASH)' {{ .GENERATED_VALUES_FILE }} + - | + export NEW_KUBECONFIG_B64="$(cat {{ .TMP_DIR }}/kubeconfig.yaml | base64 | tr -d '\n')" + yq eval --inplace '.kubeconfigDataBase64 = env(NEW_KUBECONFIG_B64)' {{ .GENERATED_VALUES_FILE }} + - | + # Inject registry Docker config from environment (set by modules-actions/setup or manually) + if [ -n "${REGISTRY_DOCKER_CFG:-}" ]; then + yq eval --inplace '.deckhouse.registryDockerCfg = env(REGISTRY_DOCKER_CFG)' {{ .GENERATED_VALUES_FILE }} + fi + - | + if [ -n "{{ .TARGET_STORAGE_CLASS | default "" }}" ]; then + export _SC='{{ .TARGET_STORAGE_CLASS }}' + yq eval --inplace '.storageClass = env(_SC)' {{ .GENERATED_VALUES_FILE }} + fi + - | + export SSH_PUB_KEY="$(cat {{ .SSH_PUB_KEY_FILE }})" + yq eval --inplace '.sshPublicKey = env(SSH_PUB_KEY)' {{ .GENERATED_VALUES_FILE }} + - helm template dvp-over-dvp-cluster-config {{ 
.CLUSTER_CONFIG_CHART_PATH }} -f {{ .VALUES_FILE }} -f {{ .GENERATED_VALUES_FILE }} > {{ .TMP_DIR }}/config.yaml + + dhctl-bootstrap: + desc: Bootstrap Deckhouse over DVP via jump-host (docker dhctl with bastion) + deps: + - task: render-cluster-config + vars: + TMP_DIR: "{{ .TMP_DIR }}" + VALUES_FILE: "{{ .VALUES_FILE }}" + PARENT_KUBECONFIG: "{{ .PARENT_KUBECONFIG }}" + TARGET_STORAGE_CLASS: "{{ .TARGET_STORAGE_CLASS }}" + SSH_FILE_NAME: "{{ .SSH_FILE_NAME }}" + vars: + TMP_DIR: '{{ .TMP_DIR | default (printf "%s/%s" .TMP_ROOT "default") }}' + VALUES_FILE: "{{ .VALUES_FILE | default .VALUES_TEMPLATE_FILE }}" + REGISTRY_DOCKER_CFG: '{{ .REGISTRY_DOCKER_CFG | default (env "REGISTRY_DOCKER_CFG") | default "" }}' + SSH_DIR: '{{ .SSH_DIR | default (printf "%s/%s" .TMP_DIR "ssh") }}' + SSH_PRIV_KEY_FILE: '{{ printf "%s/%s" .SSH_DIR .SSH_FILE_NAME }}' + NAMESPACE: + sh: yq eval '.namespace' {{ .VALUES_FILE }} + DEFAULT_USER: + sh: yq eval '.image.defaultUser' {{ .VALUES_FILE }} + JUMPHOST_EXT_IP: + sh: export KUBECONFIG='{{ .PARENT_KUBECONFIG | default (env "KUBECONFIG") | default "" }}'; kubectl -n {{ .NAMESPACE }} exec -it deployment/jump-host -- dig @resolver4.opendns.com myip.opendns.com +short | tr -d '\r' + JUMPHOST_NODEPORT: + sh: export KUBECONFIG='{{ .PARENT_KUBECONFIG | default (env "KUBECONFIG") | default "" }}'; kubectl -n {{ .NAMESPACE }} get svc jump-host -o json | jq '.spec.ports[] | select(.port==2222) | .nodePort' + IMAGE: "dev-registry.deckhouse.io/sys/deckhouse-oss/install:main" + env: + KUBECONFIG: '{{ .PARENT_KUBECONFIG | default (env "KUBECONFIG") | default "" }}' + cmds: + - | + set -euo pipefail + # Configure registry auth for docker pull + if [ -n "{{ .REGISTRY_DOCKER_CFG }}" ]; then + mkdir -p ~/.docker + echo '{{ .REGISTRY_DOCKER_CFG }}' | base64 -d > ~/.docker/config.json + fi + # Pull dhctl image locally (runner authenticated in workflow) + docker pull --platform=linux/amd64 "{{ .IMAGE }}" + # Run dhctl bootstrap with SSH bastion (jump-host) + docker run --rm --platform=linux/amd64 \ + -v "{{ .TMP_DIR }}:/work" \ + "{{ .IMAGE }}" \ + dhctl bootstrap \ + --config=/work/config.yaml \ + --ssh-agent-private-keys=/work/ssh/{{ .SSH_FILE_NAME }} \ + --ssh-user={{ .DEFAULT_USER }} \ + --ssh-bastion-port={{ .JUMPHOST_NODEPORT }} \ + --ssh-bastion-host={{ .JUMPHOST_EXT_IP }} \ + --ssh-bastion-user=user \ + --preflight-skip-availability-ports-check \ + --preflight-skip-deckhouse-user-check \ + --preflight-skip-registry-credential \ + --preflight-skip-deckhouse-edition-check + - | + docker image rm {{ .IMAGE }} >/dev/null 2>&1 || true + + # ------------------------------------------------------------ + # SSH Keys management (use GH keys or generate new ones) + # ------------------------------------------------------------ + ssh:import-gh: + desc: Download predefined SSH keys from deckhouse/virtualization repo + vars: + TMP_DIR: '{{ .TMP_DIR | default (printf "%s/%s" .TMP_ROOT "default") }}' + SSH_DIR: '{{ .SSH_DIR | default (printf "%s/%s" .TMP_DIR "ssh") }}' + SSH_FILE_NAME: '{{ .SSH_FILE_NAME | default "id_ed" }}' + GH_RAW_URL_PRIV: "https://raw.githubusercontent.com/deckhouse/virtualization/main/test/e2e/legacy/testdata/sshkeys/id_ed" + GH_RAW_URL_PUB: "https://raw.githubusercontent.com/deckhouse/virtualization/main/test/e2e/legacy/testdata/sshkeys/id_ed.pub" + cmds: + - mkdir -p {{ .SSH_DIR }} + - curl -fsSL {{ .GH_RAW_URL_PRIV }} -o {{ .SSH_DIR }}/{{ .SSH_FILE_NAME }} + - curl -fsSL {{ .GH_RAW_URL_PUB }} -o {{ .SSH_DIR }}/{{ .SSH_FILE_NAME }}.pub + - chmod 0600 {{ .SSH_DIR 
}}/{{ .SSH_FILE_NAME }} + - chmod 0644 {{ .SSH_DIR }}/{{ .SSH_FILE_NAME }}.pub + status: + - test -f "{{ .SSH_DIR }}/{{ .SSH_FILE_NAME }}" + + ssh:ensure: + desc: Ensure SSH keys exist (import from GitHub) + vars: + TMP_DIR: '{{ .TMP_DIR | default (printf "%s/%s" .TMP_ROOT "default") }}' + SSH_DIR: '{{ .SSH_DIR | default (printf "%s/%s" .TMP_DIR "ssh") }}' + cmds: + - | + echo "[SSH] Importing GH keys to {{ .SSH_DIR }}/{{ .SSH_FILE_NAME }}" + task ssh:import-gh SSH_DIR='{{ .SSH_DIR }}' SSH_FILE_NAME='{{ .SSH_FILE_NAME }}' + + # ------------------------------------------------------------ + # Nested cluster helpers (SC + kubeconfig) + # ------------------------------------------------------------ + nested:kubeconfig: + desc: Build kubeconfig for nested cluster + vars: + TMP_DIR: '{{ .TMP_DIR | default (printf "%s/%s" .TMP_ROOT "default") }}' + VALUES_FILE: "{{ .VALUES_FILE | default .VALUES_TEMPLATE_FILE }}" + NAMESPACE: "{{ .NAMESPACE }}" + DOMAIN: + sh: yq eval '.domain // ""' {{ .VALUES_FILE }} + DEFAULT_USER: + sh: yq eval '.image.defaultUser' {{ .VALUES_FILE }} + SSH_DIR: '{{ .SSH_DIR | default (printf "%s/%s" .TMP_DIR "ssh") }}' + SSH_FILE_NAME: '{{ .SSH_FILE_NAME | default "id_ed" }}' + SSH_PRIV_KEY_FILE: '{{ printf "%s/%s" .SSH_DIR .SSH_FILE_NAME }}' + NESTED_DIR: '{{ .NESTED_DIR | default (printf "%s/nested-%s" .TMP_DIR .NAMESPACE) }}' + NESTED_KUBECONFIG: '{{ .NESTED_KUBECONFIG | default (printf "%s/kubeconfig" .NESTED_DIR) }}' + PARENT_KUBECONFIG_PATH: '{{ .PARENT_KUBECONFIG | default (env "KUBECONFIG") | default "" }}' + cmds: + - | + set -euo pipefail + NESTED_DIR="{{ .NESTED_DIR }}" + NESTED_KUBECONFIG="{{ .NESTED_KUBECONFIG }}" + if ! mkdir -p "${NESTED_DIR}"; then + echo "[ERR] Failed to create nested directory: ${NESTED_DIR}" >&2 + exit 1 + fi + - chmod +x scripts/build_nested_kubeconfig.sh + - | + scripts/build_nested_kubeconfig.sh \ + -o "{{ .NESTED_KUBECONFIG }}" \ + -n "{{ .NAMESPACE }}" \ + -d "{{ .DOMAIN }}" \ + -k "{{ .PARENT_KUBECONFIG_PATH }}" \ + -s "{{ .SSH_PRIV_KEY_FILE }}" \ + -u "{{ .DEFAULT_USER }}" + + nested:storage:sds: + desc: Configure SDS storage profile in nested cluster + vars: + NESTED_KUBECONFIG: "{{ .NESTED_KUBECONFIG }}" + SDS_SC_NAME: '{{ .SDS_SC_NAME | default "linstor-thin-r2" }}' + SDS_DVCR_SIZE: '{{ .SDS_DVCR_SIZE | default "5Gi" }}' + cmds: + - chmod +x scripts/configure_sds_storage.sh + - | + scripts/configure_sds_storage.sh \ + -k "{{ .NESTED_KUBECONFIG }}" \ + -s "{{ .SDS_SC_NAME }}" \ + -d "{{ .SDS_DVCR_SIZE }}" + + # ------------------------------------------------------------ + # Cleanup helpers + # ------------------------------------------------------------ + cleanup:namespaces: + desc: Delete namespaces by prefix and wait for deletion + vars: + PREFIX: '{{ .PREFIX | default (env "CLEANUP_PREFIX") | default "nightly-nested-e2e-" }}' + TMP_DIR: '{{ .TMP_DIR | default (printf "%s/%s" .TMP_ROOT "cleanup") }}' + PARENT_KUBECONFIG_PATH: '{{ .PARENT_KUBECONFIG | default (env "KUBECONFIG") | default (printf "%s/%s" .TMP_ROOT "cleanup/parent.kubeconfig") }}' + API_URL: '{{ .API_URL | default (env "API_URL") | default (env "E2E_K8S_URL") | default "" }}' + SA_TOKEN: '{{ .SA_TOKEN | default (env "SA_TOKEN") | default (env "E2E_NESTED_SA_SECRET") | default "" }}' + cmds: + - | + set -euo pipefail + if [ ! 
-s "{{ .PARENT_KUBECONFIG_PATH }}" ]; then + if [ -z "{{ .API_URL }}" ] || [ -z "{{ .SA_TOKEN }}" ]; then + echo "[ERR] Unable to build parent kubeconfig: API_URL/SA_TOKEN are empty" >&2 + exit 1 + fi + mkdir -p "{{ .TMP_DIR }}" + task parent:kubeconfig OUTPUT='{{ .PARENT_KUBECONFIG_PATH }}' API_URL='{{ .API_URL }}' SA_TOKEN='{{ .SA_TOKEN }}' + fi + export KUBECONFIG='{{ .PARENT_KUBECONFIG_PATH }}' + echo "[CLEANUP] Prefix='{{ .PREFIX }}'" + ns_list=$(kubectl get ns -o json | jq -r --arg p '{{ .PREFIX }}' '.items[].metadata.name | select(startswith($p))') + if [ -z "${ns_list}" ]; then + echo "[INFO] No namespaces to delete"; exit 0 + fi + for ns in $ns_list; do + echo "[CLEANUP] Deleting namespace $ns ..." + kubectl delete ns "$ns" --wait=false || true + done + echo "[CLEANUP] Waiting for namespaces to be deleted..." + for ns in $ns_list; do + kubectl wait --for=delete ns/"$ns" --timeout=600s || echo "[WARN] Namespace $ns was not fully deleted within timeout" + done + + # ------------------------------------------------------------ + # CI helpers: kubeconfig + registry + # ------------------------------------------------------------ + parent:kubeconfig: + desc: Build parent kubeconfig from URL + SA token + vars: + OUTPUT: '{{ .OUTPUT | default (env "KUBECONFIG") | default "$HOME/.kube/config" }}' + API_URL: '{{ .API_URL | default (env "E2E_K8S_URL") | default "" }}' + SA_TOKEN: '{{ .SA_TOKEN | default (env "E2E_SA_TOKEN") | default "" }}' + cmds: + - | + set -euo pipefail + if [ -z "{{ .API_URL }}" ] || [ -z "{{ .SA_TOKEN }}" ]; then + echo "[ERR] API_URL/SA_TOKEN is empty" >&2; exit 1; fi + chmod +x ./scripts/build_parent_kubeconfig.sh + ./scripts/build_parent_kubeconfig.sh -o "{{ .OUTPUT }}" -a "{{ .API_URL }}" -t "{{ .SA_TOKEN }}" + + # ------------------------------------------------------------ + # CI: Unified installation task + # ------------------------------------------------------------ + install:nested:env: + desc: Install complete nested environment (infra + bootstrap + disks + kubeconfig + SDS) + vars: + TMP_DIR: '{{ .TMP_DIR | default (printf "%s/%s" .TMP_ROOT "default") }}' + VALUES_FILE: "{{ .VALUES_FILE | default .VALUES_TEMPLATE_FILE }}" + PARENT_KUBECONFIG: '{{ .PARENT_KUBECONFIG | default (env "KUBECONFIG") }}' + REGISTRY_DOCKER_CFG: '{{ .REGISTRY_DOCKER_CFG | default (env "REGISTRY_DOCKER_CFG") | default "" }}' + TARGET_STORAGE_CLASS: "{{ .TARGET_STORAGE_CLASS }}" + ATTACH_DISK_SIZE: '{{ .ATTACH_DISK_SIZE | default "10Gi" }}' + EFFECTIVE_DISK_SC: "{{ .EFFECTIVE_DISK_SC }}" + NAMESPACE: "{{ .NAMESPACE }}" + NESTED_DIR: '{{ .NESTED_DIR | default (printf "%s/nested-%s" .TMP_DIR .NAMESPACE) }}' + NESTED_KUBECONFIG: '{{ .NESTED_KUBECONFIG | default (printf "%s/kubeconfig" .NESTED_DIR) }}' + SDS_SC_NAME: "{{ .SDS_SC_NAME }}" + DATA_DISK_COUNT: '{{ .DATA_DISK_COUNT | default "2" }}' + cmds: + - echo "๐Ÿ“ฆ Installing infra (namespace/RBAC/ingress)" + - task: infra-deploy + vars: + TMP_DIR: "{{ .TMP_DIR }}" + VALUES_FILE: "{{ .VALUES_FILE }}" + PARENT_KUBECONFIG: "{{ .PARENT_KUBECONFIG }}" + - echo "๐Ÿš€ Bootstrapping nested cluster" + - task: dhctl-bootstrap + vars: + TMP_DIR: "{{ .TMP_DIR }}" + VALUES_FILE: "{{ .VALUES_FILE }}" + PARENT_KUBECONFIG: "{{ .PARENT_KUBECONFIG }}" + REGISTRY_DOCKER_CFG: "{{ .REGISTRY_DOCKER_CFG }}" + TARGET_STORAGE_CLASS: "{{ .TARGET_STORAGE_CLASS }}" + - echo "๐Ÿ’ฟ Attaching data disks to workers" + - task: infra:attach-storage-disks-hotplug + vars: + TMP_DIR: "{{ .TMP_DIR }}" + VALUES_FILE: "{{ .VALUES_FILE }}" + PARENT_KUBECONFIG: "{{ 
.PARENT_KUBECONFIG }}" + DISK_SIZE: "{{ .ATTACH_DISK_SIZE }}" + STORAGE_CLASS: "{{ .EFFECTIVE_DISK_SC }}" + DISK_COUNT: "{{ .DATA_DISK_COUNT }}" + - echo "๐Ÿ” Building nested kubeconfig" + - task: nested:kubeconfig + vars: + TMP_DIR: "{{ .TMP_DIR }}" + VALUES_FILE: "{{ .VALUES_FILE }}" + PARENT_KUBECONFIG: "{{ .PARENT_KUBECONFIG }}" + NAMESPACE: "{{ .NAMESPACE }}" + NESTED_DIR: "{{ .NESTED_DIR }}" + NESTED_KUBECONFIG: "{{ .NESTED_KUBECONFIG }}" + - echo "๐Ÿ’พ Configuring SDS storage" + - task: nested:storage:sds + vars: + TMP_DIR: "{{ .TMP_DIR }}" + NESTED_KUBECONFIG: "{{ .NESTED_KUBECONFIG }}" + SDS_SC_NAME: "{{ .SDS_SC_NAME }}" diff --git a/ci/dvp-e2e/charts/cluster-config/.helmignore b/ci/dvp-e2e/charts/cluster-config/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/ci/dvp-e2e/charts/cluster-config/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ci/dvp-e2e/charts/cluster-config/Chart.yaml b/ci/dvp-e2e/charts/cluster-config/Chart.yaml new file mode 100644 index 0000000000..de10150df7 --- /dev/null +++ b/ci/dvp-e2e/charts/cluster-config/Chart.yaml @@ -0,0 +1,16 @@ +apiVersion: v2 +name: cluster-config +description: Cluster configuration for E2E testing +type: application +version: 0.1.0 +appVersion: "1.0.0" +keywords: + - cluster + - configuration + - e2e + - testing +home: https://github.com/deckhouse/deckhouse +sources: + - https://github.com/deckhouse/deckhouse +maintainers: + - name: Deckhouse Team diff --git a/ci/dvp-e2e/charts/cluster-config/templates/cluster-config.yaml b/ci/dvp-e2e/charts/cluster-config/templates/cluster-config.yaml new file mode 100644 index 0000000000..05837e1bc9 --- /dev/null +++ b/ci/dvp-e2e/charts/cluster-config/templates/cluster-config.yaml @@ -0,0 +1,48 @@ +# Cluster configuration for DVP-over-DVP E2E testing +apiVersion: deckhouse.io/v1 +kind: ClusterConfiguration +clusterType: Cloud +cloud: + provider: DVP + prefix: {{ .Values.clusterConfigurationPrefix | default "demo-cluster" }} +podSubnetCIDR: 10.112.0.0/16 +serviceSubnetCIDR: 10.223.0.0/16 +kubernetesVersion: "Automatic" +clusterDomain: "internal.cluster.local" +--- +apiVersion: deckhouse.io/v1 +kind: InitConfiguration +deckhouse: + imagesRepo: dev-registry.deckhouse.io/sys/deckhouse-oss + registryDockerCfg: {{ .Values.deckhouse.registryDockerCfg | quote }} + devBranch: {{ .Values.deckhouse.tag }} +--- +apiVersion: deckhouse.io/v1 +kind: DVPClusterConfiguration +layout: Standard +sshPublicKey: {{ .Values.sshPublicKey }} +masterNodeGroup: + replicas: {{ .Values.instances.masterNodes.count }} + instanceClass: + virtualMachine: + bootloader: {{ .Values.image.bootloader }} + cpu: + cores: {{ .Values.instances.masterNodes.cores }} + coreFraction: {{ .Values.instances.masterNodes.coreFraction }} + memory: + size: {{ .Values.instances.masterNodes.memory }} + ipAddresses: + - Auto + virtualMachineClassName: "{{ .Values.namespace }}-cpu" + rootDisk: + size: 50Gi + storageClass: {{ .Values.storageClass }} + image: + kind: VirtualImage + name: image + etcdDisk: + size: 15Gi + storageClass: {{ .Values.storageClass }} +provider: + kubeconfigDataBase64: {{ .Values.kubeconfigDataBase64 }} + namespace: {{ .Values.namespace 
}} diff --git a/ci/dvp-e2e/charts/cluster-config/templates/disabled-modules.yaml b/ci/dvp-e2e/charts/cluster-config/templates/disabled-modules.yaml new file mode 100644 index 0000000000..2887a2b168 --- /dev/null +++ b/ci/dvp-e2e/charts/cluster-config/templates/disabled-modules.yaml @@ -0,0 +1,10 @@ +{{- $modules := list "upmeter" "local-path-provisioner" "pod-reloader" "secret-copier" "namespace-configurator" -}} +{{ range $modules }} +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: {{ . }} +spec: + enabled: false +{{ end }} diff --git a/ci/dvp-e2e/charts/cluster-config/templates/e2e-sa.yaml b/ci/dvp-e2e/charts/cluster-config/templates/e2e-sa.yaml new file mode 100644 index 0000000000..a6e0b11732 --- /dev/null +++ b/ci/dvp-e2e/charts/cluster-config/templates/e2e-sa.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: e2e-runner + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: e2e-runner-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: e2e-runner + namespace: kube-system diff --git a/ci/dvp-e2e/charts/cluster-config/templates/ingress.yaml b/ci/dvp-e2e/charts/cluster-config/templates/ingress.yaml new file mode 100644 index 0000000000..387a3c89bc --- /dev/null +++ b/ci/dvp-e2e/charts/cluster-config/templates/ingress.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: deckhouse.io/v1 +kind: IngressNginxController +metadata: + name: main +spec: + inlet: HostPort + enableIstioSidecar: false + ingressClass: nginx + hostPort: + httpPort: 80 + httpsPort: 443 + nodeSelector: + node-role.kubernetes.io/master: '' + tolerations: + - effect: NoSchedule + operator: Exists diff --git a/ci/dvp-e2e/charts/cluster-config/templates/mc.yaml b/ci/dvp-e2e/charts/cluster-config/templates/mc.yaml new file mode 100644 index 0000000000..cb5872e8b6 --- /dev/null +++ b/ci/dvp-e2e/charts/cluster-config/templates/mc.yaml @@ -0,0 +1,89 @@ +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: deckhouse +spec: + version: 1 + enabled: true + settings: + bundle: Default + logLevel: Info +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: global +spec: + version: 1 + settings: + defaultClusterStorageClass: {{ .Values.storageClass | quote }} + modules: + publicDomainTemplate: "%s.{{ .Values.namespace }}.{{ .Values.domain }}" +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: user-authn +spec: + version: 1 + enabled: true + settings: + controlPlaneConfigurator: + dexCAMode: DoNotNeed + publishAPI: + enabled: true + https: + mode: Global + global: + kubeconfigGeneratorMasterCA: "" +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: cni-cilium +spec: + version: 1 + enabled: true + settings: + tunnelMode: VXLAN +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: snapshot-controller +spec: + version: 1 + enabled: true +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: snapshot-controller +spec: + imageTag: main + scanInterval: 15s +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: virtualization +spec: + version: 1 + enabled: true + settings: + dvcr: + storage: + persistentVolumeClaim: + size: 10Gi + type: PersistentVolumeClaim + virtualMachineCIDRs: + - 192.168.10.0/24 +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride 
+metadata: + name: virtualization +spec: + imageTag: {{ .Values.virtualization.tag }} + scanInterval: 15s diff --git a/ci/dvp-e2e/charts/cluster-config/templates/ngc.yaml b/ci/dvp-e2e/charts/cluster-config/templates/ngc.yaml new file mode 100644 index 0000000000..3672dc8e79 --- /dev/null +++ b/ci/dvp-e2e/charts/cluster-config/templates/ngc.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: deckhouse.io/v1alpha1 +kind: NodeGroupConfiguration +metadata: + name: qemu-guest-agent-install-ubuntu.sh +spec: + weight: 98 + nodeGroups: ["*"] + bundles: ["ubuntu-lts", "debian"] + content: | + bb-apt-install qemu-guest-agent + systemctl enable --now qemu-guest-agent +--- +apiVersion: deckhouse.io/v1alpha1 +kind: NodeGroupConfiguration +metadata: + name: install-tools.sh +spec: + weight: 98 + nodeGroups: ["*"] + bundles: ["*"] + content: | + bb-sync-file /etc/profile.d/01-kubectl-aliases.sh - << "EOF" + source <(/opt/deckhouse/bin/kubectl completion bash) + alias k=kubectl + complete -o default -F __start_kubectl k + EOF + + if [ ! -f /usr/local/bin/k9s ]; then + K9S_URL=$(curl -s https://api.github.com/repos/derailed/k9s/releases/latest | jq '.assets[] | select(.name=="k9s_Linux_amd64.tar.gz") | .browser_download_url' -r) + curl -L "${K9S_URL}" | tar -xz -C /usr/local/bin/ "k9s" + fi + + if [ ! -f /usr/local/bin/stern ]; then + STERN_URL=$(curl -s https://api.github.com/repos/stern/stern/releases/latest | jq '.assets[].browser_download_url | select(. | test("linux_amd64"))' -r) + curl -L "${STERN_URL}" | tar -xz -C /usr/local/bin/ "stern" + fi diff --git a/ci/dvp-e2e/charts/cluster-config/templates/nodegroups.yaml b/ci/dvp-e2e/charts/cluster-config/templates/nodegroups.yaml new file mode 100644 index 0000000000..4025e441b7 --- /dev/null +++ b/ci/dvp-e2e/charts/cluster-config/templates/nodegroups.yaml @@ -0,0 +1,40 @@ +{{ range .Values.instances.additionalNodes }} +--- +apiVersion: deckhouse.io/v1alpha1 +kind: DVPInstanceClass +metadata: + name: {{ .name }} +spec: + virtualMachine: + virtualMachineClassName: "{{ $.Values.namespace }}-cpu" + cpu: + cores: {{ .cores }} + coreFraction: {{ .coreFraction }} + memory: + size: {{ .memory }} + bootloader: {{ $.Values.image.bootloader }} + rootDisk: + size: 50Gi + storageClass: {{ $.Values.storageClass }} + image: + kind: VirtualImage + name: image +--- +apiVersion: deckhouse.io/v1 +kind: NodeGroup +metadata: + name: {{ .name }} +spec: +{{- if eq .name "system" }} + nodeTemplate: + labels: + node-role.deckhouse.io/system: "" +{{- end }} + nodeType: {{ .nodeType | default "CloudEphemeral" }} + cloudInstances: + minPerZone: {{ .count }} + maxPerZone: {{ .count }} + classReference: + kind: DVPInstanceClass + name: {{ .name }} +{{ end }} diff --git a/ci/dvp-e2e/charts/cluster-config/templates/rbac.yaml b/ci/dvp-e2e/charts/cluster-config/templates/rbac.yaml new file mode 100644 index 0000000000..6b8998a1e8 --- /dev/null +++ b/ci/dvp-e2e/charts/cluster-config/templates/rbac.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: deckhouse.io/v1 +kind: ClusterAuthorizationRule +metadata: + name: admin +spec: + subjects: + - kind: User + name: admin@deckhouse.io + accessLevel: SuperAdmin + portForwarding: true +--- +apiVersion: deckhouse.io/v1 +kind: User +metadata: + name: admin +spec: + email: admin@deckhouse.io + # echo "t3chn0l0gi4" | htpasswd -BinC 10 "" | cut -d: -f2 | base64 -w0 + password: {{ .Values.passwordHash }} diff --git a/ci/dvp-e2e/charts/cluster-config/values.yaml b/ci/dvp-e2e/charts/cluster-config/values.yaml new file mode 100644 index 0000000000..86c976457e --- /dev/null +++ 
b/ci/dvp-e2e/charts/cluster-config/values.yaml @@ -0,0 +1,45 @@ +# Cluster configuration for setting up nested clusters for running nightly E2E tests + +# Instance configuration +instances: + masterNodes: + count: 1 + cores: 8 + coreFraction: 50% + memory: 20Gi + additionalNodes: + - name: worker + count: 3 + cores: 6 + coreFraction: 50% + memory: 12Gi + +# Deckhouse configuration +deckhouse: + tag: main + +# Virtualization configuration +virtualization: + tag: main + +# Image configuration +image: + url: https://89d64382-20df-4581-8cc7-80df331f67fa.selstorage.ru/ubuntu/noble-server-cloudimg-amd64.img + defaultUser: ubuntu + bootloader: EFI + +# Ingress hosts +ingressHosts: + - api + - grafana + - dex + - prometheus + - console + - virtualization + +# Storage class for parent cluster VMs (overridden by workflow from profile) +storageClass: ceph-pool-r2-csi-rbd-immediate + +# Feature flags (only those used by templates) +features: + virtualization: true diff --git a/ci/dvp-e2e/charts/infra/.helmignore b/ci/dvp-e2e/charts/infra/.helmignore new file mode 100644 index 0000000000..0e8a0eb36f --- /dev/null +++ b/ci/dvp-e2e/charts/infra/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ci/dvp-e2e/charts/infra/Chart.yaml b/ci/dvp-e2e/charts/infra/Chart.yaml new file mode 100644 index 0000000000..29da1942f3 --- /dev/null +++ b/ci/dvp-e2e/charts/infra/Chart.yaml @@ -0,0 +1,15 @@ +apiVersion: v2 +name: infra +description: Infrastructure components for E2E testing +type: application +version: 0.1.0 +appVersion: "1.0.0" +keywords: + - infrastructure + - e2e + - testing +home: https://github.com/deckhouse/deckhouse +sources: + - https://github.com/deckhouse/deckhouse +maintainers: + - name: Deckhouse Team diff --git a/ci/dvp-e2e/charts/infra/templates/ingress.yaml b/ci/dvp-e2e/charts/infra/templates/ingress.yaml new file mode 100644 index 0000000000..b419188353 --- /dev/null +++ b/ci/dvp-e2e/charts/infra/templates/ingress.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: Service +metadata: + name: dvp-over-dvp-443 + namespace: {{ .Values.namespace }} +spec: + ports: + - port: 443 + targetPort: 443 + protocol: TCP + name: https + selector: + dvp.deckhouse.io/node-group: master +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: wildcard-https + namespace: {{ .Values.namespace }} + annotations: + nginx.ingress.kubernetes.io/ssl-passthrough: "true" + nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" +spec: + ingressClassName: nginx + rules: + {{- range .Values.ingressHosts }} + - host: "{{ . 
}}.{{ $.Values.namespace }}.{{ $.Values.domain }}" + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: dvp-over-dvp-443 + port: + number: 443 + {{- end }} diff --git a/ci/dvp-e2e/charts/infra/templates/jump-host/deploy.yaml b/ci/dvp-e2e/charts/infra/templates/jump-host/deploy.yaml new file mode 100644 index 0000000000..a6bee4278a --- /dev/null +++ b/ci/dvp-e2e/charts/infra/templates/jump-host/deploy.yaml @@ -0,0 +1,43 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: jump-host + namespace: {{ .Values.namespace }} +spec: + replicas: 1 + selector: + matchLabels: + app: jump-host + template: + metadata: + labels: + app: jump-host + spec: + containers: + - name: jump-host + image: registry-dvp.dev.flant.dev/tools/jump-host:v0.1.2 + imagePullPolicy: Always + resources: + limits: + cpu: "200m" + memory: "200Mi" + requests: + cpu: "200m" + memory: "200Mi" + ports: + - containerPort: 2222 + env: + - name: SSH_KEY + value: "{{ .Values.sshPublicKey }}" + securityContext: + runAsNonRoot: true + runAsUser: 1000 + securityContext: + runAsNonRoot: true + runAsUser: 1000 + tolerations: + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + diff --git a/ci/dvp-e2e/charts/infra/templates/jump-host/svc.yaml b/ci/dvp-e2e/charts/infra/templates/jump-host/svc.yaml new file mode 100644 index 0000000000..e2b809dcab --- /dev/null +++ b/ci/dvp-e2e/charts/infra/templates/jump-host/svc.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: jump-host + namespace: {{ .Values.namespace }} +spec: + type: NodePort + selector: + app: jump-host + ports: + - protocol: TCP + port: 2222 + targetPort: 2222 + diff --git a/ci/dvp-e2e/charts/infra/templates/ns.yaml b/ci/dvp-e2e/charts/infra/templates/ns.yaml new file mode 100644 index 0000000000..064087cab7 --- /dev/null +++ b/ci/dvp-e2e/charts/infra/templates/ns.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: {{ .Values.namespace }} + labels: + heritage: deckhouse diff --git a/ci/dvp-e2e/charts/infra/templates/rbac/rbac.yaml b/ci/dvp-e2e/charts/infra/templates/rbac/rbac.yaml new file mode 100644 index 0000000000..1a6a4b9846 --- /dev/null +++ b/ci/dvp-e2e/charts/infra/templates/rbac/rbac.yaml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: dkp-sa + namespace: {{ .Values.namespace }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: dkp-sa-secret + namespace: {{ .Values.namespace }} + annotations: + kubernetes.io/service-account.name: dkp-sa +type: kubernetes.io/service-account-token +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: dkp-sa-rb + namespace: {{ .Values.namespace }} +subjects: + - kind: ServiceAccount + name: dkp-sa + namespace: {{ .Values.namespace }} +roleRef: + kind: ClusterRole + name: d8:use:role:manager + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: dkp-sa-cluster-admin-{{ .Values.namespace }} +subjects: + - kind: ServiceAccount + name: dkp-sa + namespace: {{ .Values.namespace }} +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io diff --git a/ci/dvp-e2e/charts/infra/templates/registry-secret.yaml b/ci/dvp-e2e/charts/infra/templates/registry-secret.yaml new file mode 100644 index 0000000000..d26d10600e --- /dev/null +++ b/ci/dvp-e2e/charts/infra/templates/registry-secret.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: 
dhctl-regcred + namespace: {{ .Values.namespace }} +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ .Values.deckhouse.registryDockerCfg | quote }} + diff --git a/ci/dvp-e2e/charts/infra/templates/vi.yaml b/ci/dvp-e2e/charts/infra/templates/vi.yaml new file mode 100644 index 0000000000..66034a649d --- /dev/null +++ b/ci/dvp-e2e/charts/infra/templates/vi.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: virtualization.deckhouse.io/v1alpha2 +kind: VirtualImage +metadata: + name: image + namespace: {{ .Values.namespace }} +spec: + storage: ContainerRegistry + dataSource: + type: HTTP + http: + url: {{ .Values.image.url }} diff --git a/ci/dvp-e2e/charts/infra/templates/vmc.yaml b/ci/dvp-e2e/charts/infra/templates/vmc.yaml new file mode 100644 index 0000000000..39330ced39 --- /dev/null +++ b/ci/dvp-e2e/charts/infra/templates/vmc.yaml @@ -0,0 +1,7 @@ +apiVersion: virtualization.deckhouse.io/v1alpha2 +kind: VirtualMachineClass +metadata: + name: "{{ .Values.namespace }}-cpu" +spec: + cpu: + type: Discovery diff --git a/ci/dvp-e2e/charts/infra/values.yaml b/ci/dvp-e2e/charts/infra/values.yaml new file mode 100644 index 0000000000..d43de3435c --- /dev/null +++ b/ci/dvp-e2e/charts/infra/values.yaml @@ -0,0 +1,2 @@ +# Minimal defaults; templates primarily consume values from external run values. +# This file can stay intentionally small to avoid confusion. diff --git a/ci/dvp-e2e/manifests/storage/sds-modules.yaml b/ci/dvp-e2e/manifests/storage/sds-modules.yaml new file mode 100644 index 0000000000..42030bda40 --- /dev/null +++ b/ci/dvp-e2e/manifests/storage/sds-modules.yaml @@ -0,0 +1,48 @@ +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: sds-node-configurator +spec: + enabled: true + version: 1 +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: sds-node-configurator +spec: + imageTag: main + scanInterval: 15s +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: sds-local-volume +spec: + enabled: true + version: 1 +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: sds-local-volume +spec: + imageTag: main + scanInterval: 15s +--- +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: sds-replicated-volume +spec: + enabled: true + version: 1 +--- +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: sds-replicated-volume +spec: + imageTag: main + scanInterval: 15s diff --git a/ci/dvp-e2e/manifests/storage/sds.yaml b/ci/dvp-e2e/manifests/storage/sds.yaml new file mode 100644 index 0000000000..0b8e27da48 --- /dev/null +++ b/ci/dvp-e2e/manifests/storage/sds.yaml @@ -0,0 +1,33 @@ +--- +apiVersion: storage.deckhouse.io/v1alpha1 +kind: LVMVolumeGroup +metadata: + name: data +spec: + # Local VG; explicit local section is required for type=Local + type: Local + local: + actualVGNameOnTheNode: data + blockDeviceSelector: + devicePaths: + - /dev/sdd +--- +apiVersion: storage.deckhouse.io/v1alpha1 +kind: ReplicatedStoragePool +metadata: + name: data +spec: + # Pool type must be LVM or LVMThin + type: LVM + lvmVolumeGroups: + - name: data +--- +apiVersion: storage.deckhouse.io/v1alpha1 +kind: ReplicatedStorageClass +metadata: + name: linstor-thin-r2 +spec: + storagePool: data + reclaimPolicy: Delete + topology: Ignored + volumeAccess: Local diff --git a/ci/dvp-e2e/scripts/attach_worker_disks.sh b/ci/dvp-e2e/scripts/attach_worker_disks.sh new file mode 100755 index 0000000000..f6d0b2ca94 --- /dev/null +++ 
b/ci/dvp-e2e/scripts/attach_worker_disks.sh @@ -0,0 +1,206 @@ +#!/usr/bin/env bash + +# Copyright 2025 Flant JSC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euo pipefail + +# Usage: +# attach_worker_disks.sh -n namespace -s storage_class -z disk_size -c disk_count [-k kubeconfig] + +namespace="" +storage_class="" +disk_size="10Gi" +disk_count="2" +kubeconfig="${KUBECONFIG:-}" + +while getopts ":n:s:z:c:k:" opt; do + case $opt in + n) namespace="$OPTARG" ;; + s) storage_class="$OPTARG" ;; + z) disk_size="$OPTARG" ;; + c) disk_count="$OPTARG" ;; + k) kubeconfig="$OPTARG" ;; + *) + echo "Usage: $0 -n -s -z -c [-k ]" >&2 + exit 2 + ;; + esac +done + +if [ -z "${namespace}" ] || [ -z "${storage_class}" ]; then + echo "Usage: $0 -n -s -z -c [-k ]" >&2 + exit 2 +fi + +if [ -n "${kubeconfig}" ]; then + export KUBECONFIG="${kubeconfig}" +fi + +echo "[INFRA] Attaching ${disk_count} storage disks to worker VMs using hotplug in namespace ${namespace}" + +# Wait for worker VMs +for i in $(seq 1 50); do + worker_count=$(kubectl -n "${namespace}" get vm -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' 2>/dev/null | grep -c worker || echo "0") + if [ "$worker_count" -gt 0 ]; then + echo "[INFRA] Found $worker_count worker VMs" + break + fi + echo "[INFRA] Waiting for worker VMs... ($i/50)" + sleep 10 +done + +# Get worker VMs +mapfile -t workers < <(kubectl -n "${namespace}" get vm -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' 2>/dev/null | grep worker || true) + +if [ ${#workers[@]} -eq 0 ]; then + echo "[INFRA] No worker VMs found; nothing to do" + exit 0 +fi + +echo "[INFRA] Found ${#workers[@]} worker VMs: ${workers[*]}" + +for vm in "${workers[@]}"; do + [ -z "$vm" ] && continue + echo "[INFRA] Processing VM: $vm" + + # Wait for VM to be Running + for i in $(seq 1 50); do + phase=$(kubectl -n "${namespace}" get vm "$vm" -o jsonpath='{.status.phase}' 2>/dev/null || true) + if [ "$phase" = "Running" ]; then + echo "[INFRA] VM $vm is Running" + break + fi + echo "[INFRA] VM $vm phase=$phase; retry $i/50" + sleep 10 + done + + for disk_num in $(seq 1 "${disk_count}"); do + vd="storage-disk-${disk_num}-$vm" + echo "[INFRA] Creating VirtualDisk $vd (${disk_size}, sc=${storage_class})" + + cat > "/tmp/vd-$vd.yaml" </dev/null 2>&1 || kubectl -n "${namespace}" apply -f "/tmp/vd-$vd.yaml" + + # Wait for VirtualDisk to be Ready + echo "[INFRA] Waiting for VirtualDisk $vd to be Ready..." 
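+    # Poll the VirtualDisk phase until it reports Ready: 50 attempts x 5s gives
+    # roughly four minutes. This assumes the storage class can provision within
+    # that window; increase the retry budget below if provisioning is slower.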
+ vd_phase="" + for j in $(seq 1 50); do + vd_phase=$(kubectl -n "${namespace}" get vd "$vd" -o jsonpath='{.status.phase}' 2>/dev/null || true) + if [ "$vd_phase" = "Ready" ]; then + echo "[INFRA] VirtualDisk $vd is Ready" + break + fi + echo "[INFRA] VD $vd phase=$vd_phase; retry $j/50" + sleep 5 + done + + if [ "$vd_phase" != "Ready" ]; then + echo "[ERROR] VirtualDisk $vd not Ready" + kubectl -n "${namespace}" get vd "$vd" -o yaml || true + kubectl -n "${namespace}" get events --sort-by=.lastTimestamp | tail -n 100 || true + exit 1 + fi + + # Wait for PVC + pvc_name="" + for j in $(seq 1 50); do + pvc_name=$(kubectl -n "${namespace}" get vd "$vd" -o jsonpath='{.status.target.persistentVolumeClaimName}' 2>/dev/null || true) + [ -n "$pvc_name" ] && break + echo "[INFRA] Waiting for PVC name for VD $vd; retry $j/50" + sleep 3 + done + + if [ -n "$pvc_name" ]; then + echo "[INFRA] Waiting PVC $pvc_name to reach phase=Bound..." + for j in $(seq 1 120); do + pvc_phase=$(kubectl -n "${namespace}" get pvc "$pvc_name" -o jsonpath='{.status.phase}' 2>/dev/null || true) + if [ "$pvc_phase" = "Bound" ]; then + break + fi + [ $((j % 10)) -eq 0 ] && echo "[INFRA] PVC $pvc_name phase=$pvc_phase; retry $j/120" + sleep 2 + done + if [ "$pvc_phase" != "Bound" ]; then + echo "[WARN] PVC $pvc_name not Bound after waiting" + fi + fi + + # Create hotplug attachment + att="att-$vd" + echo "[INFRA] Creating VirtualMachineBlockDeviceAttachment $att for VM $vm" + cat > "/tmp/att-$att.yaml" </dev/null 2>&1 || kubectl -n "${namespace}" apply -f "/tmp/att-$att.yaml" + + # Wait for attachment + echo "[INFRA] Waiting for VMBDA $att to be Attached..." + att_phase="" + success_by_vm=0 + for i in $(seq 1 50); do + att_phase=$(kubectl -n "${namespace}" get vmbda "$att" -o jsonpath='{.status.phase}' 2>/dev/null || true) + if [ "$att_phase" = "Attached" ]; then + echo "[INFRA] Disk $vd attached to VM $vm" + break + fi + if kubectl -n "${namespace}" get vm "$vm" -o json 2>/dev/null \ + | jq -e --arg vd "$att" --arg disk "$vd" ' + ([.status.blockDeviceRefs[]? + | select( + (.virtualMachineBlockDeviceAttachmentName == $vd) + or (.name == $disk) + ) + | select((.attached == true) and (.hotplugged == true)) + ] | length) > 0' >/dev/null; then + echo "[INFRA] VM reports disk $vd attached/hotplugged; proceeding" + success_by_vm=1 + break + fi + [ $((i % 10)) -eq 0 ] && echo "[INFRA] Disk $vd phase=$att_phase; retry $i/50" + sleep 5 + done + + if [ "$att_phase" != "Attached" ] && [ "${success_by_vm:-0}" -ne 1 ]; then + echo "[ERROR] VMBDA $att did not reach Attached state" + kubectl -n "${namespace}" get vmbda "$att" -o yaml || true + kubectl -n "${namespace}" get vm "$vm" -o json || true + kubectl -n "${namespace}" get events --sort-by=.lastTimestamp | tail -n 100 || true + exit 1 + fi + done + + echo "[INFRA] VM $vm configured with hotplug disks" +done + +echo "[INFRA] All worker VMs configured with storage disks via hotplug" diff --git a/ci/dvp-e2e/scripts/build_nested_kubeconfig.sh b/ci/dvp-e2e/scripts/build_nested_kubeconfig.sh new file mode 100755 index 0000000000..f7d9a34d7f --- /dev/null +++ b/ci/dvp-e2e/scripts/build_nested_kubeconfig.sh @@ -0,0 +1,144 @@ +#!/usr/bin/env bash + +# Copyright 2025 Flant JSC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
diff --git a/ci/dvp-e2e/scripts/build_nested_kubeconfig.sh b/ci/dvp-e2e/scripts/build_nested_kubeconfig.sh
new file mode 100755
index 0000000000..f7d9a34d7f
--- /dev/null
+++ b/ci/dvp-e2e/scripts/build_nested_kubeconfig.sh
@@ -0,0 +1,144 @@
+#!/usr/bin/env bash
+
+# Copyright 2025 Flant JSC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -euo pipefail
+
+# Usage:
+# build_nested_kubeconfig.sh -o /path/to/kubeconfig -n namespace -d domain -k parent_kubeconfig -s ssh_key -u user
+
+out=""
+namespace=""
+domain=""
+parent_kubeconfig=""
+ssh_key=""
+ssh_user="ubuntu"
+
+while getopts ":o:n:d:k:s:u:" opt; do
+  case $opt in
+    o) out="$OPTARG" ;;
+    n) namespace="$OPTARG" ;;
+    d) domain="$OPTARG" ;;
+    k) parent_kubeconfig="$OPTARG" ;;
+    s) ssh_key="$OPTARG" ;;
+    u) ssh_user="$OPTARG" ;;
+    *)
+      echo "Usage: $0 -o <out> -n <namespace> -d <domain> -k <parent_kubeconfig> -s <ssh_key> [-u <ssh_user>]" >&2
+      exit 2
+      ;;
+  esac
+done
+
+if [ -z "${out}" ] || [ -z "${namespace}" ] || [ -z "${domain}" ] || [ -z "${parent_kubeconfig}" ] || [ -z "${ssh_key}" ]; then
+  echo "Usage: $0 -o <out> -n <namespace> -d <domain> -k <parent_kubeconfig> -s <ssh_key> [-u <ssh_user>]" >&2
+  exit 2
+fi
+
+if [ ! -s "${parent_kubeconfig}" ]; then
+  echo "[ERR] parent kubeconfig not found at ${parent_kubeconfig}" >&2
+  exit 1
+fi
+
+if [ ! -f "${ssh_key}" ]; then
+  echo "[ERR] SSH key not found at ${ssh_key}" >&2
+  exit 1
+fi
+
+# Create output directory
+OUT_DIR="$(dirname "$out")"
+if ! mkdir -p "${OUT_DIR}"; then
+  echo "[ERR] Failed to create output directory: ${OUT_DIR}" >&2
+  exit 1
+fi
+
+# Find master VM
+echo "[INFO] Finding master VM in namespace ${namespace}..."
+MASTER_NAME=$(KUBECONFIG="${parent_kubeconfig}" kubectl -n "${namespace}" get vm -l dvp.deckhouse.io/node-group=master -o jsonpath='{.items[0].metadata.name}')
+if [ -z "$MASTER_NAME" ]; then
+  echo "[ERR] master VM not found in namespace ${namespace}" >&2
+  exit 1
+fi
+echo "[INFO] Found master VM: ${MASTER_NAME}"
+
+# Get token via SSH
+TOKEN_FILE="$(dirname "$out")/token.txt"
+rm -f "$TOKEN_FILE"
+SSH_OK=0
+
+echo "[INFO] Obtaining token from nested cluster..."
+for attempt in $(seq 1 6); do
+  if KUBECONFIG="${parent_kubeconfig}" d8 v ssh \
+    --username="${ssh_user}" \
+    --identity-file="${ssh_key}" \
+    --local-ssh=true \
+    --local-ssh-opts="-o StrictHostKeyChecking=no" \
+    --local-ssh-opts="-o UserKnownHostsFile=/dev/null" \
+    "${MASTER_NAME}.${namespace}" \
+    -c '
+      set -euo pipefail
+      SUDO="sudo /opt/deckhouse/bin/kubectl"
+      $SUDO -n kube-system get sa e2e-admin >/dev/null 2>&1 || $SUDO -n kube-system create sa e2e-admin >/dev/null 2>&1
+      $SUDO -n kube-system get clusterrolebinding e2e-admin >/dev/null 2>&1 || $SUDO -n kube-system create clusterrolebinding e2e-admin --clusterrole=cluster-admin --serviceaccount=kube-system:e2e-admin >/dev/null 2>&1
+      for i in $(seq 1 10); do
+        TOKEN=$($SUDO -n kube-system create token e2e-admin --duration=24h 2>/dev/null) && echo "$TOKEN" && break
+        echo "[WARN] Failed to create token (attempt $i/10); retrying in 3s" >&2
+        sleep 3
+      done
+      if [ -z "${TOKEN:-}" ]; then
+        echo "[ERR] Unable to create token for e2e-admin after 10 attempts" >&2
+        exit 1
+      fi
+    ' > "$TOKEN_FILE"; then
+    SSH_OK=1
+    break
+  fi
+  echo "[WARN] d8 v ssh attempt $attempt failed; retry in 15s..."
+  sleep 15
+done
+
-s "$TOKEN_FILE" ]; then + echo "[ERR] Failed to obtain nested token via d8 v ssh after multiple attempts" >&2 + cat "$TOKEN_FILE" 2>/dev/null || true + exit 1 +fi + +NESTED_TOKEN=$(cat "$TOKEN_FILE") +SERVER_URL="https://api.${namespace}.${domain}" + +# Generate kubeconfig +cat > "$out" < + +out="" +api="${E2E_K8S_URL:-}" +tok="${E2E_SA_TOKEN:-}" + +while getopts ":o:a:t:" opt; do + case $opt in + o) out="$OPTARG" ;; + a) api="$OPTARG" ;; + t) tok="$OPTARG" ;; + *) echo "Usage: $0 -o -a -t " >&2; exit 2 ;; + esac +done + +if [ -z "${out}" ] || [ -z "${api}" ] || [ -z "${tok}" ]; then + echo "Usage: $0 -o -a -t " >&2 + exit 2 +fi + +mkdir -p "$(dirname "$out")" +cat >"$out" < -s [-d ]" >&2 + exit 2 + ;; + esac +done + +if [ -z "${kubeconfig}" ] || [ ! -f "${kubeconfig}" ]; then + echo "Error: kubeconfig is required and must exist" >&2 + exit 2 +fi + +export KUBECONFIG="${kubeconfig}" + +# Step 0: Wait for API server +echo "[SDS] Waiting for API server to be ready..." +for i in $(seq 1 50); do + if kubectl get nodes >/dev/null 2>&1; then + echo "[SDS] API server is ready!" + break + fi + echo "[SDS] API server not ready yet, retry $i/50" + sleep 10 +done + +# Step 1: Enable sds-node-configurator +echo "[SDS] Step 1: Enabling sds-node-configurator..." +cat <<'EOF' | kubectl apply -f - +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: sds-node-configurator +spec: + imageTag: main + scanInterval: 15s +EOF + +cat <<'EOF' | kubectl -n d8-system apply -f - +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: sds-node-configurator + namespace: d8-system +spec: + enabled: true + version: 1 + settings: + disableDs: false + enableThinProvisioning: true +EOF + +# Step 2: Wait for sds-node-configurator +echo "[SDS] Step 2: Waiting for sds-node-configurator to be Ready..." +if ! kubectl wait module sds-node-configurator --for=jsonpath='{.status.phase}'=Ready --timeout=600s >/dev/null 2>&1; then + echo "[WARN] sds-node-configurator did not reach Ready within 10 minutes" >&2 +fi + +# Step 3: Enable sds-replicated-volume +echo "[SDS] Step 3: Enabling sds-replicated-volume..." +cat <<'EOF' | kubectl apply -f - +apiVersion: deckhouse.io/v1alpha2 +kind: ModulePullOverride +metadata: + name: sds-replicated-volume +spec: + imageTag: main + scanInterval: 15s +EOF + +cat <<'EOF' | kubectl -n d8-system apply -f - +apiVersion: deckhouse.io/v1alpha1 +kind: ModuleConfig +metadata: + name: sds-replicated-volume + namespace: d8-system +spec: + enabled: true + version: 1 +EOF + +# Step 4: Wait for sds-replicated-volume +echo "[SDS] Step 4: Waiting for sds-replicated-volume to be Ready..." +if ! kubectl wait module sds-replicated-volume --for=jsonpath='{.status.phase}'=Ready --timeout=600s >/dev/null 2>&1; then + echo "[WARN] sds-replicated-volume did not reach Ready within 10 minutes" >&2 +fi + +# Step 6: Create LVMVolumeGroups per node +echo "[SDS] Creating per-node LVMVolumeGroups (type=Local)..." 
+NODES=$(kubectl get nodes -o json \
+  | jq -r '.items[] | select(.metadata.labels["node-role.kubernetes.io/control-plane"] == null and .metadata.labels["node-role.kubernetes.io/master"] == null) | .metadata.name')
+
+if [ -z "$NODES" ]; then
+  NODES=$(kubectl get nodes -o json | jq -r '.items[].metadata.name')
+fi
+
+for node in $NODES; do
+  [ -z "$node" ] && continue
+  MATCH_EXPR=$(yq eval -n '
+    .key = "storage.deckhouse.io/device-path" |
+    .operator = "In" |
+    .values = ["/dev/sdb","/dev/vdb","/dev/xvdb","/dev/sdc","/dev/vdc","/dev/xvdc","/dev/sdd","/dev/vdd","/dev/xvdd"]
+  ')
+  NODE="$node" MATCH_EXPR="$MATCH_EXPR" yq eval -n '
+    .apiVersion = "storage.deckhouse.io/v1alpha1" |
+    .kind = "LVMVolumeGroup" |
+    .metadata.name = "data-" + env(NODE) |
+    .spec.type = "Local" |
+    .spec.local.nodeName = env(NODE) |
+    .spec.actualVGNameOnTheNode = "data" |
+    .spec.blockDeviceSelector.matchExpressions = [ env(MATCH_EXPR) ]
+  ' | kubectl apply -f -
+done
+
+# Step 7: Create ReplicatedStoragePool
+echo "[SDS] Creating ReplicatedStoragePool 'data' from LVMVolumeGroups..."
+LVGS=$(printf "%s\n" $NODES | sed 's/^/ - name: data-/')
+
+cat </dev/null 2>&1; then
+  echo "[ERR] StorageClass '${storage_class}' not found in nested cluster" >&2
+  exit 1
+fi
+
+# Step 10: Set default StorageClass
+echo "[SDS] Setting '${storage_class}' as default StorageClass via ModuleConfig global..."
+PATCH=$(jq -cn --arg v "${storage_class}" '[{"op":"replace","path":"/spec/settings/defaultClusterStorageClass","value":$v}]')
+kubectl patch mc global --type='json' -p="$PATCH"
+
+echo "[SDS] SDS storage configuration complete!"
diff --git a/ci/dvp-e2e/values.yaml b/ci/dvp-e2e/values.yaml
new file mode 100644
index 0000000000..a3dbb0b7d3
--- /dev/null
+++ b/ci/dvp-e2e/values.yaml
@@ -0,0 +1,31 @@
+domain: e2e.virtlab.flant.com
+clusterConfigurationPrefix: e2e
+deckhouse:
+  tag: main
+virtualization:
+  tag: main
+features:
+  virtualization: true
+image:
+  url: https://89d64382-20df-4581-8cc7-80df331f67fa.selstorage.ru/ubuntu/noble-server-cloudimg-amd64.img
+  defaultUser: ubuntu
+  bootloader: EFI
+ingressHosts:
+  - api
+  - grafana
+  - dex
+  - prometheus
+  - console
+  - virtualization
+instances:
+  masterNodes:
+    count: 1
+    cores: 8
+    coreFraction: 50%
+    memory: 20Gi
+  additionalNodes:
+    - name: worker
+      count: 3
+      cores: 6
+      coreFraction: 50%
+      memory: 12Gi
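The helper scripts added in this patch all expose plain flag interfaces, so the storage setup can also be exercised by hand against an existing run namespace. The sketch below uses illustrative values only: the namespace, kubeconfig locations and SSH key path are placeholders, the storage class names and disk parameters echo the job matrix, and the exact invocation used by the workflow may differ. The SDS configuration script, whose diff header is not visible above, is referred to generically:

    # Illustrative local run; parent.kubeconfig is assumed to point at the parent cluster.
    NS="nightly-nested-e2e-sds-42"                      # example run namespace
    ci/dvp-e2e/scripts/attach_worker_disks.sh \
      -n "$NS" -s linstor-thin-r1-immediate -z 10Gi -c 2 -k tmp/parent.kubeconfig
    ci/dvp-e2e/scripts/build_nested_kubeconfig.sh \
      -o tmp/nested.kubeconfig -n "$NS" -d e2e.virtlab.flant.com \
      -k tmp/parent.kubeconfig -s ~/.ssh/id_ed25519
    # The SDS configuration script then takes the nested kubeconfig and the
    # replicated storage class, e.g.: <script> -k tmp/nested.kubeconfig -s linstor-thin-r2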