diff --git a/.github/workflows/ci-tests-sharded.yml b/.github/workflows/ci-tests-sharded.yml new file mode 100644 index 000000000..fe78c1780 --- /dev/null +++ b/.github/workflows/ci-tests-sharded.yml @@ -0,0 +1,417 @@ +name: Continuous Integration Tests (Sharded) + +on: + pull_request: + +# Ensure we don't have multiple runs for the same PR +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +env: + TOTAL_SHARDS: 4 + EXPECTED_COVERAGE: 75 + +jobs: + # ========================================================================== + # Unit Tests (unchanged) + # ========================================================================== + unit-tests: + runs-on: ubuntu-latest + steps: + - name: Checkout the repository + uses: actions/checkout@1e31de5234b9f8995739874a8ce0492dc87873e2 #v4.0.0 + + - name: Set up Go version + uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 #v4 + with: + go-version-file: go.mod + cache: true + + - run: go version + + - name: go test with coverage + run: | + sudo chmod +x ./internal/commands/.scripts/up.sh + ./internal/commands/.scripts/up.sh + + - name: Check if total coverage is greater then 77.7 + shell: bash + run: | + CODE_COV=$(go tool cover -func cover.out | grep total | awk '{print substr($3, 1, length($3)-1)}') + EXPECTED_CODE_COV=77.7 + var=$(awk 'BEGIN{ print "'$CODE_COV'"<"'$EXPECTED_CODE_COV'" }') + if [ "$var" -eq 1 ];then + echo "Your code coverage is too low. Coverage precentage is: $CODE_COV" + exit 1 + else + echo "Your code coverage test passed! 
Coverage precentage is: $CODE_COV" + exit 0 + fi + + # ========================================================================== + # Integration Tests - Sharded Execution + # ========================================================================== + integration-tests: + runs-on: ubuntu-latest + strategy: + fail-fast: false # Run all shards even if one fails + max-parallel: 4 + matrix: + shard: [1, 2, 3, 4] + + outputs: + shard-1-status: ${{ steps.run-tests.outputs.status }} + + steps: + - name: Checkout the repository + uses: actions/checkout@1e31de5234b9f8995739874a8ce0492dc87873e2 #v4.0.0 + + - name: Set up Go version + uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 #v4 + with: + go-version-file: go.mod + cache: true + + - run: go version + + - name: Go Build + run: go build -o ./bin/cx ./cmd + + - name: Install gocovmerge + run: go install github.com/wadey/gocovmerge@latest + + - name: Install jq (for manifest parsing) + run: sudo apt-get install -y jq + + - name: Install pre-commit + run: | + pip install pre-commit + pre-commit install + + # Cache ScaResolver to avoid downloading on every run + - name: Cache ScaResolver + id: cache-scaresolver + uses: actions/cache@v4 + with: + path: /tmp/ScaResolver + key: scaresolver-linux64-v1 + restore-keys: | + scaresolver-linux64- + + - name: Download ScaResolver + if: steps.cache-scaresolver.outputs.cache-hit != 'true' + run: | + wget -q https://sca-downloads.s3.amazonaws.com/cli/latest/ScaResolver-linux64.tar.gz + tar -xzf ScaResolver-linux64.tar.gz -C /tmp + rm -f ScaResolver-linux64.tar.gz + + # Cache Docker image for Squid proxy + - name: Pull and cache Squid image + run: docker pull ubuntu/squid:5.2-22.04_beta + + - name: Start Squid Proxy + run: | + docker run \ + --name squid \ + -d \ + -p ${{ env.PROXY_PORT }}:3128 \ + -v $(pwd)/internal/commands/.scripts/squid/squid.conf:/etc/squid/squid.conf \ + -v $(pwd)/internal/commands/.scripts/squid/passwords:/etc/squid/passwords \ + 
ubuntu/squid:5.2-22.04_beta + env: + PROXY_PORT: 3128 + + - name: Run Integration Tests (Shard ${{ matrix.shard }}/${{ env.TOTAL_SHARDS }}) + id: run-tests + shell: bash + env: + # Shard configuration + SHARD_INDEX: ${{ matrix.shard }} + TOTAL_SHARDS: ${{ env.TOTAL_SHARDS }} + TEST_TIMEOUT: 45m + RERUN_TIMEOUT: 15m + # Checkmarx credentials + CX_BASE_URI: ${{ secrets.CX_BASE_URI }} + CX_CLIENT_ID: ${{ secrets.CX_CLIENT_ID }} + CX_CLIENT_SECRET: ${{ secrets.CX_CLIENT_SECRET }} + CX_BASE_AUTH_URI: ${{ secrets.CX_BASE_AUTH_URI }} + CX_AST_USERNAME: ${{ secrets.CX_AST_USERNAME }} + CX_AST_PASSWORD: ${{ secrets.CX_AST_PASSWORD }} + CX_APIKEY: ${{ secrets.CX_APIKEY }} + CX_TENANT: ${{ secrets.CX_TENANT }} + CX_SCAN_SSH_KEY: ${{ secrets.CX_SCAN_SSH_KEY }} + CX_ORIGIN: "cli-tests" + # Proxy settings + PROXY_HOST: localhost + PROXY_PORT: 3128 + PROXY_USERNAME: ${{ secrets.PROXY_USER }} + PROXY_PASSWORD: ${{ secrets.PROXY_PASSWORD }} + # GitHub PR settings + PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} + PR_GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} + PR_GITHUB_NAMESPACE: "checkmarx" + PR_GITHUB_REPO_NAME: "ast-cli" + PR_GITHUB_NUMBER: 983 + # GitLab settings + PR_GITLAB_TOKEN: ${{ secrets.PR_GITLAB_TOKEN }} + PR_GITLAB_NAMESPACE: ${{ secrets.PR_GITLAB_NAMESPACE }} + PR_GITLAB_REPO_NAME: ${{ secrets.PR_GITLAB_REPO_NAME }} + PR_GITLAB_PROJECT_ID: ${{ secrets.PR_GITLAB_PROJECT_ID }} + PR_GITLAB_IID: ${{ secrets.PR_GITLAB_IID }} + GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }} + # Azure settings + AZURE_ORG: ${{ secrets.AZURE_ORG }} + AZURE_PROJECT: ${{ secrets.AZURE_PROJECT }} + AZURE_REPOS: ${{ secrets.AZURE_REPOS }} + AZURE_TOKEN: ${{ secrets.AZURE_TOKEN }} + AZURE_NEW_ORG: ${{ secrets.AZURE_NEW_ORG }} + AZURE_PROJECT_NAME: ${{ secrets.AZURE_PROJECT_NAME }} + AZURE_PR_NUMBER: 1 + AZURE_NEW_TOKEN: ${{ secrets.AZURE_NEW_TOKEN }} + # Bitbucket settings + BITBUCKET_WORKSPACE: ${{ secrets.BITBUCKET_WORKSPACE }} + BITBUCKET_REPOS: ${{ secrets.BITBUCKET_REPOS }} + 
BITBUCKET_USERNAME: ${{ secrets.BITBUCKET_USERNAME }} + BITBUCKET_PASSWORD: ${{ secrets.BITBUCKET_PASSWORD }} + PR_BITBUCKET_TOKEN: ${{ secrets.PR_BITBUCKET_TOKEN }} + PR_BITBUCKET_NAMESPACE: "AstSystemTest" + PR_BITBUCKET_REPO_NAME: "cliIntegrationTest" + PR_BITBUCKET_ID: 1 + GITHUB_ACTOR: ${{ github.actor }} + run: | + chmod +x ./internal/commands/.scripts/integration_shard.sh + + # Run the shard + set +e + ./internal/commands/.scripts/integration_shard.sh ${{ matrix.shard }} ${{ env.TOTAL_SHARDS }} + exit_code=$? + set -e + + # Set output for downstream jobs + if [ $exit_code -eq 0 ]; then + echo "status=passed" >> $GITHUB_OUTPUT + else + echo "status=failed" >> $GITHUB_OUTPUT + fi + + exit $exit_code + + - name: Stop Squid Proxy + if: always() + run: docker rm -f squid || true + + - name: Upload Shard Artifacts + if: always() + uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 #v4 + with: + name: shard-${{ matrix.shard }}-results + path: | + shard_${{ matrix.shard }}_of_${{ env.TOTAL_SHARDS }}/ + retention-days: 7 + + # ========================================================================== + # Validate All Tests Were Executed + # ========================================================================== + validate-integration-tests: + runs-on: ubuntu-latest + needs: integration-tests + if: always() # Run even if some shards failed + + steps: + - name: Checkout the repository + uses: actions/checkout@1e31de5234b9f8995739874a8ce0492dc87873e2 #v4.0.0 + + - name: Set up Go version + uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 #v4 + with: + go-version-file: go.mod + cache: true + + - name: Install gocovmerge + run: go install github.com/wadey/gocovmerge@latest + + - name: Install jq + run: sudo apt-get install -y jq + + - name: Download All Shard Artifacts + uses: actions/download-artifact@v4 + with: + pattern: shard-*-results + merge-multiple: false + + - name: Organize Artifacts + run: | + # Move artifacts to expected 
structure + for shard in $(seq 1 ${{ env.TOTAL_SHARDS }}); do + artifact_dir="shard-${shard}-results" + if [ -d "$artifact_dir" ]; then + # The artifact might contain nested directory + if [ -d "$artifact_dir/shard_${shard}_of_${{ env.TOTAL_SHARDS }}" ]; then + mv "$artifact_dir/shard_${shard}_of_${{ env.TOTAL_SHARDS }}" . + else + mv "$artifact_dir" "shard_${shard}_of_${{ env.TOTAL_SHARDS }}" + fi + fi + done + + # Debug: List what we have + echo "Available shard directories:" + ls -la shard_* || echo "No shard directories found" + + - name: Validate All Tests Executed + id: validate + env: + TOTAL_SHARDS: ${{ env.TOTAL_SHARDS }} + EXPECTED_COVERAGE: ${{ env.EXPECTED_COVERAGE }} + ARTIFACTS_DIR: "." + run: | + chmod +x ./internal/commands/.scripts/integration_validate.sh + + set +e + ./internal/commands/.scripts/integration_validate.sh ${{ env.TOTAL_SHARDS }} + exit_code=$? + set -e + + if [ $exit_code -eq 0 ]; then + echo "validation=passed" >> $GITHUB_OUTPUT + else + echo "validation=failed" >> $GITHUB_OUTPUT + fi + + exit $exit_code + + - name: Upload Validation Report + if: always() + uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 #v4 + with: + name: validation-report + path: | + validation_report.json + test_summary.md + merged_coverage.out + coverage.html + retention-days: 30 + + - name: Upload Final Coverage + uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 #v4 + with: + name: ${{ runner.os }}-coverage-latest + path: coverage.html + + - name: Post Summary to PR + if: always() + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + + let summary = '## Integration Test Results\n\n'; + + try { + if (fs.existsSync('test_summary.md')) { + summary = fs.readFileSync('test_summary.md', 'utf8'); + } + } catch (e) { + summary += 'Unable to read test summary.\n'; + } + + // Also add to job summary + await core.summary.addRaw(summary).write(); + + - name: Check Validation Status + if: always() + 
run: | + if [ -f validation_report.json ]; then + echo "=== Validation Report ===" + cat validation_report.json | jq . + + status=$(jq -r '.final_status' validation_report.json) + if [ "$status" != "passed" ]; then + echo "::error::Integration tests validation failed!" + exit 1 + fi + else + echo "::error::Validation report not found!" + exit 1 + fi + + # ========================================================================== + # Lint (unchanged) + # ========================================================================== + lint: + name: lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@1e31de5234b9f8995739874a8ce0492dc87873e2 #v4.0.0 + + - name: Set up Go version + uses: actions/setup-go@4d34df0c2316fe8122ab82dc22947d607c0c91f9 #v4 + with: + go-version-file: go.mod + cache: true + + - run: go version + - run: go mod tidy + + - name: golangci-lint + uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc #v3 + with: + skip-pkg-cache: true + version: v1.64.2 + args: -c .golangci.yml --timeout 5m + only-new-issues: true + + # ========================================================================== + # Security Scans (unchanged) + # ========================================================================== + govulncheck: + runs-on: ubuntu-latest + name: govulncheck + steps: + - id: govulncheck + uses: golang/govulncheck-action@7da72f730e37eeaad891fcff0a532d27ed737cd4 #v1 + continue-on-error: true + with: + go-version-file: go.mod + go-package: ./... 
+ + checkDockerImage: + runs-on: ubuntu-latest + name: scan Docker Image with Trivy + steps: + - name: Checkout code + uses: actions/checkout@722adc63f1aa60a57ec37892e133b1d319cae598 #2.0.0 + + - name: Set up Docker + uses: docker/setup-buildx-action@cf09c5c41b299b55c366aff30022701412eb6ab0 #v1.0.0 + + - name: Log in to Docker Hub + uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b #v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Build the project + run: go build -o ./cx ./cmd + + - name: Build Docker image + run: docker build -t ast-cli:${{ github.sha }} . + + - name: Run Trivy scanner without downloading DBs + uses: aquasecurity/trivy-action@915b19bbe73b92a6cf82a1bc12b087c9a19a5fe2 #v0.28.0 + with: + scan-type: 'image' + image-ref: ast-cli:${{ github.sha }} + format: 'table' + exit-code: '1' + ignore-unfixed: true + vuln-type: 'os,library' + output: './trivy-image-results.txt' + env: + TRIVY_SKIP_JAVA_DB_UPDATE: true + + - name: Inspect action report + if: always() + shell: bash + run: cat ./trivy-image-results.txt diff --git a/internal/commands/.scripts/integration_setup.sh b/internal/commands/.scripts/integration_setup.sh new file mode 100644 index 000000000..b648b3b4b --- /dev/null +++ b/internal/commands/.scripts/integration_setup.sh @@ -0,0 +1,153 @@ +#!/bin/bash +# ============================================================================= +# Integration Test Setup Script +# ============================================================================= +# Common setup steps for integration tests (proxy, tools, etc.) +# Used by both sharded and non-sharded test runs. 
+# +# Usage: source ./integration_setup.sh +# OR +# ./integration_setup.sh [--proxy-only|--tools-only|--all] +# ============================================================================= + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# Configuration +PROXY_PORT="${PROXY_PORT:-3128}" +PROXY_CONTAINER_NAME="${PROXY_CONTAINER_NAME:-squid}" +SCARESOLVER_PATH="${SCARESOLVER_PATH:-/tmp}" + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +BLUE='\033[0;34m' +NC='\033[0m' + +log_info() { + echo -e "${BLUE}[SETUP]${NC} $1" +} + +log_success() { + echo -e "${GREEN}[SETUP]${NC} $1" +} + +log_error() { + echo -e "${RED}[SETUP]${NC} $1" +} + +# ============================================================================= +# Setup Squid Proxy +# ============================================================================= +setup_proxy() { + log_info "Setting up Squid proxy..." + + # Check if already running + if docker ps -q -f name="$PROXY_CONTAINER_NAME" | grep -q .; then + log_info "Squid proxy already running" + return 0 + fi + + # Remove if exists but stopped + docker rm -f "$PROXY_CONTAINER_NAME" 2>/dev/null || true + + # Start proxy + docker run \ + --name "$PROXY_CONTAINER_NAME" \ + -d \ + -p "${PROXY_PORT}:3128" \ + -v "${SCRIPT_DIR}/squid/squid.conf:/etc/squid/squid.conf" \ + -v "${SCRIPT_DIR}/squid/passwords:/etc/squid/passwords" \ + ubuntu/squid:5.2-22.04_beta + + # Wait for proxy to be ready + log_info "Waiting for proxy to be ready..." + for i in {1..30}; do + if docker exec "$PROXY_CONTAINER_NAME" squid -k check 2>/dev/null; then + log_success "Squid proxy is ready" + return 0 + fi + sleep 1 + done + + log_error "Proxy failed to start" + return 1 +} + +# ============================================================================= +# Stop Squid Proxy +# ============================================================================= +stop_proxy() { + log_info "Stopping Squid proxy..." 
+ docker rm -f "$PROXY_CONTAINER_NAME" 2>/dev/null || true + log_success "Squid proxy stopped" +} + +# ============================================================================= +# Download ScaResolver +# ============================================================================= +setup_scaresolver() { + log_info "Setting up ScaResolver..." + + if [ -f "${SCARESOLVER_PATH}/ScaResolver" ]; then + log_info "ScaResolver already exists" + return 0 + fi + + log_info "Downloading ScaResolver..." + wget -q https://sca-downloads.s3.amazonaws.com/cli/latest/ScaResolver-linux64.tar.gz -O /tmp/ScaResolver-linux64.tar.gz + tar -xzf /tmp/ScaResolver-linux64.tar.gz -C "$SCARESOLVER_PATH" + rm -f /tmp/ScaResolver-linux64.tar.gz + + log_success "ScaResolver installed to ${SCARESOLVER_PATH}" +} + +# ============================================================================= +# Install Go Tools +# ============================================================================= +setup_go_tools() { + log_info "Setting up Go tools..." + + if ! command -v gocovmerge &> /dev/null; then + log_info "Installing gocovmerge..." 
+ go install github.com/wadey/gocovmerge@latest + fi + + log_success "Go tools ready" +} + +# ============================================================================= +# Full Setup +# ============================================================================= +setup_all() { + setup_proxy + setup_scaresolver + setup_go_tools +} + +# ============================================================================= +# Main +# ============================================================================= +main() { + case "${1:-all}" in + --proxy-only) + setup_proxy + ;; + --tools-only) + setup_scaresolver + setup_go_tools + ;; + --stop-proxy) + stop_proxy + ;; + --all|*) + setup_all + ;; + esac +} + +# Only run main if script is executed directly (not sourced) +if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then + main "$@" +fi diff --git a/internal/commands/.scripts/integration_shard.sh b/internal/commands/.scripts/integration_shard.sh new file mode 100644 index 000000000..8292072ab --- /dev/null +++ b/internal/commands/.scripts/integration_shard.sh @@ -0,0 +1,400 @@ +#!/bin/bash +# ============================================================================= +# Production-Ready Integration Test Sharding Script +# ============================================================================= +# This script dynamically discovers all integration tests and runs a specific +# shard. It ensures all tests are accounted for and provides validation. 
+#
+# Usage: ./integration_shard.sh <shard-index> <total-shards>
+# Example: ./integration_shard.sh 1 4 (runs shard 1 of 4)
+#
+# Environment Variables:
+#   SHARD_INDEX   - Current shard (1-based), overrides first argument
+#   TOTAL_SHARDS  - Total number of shards, overrides second argument
+#   TEST_TIMEOUT  - Timeout per shard (default: 45m)
+#   RERUN_TIMEOUT - Timeout for rerunning failed tests (default: 15m)
+# =============================================================================
+
+set -euo pipefail
+
+# Configuration
+SHARD_INDEX="${SHARD_INDEX:-${1:-1}}"
+TOTAL_SHARDS="${TOTAL_SHARDS:-${2:-4}}"
+TEST_TIMEOUT="${TEST_TIMEOUT:-45m}"
+RERUN_TIMEOUT="${RERUN_TIMEOUT:-15m}"
+TEST_PACKAGE="github.com/checkmarx/ast-cli/test/integration"
+COVERAGE_PACKAGES="github.com/checkmarx/ast-cli/internal/commands,github.com/checkmarx/ast-cli/internal/services,github.com/checkmarx/ast-cli/internal/wrappers"
+
+# Output files
+SHARD_DIR="./shard_${SHARD_INDEX}_of_${TOTAL_SHARDS}"
+TEST_LIST_FILE="${SHARD_DIR}/test_list.txt"
+EXECUTED_TESTS_FILE="${SHARD_DIR}/executed_tests.txt"
+FAILED_TESTS_FILE="${SHARD_DIR}/failed_tests.txt"
+COVERAGE_FILE="${SHARD_DIR}/cover.out"
+TEST_OUTPUT_FILE="${SHARD_DIR}/test_output.log"
+MANIFEST_FILE="${SHARD_DIR}/manifest.json"
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m' # No Color
+
+# NOTE: all log helpers write to stderr. Several functions below are consumed
+# via command substitution (e.g. ALL_TESTS=$(discover_tests)); logging to
+# stdout would mix "[INFO] ..." lines (including ANSI color codes) into the
+# captured data and ultimately into the go test -run regex.
+log_info() {
+    echo -e "${BLUE}[INFO]${NC} $1" >&2
+}
+
+log_success() {
+    echo -e "${GREEN}[SUCCESS]${NC} $1" >&2
+}
+
+log_warning() {
+    echo -e "${YELLOW}[WARNING]${NC} $1" >&2
+}
+
+log_error() {
+    echo -e "${RED}[ERROR]${NC} $1" >&2
+}
+
+# =============================================================================
+# Step 1: Setup and Validation
+# =============================================================================
+# Validates SHARD_INDEX/TOTAL_SHARDS and creates/initializes the shard
+# working directory and its result files.
+setup() {
+    log_info "Setting up shard ${SHARD_INDEX} of ${TOTAL_SHARDS}"
+
+    # Validate inputs
+    if [[ ! "$SHARD_INDEX" =~ ^[0-9]+$ ]] || [[ ! "$TOTAL_SHARDS" =~ ^[0-9]+$ ]]; then
+        log_error "SHARD_INDEX and TOTAL_SHARDS must be positive integers"
+        exit 1
+    fi
+
+    if [ "$SHARD_INDEX" -lt 1 ] || [ "$SHARD_INDEX" -gt "$TOTAL_SHARDS" ]; then
+        log_error "SHARD_INDEX must be between 1 and TOTAL_SHARDS"
+        exit 1
+    fi
+
+    # Create shard directory
+    mkdir -p "$SHARD_DIR"
+
+    # Initialize (truncate) result files
+    > "$TEST_LIST_FILE"
+    > "$EXECUTED_TESTS_FILE"
+    > "$FAILED_TESTS_FILE"
+    > "$TEST_OUTPUT_FILE"
+}
+
+# =============================================================================
+# Step 2: Discover All Tests
+# =============================================================================
+# Prints the sorted list of all integration test function names on stdout.
+discover_tests() {
+    log_info "Discovering all integration tests..."
+
+    # Get all test function names (sorted for deterministic sharding)
+    ALL_TESTS=$(go test -tags integration -list "Test.*" "$TEST_PACKAGE" 2>/dev/null | grep "^Test" | sort)
+
+    if [ -z "$ALL_TESTS" ]; then
+        log_error "No tests discovered! Check if test package exists."
+        exit 1
+    fi
+
+    TOTAL_TESTS=$(echo "$ALL_TESTS" | wc -l | tr -d ' ')
+    log_info "Discovered ${TOTAL_TESTS} total tests"
+
+    echo "$ALL_TESTS"
+}
+
+# =============================================================================
+# Step 3: Calculate Shard Assignment
+# =============================================================================
+# Prints (on stdout) the subset of tests assigned to SHARD_INDEX using
+# round-robin (modulo) distribution, so newly added tests are automatically
+# spread across shards.
+get_shard_tests() {
+    local all_tests="$1"
+    local shard_tests=""
+    local test_index=0
+
+    while IFS= read -r test_name; do
+        test_index=$((test_index + 1))
+        # Assign test to shard using: (test_index - 1) % total_shards + 1
+        assigned_shard=$(( ((test_index - 1) % TOTAL_SHARDS) + 1 ))
+
+        if [ "$assigned_shard" -eq "$SHARD_INDEX" ]; then
+            if [ -n "$shard_tests" ]; then
+                shard_tests="${shard_tests}"$'\n'"${test_name}"
+            else
+                shard_tests="${test_name}"
+            fi
+        fi
+    done <<< "$all_tests"
+
+    echo "$shard_tests"
+}
+
+# =============================================================================
+# Step 4: Build Test Pattern
+# =============================================================================
+# Builds the go test -run regex ^(Test1|Test2|...)$ for this shard's tests.
+build_test_pattern() {
+    local tests="$1"
+
+    if [ -z "$tests" ]; then
+        echo ""
+        return
+    fi
+
+    local pattern="^("
+    local first=true
+
+    while IFS= read -r test_name; do
+        if [ "$first" = true ]; then
+            pattern="${pattern}${test_name}"
+            first=false
+        else
+            pattern="${pattern}|${test_name}"
+        fi
+    done <<< "$tests"
+
+    pattern="${pattern})$"
+    echo "$pattern"
+}
+
+# =============================================================================
+# Step 5: Run Tests
+# =============================================================================
+# Runs the shard's tests with coverage; returns go test's exit code.
+run_tests() {
+    local pattern="$1"
+    local shard_test_count="$2"
+
+    if [ -z "$pattern" ] || [ "$shard_test_count" -eq 0 ]; then
+        log_warning "No tests assigned to this shard"
+        echo '{"shard": '"$SHARD_INDEX"', "total_shards": '"$TOTAL_SHARDS"', "tests_assigned": 0, "tests_passed": 0, "tests_failed": 0, "status": "empty"}' > "$MANIFEST_FILE"
+        return 0
+    fi
+
+    log_info "Running ${shard_test_count} tests with pattern: ${pattern:0:100}..."
+
+    # Run tests and capture output; PIPESTATUS keeps go test's exit code
+    # rather than tee's.
+    set +e
+    go test \
+        -tags integration \
+        -v \
+        -timeout "$TEST_TIMEOUT" \
+        -coverpkg "$COVERAGE_PACKAGES" \
+        -coverprofile "$COVERAGE_FILE" \
+        -run "$pattern" \
+        "$TEST_PACKAGE" 2>&1 | tee "$TEST_OUTPUT_FILE"
+
+    local test_exit_code=${PIPESTATUS[0]}
+    set -e
+
+    return $test_exit_code
+}
+
+# =============================================================================
+# Step 6: Extract Results
+# =============================================================================
+# Parses the first-run output log into the executed/failed test files and
+# prints "<passed> <failed>" on stdout (logs go to stderr, so command
+# substitution captures only the counts).
+extract_results() {
+    # Extract passed tests ("--- PASS: TestName (0.01s)" -> field 3)
+    grep -E "^--- PASS: " "$TEST_OUTPUT_FILE" | awk '{print $3}' > "$EXECUTED_TESTS_FILE" 2>/dev/null || true
+
+    # Extract failed tests
+    grep -E "^--- FAIL: " "$TEST_OUTPUT_FILE" | awk '{print $3}' > "$FAILED_TESTS_FILE" 2>/dev/null || true
+
+    local passed_count=$(wc -l < "$EXECUTED_TESTS_FILE" 2>/dev/null | tr -d ' ' || echo "0")
+    local failed_count=$(wc -l < "$FAILED_TESTS_FILE" 2>/dev/null | tr -d ' ' || echo "0")
+
+    log_info "Passed: ${passed_count}, Failed: ${failed_count}"
+
+    echo "$passed_count $failed_count"
+}
+
+# =============================================================================
+# Step 7: Rerun Failed Tests
+# =============================================================================
+# Reruns each failed test individually; tests that pass on rerun are moved
+# from the failed list to the executed list. Returns non-zero if any test
+# still fails after its rerun.
+rerun_failed_tests() {
+    if [ ! -s "$FAILED_TESTS_FILE" ]; then
+        return 0
+    fi
+
+    log_warning "Rerunning failed tests..."
+
+    local rerun_status=0
+    local rerun_coverage="${SHARD_DIR}/cover_rerun.out"
+    # Iterate over a snapshot: the loop body edits FAILED_TESTS_FILE with
+    # sed -i, and reading a file while it is rewritten skips/duplicates lines.
+    local rerun_list="${SHARD_DIR}/rerun_list.txt"
+    cp "$FAILED_TESTS_FILE" "$rerun_list"
+
+    while IFS= read -r test_name; do
+        log_info "Rerunning: ${test_name}"
+
+        set +e
+        go test \
+            -tags integration \
+            -v \
+            -timeout "$RERUN_TIMEOUT" \
+            -coverpkg "$COVERAGE_PACKAGES" \
+            -coverprofile "$rerun_coverage" \
+            -run "^${test_name}$" \
+            "$TEST_PACKAGE" 2>&1 | tee -a "$TEST_OUTPUT_FILE"
+
+        if [ ${PIPESTATUS[0]} -ne 0 ]; then
+            rerun_status=1
+        else
+            # Remove from failed (anchored exact-line match, so removing
+            # TestFoo cannot also delete TestFooBar), add to passed
+            sed -i "/^${test_name}$/d" "$FAILED_TESTS_FILE" 2>/dev/null || true
+            echo "$test_name" >> "$EXECUTED_TESTS_FILE"
+        fi
+        set -e
+
+        # Merge rerun coverage into the shard coverage profile
+        if [ -f "$rerun_coverage" ] && [ -f "$COVERAGE_FILE" ]; then
+            if command -v gocovmerge &> /dev/null; then
+                gocovmerge "$COVERAGE_FILE" "$rerun_coverage" > "${SHARD_DIR}/merged_coverage.out"
+                mv "${SHARD_DIR}/merged_coverage.out" "$COVERAGE_FILE"
+            fi
+            rm -f "$rerun_coverage"
+        fi
+    done < "$rerun_list"
+
+    rm -f "$rerun_list"
+    return $rerun_status
+}
+
+# =============================================================================
+# Step 8: Generate Manifest
+# =============================================================================
+# Writes a JSON manifest describing this shard's assignment and results; the
+# downstream validation job parses it with jq.
+generate_manifest() {
+    local assigned_count="$1"
+    local passed_count="$2"
+    local failed_count="$3"
+    local status="$4"
+
+    # Get list of executed tests
+    local executed_tests_json="[]"
+    if [ -s "$EXECUTED_TESTS_FILE" ]; then
+        executed_tests_json=$(cat "$EXECUTED_TESTS_FILE" | jq -R -s 'split("\n") | map(select(length > 0))')
+    fi
+
+    # Get list of failed tests
+    local failed_tests_json="[]"
+    if [ -s "$FAILED_TESTS_FILE" ]; then
+        failed_tests_json=$(cat "$FAILED_TESTS_FILE" | jq -R -s 'split("\n") | map(select(length > 0))')
+    fi
+
+    # Get list of assigned tests
+    local assigned_tests_json="[]"
+    if [ -s "$TEST_LIST_FILE" ]; then
+        assigned_tests_json=$(cat "$TEST_LIST_FILE" | jq -R -s 'split("\n") | map(select(length > 0))')
+    fi
+
+    cat > "$MANIFEST_FILE" << EOF
+{
+  "shard_index": ${SHARD_INDEX},
+  "total_shards": ${TOTAL_SHARDS},
+  "tests_assigned": ${assigned_count},
+  "tests_passed": ${passed_count},
+  "tests_failed": ${failed_count},
+  "status": "${status}",
+  "assigned_tests": ${assigned_tests_json},
+  "executed_tests": ${executed_tests_json},
+  "failed_tests": ${failed_tests_json},
+  "coverage_file": "${COVERAGE_FILE}",
+  "timestamp": "$(date -u +"%Y-%m-%dT%H:%M:%SZ")"
+}
+EOF
+
+    log_info "Manifest generated: ${MANIFEST_FILE}"
+}
+
+# =============================================================================
+# Main Execution
+# =============================================================================
+main() {
+    local exit_code=0
+
+    log_info "========================================"
+    log_info "Integration Test Shard ${SHARD_INDEX}/${TOTAL_SHARDS}"
+    log_info "========================================"
+
+    # Setup
+    setup
+
+    # Discover all tests (logs go to stderr, so the substitution captures
+    # only test names)
+    ALL_TESTS=$(discover_tests)
+    TOTAL_TEST_COUNT=$(echo "$ALL_TESTS" | wc -l | tr -d ' ')
+
+    # Get tests for this shard
+    SHARD_TESTS=$(get_shard_tests "$ALL_TESTS")
+
+    if [ -z "$SHARD_TESTS" ]; then
+        SHARD_TEST_COUNT=0
+    else
+        SHARD_TEST_COUNT=$(echo "$SHARD_TESTS" | wc -l | tr -d ' ')
+    fi
+
+    # Save assigned tests to file
+    echo "$SHARD_TESTS" > "$TEST_LIST_FILE"
+
+    log_info "Total tests: ${TOTAL_TEST_COUNT}"
+    log_info "Tests for shard ${SHARD_INDEX}: ${SHARD_TEST_COUNT}"
+
+    # Build pattern and run tests
+    TEST_PATTERN=$(build_test_pattern "$SHARD_TESTS")
+
+    set +e
+    run_tests "$TEST_PATTERN" "$SHARD_TEST_COUNT"
+    test_exit_code=$?
+    set -e
+
+    # Extract results from the first run
+    results=$(extract_results)
+    passed_count=$(echo "$results" | awk '{print $1}')
+    failed_count=$(echo "$results" | awk '{print $2}')
+
+    # Rerun failed tests if any
+    if [ "$failed_count" -gt 0 ]; then
+        set +e
+        rerun_failed_tests
+        rerun_exit_code=$?
+        set -e
+
+        # Recount from the result files rather than re-parsing the log:
+        # the cumulative log still contains the original "--- FAIL:" lines
+        # for tests that passed on rerun, which would keep the shard "failed".
+        passed_count=$(sort -u "$EXECUTED_TESTS_FILE" | grep -c . || true)
+        failed_count=$(grep -c . "$FAILED_TESTS_FILE" || true)
+
+        if [ "$rerun_exit_code" -ne 0 ]; then
+            exit_code=1
+        fi
+    fi
+
+    # Determine final status
+    if [ "$failed_count" -gt 0 ]; then
+        status="failed"
+        exit_code=1
+    elif [ "$passed_count" -eq "$SHARD_TEST_COUNT" ]; then
+        status="passed"
+    else
+        status="incomplete"
+        exit_code=1
+    fi
+
+    # Generate manifest
+    generate_manifest "$SHARD_TEST_COUNT" "$passed_count" "$failed_count" "$status"
+
+    # Summary
+    log_info "========================================"
+    log_info "Shard ${SHARD_INDEX}/${TOTAL_SHARDS} Summary"
+    log_info "========================================"
+    log_info "Assigned: ${SHARD_TEST_COUNT}"
+    log_info "Passed: ${passed_count}"
+    log_info "Failed: ${failed_count}"
+    log_info "Status: ${status}"
+
+    if [ "$exit_code" -eq 0 ]; then
+        log_success "Shard ${SHARD_INDEX} completed successfully!"
+    else
+        log_error "Shard ${SHARD_INDEX} failed!"
+    fi
+
+    exit $exit_code
+}
+
+main "$@"
diff --git a/internal/commands/.scripts/integration_validate.sh b/internal/commands/.scripts/integration_validate.sh
new file mode 100644
index 000000000..92480a6f9
--- /dev/null
+++ b/internal/commands/.scripts/integration_validate.sh
@@ -0,0 +1,463 @@
+#!/bin/bash
+# =============================================================================
+# Integration Test Validation & Aggregation Script
+# =============================================================================
+# This script runs after all shards complete to:
+# 1. Verify ALL tests were executed across all shards
+# 2. Merge coverage reports from all shards
+# 3. Generate final summary report
+# 4. 
Fail if any tests were missed or failed
+#
+# Usage: ./integration_validate.sh <total-shards>
+# Example: ./integration_validate.sh 4
+#
+# Environment Variables:
+#   TOTAL_SHARDS      - Total number of shards
+#   EXPECTED_COVERAGE - Minimum coverage percentage (default: 75)
+#   ARTIFACTS_DIR     - Directory containing shard artifacts (default: .)
+# =============================================================================
+
+set -euo pipefail
+
+# Configuration
+TOTAL_SHARDS="${TOTAL_SHARDS:-${1:-4}}"
+EXPECTED_COVERAGE="${EXPECTED_COVERAGE:-75}"
+ARTIFACTS_DIR="${ARTIFACTS_DIR:-.}"
+TEST_PACKAGE="github.com/checkmarx/ast-cli/test/integration"
+
+# Output files
+VALIDATION_REPORT="validation_report.json"
+MERGED_COVERAGE="merged_coverage.out"
+FINAL_COVERAGE_HTML="coverage.html"
+SUMMARY_FILE="test_summary.md"
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m'
+
+# NOTE: log helpers write to stderr so that functions whose stdout is
+# captured via command substitution (e.g. discover_all_tests) return clean
+# data instead of "[INFO] ..." lines with ANSI color codes.
+log_info() {
+    echo -e "${BLUE}[INFO]${NC} $1" >&2
+}
+
+log_success() {
+    echo -e "${GREEN}[SUCCESS]${NC} $1" >&2
+}
+
+log_warning() {
+    echo -e "${YELLOW}[WARNING]${NC} $1" >&2
+}
+
+log_error() {
+    echo -e "${RED}[ERROR]${NC} $1" >&2
+}
+
+# =============================================================================
+# Step 1: Discover All Expected Tests
+# =============================================================================
+# Prints the sorted list of every integration test name on stdout.
+discover_all_tests() {
+    log_info "Discovering all expected integration tests..."
+
+    ALL_TESTS=$(go test -tags integration -list "Test.*" "$TEST_PACKAGE" 2>/dev/null | grep "^Test" | sort)
+
+    if [ -z "$ALL_TESTS" ]; then
+        log_error "No tests discovered!"
+        exit 1
+    fi
+
+    TOTAL_EXPECTED=$(echo "$ALL_TESTS" | wc -l | tr -d ' ')
+    log_info "Expected total tests: ${TOTAL_EXPECTED}"
+
+    echo "$ALL_TESTS"
+}
+
+# =============================================================================
+# Step 2: Collect Results from All Shards
+# =============================================================================
+# Aggregates every shard's manifest and test-list files. Results are returned
+# via globals: SHARDS_FOUND, SHARDS_PASSED, TOTAL_ASSIGNED, TOTAL_PASSED,
+# TOTAL_FAILED, ALL_EXECUTED_TESTS, ALL_FAILED_TESTS, ALL_ASSIGNED_TESTS.
+collect_shard_results() {
+    log_info "Collecting results from ${TOTAL_SHARDS} shards..."
+
+    local all_executed=""
+    local all_failed=""
+    local all_assigned=""
+    local total_passed=0
+    local total_failed=0
+    local total_assigned=0
+    local shards_found=0
+    local shards_passed=0
+
+    for shard in $(seq 1 "$TOTAL_SHARDS"); do
+        local shard_dir="${ARTIFACTS_DIR}/shard_${shard}_of_${TOTAL_SHARDS}"
+        local manifest="${shard_dir}/manifest.json"
+
+        if [ ! -f "$manifest" ]; then
+            log_error "Manifest not found for shard ${shard}: ${manifest}"
+            continue
+        fi
+
+        shards_found=$((shards_found + 1))
+
+        # Parse manifest; default to 0 so a malformed manifest ("null") does
+        # not break the shell arithmetic below.
+        local shard_assigned=$(jq -r '.tests_assigned // 0' "$manifest")
+        local shard_passed=$(jq -r '.tests_passed // 0' "$manifest")
+        local shard_failed=$(jq -r '.tests_failed // 0' "$manifest")
+        local shard_status=$(jq -r '.status // "unknown"' "$manifest")
+
+        log_info "Shard ${shard}: assigned=${shard_assigned}, passed=${shard_passed}, failed=${shard_failed}, status=${shard_status}"
+
+        # Aggregate counts
+        total_assigned=$((total_assigned + shard_assigned))
+        total_passed=$((total_passed + shard_passed))
+        total_failed=$((total_failed + shard_failed))
+
+        if [ "$shard_status" = "passed" ]; then
+            shards_passed=$((shards_passed + 1))
+        fi
+
+        # Collect executed tests
+        local executed_file="${shard_dir}/executed_tests.txt"
+        if [ -f "$executed_file" ]; then
+            if [ -n "$all_executed" ]; then
+                all_executed="${all_executed}"$'\n'"$(cat "$executed_file")"
+            else
+                all_executed=$(cat "$executed_file")
+            fi
+        fi
+
+        # Collect failed tests
+        local failed_file="${shard_dir}/failed_tests.txt"
+        if [ -f "$failed_file" ] && [ -s "$failed_file" ]; then
+            if [ -n "$all_failed" ]; then
+                all_failed="${all_failed}"$'\n'"$(cat "$failed_file")"
+            else
+                all_failed=$(cat "$failed_file")
+            fi
+        fi
+
+        # Collect assigned tests
+        local assigned_file="${shard_dir}/test_list.txt"
+        if [ -f "$assigned_file" ]; then
+            if [ -n "$all_assigned" ]; then
+                all_assigned="${all_assigned}"$'\n'"$(cat "$assigned_file")"
+            else
+                all_assigned=$(cat "$assigned_file")
+            fi
+        fi
+    done
+
+    # Return results via global variables
+    SHARDS_FOUND=$shards_found
+    SHARDS_PASSED=$shards_passed
+    TOTAL_ASSIGNED=$total_assigned
+    TOTAL_PASSED=$total_passed
+    TOTAL_FAILED=$total_failed
+    ALL_EXECUTED_TESTS="$all_executed"
+    ALL_FAILED_TESTS="$all_failed"
+    ALL_ASSIGNED_TESTS="$all_assigned"
+}
+
+# =============================================================================
+# Step 3: Validate All Tests Were Executed
+# =============================================================================
+# Cross-checks expected vs assigned vs executed test sets and sets the
+# globals MISSING_FROM_ASSIGNMENT, NOT_EXECUTED and EXTRA_TESTS. Returns
+# non-zero if any expected test was not assigned or not executed.
+validate_test_coverage() {
+    local expected_tests="$1"
+
+    log_info "Validating all tests were executed..."
+
+    local expected_sorted=$(echo "$expected_tests" | sort | uniq)
+    local executed_sorted=$(echo "$ALL_EXECUTED_TESTS" | sort | uniq)
+    local assigned_sorted=$(echo "$ALL_ASSIGNED_TESTS" | sort | uniq)
+
+    # grep -c already prints "0" when nothing matches; appending "|| echo 0"
+    # would produce "0\n0" because grep -c also exits non-zero on zero
+    # matches. "|| true" keeps set -e happy without duplicating the output.
+    local expected_count=$(echo "$expected_sorted" | grep -c "^Test" || true)
+    local executed_count=$(echo "$executed_sorted" | grep -c "^Test" || true)
+    local assigned_count=$(echo "$assigned_sorted" | grep -c "^Test" || true)
+
+    log_info "Expected: ${expected_count}, Assigned: ${assigned_count}, Executed: ${executed_count}"
+
+    # Find missing tests (expected but not assigned)
+    MISSING_FROM_ASSIGNMENT=$(comm -23 <(echo "$expected_sorted") <(echo "$assigned_sorted") | grep "^Test" || true)
+
+    # Find tests not executed (assigned but not executed)
+    NOT_EXECUTED=$(comm -23 <(echo "$assigned_sorted") <(echo "$executed_sorted") | grep "^Test" || true)
+
+    # Find extra tests (executed but not expected - shouldn't happen)
+    EXTRA_TESTS=$(comm -13 <(echo "$expected_sorted") <(echo "$executed_sorted") | grep "^Test" || true)
+
+    local validation_passed=true
+
+    if [ -n "$MISSING_FROM_ASSIGNMENT" ]; then
+        log_error "Tests missing from shard assignment:"
+        echo "$MISSING_FROM_ASSIGNMENT" | head -20
+        validation_passed=false
+    fi
+
+    if [ -n "$NOT_EXECUTED" ]; then
+        log_error "Tests assigned but not executed:"
+        echo "$NOT_EXECUTED" | head -20
+        validation_passed=false
+    fi
+
+    if [ -n "$EXTRA_TESTS" ]; then
+        log_warning "Extra tests executed (not in expected list):"
+        echo "$EXTRA_TESTS" | head -10
+    fi
+
+    if [ "$validation_passed" = true ]; then
+        log_success "All expected tests were executed!"
+        return 0
+    else
+        return 1
+    fi
+}
+
+# =============================================================================
+# Step 4: Merge Coverage Reports
+# =============================================================================
+merge_coverage() {
+    log_info "Merging coverage reports..."
+ + local coverage_files="" + + for shard in $(seq 1 "$TOTAL_SHARDS"); do + local coverage_file="${ARTIFACTS_DIR}/shard_${shard}_of_${TOTAL_SHARDS}/cover.out" + if [ -f "$coverage_file" ]; then + if [ -n "$coverage_files" ]; then + coverage_files="${coverage_files} ${coverage_file}" + else + coverage_files="${coverage_file}" + fi + fi + done + + if [ -z "$coverage_files" ]; then + log_warning "No coverage files found to merge" + return 0 + fi + + # Check if gocovmerge is available + if command -v gocovmerge &> /dev/null; then + log_info "Merging with gocovmerge: ${coverage_files}" + gocovmerge $coverage_files > "$MERGED_COVERAGE" + else + log_warning "gocovmerge not found, using first coverage file" + cp $(echo "$coverage_files" | awk '{print $1}') "$MERGED_COVERAGE" + fi + + # Generate HTML report + if [ -f "$MERGED_COVERAGE" ]; then + go tool cover -html="$MERGED_COVERAGE" -o "$FINAL_COVERAGE_HTML" 2>/dev/null || true + + # Calculate coverage percentage + COVERAGE_PCT=$(go tool cover -func "$MERGED_COVERAGE" | grep total | awk '{print substr($3, 1, length($3)-1)}') + log_info "Total coverage: ${COVERAGE_PCT}%" + fi +} + +# ============================================================================= +# Step 5: Check Coverage Threshold +# ============================================================================= +check_coverage_threshold() { + if [ -z "${COVERAGE_PCT:-}" ]; then + log_warning "Coverage percentage not available" + return 0 + fi + + log_info "Checking coverage threshold: ${COVERAGE_PCT}% >= ${EXPECTED_COVERAGE}%" + + local result=$(awk "BEGIN {print ($COVERAGE_PCT < $EXPECTED_COVERAGE)}") + if [ "$result" -eq 1 ]; then + log_error "Coverage ${COVERAGE_PCT}% is below threshold ${EXPECTED_COVERAGE}%" + return 1 + else + log_success "Coverage ${COVERAGE_PCT}% meets threshold ${EXPECTED_COVERAGE}%" + return 0 + fi +} + +# ============================================================================= +# Step 6: Generate Reports +# 
============================================================================= +generate_reports() { + local expected_count="$1" + local validation_status="$2" + local coverage_status="$3" + + # JSON Report + local missing_json="[]" + local not_executed_json="[]" + local failed_json="[]" + + if [ -n "$MISSING_FROM_ASSIGNMENT" ]; then + missing_json=$(echo "$MISSING_FROM_ASSIGNMENT" | jq -R -s 'split("\n") | map(select(length > 0))') + fi + + if [ -n "$NOT_EXECUTED" ]; then + not_executed_json=$(echo "$NOT_EXECUTED" | jq -R -s 'split("\n") | map(select(length > 0))') + fi + + if [ -n "$ALL_FAILED_TESTS" ]; then + failed_json=$(echo "$ALL_FAILED_TESTS" | jq -R -s 'split("\n") | map(select(length > 0))') + fi + + local final_status="passed" + if [ "$validation_status" != "0" ] || [ "$coverage_status" != "0" ] || [ "$TOTAL_FAILED" -gt 0 ]; then + final_status="failed" + fi + + cat > "$VALIDATION_REPORT" << EOF +{ + "total_shards": ${TOTAL_SHARDS}, + "shards_found": ${SHARDS_FOUND}, + "shards_passed": ${SHARDS_PASSED}, + "expected_tests": ${expected_count}, + "assigned_tests": ${TOTAL_ASSIGNED}, + "passed_tests": ${TOTAL_PASSED}, + "failed_tests": ${TOTAL_FAILED}, + "coverage_percentage": ${COVERAGE_PCT:-0}, + "coverage_threshold": ${EXPECTED_COVERAGE}, + "validation_status": "${validation_status}", + "coverage_status": "${coverage_status}", + "final_status": "${final_status}", + "missing_from_assignment": ${missing_json}, + "not_executed": ${not_executed_json}, + "failed_tests_list": ${failed_json}, + "timestamp": "$(date -u +"%Y-%m-%dT%H:%M:%SZ")" +} +EOF + + # Markdown Summary + cat > "$SUMMARY_FILE" << EOF +# Integration Test Summary + +## Overview +| Metric | Value | +|--------|-------| +| Total Shards | ${TOTAL_SHARDS} | +| Shards Completed | ${SHARDS_FOUND} | +| Shards Passed | ${SHARDS_PASSED} | +| Expected Tests | ${expected_count} | +| Assigned Tests | ${TOTAL_ASSIGNED} | +| Passed Tests | ${TOTAL_PASSED} | +| Failed Tests | ${TOTAL_FAILED} | +| Coverage | 
${COVERAGE_PCT:-N/A}% | +| Threshold | ${EXPECTED_COVERAGE}% | +| **Final Status** | **${final_status^^}** | + +## Validation Results +- Test Assignment: $([ "$validation_status" = "0" ] && echo "PASS" || echo "FAIL") +- Coverage Threshold: $([ "$coverage_status" = "0" ] && echo "PASS" || echo "FAIL") +- All Tests Executed: $([ "$TOTAL_ASSIGNED" -eq "$TOTAL_PASSED" ] && echo "PASS" || echo "FAIL") +EOF + + if [ -n "$ALL_FAILED_TESTS" ]; then + echo "" >> "$SUMMARY_FILE" + echo "## Failed Tests" >> "$SUMMARY_FILE" + echo '```' >> "$SUMMARY_FILE" + echo "$ALL_FAILED_TESTS" >> "$SUMMARY_FILE" + echo '```' >> "$SUMMARY_FILE" + fi + + if [ -n "$MISSING_FROM_ASSIGNMENT" ]; then + echo "" >> "$SUMMARY_FILE" + echo "## Missing Tests (Not Assigned to Any Shard)" >> "$SUMMARY_FILE" + echo '```' >> "$SUMMARY_FILE" + echo "$MISSING_FROM_ASSIGNMENT" >> "$SUMMARY_FILE" + echo '```' >> "$SUMMARY_FILE" + fi + + log_info "Reports generated: ${VALIDATION_REPORT}, ${SUMMARY_FILE}" +} + +# ============================================================================= +# Step 7: Cleanup Test Data +# ============================================================================= +run_cleanup() { + log_info "Running test data cleanup..." 
+ + set +e + go test -v github.com/checkmarx/ast-cli/test/cleandata 2>&1 || true + set -e +} + +# ============================================================================= +# Main Execution +# ============================================================================= +main() { + local exit_code=0 + + log_info "========================================" + log_info "Integration Test Validation" + log_info "========================================" + log_info "Total Shards: ${TOTAL_SHARDS}" + log_info "Expected Coverage: ${EXPECTED_COVERAGE}%" + + # Discover expected tests + EXPECTED_TESTS=$(discover_all_tests) + EXPECTED_COUNT=$(echo "$EXPECTED_TESTS" | wc -l | tr -d ' ') + + # Collect shard results + collect_shard_results + + # Validate test coverage + set +e + validate_test_coverage "$EXPECTED_TESTS" + validation_status=$? + set -e + + # Merge coverage + merge_coverage + + # Check coverage threshold + set +e + check_coverage_threshold + coverage_status=$? + set -e + + # Generate reports + generate_reports "$EXPECTED_COUNT" "$validation_status" "$coverage_status" + + # Run cleanup + run_cleanup + + # Determine final status + if [ "$validation_status" -ne 0 ]; then + log_error "Validation failed - not all tests were executed!" + exit_code=1 + fi + + if [ "$coverage_status" -ne 0 ]; then + log_error "Coverage threshold not met!" + exit_code=1 + fi + + if [ "$TOTAL_FAILED" -gt 0 ]; then + log_error "${TOTAL_FAILED} tests failed!" + exit_code=1 + fi + + if [ "$SHARDS_FOUND" -ne "$TOTAL_SHARDS" ]; then + log_error "Not all shards completed! 
Found ${SHARDS_FOUND}/${TOTAL_SHARDS}" + exit_code=1 + fi + + # Final summary + log_info "========================================" + log_info "Final Summary" + log_info "========================================" + log_info "Expected: ${EXPECTED_COUNT} tests" + log_info "Executed: ${TOTAL_PASSED} passed, ${TOTAL_FAILED} failed" + log_info "Coverage: ${COVERAGE_PCT:-N/A}%" + + if [ "$exit_code" -eq 0 ]; then + log_success "All validations passed!" + else + log_error "Validation failed!" + cat "$SUMMARY_FILE" + fi + + exit $exit_code +} + +main "$@"