# Workflow file captured from the GitHub Actions "Workflow file for this run" page
# Run context: Merge pull request #2941 from mrveiss/issue-2496 (#1982)

# AutoBot CI/CD Pipeline
# Uses self-hosted runner to avoid GitHub Actions quota limits
name: AutoBot CI/CD Pipeline
on:
push:
branches: [ main, Dev_new_gui ]
pull_request:
branches: [ main ]
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
security-tests:
runs-on: self-hosted
steps:
- uses: actions/checkout@v6
- name: Install Python 3.12 via deadsnakes PPA
run: |
if ! command -v python3.12 &> /dev/null; then
sudo add-apt-repository -y ppa:deadsnakes/ppa
sudo apt-get update -y
sudo apt-get install -y python3.12 python3.12-venv python3.12-dev
fi
- name: Free disk space and set up venv
run: |
pip cache purge 2>/dev/null || true
rm -rf .venv 2>/dev/null || true
python3.12 -m venv .venv
source .venv/bin/activate
echo "VIRTUAL_ENV=$VIRTUAL_ENV" >> $GITHUB_ENV
echo "$VIRTUAL_ENV/bin" >> $GITHUB_PATH
- name: Install Python dependencies
run: |
source .venv/bin/activate
python -m pip install --upgrade pip setuptools wheel
echo "Installing CI-safe requirements..."
python -m pip install -r requirements-ci.txt --prefer-binary
echo "Installing testing dependencies..."
python -m pip install pytest pytest-asyncio pytest-cov flake8 black isort mypy bandit
- name: Create necessary directories and config files
run: |
mkdir -p data logs static config
touch data/.gitkeep logs/.gitkeep static/.gitkeep
# Create minimal config.yaml for CI testing
cat > config/config.yaml << 'EOF'
# CI Testing Configuration
llm:
orchestrator_llm: "mock"
task_llm: "mock"
ollama:
model: "mock-model"
models: {}
unified:
embedding:
providers:
ollama:
selected_model: "mock-embed"
deployment:
mode: "local"
data:
reliability_stats_file: "data/reliability_stats.json"
diagnostics:
enabled: false
use_llm_for_analysis: false
use_web_search_for_analysis: false
auto_apply_fixes: false
redis:
host: "localhost"
port: 6379
db: 0
EOF
- name: Run code quality checks
run: |
source .venv/bin/activate
echo "Running code quality checks..."
# Code formatting check (matches pre-commit black config)
echo "Checking code formatting with black..."
black --check autobot-backend/ autobot-slm-backend/ autobot-shared/ --line-length=88
# Import sorting check — reads pyproject.toml for profile, src_paths, known_first_party (#2679)
echo "Checking import sorting with isort..."
isort --check-only --settings-path=. autobot-backend/ autobot-slm-backend/ autobot-shared/
# Linting (uses project .flake8 config — same as pre-commit)
echo "Running flake8 linter..."
flake8 --config=.flake8 autobot-backend/ autobot-slm-backend/ autobot-shared/
- name: Run security analysis
run: |
source .venv/bin/activate
echo "🔒 Running security analysis..."
# Security vulnerability scan — fail on medium+ severity/confidence
bandit -r autobot-backend/ autobot-slm-backend/ autobot-shared/ \
--severity-level medium --confidence-level medium \
-f json -o bandit-report.json
if [ -f bandit-report.json ]; then
echo "Security report generated"
python -m json.tool < bandit-report.json || true
fi
- name: Run unit tests for security modules
run: |
source .venv/bin/activate
echo "🧪 Running security unit tests..."
# Tests are colocated with source files (#734)
TEST_FILES=""
for file in autobot-backend/security/secure_command_executor_test.py autobot-backend/security/enhanced_security_layer_test.py autobot-backend/api/security_api_test.py; do
if [ -f "$file" ]; then
TEST_FILES="$TEST_FILES $file"
fi
done
if [ -n "$TEST_FILES" ]; then
python -m pytest $TEST_FILES -v --tb=short --cov=autobot-backend --cov-report=xml --cov-report=term
else
echo "⚠️ No security unit test files found - skipping"
fi
- name: Run integration tests
run: |
source .venv/bin/activate
echo "🔄 Running integration tests..."
# Integration tests remain in shared directory (#734)
TEST_DIR="infrastructure/shared/tests/integration"
if [ -d "$TEST_DIR" ]; then
python -m pytest "$TEST_DIR" -v --tb=short --maxfail=5
else
echo "⚠️ No integration test directory found - skipping"
fi
- name: Upload coverage reports
uses: codecov/codecov-action@v6
with:
files: ./coverage.xml
flags: unittests
name: codecov-umbrella
fail_ci_if_error: false
- name: Cleanup
if: always()
run: |
rm -rf .venv || true
frontend-tests:
runs-on: self-hosted
steps:
- uses: actions/checkout@v6
- name: Setup Node.js
uses: actions/setup-node@v6
with:
node-version: '20'
- name: Install frontend dependencies
run: |
cd autobot-frontend
npm ci
- name: Run frontend linting
run: |
cd autobot-frontend
npm run lint
- name: Run frontend type checking
run: |
cd autobot-frontend
npx vue-tsc --noEmit -p tsconfig.app.json
- name: Build frontend
run: |
cd autobot-frontend
npm run build
- name: Run frontend unit tests
run: |
cd autobot-frontend
npm run test:unit
- name: Cleanup
if: always()
run: |
rm -rf autobot-frontend/node_modules autobot-frontend/dist || true
deployment-check:
runs-on: self-hosted
needs: [security-tests, frontend-tests]
if: github.ref == 'refs/heads/main' || github.ref == 'refs/heads/Dev_new_gui'
steps:
- uses: actions/checkout@v6
- name: Install Python 3.12 via deadsnakes PPA
run: |
if ! command -v python3.12 &> /dev/null; then
sudo add-apt-repository -y ppa:deadsnakes/ppa
sudo apt-get update -y
sudo apt-get install -y python3.12 python3.12-venv python3.12-dev
fi
- name: Free disk space and set up venv
run: |
pip cache purge 2>/dev/null || true
rm -rf .venv 2>/dev/null || true
python3.12 -m venv .venv
source .venv/bin/activate
echo "VIRTUAL_ENV=$VIRTUAL_ENV" >> $GITHUB_ENV
echo "$VIRTUAL_ENV/bin" >> $GITHUB_PATH
- name: Install dependencies
run: |
source .venv/bin/activate
python -m pip install --upgrade pip setuptools wheel
python -m pip install -r requirements-ci.txt --prefer-binary
- name: Create necessary directories and config files
run: |
mkdir -p data logs static config
touch data/.gitkeep logs/.gitkeep static/.gitkeep
# Create minimal config.yaml for CI testing
cat > config/config.yaml << 'EOF'
# CI Testing Configuration
llm:
orchestrator_llm: "mock"
task_llm: "mock"
ollama:
model: "mock-model"
models: {}
unified:
embedding:
providers:
ollama:
selected_model: "mock-embed"
deployment:
mode: "local"
data:
reliability_stats_file: "data/reliability_stats.json"
diagnostics:
enabled: false
use_llm_for_analysis: false
use_web_search_for_analysis: false
auto_apply_fixes: false
redis:
host: "localhost"
port: 6379
db: 0
EOF
- name: Test production configuration
run: |
source .venv/bin/activate
echo "🚀 Testing production readiness..."
# Check that all required files exist
echo "Checking required files..."
test -f main.py || (echo "❌ main.py missing" && exit 1)
test -f requirements.txt || (echo "❌ requirements.txt missing" && exit 1)
echo "✅ All required files present"
# Test configuration loading (PYTHONPATH includes backend for imports)
PYTHONPATH="autobot-backend:autobot-shared:$PYTHONPATH" \
python3 -c 'from config import config; print("Configuration system working")'
# Test core imports
PYTHONPATH="autobot-backend:autobot-shared:$PYTHONPATH" \
python3 -c 'from security.enhanced_security_layer import EnhancedSecurityLayer; from security.secure_command_executor import SecureCommandExecutor; from app_factory import create_app; print("Core imports working")'
- name: Generate deployment artifact
run: |
echo "📦 Generating deployment summary..."
echo "# AutoBot Deployment Summary" > DEPLOYMENT_SUMMARY.md
echo "Generated at: $(date -u '+%Y-%m-%d %H:%M:%S UTC')" >> DEPLOYMENT_SUMMARY.md
echo "Commit: $GITHUB_SHA" >> DEPLOYMENT_SUMMARY.md
echo "Branch: $GITHUB_REF_NAME" >> DEPLOYMENT_SUMMARY.md
echo "" >> DEPLOYMENT_SUMMARY.md
echo "## Test Results" >> DEPLOYMENT_SUMMARY.md
echo "- ✅ Security tests passed" >> DEPLOYMENT_SUMMARY.md
echo "- ✅ Integration tests passed" >> DEPLOYMENT_SUMMARY.md
echo "- ✅ Frontend build successful" >> DEPLOYMENT_SUMMARY.md
echo "" >> DEPLOYMENT_SUMMARY.md
echo "## Security Features" >> DEPLOYMENT_SUMMARY.md
echo "- Command execution sandboxing ✅" >> DEPLOYMENT_SUMMARY.md
echo "- Risk assessment system ✅" >> DEPLOYMENT_SUMMARY.md
echo "- Audit logging ✅" >> DEPLOYMENT_SUMMARY.md
echo "- Role-based access control ✅" >> DEPLOYMENT_SUMMARY.md
echo "- API security endpoints ✅" >> DEPLOYMENT_SUMMARY.md
cat DEPLOYMENT_SUMMARY.md
- name: Upload deployment artifact
uses: actions/upload-artifact@v7
with:
name: deployment-summary
path: DEPLOYMENT_SUMMARY.md
- name: Cleanup
if: always()
run: |
rm -rf .venv || true
notify:
runs-on: self-hosted
needs: [security-tests, frontend-tests, deployment-check]
if: always()
steps:
- name: Notify results
run: |
echo "🎯 CI/CD Pipeline Results:"
echo "=========================="
if [ "${{ needs.security-tests.result }}" == "success" ]; then
echo "✅ Security tests: PASSED"
else
echo "❌ Security tests: FAILED"
fi
if [ "${{ needs.frontend-tests.result }}" == "success" ]; then
echo "✅ Frontend tests: PASSED"
else
echo "❌ Frontend tests: FAILED"
fi
if [ "${{ needs.deployment-check.result }}" == "success" ]; then
echo "✅ Deployment check: PASSED"
else
echo "❌ Deployment check: FAILED"
fi
echo ""
if [ "${{ needs.security-tests.result }}" == "success" ] &&
[ "${{ needs.frontend-tests.result }}" == "success" ] &&
[ "${{ needs.deployment-check.result }}" == "success" ]; then
echo "🎉 All checks passed! AutoBot is ready for deployment."
else
echo "⚠️ Some checks failed. Review the logs above."
fi