name: Daily Benchmark
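# Pipeline: setup builds the TypeScript helper scripts once, the benchmark matrix runs
# one leg per tool, analysis combines and compares the results, and publish commits the
# README update and creates or updates the GitHub release.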
on:
schedule:
- cron: '0 6 * * *' # Run daily at 6 AM UTC
workflow_dispatch: # Allow manual triggers
inputs:
skip_benchmark:
description: 'Skip benchmark job (for testing analysis/publish only)'
required: false
default: false
type: boolean
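# Note: on scheduled runs the skip_benchmark input is empty, so `!inputs.skip_benchmark`
# evaluates to true and the benchmark matrix runs as usual.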
jobs:
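# setup: install dependencies with a pnpm store cache, compile the TypeScript scripts,
# and hand them to the later jobs as a one-day artifact.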
setup:
runs-on: ubuntu-latest
outputs:
cache-hit: ${{ steps.cache.outputs.cache-hit }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
token: ${{ secrets.GITHUB_TOKEN }}
fetch-depth: 0
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '18'
- name: Install pnpm
uses: pnpm/action-setup@v4
- name: Get pnpm store directory
shell: bash
run: |
echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
- name: Setup pnpm cache
id: cache
uses: actions/cache@v4
with:
path: ${{ env.STORE_PATH }}
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
${{ runner.os }}-pnpm-store-
- name: Install dependencies
run: pnpm install
- name: Build TypeScript scripts
run: pnpm run build:scripts
- name: Upload built scripts
uses: actions/upload-artifact@v4
with:
name: built-scripts
path: dist/
retention-days: 1
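# benchmark: one matrix leg per monorepo tool; fail-fast is disabled so a failing tool
# does not cancel the other legs. Each leg uploads its own results artifact.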
benchmark:
runs-on: ubuntu-latest
needs: setup
if: ${{ !inputs.skip_benchmark }}
strategy:
fail-fast: false
matrix:
tool: [nx, turbo, lerna, lage, moon]
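# All matrix legs write to the same job outputs; the last leg to set a value wins.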
outputs:
results-available: ${{ steps.benchmark.conclusion == 'success' }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '18'
- name: Install pnpm
uses: pnpm/action-setup@v4
- name: Get pnpm store directory
shell: bash
run: |
echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
- name: Restore pnpm cache
uses: actions/cache@v4
with:
path: ${{ env.STORE_PATH }}
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
${{ runner.os }}-pnpm-store-
- name: Install dependencies
run: pnpm install
- name: Download built scripts
uses: actions/download-artifact@v4
with:
name: built-scripts
path: dist/
- name: Run benchmark for ${{ matrix.tool }}
id: benchmark
run: |
node dist/scripts/benchmark-single-tool.js ${{ matrix.tool }} --output json > benchmark-results-${{ matrix.tool }}.json
- name: Upload benchmark results for ${{ matrix.tool }}
uses: actions/upload-artifact@v4
with:
name: benchmark-results-${{ matrix.tool }}
path: benchmark-results-${{ matrix.tool }}.json
retention-days: 1
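# analysis: merge the per-tool results, compare them against the previous run, update the
# README, and prepare the tag/release metadata consumed by the publish job.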
analysis:
runs-on: ubuntu-latest
needs: [setup, benchmark]
if: ${{ always() && (inputs.skip_benchmark || needs.benchmark.outputs.results-available != 'false') }}
outputs:
readme-updated: ${{ steps.compare.outputs.readme-updated }}
performance-regression: ${{ steps.compare.outputs.performance-regression }}
tag-name: ${{ steps.release-info.outputs.tag-name }}
release-name: ${{ steps.release-info.outputs.release-name }}
release-exists: ${{ steps.release-info.outputs.release-exists }}
description-file: ${{ steps.release-info.outputs.description-file }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: '18'
- name: Install pnpm
uses: pnpm/action-setup@v4
- name: Download built scripts
uses: actions/download-artifact@v4
with:
name: built-scripts
path: dist/
- name: Download benchmark results
if: ${{ !inputs.skip_benchmark }}
uses: actions/download-artifact@v4
with:
pattern: benchmark-results-*
merge-multiple: true
- name: Combine benchmark results
if: ${{ !inputs.skip_benchmark }}
run: |
node dist/scripts/combine-results.js --output json > benchmark-results.json
- name: Read current results
if: ${{ !inputs.skip_benchmark }}
run: |
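# GITHUB_ENV heredoc: everything between "RESULTS_JSON<<EOF" and "EOF" becomes the
# multi-line RESULTS_JSON environment variable for later steps.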
echo "RESULTS_JSON<<EOF" >> $GITHUB_ENV
cat benchmark-results.json >> $GITHUB_ENV
echo "EOF" >> $GITHUB_ENV
- name: Set empty results when benchmark skipped
if: ${{ inputs.skip_benchmark }}
run: |
echo "RESULTS_JSON={}" >> $GITHUB_ENV
- name: Read previous results
id: previous-results
run: |
if [ -f "previous-benchmark-results.json" ]; then
echo "PREVIOUS_RESULTS_JSON<<EOF" >> $GITHUB_ENV
cat previous-benchmark-results.json >> $GITHUB_ENV
echo "EOF" >> $GITHUB_ENV
else
echo "PREVIOUS_RESULTS_JSON={}" >> $GITHUB_ENV
fi
- name: Compare results and update README
id: compare
run: |
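# The script prints key=value lines; grep them into step outputs for downstream jobs.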
output=$(node dist/scripts/compare-and-update-readme.js)
echo "$output"
if echo "$output" | grep -q "readme-updated=true"; then
echo "readme-updated=true" >> $GITHUB_OUTPUT
else
echo "readme-updated=false" >> $GITHUB_OUTPUT
fi
if echo "$output" | grep -q "performance-regression=true"; then
echo "performance-regression=true" >> $GITHUB_OUTPUT
else
echo "performance-regression=false" >> $GITHUB_OUTPUT
fi
env:
CURRENT_RESULTS: ${{ env.RESULTS_JSON }}
PREVIOUS_RESULTS: ${{ env.PREVIOUS_RESULTS_JSON }}
- name: Save current results
if: ${{ !inputs.skip_benchmark }}
run: |
cp benchmark-results.json previous-benchmark-results.json
- name: Prepare release information
id: release-info
run: |
output=$(node dist/scripts/create-release.js)
echo "$output"
# Parse outputs
tag_name=$(echo "$output" | grep "tag-name=" | cut -d'=' -f2)
release_name=$(echo "$output" | grep "release-name=" | cut -d'=' -f2)
release_exists=$(echo "$output" | grep "release-exists=" | cut -d'=' -f2)
description_file=$(echo "$output" | grep "release-description-file=" | cut -d'=' -f2)
echo "tag-name=$tag_name" >> $GITHUB_OUTPUT
echo "release-name=$release_name" >> $GITHUB_OUTPUT
echo "release-exists=$release_exists" >> $GITHUB_OUTPUT
echo "description-file=$description_file" >> $GITHUB_OUTPUT
env:
BENCHMARK_RESULTS: ${{ env.RESULTS_JSON }}
- name: Upload analysis artifacts
uses: actions/upload-artifact@v4
with:
name: analysis-results
path: |
previous-benchmark-results.json
${{ steps.release-info.outputs.description-file }}
README.md
retention-days: 1
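# publish: runs even when the benchmark matrix was skipped (always()), but only if the
# analysis job reported a README change; commits the update and publishes the release.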
publish:
runs-on: ubuntu-latest
needs: [benchmark, analysis]
if: always() && needs.analysis.outputs.readme-updated == 'true'
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
token: ${{ secrets.PAT_TOKEN }}
fetch-depth: 0
- name: Download analysis results
uses: actions/download-artifact@v4
with:
name: analysis-results
- name: Download benchmark results
if: ${{ !inputs.skip_benchmark }}
uses: actions/download-artifact@v4
with:
pattern: benchmark-results-*
merge-multiple: true
- name: Combine benchmark results for commit
if: ${{ !inputs.skip_benchmark }}
run: |
node dist/scripts/combine-results.js --output json > benchmark-results.json
- name: Commit and push changes
if: needs.analysis.outputs.readme-updated == 'true'
run: |
git config --local user.email "action@github.com"
git config --local user.name "GitHub Action"
git add README.md previous-benchmark-results.json
git commit -m "📊 Update benchmark results - $(date '+%Y-%m-%d') [${{ needs.analysis.outputs.tag-name }}]"
git pull --rebase origin main
git push origin main
- name: Create or update GitHub release
uses: actions/github-script@v7
with:
github-token: ${{ secrets.PAT_TOKEN }}
script: |
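// Values from needs.analysis are interpolated by the workflow before this script runs.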
const fs = require('fs');
const tagName = '${{ needs.analysis.outputs.tag-name }}';
const releaseName = '${{ needs.analysis.outputs.release-name }}';
const tagExists = '${{ needs.analysis.outputs.release-exists }}' === 'true';
const descriptionFile = '${{ needs.analysis.outputs.description-file }}';
let releaseBody = 'No benchmark results available.';
try {
releaseBody = fs.readFileSync(descriptionFile, 'utf8');
} catch (error) {
console.log('Could not read release description file:', error.message);
}
// Always try to get existing release first, regardless of tag existence
let releaseExists = false;
let existingRelease = null;
try {
const { data: release } = await github.rest.repos.getReleaseByTag({
owner: context.repo.owner,
repo: context.repo.repo,
tag: tagName
});
existingRelease = release;
releaseExists = true;
console.log(`Found existing release with tag: ${tagName}`);
} catch (error) {
if (error.status === 404) {
console.log(`No existing release found for tag: ${tagName}`);
releaseExists = false;
} else {
console.log('Error checking for existing release:', error.message);
throw error;
}
}
if (releaseExists && existingRelease) {
console.log(`Updating existing release with tag: ${tagName}`);
await github.rest.repos.updateRelease({
owner: context.repo.owner,
repo: context.repo.repo,
release_id: existingRelease.id,
name: releaseName,
body: releaseBody,
prerelease: false
});
console.log('Successfully updated existing release');
} else {
console.log(`Creating new release with tag: ${tagName}`);
await github.rest.repos.createRelease({
owner: context.repo.owner,
repo: context.repo.repo,
tag_name: tagName,
name: releaseName,
body: releaseBody,
draft: false,
prerelease: false
});
console.log('Successfully created new release');
}
- name: Create issue on significant performance regression
if: needs.analysis.outputs.performance-regression == 'true'
uses: actions/github-script@v7
with:
script: |
const fs = require('fs');
const title = '⚠️ Performance Regression Detected';
const releaseTag = '${{ needs.analysis.outputs.tag-name }}';
let currentResults = 'Not available';
let previousResults = 'Not available';
try {
currentResults = fs.readFileSync('benchmark-results.json', 'utf8');
} catch (error) {
console.log('Could not read current results:', error.message);
}
try {
previousResults = fs.readFileSync('previous-benchmark-results.json', 'utf8');
} catch (error) {
console.log('Could not read previous results:', error.message);
}
const body = `
A significant performance regression has been detected in today's benchmark run.
**Release with results:** [${releaseTag}](https://github.com/${context.repo.owner}/${context.repo.repo}/releases/tag/${releaseTag})
**Current Results:**
\`\`\`json
${currentResults}
\`\`\`
**Previous Results:**
\`\`\`json
${previousResults}
\`\`\`
Please investigate the cause of this performance change.
`;
await github.rest.issues.create({
owner: context.repo.owner,
repo: context.repo.repo,
title: title,
body: body,
labels: ['performance', 'regression']
});