diff --git a/.github/workflows/atex-build.yaml b/.github/workflows/atex-build.yaml
new file mode 100644
index 000000000000..21f308113f09
--- /dev/null
+++ b/.github/workflows/atex-build.yaml
@@ -0,0 +1,88 @@
+name: ATEX - Build Content
+
+on:
+ pull_request:
+ types: [opened, synchronize, reopened]
+
+env:
+ ARTIFACT_RETENTION_DAYS: 1
+
+permissions:
+ contents: read
+
+jobs:
+ build_content:
+ name: Build content for CentOS Stream ${{ matrix.centos_stream_major }}
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ centos_stream_major: [8, 9, 10]
+ container:
+ image: fedora:latest
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.1
+
+ - name: Install system dependencies
+ run: |
+ dnf install -y \
+ cmake make openscap-utils python3-pyyaml \
+ bats ansible python3-pip ShellCheck git \
+ gcc gcc-c++ python3-devel libxml2-devel \
+ libxslt-devel python3-setuptools gawk
+
+ - name: Install Python dependencies
+ run: pip install -r requirements.txt -r test-requirements.txt
+
+ - name: Build content
+ env:
+ CENTOS_STREAM_MAJOR: ${{ matrix.centos_stream_major }}
+ run: |
+ rm -rf build
+ mkdir build
+ cd build
+
+ # Build configuration matching Contest and scap-security-guide.spec defaults
+ # Includes options required by tests to avoid rebuilds
+ cmake ../ \
+ -DCMAKE_BUILD_TYPE:STRING=Release \
+ -DSSG_CENTOS_DERIVATIVES_ENABLED:BOOL=ON \
+ -DSSG_PRODUCT_DEFAULT:BOOL=OFF \
+ "-DSSG_PRODUCT_RHEL${CENTOS_STREAM_MAJOR}:BOOL=ON" \
+ -DSSG_SCE_ENABLED:BOOL=ON \
+ -DSSG_BASH_SCRIPTS_ENABLED:BOOL=OFF \
+ -DSSG_BUILD_DISA_DELTA_FILES:BOOL=OFF \
+ -DSSG_SEPARATE_SCAP_FILES_ENABLED:BOOL=OFF \
+ -DSSG_ANSIBLE_PLAYBOOKS_PER_RULE_ENABLED:BOOL=ON
+
+ # Build using all available cores
+ cores=$(nproc) || cores=4
+ make "-j$cores"
+
+ # Clean up temporary metadata
+ rm -rf jinja2_cache
+
+ - name: Upload build artifacts
+ uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+ with:
+ name: content-centos-stream${{ matrix.centos_stream_major }}
+ path: .
+ retention-days: ${{ env.ARTIFACT_RETENTION_DAYS }}
+
+ save_pr_info:
+ name: Save PR information for workflow_run
+ runs-on: ubuntu-latest
+ steps:
+ - name: Save PR number and SHA
+ run: |
+ mkdir -p pr-info
+        echo "${{ github.event.pull_request.number }}" > pr-info/pr-number.txt
+        echo "${{ github.event.pull_request.head.sha }}" > pr-info/pr-sha.txt
+
+ - name: Upload PR info
+ uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+ with:
+ name: pr-info
+ path: pr-info/
+ retention-days: ${{ env.ARTIFACT_RETENTION_DAYS }}
diff --git a/.github/workflows/atex-test.yaml b/.github/workflows/atex-test.yaml
new file mode 100644
index 000000000000..c8d45cb50c46
--- /dev/null
+++ b/.github/workflows/atex-test.yaml
@@ -0,0 +1,310 @@
+name: ATEX - Test and Upload Results
+
+on:
+ workflow_run:
+ workflows: ["ATEX - Build Content"]
+ types:
+ - completed
+
+env:
+ ATEX_REPO: RHSecurityCompliance/atex-results-testing-farm
+ ATEX_HTML_REPO: RHSecurityCompliance/atex-html
+ CONTEST_REPO: RHSecurityCompliance/contest
+ ARTIFACT_RETENTION_DAYS: 1
+ TEST_TIMEOUT: 1440 # 24 hours
+
+permissions:
+ contents: read
+ actions: read
+ pull-requests: write
+ checks: write
+
+jobs:
+ # Only run if the build workflow succeeded
+ check_build:
+ runs-on: ubuntu-latest
+ if: ${{ github.event.workflow_run.conclusion == 'success' }}
+ outputs:
+ pr_number: ${{ steps.get_pr.outputs.pr_number }}
+ pr_sha: ${{ steps.get_pr.outputs.pr_sha }}
+ check_id: ${{ steps.create_check.outputs.check_id }}
+ steps:
+ - name: Download PR info
+ uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ run-id: ${{ github.event.workflow_run.id }}
+ name: pr-info
+ path: pr-info/
+
+ - name: Get PR number and SHA
+ id: get_pr
+ run: |
+ PR_NUMBER=$(cat pr-info/pr-number.txt)
+ PR_SHA=$(cat pr-info/pr-sha.txt)
+        echo "pr_number=${PR_NUMBER}" >> "$GITHUB_OUTPUT"
+        echo "pr_sha=${PR_SHA}" >> "$GITHUB_OUTPUT"
+ echo "PR Number: ${PR_NUMBER}"
+ echo "PR SHA: ${PR_SHA}"
+
+ - name: Create GitHub check run
+ uses: LouisBrunner/checks-action@6b626ffbad7cc56fd58627f774b9067e6118af23 # v2.0.0
+ id: create_check
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ name: ATEX - Test and Upload Results
+ status: in_progress
+ sha: ${{ steps.get_pr.outputs.pr_sha }}
+ output: |
+ {"summary":"Running ATEX tests: Job: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}","title":"ATEX Testing in Progress"}
+
+ test:
+ name: Test on CentOS Stream ${{ matrix.centos_stream_major }}
+ runs-on: ubuntu-latest
+ needs: check_build
+ strategy:
+ matrix:
+ centos_stream_major: [8, 9, 10]
+ container:
+ image: fedora:latest
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.1
+
+ - name: Download build artifacts
+ uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ run-id: ${{ github.event.workflow_run.id }}
+ name: content-centos-stream${{ matrix.centos_stream_major }}
+ path: content-centos-stream${{ matrix.centos_stream_major }}/
+
+ - name: Checkout Contest Test Suite
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.1
+ with:
+ repository: ${{ env.CONTEST_REPO }}
+ ref: main
+ path: contest
+ fetch-depth: 1
+
+ - name: Install test dependencies
+ run: |
+ dnf -y install python3-pip git rsync
+ pip install fmf git+https://github.com/RHSecurityCompliance/atex.git
+
+ - name: Run tests on Testing Farm
+ env:
+ TESTING_FARM_API_TOKEN: ${{ secrets.TESTING_FARM_API_TOKEN }}
+ CS_MAJOR: ${{ matrix.centos_stream_major }}
+ run: |
+ python3 tests/run_tests_testingfarm.py \
+ --contest-dir contest \
+ --content-dir content-centos-stream${CS_MAJOR} \
+ --plan "/plans/daily" \
+ --tests "/hardening/host-os/oscap/stig" \
+ --compose "CentOS-Stream-${CS_MAJOR}" \
+ --arch x86_64 \
+ --os-major-version "${CS_MAJOR}" \
+ --timeout ${{ env.TEST_TIMEOUT }}
+
+ - name: Upload test results
+ if: always()
+ uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+ with:
+ name: test-results-centos-stream${{ matrix.centos_stream_major }}
+ path: |
+ results-centos-stream-${{ matrix.centos_stream_major }}-x86_64.json.gz
+ files-centos-stream-${{ matrix.centos_stream_major }}-x86_64/
+ atex_debug.log.gz
+ retention-days: ${{ env.ARTIFACT_RETENTION_DAYS }}
+
+ upload:
+ name: Upload and publish test results
+ runs-on: ubuntu-latest
+ needs: [check_build, test]
+ if: always() # Run even if tests fail
+ container:
+ image: fedora:latest
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.1
+
+ - name: Install dependencies
+ if: always()
+ run: |
+ dnf -y install python3-pip git rsync
+ pip install fmf git+https://github.com/RHSecurityCompliance/atex.git
+
+ - name: Checkout ATEX results repository
+ if: always()
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.1
+ with:
+ repository: ${{ env.ATEX_REPO }}
+ ref: main
+ path: atex-results-testing-farm
+ token: ${{ secrets.ATEX_RESULTS_TF_REPO_TOKEN }}
+
+ - name: Initialize FMF metadata
+ if: always()
+ working-directory: atex-results-testing-farm
+ run: fmf init
+
+ - name: Create TMT dummy plan for artifact transport
+ if: always()
+ working-directory: atex-results-testing-farm
+ run: |
+ cat > main.fmf <<'EOF'
+ /dummy_plan:
+ discover:
+ how: shell
+ tests:
+ - name: /dummy_test
+ test: mv * "$TMT_TEST_DATA/."
+ execute:
+ how: tmt
+ EOF
+
+ # Download test results for all CentOS Stream versions
+ - name: Download test results - CentOS Stream 8
+ if: always()
+ uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
+ with:
+ name: test-results-centos-stream8
+ path: test-results/cs8/
+ continue-on-error: true
+
+ - name: Download test results - CentOS Stream 9
+ if: always()
+ uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
+ with:
+ name: test-results-centos-stream9
+ path: test-results/cs9/
+ continue-on-error: true
+
+ - name: Download test results - CentOS Stream 10
+ if: always()
+ uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
+ with:
+ name: test-results-centos-stream10
+ path: test-results/cs10/
+ continue-on-error: true
+
+ - name: Checkout ATEX HTML viewer
+ if: always()
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.1
+ with:
+ repository: ${{ env.ATEX_HTML_REPO }}
+ ref: main
+ path: atex-html
+
+ - name: Update HTML title with PR number
+ if: always()
+ run: |
+        sed \
+          "/<title>/s|>.*<|>Test outputs from PR #${{ needs.check_build.outputs.pr_number }} HTML<|" \
+          -i atex-html/index.html
+
+ - name: Merge test results from all versions
+ if: always()
+ run: |
+ mkdir -p atex-results-testing-farm/files_dir/
+
+ # Process and merge results for all CentOS Stream versions
+ for version in 8 9 10; do
+ results_file="test-results/cs${version}/results-centos-stream-${version}-x86_64.json.gz"
+ files_dir="test-results/cs${version}/files-centos-stream-${version}-x86_64"
+
+ if [ -f "${results_file}" ]; then
+ cat "${results_file}"
+ rm -f "${results_file}"
+ [ -d "${files_dir}" ] && cp -r "${files_dir}"/* atex-results-testing-farm/files_dir/
+ fi
+ done > results.json.gz
+
+ - name: Convert results to SQLite database
+ if: always()
+ run: |
+        python3 atex-html/json2db.py results.json.gz atex-results-testing-farm/results.sqlite.gz
+
+ - name: Prepare HTML results viewer
+ if: always()
+ run: |
+ cp -rf atex-html/index.html atex-html/sqljs/ atex-results-testing-farm/
+
+ - name: Commit and tag results in ATEX repository
+ if: always()
+ working-directory: atex-results-testing-farm
+ env:
+ GH_TOKEN: ${{ secrets.ATEX_RESULTS_TF_REPO_TOKEN }}
+ PR_NUMBER: ${{ needs.check_build.outputs.pr_number }}
+ run: |
+ git config user.name "openscap-ci[bot]"
+ git config user.email "openscap.ci@gmail.com"
+
+ git add .
+        git commit -m "Test outputs from PR #${PR_NUMBER}"
+        git tag "PR${PR_NUMBER}"
+        git push origin "PR${PR_NUMBER}"
+
+ - name: Submit results to Testing Farm
+ if: always()
+ id: testing_farm_request
+ env:
+ TESTING_FARM_API_TOKEN: ${{ secrets.TESTING_FARM_API_TOKEN }}
+ PR_NUMBER: ${{ needs.check_build.outputs.pr_number }}
+ run: |
+ python3 tests/submit_results_to_testing_farm.py \
+ --repo-url "https://github.com/${{ env.ATEX_REPO }}" \
+ --pr-number "${PR_NUMBER}" 2>&1 | tee tf_output.log
+
+ # Extract HTML link from output
+ html_link=$(grep -oP 'HTML: \K.*' tf_output.log || echo 'No HTML link found')
+ echo "HTML_LINK=${html_link}" >> "$GITHUB_OUTPUT"
+
+ - name: Find existing PR comment
+ if: always()
+ uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3
+ id: fc
+ with:
+ issue-number: ${{ needs.check_build.outputs.pr_number }}
+ comment-author: 'github-actions[bot]'
+ body-includes: ATEX Test Results
+
+ - name: Create or update PR comment with results
+ if: always()
+ uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9 # v4
+ with:
+ comment-id: ${{ steps.fc.outputs.comment-id }}
+ issue-number: ${{ needs.check_build.outputs.pr_number }}
+ body: |
+ ### ATEX Test Results
+
+ Test artifacts have been submitted to Testing Farm.
+
+ **Results:** [View Test Results](${{ steps.testing_farm_request.outputs.HTML_LINK }})
+ **Workflow Run:** [View Workflow Details](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }})
+
+ _This comment was automatically generated by the ATEX workflow._
+ edit-mode: replace
+
+ - name: Cleanup temporary tag
+ if: always()
+ working-directory: atex-results-testing-farm
+ env:
+ GH_TOKEN: ${{ secrets.ATEX_RESULTS_TF_REPO_TOKEN }}
+ PR_NUMBER: ${{ needs.check_build.outputs.pr_number }}
+ run: |
+        git push --delete origin "PR${PR_NUMBER}"
+
+ - name: Update GitHub check run
+ if: always()
+ uses: LouisBrunner/checks-action@6b626ffbad7cc56fd58627f774b9067e6118af23 # v2.0.0
+ with:
+ token: ${{ secrets.GITHUB_TOKEN }}
+ check_id: ${{ needs.check_build.outputs.check_id }}
+ sha: ${{ needs.check_build.outputs.pr_sha }}
+ status: completed
+ conclusion: ${{ job.status }}
+ output: |
+ {"summary":"ATEX tests completed. Job: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}. View results: ${{ steps.testing_farm_request.outputs.HTML_LINK }}","title":"ATEX Testing Complete"}
diff --git a/requirements.txt b/requirements.txt
index 6e74583d1401..63cc76272a09 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -17,3 +17,4 @@ prometheus_client
requests
compliance-trestle==3.10.4
pyopenssl>=23.2.0
+pcre2
diff --git a/tests/run_tests_testingfarm.py b/tests/run_tests_testingfarm.py
new file mode 100644
index 000000000000..1d481c3ecc66
--- /dev/null
+++ b/tests/run_tests_testingfarm.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python3
+
+import sys
+import time
+import gzip
+import logging
+import argparse
+import contextlib
+from pathlib import Path
+
+from atex.provisioner.testingfarm import TestingFarmProvisioner
+from atex.orchestrator.contest import ContestOrchestrator
+from atex.aggregator.json import JSONAggregator
+from atex.fmf import FMFTests
+
+logger = logging.getLogger("ATEX")
+
+
+def parse_args():
+ """Parse command-line arguments."""
+ parser = argparse.ArgumentParser(description="Run tests on Testing Farm using atex")
+ parser.add_argument("--contest-dir", required=True, help="Path to contest repository")
+ parser.add_argument("--content-dir", required=True, help="Path to built content directory")
+ parser.add_argument("--plan", required=True, help="TMT plan to run (e.g., daily|ci-gating|weekly)")
+    parser.add_argument("--compose", required=True, help="compose (e.g., CentOS-Stream-9)")
+ parser.add_argument("--arch", default="x86_64", help="Architecture")
+ parser.add_argument("--os-major-version", required=True, help="OS Major Version (8|9|10)")
+ parser.add_argument("--tests", nargs="*", help="Specific tests to run (optional, runs all if not specified)")
+ parser.add_argument("--timeout", type=int, default=120, help="Timeout in minutes")
+ parser.add_argument("--max-remotes", type=int, default=10, help="Maximum number of parallel test executions")
+ parser.add_argument("--reruns", type=int, default=1, help="Number of test reruns on failure")
+ return parser.parse_args()
+
+
+def setup_logging():
+ """Setup logging configuration with console and file handlers."""
+ console_log = logging.StreamHandler(sys.stderr)
+ console_log.setLevel(logging.INFO)
+
+ debug_log_fobj = gzip.open("atex_debug.log.gz", "wt")
+ file_log = logging.StreamHandler(debug_log_fobj)
+ file_log.setLevel(logging.DEBUG)
+
+ logging.basicConfig(
+ level=logging.DEBUG,
+ format="%(asctime)s %(name)s: %(message)s",
+ datefmt="%Y-%m-%d %H:%M:%S",
+ handlers=(console_log, file_log),
+ force=True,
+ )
+
+ return debug_log_fobj
+
+
+def main():
+ """Main function to run tests on Testing Farm."""
+ args = parse_args()
+
+ # Variables exported to tests
+ test_env = {
+ "CONTEST_CONTENT": ContestOrchestrator.content_dir_on_remote,
+ "CONTEST_VERBOSE": "2",
+ }
+
+ with contextlib.ExitStack() as stack:
+ # Setup logging
+ debug_log_fobj = setup_logging()
+ stack.enter_context(contextlib.closing(debug_log_fobj))
+
+ # Load FMF tests from contest directory
+ fmf_tests = FMFTests(
+ args.contest_dir,
+ args.plan,
+ names=args.tests or None,
+ context={
+ "distro": f"centos-stream-{args.os_major_version}",
+ "arch": args.arch,
+ },
+ )
+
+ logger.info(f"plan: {args.plan}")
+ logger.info(f"os major version: {args.os_major_version}")
+ logger.info(f"arch: {args.arch}")
+ logger.info(f"compose: {args.compose}")
+ logger.info("will run:")
+ for test in fmf_tests.tests:
+ logger.info(f" {test}")
+
+ # Setup result aggregator
+ output_results = f"results-centos-stream-{args.os_major_version}-{args.arch}.json.gz"
+ output_files = f"files-centos-stream-{args.os_major_version}-{args.arch}"
+ partial_runs = Path(output_files) / "old_runs"
+ aggregator = JSONAggregator(output_results, output_files)
+ stack.enter_context(aggregator)
+
+ partial_runs.mkdir(parents=True, exist_ok=True)
+
+ platform_name = f"cs{args.os_major_version}@{args.arch}"
+
+ # Setup Testing Farm provisioner
+ prov = TestingFarmProvisioner(
+ compose=args.compose,
+ arch=args.arch,
+ max_retries=2,
+ timeout=args.timeout,
+ )
+
+ # Setup Contest orchestrator
+ orchestrator = ContestOrchestrator(
+ platform=platform_name,
+ fmf_tests=fmf_tests,
+ provisioners=[prov],
+ aggregator=aggregator,
+ tmp_dir=partial_runs,
+ max_remotes=args.max_remotes,
+ max_spares=2,
+ max_reruns=args.reruns,
+ content_dir=args.content_dir,
+ env=test_env,
+ )
+ stack.enter_context(orchestrator)
+
+ logger.info("Starting test execution...")
+ next_writeout = time.monotonic() + 600
+ while orchestrator.serve_once():
+ if time.monotonic() > next_writeout:
+ logger.info(
+ f"queued: {len(orchestrator.to_run)}/{len(fmf_tests.tests)} tests, "
+ f"running: {len(orchestrator.running_tests)} tests",
+ )
+ next_writeout = time.monotonic() + 600
+ time.sleep(1)
+
+ logger.info("Test execution completed!")
+
+ # Log final output locations
+ logger.info(f"Results written to: {output_results}")
+ logger.info(f"Test files in: {output_files}")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tests/submit_results_to_testing_farm.py b/tests/submit_results_to_testing_farm.py
new file mode 100644
index 000000000000..477035f69297
--- /dev/null
+++ b/tests/submit_results_to_testing_farm.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python3
+
+import sys
+import time
+import atexit
+import logging
+import argparse
+import xml.etree.ElementTree as ET
+
+from atex.provisioner.testingfarm import api
+
+
+# Reuse urllib3 PoolManager configured for heavy Retry attempts
+# (because of TestingFarm API reboots, and other transient issues)
+http = api._http
+
+
+def parse_args():
+ """Parse command-line arguments."""
+ parser = argparse.ArgumentParser(description="Submit TMT test to Testing Farm")
+ parser.add_argument("--repo-url", required=True, help="GitHub repository URL")
+ parser.add_argument("--pr-number", required=True, help="Pull request number")
+ parser.add_argument("--plan-name", default="/dummy_plan", help="TMT plan name to run")
+ parser.add_argument("--os", default=None, help="OS to test on (e.g., rhel-9)")
+ parser.add_argument("--arch", default="x86_64", help="Architecture to test on")
+ return parser.parse_args()
+
+
+def setup_logging():
+ """Setup logging configuration."""
+ logging.basicConfig(
+ level=logging.INFO, # use DEBUG to see HTTP queries
+ stream=sys.stderr,
+ format="%(asctime)s %(name)s: %(message)s",
+ datefmt="%Y-%m-%d %H:%M:%S",
+ )
+
+
+def build_request_json(args):
+ """Build the Testing Farm API request JSON payload."""
+ return {
+ "test": {
+ "fmf": {
+ "url": args.repo_url,
+ "ref": f"PR{args.pr_number}",
+ "name": args.plan_name,
+ },
+ },
+ "environments": [{"arch": args.arch, "os": args.os}],
+ }
+
+
+def get_html_link(artifacts_url):
+ """
+ Get the HTML link for test results from Testing Farm artifacts.
+
+ Args:
+ artifacts_url: URL to Testing Farm artifacts
+
+ Returns:
+ str: URL to the HTML results viewer
+
+ Raises:
+ RuntimeError: If results.xml or workdir cannot be retrieved
+ """
+ # Get results.xml for those artifacts, which is a XML representation of the
+ # HTML artifacts view and contains links to logs and workdir
+ reply = http.request("GET", f"{artifacts_url}/results.xml")
+ if reply.status != 200:
+ raise RuntimeError("could not get results.xml")
+
+ # Find which log is the workdir and get its URL
+ results_xml = ET.fromstring(reply.data)
+ for log in results_xml.find("testsuite").find("logs"):
+ if log.get("name") == "workdir":
+ workdir_url = log.get("href")
+ break
+ else:
+ raise RuntimeError("could not find workdir")
+
+ # TODO: a more reliable way would be to read
+ # {workdir_url}/testing-farm/sanity/execute/results.yaml
+ # as YAML and look for the test name and get its 'data-path'
+ # relative to the /execute/ dir
+ return f"{workdir_url}/dummy_plan/execute/data/guest/default-0/dummy_test-1/data/index.html?q=TRUE"
+
+
+def main():
+ """Main function to submit test results to Testing Farm."""
+ args = parse_args()
+ setup_logging()
+
+ request_json = build_request_json(args)
+
+ # Do faster queries than the default 30 secs, because we don't track
+ # many dozens of requests, just one
+ class FastRequest(api.Request):
+ """
+ A request class that executes queries faster than the default 30 seconds.
+
+ This optimization is implemented because the system does not track
+ many dozens of requests, typically just one, eliminating the need
+ for the standard, longer default timeout.
+ """
+ api_query_limit = 5
+
+ req = FastRequest()
+ req.submit(request_json)
+ atexit.register(req.cancel) # just in case we traceback
+
+ req.wait_for_state("running")
+
+ # Artifacts URL doesn't appear instantly, wait for it
+ while "run" not in req:
+ time.sleep(FastRequest.api_query_limit)
+ while "artifacts" not in req["run"]:
+ time.sleep(FastRequest.api_query_limit)
+
+ artifacts_url = req["run"]["artifacts"]
+ logging.info(f"artifacts: {artifacts_url}")
+
+ # results.xml appears only after completion
+ req.wait_for_state("complete")
+ atexit.unregister(req.cancel)
+
+ # Get and print HTML link
+ html_link = get_html_link(artifacts_url)
+ logging.info(f"HTML: {html_link}")
+
+
+if __name__ == "__main__":
+ main()