diff --git a/README.md b/README.md
index d307b44..ea1b4b5 100644
--- a/README.md
+++ b/README.md
@@ -81,6 +81,30 @@ To make output more concise for CI environments, use the `--no-nom` flag. This
replaces `nom` with a streamlined status reporter, which updates only when
there's a change in the number of pending builds, uploads, or downloads.
+## GitHub Actions Job Summaries
+
+When running in GitHub Actions, nix-fast-build automatically generates job
+summaries that appear directly in the Actions UI. The summary includes:
+
+- Overall build status (✅ Success / ❌ Failed)
+- Summary table with success/failure counts by operation type (EVAL, BUILD, UPLOAD, etc.)
+- Detailed sections for each failed build with build logs
+
+This feature is enabled automatically when the `GITHUB_ACTIONS` environment
+variable is set to `true` and `GITHUB_STEP_SUMMARY` is set; GitHub Actions
+runners provide both by default, so no additional configuration is required.
+
+Example GitHub Actions workflow:
+
+```yaml
+- name: Build with nix-fast-build
+ run: nix-fast-build --no-nom --skip-cached
+```
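+
+The summary is appended to the file referenced by `GITHUB_STEP_SUMMARY`, so it
+coexists with summaries written by other steps in the same job.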
+
+Build logs for failed packages are retrieved using `nix log` and displayed in
+collapsible sections within the summary. Very long logs are automatically
+truncated to the last 100 lines.
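+
+An abbreviated sketch of the rendered summary (exact wording may vary between
+versions):
+
+```markdown
+# nix-fast-build Results
+
+## ❌ Build Failed
+
+## Summary
+
+| Type | Successes | Failures |
+|------|-----------|----------|
+| ✅ EVAL | 2 | 0 |
+| ❌ BUILD | 1 | 1 |
+
+## Failed Builds
+
+### ❌ package-b
+
+**Error:** build exited with 1
+
+<details>
+<summary>Build Log (25 lines)</summary>
+...
+</details>
+```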
+
## Avoiding Redundant Package Downloads
By default, `nix build` will download pre-built packages, leading to needless
diff --git a/nix_fast_build/__init__.py b/nix_fast_build/__init__.py
index fedc419..db3dcfd 100644
--- a/nix_fast_build/__init__.py
+++ b/nix_fast_build/__init__.py
@@ -110,6 +110,7 @@ class Result:
success: bool
duration: float
error: str | None
+ log_output: str | None = None
def _maybe_remote(
@@ -587,6 +588,14 @@ async def run_cachix_daemon_stop(
return await proc.wait()
+@dataclass
+class BuildResult:
+ """Result of a build operation."""
+
+ return_code: int
+ log_output: str
+
+
@dataclass
class Build:
attr: str
@@ -595,18 +604,46 @@ class Build:
async def build(
self, stack: AsyncExitStack, build_output: IO[str], opts: Options
- ) -> int:
+ ) -> BuildResult:
+ """Build and return BuildResult."""
proc = await stack.enter_async_context(
nix_build(self.attr, self.drv_path, build_output, opts)
)
+
rc = 0
for _ in range(opts.retries + 1):
rc = await proc.wait()
if rc == 0:
logger.debug(f"build {self.attr} succeeded")
- return rc
+ return BuildResult(return_code=rc, log_output="")
logger.warning(f"build {self.attr} exited with {rc}")
- return rc
+
+ # If build failed, get the log using nix log
+ if rc != 0:
+ log_output = await self.get_build_log(opts)
+ return BuildResult(return_code=rc, log_output=log_output)
+
+ return BuildResult(return_code=rc, log_output="")
+
+ async def get_build_log(self, opts: Options) -> str:
+ """Get build log using nix log command."""
+ cmd = maybe_remote(nix_command(["log", self.drv_path]), opts)
+ logger.debug("run %s", shlex.join(cmd))
+ try:
+ proc = await asyncio.create_subprocess_exec(
+ *cmd,
+ stdout=asyncio.subprocess.PIPE,
+ stderr=asyncio.subprocess.PIPE,
+ )
+ stdout, stderr = await proc.communicate()
+ if proc.returncode == 0 and stdout:
+ return stdout.decode("utf-8", errors="replace")
+ # If nix log fails, return stderr or empty
+ if stderr:
+ return stderr.decode("utf-8", errors="replace")
+ except OSError as e:
+ logger.debug(f"Failed to get build log: {e}")
+ return ""
async def nix_copy(
self, args: list[str], exit_stack: AsyncExitStack, opts: Options
@@ -825,18 +862,18 @@ async def run_builds(
drv_paths.add(job.drv_path)
build = Build(job.attr, job.drv_path, job.outputs)
start_time = timeit.default_timer()
- rc = await build.build(stack, build_output, opts)
+ build_result = await build.build(stack, build_output, opts)
results.append(
Result(
result_type=ResultType.BUILD,
attr=job.attr,
- success=rc == 0,
+ success=build_result.return_code == 0,
duration=timeit.default_timer() - start_time,
- # TODO: add log output here
- error=f"build exited with {rc}" if rc != 0 else None,
+ error=f"build exited with {build_result.return_code}" if build_result.return_code != 0 else None,
+ log_output=build_result.log_output if build_result.return_code != 0 else None,
)
)
- if rc != 0:
+ if build_result.return_code != 0:
continue
upload_queue.put_nowait(build)
download_queue.put_nowait(build)
@@ -855,6 +892,9 @@ async def run_uploads(
if isinstance(build, StopTask):
logger.debug("finish upload task")
return 0
+ # Skip if copy_to is not configured
+ if not opts.copy_to:
+ continue
start_time = timeit.default_timer()
rc = await build.upload(stack, opts)
results.append(
@@ -880,6 +920,9 @@ async def run_cachix_upload(
if isinstance(build, StopTask):
logger.debug("finish cachix upload task")
return 0
+ # Skip if cachix is not configured
+ if cachix_socket_path is None:
+ continue
start_time = timeit.default_timer()
rc = await build.upload_cachix(cachix_socket_path, opts)
results.append(
@@ -903,6 +946,9 @@ async def run_attic_upload(
if isinstance(build, StopTask):
logger.debug("finish attic upload task")
return 0
+ # Skip if attic is not configured
+ if opts.attic_cache is None:
+ continue
start_time = timeit.default_timer()
rc = await build.upload_attic(opts)
results.append(
@@ -927,6 +973,9 @@ async def run_downloads(
if isinstance(build, StopTask):
logger.debug("finish download task")
return 0
+ # Skip if not using remote or download is disabled
+ if not opts.remote_url or not opts.download:
+ continue
start_time = timeit.default_timer()
rc = await build.download(stack, opts)
results.append(
@@ -968,6 +1017,95 @@ class Summary:
failed_attrs: list[str] = field(default_factory=list)
+def is_github_actions() -> bool:
+ """Detect if running inside GitHub Actions."""
+ return os.environ.get("GITHUB_ACTIONS") == "true"
+
+
+def get_github_summary_file() -> Path | None:
+ """Get the GitHub summary file path from environment."""
+ if is_github_actions():
+ summary_path = os.environ.get("GITHUB_STEP_SUMMARY")
+ if summary_path:
+ return Path(summary_path)
+ return None
+
+
+def write_github_summary(
+ summary_file: Path, opts: Options, results: list[Result], rc: int
+) -> None:
+ """Write GitHub Actions job summary in markdown format."""
+ # Group results by type
+ stats_by_type: dict[ResultType, Summary] = defaultdict(Summary)
+ failed_builds: list[Result] = []
+
+ for r in results:
+ stats = stats_by_type[r.result_type]
+ stats.successes += 1 if r.success else 0
+ stats.failures += 1 if not r.success else 0
+ if not r.success:
+ stats.failed_attrs.append(r.attr)
+ if r.result_type == ResultType.BUILD:
+ failed_builds.append(r)
+
+ # Build the markdown content
+ lines: list[str] = []
+ lines.append("# nix-fast-build Results\n")
+
+ # Overall status
+ if rc == 0:
+ lines.append("## ✅ Build Successful\n")
+ else:
+ lines.append("## ❌ Build Failed\n")
+
+ # Summary table
+ lines.append("## Summary\n")
+ lines.append("| Type | Successes | Failures |")
+ lines.append("|------|-----------|----------|")
+
+ for result_type, summary in sorted(stats_by_type.items(), key=lambda x: x[0].name):
+ # Only show result types that have actual operations
+ if summary.successes == 0 and summary.failures == 0:
+ continue
+ emoji = "✅" if summary.failures == 0 else "❌"
+ lines.append(
+ f"| {emoji} {result_type.name} | {summary.successes} | {summary.failures} |"
+ )
+
+ # Failed builds section with logs
+ if failed_builds:
+ lines.append("\n## Failed Builds\n")
+ for result in failed_builds:
+ attr_name = f"{opts.flake_url}#{opts.flake_fragment}.{result.attr}"
+ lines.append(f"\n### ❌ {result.attr}\n")
+ lines.append(f"**Full attribute:** `{attr_name}`\n")
+ lines.append(f"**Duration:** {result.duration:.2f}s\n")
+ if result.error:
+ lines.append(f"**Error:** {result.error}\n")
+ if result.log_output:
+ # Truncate very long logs (keep last 100 lines)
+ log_lines = result.log_output.strip().split("\n")
+ if len(log_lines) > 100:
+ log_lines = [
+ "... (truncated, showing last 100 lines) ...",
+ *log_lines[-100:],
+ ]
+ lines.append("\n")
+ lines.append(f"Build Log ({len(log_lines)} lines)
\n")
+ lines.append("```")
+ lines.extend(log_lines)
+ lines.append("```")
+ lines.append(" \n")
+
+ # Append (not overwrite) so summaries written by earlier steps are preserved
+ try:
+ with summary_file.open("a") as f:
+ f.write("\n".join(lines))
+ logger.info(f"GitHub summary written to {summary_file}")
+ except OSError as e:
+ logger.warning(f"Failed to write GitHub summary to {summary_file}: {e}")
+
+
async def run(stack: AsyncExitStack, opts: Options) -> int:
if opts.remote:
tmp_dir = await stack.enter_async_context(remote_temp_dir(opts))
@@ -1100,7 +1238,7 @@ async def run(stack: AsyncExitStack, opts: Options) -> int:
assert task.done(), f"Task {task.get_name()} is not done"
rc = 0
- stats_by_type = defaultdict(Summary)
+ stats_by_type: dict[ResultType, Summary] = defaultdict(Summary)
for r in results:
stats = stats_by_type[r.result_type]
stats.successes += 1 if r.success else 0
@@ -1137,6 +1275,11 @@ async def run(stack: AsyncExitStack, opts: Options) -> int:
elif opts.result_format == ResultFormat.JUNIT:
dump_junit_xml(f, opts.flake_url, opts.flake_fragment, results)
+ # Write GitHub Actions summary if configured
+ github_summary_file = get_github_summary_file()
+ if github_summary_file:
+ write_github_summary(github_summary_file, opts, results, rc)
+
return rc
diff --git a/tests/test_github_integration.py b/tests/test_github_integration.py
new file mode 100644
index 0000000..ee9ca6c
--- /dev/null
+++ b/tests/test_github_integration.py
@@ -0,0 +1,155 @@
+"""Integration test for GitHub Actions summary feature."""
+
+import os
+from pathlib import Path
+from tempfile import TemporaryDirectory
+
+from nix_fast_build import (
+ Options,
+ Result,
+ ResultType,
+ get_github_summary_file,
+ write_github_summary,
+)
+
+
+def test_github_actions_workflow() -> None:
+ """Test complete GitHub Actions workflow with environment variables."""
+ original_actions = os.environ.get("GITHUB_ACTIONS")
+ original_summary = os.environ.get("GITHUB_STEP_SUMMARY")
+
+ try:
+ with TemporaryDirectory() as d:
+ summary_path = Path(d) / "github_summary.md"
+
+ # Set up GitHub Actions environment
+ os.environ["GITHUB_ACTIONS"] = "true"
+ os.environ["GITHUB_STEP_SUMMARY"] = str(summary_path)
+
+ # Create options
+ opts = Options(
+ flake_url="github:example/repo", flake_fragment="checks.x86_64-linux"
+ )
+
+ # Verify it picks up the environment variable
+ github_summary_file = get_github_summary_file()
+ assert github_summary_file == summary_path
+
+ # Simulate build results
+ results = [
+ Result(
+ result_type=ResultType.EVAL,
+ attr="package-a",
+ success=True,
+ duration=2.1,
+ error=None,
+ ),
+ Result(
+ result_type=ResultType.BUILD,
+ attr="package-a",
+ success=True,
+ duration=15.3,
+ error=None,
+ ),
+ Result(
+ result_type=ResultType.EVAL,
+ attr="package-b",
+ success=True,
+ duration=1.8,
+ error=None,
+ ),
+ Result(
+ result_type=ResultType.BUILD,
+ attr="package-b",
+ success=False,
+ duration=8.2,
+ error="build exited with 1",
+ log_output=(
+ "error: builder for '/nix/store/xxx-package-b.drv' failed with exit code 1:\n"
+ "last 25 lines of build log:\n"
+ "> building\n"
+ "> checking for compiler\n"
+ "> error: missing dependency: libfoo\n"
+ "> build failed"
+ ),
+ ),
+ Result(
+ result_type=ResultType.UPLOAD,
+ attr="package-a",
+ success=True,
+ duration=3.5,
+ error=None,
+ ),
+ ]
+
+ # Write the summary
+ write_github_summary(summary_path, opts, results, rc=1)
+
+ # Verify the summary was written
+ assert summary_path.exists()
+ content = summary_path.read_text()
+
+ # Verify content includes expected sections
+ assert "# nix-fast-build Results" in content
+ assert "❌ Build Failed" in content
+ assert "## Summary" in content
+ assert "## Failed Builds" in content
+
+ # Verify statistics are correct
+ assert "EVAL | 2 | 0" in content
+ assert "BUILD | 1 | 1" in content
+ assert "UPLOAD | 1 | 0" in content
+
+ # Verify failed build details
+ assert "### ❌ package-b" in content
+ assert "github:example/repo#checks.x86_64-linux.package-b" in content
+ assert "build exited with 1" in content
+ assert "missing dependency: libfoo" in content
+ assert "" in content
+ assert "Build Log" in content
+
+ finally:
+ # Restore original environment
+ if original_actions is None:
+ os.environ.pop("GITHUB_ACTIONS", None)
+ else:
+ os.environ["GITHUB_ACTIONS"] = original_actions
+ if original_summary is None:
+ os.environ.pop("GITHUB_STEP_SUMMARY", None)
+ else:
+ os.environ["GITHUB_STEP_SUMMARY"] = original_summary
+
+
+def test_long_log_truncation() -> None:
+ """Test that very long logs are truncated."""
+ with TemporaryDirectory() as d:
+ summary_file = Path(d) / "summary.md"
+ opts = Options(flake_url=".#checks", flake_fragment="checks")
+
+ # Create a log with more than 100 lines
+ long_log = "\n".join([f"log line {i}" for i in range(150)])
+
+ results = [
+ Result(
+ result_type=ResultType.BUILD,
+ attr="test-package",
+ success=False,
+ duration=5.3,
+ error="build failed",
+ log_output=long_log,
+ ),
+ ]
+
+ write_github_summary(summary_file, opts, results, rc=1)
+
+ content = summary_file.read_text()
+
+ # Check for truncation message
+ assert "truncated, showing last 100 lines" in content
+
+ # Verify only last 100 lines + truncation message are present
+ assert (
+ "log line 50" in content
+ ) # Should be present (line 50 onwards, first of last 100)
+ assert "log line 49" not in content # Should be truncated
+ assert "log line 149" in content # Last line should be present