Skip to content
Draft
132 changes: 126 additions & 6 deletions nix_fast_build/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -110,6 +110,7 @@ class Result:
success: bool
duration: float
error: str | None
log_output: str | None = None


def _maybe_remote(
Expand Down Expand Up @@ -595,18 +596,46 @@ class Build:

async def build(
    self, stack: AsyncExitStack, build_output: IO[str], opts: Options
) -> tuple[int, str]:
    """Build this attribute and return ``(return_code, log_output)``.

    ``log_output`` is empty on success; on failure it holds the output of
    ``nix log`` for the failing derivation (best effort — may be empty).
    """
    proc = await stack.enter_async_context(
        nix_build(self.attr, self.drv_path, build_output, opts)
    )

    rc = 0
    for _ in range(opts.retries + 1):
        rc = await proc.wait()
        if rc == 0:
            logger.debug(f"build {self.attr} succeeded")
            return rc, ""
        logger.warning(f"build {self.attr} exited with {rc}")

    # If build failed, get the log using nix log
    if rc != 0:
        log_output = await self.get_build_log(opts)
        return rc, log_output

    return rc, ""

async def get_build_log(self, opts: Options) -> str:
    """Fetch the build log for this derivation via ``nix log``.

    Returns the decoded stdout on success.  If ``nix log`` itself fails,
    returns its stderr (when any) so the caller still gets a hint; on an
    OSError, or when no output is available, returns an empty string.
    """
    cmd = maybe_remote(nix_command(["log", self.drv_path]), opts)
    logger.debug("run %s", shlex.join(cmd))
    try:
        process = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        out, err = await process.communicate()
        if process.returncode == 0 and out:
            return out.decode("utf-8", errors="replace")
        # If nix log fails, surface stderr instead of silently dropping it.
        if err:
            return err.decode("utf-8", errors="replace")
    except OSError as e:
        logger.debug(f"Failed to get build log: {e}")
    return ""

async def nix_copy(
self, args: list[str], exit_stack: AsyncExitStack, opts: Options
Expand Down Expand Up @@ -825,15 +854,15 @@ async def run_builds(
drv_paths.add(job.drv_path)
build = Build(job.attr, job.drv_path, job.outputs)
start_time = timeit.default_timer()
rc = await build.build(stack, build_output, opts)
rc, log_output = await build.build(stack, build_output, opts)
results.append(
Result(
result_type=ResultType.BUILD,
attr=job.attr,
success=rc == 0,
duration=timeit.default_timer() - start_time,
# TODO: add log output here
error=f"build exited with {rc}" if rc != 0 else None,
log_output=log_output if rc != 0 else None,
)
)
if rc != 0:
Expand Down Expand Up @@ -968,6 +997,92 @@ class Summary:
failed_attrs: list[str] = field(default_factory=list)


def is_github_actions() -> bool:
    """Return True when the GITHUB_ACTIONS env var marks a GitHub runner."""
    return os.environ.get("GITHUB_ACTIONS", "") == "true"
Copy link
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@copilot still document this behaviour in the readme.

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Done. Added "GitHub Actions Job Summaries" section to README documenting the automatic detection and build log behavior. (9cb1c16)



def get_github_summary_file() -> Path | None:
    """Return the path named by $GITHUB_STEP_SUMMARY, or None.

    None is returned both outside GitHub Actions and when the variable is
    unset or empty.
    """
    if not is_github_actions():
        return None
    summary_path = os.environ.get("GITHUB_STEP_SUMMARY")
    return Path(summary_path) if summary_path else None


def write_github_summary(
    summary_file: Path, opts: Options, results: list[Result], rc: int
) -> None:
    """Write GitHub Actions job summary in markdown format.

    Appends a markdown report to *summary_file* (normally the file named by
    $GITHUB_STEP_SUMMARY): overall status, a per-type success/failure table,
    and a collapsible log section for each failed build.  Write errors are
    logged, never raised.
    """
    # Group results by type
    stats_by_type: dict[ResultType, Summary] = defaultdict(Summary)
    failed_builds: list[Result] = []

    for r in results:
        stats = stats_by_type[r.result_type]
        stats.successes += 1 if r.success else 0
        stats.failures += 1 if not r.success else 0
        if not r.success:
            stats.failed_attrs.append(r.attr)
            if r.result_type == ResultType.BUILD:
                failed_builds.append(r)

    # Build the markdown content
    lines: list[str] = []
    lines.append("# nix-fast-build Results\n")

    # Overall status
    if rc == 0:
        lines.append("## ✅ Build Successful\n")
    else:
        lines.append("## ❌ Build Failed\n")

    # Summary table
    lines.append("## Summary\n")
    lines.append("| Type | Successes | Failures |")
    lines.append("|------|-----------|----------|")

    # Sort by enum name so the table order is deterministic across runs.
    for result_type, summary in sorted(stats_by_type.items(), key=lambda x: x[0].name):
        emoji = "✅" if summary.failures == 0 else "❌"
        lines.append(
            f"| {emoji} {result_type.name} | {summary.successes} | {summary.failures} |"
        )

    # Failed builds section with logs
    if failed_builds:
        lines.append("\n## Failed Builds\n")
        for result in failed_builds:
            attr_name = f"{opts.flake_url}#{opts.flake_fragment}.{result.attr}"
            lines.append(f"\n### ❌ {result.attr}\n")
            lines.append(f"**Full attribute:** `{attr_name}`\n")
            lines.append(f"**Duration:** {result.duration:.2f}s\n")
            if result.error:
                lines.append(f"**Error:** {result.error}\n")
            if result.log_output:
                # Truncate very long logs (keep last 100 lines)
                log_lines = result.log_output.strip().split("\n")
                if len(log_lines) > 100:
                    log_lines = [
                        "... (truncated, showing last 100 lines) ...",
                        *log_lines[-100:],
                    ]
                lines.append("\n<details>")
                lines.append(f"<summary>Build Log ({len(log_lines)} lines)</summary>\n")
                lines.append("```")
                lines.extend(log_lines)
                lines.append("```")
                lines.append("</details>\n")

    # Write to file
    try:
        with summary_file.open("a") as f:
            # Trailing newline: $GITHUB_STEP_SUMMARY is append-shared across
            # workflow steps, so a later append must start on a fresh line.
            f.write("\n".join(lines) + "\n")
        logger.info(f"GitHub summary written to {summary_file}")
    except OSError as e:
        logger.warning(f"Failed to write GitHub summary to {summary_file}: {e}")


async def run(stack: AsyncExitStack, opts: Options) -> int:
if opts.remote:
tmp_dir = await stack.enter_async_context(remote_temp_dir(opts))
Expand Down Expand Up @@ -1100,7 +1215,7 @@ async def run(stack: AsyncExitStack, opts: Options) -> int:
assert task.done(), f"Task {task.get_name()} is not done"

rc = 0
stats_by_type = defaultdict(Summary)
stats_by_type: dict[ResultType, Summary] = defaultdict(Summary)
for r in results:
stats = stats_by_type[r.result_type]
stats.successes += 1 if r.success else 0
Expand Down Expand Up @@ -1137,6 +1252,11 @@ async def run(stack: AsyncExitStack, opts: Options) -> int:
elif opts.result_format == ResultFormat.JUNIT:
dump_junit_xml(f, opts.flake_url, opts.flake_fragment, results)

# Write GitHub Actions summary if configured
github_summary_file = get_github_summary_file()
if github_summary_file:
write_github_summary(github_summary_file, opts, results, rc)

return rc


Expand Down
155 changes: 155 additions & 0 deletions tests/test_github_integration.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,155 @@
"""Integration test for GitHub Actions summary feature."""

import os
from pathlib import Path
from tempfile import TemporaryDirectory

from nix_fast_build import (
Options,
Result,
ResultType,
get_github_summary_file,
write_github_summary,
)


def test_github_actions_workflow() -> None:
    """Test complete GitHub Actions workflow with environment variables."""
    # Snapshot the env vars we mutate so they can be restored afterwards.
    saved_env = {
        name: os.environ.get(name)
        for name in ("GITHUB_ACTIONS", "GITHUB_STEP_SUMMARY")
    }

    try:
        with TemporaryDirectory() as tmp_dir:
            summary_path = Path(tmp_dir) / "github_summary.md"

            # Set up GitHub Actions environment
            os.environ["GITHUB_ACTIONS"] = "true"
            os.environ["GITHUB_STEP_SUMMARY"] = str(summary_path)

            # Create options
            opts = Options(
                flake_url="github:example/repo", flake_fragment="checks.x86_64-linux"
            )

            # Verify it picks up the environment variable
            assert get_github_summary_file() == summary_path

            # Simulate build results: two packages evaluated, one build failure.
            results = [
                Result(
                    result_type=ResultType.EVAL,
                    attr="package-a",
                    success=True,
                    duration=2.1,
                    error=None,
                ),
                Result(
                    result_type=ResultType.BUILD,
                    attr="package-a",
                    success=True,
                    duration=15.3,
                    error=None,
                ),
                Result(
                    result_type=ResultType.EVAL,
                    attr="package-b",
                    success=True,
                    duration=1.8,
                    error=None,
                ),
                Result(
                    result_type=ResultType.BUILD,
                    attr="package-b",
                    success=False,
                    duration=8.2,
                    error="build exited with 1",
                    log_output=(
                        "error: builder for '/nix/store/xxx-package-b.drv' failed with exit code 1:\n"
                        "last 25 lines of build log:\n"
                        "> building\n"
                        "> checking for compiler\n"
                        "> error: missing dependency: libfoo\n"
                        "> build failed"
                    ),
                ),
                Result(
                    result_type=ResultType.UPLOAD,
                    attr="package-a",
                    success=True,
                    duration=3.5,
                    error=None,
                ),
            ]

            # Write the summary
            write_github_summary(summary_path, opts, results, rc=1)

            # Verify the summary was written
            assert summary_path.exists()
            content = summary_path.read_text()

            # Verify content: section headers, per-type statistics, and
            # the failed-build details (attribute, error, collapsible log).
            for expected in (
                "# nix-fast-build Results",
                "❌ Build Failed",
                "## Summary",
                "## Failed Builds",
                "EVAL | 2 | 0",
                "BUILD | 1 | 1",
                "UPLOAD | 1 | 0",
                "### ❌ package-b",
                "github:example/repo#checks.x86_64-linux.package-b",
                "build exited with 1",
                "missing dependency: libfoo",
                "<details>",
                "Build Log",
            ):
                assert expected in content

    finally:
        # Restore original environment
        for name, value in saved_env.items():
            if value is None:
                os.environ.pop(name, None)
            else:
                os.environ[name] = value


def test_long_log_truncation() -> None:
    """Test that very long logs are truncated."""
    with TemporaryDirectory() as tmp_dir:
        summary_path = Path(tmp_dir) / "summary.md"
        opts = Options(flake_url=".#checks", flake_fragment="checks")

        # A log well over the 100-line limit (lines 0..149).
        oversized_log = "\n".join(f"log line {i}" for i in range(150))

        results = [
            Result(
                result_type=ResultType.BUILD,
                attr="test-package",
                success=False,
                duration=5.3,
                error="build failed",
                log_output=oversized_log,
            ),
        ]

        write_github_summary(summary_path, opts, results, rc=1)

        content = summary_path.read_text()

        # The truncation marker must be present...
        assert "truncated, showing last 100 lines" in content

        # ...and only the last 100 lines (50..149) survive.
        assert "log line 50" in content  # first kept line
        assert "log line 49" not in content  # dropped by truncation
        assert "log line 149" in content  # last line kept
Loading