- """
- print(f"{vt100.YELLOW}○{vt100.RESET}", end="", flush=True)
- continue
-
- test_report = ""
-
- search = re.search(r"""
([\w\W]+?)""", test)
- container = search and search.group(1)
- if not container:
- container = '<slot/>'
-
- expected_xhtml = None
- expected_image: bytes | None = None
- expected_image_url = TEST_REPORT / f"{counter}.expected.bmp"
- if props.get("name"):
- ref_image = file.parent / f"{props.get('name')}.bmp"
- if ref_image.exists():
- with ref_image.open("rb") as imageReader:
- expected_image = imageReader.read()
-
- with expected_image_url.open("wb") as imageWriter:
- imageWriter.write(expected_image)
-
- expected_image_url = ref_image
-
- for tag, info, rendering in re.findall(
- r"""<(rendering|error)([^>]*)>([\w\W]+?)(?:rendering|error)>""", test
- ):
- renderingProps = getInfo(info)
- test_skipped = category_skipped or "skip" in renderingProps
- if test_skipped and not args.runSkipped:
- skippedCount += 1
- skipped += 1
-
- print(f"{vt100.YELLOW}○{vt100.RESET}", end="", flush=True)
- continue
-
- input_path = TEST_REPORT / f"{counter}.xhtml"
-
- update_temp_file(input_path, container, rendering)
-
- # generate temporary bmp
- img_path = TEST_REPORT / f"{counter}.bmp"
-
- xsize = props.get("size", "200")
- ysize = xsize
- page = props.get("page")
- if props.get("size") == "full":
- xsize = "800"
- ysize = "600"
-
- runPaperMuncher(
- paperMuncher, type, xsize, ysize, page, img_path, input_path
- )
-
- with img_path.open("rb") as imageFile:
- output_image: bytes = imageFile.read()
-
- # the first template is the expected value
- if not expected_xhtml:
- expected_xhtml = rendering
- if not expected_image:
- expected_image = output_image
- with (TEST_REPORT / f"{counter}.expected.bmp").open(
- "wb"
- ) as imageWriter:
- imageWriter.write(expected_image)
- continue
-
- # check if the rendering is different
- assert expected_image is not None
- assert output_image is not None
-
- ok = compareImages(expected_image, output_image) == (tag == "rendering")
- if ok:
- passCount += 1
- else:
- failCount += 1
-
- help = renderingProps.get("help")
-
- if ok:
- passed += 1
- print(f"{vt100.GREEN}●{vt100.RESET}", end="", flush=True)
- else:
- failed += 1
- print(f"{vt100.RED}●{vt100.RESET}", end="", flush=True)
- test_failed += f"""Test {counter} failed.
-file://{input_path}
-file://{TEST_REPORT / "report.html"}#case-{counter}
-"""
-
- add_infos = []
- if test_skipped:
- add_infos.append("skip flag")
- if len(add_infos) != 0:
- add_infos = " [" + ", ".join(add_infos) + "]"
- else:
- add_infos = ""
-
- test_report += f"""
-
-
-
{counter} - {tag} {add_infos}
-
{help}
-
-
-

-
Actual
-
-
-
-

-
{"Reference" if (tag == "rendering") else "Unexpected"}
-
-
-
-
- Rendition
-
-
-
Reference
-
Source
-
- """
-
- counter += 1
-
- if args.fast:
- break
- report += f"""
-
-
-
{props.get("name")}
-
{props.get("help") or ""}
-
Source
-
{passCount} passed, {failCount} failed and {skippedCount} skipped
-
- {test_report}
-
- """
- print()
- report += f"""
-
- """
-
- report += """
-
-
-
-
-
-
- """
-
- with (TEST_REPORT / "report.html").open("w") as f:
- f.write(report)
-
- if not args.headless:
- if shell.which("xdg-open"):
- shell.exec("xdg-open", str(TEST_REPORT / "report.html"))
- elif shell.which("open"):
- shell.exec("open", str(TEST_REPORT / "report.html"))
-
- print()
- if failed:
- print(f"{vt100.BRIGHT_GREEN}// {fetchMessage(args, 'witty')}{vt100.RESET}")
- print(
- f"{vt100.RED}Failed {failed} tests{vt100.RESET}, {vt100.GREEN}Passed {passed} tests{vt100.RESET}"
- )
- print(f"Report: {TEST_REPORT / 'report.html'}")
-
- print()
- print("Failed tests details:")
- print(test_failed)
- raise RuntimeError("Some tests failed")
- else:
- print(f"{vt100.GREEN}// {fetchMessage(args, 'nice')}{vt100.RESET}")
- print(f"{vt100.GREEN}All tests passed{vt100.RESET}")
- print(f"Report: {TEST_REPORT / 'report.html'}")
diff --git a/meta/plugins/reftests/CLIReport.py b/meta/plugins/reftests/CLIReport.py
new file mode 100644
index 00000000..f242de1c
--- /dev/null
+++ b/meta/plugins/reftests/CLIReport.py
@@ -0,0 +1,48 @@
+from cutekit import model, vt100
+from pathlib import Path
+
+from .utils import fetchMessage
+from .Test import TestCase
+from .reporter import Reporter
+
+
+class CLIReport(Reporter):
+ """
+ Reporter that prints reftest progress and results to the command line.
+ """
+
+ def __init__(self, SOURCE_DIR: Path, TEST_REPORT: Path):
+ self.TEST_REPORT: Path = TEST_REPORT
+
+ def addTestCase(self, testId: int, passed: bool, test: TestCase):
+ if passed:
+ print(f"{vt100.GREEN}●{vt100.RESET}", end="", flush=True)
+ else:
+ print(f"{vt100.RED}●{vt100.RESET}", end="", flush=True)
+
+ def addTestCategory(self, testId: int, props, file: Path, passCount: int, failCount: int, skippedCount: int):
+ pass
+
+ def addSkippedFile(self, testId: int, props):
+ print(f"{vt100.YELLOW}○{vt100.RESET}", end="", flush=True)
+
+ def addSkippedCase(self):
+ print(f"{vt100.YELLOW}○{vt100.RESET}", end="", flush=True)
+
+ def finish(self, manifests: model.Registry, totalFailed: int, totalPassed: int, totalSkipped: int, context):
+ print()
+ if totalFailed:
+ print(f"{vt100.BRIGHT_GREEN}// {fetchMessage(manifests, 'witty')}{vt100.RESET}")
+ print(
+ f"{vt100.RED}Failed {totalFailed} tests{vt100.RESET}, {vt100.GREEN}Passed {totalPassed} tests{vt100.RESET}"
+ )
+ print(f"Report: {self.TEST_REPORT / 'report.html'}")
+
+ print()
+ print("Failed tests details:")
+ print(context.results.failedDetails)
+ raise RuntimeError("Some tests failed")
+ else:
+ print(f"{vt100.GREEN}// {fetchMessage(manifests, 'nice')}{vt100.RESET}")
+ print(f"{vt100.GREEN}All tests passed{vt100.RESET}")
+ print(f"Report: {self.TEST_REPORT / 'report.html'}")
diff --git a/meta/plugins/reftests/Test.py b/meta/plugins/reftests/Test.py
new file mode 100644
index 00000000..ae88afe4
--- /dev/null
+++ b/meta/plugins/reftests/Test.py
@@ -0,0 +1,102 @@
+from pathlib import Path
+from cutekit import const
+import textwrap
+import re
+
+TEST_REPORT = (Path(const.PROJECT_CK_DIR) / "tests" / "report").absolute()
+
+
+class TestReference:
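+ """Reference output (document and rendered image) that later cases in the same test block are compared against."""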
+ def __init__(self, path: Path, imagePath: Path, image: bytes | None = None):
+ self.path = path # path to the reference document
+ self.imagePath = imagePath
+ self.image = image
+
+
+class TestCase:
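+ """A single rendering/error case: renders one document with paper-muncher and holds the resulting image."""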
+ def __init__(self, props, inputPath, outputPath, context, tag, testDocument, caseProps,
+ container=None, reference: TestReference | None = None):
+ self.outputImage: bytes | None = None
+ self.addInfos = []
+
+ self.type = props.get("type") # the type of test [render (default) | print]
+ self.xsize = props.get("size", "200")
+ self.ysize = self.xsize
+
+ if props.get("size") == "full":
+ self.xsize = "800"
+ self.ysize = "600"
+
+ self.page = props.get("page") # page size
+ self.inputPath = inputPath # path to test case's document
+ self.outputPath = outputPath # path to the output image
+ self.context = context
+ self.ref = reference
+ self.help = caseProps.get("help", "")
+ self.tag = tag # test tag [rendering | error]
+
+ if not container:
+ container = '<slot/>'
+
+ self.container = container
+ self.testDocument = testDocument
+
+ def render(self):
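+ """Write the test document into a temporary file, render it with paper-muncher and load the output image."""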
+ def updateTempFile(path, rendering):
+ # write xhtml into the temporary file
+ xhtml = re.sub(r"
", rendering, self.container) if self.container else rendering
+ with path.open("w") as f:
+ f.write(f"\n{textwrap.dedent(xhtml)}")
+
+ updateTempFile(self.inputPath, self.testDocument)
+
+ runPaperMuncher(self.context.paperMuncher, self)
+
+ with self.outputPath.open("rb") as imageFile:
+ self.outputImage = imageFile.read()
+
+ def run(self) -> bool:
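+ """Render the case and compare it to the reference: a "rendering" case passes when the images match, an "error" case when they differ."""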
+ self.render()
+
+ return areImagesIdentical(self.ref.image, self.outputImage) == (self.tag == "rendering")
+
+
+def runPaperMuncher(executable, test: TestCase):
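+ """Build the paper-muncher command line for a test case and run it to produce the output image."""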
+ command = ["--feature", "*=on", "--quiet"]
+
+ if test.type == "print":
+ command.extend(["--flow", "paginate"])
+
+ if test.xsize or not test.page:
+ command.extend(["--width", (test.xsize or 200) + "px"])
+
+ if test.ysize or not test.page:
+ command.extend(["--height", (test.ysize or 200) + "px"])
+
+ if test.page:
+ command.extend(["--page", test.page])
+
+ command += [
+ "-o",
+ test.outputPath,
+ test.inputPath,
+ ]
+
+ executable.popen(*command)
+
+
+def areImagesIdentical(image1: bytes, image2: bytes) -> bool:
+ """
+ Compare the results from the reftests by checking if the images are identical.
+
+ This method is sensitive to any changes in the image, including compression artifacts.
+ If you want to compare the images with more tolerance, use a structural similarity metric (SSIM) instead.
+
+ Args:
+ image1: The byte content of the first image.
+ image2: The byte content of the second image.
+
+ Returns:
+ True if the images are identical (byte-for-byte), False otherwise.
+ """
+ return image1 == image2
diff --git a/meta/plugins/reftests/WebReport.py b/meta/plugins/reftests/WebReport.py
new file mode 100644
index 00000000..06187ec8
--- /dev/null
+++ b/meta/plugins/reftests/WebReport.py
@@ -0,0 +1,104 @@
+from cutekit import model, shell
+from pathlib import Path
+
+from .utils import fetchMessage
+from .Test import TestCase
+from .reporter import Reporter
+
+
+class WebReport(Reporter):
+ """
+ Reporter that generates the HTML report for the reftests.
+ """
+
+ def __init__(self, SOURCE_DIR: Path, TEST_REPORT: Path):
+ self.TEST_REPORT: Path = TEST_REPORT
+ self.html = f"""
+
+
+
+
Reftest
+
+
+
+
+
+ """
+ self.testHtml = ""
+
+ def addTestCase(self, testId: int, passed: bool, test: TestCase):
+
+ addInfos = " - ".join(test.addInfos)
+ self.testHtml += f"""
+
+
+
{testId} - {test.tag} {addInfos}
+
{test.help}
+
+
+

+
Actual
+
+
+
+

+
{"Reference" if (test.tag == "rendering") else "Unexpected"}
+
+
+
+
+ Rendition
+
+
+
Reference
+
Source
+
+ """
+
+ def addSkippedCase(self):
+ pass
+
+ def addTestCategory(self, testId: int, props, file: Path, passCount: int, failCount: int, skippedCount: int):
+ self.html += f"""
+
+
+
{props.get("name")}
+
{props.get("help") or ""}
+
Source
+
{passCount} passed, {failCount} failed and {skippedCount} skipped
+
+ {self.testHtml}
+
+ """
+ self.testHtml = ""
+
+ def addSkippedFile(self, testId: int, props):
+ self.html += f"""
+
+
+
{props.get("name") or "Unnamed"}
+
Test Skipped
+
+
+ """
+
+ def finish(self, manifests: model.Registry, totalFailed: int, totalPassed: int, totalSkipped: int, context):
+ self.html += f"""
+
+
+
+
+ """
+ with (self.TEST_REPORT / "report.html").open("w") as f:
+ f.write(self.html)
+
+ # Automatically open the report in the default browser
+ if shell.which("xdg-open"):
+ shell.exec("xdg-open", str(self.TEST_REPORT / "report.html"))
+ elif shell.which("open"):
+ shell.exec("open", str(self.TEST_REPORT / "report.html"))
diff --git a/meta/plugins/reftests/__init__.py b/meta/plugins/reftests/__init__.py
new file mode 100644
index 00000000..12e32259
--- /dev/null
+++ b/meta/plugins/reftests/__init__.py
@@ -0,0 +1 @@
+from . import reftest # noqa E402, F401: Needed for side effect
diff --git a/meta/plugins/reftests/reftest.py b/meta/plugins/reftests/reftest.py
new file mode 100644
index 00000000..038651ea
--- /dev/null
+++ b/meta/plugins/reftests/reftest.py
@@ -0,0 +1,310 @@
+from cutekit import vt100, cli, builder, model, const
+from pathlib import Path
+
+import re
+
+# Local imports
+from .WebReport import WebReport
+from .CLIReport import CLIReport
+from .reporter import Reporter
+from .Test import TestCase, TestReference
+
+SOURCE_DIR: Path = Path(__file__).parent
+TESTS_DIR: Path = SOURCE_DIR.parent.parent.parent / "tests"
+TEST_REPORT: Path = (Path(const.PROJECT_CK_DIR) / "tests" / "report").absolute()
+
+
+def buildPaperMuncher(args: model.TargetArgs) -> builder.ProductScope:
+ """
+ Build paper-muncher with the given target arguments for later use in reftests.
+
+ Args:
+ args: The target arguments, which define the context for the build.
+
+ Returns:
+ The ProductScope result from building paper-muncher.
+
+ Raises:
+ RuntimeError: If the "paper-muncher" component cannot be found.
+ """
+
+ scope = builder.TargetScope.use(args)
+ PmComponent = scope.registry.lookup("paper-muncher", model.Component)
+ if PmComponent is None:
+ raise RuntimeError("paper-muncher not found")
+ return builder.build(scope, PmComponent)[0]
+
+
+class RefTestArgs(model.TargetArgs):
+ glob: str = cli.arg("g", "glob", "Glob pattern to match test files")
+ headless: bool = cli.arg(
+ None, "headless", "Run the tests without opening the report."
+ )
+ fast: bool = cli.arg(
+ None, "fast", "Proceed to the next test as soon as an error occurs."
+ )
+ runSkipped: bool = cli.arg(None, "run-skipped", "Run the skipped tests nonetheless")
+
+
+class TestResults:
+ """Tracks test execution results."""
+
+ def __init__(self) -> None:
+ self.passed: int = 0
+ self.failed: int = 0
+ self.skipped: int = 0
+ self.failedDetails: str = ""
+
+ def addPassed(self) -> None:
+ self.passed += 1
+
+ def addFailed(self, details: str = "") -> None:
+ self.failed += 1
+ if details:
+ self.failedDetails += details
+
+ def addSkipped(self) -> None:
+ self.skipped += 1
+
+ def merge(self, other: 'TestResults') -> None:
+ """Merge another TestResults into this one."""
+ self.passed += other.passed
+ self.failed += other.failed
+ self.skipped += other.skipped
+ self.failedDetails += other.failedDetails
+
+
+class TestRunnerContext:
+ """Context for test execution containing shared state and configuration."""
+
+ def __init__(self, args: RefTestArgs, paperMuncher: builder.ProductScope, reporters: list[Reporter]) -> None:
+ self.reporters: list[Reporter] = reporters
+ self.args: RefTestArgs = args
+ self.paperMuncher: builder.ProductScope = paperMuncher
+ self.currentTestId: int = 0
+ self.results: TestResults = TestResults()
+
+ def nextTestId(self) -> int:
+ """Get the current test ID and increment for next test."""
+ testId = self.currentTestId
+ self.currentTestId += 1
+ return testId
+
+ def shouldRunSkipped(self) -> bool:
+ """Check if skipped tests should be run."""
+ return self.args.runSkipped
+
+ def shouldStopOnFailure(self) -> bool:
+ """Check if execution should stop on first failure."""
+ return self.args.fast
+
+
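+# Regular expressions used to parse reftest files: tag properties, rendering/error cases, test blocks and containers.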
+REG_INFO = re.compile(r"""(\w+)=['"]([^'"]+)['"]""")
+REG_TESTS = re.compile(r"""<(rendering|error)([^>]*)>([\w\W]+?)</(?:rendering|error)>""")
+REG_TEST_BLOCKS = re.compile(r"""<test([^>]*)>([\w\W]+?)</test>""")
+REG_CONTAINER = re.compile(r"""<container>([\w\W]+?)</container>""")
+
+DEFAULT_CONTAINER = '<slot/>'
+
+
+class TestParser:
+ """Handles all test file parsing operations."""
+
+ @staticmethod
+ def parseProperties(text: str) -> dict[str, str]:
+ """Parse properties from XML-like attributes."""
+ return {prop: value for prop, value in REG_INFO.findall(text)}
+
+ @staticmethod
+ def parseTestCases(content: str) -> list[tuple[str, str, str]]:
+ """Parse individual test cases (rendering/error tags)."""
+ return REG_TESTS.findall(content)
+
+ @staticmethod
+ def parseTestBlocks(content: str) -> list[tuple[str, str]]:
+ """Parse test blocks from file content."""
+ return REG_TEST_BLOCKS.findall(content)
+
+ @staticmethod
+ def extractContainer(content: str) -> str:
+ """Extract container from test content or return default."""
+ match = REG_CONTAINER.search(content)
+ if match:
+ return match.group(1)
+ return DEFAULT_CONTAINER
+
+
+class ReportManager:
+ """Manages all test reporting operations."""
+
+ def __init__(self, reporters: list[Reporter]) -> None:
+ self._reporters: list[Reporter] = reporters
+
+ def reportTestCase(self, testId: int, passed: bool, test: TestCase) -> None:
+ """Report a single test case result."""
+ for reporter in self._reporters:
+ reporter.addTestCase(testId, passed, test)
+
+ def reportSkippedCase(self) -> None:
+ """Report a skipped test case."""
+ for reporter in self._reporters:
+ reporter.addSkippedCase()
+
+ def reportSkippedFile(self, testId: int, props: dict[str, str]) -> None:
+ """Report a skipped test file."""
+ for reporter in self._reporters:
+ reporter.addSkippedFile(testId, props)
+
+ def reportTestCategory(self, testId: int, props: dict[str, str], file: Path,
+ passCount: int, failCount: int, skippedCount: int) -> None:
+ """Report results for a test category."""
+ for reporter in self._reporters:
+ reporter.addTestCategory(testId, props, file, passCount, failCount, skippedCount)
+
+ def finish(self, manifests: model.Registry, results: TestResults, context: TestRunnerContext) -> None:
+ """Finish reporting and display final results."""
+ for reporter in self._reporters:
+ reporter.finish(manifests, results.failed, results.passed, results.skipped, context)
+
+
+class TestRunner:
+ """Handles test execution logic."""
+
+ def __init__(self, context: TestRunnerContext, reportManager: ReportManager) -> None:
+ self._context: TestRunnerContext = context
+ self._reportManager: ReportManager = reportManager
+ self._parser: TestParser = TestParser()
+
+ def _generateReferenceImage(self, testCase: TestCase) -> TestReference:
+ """Generate reference image from a test case."""
+ testCase.render()
+ return TestReference(
+ testCase.inputPath,
+ testCase.outputPath,
+ testCase.outputImage
+ )
+
+ def _runSingleTestCase(self, test: TestCase, skipped: bool = False) -> bool:
+ """Run a single test case and report results."""
+ testId: int = self._context.currentTestId
+
+ if skipped:
+ test.addInfos.append("skip flag")
+
+ ok: bool = test.run()
+ if not ok:
+ failureDetails: str = f"""Test {testId} failed.
+ file://{test.inputPath}
+ file://{TEST_REPORT / "report.html"}#case-{testId}
+ """
+ self._context.results.failedDetails += failureDetails
+
+ self._reportManager.reportTestCase(testId, ok, test)
+ self._context.currentTestId += 1
+
+ return ok
+
+ def _runTestCategory(self, test_content: str, props: dict[str, str],
+ container: str, file: Path, categorySkipped: bool = False) -> TestResults:
+ """Run all test cases in a category."""
+ categoryResults: TestResults = TestResults()
+ testCases: list[tuple[str, str, str]] = self._parser.parseTestCases(test_content)
+ reference: TestReference | None = None
+
+ for tag, info, testDocument in testCases:
+ caseProps: dict[str, str] = self._parser.parseProperties(info)
+ inputPath: Path = TEST_REPORT / f"{self._context.currentTestId}.xhtml"
+ imgPath: Path = TEST_REPORT / f"{self._context.currentTestId}.bmp"
+
+ # First test case is the reference
+ if not reference:
+ test = TestCase(props, inputPath, imgPath, self._context, tag,
+ testDocument, caseProps, container=container)
+ reference = self._generateReferenceImage(test)
+ continue
+
+ testSkipped: bool = categorySkipped or "skip" in caseProps
+
+ if testSkipped and not self._context.shouldRunSkipped():
+ categoryResults.addSkipped()
+ self._reportManager.reportSkippedCase()
+ continue
+
+ test = TestCase(props, inputPath, imgPath, self._context, tag,
+ testDocument, caseProps, container=container, reference=reference)
+
+ success: bool = self._runSingleTestCase(test, testSkipped)
+ if success:
+ categoryResults.addPassed()
+ else:
+ categoryResults.addFailed()
+
+ if self._context.shouldStopOnFailure():
+ break
+
+ self._reportManager.reportTestCategory(
+ self._context.currentTestId, props, file,
+ categoryResults.passed, categoryResults.failed, categoryResults.skipped
+ )
+
+ return categoryResults
+
+ def runTestFile(self, file: Path) -> TestResults:
+ """Run all tests in a file."""
+ fileResults: TestResults = TestResults()
+ print(f"Running {file.relative_to(TESTS_DIR)}...")
+
+ with file.open() as f:
+ content: str = f.read()
+
+ testBlocks: list[tuple[str, str]] = self._parser.parseTestBlocks(content)
+
+ for info, test_content in testBlocks:
+ props: dict[str, str] = self._parser.parseProperties(info)
+ categorySkipped: bool = "skip" in props
+
+ if categorySkipped and not self._context.shouldRunSkipped():
+ fileResults.addSkipped()
+ self._reportManager.reportSkippedFile(self._context.currentTestId, props)
+ continue
+
+ container: str = self._parser.extractContainer(test_content)
+ categoryResults: TestResults = self._runTestCategory(
+ test_content, props, container, file, categorySkipped
+ )
+ fileResults.merge(categoryResults)
+
+ print()
+ return fileResults
+
+
+@cli.command("reftests", "Manage the reftests")
+def _() -> None:
+ """Placeholder for the reftests command group."""
+ ...
+
+
+@cli.command("reftests/run", "Manage the reftests")
+def _(args: RefTestArgs) -> None:
+ """Run the reftest suite."""
+ paperMuncher: builder.ProductScope = buildPaperMuncher(args)
+ manifests: model.Registry = model.Registry.use(args)
+
+ TEST_REPORT.mkdir(parents=True, exist_ok=True)
+
+ reporters: list[Reporter] = []
+ if not args.headless:
+ reporters.append(WebReport(SOURCE_DIR, TEST_REPORT))
+ reporters.append(CLIReport(SOURCE_DIR, TEST_REPORT))
+
+ context: TestRunnerContext = TestRunnerContext(args, paperMuncher, reporters)
+ reportManager: ReportManager = ReportManager(reporters)
+ testRunner: TestRunner = TestRunner(context, reportManager)
+
+ for file in TESTS_DIR.glob(args.glob or "**/*.xhtml"):
+ fileResults: TestResults = testRunner.runTestFile(file)
+ context.results.merge(fileResults)
+
+ reportManager.finish(manifests, context.results, context)
diff --git a/meta/plugins/reftests/report.css b/meta/plugins/reftests/report.css
new file mode 100644
index 00000000..8d6defcb
--- /dev/null
+++ b/meta/plugins/reftests/report.css
@@ -0,0 +1,139 @@
+* {
+ margin: 0;
+ padding: 0;
+ box-sizing: border-box;
+}
+
+body {
+ --bg: #1b1b1c;
+ --bg2: #161616;
+ --font: #fafafa;
+ --failed: #c52b2b;
+ --passed: #74b553;
+}
+
+body.light {
+ --bg: #f3eee7;
+ --bg2: #f7ece7;
+ --font: #090909;
+ --failed: #c52b2b;
+ --passed: #74b553;
+}
+
+header {
+ padding: 8px;
+ background-color: var(--bg2);
+ color: #fafafa;
+ z-index: 100;
+}
+
+footer {
+ position: fixed;
+ bottom: 0;
+ left: 0;
+ right: 0;
+ padding: 8px;
+ background-color: var(--bg2);
+ z-index: 100;
+}
+
+.infoBar {
+ position: absolute;
+ transform: translateY(-1rem);
+ height: 100%;
+ width: 1rem;
+ left: 0;
+}
+
+.failed .infoBar {
+ background: var(--failed);
+}
+
+.passed .infoBar {
+ background: var(--passed);
+}
+
+.dark a:link {
+ color: #8bd3ff;
+}
+
+.dark a:visited {
+ color: #8e8bff;
+}
+
+.light a:link {
+ color: #267eb3;
+}
+
+.light a:visited {
+ color: #267eb3;
+}
+
+body {
+ font-family: sans-serif;
+ background-color: var(--bg);
+ color: var(--font);
+ font-size: 0.9rem;
+}
+
+.test {
+ padding: 1rem;
+ background-color: var(--bg2);
+ border-bottom: 1px solid #4f4f4f;
+ position: sticky;
+ gap: 0.2rem;
+ top: 0;
+ z-index: 100;
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+}
+
+h1 {
+ font-size: 1.2rem;
+ text-decoration: underline;
+}
+
+h2 {
+ font-size: 1.1rem;
+}
+
+.wrapper {
+ width: fit-content;
+}
+
+.test-case {
+ padding: 1rem;
+ padding-left: 2rem;
+ border-bottom: 1px solid #333;
+ width: fit-content;
+ min-width: 100vw;
+}
+
+.passed {
+}
+
+.failed {
+}
+
+.outputs {
+ margin: 1.2rem 0;
+ display: flex;
+ gap: 1rem;
+ width: fit-content;
+}
+
+.outputs > div {
+ display: flex;
+ gap: 0.5rem;
+ flex-direction: column-reverse;
+ align-items: center;
+}
+
+.actual {
+ border: 0px solid blue;
+}
+
+iframe {
+ border: none;
+}
\ No newline at end of file
diff --git a/meta/plugins/reftests/report.js b/meta/plugins/reftests/report.js
new file mode 100644
index 00000000..397bd52e
--- /dev/null
+++ b/meta/plugins/reftests/report.js
@@ -0,0 +1,23 @@
+function initTheme() {
+ const prefersDarkScheme = window.matchMedia("(prefers-color-scheme: dark)").matches;
+ if (prefersDarkScheme) {
+ document.body.classList.remove("light");
+ document.body.classList.add("dark");
+
+ } else {
+ document.body.classList.add("light");
+ document.body.classList.remove("dark");
+ }
+}
+
+initTheme();
+
+// Use a broadcast channel to tell other ref-tests instances to stop
+const id = Math.random().toString(36).substring(7);
+const channel = new BroadcastChannel('reftest');
+channel.onmessage = (event) => {
+ if (event.data.from !== id && event.data.msg === 'stop') {
+ window.close();
+ }
+}
+channel.postMessage({from: id, msg: 'stop'});
diff --git a/meta/plugins/reftests/reporter.py b/meta/plugins/reftests/reporter.py
new file mode 100644
index 00000000..ff7cc60e
--- /dev/null
+++ b/meta/plugins/reftests/reporter.py
@@ -0,0 +1,34 @@
+from abc import ABC, abstractmethod
+from cutekit import model
+from pathlib import Path
+
+from .Test import TestCase
+
+
+class Reporter(ABC):
+ """
+ Abstract base class that defines the common reporter interface (and keeps the type hints clean).
+ """
+
+ def __init__(self, SOURCE_DIR: Path, TEST_REPORT: Path):
+ pass
+
+ @abstractmethod
+ def addTestCase(self, testId: int, passed: bool, test: TestCase):
+ pass
+
+ @abstractmethod
+ def addTestCategory(self, testId: int, props, file: Path, passCount: int, failCount: int, skippedCount: int):
+ pass
+
+ @abstractmethod
+ def addSkippedFile(self, testId: int, props):
+ pass
+
+ @abstractmethod
+ def addSkippedCase(self):
+ pass
+
+ @abstractmethod
+ def finish(self, manifests: model.Registry, totalFailed: int, totalPassed: int, totalSkipped: int, context):
+ pass
diff --git a/meta/plugins/reftests/utils.py b/meta/plugins/reftests/utils.py
new file mode 100644
index 00000000..0cdb8f0f
--- /dev/null
+++ b/meta/plugins/reftests/utils.py
@@ -0,0 +1,45 @@
+from cutekit import model
+from random import randint
+from pathlib import Path
+
+
+def fetchFile(manifests: model.Registry, component: str, path: str) -> str:
+ """
+ Fetches the text content of a file from a specific component's directory.
+
+ Args:
+ manifests: The component registry used to look up component information.
+ component: The name of the component (e.g., "karm-core").
+ path: The relative path to the file within that component's directory
+ (e.g., "base/defs/error.inc").
+
+ Returns:
+ The entire content of the specified file as a string.
+
+ Raises:
+ AssertionError: If the specified component is not found in the registry.
+ """
+ component = manifests.lookup(component, model.Component)
+ assert component is not None
+ p = Path(component.dirname()) / path
+ with p.open() as f:
+ return f.read()
+
+
+def fetchMessage(manifests: model.Registry, type: str) -> str:
+ """
+ Fetches a random message from a ".inc" file (e.g., funny error or success messages).
+
+ Args:
+ manifests: The component registry used to look up component information.
+ type: The type of message to fetch (e.g., "witty", "nice"), which
+ corresponds to the name of the .inc file.
+
+ Returns:
+ A randomly selected message string from the fetched file.
+ """
+
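+ # Each .inc file holds comma-separated string literals, so wrapping the content in brackets evaluates to a Python list.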
+ messages = eval(
+ "[" + fetchFile(manifests, "karm-core", "base/defs/" + type + ".inc") + "]"
+ )
+ return messages[randint(0, len(messages) - 1)]