From 88f80816283e942f67e0d329e4e946e3000d1969 Mon Sep 17 00:00:00 2001 From: Lou ! Date: Thu, 13 Nov 2025 10:07:59 +0100 Subject: [PATCH 1/2] reftests: refactor to modularize it and make it more future proof. --- meta/plugins/__init__.py | 3 +- meta/plugins/reftest.py | 523 ----------------------------- meta/plugins/reftests/CLIReport.py | 47 +++ meta/plugins/reftests/Test.py | 102 ++++++ meta/plugins/reftests/WebReport.py | 101 ++++++ meta/plugins/reftests/__init__.py | 1 + meta/plugins/reftests/reftest.py | 237 +++++++++++++ meta/plugins/reftests/report.css | 139 ++++++++ meta/plugins/reftests/report.js | 23 ++ meta/plugins/reftests/reporter.py | 28 ++ meta/plugins/reftests/utils.py | 45 +++ 11 files changed, 725 insertions(+), 524 deletions(-) delete mode 100644 meta/plugins/reftest.py create mode 100644 meta/plugins/reftests/CLIReport.py create mode 100644 meta/plugins/reftests/Test.py create mode 100644 meta/plugins/reftests/WebReport.py create mode 100644 meta/plugins/reftests/__init__.py create mode 100644 meta/plugins/reftests/reftest.py create mode 100644 meta/plugins/reftests/report.css create mode 100644 meta/plugins/reftests/report.js create mode 100644 meta/plugins/reftests/reporter.py create mode 100644 meta/plugins/reftests/utils.py diff --git a/meta/plugins/__init__.py b/meta/plugins/__init__.py index c9d83ee5..9e1388a4 100644 --- a/meta/plugins/__init__.py +++ b/meta/plugins/__init__.py @@ -2,4 +2,5 @@ ensure((0, 10, 0)) -from . import reftest, tools, wpt # noqa E402, F401: Needed for side effect +from . import tools, wpt # noqa E402, F401: Needed for side effect +from . import reftests # noqa E402, F401: Needed for side effect diff --git a/meta/plugins/reftest.py b/meta/plugins/reftest.py deleted file mode 100644 index a39f924b..00000000 --- a/meta/plugins/reftest.py +++ /dev/null @@ -1,523 +0,0 @@ -from cutekit import shell, vt100, cli, builder, model, const -from pathlib import Path -from random import randint -import re -import textwrap -import time - - -def buildPaperMuncher(args: model.TargetArgs) -> builder.ProductScope: - scope = builder.TargetScope.use(args) - component = scope.registry.lookup("paper-muncher", model.Component) - if component is None: - raise RuntimeError("paper-muncher not found") - return builder.build(scope, component)[0] - - -def fetchFile(args: model.TargetArgs, component: str, path: str) -> str: - r = model.Registry.use(args) - c = r.lookup(component, model.Component) - assert c is not None - p = Path(c.dirname()) / path - with p.open() as f: - return f.read() - - -def fetchMessage(args: model.TargetArgs, type: str) -> str: - message = eval( - "[" + fetchFile(args, "karm-core", "base/defs/" + type + ".inc") + "]" - ) - return message[randint(0, len(message) - 1)] - - -def compareImages( - lhs: bytes, - rhs: bytes, - lowEpsilon: float = 0.05, - highEpsilon: float = 0.1, - strict=False, -) -> bool: - if strict: - return lhs == rhs - - if len(lhs) != len(rhs): - return False - - if lhs == rhs: - return True - - errorSum = 0 - for i in range(len(lhs)): - diff = abs(lhs[i] - rhs[i]) / 255 - if diff > highEpsilon: - # print(f"Image rejected with diff = {diff}") - return False - errorSum += diff > lowEpsilon - - if errorSum > len(lhs) // 100: - # print(f"Image reject with errorSum = {errorSum}") - return False - - return True - - -def runPaperMuncher(executable, type, xsize, ysize, page, outputPath, inputPath): - command = ["--feature", "*=on", "--quiet"] - - if type == "print": - command.extend(["--flow", "paginate"]) - - if xsize or not page: - 
command.extend(["--width", (xsize or 200) + "px"]) - - if ysize or not page: - command.extend(["--height", (ysize or 200) + "px"]) - - if page: - command.extend(["--page", page]) - - command += [ - "-o", - outputPath, - inputPath, - ] - - executable.popen(*command) - - -class RefTestArgs(model.TargetArgs): - glob: str = cli.arg("g", "glob") - headless: bool = cli.arg( - None, "headless", "Run the tests without opening the report." - ) - fast: str = cli.arg( - None, "fast", "Proceed to the next test as soon as an error occurs." - ) - runSkipped: bool = cli.arg(None, "run-skipped", "Run the skipped tests nonetheless") - - -@cli.command("reftests", "Manage the reftests") -def _(): ... - - -TESTS_DIR: Path = Path(__file__).parent.parent.parent / "tests" -TEST_REPORT = (Path(const.PROJECT_CK_DIR) / "tests" / "report").absolute() - - -@cli.command("reftests/clean", "Manage the reftests") -def _(): - for f in TEST_REPORT.glob("*.*"): - f.unlink() - TEST_REPORT.rmdir() - print(f"Cleaned {TEST_REPORT}") - - -@cli.command("reftests/run", "Manage the reftests") -def _(args: RefTestArgs): - paperMuncher = buildPaperMuncher(args) - - TEST_REPORT.mkdir(parents=True, exist_ok=True) - report = """ - - - - Reftest - - -
- Reftest report -
-""" - - def update_temp_file(path, container, rendering): - # write xhtml into the temporary file - xhtml = re.sub(r"", rendering, container) if container else rendering - with path.open("w") as f: - f.write(f"\n{textwrap.dedent(xhtml)}") - - REG_INFO = re.compile(r"""(\w+)=['"]([^'"]+)['"]""") - - def getInfo(txt): - return {prop: value for prop, value in REG_INFO.findall(txt)} - - passed = 0 - failed = 0 - skipped = 0 - test_failed = "" - - counter = 0 - for file in TESTS_DIR.glob(args.glob or "**/*.xhtml"): - if file.suffix != ".xhtml": - continue - print(f"Running {file.relative_to(TESTS_DIR)}...") - - with file.open() as f: - content = f.read() - - passCount = 0 - failCount = 0 - skippedCount = 0 - for info, test in re.findall(r"""]*)>([\w\W]+?)""", content): - props = getInfo(info) - - category_skipped = "skip" in props - type = props.get("type") # the type of test [render (default) | print] - - if category_skipped and not args.runSkipped: - skippedCount += 1 - skipped += 1 - - report += f""" -
-
-

{props.get("name") or "Unamed"}

-

Test Skipped

-
-
- """ - print(f"{vt100.YELLOW}○{vt100.RESET}", end="", flush=True) - continue - - test_report = "" - - search = re.search(r"""([\w\W]+?)""", test) - container = search and search.group(1) - if not container: - container = '' - - expected_xhtml = None - expected_image: bytes | None = None - expected_image_url = TEST_REPORT / f"{counter}.expected.bmp" - if props.get("name"): - ref_image = file.parent / f"{props.get('name')}.bmp" - if ref_image.exists(): - with ref_image.open("rb") as imageReader: - expected_image = imageReader.read() - - with expected_image_url.open("wb") as imageWriter: - imageWriter.write(expected_image) - - expected_image_url = ref_image - - for tag, info, rendering in re.findall( - r"""<(rendering|error)([^>]*)>([\w\W]+?)""", test - ): - renderingProps = getInfo(info) - test_skipped = category_skipped or "skip" in renderingProps - if test_skipped and not args.runSkipped: - skippedCount += 1 - skipped += 1 - - print(f"{vt100.YELLOW}○{vt100.RESET}", end="", flush=True) - continue - - input_path = TEST_REPORT / f"{counter}.xhtml" - - update_temp_file(input_path, container, rendering) - - # generate temporary bmp - img_path = TEST_REPORT / f"{counter}.bmp" - - xsize = props.get("size", "200") - ysize = xsize - page = props.get("page") - if props.get("size") == "full": - xsize = "800" - ysize = "600" - - runPaperMuncher( - paperMuncher, type, xsize, ysize, page, img_path, input_path - ) - - with img_path.open("rb") as imageFile: - output_image: bytes = imageFile.read() - - # the first template is the expected value - if not expected_xhtml: - expected_xhtml = rendering - if not expected_image: - expected_image = output_image - with (TEST_REPORT / f"{counter}.expected.bmp").open( - "wb" - ) as imageWriter: - imageWriter.write(expected_image) - continue - - # check if the rendering is different - assert expected_image is not None - assert output_image is not None - - ok = compareImages(expected_image, output_image) == (tag == "rendering") - if ok: - passCount += 1 - else: - failCount += 1 - - help = renderingProps.get("help") - - if ok: - passed += 1 - print(f"{vt100.GREEN}●{vt100.RESET}", end="", flush=True) - else: - failed += 1 - print(f"{vt100.RED}●{vt100.RESET}", end="", flush=True) - test_failed += f"""Test {counter} failed. -file://{input_path} -file://{TEST_REPORT / "report.html"}#case-{counter} -""" - - add_infos = [] - if test_skipped: - add_infos.append("skip flag") - if len(add_infos) != 0: - add_infos = " [" + ", ".join(add_infos) + "]" - else: - add_infos = "" - - test_report += f""" -
-
-

{counter} - {tag} {add_infos}

-

{help}

-
-
- -
Actual
-
- -
- -
{"Reference" if (tag == "rendering") else "Unexpected"}
-
- -
- -
Rendition
-
-
- Reference - Source -
- """ - - counter += 1 - - if args.fast: - break - report += f""" -
-
-

{props.get("name")}

-

{props.get("help") or ""}

- Source - {passCount} passed, {failCount} failed and {skippedCount} skipped -
- {test_report} -
- """ - print() - report += f""" -
-

{fetchMessage(args, "witty" if failed else "nice")}

-

Failed {failed} tests, Passed {passed} tests, Skipped {skipped}

-
- """ - - report += """ - - - - - - - """ - - with (TEST_REPORT / "report.html").open("w") as f: - f.write(report) - - if not args.headless: - if shell.which("xdg-open"): - shell.exec("xdg-open", str(TEST_REPORT / "report.html")) - elif shell.which("open"): - shell.exec("open", str(TEST_REPORT / "report.html")) - - print() - if failed: - print(f"{vt100.BRIGHT_GREEN}// {fetchMessage(args, 'witty')}{vt100.RESET}") - print( - f"{vt100.RED}Failed {failed} tests{vt100.RESET}, {vt100.GREEN}Passed {passed} tests{vt100.RESET}" - ) - print(f"Report: {TEST_REPORT / 'report.html'}") - - print() - print("Failed tests details:") - print(test_failed) - raise RuntimeError("Some tests failed") - else: - print(f"{vt100.GREEN}// {fetchMessage(args, 'nice')}{vt100.RESET}") - print(f"{vt100.GREEN}All tests passed{vt100.RESET}") - print(f"Report: {TEST_REPORT / 'report.html'}") diff --git a/meta/plugins/reftests/CLIReport.py b/meta/plugins/reftests/CLIReport.py new file mode 100644 index 00000000..caf47656 --- /dev/null +++ b/meta/plugins/reftests/CLIReport.py @@ -0,0 +1,47 @@ +from cutekit import model, vt100, const +from pathlib import Path + +from .utils import fetchMessage +from .Test import TestCase + + +class CLIReport: + """ + Object to abstract the generation of the cli report for the reftests. + """ + + def __init__(self, SOURCE_DIR: Path, TEST_REPORT: Path): + self.TEST_REPORT: Path = TEST_REPORT + + def addTestCase(self, testId: int, passed: bool, test: TestCase): + if passed: + print(f"{vt100.GREEN}●{vt100.RESET}", end="", flush=True) + else: + print(f"{vt100.RED}●{vt100.RESET}", end="", flush=True) + + def addTestCategory(self, testId: int, props, file: Path, passCount: int, failCount: int, skippedCount: int): + pass + + def addSkippedFile(self, testId: int, props): + print(f"{vt100.YELLOW}○{vt100.RESET}", end="", flush=True) + + def addSkippedCase(self): + print(f"{vt100.YELLOW}○{vt100.RESET}", end="", flush=True) + + def finish(self, manifests: model.Registry, totalFailed: int, totalPassed: int, totalSkipped: int, context): + print() + if totalFailed: + print(f"{vt100.BRIGHT_GREEN}// {fetchMessage(manifests, 'witty')}{vt100.RESET}") + print( + f"{vt100.RED}Failed {totalFailed} tests{vt100.RESET}, {vt100.GREEN}Passed {totalPassed} tests{vt100.RESET}" + ) + print(f"Report: {self.TEST_REPORT / 'report.html'}") + + print() + print("Failed tests details:") + print(context.testFailed) + raise RuntimeError("Some tests failed") + else: + print(f"{vt100.GREEN}// {fetchMessage(manifests, 'nice')}{vt100.RESET}") + print(f"{vt100.GREEN}All tests passed{vt100.RESET}") + print(f"Report: {self.TEST_REPORT / 'report.html'}") diff --git a/meta/plugins/reftests/Test.py b/meta/plugins/reftests/Test.py new file mode 100644 index 00000000..ae88afe4 --- /dev/null +++ b/meta/plugins/reftests/Test.py @@ -0,0 +1,102 @@ +from pathlib import Path +from cutekit import const +import textwrap +import re + +TEST_REPORT = (Path(const.PROJECT_CK_DIR) / "tests" / "report").absolute() + + +class TestReference: + def __init__(self, path: Path, imagePath: Path, image: bytes | None = None): + self.path = path # path to the reference document + self.imagePath = imagePath + self.image = image + + +class TestCase: + def __init__(self, props, inputPath, outputPath, context, tag, testDocument, caseProps, + container=None, reference: TestReference | None = None): + self.outputImage: bytes | None = None + self.addInfos = [] + + self.type = props.get("type") # the type of test [render (default) | print] + self.xsize = props.get("size", 
"200") + self.ysize = self.xsize + + if props.get("size") == "full": + self.xsize = "800" + self.ysize = "600" + + self.page = props.get("page") # page size + self.inputPath = inputPath # path to test case's document + self.outputPath = outputPath # path to the output image + self.context = context + self.ref = reference + self.help = caseProps.get("help", "") + self.tag = tag # test tag [rendering | error] + + if not container: + container = '' + + self.container = container + self.testDocument = testDocument + + def render(self): + def updateTempFile(path, rendering): + # write xhtml into the temporary file + xhtml = re.sub(r"", rendering, self.container) if self.container else rendering + with path.open("w") as f: + f.write(f"\n{textwrap.dedent(xhtml)}") + + updateTempFile(self.inputPath, self.testDocument) + + runPaperMuncher(self.context.paperMuncher, self) + + with self.outputPath.open("rb") as imageFile: + self.outputImage = imageFile.read() + + def run(self) -> bool: + self.render() + + return areImagesIdentical(self.ref.image, self.outputImage) == (self.tag == "rendering") + + +def runPaperMuncher(executable, test: TestCase): + command = ["--feature", "*=on", "--quiet"] + + if test.type == "print": + command.extend(["--flow", "paginate"]) + + if test.xsize or not test.page: + command.extend(["--width", (test.xsize or 200) + "px"]) + + if test.ysize or not test.page: + command.extend(["--height", (test.ysize or 200) + "px"]) + + if test.page: + command.extend(["--page", test.page]) + + command += [ + "-o", + test.outputPath, + test.inputPath, + ] + + executable.popen(*command) + + +def areImagesIdentical(image1: bytes, image2: bytes) -> bool: + """ + Compare the results from the reftests by checking if the images are identical. + + This method is sensitive to any changes in the image, including compression artifacts. + If you want to compare the images with more tolerance use a SSIM. + + Args: + image1: The byte content of the first image. + image2: The byte content of the second image. + + Returns: + True if the images are identical (byte-for-byte), False otherwise. + """ + return image1 == image2 diff --git a/meta/plugins/reftests/WebReport.py b/meta/plugins/reftests/WebReport.py new file mode 100644 index 00000000..7b166ade --- /dev/null +++ b/meta/plugins/reftests/WebReport.py @@ -0,0 +1,101 @@ +from cutekit import model, shell +from pathlib import Path + +from .utils import fetchMessage +from .Test import TestCase +from .reporter import Reporter + + +class WebReport(Reporter): + """ + Object to abstract the generation of the web report for the reftests. + """ + + def __init__(self, SOURCE_DIR: Path, TEST_REPORT: Path): + self.TEST_REPORT: Path = TEST_REPORT + self.html = f""" + + + + Reftest + + + + +
+ Reftest report +
+ """ + self.testHtml = "" + + def addTestCase(self, testId: int, passed: bool, test: TestCase): + + addInfos = " - ".join(test.addInfos) + self.testHtml += f""" +
+
+

{testId} - {test.tag} {addInfos}

+

{test.help}

+
+
+ +
Actual
+
+ +
+ +
{"Reference" if (test.tag == "rendering") else "Unexpected"}
+
+ +
+ +
Rendition
+
+
+ Reference + Source +
+ """ + + def addTestCategory(self, testId: int, props, file: Path, passCount: int, failCount: int, skippedCount: int): + self.html += f""" +
+
+

{props.get("name")}

+

{props.get("help") or ""}

+ Source + {passCount} passed, {failCount} failed and {skippedCount} skipped +
+ {self.testHtml} +
+ """ + self.testHtml = "" + + def addSkippedFile(self, testId: int, props): + self.html += f""" +
+
+

{props.get("name") or "Unnamed"}

+

Test Skipped

+
+
+ """ + + def finish(self, manifests: model.Registry, totalFailed: int, totalPassed: int, totalSkipped: int, context): + self.html += f""" +
+

{fetchMessage(manifests, "witty" if totalFailed != 0 else "nice")}

+

Failed {totalFailed} tests, Passed {totalPassed} tests, Skipped {totalSkipped}

+
+ + + + """ + with (self.TEST_REPORT / "report.html").open("w") as f: + f.write(self.html) + + # Automatically open the report in the default browser + if shell.which("xdg-open"): + shell.exec("xdg-open", str(self.TEST_REPORT / "report.html")) + elif shell.which("open"): + shell.exec("open", str(self.TEST_REPORT / "report.html")) diff --git a/meta/plugins/reftests/__init__.py b/meta/plugins/reftests/__init__.py new file mode 100644 index 00000000..12e32259 --- /dev/null +++ b/meta/plugins/reftests/__init__.py @@ -0,0 +1 @@ +from . import reftest # noqa E402, F401: Needed for side effect diff --git a/meta/plugins/reftests/reftest.py b/meta/plugins/reftests/reftest.py new file mode 100644 index 00000000..0c193b6d --- /dev/null +++ b/meta/plugins/reftests/reftest.py @@ -0,0 +1,237 @@ +from cutekit import vt100, cli, builder, model, const +from pathlib import Path + +import re + +# Local imports +from .utils import fetchMessage +from .WebReport import WebReport +from .CLIReport import CLIReport +from .reporter import Reporter +from .Test import TestCase, TestReference + +SOURCE_DIR: Path = Path(__file__).parent +TESTS_DIR: Path = SOURCE_DIR.parent.parent.parent / "tests" +TEST_REPORT = (Path(const.PROJECT_CK_DIR) / "tests" / "report").absolute() + + +def buildPaperMuncher(args: model.TargetArgs) -> builder.ProductScope: + """ + Build paper-muncher with the given target arguments for later use in reftests. + + Args: + args: The target arguments, which define the context for the build. + + Returns: + The ProductScope result from building paper-muncher. + + Raises: + RuntimeError: If the "paper-muncher" component cannot be found. + """ + + scope = builder.TargetScope.use(args) + PmComponent = scope.registry.lookup("paper-muncher", model.Component) + if PmComponent is None: + raise RuntimeError("paper-muncher not found") + return builder.build(scope, PmComponent)[0] + + +class RefTestArgs(model.TargetArgs): + glob: str = cli.arg("g", "glob") + headless: bool = cli.arg( + None, "headless", "Run the tests without opening the report." + ) + fast: str = cli.arg( + None, "fast", "Proceed to the next test as soon as an error occurs." + ) + runSkipped: bool = cli.arg(None, "run-skipped", "Run the skipped tests nonetheless") + + +class TestRunnerContext: + def __init__(self, args: RefTestArgs, paperMuncher: builder.ProductScope, reporters: list[Reporter]): + self.reporters = reporters + self.args = args + self.paperMuncher = paperMuncher + self.currentTestId: int = 0 + self.testFailed: str = "" # str repr of all failed tests + + +REG_INFO = re.compile(r"""(\w+)=['"]([^'"]+)['"]""") + + +def getInfo(txt): + return {prop: value for prop, value in REG_INFO.findall(txt)} + + +def getTests(txt): + return re.findall( + r"""<(rendering|error)([^>]*)>([\w\W]+?)""", txt + ) + + +def reportTestCase(context: TestRunnerContext, ok: bool, test: TestCase, skipped: bool = False): + if skipped: + test.addInfos.append("skip flag") + if len(test.addInfos) != 0: + addInfos = " [" + ", ".join(test.addInfos) + "]" + else: + addInfos = "" + + for report in context.reporters: + report.addTestCase(context.currentTestId, ok, test) + + +def runTestCase(test, context: TestRunnerContext, skipped: bool = False) -> bool: + ok = test.run() + if not ok: + context.testFailed += f"""Test {context.currentTestId} failed. 
+ file://{test.inputPath} + file://{TEST_REPORT / "report.html"}#case-{context.currentTestId} + """ + + reportTestCase( + context, + ok, + test, + skipped=skipped, + ) + + context.currentTestId += 1 + + return ok + + +def generateReferenceImage(testCase) -> TestReference: + testCase.render() + + return TestReference( + testCase.inputPath, + testCase.outputPath, + testCase.outputImage + ) + + +def runTestCategory(context: TestRunnerContext, test_content: str, props, container, file, categorySkipped=False): + passedCount = 0 + failedCount = 0 + skippedCount = 0 + + tests = getTests(test_content) + reference = None + + for tag, info, testDocument in tests: + caseProps = getInfo(info) + + inputPath = TEST_REPORT / f"{context.currentTestId}.xhtml" + # generate temporary bmp + imgPath = TEST_REPORT / f"{context.currentTestId}.bmp" + + if not reference: + test = TestCase(props, inputPath, imgPath, context, tag, testDocument, + caseProps=getInfo(info), + container=container) + + # the first template of the category is the reference document + reference = generateReferenceImage(test) + continue + + testSkipped = categorySkipped or "skip" in caseProps + + if testSkipped and not context.args.runSkipped: + skippedCount += 1 + + for report in context.reporters: + report.addSkippedCase() + + continue + + test = TestCase(props, inputPath, imgPath, context, tag, testDocument, caseProps, + container=container, reference=reference) + + success = runTestCase(test, context, testSkipped) + if success: + passedCount += 1 + else: + failedCount += 1 + + if context.args.fast: + break + + for report in context.reporters: + report.addTestCategory(context.currentTestId, props, file, passedCount, failedCount, skippedCount) + return passedCount, failedCount, skippedCount + + +def runTestFile(context: TestRunnerContext, file: Path): + passedCount = 0 + failedCount = 0 + skippedCount = 0 + + print(f"Running {file.relative_to(TESTS_DIR)}...") + + def getContainer(test_content: str) -> str | None: + searchContainer = re.search(r"""([\w\W]+?)""", test) + container = searchContainer and searchContainer.group(1) + if not container: + container = '' + return container + + with file.open() as f: + content = f.read() + + for info, test in re.findall(r"""]*)>([\w\W]+?)""", content): + props = getInfo(info) + + categorySkipped = "skip" in props + + if categorySkipped and not context.args.runSkipped: + skippedCount += 1 + for report in context.reporters: + report.addSkippedFile(context.currentTestId, props) + continue + + container = getContainer(test) + + catPassed, catFailed, catSkipped = runTestCategory(context, test, props, container, file, categorySkipped) + passedCount += catPassed + failedCount += catFailed + skippedCount += catSkipped + + print() + return context.currentTestId, passedCount, failedCount, skippedCount + + +@cli.command("reftests", "Manage the reftests") +def _(): ... 
# Placeholder for the reftests command group + + +@cli.command("reftests/run", "Manage the reftests") +def _(args: RefTestArgs): + reporters: list[Reporter] = [] + paperMuncher = buildPaperMuncher(args) + manifests = model.Registry.use(args) + + TEST_REPORT.mkdir(parents=True, exist_ok=True) + + if not args.headless: + webReport = WebReport(SOURCE_DIR, TEST_REPORT) + reporters.append(webReport) + + cliReport = CLIReport(SOURCE_DIR, TEST_REPORT) + reporters.append(cliReport) + + passed = 0 + failed = 0 + skipped = 0 + + context = TestRunnerContext(args, paperMuncher, + reporters) # storing these in a context object for easier passing around + for file in TESTS_DIR.glob(args.glob or "**/*.xhtml"): + testId, filePassed, fileFailed, fileSkipped = runTestFile(context, file) + passed += filePassed + failed += fileFailed + skipped += fileSkipped + + # Testing ended - reporting results + for report in reporters: + report.finish(manifests, failed, passed, skipped, context) diff --git a/meta/plugins/reftests/report.css b/meta/plugins/reftests/report.css new file mode 100644 index 00000000..8d6defcb --- /dev/null +++ b/meta/plugins/reftests/report.css @@ -0,0 +1,139 @@ +* { + margin: 0; + padding: 0; + box-sizing: border-box; +} + +body { + --bg: #1b1b1c; + --bg2: #161616; + --font: #fafafa; + --failed: #c52b2b; + --passed: #74b553; +} + +body.light { + --bg: #f3eee7; + --bg2: #f7ece7; + --font: #090909; + --failed: #c52b2b; + --passed: #74b553; +} + +header { + padding: 8px; + background-color: var(--bg2); + color: #fafafa; + z-index: 100; +} + +footer { + position: fixed; + bottom: 0; + left: 0; + right: 0; + padding: 8px; + background-color: var(--bg2); + z-index: 100; +} + +.infoBar { + position: absolute; + transform: translateY(-1rem); + height: 100%; + width: 1rem; + left: 0; +} + +.failed .infoBar { + background: var(--failed); +} + +.passed .infoBar { + background: var(--passed); +} + +.dark a:link { + color: #8bd3ff; +} + +.dark a:visited { + color: #8e8bff; +} + +.light a:link { + color: #267eb3; +} + +.light a:visited { + color: #267eb3; +} + +body { + font-family: sans-serif; + background-color: var(--bg); + color: var(--font); + font-size: 0.9rem; +} + +.test { + padding: 1rem; + background-color: var(--bg2); + border-bottom: 1px solid #4f4f4f; + position: sticky; + gap: 0.2rem; + top: 0; + z-index: 100; + display: flex; + flex-direction: column; + align-items: center; +} + +h1 { + font-size: 1.2rem; + text-decoration: underline; +} + +h2 { + font-size: 1.1rem; +} + +.wrapper { + width: fit-content; +} + +.test-case { + padding: 1rem; + padding-left: 2rem; + border-bottom: 1px solid #333; + width: fit-content; + min-width: 100vw; +} + +.passed { +} + +.failed { +} + +.outputs { + margin: 1.2rem 0; + display: flex; + gap: 1rem; + width: fit-content; +} + +.outputs > div { + display: flex; + gap: 0.5rem; + flex-direction: column-reverse; + align-items: center; +} + +.actual { + border: 0px solid blue; +} + +iframe { + border: none; +} \ No newline at end of file diff --git a/meta/plugins/reftests/report.js b/meta/plugins/reftests/report.js new file mode 100644 index 00000000..397bd52e --- /dev/null +++ b/meta/plugins/reftests/report.js @@ -0,0 +1,23 @@ +function initTheme() { + const prefersDarkScheme = window.matchMedia("(prefers-color-scheme: dark)").matches; + if (prefersDarkScheme) { + document.body.classList.remove("light"); + document.body.classList.add("dark"); + + } else { + document.body.classList.add("light"); + document.body.classList.remove("dark"); + } +} + +initTheme(); + 
+// Use a broadcast channel to tell other ref-tests instances to stop +const id = Math.random().toString(36).substring(7); +const channel = new BroadcastChannel('reftest'); +channel.onmessage = (event) => { + if (event.data.id !== id && event.data.msg === 'stop') { + window.close(); + } +} +channel.postMessage({from: id, msg: 'stop'}); diff --git a/meta/plugins/reftests/reporter.py b/meta/plugins/reftests/reporter.py new file mode 100644 index 00000000..4b43f407 --- /dev/null +++ b/meta/plugins/reftests/reporter.py @@ -0,0 +1,28 @@ +from cutekit import model +from pathlib import Path + +from .Test import TestCase + + +class Reporter: + """ + Object to ensure every reporter has the same interface (and to clean the types). + """ + + def __init__(self, SOURCE_DIR: Path, TEST_REPORT: Path): + pass + + def addTestCase(self, testId: int, passed: bool, test: TestCase): + pass + + def addTestCategory(self, testId: int, props, file: Path, passCount: int, failCount: int, skippedCount: int): + pass + + def addSkippedFile(self, testId: int, props): + pass + + def addSkippedCase(self): + pass + + def finish(self, manifests: model.Registry, totalFailed: int, totalPassed: int, totalSkipped: int, context): + pass diff --git a/meta/plugins/reftests/utils.py b/meta/plugins/reftests/utils.py new file mode 100644 index 00000000..0cdb8f0f --- /dev/null +++ b/meta/plugins/reftests/utils.py @@ -0,0 +1,45 @@ +from cutekit import model +from random import randint +from pathlib import Path + + +def fetchFile(manifests: model.Registry, component: str, path: str) -> str: + """ + Fetches the text content of a file from a specific component's directory. + + Args: + manifests: The component registry used to look up component information. + component: The name of the component (e.g., "karm-core"). + path: The relative path to the file within that component's directory + (e.g., "base/defs/error.inc"). + + Returns: + The entire content of the specified file as a string. + + Raises: + AssertionError: If the specified component is not found in the registry. + """ + component = manifests.lookup(component, model.Component) + assert component is not None + p = Path(component.dirname()) / path + with p.open() as f: + return f.read() + + +def fetchMessage(manifests: model.Registry, type: str) -> str: + """ + Fetches a random message from a ".inc" file. (e.g., funny error/success messages) + + Args: + manifests: The component registry used to look up component information. + type: The type of message to fetch (e.g., "witty", "nice"), which + corresponds to the name of the .inc file. + + Returns: + A randomly selected message string from the fetched file. + """ + + messages = eval( + "[" + fetchFile(manifests, "karm-core", "base/defs/" + type + ".inc") + "]" + ) + return messages[randint(0, len(messages) - 1)] From b53f0cedf152c7a21807e9760c3ee813283333fd Mon Sep 17 00:00:00 2001 From: Lou ! 
Date: Mon, 8 Dec 2025 11:43:33 +0100
Subject: [PATCH 2/2] reftests: taking the review into account

---
 meta/plugins/reftests/CLIReport.py |   3 +-
 meta/plugins/reftests/WebReport.py |   3 +
 meta/plugins/reftests/reftest.py   | 367 +++++++++++++++++------------
 meta/plugins/reftests/reporter.py  |   8 +-
 4 files changed, 232 insertions(+), 149 deletions(-)

diff --git a/meta/plugins/reftests/CLIReport.py b/meta/plugins/reftests/CLIReport.py
index caf47656..f242de1c 100644
--- a/meta/plugins/reftests/CLIReport.py
+++ b/meta/plugins/reftests/CLIReport.py
@@ -3,9 +3,10 @@
 
 from .utils import fetchMessage
 from .Test import TestCase
+from .reporter import Reporter
 
 
-class CLIReport:
+class CLIReport(Reporter):
     """
     Object to abstract the generation of the cli report for the reftests.
     """
diff --git a/meta/plugins/reftests/WebReport.py b/meta/plugins/reftests/WebReport.py
index 7b166ade..06187ec8 100644
--- a/meta/plugins/reftests/WebReport.py
+++ b/meta/plugins/reftests/WebReport.py
@@ -57,6 +57,9 @@ def addTestCase(self, testId: int, passed: bool, test: TestCase):
""" + def addSkippedCase(self): + pass + def addTestCategory(self, testId: int, props, file: Path, passCount: int, failCount: int, skippedCount: int): self.html += f"""
diff --git a/meta/plugins/reftests/reftest.py b/meta/plugins/reftests/reftest.py index 0c193b6d..038651ea 100644 --- a/meta/plugins/reftests/reftest.py +++ b/meta/plugins/reftests/reftest.py @@ -4,7 +4,6 @@ import re # Local imports -from .utils import fetchMessage from .WebReport import WebReport from .CLIReport import CLIReport from .reporter import Reporter @@ -12,7 +11,7 @@ SOURCE_DIR: Path = Path(__file__).parent TESTS_DIR: Path = SOURCE_DIR.parent.parent.parent / "tests" -TEST_REPORT = (Path(const.PROJECT_CK_DIR) / "tests" / "report").absolute() +TEST_REPORT: Path = (Path(const.PROJECT_CK_DIR) / "tests" / "report").absolute() def buildPaperMuncher(args: model.TargetArgs) -> builder.ProductScope: @@ -37,201 +36,275 @@ def buildPaperMuncher(args: model.TargetArgs) -> builder.ProductScope: class RefTestArgs(model.TargetArgs): - glob: str = cli.arg("g", "glob") + glob: str = cli.arg("g", "glob", "Glob pattern to match test files") headless: bool = cli.arg( None, "headless", "Run the tests without opening the report." ) - fast: str = cli.arg( + fast: bool = cli.arg( None, "fast", "Proceed to the next test as soon as an error occurs." ) runSkipped: bool = cli.arg(None, "run-skipped", "Run the skipped tests nonetheless") -class TestRunnerContext: - def __init__(self, args: RefTestArgs, paperMuncher: builder.ProductScope, reporters: list[Reporter]): - self.reporters = reporters - self.args = args - self.paperMuncher = paperMuncher - self.currentTestId: int = 0 - self.testFailed: str = "" # str repr of all failed tests - - -REG_INFO = re.compile(r"""(\w+)=['"]([^'"]+)['"]""") - - -def getInfo(txt): - return {prop: value for prop, value in REG_INFO.findall(txt)} - - -def getTests(txt): - return re.findall( - r"""<(rendering|error)([^>]*)>([\w\W]+?)""", txt - ) - - -def reportTestCase(context: TestRunnerContext, ok: bool, test: TestCase, skipped: bool = False): - if skipped: - test.addInfos.append("skip flag") - if len(test.addInfos) != 0: - addInfos = " [" + ", ".join(test.addInfos) + "]" - else: - addInfos = "" - - for report in context.reporters: - report.addTestCase(context.currentTestId, ok, test) - - -def runTestCase(test, context: TestRunnerContext, skipped: bool = False) -> bool: - ok = test.run() - if not ok: - context.testFailed += f"""Test {context.currentTestId} failed. 
- file://{test.inputPath} - file://{TEST_REPORT / "report.html"}#case-{context.currentTestId} - """ +class TestResults: + """Tracks test execution results.""" - reportTestCase( - context, - ok, - test, - skipped=skipped, - ) + def __init__(self) -> None: + self.passed: int = 0 + self.failed: int = 0 + self.skipped: int = 0 + self.failedDetails: str = "" - context.currentTestId += 1 + def addPassed(self) -> None: + self.passed += 1 - return ok + def addFailed(self, details: str = "") -> None: + self.failed += 1 + if details: + self.failedDetails += details + def addSkipped(self) -> None: + self.skipped += 1 -def generateReferenceImage(testCase) -> TestReference: - testCase.render() + def merge(self, other: 'TestResults') -> None: + """Merge another TestResults into this one.""" + self.passed += other.passed + self.failed += other.failed + self.skipped += other.skipped + self.failedDetails += other.failedDetails - return TestReference( - testCase.inputPath, - testCase.outputPath, - testCase.outputImage - ) +class TestRunnerContext: + """Context for test execution containing shared state and configuration.""" -def runTestCategory(context: TestRunnerContext, test_content: str, props, container, file, categorySkipped=False): - passedCount = 0 - failedCount = 0 - skippedCount = 0 + def __init__(self, args: RefTestArgs, paperMuncher: builder.ProductScope, reporters: list[Reporter]) -> None: + self.reporters: list[Reporter] = reporters + self.args: RefTestArgs = args + self.paperMuncher: builder.ProductScope = paperMuncher + self.currentTestId: int = 0 + self.results: TestResults = TestResults() - tests = getTests(test_content) - reference = None + def nextTestId(self) -> int: + """Get the current test ID and increment for next test.""" + testId = self.currentTestId + self.currentTestId += 1 + return testId - for tag, info, testDocument in tests: - caseProps = getInfo(info) + def shouldRunSkipped(self) -> bool: + """Check if skipped tests should be run.""" + return self.args.runSkipped - inputPath = TEST_REPORT / f"{context.currentTestId}.xhtml" - # generate temporary bmp - imgPath = TEST_REPORT / f"{context.currentTestId}.bmp" + def shouldStopOnFailure(self) -> bool: + """Check if execution should stop on first failure.""" + return self.args.fast - if not reference: - test = TestCase(props, inputPath, imgPath, context, tag, testDocument, - caseProps=getInfo(info), - container=container) - # the first template of the category is the reference document - reference = generateReferenceImage(test) - continue +REG_INFO = re.compile(r"""(\w+)=['"]([^'"]+)['"]""") +REG_TESTS = re.compile(r"""<(rendering|error)([^>]*)>([\w\W]+?)""") +REG_TEST_BLOCKS = re.compile(r"""]*)>([\w\W]+?)""") +REG_CONTAINER = re.compile(r"""([\w\W]+?)""") + +DEFAULT_CONTAINER = '' + + +class TestParser: + """Handles all test file parsing operations.""" + + @staticmethod + def parseProperties(text: str) -> dict[str, str]: + """Parse properties from XML-like attributes.""" + return {prop: value for prop, value in REG_INFO.findall(text)} + + @staticmethod + def parseTestCases(content: str) -> list[tuple[str, str, str]]: + """Parse individual test cases (rendering/error tags).""" + return REG_TESTS.findall(content) + + @staticmethod + def parseTestBlocks(content: str) -> list[tuple[str, str]]: + """Parse test blocks from file content.""" + return REG_TEST_BLOCKS.findall(content) + + @staticmethod + def extractContainer(content: str) -> str: + """Extract container from test content or return default.""" + match = 
REG_CONTAINER.search(content) + if match: + return match.group(1) + return DEFAULT_CONTAINER + + +class ReportManager: + """Manages all test reporting operations.""" + + def __init__(self, reporters: list[Reporter]) -> None: + self._reporters: list[Reporter] = reporters + + def reportTestCase(self, testId: int, passed: bool, test: TestCase) -> None: + """Report a single test case result.""" + for reporter in self._reporters: + reporter.addTestCase(testId, passed, test) + + def reportSkippedCase(self) -> None: + """Report a skipped test case.""" + for reporter in self._reporters: + reporter.addSkippedCase() + + def reportSkippedFile(self, testId: int, props: dict[str, str]) -> None: + """Report a skipped test file.""" + for reporter in self._reporters: + reporter.addSkippedFile(testId, props) + + def reportTestCategory(self, testId: int, props: dict[str, str], file: Path, + passCount: int, failCount: int, skippedCount: int) -> None: + """Report results for a test category.""" + for reporter in self._reporters: + reporter.addTestCategory(testId, props, file, passCount, failCount, skippedCount) + + def finish(self, manifests: model.Registry, results: TestResults, context: TestRunnerContext) -> None: + """Finish reporting and display final results.""" + for reporter in self._reporters: + reporter.finish(manifests, results.failed, results.passed, results.skipped, context) + + +class TestRunner: + """Handles test execution logic.""" + + def __init__(self, context: TestRunnerContext, reportManager: ReportManager) -> None: + self._context: TestRunnerContext = context + self._reportManager: ReportManager = reportManager + self._parser: TestParser = TestParser() + + def _generateReferenceImage(self, testCase: TestCase) -> TestReference: + """Generate reference image from a test case.""" + testCase.render() + return TestReference( + testCase.inputPath, + testCase.outputPath, + testCase.outputImage + ) + + def _runSingleTestCase(self, test: TestCase, skipped: bool = False) -> bool: + """Run a single test case and report results.""" + testId: int = self._context.currentTestId + + if skipped: + test.addInfos.append("skip flag") + + ok: bool = test.run() + if not ok: + failureDetails: str = f"""Test {testId} failed. 
+ file://{test.inputPath} + file://{TEST_REPORT / "report.html"}#case-{testId} + """ + self._context.results.addFailed(failureDetails) + else: + self._context.results.addPassed() - testSkipped = categorySkipped or "skip" in caseProps + self._reportManager.reportTestCase(testId, ok, test) + self._context.currentTestId += 1 - if testSkipped and not context.args.runSkipped: - skippedCount += 1 + return ok - for report in context.reporters: - report.addSkippedCase() + def _runTestCategory(self, test_content: str, props: dict[str, str], + container: str, file: Path, categorySkipped: bool = False) -> TestResults: + """Run all test cases in a category.""" + categoryResults: TestResults = TestResults() + testCases: list[tuple[str, str, str]] = self._parser.parseTestCases(test_content) + reference: TestReference | None = None - continue + for tag, info, testDocument in testCases: + caseProps: dict[str, str] = self._parser.parseProperties(info) + inputPath: Path = TEST_REPORT / f"{self._context.currentTestId}.xhtml" + imgPath: Path = TEST_REPORT / f"{self._context.currentTestId}.bmp" - test = TestCase(props, inputPath, imgPath, context, tag, testDocument, caseProps, - container=container, reference=reference) + # First test case is the reference + if not reference: + test = TestCase(props, inputPath, imgPath, self._context, tag, + testDocument, caseProps, container=container) + reference = self._generateReferenceImage(test) + continue - success = runTestCase(test, context, testSkipped) - if success: - passedCount += 1 - else: - failedCount += 1 + testSkipped: bool = categorySkipped or "skip" in caseProps - if context.args.fast: - break + if testSkipped and not self._context.shouldRunSkipped(): + categoryResults.addSkipped() + self._reportManager.reportSkippedCase() + continue - for report in context.reporters: - report.addTestCategory(context.currentTestId, props, file, passedCount, failedCount, skippedCount) - return passedCount, failedCount, skippedCount + test = TestCase(props, inputPath, imgPath, self._context, tag, + testDocument, caseProps, container=container, reference=reference) + success: bool = self._runSingleTestCase(test, testSkipped) + if success: + categoryResults.addPassed() + else: + categoryResults.addFailed() -def runTestFile(context: TestRunnerContext, file: Path): - passedCount = 0 - failedCount = 0 - skippedCount = 0 + if self._context.shouldStopOnFailure(): + break - print(f"Running {file.relative_to(TESTS_DIR)}...") + self._reportManager.reportTestCategory( + self._context.currentTestId, props, file, + categoryResults.passed, categoryResults.failed, categoryResults.skipped + ) - def getContainer(test_content: str) -> str | None: - searchContainer = re.search(r"""([\w\W]+?)""", test) - container = searchContainer and searchContainer.group(1) - if not container: - container = '' - return container + return categoryResults - with file.open() as f: - content = f.read() + def runTestFile(self, file: Path) -> TestResults: + """Run all tests in a file.""" + fileResults: TestResults = TestResults() + print(f"Running {file.relative_to(TESTS_DIR)}...") - for info, test in re.findall(r"""]*)>([\w\W]+?)""", content): - props = getInfo(info) + with file.open() as f: + content: str = f.read() - categorySkipped = "skip" in props + testBlocks: list[tuple[str, str]] = self._parser.parseTestBlocks(content) - if categorySkipped and not context.args.runSkipped: - skippedCount += 1 - for report in context.reporters: - report.addSkippedFile(context.currentTestId, props) - continue + for info, 
test_content in testBlocks: + props: dict[str, str] = self._parser.parseProperties(info) + categorySkipped: bool = "skip" in props - container = getContainer(test) + if categorySkipped and not self._context.shouldRunSkipped(): + fileResults.addSkipped() + self._reportManager.reportSkippedFile(self._context.currentTestId, props) + continue - catPassed, catFailed, catSkipped = runTestCategory(context, test, props, container, file, categorySkipped) - passedCount += catPassed - failedCount += catFailed - skippedCount += catSkipped + container: str = self._parser.extractContainer(test_content) + categoryResults: TestResults = self._runTestCategory( + test_content, props, container, file, categorySkipped + ) + fileResults.merge(categoryResults) - print() - return context.currentTestId, passedCount, failedCount, skippedCount + print() + return fileResults @cli.command("reftests", "Manage the reftests") -def _(): ... # Placeholder for the reftests command group +def _() -> None: + """Placeholder for the reftests command group.""" + ... @cli.command("reftests/run", "Manage the reftests") -def _(args: RefTestArgs): - reporters: list[Reporter] = [] - paperMuncher = buildPaperMuncher(args) - manifests = model.Registry.use(args) +def _(args: RefTestArgs) -> None: + """Run the reftest suite.""" + paperMuncher: builder.ProductScope = buildPaperMuncher(args) + manifests: model.Registry = model.Registry.use(args) TEST_REPORT.mkdir(parents=True, exist_ok=True) + reporters: list[Reporter] = [] if not args.headless: - webReport = WebReport(SOURCE_DIR, TEST_REPORT) - reporters.append(webReport) - - cliReport = CLIReport(SOURCE_DIR, TEST_REPORT) - reporters.append(cliReport) + reporters.append(WebReport(SOURCE_DIR, TEST_REPORT)) + reporters.append(CLIReport(SOURCE_DIR, TEST_REPORT)) - passed = 0 - failed = 0 - skipped = 0 + context: TestRunnerContext = TestRunnerContext(args, paperMuncher, reporters) + reportManager: ReportManager = ReportManager(reporters) + testRunner: TestRunner = TestRunner(context, reportManager) - context = TestRunnerContext(args, paperMuncher, - reporters) # storing these in a context object for easier passing around for file in TESTS_DIR.glob(args.glob or "**/*.xhtml"): - testId, filePassed, fileFailed, fileSkipped = runTestFile(context, file) - passed += filePassed - failed += fileFailed - skipped += fileSkipped - - # Testing ended - reporting results - for report in reporters: - report.finish(manifests, failed, passed, skipped, context) + fileResults: TestResults = testRunner.runTestFile(file) + context.results.merge(fileResults) + + reportManager.finish(manifests, context.results, context) diff --git a/meta/plugins/reftests/reporter.py b/meta/plugins/reftests/reporter.py index 4b43f407..ff7cc60e 100644 --- a/meta/plugins/reftests/reporter.py +++ b/meta/plugins/reftests/reporter.py @@ -1,10 +1,11 @@ +from abc import ABC, abstractmethod from cutekit import model from pathlib import Path from .Test import TestCase -class Reporter: +class Reporter(ABC): """ Object to ensure every reporter has the same interface (and to clean the types). 
""" @@ -12,17 +13,22 @@ class Reporter: def __init__(self, SOURCE_DIR: Path, TEST_REPORT: Path): pass + @abstractmethod def addTestCase(self, testId: int, passed: bool, test: TestCase): pass + @abstractmethod def addTestCategory(self, testId: int, props, file: Path, passCount: int, failCount: int, skippedCount: int): pass + @abstractmethod def addSkippedFile(self, testId: int, props): pass + @abstractmethod def addSkippedCase(self): pass + @abstractmethod def finish(self, manifests: model.Registry, totalFailed: int, totalPassed: int, totalSkipped: int, context): pass