+ """
+
+ def finish(self, manifests: model.Registry, totalFailed: int, totalPassed: int, totalSkipped: int):
+ self.html += f"""
+
+
+
+
+ """
+ with (self.TEST_REPORT / "report.html").open("w") as f:
+ f.write(self.html)
diff --git a/meta/plugins/reftests/__init__.py b/meta/plugins/reftests/__init__.py
new file mode 100644
index 00000000..12e32259
--- /dev/null
+++ b/meta/plugins/reftests/__init__.py
@@ -0,0 +1 @@
+from . import reftest # noqa E402, F401: Needed for side effect
diff --git a/meta/plugins/reftests/reftest.py b/meta/plugins/reftests/reftest.py
new file mode 100644
index 00000000..271bcfe8
--- /dev/null
+++ b/meta/plugins/reftests/reftest.py
@@ -0,0 +1,321 @@
+from cutekit import shell, vt100, cli, builder, model, const
+from pathlib import Path
+
+import re
+import textwrap
+
+# Local imports
+from .utils import fetchMessage
+from .WebReport import WebReport
+
+SOURCE_DIR: Path = Path(__file__).parent
+TESTS_DIR: Path = SOURCE_DIR.parent.parent.parent / "tests"
+TEST_REPORT = (Path(const.PROJECT_CK_DIR) / "tests" / "report").absolute()
+
+
+def buildPaperMuncher(args: model.TargetArgs) -> builder.ProductScope:
+    """
+    Build paper-muncher with the given target arguments for later use in reftests.
+
+    Args:
+        args: The target arguments, which define the context for the build.
+
+    Returns:
+        The ProductScope result from building paper-muncher.
+
+    Raises:
+        RuntimeError: If the "paper-muncher" component cannot be found.
+    """
+
+    scope = builder.TargetScope.use(args)
+    PmComponent = scope.registry.lookup("paper-muncher", model.Component)
+    if PmComponent is None:
+        raise RuntimeError("paper-muncher not found")
+    return builder.build(scope, PmComponent)[0]
+
+
+def areImagesIdentical(image1: bytes, image2: bytes) -> bool:
+    """
+    Compare the results from the reftests by checking if the images are identical.
+
+    This method is sensitive to any change in the image, including compression
+    artifacts. If you want to compare the images with more tolerance, use a
+    similarity metric such as SSIM instead.
+
+    Args:
+        image1: The byte content of the first image.
+        image2: The byte content of the second image.
+
+    Returns:
+        True if the two images are byte-for-byte identical.
+    """
+    return image1 == image2
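+
+# A tolerance-based alternative (not part of this patch) could decode both
+# images and compare them with SSIM via scikit-image. A minimal sketch,
+# assuming both images decode to numpy arrays of identical shape:
+#
+#     import numpy as np
+#     from skimage.metrics import structural_similarity
+#
+#     def areImagesSimilar(image1: np.ndarray, image2: np.ndarray,
+#                          threshold: float = 0.99) -> bool:
+#         # SSIM is 1.0 for identical images and degrades gracefully under
+#         # compression artifacts and minor antialiasing differences.
+#         return structural_similarity(image1, image2, channel_axis=-1) >= threshold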
+
+
+def runPaperMuncher(executable, type, xsize, ysize, page, outputPath, inputPath):
+    command = ["--feature", "*=on", "--quiet"]
+
+    if type == "print":
+        command.extend(["--flow", "paginate"])
+
+    if xsize or not page:
+        command.extend(["--width", f"{xsize or 200}px"])
+
+    if ysize or not page:
+        command.extend(["--height", f"{ysize or 200}px"])
+
+    if page:
+        command.extend(["--page", page])
+
+    command += [
+        "-o",
+        outputPath,
+        inputPath,
+    ]
+
+    executable.popen(*command)
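+
+# For a default 200x200 render test, the command built above amounts to
+# roughly the following (paths illustrative):
+#   paper-muncher --feature *=on --quiet --width 200px --height 200px -o 0.bmp 0.xhtml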
+
+
+class RefTestArgs(model.TargetArgs):
+    glob: str = cli.arg("g", "glob")
+    headless: bool = cli.arg(
+        None, "headless", "Run the tests without opening the report."
+    )
+    fast: bool = cli.arg(
+        None, "fast", "Proceed to the next test as soon as an error occurs."
+    )
+    runSkipped: bool = cli.arg(None, "run-skipped", "Run the skipped tests nonetheless.")
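+
+# Typical invocations (assuming cutekit's `ck` entry point):
+#   ck reftests/run                       # run every test under tests/
+#   ck reftests/run -g "css/**/*.xhtml"   # run a subset via --glob
+#   ck reftests/run --headless --fast     # CI-friendly: no browser, fail fast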
+
+
+class TestRunnerContext:
+    def __init__(self, args: RefTestArgs, paperMuncher: builder.ProductScope, webReport: WebReport):
+        self.args = args
+        self.paperMuncher = paperMuncher
+        self.webReport = webReport
+        self.currentTestId: int = 0
+        self.testFailed: str = ""
+
+
+REG_INFO = re.compile(r"""(\w+)=['"]([^'"]+)['"]""")
+
+
+def getInfo(txt):
+    return {prop: value for prop, value in REG_INFO.findall(txt)}
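+
+# e.g. getInfo('name="flex" size="full"') -> {"name": "flex", "size": "full"}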
+
+
+def getTests(txt):
+    return re.findall(
+        r"""<(rendering|error)([^>]*)>([\w\W]+?)</(?:rendering|error)>""", txt
+    )
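+
+# e.g. getTests('<rendering size="full">...</rendering>') yields
+# [("rendering", ' size="full"', "...")]: the tag name, its raw attribute
+# string, and the markup between the opening and closing tags.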
+
+
+def reportTestCase(context: TestRunnerContext, ok: bool, tag: str, input_path: Path, referenceImageURL: Path,
+                   xsize: int, ysize: int, props, skipped: bool = False):
+    add_infos = []
+    if skipped:
+        add_infos.append("skip flag")
+    add_infos = " [" + ", ".join(add_infos) + "]" if add_infos else ""
+
+    help = props.get("help")
+    context.webReport.addTestCase(context.currentTestId, ok, tag, help, input_path, referenceImageURL, xsize,
+                                  ysize, add_infos)
+
+
+def update_temp_file(path, container, rendering):
+    # Substitute the rendering into the container's slot, then write the
+    # resulting xhtml into the temporary file.
+    xhtml = re.sub(r"<slot\s*/>", rendering, container) if container else rendering
+    with path.open("w") as f:
+        f.write(f"<!DOCTYPE html>\n{textwrap.dedent(xhtml)}")
+
+
+def runTestCategory(context: TestRunnerContext, test_content: str, props, container, file, categorySkipped=False):
+    passedCount = 0
+    failedCount = 0
+    skippedCount = 0
+
+    referenceDocument = None  # Expected reference document content (HTML/XHTML)
+    referenceImage: bytes | None = None
+    referenceImageURL: Path = TEST_REPORT / f"{context.currentTestId}.expected.bmp"
+    if props.get("name"):
+        ref_image = file.parent / f"{props.get('name')}.bmp"
+        if ref_image.exists():
+            with ref_image.open("rb") as imageReader:
+                expected_image = imageReader.read()
+
+            # Use the stored reference image instead of the first rendering.
+            referenceImage = expected_image
+
+            with referenceImageURL.open("wb") as imageWriter:
+                imageWriter.write(expected_image)
+
+            referenceImageURL = ref_image
+
+    tests = getTests(test_content)
+
+    for tag, info, testDocument in tests:
+        renderingProps = getInfo(info)
+
+        testSkipped = categorySkipped or "skip" in renderingProps
+        if testSkipped and not context.args.runSkipped:
+            skippedCount += 1
+            print(f"{vt100.YELLOW}○{vt100.RESET}", end="", flush=True)
+            continue
+
+        input_path = TEST_REPORT / f"{context.currentTestId}.xhtml"
+        update_temp_file(input_path, container, testDocument)
+
+        # generate temporary bmp
+        img_path = TEST_REPORT / f"{context.currentTestId}.bmp"
+
+        xsize = props.get("size", "200")
+        ysize = xsize
+        page = props.get("page")
+        if props.get("size") == "full":
+            xsize = "800"
+            ysize = "600"
+
+        type = props.get("type")  # the type of test [render (default) | print]
+        runPaperMuncher(
+            context.paperMuncher, type, xsize, ysize, page, img_path, input_path
+        )
+
+        with img_path.open("rb") as imageFile:
+            output_image: bytes = imageFile.read()
+
+        # the first template of the category is the reference document
+        if not referenceDocument:
+            referenceDocument = testDocument
+            if not referenceImage:
+                referenceImage = output_image
+                with (TEST_REPORT / f"{context.currentTestId}.expected.bmp").open(
+                    "wb"
+                ) as imageWriter:
+                    imageWriter.write(referenceImage)
+            continue
+
+        # check if the test is valid
+        assert referenceImage is not None
+        assert output_image is not None
+
+        # A <rendering> case passes when it matches the reference image;
+        # an <error> case passes when it does NOT match.
+        ok = areImagesIdentical(referenceImage, output_image) == (tag == "rendering")
+        if ok:
+            passedCount += 1
+            print(f"{vt100.GREEN}●{vt100.RESET}", end="", flush=True)
+        else:
+            failedCount += 1
+            print(f"{vt100.RED}●{vt100.RESET}", end="", flush=True)
+            context.testFailed += f"""Test {context.currentTestId} failed.
+            file://{input_path}
+            file://{TEST_REPORT / "report.html"}#case-{context.currentTestId}
+            """
+
+        reportTestCase(
+            context,
+            ok,
+            tag,
+            input_path,
+            referenceImageURL,
+            int(xsize),
+            int(ysize),
+            props,
+            skipped=testSkipped,
+        )
+
+        context.currentTestId += 1
+
+        # With --fast, skip the rest of this category after the first failure.
+        if not ok and context.args.fast:
+            break
+
+    context.webReport.addTestCategory(context.currentTestId, props, file, passedCount, failedCount, skippedCount)
+    return passedCount, failedCount, skippedCount
+
+
+def runTestFile(context: TestRunnerContext, file: Path):
+    passedCount = 0
+    failedCount = 0
+    skippedCount = 0
+
+    print(f"Running {file.relative_to(TESTS_DIR)}...")
+
+    def getContainer(test_content: str) -> str:
+        searchContainer = re.search(r"""<container>([\w\W]+?)</container>""", test_content)
+        container = searchContainer and searchContainer.group(1)
+        if not container:
+            container = "<slot/>"
+        return container
+
+    with file.open() as f:
+        content = f.read()
+
+    for info, test in re.findall(r"""<test([^>]*)>([\w\W]+?)</test>""", content):
+        props = getInfo(info)
+
+        categorySkipped = "skip" in props
+
+        if categorySkipped and not context.args.runSkipped:
+            skippedCount += 1
+            context.webReport.addSkippedFile(context.currentTestId, props)
+            print(f"{vt100.YELLOW}○{vt100.RESET}", end="", flush=True)
+            continue
+
+        container = getContainer(test)
+
+        catPassed, catFailed, catSkipped = runTestCategory(context, test, props, container, file, categorySkipped)
+        passedCount += catPassed
+        failedCount += catFailed
+        skippedCount += catSkipped
+
+    print()
+    return context.currentTestId, passedCount, failedCount, skippedCount
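+
+# For reference, a reftest file has roughly this shape (tag names taken from
+# the regexes above; content illustrative):
+#
+#   <test name="my-test" size="200">
+#     <container><div style="display: flex"><slot/></div></container>
+#     <rendering>...reference markup, rendered first...</rendering>
+#     <rendering>...must render identically to the reference...</rendering>
+#     <error>...must render differently from the reference...</error>
+#   </test>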
+
+
+def reportToCLI(manifests, failed, passed, test_failed):
+    print()
+    if failed:
+        print(f"{vt100.BRIGHT_GREEN}// {fetchMessage(manifests, 'witty')}{vt100.RESET}")
+        print(
+            f"{vt100.RED}Failed {failed} tests{vt100.RESET}, {vt100.GREEN}Passed {passed} tests{vt100.RESET}"
+        )
+        print(f"Report: {TEST_REPORT / 'report.html'}")
+
+        print()
+        print("Failed tests details:")
+        print(test_failed)
+        raise RuntimeError("Some tests failed")
+    else:
+        print(f"{vt100.GREEN}// {fetchMessage(manifests, 'nice')}{vt100.RESET}")
+        print(f"{vt100.GREEN}All tests passed{vt100.RESET}")
+        print(f"Report: {TEST_REPORT / 'report.html'}")
+
+
+@cli.command("reftests", "Manage the reftests")
+def _(): ... # Placeholder for the reftests command group
+
+
+@cli.command("reftests/run", "Manage the reftests")
+def _(args: RefTestArgs):
+    paperMuncher = buildPaperMuncher(args)
+    manifests = model.Registry.use(args)
+
+    TEST_REPORT.mkdir(parents=True, exist_ok=True)
+    webReport = WebReport(SOURCE_DIR, TEST_REPORT)
+
+    passed = 0
+    failed = 0
+    skipped = 0
+
+    # Store the shared state in a context object for easier passing around.
+    context = TestRunnerContext(args, paperMuncher, webReport)
+    for file in TESTS_DIR.glob(args.glob or "**/*.xhtml"):
+        testId, filePassed, fileFailed, fileSkipped = runTestFile(context, file)
+        passed += filePassed
+        failed += fileFailed
+        skipped += fileSkipped
+
+    # Testing ended - write the report, then open and summarize it.
+    webReport.finish(manifests, failed, passed, skipped)
+
+    if not args.headless:
+        if shell.which("xdg-open"):
+            shell.exec("xdg-open", str(TEST_REPORT / "report.html"))
+        elif shell.which("open"):
+            shell.exec("open", str(TEST_REPORT / "report.html"))
+
+    reportToCLI(manifests, failed, passed, context.testFailed)
diff --git a/meta/plugins/reftests/report.css b/meta/plugins/reftests/report.css
new file mode 100644
index 00000000..8d6defcb
--- /dev/null
+++ b/meta/plugins/reftests/report.css
@@ -0,0 +1,139 @@
+* {
+    margin: 0;
+    padding: 0;
+    box-sizing: border-box;
+}
+
+body {
+    --bg: #1b1b1c;
+    --bg2: #161616;
+    --font: #fafafa;
+    --failed: #c52b2b;
+    --passed: #74b553;
+}
+
+body.light {
+    --bg: #f3eee7;
+    --bg2: #f7ece7;
+    --font: #090909;
+    --failed: #c52b2b;
+    --passed: #74b553;
+}
+
+header {
+    padding: 8px;
+    background-color: var(--bg2);
+    color: #fafafa;
+    z-index: 100;
+}
+
+footer {
+    position: fixed;
+    bottom: 0;
+    left: 0;
+    right: 0;
+    padding: 8px;
+    background-color: var(--bg2);
+    z-index: 100;
+}
+
+.infoBar {
+    position: absolute;
+    transform: translateY(-1rem);
+    height: 100%;
+    width: 1rem;
+    left: 0;
+}
+
+.failed .infoBar {
+    background: var(--failed);
+}
+
+.passed .infoBar {
+    background: var(--passed);
+}
+
+.dark a:link {
+    color: #8bd3ff;
+}
+
+.dark a:visited {
+    color: #8e8bff;
+}
+
+.light a:link {
+    color: #267eb3;
+}
+
+.light a:visited {
+    color: #267eb3;
+}
+
+body {
+    font-family: sans-serif;
+    background-color: var(--bg);
+    color: var(--font);
+    font-size: 0.9rem;
+}
+
+.test {
+    padding: 1rem;
+    background-color: var(--bg2);
+    border-bottom: 1px solid #4f4f4f;
+    position: sticky;
+    gap: 0.2rem;
+    top: 0;
+    z-index: 100;
+    display: flex;
+    flex-direction: column;
+    align-items: center;
+}
+
+h1 {
+    font-size: 1.2rem;
+    text-decoration: underline;
+}
+
+h2 {
+    font-size: 1.1rem;
+}
+
+.wrapper {
+    width: fit-content;
+}
+
+.test-case {
+    padding: 1rem;
+    padding-left: 2rem;
+    border-bottom: 1px solid #333;
+    width: fit-content;
+    min-width: 100vw;
+}
+
+.passed {
+}
+
+.failed {
+}
+
+.outputs {
+    margin: 1.2rem 0;
+    display: flex;
+    gap: 1rem;
+    width: fit-content;
+}
+
+.outputs > div {
+    display: flex;
+    gap: 0.5rem;
+    flex-direction: column-reverse;
+    align-items: center;
+}
+
+.actual {
+    border: 0px solid blue;
+}
+
+iframe {
+    border: none;
+}
\ No newline at end of file
diff --git a/meta/plugins/reftests/report.js b/meta/plugins/reftests/report.js
new file mode 100644
index 00000000..397bd52e
--- /dev/null
+++ b/meta/plugins/reftests/report.js
@@ -0,0 +1,23 @@
+function initTheme() {
+    const prefersDarkScheme = window.matchMedia("(prefers-color-scheme: dark)").matches;
+    if (prefersDarkScheme) {
+        document.body.classList.remove("light");
+        document.body.classList.add("dark");
+    } else {
+        document.body.classList.add("light");
+        document.body.classList.remove("dark");
+    }
+}
+
+initTheme();
+
+// Use a broadcast channel to tell other reftest report instances to stop
+const id = Math.random().toString(36).substring(7);
+const channel = new BroadcastChannel('reftest');
+channel.onmessage = (event) => {
+    if (event.data.from !== id && event.data.msg === 'stop') {
+        window.close();
+    }
+}
+channel.postMessage({from: id, msg: 'stop'});
diff --git a/meta/plugins/reftests/utils.py b/meta/plugins/reftests/utils.py
new file mode 100644
index 00000000..b63903a9
--- /dev/null
+++ b/meta/plugins/reftests/utils.py
@@ -0,0 +1,45 @@
+from cutekit import model
+from random import randint
+from pathlib import Path
+
+
+def fetchFile(manifests: model.Registry, component: str, path: str) -> str:
+    """
+    Fetch the text content of a file from a specific component's directory.
+
+    Args:
+        manifests: The component registry used to look up component information.
+        component: The name of the component (e.g., "karm-core").
+        path: The relative path to the file within that component's directory
+            (e.g., "base/defs/error.inc").
+
+    Returns:
+        The entire content of the specified file as a string.
+
+    Raises:
+        AssertionError: If the specified component is not found in the registry.
+    """
+    comp = manifests.lookup(component, model.Component)
+    assert comp is not None
+    p = Path(comp.dirname()) / path
+    with p.open() as f:
+        return f.read()
+
+
+def fetchMessage(manifests: model.Registry, type: str) -> str:
+    """
+    Fetch a random message from a ".inc" file (e.g., funny error/success messages).
+
+    Args:
+        manifests: The component registry used to locate the file.
+        type: The type of message to fetch (e.g., "witty", "nice"), which
+            corresponds to the name of the .inc file.
+
+    Returns:
+        A randomly selected message string from the fetched file.
+    """
+
+    # The .inc file is expected to hold comma-separated string literals, so
+    # wrapping it in brackets makes it parse as a Python list expression.
+    messages = eval(
+        "[" + fetchFile(manifests, "karm-core", "base/defs/" + type + ".inc") + "]"
+    )
+    return messages[randint(0, len(messages) - 1)]
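+
+# e.g. fetchMessage(registry, "witty") with a witty.inc containing
+#   "It works on my machine!",
+#   "Off by one, again.",
+# returns one of the two strings at random. (Contents illustrative.)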