diff --git a/meta/plugins/__init__.py b/meta/plugins/__init__.py index c9d83ee5..9e1388a4 100644 --- a/meta/plugins/__init__.py +++ b/meta/plugins/__init__.py @@ -2,4 +2,5 @@ ensure((0, 10, 0)) -from . import reftest, tools, wpt # noqa E402, F401: Needed for side effect +from . import tools, wpt # noqa E402, F401: Needed for side effect +from . import reftests # noqa E402, F401: Needed for side effect diff --git a/meta/plugins/reftest.py b/meta/plugins/reftest.py deleted file mode 100644 index a39f924b..00000000 --- a/meta/plugins/reftest.py +++ /dev/null @@ -1,523 +0,0 @@ -from cutekit import shell, vt100, cli, builder, model, const -from pathlib import Path -from random import randint -import re -import textwrap -import time - - -def buildPaperMuncher(args: model.TargetArgs) -> builder.ProductScope: - scope = builder.TargetScope.use(args) - component = scope.registry.lookup("paper-muncher", model.Component) - if component is None: - raise RuntimeError("paper-muncher not found") - return builder.build(scope, component)[0] - - -def fetchFile(args: model.TargetArgs, component: str, path: str) -> str: - r = model.Registry.use(args) - c = r.lookup(component, model.Component) - assert c is not None - p = Path(c.dirname()) / path - with p.open() as f: - return f.read() - - -def fetchMessage(args: model.TargetArgs, type: str) -> str: - message = eval( - "[" + fetchFile(args, "karm-core", "base/defs/" + type + ".inc") + "]" - ) - return message[randint(0, len(message) - 1)] - - -def compareImages( - lhs: bytes, - rhs: bytes, - lowEpsilon: float = 0.05, - highEpsilon: float = 0.1, - strict=False, -) -> bool: - if strict: - return lhs == rhs - - if len(lhs) != len(rhs): - return False - - if lhs == rhs: - return True - - errorSum = 0 - for i in range(len(lhs)): - diff = abs(lhs[i] - rhs[i]) / 255 - if diff > highEpsilon: - # print(f"Image rejected with diff = {diff}") - return False - errorSum += diff > lowEpsilon - - if errorSum > len(lhs) // 100: - # print(f"Image reject with errorSum = {errorSum}") - return False - - return True - - -def runPaperMuncher(executable, type, xsize, ysize, page, outputPath, inputPath): - command = ["--feature", "*=on", "--quiet"] - - if type == "print": - command.extend(["--flow", "paginate"]) - - if xsize or not page: - command.extend(["--width", (xsize or 200) + "px"]) - - if ysize or not page: - command.extend(["--height", (ysize or 200) + "px"]) - - if page: - command.extend(["--page", page]) - - command += [ - "-o", - outputPath, - inputPath, - ] - - executable.popen(*command) - - -class RefTestArgs(model.TargetArgs): - glob: str = cli.arg("g", "glob") - headless: bool = cli.arg( - None, "headless", "Run the tests without opening the report." - ) - fast: str = cli.arg( - None, "fast", "Proceed to the next test as soon as an error occurs." - ) - runSkipped: bool = cli.arg(None, "run-skipped", "Run the skipped tests nonetheless") - - -@cli.command("reftests", "Manage the reftests") -def _(): ... - - -TESTS_DIR: Path = Path(__file__).parent.parent.parent / "tests" -TEST_REPORT = (Path(const.PROJECT_CK_DIR) / "tests" / "report").absolute() - - -@cli.command("reftests/clean", "Manage the reftests") -def _(): - for f in TEST_REPORT.glob("*.*"): - f.unlink() - TEST_REPORT.rmdir() - print(f"Cleaned {TEST_REPORT}") - - -@cli.command("reftests/run", "Manage the reftests") -def _(args: RefTestArgs): - paperMuncher = buildPaperMuncher(args) - - TEST_REPORT.mkdir(parents=True, exist_ok=True) - report = """ - - - - Reftest - - -
- Reftest report -
-""" - - def update_temp_file(path, container, rendering): - # write xhtml into the temporary file - xhtml = re.sub(r"", rendering, container) if container else rendering - with path.open("w") as f: - f.write(f"\n{textwrap.dedent(xhtml)}") - - REG_INFO = re.compile(r"""(\w+)=['"]([^'"]+)['"]""") - - def getInfo(txt): - return {prop: value for prop, value in REG_INFO.findall(txt)} - - passed = 0 - failed = 0 - skipped = 0 - test_failed = "" - - counter = 0 - for file in TESTS_DIR.glob(args.glob or "**/*.xhtml"): - if file.suffix != ".xhtml": - continue - print(f"Running {file.relative_to(TESTS_DIR)}...") - - with file.open() as f: - content = f.read() - - passCount = 0 - failCount = 0 - skippedCount = 0 - for info, test in re.findall(r"""]*)>([\w\W]+?)""", content): - props = getInfo(info) - - category_skipped = "skip" in props - type = props.get("type") # the type of test [render (default) | print] - - if category_skipped and not args.runSkipped: - skippedCount += 1 - skipped += 1 - - report += f""" -
-                <div class="test">
-                    <div class="infoBar"></div>
-                    <h1>
-                        {props.get("name") or "Unnamed"}
-                    </h1>
-                    <p>
-                        Test Skipped
-                    </p>
-                </div>
- """ - print(f"{vt100.YELLOW}○{vt100.RESET}", end="", flush=True) - continue - - test_report = "" - - search = re.search(r"""([\w\W]+?)""", test) - container = search and search.group(1) - if not container: - container = '' - - expected_xhtml = None - expected_image: bytes | None = None - expected_image_url = TEST_REPORT / f"{counter}.expected.bmp" - if props.get("name"): - ref_image = file.parent / f"{props.get('name')}.bmp" - if ref_image.exists(): - with ref_image.open("rb") as imageReader: - expected_image = imageReader.read() - - with expected_image_url.open("wb") as imageWriter: - imageWriter.write(expected_image) - - expected_image_url = ref_image - - for tag, info, rendering in re.findall( - r"""<(rendering|error)([^>]*)>([\w\W]+?)""", test - ): - renderingProps = getInfo(info) - test_skipped = category_skipped or "skip" in renderingProps - if test_skipped and not args.runSkipped: - skippedCount += 1 - skipped += 1 - - print(f"{vt100.YELLOW}○{vt100.RESET}", end="", flush=True) - continue - - input_path = TEST_REPORT / f"{counter}.xhtml" - - update_temp_file(input_path, container, rendering) - - # generate temporary bmp - img_path = TEST_REPORT / f"{counter}.bmp" - - xsize = props.get("size", "200") - ysize = xsize - page = props.get("page") - if props.get("size") == "full": - xsize = "800" - ysize = "600" - - runPaperMuncher( - paperMuncher, type, xsize, ysize, page, img_path, input_path - ) - - with img_path.open("rb") as imageFile: - output_image: bytes = imageFile.read() - - # the first template is the expected value - if not expected_xhtml: - expected_xhtml = rendering - if not expected_image: - expected_image = output_image - with (TEST_REPORT / f"{counter}.expected.bmp").open( - "wb" - ) as imageWriter: - imageWriter.write(expected_image) - continue - - # check if the rendering is different - assert expected_image is not None - assert output_image is not None - - ok = compareImages(expected_image, output_image) == (tag == "rendering") - if ok: - passCount += 1 - else: - failCount += 1 - - help = renderingProps.get("help") - - if ok: - passed += 1 - print(f"{vt100.GREEN}●{vt100.RESET}", end="", flush=True) - else: - failed += 1 - print(f"{vt100.RED}●{vt100.RESET}", end="", flush=True) - test_failed += f"""Test {counter} failed. -file://{input_path} -file://{TEST_REPORT / "report.html"}#case-{counter} -""" - - add_infos = [] - if test_skipped: - add_infos.append("skip flag") - if len(add_infos) != 0: - add_infos = " [" + ", ".join(add_infos) + "]" - else: - add_infos = "" - - test_report += f""" -
-                    <div class="test-case {"passed" if ok else "failed"}" id="case-{counter}">
-                        <div class="infoBar"></div>
-                        <h2>
-                            {counter} - {tag} {add_infos}
-                        </h2>
-                        <p>
-                            {help}
-                        </p>
-                        <div class="outputs">
-                            <div>
-                                <img class="actual" src="{counter}.bmp" width="{xsize}" height="{ysize}" />
-                                <p>Actual</p>
-                            </div>
-                            <div>
-                                <img src="{expected_image_url}" width="{xsize}" height="{ysize}" />
-                                <p>{"Reference" if (tag == "rendering") else "Unexpected"}</p>
-                            </div>
-                            <div>
-                                <iframe src="{input_path}" width="{xsize}" height="{ysize}"></iframe>
-                                <p>Rendition</p>
-                            </div>
-                        </div>
-                        <a href="{expected_image_url}">Reference</a>
-                        <a href="{input_path}">Source</a>
-                    </div>
- """ - - counter += 1 - - if args.fast: - break - report += f""" -
-            <div class="test">
-                <div class="infoBar"></div>
-                <h1>
-                    {props.get("name")}
-                </h1>
-                <p>
-                    {props.get("help") or ""}
-                </p>
-                <a href="{file}">Source</a>
-                <p>{passCount} passed, {failCount} failed and {skippedCount} skipped</p>
-            </div>
-            <div class="wrapper">
-                {test_report}
-            </div>
- """ - print() - report += f""" -
-        <footer>
-            <p>
-                {fetchMessage(args, "witty" if failed else "nice")}
-            </p>
-            <p>
-                Failed {failed} tests, Passed {passed} tests, Skipped {skipped}
-            </p>
-        </footer>
- """ - - report += """ - - - - - - - """ - - with (TEST_REPORT / "report.html").open("w") as f: - f.write(report) - - if not args.headless: - if shell.which("xdg-open"): - shell.exec("xdg-open", str(TEST_REPORT / "report.html")) - elif shell.which("open"): - shell.exec("open", str(TEST_REPORT / "report.html")) - - print() - if failed: - print(f"{vt100.BRIGHT_GREEN}// {fetchMessage(args, 'witty')}{vt100.RESET}") - print( - f"{vt100.RED}Failed {failed} tests{vt100.RESET}, {vt100.GREEN}Passed {passed} tests{vt100.RESET}" - ) - print(f"Report: {TEST_REPORT / 'report.html'}") - - print() - print("Failed tests details:") - print(test_failed) - raise RuntimeError("Some tests failed") - else: - print(f"{vt100.GREEN}// {fetchMessage(args, 'nice')}{vt100.RESET}") - print(f"{vt100.GREEN}All tests passed{vt100.RESET}") - print(f"Report: {TEST_REPORT / 'report.html'}") diff --git a/meta/plugins/reftests/WebReport.py b/meta/plugins/reftests/WebReport.py new file mode 100644 index 00000000..e264a90e --- /dev/null +++ b/meta/plugins/reftests/WebReport.py @@ -0,0 +1,92 @@ +from cutekit import model +from pathlib import Path + +from .utils import fetchMessage + + +class WebReport: + """ + Object to abstract the generation of the web report for the reftests. + """ + + def __init__(self, SOURCE_DIR: Path, TEST_REPORT: Path): + self.TEST_REPORT: Path = TEST_REPORT + self.html = f""" + + + + Reftest + + + + +
+ Reftest report +
+ """ + self.testHtml = "" + + def addTestCase(self, testId: int, passed: bool, tag: str, help: str, input_path: Path, expected_image_url: Path, + xsize: int, ysize: int, add_infos: str): + self.testHtml += f""" +
+        <div class="test-case {"passed" if passed else "failed"}" id="case-{testId}">
+            <div class="infoBar"></div>
+            <h2>
+                {testId} - {tag} {add_infos}
+            </h2>
+            <p>
+                {help}
+            </p>
+            <div class="outputs">
+                <div>
+                    <img class="actual" src="{testId}.bmp" width="{xsize}" height="{ysize}" />
+                    <p>Actual</p>
+                </div>
+                <div>
+                    <img src="{expected_image_url}" width="{xsize}" height="{ysize}" />
+                    <p>{"Reference" if (tag == "rendering") else "Unexpected"}</p>
+                </div>
+                <div>
+                    <iframe src="{input_path}" width="{xsize}" height="{ysize}"></iframe>
+                    <p>Rendition</p>
+                </div>
+            </div>
+            <a href="{expected_image_url}">Reference</a>
+            <a href="{input_path}">Source</a>
+        </div>
+ """ + + def addTestCategory(self, testId: int, props, file: Path, passCount: int, failCount: int, skippedCount: int): + self.html += f""" +
+        <div class="test">
+            <div class="infoBar"></div>
+            <h1>
+                {props.get("name")}
+            </h1>
+            <p>
+                {props.get("help") or ""}
+            </p>
+            <a href="{file}">Source</a>
+            <p>{passCount} passed, {failCount} failed and {skippedCount} skipped</p>
+        </div>
+        <div class="wrapper">
+            {self.testHtml}
+        </div>
+ """ + self.testHtml = "" + + def addSkippedFile(self, testId: int, props): + self.html += f""" +
+        <div class="test">
+            <div class="infoBar"></div>
+            <h1>
+                {props.get("name") or "Unnamed"}
+            </h1>
+            <p>
+                Test Skipped
+            </p>
+        </div>
+ """ + + def finish(self, manifests: model.Registry, totalFailed: int, totalPassed: int, totalSkipped: int): + self.html += f""" +
+        <footer>
+            <p>
+                {fetchMessage(manifests, "witty" if totalFailed != 0 else "nice")}
+            </p>
+            <p>
+                Failed {totalFailed} tests, Passed {totalPassed} tests, Skipped {totalSkipped}
+            </p>
+        </footer>
+ + + + """ + with (self.TEST_REPORT / "report.html").open("w") as f: + f.write(self.html) diff --git a/meta/plugins/reftests/__init__.py b/meta/plugins/reftests/__init__.py new file mode 100644 index 00000000..12e32259 --- /dev/null +++ b/meta/plugins/reftests/__init__.py @@ -0,0 +1 @@ +from . import reftest # noqa E402, F401: Needed for side effect diff --git a/meta/plugins/reftests/reftest.py b/meta/plugins/reftests/reftest.py new file mode 100644 index 00000000..271bcfe8 --- /dev/null +++ b/meta/plugins/reftests/reftest.py @@ -0,0 +1,321 @@ +from cutekit import shell, vt100, cli, builder, model, const +from pathlib import Path + +import re +import textwrap + +# Local imports +from .utils import fetchMessage +from .WebReport import WebReport + +SOURCE_DIR: Path = Path(__file__).parent +TESTS_DIR: Path = SOURCE_DIR.parent.parent.parent / "tests" +TEST_REPORT = (Path(const.PROJECT_CK_DIR) / "tests" / "report").absolute() + + +def buildPaperMuncher(args: model.TargetArgs) -> builder.ProductScope: + """ + Build paper-muncher with the given target arguments for later use in reftests. + + Args: + args: The target arguments, which define the context for the build. + + Returns: + The ProductScope result from building paper-muncher. + + Raises: + RuntimeError: If the "paper-muncher" component cannot be found. + """ + + scope = builder.TargetScope.use(args) + PmComponent = scope.registry.lookup("paper-muncher", model.Component) + if PmComponent is None: + raise RuntimeError("paper-muncher not found") + return builder.build(scope, PmComponent)[0] + + +def areImagesIdentical(image1: bytes, image2: bytes) -> bool: + """ + Compare the results from the reftests by checking if the images are identical. + + This method is sensitive to any changes in the image, including compression artifacts. + If you want to compare the images with more tolerance use a SSIM. + + Args: + image1: The byte content of the first image. + image2: The byte content of the second image. + """ + return image1 == image2 + + +def runPaperMuncher(executable, type, xsize, ysize, page, outputPath, inputPath): + command = ["--feature", "*=on", "--quiet"] + + if type == "print": + command.extend(["--flow", "paginate"]) + + if xsize or not page: + command.extend(["--width", (xsize or 200) + "px"]) + + if ysize or not page: + command.extend(["--height", (ysize or 200) + "px"]) + + if page: + command.extend(["--page", page]) + + command += [ + "-o", + outputPath, + inputPath, + ] + + executable.popen(*command) + + +class RefTestArgs(model.TargetArgs): + glob: str = cli.arg("g", "glob") + headless: bool = cli.arg( + None, "headless", "Run the tests without opening the report." + ) + fast: str = cli.arg( + None, "fast", "Proceed to the next test as soon as an error occurs." 
+ ) + runSkipped: bool = cli.arg(None, "run-skipped", "Run the skipped tests nonetheless") + + +class TestRunnerContext: + def __init__(self, args: RefTestArgs, paperMuncher: builder.ProductScope, webReport: WebReport): + self.webReport = webReport + self.args = args + self.paperMuncher = paperMuncher + self.webReport = webReport + self.currentTestId: int = 0 + self.testFailed: str = "" + + +REG_INFO = re.compile(r"""(\w+)=['"]([^'"]+)['"]""") + + +def getInfo(txt): + return {prop: value for prop, value in REG_INFO.findall(txt)} + + +def getTests(txt): + return re.findall( + r"""<(rendering|error)([^>]*)>([\w\W]+?)""", txt + ) + + +def reportTestCase(context: TestRunnerContext, ok: bool, tag: str, input_path: Path, referenceImageURL: Path, + xsize: int, ysize: int, props, skipped: bool = False): + add_infos = [] + if skipped: + add_infos.append("skip flag") + if len(add_infos) != 0: + add_infos = " [" + ", ".join(add_infos) + "]" + else: + add_infos = "" + + help = props.get("help") + context.webReport.addTestCase(context.currentTestId, ok, tag, help, input_path, referenceImageURL, xsize, + ysize, add_infos) + + +def update_temp_file(path, container, rendering): + # write xhtml into the temporary file + xhtml = re.sub(r"", rendering, container) if container else rendering + with path.open("w") as f: + f.write(f"\n{textwrap.dedent(xhtml)}") + + +def runTestCategory(context: TestRunnerContext, test_content: str, props, container, file, categorySkipped=False): + passedCount = 0 + failedCount = 0 + skippedCount = 0 + + referenceDocument = None # Expected reference document content (HTML/XHTML) + referenceImage: bytes | None = None + referenceImageURL: Path = TEST_REPORT / f"{context.currentTestId}.expected.bmp" + if props.get("name"): + ref_image = file.parent / f"{props.get('name')}.bmp" + if ref_image.exists(): + with ref_image.open("rb") as imageReader: + expected_image = imageReader.read() + + with referenceImageURL.open("wb") as imageWriter: + imageWriter.write(expected_image) + + referenceImageURL = ref_image + + tests = getTests(test_content) + + for tag, info, testDocument in tests: + renderingProps = getInfo(info) + + testSkipped = categorySkipped or "skip" in renderingProps + if testSkipped and not context.args.runSkipped: + skippedCount += 1 + + print(f"{vt100.YELLOW}○{vt100.RESET}", end="", flush=True) + continue + + input_path = TEST_REPORT / f"{context.currentTestId}.xhtml" + update_temp_file(input_path, container, testDocument) + + # generate temporary bmp + img_path = TEST_REPORT / f"{context.currentTestId}.bmp" + + xsize = props.get("size", "200") + ysize = xsize + page = props.get("page") + if props.get("size") == "full": + xsize = "800" + ysize = "600" + + type = props.get("type") # the type of test [render (default) | print] + runPaperMuncher( + context.paperMuncher, type, xsize, ysize, page, img_path, input_path + ) + + with img_path.open("rb") as imageFile: + output_image: bytes = imageFile.read() + + # the first template of the category is the reference document + if not referenceDocument: + referenceDocument = testDocument + if not referenceImage: + referenceImage = output_image + with (TEST_REPORT / f"{context.currentTestId}.expected.bmp").open( + "wb" + ) as imageWriter: + imageWriter.write(referenceImage) + continue + + # check if the test is valid + assert referenceImage is not None + assert output_image is not None + + ok = areImagesIdentical(referenceImage, output_image) == (tag == "rendering") + if ok: + passedCount += 1 + print(f"{vt100.GREEN}●{vt100.RESET}", 
end="", flush=True) + else: + failedCount += 1 + print(f"{vt100.RED}●{vt100.RESET}", end="", flush=True) + context.testFailed += f"""Test {context.currentTestId} failed. + file://{input_path} + file://{TEST_REPORT / "report.html"}#case-{context.currentTestId} + """ + + reportTestCase( + context, + ok, + tag, + input_path, + referenceImageURL, + int(xsize), + int(ysize), + props, + skipped=testSkipped, + ) + + context.currentTestId += 1 + + if context.args.fast: + break + + context.webReport.addTestCategory(context.currentTestId, props, file, passedCount, failedCount, skippedCount) + return passedCount, failedCount, skippedCount + + +def runTestFile(context: TestRunnerContext, file: Path): + passedCount = 0 + failedCount = 0 + skippedCount = 0 + + print(f"Running {file.relative_to(TESTS_DIR)}...") + + def getContainer(test_content: str) -> str | None: + searchContainer = re.search(r"""([\w\W]+?)""", test) + container = searchContainer and searchContainer.group(1) + if not container: + container = '' + return container + + with file.open() as f: + content = f.read() + + for info, test in re.findall(r"""]*)>([\w\W]+?)""", content): + props = getInfo(info) + + categorySkipped = "skip" in props + + if categorySkipped and not context.args.runSkipped: + skippedCount += 1 + context.webReport.addSkippedFile(context.currentTestId, props) + print(f"{vt100.YELLOW}○{vt100.RESET}", end="", flush=True) + continue + + container = getContainer(test) + + catPassed, catFailed, catSkipped = runTestCategory(context, test, props, container, file, categorySkipped) + passedCount += catPassed + failedCount += catFailed + skippedCount += catSkipped + + print() + return context.currentTestId, passedCount, failedCount, skippedCount + + +def reportToCLI(manifests, failed, passed, test_failed): + print() + if failed: + print(f"{vt100.BRIGHT_GREEN}// {fetchMessage(manifests, 'witty')}{vt100.RESET}") + print( + f"{vt100.RED}Failed {failed} tests{vt100.RESET}, {vt100.GREEN}Passed {passed} tests{vt100.RESET}" + ) + print(f"Report: {TEST_REPORT / 'report.html'}") + + print() + print("Failed tests details:") + print(test_failed) + raise RuntimeError("Some tests failed") + else: + print(f"{vt100.GREEN}// {fetchMessage(manifests, 'nice')}{vt100.RESET}") + print(f"{vt100.GREEN}All tests passed{vt100.RESET}") + print(f"Report: {TEST_REPORT / 'report.html'}") + + +@cli.command("reftests", "Manage the reftests") +def _(): ... 
# Placeholder for the reftests command group + + +@cli.command("reftests/run", "Manage the reftests") +def _(args: RefTestArgs): + paperMuncher = buildPaperMuncher(args) + manifests = model.Registry.use(args) + + TEST_REPORT.mkdir(parents=True, exist_ok=True) + webReport = WebReport(SOURCE_DIR, TEST_REPORT) + + passed = 0 + failed = 0 + skipped = 0 + + context = TestRunnerContext(args, paperMuncher, + webReport) # storing these in a context object for easier passing around + for file in TESTS_DIR.glob(args.glob or "**/*.xhtml"): + testId, filePassed, fileFailed, fileSkipped = runTestFile(context, file) + passed += filePassed + failed += fileFailed + skipped += fileSkipped + + # Testing ended - reporting results + if not args.headless: + if shell.which("xdg-open"): + shell.exec("xdg-open", str(TEST_REPORT / "report.html")) + elif shell.which("open"): + shell.exec("open", str(TEST_REPORT / "report.html")) + + webReport.finish(manifests, failed, passed, skipped) + reportToCLI(manifests, failed, passed, context.testFailed) diff --git a/meta/plugins/reftests/report.css b/meta/plugins/reftests/report.css new file mode 100644 index 00000000..8d6defcb --- /dev/null +++ b/meta/plugins/reftests/report.css @@ -0,0 +1,139 @@ +* { + margin: 0; + padding: 0; + box-sizing: border-box; +} + +body { + --bg: #1b1b1c; + --bg2: #161616; + --font: #fafafa; + --failed: #c52b2b; + --passed: #74b553; +} + +body.light { + --bg: #f3eee7; + --bg2: #f7ece7; + --font: #090909; + --failed: #c52b2b; + --passed: #74b553; +} + +header { + padding: 8px; + background-color: var(--bg2); + color: #fafafa; + z-index: 100; +} + +footer { + position: fixed; + bottom: 0; + left: 0; + right: 0; + padding: 8px; + background-color: var(--bg2); + z-index: 100; +} + +.infoBar { + position: absolute; + transform: translateY(-1rem); + height: 100%; + width: 1rem; + left: 0; +} + +.failed .infoBar { + background: var(--failed); +} + +.passed .infoBar { + background: var(--passed); +} + +.dark a:link { + color: #8bd3ff; +} + +.dark a:visited { + color: #8e8bff; +} + +.light a:link { + color: #267eb3; +} + +.light a:visited { + color: #267eb3; +} + +body { + font-family: sans-serif; + background-color: var(--bg); + color: var(--font); + font-size: 0.9rem; +} + +.test { + padding: 1rem; + background-color: var(--bg2); + border-bottom: 1px solid #4f4f4f; + position: sticky; + gap: 0.2rem; + top: 0; + z-index: 100; + display: flex; + flex-direction: column; + align-items: center; +} + +h1 { + font-size: 1.2rem; + text-decoration: underline; +} + +h2 { + font-size: 1.1rem; +} + +.wrapper { + width: fit-content; +} + +.test-case { + padding: 1rem; + padding-left: 2rem; + border-bottom: 1px solid #333; + width: fit-content; + min-width: 100vw; +} + +.passed { +} + +.failed { +} + +.outputs { + margin: 1.2rem 0; + display: flex; + gap: 1rem; + width: fit-content; +} + +.outputs > div { + display: flex; + gap: 0.5rem; + flex-direction: column-reverse; + align-items: center; +} + +.actual { + border: 0px solid blue; +} + +iframe { + border: none; +} \ No newline at end of file diff --git a/meta/plugins/reftests/report.js b/meta/plugins/reftests/report.js new file mode 100644 index 00000000..397bd52e --- /dev/null +++ b/meta/plugins/reftests/report.js @@ -0,0 +1,23 @@ +function initTheme() { + const prefersDarkScheme = window.matchMedia("(prefers-color-scheme: dark)").matches; + if (prefersDarkScheme) { + document.body.classList.remove("light"); + document.body.classList.add("dark"); + + } else { + document.body.classList.add("light"); + 
document.body.classList.remove("dark"); + } +} + +initTheme(); + +// Use a broadcast channel to tell other ref-tests instances to stop +const id = Math.random().toString(36).substring(7); +const channel = new BroadcastChannel('reftest'); +channel.onmessage = (event) => { + if (event.data.id !== id && event.data.msg === 'stop') { + window.close(); + } +} +channel.postMessage({from: id, msg: 'stop'}); diff --git a/meta/plugins/reftests/utils.py b/meta/plugins/reftests/utils.py new file mode 100644 index 00000000..b63903a9 --- /dev/null +++ b/meta/plugins/reftests/utils.py @@ -0,0 +1,45 @@ +from cutekit import model +from random import randint +from pathlib import Path + + +def fetchFile(manifests: model.Registry, component: str, path: str) -> str: + """ + Fetches the text content of a file from a specific component's directory. + + Args: + manifests: The component registry used to look up component information. + component: The name of the component (e.g., "karm-core"). + path: The relative path to the file within that component's directory + (e.g., "base/defs/error.inc"). + + Returns: + The entire content of the specified file as a string. + + Raises: + AssertionError: If the specified component is not found in the registry. + """ + component = manifests.lookup(component, model.Component) + assert component is not None + p = Path(component.dirname()) / path + with p.open() as f: + return f.read() + + +def fetchMessage(manifests: model.Registry, type: str) -> str: + """ + Fetches a random message from a ".inc" file. (e.g., funny error/success messages) + + Args: + args: The target arguments, used to get the component registry. + type: The type of message to fetch (e.g., "witty", "nice"), which + corresponds to the name of the .inc file. + + Returns: + A randomly selected message string from the fetched file. + """ + + messages = eval( + "[" + fetchFile(manifests, "karm-core", "base/defs/" + type + ".inc") + "]" + ) + return messages[randint(0, len(messages) - 1)]
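
areImagesIdentical() above compares the two BMP byte strings for strict equality, and its docstring points to SSIM when a more tolerant comparison is wanted (the deleted compareImages() used per-pixel epsilons for the same purpose). A minimal sketch of such a comparison, assuming Pillow, NumPy and scikit-image are available; compareImagesSSIM and the 0.99 threshold are illustrative, not part of this patch.

# Sketch only, not part of this patch. Assumes pillow, numpy and
# scikit-image are installed; the function name and threshold are illustrative.
import io

import numpy as np
from PIL import Image
from skimage.metrics import structural_similarity


def compareImagesSSIM(lhs: bytes, rhs: bytes, threshold: float = 0.99) -> bool:
    # Decode the BMP byte strings into RGB arrays.
    a = np.asarray(Image.open(io.BytesIO(lhs)).convert("RGB"))
    b = np.asarray(Image.open(io.BytesIO(rhs)).convert("RGB"))

    # Images with different dimensions can never match.
    if a.shape != b.shape:
        return False

    # SSIM is 1.0 for identical images and drops as the images diverge.
    return structural_similarity(a, b, channel_axis=-1) >= threshold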
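
The test files themselves are parsed with plain regular expressions: getInfo() pulls the key="value" attributes out of an element's opening tag, and getTests() collects the rendering/error blocks of a test. A small self-contained illustration of the attribute parsing; the sample <test> tag is an assumption, since only the (rendering|error) alternatives and the attribute regex are visible in the patch.

# Sketch only; the attribute regex is the one used in reftest.py, the
# sample tag below is assumed rather than taken from the repository.
import re

REG_INFO = re.compile(r"""(\w+)=['"]([^'"]+)['"]""")

sample = '<test name="border-radius" size="full" type="print" help="Rounded corners">'
props = {prop: value for prop, value in REG_INFO.findall(sample)}

assert props["name"] == "border-radius"
assert props["size"] == "full"   # mapped to an 800x600 viewport in runTestCategory
assert props["type"] == "print"  # selects the paginate flow in runPaperMuncher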
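
fetchMessage() in utils.py builds the message list by wrapping the .inc file content in brackets and handing it to eval(). If those files only ever contain comma-separated string literals, ast.literal_eval() yields the same list without executing arbitrary code; the sketch below assumes exactly that and is not something this patch does.

# Sketch only; assumes the .inc files are plain comma-separated string
# literals (e.g. the content of base/defs/witty.inc).
import ast
from random import choice


def pickMessage(incText: str) -> str:
    # incText is the raw text of the .inc file, as returned by fetchFile().
    messages = ast.literal_eval("[" + incText + "]")
    return choice(messages)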