| 
import ast
import json
import os
import re
import sys
from datetime import datetime
5 | 6 | 
 
  | 
6 | 7 | SUCCESS_SYMBOL = ":white_check_mark:"  | 
 | 
12 | 13 |     data = json.load(f)  | 
13 | 14 |     tests = sorted(data["stats"]["suite_details"], key=lambda x: x["name"])  | 
14 | 15 | 
 
  | 
 | 16 | +def _safe_env_list(name: str):  | 
 | 17 | +    """Parse list-like env variables that look like ['esp32','esp32s2'].  | 
 | 18 | +    Accepts empty/None and returns a Python list of strings.  | 
 | 19 | +    """  | 
 | 20 | +    raw = os.environ.get(name, "").strip()  | 
 | 21 | +    if not raw:  | 
 | 22 | +        return []  | 
 | 23 | +    # Normalize to JSON by replacing single quotes with double quotes  | 
 | 24 | +    try:  | 
 | 25 | +        normalized = raw.replace("'", '"')  | 
 | 26 | +        return json.loads(normalized)  | 
 | 27 | +    except Exception:  | 
 | 28 | +        # Fallback: split on commas and strip brackets  | 
 | 29 | +        raw = raw.strip("[]")  | 
 | 30 | +        return [p.strip().strip("'\"") for p in raw.split(",") if p.strip()]  | 
 | 31 | + | 
 | 32 | + | 
 | 33 | +def _scan_artifacts_for_tests():  | 
 | 34 | +    """Discover built tests from downloaded artifacts.  | 
 | 35 | +    Returns mapping test_name -> set(targets) for which a build*.tmp dir exists containing ci.yml (or even without).  | 
 | 36 | +    """  | 
 | 37 | +    results = {}  | 
 | 38 | +    root = os.path.join(os.getcwd(), "runtime-test-results", "build_artifacts")  | 
 | 39 | +    if not os.path.isdir(root):  | 
 | 40 | +        return results  | 
 | 41 | +    for dirpath, dirnames, filenames in os.walk(root):  | 
 | 42 | +        # Match .../<target>/<test_name>/buildX.tmp  | 
 | 43 | +        if not re.search(r"build\d*\.tmp$", dirpath):  | 
 | 44 | +            continue  | 
 | 45 | +        parts = dirpath.split(os.sep)  | 
 | 46 | +        if len(parts) < 3:  | 
 | 47 | +            continue  | 
 | 48 | +        test_name = parts[-2]  | 
 | 49 | +        target = parts[-3]  | 
 | 50 | +        results.setdefault(test_name, set()).add(target)  | 
 | 51 | +    return results  | 
 | 52 | + | 
 | 53 | + | 
 | 54 | +def _read_local_ci_yaml_if_available(test_name: str) -> str:  | 
 | 55 | +    """Try to find a ci.yml stored beside compiled binaries downloaded by results workflow.  | 
 | 56 | +    Expected paths: runtime-test-results/build_artifacts/**/build*.tmp/ci.yml with sketch folder named test_name.  | 
 | 57 | +    """  | 
 | 58 | +    # Walk known root  | 
 | 59 | +    root = os.path.join(os.getcwd(), "runtime-test-results", "build_artifacts")  | 
 | 60 | +    if not os.path.isdir(root):  | 
 | 61 | +        return ""  | 
 | 62 | +    # Traverse limited depth for performance  | 
 | 63 | +    for dirpath, dirnames, filenames in os.walk(root):  | 
 | 64 | +        # Quick filter: folder path must contain the test folder name and a build*.tmp  | 
 | 65 | +        if test_name not in dirpath:  | 
 | 66 | +            continue  | 
 | 67 | +        if not re.search(r"build\d*\.tmp", dirpath):  | 
 | 68 | +            continue  | 
 | 69 | +        if "ci.yml" in filenames:  | 
 | 70 | +            try:  | 
 | 71 | +                with open(os.path.join(dirpath, "ci.yml"), "r") as f:  | 
 | 72 | +                    return f.read()  | 
 | 73 | +            except Exception:  | 
 | 74 | +                continue  | 
 | 75 | +    return ""  | 
 | 76 | + | 
 | 77 | + | 
 | 78 | +def _parse_ci_yml_minimal(content: str):  | 
 | 79 | +    """Very small YAML reader for the fields we need: targets, platforms, requires, requires_any.  | 
 | 80 | +    Returns dict: {targets: {t: bool}, platforms: {p: bool}, requires: [str], requires_any: [str]}  | 
 | 81 | +    """  | 
 | 82 | +    parsed = {"targets": {}, "platforms": {}, "requires": [], "requires_any": []}  | 
 | 83 | +    if not content:  | 
 | 84 | +        return parsed  | 
 | 85 | + | 
 | 86 | +    lines = content.splitlines()  | 
 | 87 | +    current = None  | 
 | 88 | +    for line in lines:  | 
 | 89 | +        # Remove comments and trailing spaces  | 
 | 90 | +        line = line.split("#", 1)[0].rstrip()  | 
 | 91 | +        if not line.strip():  | 
 | 92 | +            continue  | 
 | 93 | +        # Top-level keys  | 
 | 94 | +        if not line.startswith(" "):  | 
 | 95 | +            if line.startswith("targets:"):  | 
 | 96 | +                current = "targets"  | 
 | 97 | +                continue  | 
 | 98 | +            if line.startswith("platforms:"):  | 
 | 99 | +                current = "platforms"  | 
 | 100 | +                continue  | 
 | 101 | +            if line.startswith("requires:"):  | 
 | 102 | +                current = "requires"  | 
 | 103 | +                continue  | 
 | 104 | +            if line.startswith("requires_any:"):  | 
 | 105 | +                current = "requires_any"  | 
 | 106 | +                continue  | 
 | 107 | +            current = None  | 
 | 108 | +            continue  | 
 | 109 | + | 
 | 110 | +        # Nested under a current section (2+ spaces)  | 
 | 111 | +        if current in ("targets", "platforms"):  | 
 | 112 | +            m = re.match(r"\s*([A-Za-z0-9_\-]+)\s*:\s*(\w+)", line)  | 
 | 113 | +            if m:  | 
 | 114 | +                key = m.group(1)  | 
 | 115 | +                val = m.group(2).lower()  | 
 | 116 | +                parsed[current][key] = (val != "false" and val != "null")  | 
 | 117 | +        elif current in ("requires", "requires_any"):  | 
 | 118 | +            m = re.match(r"\s*-[\s]*(.+)$", line)  | 
 | 119 | +            if m:  | 
 | 120 | +                parsed[current].append(m.group(1).strip())  | 
 | 121 | + | 
 | 122 | +    return parsed  | 
 | 123 | + | 
 | 124 | + | 
 | 125 | +def _read_local_sdkconfig_for_target(target: str) -> str:  | 
 | 126 | +    """Find sdkconfig for target inside downloaded build artifacts.  | 
 | 127 | +    Searches any build*.tmp directory under .arduino/tests/<target>/<test>/build*.tmp  | 
 | 128 | +    Returns first found content.  | 
 | 129 | +    """  | 
 | 130 | +    root = os.path.join(os.getcwd(), "runtime-test-results", "build_artifacts")  | 
 | 131 | +    if not os.path.isdir(root):  | 
 | 132 | +        return ""  | 
 | 133 | +    for dirpath, dirnames, filenames in os.walk(root):  | 
 | 134 | +        if target not in dirpath:  | 
 | 135 | +            continue  | 
 | 136 | +        if not re.search(r"build\d*\.tmp$", dirpath):  | 
 | 137 | +            continue  | 
 | 138 | +        if "sdkconfig" in filenames:  | 
 | 139 | +            try:  | 
 | 140 | +                with open(os.path.join(dirpath, "sdkconfig"), "r") as f:  | 
 | 141 | +                    return f.read()  | 
 | 142 | +            except Exception:  | 
 | 143 | +                continue  | 
 | 144 | +    return ""  | 
 | 145 | + | 
 | 146 | + | 
 | 147 | +def _meets_requirements(ci_cfg: dict, sdkconfig_text: str) -> bool:  | 
 | 148 | +    # Missing files -> assume requirements met (match CI behavior to build and fail later if needed)  | 
 | 149 | +    if not sdkconfig_text:  | 
 | 150 | +        return True  | 
 | 151 | +    # AND list  | 
 | 152 | +    for req in ci_cfg.get("requires", []):  | 
 | 153 | +        req = req.strip()  | 
 | 154 | +        if not req:  | 
 | 155 | +            continue  | 
 | 156 | +        if not any(line.startswith(req) for line in sdkconfig_text.splitlines()):  | 
 | 157 | +            return False  | 
 | 158 | +    # OR list  | 
 | 159 | +    req_any = ci_cfg.get("requires_any", [])  | 
 | 160 | +    if req_any:  | 
 | 161 | +        if not any(any(line.startswith(r.strip()) for line in sdkconfig_text.splitlines()) for r in req_any):  | 
 | 162 | +            return False  | 
 | 163 | +    return True  | 
 | 164 | + | 
 | 165 | + | 
 | 166 | +def _parse_fqbn_counts(content: str):  | 
 | 167 | +    """Parse ci.yml 'fqbn' section and count entries per target.  | 
 | 168 | +    Returns dict target -> int  | 
 | 169 | +    """  | 
 | 170 | +    counts = {}  | 
 | 171 | +    if not content:  | 
 | 172 | +        return counts  | 
 | 173 | +    lines = content.splitlines()  | 
 | 174 | +    in_fqbn = False  | 
 | 175 | +    current_target = None  | 
 | 176 | +    for line in lines:  | 
 | 177 | +        raw = line.split("#", 1)[0].rstrip()  | 
 | 178 | +        if not raw:  | 
 | 179 | +            continue  | 
 | 180 | +        if not raw.startswith(" "):  | 
 | 181 | +            # top-level key  | 
 | 182 | +            if raw.startswith("fqbn:"):  | 
 | 183 | +                in_fqbn = True  | 
 | 184 | +                current_target = None  | 
 | 185 | +                continue  | 
 | 186 | +            else:  | 
 | 187 | +                if in_fqbn:  | 
 | 188 | +                    # leaving fqbn section  | 
 | 189 | +                    break  | 
 | 190 | +                continue  | 
 | 191 | +        if not in_fqbn:  | 
 | 192 | +            continue  | 
 | 193 | +        # two-space indent target: line  | 
 | 194 | +        m_target = re.match(r"\s{2}([A-Za-z0-9_\-]+)\s*:\s*", raw)  | 
 | 195 | +        if m_target:  | 
 | 196 | +            current_target = m_target.group(1)  | 
 | 197 | +            counts.setdefault(current_target, 0)  | 
 | 198 | +            continue  | 
 | 199 | +        # four-space list item under current target  | 
 | 200 | +        if current_target:  | 
 | 201 | +            if re.match(r"\s{4}-\s", raw):  | 
 | 202 | +                counts[current_target] = counts.get(current_target, 0) + 1  | 
 | 203 | +    return counts  | 
 | 204 | + | 
 | 205 | + | 
# Get commit SHA from command line argument or environment variable
commit_sha = None
if not (2 <= len(sys.argv) <= 3):
    print(f"Usage: python {sys.argv[0]} <test_results.json> [commit_sha]", file=sys.stderr)
    sys.exit(1)

if len(sys.argv) == 3:  # Commit SHA is provided as argument
    commit_sha = sys.argv[2]
else:
    # Fall back to the CI-provided environment variable.
    commit_sha = os.environ.get("GITHUB_SHA")
    if commit_sha is None:  # Commit SHA is not provided
        print("Commit SHA is not provided. Please provide it as an argument or set the GITHUB_SHA environment variable.", file=sys.stderr)
        sys.exit(1)
27 | 218 | 
 
  | 
 | 
45 | 236 | proc_test_data = {}  | 
46 | 237 | target_list = []  | 
47 | 238 | 
 
  | 
 | 239 | +# Build executed tests map and collect targets  | 
 | 240 | +executed_tests_index = {}  # {(platform, target, test_name): {tests, failures, errors}}  | 
 | 241 | +executed_run_counts = {}   # {(platform, target, test_name): int}  | 
 | 242 | + | 
48 | 243 | for test in tests:  | 
49 | 244 |     if test["name"].startswith("performance_"):  | 
50 | 245 |         continue  | 
51 | 246 | 
 
  | 
52 |  | -    _, platform, target, test_name = test["name"].split("_", 3)  | 
53 |  | -    test_name = test_name[:-1]  | 
 | 247 | +    try:  | 
 | 248 | +        test_type, platform, target, rest = test["name"].split("_", 3)  | 
 | 249 | +    except ValueError:  | 
 | 250 | +        # Unexpected name, skip  | 
 | 251 | +        continue  | 
 | 252 | + | 
 | 253 | +    # Remove an optional trailing numeric index (multi-FQBN builds)  | 
 | 254 | +    m = re.match(r"(.+?)(\d+)?$", rest)  | 
 | 255 | +    test_name = m.group(1) if m else rest  | 
54 | 256 | 
 
  | 
55 | 257 |     if target not in target_list:  | 
56 | 258 |         target_list.append(target)  | 
 | 
72 | 274 |     proc_test_data[platform][test_name][target]["failures"] += test["failures"]  | 
73 | 275 |     proc_test_data[platform][test_name][target]["errors"] += test["errors"]  | 
74 | 276 | 
 
  | 
 | 277 | +    executed_tests_index[(platform, target, test_name)] = proc_test_data[platform][test_name][target]  | 
 | 278 | +    executed_run_counts[(platform, target, test_name)] = executed_run_counts.get((platform, target, test_name), 0) + 1  | 
 | 279 | + | 
75 | 280 | target_list = sorted(target_list)  | 
76 | 281 | 
 
  | 
# Determine expected tests from repo, ci.yml and sdkconfig for validation type
# NOTE(review): 'repo' is not referenced in this section — presumably used
# elsewhere or leftover; confirm before removing.
repo = os.environ.get("GITHUB_REPOSITORY", "")
hw_enabled = os.environ.get("HW_TESTS_ENABLED", "false").lower() == "true"
wokwi_enabled = os.environ.get("WOKWI_TESTS_ENABLED", "false").lower() == "true"
qemu_enabled = os.environ.get("QEMU_TESTS_ENABLED", "false").lower() == "true"

# Target lists are only honored when the corresponding platform is enabled.
hw_targets = _safe_env_list("HW_TARGETS") if hw_enabled else []
wokwi_targets = _safe_env_list("WOKWI_TARGETS") if wokwi_enabled else []
qemu_targets = _safe_env_list("QEMU_TARGETS") if qemu_enabled else []

# Only consider validation tests in this report
expected_by_platform = {"hardware": {}, "wokwi": {}, "qemu": {}}
expected_counts_by_key = {}  # {(platform, target, test_name): int}
# Best-effort computation: any failure here degrades the report (no expected
# column) rather than aborting it.
try:
    built_tests = _scan_artifacts_for_tests()
    # Prefetch sdkconfigs per target
    sdk_by_target = {}
    all_needed_targets = sorted(set(hw_targets + wokwi_targets + qemu_targets))
    for t in all_needed_targets:
        sdk_by_target[t] = _read_local_sdkconfig_for_target(t)

    for test_name, test_targets in built_tests.items():
        # Load ci.yml if exists
        ci_content = _read_local_ci_yaml_if_available(test_name)
        ci = _parse_ci_yml_minimal(ci_content)
        fqbn_counts = _parse_fqbn_counts(ci_content)

        # Evaluate for each platform
        for platform, targets in (("hardware", hw_targets), ("wokwi", wokwi_targets), ("qemu", qemu_targets)):
            if not targets:
                continue
            # platforms.<platform> == false -> skip
            if platform in ci.get("platforms", {}) and ci["platforms"].get(platform) is False:
                continue
            # Only consider targets that were actually built (we don't fetch repo)
            for target in [t for t in targets if t in test_targets]:
                # targets.<target> == false -> skip
                if target in ci.get("targets", {}) and ci["targets"].get(target) is False:
                    continue
                # requirements
                if not _meets_requirements(ci, sdk_by_target.get(target, "")):
                    continue
                expected_by_platform.setdefault(platform, {}).setdefault(test_name, set()).add(target)
                # Determine expected run count by number of FQBN variants (default 1)
                expected_runs = fqbn_counts.get(target, 0)
                if expected_runs <= 0:
                    expected_runs = 1
                expected_counts_by_key[(platform, target, test_name)] = expected_runs
except Exception as e:
    print(f"WARN: Failed to compute expected tests: {e}", file=sys.stderr)

# Ensure target_list also includes expected targets even if nothing executed
for t in hw_targets + wokwi_targets + qemu_targets:
    if t not in target_list:
        target_list.append(t)
target_list = sorted(set(target_list))

# Render a platform section when it has executed results or at least one
# expected (test, target) pair.
platforms_to_render = set(proc_test_data.keys()) | {p for p, mp in expected_by_platform.items() if any(mp.values())}
 | 340 | +for platform in platforms_to_render:  | 
78 | 341 |     print("")  | 
79 | 342 |     print(f"#### {platform.capitalize()}")  | 
80 | 343 |     print("")  | 
 | 
90 | 353 |     print("")  | 
91 | 354 |     print("-" + "|:-:" * len(target_list))  | 
92 | 355 | 
 
  | 
93 |  | -    for test_name, targets in proc_test_data[platform].items():  | 
 | 356 | +    # Determine all test names to render for this platform: executed ∪ expected  | 
 | 357 | +    platform_executed = proc_test_data.get(platform, {})  | 
 | 358 | +    platform_expected = expected_by_platform.get(platform, {})  | 
 | 359 | +    all_test_names = sorted(set(platform_executed.keys()) | set(platform_expected.keys()))  | 
 | 360 | + | 
 | 361 | +    for test_name in all_test_names:  | 
94 | 362 |         print(f"{test_name}", end="")  | 
95 | 363 |         for target in target_list:  | 
96 |  | -            if target in targets:  | 
97 |  | -                test_data = targets[target]  | 
98 |  | -                if test_data["errors"] > 0:  | 
 | 364 | +            # Executed?  | 
 | 365 | +            executed_cell = platform_executed.get(test_name, {}).get(target)  | 
 | 366 | +            exp_runs = expected_counts_by_key.get((platform, target, test_name), 0)  | 
 | 367 | +            exec_runs = executed_run_counts.get((platform, target, test_name), 0)  | 
 | 368 | +            if executed_cell:  | 
 | 369 | +                # If fewer runs executed than expected, mark as error  | 
 | 370 | +                if exp_runs > 0 and exec_runs < exp_runs:  | 
 | 371 | +                    print(f"|Error {ERROR_SYMBOL}", end="")  | 
 | 372 | +                    continue  | 
 | 373 | +                if executed_cell["errors"] > 0:  | 
99 | 374 |                     print(f"|Error {ERROR_SYMBOL}", end="")  | 
100 | 375 |                 else:  | 
101 |  | -                    print(f"|{test_data['total']-test_data['failures']}/{test_data['total']}", end="")  | 
102 |  | -                    if test_data["failures"] > 0:  | 
 | 376 | +                    print(f"|{executed_cell['total']-executed_cell['failures']}/{executed_cell['total']}", end="")  | 
 | 377 | +                    if executed_cell["failures"] > 0:  | 
103 | 378 |                         print(f" {FAILURE_SYMBOL}", end="")  | 
104 | 379 |                     else:  | 
105 | 380 |                         print(f" {SUCCESS_SYMBOL}", end="")  | 
 | 381 | +                continue  | 
 | 382 | + | 
 | 383 | +            # Not executed. Was it expected?  | 
 | 384 | +            expected_targets = platform_expected.get(test_name, set())  | 
 | 385 | +            if target in expected_targets:  | 
 | 386 | +                print(f"|Error {ERROR_SYMBOL}", end="")  | 
106 | 387 |             else:  | 
107 | 388 |                 print("|-", end="")  | 
108 | 389 |         print("")  | 
 | 
0 commit comments