diff --git a/.github/workflows/scripts-android.yml b/.github/workflows/scripts-android.yml index 5d71510ddf..d8beb91aa4 100644 --- a/.github/workflows/scripts-android.yml +++ b/.github/workflows/scripts-android.yml @@ -14,9 +14,20 @@ name: Test Android build scripts jobs: build-android: + permissions: + contents: read + pull-requests: write + issues: write runs-on: ubuntu-latest + env: + GITHUB_TOKEN: ${{ secrets.CN1SS_GH_TOKEN }} + GH_TOKEN: ${{ secrets.CN1SS_GH_TOKEN }} steps: - uses: actions/checkout@v4 + - name: Install Pillow for image processing + run: | + python3 -m pip install --upgrade pip + python3 -m pip install pillow - name: Setup workspace run: ./scripts/setup-workspace.sh -q -DskipTests - name: Build Android port @@ -45,4 +56,4 @@ jobs: path: artifacts/*.png if-no-files-found: warn retention-days: 14 - compression-level: 6 \ No newline at end of file + compression-level: 6 diff --git a/scripts/android/screenshots/BrowserComponent.png b/scripts/android/screenshots/BrowserComponent.png new file mode 100644 index 0000000000..af6ce4456d Binary files /dev/null and b/scripts/android/screenshots/BrowserComponent.png differ diff --git a/scripts/android/screenshots/MainActivity.png b/scripts/android/screenshots/MainActivity.png new file mode 100644 index 0000000000..b3ade26931 Binary files /dev/null and b/scripts/android/screenshots/MainActivity.png differ diff --git a/scripts/android/screenshots/README.md b/scripts/android/screenshots/README.md new file mode 100644 index 0000000000..84d922730f --- /dev/null +++ b/scripts/android/screenshots/README.md @@ -0,0 +1,13 @@ +# Android Instrumentation Test Screenshots + +This directory stores reference screenshots for Android native instrumentation tests. + +Each PNG file should be named after the test stream that emits the screenshot +(e.g. `MainActivity.png` or `BrowserComponent.png`). 
The automation in +`scripts/run-android-instrumentation-tests.sh` compares the screenshots emitted +by the emulator with the files stored here. If the pixels differ (ignoring PNG +metadata) or if a reference image is missing, the workflow posts a pull request +comment that includes the updated screenshot. + +When the comparison passes, no screenshot artifacts are published and no +comment is created. diff --git a/scripts/android/tests/HelloCodenameOneInstrumentedTest.java b/scripts/android/tests/HelloCodenameOneInstrumentedTest.java index 92de85de8c..dd1d16943c 100644 --- a/scripts/android/tests/HelloCodenameOneInstrumentedTest.java +++ b/scripts/android/tests/HelloCodenameOneInstrumentedTest.java @@ -13,25 +13,49 @@ import androidx.test.core.app.ApplicationProvider; import androidx.test.ext.junit.runners.AndroidJUnit4; +import com.codename1.ui.Container; +import com.codename1.ui.BrowserComponent; +import com.codename1.ui.Display; +import com.codename1.ui.Form; +import com.codename1.ui.Label; +import com.codename1.ui.layouts.BorderLayout; +import com.codename1.ui.layouts.BoxLayout; + import org.junit.Assert; +import org.junit.Assume; import org.junit.Test; import org.junit.runner.RunWith; import java.io.ByteArrayOutputStream; +import java.util.Locale; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; @RunWith(AndroidJUnit4.class) public class HelloCodenameOneInstrumentedTest { + private static final int CHUNK_SIZE = 2000; + private static final String PREVIEW_CHANNEL = "PREVIEW"; + private static final int[] PREVIEW_JPEG_QUALITIES = + new int[] {60, 50, 40, 35, 30, 25, 20, 18, 16, 14, 12, 10, 8, 6, 5, 4, 3, 2, 1}; + private static final int MAX_PREVIEW_BYTES = 20 * 1024; // 20 KiB target keeps comment payloads small + private static final String MAIN_SCREEN_TEST = "MainActivity"; + private static final String BROWSER_TEST = "BrowserComponent"; + private static void println(String s) { System.out.println(s); } - @Test - public void 
testUseAppContext_andEmitScreenshot() throws Exception { - Context ctx = ApplicationProvider.getApplicationContext(); - String pkg = "@PACKAGE@"; - Assert.assertEquals("Package mismatch", pkg, ctx.getPackageName()); + private static void settle(long millis) { + try { + Thread.sleep(millis); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + } + } + private static ActivityScenario launchMainActivity(Context ctx) { + String pkg = "@PACKAGE@"; Intent launch = ctx.getPackageManager().getLaunchIntentForPackage(pkg); if (launch == null) { Intent q = new Intent(Intent.ACTION_MAIN); @@ -40,73 +64,279 @@ public void testUseAppContext_andEmitScreenshot() throws Exception { launch = q; } launch.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK); + println("CN1SS:INFO: launching activity for test"); + return ActivityScenario.launch(launch); + } - println("CN1SS:INFO: about to launch Activity"); - byte[] pngBytes = null; - - try (ActivityScenario scenario = ActivityScenario.launch(launch)) { - Thread.sleep(750); - - println("CN1SS:INFO: activity launched"); - - final byte[][] holder = new byte[1][]; - scenario.onActivity(activity -> { - try { - View root = activity.getWindow().getDecorView().getRootView(); - int w = root.getWidth(); - int h = root.getHeight(); - if (w <= 0 || h <= 0) { - DisplayMetrics dm = activity.getResources().getDisplayMetrics(); - w = Math.max(1, dm.widthPixels); - h = Math.max(1, dm.heightPixels); - int sw = View.MeasureSpec.makeMeasureSpec(w, View.MeasureSpec.EXACTLY); - int sh = View.MeasureSpec.makeMeasureSpec(h, View.MeasureSpec.EXACTLY); - root.measure(sw, sh); - root.layout(0, 0, w, h); - println("CN1SS:INFO: forced layout to " + w + "x" + h); - } else { - println("CN1SS:INFO: natural layout " + w + "x" + h); - } + private static final class ScreenshotCapture { + final byte[] png; + final byte[] previewJpeg; + final int previewQuality; - Bitmap bmp = Bitmap.createBitmap(w, h, Bitmap.Config.ARGB_8888); - Canvas c = new Canvas(bmp); - 
root.draw(c); + ScreenshotCapture(byte[] png, byte[] previewJpeg, int previewQuality) { + this.png = png; + this.previewJpeg = previewJpeg; + this.previewQuality = previewQuality; + } + } - ByteArrayOutputStream baos = new ByteArrayOutputStream(Math.max(1024, w * h / 2)); - boolean ok = bmp.compress(Bitmap.CompressFormat.PNG, 100, baos); - if (!ok) { - throw new RuntimeException("Bitmap.compress returned false"); + private static ScreenshotCapture captureScreenshot(ActivityScenario scenario, String testName) { + final byte[][] holder = new byte[2][]; + final int[] qualityHolder = new int[1]; + scenario.onActivity(activity -> { + try { + View root = activity.getWindow().getDecorView().getRootView(); + int w = root.getWidth(); + int h = root.getHeight(); + if (w <= 0 || h <= 0) { + DisplayMetrics dm = activity.getResources().getDisplayMetrics(); + w = Math.max(1, dm.widthPixels); + h = Math.max(1, dm.heightPixels); + int sw = View.MeasureSpec.makeMeasureSpec(w, View.MeasureSpec.EXACTLY); + int sh = View.MeasureSpec.makeMeasureSpec(h, View.MeasureSpec.EXACTLY); + root.measure(sw, sh); + root.layout(0, 0, w, h); + println("CN1SS:INFO:test=" + testName + " forced layout to " + w + "x" + h); + } else { + println("CN1SS:INFO:test=" + testName + " natural layout " + w + "x" + h); + } + + Bitmap bmp = Bitmap.createBitmap(w, h, Bitmap.Config.ARGB_8888); + Canvas c = new Canvas(bmp); + root.draw(c); + + ByteArrayOutputStream pngOut = new ByteArrayOutputStream(Math.max(1024, w * h / 2)); + if (!bmp.compress(Bitmap.CompressFormat.PNG, 100, pngOut)) { + throw new RuntimeException("Bitmap.compress returned false"); + } + holder[0] = pngOut.toByteArray(); + println( + "CN1SS:INFO:test=" + + testName + + " png_bytes=" + + holder[0].length); + + int chosenQuality = 0; + byte[] chosenPreview = null; + int smallestBytes = Integer.MAX_VALUE; + + for (int quality : PREVIEW_JPEG_QUALITIES) { + ByteArrayOutputStream jpegOut = new ByteArrayOutputStream(Math.max(1024, w * h / 2)); + if 
(!bmp.compress(Bitmap.CompressFormat.JPEG, quality, jpegOut)) { + continue; + } + byte[] jpegBytes = jpegOut.toByteArray(); + int length = jpegBytes.length; + if (length < smallestBytes) { + smallestBytes = length; + chosenQuality = quality; + chosenPreview = jpegBytes; + } + if (length <= MAX_PREVIEW_BYTES) { + break; } - holder[0] = baos.toByteArray(); - println("CN1SS:INFO: png_bytes=" + holder[0].length); - } catch (Throwable t) { - println("CN1SS:ERR: onActivity " + t); - t.printStackTrace(System.out); } - }); - pngBytes = holder[0]; - } catch (Throwable t) { - println("CN1SS:ERR: launch " + t); - t.printStackTrace(System.out); + holder[1] = chosenPreview; + qualityHolder[0] = chosenQuality; + if (chosenPreview != null) { + println( + "CN1SS:INFO:test=" + + testName + + " preview_jpeg_bytes=" + + chosenPreview.length + + " preview_quality=" + + chosenQuality); + if (chosenPreview.length > MAX_PREVIEW_BYTES) { + println( + "CN1SS:WARN:test=" + + testName + + " preview_exceeds_limit_bytes=" + + chosenPreview.length + + " max_preview_bytes=" + + MAX_PREVIEW_BYTES); + } + } else { + println("CN1SS:INFO:test=" + testName + " preview_jpeg_bytes=0 preview_quality=0"); + } + bmp.recycle(); + } catch (Throwable t) { + println("CN1SS:ERR:test=" + testName + " " + t); + t.printStackTrace(System.out); + } + }); + if (holder[0] == null) { + return new ScreenshotCapture(null, null, 0); } + return new ScreenshotCapture(holder[0], holder[1], qualityHolder[0]); + } + + private static String sanitizeTestName(String testName) { + return testName.replaceAll("[^A-Za-z0-9_.-]", "_"); + } - if (pngBytes == null || pngBytes.length == 0) { - println("CN1SS:END"); - Assert.fail("Screenshot capture produced 0 bytes"); + private static void emitScreenshot(ScreenshotCapture capture, String testName) { + if (capture == null || capture.png == null || capture.png.length == 0) { + println("CN1SS:END:" + sanitizeTestName(testName)); + Assert.fail("Screenshot capture produced 0 bytes for " + 
testName); return; } + emitScreenshotChannel(capture.png, testName, ""); + if (capture.previewJpeg != null && capture.previewJpeg.length > 0) { + emitScreenshotChannel(capture.previewJpeg, testName, PREVIEW_CHANNEL); + } + } - String b64 = Base64.encodeToString(pngBytes, Base64.NO_WRAP); - final int chunkSize = 2000; + private static void emitScreenshotChannel(byte[] bytes, String testName, String channel) { + String safeName = sanitizeTestName(testName); + String prefix = "CN1SS"; + if (channel != null && channel.length() > 0) { + prefix += channel; + } + if (bytes == null || bytes.length == 0) { + println(prefix + ":END:" + safeName); + return; + } + String b64 = Base64.encodeToString(bytes, Base64.NO_WRAP); int count = 0; - for (int pos = 0; pos < b64.length(); pos += chunkSize) { - int end = Math.min(pos + chunkSize, b64.length()); - System.out.println("CN1SS:" + String.format("%06d", pos) + ":" + b64.substring(pos, end)); + for (int pos = 0; pos < b64.length(); pos += CHUNK_SIZE) { + int end = Math.min(pos + CHUNK_SIZE, b64.length()); + String chunk = b64.substring(pos, end); + System.out.println( + prefix + + ":" + + safeName + + ":" + + String.format(Locale.US, "%06d", pos) + + ":" + + chunk); count++; } - println("CN1SS:INFO: chunks=" + count + " total_b64_len=" + b64.length()); - System.out.println("CN1SS:END"); + println("CN1SS:INFO:test=" + safeName + " chunks=" + count + " total_b64_len=" + b64.length()); + System.out.println(prefix + ":END:" + safeName); System.out.flush(); } + + private static void prepareBrowserComponentContent(ActivityScenario scenario) throws InterruptedException { + final CountDownLatch supportLatch = new CountDownLatch(1); + final boolean[] supported = new boolean[1]; + + scenario.onActivity(activity -> Display.getInstance().callSerially(() -> { + try { + supported[0] = BrowserComponent.isNativeBrowserSupported(); + } finally { + supportLatch.countDown(); + } + })); + + if (!supportLatch.await(5, TimeUnit.SECONDS)) { + 
Assert.fail("Timed out while verifying BrowserComponent support"); + } + + Assume.assumeTrue("BrowserComponent native support required for this test", supported[0]); + + final CountDownLatch loadLatch = new CountDownLatch(1); + final String html = "" + + "" + + "
<html><body>" + "<h1>Codename One</h1>" + "<p>BrowserComponent instrumentation test content.</p>" + "</body></html>
"; + + scenario.onActivity(activity -> Display.getInstance().callSerially(() -> { + Form current = Display.getInstance().getCurrent(); + if (current == null) { + current = new Form("Browser Test", new BorderLayout()); + current.show(); + } else { + current.setLayout(new BorderLayout()); + current.setTitle("Browser Test"); + current.removeAll(); + } + + BrowserComponent browser = new BrowserComponent(); + browser.addWebEventListener(BrowserComponent.onLoad, evt -> loadLatch.countDown()); + browser.setPage(html, null); + current.add(BorderLayout.CENTER, browser); + current.revalidate(); + })); + + if (!loadLatch.await(10, TimeUnit.SECONDS)) { + Assert.fail("Timed out waiting for BrowserComponent to load content"); + } + } + + private static void prepareMainActivityContent(ActivityScenario scenario) throws InterruptedException { + final CountDownLatch latch = new CountDownLatch(1); + scenario.onActivity(activity -> Display.getInstance().callSerially(() -> { + try { + Form current = Display.getInstance().getCurrent(); + if (current == null) { + current = new Form("Main Screen", new BorderLayout()); + current.show(); + } else { + current.setLayout(new BorderLayout()); + current.setTitle("Main Screen"); + current.removeAll(); + } + + Container content = new Container(BoxLayout.y()); + content.getAllStyles().setBgColor(0x1f2937); + content.getAllStyles().setBgTransparency(255); + content.getAllStyles().setPadding(6, 6, 6, 6); + content.getAllStyles().setFgColor(0xf9fafb); + + Label heading = new Label("Hello Codename One"); + heading.getAllStyles().setFgColor(0x38bdf8); + heading.getAllStyles().setMargin(0, 4, 0, 0); + + Label body = new Label("Instrumentation main activity preview"); + body.getAllStyles().setFgColor(0xf9fafb); + + content.add(heading); + content.add(body); + + current.add(BorderLayout.CENTER, content); + current.revalidate(); + } finally { + latch.countDown(); + } + })); + + if (!latch.await(5, TimeUnit.SECONDS)) { + Assert.fail("Timed out preparing main 
activity content"); + } + } + + @Test + public void testUseAppContext_andEmitScreenshot() throws Exception { + Context ctx = ApplicationProvider.getApplicationContext(); + String pkg = "@PACKAGE@"; + Assert.assertEquals("Package mismatch", pkg, ctx.getPackageName()); + + ScreenshotCapture capture; + try (ActivityScenario scenario = launchMainActivity(ctx)) { + settle(750); + prepareMainActivityContent(scenario); + settle(500); + capture = captureScreenshot(scenario, MAIN_SCREEN_TEST); + } + + emitScreenshot(capture, MAIN_SCREEN_TEST); + } + + @Test + public void testBrowserComponentScreenshot() throws Exception { + Context ctx = ApplicationProvider.getApplicationContext(); + ScreenshotCapture capture; + + try (ActivityScenario scenario = launchMainActivity(ctx)) { + settle(750); + prepareBrowserComponentContent(scenario); + settle(500); + capture = captureScreenshot(scenario, BROWSER_TEST); + } + + emitScreenshot(capture, BROWSER_TEST); + } } diff --git a/scripts/android/tests/cn1ss_chunk_tools.py b/scripts/android/tests/cn1ss_chunk_tools.py index 026bae0192..0845047b22 100755 --- a/scripts/android/tests/cn1ss_chunk_tools.py +++ b/scripts/android/tests/cn1ss_chunk_tools.py @@ -1,40 +1,61 @@ -#!/usr/bin/env python3 -"""Helpers for extracting CN1SS chunked screenshot payloads.""" -from __future__ import annotations - import argparse import base64 import pathlib import re import sys -from typing import Iterable, List, Tuple +from typing import Iterable, List, Optional, Tuple -CHUNK_PATTERN = re.compile(r"CN1SS:(\d{6}):(.*)") +DEFAULT_TEST_NAME = "default" +DEFAULT_CHANNEL = "" +CHUNK_PATTERN = re.compile( + r"CN1SS(?:(?P[A-Z]+))?:(?:(?P[A-Za-z0-9_.-]+):)?(?P\d{6}):(?P.*)" +) -def _iter_chunk_lines(path: pathlib.Path) -> Iterable[Tuple[int, str]]: +def _iter_chunk_lines( + path: pathlib.Path, + test_filter: Optional[str] = None, + channel_filter: Optional[str] = DEFAULT_CHANNEL, +) -> Iterable[Tuple[str, int, str]]: text = path.read_text(encoding="utf-8", 
errors="ignore") for line in text.splitlines(): match = CHUNK_PATTERN.search(line) if not match: continue - index = int(match.group(1)) - payload = re.sub(r"[^A-Za-z0-9+/=]", "", match.group(2)) + test_name = match.group("test") or DEFAULT_TEST_NAME + if test_filter is not None and test_name != test_filter: + continue + channel = match.group("channel") or DEFAULT_CHANNEL + if channel_filter is not None and channel != channel_filter: + continue + index = int(match.group("index")) + payload = re.sub(r"[^A-Za-z0-9+/=]", "", match.group("payload")) if payload: - yield index, payload + yield test_name, index, payload -def count_chunks(path: pathlib.Path) -> int: - return sum(1 for _ in _iter_chunk_lines(path)) +def count_chunks( + path: pathlib.Path, test: Optional[str] = None, channel: Optional[str] = DEFAULT_CHANNEL +) -> int: + return sum(1 for _ in _iter_chunk_lines(path, test_filter=test, channel_filter=channel)) -def concatenate_chunks(path: pathlib.Path) -> str: - ordered = sorted(_iter_chunk_lines(path), key=lambda item: item[0]) - return "".join(payload for _, payload in ordered) +def concatenate_chunks( + path: pathlib.Path, test: Optional[str] = None, channel: Optional[str] = DEFAULT_CHANNEL +) -> str: + ordered = sorted( + _iter_chunk_lines(path, test_filter=test, channel_filter=channel), + key=lambda item: item[1], + ) + return "".join(payload for _, _, payload in ordered) -def decode_chunks(path: pathlib.Path) -> bytes: - data = concatenate_chunks(path) +def decode_chunks( + path: pathlib.Path, + test: Optional[str] = None, + channel: Optional[str] = DEFAULT_CHANNEL, +) -> bytes: + data = concatenate_chunks(path, test=test, channel=channel) if not data: return b"" try: @@ -43,28 +64,63 @@ def decode_chunks(path: pathlib.Path) -> bytes: return b"" +def list_tests(path: pathlib.Path) -> List[str]: + seen = { + test + for test, _, _ in _iter_chunk_lines(path, channel_filter=DEFAULT_CHANNEL) + } + return sorted(seen) + + def main(argv: List[str] | None = None) 
-> int: parser = argparse.ArgumentParser(description=__doc__) subparsers = parser.add_subparsers(dest="command", required=True) p_count = subparsers.add_parser("count", help="Count CN1SS chunks in a file") p_count.add_argument("path", type=pathlib.Path) + p_count.add_argument("--test", dest="test", default=None, help="Optional test name filter") + p_count.add_argument( + "--channel", + dest="channel", + default=DEFAULT_CHANNEL, + help="Optional channel (default=primary)", + ) p_extract = subparsers.add_parser("extract", help="Concatenate CN1SS payload chunks") p_extract.add_argument("path", type=pathlib.Path) p_extract.add_argument("--decode", action="store_true", help="Decode payload to binary PNG") + p_extract.add_argument("--test", dest="test", default=None, help="Test name to extract (default=unnamed)") + p_extract.add_argument( + "--channel", + dest="channel", + default=DEFAULT_CHANNEL, + help="Optional channel (default=primary)", + ) + + p_tests = subparsers.add_parser("tests", help="List distinct test names found in CN1SS chunks") + p_tests.add_argument("path", type=pathlib.Path) args = parser.parse_args(argv) if args.command == "count": - print(count_chunks(args.path)) + print(count_chunks(args.path, args.test, args.channel)) return 0 if args.command == "extract": + target_test: Optional[str] + if args.test is None: + target_test = DEFAULT_TEST_NAME + else: + target_test = args.test if args.decode: - sys.stdout.buffer.write(decode_chunks(args.path)) + sys.stdout.buffer.write(decode_chunks(args.path, target_test, args.channel)) else: - sys.stdout.write(concatenate_chunks(args.path)) + sys.stdout.write(concatenate_chunks(args.path, target_test, args.channel)) + return 0 + + if args.command == "tests": + for name in list_tests(args.path): + print(name) return 0 return 1 diff --git a/scripts/android/tests/process_screenshots.py b/scripts/android/tests/process_screenshots.py new file mode 100644 index 0000000000..2e7bd8b9c5 --- /dev/null +++ 
b/scripts/android/tests/process_screenshots.py @@ -0,0 +1,630 @@ +#!/usr/bin/env python3 +"""Compare CN1 screenshot outputs against stored references.""" + +from __future__ import annotations + +import argparse +import base64 +import io +import json +import pathlib +import shutil +import struct +import subprocess +import sys +import tempfile +import zlib +from dataclasses import dataclass +from typing import Dict, Iterable, List, Optional, Tuple, cast + +try: + from PIL import Image # type: ignore +except Exception: # pragma: no cover - optional dependency + Image = None + +MAX_COMMENT_BASE64 = 60_000 +JPEG_QUALITY_CANDIDATES = (70, 60, 50, 40, 30, 20, 10) + +PNG_SIGNATURE = b"\x89PNG\r\n\x1a\n" + + +class PNGError(Exception): + """Raised when a PNG cannot be parsed.""" + + +@dataclass +class PNGImage: + width: int + height: int + bit_depth: int + color_type: int + pixels: bytes + bytes_per_pixel: int + + +@dataclass +class CommentPayload: + base64: Optional[str] + base64_length: int + mime: str + codec: str + quality: Optional[int] = None + omitted_reason: Optional[str] = None + note: Optional[str] = None + data: Optional[bytes] = None + + +def _read_chunks(path: pathlib.Path) -> Iterable[Tuple[bytes, bytes]]: + data = path.read_bytes() + if not data.startswith(PNG_SIGNATURE): + raise PNGError(f"{path} is not a PNG file (missing signature)") + offset = len(PNG_SIGNATURE) + length = len(data) + while offset + 8 <= length: + chunk_len = int.from_bytes(data[offset : offset + 4], "big") + chunk_type = data[offset + 4 : offset + 8] + offset += 8 + if offset + chunk_len + 4 > length: + raise PNGError("PNG chunk truncated before CRC") + chunk_data = data[offset : offset + chunk_len] + offset += chunk_len + 4 # skip data + CRC + yield chunk_type, chunk_data + if chunk_type == b"IEND": + break + + +def _bytes_per_pixel(bit_depth: int, color_type: int) -> int: + if bit_depth != 8: + raise PNGError(f"Unsupported bit depth: {bit_depth}") + if color_type == 0: # greyscale + 
return 1 + if color_type == 2: # RGB + return 3 + if color_type == 4: # greyscale + alpha + return 2 + if color_type == 6: # RGBA + return 4 + raise PNGError(f"Unsupported color type: {color_type}") + + +def _paeth_predict(a: int, b: int, c: int) -> int: + p = a + b - c + pa = abs(p - a) + pb = abs(p - b) + pc = abs(p - c) + if pa <= pb and pa <= pc: + return a + if pb <= pc: + return b + return c + + +def _unfilter(width: int, height: int, bpp: int, raw: bytes) -> bytes: + stride = width * bpp + expected = height * (stride + 1) + if len(raw) != expected: + raise PNGError("PNG IDAT payload has unexpected length") + result = bytearray(height * stride) + in_offset = 0 + out_offset = 0 + for row in range(height): + filter_type = raw[in_offset] + in_offset += 1 + row_data = bytearray(raw[in_offset : in_offset + stride]) + in_offset += stride + if filter_type == 0: # None + pass + elif filter_type == 1: # Sub + for i in range(stride): + left = row_data[i - bpp] if i >= bpp else 0 + row_data[i] = (row_data[i] + left) & 0xFF + elif filter_type == 2: # Up + for i in range(stride): + up = result[out_offset - stride + i] if row > 0 else 0 + row_data[i] = (row_data[i] + up) & 0xFF + elif filter_type == 3: # Average + for i in range(stride): + left = row_data[i - bpp] if i >= bpp else 0 + up = result[out_offset - stride + i] if row > 0 else 0 + row_data[i] = (row_data[i] + ((left + up) // 2)) & 0xFF + elif filter_type == 4: # Paeth + for i in range(stride): + left = row_data[i - bpp] if i >= bpp else 0 + up = result[out_offset - stride + i] if row > 0 else 0 + up_left = result[out_offset - stride + i - bpp] if (row > 0 and i >= bpp) else 0 + row_data[i] = (row_data[i] + _paeth_predict(left, up, up_left)) & 0xFF + else: + raise PNGError(f"Unsupported PNG filter type: {filter_type}") + result[out_offset : out_offset + stride] = row_data + out_offset += stride + return bytes(result) + + +def load_png(path: pathlib.Path) -> PNGImage: + ihdr = None + idat_chunks: List[bytes] = [] + 
for chunk_type, chunk_data in _read_chunks(path): + if chunk_type == b"IHDR": + if ihdr is not None: + raise PNGError("Duplicate IHDR chunk") + if len(chunk_data) != 13: + raise PNGError("Invalid IHDR length") + width = int.from_bytes(chunk_data[0:4], "big") + height = int.from_bytes(chunk_data[4:8], "big") + bit_depth = chunk_data[8] + color_type = chunk_data[9] + # compression (10), filter (11), interlace (12) must be default values + if chunk_data[10] != 0 or chunk_data[11] != 0: + raise PNGError("Unsupported PNG compression or filter method") + if chunk_data[12] not in (0, 1): + raise PNGError("Unsupported PNG interlace method") + ihdr = (width, height, bit_depth, color_type, chunk_data[12]) + elif chunk_type == b"IDAT": + idat_chunks.append(chunk_data) + elif chunk_type == b"IEND": + break + else: + # Ancillary chunks are ignored (metadata) + continue + + if ihdr is None: + raise PNGError("Missing IHDR chunk") + if not idat_chunks: + raise PNGError("Missing IDAT data") + + width, height, bit_depth, color_type, interlace = ihdr + if interlace != 0: + raise PNGError("Interlaced PNGs are not supported") + + bpp = _bytes_per_pixel(bit_depth, color_type) + compressed = b"".join(idat_chunks) + try: + raw = zlib.decompress(compressed) + except Exception as exc: # pragma: no cover - defensive + raise PNGError(f"Failed to decompress IDAT data: {exc}") from exc + + pixels = _unfilter(width, height, bpp, raw) + return PNGImage(width, height, bit_depth, color_type, pixels, bpp) + + +def compare_images(expected: PNGImage, actual: PNGImage) -> Dict[str, bool]: + equal = ( + expected.width == actual.width + and expected.height == actual.height + and expected.bit_depth == actual.bit_depth + and expected.color_type == actual.color_type + and expected.pixels == actual.pixels + ) + return { + "equal": equal, + "width": actual.width, + "height": actual.height, + "bit_depth": actual.bit_depth, + "color_type": actual.color_type, + } + + +def _encode_png(width: int, height: int, 
bit_depth: int, color_type: int, bpp: int, pixels: bytes) -> bytes: + import zlib as _zlib + + if len(pixels) != width * height * bpp: + raise PNGError("Pixel buffer length does not match dimensions") + + def chunk(tag: bytes, payload: bytes) -> bytes: + crc = _zlib.crc32(tag + payload) & 0xFFFFFFFF + return ( + len(payload).to_bytes(4, "big") + + tag + + payload + + crc.to_bytes(4, "big") + ) + + raw = bytearray() + stride = width * bpp + for row in range(height): + raw.append(0) + start = row * stride + raw.extend(pixels[start : start + stride]) + + ihdr = struct.pack( + ">IIBBBBB", + width, + height, + bit_depth, + color_type, + 0, + 0, + 0, + ) + + compressed = _zlib.compress(bytes(raw)) + return b"".join( + [PNG_SIGNATURE, chunk(b"IHDR", ihdr), chunk(b"IDAT", compressed), chunk(b"IEND", b"")] + ) + + +def _prepare_pillow_image(image: PNGImage): + if Image is None: + raise RuntimeError("Pillow is not available") + mode_map = {0: "L", 2: "RGB", 4: "LA", 6: "RGBA"} + mode = mode_map.get(image.color_type) + if mode is None: + raise PNGError(f"Unsupported PNG color type for conversion: {image.color_type}") + pil_img = Image.frombytes(mode, (image.width, image.height), image.pixels) + if pil_img.mode == "LA": + pil_img = pil_img.convert("RGBA") + if pil_img.mode == "RGBA": + background = Image.new("RGB", pil_img.size, (255, 255, 255)) + alpha = pil_img.split()[-1] + background.paste(pil_img.convert("RGB"), mask=alpha) + pil_img = background + elif pil_img.mode != "RGB": + pil_img = pil_img.convert("RGB") + return pil_img + + +def _build_png_payload(image: PNGImage) -> bytes: + return _encode_png( + image.width, + image.height, + image.bit_depth, + image.color_type, + image.bytes_per_pixel, + image.pixels, + ) + + +def build_comment_payload(image: PNGImage, max_length: int = MAX_COMMENT_BASE64) -> CommentPayload: + note: Optional[str] = None + if Image is not None: + pil_img = cast("Image.Image", _prepare_pillow_image(image)) + scales = [1.0, 0.7, 0.5, 0.35, 0.25] + 
smallest_data: Optional[bytes] = None + smallest_quality: Optional[int] = None + for scale in scales: + candidate = pil_img + if scale < 1.0: + width = max(1, int(image.width * scale)) + height = max(1, int(image.height * scale)) + candidate = pil_img.copy() + candidate.thumbnail((width, height)) + for quality in JPEG_QUALITY_CANDIDATES: + buffer = io.BytesIO() + try: + candidate.save(buffer, format="JPEG", quality=quality, optimize=True) + except OSError: + buffer = io.BytesIO() + candidate.save(buffer, format="JPEG", quality=quality) + data = buffer.getvalue() + smallest_data = data + smallest_quality = quality + encoded = base64.b64encode(data).decode("ascii") + if len(encoded) <= max_length: + note_bits = [f"JPEG preview quality {quality}"] + if scale < 1.0: + note_bits.append(f"downscaled to {candidate.width}x{candidate.height}") + return CommentPayload( + base64=encoded, + base64_length=len(encoded), + mime="image/jpeg", + codec="jpeg", + quality=quality, + omitted_reason=None, + note="; ".join(note_bits), + data=data, + ) + if smallest_data is not None and smallest_quality is not None: + return CommentPayload( + base64=None, + base64_length=len(base64.b64encode(smallest_data).decode("ascii")), + mime="image/jpeg", + codec="jpeg", + quality=smallest_quality, + omitted_reason="too_large", + note="All JPEG previews exceeded limit even after downscaling", + data=smallest_data, + ) + note = "JPEG conversion unavailable" + else: + # Attempt an external conversion using ImageMagick/GraphicsMagick if + # Pillow isn't present on the runner. This keeps the previews JPEG-based + # while avoiding large dependencies in the workflow environment. + cli_payload = _build_comment_payload_via_cli(image, max_length) + if cli_payload is not None: + return cli_payload + note = "Pillow library not available; falling back to PNG previews." 
+ + png_bytes = _build_png_payload(image) + encoded = base64.b64encode(png_bytes).decode("ascii") + if len(encoded) <= max_length: + return CommentPayload( + base64=encoded, + base64_length=len(encoded), + mime="image/png", + codec="png", + quality=None, + omitted_reason=None, + note=note, + data=png_bytes, + ) + return CommentPayload( + base64=None, + base64_length=len(encoded), + mime="image/png", + codec="png", + quality=None, + omitted_reason="too_large", + note=note, + data=png_bytes, + ) + + +def _build_comment_payload_via_cli( + image: PNGImage, max_length: int +) -> Optional[CommentPayload]: + """Attempt to generate a JPEG preview using an external CLI.""" + + converters = _detect_cli_converters() + if not converters: + return None + + png_bytes = _build_png_payload(image) + + with tempfile.TemporaryDirectory(prefix="cn1ss-cli-jpeg-") as tmp_dir: + tmp_dir_path = pathlib.Path(tmp_dir) + src = tmp_dir_path / "input.png" + dst = tmp_dir_path / "preview.jpg" + src.write_bytes(png_bytes) + + last_encoded: Optional[str] = None + last_length = 0 + last_quality: Optional[int] = None + last_data: Optional[bytes] = None + last_error: Optional[str] = None + + for quality in JPEG_QUALITY_CANDIDATES: + for converter in converters: + try: + _run_cli_converter(converter, src, dst, quality) + except RuntimeError as exc: + last_error = str(exc) + continue + if not dst.exists(): + last_error = "CLI converter did not create JPEG output" + continue + data = dst.read_bytes() + encoded = base64.b64encode(data).decode("ascii") + last_encoded = encoded + last_length = len(encoded) + last_quality = quality + last_data = data + if len(encoded) <= max_length: + return CommentPayload( + base64=encoded, + base64_length=len(encoded), + mime="image/jpeg", + codec="jpeg", + quality=quality, + omitted_reason=None, + note=f"JPEG preview generated via {converter[0]}", + data=data, + ) + break # try next quality once any converter succeeded + + if last_encoded is not None: + note = "" + if 
last_error: + note = last_error + return CommentPayload( + base64=None, + base64_length=last_length, + mime="image/jpeg", + codec="jpeg", + quality=last_quality, + omitted_reason="too_large", + note=(note or f"JPEG preview generated via {converters[0][0]}") + if converters + else note, + data=last_data, + ) + + return None + + +def _record_comment_payload( + record: Dict[str, object], + payload: CommentPayload, + default_name: str, + preview_dir: Optional[pathlib.Path], +) -> None: + if payload.base64 is not None: + record["base64"] = payload.base64 + else: + record.update( + {"base64_omitted": payload.omitted_reason, "base64_length": payload.base64_length} + ) + record.update({ + "base64_mime": payload.mime, + "base64_codec": payload.codec, + }) + if payload.quality is not None: + record["base64_quality"] = payload.quality + if payload.note: + record["base64_note"] = payload.note + + if preview_dir is None or payload.data is None: + return + + preview_dir.mkdir(parents=True, exist_ok=True) + suffix = ".jpg" if payload.mime == "image/jpeg" else ".png" + base_name = _slugify(default_name.rsplit(".", 1)[0] or "preview") + preview_path = preview_dir / f"{base_name}{suffix}" + preview_path.write_bytes(payload.data) + record["preview"] = { + "path": str(preview_path), + "name": preview_path.name, + "mime": payload.mime, + "codec": payload.codec, + "quality": payload.quality, + "note": payload.note, + } + + +def _load_external_preview_payload( + test_name: str, preview_dir: pathlib.Path +) -> Optional[CommentPayload]: + slug = _slugify(test_name) + candidates = ( + (preview_dir / f"{slug}.jpg", "image/jpeg", "jpeg"), + (preview_dir / f"{slug}.jpeg", "image/jpeg", "jpeg"), + (preview_dir / f"{slug}.png", "image/png", "png"), + ) + for path, mime, codec in candidates: + if not path.exists(): + continue + data = path.read_bytes() + encoded = base64.b64encode(data).decode("ascii") + note = "Preview provided by instrumentation" + if len(encoded) <= MAX_COMMENT_BASE64: + 
return CommentPayload( + base64=encoded, + base64_length=len(encoded), + mime=mime, + codec=codec, + quality=None, + omitted_reason=None, + note=note, + data=data, + ) + return CommentPayload( + base64=None, + base64_length=len(encoded), + mime=mime, + codec=codec, + quality=None, + omitted_reason="too_large", + note=note, + data=data, + ) + return None + + +def _detect_cli_converters() -> List[Tuple[str, ...]]: + """Return a list of available CLI converters (command tuples).""" + + candidates: List[Tuple[str, ...]] = [] + for cmd in (("magick", "convert"), ("convert",)): + if shutil.which(cmd[0]): + candidates.append(cmd) + return candidates + + +def _run_cli_converter( + command: Tuple[str, ...], src: pathlib.Path, dst: pathlib.Path, quality: int +) -> None: + """Execute the CLI converter.""" + + if not command: + raise RuntimeError("No converter command provided") + + cmd = list(command) + if len(cmd) == 2 and cmd[0] == "magick": + # magick convert -quality + cmd.extend([str(src), "-quality", str(quality), str(dst)]) + else: + # convert -quality + cmd.extend([str(src), "-quality", str(quality), str(dst)]) + + result = subprocess.run( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + check=False, + text=True, + ) + if result.returncode != 0: + raise RuntimeError( + f"{' '.join(command)} exited with {result.returncode}: {result.stderr.strip()}" + ) + + +def _slugify(name: str) -> str: + return "".join(ch if ch.isalnum() else "_" for ch in name) + + +def build_results( + reference_dir: pathlib.Path, + actual_entries: List[Tuple[str, pathlib.Path]], + emit_base64: bool, + preview_dir: Optional[pathlib.Path] = None, +) -> Dict[str, List[Dict[str, object]]]: + results: List[Dict[str, object]] = [] + for test_name, actual_path in actual_entries: + expected_path = reference_dir / f"{test_name}.png" + record: Dict[str, object] = { + "test": test_name, + "actual_path": str(actual_path), + "expected_path": str(expected_path), + } + if not actual_path.exists(): + 
record.update({"status": "missing_actual", "message": "Actual screenshot not found"}) + elif not expected_path.exists(): + record.update({"status": "missing_expected"}) + if emit_base64: + payload = None + if preview_dir is not None: + payload = _load_external_preview_payload(test_name, preview_dir) + if payload is None: + payload = build_comment_payload(load_png(actual_path)) + _record_comment_payload(record, payload, actual_path.name, preview_dir) + else: + try: + actual_img = load_png(actual_path) + expected_img = load_png(expected_path) + outcome = compare_images(expected_img, actual_img) + except Exception as exc: + record.update({"status": "error", "message": str(exc)}) + else: + if outcome["equal"]: + record.update({"status": "equal"}) + else: + record.update({"status": "different", "details": outcome}) + if emit_base64: + payload = None + if preview_dir is not None: + payload = _load_external_preview_payload(test_name, preview_dir) + if payload is None: + payload = build_comment_payload(actual_img) + _record_comment_payload(record, payload, actual_path.name, preview_dir) + results.append(record) + return {"results": results} + + +def parse_args(argv: List[str] | None = None) -> argparse.Namespace: + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument("--reference-dir", required=True, type=pathlib.Path) + parser.add_argument("--emit-base64", action="store_true", help="Include base64 payloads for updated screenshots") + parser.add_argument("--preview-dir", type=pathlib.Path, help="Directory to store generated preview images") + parser.add_argument("--actual", action="append", default=[], help="Mapping of test=path to evaluate") + return parser.parse_args(argv) + + +def main(argv: List[str] | None = None) -> int: + args = parse_args(argv) + reference_dir: pathlib.Path = args.reference_dir + actual_entries: List[Tuple[str, pathlib.Path]] = [] + for item in args.actual: + if "=" not in item: + print(f"Invalid --actual value: {item}", 
file=sys.stderr) + return 2 + name, path_str = item.split("=", 1) + actual_entries.append((name, pathlib.Path(path_str))) + + preview_dir = args.preview_dir + payload = build_results(reference_dir, actual_entries, bool(args.emit_base64), preview_dir) + json.dump(payload, sys.stdout) + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/scripts/run-android-instrumentation-tests.sh b/scripts/run-android-instrumentation-tests.sh index 1bac3c92cc..03c0ddd072 100755 --- a/scripts/run-android-instrumentation-tests.sh +++ b/scripts/run-android-instrumentation-tests.sh @@ -13,6 +13,8 @@ CN1SS_TOOL="" count_chunks() { local f="${1:-}" + local test="${2:-}" + local channel="${3:-}" if [ -z "$CN1SS_TOOL" ] || [ ! -x "$CN1SS_TOOL" ]; then echo 0 return @@ -21,21 +23,57 @@ count_chunks() { echo 0 return fi - python3 "$CN1SS_TOOL" count "$f" 2>/dev/null || echo 0 + local args=("count" "$f") + if [ -n "$test" ]; then + args+=("--test" "$test") + fi + if [ -n "$channel" ]; then + args+=("--channel" "$channel") + fi + python3 "$CN1SS_TOOL" "${args[@]}" 2>/dev/null || echo 0 } extract_cn1ss_base64() { local f="${1:-}" + local test="${2:-}" + local channel="${3:-}" + if [ -z "$CN1SS_TOOL" ] || [ ! -x "$CN1SS_TOOL" ]; then + return 1 + fi + if [ -z "$f" ] || [ ! -r "$f" ]; then + return 1 + fi + local args=("extract" "$f") + if [ -n "$test" ]; then + args+=("--test" "$test") + fi + if [ -n "$channel" ]; then + args+=("--channel" "$channel") + fi + python3 "$CN1SS_TOOL" "${args[@]}" +} + +decode_cn1ss_binary() { + local f="${1:-}" + local test="${2:-}" + local channel="${3:-}" if [ -z "$CN1SS_TOOL" ] || [ ! -x "$CN1SS_TOOL" ]; then return 1 fi if [ -z "$f" ] || [ ! 
-r "$f" ]; then return 1 fi - python3 "$CN1SS_TOOL" extract "$f" + local args=("extract" "$f" "--decode") + if [ -n "$test" ]; then + args+=("--test" "$test") + fi + if [ -n "$channel" ]; then + args+=("--channel" "$channel") + fi + python3 "$CN1SS_TOOL" "${args[@]}" } -decode_cn1ss_png() { +list_cn1ss_tests() { local f="${1:-}" if [ -z "$CN1SS_TOOL" ] || [ ! -x "$CN1SS_TOOL" ]; then return 1 @@ -43,7 +81,409 @@ decode_cn1ss_png() { if [ -z "$f" ] || [ ! -r "$f" ]; then return 1 fi - python3 "$CN1SS_TOOL" extract "$f" --decode + python3 "$CN1SS_TOOL" tests "$f" +} + + +post_pr_comment() { + local body_file="${1:-}" + local preview_dir="${2:-}" + if [ -z "$body_file" ] || [ ! -s "$body_file" ]; then + ra_log "Skipping PR comment post (no content)." + return 0 + fi + local comment_token="${GITHUB_TOKEN:-}" + if [ -z "$comment_token" ] && [ -n "${GH_TOKEN:-}" ]; then + comment_token="${GH_TOKEN}" + ra_log "PR comment auth using GH_TOKEN fallback" + fi + if [ -n "$comment_token" ]; then + ra_log "PR comment authentication token detected" + fi + if [ -z "$comment_token" ]; then + ra_log "PR comment skipped (no GitHub token available)" + return 0 + fi + if [ -z "${GITHUB_EVENT_PATH:-}" ] || [ ! 
-f "$GITHUB_EVENT_PATH" ]; then + ra_log "PR comment skipped (GITHUB_EVENT_PATH unavailable)" + return 0 + fi + local body_size + body_size=$(wc -c < "$body_file" 2>/dev/null || echo 0) + ra_log "Attempting to post PR comment (payload bytes=${body_size})" + GITHUB_TOKEN="$comment_token" python3 - "$body_file" "$preview_dir" <<'PY' +import json +import os +import pathlib +import re +import shutil +import subprocess +import sys +from typing import Dict, List, Match, Optional +from urllib.request import Request, urlopen + +MARKER = "" + + +def load_event(path: str) -> Dict[str, object]: + with open(path, "r", encoding="utf-8") as fh: + return json.load(fh) + + +def find_pr_number(event: Dict[str, object]) -> Optional[int]: + if "pull_request" in event: + return event["pull_request"].get("number") + issue = event.get("issue") + if isinstance(issue, dict) and issue.get("pull_request"): + return issue.get("number") + return None + + +def next_link(header: Optional[str]) -> Optional[str]: + if not header: + return None + for part in header.split(","): + segment = part.strip() + if segment.endswith('rel="next"'): + url_part = segment.split(";", 1)[0].strip() + if url_part.startswith("<") and url_part.endswith(">"): + return url_part[1:-1] + return None + + +def publish_previews_to_branch( + preview_dir: Optional[pathlib.Path], + repo: str, + pr_number: int, + token: Optional[str], + allow_push: bool, +) -> Dict[str, str]: + """Publish preview images to the cn1ss-previews branch and return name->URL.""" + + if not preview_dir or not preview_dir.exists(): + return {} + image_files = [ + path + for path in sorted(preview_dir.iterdir()) + if path.is_file() and path.suffix.lower() in {".jpg", ".jpeg", ".png"} + ] + if not image_files: + return {} + if not allow_push: + print( + "[run-android-instrumentation-tests] Preview publishing skipped for forked PR", # noqa: E501 + file=sys.stdout, + ) + return {} + if not repo or not token: + return {} + + workspace = 
pathlib.Path(os.environ.get("GITHUB_WORKSPACE", ".")).resolve() + worktree = workspace / f".cn1ss-previews-pr-{pr_number}" + if worktree.exists(): + shutil.rmtree(worktree) + worktree.mkdir(parents=True, exist_ok=True) + + try: + env = os.environ.copy() + env.setdefault("GIT_TERMINAL_PROMPT", "0") + + def run_git(args, check: bool = True): + result = subprocess.run( + ["git", *args], + cwd=worktree, + env=env, + capture_output=True, + text=True, + ) + if check and result.returncode != 0: + raise RuntimeError( + f"git {' '.join(args)} failed: {result.stderr.strip() or result.stdout.strip()}" + ) + return result + + run_git(["init"]) + run_git(["config", "user.name", os.environ.get("GITHUB_ACTOR", "github-actions") or "github-actions"]) + run_git(["config", "user.email", "github-actions@users.noreply.github.com"]) + remote_url = f"https://x-access-token:{token}@github.com/{repo}.git" + run_git(["remote", "add", "origin", remote_url]) + + has_branch = run_git(["ls-remote", "--heads", "origin", "cn1ss-previews"], check=False) + if has_branch.returncode == 0 and has_branch.stdout.strip(): + run_git(["fetch", "origin", "cn1ss-previews"]) + run_git(["checkout", "cn1ss-previews"]) + else: + run_git(["checkout", "--orphan", "cn1ss-previews"]) + + dest = worktree / f"pr-{pr_number}" + if dest.exists(): + shutil.rmtree(dest) + dest.mkdir(parents=True, exist_ok=True) + + for source in image_files: + shutil.copy2(source, dest / source.name) + + run_git(["add", "-A", "."]) + status = run_git(["status", "--porcelain"]) + if status.stdout.strip(): + run_git(["commit", "-m", f"Add previews for PR #{pr_number}"]) + push = run_git(["push", "origin", "HEAD:cn1ss-previews"], check=False) + if push.returncode != 0: + raise RuntimeError(f"git push failed: {push.stderr.strip() or push.stdout.strip()}") + print( + f"[run-android-instrumentation-tests] Published {len(image_files)} preview(s) to cn1ss-previews/pr-{pr_number}", + file=sys.stdout, + ) + else: + print( + 
f"[run-android-instrumentation-tests] Preview branch already up-to-date for PR #{pr_number}", + file=sys.stdout, + ) + + raw_base = f"https://raw.githubusercontent.com/{repo}/cn1ss-previews/pr-{pr_number}" + urls: Dict[str, str] = {} + if dest.exists(): + for file in sorted(dest.iterdir()): + if file.is_file(): + urls[file.name] = f"{raw_base}/{file.name}" + return urls + finally: + shutil.rmtree(worktree, ignore_errors=True) + + +body_path = pathlib.Path(sys.argv[1]) +preview_dir_arg: Optional[pathlib.Path] = None +if len(sys.argv) > 2: + candidate = pathlib.Path(sys.argv[2]) + if candidate.exists(): + preview_dir_arg = candidate +raw_body = body_path.read_text(encoding="utf-8") +body = raw_body.strip() +if not body: + sys.exit(0) + +if MARKER not in body: + body = body.rstrip() + "\n\n" + MARKER + +body_without_marker = body.replace(MARKER, "").strip() +if not body_without_marker: + sys.exit(0) + +event_path = os.environ.get("GITHUB_EVENT_PATH") +repo = os.environ.get("GITHUB_REPOSITORY") +token = os.environ.get("GITHUB_TOKEN") +actor = os.environ.get("GITHUB_ACTOR") +if not event_path or not repo or not token: + sys.exit(0) + +event = load_event(event_path) +pr_number = find_pr_number(event) +if not pr_number: + sys.exit(0) + +headers = { + "Authorization": f"token {token}", + "Accept": "application/vnd.github+json", + "Content-Type": "application/json", +} + +pr_data = event.get("pull_request") +is_fork_pr = False +if isinstance(pr_data, dict): + head = pr_data.get("head") + if isinstance(head, dict): + head_repo = head.get("repo") + if isinstance(head_repo, dict): + is_fork_pr = bool(head_repo.get("fork")) + +comments_url = f"https://api.github.com/repos/{repo}/issues/{pr_number}/comments?per_page=100" +existing_comment: Optional[Dict[str, object]] = None +preferred_comment: Optional[Dict[str, object]] = None +preferred_logins = set() +if actor: + preferred_logins.add(actor) +preferred_logins.add("github-actions[bot]") + +while comments_url: + req = 
Request(comments_url, headers=headers) + with urlopen(req) as resp: + comments = json.load(resp) + for comment in comments: + body_text = comment.get("body") or "" + if MARKER in body_text: + existing_comment = comment + login = comment.get("user", {}).get("login") + if login in preferred_logins: + preferred_comment = comment + comments_url = next_link(resp.headers.get("Link")) + +comment_id: Optional[int] = None +created_placeholder = False + +if preferred_comment is not None: + existing_comment = preferred_comment + +if existing_comment is not None: + comment_id = existing_comment.get("id") +else: + create_payload = json.dumps({"body": MARKER}).encode("utf-8") + create_req = Request( + f"https://api.github.com/repos/{repo}/issues/{pr_number}/comments", + data=create_payload, + headers=headers, + method="POST", + ) + with urlopen(create_req) as resp: + created = json.load(resp) + comment_id = created.get("id") + created_placeholder = True + print( + f"[run-android-instrumentation-tests] Created new screenshot comment placeholder (id={comment_id})", + file=sys.stdout, + ) + +if comment_id is None: + sys.exit(1) + +attachment_pattern = re.compile(r"\(attachment:([^)]+)\)") +attachment_urls: Dict[str, str] = {} +missing_previews: List[str] = [] +if attachment_pattern.search(body): + try: + attachment_urls = publish_previews_to_branch( + preview_dir_arg, + repo, + pr_number, + token, + allow_push=not is_fork_pr, + ) + if attachment_urls: + for name, url in attachment_urls.items(): + print( + f"[run-android-instrumentation-tests] Preview available for {name}: {url}", + file=sys.stdout, + ) + except Exception as exc: + print( + f"[run-android-instrumentation-tests] Preview publishing failed: {exc}", + file=sys.stderr, + ) + sys.exit(1) + + +def replace_attachment(match: re.Match[str]) -> str: + name = match.group(1) + url = attachment_urls.get(name) + if url: + return f"({url})" + print( + f"[run-android-instrumentation-tests] Preview URL missing for {name}; leaving 
placeholder", + file=sys.stdout, + ) + missing_previews.append(name) + return "(#)" + + +final_body = attachment_pattern.sub(replace_attachment, body) + +if missing_previews: + if is_fork_pr: + print( + "[run-android-instrumentation-tests] Preview URLs unavailable in forked PR context; placeholders left as-is", + file=sys.stdout, + ) + else: + print( + f"[run-android-instrumentation-tests] Failed to resolve preview URLs for: {', '.join(sorted(set(missing_previews)))}", + file=sys.stderr, + ) + sys.exit(1) + +update_payload = json.dumps({"body": final_body}).encode("utf-8") +update_req = Request( + f"https://api.github.com/repos/{repo}/issues/comments/{comment_id}", + data=update_payload, + headers=headers, + method="PATCH", +) + +with urlopen(update_req) as resp: + resp.read() + action = "updated" if not created_placeholder else "posted" + print( + f"[run-android-instrumentation-tests] PR comment {action} (status={resp.status}, bytes={len(update_payload)})", + file=sys.stdout, + ) +PY + local rc=$? 
+ if [ $rc -eq 0 ]; then + ra_log "Posted screenshot comparison comment to PR" + else + ra_log "STAGE:COMMENT_POST_FAILED (see stderr for details)" + if [ -n "${ARTIFACTS_DIR:-}" ]; then + local failure_flag="$ARTIFACTS_DIR/pr-comment-failed.txt" + printf 'Comment POST failed at %s\n' "$(date -u +'%Y-%m-%dT%H:%M:%SZ')" > "$failure_flag" 2>/dev/null || true + fi + fi + return $rc +} + +decode_test_asset() { + local test_name="${1:-}" + local dest="${2:-}" + local channel="${3:-}" + local verifier="${4:-}" + local source="" + local count="0" + + if [ "${#XMLS[@]}" -gt 0 ]; then + for x in "${XMLS[@]}"; do + count="$(count_chunks "$x" "$test_name" "$channel")"; count="${count//[^0-9]/}"; : "${count:=0}" + [ "$count" -gt 0 ] || continue + ra_log "Reassembling test '$test_name' from XML: $x (chunks=$count)" + if decode_cn1ss_binary "$x" "$test_name" "$channel" > "$dest" 2>/dev/null; then + if [ -z "$verifier" ] || "$verifier" "$dest"; then source="XML:$(basename "$x")"; break; fi + fi + done + fi + + if [ -z "$source" ] && [ "${#LOGCAT_FILES[@]}" -gt 0 ]; then + for logcat in "${LOGCAT_FILES[@]}"; do + [ -s "$logcat" ] || continue + count="$(count_chunks "$logcat" "$test_name" "$channel")"; count="${count//[^0-9]/}"; : "${count:=0}" + [ "$count" -gt 0 ] || continue + ra_log "Reassembling test '$test_name' from logcat: $logcat (chunks=$count)" + if decode_cn1ss_binary "$logcat" "$test_name" "$channel" > "$dest" 2>/dev/null; then + if [ -z "$verifier" ] || "$verifier" "$dest"; then source="LOGCAT:$(basename "$logcat")"; break; fi + fi + done + fi + + if [ -z "$source" ] && [ -n "${TEST_EXEC_LOG:-}" ] && [ -s "$TEST_EXEC_LOG" ]; then + count="$(count_chunks "$TEST_EXEC_LOG" "$test_name" "$channel")"; count="${count//[^0-9]/}"; : "${count:=0}" + if [ "$count" -gt 0 ]; then + ra_log "Reassembling test '$test_name' from test-results.log: $TEST_EXEC_LOG (chunks=$count)" + if decode_cn1ss_binary "$TEST_EXEC_LOG" "$test_name" "$channel" > "$dest" 2>/dev/null; then + if [ -z 
"$verifier" ] || "$verifier" "$dest"; then source="EXECLOG:$(basename "$TEST_EXEC_LOG")"; fi + fi + fi + fi + + if [ -n "$source" ]; then + printf '%s' "$source" + return 0 + fi + + rm -f "$dest" 2>/dev/null || true + return 1 +} + +decode_test_png() { + decode_test_asset "$1" "$2" "" verify_png +} + +decode_test_preview() { + decode_test_asset "$1" "$2" "PREVIEW" verify_jpeg } # Verify PNG signature + non-zero size @@ -53,6 +493,16 @@ verify_png() { head -c 8 "$f" | od -An -t x1 | tr -d ' \n' | grep -qi '^89504e470d0a1a0a$' } +verify_jpeg() { + local f="$1" + [ -s "$f" ] || return 1 + local header + header="$(head -c 2 "$f" | od -An -t x1 | tr -d ' \n' | tr '[:lower:]' '[:upper:]')" + local trailer + trailer="$(tail -c 2 "$f" | od -An -t x1 | tr -d ' \n' | tr '[:lower:]' '[:upper:]')" + [ "$header" = "FFD8" ] && [ "$trailer" = "FFD9" ] +} + # ---- Args & environment ---------------------------------------------------- if [ $# -lt 1 ]; then @@ -79,7 +529,10 @@ ENV_FILE="$ENV_DIR/env.sh" ARTIFACTS_DIR="${ARTIFACTS_DIR:-${GITHUB_WORKSPACE:-$REPO_ROOT}/artifacts}" ensure_dir "$ARTIFACTS_DIR" TEST_LOG="$ARTIFACTS_DIR/connectedAndroidTest.log" -SCREENSHOT_OUT="$ARTIFACTS_DIR/emulator-screenshot.png" +SCREENSHOT_REF_DIR="$SCRIPT_DIR/android/screenshots" +SCREENSHOT_TMP_DIR="$(mktemp -d "${TMPDIR}/cn1ss-XXXXXX" 2>/dev/null || echo "${TMPDIR}/cn1ss-tmp")" +ensure_dir "$SCREENSHOT_TMP_DIR" +SCREENSHOT_PREVIEW_DIR="$SCREENSHOT_TMP_DIR/previews" ra_log "Loading workspace environment from $ENV_FILE" [ -f "$ENV_FILE" ] || { ra_log "Missing env file: $ENV_FILE"; exit 3; } @@ -125,9 +578,9 @@ mapfile -t XMLS < <( ) || XMLS=() # logcat files produced by AGP -mapfile -t LOGCATS < <( +mapfile -t LOGCAT_FILES < <( find "$RESULTS_ROOT" -type f -name 'logcat-*.txt' -print 2>/dev/null -) || LOGCATS=() +) || LOGCAT_FILES=() # execution log (use first if present) TEST_EXEC_LOG="$(find "$RESULTS_ROOT" -type f -path '*/testlog/test-results.log' -print -quit 2>/dev/null || true)" @@ -139,13 
+592,12 @@ else ra_log "No test result XML files found under $RESULTS_ROOT" fi -# Pick first logcat if any -LOGCAT_FILE="${LOGCATS[0]:-}" -if [ -z "${LOGCAT_FILE:-}" ] || [ ! -s "$LOGCAT_FILE" ]; then +if [ "${#LOGCAT_FILES[@]}" -eq 0 ]; then ra_log "FATAL: No logcat-*.txt produced by connectedDebugAndroidTest (cannot extract CN1SS chunks)." exit 12 fi + # ---- Chunk accounting (diagnostics) --------------------------------------- XML_CHUNKS_TOTAL=0 @@ -153,7 +605,11 @@ for x in "${XMLS[@]}"; do c="$(count_chunks "$x")"; c="${c//[^0-9]/}"; : "${c:=0}" XML_CHUNKS_TOTAL=$(( XML_CHUNKS_TOTAL + c )) done -LOGCAT_CHUNKS="$(count_chunks "$LOGCAT_FILE")"; LOGCAT_CHUNKS="${LOGCAT_CHUNKS//[^0-9]/}"; : "${LOGCAT_CHUNKS:=0}" +LOGCAT_CHUNKS=0 +for logcat in "${LOGCAT_FILES[@]}"; do + c="$(count_chunks "$logcat")"; c="${c//[^0-9]/}"; : "${c:=0}" + LOGCAT_CHUNKS=$(( LOGCAT_CHUNKS + c )) +done EXECLOG_CHUNKS="$(count_chunks "${TEST_EXEC_LOG:-}")"; EXECLOG_CHUNKS="${EXECLOG_CHUNKS//[^0-9]/}"; : "${EXECLOG_CHUNKS:=0}" ra_log "Chunk counts -> XML: ${XML_CHUNKS_TOTAL} | logcat: ${LOGCAT_CHUNKS} | test-results.log: ${EXECLOG_CHUNKS}" @@ -168,78 +624,374 @@ if [ "${LOGCAT_CHUNKS:-0}" = "0" ] && [ "${XML_CHUNKS_TOTAL:-0}" = "0" ] && [ "$ exit 12 fi -# ---- Reassemble (prefer XML → logcat → exec log) -------------------------- +# ---- Identify CN1SS test streams ----------------------------------------- -: > "$SCREENSHOT_OUT" -SOURCE="" +declare -A TEST_NAME_SET=() -if [ "${#XMLS[@]}" -gt 0 ] && [ "${XML_CHUNKS_TOTAL:-0}" -gt 0 ]; then +if [ "${#XMLS[@]}" -gt 0 ]; then for x in "${XMLS[@]}"; do - c="$(count_chunks "$x")"; c="${c//[^0-9]/}"; : "${c:=0}" - [ "$c" -gt 0 ] || continue - ra_log "Reassembling from XML: $x (chunks=$c)" - if decode_cn1ss_png "$x" > "$SCREENSHOT_OUT" 2>/dev/null; then - if verify_png "$SCREENSHOT_OUT"; then SOURCE="XML"; break; fi - fi + while IFS= read -r name; do + [ -n "$name" ] || continue + TEST_NAME_SET["$name"]=1 + done < <(list_cn1ss_tests "$x" 
2>/dev/null || true) done fi -if [ -z "$SOURCE" ] && [ "${LOGCAT_CHUNKS:-0}" -gt 0 ]; then - ra_log "Reassembling from logcat: $LOGCAT_FILE (chunks=$LOGCAT_CHUNKS)" - if decode_cn1ss_png "$LOGCAT_FILE" > "$SCREENSHOT_OUT" 2>/dev/null; then - if verify_png "$SCREENSHOT_OUT"; then SOURCE="LOGCAT"; fi - fi +for logcat in "${LOGCAT_FILES[@]}"; do + [ -s "$logcat" ] || continue + while IFS= read -r name; do + [ -n "$name" ] || continue + TEST_NAME_SET["$name"]=1 + done < <(list_cn1ss_tests "$logcat" 2>/dev/null || true) +done + +if [ -n "${TEST_EXEC_LOG:-}" ] && [ -s "$TEST_EXEC_LOG" ]; then + while IFS= read -r name; do + [ -n "$name" ] || continue + TEST_NAME_SET["$name"]=1 + done < <(list_cn1ss_tests "$TEST_EXEC_LOG" 2>/dev/null || true) fi -if [ -z "$SOURCE" ] && [ -n "${TEST_EXEC_LOG:-}" ] && [ "${EXECLOG_CHUNKS:-0}" -gt 0 ]; then - ra_log "Reassembling from test-results.log: $TEST_EXEC_LOG (chunks=$EXECLOG_CHUNKS)" - if decode_cn1ss_png "$TEST_EXEC_LOG" > "$SCREENSHOT_OUT" 2>/dev/null; then - if verify_png "$SCREENSHOT_OUT"; then SOURCE="EXECLOG"; fi - fi +if [ "${#TEST_NAME_SET[@]}" -eq 0 ] && { [ "${LOGCAT_CHUNKS:-0}" -gt 0 ] || [ "${XML_CHUNKS_TOTAL:-0}" -gt 0 ] || [ "${EXECLOG_CHUNKS:-0}" -gt 0 ]; }; then + TEST_NAME_SET["default"]=1 fi -# ---- Final validation / failure paths ------------------------------------- - -if [ -z "$SOURCE" ]; then - ra_log "FATAL: Failed to extract/decode CN1SS payload from any source" - # Keep partial for debugging - RAW_B64_OUT="${SCREENSHOT_OUT}.raw.b64" - { - # Try to emit concatenated base64 from whichever had chunks (priority logcat, then XML, then exec) - if [ "${LOGCAT_CHUNKS:-0}" -gt 0 ]; then extract_cn1ss_base64 "$LOGCAT_FILE"; fi - if [ "${XML_CHUNKS_TOTAL:-0}" -gt 0 ] && [ "${LOGCAT_CHUNKS:-0}" -eq 0 ]; then - # concatenate all XMLs - for x in "${XMLS[@]}"; do - if [ "$(count_chunks "$x")" -gt 0 ]; then extract_cn1ss_base64 "$x"; fi - done +if [ "${#TEST_NAME_SET[@]}" -eq 0 ]; then + ra_log "FATAL: Could not determine 
any CN1SS test streams" + exit 12 +fi + +declare -a TEST_NAMES=() +for name in "${!TEST_NAME_SET[@]}"; do + TEST_NAMES+=("$name") +done +IFS=$'\n' TEST_NAMES=($(printf '%s\n' "${TEST_NAMES[@]}" | sort)) +unset IFS +ra_log "Detected CN1SS test streams: ${TEST_NAMES[*]}" + +declare -A TEST_OUTPUTS=() +declare -A TEST_SOURCES=() +declare -A PREVIEW_OUTPUTS=() + +ensure_dir "$SCREENSHOT_PREVIEW_DIR" + +for test in "${TEST_NAMES[@]}"; do + dest="$SCREENSHOT_TMP_DIR/${test}.png" + if source_label="$(decode_test_png "$test" "$dest")"; then + TEST_OUTPUTS["$test"]="$dest" + TEST_SOURCES["$test"]="$source_label" + ra_log "Decoded screenshot for '$test' (source=${source_label}, size: $(stat -c '%s' "$dest") bytes)" + preview_dest="$SCREENSHOT_PREVIEW_DIR/${test}.jpg" + if preview_source="$(decode_test_preview "$test" "$preview_dest")"; then + PREVIEW_OUTPUTS["$test"]="$preview_dest" + ra_log "Decoded preview for '$test' (source=${preview_source}, size: $(stat -c '%s' "$preview_dest") bytes)" + else + rm -f "$preview_dest" 2>/dev/null || true fi - if [ -n "${TEST_EXEC_LOG:-}" ] && [ "${EXECLOG_CHUNKS:-0}" -gt 0 ] && [ "${LOGCAT_CHUNKS:-0}" -eq 0 ] && [ "${XML_CHUNKS_TOTAL:-0}" -eq 0 ]; then - extract_cn1ss_base64 "$TEST_EXEC_LOG" + else + ra_log "FATAL: Failed to extract/decode CN1SS payload for test '$test'" + RAW_B64_OUT="$SCREENSHOT_TMP_DIR/${test}.raw.b64" + { + local count + for logcat in "${LOGCAT_FILES[@]}"; do + [ -s "$logcat" ] || continue + count="$(count_chunks "$logcat" "$test")"; count="${count//[^0-9]/}"; : "${count:=0}" + if [ "$count" -gt 0 ]; then extract_cn1ss_base64 "$logcat" "$test"; fi + done + if [ "${#XMLS[@]}" -gt 0 ]; then + for x in "${XMLS[@]}"; do + count="$(count_chunks "$x" "$test")"; count="${count//[^0-9]/}"; : "${count:=0}" + if [ "$count" -gt 0 ]; then extract_cn1ss_base64 "$x" "$test"; fi + done + fi + if [ -n "${TEST_EXEC_LOG:-}" ] && [ -s "$TEST_EXEC_LOG" ]; then + count="$(count_chunks "$TEST_EXEC_LOG" "$test")"; 
count="${count//[^0-9]/}"; : "${count:=0}" + if [ "$count" -gt 0 ]; then extract_cn1ss_base64 "$TEST_EXEC_LOG" "$test"; fi + fi + } > "$RAW_B64_OUT" 2>/dev/null || true + if [ -s "$RAW_B64_OUT" ]; then + head -c 64 "$RAW_B64_OUT" | sed 's/^/[CN1SS-B64-HEAD] /' + ra_log "Partial base64 saved at: $RAW_B64_OUT" fi - } > "$RAW_B64_OUT" 2>/dev/null || true - if [ -s "$RAW_B64_OUT" ]; then - head -c 64 "$RAW_B64_OUT" | sed 's/^/[CN1SS-B64-HEAD] /' - ra_log "Partial base64 saved at: $RAW_B64_OUT" + exit 12 fi - # Emit contextual INFO lines - grep -n 'CN1SS:INFO' "$LOGCAT_FILE" 2>/dev/null || true - exit 12 +done + +# ---- Compare against stored references ------------------------------------ + +COMPARE_ARGS=() +for test in "${TEST_NAMES[@]}"; do + dest="${TEST_OUTPUTS[$test]:-}" + [ -n "$dest" ] || continue + COMPARE_ARGS+=("--actual" "${test}=${dest}") +done + +COMPARE_JSON="$SCREENSHOT_TMP_DIR/screenshot-compare.json" +export CN1SS_PREVIEW_DIR="$SCREENSHOT_PREVIEW_DIR" +ra_log "STAGE:COMPARE -> Evaluating screenshots against stored references" +python3 "$SCRIPT_DIR/android/tests/process_screenshots.py" \ + --reference-dir "$SCREENSHOT_REF_DIR" \ + --emit-base64 \ + --preview-dir "$SCREENSHOT_PREVIEW_DIR" \ + "${COMPARE_ARGS[@]}" > "$COMPARE_JSON" + +SUMMARY_FILE="$SCREENSHOT_TMP_DIR/screenshot-summary.txt" +COMMENT_FILE="$SCREENSHOT_TMP_DIR/screenshot-comment.md" + +ra_log "STAGE:COMMENT_BUILD -> Rendering summary and PR comment markdown" +python3 - "$COMPARE_JSON" "$COMMENT_FILE" "$SUMMARY_FILE" <<'PY' +import json +import pathlib +import sys + +compare_path = pathlib.Path(sys.argv[1]) +comment_path = pathlib.Path(sys.argv[2]) +summary_path = pathlib.Path(sys.argv[3]) + +data = json.loads(compare_path.read_text(encoding="utf-8")) +summary_lines = [] +comment_entries = [] + +for result in data.get("results", []): + test = result.get("test", "unknown") + status = result.get("status", "unknown") + expected_path = result.get("expected_path") + actual_path = 
result.get("actual_path", "") + details = result.get("details") or {} + base64_data = result.get("base64") + base64_omitted = result.get("base64_omitted") + base64_length = result.get("base64_length") + base64_mime = result.get("base64_mime") or "image/png" + base64_codec = result.get("base64_codec") + base64_quality = result.get("base64_quality") + base64_note = result.get("base64_note") + message = "" + copy_flag = "0" + + preview = result.get("preview") or {} + preview_name = preview.get("name") + preview_path = preview.get("path") + preview_mime = preview.get("mime") + preview_note = preview.get("note") + preview_quality = preview.get("quality") + if status == "equal": + message = "Matches stored reference." + elif status == "missing_expected": + message = f"Reference screenshot missing at {expected_path}." + copy_flag = "1" + comment_entries.append({ + "test": test, + "status": "missing reference", + "message": message, + "artifact_name": f"{test}.png", + "preview_name": preview_name, + "preview_path": preview_path, + "preview_mime": preview_mime, + "preview_note": preview_note, + "preview_quality": preview_quality, + "base64": base64_data, + "base64_omitted": base64_omitted, + "base64_length": base64_length, + "base64_mime": base64_mime, + "base64_codec": base64_codec, + "base64_quality": base64_quality, + "base64_note": base64_note, + }) + elif status == "different": + dims = "" + if details: + dims = f" ({details.get('width')}x{details.get('height')} px, bit depth {details.get('bit_depth')})" + message = f"Screenshot differs{dims}." 
+ copy_flag = "1" + comment_entries.append({ + "test": test, + "status": "updated screenshot", + "message": message, + "artifact_name": f"{test}.png", + "preview_name": preview_name, + "preview_path": preview_path, + "preview_mime": preview_mime, + "preview_note": preview_note, + "preview_quality": preview_quality, + "base64": base64_data, + "base64_omitted": base64_omitted, + "base64_length": base64_length, + "base64_mime": base64_mime, + "base64_codec": base64_codec, + "base64_quality": base64_quality, + "base64_note": base64_note, + }) + elif status == "error": + message = f"Comparison error: {result.get('message', 'unknown error')}" + copy_flag = "1" + comment_entries.append({ + "test": test, + "status": "comparison error", + "message": message, + "artifact_name": f"{test}.png", + "preview_name": preview_name, + "preview_path": preview_path, + "preview_mime": preview_mime, + "preview_note": preview_note, + "preview_quality": preview_quality, + "base64": None, + "base64_omitted": base64_omitted, + "base64_length": base64_length, + "base64_mime": base64_mime, + "base64_codec": base64_codec, + "base64_quality": base64_quality, + "base64_note": base64_note, + }) + elif status == "missing_actual": + message = "Actual screenshot missing (test did not produce output)." + copy_flag = "1" + comment_entries.append({ + "test": test, + "status": "missing actual screenshot", + "message": message, + "artifact_name": None, + "preview_name": preview_name, + "preview_path": preview_path, + "preview_mime": preview_mime, + "preview_note": preview_note, + "preview_quality": preview_quality, + "base64": None, + "base64_omitted": base64_omitted, + "base64_length": base64_length, + "base64_mime": base64_mime, + "base64_codec": base64_codec, + "base64_quality": base64_quality, + "base64_note": base64_note, + }) + else: + message = f"Status: {status}." 
+ + note_column = preview_note or base64_note or "" + summary_lines.append("|".join([status, test, message, copy_flag, actual_path, note_column])) + +summary_path.write_text("\n".join(summary_lines) + ("\n" if summary_lines else ""), encoding="utf-8") + +if comment_entries: + lines = ["### Android screenshot updates", ""] + + def add_line(text: str = "") -> None: + lines.append(text) + + for entry in comment_entries: + entry_header = f"- **{entry['test']}** — {entry['status']}. {entry['message']}" + add_line(entry_header) + preview_name = entry.get("preview_name") + preview_quality = entry.get("preview_quality") + preview_note = entry.get("preview_note") + base64_note = entry.get("base64_note") + preview_mime = entry.get("preview_mime") + + preview_notes = [] + if preview_mime == "image/jpeg" and preview_quality: + preview_notes.append(f"JPEG preview quality {preview_quality}") + if preview_note: + preview_notes.append(preview_note) + if base64_note and base64_note != preview_note: + preview_notes.append(base64_note) + + if preview_name: + add_line("") + add_line(f" ![{entry['test']}](attachment:{preview_name})") + if preview_notes: + add_line(f" _Preview info: {'; '.join(preview_notes)}._") + elif entry.get("base64"): + add_line("") + add_line( + " _Preview generated but could not be published; see workflow artifacts for JPEG preview._" + ) + if preview_notes: + add_line(f" _Preview info: {'; '.join(preview_notes)}._") + elif entry.get("base64_omitted") == "too_large": + size_note = "" + if entry.get("base64_length"): + size_note = f" (base64 length ≈ {entry['base64_length']:,} chars)" + add_line("") + codec = entry.get("base64_codec") + quality = entry.get("base64_quality") + note = entry.get("base64_note") + extra_bits = [] + if codec == "jpeg" and quality: + extra_bits.append(f"attempted JPEG quality {quality}") + if note: + extra_bits.append(note) + tail = "" + if extra_bits: + tail = " (" + "; ".join(extra_bits) + ")" + add_line( + " _Screenshot omitted from 
comment because the encoded payload exceeded GitHub's size limits" + + size_note + + "." + tail + "_" + ) + else: + add_line("") + add_line(" _No preview available for this screenshot._") + artifact_name = entry.get("artifact_name") + if artifact_name: + add_line(f" _Full-resolution PNG saved as `{artifact_name}` in workflow artifacts._") + add_line("") + MARKER = "" + if lines[-1] != "": + lines.append("") + lines.append(MARKER) + comment_path.write_text("\n".join(lines).rstrip() + "\n", encoding="utf-8") +else: + MARKER = "" + passed = "✅ Native Android screenshot tests passed." + comment_path.write_text(passed + "\n\n" + MARKER + "\n", encoding="utf-8") +PY + +if [ -s "$SUMMARY_FILE" ]; then + ra_log " -> Wrote summary entries to $SUMMARY_FILE ($(wc -l < "$SUMMARY_FILE" 2>/dev/null || echo 0) line(s))" +else + ra_log " -> No summary entries generated (all screenshots matched stored baselines)" fi -# Size & signature check (belt & suspenders) -if ! verify_png "$SCREENSHOT_OUT"; then - ra_log "STAGE:BAD_PNG_SIGNATURE -> Not a PNG" - file "$SCREENSHOT_OUT" || true - exit 14 +if [ -s "$COMMENT_FILE" ]; then + ra_log " -> Prepared PR comment payload at $COMMENT_FILE (bytes=$(wc -c < "$COMMENT_FILE" 2>/dev/null || echo 0))" +else + ra_log " -> No PR comment content produced" fi -ra_log "SUCCESS -> screenshot saved (${SOURCE}), size: $(stat -c '%s' "$SCREENSHOT_OUT") bytes at $SCREENSHOT_OUT" +if [ -s "$SUMMARY_FILE" ]; then + while IFS='|' read -r status test message copy_flag path preview_note; do + [ -n "${test:-}" ] || continue + ra_log "Test '${test}': ${message}" + if [ "$copy_flag" = "1" ] && [ -n "${path:-}" ] && [ -f "$path" ]; then + cp -f "$path" "$ARTIFACTS_DIR/${test}.png" 2>/dev/null || true + ra_log " -> Stored PNG artifact copy at $ARTIFACTS_DIR/${test}.png" + fi + if [ "$status" = "equal" ] && [ -n "${path:-}" ]; then + rm -f "$path" 2>/dev/null || true + fi + if [ -n "${preview_note:-}" ]; then + ra_log " Preview note: ${preview_note}" + fi + done < 
"$SUMMARY_FILE" +fi + +cp -f "$COMPARE_JSON" "$ARTIFACTS_DIR/screenshot-compare.json" 2>/dev/null || true +if [ -s "$COMMENT_FILE" ]; then + cp -f "$COMMENT_FILE" "$ARTIFACTS_DIR/screenshot-comment.md" 2>/dev/null || true +fi + +ra_log "STAGE:COMMENT_POST -> Submitting PR feedback" +comment_rc=0 +if ! post_pr_comment "$COMMENT_FILE" "$SCREENSHOT_PREVIEW_DIR"; then + comment_rc=$? +fi # Copy useful artifacts for GH Actions -cp -f "$LOGCAT_FILE" "$ARTIFACTS_DIR/$(basename "$LOGCAT_FILE")" 2>/dev/null || true +for logcat in "${LOGCAT_FILES[@]}"; do + cp -f "$logcat" "$ARTIFACTS_DIR/$(basename "$logcat")" 2>/dev/null || true +done for x in "${XMLS[@]}"; do cp -f "$x" "$ARTIFACTS_DIR/$(basename "$x")" 2>/dev/null || true done [ -n "${TEST_EXEC_LOG:-}" ] && cp -f "$TEST_EXEC_LOG" "$ARTIFACTS_DIR/test-results.log" 2>/dev/null || true -exit 0 +exit $comment_rc