Skip to content

Commit d6caa9d

Browse files
poweifengbejado and authored
diffimg: binary tool for comparing images (#9668)
This tool uses existing libraries: image, imageio, imageio-lite, imagediff to perform difference comparison for on-disk images. We refactor renderdiff to use this tool instead of using python dependencies. Co-authored-by: Ben Doherty <bendoherty@google.com>
1 parent 19209a0 commit d6caa9d

File tree

15 files changed

+476
-359
lines changed

15 files changed

+476
-359
lines changed

.github/workflows/postsubmit-main.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,8 +16,8 @@ jobs:
1616
- uses: ./.github/actions/linux-prereq
1717
- id: get_commit_msg
1818
uses: ./.github/actions/get-commit-msg
19-
- name: Prerequisites
20-
run: pip install tifffile numpy
19+
- name: Build diffimg
20+
run: ./build.sh release diffimg
2121
- name: Run update script
2222
env:
2323
GH_TOKEN: ${{ secrets.FILAMENTBOT_TOKEN }}

.github/workflows/presubmit.yml

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -125,7 +125,6 @@ jobs:
125125
uses: ./.github/actions/get-commit-msg
126126
- name: Prerequisites
127127
run: |
128-
pip install tifffile numpy
129128
# Must have at least clang-16 for a webgpu/dawn build.
130129
sudo xcode-select -s /Applications/Xcode_16.2.app/Contents/Developer
131130
shell: bash
@@ -139,6 +138,9 @@ jobs:
139138
set -eux
140139
GOLDEN_BRANCH=$(echo "${COMMIT_MESSAGE}" | python3 ${TEST_DIR}/src/commit_msg.py)
141140
bash ${TEST_DIR}/generate.sh
141+
# Build diffimg tool
142+
./build.sh release diffimg
143+
142144
python3 ${TEST_DIR}/src/golden_manager.py \
143145
--branch=${GOLDEN_BRANCH} \
144146
--output=${GOLDEN_OUTPUT_DIR}
@@ -149,7 +151,9 @@ jobs:
149151
python3 ${TEST_DIR}/src/compare.py \
150152
--src=${GOLDEN_OUTPUT_DIR} \
151153
--dest=${RENDER_OUTPUT_DIR} \
152-
--out=${DIFF_OUTPUT_DIR} 2>&1 | tee compare_output.txt
154+
--out=${DIFF_OUTPUT_DIR} \
155+
--diffimg="$(pwd)/out/cmake-release/tools/diffimg/diffimg" \
156+
--test="${TEST_DIR}/tests/presubmit.json" 2>&1 | tee compare_output.txt
153157
154158
if grep "Failed" compare_output.txt > /dev/null; then
155159
DELIMITER="EOF_FILE_CONTENT_$(date +%s)" # Using timestamp to make it more unique

CMakeLists.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -955,6 +955,7 @@ if (IS_HOST_PLATFORM)
955955

956956
add_subdirectory(${TOOLS}/cmgen)
957957
add_subdirectory(${TOOLS}/cso-lut)
958+
add_subdirectory(${TOOLS}/diffimg)
958959
add_subdirectory(${TOOLS}/filamesh)
959960
add_subdirectory(${TOOLS}/glslminifier)
960961
add_subdirectory(${TOOLS}/matc)

test/renderdiff/generate.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ function start_render_() {
3131
python3 -m venv ${VENV_DIR}
3232
source ${VENV_DIR}/bin/activate
3333

34-
NEEDED_PYTHON_DEPS=("numpy" "tifffile")
34+
NEEDED_PYTHON_DEPS=()
3535
for cmd in "${NEEDED_PYTHON_DEPS[@]}"; do
3636
if ! python3 -m pip show -q "${cmd}"; then
3737
python3 -m pip install ${cmd}

test/renderdiff/local_test.sh

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,13 +26,15 @@ else
2626
fi
2727

2828
bash `dirname $0`/generate.sh "$@" && \
29+
./build.sh release diffimg && \
2930
python3 ${RENDERDIFF_TEST_DIR}/src/golden_manager.py \
3031
--branch=${GOLDEN_BRANCH} \
3132
--output=${GOLDEN_OUTPUT_DIR} && \
3233
python3 ${RENDERDIFF_TEST_DIR}/src/compare.py \
3334
--src=${GOLDEN_OUTPUT_DIR} \
3435
--dest=${RENDER_OUTPUT_DIR} \
3536
--out=${DIFF_OUTPUT_DIR} \
37+
--diffimg="$(pwd)/out/cmake-release/tools/diffimg/diffimg" \
3638
--test="${RENDERDIFF_TEST_DIR}/tests/presubmit.json" "$@"
3739

3840
# $@ Pass arguments to generate.sh, e.g. --test_filter

test/renderdiff/src/compare.py

Lines changed: 64 additions & 91 deletions
Original file line numberDiff line numberDiff line change
@@ -4,9 +4,10 @@
44
import pprint
55
import json
66
import fnmatch
7+
import subprocess
8+
import tempfile
79

810
from utils import execute, ArgParseImpl, important_print, mkdir_p
9-
from image_diff import same_image, output_image_diff
1011
from results import RESULT_OK, RESULT_FAILED, RESULT_MISSING, GOLDEN_MISSING
1112
import test_config
1213

@@ -24,65 +25,43 @@ def _get_tolerance_for_test_case(test_case_name, test_config_obj):
2425

2526
return None
2627

27-
def _format_tolerance_summary(stats):
28-
"""
29-
Create human-readable summary of tolerance statistics.
30-
31-
Args:
32-
stats: Statistics dictionary from tolerance evaluation
33-
34-
Returns:
35-
str: Formatted summary string
36-
"""
37-
if 'error' in stats:
38-
return f"Error: {stats['error']}"
39-
40-
if 'operator' in stats:
41-
# Nested criteria with operator
42-
operator = stats['operator']
43-
criteria_count = len(stats['criteria_results'])
44-
passed_count = sum(1 for c in stats['criteria_results'] if c.get('passed', False))
45-
summary = f"{operator} of {criteria_count} criteria: {passed_count} passed, {criteria_count - passed_count} failed"
46-
47-
# Add details for each criteria
48-
details = []
49-
for i, criteria_stats in enumerate(stats['criteria_results']):
50-
details.append(f" Criteria {i+1}: {_format_tolerance_summary(criteria_stats)}")
51-
52-
return summary + "\n" + "\n".join(details)
53-
else:
54-
# Single criteria
55-
total_pixels = stats.get('total_pixels', 0)
56-
failing_pixels = stats.get('failing_pixels', 0)
57-
failing_percentage = stats.get('failing_percentage', 0.0)
58-
allowed_percentage = stats.get('allowed_percentage', 0.0)
59-
max_abs_diff = stats.get('max_abs_diff', 0)
60-
mean_abs_diff = stats.get('mean_abs_diff', 0)
61-
max_diff_per_channel = stats.get('max_diff_per_channel', [])
62-
63-
criteria = stats.get('criteria', {})
64-
criteria_desc = []
65-
if 'max_pixel_diff' in criteria:
66-
criteria_desc.append(f"max_pixel_diff: {criteria['max_pixel_diff']}")
67-
if 'max_pixel_diff_percent' in criteria:
68-
criteria_desc.append(f"max_pixel_diff_percent: {criteria['max_pixel_diff_percent']}%")
69-
if 'allowed_diff_pixels' in criteria:
70-
criteria_desc.append(f"allowed_diff_pixels: {criteria['allowed_diff_pixels']}%")
71-
72-
summary_lines = [
73-
f"Tolerance: {', '.join(criteria_desc)}",
74-
f"Pixels: {failing_pixels:,} / {total_pixels:,} ({failing_percentage:.2f}%) exceed tolerance",
75-
f"Allowed: {allowed_percentage:.2f}% - {'PASS' if stats.get('passed', False) else 'FAIL'}",
76-
f"Max difference: {max_abs_diff} (mean: {mean_abs_diff:.1f})"
77-
]
28+
def _run_diffimg(diffimg_path, ref_path, cand_path, tolerance=None, diff_out_path=None):
29+
cmd = [diffimg_path, ref_path, cand_path]
30+
31+
config_file = None
32+
if tolerance:
33+
fd, config_file = tempfile.mkstemp(suffix='.json', text=True)
34+
with os.fdopen(fd, 'w') as f:
35+
json.dump(tolerance, f)
36+
cmd.extend(['--config', config_file])
37+
38+
if diff_out_path:
39+
cmd.extend(['--diff', diff_out_path])
40+
41+
try:
42+
result_proc = subprocess.run(cmd, capture_output=True, text=True)
43+
# diffimg outputs JSON to stdout even on failure (exit code might be non-zero for mismatch)
44+
# However, if it crashed or failed to run, stdout might be empty or not JSON.
45+
46+
output = result_proc.stdout.strip()
47+
if not output:
48+
return False, {'error': 'No output from diffimg', 'stderr': result_proc.stderr}
49+
50+
try:
51+
result_json = json.loads(output)
52+
passed = result_json.get('passed', False)
53+
return passed, result_json
54+
except json.JSONDecodeError:
55+
return False, {'error': 'Invalid JSON output from diffimg', 'stdout': output, 'stderr': result_proc.stderr}
7856

79-
if len(max_diff_per_channel) > 1:
80-
channel_info = ", ".join(f"Ch{i}: {diff}" for i, diff in enumerate(max_diff_per_channel))
81-
summary_lines.append(f"Per-channel max: {channel_info}")
57+
except Exception as e:
58+
return False, {'error': f'Failed to run diffimg: {e}'}
59+
finally:
60+
if config_file and os.path.exists(config_file):
61+
os.remove(config_file)
8262

83-
return "\n".join(summary_lines)
8463

85-
def _compare_goldens(base_dir, comparison_dir, out_dir=None, test_filter=None, test_config_path=None):
64+
def _compare_goldens(base_dir, comparison_dir, diffimg_path, out_dir=None, test_filter=None, test_config_path=None):
8665
def test_name(p):
8766
return p.replace('.tif', '')
8867

@@ -115,33 +94,31 @@ def single_test(src_dir, dest_dir, src_fname):
11594
# Get tolerance configuration for this test case
11695
tolerance = _get_tolerance_for_test_case(test_case.replace('.tif', ''), test_config_obj)
11796

118-
# Compare images and get detailed statistics
119-
comparison_result, stats = same_image(src_fname, dest_fname, tolerance)
97+
diff_fname = None
98+
if output_test_dir:
99+
diff_fname = os.path.join(output_test_dir, f"{test_case.replace('.tif', '_diff.tif')}")
100+
# Ensure subdirectories exist for diff output
101+
os.makedirs(os.path.dirname(diff_fname), exist_ok=True)
102+
103+
# Compare images using diffimg
104+
comparison_result, stats = _run_diffimg(diffimg_path, src_fname, dest_fname, tolerance, diff_fname)
120105

121106
if not comparison_result:
122107
result['result'] = RESULT_FAILED
123-
if output_test_dir:
124-
# just the file name
125-
diff_fname = f"{test_case.replace('.tif', '_diff.tif')}"
126-
output_image_diff(src_fname, dest_fname, os.path.join(output_test_dir, diff_fname))
127-
result['diff'] = diff_fname
108+
if diff_fname and os.path.exists(diff_fname):
109+
result['diff'] = os.path.basename(diff_fname)
128110
else:
129111
result['result'] = RESULT_OK
130112

131113
# Add detailed tolerance information to result
132114
if tolerance:
133115
result['tolerance_used'] = True
134116
result['tolerance_config'] = tolerance
135-
if stats:
136-
result['tolerance_stats'] = stats
137-
# Add human-readable summary
138-
result['tolerance_summary'] = _format_tolerance_summary(stats)
139-
elif stats is None and comparison_result:
140-
result['comparison_type'] = 'exact_match'
141-
elif stats and 'error' in stats:
142-
result['error'] = stats['error']
143-
if 'details' in stats:
144-
result['error_details'] = stats['details']
117+
118+
if stats:
119+
result['stats'] = stats
120+
if 'error' in stats:
121+
result['error'] = stats['error']
145122

146123
return result
147124

@@ -191,6 +168,7 @@ def single_test(src_dir, dest_dir, src_fname):
191168
parser.add_argument('--src', help='Directory of the base of the diff.', required=True)
192169
parser.add_argument('--dest', help='Directory of the comparison of the diff.')
193170
parser.add_argument('--out', help='Directory of output for the result of the diff.')
171+
parser.add_argument('--diffimg', help='Path to the diffimg tool.', required=True)
194172
parser.add_argument('--test_filter', help='Filter for the tests to run')
195173
parser.add_argument('--test', help='Path to test configuration JSON file for tolerance settings.')
196174

@@ -202,42 +180,37 @@ def single_test(src_dir, dest_dir, src_fname):
202180
dest = os.path.join(os.getcwd(), './out/renderdiff')
203181
assert os.path.exists(dest), f"Destination folder={dest} does not exist."
204182

205-
results = _compare_goldens(args.src, dest, out_dir=args.out,
183+
if not os.path.exists(args.diffimg):
184+
print(f"Error: diffimg tool not found at {args.diffimg}")
185+
sys.exit(1)
186+
187+
results = _compare_goldens(args.src, dest, args.diffimg, out_dir=args.out,
206188
test_filter=args.test_filter, test_config_path=args.test)
207189

208190
# Categorize results
209191
failed = [k for k in results if k['result'] != RESULT_OK]
210192
passed = [k for k in results if k['result'] == RESULT_OK]
211-
tolerance_used_count = len([k for k in results if k.get('tolerance_used', False)])
212193

213194
# Create detailed failure report
214195
failed_details = []
215196
for k in failed:
216197
failure_line = f" {k['name']} ({k['result']})"
217-
if 'tolerance_summary' in k:
218-
failure_line += f"\n {k['tolerance_summary'].replace(chr(10), chr(10) + ' ')}"
198+
if 'stats' in k:
199+
stats = k['stats']
200+
if 'maxDiffFound' in stats:
201+
failure_line += f"\n Max Diff: {stats['maxDiffFound']}"
202+
if 'failingPixelCount' in stats:
203+
failure_line += f"\n Failing Pixels: {stats['failingPixelCount']}"
219204
failed_details.append(failure_line)
220205

221-
# Create success report with tolerance details
222-
tolerance_used_details = []
223-
for k in passed:
224-
if k.get('tolerance_used', False) and 'tolerance_summary' in k:
225-
tolerance_used_details.append(f" {k['name']}: {k['tolerance_summary'].split(chr(10))[0]}")
226-
227206
# Main summary
228207
success_count = len(passed)
229208
important_print(f'Successfully compared {success_count} / {len(results)} images')
230209

231-
if tolerance_used_details:
232-
pstr = 'Passed:'
233-
for detail in tolerance_used_details:
234-
pstr += '\n' + detail
235-
important_print(pstr)
236-
237210
if failed_details:
238211
pstr = 'Failed:'
239212
for detail in failed_details:
240213
pstr += '\n' + detail
241214
important_print(pstr)
242215
if len(failed) > 0:
243-
exit(1)
216+
exit(1)

0 commit comments

Comments (0)