@@ -4,9 +4,10 @@
 import pprint
 import json
 import fnmatch
+import subprocess
+import tempfile
 
 from utils import execute, ArgParseImpl, important_print, mkdir_p
-from image_diff import same_image, output_image_diff
 from results import RESULT_OK, RESULT_FAILED, RESULT_MISSING, GOLDEN_MISSING
 import test_config
 
@@ -24,65 +25,43 @@ def _get_tolerance_for_test_case(test_case_name, test_config_obj):
 
     return None
 
-def _format_tolerance_summary(stats):
-    """
-    Create human-readable summary of tolerance statistics.
-
-    Args:
-        stats: Statistics dictionary from tolerance evaluation
-
-    Returns:
-        str: Formatted summary string
-    """
-    if 'error' in stats:
-        return f"Error: {stats['error']}"
-
-    if 'operator' in stats:
-        # Nested criteria with operator
-        operator = stats['operator']
-        criteria_count = len(stats['criteria_results'])
-        passed_count = sum(1 for c in stats['criteria_results'] if c.get('passed', False))
-        summary = f"{operator} of {criteria_count} criteria: {passed_count} passed, {criteria_count - passed_count} failed"
-
-        # Add details for each criteria
-        details = []
-        for i, criteria_stats in enumerate(stats['criteria_results']):
-            details.append(f"  Criteria {i + 1}: {_format_tolerance_summary(criteria_stats)}")
-
-        return summary + "\n" + "\n".join(details)
-    else:
-        # Single criteria
-        total_pixels = stats.get('total_pixels', 0)
-        failing_pixels = stats.get('failing_pixels', 0)
-        failing_percentage = stats.get('failing_percentage', 0.0)
-        allowed_percentage = stats.get('allowed_percentage', 0.0)
-        max_abs_diff = stats.get('max_abs_diff', 0)
-        mean_abs_diff = stats.get('mean_abs_diff', 0)
-        max_diff_per_channel = stats.get('max_diff_per_channel', [])
-
-        criteria = stats.get('criteria', {})
-        criteria_desc = []
-        if 'max_pixel_diff' in criteria:
-            criteria_desc.append(f"max_pixel_diff: {criteria['max_pixel_diff']}")
-        if 'max_pixel_diff_percent' in criteria:
-            criteria_desc.append(f"max_pixel_diff_percent: {criteria['max_pixel_diff_percent']}%")
-        if 'allowed_diff_pixels' in criteria:
-            criteria_desc.append(f"allowed_diff_pixels: {criteria['allowed_diff_pixels']}%")
-
-        summary_lines = [
-            f"Tolerance: {', '.join(criteria_desc)}",
-            f"Pixels: {failing_pixels:,} / {total_pixels:,} ({failing_percentage:.2f}%) exceed tolerance",
-            f"Allowed: {allowed_percentage:.2f}% - {'PASS' if stats.get('passed', False) else 'FAIL'}",
-            f"Max difference: {max_abs_diff} (mean: {mean_abs_diff:.1f})"
-        ]
+def _run_diffimg(diffimg_path, ref_path, cand_path, tolerance=None, diff_out_path=None):
+    cmd = [diffimg_path, ref_path, cand_path]
+
+    config_file = None
+    if tolerance:
+        fd, config_file = tempfile.mkstemp(suffix='.json', text=True)
+        with os.fdopen(fd, 'w') as f:
+            json.dump(tolerance, f)
+        cmd.extend(['--config', config_file])
+
+    if diff_out_path:
+        cmd.extend(['--diff', diff_out_path])
+
+    try:
+        result_proc = subprocess.run(cmd, capture_output=True, text=True)
+        # diffimg outputs JSON to stdout even on failure (the exit code may be non-zero for a mismatch).
+        # However, if it crashed or failed to run, stdout might be empty or not JSON.
+
+        output = result_proc.stdout.strip()
+        if not output:
+            return False, {'error': 'No output from diffimg', 'stderr': result_proc.stderr}
+
+        try:
+            result_json = json.loads(output)
+            passed = result_json.get('passed', False)
+            return passed, result_json
+        except json.JSONDecodeError:
+            return False, {'error': 'Invalid JSON output from diffimg', 'stdout': output, 'stderr': result_proc.stderr}
 
-        if len(max_diff_per_channel) > 1:
-            channel_info = ", ".join(f"Ch{i}: {diff}" for i, diff in enumerate(max_diff_per_channel))
-            summary_lines.append(f"Per-channel max: {channel_info}")
+    except Exception as e:
+        return False, {'error': f'Failed to run diffimg: {e}'}
+    finally:
+        if config_file and os.path.exists(config_file):
+            os.remove(config_file)
 
-        return "\n".join(summary_lines)
 
-def _compare_goldens(base_dir, comparison_dir, out_dir=None, test_filter=None, test_config_path=None):
+def _compare_goldens(base_dir, comparison_dir, diffimg_path, out_dir=None, test_filter=None, test_config_path=None):
     def test_name(p):
         return p.replace('.tif', '')
 
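Note on _run_diffimg above: the wrapper depends on only two contracts with the external tool. The tolerance dict is serialized verbatim into the temporary --config JSON file, and stdout must carry a JSON object with a boolean 'passed' key, which is returned alongside the full payload as the stats. A minimal usage sketch follows; the paths are placeholders, and the tolerance key names are borrowed from the removed _format_tolerance_summary criteria, so they are an assumption about what diffimg actually accepts:

    # Hypothetical tolerance entry; written verbatim to the temp --config file.
    tolerance = {'max_pixel_diff': 4, 'allowed_diff_pixels': 0.5}

    # Returns (passed, stats); stats is diffimg's stdout JSON on success, or an
    # {'error': ...} dict if the tool produced no output, bad JSON, or crashed.
    passed, stats = _run_diffimg('./out/bin/diffimg', 'golden.tif', 'render.tif',
                                 tolerance=tolerance, diff_out_path='render_diff.tif')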
@@ -115,33 +94,31 @@ def single_test(src_dir, dest_dir, src_fname):
     # Get tolerance configuration for this test case
     tolerance = _get_tolerance_for_test_case(test_case.replace('.tif', ''), test_config_obj)
 
-    # Compare images and get detailed statistics
-    comparison_result, stats = same_image(src_fname, dest_fname, tolerance)
+    diff_fname = None
+    if output_test_dir:
+        diff_fname = os.path.join(output_test_dir, f"{test_case.replace('.tif', '_diff.tif')}")
+        # Ensure subdirectories exist for diff output
+        os.makedirs(os.path.dirname(diff_fname), exist_ok=True)
+
+    # Compare images using diffimg
+    comparison_result, stats = _run_diffimg(diffimg_path, src_fname, dest_fname, tolerance, diff_fname)
 
     if not comparison_result:
         result['result'] = RESULT_FAILED
-        if output_test_dir:
-            # just the file name
-            diff_fname = f"{test_case.replace('.tif', '_diff.tif')}"
-            output_image_diff(src_fname, dest_fname, os.path.join(output_test_dir, diff_fname))
-            result['diff'] = diff_fname
+        if diff_fname and os.path.exists(diff_fname):
+            result['diff'] = os.path.basename(diff_fname)
     else:
         result['result'] = RESULT_OK
 
     # Add detailed tolerance information to result
     if tolerance:
         result['tolerance_used'] = True
         result['tolerance_config'] = tolerance
-        if stats:
-            result['tolerance_stats'] = stats
-            # Add human-readable summary
-            result['tolerance_summary'] = _format_tolerance_summary(stats)
-    elif stats is None and comparison_result:
-        result['comparison_type'] = 'exact_match'
-    elif stats and 'error' in stats:
-        result['error'] = stats['error']
-        if 'details' in stats:
-            result['error_details'] = stats['details']
+
+    if stats:
+        result['stats'] = stats
+        if 'error' in stats:
+            result['error'] = stats['error']
 
     return result
 
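For reference, a failing entry produced by single_test after this change looks roughly like the sketch below. The values are illustrative, the test case name is hypothetical, and 'stats' is whatever JSON diffimg printed, passed through verbatim:

    result = {
        'name': 'BlitTest.Simple',           # hypothetical test case name
        'result': RESULT_FAILED,             # RESULT_OK when the comparison passes
        'diff': 'BlitTest.Simple_diff.tif',  # basename of the diff image, when written
        'tolerance_used': True,
        'tolerance_config': {'max_pixel_diff': 4},
        'stats': {'passed': False, 'maxDiffFound': 12, 'failingPixelCount': 37},
    }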
@@ -191,6 +168,7 @@ def single_test(src_dir, dest_dir, src_fname):
     parser.add_argument('--src', help='Directory of the base of the diff.', required=True)
     parser.add_argument('--dest', help='Directory of the comparison of the diff.')
     parser.add_argument('--out', help='Directory of output for the result of the diff.')
+    parser.add_argument('--diffimg', help='Path to the diffimg tool.', required=True)
     parser.add_argument('--test_filter', help='Filter for the tests to run')
     parser.add_argument('--test', help='Path to test configuration JSON file for tolerance settings.')
 
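With the new required --diffimg flag, an invocation would look something like the following; the script name and every path are placeholders, not taken from the patch, and the filter pattern is just an example of the fnmatch-style matching the script uses:

    python3 compare_goldens.py --src ./golden --diffimg ./out/bin/diffimg \
        --out ./out/diff_results --test_filter 'Blit*' --test ./test_config.json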
@@ -202,42 +180,37 @@ def single_test(src_dir, dest_dir, src_fname):
     dest = os.path.join(os.getcwd(), './out/renderdiff')
     assert os.path.exists(dest), f"Destination folder={dest} does not exist."
 
-    results = _compare_goldens(args.src, dest, out_dir=args.out,
+    if not os.path.exists(args.diffimg):
+        print(f"Error: diffimg tool not found at {args.diffimg}")
+        sys.exit(1)
+
+    results = _compare_goldens(args.src, dest, args.diffimg, out_dir=args.out,
                                test_filter=args.test_filter, test_config_path=args.test)
 
     # Categorize results
     failed = [k for k in results if k['result'] != RESULT_OK]
     passed = [k for k in results if k['result'] == RESULT_OK]
-    tolerance_used_count = len([k for k in results if k.get('tolerance_used', False)])
 
     # Create detailed failure report
     failed_details = []
     for k in failed:
         failure_line = f"  {k['name']} ({k['result']})"
-        if 'tolerance_summary' in k:
-            failure_line += f"\n    {k['tolerance_summary'].replace(chr(10), chr(10) + '    ')}"
+        if 'stats' in k:
+            stats = k['stats']
+            if 'maxDiffFound' in stats:
+                failure_line += f"\n    Max Diff: {stats['maxDiffFound']}"
+            if 'failingPixelCount' in stats:
+                failure_line += f"\n    Failing Pixels: {stats['failingPixelCount']}"
         failed_details.append(failure_line)
 
-    # Create success report with tolerance details
-    tolerance_used_details = []
-    for k in passed:
-        if k.get('tolerance_used', False) and 'tolerance_summary' in k:
-            tolerance_used_details.append(f"  {k['name']}: {k['tolerance_summary'].split(chr(10))[0]}")
-
     # Main summary
     success_count = len(passed)
     important_print(f'Successfully compared {success_count} / {len(results)} images')
 
-    if tolerance_used_details:
-        pstr = 'Passed:'
-        for detail in tolerance_used_details:
-            pstr += '\n' + detail
-        important_print(pstr)
-
     if failed_details:
         pstr = 'Failed:'
         for detail in failed_details:
             pstr += '\n' + detail
         important_print(pstr)
         if len(failed) > 0:
-            exit(1)
+            exit(1)