@@ -99,28 +99,84 @@ def generate_cpp_merge_test(n: int) -> str:
     return cpp_code
 
 
-def analyze_trace_file(trace_path: str) -> tuple[float, float]:
+def generate_cpp_nested_loop_test(n: int) -> str:
     """
-    Parses the -ftime-trace JSON output to find durations.
+    Generates C++ code with N levels of nested loops.
+    This pattern tests how analysis performance scales with loop nesting depth,
+    which is a key factor in the complexity of dataflow analyses on structured
+    control flow.
 
-    Returns:
-        A tuple of (lifetime_analysis_duration_us, total_clang_duration_us).
+    Example (n=3):
+        struct MyObj { int id; ~MyObj() {} };
+        void nested_loops_3() {
+          MyObj* p = nullptr;
+          for(int i0=0; i0<2; ++i0) {
+            MyObj s0;
+            p = &s0;
+            for(int i1=0; i1<2; ++i1) {
+              MyObj s1;
+              p = &s1;
+              for(int i2=0; i2<2; ++i2) {
+                MyObj s2;
+                p = &s2;
+              }
+            }
+          }
+        }
+    """
+    if n <= 0:
+        return "// Nesting depth must be positive."
+
+    cpp_code = "struct MyObj { int id; ~MyObj() {} };\n\n"
+    cpp_code += f"void nested_loops_{n}() {{\n"
+    cpp_code += "  MyObj* p = nullptr;\n"
+
+    for i in range(n):
+        indent = "  " * (i + 1)
+        cpp_code += f"{indent}for(int i{i}=0; i{i}<2; ++i{i}) {{\n"
+        cpp_code += f"{indent}  MyObj s{i}; p = &s{i};\n"
+
+    for i in range(n - 1, -1, -1):
+        indent = "  " * (i + 1)
+        cpp_code += f"{indent}}}\n"
+
+    cpp_code += "}\n"
+    cpp_code += f"\nint main() {{ nested_loops_{n}(); return 0; }}\n"
+    return cpp_code
+
+
+def analyze_trace_file(trace_path: str) -> dict:
     """
-    lifetime_duration = 0.0
-    total_duration = 0.0
+    Parses the -ftime-trace JSON output to find durations for the lifetime
+    analysis and its sub-phases.
+    Returns a dictionary of durations in microseconds.
+    """
+    durations = {
+        "lifetime_us": 0.0,
+        "total_us": 0.0,
+        "fact_gen_us": 0.0,
+        "loan_prop_us": 0.0,
+        "expired_loans_us": 0.0,
+    }
+    event_name_map = {
+        "LifetimeSafetyAnalysis": "lifetime_us",
+        "ExecuteCompiler": "total_us",
+        "FactGenerator": "fact_gen_us",
+        "LoanPropagation": "loan_prop_us",
+        "ExpiredLoans": "expired_loans_us",
+    }
     try:
         with open(trace_path, "r") as f:
             trace_data = json.load(f)
         for event in trace_data.get("traceEvents", []):
-            if event.get("name") == "LifetimeSafetyAnalysis":
-                lifetime_duration += float(event.get("dur", 0))
-            if event.get("name") == "ExecuteCompiler":
-                total_duration += float(event.get("dur", 0))
-
+            event_name = event.get("name")
+            if event_name in event_name_map:
+                key = event_name_map[event_name]
+                durations[key] += float(event.get("dur", 0))
     except (IOError, json.JSONDecodeError) as e:
         print(f"Error reading or parsing trace file {trace_path}: {e}", file=sys.stderr)
-        return 0.0, 0.0
-    return lifetime_duration, total_duration
+        return {key: 0.0 for key in durations}
+    return durations
 
 
 def power_law(n, c, k):
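For context, here is a minimal sketch (not part of the patch) of the Chrome-trace JSON that clang's -ftime-trace emits and that analyze_trace_file consumes. Only each event's "name" and "dur" field (in microseconds) are read; the event names match event_name_map above, and the durations below are illustrative placeholders, not real measurements.

# Hypothetical -ftime-trace output, trimmed to the fields the parser reads.
import json

sample_trace = {
    "traceEvents": [
        {"name": "ExecuteCompiler", "ph": "X", "ts": 0, "dur": 120000},
        {"name": "LifetimeSafetyAnalysis", "ph": "X", "ts": 1000, "dur": 15000},
        {"name": "FactGenerator", "ph": "X", "ts": 1100, "dur": 4000},
        {"name": "LoanPropagation", "ph": "X", "ts": 5200, "dur": 9000},
        {"name": "ExpiredLoans", "ph": "X", "ts": 14300, "dur": 1500},
    ]
}

with open("sample_trace.json", "w") as f:
    json.dump(sample_trace, f)

# analyze_trace_file("sample_trace.json") would then return microsecond
# totals keyed as lifetime_us, total_us, fact_gen_us, loan_prop_us, and
# expired_loans_us.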
@@ -135,8 +191,29 @@ def human_readable_time(ms: float) -> str:
     return f"{ms:.2f} ms"
 
 
+def calculate_complexity(n_data, y_data) -> tuple[float | None, float | None]:
+    """
+    Calculates the exponent 'k' for the power law fit y = c * n^k.
+    Returns a tuple of (k, k_standard_error).
+    """
+    try:
+        if len(n_data) < 3 or np.all(y_data < 1e-6) or np.var(y_data) < 1e-6:
+            return None, None
+
+        non_zero_indices = y_data > 0
+        if np.sum(non_zero_indices) < 3:
+            return None, None
+
+        n_fit, y_fit = n_data[non_zero_indices], y_data[non_zero_indices]
+        popt, pcov = curve_fit(power_law, n_fit, y_fit, p0=[0, 1], maxfev=5000)
+        k_stderr = np.sqrt(np.diag(pcov))[1]
+        return popt[1], k_stderr
+    except (RuntimeError, ValueError):
+        return None, None
+
+
 def generate_markdown_report(results: dict) -> str:
-    """Generates a Markdown-formatted report from the benchmark results."""
+    """Generates a concise, Markdown-formatted report from the benchmark results."""
     report = []
     timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S %Z")
     report.append(f"# Lifetime Analysis Performance Report")
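As a standalone sanity check of the fitting approach (a sketch, not part of the patch, assuming power_law returns c * n**k as calculate_complexity's docstring implies), curve_fit recovers a known exponent from synthetic timings:

# Fit synthetic data with a known exponent (k = 2) using the same power-law
# model and initial guess as calculate_complexity.
import numpy as np
from scipy.optimize import curve_fit

def power_law(n, c, k):
    return c * np.power(n, k)

n_data = np.array([10, 50, 100, 200, 400, 800], dtype=float)
y_data = 0.5 * n_data**2  # synthetic timings, exactly quadratic

popt, pcov = curve_fit(power_law, n_data, y_data, p0=[0, 1], maxfev=5000)
k, k_stderr = popt[1], np.sqrt(np.diag(pcov))[1]
print(f"fitted exponent: {k:.2f} (stderr {k_stderr:.2f})")  # ~2.00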
@@ -146,54 +223,52 @@ def generate_markdown_report(results: dict) -> str:
     for test_name, data in results.items():
         title = data["title"]
         report.append(f"## Test Case: {title}")
-        report.append("")
+        report.append("\n**Timing Results:**\n")
 
         # Table header
-        report.append("| N | Analysis Time | Total Clang Time |")
-        report.append("|:----|--------------:|-----------------:|")
+        report.append(
+            "| N (Input Size) | Total Time | Analysis Time (%) | Fact Generator (%) | Loan Propagation (%) | Expired Loans (%) |"
+        )
+        report.append(
+            "|:---------------|-----------:|------------------:|-------------------:|---------------------:|------------------:|"
+        )
 
         # Table rows
         n_data = np.array(data["n"])
-        analysis_data = np.array(data["lifetime_ms"])
-        total_data = np.array(data["total_ms"])
+        total_ms_data = np.array(data["total_ms"])
         for i in range(len(n_data)):
-            analysis_str = human_readable_time(analysis_data[i])
-            total_str = human_readable_time(total_data[i])
-            report.append(f"| {n_data[i]:<3} | {analysis_str:>13} | {total_str:>16} |")
-
-        report.append("")
-
-        # Complexity analysis
-        report.append(f"**Complexity Analysis:**")
-        try:
-            # Curve fitting requires at least 3 points
-            if len(n_data) < 3:
-                raise ValueError("Not enough data points to perform curve fitting.")
-
-            popt, pcov = curve_fit(
-                power_law, n_data, analysis_data, p0=[0, 2], maxfev=5000
-            )
-            _, k = popt
-
-            # Confidence Interval for k
-            alpha = 0.05  # 95% confidence
-            dof = max(0, len(n_data) - len(popt))  # degrees of freedom
-            t_val = t.ppf(1.0 - alpha / 2.0, dof)
-            # Standard error of the parameters
-            perr = np.sqrt(np.diag(pcov))
-            k_stderr = perr[1]
-            k_ci_lower = k - t_val * k_stderr
-            k_ci_upper = k + t_val * k_stderr
-
-            report.append(
-                f"- The performance for this case scales approx. as **O(n<sup>{k:.2f}</sup>)**."
-            )
-            report.append(
-                f"- **95% Confidence interval for exponent:** `[{k_ci_lower:.2f}, {k_ci_upper:.2f}]`."
-            )
+            total_t = total_ms_data[i]
+            if total_t < 1e-6:
+                total_t = 1.0  # Avoid division by zero
+
+            row = [
+                f"| {n_data[i]:<14} |",
+                f"{human_readable_time(total_t):>10} |",
+                f"{data['lifetime_ms'][i] / total_t * 100:>17.2f}% |",
+                f"{data['fact_gen_ms'][i] / total_t * 100:>18.2f}% |",
+                f"{data['loan_prop_ms'][i] / total_t * 100:>20.2f}% |",
+                f"{data['expired_loans_ms'][i] / total_t * 100:>17.2f}% |",
+            ]
+            report.append(" ".join(row))
+
+        report.append("\n**Complexity Analysis:**\n")
+        report.append("| Analysis Phase | Complexity O(n<sup>k</sup>) |")
+        report.append("|:------------------|:--------------------------|")
+
+        analysis_phases = {
+            "Total Analysis": data["lifetime_ms"],
+            "FactGenerator": data["fact_gen_ms"],
+            "LoanPropagation": data["loan_prop_ms"],
+            "ExpiredLoans": data["expired_loans_ms"],
+        }
 
-        except (RuntimeError, ValueError) as e:
-            report.append(f"- Could not determine a best-fit curve for the data: {e}")
+        for phase_name, y_data in analysis_phases.items():
+            k, delta = calculate_complexity(n_data, np.array(y_data))
+            if k is not None and delta is not None:
+                complexity_str = f"O(n<sup>{k:.2f}</sup> ± {delta:.2f})"
+            else:
+                complexity_str = "(Negligible)"
+            report.append(f"| {phase_name:<17} | {complexity_str:<25} |")
 
         report.append("\n---\n")
 
@@ -202,7 +277,7 @@ def generate_markdown_report(results: dict) -> str:
 
 def run_single_test(
     clang_binary: str, output_dir: str, test_name: str, generator_func, n: int
-) -> tuple[float, float]:
+) -> dict:
     """Generates, compiles, and benchmarks a single test case."""
     print(f"--- Running Test: {test_name.capitalize()} with N={n} ---")
 
@@ -221,7 +296,8 @@ def run_single_test(
         "-o",
         "/dev/null",
         "-ftime-trace=" + trace_file,
-        "-Wexperimental-lifetime-safety",
+        "-Xclang",
+        "-fexperimental-lifetime-safety",
         "-std=c++17",
         source_file,
     ]
@@ -231,11 +307,12 @@ def run_single_test(
     if result.returncode != 0:
         print(f"Compilation failed for N={n}!", file=sys.stderr)
         print(result.stderr, file=sys.stderr)
-        return 0.0, 0.0
+        return {}
 
-    lifetime_us, total_us = analyze_trace_file(trace_file)
-
-    return lifetime_us / 1000.0, total_us / 1000.0
+    durations_us = analyze_trace_file(trace_file)
+    return {
+        key.replace("_us", "_ms"): value / 1000.0 for key, value in durations_us.items()
+    }
 
 
 if __name__ == "__main__":
@@ -270,6 +347,12 @@ def run_single_test(
             "generator_func": generate_cpp_merge_test,
             "n_values": [10, 50, 100, 200, 400, 800],
         },
+        {
+            "name": "nested_loops",
+            "title": "Deeply Nested Loops",
+            "generator_func": generate_cpp_nested_loop_test,
+            "n_values": [10, 50, 100, 200, 400, 800],
+        },
     ]
 
     results = {}
@@ -282,21 +365,28 @@ def run_single_test(
             "n": [],
             "lifetime_ms": [],
             "total_ms": [],
+            "fact_gen_ms": [],
+            "loan_prop_ms": [],
+            "expired_loans_ms": [],
         }
         for n in config["n_values"]:
-            lifetime_ms, total_ms = run_single_test(
+            durations_ms = run_single_test(
                 args.clang_binary,
                 args.output_dir,
                 test_name,
                 config["generator_func"],
                 n,
             )
-            if total_ms > 0:
+            if durations_ms:
                 results[test_name]["n"].append(n)
-                results[test_name]["lifetime_ms"].append(lifetime_ms)
-                results[test_name]["total_ms"].append(total_ms)
+                for key, value in durations_ms.items():
+                    results[test_name][key].append(value)
+
                 print(
-                    f"  Total: {human_readable_time(total_ms)} | Analysis: {human_readable_time(lifetime_ms)}"
+                    f"  Total Analysis: {human_readable_time(durations_ms['lifetime_ms'])} | "
+                    f"FactGen: {human_readable_time(durations_ms['fact_gen_ms'])} | "
+                    f"LoanProp: {human_readable_time(durations_ms['loan_prop_ms'])} | "
+                    f"ExpiredLoans: {human_readable_time(durations_ms['expired_loans_ms'])}"
                 )
 
     print("\n\n" + "=" * 80)
@@ -305,3 +395,8 @@ def run_single_test(
 
     markdown_report = generate_markdown_report(results)
     print(markdown_report)
+
+    report_filename = os.path.join(args.output_dir, "performance_report.md")
+    with open(report_filename, "w") as f:
+        f.write(markdown_report)
+    print(f"Report saved to: {report_filename}")
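With the report now also written to disk, a typical invocation might look like the following sketch; the script name and flag spellings are assumptions inferred from the args.clang_binary and args.output_dir attributes used above, so check the argparse setup earlier in the script.

# Hypothetical usage; flag names inferred from the args.* attributes above.
#
#   python3 benchmark.py --clang-binary /path/to/clang --output-dir ./perf_out
#
# The Markdown report is printed to stdout and, after this change, also
# saved to ./perf_out/performance_report.md.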