@@ -103,6 +103,7 @@ class BestOptimization(BaseModel):
     winning_benchmarking_test_results: TestResults
     winning_replay_benchmarking_test_results: Optional[TestResults] = None
     line_profiler_test_results: dict
+    async_throughput: Optional[int] = None
 
 
 @dataclass(frozen=True)
@@ -277,6 +278,7 @@ class OptimizedCandidateResult(BaseModel):
     replay_benchmarking_test_results: Optional[dict[BenchmarkKey, TestResults]] = None
     optimization_candidate_index: int
     total_candidate_timing: int
+    async_throughput: Optional[int] = None
 
 
 class GeneratedTests(BaseModel):
@@ -383,6 +385,7 @@ class OriginalCodeBaseline(BaseModel):
     line_profile_results: dict
     runtime: int
     coverage_results: Optional[CoverageData]
+    async_throughput: Optional[int] = None
 
 
 class CoverageStatus(Enum):
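The three hunks above thread an optional `async_throughput` from the baseline (`OriginalCodeBaseline`) through candidate evaluation (`OptimizedCandidateResult`) to the final pick (`BestOptimization`). A minimal sketch of how a consumer might compare the two values, assuming throughput is an ops-per-interval count and `None` means no async measurement was taken; the helper name and the 10% threshold are illustrative, not part of this diff:

```python
from typing import Optional

# Hypothetical helper (not in this diff): decide whether a candidate's
# async throughput beats the baseline. Both fields are Optional[int],
# so None on either side means no async measurement exists and the
# comparison is skipped.
def async_throughput_improved(
    baseline_throughput: Optional[int],
    candidate_throughput: Optional[int],
    min_gain: float = 0.1,
) -> bool:
    if baseline_throughput is None or candidate_throughput is None:
        return False
    if baseline_throughput == 0:
        return candidate_throughput > 0
    gain = (candidate_throughput - baseline_throughput) / baseline_throughput
    return gain >= min_gain
```

Guarding on `None` first keeps any such comparison backward-compatible with results produced before these fields existed, which is presumably why all three additions default to `None`.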
@@ -545,6 +548,7 @@ class TestResults(BaseModel):  # noqa: PLW1641
     # also we don't support deletion of test results elements - caution is advised
     test_results: list[FunctionTestInvocation] = []
     test_result_idx: dict[str, int] = {}
+    perf_stdout: Optional[str] = None
 
     def add(self, function_test_invocation: FunctionTestInvocation) -> None:
         unique_id = function_test_invocation.unique_invocation_loop_id
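`TestResults` gains an optional `perf_stdout` alongside its indexed storage. A self-contained sketch of the same pattern, assuming `perf_stdout` is meant to hold raw output captured during the performance run; the real `FunctionTestInvocation` model is replaced by a plain string id here for brevity:

```python
from typing import Optional
from pydantic import BaseModel

# Analogue of the TestResults pattern shown in the hunk above.
# test_result_idx maps a unique invocation id to its position in
# test_results for O(1) lookup; perf_stdout optionally carries raw
# output captured during the perf run.
class MiniTestResults(BaseModel):
    test_results: list[str] = []
    test_result_idx: dict[str, int] = {}
    perf_stdout: Optional[str] = None

    def add(self, unique_id: str) -> None:
        # Mirror of TestResults.add: record the element and index it by id.
        self.test_result_idx[unique_id] = len(self.test_results)
        self.test_results.append(unique_id)

results = MiniTestResults()
results.add("test_foo::loop_0")
results.perf_stdout = "captured perf run output"  # e.g. runner stdout
```

Keeping `test_result_idx` in sync inside `add` is what makes lookups O(1), and it is why, per the comment in the diff, deleting elements is unsupported: removal would invalidate every index after the deleted position.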