@@ -116,26 +116,6 @@ fn generate_wide_dag(num_nodes: usize) -> Vec<u8> {
     data
 }
 
-/*
-/// Generate DAG data for input parsing stress tests
-fn generate_input_parsing_heavy(num_edges: usize) -> Vec<u8> {
-    // Create a scenario with many edges but relatively few unique nodes
-    // This stresses the input parsing and graph construction optimizations
-    let num_unique_nodes = (num_edges as f64).sqrt() as usize;
-    let mut data = Vec::new();
-
-    for i in 0..num_edges {
-        let from = i % num_unique_nodes;
-        let to = (i / num_unique_nodes) % num_unique_nodes;
-        if from != to {
-            data.extend_from_slice(format!("n{from} n{to}\n").as_bytes());
-        }
-    }
-
-    data
-}
-*/
-
 /// Benchmark linear chain graphs of different sizes
 /// This tests the performance improvements mentioned in PR #8694
 #[divan::bench(args = [1_000_000])]
@@ -187,6 +167,27 @@ fn tsort_wide_dag(bencher: Bencher, num_nodes: usize) {
 }
 
 /*
+/// Silent for now because of too much variance
+
+
+/// Generate DAG data for input parsing stress tests
+fn generate_input_parsing_heavy(num_edges: usize) -> Vec<u8> {
+    // Create a scenario with many edges but relatively few unique nodes
+    // This stresses the input parsing and graph construction optimizations
+    let num_unique_nodes = (num_edges as f64).sqrt() as usize;
+    let mut data = Vec::new();
+
+    for i in 0..num_edges {
+        let from = i % num_unique_nodes;
+        let to = (i / num_unique_nodes) % num_unique_nodes;
+        if from != to {
+            data.extend_from_slice(format!("n{from} n{to}\n").as_bytes());
+        }
+    }
+
+    data
+}
+
 /// Benchmark input parsing vs computation by using files with different edge densities
 #[divan::bench(args = [5_000])]
 fn tsort_input_parsing_heavy(bencher: Bencher, num_edges: usize) {
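
For reference, here is a standalone sketch (not part of the diff, and not in the benchmark file) of what `generate_input_parsing_heavy` emits for a tiny input: with `num_edges = 9`, `num_unique_nodes` is `sqrt(9) = 3`, so every edge reuses only `n0`, `n1`, `n2` and self-loops are skipped, giving many edge lines over few unique nodes, which is the shape that stresses input parsing rather than the sort itself.

```rust
// Hypothetical driver that prints the data generate_input_parsing_heavy
// would produce for num_edges = 9; the loop body mirrors the generator above.
fn main() {
    let num_edges = 9usize;
    // sqrt(9) = 3, so only n0, n1, n2 ever appear in the edge list.
    let num_unique_nodes = (num_edges as f64).sqrt() as usize;
    let mut data = Vec::new();
    for i in 0..num_edges {
        let from = i % num_unique_nodes;
        let to = (i / num_unique_nodes) % num_unique_nodes;
        // Self-loops are skipped, so 3 of the 9 candidate edges are dropped.
        if from != to {
            data.extend_from_slice(format!("n{from} n{to}\n").as_bytes());
        }
    }
    // Prints lines such as "n1 n0", "n2 n0", "n0 n1", ...
    print!("{}", String::from_utf8(data).unwrap());
}
```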