diff --git a/collector/src/benchmark_set/compile_benchmarks.rs b/collector/src/benchmark_set/compile_benchmarks.rs index e7b05ce4f..18e8e1a2e 100644 --- a/collector/src/benchmark_set/compile_benchmarks.rs +++ b/collector/src/benchmark_set/compile_benchmarks.rs @@ -1,7 +1,7 @@ //! This file contains an exhaustive list of all compile-time benchmarks //! located in the `collector/compile-benchmarks` directory that are benchmarked in production. //! If new benchmarks are added/removed, they have to also be added/removed here, and in -//! the [super::expand_benchmark_set] function. +//! the [super::get_benchmark_sets_for_target] function. // Stable benchmarks pub(super) const CARGO: &str = "cargo"; diff --git a/collector/src/benchmark_set/mod.rs b/collector/src/benchmark_set/mod.rs index 90d8239c9..1fcee6101 100644 --- a/collector/src/benchmark_set/mod.rs +++ b/collector/src/benchmark_set/mod.rs @@ -31,20 +31,31 @@ pub enum BenchmarkSetMember { CompileBenchmark(BenchmarkName), } -/// Return the number of benchmark sets for the given target. -pub fn benchmark_set_count(target: Target) -> usize { - match target { - Target::X86_64UnknownLinuxGnu => 1, +#[derive(Debug)] +pub struct BenchmarkSet { + members: Vec<BenchmarkSetMember>, +} + +impl BenchmarkSet { + pub fn members(&self) -> &[BenchmarkSetMember] { + &self.members } } -/// Expand all the benchmarks that should be performed by a single collector. -pub fn expand_benchmark_set(id: BenchmarkSetId) -> Vec<BenchmarkSetMember> { +pub const BENCHMARK_SET_RUNTIME_BENCHMARKS: u32 = 0; +pub const BENCHMARK_SET_RUSTC: u32 = 0; + +/// Return all benchmark sets for the given target. 
+pub fn get_benchmark_sets_for_target(target: Target) -> Vec<BenchmarkSet> { use compile_benchmarks::*; - match (id.target, id.index) { - (Target::X86_64UnknownLinuxGnu, 0) => { - vec![ + fn compile(name: &str) -> BenchmarkSetMember { + BenchmarkSetMember::CompileBenchmark(BenchmarkName::from(name)) + } + + match target { + Target::X86_64UnknownLinuxGnu => { + let all = vec![ compile(AWAIT_CALL_TREE), compile(BITMAPS_3_2_1), compile(BITMAPS_3_2_1_NEW_SOLVER), @@ -106,24 +117,21 @@ pub fn expand_benchmark_set(id: BenchmarkSetId) -> Vec<BenchmarkSetMember> { compile(UNUSED_WARNINGS), compile(WF_PROJECTION_STRESS_65510), compile(WG_GRAMMAR), - ] - } - (Target::X86_64UnknownLinuxGnu, 1..) => { - panic!("Unknown benchmark set id {id:?}"); + ]; + vec![BenchmarkSet { members: all }] } } } -/// Helper function for creating compile-time benchmark member sets. -fn compile(name: &str) -> BenchmarkSetMember { - BenchmarkSetMember::CompileBenchmark(BenchmarkName::from(name)) +/// Expand all the benchmarks that should be performed by a single collector. +pub fn get_benchmark_set(id: BenchmarkSetId) -> BenchmarkSet { + let mut sets = get_benchmark_sets_for_target(id.target); + sets.remove(id.index as usize) } #[cfg(test)] mod tests { - use crate::benchmark_set::{ - benchmark_set_count, expand_benchmark_set, BenchmarkSetId, BenchmarkSetMember, - }; + use crate::benchmark_set::{get_benchmark_sets_for_target, BenchmarkSetMember}; use crate::compile::benchmark::target::Target; use crate::compile::benchmark::{ get_compile_benchmarks, BenchmarkName, CompileBenchmarkFilter, @@ -135,21 +143,13 @@ mod tests { /// complete, i.e. they don't miss any benchmarks. 
#[test] fn check_benchmark_set_x64() { - let target = Target::X86_64UnknownLinuxGnu; - let sets = (0..benchmark_set_count(target)) - .map(|index| { - expand_benchmark_set(BenchmarkSetId { - target, - index: index as u32, - }) - }) - .collect::<Vec<Vec<BenchmarkSetMember>>>(); + let sets = get_benchmark_sets_for_target(Target::X86_64UnknownLinuxGnu); // Assert set is unique for set in &sets { - let hashset = set.iter().collect::<HashSet<_>>(); + let hashset = set.members().iter().collect::<HashSet<_>>(); assert_eq!( - set.len(), + set.members().len(), hashset.len(), "Benchmark set {set:?} contains duplicates" ); @@ -160,8 +160,8 @@ mod tests { for j in i + 1..sets.len() { let set_a = &sets[i]; let set_b = &sets[j]; - let hashset_a = set_a.iter().collect::<HashSet<_>>(); - let hashset_b = set_b.iter().collect::<HashSet<_>>(); + let hashset_a = set_a.members().iter().collect::<HashSet<_>>(); + let hashset_b = set_b.members().iter().collect::<HashSet<_>>(); assert!( hashset_a.is_disjoint(&hashset_b), "Benchmark sets {set_a:?} and {set_b:?} overlap" @@ -170,7 +170,10 @@ mod tests { } // Check that the union of all sets contains all the required benchmarks - let all_members = sets.iter().flatten().collect::<HashSet<_>>(); + let all_members = sets + .iter() + .flat_map(|s| s.members()) + .collect::<HashSet<_>>(); const BENCHMARK_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/compile-benchmarks"); let all_compile_benchmarks = @@ -189,7 +192,7 @@ mod tests { let BenchmarkSetMember::CompileBenchmark(name) = benchmark; assert!( all_compile_benchmarks.contains(name), - "Compile-time benchmark {name} does not exist on disk or is a stable benchmark" + "Compile-time benchmark {name} does not exist on disk" ); } assert_eq!(all_members.len(), all_compile_benchmarks.len()); diff --git a/collector/src/bin/collector.rs b/collector/src/bin/collector.rs index c035c6c1a..94912e2ac 100644 --- a/collector/src/bin/collector.rs +++ b/collector/src/bin/collector.rs @@ -34,7 +34,7 @@ use collector::api::next_artifact::NextArtifact; use collector::artifact_stats::{ compile_and_get_stats, ArtifactStats, 
ArtifactWithStats, CargoProfile, }; -use collector::benchmark_set::{expand_benchmark_set, BenchmarkSetId, BenchmarkSetMember}; +use collector::benchmark_set::{get_benchmark_set, BenchmarkSetId, BenchmarkSetMember}; use collector::codegen::{codegen_diff, CodegenType}; use collector::compile::benchmark::category::Category; use collector::compile::benchmark::codegen_backend::CodegenBackend; @@ -1777,9 +1777,12 @@ async fn create_benchmark_configs( Option, )> { // Expand the benchmark set and figure out which benchmarks should be executed - let benchmark_set = BenchmarkSetId::new(job.target().into(), job.benchmark_set().get_id()); - let benchmark_set_members = expand_benchmark_set(benchmark_set); - log::debug!("Expanded benchmark set members: {benchmark_set_members:?}"); + let benchmark_set_id = BenchmarkSetId::new(job.target().into(), job.benchmark_set().get_id()); + let benchmark_set = get_benchmark_set(benchmark_set_id); + log::debug!( + "Expanded benchmark set members: {:?}", + benchmark_set.members() + ); let mut bench_rustc = false; let mut bench_runtime = false; @@ -1795,7 +1798,7 @@ async fn create_benchmark_configs( bench_runtime = true; } database::BenchmarkJobKind::Compiletime => { - for member in benchmark_set_members { + for member in benchmark_set.members() { match member { BenchmarkSetMember::CompileBenchmark(benchmark) => { bench_compile_benchmarks.insert(benchmark); diff --git a/database/src/pool/postgres.rs b/database/src/pool/postgres.rs index 173d763c7..45fba3215 100644 --- a/database/src/pool/postgres.rs +++ b/database/src/pool/postgres.rs @@ -1970,11 +1970,15 @@ where // We take the oldest job from the job_queue matching the benchmark_set, // target and status of 'queued' or 'in_progress' // If a job was dequeued, we increment its retry (dequeue) count + let row_opt = self .conn() .query_opt( " - WITH picked AS ( + -- We use the AS MATERIALIZED clause to ensure that Postgres will run each CTE only once, + -- and not do any optimizer magic that 
could run the CTE query multiple times. + -- See https://stackoverflow.com/a/73967537/1107768 + WITH picked AS MATERIALIZED ( SELECT id FROM diff --git a/site/src/job_queue/mod.rs b/site/src/job_queue/mod.rs index 25fc5f5e0..c4388cb30 100644 --- a/site/src/job_queue/mod.rs +++ b/site/src/job_queue/mod.rs @@ -5,7 +5,9 @@ use crate::job_queue::utils::{parse_release_string, ExtractIf}; use crate::load::{partition_in_place, SiteCtxt}; use anyhow::Context; use chrono::Utc; -use collector::benchmark_set::benchmark_set_count; +use collector::benchmark_set::{ + get_benchmark_sets_for_target, BENCHMARK_SET_RUNTIME_BENCHMARKS, BENCHMARK_SET_RUSTC, +}; use database::pool::{JobEnqueueResult, Transaction}; use database::{ BenchmarkJobKind, BenchmarkRequest, BenchmarkRequestIndex, BenchmarkRequestInsertResult, @@ -304,7 +306,7 @@ pub async fn enqueue_benchmark_request( // Target x benchmark_set x backend x profile -> BenchmarkJob for target in Target::all() { - for benchmark_set in 0..benchmark_set_count(target.into()) { + for benchmark_set in 0..get_benchmark_sets_for_target(target.into()).len() { for &backend in backends.iter() { for &profile in profiles.iter() { enqueue_job( @@ -354,7 +356,7 @@ pub async fn enqueue_benchmark_request( target, CodegenBackend::Llvm, Profile::Opt, - 0u32, + BENCHMARK_SET_RUNTIME_BENCHMARKS, BenchmarkJobKind::Runtime, EnqueueMode::Commit, ) @@ -371,7 +373,7 @@ pub async fn enqueue_benchmark_request( Target::X86_64UnknownLinuxGnu, CodegenBackend::Llvm, Profile::Opt, - 0u32, + BENCHMARK_SET_RUSTC, BenchmarkJobKind::Rustc, EnqueueMode::Commit, )