1 change: 1 addition & 0 deletions Cargo.lock


1 change: 1 addition & 0 deletions collector/Cargo.toml
@@ -11,6 +11,7 @@ anyhow = { workspace = true }
chrono = { workspace = true, features = ["serde"] }
clap = { workspace = true, features = ["derive"] }
env_logger = { workspace = true }
hashbrown = { workspace = true }
log = { workspace = true }
reqwest = { workspace = true, features = ["blocking", "json"] }
serde = { workspace = true, features = ["derive"] }
12 changes: 6 additions & 6 deletions collector/src/bin/collector.rs
@@ -225,6 +225,8 @@ fn profile_compile(
toolchain,
Some(1),
targets,
// We always want to profile everything
&hashbrown::HashSet::new(),
));
eprintln!("Finished benchmark {benchmark_id}");

@@ -1804,11 +1806,8 @@ async fn bench_compile(
print_intro: &dyn Fn(),
measure: F,
) {
let is_fresh = collector.start_compile_step(conn, benchmark_name).await;
if !is_fresh {
eprintln!("skipping {} -- already benchmarked", benchmark_name);
return;
}
collector.start_compile_step(conn, benchmark_name).await;

let mut tx = conn.transaction().await;
let (supports_stable, category) = category.db_representation();
tx.conn()
@@ -1819,7 +1818,7 @@
tx.conn(),
benchmark_name,
&shared.artifact_id,
collector.artifact_row_id,
collector,
config.is_self_profile,
);
let result = measure(&mut processor).await;
@@ -1866,6 +1865,7 @@
&shared.toolchain,
config.iterations,
&config.targets,
&collector.measured_compile_test_cases,
))
.await
.with_context(|| anyhow::anyhow!("Cannot compile {}", benchmark.name))
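The collector.rs changes above move resumption from the benchmark level to the test-case level: the old `is_fresh` early-return is removed, and the set of already-measured test cases is passed down into `Benchmark::measure` (an empty set is passed when profiling, so everything is always profiled). A minimal, self-contained sketch of that per-test-case filtering idea, using a stand-in type rather than the real `CompileTestCase`:

```rust
use std::collections::HashSet;

// Stand-in for database::selector::CompileTestCase: one combination of
// benchmark, profile and scenario (the real type also carries backend
// and target).
#[derive(Clone, PartialEq, Eq, Hash)]
struct TestCase {
    benchmark: &'static str,
    profile: &'static str,
    scenario: &'static str,
}

// Instead of skipping a whole benchmark because it was already "started",
// keep only the test cases that do not have a measurement yet.
fn remaining_test_cases(all: Vec<TestCase>, measured: &HashSet<TestCase>) -> Vec<TestCase> {
    all.into_iter().filter(|tc| !measured.contains(tc)).collect()
}

fn main() {
    let all = vec![
        TestCase { benchmark: "syn", profile: "check", scenario: "full" },
        TestCase { benchmark: "syn", profile: "check", scenario: "incr-full" },
    ];
    // Pretend the "full" scenario was measured by a previous, interrupted run.
    let measured: HashSet<TestCase> = all[..1].iter().cloned().collect();
    // Only the "incr-full" test case needs to be benchmarked again.
    assert_eq!(remaining_test_cases(all, &measured).len(), 1);
}
```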
9 changes: 9 additions & 0 deletions collector/src/compile/benchmark/codegen_backend.rs
@@ -10,3 +10,12 @@ impl CodegenBackend {
vec![CodegenBackend::Llvm, CodegenBackend::Cranelift]
}
}

impl From<CodegenBackend> for database::CodegenBackend {
fn from(value: CodegenBackend) -> Self {
match value {
CodegenBackend::Llvm => database::CodegenBackend::Llvm,
CodegenBackend::Cranelift => database::CodegenBackend::Cranelift,
}
}
}
135 changes: 117 additions & 18 deletions collector/src/compile/benchmark/mod.rs
@@ -8,6 +8,7 @@ use crate::compile::execute::{CargoProcess, Processor};
use crate::toolchain::Toolchain;
use crate::utils::wait_for_future;
use anyhow::{bail, Context};
use database::selector::CompileTestCase;
use log::debug;
use std::collections::{HashMap, HashSet};
use std::fmt::{Display, Formatter};
@@ -249,6 +250,7 @@ impl Benchmark {
toolchain: &Toolchain,
iterations: Option<usize>,
targets: &[Target],
already_computed: &hashbrown::HashSet<CompileTestCase>,
) -> anyhow::Result<()> {
if self.config.disabled {
eprintln!("Skipping {}: disabled", self.name);
@@ -279,19 +281,61 @@
return Ok(());
}

eprintln!("Preparing {}", self.name);
let mut target_dirs: Vec<((CodegenBackend, Profile, Target), TempDir)> = vec![];
struct BenchmarkDir {
dir: TempDir,
scenarios: Vec<Scenario>,
profile: Profile,
backend: CodegenBackend,
target: Target,
}

// Materialize the test cases that we want to benchmark
// We need to handle scenarios a bit specially, because they share the target directory
let mut benchmark_dirs: Vec<BenchmarkDir> = vec![];

for backend in backends {
for profile in &profiles {
for target in targets {
target_dirs.push((
(*backend, *profile, *target),
self.make_temp_dir(&self.path)?,
));
// Do we have any scenarios left to compute?
let remaining_scenarios = scenarios
.iter()
.filter(|scenario| {
self.should_run_scenario(
scenario,
profile,
backend,
target,
already_computed,
)
})
.copied()
.collect::<Vec<Scenario>>();
if remaining_scenarios.is_empty() {
continue;
}

let temp_dir = self.make_temp_dir(&self.path)?;
benchmark_dirs.push(BenchmarkDir {
dir: temp_dir,
scenarios: remaining_scenarios,
profile: *profile,
backend: *backend,
target: *target,
});
}
}
}

if benchmark_dirs.is_empty() {
eprintln!(
"Skipping {}: all test cases were previously computed",
self.name
);
return Ok(());
}

eprintln!("Preparing {}", self.name);

// In parallel (but with a limit to the number of CPUs), prepare all
// profiles. This is done in parallel vs. sequentially because:
// * We don't record any measurements during this phase, so the
@@ -325,18 +369,18 @@ impl Benchmark {
.get(),
)
.context("jobserver::new")?;
let mut threads = Vec::with_capacity(target_dirs.len());
for ((backend, profile, target), prep_dir) in &target_dirs {
let mut threads = Vec::with_capacity(benchmark_dirs.len());
for benchmark_dir in &benchmark_dirs {
let server = server.clone();
let thread = s.spawn::<_, anyhow::Result<()>>(move || {
wait_for_future(async move {
let server = server.clone();
self.mk_cargo_process(
toolchain,
prep_dir.path(),
*profile,
*backend,
*target,
benchmark_dir.dir.path(),
benchmark_dir.profile,
benchmark_dir.backend,
benchmark_dir.target,
)
.jobserver(server)
.run_rustc(false)
@@ -371,10 +415,11 @@ impl Benchmark {
let mut timing_dirs: Vec<ManuallyDrop<TempDir>> = vec![];

let benchmark_start = std::time::Instant::now();
for ((backend, profile, target), prep_dir) in &target_dirs {
let backend = *backend;
let profile = *profile;
let target = *target;
for benchmark_dir in &benchmark_dirs {
let backend = benchmark_dir.backend;
let profile = benchmark_dir.profile;
let target = benchmark_dir.target;
let scenarios = &benchmark_dir.scenarios;
eprintln!(
"Running {}: {:?} + {:?} + {:?} + {:?}",
self.name, profile, scenarios, backend, target,
@@ -394,7 +439,7 @@
}
log::debug!("Benchmark iteration {}/{}", i + 1, iterations);
// Don't delete the directory on error.
let timing_dir = ManuallyDrop::new(self.make_temp_dir(prep_dir.path())?);
let timing_dir = ManuallyDrop::new(self.make_temp_dir(benchmark_dir.dir.path())?);
let cwd = timing_dir.path();

// A full non-incremental build.
@@ -407,7 +452,7 @@

// Rustdoc does not support incremental compilation
if !profile.is_doc() {
// An incremental from scratch (slowest incremental case).
// An incremental build from scratch (slowest incremental case).
// This is required for any subsequent incremental builds.
if scenarios.iter().any(|s| s.is_incr()) {
self.mk_cargo_process(toolchain, cwd, profile, backend, target)
@@ -464,6 +509,60 @@

Ok(())
}

/// Return true if the given `scenario` should be computed.
fn should_run_scenario(
&self,
scenario: &Scenario,
profile: &Profile,
backend: &CodegenBackend,
target: &Target,
already_computed: &hashbrown::HashSet<CompileTestCase>,
) -> bool {
// Keep this in sync with the logic in `Benchmark::measure`.
if scenario.is_incr() && profile.is_doc() {
return false;
}

let benchmark = database::Benchmark::from(self.name.0.as_str());
let profile: database::Profile = (*profile).into();
let backend: database::CodegenBackend = (*backend).into();
let target: database::Target = (*target).into();

match scenario {
// For these scenarios, we can simply check if they were benchmarked or not
Scenario::Full | Scenario::IncrFull | Scenario::IncrUnchanged => {
let test_case = CompileTestCase {
benchmark,
profile,
backend,
target,
scenario: match scenario {
Scenario::Full => database::Scenario::Empty,
Scenario::IncrFull => database::Scenario::IncrementalEmpty,
Scenario::IncrUnchanged => database::Scenario::IncrementalFresh,
Scenario::IncrPatched => unreachable!(),
},
};
!already_computed.contains(&test_case)
}
// For incr-patched, it is a bit more complicated.
// If there is at least a single uncomputed `IncrPatched`, we need to rerun
// all of them, because they stack on top of one another.
// Note that we don't need to explicitly include `IncrFull` if `IncrPatched`
// is selected, as the benchmark code will always run `IncrFull` before `IncrPatched`.
Scenario::IncrPatched => self.patches.iter().any(|patch| {
let test_case = CompileTestCase {
benchmark,
profile,
scenario: database::Scenario::IncrementalPatch(patch.name),
backend,
target,
};
!already_computed.contains(&test_case)
}),
}
}
}

/// Directory containing compile-time benchmarks.
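The one non-obvious branch in the new `should_run_scenario` is `IncrPatched`: patched incremental builds are applied on top of one another, so a single missing patch measurement forces the whole chain (and the preceding `IncrFull` build) to run again. An illustrative sketch of that rule with plain strings and a standard `HashSet`, not the real types:

```rust
use std::collections::HashSet;

// If any patch in the sequence lacks a measurement, the whole IncrPatched
// chain must be re-run, because each patch builds on the incremental
// state left behind by the previous one.
fn incr_patched_needs_rerun(patches: &[&str], measured: &HashSet<String>) -> bool {
    patches.iter().any(|patch| !measured.contains(*patch))
}

fn main() {
    let patches = ["println", "dummy-fn"];

    // Only the first patch was measured before the collection was interrupted,
    // so the scenario has to run again for all patches.
    let measured: HashSet<String> = ["println".to_string()].into_iter().collect();
    assert!(incr_patched_needs_rerun(&patches, &measured));

    // With every patch measured, the scenario can be skipped entirely.
    let measured: HashSet<String> = patches.iter().map(|p| p.to_string()).collect();
    assert!(!incr_patched_needs_rerun(&patches, &measured));
}
```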
13 changes: 13 additions & 0 deletions collector/src/compile/benchmark/profile.rs
@@ -41,3 +41,16 @@ impl Profile {
}
}
}

impl From<Profile> for database::Profile {
fn from(value: Profile) -> Self {
match value {
Profile::Check => database::Profile::Check,
Profile::Debug => database::Profile::Debug,
Profile::Doc => database::Profile::Doc,
Profile::DocJson => database::Profile::DocJson,
Profile::Opt => database::Profile::Opt,
Profile::Clippy => database::Profile::Clippy,
}
}
}
14 changes: 12 additions & 2 deletions collector/src/compile/benchmark/target.rs
@@ -19,10 +19,20 @@ impl Target {
pub fn all() -> Vec<Self> {
vec![Self::X86_64UnknownLinuxGnu]
}
}

pub fn from_db_target(target: &database::Target) -> Target {
match target {
impl From<database::Target> for Target {
fn from(value: database::Target) -> Self {
match value {
database::Target::X86_64UnknownLinuxGnu => Self::X86_64UnknownLinuxGnu,
}
}
}

impl From<Target> for database::Target {
fn from(value: Target) -> Self {
match value {
Target::X86_64UnknownLinuxGnu => database::Target::X86_64UnknownLinuxGnu,
}
}
}
21 changes: 14 additions & 7 deletions collector/src/compile/execute/bencher.rs
@@ -10,6 +10,7 @@ use crate::compile::execute::{
};
use crate::toolchain::Toolchain;
use crate::utils::git::get_rustc_perf_commit;
use crate::CollectorCtx;
use anyhow::Context;
use database::CollectionId;
use futures::stream::FuturesUnordered;
@@ -42,7 +43,7 @@ pub struct BenchProcessor<'a> {
benchmark: &'a BenchmarkName,
conn: &'a mut dyn database::Connection,
artifact: &'a database::ArtifactId,
artifact_row_id: database::ArtifactIdNumber,
collector_ctx: &'a CollectorCtx,
is_first_collection: bool,
is_self_profile: bool,
tries: u8,
@@ -54,7 +55,7 @@ impl<'a> BenchProcessor<'a> {
conn: &'a mut dyn database::Connection,
benchmark: &'a BenchmarkName,
artifact: &'a database::ArtifactId,
artifact_row_id: database::ArtifactIdNumber,
collector_ctx: &'a CollectorCtx,
is_self_profile: bool,
) -> Self {
// Check we have `perf` or (`xperf.exe` and `tracelog.exe`) available.
@@ -78,7 +79,7 @@ impl<'a> BenchProcessor<'a> {
conn,
benchmark,
artifact,
artifact_row_id,
collector_ctx,
is_first_collection: true,
is_self_profile,
tries: 0,
@@ -108,7 +109,7 @@ impl<'a> BenchProcessor<'a> {
for (stat, value) in stats.iter() {
buf.push(self.conn.record_statistic(
collection,
self.artifact_row_id,
self.collector_ctx.artifact_row_id,
self.benchmark.0.as_str(),
profile,
scenario,
@@ -123,7 +124,7 @@
}

pub async fn measure_rustc(&mut self, toolchain: &Toolchain) -> anyhow::Result<()> {
rustc::measure(self.conn, toolchain, self.artifact, self.artifact_row_id).await
rustc::measure(
self.conn,
toolchain,
self.artifact,
self.collector_ctx.artifact_row_id,
)
.await
}
}

@@ -252,7 +259,7 @@ impl Processor for BenchProcessor<'_> {
.map(|profile| {
self.conn.record_raw_self_profile(
profile.collection,
self.artifact_row_id,
self.collector_ctx.artifact_row_id,
self.benchmark.0.as_str(),
profile.profile,
profile.scenario,
@@ -270,7 +277,7 @@

// FIXME: Record codegen backend in the self profile name
let prefix = PathBuf::from("self-profile")
.join(self.artifact_row_id.0.to_string())
.join(self.collector_ctx.artifact_row_id.0.to_string())
.join(self.benchmark.0.as_str())
.join(profile.profile.to_string())
.join(profile.scenario.to_id());
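In bencher.rs, `BenchProcessor` now stores a reference to the whole `CollectorCtx` rather than a bare `artifact_row_id`, and every call site reads `self.collector_ctx.artifact_row_id` instead. A rough sketch of the context's assumed shape, derived only from the fields used in this diff (the real definition lives in the collector crate and may contain more state):

```rust
// Assumed shape only: field names are taken from the call sites in this
// pull request, not copied from collector/src/lib.rs.
pub struct CollectorCtx {
    /// Database row id of the artifact currently being benchmarked.
    pub artifact_row_id: database::ArtifactIdNumber,
    /// Compile-time test cases that already have recorded measurements for
    /// this artifact; `Benchmark::measure` uses it to skip finished work.
    pub measured_compile_test_cases: hashbrown::HashSet<database::selector::CompileTestCase>,
}
```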