Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
67 changes: 49 additions & 18 deletions .gitlab/benchmarks.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,11 @@
variables:
GITLAB_BENCHMARKS_CI_IMAGE: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/benchmarking-platform:ruby-gitlab
GITLAB_DDPROF_BENCHMARK_CI_IMAGE: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/benchmarking-platform:ruby-ddprof-benchmark
BASE_CI_IMAGE: 486234852809.dkr.ecr.us-east-1.amazonaws.com/ci/benchmarking-platform:dd-trace-rb
BASE_CI_IMAGE: registry.ddbuild.io/ci/benchmarking-platform:dd-trace-rb

# -----------------------------------------------------
# Macrobenchmarks
# -----------------------------------------------------

.macrobenchmarks:
stage: macrobenchmarks
Expand Down Expand Up @@ -155,7 +159,18 @@ ddprof-benchmark:
LATEST_COMMIT_ID: $CI_COMMIT_SHA
KUBERNETES_SERVICE_ACCOUNT_OVERWRITE: dd-trace-rb


# -----------------------------------------------------
# Microbenchmarks
# -----------------------------------------------------

# Configuration for executing microbenchmarks in parallel
# Choose which benchmarks to execute, with how many CPUs, on what CI job
include:
- local: benchmarks/execution.yml

microbenchmarks:
extends: .execution # From benchmarks/execution.yml
stage: microbenchmarks
when: always
needs: []
Expand All @@ -166,29 +181,45 @@ microbenchmarks:
interruptible: false
- interruptible: true
timeout: 1h
variables:
# GitLab CI variables passed to bp-runner
CI_COMMIT_REF_NAME: $CI_COMMIT_REF_NAME
CI_COMMIT_SHA: $CI_COMMIT_SHA
CI_PROJECT_NAME: $CI_PROJECT_NAME
CI_JOB_ID: $CI_JOB_ID
CI_PIPELINE_ID: $CI_PIPELINE_ID
script:
- export ARTIFACTS_DIR="$(pwd)/artifacts" && (mkdir "${ARTIFACTS_DIR}" || :)
- export ARTIFACTS_DIR="$(pwd)/artifacts" && mkdir -p "$ARTIFACTS_DIR"
# $GROUP is defined on benchmarks/execution.yml
- eval "export BENCHMARKS=\$BENCHMARKS_$GROUP"
- git config --global url."https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.ddbuild.io/DataDog/".insteadOf "https://github.com/DataDog/"
- git clone --branch dd-trace-rb https://github.com/DataDog/benchmarking-platform benchmarking-platform && cd benchmarking-platform
- gem -v
- bundle -v
- gem list bundler
- ./steps/capture-hardware-software-info.sh
- ./steps/run-benchmarks.sh
- ./steps/analyze-results.sh
- "./steps/upload-results-to-s3.sh || :"
- "./steps/upload-results-to-benchmarking-api.sh || :"
- "./steps/post-pr-comment.sh || :"
- git clone --branch dd-trace-rb https://github.com/DataDog/benchmarking-platform platform && cd platform
- bp-runner bp-runner.yml --debug
artifacts:
name: "artifacts"
when: always
paths:
- artifacts/
expire_in: 3 months

# GitLab CI job that publishes microbenchmark results as a PR comment.
# It runs in the same stage as, and strictly after, the `microbenchmarks`
# job (declared via `needs`), clones the benchmarking-platform repo's
# dd-trace-rb branch, and delegates to bp-runner with the pr-comment
# pipeline definition.
# NOTE(review): the lines below reproduce the pasted diff verbatim; the
# scrape stripped YAML indentation, so nesting must be confirmed against
# the actual .gitlab/benchmarks.yml in the repository.
microbenchmarks-pr-comment:
stage: microbenchmarks
# Run even when earlier jobs in the pipeline failed, so a comment is
# still posted (or updated) for the PR.
when: always
needs: [microbenchmarks]
tags: ["arch:amd64"]
image: $BASE_CI_IMAGE
timeout: 30m
variables:
KUBERNETES_SERVICE_ACCOUNT_OVERWRITE: dd-trace-rb
# Quoted so the runner receives the string "true", not a YAML boolean.
FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY: "true"
# UPSTREAM_* re-export the triggering pipeline's identity for bp-runner.
UPSTREAM_BRANCH: $CI_COMMIT_REF_NAME
UPSTREAM_PROJECT_ID: $CI_PROJECT_ID
UPSTREAM_PROJECT_NAME: $CI_PROJECT_NAME
UPSTREAM_COMMIT_SHA: $CI_COMMIT_SHA
CI_COMMIT_REF_NAME: $CI_COMMIT_REF_NAME
CI_PROJECT_NAME: $CI_PROJECT_NAME
script:
- export ARTIFACTS_DIR="$(pwd)/artifacts" && mkdir -p "$ARTIFACTS_DIR"
# Rewrite github.com/DataDog URLs to the internal GitLab mirror so the
# clone below authenticates with the CI job token.
- git config --global url."https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.ddbuild.io/DataDog/".insteadOf "https://github.com/DataDog/"
- git clone --branch dd-trace-rb https://github.com/DataDog/benchmarking-platform platform && cd platform
- bp-runner bp-runner.pr-comment.yml --debug
artifacts:
name: "artifacts"
# Upload artifacts even on failure, for debugging.
when: always
paths:
- artifacts/
expire_in: 3 months
2 changes: 1 addition & 1 deletion benchmarks/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ ENV BUNDLE_SILENCE_ROOT_WARNING 1

# Reinstall a recent version of the trace to help Docker cache dependencies.
# Bump this version periodically.
RUN gem install datadog -v 1.20.0
RUN gem install datadog -v 2.28.0
Comment on lines 36 to +38
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Lol this was broken before and nobody noticed! There's no datadog 1.20.0, we renamed the library. Thanks for catching it.


WORKDIR /app

Expand Down
20 changes: 10 additions & 10 deletions benchmarks/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,20 +4,20 @@

1. Use one of the following prefixes:

- `library_` (spec in `spec/validate_benchmarks_spec.rb`)
- `profiling_` (spec in `./spec/datadog/profiling/validate_benchmarks_spec.rb`)
- `tracing_` (spec in `./spec/datadog/tracing/validate_benchmarks_spec.rb`)
- `di_` (spec in `./spec/datadog/di/validate_benchmarks_spec.rb`)
- `error_tracking` (spec in `./spec/datadog/error_tracing/validate_benchmarks_spec.rb`)
- `library_` (spec in `spec/validate_benchmarks_spec.rb`)
- `profiling_` (spec in `./spec/datadog/profiling/validate_benchmarks_spec.rb`)
- `tracing_` (spec in `./spec/datadog/tracing/validate_benchmarks_spec.rb`)
- `di_` (spec in `./spec/datadog/di/validate_benchmarks_spec.rb`)
- `error_tracking` (spec in `./spec/datadog/error_tracing/validate_benchmarks_spec.rb`)

2. Add the new file to `run_all.sh` in this directory.
2. Ensure the benchmark outputs results to `<filename>-results.json` (or `<filename>-<variant>-results.json` for multiple outputs).

3. Depending on the prefix, add the new file to the correct
`validate_benchmarks_spec.rb` as listed above
3. Add the new file to `benchmarks/execution.yml` in the appropriate group. See that file for details on groups and CPU allocation.

4. Depending on the prefix, add the new file to the correct `validate_benchmarks_spec.rb` as listed above.

## Adding Benchmarks For a New Product

1. Create a `validate_benchmarks_spec.rb` test in the product subdirectory,
using the existing files as a template.
1. Create a `validate_benchmarks_spec.rb` test in the product subdirectory, using the existing files as a template.

2. Update this README to add the new product in the previous section.
14 changes: 7 additions & 7 deletions benchmarks/di_instrument.rb
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ def run_benchmark
Target.new.test_method
end

x.save! 'di-instrument-results.json' unless VALIDATE_BENCHMARK_MODE
x.save! "#{File.basename(__FILE__, '.rb')}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end

Expand Down Expand Up @@ -134,7 +134,7 @@ def run_benchmark
Target.new.test_method
end

x.save! 'di-instrument-results.json' unless VALIDATE_BENCHMARK_MODE
x.save! "#{File.basename(__FILE__, '.rb')}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end

Expand Down Expand Up @@ -179,7 +179,7 @@ def run_benchmark
Target.new.test_method_for_line_probe
end

x.save! 'di-instrument-results.json' unless VALIDATE_BENCHMARK_MODE
x.save! "#{File.basename(__FILE__, '.rb')}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end

Expand Down Expand Up @@ -231,7 +231,7 @@ def run_benchmark
DITarget.new.test_method_for_line_probe
end

x.save! 'di-instrument-results.json' unless VALIDATE_BENCHMARK_MODE
x.save! "#{File.basename(__FILE__, '.rb')}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end

Expand Down Expand Up @@ -262,7 +262,7 @@ def run_benchmark
Target.new.test_method
end

x.save! 'di-instrument-results.json' unless VALIDATE_BENCHMARK_MODE
x.save! "#{File.basename(__FILE__, '.rb')}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end

Expand All @@ -282,7 +282,7 @@ def run_benchmark
Target.new.test_method_for_line_probe
end

x.save! 'di-instrument-results.json' unless VALIDATE_BENCHMARK_MODE
x.save! "#{File.basename(__FILE__, '.rb')}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end

Expand All @@ -300,7 +300,7 @@ def run_benchmark
Target.new.not_instrumented
end

x.save! 'di-instrument-results.json' unless VALIDATE_BENCHMARK_MODE
x.save! "#{File.basename(__FILE__, '.rb')}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end
end
Expand Down
8 changes: 4 additions & 4 deletions benchmarks/error_tracking_simple.rb
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ def benchmark_simple_no_error_tracking(with_error: false)
end
end

x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE
x.save! "#{File.basename(__FILE__, '.rb')}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end
end
Expand All @@ -61,7 +61,7 @@ def benchmark_simple_all(with_error: false)
end
end

x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE
x.save! "#{File.basename(__FILE__, '.rb')}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end
end
Expand All @@ -82,7 +82,7 @@ def benchmark_simple_user(with_error: false)
end
end

x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE
x.save! "#{File.basename(__FILE__, '.rb')}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end
end
Expand All @@ -103,7 +103,7 @@ def benchmark_simple_third_party(with_error: false)
end
end

x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE
x.save! "#{File.basename(__FILE__, '.rb')}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end
end
Expand Down
52 changes: 52 additions & 0 deletions benchmarks/execution.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
# This file defines how benchmarks are grouped and executed in CI:
# 1. Each GROUP runs as a separate CI job (defined by parallel:matrix)
# 2. Within a group, benchmarks run in parallel with CPU isolation
# 3. Each benchmark runs $REPETITIONS times for stability analysis
#
# Adding a new benchmark:
# 1. Add the benchmark filename to the appropriate group below
# 2. Ensure the benchmark outputs: <filename>-results.json (or <filename>-<variant>-results.json)
# 3. Ensure the number of requested CPUs (CPUS_PER_BENCHMARK * number of benchmarks) is within the CPU_AFFINITY range, otherwise benchmarks will be queued and make the CI job longer
#
# Adding a new group:
# 1. Define a new anchor (e.g., &mygroup) with the list of benchmarks
# 2. Add BENCHMARKS_mygroup: *mygroup to .execution.variables
# 3. Add GROUP: mygroup to parallel:matrix
# NOTE(review): indentation below reproduces the pasted diff; the scrape
# stripped YAML nesting, so confirm structure against the real file.

# Hidden key (leading dot): GitLab ignores it as a job; it exists only to
# host the anchors. Each `>-` folded scalar collapses the filenames into a
# single space-separated string with no trailing newline — the shell-friendly
# form consumed via `export BENCHMARKS=$BENCHMARKS_$GROUP` in benchmarks.yml.
.groups:
- &profiling >-
profiling_allocation.rb
profiling_gc.rb
profiling_hold_resume_interruptions.rb
profiling_http_transport.rb
profiling_memory_sample_serialize.rb
profiling_sample_loop_v2.rb
profiling_sample_serialize.rb
profiling_sample_gvl.rb
profiling_string_storage_intern.rb

- &other >-
error_tracking_simple.rb
tracing_trace.rb
di_instrument.rb
library_gem_loading.rb

# Hidden job template; the `microbenchmarks` job in .gitlab/benchmarks.yml
# pulls this in with `extends: .execution`.
.execution:
variables:
# Benchmark execution settings
REPETITIONS: "6" # How many times to run each benchmark
CPU_AFFINITY: "24-47" # Which CPUs to use for all benchmarks
CPUS_PER_BENCHMARK: "2" # How many CPUs to use for each benchmark

# NOTE: GitLab CI does not allow CI job names above 256 characters.
# Therefore, we use 'BENCHMARKS_<GROUP>' to hold the list of benchmarks to run in each group, otherwise the list (which is longer than 256 characters) would be set as the name of the CI job.

# Variables holding the list of benchmarks to run in each group.
# '<GROUP>' in 'BENCHMARKS_<GROUP>' matches '<GROUP>' in 'parallel:matrix'.
BENCHMARKS_profiling: *profiling
BENCHMARKS_other: *other

# One CI job is spawned per matrix entry; each job sees its own $GROUP.
parallel:
matrix:
- GROUP: profiling
- GROUP: other
2 changes: 1 addition & 1 deletion benchmarks/library_gem_loading.rb
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ def benchmark_gem_loading
raise unless status.success?
end

x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE
x.save! "#{File.basename(__FILE__, '.rb')}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end
RUBY
Expand Down
4 changes: 2 additions & 2 deletions benchmarks/profiling_allocation.rb
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ def run_benchmark

x.report('Allocations (baseline)', 'BasicObject.new')

x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE
x.save! "#{File.basename(__FILE__, '.rb')}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end

Expand All @@ -48,7 +48,7 @@ def run_benchmark

x.report("Allocations (#{ENV["CONFIG"]})", 'BasicObject.new')

x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE
x.save! "#{File.basename(__FILE__, '.rb')}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end
end
Expand Down
12 changes: 6 additions & 6 deletions benchmarks/profiling_gc.rb
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ def run_benchmark
Datadog::Profiling::Collectors::ThreadContext::Testing._native_sample_after_gc(@collector, false)
end

x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE
x.save! "#{File.basename(__FILE__, '.rb')}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end

Expand All @@ -58,7 +58,7 @@ def run_benchmark
@recorder.serialize
end

x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE
x.save! "#{File.basename(__FILE__, '.rb')}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end

Expand All @@ -70,7 +70,7 @@ def run_benchmark

x.report('Major GC runs (profiling disabled)', 'GC.start')

x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE
x.save! "#{File.basename(__FILE__, '.rb')}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end

Expand All @@ -89,7 +89,7 @@ def run_benchmark

x.report('Major GC runs (profiling enabled)', 'GC.start')

x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE
x.save! "#{File.basename(__FILE__, '.rb')}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end

Expand All @@ -103,7 +103,7 @@ def run_benchmark

x.report('Allocations (profiling disabled)', 'Object.new')

x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE
x.save! "#{File.basename(__FILE__, '.rb')}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end

Expand All @@ -122,7 +122,7 @@ def run_benchmark

x.report('Allocations (profiling enabled)', 'Object.new')

x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE
x.save! "#{File.basename(__FILE__, '.rb')}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end

Expand Down
2 changes: 1 addition & 1 deletion benchmarks/profiling_hold_resume_interruptions.rb
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ def run_benchmark
Datadog::Profiling::Collectors::CpuAndWallTimeWorker._native_resume_signals
end

x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE
x.save! "#{File.basename(__FILE__, '.rb')}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end
end
Expand Down
2 changes: 1 addition & 1 deletion benchmarks/profiling_http_transport.rb
Original file line number Diff line number Diff line change
Expand Up @@ -102,7 +102,7 @@ def run_benchmark
run_once
end

x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE
x.save! "#{File.basename(__FILE__, '.rb')}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end
end
Expand Down
2 changes: 1 addition & 1 deletion benchmarks/profiling_memory_sample_serialize.rb
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ def run_benchmark
retained_objs.size # Dummy action to make sure this is still alive
end

x.save! "#{File.basename(__FILE__)}-results.json" unless VALIDATE_BENCHMARK_MODE
x.save! "#{File.basename(__FILE__, '.rb')}-results.json" unless VALIDATE_BENCHMARK_MODE
x.compare!
end
end
Expand Down
Loading
Loading