From 703e9bb839ca82212191512cb188f916a72eedf5 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Mon, 22 Sep 2025 10:48:07 +0000 Subject: [PATCH 01/38] Initial benchmark version --- .gitignore | 3 + spec/performance/bench.sh | 177 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 180 insertions(+) create mode 100755 spec/performance/bench.sh diff --git a/.gitignore b/.gitignore index 3f63eaf013..5b838f6567 100644 --- a/.gitignore +++ b/.gitignore @@ -67,6 +67,9 @@ yalc.lock /spec/dummy/.bsb.lock /spec/dummy/**/*.res.js +# Performance test results +/bench_results + # Generated by ROR FS-based Registry generated diff --git a/spec/performance/bench.sh b/spec/performance/bench.sh new file mode 100755 index 0000000000..a0f6f1de40 --- /dev/null +++ b/spec/performance/bench.sh @@ -0,0 +1,177 @@ +#!/usr/bin/env bash +set -euo pipefail +#set -x # Uncomment for debugging commands + +# Benchmark parameters +TARGET="http://${BASE_URL:-localhost:3001}/${ROUTE:-server_side_hello_world_hooks}" +# requests per second; if "max" will get maximum number of queries instead of a fixed rate +RATE=${RATE:-50} +# virtual users for k6 +VUS=${VUS:-100} +DURATION_SEC=${DURATION_SEC:-10} +DURATION="${DURATION_SEC}s" +# Tools to run (comma-separated) +TOOLS=${TOOLS:-fortio,vegeta,k6} + +# Validate input parameters +if ! { [ "$RATE" = "max" ] || { [[ "$RATE" =~ ^[0-9]+(\.[0-9]+)?$ ]] && (( $(bc -l <<< "$RATE > 0") )); }; }; then + echo "Error: RATE must be 'max' or a positive number (got: '$RATE')" >&2 + exit 1 +fi +if ! { [[ "$VUS" =~ ^[0-9]+$ ]] && [ "$VUS" -gt 0 ]; }; then + echo "Error: VUS must be a positive integer (got: '$VUS')" >&2 + exit 1 +fi +if ! { [[ "$DURATION_SEC" =~ ^[0-9]+(\.[0-9]+)?$ ]] && (( $(bc -l <<< "$DURATION_SEC > 0") )); }; then + echo "Error: DURATION_SEC must be a positive number (got: '$DURATION_SEC')" >&2 + exit 1 +fi + +OUTDIR="bench_results" + +# Precompute checks for each tool +RUN_FORTIO=0 +RUN_VEGETA=0 +RUN_K6=0 +[[ ",$TOOLS," == *",fortio,"* ]] && RUN_FORTIO=1 +[[ ",$TOOLS," == *",vegeta,"* ]] && RUN_VEGETA=1 +[[ ",$TOOLS," == *",k6,"* ]] && RUN_K6=1 + +for cmd in ${TOOLS//,/ } jq column awk tee bc; do + if ! 
command -v "$cmd" >/dev/null 2>&1; then + echo "Error: required tool '$cmd' is not installed" >&2 + exit 1 + fi +done + +TIMEOUT_SEC=60 +START=$(date +%s) +until curl -fsS "$TARGET" >/dev/null; do + if (( $(date +%s) - START > TIMEOUT_SEC )); then + echo "Error: Target $TARGET not responding within ${TIMEOUT_SEC}s" >&2 + exit 1 + fi + sleep 1 +done + +mkdir -p "$OUTDIR" + +if [ "$RATE" = "max" ]; then + FORTIO_ARGS=(-qps 0) + VEGETA_ARGS=(-rate=infinity) + K6_SCENARIOS="{ + max_rate: { + executor: 'shared-iterations', + vus: $VUS, + iterations: $((VUS * DURATION_SEC * 10)), + maxDuration: '$DURATION' + } + }" +else + FORTIO_ARGS=(-qps "$RATE" -uniform) + VEGETA_ARGS=(-rate="$RATE") + K6_SCENARIOS="{ + constant_rate: { + executor: 'constant-arrival-rate', + rate: $RATE, + timeUnit: '1s', + duration: '$DURATION', + preAllocatedVUs: $VUS, + maxVUs: $((VUS * 10)) + } + }" +fi + +if (( RUN_FORTIO )); then + echo "===> Fortio" + # TODO https://github.com/fortio/fortio/wiki/FAQ#i-want-to-get-the-best-results-what-flags-should-i-pass + fortio load "${FORTIO_ARGS[@]}" -t "$DURATION" -timeout 30s -json "$OUTDIR/fortio.json" "$TARGET" \ + | tee "$OUTDIR/fortio.txt" +fi + +if (( RUN_VEGETA )); then + echo + echo "===> Vegeta" + echo "GET $TARGET" | vegeta attack "${VEGETA_ARGS[@]}" -duration="$DURATION" \ + | tee "$OUTDIR/vegeta.bin" \ + | vegeta report | tee "$OUTDIR/vegeta.txt" + vegeta report -type=json "$OUTDIR/vegeta.bin" > "$OUTDIR/vegeta.json" +fi + +if (( RUN_K6 )); then + echo + echo "===> k6" + cat < "$OUTDIR/k6_test.js" +import http from 'k6/http'; +import { check } from 'k6'; + +export const options = { + scenarios: $K6_SCENARIOS, +}; + +export default function () { + const response = http.get('$TARGET'); + check(response, { + 'status=200': r => r.status === 200, + // you can add more if needed: + // 'status=500': r => r.status === 500, + }); +} +EOF + + k6 run --summary-export="$OUTDIR/k6_summary.json" --summary-trend-stats "min,avg,med,max,p(90),p(99)" "$OUTDIR/k6_test.js" | tee "$OUTDIR/k6.txt" +fi + +echo +echo "===> Parsing results and generating summary" + +echo -e "Tool\tRPS\tp50(ms)\tp90(ms)\tp99(ms)\tStatus" > "$OUTDIR/summary.txt" + +if (( RUN_FORTIO )); then + FORTIO_RPS=$(jq '.ActualQPS' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}') + FORTIO_P50=$(jq '.DurationHistogram.Percentiles[] | select(.Percentile==50) | .Value * 1000' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}') + FORTIO_P90=$(jq '.DurationHistogram.Percentiles[] | select(.Percentile==90) | .Value * 1000' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}') + FORTIO_P99=$(jq '.DurationHistogram.Percentiles[] | select(.Percentile==99) | .Value * 1000' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}') + FORTIO_STATUS=$(jq -r '.RetCodes | to_entries | map("\(.key)=\(.value)") | join(",")' "$OUTDIR/fortio.json") + echo -e "Fortio\t$FORTIO_RPS\t$FORTIO_P50\t$FORTIO_P90\t$FORTIO_P99\t$FORTIO_STATUS" >> "$OUTDIR/summary.txt" +fi + +if (( RUN_VEGETA )); then + # .throughput is successful_reqs/total_period, .rate is all_requests/attack_period + VEGETA_RPS=$(jq '.throughput' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}') + VEGETA_P50=$(jq '.latencies["50th"] / 1000000' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}') + VEGETA_P90=$(jq '.latencies["90th"] / 1000000' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}') + VEGETA_P99=$(jq '.latencies["99th"] / 1000000' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}') + VEGETA_STATUS=$(jq -r '.status_codes | to_entries | map("\(.key)=\(.value)") | join(",")' 
"$OUTDIR/vegeta.json") + echo -e "Vegeta\t$VEGETA_RPS\t$VEGETA_P50\t$VEGETA_P90\t$VEGETA_P99\t$VEGETA_STATUS" >> "$OUTDIR/summary.txt" +fi + +if (( RUN_K6 )); then + K6_RPS=$(jq '.metrics.iterations.rate' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') + K6_P50=$(jq '.metrics.http_req_duration.med' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') + K6_P90=$(jq '.metrics.http_req_duration["p(90)"]' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') + K6_P99=$(jq '.metrics.http_req_duration["p(99)"]' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') + # Status: compute successful vs failed requests + K6_REQS_TOTAL=$(jq '.metrics.http_reqs.count' "$OUTDIR/k6_summary.json") + K6_STATUS=$(jq -r ' + .root_group.checks + | to_entries + | map(.key[7:] + "=" + (.value.passes|tostring)) + | join(",") + ' "$OUTDIR/k6_summary.json") + K6_REQS_KNOWN_STATUS=$(jq -r ' + .root_group.checks + | to_entries + | map(.value.passes) + | add + ' "$OUTDIR/k6_summary.json") + K6_REQS_OTHER=$(( K6_REQS_TOTAL - K6_REQS_KNOWN_STATUS )) + if [ "$K6_REQS_OTHER" -gt 0 ]; then + K6_STATUS="$K6_STATUS,other=$K6_REQS_OTHER" + fi + echo -e "k6\t$K6_RPS\t$K6_P50\t$K6_P90\t$K6_P99\t$K6_STATUS" >> "$OUTDIR/summary.txt" +fi + +echo +echo "Summary saved to $OUTDIR/summary.txt" +column -t -s $'\t' "$OUTDIR/summary.txt" From 8fcc3b9836a6e53ff4650096d660f74202420435 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Thu, 2 Oct 2025 14:27:53 +0000 Subject: [PATCH 02/38] Add production scripts --- spec/dummy/bin/prod | 29 +++++++++++++++++++++++++++++ spec/dummy/bin/prod-assets | 9 +++++++++ 2 files changed, 38 insertions(+) create mode 100755 spec/dummy/bin/prod create mode 100755 spec/dummy/bin/prod-assets diff --git a/spec/dummy/bin/prod b/spec/dummy/bin/prod new file mode 100755 index 0000000000..35d0d355ce --- /dev/null +++ b/spec/dummy/bin/prod @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +# Run only after ./prod-assets + +# Check if assets are precompiled +MANIFEST="public/webpack/production/manifest.json" + +if [ ! -d "public/assets" ]; then + echo "ERROR: public/assets not found. Run ./bin/prod-assets first" + exit 1 +fi + +if [ ! -f "$MANIFEST" ]; then + echo "ERROR: $MANIFEST not found. 
Run ./bin/prod-assets first" + exit 1 +fi + +# Simple up-to-date check: warn if source files are newer than manifest.json +if find client config -type f \( -name "*.[jt]s" -o -name "*.[jt]sx" \) -newer "$MANIFEST" 2>/dev/null | grep -q .; then + echo "WARNING: client or config has changes newer than compiled assets" + echo "Consider running ./bin/prod-assets to rebuild" +fi + +if [ -f "yarn.lock" ] && [ "yarn.lock" -nt "$MANIFEST" ]; then + echo "WARNING: yarn.lock is newer than compiled assets" + echo "Consider running ./bin/prod-assets to rebuild" +fi + +NODE_ENV=production RAILS_ENV=production bundle exec rails server -p 3001 diff --git a/spec/dummy/bin/prod-assets b/spec/dummy/bin/prod-assets new file mode 100755 index 0000000000..cf493134fa --- /dev/null +++ b/spec/dummy/bin/prod-assets @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +export NODE_ENV=production +export RAILS_ENV=production +if [ "$CI" = "true" ]; then + bundle exec bootsnap precompile --gemfile app/ lib/ config/ +fi +yarn run build:rescript +bundle exec rails assets:precompile From baea2429fb479c25a899c575a0c7b7de1722b2fd Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 3 Oct 2025 16:17:25 +0000 Subject: [PATCH 03/38] Initial benchmark workflow --- .github/workflows/benchmark.yml | 343 ++++++++++++++++++++++++++++++++ 1 file changed, 343 insertions(+) create mode 100644 .github/workflows/benchmark.yml diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml new file mode 100644 index 0000000000..f6f42a783a --- /dev/null +++ b/.github/workflows/benchmark.yml @@ -0,0 +1,343 @@ +name: Benchmark Workflow + +on: + # https://github.com/mxschmitt/action-tmate?tab=readme-ov-file#manually-triggered-debug + workflow_dispatch: + inputs: + debug_enabled: + description: 'Enable SSH access (⚠️ Security Risk - read workflow comments)' + required: false + default: false + type: boolean + rate: + description: 'Requests per second (use "max" for maximum throughput)' + required: false + default: '50' + type: string + duration_sec: + description: 'Duration in seconds' + required: false + default: 10 + type: number + vus: + description: 'Virtual users for k6' + required: false + default: 100 + type: number + tools: + description: 'Comma-separated list of tools to run' + required: false + default: 'fortio,vegeta,k6' + type: string + push: + branches: + - master + paths-ignore: + - '**.md' + - 'docs/**' + pull_request: + paths-ignore: + - '**.md' + - 'docs/**' +env: + FORTIO_VERSION: "1.73.0" + K6_VERSION: "1.3.0" + VEGETA_VERSION: "12.13.0" + # Benchmark parameters + RATE: ${{ github.event.inputs.rate || '50' }} + DURATION_SEC: ${{ github.event.inputs.duration_sec || '10' }} + VUS: ${{ github.event.inputs.vus || '100' }} + TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} + +jobs: + benchmark: + runs-on: ubuntu-latest + + steps: + # ============================================ + # STEP 1: CHECKOUT CODE + # ============================================ + - name: Checkout repository + uses: actions/checkout@v4 + + # ============================================ + # STEP 2: OPTIONAL SSH ACCESS + # ============================================ + # NOTE: Interactive confirmation is not possible in GitHub Actions. + # As a secure workaround, SSH access is gated by the workflow_dispatch + # input variable 'debug_enabled' which defaults to false. + # Users must explicitly set this to true to enable SSH. 
+ + - name: SSH Warning + if: ${{ github.event.inputs.debug_enabled == true || github.event.inputs.debug_enabled == 'true' }} + run: | + echo "⚠️ ⚠️ ⚠️ SSH ACCESS ENABLED ⚠️ ⚠️ ⚠️" + echo "" + echo "SECURITY NOTICE:" + echo " - SSH access exposes your GitHub Actions runner" + echo " - Only proceed if you understand and accept the risks" + echo " - Do NOT store secrets or sensitive data on the runner" + echo " - Access is limited to the workflow initiator only" + echo " - The session will remain open until manually terminated" + echo "" + echo "⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️ ⚠️" + + - name: Setup SSH access (if enabled) + if: ${{ github.event.inputs.debug_enabled == true || github.event.inputs.debug_enabled == 'true' }} + uses: mxschmitt/action-tmate@v3 + with: + detached: true + limit-access-to-actor: true # Only workflow trigger can access + + # ============================================ + # STEP 3: INSTALL BENCHMARKING TOOLS + # ============================================ + + - name: Add tools directory to PATH + run: | + mkdir -p ~/bin + echo "$HOME/bin" >> $GITHUB_PATH + + - name: Cache Fortio binary + id: cache-fortio + uses: actions/cache@v4 + with: + path: ~/bin/fortio + key: fortio-${{ runner.os }}-${{ runner.arch }}-${{ env.FORTIO_VERSION }} + + - name: Install Fortio + if: steps.cache-fortio.outputs.cache-hit != 'true' + run: | + echo "📦 Installing Fortio v${FORTIO_VERSION}" + + # Download and extract fortio binary + wget -q https://github.com/fortio/fortio/releases/download/v${FORTIO_VERSION}/fortio-linux_amd64-${FORTIO_VERSION}.tgz + tar -xzf fortio-linux_amd64-${FORTIO_VERSION}.tgz + + # Store in cache directory + mv usr/bin/fortio ~/bin/ + + - name: Cache Vegeta binary + id: cache-vegeta + uses: actions/cache@v4 + with: + path: ~/bin/vegeta + key: vegeta-${{ runner.os }}-${{ runner.arch }}-${{ env.VEGETA_VERSION }} + + - name: Install Vegeta + if: steps.cache-vegeta.outputs.cache-hit != 'true' + run: | + echo "📦 Installing Vegeta v${VEGETA_VERSION}" + + # Download and extract vegeta binary + wget -q https://github.com/tsenart/vegeta/releases/download/v${VEGETA_VERSION}/vegeta_${VEGETA_VERSION}_linux_amd64.tar.gz + tar -xzf vegeta_${VEGETA_VERSION}_linux_amd64.tar.gz + + # Store in cache directory + mv vegeta ~/bin/ + + - name: Setup k6 + uses: grafana/setup-k6-action@v1 + with: + k6-version: ${{ env.K6_VERSION }} + + # ============================================ + # STEP 4: START APPLICATION SERVER + # ============================================ + + - name: Setup Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: '3.4' + bundler: 2.5.9 + + - name: Fix dependency for libyaml-dev + run: sudo apt install libyaml-dev -y + + - name: Setup Node + uses: actions/setup-node@v4 + with: + node-version: '22' + cache: yarn + cache-dependency-path: '**/yarn.lock' + + - name: Print system information + run: | + echo "Linux release: "; cat /etc/issue + echo "Current user: "; whoami + echo "Current directory: "; pwd + echo "Ruby version: "; ruby -v + echo "Node version: "; node -v + echo "Yarn version: "; yarn --version + echo "Bundler version: "; bundle --version + + - name: Install Node modules with Yarn for renderer package + run: | + yarn install --no-progress --no-emoji --frozen-lockfile + npm install --global yalc + + - name: yalc publish for react-on-rails + run: cd packages/react-on-rails && yarn install --no-progress --no-emoji --frozen-lockfile && yalc publish + + - name: yalc add react-on-rails + run: cd spec/dummy && yalc add react-on-rails + + - name: Install Node 
modules with Yarn for dummy app + run: cd spec/dummy && yarn install --no-progress --no-emoji + + - name: Save dummy app ruby gems to cache + uses: actions/cache@v4 + with: + path: spec/dummy/vendor/bundle + key: dummy-app-gem-cache-${{ hashFiles('spec/dummy/Gemfile.lock') }} + + - name: Install Ruby Gems for dummy app + run: | + cd spec/dummy + bundle lock --add-platform 'x86_64-linux' + if ! bundle check --path=vendor/bundle; then + bundle _2.5.9_ install --path=vendor/bundle --jobs=4 --retry=3 + fi + + - name: generate file system-based packs + run: cd spec/dummy && RAILS_ENV="production" bundle exec rake react_on_rails:generate_packs + + - name: Prepare production assets + run: | + set -e # Exit on any error + echo "🔨 Building production assets..." + cd spec/dummy + + if ! bin/prod-assets; then + echo "❌ ERROR: Failed to build production assets" + exit 1 + fi + + echo "✅ Production assets built successfully" + + - name: Start production server + run: | + set -e # Exit on any error + echo "🚀 Starting production server..." + cd spec/dummy + + # Start server in background + bin/prod & + echo "Server started in background" + + # Wait for server to be ready (max 30 seconds) + echo "⏳ Waiting for server to be ready..." + for i in {1..30}; do + if curl -fsS http://localhost:3001 > /dev/null; then + echo "✅ Server is ready and responding" + exit 0 + fi + echo " Attempt $i/30: Server not ready yet..." + sleep 1 + done + + echo "❌ ERROR: Server failed to start within 30 seconds" + exit 1 + + # ============================================ + # STEP 5: RUN BENCHMARKS + # ============================================ + + - name: Execute benchmark suite + timeout-minutes: 20 + run: | + set -e # Exit on any error + echo "🏃 Running benchmark suite..." + echo "Script: spec/performance/bench.sh" + echo "" + echo "Benchmark parameters:" + echo " - RATE: ${RATE}" + echo " - DURATION_SEC: ${DURATION_SEC}" + echo " - VUS: ${VUS}" + echo " - TOOLS: ${TOOLS}" + echo "" + + if ! spec/performance/bench.sh; then + echo "❌ ERROR: Benchmark execution failed" + exit 1 + fi + + echo "✅ Benchmark suite completed successfully" + + - name: Validate benchmark results + run: | + set -e # Exit on any error + echo "🔍 Validating benchmark output files..." + + RESULTS_DIR="bench_results" + REQUIRED_FILES=("summary.txt") + MISSING_FILES=() + + # Check if results directory exists + if [ ! -d "${RESULTS_DIR}" ]; then + echo "❌ ERROR: Benchmark results directory '${RESULTS_DIR}' not found" + exit 1 + fi + + # List all generated files + echo "Generated files:" + ls -lh ${RESULTS_DIR}/ || true + echo "" + + # Check for required files + for file in "${REQUIRED_FILES[@]}"; do + if [ ! -f "${RESULTS_DIR}/${file}" ]; then + MISSING_FILES+=("${file}") + fi + done + + # Report validation results + if [ ${#MISSING_FILES[@]} -eq 0 ]; then + echo "✅ All required benchmark output files present" + echo "📊 Summary preview:" + head -20 ${RESULTS_DIR}/summary.txt || true + else + echo "⚠️ WARNING: Some required files are missing:" + printf ' - %s\n' "${MISSING_FILES[@]}" + echo "Continuing with available results..." 
+ fi + + # ============================================ + # STEP 6: COLLECT BENCHMARK RESULTS + # ============================================ + + - name: Upload benchmark results + uses: actions/upload-artifact@v4 + if: always() # Upload even if benchmark fails + with: + name: benchmark-results-${{ github.run_number }} + path: bench_results/ + retention-days: 30 + if-no-files-found: warn + + - name: Verify artifact upload + if: success() + run: | + echo "✅ Benchmark results uploaded as workflow artifacts" + echo "📦 Artifact name: benchmark-results-${{ github.run_number }}" + echo "🔗 Access artifacts from the Actions tab in GitHub" + + # ============================================ + # WORKFLOW COMPLETION + # ============================================ + + - name: Workflow summary + if: always() + run: | + echo "📋 Benchmark Workflow Summary" + echo "==============================" + echo "Status: ${{ job.status }}" + echo "Run number: ${{ github.run_number }}" + echo "Triggered by: ${{ github.actor }}" + echo "Branch: ${{ github.ref_name }}" + echo "" + if [ "${{ job.status }}" == "success" ]; then + echo "✅ All steps completed successfully" + else + echo "❌ Workflow encountered errors - check logs above" + fi From 42061f75c45979203ceea25038a249401bbde937 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 12:49:50 +0000 Subject: [PATCH 04/38] Add server warm-up to benchmark --- spec/performance/bench.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/spec/performance/bench.sh b/spec/performance/bench.sh index a0f6f1de40..ebafa55080 100755 --- a/spec/performance/bench.sh +++ b/spec/performance/bench.sh @@ -54,6 +54,13 @@ until curl -fsS "$TARGET" >/dev/null; do sleep 1 done +echo "Warming up server with 10 requests..." +for i in {1..10}; do + curl -fsS "$TARGET" >/dev/null || true + sleep 0.5 +done +echo "Warm-up complete" + mkdir -p "$OUTDIR" if [ "$RATE" = "max" ]; then From efb4854d794abc5ba2b903d0325e62243eff1b48 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 13:13:44 +0000 Subject: [PATCH 05/38] Make request timeout a parameter --- .github/workflows/benchmark.yml | 6 ++++++ spec/performance/bench.sh | 13 +++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index f6f42a783a..03c63719b9 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -19,6 +19,11 @@ on: required: false default: 10 type: number + request_timeout: + description: 'Request timeout (e.g., "60s", "1m", "90s")' + required: false + default: '60s' + type: string vus: description: 'Virtual users for k6' required: false @@ -46,6 +51,7 @@ env: # Benchmark parameters RATE: ${{ github.event.inputs.rate || '50' }} DURATION_SEC: ${{ github.event.inputs.duration_sec || '10' }} + REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} VUS: ${{ github.event.inputs.vus || '100' }} TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} diff --git a/spec/performance/bench.sh b/spec/performance/bench.sh index ebafa55080..5227e1080d 100755 --- a/spec/performance/bench.sh +++ b/spec/performance/bench.sh @@ -10,6 +10,8 @@ RATE=${RATE:-50} VUS=${VUS:-100} DURATION_SEC=${DURATION_SEC:-10} DURATION="${DURATION_SEC}s" +# request timeout (duration string like "60s", "1m", "90s") +REQUEST_TIMEOUT=${REQUEST_TIMEOUT:-60s} # Tools to run (comma-separated) TOOLS=${TOOLS:-fortio,vegeta,k6} @@ -26,6 +28,10 @@ if ! 
{ [[ "$DURATION_SEC" =~ ^[0-9]+(\.[0-9]+)?$ ]] && (( $(bc -l <<< "$DURATION echo "Error: DURATION_SEC must be a positive number (got: '$DURATION_SEC')" >&2 exit 1 fi +if ! [[ "$REQUEST_TIMEOUT" =~ ^([0-9]+(\.[0-9]+)?[smh])+$ ]]; then + echo "Error: REQUEST_TIMEOUT must be a duration like '60s', '1m', '1.5m' (got: '$REQUEST_TIMEOUT')" >&2 + exit 1 +fi OUTDIR="bench_results" @@ -92,14 +98,14 @@ fi if (( RUN_FORTIO )); then echo "===> Fortio" # TODO https://github.com/fortio/fortio/wiki/FAQ#i-want-to-get-the-best-results-what-flags-should-i-pass - fortio load "${FORTIO_ARGS[@]}" -t "$DURATION" -timeout 30s -json "$OUTDIR/fortio.json" "$TARGET" \ + fortio load "${FORTIO_ARGS[@]}" -t "$DURATION" -timeout "$REQUEST_TIMEOUT" -json "$OUTDIR/fortio.json" "$TARGET" \ | tee "$OUTDIR/fortio.txt" fi if (( RUN_VEGETA )); then echo echo "===> Vegeta" - echo "GET $TARGET" | vegeta attack "${VEGETA_ARGS[@]}" -duration="$DURATION" \ + echo "GET $TARGET" | vegeta attack "${VEGETA_ARGS[@]}" -duration="$DURATION" -timeout="$REQUEST_TIMEOUT" \ | tee "$OUTDIR/vegeta.bin" \ | vegeta report | tee "$OUTDIR/vegeta.txt" vegeta report -type=json "$OUTDIR/vegeta.bin" > "$OUTDIR/vegeta.json" @@ -114,6 +120,9 @@ import { check } from 'k6'; export const options = { scenarios: $K6_SCENARIOS, + httpReq: { + timeout: '$REQUEST_TIMEOUT', + }, }; export default function () { From bc6a88092e97fa3b501512d63427475db808fb67 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 13:27:16 +0000 Subject: [PATCH 06/38] Update defaults for now --- .github/workflows/benchmark.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 03c63719b9..7d55dd8f61 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -12,12 +12,12 @@ on: rate: description: 'Requests per second (use "max" for maximum throughput)' required: false - default: '50' + default: 'max' type: string duration_sec: description: 'Duration in seconds' required: false - default: 10 + default: 30 type: number request_timeout: description: 'Request timeout (e.g., "60s", "1m", "90s")' @@ -49,8 +49,8 @@ env: K6_VERSION: "1.3.0" VEGETA_VERSION: "12.13.0" # Benchmark parameters - RATE: ${{ github.event.inputs.rate || '50' }} - DURATION_SEC: ${{ github.event.inputs.duration_sec || '10' }} + RATE: ${{ github.event.inputs.rate || 'max' }} + DURATION_SEC: ${{ github.event.inputs.duration_sec || 30 }} REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} VUS: ${{ github.event.inputs.vus || '100' }} TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} From a270da69121bc589d32b0ab8f125d3dc37f54582 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 13:39:08 +0000 Subject: [PATCH 07/38] Fix knip error --- knip.ts | 3 +++ 1 file changed, 3 insertions(+) diff --git a/knip.ts b/knip.ts index d7047863b6..8abda77f74 100644 --- a/knip.ts +++ b/knip.ts @@ -15,6 +15,9 @@ const config: KnipConfig = { // Pro package binaries used in Pro workflows 'playwright', 'e2e-test', + // Local binaries + 'bin/.*', + 'spec/performance/bench.sh', ], ignore: ['react_on_rails_pro/**'], ignoreDependencies: [ From 943cee89818a3cae4f5c868be7cf839dc780aea9 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 14:04:18 +0000 Subject: [PATCH 08/38] Enable clustered mode in production --- .github/workflows/benchmark.yml | 8 +++++++ spec/dummy/config/puma.rb | 41 ++++++++++++++++++++------------- 2 files changed, 33 insertions(+), 16 
deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 7d55dd8f61..10f04028ca 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -34,6 +34,11 @@ on: required: false default: 'fortio,vegeta,k6' type: string + web_concurrency: + description: 'Number of Puma worker processes' + required: false + default: 2 + type: number push: branches: - master @@ -54,6 +59,7 @@ env: REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} VUS: ${{ github.event.inputs.vus || '100' }} TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} + WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || '2' }} jobs: benchmark: @@ -259,8 +265,10 @@ jobs: echo "Benchmark parameters:" echo " - RATE: ${RATE}" echo " - DURATION_SEC: ${DURATION_SEC}" + echo " - REQUEST_TIMEOUT: ${REQUEST_TIMEOUT}" echo " - VUS: ${VUS}" echo " - TOOLS: ${TOOLS}" + echo " - WEB_CONCURRENCY: ${WEB_CONCURRENCY}" echo "" if ! spec/performance/bench.sh; then diff --git a/spec/dummy/config/puma.rb b/spec/dummy/config/puma.rb index de5feec982..01b93c7d91 100644 --- a/spec/dummy/config/puma.rb +++ b/spec/dummy/config/puma.rb @@ -10,10 +10,12 @@ min_threads_count = ENV.fetch("RAILS_MIN_THREADS") { max_threads_count } threads min_threads_count, max_threads_count +rails_env = ENV.fetch("RAILS_ENV", "development") + # Specifies the `worker_timeout` threshold that Puma will use to wait before # terminating a worker in development environments. # -worker_timeout 3600 if ENV.fetch("RAILS_ENV", "development") == "development" +worker_timeout 3600 if rails_env == "development" # Specifies the `port` that Puma will listen on to receive requests; default is 3000. # @@ -21,25 +23,32 @@ # Specifies the `environment` that Puma will run in. # -environment ENV.fetch("RAILS_ENV", "development") +environment rails_env # Specifies the `pidfile` that Puma will use. pidfile ENV.fetch("PIDFILE", "tmp/pids/server.pid") -# Specifies the number of `workers` to boot in clustered mode. -# Workers are forked web server processes. If using threads and workers together -# the concurrency of the application would be max `threads` * `workers`. -# Workers do not work on JRuby or Windows (both of which do not support -# processes). -# -# workers ENV.fetch("WEB_CONCURRENCY") { 2 } - -# Use the `preload_app!` method when specifying a `workers` number. -# This directive tells Puma to first boot the application and load code -# before forking the application. This takes advantage of Copy On Write -# process behavior so workers use less memory. -# -# preload_app! +if rails_env == "production" + # Specifies the number of `workers` to boot in clustered mode. + # Workers are forked web server processes. If using threads and workers together + # the concurrency of the application would be max `threads` * `workers`. + # Workers do not work on JRuby or Windows (both of which do not support + # processes). + # + workers ENV.fetch("WEB_CONCURRENCY", 2) + + # Use the `preload_app!` method when specifying a `workers` number. + # This directive tells Puma to first boot the application and load code + # before forking the application. This takes advantage of Copy On Write + # process behavior so workers use less memory. + # + preload_app! + + # Specifies the `worker_shutdown_timeout` threshold that Puma will use to wait before + # terminating a worker. + # + worker_shutdown_timeout 60 +end # Allow puma to be restarted by `bin/rails restart` command. 
plugin :tmp_restart From 11aefcf119b3064ae2b426eb85b3025966934320 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 14:44:21 +0000 Subject: [PATCH 09/38] Add MAX_CONNECTIONS --- .github/workflows/benchmark.yml | 16 +++++++++++----- spec/performance/bench.sh | 34 +++++++++++++++++++++------------ 2 files changed, 33 insertions(+), 17 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 10f04028ca..638920a40d 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -24,10 +24,14 @@ on: required: false default: '60s' type: string - vus: - description: 'Virtual users for k6' + connections: + description: 'Concurrent connections/virtual users' + required: false + default: 10 + type: number + max_connections: + description: 'Maximum connections/virtual users' required: false - default: 100 type: number tools: description: 'Comma-separated list of tools to run' @@ -57,7 +61,8 @@ env: RATE: ${{ github.event.inputs.rate || 'max' }} DURATION_SEC: ${{ github.event.inputs.duration_sec || 30 }} REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} - VUS: ${{ github.event.inputs.vus || '100' }} + CONNECTIONS: ${{ github.event.inputs.connections || '10' }} + MAX_CONNECTIONS: ${{ github.event.inputs.max_connections || github.event.inputs.connections || '10' }} TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || '2' }} @@ -266,7 +271,8 @@ jobs: echo " - RATE: ${RATE}" echo " - DURATION_SEC: ${DURATION_SEC}" echo " - REQUEST_TIMEOUT: ${REQUEST_TIMEOUT}" - echo " - VUS: ${VUS}" + echo " - CONNECTIONS: ${CONNECTIONS}" + echo " - MAX_CONNECTIONS: ${MAX_CONNECTIONS}" echo " - TOOLS: ${TOOLS}" echo " - WEB_CONCURRENCY: ${WEB_CONCURRENCY}" echo "" diff --git a/spec/performance/bench.sh b/spec/performance/bench.sh index 5227e1080d..dca256fbf4 100755 --- a/spec/performance/bench.sh +++ b/spec/performance/bench.sh @@ -6,8 +6,10 @@ set -euo pipefail TARGET="http://${BASE_URL:-localhost:3001}/${ROUTE:-server_side_hello_world_hooks}" # requests per second; if "max" will get maximum number of queries instead of a fixed rate RATE=${RATE:-50} -# virtual users for k6 -VUS=${VUS:-100} +# concurrent connections/virtual users +CONNECTIONS=${CONNECTIONS:-10} +# maximum connections/virtual users +MAX_CONNECTIONS=${MAX_CONNECTIONS:-$CONNECTIONS} DURATION_SEC=${DURATION_SEC:-10} DURATION="${DURATION_SEC}s" # request timeout (duration string like "60s", "1m", "90s") @@ -20,8 +22,12 @@ if ! { [ "$RATE" = "max" ] || { [[ "$RATE" =~ ^[0-9]+(\.[0-9]+)?$ ]] && (( $(bc echo "Error: RATE must be 'max' or a positive number (got: '$RATE')" >&2 exit 1 fi -if ! { [[ "$VUS" =~ ^[0-9]+$ ]] && [ "$VUS" -gt 0 ]; }; then - echo "Error: VUS must be a positive integer (got: '$VUS')" >&2 +if ! { [[ "$CONNECTIONS" =~ ^[0-9]+$ ]] && [ "$CONNECTIONS" -gt 0 ]; }; then + echo "Error: CONNECTIONS must be a positive integer (got: '$CONNECTIONS')" >&2 + exit 1 +fi +if ! { [[ "$MAX_CONNECTIONS" =~ ^[0-9]+$ ]] && [ "$MAX_CONNECTIONS" -gt 0 ]; }; then + echo "Error: MAX_CONNECTIONS must be a positive integer (got: '$MAX_CONNECTIONS')" >&2 exit 1 fi if ! 
{ [[ "$DURATION_SEC" =~ ^[0-9]+(\.[0-9]+)?$ ]] && (( $(bc -l <<< "$DURATION_SEC > 0") )); }; then @@ -70,27 +76,31 @@ echo "Warm-up complete" mkdir -p "$OUTDIR" if [ "$RATE" = "max" ]; then - FORTIO_ARGS=(-qps 0) - VEGETA_ARGS=(-rate=infinity) + if [ "$CONNECTIONS" != "$MAX_CONNECTIONS" ]; then + echo "For RATE=max, CONNECTIONS (got $CONNECTIONS) and MAX_CONNECTIONS (got $MAX_CONNECTIONS) should be the same" + exit 1 + fi + FORTIO_ARGS=(-qps 0 -c "$CONNECTIONS") + VEGETA_ARGS=(-rate=infinity --workers="$CONNECTIONS" --max-workers="$CONNECTIONS") K6_SCENARIOS="{ max_rate: { executor: 'shared-iterations', - vus: $VUS, - iterations: $((VUS * DURATION_SEC * 10)), + vus: $CONNECTIONS, + iterations: $((CONNECTIONS * DURATION_SEC * 10)), maxDuration: '$DURATION' } }" else - FORTIO_ARGS=(-qps "$RATE" -uniform) - VEGETA_ARGS=(-rate="$RATE") + FORTIO_ARGS=(-qps "$RATE" -uniform -c "$CONNECTIONS") + VEGETA_ARGS=(-rate="$RATE" --workers="$CONNECTIONS" --max-workers="$MAX_CONNECTIONS") K6_SCENARIOS="{ constant_rate: { executor: 'constant-arrival-rate', rate: $RATE, timeUnit: '1s', duration: '$DURATION', - preAllocatedVUs: $VUS, - maxVUs: $((VUS * 10)) + preAllocatedVUs: $CONNECTIONS, + maxVUs: $MAX_CONNECTIONS } }" fi From 1a57bf4c621e547f0ab738e35a3e89dcef49af53 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 14:45:22 +0000 Subject: [PATCH 10/38] Fix max rate K6 scenario --- spec/performance/bench.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/spec/performance/bench.sh b/spec/performance/bench.sh index dca256fbf4..d147c3e071 100755 --- a/spec/performance/bench.sh +++ b/spec/performance/bench.sh @@ -84,10 +84,9 @@ if [ "$RATE" = "max" ]; then VEGETA_ARGS=(-rate=infinity --workers="$CONNECTIONS" --max-workers="$CONNECTIONS") K6_SCENARIOS="{ max_rate: { - executor: 'shared-iterations', + executor: 'constant-vus', vus: $CONNECTIONS, - iterations: $((CONNECTIONS * DURATION_SEC * 10)), - maxDuration: '$DURATION' + duration: '$DURATION' } }" else From 2645dc3a3fdd31da35c26fd1bea7ab2cb7f9c170 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 14:52:55 +0000 Subject: [PATCH 11/38] Reorder workflow parameters more logically --- .github/workflows/benchmark.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 638920a40d..aa8c1aa66b 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -33,16 +33,16 @@ on: description: 'Maximum connections/virtual users' required: false type: number - tools: - description: 'Comma-separated list of tools to run' - required: false - default: 'fortio,vegeta,k6' - type: string web_concurrency: description: 'Number of Puma worker processes' required: false default: 2 type: number + tools: + description: 'Comma-separated list of tools to run' + required: false + default: 'fortio,vegeta,k6' + type: string push: branches: - master @@ -63,8 +63,8 @@ env: REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} CONNECTIONS: ${{ github.event.inputs.connections || '10' }} MAX_CONNECTIONS: ${{ github.event.inputs.max_connections || github.event.inputs.connections || '10' }} - TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || '2' }} + TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} jobs: benchmark: @@ -273,8 +273,8 @@ jobs: echo " - REQUEST_TIMEOUT: ${REQUEST_TIMEOUT}" echo " - CONNECTIONS: 
${CONNECTIONS}" echo " - MAX_CONNECTIONS: ${MAX_CONNECTIONS}" - echo " - TOOLS: ${TOOLS}" echo " - WEB_CONCURRENCY: ${WEB_CONCURRENCY}" + echo " - TOOLS: ${TOOLS}" echo "" if ! spec/performance/bench.sh; then From d2f4254ee4b7480419cea6fd84526bb2e6800b2c Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 15:15:39 +0000 Subject: [PATCH 12/38] Closer to recommended Fortio options --- spec/performance/bench.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spec/performance/bench.sh b/spec/performance/bench.sh index d147c3e071..b8a5be9256 100755 --- a/spec/performance/bench.sh +++ b/spec/performance/bench.sh @@ -90,7 +90,7 @@ if [ "$RATE" = "max" ]; then } }" else - FORTIO_ARGS=(-qps "$RATE" -uniform -c "$CONNECTIONS") + FORTIO_ARGS=(-qps "$RATE" -uniform -nocatchup -c "$CONNECTIONS") VEGETA_ARGS=(-rate="$RATE" --workers="$CONNECTIONS" --max-workers="$MAX_CONNECTIONS") K6_SCENARIOS="{ constant_rate: { From 84d76a539189a276bee6e5dd5f424e9c84b7f6da Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 15:23:37 +0000 Subject: [PATCH 13/38] Allow configuring RAILS_MAX/MIN_THREADS in the workflow --- .github/workflows/benchmark.yml | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index aa8c1aa66b..f04d5a070f 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -36,7 +36,16 @@ on: web_concurrency: description: 'Number of Puma worker processes' required: false - default: 2 + default: 4 + type: number + rails_max_threads: + description: 'Maximum number of Puma threads' + required: false + default: 3 + type: number + rails_min_threads: + description: 'Minimum number of Puma threads (same as maximum if not set)' + required: false type: number tools: description: 'Comma-separated list of tools to run' @@ -61,9 +70,11 @@ env: RATE: ${{ github.event.inputs.rate || 'max' }} DURATION_SEC: ${{ github.event.inputs.duration_sec || 30 }} REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} - CONNECTIONS: ${{ github.event.inputs.connections || '10' }} - MAX_CONNECTIONS: ${{ github.event.inputs.max_connections || github.event.inputs.connections || '10' }} - WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || '2' }} + CONNECTIONS: ${{ github.event.inputs.connections || 10 }} + MAX_CONNECTIONS: ${{ github.event.inputs.max_connections || github.event.inputs.connections || 10 }} + WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || 4 }} + RAILS_MAX_THREADS: ${{ github.event.inputs.rails_max_threads || 3 }} + RAILS_MIN_THREADS: ${{ github.event.inputs.rails_min_threads || github.event.inputs.rails_max_threads || 3 }} TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} jobs: @@ -274,6 +285,8 @@ jobs: echo " - CONNECTIONS: ${CONNECTIONS}" echo " - MAX_CONNECTIONS: ${MAX_CONNECTIONS}" echo " - WEB_CONCURRENCY: ${WEB_CONCURRENCY}" + echo " - RAILS_MAX_THREADS: ${RAILS_MAX_THREADS}" + echo " - RAILS_MIN_THREADS: ${RAILS_MIN_THREADS}" echo " - TOOLS: ${TOOLS}" echo "" From 3f0f3d4ef09293aad6fdbc9b78ea56ead4aeacfb Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 5 Nov 2025 15:57:09 +0000 Subject: [PATCH 14/38] Move showing benchmark params to bench.sh for simplicity --- .github/workflows/benchmark.yml | 13 ------------- spec/performance/bench.sh | 12 ++++++++++++ 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml 
index f04d5a070f..355fd3d596 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -276,19 +276,6 @@ jobs: run: | set -e # Exit on any error echo "🏃 Running benchmark suite..." - echo "Script: spec/performance/bench.sh" - echo "" - echo "Benchmark parameters:" - echo " - RATE: ${RATE}" - echo " - DURATION_SEC: ${DURATION_SEC}" - echo " - REQUEST_TIMEOUT: ${REQUEST_TIMEOUT}" - echo " - CONNECTIONS: ${CONNECTIONS}" - echo " - MAX_CONNECTIONS: ${MAX_CONNECTIONS}" - echo " - WEB_CONCURRENCY: ${WEB_CONCURRENCY}" - echo " - RAILS_MAX_THREADS: ${RAILS_MAX_THREADS}" - echo " - RAILS_MIN_THREADS: ${RAILS_MIN_THREADS}" - echo " - TOOLS: ${TOOLS}" - echo "" if ! spec/performance/bench.sh; then echo "❌ ERROR: Benchmark execution failed" diff --git a/spec/performance/bench.sh b/spec/performance/bench.sh index b8a5be9256..b83d429497 100755 --- a/spec/performance/bench.sh +++ b/spec/performance/bench.sh @@ -56,6 +56,18 @@ for cmd in ${TOOLS//,/ } jq column awk tee bc; do fi done +echo "Benchmark parameters: + - RATE: ${RATE:-unset} + - DURATION_SEC: ${DURATION_SEC:-unset} + - REQUEST_TIMEOUT: ${REQUEST_TIMEOUT:-unset} + - CONNECTIONS: ${CONNECTIONS:-unset} + - MAX_CONNECTIONS: ${MAX_CONNECTIONS:-unset} + - WEB_CONCURRENCY: ${WEB_CONCURRENCY:-unset} + - RAILS_MAX_THREADS: ${RAILS_MAX_THREADS:-unset} + - RAILS_MIN_THREADS: ${RAILS_MIN_THREADS:-unset} + - TOOLS: ${TOOLS:-unset} +" + TIMEOUT_SEC=60 START=$(date +%s) until curl -fsS "$TARGET" >/dev/null; do From 11ed6c99bf6872213227ba5dc9372c29b693f4d6 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 7 Nov 2025 18:36:03 +0000 Subject: [PATCH 15/38] Convert the benchmark script to Ruby --- .github/workflows/benchmark.yml | 2 +- knip.ts | 2 +- spec/performance/bench.rb | 318 ++++++++++++++++++++++++++++++++ spec/performance/bench.sh | 214 --------------------- 4 files changed, 320 insertions(+), 216 deletions(-) create mode 100755 spec/performance/bench.rb delete mode 100755 spec/performance/bench.sh diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 355fd3d596..7b60cab6a1 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -277,7 +277,7 @@ jobs: set -e # Exit on any error echo "🏃 Running benchmark suite..." - if ! spec/performance/bench.sh; then + if ! 
ruby spec/performance/bench.rb; then echo "❌ ERROR: Benchmark execution failed" exit 1 fi diff --git a/knip.ts b/knip.ts index 8abda77f74..0bebe2c9b6 100644 --- a/knip.ts +++ b/knip.ts @@ -10,6 +10,7 @@ const config: KnipConfig = { ignoreBinaries: [ // Has to be installed globally 'yalc', + 'ruby', // Used in package.json scripts (devDependency, so unlisted in production mode) 'nps', // Pro package binaries used in Pro workflows @@ -17,7 +18,6 @@ const config: KnipConfig = { 'e2e-test', // Local binaries 'bin/.*', - 'spec/performance/bench.sh', ], ignore: ['react_on_rails_pro/**'], ignoreDependencies: [ diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb new file mode 100755 index 0000000000..6f9a9536f4 --- /dev/null +++ b/spec/performance/bench.rb @@ -0,0 +1,318 @@ +#!/usr/bin/env ruby +# frozen_string_literal: true + +require "json" +require "fileutils" +require "net/http" +require "uri" + +# Benchmark parameters +BASE_URL = ENV.fetch("BASE_URL", "localhost:3001") +ROUTE = ENV.fetch("ROUTE", "server_side_hello_world_hooks") +TARGET = URI.parse("http://#{BASE_URL}/#{ROUTE}") +# requests per second; if "max" will get maximum number of queries instead of a fixed rate +RATE = ENV.fetch("RATE", "50") +# concurrent connections/virtual users +CONNECTIONS = ENV.fetch("CONNECTIONS", "10").to_i +# maximum connections/virtual users +MAX_CONNECTIONS = ENV.fetch("MAX_CONNECTIONS", CONNECTIONS.to_s).to_i +DURATION_SEC = ENV.fetch("DURATION_SEC", "10").to_f +DURATION = "#{DURATION_SEC}s".freeze +# request timeout (duration string like "60s", "1m", "90s") +REQUEST_TIMEOUT = ENV.fetch("REQUEST_TIMEOUT", "60s") +# Tools to run (comma-separated) +TOOLS = ENV.fetch("TOOLS", "fortio,vegeta,k6").split(",") + +OUTDIR = "bench_results" +FORTIO_JSON = "#{OUTDIR}/fortio.json".freeze +FORTIO_TXT = "#{OUTDIR}/fortio.txt".freeze +VEGETA_BIN = "#{OUTDIR}/vegeta.bin".freeze +VEGETA_JSON = "#{OUTDIR}/vegeta.json".freeze +VEGETA_TXT = "#{OUTDIR}/vegeta.txt".freeze +K6_TEST_JS = "#{OUTDIR}/k6_test.js".freeze +K6_SUMMARY_JSON = "#{OUTDIR}/k6_summary.json".freeze +K6_TXT = "#{OUTDIR}/k6.txt".freeze +SUMMARY_TXT = "#{OUTDIR}/summary.txt".freeze + +# Validate input parameters +def validate_rate(rate) + return if rate == "max" + + return if rate.match?(/^\d+(\.\d+)?$/) && rate.to_f.positive? + + raise "RATE must be 'max' or a positive number (got: '#{rate}')" +end + +def validate_positive_integer(value, name) + return if value.is_a?(Integer) && value.positive? + + raise "#{name} must be a positive integer (got: '#{value}')" +end + +def validate_duration(value, name) + return if value.is_a?(Numeric) && value.positive? 
+ + raise "#{name} must be a positive number (got: '#{value}')" +end + +def validate_timeout(value) + return if value.match?(/^(\d+(\.\d+)?[smh])+$/) + + raise "REQUEST_TIMEOUT must be a duration like '60s', '1m', '1.5m' (got: '#{value}')" +end + +def parse_json_file(file_path, tool_name) + JSON.parse(File.read(file_path)) +rescue Errno::ENOENT + raise "#{tool_name} results file not found: #{file_path}" +rescue JSON::ParserError => e + raise "Failed to parse #{tool_name} JSON: #{e.message}" +rescue StandardError => e + raise "Failed to read #{tool_name} results: #{e.message}" +end + +validate_rate(RATE) +validate_positive_integer(CONNECTIONS, "CONNECTIONS") +validate_positive_integer(MAX_CONNECTIONS, "MAX_CONNECTIONS") +validate_duration(DURATION_SEC, "DURATION_SEC") +validate_timeout(REQUEST_TIMEOUT) + +raise "MAX_CONNECTIONS (#{MAX_CONNECTIONS}) must be >= CONNECTIONS (#{CONNECTIONS})" if MAX_CONNECTIONS < CONNECTIONS + +# Precompute checks for each tool +run_fortio = TOOLS.include?("fortio") +run_vegeta = TOOLS.include?("vegeta") +run_k6 = TOOLS.include?("k6") + +# Check required tools are installed +required_tools = TOOLS + %w[column tee] +required_tools.each do |cmd| + raise "required tool '#{cmd}' is not installed" unless system("command -v #{cmd} >/dev/null 2>&1") +end + +puts <<~PARAMS + Benchmark parameters: + - RATE: #{RATE} + - DURATION_SEC: #{DURATION_SEC} + - REQUEST_TIMEOUT: #{REQUEST_TIMEOUT} + - CONNECTIONS: #{CONNECTIONS} + - MAX_CONNECTIONS: #{MAX_CONNECTIONS} + - WEB_CONCURRENCY: #{ENV['WEB_CONCURRENCY'] || 'unset'} + - RAILS_MAX_THREADS: #{ENV['RAILS_MAX_THREADS'] || 'unset'} + - RAILS_MIN_THREADS: #{ENV['RAILS_MIN_THREADS'] || 'unset'} + - TOOLS: #{TOOLS.join(', ')} +PARAMS + +# Helper method to check if server is responding +def server_responding?(uri) + response = Net::HTTP.get_response(uri) + response.is_a?(Net::HTTPSuccess) +rescue StandardError + false +end + +# Wait for the server to be ready +TIMEOUT_SEC = 60 +start_time = Time.now +loop do + break if server_responding?(TARGET) + + raise "Target #{TARGET} not responding within #{TIMEOUT_SEC}s" if Time.now - start_time > TIMEOUT_SEC + + sleep 1 +end + +# Warm up server +puts "Warming up server with 10 requests..." 
+10.times do + server_responding?(TARGET) + sleep 0.5 +end +puts "Warm-up complete" + +FileUtils.mkdir_p(OUTDIR) + +# Configure tool-specific arguments +if RATE == "max" + if CONNECTIONS != MAX_CONNECTIONS + raise "For RATE=max, CONNECTIONS must be equal to MAX_CONNECTIONS (got #{CONNECTIONS} and #{MAX_CONNECTIONS})" + end + + fortio_args = ["-qps", 0, "-c", CONNECTIONS] + vegeta_args = ["-rate=infinity", "--workers=#{CONNECTIONS}", "--max-workers=#{CONNECTIONS}"] + k6_scenarios = <<~JS.strip + { + max_rate: { + executor: 'constant-vus', + vus: #{CONNECTIONS}, + duration: '#{DURATION}' + } + } + JS +else + fortio_args = ["-qps", RATE, "-uniform", "-nocatchup", "-c", CONNECTIONS] + vegeta_args = ["-rate=#{RATE}", "--workers=#{CONNECTIONS}", "--max-workers=#{MAX_CONNECTIONS}"] + k6_scenarios = <<~JS.strip + { + constant_rate: { + executor: 'constant-arrival-rate', + rate: #{RATE}, + timeUnit: '1s', + duration: '#{DURATION}', + preAllocatedVUs: #{CONNECTIONS}, + maxVUs: #{MAX_CONNECTIONS} + } + } + JS +end + +# Run Fortio +if run_fortio + puts "===> Fortio" + # TODO: https://github.com/fortio/fortio/wiki/FAQ#i-want-to-get-the-best-results-what-flags-should-i-pass + fortio_cmd = [ + "fortio", "load", + *fortio_args, + "-t", DURATION, + "-timeout", REQUEST_TIMEOUT, + "-json", FORTIO_JSON, + TARGET + ].join(" ") + raise "Fortio benchmark failed" unless system("#{fortio_cmd} | tee #{FORTIO_TXT}") +end + +# Run Vegeta +if run_vegeta + puts "\n===> Vegeta" + vegeta_cmd = [ + "echo", "'GET #{TARGET}'", "|", + "vegeta", "attack", + *vegeta_args, + "-duration=#{DURATION}", + "-timeout=#{REQUEST_TIMEOUT}" + ].join(" ") + raise "Vegeta attack failed" unless system("#{vegeta_cmd} | tee #{VEGETA_BIN} | vegeta report | tee #{VEGETA_TXT}") + raise "Vegeta report generation failed" unless system("vegeta report -type=json #{VEGETA_BIN} > #{VEGETA_JSON}") +end + +# Run k6 +if run_k6 + puts "\n===> k6" + k6_script = <<~JS + import http from 'k6/http'; + import { check } from 'k6'; + + export const options = { + scenarios: #{k6_scenarios}, + httpReq: { + timeout: '#{REQUEST_TIMEOUT}', + }, + }; + + export default function () { + const response = http.get('#{TARGET}'); + check(response, { + 'status=200': r => r.status === 200, + // you can add more if needed: + // 'status=500': r => r.status === 500, + }); + } + JS + File.write(K6_TEST_JS, k6_script) + k6_command = "k6 run --summary-export=#{K6_SUMMARY_JSON} --summary-trend-stats 'min,avg,med,max,p(90),p(99)'" + raise "k6 benchmark failed" unless system("#{k6_command} #{K6_TEST_JS} | tee #{K6_TXT}") +end + +puts "\n===> Parsing results and generating summary" + +# Initialize summary file +File.write(SUMMARY_TXT, "Tool\tRPS\tp50(ms)\tp90(ms)\tp99(ms)\tStatus\n") + +# Parse Fortio results +if run_fortio + begin + fortio_data = parse_json_file(FORTIO_JSON, "Fortio") + fortio_rps = fortio_data["ActualQPS"]&.round(2) || "missing" + + percentiles = fortio_data.dig("DurationHistogram", "Percentiles") || [] + p50_data = percentiles.find { |p| p["Percentile"] == 50 } + p90_data = percentiles.find { |p| p["Percentile"] == 90 } + p99_data = percentiles.find { |p| p["Percentile"] == 99 } + + raise "Fortio results missing percentile data" unless p50_data && p90_data && p99_data + + fortio_p50 = (p50_data["Value"] * 1000).round(2) + fortio_p90 = (p90_data["Value"] * 1000).round(2) + fortio_p99 = (p99_data["Value"] * 1000).round(2) + fortio_status = fortio_data["RetCodes"]&.map { |k, v| "#{k}=#{v}" }&.join(",") || "unknown" + File.open(SUMMARY_TXT, "a") do |f| + f.puts 
"Fortio\t#{fortio_rps}\t#{fortio_p50}\t#{fortio_p90}\t#{fortio_p99}\t#{fortio_status}" + end + rescue StandardError => e + puts "Error: #{e.message}" + File.open(SUMMARY_TXT, "a") do |f| + f.puts "Fortio\tFAILED\tFAILED\tFAILED\tFAILED\t#{e.message}" + end + end +end + +# Parse Vegeta results +if run_vegeta + begin + vegeta_data = parse_json_file(VEGETA_JSON, "Vegeta") + # .throughput is successful_reqs/total_period, .rate is all_requests/attack_period + vegeta_rps = vegeta_data["throughput"]&.round(2) || "missing" + vegeta_p50 = vegeta_data.dig("latencies", "50th")&./(1_000_000.0)&.round(2) || "missing" + vegeta_p90 = vegeta_data.dig("latencies", "90th")&./(1_000_000.0)&.round(2) || "missing" + vegeta_p99 = vegeta_data.dig("latencies", "99th")&./(1_000_000.0)&.round(2) || "missing" + vegeta_status = vegeta_data["status_codes"]&.map { |k, v| "#{k}=#{v}" }&.join(",") || "unknown" + vegeta_line = [ + "Vegeta", vegeta_rps, vegeta_p50, vegeta_p90, vegeta_p99, vegeta_status + ].join("\t") + File.open(SUMMARY_TXT, "a") do |f| + f.puts vegeta_line + end + rescue StandardError => e + puts "Error: #{e.message}" + File.open(SUMMARY_TXT, "a") do |f| + f.puts "Vegeta\tFAILED\tFAILED\tFAILED\tFAILED\t#{e.message}" + end + end +end + +# Parse k6 results +if run_k6 + begin + k6_data = parse_json_file(K6_SUMMARY_JSON, "k6") + k6_rps = k6_data.dig("metrics", "iterations", "rate")&.round(2) || "missing" + k6_p50 = k6_data.dig("metrics", "http_req_duration", "med")&.round(2) || "missing" + k6_p90 = k6_data.dig("metrics", "http_req_duration", "p(90)")&.round(2) || "missing" + k6_p99 = k6_data.dig("metrics", "http_req_duration", "p(99)")&.round(2) || "missing" + + # Status: compute successful vs failed requests + k6_reqs_total = k6_data.dig("metrics", "http_reqs", "count") || 0 + k6_checks = k6_data.dig("root_group", "checks") || {} + # Extract status code from check name (e.g., "status=200" -> "200") + # Handle both "status=XXX" format and other potential formats + k6_status_parts = k6_checks.map do |name, check| + status_label = name.start_with?("status=") ? name.delete_prefix("status=") : name + "#{status_label}=#{check['passes']}" + end + k6_reqs_known_status = k6_checks.values.sum { |check| check["passes"] || 0 } + k6_reqs_other = k6_reqs_total - k6_reqs_known_status + k6_status_parts << "other=#{k6_reqs_other}" if k6_reqs_other.positive? + k6_status = k6_status_parts.empty? ? 
"missing" : k6_status_parts.join(",") + + File.open(SUMMARY_TXT, "a") do |f| + f.puts "k6\t#{k6_rps}\t#{k6_p50}\t#{k6_p90}\t#{k6_p99}\t#{k6_status}" + end + rescue StandardError => e + puts "Error: #{e.message}" + File.open(SUMMARY_TXT, "a") do |f| + f.puts "k6\tFAILED\tFAILED\tFAILED\tFAILED\t#{e.message}" + end + end +end + +puts "\nSummary saved to #{SUMMARY_TXT}" +system("column", "-t", "-s", "\t", SUMMARY_TXT) diff --git a/spec/performance/bench.sh b/spec/performance/bench.sh deleted file mode 100755 index b83d429497..0000000000 --- a/spec/performance/bench.sh +++ /dev/null @@ -1,214 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail -#set -x # Uncomment for debugging commands - -# Benchmark parameters -TARGET="http://${BASE_URL:-localhost:3001}/${ROUTE:-server_side_hello_world_hooks}" -# requests per second; if "max" will get maximum number of queries instead of a fixed rate -RATE=${RATE:-50} -# concurrent connections/virtual users -CONNECTIONS=${CONNECTIONS:-10} -# maximum connections/virtual users -MAX_CONNECTIONS=${MAX_CONNECTIONS:-$CONNECTIONS} -DURATION_SEC=${DURATION_SEC:-10} -DURATION="${DURATION_SEC}s" -# request timeout (duration string like "60s", "1m", "90s") -REQUEST_TIMEOUT=${REQUEST_TIMEOUT:-60s} -# Tools to run (comma-separated) -TOOLS=${TOOLS:-fortio,vegeta,k6} - -# Validate input parameters -if ! { [ "$RATE" = "max" ] || { [[ "$RATE" =~ ^[0-9]+(\.[0-9]+)?$ ]] && (( $(bc -l <<< "$RATE > 0") )); }; }; then - echo "Error: RATE must be 'max' or a positive number (got: '$RATE')" >&2 - exit 1 -fi -if ! { [[ "$CONNECTIONS" =~ ^[0-9]+$ ]] && [ "$CONNECTIONS" -gt 0 ]; }; then - echo "Error: CONNECTIONS must be a positive integer (got: '$CONNECTIONS')" >&2 - exit 1 -fi -if ! { [[ "$MAX_CONNECTIONS" =~ ^[0-9]+$ ]] && [ "$MAX_CONNECTIONS" -gt 0 ]; }; then - echo "Error: MAX_CONNECTIONS must be a positive integer (got: '$MAX_CONNECTIONS')" >&2 - exit 1 -fi -if ! { [[ "$DURATION_SEC" =~ ^[0-9]+(\.[0-9]+)?$ ]] && (( $(bc -l <<< "$DURATION_SEC > 0") )); }; then - echo "Error: DURATION_SEC must be a positive number (got: '$DURATION_SEC')" >&2 - exit 1 -fi -if ! [[ "$REQUEST_TIMEOUT" =~ ^([0-9]+(\.[0-9]+)?[smh])+$ ]]; then - echo "Error: REQUEST_TIMEOUT must be a duration like '60s', '1m', '1.5m' (got: '$REQUEST_TIMEOUT')" >&2 - exit 1 -fi - -OUTDIR="bench_results" - -# Precompute checks for each tool -RUN_FORTIO=0 -RUN_VEGETA=0 -RUN_K6=0 -[[ ",$TOOLS," == *",fortio,"* ]] && RUN_FORTIO=1 -[[ ",$TOOLS," == *",vegeta,"* ]] && RUN_VEGETA=1 -[[ ",$TOOLS," == *",k6,"* ]] && RUN_K6=1 - -for cmd in ${TOOLS//,/ } jq column awk tee bc; do - if ! command -v "$cmd" >/dev/null 2>&1; then - echo "Error: required tool '$cmd' is not installed" >&2 - exit 1 - fi -done - -echo "Benchmark parameters: - - RATE: ${RATE:-unset} - - DURATION_SEC: ${DURATION_SEC:-unset} - - REQUEST_TIMEOUT: ${REQUEST_TIMEOUT:-unset} - - CONNECTIONS: ${CONNECTIONS:-unset} - - MAX_CONNECTIONS: ${MAX_CONNECTIONS:-unset} - - WEB_CONCURRENCY: ${WEB_CONCURRENCY:-unset} - - RAILS_MAX_THREADS: ${RAILS_MAX_THREADS:-unset} - - RAILS_MIN_THREADS: ${RAILS_MIN_THREADS:-unset} - - TOOLS: ${TOOLS:-unset} -" - -TIMEOUT_SEC=60 -START=$(date +%s) -until curl -fsS "$TARGET" >/dev/null; do - if (( $(date +%s) - START > TIMEOUT_SEC )); then - echo "Error: Target $TARGET not responding within ${TIMEOUT_SEC}s" >&2 - exit 1 - fi - sleep 1 -done - -echo "Warming up server with 10 requests..." 
-for i in {1..10}; do - curl -fsS "$TARGET" >/dev/null || true - sleep 0.5 -done -echo "Warm-up complete" - -mkdir -p "$OUTDIR" - -if [ "$RATE" = "max" ]; then - if [ "$CONNECTIONS" != "$MAX_CONNECTIONS" ]; then - echo "For RATE=max, CONNECTIONS (got $CONNECTIONS) and MAX_CONNECTIONS (got $MAX_CONNECTIONS) should be the same" - exit 1 - fi - FORTIO_ARGS=(-qps 0 -c "$CONNECTIONS") - VEGETA_ARGS=(-rate=infinity --workers="$CONNECTIONS" --max-workers="$CONNECTIONS") - K6_SCENARIOS="{ - max_rate: { - executor: 'constant-vus', - vus: $CONNECTIONS, - duration: '$DURATION' - } - }" -else - FORTIO_ARGS=(-qps "$RATE" -uniform -nocatchup -c "$CONNECTIONS") - VEGETA_ARGS=(-rate="$RATE" --workers="$CONNECTIONS" --max-workers="$MAX_CONNECTIONS") - K6_SCENARIOS="{ - constant_rate: { - executor: 'constant-arrival-rate', - rate: $RATE, - timeUnit: '1s', - duration: '$DURATION', - preAllocatedVUs: $CONNECTIONS, - maxVUs: $MAX_CONNECTIONS - } - }" -fi - -if (( RUN_FORTIO )); then - echo "===> Fortio" - # TODO https://github.com/fortio/fortio/wiki/FAQ#i-want-to-get-the-best-results-what-flags-should-i-pass - fortio load "${FORTIO_ARGS[@]}" -t "$DURATION" -timeout "$REQUEST_TIMEOUT" -json "$OUTDIR/fortio.json" "$TARGET" \ - | tee "$OUTDIR/fortio.txt" -fi - -if (( RUN_VEGETA )); then - echo - echo "===> Vegeta" - echo "GET $TARGET" | vegeta attack "${VEGETA_ARGS[@]}" -duration="$DURATION" -timeout="$REQUEST_TIMEOUT" \ - | tee "$OUTDIR/vegeta.bin" \ - | vegeta report | tee "$OUTDIR/vegeta.txt" - vegeta report -type=json "$OUTDIR/vegeta.bin" > "$OUTDIR/vegeta.json" -fi - -if (( RUN_K6 )); then - echo - echo "===> k6" - cat < "$OUTDIR/k6_test.js" -import http from 'k6/http'; -import { check } from 'k6'; - -export const options = { - scenarios: $K6_SCENARIOS, - httpReq: { - timeout: '$REQUEST_TIMEOUT', - }, -}; - -export default function () { - const response = http.get('$TARGET'); - check(response, { - 'status=200': r => r.status === 200, - // you can add more if needed: - // 'status=500': r => r.status === 500, - }); -} -EOF - - k6 run --summary-export="$OUTDIR/k6_summary.json" --summary-trend-stats "min,avg,med,max,p(90),p(99)" "$OUTDIR/k6_test.js" | tee "$OUTDIR/k6.txt" -fi - -echo -echo "===> Parsing results and generating summary" - -echo -e "Tool\tRPS\tp50(ms)\tp90(ms)\tp99(ms)\tStatus" > "$OUTDIR/summary.txt" - -if (( RUN_FORTIO )); then - FORTIO_RPS=$(jq '.ActualQPS' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}') - FORTIO_P50=$(jq '.DurationHistogram.Percentiles[] | select(.Percentile==50) | .Value * 1000' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}') - FORTIO_P90=$(jq '.DurationHistogram.Percentiles[] | select(.Percentile==90) | .Value * 1000' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}') - FORTIO_P99=$(jq '.DurationHistogram.Percentiles[] | select(.Percentile==99) | .Value * 1000' "$OUTDIR/fortio.json" | awk '{printf "%.2f", $1}') - FORTIO_STATUS=$(jq -r '.RetCodes | to_entries | map("\(.key)=\(.value)") | join(",")' "$OUTDIR/fortio.json") - echo -e "Fortio\t$FORTIO_RPS\t$FORTIO_P50\t$FORTIO_P90\t$FORTIO_P99\t$FORTIO_STATUS" >> "$OUTDIR/summary.txt" -fi - -if (( RUN_VEGETA )); then - # .throughput is successful_reqs/total_period, .rate is all_requests/attack_period - VEGETA_RPS=$(jq '.throughput' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}') - VEGETA_P50=$(jq '.latencies["50th"] / 1000000' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}') - VEGETA_P90=$(jq '.latencies["90th"] / 1000000' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}') - VEGETA_P99=$(jq '.latencies["99th"] / 
1000000' "$OUTDIR/vegeta.json" | awk '{printf "%.2f", $1}') - VEGETA_STATUS=$(jq -r '.status_codes | to_entries | map("\(.key)=\(.value)") | join(",")' "$OUTDIR/vegeta.json") - echo -e "Vegeta\t$VEGETA_RPS\t$VEGETA_P50\t$VEGETA_P90\t$VEGETA_P99\t$VEGETA_STATUS" >> "$OUTDIR/summary.txt" -fi - -if (( RUN_K6 )); then - K6_RPS=$(jq '.metrics.iterations.rate' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') - K6_P50=$(jq '.metrics.http_req_duration.med' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') - K6_P90=$(jq '.metrics.http_req_duration["p(90)"]' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') - K6_P99=$(jq '.metrics.http_req_duration["p(99)"]' "$OUTDIR/k6_summary.json" | awk '{printf "%.2f", $1}') - # Status: compute successful vs failed requests - K6_REQS_TOTAL=$(jq '.metrics.http_reqs.count' "$OUTDIR/k6_summary.json") - K6_STATUS=$(jq -r ' - .root_group.checks - | to_entries - | map(.key[7:] + "=" + (.value.passes|tostring)) - | join(",") - ' "$OUTDIR/k6_summary.json") - K6_REQS_KNOWN_STATUS=$(jq -r ' - .root_group.checks - | to_entries - | map(.value.passes) - | add - ' "$OUTDIR/k6_summary.json") - K6_REQS_OTHER=$(( K6_REQS_TOTAL - K6_REQS_KNOWN_STATUS )) - if [ "$K6_REQS_OTHER" -gt 0 ]; then - K6_STATUS="$K6_STATUS,other=$K6_REQS_OTHER" - fi - echo -e "k6\t$K6_RPS\t$K6_P50\t$K6_P90\t$K6_P99\t$K6_STATUS" >> "$OUTDIR/summary.txt" -fi - -echo -echo "Summary saved to $OUTDIR/summary.txt" -column -t -s $'\t' "$OUTDIR/summary.txt" From c0f7322d75891d78361567f33de28b57f7e31e00 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 7 Nov 2025 18:39:59 +0000 Subject: [PATCH 16/38] Fix k6 timeout --- spec/performance/bench.rb | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index 6f9a9536f4..9021b41235 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -204,13 +204,10 @@ def server_responding?(uri) export const options = { scenarios: #{k6_scenarios}, - httpReq: { - timeout: '#{REQUEST_TIMEOUT}', - }, }; export default function () { - const response = http.get('#{TARGET}'); + const response = http.get('#{TARGET}', { timeout: '#{REQUEST_TIMEOUT}' }); check(response, { 'status=200': r => r.status === 200, // you can add more if needed: From 6b6492289626a994c708eb51469bcc06caee61ba Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 7 Nov 2025 18:49:21 +0000 Subject: [PATCH 17/38] Replace DURATION_SEC with DURATION --- .github/workflows/benchmark.yml | 10 +++++----- spec/performance/bench.rb | 22 ++++++++-------------- 2 files changed, 13 insertions(+), 19 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 7b60cab6a1..44e528dae5 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -14,11 +14,11 @@ on: required: false default: 'max' type: string - duration_sec: - description: 'Duration in seconds' + duration: + description: 'Duration (e.g., "30s", "1m", "90s")' required: false - default: 30 - type: number + default: '30s' + type: string request_timeout: description: 'Request timeout (e.g., "60s", "1m", "90s")' required: false @@ -68,7 +68,7 @@ env: VEGETA_VERSION: "12.13.0" # Benchmark parameters RATE: ${{ github.event.inputs.rate || 'max' }} - DURATION_SEC: ${{ github.event.inputs.duration_sec || 30 }} + DURATION: ${{ github.event.inputs.duration || '30s' }} REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} CONNECTIONS: ${{ github.event.inputs.connections || 10 }} 
MAX_CONNECTIONS: ${{ github.event.inputs.max_connections || github.event.inputs.connections || 10 }} diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index 9021b41235..ac12eba048 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -15,10 +15,10 @@ # concurrent connections/virtual users CONNECTIONS = ENV.fetch("CONNECTIONS", "10").to_i # maximum connections/virtual users -MAX_CONNECTIONS = ENV.fetch("MAX_CONNECTIONS", CONNECTIONS.to_s).to_i -DURATION_SEC = ENV.fetch("DURATION_SEC", "10").to_f -DURATION = "#{DURATION_SEC}s".freeze -# request timeout (duration string like "60s", "1m", "90s") +MAX_CONNECTIONS = ENV.fetch("MAX_CONNECTIONS", CONNECTIONS).to_i +# benchmark duration (duration string like "30s", "1m", "90s") +DURATION = ENV.fetch("DURATION", "30s") +# request timeout (duration string as above) REQUEST_TIMEOUT = ENV.fetch("REQUEST_TIMEOUT", "60s") # Tools to run (comma-separated) TOOLS = ENV.fetch("TOOLS", "fortio,vegeta,k6").split(",") @@ -50,15 +50,9 @@ def validate_positive_integer(value, name) end def validate_duration(value, name) - return if value.is_a?(Numeric) && value.positive? - - raise "#{name} must be a positive number (got: '#{value}')" -end - -def validate_timeout(value) return if value.match?(/^(\d+(\.\d+)?[smh])+$/) - raise "REQUEST_TIMEOUT must be a duration like '60s', '1m', '1.5m' (got: '#{value}')" + raise "#{name} must be a duration like '10s', '1m', '1.5m' (got: '#{value}')" end def parse_json_file(file_path, tool_name) @@ -74,8 +68,8 @@ def parse_json_file(file_path, tool_name) validate_rate(RATE) validate_positive_integer(CONNECTIONS, "CONNECTIONS") validate_positive_integer(MAX_CONNECTIONS, "MAX_CONNECTIONS") -validate_duration(DURATION_SEC, "DURATION_SEC") -validate_timeout(REQUEST_TIMEOUT) +validate_duration(DURATION, "DURATION") +validate_duration(REQUEST_TIMEOUT, "REQUEST_TIMEOUT") raise "MAX_CONNECTIONS (#{MAX_CONNECTIONS}) must be >= CONNECTIONS (#{CONNECTIONS})" if MAX_CONNECTIONS < CONNECTIONS @@ -93,7 +87,7 @@ def parse_json_file(file_path, tool_name) puts <<~PARAMS Benchmark parameters: - RATE: #{RATE} - - DURATION_SEC: #{DURATION_SEC} + - DURATION: #{DURATION} - REQUEST_TIMEOUT: #{REQUEST_TIMEOUT} - CONNECTIONS: #{CONNECTIONS} - MAX_CONNECTIONS: #{MAX_CONNECTIONS} From e86cb1e806d33fa276f75782da1fb2d12aa53ca7 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 7 Nov 2025 19:16:49 +0000 Subject: [PATCH 18/38] Group all code for a tool into a single block --- spec/performance/bench.rb | 225 +++++++++++++++++++------------------- 1 file changed, 115 insertions(+), 110 deletions(-) diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index ac12eba048..0bfa77e890 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -24,14 +24,6 @@ TOOLS = ENV.fetch("TOOLS", "fortio,vegeta,k6").split(",") OUTDIR = "bench_results" -FORTIO_JSON = "#{OUTDIR}/fortio.json".freeze -FORTIO_TXT = "#{OUTDIR}/fortio.txt".freeze -VEGETA_BIN = "#{OUTDIR}/vegeta.bin".freeze -VEGETA_JSON = "#{OUTDIR}/vegeta.json".freeze -VEGETA_TXT = "#{OUTDIR}/vegeta.txt".freeze -K6_TEST_JS = "#{OUTDIR}/k6_test.js".freeze -K6_SUMMARY_JSON = "#{OUTDIR}/k6_summary.json".freeze -K6_TXT = "#{OUTDIR}/k6.txt".freeze SUMMARY_TXT = "#{OUTDIR}/summary.txt".freeze # Validate input parameters @@ -73,11 +65,6 @@ def parse_json_file(file_path, tool_name) raise "MAX_CONNECTIONS (#{MAX_CONNECTIONS}) must be >= CONNECTIONS (#{CONNECTIONS})" if MAX_CONNECTIONS < CONNECTIONS -# Precompute checks for each tool -run_fortio = 
TOOLS.include?("fortio") -run_vegeta = TOOLS.include?("vegeta") -run_k6 = TOOLS.include?("k6") - # Check required tools are installed required_tools = TOOLS + %w[column tee] required_tools.each do |cmd| @@ -126,103 +113,43 @@ def server_responding?(uri) FileUtils.mkdir_p(OUTDIR) -# Configure tool-specific arguments -if RATE == "max" - if CONNECTIONS != MAX_CONNECTIONS - raise "For RATE=max, CONNECTIONS must be equal to MAX_CONNECTIONS (got #{CONNECTIONS} and #{MAX_CONNECTIONS})" - end - - fortio_args = ["-qps", 0, "-c", CONNECTIONS] - vegeta_args = ["-rate=infinity", "--workers=#{CONNECTIONS}", "--max-workers=#{CONNECTIONS}"] - k6_scenarios = <<~JS.strip - { - max_rate: { - executor: 'constant-vus', - vus: #{CONNECTIONS}, - duration: '#{DURATION}' - } - } - JS -else - fortio_args = ["-qps", RATE, "-uniform", "-nocatchup", "-c", CONNECTIONS] - vegeta_args = ["-rate=#{RATE}", "--workers=#{CONNECTIONS}", "--max-workers=#{MAX_CONNECTIONS}"] - k6_scenarios = <<~JS.strip - { - constant_rate: { - executor: 'constant-arrival-rate', - rate: #{RATE}, - timeUnit: '1s', - duration: '#{DURATION}', - preAllocatedVUs: #{CONNECTIONS}, - maxVUs: #{MAX_CONNECTIONS} - } - } - JS -end - -# Run Fortio -if run_fortio - puts "===> Fortio" - # TODO: https://github.com/fortio/fortio/wiki/FAQ#i-want-to-get-the-best-results-what-flags-should-i-pass - fortio_cmd = [ - "fortio", "load", - *fortio_args, - "-t", DURATION, - "-timeout", REQUEST_TIMEOUT, - "-json", FORTIO_JSON, - TARGET - ].join(" ") - raise "Fortio benchmark failed" unless system("#{fortio_cmd} | tee #{FORTIO_TXT}") +# Validate RATE=max constraint +is_max_rate = RATE == "max" +if is_max_rate && CONNECTIONS != MAX_CONNECTIONS + raise "For RATE=max, CONNECTIONS must be equal to MAX_CONNECTIONS (got #{CONNECTIONS} and #{MAX_CONNECTIONS})" end -# Run Vegeta -if run_vegeta - puts "\n===> Vegeta" - vegeta_cmd = [ - "echo", "'GET #{TARGET}'", "|", - "vegeta", "attack", - *vegeta_args, - "-duration=#{DURATION}", - "-timeout=#{REQUEST_TIMEOUT}" - ].join(" ") - raise "Vegeta attack failed" unless system("#{vegeta_cmd} | tee #{VEGETA_BIN} | vegeta report | tee #{VEGETA_TXT}") - raise "Vegeta report generation failed" unless system("vegeta report -type=json #{VEGETA_BIN} > #{VEGETA_JSON}") -end - -# Run k6 -if run_k6 - puts "\n===> k6" - k6_script = <<~JS - import http from 'k6/http'; - import { check } from 'k6'; - - export const options = { - scenarios: #{k6_scenarios}, - }; - - export default function () { - const response = http.get('#{TARGET}', { timeout: '#{REQUEST_TIMEOUT}' }); - check(response, { - 'status=200': r => r.status === 200, - // you can add more if needed: - // 'status=500': r => r.status === 500, - }); - } - JS - File.write(K6_TEST_JS, k6_script) - k6_command = "k6 run --summary-export=#{K6_SUMMARY_JSON} --summary-trend-stats 'min,avg,med,max,p(90),p(99)'" - raise "k6 benchmark failed" unless system("#{k6_command} #{K6_TEST_JS} | tee #{K6_TXT}") -end - -puts "\n===> Parsing results and generating summary" - # Initialize summary file File.write(SUMMARY_TXT, "Tool\tRPS\tp50(ms)\tp90(ms)\tp99(ms)\tStatus\n") -# Parse Fortio results -if run_fortio +# Fortio +if TOOLS.include?("fortio") begin - fortio_data = parse_json_file(FORTIO_JSON, "Fortio") + puts "===> Fortio" + + fortio_json = "#{OUTDIR}/fortio.json" + fortio_txt = "#{OUTDIR}/fortio.txt" + + # Configure Fortio arguments + # See https://github.com/fortio/fortio/wiki/FAQ#i-want-to-get-the-best-results-what-flags-should-i-pass + fortio_args = + if is_max_rate + ["-qps", 0, "-c", CONNECTIONS] + 
else + ["-qps", RATE, "-uniform", "-nocatchup", "-c", CONNECTIONS] + end + + fortio_cmd = [ + "fortio", "load", + *fortio_args, + "-t", DURATION, + "-timeout", REQUEST_TIMEOUT, + "-json", fortio_json, + TARGET + ].join(" ") + raise "Fortio benchmark failed" unless system("#{fortio_cmd} | tee #{fortio_txt}") + + fortio_data = parse_json_file(fortio_json, "Fortio") fortio_rps = fortio_data["ActualQPS"]&.round(2) || "missing" percentiles = fortio_data.dig("DurationHistogram", "Percentiles") || [] @@ -247,10 +174,34 @@ def server_responding?(uri) end end -# Parse Vegeta results -if run_vegeta +# Vegeta +if TOOLS.include?("vegeta") begin - vegeta_data = parse_json_file(VEGETA_JSON, "Vegeta") + puts "\n===> Vegeta" + + vegeta_bin = "#{OUTDIR}/vegeta.bin" + vegeta_json = "#{OUTDIR}/vegeta.json" + vegeta_txt = "#{OUTDIR}/vegeta.txt" + + # Configure Vegeta arguments + vegeta_args = + if is_max_rate + ["-rate=infinity", "--workers=#{CONNECTIONS}", "--max-workers=#{CONNECTIONS}"] + else + ["-rate=#{RATE}", "--workers=#{CONNECTIONS}", "--max-workers=#{MAX_CONNECTIONS}"] + end + + vegeta_cmd = [ + "echo 'GET #{TARGET}' |", + "vegeta", "attack", + *vegeta_args, + "-duration=#{DURATION}", + "-timeout=#{REQUEST_TIMEOUT}" + ].join(" ") + raise "Vegeta attack failed" unless system("#{vegeta_cmd} | tee #{vegeta_bin} | vegeta report | tee #{vegeta_txt}") + raise "Vegeta report generation failed" unless system("vegeta report -type=json #{vegeta_bin} > #{vegeta_json}") + + vegeta_data = parse_json_file(vegeta_json, "Vegeta") # .throughput is successful_reqs/total_period, .rate is all_requests/attack_period vegeta_rps = vegeta_data["throughput"]&.round(2) || "missing" vegeta_p50 = vegeta_data.dig("latencies", "50th")&./(1_000_000.0)&.round(2) || "missing" @@ -271,10 +222,64 @@ def server_responding?(uri) end end -# Parse k6 results -if run_k6 +# k6 +if TOOLS.include?("k6") begin - k6_data = parse_json_file(K6_SUMMARY_JSON, "k6") + puts "\n===> k6" + + k6_script_file = "#{OUTDIR}/k6_test.js" + k6_summary_json = "#{OUTDIR}/k6_summary.json" + k6_txt = "#{OUTDIR}/k6.txt" + + # Configure k6 scenarios + k6_scenarios = + if is_max_rate + <<~JS.strip + { + max_rate: { + executor: 'constant-vus', + vus: #{CONNECTIONS}, + duration: '#{DURATION}' + } + } + JS + else + <<~JS.strip + { + constant_rate: { + executor: 'constant-arrival-rate', + rate: #{RATE}, + timeUnit: '1s', + duration: '#{DURATION}', + preAllocatedVUs: #{CONNECTIONS}, + maxVUs: #{MAX_CONNECTIONS} + } + } + JS + end + + k6_script = <<~JS + import http from 'k6/http'; + import { check } from 'k6'; + + export const options = { + scenarios: #{k6_scenarios}, + }; + + export default function () { + const response = http.get('#{TARGET}', { timeout: '#{REQUEST_TIMEOUT}' }); + check(response, { + 'status=200': r => r.status === 200, + // you can add more if needed: + // 'status=500': r => r.status === 500, + }); + } + JS + File.write(k6_script_file, k6_script) + k6_command = "k6 run --summary-export=#{k6_summary_json} --summary-trend-stats 'min,avg,med,max,p(90),p(99)'" + raise "k6 benchmark failed" unless system("#{k6_command} #{k6_script_file} | tee #{k6_txt}") + + k6_data = parse_json_file(k6_summary_json, "k6") k6_rps = k6_data.dig("metrics", "iterations", "rate")&.round(2) || "missing" k6_p50 = k6_data.dig("metrics", "http_req_duration", "med")&.round(2) || "missing" k6_p90 = k6_data.dig("metrics", "http_req_duration", "p(90)")&.round(2) || "missing" From 8145e3689f0c45ba8fe3afcdced558d2e548512b Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Fri, 7 Nov 
2025 19:54:44 +0000 Subject: [PATCH 19/38] Remove duplication in adding summaries --- spec/performance/bench.rb | 70 +++++++++++++++++++++------------------ 1 file changed, 37 insertions(+), 33 deletions(-) diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index 0bfa77e890..752979c1ed 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -57,6 +57,16 @@ def parse_json_file(file_path, tool_name) raise "Failed to read #{tool_name} results: #{e.message}" end +def failure_metrics(error) + ["FAILED", "FAILED", "FAILED", "FAILED", error.message] +end + +def add_summary_line(*parts) + File.open(SUMMARY_TXT, "a") do |f| + f.puts parts.join("\t") + end +end + validate_rate(RATE) validate_positive_integer(CONNECTIONS, "CONNECTIONS") validate_positive_integer(MAX_CONNECTIONS, "MAX_CONNECTIONS") @@ -120,11 +130,12 @@ def server_responding?(uri) end # Initialize summary file -File.write(SUMMARY_TXT, "Tool\tRPS\tp50(ms)\tp90(ms)\tp99(ms)\tStatus\n") +File.write(SUMMARY_TXT, "") +add_summary_line("Tool", "RPS", "p50(ms)", "p90(ms)", "p99(ms)", "Status") # Fortio if TOOLS.include?("fortio") - begin + fortio_metrics = begin puts "===> Fortio" fortio_json = "#{OUTDIR}/fortio.json" @@ -162,21 +173,20 @@ def server_responding?(uri) fortio_p50 = (p50_data["Value"] * 1000).round(2) fortio_p90 = (p90_data["Value"] * 1000).round(2) fortio_p99 = (p99_data["Value"] * 1000).round(2) - fortio_status = fortio_data["RetCodes"]&.map { |k, v| "#{k}=#{v}" }&.join(",") || "unknown" - File.open(SUMMARY_TXT, "a") do |f| - f.puts "Fortio\t#{fortio_rps}\t#{fortio_p50}\t#{fortio_p90}\t#{fortio_p99}\t#{fortio_status}" - end - rescue StandardError => e - puts "Error: #{e.message}" - File.open(SUMMARY_TXT, "a") do |f| - f.puts "Fortio\tFAILED\tFAILED\tFAILED\tFAILED\t#{e.message}" - end + fortio_status = fortio_data["RetCodes"]&.map { |k, v| "#{k}=#{v}" }&.join(",") || "missing" + + [fortio_rps, fortio_p50, fortio_p90, fortio_p99, fortio_status] + rescue StandardError => error + puts "Error: #{error.message}" + failure_metrics(error) end + + add_summary_line("Fortio", *fortio_metrics) end # Vegeta if TOOLS.include?("vegeta") - begin + vegeta_metrics = begin puts "\n===> Vegeta" vegeta_bin = "#{OUTDIR}/vegeta.bin" @@ -207,24 +217,20 @@ def server_responding?(uri) vegeta_p50 = vegeta_data.dig("latencies", "50th")&./(1_000_000.0)&.round(2) || "missing" vegeta_p90 = vegeta_data.dig("latencies", "90th")&./(1_000_000.0)&.round(2) || "missing" vegeta_p99 = vegeta_data.dig("latencies", "99th")&./(1_000_000.0)&.round(2) || "missing" - vegeta_status = vegeta_data["status_codes"]&.map { |k, v| "#{k}=#{v}" }&.join(",") || "unknown" - vegeta_line = [ - "Vegeta", vegeta_rps, vegeta_p50, vegeta_p90, vegeta_p99, vegeta_status - ].join("\t") - File.open(SUMMARY_TXT, "a") do |f| - f.puts vegeta_line - end - rescue StandardError => e - puts "Error: #{e.message}" - File.open(SUMMARY_TXT, "a") do |f| - f.puts "Vegeta\tFAILED\tFAILED\tFAILED\tFAILED\t#{e.message}" - end + vegeta_status = vegeta_data["status_codes"]&.map { |k, v| "#{k}=#{v}" }&.join(",") || "missing" + + [vegeta_rps, vegeta_p50, vegeta_p90, vegeta_p99, vegeta_status] + rescue StandardError => error + puts "Error: #{error.message}" + failure_metrics(error) end + + add_summary_line("Vegeta", *vegeta_metrics) end # k6 if TOOLS.include?("k6") - begin + k6_metrics = begin puts "\n===> k6" k6_script_file = "#{OUTDIR}/k6_test.js" @@ -299,15 +305,13 @@ def server_responding?(uri) k6_status_parts << "other=#{k6_reqs_other}" if k6_reqs_other.positive? 
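    # Illustrative example only (numbers are invented): with the single status=200
    # check above, k6_status might come out as "200=2985,other=15".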
k6_status = k6_status_parts.empty? ? "missing" : k6_status_parts.join(",") - File.open(SUMMARY_TXT, "a") do |f| - f.puts "k6\t#{k6_rps}\t#{k6_p50}\t#{k6_p90}\t#{k6_p99}\t#{k6_status}" - end - rescue StandardError => e - puts "Error: #{e.message}" - File.open(SUMMARY_TXT, "a") do |f| - f.puts "k6\tFAILED\tFAILED\tFAILED\tFAILED\t#{e.message}" - end + [k6_rps, k6_p50, k6_p90, k6_p99, k6_status] + rescue StandardError => error + puts "Error: #{error.message}" + failure_metrics(error) end + + add_summary_line("k6", *k6_metrics) end puts "\nSummary saved to #{SUMMARY_TXT}" From 64b0d6ba90a8fb8f66e9d0680f19e9df11b531b6 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Sat, 8 Nov 2025 12:19:03 +0000 Subject: [PATCH 20/38] Benchmark all routes --- spec/performance/bench.rb | 159 +++++++++++++++++++++++++------------- 1 file changed, 106 insertions(+), 53 deletions(-) diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index 752979c1ed..da5c97b793 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -1,15 +1,16 @@ #!/usr/bin/env ruby # frozen_string_literal: true +require "English" require "json" require "fileutils" require "net/http" require "uri" # Benchmark parameters +PRO = ENV.fetch("PRO", "false") == "true" +APP_DIR = PRO ? "react_on_rails_pro/spec/dummy" : "spec/dummy" BASE_URL = ENV.fetch("BASE_URL", "localhost:3001") -ROUTE = ENV.fetch("ROUTE", "server_side_hello_world_hooks") -TARGET = URI.parse("http://#{BASE_URL}/#{ROUTE}") # requests per second; if "max" will get maximum number of queries instead of a fixed rate RATE = ENV.fetch("RATE", "50") # concurrent connections/virtual users @@ -67,6 +68,30 @@ def add_summary_line(*parts) end end +# Get routes from the Rails app filtered by pages# and react_router# controllers +def get_benchmark_routes(app_dir) + routes_output = `cd #{app_dir} && bundle exec rails routes 2>&1` + raise "Failed to get routes from #{app_dir}" unless $CHILD_STATUS.success? + + routes = [] + routes_output.each_line do |line| + # Parse lines like: "server_side_hello_world GET /server_side_hello_world(.:format) pages#server_side_hello_world" + # We want GET routes only (not POST, etc.) served by pages# or react_router# controllers + # Capture path up to (.:format) part using [^(\s]+ (everything except '(' and whitespace) + next unless (match = line.match(/GET\s+([^(\s]+).*(pages|react_router)#/)) + + path = match[1] + path = "/" if path.empty? # Handle root route + routes << path + end + raise "No pages# or react_router# routes found in #{app_dir}" if routes.empty? + + routes +end + +# Get all routes to benchmark +routes = get_benchmark_routes(APP_DIR) + validate_rate(RATE) validate_positive_integer(CONNECTIONS, "CONNECTIONS") validate_positive_integer(MAX_CONNECTIONS, "MAX_CONNECTIONS") @@ -83,6 +108,8 @@ def add_summary_line(*parts) puts <<~PARAMS Benchmark parameters: + - APP_DIR: #{APP_DIR} + - BASE_URL: #{BASE_URL} - RATE: #{RATE} - DURATION: #{DURATION} - REQUEST_TIMEOUT: #{REQUEST_TIMEOUT} @@ -104,47 +131,42 @@ def server_responding?(uri) # Wait for the server to be ready TIMEOUT_SEC = 60 +puts "Checking server availability at #{BASE_URL}..." 
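+# Probe the first detected route; any route served by the dummy app would do here.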
+test_uri = URI.parse("http://#{BASE_URL}#{routes.first}") start_time = Time.now loop do - break if server_responding?(TARGET) + break if server_responding?(test_uri) - raise "Target #{TARGET} not responding within #{TIMEOUT_SEC}s" if Time.now - start_time > TIMEOUT_SEC + raise "Server at #{BASE_URL} not responding within #{TIMEOUT_SEC}s" if Time.now - start_time > TIMEOUT_SEC sleep 1 end - -# Warm up server -puts "Warming up server with 10 requests..." -10.times do - server_responding?(TARGET) - sleep 0.5 -end -puts "Warm-up complete" +puts "Server is ready!" FileUtils.mkdir_p(OUTDIR) # Validate RATE=max constraint -is_max_rate = RATE == "max" -if is_max_rate && CONNECTIONS != MAX_CONNECTIONS +IS_MAX_RATE = RATE == "max" +if IS_MAX_RATE && CONNECTIONS != MAX_CONNECTIONS raise "For RATE=max, CONNECTIONS must be equal to MAX_CONNECTIONS (got #{CONNECTIONS} and #{MAX_CONNECTIONS})" end -# Initialize summary file -File.write(SUMMARY_TXT, "") -add_summary_line("Tool", "RPS", "p50(ms)", "p90(ms)", "p99(ms)", "Status") +# rubocop:disable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity, Metrics/MethodLength -# Fortio -if TOOLS.include?("fortio") - fortio_metrics = begin - puts "===> Fortio" +# Benchmark a single route with Fortio +def run_fortio_benchmark(target, route_name) + return nil unless TOOLS.include?("fortio") - fortio_json = "#{OUTDIR}/fortio.json" - fortio_txt = "#{OUTDIR}/fortio.txt" + begin + puts "===> Fortio: #{route_name}" + + fortio_json = "#{OUTDIR}/#{route_name}_fortio.json" + fortio_txt = "#{OUTDIR}/#{route_name}_fortio.txt" # Configure Fortio arguments # See https://github.com/fortio/fortio/wiki/FAQ#i-want-to-get-the-best-results-what-flags-should-i-pass fortio_args = - if is_max_rate + if IS_MAX_RATE ["-qps", 0, "-c", CONNECTIONS] else ["-qps", RATE, "-uniform", "-nocatchup", "-c", CONNECTIONS] @@ -156,7 +178,7 @@ def server_responding?(uri) "-t", DURATION, "-timeout", REQUEST_TIMEOUT, "-json", fortio_json, - TARGET + target ].join(" ") raise "Fortio benchmark failed" unless system("#{fortio_cmd} | tee #{fortio_txt}") @@ -180,29 +202,29 @@ def server_responding?(uri) puts "Error: #{error.message}" failure_metrics(error) end - - add_summary_line("Fortio", *fortio_metrics) end -# Vegeta -if TOOLS.include?("vegeta") - vegeta_metrics = begin - puts "\n===> Vegeta" +# Benchmark a single route with Vegeta +def run_vegeta_benchmark(target, route_name) + return nil unless TOOLS.include?("vegeta") + + begin + puts "\n===> Vegeta: #{route_name}" - vegeta_bin = "#{OUTDIR}/vegeta.bin" - vegeta_json = "#{OUTDIR}/vegeta.json" - vegeta_txt = "#{OUTDIR}/vegeta.txt" + vegeta_bin = "#{OUTDIR}/#{route_name}_vegeta.bin" + vegeta_json = "#{OUTDIR}/#{route_name}_vegeta.json" + vegeta_txt = "#{OUTDIR}/#{route_name}_vegeta.txt" # Configure Vegeta arguments vegeta_args = - if is_max_rate + if IS_MAX_RATE ["-rate=infinity", "--workers=#{CONNECTIONS}", "--max-workers=#{CONNECTIONS}"] else ["-rate=#{RATE}", "--workers=#{CONNECTIONS}", "--max-workers=#{MAX_CONNECTIONS}"] end vegeta_cmd = [ - "echo 'GET #{TARGET}' |", + "echo 'GET #{target}' |", "vegeta", "attack", *vegeta_args, "-duration=#{DURATION}", @@ -212,7 +234,6 @@ def server_responding?(uri) raise "Vegeta report generation failed" unless system("vegeta report -type=json #{vegeta_bin} > #{vegeta_json}") vegeta_data = parse_json_file(vegeta_json, "Vegeta") - # .throughput is successful_reqs/total_period, .rate is all_requests/attack_period vegeta_rps = vegeta_data["throughput"]&.round(2) || "missing" vegeta_p50 = 
vegeta_data.dig("latencies", "50th")&./(1_000_000.0)&.round(2) || "missing" vegeta_p90 = vegeta_data.dig("latencies", "90th")&./(1_000_000.0)&.round(2) || "missing" @@ -224,22 +245,22 @@ def server_responding?(uri) puts "Error: #{error.message}" failure_metrics(error) end - - add_summary_line("Vegeta", *vegeta_metrics) end -# k6 -if TOOLS.include?("k6") - k6_metrics = begin - puts "\n===> k6" +# Benchmark a single route with k6 +def run_k6_benchmark(target, route_name) + return nil unless TOOLS.include?("k6") + + begin + puts "\n===> k6: #{route_name}" - k6_script_file = "#{OUTDIR}/k6_test.js" - k6_summary_json = "#{OUTDIR}/k6_summary.json" - k6_txt = "#{OUTDIR}/k6.txt" + k6_script_file = "#{OUTDIR}/#{route_name}_k6_test.js" + k6_summary_json = "#{OUTDIR}/#{route_name}_k6_summary.json" + k6_txt = "#{OUTDIR}/#{route_name}_k6.txt" # Configure k6 scenarios k6_scenarios = - if is_max_rate + if IS_MAX_RATE <<~JS.strip { max_rate: { @@ -273,11 +294,9 @@ def server_responding?(uri) }; export default function () { - const response = http.get('#{TARGET}', { timeout: '#{REQUEST_TIMEOUT}' }); + const response = http.get('#{target}', { timeout: '#{REQUEST_TIMEOUT}' }); check(response, { 'status=200': r => r.status === 200, - // you can add more if needed: - // 'status=500': r => r.status === 500, }); } JS @@ -294,8 +313,6 @@ def server_responding?(uri) # Status: compute successful vs failed requests k6_reqs_total = k6_data.dig("metrics", "http_reqs", "count") || 0 k6_checks = k6_data.dig("root_group", "checks") || {} - # Extract status code from check name (e.g., "status=200" -> "200") - # Handle both "status=XXX" format and other potential formats k6_status_parts = k6_checks.map do |name, check| status_label = name.start_with?("status=") ? name.delete_prefix("status=") : name "#{status_label}=#{check['passes']}" @@ -310,8 +327,44 @@ def server_responding?(uri) puts "Error: #{error.message}" failure_metrics(error) end +end + +# rubocop:enable Metrics/AbcSize, Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity, Metrics/MethodLength + +# Initialize summary file +File.write(SUMMARY_TXT, "") +add_summary_line("Route", "Tool", "RPS", "p50(ms)", "p90(ms)", "p99(ms)", "Status") + +# Run benchmarks for each route +routes.each do |route| + separator = "=" * 80 + puts "\n#{separator}" + puts "Benchmarking route: #{route}" + puts separator + + target = URI.parse("http://#{BASE_URL}#{route}") + + # Warm up server for this route + puts "Warming up server for #{route} with 10 requests..." + 10.times do + server_responding?(target) + sleep 0.5 + end + puts "Warm-up complete for #{route}" + + # Sanitize route name for filenames + route_name = route.gsub(%r{^/}, "").tr("/", "_") + route_name = "root" if route_name.empty? 
+ + # Run each benchmark tool + fortio_metrics = run_fortio_benchmark(target, route_name) + add_summary_line(route, "Fortio", *fortio_metrics) if fortio_metrics + + vegeta_metrics = run_vegeta_benchmark(target, route_name) + add_summary_line(route, "Vegeta", *vegeta_metrics) if vegeta_metrics - add_summary_line("k6", *k6_metrics) + k6_metrics = run_k6_benchmark(target, route_name) + add_summary_line(route, "k6", *k6_metrics) if k6_metrics end puts "\nSummary saved to #{SUMMARY_TXT}" From e31d4a69cbbcc4a68b66ef6da96d95254e6dd878 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Sat, 8 Nov 2025 12:26:49 +0000 Subject: [PATCH 21/38] Fix Fortio failure on server_side_log_throw_raise --- spec/performance/bench.rb | 2 ++ 1 file changed, 2 insertions(+) diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index da5c97b793..22321da44f 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -177,6 +177,8 @@ def run_fortio_benchmark(target, route_name) *fortio_args, "-t", DURATION, "-timeout", REQUEST_TIMEOUT, + # Allow redirects. Could use -L instead, but it uses the slower HTTP client. + "-allow-initial-errors", "-json", fortio_json, target ].join(" ") From a3866f307c4d12926f91bb46d5bd67663e657c0d Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Sat, 8 Nov 2025 12:48:49 +0000 Subject: [PATCH 22/38] Allow specifying routes --- .github/workflows/benchmark.yml | 5 +++++ spec/performance/bench.rb | 9 ++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 44e528dae5..ba15d675a4 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -9,6 +9,10 @@ on: required: false default: false type: boolean + routes: + description: 'Comma-separated routes to benchmark (e.g., "/,/hello"). Leave empty to auto-detect from Rails.' + required: false + type: string rate: description: 'Requests per second (use "max" for maximum throughput)' required: false @@ -67,6 +71,7 @@ env: K6_VERSION: "1.3.0" VEGETA_VERSION: "12.13.0" # Benchmark parameters + ROUTES: ${{ github.event.inputs.routes }} RATE: ${{ github.event.inputs.rate || 'max' }} DURATION: ${{ github.event.inputs.duration || '30s' }} REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index 22321da44f..e93b355bd4 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -10,6 +10,7 @@ # Benchmark parameters PRO = ENV.fetch("PRO", "false") == "true" APP_DIR = PRO ? 
"react_on_rails_pro/spec/dummy" : "spec/dummy" +ROUTES = ENV.fetch("ROUTES", nil) BASE_URL = ENV.fetch("BASE_URL", "localhost:3001") # requests per second; if "max" will get maximum number of queries instead of a fixed rate RATE = ENV.fetch("RATE", "50") @@ -90,7 +91,12 @@ def get_benchmark_routes(app_dir) end # Get all routes to benchmark -routes = get_benchmark_routes(APP_DIR) +routes = + if ROUTES + ROUTES.split(",").map(&:strip) + else + get_benchmark_routes(APP_DIR) + end validate_rate(RATE) validate_positive_integer(CONNECTIONS, "CONNECTIONS") @@ -109,6 +115,7 @@ def get_benchmark_routes(app_dir) puts <<~PARAMS Benchmark parameters: - APP_DIR: #{APP_DIR} + - ROUTES: #{ROUTES || 'auto-detect from Rails'} - BASE_URL: #{BASE_URL} - RATE: #{RATE} - DURATION: #{DURATION} From a914dcd905071faeb3b5533ee3698fa424c12455 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Sat, 8 Nov 2025 14:10:20 +0000 Subject: [PATCH 23/38] Add pro benchmarks --- .github/workflows/benchmark.yml | 220 +++++++++++++++--- react_on_rails_pro/spec/dummy/bin/prod | 29 +++ react_on_rails_pro/spec/dummy/bin/prod-assets | 8 + 3 files changed, 221 insertions(+), 36 deletions(-) create mode 100755 react_on_rails_pro/spec/dummy/bin/prod create mode 100755 react_on_rails_pro/spec/dummy/bin/prod-assets diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index ba15d675a4..bbc43fc3a2 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -29,33 +29,34 @@ on: default: '60s' type: string connections: - description: 'Concurrent connections/virtual users' + description: 'Concurrent connections/virtual users (also used as max)' required: false default: 10 type: number - max_connections: - description: 'Maximum connections/virtual users' - required: false - type: number web_concurrency: description: 'Number of Puma worker processes' required: false default: 4 type: number - rails_max_threads: - description: 'Maximum number of Puma threads' + rails_threads: + description: 'Number of Puma threads (min and max will be same)' required: false default: 3 type: number - rails_min_threads: - description: 'Minimum number of Puma threads (same as maximum if not set)' - required: false - type: number tools: description: 'Comma-separated list of tools to run' required: false default: 'fortio,vegeta,k6' type: string + app_version: + description: 'Which app version to benchmark' + required: false + default: 'both' + type: choice + options: + - 'both' + - 'core_only' + - 'pro_only' push: branches: - master @@ -76,15 +77,17 @@ env: DURATION: ${{ github.event.inputs.duration || '30s' }} REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} CONNECTIONS: ${{ github.event.inputs.connections || 10 }} - MAX_CONNECTIONS: ${{ github.event.inputs.max_connections || github.event.inputs.connections || 10 }} + MAX_CONNECTIONS: ${{ github.event.inputs.connections || 10 }} WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || 4 }} - RAILS_MAX_THREADS: ${{ github.event.inputs.rails_max_threads || 3 }} - RAILS_MIN_THREADS: ${{ github.event.inputs.rails_min_threads || github.event.inputs.rails_max_threads || 3 }} + RAILS_MAX_THREADS: ${{ github.event.inputs.rails_threads || 3 }} + RAILS_MIN_THREADS: ${{ github.event.inputs.rails_threads || 3 }} TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} jobs: benchmark: runs-on: ubuntu-latest + env: + REACT_ON_RAILS_PRO_LICENSE: ${{ secrets.REACT_ON_RAILS_PRO_LICENSE }} steps: # ============================================ @@ -181,8 
+184,8 @@ jobs: - name: Setup Ruby uses: ruby/setup-ruby@v1 with: - ruby-version: '3.4' - bundler: 2.5.9 + ruby-version: '3.3.7' + bundler: 2.5.4 - name: Fix dependency for libyaml-dev run: sudo apt install libyaml-dev -y @@ -213,29 +216,34 @@ jobs: run: cd packages/react-on-rails && yarn install --no-progress --no-emoji --frozen-lockfile && yalc publish - name: yalc add react-on-rails + if: github.event.inputs.app_version != 'pro_only' run: cd spec/dummy && yalc add react-on-rails - name: Install Node modules with Yarn for dummy app + if: github.event.inputs.app_version != 'pro_only' run: cd spec/dummy && yarn install --no-progress --no-emoji - name: Save dummy app ruby gems to cache + if: github.event.inputs.app_version != 'pro_only' uses: actions/cache@v4 with: path: spec/dummy/vendor/bundle key: dummy-app-gem-cache-${{ hashFiles('spec/dummy/Gemfile.lock') }} - name: Install Ruby Gems for dummy app + if: github.event.inputs.app_version != 'pro_only' run: | cd spec/dummy bundle lock --add-platform 'x86_64-linux' if ! bundle check --path=vendor/bundle; then - bundle _2.5.9_ install --path=vendor/bundle --jobs=4 --retry=3 + bundle _2.5.4_ install --path=vendor/bundle --jobs=4 --retry=3 fi - name: generate file system-based packs run: cd spec/dummy && RAILS_ENV="production" bundle exec rake react_on_rails:generate_packs - name: Prepare production assets + if: github.event.inputs.app_version != 'pro_only' run: | set -e # Exit on any error echo "🔨 Building production assets..." @@ -249,6 +257,7 @@ jobs: echo "✅ Production assets built successfully" - name: Start production server + if: github.event.inputs.app_version != 'pro_only' run: | set -e # Exit on any error echo "🚀 Starting production server..." @@ -273,14 +282,15 @@ jobs: exit 1 # ============================================ - # STEP 5: RUN BENCHMARKS + # STEP 5: RUN CORE BENCHMARKS # ============================================ - - name: Execute benchmark suite - timeout-minutes: 20 + - name: Execute Core benchmark suite + if: github.event.inputs.app_version != 'pro_only' + timeout-minutes: 120 run: | set -e # Exit on any error - echo "🏃 Running benchmark suite..." + echo "🏃 Running Core benchmark suite..." if ! ruby spec/performance/bench.rb; then echo "❌ ERROR: Benchmark execution failed" @@ -289,7 +299,8 @@ jobs: echo "✅ Benchmark suite completed successfully" - - name: Validate benchmark results + - name: Validate Core benchmark results + if: github.event.inputs.app_version != 'pro_only' run: | set -e # Exit on any error echo "🔍 Validating benchmark output files..." @@ -327,39 +338,176 @@ jobs: echo "Continuing with available results..." 
fi - # ============================================ - # STEP 6: COLLECT BENCHMARK RESULTS - # ============================================ - - - name: Upload benchmark results + - name: Upload Core benchmark results uses: actions/upload-artifact@v4 - if: always() # Upload even if benchmark fails + if: github.event.inputs.app_version != 'pro_only' && always() with: - name: benchmark-results-${{ github.run_number }} + name: benchmark-core-results-${{ github.run_number }} path: bench_results/ retention-days: 30 if-no-files-found: warn - - name: Verify artifact upload - if: success() + # ============================================ + # STEP 6: SETUP PRO APPLICATION SERVER + # ============================================ + - name: Cache Pro package node modules + if: github.event.inputs.app_version != 'core_only' + uses: actions/cache@v4 + with: + path: react_on_rails_pro/node_modules + key: v4-pro-package-node-modules-cache-${{ hashFiles('react_on_rails_pro/yarn.lock') }} + + - name: Cache Pro dummy app node modules + if: github.event.inputs.app_version != 'core_only' + uses: actions/cache@v4 + with: + path: react_on_rails_pro/spec/dummy/node_modules + key: v4-pro-dummy-app-node-modules-cache-${{ hashFiles('react_on_rails_pro/spec/dummy/yarn.lock') }} + + - name: Cache Pro dummy app Ruby gems + if: github.event.inputs.app_version != 'core_only' + uses: actions/cache@v4 + with: + path: react_on_rails_pro/spec/dummy/vendor/bundle + key: v4-pro-dummy-app-gem-cache-${{ hashFiles('react_on_rails_pro/spec/dummy/Gemfile.lock') }} + + - name: Install Node modules with Yarn for Pro package + if: github.event.inputs.app_version != 'core_only' + run: | + cd react_on_rails_pro + sudo yarn global add yalc + yarn install --frozen-lockfile --no-progress --no-emoji + + - name: Install Node modules with Yarn for Pro dummy app + if: github.event.inputs.app_version != 'core_only' + run: cd react_on_rails_pro/spec/dummy && yarn install --frozen-lockfile --no-progress --no-emoji + + - name: Install Ruby Gems for Pro dummy app + if: github.event.inputs.app_version != 'core_only' + run: | + cd react_on_rails_pro/spec/dummy + bundle lock --add-platform 'x86_64-linux' + bundle config set path vendor/bundle + bundle _2.5.4_ check || bundle _2.5.4_ install --jobs=4 --retry=3 + + - name: Generate file-system based entrypoints for Pro + if: github.event.inputs.app_version != 'core_only' + run: cd react_on_rails_pro/spec/dummy && bundle exec rake react_on_rails:generate_packs + + - name: Prepare Pro production assets + if: github.event.inputs.app_version != 'core_only' run: | - echo "✅ Benchmark results uploaded as workflow artifacts" - echo "📦 Artifact name: benchmark-results-${{ github.run_number }}" - echo "🔗 Access artifacts from the Actions tab in GitHub" + set -e + echo "🔨 Building Pro production assets..." + cd react_on_rails_pro/spec/dummy + + if ! bin/prod-assets; then + echo "❌ ERROR: Failed to build production assets" + exit 1 + fi + + echo "✅ Production assets built successfully" + + - name: Start Pro production server + if: github.event.inputs.app_version != 'core_only' + run: | + set -e + echo "🚀 Starting Pro production server..." + cd react_on_rails_pro/spec/dummy + + # Start server in background + bin/prod & + echo "Server started in background" + + # Wait for server to be ready (max 30 seconds) + echo "⏳ Waiting for server to be ready..." 
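+          # Poll the root path once per second; bench.rb still performs its own
+          # per-route warm-up requests before each benchmark run.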
+ for i in {1..30}; do + if curl -fsS http://localhost:3001 > /dev/null; then + echo "✅ Server is ready and responding" + exit 0 + fi + echo " Attempt $i/30: Server not ready yet..." + sleep 1 + done + + echo "❌ ERROR: Server failed to start within 30 seconds" + exit 1 # ============================================ - # WORKFLOW COMPLETION + # STEP 7: RUN PRO BENCHMARKS # ============================================ + - name: Execute Pro benchmark suite + if: github.event.inputs.app_version != 'core_only' + timeout-minutes: 120 + run: | + set -e + echo "🏃 Running Pro benchmark suite..." + + if ! PRO=true ruby spec/performance/bench.rb; then + echo "❌ ERROR: Benchmark execution failed" + exit 1 + fi + + echo "✅ Benchmark suite completed successfully" + + - name: Validate Pro benchmark results + if: github.event.inputs.app_version != 'core_only' + run: | + set -e + echo "🔍 Validating Pro benchmark output files..." + + RESULTS_DIR="bench_results" + REQUIRED_FILES=("summary.txt") + MISSING_FILES=() + + if [ ! -d "${RESULTS_DIR}" ]; then + echo "❌ ERROR: Benchmark results directory '${RESULTS_DIR}' not found" + exit 1 + fi + + echo "Generated files:" + ls -lh ${RESULTS_DIR}/ || true + echo "" + + for file in "${REQUIRED_FILES[@]}"; do + if [ ! -f "${RESULTS_DIR}/${file}" ]; then + MISSING_FILES+=("${file}") + fi + done + + if [ ${#MISSING_FILES[@]} -eq 0 ]; then + echo "✅ All required benchmark output files present" + echo "📊 Summary preview:" + head -20 ${RESULTS_DIR}/summary.txt || true + else + echo "⚠️ WARNING: Some required files are missing:" + printf ' - %s\n' "${MISSING_FILES[@]}" + echo "Continuing with available results..." + fi + + - name: Upload Pro benchmark results + uses: actions/upload-artifact@v4 + if: github.event.inputs.app_version != 'core_only' && always() + with: + name: benchmark-pro-results-${{ github.run_number }} + path: bench_results/ + retention-days: 30 + if-no-files-found: warn + + # ============================================ + # STEP 8: WORKFLOW COMPLETION + # ============================================ - name: Workflow summary if: always() run: | echo "📋 Benchmark Workflow Summary" - echo "==============================" + echo "====================================" echo "Status: ${{ job.status }}" echo "Run number: ${{ github.run_number }}" echo "Triggered by: ${{ github.actor }}" echo "Branch: ${{ github.ref_name }}" + echo "App version: ${{ github.event.inputs.app_version || 'both' }}" echo "" if [ "${{ job.status }}" == "success" ]; then echo "✅ All steps completed successfully" diff --git a/react_on_rails_pro/spec/dummy/bin/prod b/react_on_rails_pro/spec/dummy/bin/prod new file mode 100755 index 0000000000..35d0d355ce --- /dev/null +++ b/react_on_rails_pro/spec/dummy/bin/prod @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +# Run only after ./prod-assets + +# Check if assets are precompiled +MANIFEST="public/webpack/production/manifest.json" + +if [ ! -d "public/assets" ]; then + echo "ERROR: public/assets not found. Run ./bin/prod-assets first" + exit 1 +fi + +if [ ! -f "$MANIFEST" ]; then + echo "ERROR: $MANIFEST not found. 
Run ./bin/prod-assets first" + exit 1 +fi + +# Simple up-to-date check: warn if source files are newer than manifest.json +if find client config -type f \( -name "*.[jt]s" -o -name "*.[jt]sx" \) -newer "$MANIFEST" 2>/dev/null | grep -q .; then + echo "WARNING: client or config has changes newer than compiled assets" + echo "Consider running ./bin/prod-assets to rebuild" +fi + +if [ -f "yarn.lock" ] && [ "yarn.lock" -nt "$MANIFEST" ]; then + echo "WARNING: yarn.lock is newer than compiled assets" + echo "Consider running ./bin/prod-assets to rebuild" +fi + +NODE_ENV=production RAILS_ENV=production bundle exec rails server -p 3001 diff --git a/react_on_rails_pro/spec/dummy/bin/prod-assets b/react_on_rails_pro/spec/dummy/bin/prod-assets new file mode 100755 index 0000000000..96be6c50e8 --- /dev/null +++ b/react_on_rails_pro/spec/dummy/bin/prod-assets @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +export NODE_ENV=production +export RAILS_ENV=production +if [ "$CI" = "true" ]; then + bundle exec bootsnap precompile --gemfile app/ lib/ config/ +fi +bundle exec rails assets:precompile From 3f7224f2641e59006ae27a51a3dbdad52c5531f4 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Sat, 8 Nov 2025 14:11:11 +0000 Subject: [PATCH 24/38] Update Claude instructions --- CLAUDE.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CLAUDE.md b/CLAUDE.md index fa18a77374..9ab9ee6c2b 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -40,6 +40,8 @@ Pre-commit hooks automatically run: - All linters: `rake lint` (runs ESLint and RuboCop) - ESLint only: `yarn run lint` or `rake lint:eslint` - RuboCop only: `rake lint:rubocop` + - GitHub Action files (workflows, reusable actions, etc.): `actionlint` + - YAML files: `yamllint` (or validate the syntax with Ruby if it isn't installed). Do _not_ try to run RuboCop on `.yml` files. 
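+    - For example (assuming the linters are installed): `actionlint .github/workflows/benchmark.yml` or `yamllint .github/workflows/benchmark.yml`; without `yamllint`, `ruby -ryaml -e 'YAML.load_file(".github/workflows/benchmark.yml")'` checks the syntax only.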
- **Code Formatting**: - Format code with Prettier: `rake autofix` - Check formatting without fixing: `yarn start format.listDifferent` From 1ee85ef06e035bc4164029c0f881d82e0be488d3 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Tue, 11 Nov 2025 19:48:24 +0000 Subject: [PATCH 25/38] FIXME temp commit --- .github/workflows/benchmark.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index bbc43fc3a2..a9be4d48a3 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -51,7 +51,8 @@ on: app_version: description: 'Which app version to benchmark' required: false - default: 'both' + # FIXME: for debugging, restore 'both' before merging + default: 'pro_only' type: choice options: - 'both' From a358195d1ab9b36cf25b3ab27142a922e47b1b3a Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Tue, 11 Nov 2025 20:08:30 +0000 Subject: [PATCH 26/38] Update prod-assets to include generate_packs --- .github/workflows/benchmark.yml | 11 ++++------- react_on_rails_pro/spec/dummy/bin/prod-assets | 1 + 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index a9be4d48a3..0fd13e6381 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -88,7 +88,8 @@ jobs: benchmark: runs-on: ubuntu-latest env: - REACT_ON_RAILS_PRO_LICENSE: ${{ secrets.REACT_ON_RAILS_PRO_LICENSE }} + SECRET_KEY_BASE: 'dummy-secret-key-for-ci-testing-not-used-in-production' + REACT_ON_RAILS_PRO_LICENSE: ${{ secrets.REACT_ON_RAILS_PRO_LICENSE_V2 }} steps: # ============================================ @@ -236,12 +237,8 @@ jobs: run: | cd spec/dummy bundle lock --add-platform 'x86_64-linux' - if ! bundle check --path=vendor/bundle; then - bundle _2.5.4_ install --path=vendor/bundle --jobs=4 --retry=3 - fi - - - name: generate file system-based packs - run: cd spec/dummy && RAILS_ENV="production" bundle exec rake react_on_rails:generate_packs + bundle config set path vendor/bundle + bundle _2.5.4_ check || bundle _2.5.4_ install --jobs=4 --retry=3 - name: Prepare production assets if: github.event.inputs.app_version != 'pro_only' diff --git a/react_on_rails_pro/spec/dummy/bin/prod-assets b/react_on_rails_pro/spec/dummy/bin/prod-assets index 96be6c50e8..828b1e6ae8 100755 --- a/react_on_rails_pro/spec/dummy/bin/prod-assets +++ b/react_on_rails_pro/spec/dummy/bin/prod-assets @@ -5,4 +5,5 @@ export RAILS_ENV=production if [ "$CI" = "true" ]; then bundle exec bootsnap precompile --gemfile app/ lib/ config/ fi +bundle exec rails react_on_rails:generate_packs bundle exec rails assets:precompile From 3aa40f2cadd9dab0bd99406952c0e02b1fe92e00 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Tue, 11 Nov 2025 20:34:07 +0000 Subject: [PATCH 27/38] Disable js_compressor and css_compressor --- react_on_rails_pro/Gemfile.development_dependencies | 1 - react_on_rails_pro/Gemfile.lock | 3 --- react_on_rails_pro/spec/dummy/Gemfile.lock | 3 --- .../spec/dummy/config/environments/production.rb | 7 ++++--- 4 files changed, 4 insertions(+), 10 deletions(-) diff --git a/react_on_rails_pro/Gemfile.development_dependencies b/react_on_rails_pro/Gemfile.development_dependencies index 92c9ead62b..8713a981a3 100644 --- a/react_on_rails_pro/Gemfile.development_dependencies +++ b/react_on_rails_pro/Gemfile.development_dependencies @@ -20,7 +20,6 @@ gem "pg" # Turbolinks makes following links in your web application faster. 
Read more: https://github.com/rails/turbolinks gem "turbolinks" gem "sqlite3", "~> 1.4" -gem "uglifier" gem "jquery-rails" gem "sprockets" gem "sass-rails" diff --git a/react_on_rails_pro/Gemfile.lock b/react_on_rails_pro/Gemfile.lock index bc8fc6a08c..2f86e65a10 100644 --- a/react_on_rails_pro/Gemfile.lock +++ b/react_on_rails_pro/Gemfile.lock @@ -414,8 +414,6 @@ GEM turbolinks-source (5.2.0) tzinfo (2.0.6) concurrent-ruby (~> 1.0) - uglifier (4.2.0) - execjs (>= 0.3.0, < 3) unicode-display_width (2.5.0) uri (1.0.3) useragent (0.16.11) @@ -493,7 +491,6 @@ DEPENDENCIES sprockets sqlite3 (~> 1.4) turbolinks - uglifier web-console webdrivers (= 5.3.0) webmock diff --git a/react_on_rails_pro/spec/dummy/Gemfile.lock b/react_on_rails_pro/spec/dummy/Gemfile.lock index d0f9d868a6..bc9f6b2fae 100644 --- a/react_on_rails_pro/spec/dummy/Gemfile.lock +++ b/react_on_rails_pro/spec/dummy/Gemfile.lock @@ -450,8 +450,6 @@ GEM turbolinks-source (5.2.0) tzinfo (2.0.6) concurrent-ruby (~> 1.0) - uglifier (4.2.0) - execjs (>= 0.3.0, < 3) unicode-display_width (2.5.0) uri (1.0.3) useragent (0.16.11) @@ -542,7 +540,6 @@ DEPENDENCIES sprockets sqlite3 (~> 1.4) turbolinks - uglifier web-console webdrivers (= 5.3.0) webmock diff --git a/react_on_rails_pro/spec/dummy/config/environments/production.rb b/react_on_rails_pro/spec/dummy/config/environments/production.rb index 45a1d5f576..d2b312e1fd 100644 --- a/react_on_rails_pro/spec/dummy/config/environments/production.rb +++ b/react_on_rails_pro/spec/dummy/config/environments/production.rb @@ -19,8 +19,9 @@ config.public_file_server.enabled = true # Compress JavaScripts and CSS. - config.assets.js_compressor = Uglifier.new(harmony: true) - config.assets.css_compressor = :csso + # JS/CSS compression handled by Webpack/Shakapacker, not needed for Sprockets + # config.assets.js_compressor = Uglifier.new(harmony: true) + # config.assets.css_compressor = :csso # Do not fallback to assets pipeline if a precompiled asset is missed. config.assets.compile = false @@ -69,7 +70,7 @@ config.active_support.deprecation = :notify # Use default logging formatter so that PID and timestamp are not suppressed. - config.log_formatter = ::Logger::Formatter.new + config.log_formatter = Logger::Formatter.new # Use a different logger for distributed setups. # require 'syslog/logger' From 24ef5a37ae8ad8334058249810781c23f807ca4e Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Tue, 11 Nov 2025 20:51:45 +0000 Subject: [PATCH 28/38] Remove unused pg gem --- react_on_rails_pro/Gemfile.development_dependencies | 2 -- react_on_rails_pro/Gemfile.lock | 2 -- react_on_rails_pro/spec/dummy/Gemfile.lock | 2 -- 3 files changed, 6 deletions(-) diff --git a/react_on_rails_pro/Gemfile.development_dependencies b/react_on_rails_pro/Gemfile.development_dependencies index 8713a981a3..83bd149b7e 100644 --- a/react_on_rails_pro/Gemfile.development_dependencies +++ b/react_on_rails_pro/Gemfile.development_dependencies @@ -15,8 +15,6 @@ gem "puma", "~> 6" # Build JSON APIs with ease. Read more: https://github.com/rails/jbuilder gem "jbuilder" -gem "pg" - # Turbolinks makes following links in your web application faster. 
Read more: https://github.com/rails/turbolinks gem "turbolinks" gem "sqlite3", "~> 1.4" diff --git a/react_on_rails_pro/Gemfile.lock b/react_on_rails_pro/Gemfile.lock index 2f86e65a10..82885179ed 100644 --- a/react_on_rails_pro/Gemfile.lock +++ b/react_on_rails_pro/Gemfile.lock @@ -233,7 +233,6 @@ GEM parser (3.3.3.0) ast (~> 2.4.1) racc - pg (1.5.6) pp (0.6.2) prettyprint prettyprint (0.2.0) @@ -466,7 +465,6 @@ DEPENDENCIES net-http net-imap net-smtp - pg pry (>= 0.14.1) pry-byebug! pry-doc diff --git a/react_on_rails_pro/spec/dummy/Gemfile.lock b/react_on_rails_pro/spec/dummy/Gemfile.lock index bc9f6b2fae..9693d16731 100644 --- a/react_on_rails_pro/spec/dummy/Gemfile.lock +++ b/react_on_rails_pro/spec/dummy/Gemfile.lock @@ -258,7 +258,6 @@ GEM parser (3.3.3.0) ast (~> 2.4.1) racc - pg (1.5.6) pp (0.6.2) prettyprint prettyprint (0.2.0) @@ -513,7 +512,6 @@ DEPENDENCIES net-http net-imap net-smtp - pg prism-rails pry (>= 0.14.1) pry-byebug! From 2943eaf70bf2c942cf86e7eabbe4fc5d6758f8c7 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Tue, 11 Nov 2025 21:22:11 +0000 Subject: [PATCH 29/38] Handle empty inputs correctly --- .github/workflows/benchmark.yml | 12 ++++++------ spec/performance/bench.rb | 27 ++++++++++++++++++--------- 2 files changed, 24 insertions(+), 15 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 0fd13e6381..7acfb822bf 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -72,17 +72,17 @@ env: FORTIO_VERSION: "1.73.0" K6_VERSION: "1.3.0" VEGETA_VERSION: "12.13.0" - # Benchmark parameters + # Benchmark parameters (defaults in bench.rb unless overridden here for CI) ROUTES: ${{ github.event.inputs.routes }} RATE: ${{ github.event.inputs.rate || 'max' }} - DURATION: ${{ github.event.inputs.duration || '30s' }} - REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout || '60s' }} - CONNECTIONS: ${{ github.event.inputs.connections || 10 }} - MAX_CONNECTIONS: ${{ github.event.inputs.connections || 10 }} + DURATION: ${{ github.event.inputs.duration }} + REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout }} + CONNECTIONS: ${{ github.event.inputs.connections }} + MAX_CONNECTIONS: ${{ github.event.inputs.connections }} WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || 4 }} RAILS_MAX_THREADS: ${{ github.event.inputs.rails_threads || 3 }} RAILS_MIN_THREADS: ${{ github.event.inputs.rails_threads || 3 }} - TOOLS: ${{ github.event.inputs.tools || 'fortio,vegeta,k6' }} + TOOLS: ${{ github.event.inputs.tools }} jobs: benchmark: diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index e93b355bd4..24ac0e8a23 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -7,23 +7,30 @@ require "net/http" require "uri" +# Helper to get env var with default, +# treating empty string and "0" as unset since they can come from the benchmark workflow. +def env_or_default(key, default) + value = ENV[key].to_s + value.empty? || value == "0" ? default : value +end + # Benchmark parameters PRO = ENV.fetch("PRO", "false") == "true" APP_DIR = PRO ? 
"react_on_rails_pro/spec/dummy" : "spec/dummy" -ROUTES = ENV.fetch("ROUTES", nil) -BASE_URL = ENV.fetch("BASE_URL", "localhost:3001") +ROUTES = env_or_default("ROUTES", nil) +BASE_URL = env_or_default("BASE_URL", "localhost:3001") # requests per second; if "max" will get maximum number of queries instead of a fixed rate -RATE = ENV.fetch("RATE", "50") +RATE = env_or_default("RATE", "50") # concurrent connections/virtual users -CONNECTIONS = ENV.fetch("CONNECTIONS", "10").to_i +CONNECTIONS = env_or_default("CONNECTIONS", 10).to_i # maximum connections/virtual users -MAX_CONNECTIONS = ENV.fetch("MAX_CONNECTIONS", CONNECTIONS).to_i +MAX_CONNECTIONS = env_or_default("MAX_CONNECTIONS", CONNECTIONS).to_i # benchmark duration (duration string like "30s", "1m", "90s") -DURATION = ENV.fetch("DURATION", "30s") +DURATION = env_or_default("DURATION", "30s") # request timeout (duration string as above) -REQUEST_TIMEOUT = ENV.fetch("REQUEST_TIMEOUT", "60s") +REQUEST_TIMEOUT = env_or_default("REQUEST_TIMEOUT", "60s") # Tools to run (comma-separated) -TOOLS = ENV.fetch("TOOLS", "fortio,vegeta,k6").split(",") +TOOLS = env_or_default("TOOLS", "fortio,vegeta,k6").split(",") OUTDIR = "bench_results" SUMMARY_TXT = "#{OUTDIR}/summary.txt".freeze @@ -93,11 +100,13 @@ def get_benchmark_routes(app_dir) # Get all routes to benchmark routes = if ROUTES - ROUTES.split(",").map(&:strip) + ROUTES.split(",").map(&:strip).reject(&:empty?) else get_benchmark_routes(APP_DIR) end +raise "No routes to benchmark" if routes.empty? + validate_rate(RATE) validate_positive_integer(CONNECTIONS, "CONNECTIONS") validate_positive_integer(MAX_CONNECTIONS, "MAX_CONNECTIONS") From 2f658c5906fd7c21366394c339c9de9733fcb5f3 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Tue, 11 Nov 2025 21:32:51 +0000 Subject: [PATCH 30/38] Fix app version handling in the benchmark workflow --- .github/workflows/benchmark.yml | 58 ++++++++++++++++++--------------- 1 file changed, 31 insertions(+), 27 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 7acfb822bf..39698d40b5 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -72,6 +72,9 @@ env: FORTIO_VERSION: "1.73.0" K6_VERSION: "1.3.0" VEGETA_VERSION: "12.13.0" + # Determine which apps to run (default is 'pro_only' for all triggers) + RUN_CORE: ${{ (github.event.inputs.app_version || 'pro_only') != 'pro_only' && 'true' || '' }} + RUN_PRO: ${{ (github.event.inputs.app_version || 'pro_only') != 'core_only' && 'true' || '' }} # Benchmark parameters (defaults in bench.rb unless overridden here for CI) ROUTES: ${{ github.event.inputs.routes }} RATE: ${{ github.event.inputs.rate || 'max' }} @@ -218,30 +221,30 @@ jobs: run: cd packages/react-on-rails && yarn install --no-progress --no-emoji --frozen-lockfile && yalc publish - name: yalc add react-on-rails - if: github.event.inputs.app_version != 'pro_only' + if: env.RUN_CORE run: cd spec/dummy && yalc add react-on-rails - - name: Install Node modules with Yarn for dummy app - if: github.event.inputs.app_version != 'pro_only' + - name: Install Node modules with Yarn for Core dummy app + if: env.RUN_CORE run: cd spec/dummy && yarn install --no-progress --no-emoji - - name: Save dummy app ruby gems to cache - if: github.event.inputs.app_version != 'pro_only' + - name: Save Core dummy app ruby gems to cache + if: env.RUN_CORE uses: actions/cache@v4 with: path: spec/dummy/vendor/bundle key: dummy-app-gem-cache-${{ hashFiles('spec/dummy/Gemfile.lock') }} - - name: Install Ruby Gems 
for dummy app - if: github.event.inputs.app_version != 'pro_only' + - name: Install Ruby Gems for Core dummy app + if: env.RUN_CORE run: | cd spec/dummy bundle lock --add-platform 'x86_64-linux' bundle config set path vendor/bundle bundle _2.5.4_ check || bundle _2.5.4_ install --jobs=4 --retry=3 - - name: Prepare production assets - if: github.event.inputs.app_version != 'pro_only' + - name: Prepare Core production assets + if: env.RUN_CORE run: | set -e # Exit on any error echo "🔨 Building production assets..." @@ -254,8 +257,8 @@ jobs: echo "✅ Production assets built successfully" - - name: Start production server - if: github.event.inputs.app_version != 'pro_only' + - name: Start Core production server + if: env.RUN_CORE run: | set -e # Exit on any error echo "🚀 Starting production server..." @@ -284,7 +287,7 @@ jobs: # ============================================ - name: Execute Core benchmark suite - if: github.event.inputs.app_version != 'pro_only' + if: env.RUN_CORE timeout-minutes: 120 run: | set -e # Exit on any error @@ -298,7 +301,7 @@ jobs: echo "✅ Benchmark suite completed successfully" - name: Validate Core benchmark results - if: github.event.inputs.app_version != 'pro_only' + if: env.RUN_CORE run: | set -e # Exit on any error echo "🔍 Validating benchmark output files..." @@ -338,7 +341,7 @@ jobs: - name: Upload Core benchmark results uses: actions/upload-artifact@v4 - if: github.event.inputs.app_version != 'pro_only' && always() + if: env.RUN_CORE && always() with: name: benchmark-core-results-${{ github.run_number }} path: bench_results/ @@ -349,39 +352,39 @@ jobs: # STEP 6: SETUP PRO APPLICATION SERVER # ============================================ - name: Cache Pro package node modules - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO uses: actions/cache@v4 with: path: react_on_rails_pro/node_modules key: v4-pro-package-node-modules-cache-${{ hashFiles('react_on_rails_pro/yarn.lock') }} - name: Cache Pro dummy app node modules - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO uses: actions/cache@v4 with: path: react_on_rails_pro/spec/dummy/node_modules key: v4-pro-dummy-app-node-modules-cache-${{ hashFiles('react_on_rails_pro/spec/dummy/yarn.lock') }} - name: Cache Pro dummy app Ruby gems - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO uses: actions/cache@v4 with: path: react_on_rails_pro/spec/dummy/vendor/bundle key: v4-pro-dummy-app-gem-cache-${{ hashFiles('react_on_rails_pro/spec/dummy/Gemfile.lock') }} - name: Install Node modules with Yarn for Pro package - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO run: | cd react_on_rails_pro sudo yarn global add yalc yarn install --frozen-lockfile --no-progress --no-emoji - name: Install Node modules with Yarn for Pro dummy app - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO run: cd react_on_rails_pro/spec/dummy && yarn install --frozen-lockfile --no-progress --no-emoji - name: Install Ruby Gems for Pro dummy app - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO run: | cd react_on_rails_pro/spec/dummy bundle lock --add-platform 'x86_64-linux' @@ -389,11 +392,11 @@ jobs: bundle _2.5.4_ check || bundle _2.5.4_ install --jobs=4 --retry=3 - name: Generate file-system based entrypoints for Pro - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO run: cd react_on_rails_pro/spec/dummy && bundle exec rake react_on_rails:generate_packs - name: Prepare Pro production assets - if: 
github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO run: | set -e echo "🔨 Building Pro production assets..." @@ -407,7 +410,7 @@ jobs: echo "✅ Production assets built successfully" - name: Start Pro production server - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO run: | set -e echo "🚀 Starting Pro production server..." @@ -436,7 +439,7 @@ jobs: # ============================================ - name: Execute Pro benchmark suite - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO timeout-minutes: 120 run: | set -e @@ -450,7 +453,7 @@ jobs: echo "✅ Benchmark suite completed successfully" - name: Validate Pro benchmark results - if: github.event.inputs.app_version != 'core_only' + if: env.RUN_PRO run: | set -e echo "🔍 Validating Pro benchmark output files..." @@ -486,7 +489,7 @@ jobs: - name: Upload Pro benchmark results uses: actions/upload-artifact@v4 - if: github.event.inputs.app_version != 'core_only' && always() + if: env.RUN_PRO && always() with: name: benchmark-pro-results-${{ github.run_number }} path: bench_results/ @@ -505,7 +508,8 @@ jobs: echo "Run number: ${{ github.run_number }}" echo "Triggered by: ${{ github.actor }}" echo "Branch: ${{ github.ref_name }}" - echo "App version: ${{ github.event.inputs.app_version || 'both' }}" + echo "Run Core: ${{ env.RUN_CORE }}" + echo "Run Pro: ${{ env.RUN_PRO }}" echo "" if [ "${{ job.status }}" == "success" ]; then echo "✅ All steps completed successfully" From a175f08453bbe2d8a234599a9605b2997314f6fd Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Tue, 11 Nov 2025 22:11:00 +0000 Subject: [PATCH 31/38] Fix starting/stopping servers --- .github/workflows/benchmark.yml | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 39698d40b5..f49627bc47 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -300,6 +300,18 @@ jobs: echo "✅ Benchmark suite completed successfully" + - name: Stop Core production server + if: env.RUN_CORE && always() + run: | + echo "🛑 Stopping Core production server..." + # Find and kill the Puma process on port 3001 + if lsof -ti:3001 > /dev/null 2>&1; then + kill $(lsof -ti:3001) || true + echo "✅ Server stopped" + else + echo "ℹ️ No server running on port 3001" + fi + - name: Validate Core benchmark results if: env.RUN_CORE run: | @@ -452,6 +464,18 @@ jobs: echo "✅ Benchmark suite completed successfully" + - name: Stop Pro production server + if: env.RUN_PRO && always() + run: | + echo "🛑 Stopping Pro production server..." + # Find and kill the Puma process on port 3001 + if lsof -ti:3001 > /dev/null 2>&1; then + kill $(lsof -ti:3001) || true + echo "✅ Server stopped" + else + echo "ℹ️ No server running on port 3001" + fi + - name: Validate Pro benchmark results if: env.RUN_PRO run: | From b44f351508ab89c58e7e1b498bfbf7652227f14f Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 12 Nov 2025 09:55:31 +0000 Subject: [PATCH 32/38] Simplify validate steps --- .github/workflows/benchmark.yml | 76 +++++++++------------------------ 1 file changed, 20 insertions(+), 56 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index f49627bc47..9df228665d 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -315,41 +315,21 @@ jobs: - name: Validate Core benchmark results if: env.RUN_CORE run: | - set -e # Exit on any error - echo "🔍 Validating benchmark output files..." 
- - RESULTS_DIR="bench_results" - REQUIRED_FILES=("summary.txt") - MISSING_FILES=() + set -e + echo "🔍 Validating benchmark results..." - # Check if results directory exists - if [ ! -d "${RESULTS_DIR}" ]; then - echo "❌ ERROR: Benchmark results directory '${RESULTS_DIR}' not found" + if [ ! -f "bench_results/summary.txt" ]; then + echo "❌ ERROR: benchmark summary file not found" exit 1 fi - - # List all generated files - echo "Generated files:" - ls -lh ${RESULTS_DIR}/ || true - echo "" - # Check for required files - for file in "${REQUIRED_FILES[@]}"; do - if [ ! -f "${RESULTS_DIR}/${file}" ]; then - MISSING_FILES+=("${file}") - fi - done - - # Report validation results - if [ ${#MISSING_FILES[@]} -eq 0 ]; then - echo "✅ All required benchmark output files present" - echo "📊 Summary preview:" - head -20 ${RESULTS_DIR}/summary.txt || true - else - echo "⚠️ WARNING: Some required files are missing:" - printf ' - %s\n' "${MISSING_FILES[@]}" - echo "Continuing with available results..." - fi + echo "✅ Benchmark results found" + echo "" + echo "📊 Summary:" + column -t -s $'\t' bench_results/summary.txt + echo "" + echo "Generated files:" + ls -lh bench_results/ - name: Upload Core benchmark results uses: actions/upload-artifact@v4 @@ -480,36 +460,20 @@ jobs: if: env.RUN_PRO run: | set -e - echo "🔍 Validating Pro benchmark output files..." - - RESULTS_DIR="bench_results" - REQUIRED_FILES=("summary.txt") - MISSING_FILES=() + echo "🔍 Validating benchmark results..." - if [ ! -d "${RESULTS_DIR}" ]; then - echo "❌ ERROR: Benchmark results directory '${RESULTS_DIR}' not found" + if [ ! -f "bench_results/summary.txt" ]; then + echo "❌ ERROR: benchmark summary file not found" exit 1 fi - echo "Generated files:" - ls -lh ${RESULTS_DIR}/ || true + echo "✅ Benchmark results found" echo "" - - for file in "${REQUIRED_FILES[@]}"; do - if [ ! -f "${RESULTS_DIR}/${file}" ]; then - MISSING_FILES+=("${file}") - fi - done - - if [ ${#MISSING_FILES[@]} -eq 0 ]; then - echo "✅ All required benchmark output files present" - echo "📊 Summary preview:" - head -20 ${RESULTS_DIR}/summary.txt || true - else - echo "⚠️ WARNING: Some required files are missing:" - printf ' - %s\n' "${MISSING_FILES[@]}" - echo "Continuing with available results..." 
- fi + echo "📊 Summary:" + column -t -s $'\t' bench_results/summary.txt + echo "" + echo "Generated files:" + ls -lh bench_results/ - name: Upload Pro benchmark results uses: actions/upload-artifact@v4 From 40029faecda302019666052349850390919f5c44 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 12 Nov 2025 10:04:27 +0000 Subject: [PATCH 33/38] Temp config to speed up --- .github/workflows/benchmark.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 9df228665d..e9996c6888 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -51,8 +51,7 @@ on: app_version: description: 'Which app version to benchmark' required: false - # FIXME: for debugging, restore 'both' before merging - default: 'pro_only' + default: 'both' type: choice options: - 'both' @@ -73,19 +72,20 @@ env: K6_VERSION: "1.3.0" VEGETA_VERSION: "12.13.0" # Determine which apps to run (default is 'pro_only' for all triggers) - RUN_CORE: ${{ (github.event.inputs.app_version || 'pro_only') != 'pro_only' && 'true' || '' }} - RUN_PRO: ${{ (github.event.inputs.app_version || 'pro_only') != 'core_only' && 'true' || '' }} + RUN_CORE: ${{ (github.event.inputs.app_version || 'both') != 'pro_only' && 'true' || '' }} + RUN_PRO: ${{ (github.event.inputs.app_version || 'both') != 'core_only' && 'true' || '' }} # Benchmark parameters (defaults in bench.rb unless overridden here for CI) - ROUTES: ${{ github.event.inputs.routes }} + # FIXME: default ROUTES, TOOLS and DURATION are set to speed up tests, remove before merging + ROUTES: ${{ github.event.inputs.routes || '/' }} RATE: ${{ github.event.inputs.rate || 'max' }} - DURATION: ${{ github.event.inputs.duration }} + DURATION: ${{ github.event.inputs.duration || '5s' }} REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout }} CONNECTIONS: ${{ github.event.inputs.connections }} MAX_CONNECTIONS: ${{ github.event.inputs.connections }} WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || 4 }} RAILS_MAX_THREADS: ${{ github.event.inputs.rails_threads || 3 }} RAILS_MIN_THREADS: ${{ github.event.inputs.rails_threads || 3 }} - TOOLS: ${{ github.event.inputs.tools }} + TOOLS: ${{ github.event.inputs.tools || 'fortio' }} jobs: benchmark: From 88dc86e04c8279223dd13f875e52f8843f033df7 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 12 Nov 2025 10:10:56 +0000 Subject: [PATCH 34/38] Optimize tools installation --- .github/workflows/benchmark.yml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index e9996c6888..6088b13d07 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -141,13 +141,14 @@ jobs: - name: Cache Fortio binary id: cache-fortio + if: contains(env.TOOLS, 'fortio') uses: actions/cache@v4 with: path: ~/bin/fortio key: fortio-${{ runner.os }}-${{ runner.arch }}-${{ env.FORTIO_VERSION }} - name: Install Fortio - if: steps.cache-fortio.outputs.cache-hit != 'true' + if: contains(env.TOOLS, 'fortio') && steps.cache-fortio.outputs.cache-hit != 'true' run: | echo "📦 Installing Fortio v${FORTIO_VERSION}" @@ -160,13 +161,14 @@ jobs: - name: Cache Vegeta binary id: cache-vegeta + if: contains(env.TOOLS, 'vegeta') uses: actions/cache@v4 with: path: ~/bin/vegeta key: vegeta-${{ runner.os }}-${{ runner.arch }}-${{ env.VEGETA_VERSION }} - name: Install Vegeta - if: steps.cache-vegeta.outputs.cache-hit != 'true' + if: 
contains(env.TOOLS, 'vegeta') && steps.cache-vegeta.outputs.cache-hit != 'true' run: | echo "📦 Installing Vegeta v${VEGETA_VERSION}" @@ -178,6 +180,7 @@ jobs: mv vegeta ~/bin/ - name: Setup k6 + if: contains(env.TOOLS, 'k6') uses: grafana/setup-k6-action@v1 with: k6-version: ${{ env.K6_VERSION }} From 1404c7b87fd872ade6cd6cc4337d3876f087fff3 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 12 Nov 2025 11:48:59 +0000 Subject: [PATCH 35/38] Add logging to server check --- spec/performance/bench.rb | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index 24ac0e8a23..28e850847c 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -140,9 +140,9 @@ def get_benchmark_routes(app_dir) # Helper method to check if server is responding def server_responding?(uri) response = Net::HTTP.get_response(uri) - response.is_a?(Net::HTTPSuccess) -rescue StandardError - false + { success: response.is_a?(Net::HTTPSuccess), info: "HTTP #{response.code} #{response.message}" } +rescue StandardError => e + { success: false, info: "#{e.class.name}: #{e.message}" } end # Wait for the server to be ready @@ -150,10 +150,24 @@ def server_responding?(uri) puts "Checking server availability at #{BASE_URL}..." test_uri = URI.parse("http://#{BASE_URL}#{routes.first}") start_time = Time.now +attempt_count = 0 loop do - break if server_responding?(test_uri) + attempt_count += 1 + attempt_start = Time.now + result = server_responding?(test_uri) + attempt_duration = Time.now - attempt_start + elapsed = Time.now - start_time + + # rubocop:disable Layout/LineLength + if result[:success] + puts " ✅ Attempt #{attempt_count} at #{elapsed.round(2)}s: SUCCESS - #{result[:info]} (took #{attempt_duration.round(3)}s)" + break + else + puts " ❌ Attempt #{attempt_count} at #{elapsed.round(2)}s: FAILED - #{result[:info]} (took #{attempt_duration.round(3)}s)" + end + # rubocop:enable Layout/LineLength - raise "Server at #{BASE_URL} not responding within #{TIMEOUT_SEC}s" if Time.now - start_time > TIMEOUT_SEC + raise "Server at #{BASE_URL} not responding within #{TIMEOUT_SEC}s" if elapsed > TIMEOUT_SEC sleep 1 end From e22d3db16c35002e51def69ae9e68adb2a25ed60 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 12 Nov 2025 12:02:21 +0000 Subject: [PATCH 36/38] Make installs frozen --- .github/workflows/benchmark.yml | 10 +++++----- spec/dummy/Gemfile.lock | 3 +++ 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 6088b13d07..4a725ec7a7 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -229,7 +229,7 @@ jobs: - name: Install Node modules with Yarn for Core dummy app if: env.RUN_CORE - run: cd spec/dummy && yarn install --no-progress --no-emoji + run: cd spec/dummy && yarn install --frozen-lockfile --no-progress --no-emoji - name: Save Core dummy app ruby gems to cache if: env.RUN_CORE @@ -242,9 +242,9 @@ jobs: if: env.RUN_CORE run: | cd spec/dummy - bundle lock --add-platform 'x86_64-linux' bundle config set path vendor/bundle - bundle _2.5.4_ check || bundle _2.5.4_ install --jobs=4 --retry=3 + bundle config set frozen true + bundle _2.5.4_ install --jobs=4 --retry=3 - name: Prepare Core production assets if: env.RUN_CORE @@ -382,9 +382,9 @@ jobs: if: env.RUN_PRO run: | cd react_on_rails_pro/spec/dummy - bundle lock --add-platform 'x86_64-linux' bundle config set path vendor/bundle - bundle _2.5.4_ 
check || bundle _2.5.4_ install --jobs=4 --retry=3 + bundle config set frozen true + bundle _2.5.4_ install --jobs=4 --retry=3 - name: Generate file-system based entrypoints for Pro if: env.RUN_PRO diff --git a/spec/dummy/Gemfile.lock b/spec/dummy/Gemfile.lock index f2990bbf01..351492cfc6 100644 --- a/spec/dummy/Gemfile.lock +++ b/spec/dummy/Gemfile.lock @@ -195,6 +195,8 @@ GEM nokogiri (1.18.10) mini_portile2 (~> 2.8.2) racc (~> 1.4) + nokogiri (1.18.10-x86_64-linux-gnu) + racc (~> 1.4) ostruct (0.6.3) package_json (0.1.0) parallel (1.24.0) @@ -408,6 +410,7 @@ GEM PLATFORMS ruby + x86_64-linux DEPENDENCIES amazing_print From b7f635bb2ffd0dca6dfeeb7f8eaf536d694264c2 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 12 Nov 2025 12:05:34 +0000 Subject: [PATCH 37/38] Allow redirects in server_responding --- spec/performance/bench.rb | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/spec/performance/bench.rb b/spec/performance/bench.rb index 28e850847c..ad1dc42674 100755 --- a/spec/performance/bench.rb +++ b/spec/performance/bench.rb @@ -140,7 +140,11 @@ def get_benchmark_routes(app_dir) # Helper method to check if server is responding def server_responding?(uri) response = Net::HTTP.get_response(uri) - { success: response.is_a?(Net::HTTPSuccess), info: "HTTP #{response.code} #{response.message}" } + # Accept both success (2xx) and redirect (3xx) responses as "server is responding" + success = response.is_a?(Net::HTTPSuccess) || response.is_a?(Net::HTTPRedirection) + info = "HTTP #{response.code} #{response.message}" + info += " -> #{response['location']}" if response.is_a?(Net::HTTPRedirection) && response["location"] + { success: success, info: info } rescue StandardError => e { success: false, info: "#{e.class.name}: #{e.message}" } end From 8f18eaddef556bddf66a284c15255fb919d0dc59 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Wed, 12 Nov 2025 12:24:32 +0000 Subject: [PATCH 38/38] Try full Pro benchmark --- .github/workflows/benchmark.yml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 4a725ec7a7..10c49b2476 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -75,17 +75,16 @@ env: RUN_CORE: ${{ (github.event.inputs.app_version || 'both') != 'pro_only' && 'true' || '' }} RUN_PRO: ${{ (github.event.inputs.app_version || 'both') != 'core_only' && 'true' || '' }} # Benchmark parameters (defaults in bench.rb unless overridden here for CI) - # FIXME: default ROUTES, TOOLS and DURATION are set to speed up tests, remove before merging - ROUTES: ${{ github.event.inputs.routes || '/' }} + ROUTES: ${{ github.event.inputs.routes }} RATE: ${{ github.event.inputs.rate || 'max' }} - DURATION: ${{ github.event.inputs.duration || '5s' }} + DURATION: ${{ github.event.inputs.duration }} REQUEST_TIMEOUT: ${{ github.event.inputs.request_timeout }} CONNECTIONS: ${{ github.event.inputs.connections }} MAX_CONNECTIONS: ${{ github.event.inputs.connections }} WEB_CONCURRENCY: ${{ github.event.inputs.web_concurrency || 4 }} RAILS_MAX_THREADS: ${{ github.event.inputs.rails_threads || 3 }} RAILS_MIN_THREADS: ${{ github.event.inputs.rails_threads || 3 }} - TOOLS: ${{ github.event.inputs.tools || 'fortio' }} + TOOLS: ${{ github.event.inputs.tools }} jobs: benchmark: