Skip to content

Commit 8ad061a

Browse files
committed
[TASK] Add real CPU/memory metrics to benchmarks

- Fix bar chart height to prevent overflow with changelog bar
- Install GNU time in Docker image for accurate metrics
- Run /usr/bin/time -v inside container (not outside)
- Capture wall time, CPU time, CPU%, and peak memory
- Update JSON output with extended metrics structure

Now reports actual PHP process metrics instead of estimates.
Example: 5.18s wall, 10.48s CPU (202%), 119.8MB memory
1 parent f14a208 commit 8ad061a

File tree

3 files changed

+60
-37
lines changed

3 files changed

+60
-37
lines changed

Dockerfile

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,8 @@ COPY --from=ghcr.io/php/pie:bin /pie /usr/bin/pie
3636
RUN apk add --update $PHPIZE_DEPS
3737
RUN pie install arnaud-lb/inotify && docker-php-ext-install pcntl
3838

39+
# Install GNU time for benchmarking (busybox time doesn't support -v)
40+
RUN apk add --no-cache time
3941
RUN apk del $PHPIZE_DEPS && rm -rf /var/cache/apk/* /tmp/* /usr/share/php/* /usr/local/lib/php/doc/* /usr/local/lib/php/test/*
4042

4143
COPY . /opt/guides

benchmark/benchmark-docker.sh

Lines changed: 56 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -89,13 +89,14 @@ clean_caches() {
8989
rm -rf "$PROJECT_DIR/.cache" 2>/dev/null || true
9090
}
9191

92-
# Run single benchmark using wall-clock timing
92+
# Run single benchmark with CPU and memory metrics via /usr/bin/time inside container
9393
# Pass "fresh" as second arg to force clean output directory
9494
run_benchmark_simple() {
9595
local run_num=$1
9696
local fresh_output="${2:-no}"
9797
local output_dir="/tmp/benchmark-output"
9898
local log_file="/tmp/benchmark-log-$run_num.txt"
99+
local time_file="/tmp/benchmark-time-$run_num.txt"
99100

100101
# Only clean output dir if fresh is requested (cold scenario)
101102
if [ "$fresh_output" = "fresh" ]; then
@@ -109,56 +110,52 @@ run_benchmark_simple() {
109110
config_arg="--config=$DOCS_INPUT"
110111
fi
111112

112-
# Run guides with wall-clock timing
113-
local start_time end_time elapsed
114-
start_time=$(date +%s.%N)
115-
116-
# Run container - mount full project at /project, output at /output
117-
# Use relative paths for input and config
118-
# Use --user to match host user for output permissions
113+
# Run container with /usr/bin/time -v INSIDE the container for accurate metrics
119114
# Note: project mounted read-write so incremental rendering cache can be written
120115
# Mount shared /tmp for Twig cache persistence between warm runs
121116
local shared_tmp="/tmp/typo3-guides-benchmark-cache"
122117
mkdir -p "$shared_tmp"
118+
119+
# Run time inside container, output time stats to stderr which we capture
123120
docker run --rm \
124121
--user "$(id -u):$(id -g)" \
125122
-v "$PROJECT_DIR:/project" \
126123
-v "$output_dir:/output" \
127124
-v "$shared_tmp:/tmp" \
125+
--entrypoint /usr/bin/time \
128126
"$IMAGE_TAG" \
129-
--no-progress $config_arg --output=/output "$DOCS_INPUT" > "$log_file" 2>&1
127+
-v php /opt/guides/vendor/bin/guides --no-progress $config_arg --output=/output "$DOCS_INPUT" \
128+
> "$log_file" 2> "$time_file"
130129
local docker_exit=$?
131130

132-
end_time=$(date +%s.%N)
133-
elapsed=$(echo "$end_time - $start_time" | bc)
131+
# Parse /usr/bin/time output for metrics (GNU time format)
132+
local elapsed user_time sys_time peak_memory_kb cpu_percent
133+
elapsed=$(grep "Elapsed (wall clock)" "$time_file" | sed 's/.*: //' | awk -F: '{if (NF==3) print $1*3600+$2*60+$3; else if (NF==2) print $1*60+$2; else print $1}')
134+
user_time=$(grep "User time" "$time_file" | awk '{print $NF}')
135+
sys_time=$(grep "System time" "$time_file" | awk '{print $NF}')
136+
peak_memory_kb=$(grep "Maximum resident set size" "$time_file" | awk '{print $NF}')
137+
cpu_percent=$(grep "Percent of CPU" "$time_file" | sed 's/.*: //' | tr -d '%')
138+
139+
# Convert to MB and calculate totals
140+
local peak_memory_mb cpu_time
141+
peak_memory_mb=$(echo "scale=1; ${peak_memory_kb:-0} / 1024" | bc)
142+
cpu_time=$(echo "scale=2; ${user_time:-0} + ${sys_time:-0}" | bc)
134143

135144
# Count output files
136145
local file_count
137146
file_count=$(find "$output_dir" -name "*.html" 2>/dev/null | wc -l | tr -d ' ')
138147

139-
# Estimate memory from container (rough estimate based on output size)
140-
local output_size_kb estimated_memory_mb
141-
output_size_kb=$(du -sk "$output_dir" 2>/dev/null | awk '{print $1}')
142-
output_size_kb=${output_size_kb:-0}
143-
# Rough heuristic: memory is typically 50-100x output size for docs rendering
144-
if [ "$output_size_kb" -gt 0 ]; then
145-
estimated_memory_mb=$(echo "scale=0; ($output_size_kb * 60) / 1024" | bc)
146-
else
147-
estimated_memory_mb=64
148-
fi
149-
if [ "$estimated_memory_mb" -lt 50 ]; then
150-
estimated_memory_mb=64 # minimum reasonable estimate
151-
fi
152-
153-
# Output JSON result
154-
echo "{\"total_time_seconds\": $elapsed, \"peak_memory_mb\": $estimated_memory_mb, \"files_rendered\": $file_count}"
148+
# Output JSON result with extended metrics
149+
echo "{\"total_time_seconds\": $elapsed, \"cpu_time_seconds\": $cpu_time, \"cpu_percent\": ${cpu_percent:-0}, \"peak_memory_mb\": $peak_memory_mb, \"files_rendered\": $file_count}"
155150
}
156151

157152
# Run scenario and collect results
158153
run_scenario() {
159154
local scenario=$1
160155
local results=()
161156
local times=()
157+
local cpu_times=()
158+
local cpu_percents=()
162159
local memories=()
163160
local files=0
164161

@@ -172,9 +169,11 @@ run_scenario() {
172169
result=$(run_benchmark_simple $i fresh)
173170
results+=("$result")
174171
time_s=$(echo "$result" | jq -r '.total_time_seconds')
172+
cpu_s=$(echo "$result" | jq -r '.cpu_time_seconds')
173+
cpu_pct=$(echo "$result" | jq -r '.cpu_percent')
175174
memory_mb=$(echo "$result" | jq -r '.peak_memory_mb')
176175
files=$(echo "$result" | jq -r '.files_rendered')
177-
log_success " Time: ${time_s}s, Memory: ~${memory_mb}MB, Files: $files"
176+
log_success " Time: ${time_s}s, CPU: ${cpu_s}s (${cpu_pct}%), Memory: ${memory_mb}MB, Files: $files"
178177
done
179178
;;
180179
warm)
@@ -188,9 +187,11 @@ run_scenario() {
188187
result=$(run_benchmark_simple $i) # Reuse existing cache
189188
results+=("$result")
190189
time_s=$(echo "$result" | jq -r '.total_time_seconds')
190+
cpu_s=$(echo "$result" | jq -r '.cpu_time_seconds')
191+
cpu_pct=$(echo "$result" | jq -r '.cpu_percent')
191192
memory_mb=$(echo "$result" | jq -r '.peak_memory_mb')
192193
files=$(echo "$result" | jq -r '.files_rendered')
193-
log_success " Time: ${time_s}s, Memory: ~${memory_mb}MB, Files: $files"
194+
log_success " Time: ${time_s}s, CPU: ${cpu_s}s (${cpu_pct}%), Memory: ${memory_mb}MB, Files: $files"
194195
done
195196
;;
196197
partial)
@@ -211,35 +212,46 @@ run_scenario() {
211212
result=$(run_benchmark_simple $i) # Reuse existing cache
212213
results+=("$result")
213214
time_s=$(echo "$result" | jq -r '.total_time_seconds')
215+
cpu_s=$(echo "$result" | jq -r '.cpu_time_seconds')
216+
cpu_pct=$(echo "$result" | jq -r '.cpu_percent')
214217
memory_mb=$(echo "$result" | jq -r '.peak_memory_mb')
215218
files=$(echo "$result" | jq -r '.files_rendered')
216-
log_success " Time: ${time_s}s, Memory: ~${memory_mb}MB, Files: $files"
219+
log_success " Time: ${time_s}s, CPU: ${cpu_s}s (${cpu_pct}%), Memory: ${memory_mb}MB, Files: $files"
217220
done
218221
;;
219222
esac
220223

221224
# Extract values for aggregation
222225
for result in "${results[@]}"; do
223226
times+=($(echo "$result" | jq -r '.total_time_seconds'))
227+
cpu_times+=($(echo "$result" | jq -r '.cpu_time_seconds'))
228+
cpu_percents+=($(echo "$result" | jq -r '.cpu_percent'))
224229
memories+=($(echo "$result" | jq -r '.peak_memory_mb'))
225230
done
226231

227232
# Calculate aggregates
228-
local time_sum=0 mem_sum=0
233+
local time_sum=0 cpu_sum=0 cpu_pct_sum=0 mem_sum=0
229234
local time_min=${times[0]} time_max=${times[0]}
235+
local cpu_min=${cpu_times[0]} cpu_max=${cpu_times[0]}
230236
local mem_min=${memories[0]} mem_max=${memories[0]}
231237

232238
for i in "${!times[@]}"; do
233239
time_sum=$(echo "$time_sum + ${times[$i]}" | bc)
240+
cpu_sum=$(echo "$cpu_sum + ${cpu_times[$i]}" | bc)
241+
cpu_pct_sum=$(echo "$cpu_pct_sum + ${cpu_percents[$i]}" | bc)
234242
mem_sum=$(echo "$mem_sum + ${memories[$i]}" | bc)
235243

236244
if (( $(echo "${times[$i]} < $time_min" | bc -l) )); then time_min=${times[$i]}; fi
237245
if (( $(echo "${times[$i]} > $time_max" | bc -l) )); then time_max=${times[$i]}; fi
246+
if (( $(echo "${cpu_times[$i]} < $cpu_min" | bc -l) )); then cpu_min=${cpu_times[$i]}; fi
247+
if (( $(echo "${cpu_times[$i]} > $cpu_max" | bc -l) )); then cpu_max=${cpu_times[$i]}; fi
238248
if (( $(echo "${memories[$i]} < $mem_min" | bc -l) )); then mem_min=${memories[$i]}; fi
239249
if (( $(echo "${memories[$i]} > $mem_max" | bc -l) )); then mem_max=${memories[$i]}; fi
240250
done
241251

242252
local time_avg=$(echo "scale=3; $time_sum / ${#times[@]}" | bc)
253+
local cpu_avg=$(echo "scale=2; $cpu_sum / ${#cpu_times[@]}" | bc)
254+
local cpu_pct_avg=$(echo "scale=0; $cpu_pct_sum / ${#cpu_percents[@]}" | bc)
243255
local mem_avg=$(echo "scale=1; $mem_sum / ${#memories[@]}" | bc)
244256

245257
# Save to JSON
@@ -255,19 +267,27 @@ run_scenario() {
255267
"timestamp": "$TIMESTAMP",
256268
"runs": $RUNS,
257269
"metrics": {
258-
"time": {
270+
"wall_time": {
259271
"avg_seconds": $time_avg,
260272
"min_seconds": $time_min,
261273
"max_seconds": $time_max
262274
},
275+
"cpu_time": {
276+
"avg_seconds": $cpu_avg,
277+
"min_seconds": $cpu_min,
278+
"max_seconds": $cpu_max,
279+
"avg_percent": $cpu_pct_avg
280+
},
263281
"memory": {
264282
"avg_mb": $mem_avg,
265283
"min_mb": $mem_min,
266284
"max_mb": $mem_max
267285
},
268286
"files_rendered": $files
269287
},
270-
"raw_times_seconds": [$(IFS=,; echo "${times[*]}")],
288+
"raw_wall_times_seconds": [$(IFS=,; echo "${times[*]}")],
289+
"raw_cpu_times_seconds": [$(IFS=,; echo "${cpu_times[*]}")],
290+
"raw_cpu_percents": [$(IFS=,; echo "${cpu_percents[*]}")],
271291
"raw_memories_mb": [$(IFS=,; echo "${memories[*]}")]
272292
}
273293
EOF
@@ -277,8 +297,9 @@ EOF
277297
# Print summary
278298
echo ""
279299
echo "=== $scenario Summary ==="
280-
echo " Avg Time: ${time_avg}s (min: ${time_min}s, max: ${time_max}s)"
281-
echo " Avg Memory: ~${mem_avg}MB (estimated)"
300+
echo " Wall Time: ${time_avg}s (min: ${time_min}s, max: ${time_max}s)"
301+
echo " CPU Time: ${cpu_avg}s (~${cpu_pct_avg}% utilization)"
302+
echo " Memory: ${mem_avg}MB peak"
282303
echo " Files: $files"
283304
echo ""
284305
}

docs/index.html

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -294,9 +294,9 @@
294294
.bar-chart {
295295
display: flex;
296296
align-items: flex-end;
297-
height: 200px;
297+
height: 280px;
298298
gap: 2rem;
299-
padding: 1rem 0;
299+
padding: 2rem 0 1rem 0;
300300
}
301301

302302
.bar-group {

Comments (0)