Skip to content

Commit 428bb99

Browse files
joaoantoniocardoso and patrickelectric
authored and committed
core: start-blueos-core: Add runtime tuning for improved stream performance
1 parent 415d5bd commit 428bb99

File tree

1 file changed

+59
-0
lines changed

1 file changed

+59
-0
lines changed

core/start-blueos-core

Lines changed: 59 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -220,6 +220,65 @@ prepare_cgroups() {
220220

221221
prepare_cgroups
222222

# Runtime performance tuning (applied every container start via /sys/ bind mount)

# Write a value to a sysctl file, then report the effective setting.
# Writes are best-effort: the file may be absent (kernel lacks the knob) or
# read-only (e.g. /proc not writable from the container), so failures are
# deliberately ignored and the report falls back to "N/A".
# Arguments:
#   $1 - label printed in the report line
#   $2 - sysctl file path under /proc
#   $3 - value to write
write_sysctl() {
  local label=$1 path=$2 value=$3
  echo "$value" > "$path" 2>/dev/null || true
  echo "$label: $(cat "$path" 2>/dev/null || echo N/A)"
}

tune_performance() {
  echo "Applying runtime performance tuning..."

  # Network buffer tuning for high-bandwidth WebRTC streaming (55Mbps+).
  # Maximum send buffer size per socket (the Pi's default was ~180KB).
  # WebRTC/SRTP sends ~55Mbps of UDP traffic; when the socket's send buffer is
  # too small, the kernel drops outgoing packets during CPU load spikes. 16MB
  # gives GStreamer/WebRTC headroom to queue bursts without drops — the single
  # most impactful change for preventing video stutters.
  write_sysctl wmem_max /proc/sys/net/core/wmem_max "$((16 * 1024 * 1024))"

  # Maximum receive buffer size per socket. Same logic but for incoming traffic
  # (STUN/TURN negotiation, RTCP feedback, MAVLink). Less critical than wmem
  # for outbound-heavy streaming, but prevents receive-side drops during
  # inbound bursts.
  write_sysctl rmem_max /proc/sys/net/core/rmem_max "$((16 * 1024 * 1024))"

  # Default buffer sizes for newly created sockets (when the application
  # doesn't explicitly set SO_SNDBUF/SO_RCVBUF). The Pi default is ~180KB. If
  # MCM or any BlueOS service opens a socket without tuning its buffers, it
  # inherits this default. 1MB is a reasonable baseline that prevents casual
  # sockets from starving under load.
  write_sysctl wmem_default /proc/sys/net/core/wmem_default "$((1024 * 1024))"
  write_sysctl rmem_default /proc/sys/net/core/rmem_default "$((1024 * 1024))"

  # Per-CPU input queue length for incoming packets. When the network driver
  # delivers packets faster than the kernel's softirq can process them, they
  # queue here (default 1000). During dual-camera streaming, CPU0 was seeing
  # time_squeeze events (softirq ran out of its time budget and packets backed
  # up); a deeper queue reduces drops during those transient spikes. This
  # pairs with the IRQ affinity changes that spread eth0 across CPU1/CPU2.
  write_sysctl netdev_max_backlog /proc/sys/net/core/netdev_max_backlog 5000

  # How aggressively the kernel swaps anonymous pages to disk (the SD card).
  # At the default of 60, under memory pressure, the kernel proactively swaps
  # out process memory to free RAM for file cache. On an SD card, swap I/O is
  # extremely slow (~10MB/s) and creates latency spikes for any swapped-out
  # process. During our baseline measurements, kswapd was spiking to 8-12% CPU
  # during streaming. At swappiness=10, the kernel strongly prefers reclaiming
  # file cache over swapping, which eliminated kswapd activity and freed
  # ~113MB of RAM.
  write_sysctl swappiness /proc/sys/vm/swappiness 10

  # Prevent SCHED_RR/SCHED_FIFO threads from starving the rest of the system.
  # Setting to -1 would disable the safety net entirely.
  # 950000/1000000 = 95% RT, leaving 50ms/s per core for non-RT threads.
  # (Report format differs from the others, so this one stays inline.)
  echo 950000 > /proc/sys/kernel/sched_rt_runtime_us 2>/dev/null || true
  echo "RT throttle: $(cat /proc/sys/kernel/sched_rt_runtime_us 2>/dev/null || echo N/A)us per $(cat /proc/sys/kernel/sched_rt_period_us 2>/dev/null || echo N/A)us period"
}

tune_performance
281+
223282
echo "Starting high priority services.."
224283
for TUPLE in "${PRIORITY_SERVICES[@]}"; do
225284
IFS=',' read -r NAME MEMORY_LIMIT_MB EXECUTABLE <<< "$TUPLE"

0 commit comments

Comments
 (0)