
Commit 8a03975

Set the default vm_memory_high_watermark to 0.6 (#12161)
The default of 0.4 was very conservative even when it was set years ago. Since then:

- we moved to CQv2, which has much more predictable memory usage than (non-lazy) CQv1 did
- we removed CQ mirroring, which caused large sudden memory spikes in some situations
- we removed the option to store message payloads in memory in quorum queues

For the past two years or so, we've been running all our internal tests and benchmarks with a value of 0.8 and no OOM kills at all (note: we do this on Kubernetes, where the Cluster Operator overrides the available memory, leaving some additional headroom, but effectively we are still using more than 0.6 of the memory).
1 parent b6e8586 commit 8a03975
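Deployments that want to keep the previous, more conservative threshold can still override the default. A minimal sketch using the Erlang-term config (the advanced.config placement is an assumption for illustration; the key itself is the one changed by this commit, and the equivalent rabbitmq.conf key, vm_memory_high_watermark.relative, appears in the example file updated below):

%% advanced.config -- pin the watermark back to the pre-12161 default
[
  {rabbit, [
    %% fraction of detected RAM at which the memory alarm blocks publishers
    {vm_memory_high_watermark, 0.4}
  ]}
].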


6 files changed, +7 -7 lines


deps/rabbit/BUILD.bazel

Lines changed: 1 addition & 1 deletion
@@ -34,7 +34,7 @@ _APP_ENV = """[
     {ssl_listeners, []},
     {num_ssl_acceptors, 10},
     {ssl_options, []},
-    {vm_memory_high_watermark, 0.4},
+    {vm_memory_high_watermark, 0.6},
     {vm_memory_calculation_strategy, rss},
     {disk_free_limit, 50000000}, %% 50MB
     {backing_queue_module, rabbit_variable_queue},

deps/rabbit/Makefile

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@ define PROJECT_ENV
     {ssl_listeners, []},
     {num_ssl_acceptors, 10},
     {ssl_options, []},
-    {vm_memory_high_watermark, 0.4},
+    {vm_memory_high_watermark, 0.6},
     {vm_memory_calculation_strategy, rss},
     {disk_free_limit, 50000000}, %% 50MB
     {backing_queue_module, rabbit_variable_queue},

deps/rabbit/docs/rabbitmq.conf.example

Lines changed: 1 addition & 1 deletion
@@ -382,7 +382,7 @@
 
 ## Memory-based Flow Control threshold.
 ##
-# vm_memory_high_watermark.relative = 0.4
+# vm_memory_high_watermark.relative = 0.6
 
 ## Alternatively, we can set a limit (in bytes) of RAM used by the node.
 ##

deps/rabbit/priv/schema/rabbit.schema

Lines changed: 1 addition & 1 deletion
@@ -1103,7 +1103,7 @@ end}.
 
 %% Memory-based Flow Control threshold.
 %%
-%% {vm_memory_high_watermark, 0.4},
+%% {vm_memory_high_watermark, 0.6},
 
 %% Alternatively, we can set a limit (in bytes) of RAM used by the node.
 %%

deps/rabbit/test/unit_vm_memory_monitor_SUITE.erl

Lines changed: 1 addition & 1 deletion
@@ -118,4 +118,4 @@ set_and_verify_vm_memory_high_watermark_absolute(MemLimit0) ->
         _ ->
             ct:fail("Expected memory high watermark to be ~tp but it was ~tp", [Interpreted, MemLimit])
     end,
-    vm_memory_monitor:set_vm_memory_high_watermark(0.4).
+    vm_memory_monitor:set_vm_memory_high_watermark(0.6).

deps/rabbitmq_mqtt/test/shared_SUITE.erl

Lines changed: 2 additions & 2 deletions
@@ -1417,7 +1417,7 @@ block(Config) ->
     puback_timeout = publish_qos1_timeout(C, Topic, <<"Still blocked">>, 1000),
 
     %% Unblock
-    rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [0.4]),
+    rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [0.6]),
     ok = expect_publishes(C, Topic, [<<"Not blocked yet">>,
                                      <<"Now blocked">>,
                                      <<"Still blocked">>]),
@@ -1458,7 +1458,7 @@ block_only_publisher(Config) ->
     ?assertEqual(puback_timeout, publish_qos1_timeout(Con, Topic, <<"from Con 2">>, 500)),
     ?assertEqual(pong, emqtt:ping(Sub)),
 
-    rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [0.4]),
+    rpc(Config, vm_memory_monitor, set_vm_memory_high_watermark, [0.6]),
     %% Let it unblock
     timer:sleep(100),
 
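Both test suites toggle the limit at runtime through vm_memory_monitor:set_vm_memory_high_watermark/1, the same call adjusted above. A minimal sketch of exercising it from an Erlang shell attached to a node (the temporary 0.05 value is illustrative only, chosen to be low enough to trip the alarm):

%% drop the watermark very low to raise the memory alarm and block publishers ...
vm_memory_monitor:set_vm_memory_high_watermark(0.05),
%% ... then restore the new default to clear the alarm and unblock them
vm_memory_monitor:set_vm_memory_high_watermark(0.6).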
