diff options
| author | Simon MacMullen <simon@rabbitmq.com> | 2013-06-18 12:29:30 +0100 |
|---|---|---|
| committer | Simon MacMullen <simon@rabbitmq.com> | 2013-06-18 12:29:30 +0100 |
| commit | 413dd11bda0b1c06f3577f555094859777fe016e (patch) | |
| tree | a08316b999caa69ab8793cfc98fce5d0483d063a | |
| parent | b47d8173ca58fb4939edbdd35ab51e2ae347eb85 (diff) | |
| download | rabbitmq-server-git-413dd11bda0b1c06f3577f555094859777fe016e.tar.gz | |
Eliminate ?MEMORY_LIMIT_SCALING and make the ratio at which we page configurable. Note that I have changed the representation from a ratio-of-a-ratio to just a plain ratio (i.e. proportion of total memory, not proportion of the high watermark). I believe this will be easier to understand. Hence also the name vm_memory_paging_watermark, chosen by analogy with vm_memory_high_watermark.
| -rw-r--r-- | ebin/rabbit_app.in | 1 | ||||
| -rw-r--r-- | src/rabbit_memory_monitor.erl | 21 |
2 files changed, 5 insertions, 17 deletions
diff --git a/ebin/rabbit_app.in b/ebin/rabbit_app.in index 339fa69eae..b28214aff0 100644 --- a/ebin/rabbit_app.in +++ b/ebin/rabbit_app.in @@ -19,6 +19,7 @@ {ssl_listeners, []}, {ssl_options, []}, {vm_memory_high_watermark, 0.4}, + {vm_memory_paging_watermark, 0.2}, {disk_free_limit, 1000000000}, %% 1GB {msg_store_index_module, rabbit_msg_store_ets_index}, {backing_queue_module, rabbit_variable_queue}, diff --git a/src/rabbit_memory_monitor.erl b/src/rabbit_memory_monitor.erl index 117ff95a97..a2df255cae 100644 --- a/src/rabbit_memory_monitor.erl +++ b/src/rabbit_memory_monitor.erl @@ -43,17 +43,6 @@ -define(DEFAULT_UPDATE_INTERVAL, 2500). -define(TABLE_NAME, ?MODULE). -%% Because we have a feedback loop here, we need to ensure that we -%% have some space for when the queues don't quite respond as fast as -%% we would like, or when there is buffering going on in other parts -%% of the system. In short, we aim to stay some distance away from -%% when the memory alarms will go off, which cause backpressure (of -%% some sort) on producers. Note that all other Thresholds are -%% relative to this scaling. --define(MEMORY_LIMIT_SCALING, 0.4). - --define(LIMIT_THRESHOLD, 0.5). %% don't limit queues when mem use is < this - %% If all queues are pushed to disk (duration 0), then the sum of %% their reported lengths will be 0. 
If memory then becomes available, %% unless we manually intervene, the sum will remain 0, and the queues @@ -207,15 +196,13 @@ internal_update(State = #state { queue_durations = Durations, desired_duration = DesiredDurationAvg, queue_duration_sum = Sum, queue_duration_count = Count }) -> - MemoryLimit = ?MEMORY_LIMIT_SCALING * vm_memory_monitor:get_memory_limit(), - MemoryRatio = case MemoryLimit > 0.0 of - true -> erlang:memory(total) / MemoryLimit; - false -> infinity - end, + {ok, LimitThreshold} = + application:get_env(rabbit, vm_memory_paging_watermark), + MemoryRatio = erlang:memory(total) / vm_memory_monitor:get_total_memory(), DesiredDurationAvg1 = if MemoryRatio =:= infinity -> 0.0; - MemoryRatio < ?LIMIT_THRESHOLD orelse Count == 0 -> + MemoryRatio < LimitThreshold orelse Count == 0 -> infinity; MemoryRatio < ?SUM_INC_THRESHOLD -> ((Sum + ?SUM_INC_AMOUNT) / Count) / MemoryRatio; |
