summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMatthew Sackman <matthew@lshift.net>2009-06-19 18:17:16 +0100
committerMatthew Sackman <matthew@lshift.net>2009-06-19 18:17:16 +0100
commite2735d50ed60bf8882c536cd2439bdf0a4a3a055 (patch)
treedc831c89df3df718e0fdb38e859d01f44652c458
parentae6989591b7a6e430a4f4c6ec4e51ebb15377b38 (diff)
downloadrabbitmq-server-git-e2735d50ed60bf8882c536cd2439bdf0a4a3a055.tar.gz
get_cache_info ==> cache_info.
An even better test (see parent commit message) is: rabbitmq-java-client/build/dist$ sh runjava.sh com/rabbitmq/examples/MulticastMain -y 50 -r 100 -s 1048576 -m 100 -z 120 Rabbit will now happily just sit there and work away (again, run reduce_memory_footprint twice first) even though it's seeing 100MB new a second which is going to 50 consumers, so 5GB a second. Needless to say, go back a few revisions, and it blows up within seconds.
-rw-r--r--src/rabbit_disk_queue.erl10
1 file changed, 5 insertions, 5 deletions
diff --git a/src/rabbit_disk_queue.erl b/src/rabbit_disk_queue.erl
index c8f726318e..6374fc6fe2 100644
--- a/src/rabbit_disk_queue.erl
+++ b/src/rabbit_disk_queue.erl
@@ -44,7 +44,7 @@
dump_queue/1, delete_non_durable_queues/1, auto_ack_next_message/1
]).
--export([length/1, get_cache_info/0]).
+-export([length/1, cache_info/0]).
-export([stop/0, stop_and_obliterate/0,
to_disk_only_mode/0, to_ram_disk_mode/0]).
@@ -263,7 +263,7 @@
-spec(to_ram_disk_mode/0 :: () -> 'ok').
-spec(to_disk_only_mode/0 :: () -> 'ok').
-spec(length/1 :: (queue_name()) -> non_neg_integer()).
--spec(get_cache_info/0 :: () -> [{atom(), term()}]).
+-spec(cache_info/0 :: () -> [{atom(), term()}]).
-endif.
@@ -334,8 +334,8 @@ to_ram_disk_mode() ->
length(Q) ->
gen_server2:call(?SERVER, {length, Q}, infinity).
-get_cache_info() ->
- gen_server2:call(?SERVER, get_cache_info, infinity).
+cache_info() ->
+ gen_server2:call(?SERVER, cache_info, infinity).
%% ---- GEN-SERVER INTERNAL API ----
@@ -478,7 +478,7 @@ handle_call({dump_queue, Q}, _From, State) ->
handle_call({delete_non_durable_queues, DurableQueues}, _From, State) ->
{ok, State1} = internal_delete_non_durable_queues(DurableQueues, State),
{reply, ok, State1};
-handle_call(get_cache_info, _From, State = #dqstate { message_cache = Cache }) ->
+handle_call(cache_info, _From, State = #dqstate { message_cache = Cache }) ->
{reply, ets:info(Cache), State}.
handle_cast({publish, Q, Message}, State) ->