summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/rabbit.erl8
-rw-r--r--src/rabbit_alarm.erl2
-rw-r--r--src/rabbit_cli.erl2
-rw-r--r--src/rabbit_control_main.erl1110
-rw-r--r--src/rabbit_lager.erl19
-rw-r--r--src/rabbit_log.erl1
-rw-r--r--src/rabbit_mnesia.erl2
-rw-r--r--src/rabbit_mnesia_rename.erl16
-rw-r--r--src/rabbit_msg_store.erl111
-rw-r--r--src/rabbit_msg_store_ets_index.erl10
-rw-r--r--src/rabbit_msg_store_vhost_sup.erl93
-rw-r--r--src/rabbit_plugins.erl190
-rw-r--r--src/rabbit_queue_index.erl75
-rw-r--r--src/rabbit_runtime_parameters.erl2
-rw-r--r--src/rabbit_sup.erl10
-rw-r--r--src/rabbit_upgrade.erl42
-rw-r--r--src/rabbit_variable_queue.erl289
-rw-r--r--src/rabbit_version.erl27
-rw-r--r--src/rabbit_vhost.erl32
-rw-r--r--src/rabbit_vm.erl4
20 files changed, 666 insertions, 1379 deletions
diff --git a/src/rabbit.erl b/src/rabbit.erl
index d895d57ff2..e121fb3e2e 100644
--- a/src/rabbit.erl
+++ b/src/rabbit.erl
@@ -153,6 +153,14 @@
{requires, core_initialized},
{enables, routing_ready}]}).
+-rabbit_boot_step({upgrade_queues,
+ [{description, "per-vhost message store migration"},
+ {mfa, {rabbit_upgrade,
+ maybe_migrate_queues_to_per_vhost_storage,
+ []}},
+ {requires, [core_initialized]},
+ {enables, recovery}]}).
+
-rabbit_boot_step({recovery,
[{description, "exchange, queue and binding recovery"},
{mfa, {rabbit, recover, []}},
diff --git a/src/rabbit_alarm.erl b/src/rabbit_alarm.erl
index dd64c6f1c8..daf2c167fa 100644
--- a/src/rabbit_alarm.erl
+++ b/src/rabbit_alarm.erl
@@ -16,7 +16,7 @@
%% There are two types of alarms handled by this module:
%%
%% * per-node resource (disk, memory) alarms for the whole cluster. If any node
-%% has an alarm, then all publishing should be disabled througout the
+%% has an alarm, then all publishing should be disabled across the
%% cluster until all alarms clear. When a node sets such an alarm,
%% this information is automatically propagated throughout the cluster.
%% `#alarms.alarmed_nodes' is being used to track this type of alarms.
diff --git a/src/rabbit_cli.erl b/src/rabbit_cli.erl
index 65e8563ddf..f6d005be90 100644
--- a/src/rabbit_cli.erl
+++ b/src/rabbit_cli.erl
@@ -154,7 +154,7 @@ start_distribution_anon(TriesLeft, _) ->
start_distribution_anon(TriesLeft - 1, Reason)
end.
-%% Tries to start distribution with random name choosen from limited list of candidates - to
+%% Tries to start distribution with random name chosen from limited list of candidates - to
%% prevent atom table pollution on target nodes.
start_distribution() ->
rabbit_nodes:ensure_epmd(),
diff --git a/src/rabbit_control_main.erl b/src/rabbit_control_main.erl
deleted file mode 100644
index d96c1dd476..0000000000
--- a/src/rabbit_control_main.erl
+++ /dev/null
@@ -1,1110 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_control_main).
--include("rabbit.hrl").
--include("rabbit_cli.hrl").
--include("rabbit_misc.hrl").
-
--export([start/0, stop/0, parse_arguments/2, action/5, action/6,
- sync_queue/1, cancel_sync_queue/1, become/1,
- purge_queue/1]).
-
--import(rabbit_misc, [rpc_call/4, rpc_call/5]).
-
--define(EXTERNAL_CHECK_INTERVAL, 1000).
-
--define(GLOBAL_DEFS(Node), [?QUIET_DEF, ?NODE_DEF(Node), ?TIMEOUT_DEF]).
-
--define(COMMANDS,
- [stop,
- stop_app,
- start_app,
- wait,
- reset,
- force_reset,
- rotate_logs,
- hipe_compile,
-
- {join_cluster, [?RAM_DEF]},
- change_cluster_node_type,
- update_cluster_nodes,
- {forget_cluster_node, [?OFFLINE_DEF]},
- rename_cluster_node,
- force_boot,
- cluster_status,
- {sync_queue, [?VHOST_DEF]},
- {cancel_sync_queue, [?VHOST_DEF]},
- {purge_queue, [?VHOST_DEF]},
-
- add_user,
- delete_user,
- change_password,
- clear_password,
- authenticate_user,
- set_user_tags,
- list_users,
-
- add_vhost,
- delete_vhost,
- list_vhosts,
- {set_permissions, [?VHOST_DEF]},
- {clear_permissions, [?VHOST_DEF]},
- {list_permissions, [?VHOST_DEF]},
- list_user_permissions,
-
- {set_parameter, [?VHOST_DEF]},
- {clear_parameter, [?VHOST_DEF]},
- {list_parameters, [?VHOST_DEF]},
-
- set_global_parameter,
- clear_global_parameter,
- list_global_parameters,
-
- {set_policy, [?VHOST_DEF, ?PRIORITY_DEF, ?APPLY_TO_DEF]},
- {clear_policy, [?VHOST_DEF]},
- {set_operator_policy, [?VHOST_DEF, ?PRIORITY_DEF, ?APPLY_TO_DEF]},
- {clear_operator_policy, [?VHOST_DEF]},
- {list_policies, [?VHOST_DEF]},
- {list_operator_policies, [?VHOST_DEF]},
-
- {set_vhost_limits, [?VHOST_DEF]},
- {clear_vhost_limits, [?VHOST_DEF]},
- {list_queues, [?VHOST_DEF, ?OFFLINE_DEF, ?ONLINE_DEF, ?LOCAL_DEF]},
- {list_exchanges, [?VHOST_DEF]},
- {list_bindings, [?VHOST_DEF]},
- {list_connections, [?VHOST_DEF]},
- list_channels,
- {list_consumers, [?VHOST_DEF]},
- status,
- environment,
- report,
- set_cluster_name,
- eval,
- node_health_check,
-
- close_connection,
- {trace_on, [?VHOST_DEF]},
- {trace_off, [?VHOST_DEF]},
- set_vm_memory_high_watermark,
- set_disk_free_limit,
- help,
- {encode, [?DECODE_DEF, ?CIPHER_DEF, ?HASH_DEF, ?ITERATIONS_DEF, ?LIST_CIPHERS_DEF, ?LIST_HASHES_DEF]}
- ]).
-
--define(GLOBAL_QUERIES,
- [{"Connections", rabbit_networking, connection_info_all,
- connection_info_keys},
- {"Channels", rabbit_channel, info_all, info_keys}]).
-
--define(VHOST_QUERIES,
- [{"Queues", rabbit_amqqueue, info_all, info_keys},
- {"Exchanges", rabbit_exchange, info_all, info_keys},
- {"Bindings", rabbit_binding, info_all, info_keys},
- {"Consumers", rabbit_amqqueue, consumers_all, consumer_info_keys},
- {"Permissions", rabbit_auth_backend_internal, list_vhost_permissions,
- vhost_perms_info_keys},
- {"Policies", rabbit_policy, list_formatted, info_keys},
- {"Parameters", rabbit_runtime_parameters, list_formatted, info_keys}]).
-
--define(COMMANDS_NOT_REQUIRING_APP,
- [stop, stop_app, start_app, wait, reset, force_reset, rotate_logs,
- join_cluster, change_cluster_node_type, update_cluster_nodes,
- forget_cluster_node, rename_cluster_node, cluster_status, status,
- environment, eval, force_boot, help, hipe_compile, encode]).
-
-%% [Command | {Command, DefaultTimeoutInMilliSeconds}]
--define(COMMANDS_WITH_TIMEOUT,
- [list_user_permissions, list_policies, list_queues, list_exchanges,
- list_bindings, list_connections, list_channels, list_consumers,
- list_vhosts, list_parameters, list_global_parameters,
- purge_queue,
- {node_health_check, 70000}]).
-
-%%----------------------------------------------------------------------------
-
--spec start() -> no_return().
--spec stop() -> 'ok'.
--spec action
- (atom(), node(), [string()], [{string(), any()}],
- fun ((string(), [any()]) -> 'ok')) ->
- 'ok'.
-
--spec action
- (atom(), node(), [string()], [{string(), any()}],
- fun ((string(), [any()]) -> 'ok'), timeout()) ->
- 'ok'.
-
-%%----------------------------------------------------------------------------
-
-start() ->
- rabbit_cli:main(
- fun (Args, NodeStr) ->
- parse_arguments(Args, NodeStr)
- end,
- fun (Command, Node, Args, Opts) ->
- Quiet = proplists:get_bool(?QUIET_OPT, Opts),
- Inform = case Quiet of
- true -> fun (_Format, _Args1) -> ok end;
- false -> fun (Format, Args1) ->
- io:format(Format ++ " ...~n", Args1)
- end
- end,
- try
- T = case get_timeout(Command, Opts) of
- {ok, Timeout} ->
- Timeout;
- {error, _} ->
- %% since this is an error with user input, ignore the quiet
- %% setting
- io:format("Failed to parse provided timeout value, using ~s~n", [?RPC_TIMEOUT]),
- ?RPC_TIMEOUT
- end,
- do_action(Command, Node, Args, Opts, Inform, T)
- catch _:E -> E
- end
- end, rabbit_ctl_usage).
-
-parse_arguments(CmdLine, NodeStr) ->
- rabbit_cli:parse_arguments(
- ?COMMANDS, ?GLOBAL_DEFS(NodeStr), ?NODE_OPT, CmdLine).
-
-print_report(Node, {Descr, Module, InfoFun, KeysFun}) ->
- io:format("~s:~n", [Descr]),
- print_report0(Node, {Module, InfoFun, KeysFun}, []).
-
-print_report(Node, {Descr, Module, InfoFun, KeysFun}, VHostArg) ->
- io:format("~s on ~s:~n", [Descr, VHostArg]),
- print_report0(Node, {Module, InfoFun, KeysFun}, VHostArg).
-
-print_report0(Node, {Module, InfoFun, KeysFun}, VHostArg) ->
- case rpc_call(Node, Module, InfoFun, VHostArg) of
- [_|_] = Results -> InfoItems = rpc_call(Node, Module, KeysFun, []),
- display_row([atom_to_list(I) || I <- InfoItems]),
- display_info_list(Results, InfoItems);
- _ -> ok
- end,
- io:nl().
-
-get_timeout(Command, Opts) ->
- Default = case proplists:lookup(Command, ?COMMANDS_WITH_TIMEOUT) of
- none ->
- infinity;
- {Command, true} ->
- ?RPC_TIMEOUT;
- {Command, D} ->
- D
- end,
- Result = case proplists:get_value(?TIMEOUT_OPT, Opts, Default) of
- use_default ->
- parse_timeout(Default);
- Value ->
- parse_timeout(Value)
- end,
- Result.
-
-
-parse_number(N) when is_list(N) ->
- try list_to_integer(N) of
- Val -> Val
- catch error:badarg ->
- %% could have been a float, give it
- %% another shot
- list_to_float(N)
- end.
-
-parse_timeout("infinity") ->
- {ok, infinity};
-parse_timeout(infinity) ->
- {ok, infinity};
-parse_timeout(N) when is_list(N) ->
- try parse_number(N) of
- M ->
- Y = case M >= 0 of
- true -> round(M) * 1000;
- false -> ?RPC_TIMEOUT
- end,
- {ok, Y}
- catch error:badarg ->
- {error, infinity}
- end;
-parse_timeout(N) ->
- {ok, N}.
-
-announce_timeout(infinity, _Inform) ->
- %% no-op
- ok;
-announce_timeout(Timeout, Inform) when is_number(Timeout) ->
- Inform("Timeout: ~w seconds", [Timeout/1000]),
- ok.
-
-stop() ->
- ok.
-
-%%----------------------------------------------------------------------------
-
-do_action(Command, Node, Args, Opts, Inform, Timeout) ->
- case lists:member(Command, ?COMMANDS_NOT_REQUIRING_APP) of
- false ->
- case ensure_app_running(Node) of
- ok ->
- case proplists:lookup(Command, ?COMMANDS_WITH_TIMEOUT) of
- {Command, _} ->
- announce_timeout(Timeout, Inform),
- action(Command, Node, Args, Opts, Inform, Timeout);
- none ->
- action(Command, Node, Args, Opts, Inform)
- end;
- E -> E
- end;
- true ->
- action(Command, Node, Args, Opts, Inform)
- end.
-
-action(stop, Node, Args, _Opts, Inform) ->
- Inform("Stopping and halting node ~p", [Node]),
- Res = call(Node, {rabbit, stop_and_halt, []}),
- case {Res, Args} of
- {ok, [PidFile]} -> wait_for_process_death(
- read_pid_file(PidFile, false));
- {ok, [_, _| _]} -> exit({badarg, Args});
- _ -> ok
- end,
- Res;
-
-action(stop_app, Node, [], _Opts, Inform) ->
- Inform("Stopping rabbit application on node ~p", [Node]),
- call(Node, {rabbit, stop, []});
-
-action(start_app, Node, [], _Opts, Inform) ->
- Inform("Starting node ~p", [Node]),
- call(Node, {rabbit, start, []});
-
-action(reset, Node, [], _Opts, Inform) ->
- Inform("Resetting node ~p", [Node]),
- require_mnesia_stopped(Node,
- fun() ->
- call(Node, {rabbit_mnesia, reset, []})
- end);
-
-action(force_reset, Node, [], _Opts, Inform) ->
- Inform("Forcefully resetting node ~p", [Node]),
- require_mnesia_stopped(Node,
- fun() ->
- call(Node, {rabbit_mnesia, force_reset, []})
- end);
-
-action(join_cluster, Node, [ClusterNodeS], Opts, Inform) ->
- ClusterNode = list_to_atom(ClusterNodeS),
- NodeType = case proplists:get_bool(?RAM_OPT, Opts) of
- true -> ram;
- false -> disc
- end,
- Inform("Clustering node ~p with ~p", [Node, ClusterNode]),
- require_mnesia_stopped(Node,
- fun() ->
- rpc_call(Node, rabbit_mnesia, join_cluster, [ClusterNode, NodeType])
- end);
-
-action(change_cluster_node_type, Node, ["ram"], _Opts, Inform) ->
- Inform("Turning ~p into a ram node", [Node]),
- require_mnesia_stopped(Node,
- fun() ->
- rpc_call(Node, rabbit_mnesia, change_cluster_node_type, [ram])
- end);
-action(change_cluster_node_type, Node, [Type], _Opts, Inform)
- when Type =:= "disc" orelse Type =:= "disk" ->
- Inform("Turning ~p into a disc node", [Node]),
- require_mnesia_stopped(Node,
- fun() ->
- rpc_call(Node, rabbit_mnesia, change_cluster_node_type, [disc])
- end);
-
-action(update_cluster_nodes, Node, [ClusterNodeS], _Opts, Inform) ->
- ClusterNode = list_to_atom(ClusterNodeS),
- Inform("Updating cluster nodes for ~p from ~p", [Node, ClusterNode]),
- require_mnesia_stopped(Node,
- fun() ->
- rpc_call(Node, rabbit_mnesia, update_cluster_nodes, [ClusterNode])
- end);
-
-action(forget_cluster_node, Node, [ClusterNodeS], Opts, Inform) ->
- ClusterNode = list_to_atom(ClusterNodeS),
- RemoveWhenOffline = proplists:get_bool(?OFFLINE_OPT, Opts),
- Inform("Removing node ~p from cluster", [ClusterNode]),
- case RemoveWhenOffline of
- true -> become(Node),
- rabbit_mnesia:forget_cluster_node(ClusterNode, true);
- false -> rpc_call(Node, rabbit_mnesia, forget_cluster_node,
- [ClusterNode, false])
- end;
-
-action(rename_cluster_node, Node, NodesS, _Opts, Inform) ->
- Nodes = split_list([list_to_atom(N) || N <- NodesS]),
- Inform("Renaming cluster nodes:~n~s~n",
- [lists:flatten([rabbit_misc:format(" ~s -> ~s~n", [F, T]) ||
- {F, T} <- Nodes])]),
- rabbit_mnesia_rename:rename(Node, Nodes);
-
-action(force_boot, Node, [], _Opts, Inform) ->
- Inform("Forcing boot for Mnesia dir ~s", [mnesia:system_info(directory)]),
- case rabbit:is_running(Node) of
- false -> rabbit_mnesia:force_load_next_boot();
- true -> {error, rabbit_running}
- end;
-
-action(sync_queue, Node, [Q], Opts, Inform) ->
- VHost = proplists:get_value(?VHOST_OPT, Opts),
- QName = rabbit_misc:r(list_to_binary(VHost), queue, list_to_binary(Q)),
- Inform("Synchronising ~s", [rabbit_misc:rs(QName)]),
- rpc_call(Node, rabbit_control_main, sync_queue, [QName]);
-
-action(cancel_sync_queue, Node, [Q], Opts, Inform) ->
- VHost = proplists:get_value(?VHOST_OPT, Opts),
- QName = rabbit_misc:r(list_to_binary(VHost), queue, list_to_binary(Q)),
- Inform("Stopping synchronising ~s", [rabbit_misc:rs(QName)]),
- rpc_call(Node, rabbit_control_main, cancel_sync_queue, [QName]);
-
-action(wait, Node, [PidFile], _Opts, Inform) ->
- Inform("Waiting for ~p", [Node]),
- wait_for_application(Node, PidFile, rabbit_and_plugins, Inform);
-action(wait, Node, [PidFile, App], _Opts, Inform) ->
- Inform("Waiting for ~p on ~p", [App, Node]),
- wait_for_application(Node, PidFile, list_to_atom(App), Inform);
-
-action(status, Node, [], _Opts, Inform) ->
- Inform("Status of node ~p", [Node]),
- display_call_result(Node, {rabbit, status, []});
-
-action(cluster_status, Node, [], _Opts, Inform) ->
- Inform("Cluster status of node ~p", [Node]),
- Status = unsafe_rpc(Node, rabbit_mnesia, status, []),
- io:format("~p~n", [Status ++ [{alarms,
- [alarms_by_node(Name) || Name <- nodes_in_cluster(Node)]}]]),
- ok;
-
-action(environment, Node, _App, _Opts, Inform) ->
- Inform("Application environment of node ~p", [Node]),
- display_call_result(Node, {rabbit, environment, []});
-
-action(rotate_logs, Node, [], _Opts, Inform) ->
- Inform("Rotating logs for node ~p", [Node]),
- call(Node, {rabbit, rotate_logs, []});
-
-action(hipe_compile, _Node, [TargetDir], _Opts, _Inform) ->
- ok = application:load(rabbit),
- case rabbit_hipe:can_hipe_compile() of
- true ->
- {ok, _, _} = rabbit_hipe:compile_to_directory(TargetDir),
- ok;
- false ->
- {error, "HiPE compilation is not supported"}
- end;
-
-action(close_connection, Node, [PidStr, Explanation], _Opts, Inform) ->
- Inform("Closing connection \"~s\"", [PidStr]),
- rpc_call(Node, rabbit_networking, close_connection,
- [rabbit_misc:string_to_pid(PidStr), Explanation]);
-
-action(add_user, Node, Args = [Username, _Password], _Opts, Inform) ->
- Inform("Creating user \"~s\"", [Username]),
- call(Node, {rabbit_auth_backend_internal, add_user, Args});
-
-action(delete_user, Node, Args = [_Username], _Opts, Inform) ->
- Inform("Deleting user \"~s\"", Args),
- call(Node, {rabbit_auth_backend_internal, delete_user, Args});
-
-action(change_password, Node, Args = [Username, _Newpassword], _Opts, Inform) ->
- Inform("Changing password for user \"~s\"", [Username]),
- call(Node, {rabbit_auth_backend_internal, change_password, Args});
-
-action(clear_password, Node, Args = [Username], _Opts, Inform) ->
- Inform("Clearing password for user \"~s\"", [Username]),
- call(Node, {rabbit_auth_backend_internal, clear_password, Args});
-
-action(authenticate_user, Node, Args = [Username, _Password], _Opts, Inform) ->
- Inform("Authenticating user \"~s\"", [Username]),
- call(Node, {rabbit_access_control, check_user_pass_login, Args});
-
-action(set_user_tags, Node, [Username | TagsStr], _Opts, Inform) ->
- Tags = [list_to_atom(T) || T <- TagsStr],
- Inform("Setting tags for user \"~s\" to ~p", [Username, Tags]),
- rpc_call(Node, rabbit_auth_backend_internal, set_tags,
- [list_to_binary(Username), Tags]);
-
-action(add_vhost, Node, Args = [_VHostPath], _Opts, Inform) ->
- Inform("Creating vhost \"~s\"", Args),
- call(Node, {rabbit_vhost, add, Args});
-
-action(delete_vhost, Node, Args = [_VHostPath], _Opts, Inform) ->
- Inform("Deleting vhost \"~s\"", Args),
- call(Node, {rabbit_vhost, delete, Args});
-
-action(trace_on, Node, [], Opts, Inform) ->
- VHost = proplists:get_value(?VHOST_OPT, Opts),
- Inform("Starting tracing for vhost \"~s\"", [VHost]),
- rpc_call(Node, rabbit_trace, start, [list_to_binary(VHost)]);
-
-action(trace_off, Node, [], Opts, Inform) ->
- VHost = proplists:get_value(?VHOST_OPT, Opts),
- Inform("Stopping tracing for vhost \"~s\"", [VHost]),
- rpc_call(Node, rabbit_trace, stop, [list_to_binary(VHost)]);
-
-action(set_vm_memory_high_watermark, Node, [Arg], _Opts, Inform) ->
- Frac = list_to_float(case string:chr(Arg, $.) of
- 0 -> Arg ++ ".0";
- _ -> Arg
- end),
- Inform("Setting memory threshold on ~p to ~p", [Node, Frac]),
- rpc_call(Node, vm_memory_monitor, set_vm_memory_high_watermark, [Frac]);
-
-action(set_vm_memory_high_watermark, Node, ["absolute", Arg], _Opts, Inform) ->
- case rabbit_resource_monitor_misc:parse_information_unit(Arg) of
- {ok, Limit} ->
- Inform("Setting memory threshold on ~p to ~p bytes", [Node, Limit]),
- rpc_call(Node, vm_memory_monitor, set_vm_memory_high_watermark,
- [{absolute, Limit}]);
- {error, parse_error} ->
- {error_string, rabbit_misc:format(
- "Unable to parse absolute memory limit value ~p", [Arg])}
- end;
-
-action(set_disk_free_limit, Node, [Arg], _Opts, Inform) ->
- case rabbit_resource_monitor_misc:parse_information_unit(Arg) of
- {ok, Limit} ->
- Inform("Setting disk free limit on ~p to ~p bytes", [Node, Limit]),
- rpc_call(Node, rabbit_disk_monitor, set_disk_free_limit, [Limit]);
- {error, parse_error} ->
- {error_string, rabbit_misc:format(
- "Unable to parse disk free limit value ~p", [Arg])}
- end;
-
-action(set_disk_free_limit, Node, ["mem_relative", Arg], _Opts, Inform) ->
- Frac = list_to_float(case string:chr(Arg, $.) of
- 0 -> Arg ++ ".0";
- _ -> Arg
- end),
- Inform("Setting disk free limit on ~p to ~p of total RAM", [Node, Frac]),
- rpc_call(Node,
- rabbit_disk_monitor,
- set_disk_free_limit,
- [{mem_relative, Frac}]);
-
-
-action(set_permissions, Node, [Username, CPerm, WPerm, RPerm], Opts, Inform) ->
- VHost = proplists:get_value(?VHOST_OPT, Opts),
- Inform("Setting permissions for user \"~s\" in vhost \"~s\"",
- [Username, VHost]),
- call(Node, {rabbit_auth_backend_internal, set_permissions,
- [Username, VHost, CPerm, WPerm, RPerm]});
-
-action(clear_permissions, Node, [Username], Opts, Inform) ->
- VHost = proplists:get_value(?VHOST_OPT, Opts),
- Inform("Clearing permissions for user \"~s\" in vhost \"~s\"",
- [Username, VHost]),
- call(Node, {rabbit_auth_backend_internal, clear_permissions,
- [Username, VHost]});
-
-action(set_parameter, Node, [Component, Key, Value], Opts, Inform) ->
- VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- Inform("Setting runtime parameter ~p for component ~p to ~p",
- [Key, Component, Value]),
- rpc_call(
- Node, rabbit_runtime_parameters, parse_set,
- [VHostArg, list_to_binary(Component), list_to_binary(Key), Value, none]);
-
-action(clear_parameter, Node, [Component, Key], Opts, Inform) ->
- VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- Inform("Clearing runtime parameter ~p for component ~p", [Key, Component]),
- rpc_call(Node, rabbit_runtime_parameters, clear, [VHostArg,
- list_to_binary(Component),
- list_to_binary(Key)]);
-
-action(set_global_parameter, Node, [Key, Value], _Opts, Inform) ->
- Inform("Setting global runtime parameter ~p to ~p", [Key, Value]),
- rpc_call(
- Node, rabbit_runtime_parameters, parse_set_global,
- [rabbit_data_coercion:to_atom(Key), rabbit_data_coercion:to_binary(Value)]
- );
-
-action(clear_global_parameter, Node, [Key], _Opts, Inform) ->
- Inform("Clearing global runtime parameter ~p", [Key]),
- rpc_call(
- Node, rabbit_runtime_parameters, clear_global,
- [rabbit_data_coercion:to_atom(Key)]
- );
-
-action(set_policy, Node, [Key, Pattern, Defn], Opts, Inform) ->
- Msg = "Setting policy ~p for pattern ~p to ~p with priority ~p",
- VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- PriorityArg = proplists:get_value(?PRIORITY_OPT, Opts),
- ApplyToArg = list_to_binary(proplists:get_value(?APPLY_TO_OPT, Opts)),
- Inform(Msg, [Key, Pattern, Defn, PriorityArg]),
- Res = rpc_call(
- Node, rabbit_policy, parse_set,
- [VHostArg, list_to_binary(Key), list_to_binary(Pattern), list_to_binary(Defn), list_to_binary(PriorityArg), ApplyToArg]),
- case Res of
- {error, Format, Args} when is_list(Format) andalso is_list(Args) ->
- {error_string, rabbit_misc:format(Format, Args)};
- _ ->
- Res
- end;
-
-action(clear_policy, Node, [Key], Opts, Inform) ->
- VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- Inform("Clearing policy ~p", [Key]),
- rpc_call(Node, rabbit_policy, delete, [VHostArg, list_to_binary(Key)]);
-
-action(set_operator_policy, Node, [Key, Pattern, Defn], Opts, Inform) ->
- Msg = "Setting operator policy override ~p for pattern ~p to ~p with priority ~p",
- VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- PriorityArg = proplists:get_value(?PRIORITY_OPT, Opts),
- ApplyToArg = list_to_binary(proplists:get_value(?APPLY_TO_OPT, Opts)),
- Inform(Msg, [Key, Pattern, Defn, PriorityArg]),
- Res = rpc_call(
- Node, rabbit_policy, parse_set_op,
- [VHostArg, list_to_binary(Key), list_to_binary(Pattern), list_to_binary(Defn), list_to_binary(PriorityArg), ApplyToArg]),
- case Res of
- {error, Format, Args} when is_list(Format) andalso is_list(Args) ->
- {error_string, rabbit_misc:format(Format, Args)};
- _ ->
- Res
- end;
-
-action(clear_operator_policy, Node, [Key], Opts, Inform) ->
- VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- Inform("Clearing operator policy ~p", [Key]),
- rpc_call(Node, rabbit_policy, delete_op, [VHostArg, list_to_binary(Key)]);
-
-action(set_vhost_limits, Node, [Defn], Opts, Inform) ->
- VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- Inform("Setting vhost limits for vhost ~p", [VHostArg]),
- rpc_call(Node, rabbit_vhost_limit, parse_set, [VHostArg, Defn]),
- ok;
-
-action(clear_vhost_limits, Node, [], Opts, Inform) ->
- VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- Inform("Clearing vhost ~p limits", [VHostArg]),
- rpc_call(Node, rabbit_vhost_limit, clear, [VHostArg]);
-
-action(report, Node, _Args, _Opts, Inform) ->
- Inform("Reporting server status on ~p~n~n", [erlang:universaltime()]),
- [begin ok = action(Action, N, [], [], Inform), io:nl() end ||
- N <- unsafe_rpc(Node, rabbit_mnesia, cluster_nodes, [running]),
- Action <- [status, cluster_status, environment]],
- VHosts = unsafe_rpc(Node, rabbit_vhost, list, []),
- [print_report(Node, Q) || Q <- ?GLOBAL_QUERIES],
- [print_report(Node, Q, [V]) || Q <- ?VHOST_QUERIES, V <- VHosts],
- ok;
-
-action(set_cluster_name, Node, [Name], _Opts, Inform) ->
- Inform("Setting cluster name to ~s", [Name]),
- rpc_call(Node, rabbit_nodes, set_cluster_name, [list_to_binary(Name)]);
-
-action(eval, Node, [Expr], _Opts, _Inform) ->
- case erl_scan:string(Expr) of
- {ok, Scanned, _} ->
- case erl_parse:parse_exprs(Scanned) of
- {ok, Parsed} -> {value, Value, _} =
- unsafe_rpc(
- Node, erl_eval, exprs, [Parsed, []]),
- io:format("~p~n", [Value]),
- ok;
- {error, E} -> {error_string, format_parse_error(E)}
- end;
- {error, E, _} ->
- {error_string, format_parse_error(E)}
- end;
-
-action(help, _Node, _Args, _Opts, _Inform) ->
- io:format("~s", [rabbit_ctl_usage:usage()]);
-
-action(encode, _Node, Args, Opts, _Inform) ->
- ListCiphers = lists:member({?LIST_CIPHERS_OPT, true}, Opts),
- ListHashes = lists:member({?LIST_HASHES_OPT, true}, Opts),
- Decode = lists:member({?DECODE_OPT, true}, Opts),
- Cipher = list_to_atom(proplists:get_value(?CIPHER_OPT, Opts)),
- Hash = list_to_atom(proplists:get_value(?HASH_OPT, Opts)),
- Iterations = list_to_integer(proplists:get_value(?ITERATIONS_OPT, Opts)),
-
- {_, Msg} = rabbit_control_pbe:encode(ListCiphers, ListHashes, Decode, Cipher, Hash, Iterations, Args),
- io:format(Msg ++ "~n");
-
-action(Command, Node, Args, Opts, Inform) ->
- %% For backward compatibility, run commands accepting a timeout with
- %% the default timeout.
- action(Command, Node, Args, Opts, Inform, ?RPC_TIMEOUT).
-
-action(purge_queue, _Node, [], _Opts, _Inform, _Timeout) ->
- {error, "purge_queue takes queue name as an argument"};
-
-action(purge_queue, Node, [Q], Opts, Inform, Timeout) ->
- VHost = proplists:get_value(?VHOST_OPT, Opts),
- QRes = rabbit_misc:r(list_to_binary(VHost), queue, list_to_binary(Q)),
- Inform("Purging ~s", [rabbit_misc:rs(QRes)]),
- rpc_call(Node, rabbit_control_main, purge_queue, [QRes], Timeout);
-
-action(list_users, Node, [], _Opts, Inform, Timeout) ->
- Inform("Listing users", []),
- call_emitter(Node, {rabbit_auth_backend_internal, list_users, []},
- rabbit_auth_backend_internal:user_info_keys(),
- [{timeout, Timeout}, to_bin_utf8]);
-
-action(list_permissions, Node, [], Opts, Inform, Timeout) ->
- VHost = proplists:get_value(?VHOST_OPT, Opts),
- Inform("Listing permissions in vhost \"~s\"", [VHost]),
- call_emitter(Node, {rabbit_auth_backend_internal, list_vhost_permissions, [VHost]},
- rabbit_auth_backend_internal:vhost_perms_info_keys(),
- [{timeout, Timeout}, to_bin_utf8, is_escaped]);
-
-action(list_parameters, Node, [], Opts, Inform, Timeout) ->
- VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- Inform("Listing runtime parameters", []),
- call_emitter(Node, {rabbit_runtime_parameters, list_formatted, [VHostArg]},
- rabbit_runtime_parameters:info_keys(),
- [{timeout, Timeout}]);
-
-action(list_global_parameters, Node, [], _Opts, Inform, Timeout) ->
- Inform("Listing global runtime parameters", []),
- call_emitter(Node, {rabbit_runtime_parameters, list_global_formatted, []},
- rabbit_runtime_parameters:global_info_keys(),
- [{timeout, Timeout}]);
-
-action(list_policies, Node, [], Opts, Inform, Timeout) ->
- VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- Inform("Listing policies", []),
- call_emitter(Node, {rabbit_policy, list_formatted, [VHostArg]},
- rabbit_policy:info_keys(),
- [{timeout, Timeout}]);
-
-action(list_operator_policies, Node, [], Opts, Inform, Timeout) ->
- VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- Inform("Listing policies", []),
- call_emitter(Node, {rabbit_policy, list_formatted_op, [VHostArg]},
- rabbit_policy:info_keys(),
- [{timeout, Timeout}]);
-
-
-action(list_vhosts, Node, Args, _Opts, Inform, Timeout) ->
- Inform("Listing vhosts", []),
- ArgAtoms = default_if_empty(Args, [name]),
- call_emitter(Node, {rabbit_vhost, info_all, []}, ArgAtoms,
- [{timeout, Timeout}, to_bin_utf8]);
-
-action(list_user_permissions, _Node, _Args = [], _Opts, _Inform, _Timeout) ->
- {error_string,
- "list_user_permissions expects a username argument, but none provided."};
-action(list_user_permissions, Node, Args = [_Username], _Opts, Inform, Timeout) ->
- Inform("Listing permissions for user ~p", Args),
- call_emitter(Node, {rabbit_auth_backend_internal, list_user_permissions, Args},
- rabbit_auth_backend_internal:user_perms_info_keys(),
- [{timeout, Timeout}, to_bin_utf8, is_escaped]);
-
-action(list_queues, Node, Args, Opts, Inform, Timeout) ->
- case rabbit_cli:mutually_exclusive_flags(
- Opts, all, [{?ONLINE_OPT, online}
- ,{?OFFLINE_OPT, offline}
- ,{?LOCAL_OPT, local}]) of
- {ok, Filter} ->
- Inform("Listing queues", []),
- VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- ArgAtoms = default_if_empty(Args, [name, messages]),
-
- %% Data for emission
- Nodes = nodes_in_cluster(Node, Timeout),
- ChunksOpt = {chunks, get_number_of_chunks(Filter, Nodes)},
- TimeoutOpt = {timeout, Timeout},
- EmissionRef = make_ref(),
- EmissionRefOpt = {ref, EmissionRef},
-
- case Filter of
- all ->
- start_emission(Node, {rabbit_amqqueue, emit_info_all,
- [Nodes, VHostArg, ArgAtoms]},
- [TimeoutOpt, EmissionRefOpt]),
- start_emission(Node, {rabbit_amqqueue, emit_info_down,
- [VHostArg, ArgAtoms]},
- [TimeoutOpt, EmissionRefOpt]);
- online ->
- start_emission(Node, {rabbit_amqqueue, emit_info_all,
- [Nodes, VHostArg, ArgAtoms]},
- [TimeoutOpt, EmissionRefOpt]);
- offline ->
- start_emission(Node, {rabbit_amqqueue, emit_info_down,
- [VHostArg, ArgAtoms]},
- [TimeoutOpt, EmissionRefOpt]);
- local ->
- start_emission(Node, {rabbit_amqqueue, emit_info_local,
- [VHostArg, ArgAtoms]},
- [TimeoutOpt, EmissionRefOpt])
- end,
- display_emission_result(EmissionRef, ArgAtoms, [ChunksOpt, TimeoutOpt]);
- {error, ErrStr} ->
- {error_string, ErrStr}
- end;
-
-action(list_exchanges, Node, Args, Opts, Inform, Timeout) ->
- Inform("Listing exchanges", []),
- VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- ArgAtoms = default_if_empty(Args, [name, type]),
- call_emitter(Node, {rabbit_exchange, info_all, [VHostArg, ArgAtoms]},
- ArgAtoms, [{timeout, Timeout}]);
-
-action(list_bindings, Node, Args, Opts, Inform, Timeout) ->
- Inform("Listing bindings", []),
- VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- ArgAtoms = default_if_empty(Args, [source_name, source_kind,
- destination_name, destination_kind,
- routing_key, arguments]),
- call_emitter(Node, {rabbit_binding, info_all, [VHostArg, ArgAtoms]},
- ArgAtoms, [{timeout, Timeout}]);
-
-action(list_connections, Node, Args, _Opts, Inform, Timeout) ->
- Inform("Listing connections", []),
- ArgAtoms = default_if_empty(Args, [user, peer_host, peer_port, state]),
- Nodes = nodes_in_cluster(Node, Timeout),
- call_emitter(Node, {rabbit_networking, emit_connection_info_all, [Nodes, ArgAtoms]},
- ArgAtoms, [{timeout, Timeout}, {chunks, length(Nodes)}]);
-
-action(list_channels, Node, Args, _Opts, Inform, Timeout) ->
- Inform("Listing channels", []),
- ArgAtoms = default_if_empty(Args, [pid, user, consumer_count,
- messages_unacknowledged]),
- Nodes = nodes_in_cluster(Node, Timeout),
- call_emitter(Node, {rabbit_channel, emit_info_all, [Nodes, ArgAtoms]}, ArgAtoms,
- [{timeout, Timeout}, {chunks, length(Nodes)}]);
-
-action(list_consumers, Node, _Args, Opts, Inform, Timeout) ->
- Inform("Listing consumers", []),
- VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- Nodes = nodes_in_cluster(Node, Timeout),
- call_emitter(Node, {rabbit_amqqueue, emit_consumers_all, [Nodes, VHostArg]},
- rabbit_amqqueue:consumer_info_keys(),
- [{timeout, Timeout}, {chunks, length(Nodes)}]);
-
-action(node_health_check, Node, _Args, _Opts, Inform, Timeout) ->
- Inform("Checking health of node ~p", [Node]),
- case rabbit_health_check:node(Node, Timeout) of
- ok ->
- io:format("Health check passed~n"),
- ok;
- Other ->
- Other
- end.
-
-format_parse_error({_Line, Mod, Err}) -> lists:flatten(Mod:format_error(Err)).
-
-sync_queue(Q) ->
- rabbit_mirror_queue_misc:sync_queue(Q).
-
-cancel_sync_queue(Q) ->
- rabbit_mirror_queue_misc:cancel_sync_queue(Q).
-
-purge_queue(Q) ->
- rabbit_amqqueue:with(
- Q, fun(Q1) ->
- rabbit_amqqueue:purge(Q1),
- ok
- end).
-
-%%----------------------------------------------------------------------------
-
-require_mnesia_stopped(Node, Fun) ->
- case Fun() of
- {error, mnesia_unexpectedly_running} ->
- {error_string, rabbit_misc:format(
- " Mnesia is still running on node ~p.
- Please stop the node with rabbitmqctl stop_app first.", [Node])};
- Other -> Other
- end.
-
-wait_for_application(Node, PidFile, Application, Inform) ->
- Pid = read_pid_file(PidFile, true),
- Inform("pid is ~s", [Pid]),
- wait_for_application(Node, Pid, Application).
-
-wait_for_application(Node, Pid, rabbit_and_plugins) ->
- wait_for_startup(Node, Pid);
-wait_for_application(Node, Pid, Application) ->
- while_process_is_alive(
- Node, Pid, fun() -> rabbit_nodes:is_running(Node, Application) end).
-
-wait_for_startup(Node, Pid) ->
- while_process_is_alive(
- Node, Pid, fun() -> rpc:call(Node, rabbit, await_startup, []) =:= ok end).
-
-while_process_is_alive(Node, Pid, Activity) ->
- case rabbit_misc:is_os_process_alive(Pid) of
- true -> case Activity() of
- true -> ok;
- false -> timer:sleep(?EXTERNAL_CHECK_INTERVAL),
- while_process_is_alive(Node, Pid, Activity)
- end;
- false -> {error, process_not_running}
- end.
-
-wait_for_process_death(Pid) ->
- case rabbit_misc:is_os_process_alive(Pid) of
- true -> timer:sleep(?EXTERNAL_CHECK_INTERVAL),
- wait_for_process_death(Pid);
- false -> ok
- end.
-
-read_pid_file(PidFile, Wait) ->
- case {file:read_file(PidFile), Wait} of
- {{ok, Bin}, _} ->
- S = binary_to_list(Bin),
- {match, [PidS]} = re:run(S, "[^\\s]+",
- [{capture, all, list}]),
- try list_to_integer(PidS)
- catch error:badarg ->
- exit({error, {garbage_in_pid_file, PidFile}})
- end,
- PidS;
- {{error, enoent}, true} ->
- timer:sleep(?EXTERNAL_CHECK_INTERVAL),
- read_pid_file(PidFile, Wait);
- {{error, _} = E, _} ->
- exit({error, {could_not_read_pid, E}})
- end.
-
-become(BecomeNode) ->
- error_logger:tty(false),
- case net_adm:ping(BecomeNode) of
- pong -> exit({node_running, BecomeNode});
- pang -> ok = net_kernel:stop(),
- io:format(" * Impersonating node: ~s...", [BecomeNode]),
- {ok, _} = rabbit_cli:start_distribution(BecomeNode),
- io:format(" done~n", []),
- Dir = mnesia:system_info(directory),
- io:format(" * Mnesia directory : ~s~n", [Dir])
- end.
-
-%%----------------------------------------------------------------------------
-
-default_if_empty(List, Default) when is_list(List) ->
- if List == [] -> Default;
- true -> [list_to_atom(X) || X <- List]
- end.
-
-display_info_message_row(IsEscaped, Result, InfoItemKeys) ->
- display_row([format_info_item(
- case proplists:lookup(X, Result) of
- none when is_list(Result), length(Result) > 0 ->
- exit({error, {bad_info_key, X}});
- none -> Result;
- {X, Value} -> Value
- end, IsEscaped) || X <- InfoItemKeys]).
-
-display_info_message(IsEscaped, InfoItemKeys) ->
- fun ([], _) ->
- ok;
- ([FirstResult|_] = List, _) when is_list(FirstResult) ->
- lists:foreach(fun(Result) ->
- display_info_message_row(IsEscaped, Result, InfoItemKeys)
- end,
- List),
- ok;
- (Result, _) ->
- display_info_message_row(IsEscaped, Result, InfoItemKeys),
- ok
- end.
-
-display_info_list(Results, InfoItemKeys) when is_list(Results) ->
- lists:foreach(
- fun (Result) -> display_row(
- [format_info_item(proplists:get_value(X, Result), true)
- || X <- InfoItemKeys])
- end, lists:sort(Results)),
- ok;
-display_info_list(Other, _) ->
- Other.
-
-display_row(Row) ->
- io:fwrite(string:join(Row, "\t")),
- io:nl().
-
--define(IS_U8(X), (X >= 0 andalso X =< 255)).
--define(IS_U16(X), (X >= 0 andalso X =< 65535)).
-
-format_info_item(#resource{name = Name}, IsEscaped) ->
- escape(Name, IsEscaped);
-format_info_item({N1, N2, N3, N4} = Value, _IsEscaped) when
- ?IS_U8(N1), ?IS_U8(N2), ?IS_U8(N3), ?IS_U8(N4) ->
- rabbit_misc:ntoa(Value);
-format_info_item({K1, K2, K3, K4, K5, K6, K7, K8} = Value, _IsEscaped) when
- ?IS_U16(K1), ?IS_U16(K2), ?IS_U16(K3), ?IS_U16(K4),
- ?IS_U16(K5), ?IS_U16(K6), ?IS_U16(K7), ?IS_U16(K8) ->
- rabbit_misc:ntoa(Value);
-format_info_item(Value, _IsEscaped) when is_pid(Value) ->
- rabbit_misc:pid_to_string(Value);
-format_info_item(Value, IsEscaped) when is_binary(Value) ->
- escape(Value, IsEscaped);
-format_info_item(Value, IsEscaped) when is_atom(Value) ->
- escape(atom_to_list(Value), IsEscaped);
-format_info_item([{TableEntryKey, TableEntryType, _TableEntryValue} | _] =
- Value, IsEscaped) when is_binary(TableEntryKey) andalso
- is_atom(TableEntryType) ->
- io_lib:format("~1000000000000p", [prettify_amqp_table(Value, IsEscaped)]);
-format_info_item([T | _] = Value, IsEscaped)
- when is_tuple(T) orelse is_pid(T) orelse is_binary(T) orelse is_atom(T) orelse
- is_list(T) ->
- "[" ++
- lists:nthtail(2, lists:append(
- [", " ++ format_info_item(E, IsEscaped)
- || E <- Value])) ++ "]";
-format_info_item({Key, Value}, IsEscaped) ->
- "{" ++ io_lib:format("~p", [Key]) ++ ", " ++
- format_info_item(Value, IsEscaped) ++ "}";
-format_info_item(Value, _IsEscaped) ->
- io_lib:format("~w", [Value]).
-
-display_call_result(Node, MFA) ->
- case call(Node, MFA) of
- {badrpc, _} = Res -> throw(Res);
- Res -> io:format("~p~n", [Res]),
- ok
- end.
-
-unsafe_rpc(Node, Mod, Fun, Args) ->
- unsafe_rpc(Node, Mod, Fun, Args, ?RPC_TIMEOUT).
-
-unsafe_rpc(Node, Mod, Fun, Args, Timeout) ->
- case rpc_call(Node, Mod, Fun, Args, Timeout) of
- {badrpc, _} = Res -> throw(Res);
- Normal -> Normal
- end.
-
-ensure_app_running(Node) ->
- case call(Node, {rabbit, is_running, []}) of
- true -> ok;
- false -> {error_string,
- rabbit_misc:format(
- "rabbit application is not running on node ~s.~n"
- " * Suggestion: start it with \"rabbitmqctl start_app\" "
- "and try again", [Node])};
- Other -> Other
- end.
-
-call(Node, {Mod, Fun, Args}) ->
- rpc_call(Node, Mod, Fun, lists:map(fun list_to_binary_utf8/1, Args)).
-
-call_emitter(Node, {Mod, Fun, Args}, InfoKeys, Opts) ->
- Ref = start_emission(Node, {Mod, Fun, Args}, Opts),
- display_emission_result(Ref, InfoKeys, Opts).
-
-start_emission(Node, {Mod, Fun, Args}, Opts) ->
- ToBinUtf8 = proplists:get_value(to_bin_utf8, Opts, false),
- Timeout = proplists:get_value(timeout, Opts, infinity),
- Ref = proplists:get_value(ref, Opts, make_ref()),
- rabbit_control_misc:spawn_emitter_caller(
- Node, Mod, Fun, prepare_call_args(Args, ToBinUtf8),
- Ref, self(), Timeout),
- Ref.
-
-display_emission_result(Ref, InfoKeys, Opts) ->
- IsEscaped = proplists:get_value(is_escaped, Opts, false),
- Chunks = proplists:get_value(chunks, Opts, 1),
- Timeout = proplists:get_value(timeout, Opts, infinity),
- EmissionStatus = rabbit_control_misc:wait_for_info_messages(
- self(), Ref, display_info_message(IsEscaped, InfoKeys), ok, Timeout, Chunks),
- emission_to_action_result(EmissionStatus).
-
-%% Convert rabbit_control_misc:wait_for_info_messages/6 return value
-%% into form expected by rabbit_cli:main/3.
-emission_to_action_result({ok, ok}) ->
- ok;
-emission_to_action_result({error, Error}) ->
- Error.
-
-prepare_call_args(Args, ToBinUtf8) ->
- case ToBinUtf8 of
- true -> valid_utf8_args(Args);
- false -> Args
- end.
-
-valid_utf8_args(Args) ->
- lists:map(fun list_to_binary_utf8/1, Args).
-
-list_to_binary_utf8(L) ->
- B = list_to_binary(L),
- case rabbit_binary_parser:validate_utf8(B) of
- ok -> B;
- error -> throw({error, {not_utf_8, L}})
- end.
-
-%% escape does C-style backslash escaping of non-printable ASCII
-%% characters. We don't escape characters above 127, since they may
-%% form part of UTF-8 strings.
-
-escape(Atom, IsEscaped) when is_atom(Atom) ->
- escape(atom_to_list(Atom), IsEscaped);
-escape(Bin, IsEscaped) when is_binary(Bin) ->
- escape(binary_to_list(Bin), IsEscaped);
-escape(L, false) when is_list(L) ->
- escape_char(lists:reverse(L), []);
-escape(L, true) when is_list(L) ->
- L.
-
-escape_char([$\\ | T], Acc) ->
- escape_char(T, [$\\, $\\ | Acc]);
-escape_char([X | T], Acc) when X >= 32, X /= 127 ->
- escape_char(T, [X | Acc]);
-escape_char([X | T], Acc) ->
- escape_char(T, [$\\, $0 + (X bsr 6), $0 + (X band 8#070 bsr 3),
- $0 + (X band 7) | Acc]);
-escape_char([], Acc) ->
- Acc.
-
-prettify_amqp_table(Table, IsEscaped) ->
- [{escape(K, IsEscaped), prettify_typed_amqp_value(T, V, IsEscaped)}
- || {K, T, V} <- Table].
-
-prettify_typed_amqp_value(longstr, Value, IsEscaped) ->
- escape(Value, IsEscaped);
-prettify_typed_amqp_value(table, Value, IsEscaped) ->
- prettify_amqp_table(Value, IsEscaped);
-prettify_typed_amqp_value(array, Value, IsEscaped) ->
- [prettify_typed_amqp_value(T, V, IsEscaped) || {T, V} <- Value];
-prettify_typed_amqp_value(_Type, Value, _IsEscaped) ->
- Value.
-
-split_list([]) -> [];
-split_list([_]) -> exit(even_list_needed);
-split_list([A, B | T]) -> [{A, B} | split_list(T)].
-
-nodes_in_cluster(Node) ->
- unsafe_rpc(Node, rabbit_mnesia, cluster_nodes, [running], ?RPC_TIMEOUT).
-
-nodes_in_cluster(Node, Timeout) ->
- unsafe_rpc(Node, rabbit_mnesia, cluster_nodes, [running], Timeout).
-
-alarms_by_node(Name) ->
- case rpc_call(Name, rabbit, status, []) of
- {badrpc,nodedown} -> {Name, [nodedown]};
- Status ->
- {_, As} = lists:keyfind(alarms, 1, Status),
- {Name, As}
- end.
-
-get_number_of_chunks(all, Nodes) ->
- length(Nodes) + 1;
-get_number_of_chunks(online, Nodes) ->
- length(Nodes);
-get_number_of_chunks(offline, _) ->
- 1;
-get_number_of_chunks(local, _) ->
- 1.
diff --git a/src/rabbit_lager.erl b/src/rabbit_lager.erl
index 8beee10846..c1ed613088 100644
--- a/src/rabbit_lager.erl
+++ b/src/rabbit_lager.erl
@@ -210,7 +210,7 @@ configure_lager() ->
%% messages to the default sink. To know the list of expected extra
%% sinks, we look at the 'lager_extra_sinks' compilation option.
Sinks0 = application:get_env(lager, extra_sinks, []),
- Sinks1 = configure_extra_sinks(Sinks0,
+ Sinks1 = configure_extra_sinks(Sinks0,
[error_logger | list_expected_sinks()]),
%% TODO Waiting for basho/lager#303
%% Sinks2 = lists:keystore(error_logger_lager_event, 1, Sinks1,
@@ -231,11 +231,7 @@ configure_lager() ->
configure_extra_sinks(Sinks, [SinkName | Rest]) ->
Sink0 = proplists:get_value(SinkName, Sinks, []),
Sink1 = case proplists:is_defined(handlers, Sink0) of
- false -> lists:keystore(handlers, 1, Sink0,
- {handlers,
- [{lager_forwarder_backend,
- lager_util:make_internal_sink_name(lager)
- }]});
+ false -> default_sink_config(SinkName, Sink0);
true -> Sink0
end,
Sinks1 = lists:keystore(SinkName, 1, Sinks, {SinkName, Sink1}),
@@ -243,6 +239,17 @@ configure_extra_sinks(Sinks, [SinkName | Rest]) ->
configure_extra_sinks(Sinks, []) ->
Sinks.
+default_sink_config(rabbit_log_upgrade_lager_event, Sink) ->
+ Handlers = lager_handlers(application:get_env(rabbit,
+ lager_handler_upgrade,
+ tty)),
+ lists:keystore(handlers, 1, Sink, {handlers, Handlers});
+default_sink_config(_, Sink) ->
+ lists:keystore(handlers, 1, Sink,
+ {handlers,
+ [{lager_forwarder_backend,
+ lager_util:make_internal_sink_name(lager)}]}).
+
list_expected_sinks() ->
case application:get_env(rabbit, lager_extra_sinks) of
{ok, List} ->
diff --git a/src/rabbit_log.erl b/src/rabbit_log.erl
index be5f0146b6..22181ce8b7 100644
--- a/src/rabbit_log.erl
+++ b/src/rabbit_log.erl
@@ -78,6 +78,7 @@ make_internal_sink_name(rabbit_log_channel) -> rabbit_log_channel_lager_event;
make_internal_sink_name(rabbit_log_mirroring) -> rabbit_log_mirroring_lager_event;
make_internal_sink_name(rabbit_log_queue) -> rabbit_log_queue_lager_event;
make_internal_sink_name(rabbit_log_federation) -> rabbit_log_federation_lager_event;
+make_internal_sink_name(rabbit_log_upgrade) -> rabbit_log_upgrade_lager_event;
make_internal_sink_name(Category) ->
lager_util:make_internal_sink_name(Category).
diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl
index 51deed8597..9d1282b936 100644
--- a/src/rabbit_mnesia.erl
+++ b/src/rabbit_mnesia.erl
@@ -877,7 +877,7 @@ check_rabbit_consistency(Remote) ->
%% that a `reset' would leave it in. We cannot simply check if the
%% mnesia tables aren't there because restarted RAM nodes won't have
%% tables while still being non-virgin. What we do instead is to
-%% check if the mnesia directory is non existant or empty, with the
+%% check if the mnesia directory is non-existent or empty, with the
%% exception of the cluster status files, which will be there thanks to
%% `rabbit_node_monitor:prepare_cluster_status_file/0'.
is_virgin_node() ->
diff --git a/src/rabbit_mnesia_rename.erl b/src/rabbit_mnesia_rename.erl
index 2d7e0f56b6..bcaaf117ff 100644
--- a/src/rabbit_mnesia_rename.erl
+++ b/src/rabbit_mnesia_rename.erl
@@ -70,13 +70,13 @@ rename(Node, NodeMapList) ->
ok = rabbit_mnesia:copy_db(mnesia_copy_dir()),
%% And make the actual changes
- rabbit_control_main:become(FromNode),
+ become(FromNode),
take_backup(before_backup_name()),
convert_backup(NodeMap, before_backup_name(), after_backup_name()),
ok = rabbit_file:write_term_file(rename_config_name(),
[{FromNode, ToNode}]),
convert_config_files(NodeMap),
- rabbit_control_main:become(ToNode),
+ become(ToNode),
restore_backup(after_backup_name()),
ok
after
@@ -267,3 +267,15 @@ transform_table(Table, Map, Key) ->
[Term] = mnesia:read(Table, Key, write),
ok = mnesia:write(Table, update_term(Map, Term), write),
transform_table(Table, Map, mnesia:next(Table, Key)).
+
+become(BecomeNode) ->
+ error_logger:tty(false),
+ case net_adm:ping(BecomeNode) of
+ pong -> exit({node_running, BecomeNode});
+ pang -> ok = net_kernel:stop(),
+ io:format(" * Impersonating node: ~s...", [BecomeNode]),
+ {ok, _} = rabbit_cli:start_distribution(BecomeNode),
+ io:format(" done~n", []),
+ Dir = mnesia:system_info(directory),
+ io:format(" * Mnesia directory : ~s~n", [Dir])
+ end.
diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl
index 8e2b1c0d49..6c9eb92cff 100644
--- a/src/rabbit_msg_store.erl
+++ b/src/rabbit_msg_store.erl
@@ -18,7 +18,7 @@
-behaviour(gen_server2).
--export([start_link/4, successfully_recovered_state/1,
+-export([start_link/4, start_global_store_link/4, successfully_recovered_state/1,
client_init/4, client_terminate/1, client_delete_and_terminate/1,
client_ref/1, close_all_indicated/1,
write/3, write_flow/3, read/2, contains/2, remove/2]).
@@ -63,7 +63,7 @@
%% the module for index ops,
%% rabbit_msg_store_ets_index by default
index_module,
- %% %% where are messages?
+ %% where are messages?
index_state,
%% current file name as number
current_file,
@@ -91,8 +91,6 @@
flying_ets,
%% set of dying clients
dying_clients,
- %% index of file positions for client death messages
- dying_client_index,
%% map of references of all registered clients
%% to callbacks
clients,
@@ -265,7 +263,7 @@
%% updated.
%%
%% On non-clean startup, we scan the files we discover, dealing with
-%% the possibilites of a crash having occured during a compaction
+%% the possibilities of a crash having occurred
%% (this consists of tidyup - the compaction is deliberately designed
%% such that data is duplicated on disk rather than risking it being
%% lost), and rebuild the file summary and index ETS table.
@@ -310,7 +308,7 @@
%% From this reasoning, we do have a bound on the number of times the
%% message is rewritten. From when it is inserted, there can be no
%% files inserted between it and the head of the queue, and the worst
-%% case is that everytime it is rewritten, it moves one position lower
+%% case is that every time it is rewritten, it moves one position lower
%% in the file (for it to stay at the same position requires that
%% there are no holes beneath it, which means truncate would be used
%% and so it would not be rewritten at all). Thus this seems to
@@ -352,7 +350,7 @@
%% because in the event of the same message being sent to several
%% different queues, there is the possibility of one queue writing and
%% removing the message before other queues write it at all. Thus
-%% accomodating 0-reference counts allows us to avoid unnecessary
+%% accommodating 0-reference counts allows us to avoid unnecessary
%% writes here. Of course, there are complications: the file to which
%% the message has already been written could be locked pending
%% deletion or GC, which means we have to rewrite the message as the
@@ -474,15 +472,20 @@
%% public API
%%----------------------------------------------------------------------------
-start_link(Server, Dir, ClientRefs, StartupFunState) ->
- gen_server2:start_link({local, Server}, ?MODULE,
- [Server, Dir, ClientRefs, StartupFunState],
+start_link(Name, Dir, ClientRefs, StartupFunState) when is_atom(Name) ->
+ gen_server2:start_link(?MODULE,
+ [Name, Dir, ClientRefs, StartupFunState],
+ [{timeout, infinity}]).
+
+start_global_store_link(Name, Dir, ClientRefs, StartupFunState) when is_atom(Name) ->
+ gen_server2:start_link({local, Name}, ?MODULE,
+ [Name, Dir, ClientRefs, StartupFunState],
[{timeout, infinity}]).
successfully_recovered_state(Server) ->
gen_server2:call(Server, successfully_recovered_state, infinity).
-client_init(Server, Ref, MsgOnDiskFun, CloseFDsFun) ->
+client_init(Server, Ref, MsgOnDiskFun, CloseFDsFun) when is_pid(Server); is_atom(Server) ->
{IState, IModule, Dir, GCPid,
FileHandlesEts, FileSummaryEts, CurFileCacheEts, FlyingEts} =
gen_server2:call(
@@ -522,7 +525,7 @@ write_flow(MsgId, Msg,
%% rabbit_amqqueue_process process via the
%% rabbit_variable_queue. We are accessing the
%% rabbit_amqqueue_process process dictionary.
- credit_flow:send(whereis(Server), CreditDiscBound),
+ credit_flow:send(Server, CreditDiscBound),
client_write(MsgId, Msg, flow, CState).
write(MsgId, Msg, CState) -> client_write(MsgId, Msg, noflow, CState).
@@ -548,7 +551,7 @@ remove(MsgIds, CState = #client_msstate { client_ref = CRef }) ->
[client_update_flying(-1, MsgId, CState) || MsgId <- MsgIds],
server_cast(CState, {remove, CRef, MsgIds}).
-set_maximum_since_use(Server, Age) ->
+set_maximum_since_use(Server, Age) when is_pid(Server); is_atom(Server) ->
gen_server2:cast(Server, {set_maximum_since_use, Age}).
%%----------------------------------------------------------------------------
@@ -699,27 +702,25 @@ client_update_flying(Diff, MsgId, #client_msstate { flying_ets = FlyingEts,
end.
clear_client(CRef, State = #msstate { cref_to_msg_ids = CTM,
- dying_clients = DyingClients,
- dying_client_index = DyingIndex }) ->
- ets:delete(DyingIndex, CRef),
+ dying_clients = DyingClients }) ->
State #msstate { cref_to_msg_ids = dict:erase(CRef, CTM),
- dying_clients = sets:del_element(CRef, DyingClients) }.
+ dying_clients = maps:remove(CRef, DyingClients) }.
%%----------------------------------------------------------------------------
%% gen_server callbacks
%%----------------------------------------------------------------------------
-init([Server, BaseDir, ClientRefs, StartupFunState]) ->
+init([Name, BaseDir, ClientRefs, StartupFunState]) ->
process_flag(trap_exit, true),
ok = file_handle_cache:register_callback(?MODULE, set_maximum_since_use,
[self()]),
- Dir = filename:join(BaseDir, atom_to_list(Server)),
+ Dir = filename:join(BaseDir, atom_to_list(Name)),
- {ok, IndexModule} = application:get_env(msg_store_index_module),
- rabbit_log:info("~w: using ~p to provide index~n", [Server, IndexModule]),
+ {ok, IndexModule} = application:get_env(rabbit, msg_store_index_module),
+ rabbit_log:info("~tp: using ~p to provide index~n", [Dir, IndexModule]),
AttemptFileSummaryRecovery =
case ClientRefs of
@@ -738,7 +739,7 @@ init([Server, BaseDir, ClientRefs, StartupFunState]) ->
{CleanShutdown, IndexState, ClientRefs1} =
recover_index_and_client_refs(IndexModule, FileSummaryRecovered,
- ClientRefs, Dir, Server),
+ ClientRefs, Dir),
Clients = dict:from_list(
[{CRef, {undefined, undefined, undefined}} ||
CRef <- ClientRefs1]),
@@ -755,10 +756,8 @@ init([Server, BaseDir, ClientRefs, StartupFunState]) ->
[ordered_set, public]),
CurFileCacheEts = ets:new(rabbit_msg_store_cur_file, [set, public]),
FlyingEts = ets:new(rabbit_msg_store_flying, [set, public]),
- DyingIndex = ets:new(rabbit_msg_store_dying_client_index,
- [set, public, {keypos, #dying_client.client_ref}]),
- {ok, FileSizeLimit} = application:get_env(msg_store_file_size_limit),
+ {ok, FileSizeLimit} = application:get_env(rabbit, msg_store_file_size_limit),
{ok, GCPid} = rabbit_msg_store_gc:start_link(
#gc_state { dir = Dir,
@@ -787,8 +786,7 @@ init([Server, BaseDir, ClientRefs, StartupFunState]) ->
file_summary_ets = FileSummaryEts,
cur_file_cache_ets = CurFileCacheEts,
flying_ets = FlyingEts,
- dying_clients = sets:new(),
- dying_client_index = DyingIndex,
+ dying_clients = #{},
clients = Clients,
successfully_recovered = CleanShutdown,
file_size_limit = FileSizeLimit,
@@ -866,14 +864,14 @@ handle_call({contains, MsgId}, From, State) ->
handle_cast({client_dying, CRef},
State = #msstate { dying_clients = DyingClients,
- dying_client_index = DyingIndex,
current_file_handle = CurHdl,
current_file = CurFile }) ->
- DyingClients1 = sets:add_element(CRef, DyingClients),
{ok, CurOffset} = file_handle_cache:current_virtual_offset(CurHdl),
- true = ets:insert_new(DyingIndex, #dying_client{client_ref = CRef,
- file = CurFile,
- offset = CurOffset}),
+ DyingClients1 = maps:put(CRef,
+ #dying_client{client_ref = CRef,
+ file = CurFile,
+ offset = CurOffset},
+ DyingClients),
noreply(State #msstate { dying_clients = DyingClients1 });
handle_cast({client_delete, CRef},
@@ -995,12 +993,25 @@ terminate(_Reason, State = #msstate { index_state = IndexState,
State2
end,
State3 = close_all_handles(State1),
- ok = store_file_summary(FileSummaryEts, Dir),
+ case store_file_summary(FileSummaryEts, Dir) of
+ ok -> ok;
+ {error, FSErr} ->
+ rabbit_log:error("Unable to store file summary"
+ " for vhost message store for directory ~p~n"
+ "Error: ~p~n",
+ [Dir, FSErr])
+ end,
[true = ets:delete(T) || T <- [FileSummaryEts, FileHandlesEts,
CurFileCacheEts, FlyingEts]],
IndexModule:terminate(IndexState),
- ok = store_recovery_terms([{client_refs, dict:fetch_keys(Clients)},
- {index_module, IndexModule}], Dir),
+ case store_recovery_terms([{client_refs, dict:fetch_keys(Clients)},
+ {index_module, IndexModule}], Dir) of
+ ok -> ok;
+ {error, RTErr} ->
+ rabbit_log:error("Unable to save message store recovery terms"
+                             " for directory ~p~nError: ~p~n",
+ [Dir, RTErr])
+ end,
State3 #msstate { index_state = undefined,
current_file_handle = undefined }.
@@ -1357,17 +1368,15 @@ blind_confirm(CRef, MsgIds, ActionTaken, State) ->
%% msg and thus should be ignored. Note that this (correctly) returns
%% false when testing to remove the death msg itself.
should_mask_action(CRef, MsgId,
- State = #msstate { dying_clients = DyingClients,
- dying_client_index = DyingIndex }) ->
- case {sets:is_element(CRef, DyingClients), index_lookup(MsgId, State)} of
- {false, Location} ->
+ State = #msstate{dying_clients = DyingClients}) ->
+ case {maps:find(CRef, DyingClients), index_lookup(MsgId, State)} of
+ {error, Location} ->
{false, Location};
- {true, not_found} ->
+ {{ok, _}, not_found} ->
{true, not_found};
- {true, #msg_location { file = File, offset = Offset,
- ref_count = RefCount } = Location} ->
- [#dying_client { file = DeathFile, offset = DeathOffset }] =
- ets:lookup(DyingIndex, CRef),
+ {{ok, Client}, #msg_location { file = File, offset = Offset,
+ ref_count = RefCount } = Location} ->
+ #dying_client{file = DeathFile, offset = DeathOffset} = Client,
{case {{DeathFile, DeathOffset} < {File, Offset}, RefCount} of
{true, _} -> true;
{false, 0} -> false_if_increment;
@@ -1538,16 +1547,16 @@ index_delete_by_file(File, #msstate { index_module = Index,
%% shutdown and recovery
%%----------------------------------------------------------------------------
-recover_index_and_client_refs(IndexModule, _Recover, undefined, Dir, _Server) ->
+recover_index_and_client_refs(IndexModule, _Recover, undefined, Dir) ->
{false, IndexModule:new(Dir), []};
-recover_index_and_client_refs(IndexModule, false, _ClientRefs, Dir, Server) ->
- rabbit_log:warning("~w: rebuilding indices from scratch~n", [Server]),
+recover_index_and_client_refs(IndexModule, false, _ClientRefs, Dir) ->
+ rabbit_log:warning("~tp : rebuilding indices from scratch~n", [Dir]),
{false, IndexModule:new(Dir), []};
-recover_index_and_client_refs(IndexModule, true, ClientRefs, Dir, Server) ->
+recover_index_and_client_refs(IndexModule, true, ClientRefs, Dir) ->
Fresh = fun (ErrorMsg, ErrorArgs) ->
- rabbit_log:warning("~w: " ++ ErrorMsg ++ "~n"
+ rabbit_log:warning("~tp : " ++ ErrorMsg ++ "~n"
"rebuilding indices from scratch~n",
- [Server | ErrorArgs]),
+ [Dir | ErrorArgs]),
{false, IndexModule:new(Dir), []}
end,
case read_recovery_terms(Dir) of
@@ -1582,7 +1591,7 @@ read_recovery_terms(Dir) ->
end.
store_file_summary(Tid, Dir) ->
- ok = ets:tab2file(Tid, filename:join(Dir, ?FILE_SUMMARY_FILENAME),
+ ets:tab2file(Tid, filename:join(Dir, ?FILE_SUMMARY_FILENAME),
[{extended_info, [object_count]}]).
recover_file_summary(false, _Dir) ->
diff --git a/src/rabbit_msg_store_ets_index.erl b/src/rabbit_msg_store_ets_index.erl
index 76ef112069..0e8b7174e2 100644
--- a/src/rabbit_msg_store_ets_index.erl
+++ b/src/rabbit_msg_store_ets_index.erl
@@ -74,6 +74,12 @@ delete_by_file(File, State) ->
ok.
terminate(#state { table = MsgLocations, dir = Dir }) ->
- ok = ets:tab2file(MsgLocations, filename:join(Dir, ?FILENAME),
- [{extended_info, [object_count]}]),
+ case ets:tab2file(MsgLocations, filename:join(Dir, ?FILENAME),
+ [{extended_info, [object_count]}]) of
+ ok -> ok;
+ {error, Err} ->
+ rabbit_log:error("Unable to save message store index"
+ " for directory ~p.~nError: ~p~n",
+ [Dir, Err])
+ end,
ets:delete(MsgLocations).
diff --git a/src/rabbit_msg_store_vhost_sup.erl b/src/rabbit_msg_store_vhost_sup.erl
new file mode 100644
index 0000000000..0209e88cf7
--- /dev/null
+++ b/src/rabbit_msg_store_vhost_sup.erl
@@ -0,0 +1,93 @@
+-module(rabbit_msg_store_vhost_sup).
+
+-behaviour(supervisor2).
+
+-export([start_link/3, init/1, add_vhost/2, delete_vhost/2,
+ client_init/5, successfully_recovered_state/2]).
+
+%% Internal
+-export([start_store_for_vhost/4]).
+
+start_link(Name, VhostsClientRefs, StartupFunState) when is_map(VhostsClientRefs);
+ VhostsClientRefs == undefined ->
+ supervisor2:start_link({local, Name}, ?MODULE,
+ [Name, VhostsClientRefs, StartupFunState]).
+
+init([Name, VhostsClientRefs, StartupFunState]) ->
+ ets:new(Name, [named_table, public]),
+ {ok, {{simple_one_for_one, 1, 1},
+ [{rabbit_msg_store_vhost, {rabbit_msg_store_vhost_sup, start_store_for_vhost,
+ [Name, VhostsClientRefs, StartupFunState]},
+ transient, infinity, supervisor, [rabbit_msg_store]}]}}.
+
+
+add_vhost(Name, VHost) ->
+ supervisor2:start_child(Name, [VHost]).
+
+start_store_for_vhost(Name, VhostsClientRefs, StartupFunState, VHost) ->
+ case vhost_store_pid(Name, VHost) of
+ no_pid ->
+ VHostDir = rabbit_vhost:msg_store_dir_path(VHost),
+ ok = rabbit_file:ensure_dir(VHostDir),
+ rabbit_log:info("Making sure message store directory '~s' for vhost '~s' exists~n", [VHostDir, VHost]),
+ VhostRefs = refs_for_vhost(VHost, VhostsClientRefs),
+ case rabbit_msg_store:start_link(Name, VHostDir, VhostRefs, StartupFunState) of
+ {ok, Pid} ->
+ ets:insert(Name, {VHost, Pid}),
+ {ok, Pid};
+ Other -> Other
+ end;
+ Pid when is_pid(Pid) ->
+ {error, {already_started, Pid}}
+ end.
+
+refs_for_vhost(_, undefined) -> undefined;
+refs_for_vhost(VHost, Refs) ->
+ case maps:find(VHost, Refs) of
+ {ok, Val} -> Val;
+ error -> []
+ end.
+
+
+delete_vhost(Name, VHost) ->
+ case vhost_store_pid(Name, VHost) of
+ no_pid -> ok;
+ Pid when is_pid(Pid) ->
+ supervisor2:terminate_child(Name, Pid),
+ cleanup_vhost_store(Name, VHost, Pid)
+ end,
+ ok.
+
+client_init(Name, Ref, MsgOnDiskFun, CloseFDsFun, VHost) ->
+ VHostPid = maybe_start_store_for_vhost(Name, VHost),
+ rabbit_msg_store:client_init(VHostPid, Ref, MsgOnDiskFun, CloseFDsFun).
+
+maybe_start_store_for_vhost(Name, VHost) ->
+ case add_vhost(Name, VHost) of
+ {ok, Pid} -> Pid;
+ {error, {already_started, Pid}} -> Pid;
+ Error -> throw(Error)
+ end.
+
+vhost_store_pid(Name, VHost) ->
+ case ets:lookup(Name, VHost) of
+ [] -> no_pid;
+ [{VHost, Pid}] ->
+ case erlang:is_process_alive(Pid) of
+ true -> Pid;
+ false ->
+ cleanup_vhost_store(Name, VHost, Pid),
+ no_pid
+ end
+ end.
+
+cleanup_vhost_store(Name, VHost, Pid) ->
+ ets:delete_object(Name, {VHost, Pid}).
+
+successfully_recovered_state(Name, VHost) ->
+ case vhost_store_pid(Name, VHost) of
+ no_pid ->
+ throw({message_store_not_started, Name, VHost});
+ Pid when is_pid(Pid) ->
+ rabbit_msg_store:successfully_recovered_state(Pid)
+ end.
diff --git a/src/rabbit_plugins.erl b/src/rabbit_plugins.erl
index 9da68b7640..54f4180244 100644
--- a/src/rabbit_plugins.erl
+++ b/src/rabbit_plugins.erl
@@ -67,22 +67,15 @@ ensure(FileJustChanged0) ->
{error, {enabled_plugins_mismatch, FileJustChanged, OurFile}}
end.
+%% @doc Prepares the file system and installs all enabled plugins.
setup() ->
- case application:get_env(rabbit, plugins_expand_dir) of
- {ok, ExpandDir} ->
- case filelib:is_dir(ExpandDir) of
- true ->
- rabbit_log:info(
- "\"~s\" is no longer used to expand plugins.~n"
- "RabbitMQ still manages this directory "
- "but will stop doing so in the future.", [ExpandDir]),
-
- _ = delete_recursively(ExpandDir);
- false ->
- ok
- end;
- undefined ->
- ok
+ {ok, ExpandDir} = application:get_env(rabbit, plugins_expand_dir),
+
+ %% Eliminate the contents of the destination directory
+ case delete_recursively(ExpandDir) of
+ ok -> ok;
+ {error, E1} -> throw({error, {cannot_delete_plugins_expand_dir,
+ [ExpandDir, E1]}})
end,
{ok, EnabledFile} = application:get_env(rabbit, enabled_plugins_file),
@@ -135,61 +128,10 @@ extract_schema(#plugin{type = dir, location = Location}, SchemaDir) ->
%% @doc Lists the plugins which are currently running.
active() ->
- LoadedPluginNames = maybe_keep_required_deps(false, loaded_plugin_names()),
+ {ok, ExpandDir} = application:get_env(rabbit, plugins_expand_dir),
+ InstalledPlugins = plugin_names(list(ExpandDir)),
[App || {App, _, _} <- rabbit_misc:which_applications(),
- lists:member(App, LoadedPluginNames)].
-
-loaded_plugin_names() ->
- {ok, PluginsPath} = application:get_env(rabbit, plugins_dir),
- PluginsDirs = split_path(PluginsPath),
- lists:flatmap(
- fun(PluginsDir) ->
- PluginsDirComponents = filename:split(PluginsDir),
- loaded_plugin_names(code:get_path(), PluginsDirComponents, [])
- end,
- PluginsDirs).
-
-loaded_plugin_names([Path | OtherPaths], PluginsDirComponents, PluginNames) ->
- case lists:sublist(filename:split(Path), length(PluginsDirComponents)) of
- PluginsDirComponents ->
- case build_plugin_name_from_code_path(Path) of
- undefined ->
- loaded_plugin_names(
- OtherPaths, PluginsDirComponents, PluginNames);
- PluginName ->
- loaded_plugin_names(
- OtherPaths, PluginsDirComponents,
- [list_to_atom(PluginName) | PluginNames])
- end;
- _ ->
- loaded_plugin_names(OtherPaths, PluginsDirComponents, PluginNames)
- end;
-loaded_plugin_names([], _, PluginNames) ->
- PluginNames.
-
-build_plugin_name_from_code_path(Path) ->
- AppPath = case filelib:is_dir(Path) of
- true ->
- case filelib:wildcard(filename:join(Path, "*.app")) of
- [AP | _] -> AP;
- [] -> undefined
- end;
- false ->
- EZ = filename:dirname(filename:dirname(Path)),
- case filelib:is_regular(EZ) of
- true ->
- case find_app_path_in_ez(EZ) of
- {ok, AP} -> AP;
- _ -> undefined
- end;
- _ ->
- undefined
- end
- end,
- case AppPath of
- undefined -> undefined;
- _ -> filename:basename(AppPath, ".app")
- end.
+ lists:member(App, InstalledPlugins)].
%% @doc Get the list of plugins which are ready to be enabled.
list(PluginsPath) ->
@@ -279,19 +221,25 @@ running_plugins() ->
%%----------------------------------------------------------------------------
prepare_plugins(Enabled) ->
- AllPlugins = installed_plugins(),
+ {ok, PluginsDistDir} = application:get_env(rabbit, plugins_dir),
+ {ok, ExpandDir} = application:get_env(rabbit, plugins_expand_dir),
+
+ AllPlugins = list(PluginsDistDir),
Wanted = dependencies(false, Enabled, AllPlugins),
WantedPlugins = lookup_plugins(Wanted, AllPlugins),
{ValidPlugins, Problems} = validate_plugins(WantedPlugins),
maybe_warn_about_invalid_plugins(Problems),
+ case filelib:ensure_dir(ExpandDir ++ "/") of
+ ok -> ok;
+ {error, E2} -> throw({error, {cannot_create_plugins_expand_dir,
+ [ExpandDir, E2]}})
+ end,
+ [prepare_plugin(Plugin, ExpandDir) || Plugin <- ValidPlugins],
- [prepare_dir_plugin(ValidPlugin) || ValidPlugin <- ValidPlugins],
+ [prepare_dir_plugin(PluginAppDescPath) ||
+ PluginAppDescPath <- filelib:wildcard(ExpandDir ++ "/*/ebin/*.app")],
Wanted.
-installed_plugins() ->
- {ok, PluginsDistDir} = application:get_env(rabbit, plugins_dir),
- list(PluginsDistDir).
-
maybe_warn_about_invalid_plugins([]) ->
ok;
maybe_warn_about_invalid_plugins(InvalidPlugins) ->
@@ -404,60 +352,40 @@ is_version_supported(Version, ExpectedVersions) ->
end.
clean_plugins(Plugins) ->
- [clean_plugin(Plugin) || Plugin <- Plugins].
+ {ok, ExpandDir} = application:get_env(rabbit, plugins_expand_dir),
+ [clean_plugin(Plugin, ExpandDir) || Plugin <- Plugins].
-clean_plugin(Plugin) ->
+clean_plugin(Plugin, ExpandDir) ->
{ok, Mods} = application:get_key(Plugin, modules),
- PluginEbinDir = code:lib_dir(Plugin, ebin),
-
application:unload(Plugin),
[begin
code:soft_purge(Mod),
code:delete(Mod),
false = code:is_loaded(Mod)
end || Mod <- Mods],
-
- code:del_path(PluginEbinDir).
-
-plugin_ebin_dir(#plugin{type = ez, location = Location}) ->
- case find_app_path_in_ez(Location) of
- {ok, AppPath} ->
- filename:join(Location, filename:dirname(AppPath));
- {error, Reason} ->
- {error, Reason}
- end;
-plugin_ebin_dir(#plugin{type = dir, location = Location}) ->
- filename:join(Location, "ebin").
-
-prepare_dir_plugin(#plugin{name = Name} = Plugin) ->
- PluginEbinDir = case plugin_ebin_dir(Plugin) of
- {error, Reason} ->
- throw({plugin_ebin_dir_not_found, Name, Reason});
- Dir ->
- Dir
- end,
- case code:add_patha(PluginEbinDir) of
- true ->
- case filelib:wildcard(PluginEbinDir++ "/*.beam") of
- [] ->
+ delete_recursively(rabbit_misc:format("~s/~s", [ExpandDir, Plugin])).
+
+prepare_dir_plugin(PluginAppDescPath) ->
+ PluginEbinDir = filename:dirname(PluginAppDescPath),
+ Plugin = filename:basename(PluginAppDescPath, ".app"),
+ code:add_patha(PluginEbinDir),
+ case filelib:wildcard(PluginEbinDir++ "/*.beam") of
+ [] ->
+ ok;
+ [BeamPath | _] ->
+ Module = list_to_atom(filename:basename(BeamPath, ".beam")),
+ case code:ensure_loaded(Module) of
+ {module, _} ->
ok;
- [BeamPath | _] ->
- Module = list_to_atom(filename:basename(BeamPath, ".beam")),
- case code:ensure_loaded(Module) of
- {module, _} ->
- ok;
- {error, badfile} ->
- rabbit_log:error("Failed to enable plugin \"~s\": "
- "it may have been built with an "
- "incompatible (more recent?) "
- "version of Erlang~n", [Name]),
- throw({plugin_built_with_incompatible_erlang, Name});
- Error ->
- throw({plugin_module_unloadable, Name, Error})
- end
- end;
- {error, bad_directory} ->
- throw({plugin_ebin_path_incorrect, Name, PluginEbinDir})
+ {error, badfile} ->
+ rabbit_log:error("Failed to enable plugin \"~s\": "
+ "it may have been built with an "
+ "incompatible (more recent?) "
+ "version of Erlang~n", [Plugin]),
+ throw({plugin_built_with_incompatible_erlang, Plugin});
+ Error ->
+ throw({plugin_module_unloadable, Plugin, Error})
+ end
end.
%%----------------------------------------------------------------------------
@@ -468,6 +396,12 @@ delete_recursively(Fn) ->
{error, {Path, E}} -> {error, {cannot_delete, Path, E}}
end.
+prepare_plugin(#plugin{type = ez, location = Location}, ExpandDir) ->
+ zip:unzip(Location, [{cwd, ExpandDir}]);
+prepare_plugin(#plugin{type = dir, name = Name, location = Location},
+ ExpandDir) ->
+ rabbit_file:recursive_copy(Location, filename:join([ExpandDir, Name])).
+
plugin_info({ez, EZ}) ->
case read_app_file(EZ) of
{application, Name, Props} -> mkplugin(Name, Props, ez, EZ);
@@ -494,12 +428,14 @@ mkplugin(Name, Props, Type, Location) ->
broker_version_requirements = BrokerVersions,
dependency_version_requirements = DepsVersions}.
-find_app_path_in_ez(EZ) ->
+read_app_file(EZ) ->
case zip:list_dir(EZ) of
{ok, [_|ZippedFiles]} ->
case find_app_files(ZippedFiles) of
[AppPath|_] ->
- {ok, AppPath};
+ {ok, [{AppPath, AppFile}]} =
+ zip:extract(EZ, [{file_list, [AppPath]}, memory]),
+ parse_binary(AppFile);
[] ->
{error, no_app_file}
end;
@@ -507,16 +443,6 @@ find_app_path_in_ez(EZ) ->
{error, {invalid_ez, Reason}}
end.
-read_app_file(EZ) ->
- case find_app_path_in_ez(EZ) of
- {ok, AppPath} ->
- {ok, [{AppPath, AppFile}]} =
- zip:extract(EZ, [{file_list, [AppPath]}, memory]),
- parse_binary(AppFile);
- {error, Reason} ->
- {error, Reason}
- end.
-
find_app_files(ZippedFiles) ->
{ok, RE} = re:compile("^.*/ebin/.*.app$"),
[Path || {zip_file, Path, _, _, _, _} <- ZippedFiles,
diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl
index 8b96bbffbd..793eb3e514 100644
--- a/src/rabbit_queue_index.erl
+++ b/src/rabbit_queue_index.erl
@@ -23,6 +23,10 @@
read/3, next_segment_boundary/1, bounds/1, start/1, stop/0]).
-export([add_queue_ttl/0, avoid_zeroes/0, store_msg_size/0, store_msg/0]).
+-export([scan_queue_segments/3]).
+
+%% Migrates from global to per-vhost message stores
+-export([move_to_per_vhost_stores/1, update_recovery_term/2]).
-define(CLEAN_FILENAME, "clean.dot").
@@ -123,7 +127,7 @@
-define(SEGMENT_EXTENSION, ".idx").
%% TODO: The segment size would be configurable, but deriving all the
-%% other values is quite hairy and quite possibly noticably less
+%% other values is quite hairy and quite possibly noticeably less
%% efficient, depending on how clever the compiler is when it comes to
%% binary generation/matching with constant vs variable lengths.
@@ -475,11 +479,10 @@ start(DurableQueueNames) ->
end, {[], sets:new()}, DurableQueueNames),
%% Any queue directory we've not been asked to recover is considered garbage
- QueuesDir = queues_dir(),
rabbit_file:recursive_delete(
- [filename:join(QueuesDir, DirName) ||
- DirName <- all_queue_directory_names(QueuesDir),
- not sets:is_element(DirName, DurableDirectories)]),
+ [DirName ||
+ DirName <- all_queue_directory_names(),
+ not sets:is_element(filename:basename(DirName), DurableDirectories)]),
rabbit_recovery_terms:clear(),
@@ -490,12 +493,9 @@ start(DurableQueueNames) ->
stop() -> rabbit_recovery_terms:stop().
-all_queue_directory_names(Dir) ->
- case rabbit_file:list_dir(Dir) of
- {ok, Entries} -> [E || E <- Entries,
- rabbit_file:is_dir(filename:join(Dir, E))];
- {error, enoent} -> []
- end.
+all_queue_directory_names() ->
+ filelib:wildcard(filename:join([rabbit_vhost:msg_store_dir_wildcard(),
+ "queues", "*"])).
%%----------------------------------------------------------------------------
%% startup and shutdown
@@ -508,14 +508,20 @@ erase_index_dir(Dir) ->
end.
blank_state(QueueName) ->
- blank_state_dir(
- filename:join(queues_dir(), queue_name_to_dir_name(QueueName))).
+ blank_state_dir(queue_dir(QueueName)).
blank_state_dir(Dir) ->
blank_state_dir_funs(Dir,
fun (_) -> ok end,
fun (_) -> ok end).
+queue_dir(#resource{ virtual_host = VHost } = QueueName) ->
+ %% Queue directory is
+ %% {node_database_dir}/msg_stores/vhosts/{vhost}/queues/{queue}
+ VHostDir = rabbit_vhost:msg_store_dir_path(VHost),
+ QueueDir = queue_name_to_dir_name(QueueName),
+ filename:join([VHostDir, "queues", QueueDir]).
+
blank_state_dir_funs(Dir, OnSyncFun, OnSyncMsgFun) ->
{ok, MaxJournal} =
application:get_env(rabbit, queue_index_max_journal_entries),
@@ -629,8 +635,8 @@ queue_name_to_dir_name(Name = #resource { kind = queue }) ->
<<Num:128>> = erlang:md5(term_to_binary(Name)),
rabbit_misc:format("~.36B", [Num]).
-queues_dir() ->
- filename:join(rabbit_mnesia:dir(), "queues").
+queues_base_dir() ->
+ rabbit_mnesia:dir().
%%----------------------------------------------------------------------------
%% msg store startup delta function
@@ -660,20 +666,19 @@ queue_index_walker({next, Gatherer}) when is_pid(Gatherer) ->
end.
queue_index_walker_reader(QueueName, Gatherer) ->
- State = blank_state(QueueName),
- ok = scan_segments(
+ ok = scan_queue_segments(
fun (_SeqId, MsgId, _MsgProps, true, _IsDelivered, no_ack, ok)
when is_binary(MsgId) ->
gatherer:sync_in(Gatherer, {MsgId, 1});
(_SeqId, _MsgId, _MsgProps, _IsPersistent, _IsDelivered,
_IsAcked, Acc) ->
Acc
- end, ok, State),
+ end, ok, QueueName),
ok = gatherer:finish(Gatherer).
-scan_segments(Fun, Acc, State) ->
- State1 = #qistate { segments = Segments, dir = Dir } =
- recover_journal(State),
+scan_queue_segments(Fun, Acc, QueueName) ->
+ State = #qistate { segments = Segments, dir = Dir } =
+ recover_journal(blank_state(QueueName)),
Result = lists:foldr(
fun (Seg, AccN) ->
segment_entries_foldr(
@@ -682,8 +687,8 @@ scan_segments(Fun, Acc, State) ->
Fun(reconstruct_seq_id(Seg, RelSeq), MsgOrId, MsgProps,
IsPersistent, IsDelivered, IsAcked, AccM)
end, AccN, segment_find_or_new(Seg, Dir, Segments))
- end, Acc, all_segment_nums(State1)),
- {_SegmentCounts, _State} = terminate(State1),
+ end, Acc, all_segment_nums(State)),
+ {_SegmentCounts, _State} = terminate(State),
Result.
%%----------------------------------------------------------------------------
@@ -1353,15 +1358,13 @@ store_msg_segment(_) ->
%%----------------------------------------------------------------------------
foreach_queue_index(Funs) ->
- QueuesDir = queues_dir(),
- QueueDirNames = all_queue_directory_names(QueuesDir),
+ QueueDirNames = all_queue_directory_names(),
{ok, Gatherer} = gatherer:start_link(),
[begin
ok = gatherer:fork(Gatherer),
ok = worker_pool:submit_async(
fun () ->
- transform_queue(filename:join(QueuesDir, QueueDirName),
- Gatherer, Funs)
+ transform_queue(QueueDirName, Gatherer, Funs)
end)
end || QueueDirName <- QueueDirNames],
empty = gatherer:out(Gatherer),
@@ -1402,3 +1405,21 @@ drive_transform_fun(Fun, Hdl, Contents) ->
{Output, Contents1} -> ok = file_handle_cache:append(Hdl, Output),
drive_transform_fun(Fun, Hdl, Contents1)
end.
+
+move_to_per_vhost_stores(#resource{} = QueueName) ->
+ OldQueueDir = filename:join([queues_base_dir(), "queues",
+ queue_name_to_dir_name(QueueName)]),
+ NewQueueDir = queue_dir(QueueName),
+ case rabbit_file:is_dir(OldQueueDir) of
+ true ->
+ ok = rabbit_file:ensure_dir(NewQueueDir),
+ ok = rabbit_file:rename(OldQueueDir, NewQueueDir);
+ false ->
+ rabbit_log:info("Queue index directory not found for queue ~p~n",
+ [QueueName])
+ end,
+ ok.
+
+update_recovery_term(#resource{} = QueueName, Term) ->
+ Key = queue_name_to_dir_name(QueueName),
+ rabbit_recovery_terms:store(Key, Term).
diff --git a/src/rabbit_runtime_parameters.erl b/src/rabbit_runtime_parameters.erl
index 94018a5b54..cee5408f0a 100644
--- a/src/rabbit_runtime_parameters.erl
+++ b/src/rabbit_runtime_parameters.erl
@@ -27,7 +27,7 @@
%%
%% The most obvious use case for runtime parameters is policies but
%% there are others:
-%%
+%%
%% * Plugin-specific parameters that only make sense at runtime,
%% e.g. Federation and Shovel link settings
%% * Exchange and queue decorators
diff --git a/src/rabbit_sup.erl b/src/rabbit_sup.erl
index ad70540e5b..a457938dc9 100644
--- a/src/rabbit_sup.erl
+++ b/src/rabbit_sup.erl
@@ -18,7 +18,7 @@
-behaviour(supervisor).
--export([start_link/0, start_child/1, start_child/2, start_child/3,
+-export([start_link/0, start_child/1, start_child/2, start_child/3, start_child/4,
start_supervisor_child/1, start_supervisor_child/2,
start_supervisor_child/3,
start_restartable_child/1, start_restartable_child/2,
@@ -37,6 +37,7 @@
-spec start_child(atom()) -> 'ok'.
-spec start_child(atom(), [any()]) -> 'ok'.
-spec start_child(atom(), atom(), [any()]) -> 'ok'.
+-spec start_child(atom(), atom(), atom(), [any()]) -> 'ok'.
-spec start_supervisor_child(atom()) -> 'ok'.
-spec start_supervisor_child(atom(), [any()]) -> 'ok'.
-spec start_supervisor_child(atom(), atom(), [any()]) -> 'ok'.
@@ -60,6 +61,13 @@ start_child(ChildId, Mod, Args) ->
{ChildId, {Mod, start_link, Args},
transient, ?WORKER_WAIT, worker, [Mod]})).
+start_child(ChildId, Mod, Fun, Args) ->
+ child_reply(supervisor:start_child(
+ ?SERVER,
+ {ChildId, {Mod, Fun, Args},
+ transient, ?WORKER_WAIT, worker, [Mod]})).
+
+
start_supervisor_child(Mod) -> start_supervisor_child(Mod, []).
start_supervisor_child(Mod, Args) -> start_supervisor_child(Mod, Mod, Args).
diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl
index f88b7cc73f..95af84de39 100644
--- a/src/rabbit_upgrade.erl
+++ b/src/rabbit_upgrade.erl
@@ -17,6 +17,7 @@
-module(rabbit_upgrade).
-export([maybe_upgrade_mnesia/0, maybe_upgrade_local/0,
+ maybe_migrate_queues_to_per_vhost_storage/0,
nodes_running/1, secondary_upgrade/1]).
-include("rabbit.hrl").
@@ -98,7 +99,7 @@ ensure_backup_taken() ->
_ -> ok
end;
true ->
- error("Found lock file at ~s.
+ rabbit_log:error("Found lock file at ~s.
Either previous upgrade is in progress or has failed.
Database backup path: ~s",
[lock_filename(), backup_dir()]),
@@ -107,6 +108,7 @@ ensure_backup_taken() ->
take_backup() ->
BackupDir = backup_dir(),
+ info("upgrades: Backing up mnesia dir to ~p~n", [BackupDir]),
case rabbit_mnesia:copy_db(BackupDir) of
ok -> info("upgrades: Mnesia dir backed up to ~p~n",
[BackupDir]);
@@ -126,7 +128,9 @@ remove_backup() ->
maybe_upgrade_mnesia() ->
AllNodes = rabbit_mnesia:cluster_nodes(all),
ok = rabbit_mnesia_rename:maybe_finish(AllNodes),
- case rabbit_version:upgrades_required(mnesia) of
+ %% Mnesia upgrade is the first upgrade scope,
+ %% so we should create a backup here if there are any upgrades
+ case rabbit_version:all_upgrades_required([mnesia, local, message_store]) of
{error, starting_from_scratch} ->
ok;
{error, version_not_available} ->
@@ -142,10 +146,15 @@ maybe_upgrade_mnesia() ->
ok;
{ok, Upgrades} ->
ensure_backup_taken(),
- ok = case upgrade_mode(AllNodes) of
- primary -> primary_upgrade(Upgrades, AllNodes);
- secondary -> secondary_upgrade(AllNodes)
- end
+ run_mnesia_upgrades(proplists:get_value(mnesia, Upgrades, []),
+ AllNodes)
+ end.
+
+run_mnesia_upgrades([], _) -> ok;
+run_mnesia_upgrades(Upgrades, AllNodes) ->
+ case upgrade_mode(AllNodes) of
+ primary -> primary_upgrade(Upgrades, AllNodes);
+ secondary -> secondary_upgrade(AllNodes)
end.
upgrade_mode(AllNodes) ->
@@ -243,15 +252,32 @@ maybe_upgrade_local() ->
{ok, []} -> ensure_backup_removed(),
ok;
{ok, Upgrades} -> mnesia:stop(),
- ensure_backup_taken(),
ok = apply_upgrades(local, Upgrades,
fun () -> ok end),
- ensure_backup_removed(),
ok
end.
%% -------------------------------------------------------------------
+maybe_migrate_queues_to_per_vhost_storage() ->
+ Result = case rabbit_version:upgrades_required(message_store) of
+ {error, version_not_available} -> version_not_available;
+ {error, starting_from_scratch} -> starting_from_scratch;
+ {error, _} = Err -> throw(Err);
+ {ok, []} -> ok;
+ {ok, Upgrades} -> apply_upgrades(message_store,
+ Upgrades,
+ fun() -> ok end),
+ ok
+ end,
+ %% Message store upgrades should be
+ %% the last group.
+ %% Backup can be deleted here.
+ ensure_backup_removed(),
+ Result.
+
+%% -------------------------------------------------------------------
+
apply_upgrades(Scope, Upgrades, Fun) ->
ok = rabbit_file:lock_file(lock_filename()),
info("~s upgrades: ~w to apply~n", [Scope, length(Upgrades)]),
diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl
index bbec4c749d..9dbd4fdbf2 100644
--- a/src/rabbit_variable_queue.erl
+++ b/src/rabbit_variable_queue.erl
@@ -30,9 +30,18 @@
-export([start/1, stop/0]).
+%% exported for parallel map
+-export([add_vhost_msg_store/1]).
+
%% exported for testing only
-export([start_msg_store/2, stop_msg_store/0, init/6]).
+-export([move_messages_to_vhost_store/0]).
+-export([stop_vhost_msg_store/1]).
+-include_lib("stdlib/include/qlc.hrl").
+
+-define(QUEUE_MIGRATION_BATCH_SIZE, 100).
+
%%----------------------------------------------------------------------------
%% Messages, and their position in the queue, can be in memory or on
%% disk, or both. Persistent messages will have both message and
@@ -334,8 +343,11 @@
}).
-define(HEADER_GUESS_SIZE, 100). %% see determine_persist_to/2
+-define(PERSISTENT_MSG_STORE_SUP, msg_store_persistent_vhost).
+-define(TRANSIENT_MSG_STORE_SUP, msg_store_transient_vhost).
-define(PERSISTENT_MSG_STORE, msg_store_persistent).
-define(TRANSIENT_MSG_STORE, msg_store_transient).
+
-define(QUEUE, lqueue).
-include("rabbit.hrl").
@@ -344,6 +356,9 @@
%%----------------------------------------------------------------------------
-rabbit_upgrade({multiple_routing_keys, local, []}).
+-rabbit_upgrade({move_messages_to_vhost_store, message_store, []}).
+
+-compile(export_all).
-type seq_id() :: non_neg_integer().
@@ -453,31 +468,61 @@
start(DurableQueues) ->
{AllTerms, StartFunState} = rabbit_queue_index:start(DurableQueues),
- start_msg_store(
- [Ref || Terms <- AllTerms,
- Terms /= non_clean_shutdown,
- begin
- Ref = proplists:get_value(persistent_ref, Terms),
- Ref =/= undefined
- end],
- StartFunState),
+ %% Group recovery terms by vhost.
+ {[], VhostRefs} = lists:foldl(
+ fun
+ %% We need to skip a queue name
+ (non_clean_shutdown, {[_|QNames], VhostRefs}) ->
+ {QNames, VhostRefs};
+ (Terms, {[QueueName | QNames], VhostRefs}) ->
+ case proplists:get_value(persistent_ref, Terms) of
+ undefined -> {QNames, VhostRefs};
+ Ref ->
+ #resource{virtual_host = VHost} = QueueName,
+ Refs = case maps:find(VHost, VhostRefs) of
+ {ok, Val} -> Val;
+ error -> []
+ end,
+ {QNames, maps:put(VHost, [Ref|Refs], VhostRefs)}
+ end
+ end,
+ {DurableQueues, #{}},
+ AllTerms),
+ start_msg_store(VhostRefs, StartFunState),
{ok, AllTerms}.
stop() ->
ok = stop_msg_store(),
ok = rabbit_queue_index:stop().
-start_msg_store(Refs, StartFunState) ->
- ok = rabbit_sup:start_child(?TRANSIENT_MSG_STORE, rabbit_msg_store,
- [?TRANSIENT_MSG_STORE, rabbit_mnesia:dir(),
+start_msg_store(Refs, StartFunState) when is_map(Refs); Refs == undefined ->
+ ok = rabbit_sup:start_child(?TRANSIENT_MSG_STORE_SUP, rabbit_msg_store_vhost_sup,
+ [?TRANSIENT_MSG_STORE_SUP,
undefined, {fun (ok) -> finished end, ok}]),
- ok = rabbit_sup:start_child(?PERSISTENT_MSG_STORE, rabbit_msg_store,
- [?PERSISTENT_MSG_STORE, rabbit_mnesia:dir(),
- Refs, StartFunState]).
+ ok = rabbit_sup:start_child(?PERSISTENT_MSG_STORE_SUP, rabbit_msg_store_vhost_sup,
+ [?PERSISTENT_MSG_STORE_SUP, Refs, StartFunState]),
+ %% Start message store for all known vhosts
+ VHosts = rabbit_vhost:list(),
+ lists:foreach(fun(VHost) ->
+ add_vhost_msg_store(VHost)
+ end,
+ VHosts),
+ ok.
+
+add_vhost_msg_store(VHost) ->
+ rabbit_log:info("Starting message store vor vhost ~p~n", [VHost]),
+ rabbit_msg_store_vhost_sup:add_vhost(?TRANSIENT_MSG_STORE_SUP, VHost),
+ rabbit_msg_store_vhost_sup:add_vhost(?PERSISTENT_MSG_STORE_SUP, VHost),
+ rabbit_log:info("Message store is started vor vhost ~p~n", [VHost]).
stop_msg_store() ->
- ok = rabbit_sup:stop_child(?PERSISTENT_MSG_STORE),
- ok = rabbit_sup:stop_child(?TRANSIENT_MSG_STORE).
+ ok = rabbit_sup:stop_child(?PERSISTENT_MSG_STORE_SUP),
+ ok = rabbit_sup:stop_child(?TRANSIENT_MSG_STORE_SUP).
+
+stop_vhost_msg_store(VHost) ->
+ rabbit_msg_store_vhost_sup:delete_vhost(?TRANSIENT_MSG_STORE_SUP, VHost),
+ rabbit_msg_store_vhost_sup:delete_vhost(?PERSISTENT_MSG_STORE_SUP, VHost),
+ ok.
init(Queue, Recover, Callback) ->
init(
@@ -492,22 +537,26 @@ init(#amqqueue { name = QueueName, durable = IsDurable }, new,
AsyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun) ->
IndexState = rabbit_queue_index:init(QueueName,
MsgIdxOnDiskFun, MsgAndIdxOnDiskFun),
+ VHost = QueueName#resource.virtual_host,
init(IsDurable, IndexState, 0, 0, [],
case IsDurable of
- true -> msg_store_client_init(?PERSISTENT_MSG_STORE,
- MsgOnDiskFun, AsyncCallback);
+ true -> msg_store_client_init(?PERSISTENT_MSG_STORE_SUP,
+ MsgOnDiskFun, AsyncCallback, VHost);
false -> undefined
end,
- msg_store_client_init(?TRANSIENT_MSG_STORE, undefined, AsyncCallback));
+ msg_store_client_init(?TRANSIENT_MSG_STORE_SUP, undefined,
+ AsyncCallback, VHost));
%% We can be recovering a transient queue if it crashed
init(#amqqueue { name = QueueName, durable = IsDurable }, Terms,
AsyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun) ->
{PRef, RecoveryTerms} = process_recovery_terms(Terms),
+ VHost = QueueName#resource.virtual_host,
{PersistentClient, ContainsCheckFun} =
case IsDurable of
- true -> C = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef,
- MsgOnDiskFun, AsyncCallback),
+ true -> C = msg_store_client_init(?PERSISTENT_MSG_STORE_SUP, PRef,
+ MsgOnDiskFun, AsyncCallback,
+ VHost),
{C, fun (MsgId) when is_binary(MsgId) ->
rabbit_msg_store:contains(MsgId, C);
(#basic_message{is_persistent = Persistent}) ->
@@ -515,12 +564,14 @@ init(#amqqueue { name = QueueName, durable = IsDurable }, Terms,
end};
false -> {undefined, fun(_MsgId) -> false end}
end,
- TransientClient = msg_store_client_init(?TRANSIENT_MSG_STORE,
- undefined, AsyncCallback),
+ TransientClient = msg_store_client_init(?TRANSIENT_MSG_STORE_SUP,
+ undefined, AsyncCallback,
+ VHost),
{DeltaCount, DeltaBytes, IndexState} =
rabbit_queue_index:recover(
QueueName, RecoveryTerms,
- rabbit_msg_store:successfully_recovered_state(?PERSISTENT_MSG_STORE),
+ rabbit_msg_store_vhost_sup:successfully_recovered_state(
+ ?PERSISTENT_MSG_STORE_SUP, VHost),
ContainsCheckFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun),
init(IsDurable, IndexState, DeltaCount, DeltaBytes, RecoveryTerms,
PersistentClient, TransientClient).
@@ -1195,14 +1246,17 @@ with_immutable_msg_store_state(MSCState, IsPersistent, Fun) ->
end),
Res.
-msg_store_client_init(MsgStore, MsgOnDiskFun, Callback) ->
+msg_store_client_init(MsgStore, MsgOnDiskFun, Callback, VHost) ->
msg_store_client_init(MsgStore, rabbit_guid:gen(), MsgOnDiskFun,
- Callback).
+ Callback, VHost).
-msg_store_client_init(MsgStore, Ref, MsgOnDiskFun, Callback) ->
- CloseFDsFun = msg_store_close_fds_fun(MsgStore =:= ?PERSISTENT_MSG_STORE),
- rabbit_msg_store:client_init(MsgStore, Ref, MsgOnDiskFun,
- fun () -> Callback(?MODULE, CloseFDsFun) end).
+msg_store_client_init(MsgStore, Ref, MsgOnDiskFun, Callback, VHost) ->
+ CloseFDsFun = msg_store_close_fds_fun(MsgStore =:= ?PERSISTENT_MSG_STORE_SUP),
+ rabbit_msg_store_vhost_sup:client_init(MsgStore, Ref, MsgOnDiskFun,
+ fun () ->
+ Callback(?MODULE, CloseFDsFun)
+ end,
+ VHost).
msg_store_write(MSCState, IsPersistent, MsgId, Msg) ->
with_immutable_msg_store_state(
@@ -2673,9 +2727,180 @@ multiple_routing_keys() ->
%% Assumes message store is not running
transform_storage(TransformFun) ->
- transform_store(?PERSISTENT_MSG_STORE, TransformFun),
- transform_store(?TRANSIENT_MSG_STORE, TransformFun).
+ transform_store(?PERSISTENT_MSG_STORE_SUP, TransformFun),
+ transform_store(?TRANSIENT_MSG_STORE_SUP, TransformFun).
transform_store(Store, TransformFun) ->
rabbit_msg_store:force_recovery(rabbit_mnesia:dir(), Store),
rabbit_msg_store:transform_dir(rabbit_mnesia:dir(), Store, TransformFun).
+
+move_messages_to_vhost_store() ->
+ log_upgrade("Moving messages to per-vhost message store"),
+ Queues = list_persistent_queues(),
+ %% Move the queue index for each persistent queue to the new store
+ lists:foreach(
+ fun(Queue) ->
+ #amqqueue{name = QueueName} = Queue,
+ rabbit_queue_index:move_to_per_vhost_stores(QueueName)
+ end,
+ Queues),
+ %% Legacy (global) msg_store may require recovery.
+ %% This upgrade step should only be started
+ %% if we are upgrading from a pre-3.7.0 version.
+ {QueuesWithTerms, RecoveryRefs, StartFunState} = start_recovery_terms(Queues),
+
+ OldStore = run_old_persistent_store(RecoveryRefs, StartFunState),
+ %% New store should not be recovered.
+ NewStoreSup = start_new_store_sup(),
+ Vhosts = rabbit_vhost:list(),
+ lists:foreach(fun(VHost) ->
+ rabbit_msg_store_vhost_sup:add_vhost(NewStoreSup, VHost)
+ end,
+ Vhosts),
+ MigrationBatchSize = application:get_env(rabbit, queue_migration_batch_size,
+ ?QUEUE_MIGRATION_BATCH_SIZE),
+ in_batches(MigrationBatchSize,
+ {rabbit_variable_queue, migrate_queue, [OldStore, NewStoreSup]},
+ QueuesWithTerms,
+ "Migrating batch ~p of ~p queues ~n",
+ "Batch ~p of ~p queues migrated ~n"),
+
+ log_upgrade("Message store migration finished"),
+ delete_old_store(OldStore),
+
+ ok = rabbit_queue_index:stop(),
+ ok = rabbit_sup:stop_child(NewStoreSup),
+ ok.
+
+in_batches(Size, MFA, List, MessageStart, MessageEnd) ->
+ in_batches(Size, 1, MFA, List, MessageStart, MessageEnd).
+
+in_batches(_, _, _, [], _, _) -> ok;
+in_batches(Size, BatchNum, MFA, List, MessageStart, MessageEnd) ->
+ {Batch, Tail} = case Size > length(List) of
+ true -> {List, []};
+ false -> lists:split(Size, List)
+ end,
+ log_upgrade(MessageStart, [BatchNum, Size]),
+ {M, F, A} = MFA,
+ Keys = [ rpc:async_call(node(), M, F, [El | A]) || El <- Batch ],
+ lists:foreach(fun(Key) ->
+ case rpc:yield(Key) of
+ {badrpc, Err} -> throw(Err);
+ _ -> ok
+ end
+ end,
+ Keys),
+ log_upgrade(MessageEnd, [BatchNum, Size]),
+ in_batches(Size, BatchNum + 1, MFA, Tail, MessageStart, MessageEnd).
+
+migrate_queue({QueueName = #resource{virtual_host = VHost, name = Name}, RecoveryTerm}, OldStore, NewStoreSup) ->
+ log_upgrade_verbose(
+ "Migrating messages in queue ~s in vhost ~s to per-vhost message store~n",
+ [Name, VHost]),
+ OldStoreClient = get_global_store_client(OldStore),
+ NewStoreClient = get_per_vhost_store_client(QueueName, NewStoreSup),
+ %% WARNING: During scan_queue_segments queue index state is being recovered
+ %% and terminated. This can cause side effects!
+ rabbit_queue_index:scan_queue_segments(
+ %% We migrate only persistent messages which are found in message store
+ %% and are not acked yet
+ fun (_SeqId, MsgId, _MsgProps, true, _IsDelivered, no_ack, OldC)
+ when is_binary(MsgId) ->
+ migrate_message(MsgId, OldC, NewStoreClient);
+ (_SeqId, _MsgId, _MsgProps,
+ _IsPersistent, _IsDelivered, _IsAcked, OldC) ->
+ OldC
+ end,
+ OldStoreClient,
+ QueueName),
+ rabbit_msg_store:client_terminate(OldStoreClient),
+ rabbit_msg_store:client_terminate(NewStoreClient),
+ NewClientRef = rabbit_msg_store:client_ref(NewStoreClient),
+ case RecoveryTerm of
+ non_clean_shutdown -> ok;
+ Term when is_list(Term) ->
+ NewRecoveryTerm = lists:keyreplace(persistent_ref, 1, RecoveryTerm,
+ {persistent_ref, NewClientRef}),
+ rabbit_queue_index:update_recovery_term(QueueName, NewRecoveryTerm)
+ end,
+ log_upgrade_verbose("Finished migrating queue ~s in vhost ~s", [Name, VHost]),
+ {QueueName, NewClientRef}.
+
+migrate_message(MsgId, OldC, NewC) ->
+ case rabbit_msg_store:read(MsgId, OldC) of
+ {{ok, Msg}, OldC1} ->
+ ok = rabbit_msg_store:write(MsgId, Msg, NewC),
+ OldC1;
+ _ -> OldC
+ end.
+
+get_per_vhost_store_client(#resource{virtual_host = VHost}, NewStoreSup) ->
+ rabbit_msg_store_vhost_sup:client_init(NewStoreSup,
+ rabbit_guid:gen(),
+ fun(_,_) -> ok end,
+ fun() -> ok end,
+ VHost).
+
+get_global_store_client(OldStore) ->
+ rabbit_msg_store:client_init(OldStore,
+ rabbit_guid:gen(),
+ fun(_,_) -> ok end,
+ fun() -> ok end).
+
+list_persistent_queues() ->
+ Node = node(),
+ mnesia:async_dirty(
+ fun () ->
+ qlc:e(qlc:q([Q || Q = #amqqueue{name = Name,
+ pid = Pid}
+ <- mnesia:table(rabbit_durable_queue),
+ node(Pid) == Node,
+ mnesia:read(rabbit_queue, Name, read) =:= []]))
+ end).
+
+start_recovery_terms(Queues) ->
+ QueueNames = [Name || #amqqueue{name = Name} <- Queues],
+ {AllTerms, StartFunState} = rabbit_queue_index:start(QueueNames),
+ Refs = [Ref || Terms <- AllTerms,
+ Terms /= non_clean_shutdown,
+ begin
+ Ref = proplists:get_value(persistent_ref, Terms),
+ Ref =/= undefined
+ end],
+ {lists:zip(QueueNames, AllTerms), Refs, StartFunState}.
+
+run_old_persistent_store(Refs, StartFunState) ->
+ OldStoreName = ?PERSISTENT_MSG_STORE,
+ ok = rabbit_sup:start_child(OldStoreName, rabbit_msg_store, start_global_store_link,
+ [OldStoreName, rabbit_mnesia:dir(),
+ Refs, StartFunState]),
+ OldStoreName.
+
+start_new_store_sup() ->
+ % Start persistent store sup without recovery.
+ ok = rabbit_sup:start_child(?PERSISTENT_MSG_STORE_SUP,
+ rabbit_msg_store_vhost_sup,
+ [?PERSISTENT_MSG_STORE_SUP,
+ undefined, {fun (ok) -> finished end, ok}]),
+ ?PERSISTENT_MSG_STORE_SUP.
+
+delete_old_store(OldStore) ->
+ ok = rabbit_sup:stop_child(OldStore),
+ rabbit_file:recursive_delete(
+ [filename:join([rabbit_mnesia:dir(), ?PERSISTENT_MSG_STORE])]),
+ %% Delete old transient store as well
+ rabbit_file:recursive_delete(
+ [filename:join([rabbit_mnesia:dir(), ?TRANSIENT_MSG_STORE])]).
+
+log_upgrade(Msg) ->
+ log_upgrade(Msg, []).
+
+log_upgrade(Msg, Args) ->
+ rabbit_log:info("message_store upgrades: " ++ Msg, Args).
+
+log_upgrade_verbose(Msg) ->
+ log_upgrade_verbose(Msg, []).
+
+log_upgrade_verbose(Msg, Args) ->
+ rabbit_log_upgrade:info(Msg, Args).
diff --git a/src/rabbit_version.erl b/src/rabbit_version.erl
index a27f0aca00..4e2edd19eb 100644
--- a/src/rabbit_version.erl
+++ b/src/rabbit_version.erl
@@ -18,7 +18,8 @@
-export([recorded/0, matches/2, desired/0, desired_for_scope/1,
record_desired/0, record_desired_for_scope/1,
- upgrades_required/1, check_version_consistency/3,
+ upgrades_required/1, all_upgrades_required/1,
+ check_version_consistency/3,
check_version_consistency/4, check_otp_consistency/1,
version_error/3]).
@@ -117,6 +118,30 @@ upgrades_required(Scope) ->
end, Scope)
end.
+all_upgrades_required(Scopes) ->
+ case recorded() of
+ {error, enoent} ->
+ case filelib:is_file(rabbit_guid:filename()) of
+ false -> {error, starting_from_scratch};
+ true -> {error, version_not_available}
+ end;
+ {ok, _} ->
+ lists:foldl(
+ fun
+ (_, {error, Err}) -> {error, Err};
+ (Scope, {ok, Acc}) ->
+ case upgrades_required(Scope) of
+ %% Lift errors from any scope.
+ {error, Err} -> {error, Err};
+ %% Filter non-upgradable scopes
+ {ok, []} -> {ok, Acc};
+ {ok, Upgrades} -> {ok, [{Scope, Upgrades} | Acc]}
+ end
+ end,
+ {ok, []},
+ Scopes)
+ end.
+
%% -------------------------------------------------------------------
with_upgrade_graph(Fun, Scope) ->
diff --git a/src/rabbit_vhost.erl b/src/rabbit_vhost.erl
index 213dbaaa0c..6edb62425b 100644
--- a/src/rabbit_vhost.erl
+++ b/src/rabbit_vhost.erl
@@ -23,7 +23,8 @@
-export([add/1, delete/1, exists/1, list/0, with/2, assert/1, update/2,
set_limits/2, limits_of/1]).
-export([info/1, info/2, info_all/0, info_all/1, info_all/2, info_all/3]).
-
+-export([dir/1, msg_store_dir_path/1, msg_store_dir_wildcard/0]).
+-export([purge_messages/1]).
-spec add(rabbit_types:vhost()) -> 'ok'.
-spec delete(rabbit_types:vhost()) -> 'ok'.
@@ -95,6 +96,16 @@ delete(VHostPath) ->
[ok = Fun() || Fun <- Funs],
ok.
+purge_messages(VHost) ->
+ VhostDir = msg_store_dir_path(VHost),
+ rabbit_log:info("Deleting message store directory for vhost '~s' at '~s'~n", [VHost, VhostDir]),
+ %% Message store is stopped to close file handles
+ rabbit_variable_queue:stop_vhost_msg_store(VHost),
+ ok = rabbit_file:recursive_delete([VhostDir]),
+ %% Ensure the store is terminated even if it was restarted during the delete operation
+ %% above.
+ rabbit_variable_queue:stop_vhost_msg_store(VHost).
+
assert_benign(ok) -> ok;
assert_benign({ok, _}) -> ok;
assert_benign({error, not_found}) -> ok;
@@ -120,6 +131,7 @@ internal_delete(VHostPath) ->
Fs2 = [rabbit_policy:delete(VHostPath, proplists:get_value(name, Info))
|| Info <- rabbit_policy:list(VHostPath)],
ok = mnesia:delete({rabbit_vhost, VHostPath}),
+ purge_messages(VHostPath),
Fs1 ++ Fs2.
exists(VHostPath) ->
@@ -170,6 +182,23 @@ set_limits(VHost = #vhost{}, undefined) ->
set_limits(VHost = #vhost{}, Limits) ->
VHost#vhost{limits = Limits}.
+
+dir(Vhost) ->
+ <<Num:128>> = erlang:md5(term_to_binary(Vhost)),
+ rabbit_misc:format("~.36B", [Num]).
+
+msg_store_dir_path(VHost) ->
+ EncodedName = list_to_binary(dir(VHost)),
+ rabbit_data_coercion:to_list(filename:join([msg_store_dir_base(),
+ EncodedName])).
+
+msg_store_dir_wildcard() ->
+ rabbit_data_coercion:to_list(filename:join([msg_store_dir_base(), "*"])).
+
+msg_store_dir_base() ->
+ Dir = rabbit_mnesia:dir(),
+ filename:join([Dir, "msg_stores", "vhosts"]).
+
%%----------------------------------------------------------------------------
infos(Items, X) -> [{Item, i(Item, X)} || Item <- Items].
@@ -188,3 +217,4 @@ info_all(Ref, AggregatorPid) -> info_all(?INFO_KEYS, Ref, AggregatorPid).
info_all(Items, Ref, AggregatorPid) ->
rabbit_control_misc:emitting_map(
AggregatorPid, Ref, fun(VHost) -> info(VHost, Items) end, list()).
+
diff --git a/src/rabbit_vm.erl b/src/rabbit_vm.erl
index eae7119007..59c63022d8 100644
--- a/src/rabbit_vm.erl
+++ b/src/rabbit_vm.erl
@@ -42,7 +42,7 @@ memory() ->
|| Names <- distinguished_interesting_sups()],
Mnesia = mnesia_memory(),
- MsgIndexETS = ets_memory([msg_store_persistent, msg_store_transient]),
+ MsgIndexETS = ets_memory([msg_store_persistent_vhost, msg_store_transient_vhost]),
MetricsETS = ets_memory([rabbit_metrics]),
MetricsProc = try
[{_, M}] = process_info(whereis(rabbit_metrics), [memory]),
@@ -149,7 +149,7 @@ interesting_sups() ->
[[rabbit_amqqueue_sup_sup], conn_sups() | interesting_sups0()].
interesting_sups0() ->
- MsgIndexProcs = [msg_store_transient, msg_store_persistent],
+ MsgIndexProcs = [msg_store_transient_vhost, msg_store_persistent_vhost],
MgmtDbProcs = [rabbit_mgmt_sup_sup],
PluginProcs = plugin_sups(),
[MsgIndexProcs, MgmtDbProcs, PluginProcs].