summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/rabbit_amqqueue_process.erl45
-rw-r--r--src/rabbit_error_logger.erl4
-rw-r--r--src/rabbit_lager.erl3
-rw-r--r--src/rabbit_log.erl12
4 files changed, 59 insertions, 5 deletions
diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl
index 1a86851d0a..f79304632e 100644
--- a/src/rabbit_amqqueue_process.erl
+++ b/src/rabbit_amqqueue_process.erl
@@ -142,6 +142,7 @@ init_it(Recover, From, State = #q{q = #amqqueue{exclusive_owner = Owner}}) ->
{_, Terms} = recovery_status(Recover),
BQS = bq_init(BQ, Q, Terms),
%% Rely on terminate to delete the queue.
+ log_delete_exclusive(Owner, State),
{stop, {shutdown, missing_owner},
State#q{backing_queue = BQ, backing_queue_state = BQS}}
end.
@@ -701,7 +702,13 @@ handle_ch_down(DownPid, State = #q{consumers = Consumers,
exclusive_consumer = Holder1},
notify_decorators(State2),
case should_auto_delete(State2) of
- true -> {stop, State2};
+ true ->
+ log_auto_delete(
+ io_lib:format(
+ "because all of its consumers (~p) were on a channel that was closed",
+ [length(ChCTags)]),
+ State),
+ {stop, State2};
false -> {ok, requeue_and_run(ChAckTags,
ensure_expiry_timer(State2))}
end
@@ -939,6 +946,7 @@ prioritise_call(Msg, _From, _Len, State) ->
prioritise_cast(Msg, _Len, State) ->
case Msg of
delete_immediately -> 8;
+ {delete_exclusive, _Pid} -> 8;
{set_ram_duration_target, _Duration} -> 8;
{set_maximum_since_use, _Age} -> 8;
{run_backing_queue, _Mod, _Fun} -> 6;
@@ -1063,7 +1071,13 @@ handle_call({basic_cancel, ChPid, ConsumerTag, OkMsg}, _From,
notify_decorators(State1),
case should_auto_delete(State1) of
false -> reply(ok, ensure_expiry_timer(State1));
- true -> stop(ok, State1)
+ true ->
+ log_auto_delete(
+ io_lib:format(
+ "because its last consumer with tag '~s' was cancelled",
+ [ConsumerTag]),
+ State),
+ stop(ok, State1)
end
end;
@@ -1165,6 +1179,10 @@ handle_cast({reject, false, AckTags, ChPid}, State) ->
end) end,
fun () -> ack(AckTags, ChPid, State) end));
+handle_cast({delete_exclusive, ConnPid}, State) ->
+ log_delete_exclusive(ConnPid, State),
+ stop(State);
+
handle_cast(delete_immediately, State) ->
stop(State);
@@ -1284,6 +1302,7 @@ handle_info({'DOWN', _MonitorRef, process, DownPid, _Reason},
%% match what people expect (see bug 21824). However we need this
%% monitor-and-async- delete in case the connection goes away
%% unexpectedly.
+ log_delete_exclusive(DownPid, State),
stop(State);
handle_info({'DOWN', _MonitorRef, process, DownPid, _Reason}, State) ->
@@ -1347,3 +1366,25 @@ handle_pre_hibernate(State = #q{backing_queue = BQ,
{hibernate, stop_rate_timer(State1)}.
format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ).
+
+log_delete_exclusive({ConPid, _ConRef}, State) ->
+ log_delete_exclusive(ConPid, State);
+log_delete_exclusive(ConPid, #q{ q = #amqqueue{ name = Resource } }) ->
+ #resource{ name = QName, virtual_host = VHost } = Resource,
+    rabbit_queue:debug("Deleting exclusive queue '~s' in vhost '~s' "
+                       "because its declaring connection ~p was closed",
+                       [QName, VHost, ConPid]).
+
+log_auto_delete(Reason, #q{ q = #amqqueue{ name = Resource } }) ->
+ #resource{ name = QName, virtual_host = VHost } = Resource,
+    rabbit_queue:debug("Deleting auto-delete queue '~s' in vhost '~s' ~s",
+                       [QName, VHost,
+                        Reason]).
+
+
+
+
+
+
+
+
diff --git a/src/rabbit_error_logger.erl b/src/rabbit_error_logger.erl
index e8b7ce5669..efe8495299 100644
--- a/src/rabbit_error_logger.erl
+++ b/src/rabbit_error_logger.erl
@@ -103,10 +103,12 @@ publish1(RoutingKey, Format, Data, LogExch) ->
Timestamp = time_compat:os_system_time(seconds),
Args = [truncate:term(A, ?LOG_TRUNC) || A <- Data],
+ Headers = [{<<"node">>, longstr, list_to_binary(atom_to_list(node()))}],
{ok, _DeliveredQPids} =
rabbit_basic:publish(LogExch, RoutingKey,
#'P_basic'{content_type = <<"text/plain">>,
- timestamp = Timestamp},
+ timestamp = Timestamp,
+ headers = Headers},
list_to_binary(io_lib:format(Format, Args))),
ok.
diff --git a/src/rabbit_lager.erl b/src/rabbit_lager.erl
index 6ae9c10a5e..8beee10846 100644
--- a/src/rabbit_lager.erl
+++ b/src/rabbit_lager.erl
@@ -210,7 +210,8 @@ configure_lager() ->
%% messages to the default sink. To know the list of expected extra
%% sinks, we look at the 'lager_extra_sinks' compilation option.
Sinks0 = application:get_env(lager, extra_sinks, []),
- Sinks1 = configure_extra_sinks(Sinks0, list_expected_sinks()),
+ Sinks1 = configure_extra_sinks(Sinks0,
+ [error_logger | list_expected_sinks()]),
%% TODO Waiting for basho/lager#303
%% Sinks2 = lists:keystore(error_logger_lager_event, 1, Sinks1,
%% {error_logger_lager_event,
diff --git a/src/rabbit_log.erl b/src/rabbit_log.erl
index 5eedd925fe..a22dcbb6f0 100644
--- a/src/rabbit_log.erl
+++ b/src/rabbit_log.erl
@@ -74,10 +74,20 @@ log(Category, Level, Fmt) -> log(Category, Level, Fmt, []).
log(Category, Level, Fmt, Args) when is_list(Args) ->
Sink = case Category of
default -> ?LAGER_SINK;
- _ -> lager_util:make_internal_sink_name(Category)
+ _ -> make_internal_sink_name(Category)
end,
lager:log(Sink, Level, self(), Fmt, Args).
+make_internal_sink_name(Category) when Category == channel;
+ Category == connection;
+ Category == mirroring;
+ Category == queue;
+ Category == federation ->
+ lager_util:make_internal_sink_name(list_to_atom("rabbit_" ++
+ atom_to_list(Category)));
+make_internal_sink_name(Category) ->
+ lager_util:make_internal_sink_name(Category).
+
debug(Format) -> debug(Format, []).
debug(Format, Args) -> debug(self(), Format, Args).
debug(Metadata, Format, Args) ->