summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorMichael Klishin <michael@clojurewerkz.org>2020-06-26 14:53:12 +0300
committerMichael Klishin <michael@clojurewerkz.org>2020-06-26 14:53:12 +0300
commitb76bd6d6530b53324eb50e31380f7bd462869417 (patch)
treeebf7f64d70351a266d9b8b7c6e0191baae1f4f87 /src
parentf935cc1a7f7ccf651129b85cd3200066b881711d (diff)
downloadrabbitmq-server-git-b76bd6d6530b53324eb50e31380f7bd462869417.tar.gz
Avoid using unfortunate terms in code comments and log messages
We switched all doc guides to use "mirror" or "secondary replica" years ago but these were never updated. Renaming functions and record/HTTP API fields (including CLI tools) would be a major breaking change, so they will be aliased or renamed with a lot more extensive review in the future.
Diffstat (limited to 'src')
-rw-r--r--src/rabbit_amqqueue.erl10
-rw-r--r--src/rabbit_mirror_queue_coordinator.erl68
-rw-r--r--src/rabbit_mirror_queue_master.erl6
-rw-r--r--src/rabbit_mirror_queue_misc.erl24
-rw-r--r--src/rabbit_mirror_queue_mode.erl6
-rw-r--r--src/rabbit_mirror_queue_mode_nodes.erl2
-rw-r--r--src/rabbit_mirror_queue_slave.erl8
-rw-r--r--src/rabbit_mirror_queue_sync.erl12
-rw-r--r--src/rabbit_prequeue.erl2
9 files changed, 69 insertions, 69 deletions
diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl
index 7d1837550a..fbf169363e 100644
--- a/src/rabbit_amqqueue.erl
+++ b/src/rabbit_amqqueue.erl
@@ -679,7 +679,7 @@ with(#resource{} = Name, F, E, RetriesLeft) ->
{ok, Q} when ?amqqueue_state_is(Q, crashed) ->
E({absent, Q, crashed});
%% The queue process has been stopped by a supervisor.
- %% In that case a synchronised slave can take over
+ %% In that case a synchronised mirror can take over
%% so we should retry.
{ok, Q} when ?amqqueue_state_is(Q, stopped) ->
%% The queue process was stopped by the supervisor
@@ -715,7 +715,7 @@ retry_wait(Q, F, E, RetriesLeft) ->
QState = amqqueue:get_state(Q),
case {QState, is_replicated(Q)} of
%% We don't want to repeat an operation if
- %% there are no slaves to migrate to
+ %% there are no mirrors to migrate to
{stopped, false} ->
E({absent, Q, stopped});
_ ->
@@ -1869,7 +1869,7 @@ forget_node_for_queue(DeadNode, Q) ->
forget_node_for_queue(DeadNode, RS, Q).
forget_node_for_queue(_DeadNode, [], Q) ->
- %% No slaves to recover from, queue is gone.
+ %% No mirrors to recover from, queue is gone.
%% Don't process_deletions since that just calls callbacks and we
%% are not really up.
Name = amqqueue:get_name(Q),
@@ -1986,7 +1986,7 @@ maybe_clear_recoverable_node(Node, Q) ->
%% by the incoming slave node and this function, called
%% by the master node. If this function is executed after
%% record_synchronised/1, the node is erroneously removed
- %% from the recoverable slaves list.
+ %% from the recoverable mirrors list.
%%
%% We check if the slave node's queue PID is alive. If it is
%% the case, then this function is executed after. In this
@@ -2134,7 +2134,7 @@ deliver(Qs, Delivery = #delivery{flow = Flow,
noflow -> ok
end,
- %% We let slaves know that they were being addressed as slaves at
+ %% We let mirrors know that they were being addressed as mirrors at
%% the time - if they receive such a message from the channel
%% after they have become master they should mark the message as
%% 'delivered' since they do not know what the master may have
diff --git a/src/rabbit_mirror_queue_coordinator.erl b/src/rabbit_mirror_queue_coordinator.erl
index 9c185b0f24..2437f77b09 100644
--- a/src/rabbit_mirror_queue_coordinator.erl
+++ b/src/rabbit_mirror_queue_coordinator.erl
@@ -48,7 +48,7 @@
%% +----------+ +-------+--------------+-----------...etc...
%% | | |
%% V V V
-%% amqqueue_process---+ slave-----+ slave-----+ ...etc...
+%% amqqueue_process---+ mirror----+ mirror----+ ...etc...
%% | BQ = master----+ | | BQ = vq | | BQ = vq |
%% | | BQ = vq | | +-+-------+ +-+-------+
%% | +-+-------+ | | |
@@ -63,50 +63,50 @@
%% consumers
%%
%% The master is merely an implementation of bq, and thus is invoked
-%% through the normal bq interface by the amqqueue_process. The slaves
+%% through the normal bq interface by the amqqueue_process. The mirrors
%% meanwhile are processes in their own right (as is the
-%% coordinator). The coordinator and all slaves belong to the same gm
+%% coordinator). The coordinator and all mirrors belong to the same gm
%% group. Every member of a gm group receives messages sent to the gm
%% group. Because the master is the bq of amqqueue_process, it doesn't
%% have sole control over its mailbox, and as a result, the master
%% itself cannot be passed messages directly (well, it could by via
%% the amqqueue:run_backing_queue callback but that would induce
%% additional unnecessary loading on the master queue process), yet it
-%% needs to react to gm events, such as the death of slaves. Thus the
+%% needs to react to gm events, such as the death of mirrors. Thus the
%% master creates the coordinator, and it is the coordinator that is
%% the gm callback module and event handler for the master.
%%
%% Consumers are only attached to the master. Thus the master is
-%% responsible for informing all slaves when messages are fetched from
+%% responsible for informing all mirrors when messages are fetched from
%% the bq, when they're acked, and when they're requeued.
%%
-%% The basic goal is to ensure that all slaves performs actions on
+%% The basic goal is to ensure that all mirrors perform actions on
%% their bqs in the same order as the master. Thus the master
%% intercepts all events going to its bq, and suitably broadcasts
-%% these events on the gm. The slaves thus receive two streams of
+%% these events on the gm. The mirrors thus receive two streams of
%% events: one stream is via the gm, and one stream is from channels
%% directly. Whilst the stream via gm is guaranteed to be consistently
-%% seen by all slaves, the same is not true of the stream via
+%% seen by all mirrors, the same is not true of the stream via
%% channels. For example, in the event of an unexpected death of a
%% channel during a publish, only some of the mirrors may receive that
%% publish. As a result of this problem, the messages broadcast over
-%% the gm contain published content, and thus slaves can operate
+%% the gm contain published content, and thus mirrors can operate
%% successfully on messages that they only receive via the gm.
%%
%% The key purpose of also sending messages directly from the channels
-%% to the slaves is that without this, in the event of the death of
+%% to the mirrors is that without this, in the event of the death of
%% the master, messages could be lost until a suitable slave is
%% promoted. However, that is not the only reason. A slave cannot send
%% confirms for a message until it has seen it from the
%% channel. Otherwise, it might send a confirm to a channel for a
%% message that it might *never* receive from that channel. This can
-%% happen because new slaves join the gm ring (and thus receive
+%% happen because new mirrors join the gm ring (and thus receive
%% messages from the master) before inserting themselves in the
%% queue's mnesia record (which is what channels look at for routing).
%% As it turns out, channels will simply ignore such bogus confirms,
%% but relying on that would introduce a dangerously tight coupling.
%%
-%% Hence the slaves have to wait until they've seen both the publish
+%% Hence the mirrors have to wait until they've seen both the publish
%% via gm, and the publish via the channel before they issue the
%% confirm. Either form of publish can arrive first, and a slave can
%% be upgraded to the master at any point during this
@@ -116,7 +116,7 @@
%% amqqueue API. However, it does not need to implement all parts: for
%% example, no ack or consumer-related message can arrive directly at
%% a slave from a channel: it is only publishes that pass both
-%% directly to the slaves and go via gm.
+%% directly to the mirrors and go via gm.
%%
%% Slaves can be added dynamically. When this occurs, there is no
%% attempt made to sync the current contents of the master with the
@@ -144,18 +144,18 @@
%% the master queue but can't go back in the slave, since we don't
%% want "holes" in the slave queue. Note that the depth, and the
%% length likewise, must always be shorter on the slave - we assert
-%% that in various places. In case slaves are joined to an empty queue
+%% that in various places. In case mirrors are joined to an empty queue
%% which only goes on to receive publishes, they start by asking the
-%% master to broadcast its depth. This is enough for slaves to always
+%% master to broadcast its depth. This is enough for mirrors to always
%% be able to work out when their head does not differ from the master
%% (and is much simpler and cheaper than getting the master to hang on
%% to the guid of the msg at the head of its queue). When a slave is
%% promoted to a master, it unilaterally broadcasts its depth, in
-%% order to solve the problem of depth requests from new slaves being
+%% order to solve the problem of depth requests from new mirrors being
%% unanswered by a dead master.
%%
%% Obviously, due to the async nature of communication across gm, the
-%% slaves can fall behind. This does not matter from a sync pov: if
+%% mirrors can fall behind. This does not matter from a sync pov: if
%% they fall behind and the master dies then a) no publishes are lost
%% because all publishes go to all mirrors anyway; b) the worst that
%% happens is that acks get lost and so messages come back to
@@ -164,12 +164,12 @@
%% but close enough for jazz).
%%
%% Because acktags are issued by the bq independently, and because
-%% there is no requirement for the master and all slaves to use the
+%% there is no requirement for the master and all mirrors to use the
%% same bq, all references to msgs going over gm is by msg_id. Thus
%% upon acking, the master must convert the acktags back to msg_ids
%% (which happens to be what bq:ack returns), then sends the msg_ids
-%% over gm, the slaves must convert the msg_ids to acktags (a mapping
-%% the slaves themselves must maintain).
+%% over gm, the mirrors must convert the msg_ids to acktags (a mapping
+%% the mirrors themselves must maintain).
%%
%% When the master dies, a slave gets promoted. This will be the
%% eldest slave, and thus the hope is that that slave is most likely
@@ -196,9 +196,9 @@
%% mirrors to be able to detect this and tidy up as necessary to avoid
%% leaks. If we just had the master monitoring all senders then we
%% would have the possibility that a sender appears and only sends the
-%% message to a few of the slaves before dying. Those slaves would
+%% message to a few of the mirrors before dying. Those mirrors would
%% then hold on to the message, assuming they'll receive some
-%% instruction eventually from the master. Thus we have both slaves
+%% instruction eventually from the master. Thus we have both mirrors
%% and the master monitor all senders they become aware of. But there
%% is a race: if the slave receives a DOWN of a sender, how does it
%% know whether or not the master is going to send it instructions
@@ -209,8 +209,8 @@
%% coordinator receives a DOWN message from a sender, it informs the
%% master via a callback. This allows the master to do any tidying
%% necessary, but more importantly allows the master to broadcast a
-%% sender_death message to all the slaves, saying the sender has
-%% died. Once the slaves receive the sender_death message, they know
+%% sender_death message to all the mirrors, saying the sender has
+%% died. Once the mirrors receive the sender_death message, they know
%% that they're not going to receive any more instructions from the gm
%% regarding that sender. However, it is possible that the coordinator
%% receives the DOWN and communicates that to the master before the
@@ -230,11 +230,11 @@
%% received the sender_death message from the master via gm already,
%% then it will wait 20 seconds before broadcasting a request for
%% confirmation from the master that the sender really has died.
-%% Should a sender have only sent a publish to slaves, this allows
-%% slaves to inform the master of the previous existence of the
+%% Should a sender have only sent a publish to mirrors, this allows
+%% mirrors to inform the master of the previous existence of the
%% sender. The master will thus monitor the sender, receive the DOWN,
%% and subsequently broadcast the sender_death message, allowing the
-%% slaves to tidy up. This process can repeat for the same sender:
+%% mirrors to tidy up. This process can repeat for the same sender:
%% consider one slave receives the publication, then the DOWN, then
%% asks for confirmation of death, then the master broadcasts the
%% sender_death message. Only then does another slave receive the
@@ -248,7 +248,7 @@
%% When the 20 second timer expires, the slave first checks to see
%% whether it still needs confirmation of the death before requesting
%% it. This prevents unnecessary traffic on gm as it allows one
-%% broadcast of the sender_death message to satisfy many slaves.
+%% broadcast of the sender_death message to satisfy many mirrors.
%%
%% If we consider the promotion of a slave at this point, we have two
%% possibilities: that of the slave that has received the DOWN and is
@@ -257,14 +257,14 @@
%% DOWN. In the first case, in the act of promotion to master, the new
%% master will monitor again the dead sender, and after it has
%% finished promoting itself, it should find another DOWN waiting,
-%% which it will then broadcast. This will allow slaves to tidy up as
+%% which it will then broadcast. This will allow mirrors to tidy up as
%% normal. In the second case, we have the possibility that
%% confirmation-of-sender-death request has been broadcast, but that
%% it was broadcast before the master failed, and that the slave being
%% promoted does not know anything about that sender, and so will not
%% monitor it on promotion. Thus a slave that broadcasts such a
%% request, at the point of broadcasting it, recurses, setting another
-%% 20 second timer. As before, on expiry of the timer, the slaves
+%% 20 second timer. As before, on expiry of the timer, the mirror
%% checks to see whether it still has not received a sender_death
%% message for the dead sender, and if not, broadcasts a death
%% confirmation request. Thus this ensures that even when a master
@@ -273,12 +273,12 @@
%% dead sender, receive the DOWN and broadcast the sender_death
%% message.
%%
-%% The preceding commentary deals with the possibility of slaves
+%% The preceding commentary deals with the possibility of mirrors
%% receiving publications from senders which the master does not, and
%% the need to prevent memory leaks in such scenarios. The inverse is
%% also possible: a partial publication may cause only the master to
%% receive a publication. It will then publish the message via gm. The
-%% slaves will receive it via gm, will publish it to their BQ and will
+%% mirrors will receive it via gm, will publish it to their BQ and will
%% set up monitoring on the sender. They will then receive the DOWN
%% message and the master will eventually publish the corresponding
%% sender_death message. The slave will then be able to tidy up its
@@ -419,7 +419,7 @@ handle_pre_hibernate(State = #state { gm = GM }) ->
%% timely notification of slave death if policy changes when
%% everything is idle. So cause some activity just before we
%% sleep. This won't cause us to go into perpetual motion as the
- %% heartbeat does not wake up coordinator or slaves.
+ %% heartbeat does not wake up coordinator or mirrors.
gm:broadcast(GM, hibernate_heartbeat),
{hibernate, State}.
@@ -446,7 +446,7 @@ handle_msg([_CPid], _From, {delete_and_terminate, _Reason}) ->
%% actually delivered. Then it calls handle_terminate/2 below so the
%% coordinator is stopped.
%%
- %% If we stop the coordinator right now, remote slaves could see the
+ %% If we stop the coordinator right now, remote mirrors could see the
%% coordinator DOWN before delete_and_terminate was delivered to all
%% GMs. One of those GM would be promoted as the master, and this GM
%% would hang forever, waiting for other GMs to stop.
diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl
index cf0c196a73..96c96bd689 100644
--- a/src/rabbit_mirror_queue_master.erl
+++ b/src/rabbit_mirror_queue_master.erl
@@ -118,9 +118,9 @@ init_with_existing_bq(Q0, BQ, BQS) when ?is_amqqueue(Q0) ->
{_MNode, SNodes} = rabbit_mirror_queue_misc:suggested_queue_nodes(Q0),
%% We need synchronous add here (i.e. do not return until the
%% slave is running) so that when queue declaration is finished
- %% all slaves are up; we don't want to end up with unsynced slaves
+ %% all mirrors are up; we don't want to end up with unsynced mirrors
%% just by declaring a new queue. But add can't be synchronous all
- %% the time as it can be called by slaves and that's
+ %% the time as it can be called by mirrors and that's
%% deadlock-prone.
rabbit_mirror_queue_misc:add_mirrors(QName, SNodes, sync),
#state{name = QName,
@@ -207,7 +207,7 @@ terminate(Reason,
true -> %% Remove the whole queue to avoid data loss
rabbit_mirror_queue_misc:log_warning(
QName, "Stopping all nodes on master shutdown since no "
- "synchronised slave is available~n", []),
+ "synchronised mirror (replica) is available~n", []),
stop_all_slaves(Reason, State);
false -> %% Just let some other slave take over.
ok
diff --git a/src/rabbit_mirror_queue_misc.erl b/src/rabbit_mirror_queue_misc.erl
index d92e74ea7e..df878986bf 100644
--- a/src/rabbit_mirror_queue_misc.erl
+++ b/src/rabbit_mirror_queue_misc.erl
@@ -317,11 +317,11 @@ store_updated_slaves(Q0) when ?is_amqqueue(Q0) ->
%% Recoverable nodes are those which we could promote if the whole
%% cluster were to suddenly stop and we then lose the master; i.e. all
-%% nodes with running slaves, and all stopped nodes which had running
-%% slaves when they were up.
+%% nodes with running mirrors, and all stopped nodes which had running
+%% mirrors when they were up.
%%
-%% Therefore we aim here to add new nodes with slaves, and remove
-%% running nodes without slaves, We also try to keep the order
+%% Therefore we aim here to add new nodes with mirrors, and remove
+%% running nodes without mirrors. We also try to keep the order
%% constant, and similar to the live SPids field (i.e. oldest
%% first). That's not necessarily optimal if nodes spend a long time
%% down, but we don't have a good way to predict what the optimal is
@@ -337,10 +337,10 @@ update_recoverable(SPids, RS) ->
stop_all_slaves(Reason, SPids, QName, GM, WaitTimeout) ->
PidsMRefs = [{Pid, erlang:monitor(process, Pid)} || Pid <- [GM | SPids]],
ok = gm:broadcast(GM, {delete_and_terminate, Reason}),
- %% It's possible that we could be partitioned from some slaves
+ %% It's possible that we could be partitioned from some mirrors
%% between the lookup and the broadcast, in which case we could
%% monitor them but they would not have received the GM
- %% message. So only wait for slaves which are still
+ %% message. So only wait for mirrors which are still
%% not-partitioned.
PendingSlavePids = lists:foldl(fun({Pid, MRef}, Acc) ->
case rabbit_mnesia:on_running_node(Pid) of
@@ -365,7 +365,7 @@ stop_all_slaves(Reason, SPids, QName, GM, WaitTimeout) ->
[Q0] = mnesia:read({rabbit_queue, QName}),
Q1 = amqqueue:set_gm_pids(Q0, []),
Q2 = amqqueue:set_slave_pids(Q1, []),
- %% Restarted slaves on running nodes can
+ %% Restarted mirrors on running nodes can
%% ensure old incarnations are stopped using
%% the pending slave pids.
Q3 = amqqueue:set_slave_pids_pending_shutdown(Q2, PendingSlavePids),
@@ -534,10 +534,10 @@ update_mirrors(Q) when ?is_amqqueue(Q) ->
OldNodes = [OldMNode | OldSNodes],
NewNodes = [NewMNode | NewSNodes],
%% When a mirror dies, remove_from_queue/2 might have to add new
- %% slaves (in "exactly" mode). It will check mnesia to see which
- %% slaves there currently are. If drop_mirror/2 is invoked first
+ %% mirrors (in "exactly" mode). It will check mnesia to see which
+ %% mirrors there currently are. If drop_mirror/2 is invoked first
%% then when we end up in remove_from_queue/2 it will not see the
- %% slaves that add_mirror/2 will add, and also want to add them
+ %% mirrors that add_mirror/2 will add, and also want to add them
%% (even though we are not responding to the death of a
%% mirror). Breakage ensues.
add_mirrors (QName, NewNodes -- OldNodes, async),
@@ -589,7 +589,7 @@ wait_for_new_master(QName, Destination, N) ->
%% The arrival of a newly synced slave may cause the master to die if
%% the policy does not want the master but it has been kept alive
-%% because there were no synced slaves.
+%% because there were no synced mirrors.
%%
%% We don't just call update_mirrors/2 here since that could decide to
%% start a slave for some other reason, and since we are the slave ATM
@@ -608,7 +608,7 @@ maybe_drop_master_after_sync(Q) when ?is_amqqueue(Q) ->
end,
ok.
%% [0] ASSERTION - if the policy wants the master to change, it has
-%% not just shuffled it into the slaves. All our modes ensure this
+%% not just shuffled it into the mirrors. All our modes ensure this
%% does not happen, but we should guard against a misbehaving plugin.
%%----------------------------------------------------------------------------
diff --git a/src/rabbit_mirror_queue_mode.erl b/src/rabbit_mirror_queue_mode.erl
index 377c98f726..8c335668f6 100644
--- a/src/rabbit_mirror_queue_mode.erl
+++ b/src/rabbit_mirror_queue_mode.erl
@@ -33,11 +33,11 @@
%%
%% Takes: parameters set in the policy,
%% current master,
-%% current slaves,
-%% current synchronised slaves,
+%% current mirrors,
+%% current synchronised mirrors,
%% all nodes to consider
%%
-%% Returns: tuple of new master, new slaves
+%% Returns: tuple of new master, new mirrors
%%
-callback suggested_queue_nodes(
params(), master(), [slave()], [slave()], [node()]) ->
diff --git a/src/rabbit_mirror_queue_mode_nodes.erl b/src/rabbit_mirror_queue_mode_nodes.erl
index af6ddaef49..df9fe6fafe 100644
--- a/src/rabbit_mirror_queue_mode_nodes.erl
+++ b/src/rabbit_mirror_queue_mode_nodes.erl
@@ -35,7 +35,7 @@ description() ->
suggested_queue_nodes(PolicyNodes0, CurrentMaster, _SNodes, SSNodes, NodesRunningRabbitMQ) ->
PolicyNodes1 = [list_to_atom(binary_to_list(Node)) || Node <- PolicyNodes0],
%% If the current master is not in the nodes specified, then what we want
- %% to do depends on whether there are any synchronised slaves. If there
+ %% to do depends on whether there are any synchronised mirrors. If there
%% are then we can just kill the current master - the admin has asked for
%% a migration and we should give it to them. If there are not however
%% then we must keep the master around so as not to lose messages.
diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl
index 0c880e9a5d..f9651a035f 100644
--- a/src/rabbit_mirror_queue_slave.erl
+++ b/src/rabbit_mirror_queue_slave.erl
@@ -193,8 +193,8 @@ init_it(Self, GM, Node, QName) ->
master_in_recovery
end.
-%% Pending slaves have been asked to stop by the master, but despite the node
-%% being up these did not answer on the expected timeout. Stop local slaves now.
+%% Pending mirrors have been asked to stop by the master, but despite the node
+%% being up these did not answer on the expected timeout. Stop local mirrors now.
stop_pending_slaves(QName, Pids) ->
[begin
rabbit_mirror_queue_misc:log_warning(
@@ -276,7 +276,7 @@ handle_call({gm_deaths, DeadGMPids}, From,
end,
%% Since GM is by nature lazy we need to make sure
%% there is some traffic when a master dies, to
- %% make sure all slaves get informed of the
+ %% make sure all mirrors get informed of the
%% death. That is all process_death does, create
%% some traffic.
ok = gm:broadcast(GM, process_death),
@@ -323,7 +323,7 @@ handle_cast({deliver, Delivery = #delivery{sender = Sender, flow = Flow}, true},
%% the message delivery. See
%% rabbit_amqqueue_process:handle_ch_down for more info.
%% If message is rejected by the master, the publish will be nacked
- %% even if slaves confirm it. No need to check for length here.
+ %% even if mirrors confirm it. No need to check for length here.
maybe_flow_ack(Sender, Flow),
noreply(maybe_enqueue_message(Delivery, State));
diff --git a/src/rabbit_mirror_queue_sync.erl b/src/rabbit_mirror_queue_sync.erl
index c516470bf9..76774e4f2b 100644
--- a/src/rabbit_mirror_queue_sync.erl
+++ b/src/rabbit_mirror_queue_sync.erl
@@ -194,11 +194,11 @@ handle_set_maximum_since_use() ->
syncer(Ref, Log, MPid, SPids) ->
[erlang:monitor(process, SPid) || SPid <- SPids],
- %% We wait for a reply from the slaves so that we know they are in
+ %% We wait for a reply from the mirrors so that we know they are in
%% a receive block and will thus receive messages we send to them
%% *without* those messages ending up in their gen_server2 pqueue.
case await_slaves(Ref, SPids) of
- [] -> Log("all slaves already synced", []);
+ [] -> Log("all mirrors already synced", []);
SPids1 -> MPid ! {ready, self()},
Log("mirrors ~p to sync", [[node(SPid) || SPid <- SPids1]]),
syncer_check_resources(Ref, MPid, SPids1)
@@ -214,7 +214,7 @@ await_slaves(Ref, SPids) ->
end].
%% [0] This check is in case there's been a partition which has then
%% healed in between the master retrieving the slave pids from Mnesia
-%% and sending 'sync_start' over GM. If so there might be slaves on the
+%% and sending 'sync_start' over GM. If so there might be mirrors on the
%% other side of the partition which we can monitor (since they have
%% rejoined the distributed system with us) but which did not get the
%% 'sync_start' and so will not reply. We need to act as though they are
@@ -257,7 +257,7 @@ syncer_loop(Ref, MPid, SPids) ->
SPids1 = wait_for_credit(SPids),
case SPids1 of
[] ->
- % Die silently because there are no slaves left.
+ % Die silently because there are no mirrors left.
ok;
_ ->
broadcast(SPids1, {sync_msgs, Ref, Msgs}),
@@ -265,7 +265,7 @@ syncer_loop(Ref, MPid, SPids) ->
syncer_loop(Ref, MPid, SPids1)
end;
{cancel, Ref} ->
- %% We don't tell the slaves we will die - so when we do
+ %% We don't tell the mirrors we will die - so when we do
%% they interpret that as a failure, which is what we
%% want.
ok;
@@ -304,7 +304,7 @@ wait_for_resources(Ref, SPids) ->
%% Ignore other alerts.
wait_for_resources(Ref, SPids);
{cancel, Ref} ->
- %% We don't tell the slaves we will die - so when we do
+ %% We don't tell the mirrors we will die - so when we do
%% they interpret that as a failure, which is what we
%% want.
cancel;
diff --git a/src/rabbit_prequeue.erl b/src/rabbit_prequeue.erl
index 51164af53f..994e66a96d 100644
--- a/src/rabbit_prequeue.erl
+++ b/src/rabbit_prequeue.erl
@@ -81,7 +81,7 @@ init(Q0, restart) when ?is_amqqueue(Q0) ->
%%
%% [2] Nothing is alive. We are the last best hope. Try to restart as a master.
%%
-%% [3] The current master is dead but either there are alive slaves to
+%% [3] The current master is dead but either there are alive mirrors to
%% take over or it's all happening on a different node anyway. This is
%% not a stable situation. Sleep and wait for somebody else to make a
%% move.