summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rwxr-xr-x.travis.sh258
-rw-r--r--.travis.yml65
-rw-r--r--src/rabbit_amqqueue.erl16
-rw-r--r--src/rabbit_amqqueue_process.erl13
-rw-r--r--src/rabbit_channel.erl2
-rw-r--r--src/rabbit_vhost_process.erl1
-rw-r--r--test/rabbitmqctl_integration_SUITE.erl33
-rw-r--r--test/vhost_SUITE.erl24
8 files changed, 374 insertions, 38 deletions
diff --git a/.travis.sh b/.travis.sh
new file mode 100755
index 0000000000..f5f786b907
--- /dev/null
+++ b/.travis.sh
@@ -0,0 +1,258 @@
+#!/usr/bin/env bash
+
+set -o nounset
+set -o errexit
+
+declare -r tmp_file="$(mktemp)"
+declare -r script_arg="${1:-unset}"
+
+function onexit
+{
+ rm -vf "$tmp_file"
+}
+
+trap onexit EXIT
+
+function main
+{
+ # Note: if script_arg is kiex_cleanup,
+ # this function exits early
+ kiex_cleanup
+
+ # Note: if script_arg is tests,
+ # this function exits early
+ maybe_run_tests "$@"
+
+ ensure_directories
+ ensure_kerl
+ ensure_kiex
+ ensure_make
+ ensure_otp
+}
+
+function test_group_0
+{
+ make ct-backing_queue
+ make ct-channel_interceptor
+ make ct-channel_operation_timeout
+ make ct-cluster_formation_locking
+}
+
+function test_group_1
+{
+ make ct-clustering_management
+ make ct-cluster_rename
+ make ct-cluster
+ make ct-config_schema
+}
+
+function test_group_2
+{
+ make ct-crashing_queues
+ make ct-credential_validation
+ make ct-disconnect_detected_during_alarm
+ make ct-dynamic_ha
+}
+
+function test_group_3
+{
+ make ct-eager_sync
+ make ct-gm
+ make ct-health_check
+ make ct-lazy_queue
+}
+
+function test_group_4
+{
+ make ct-list_consumers_sanity_check
+ make ct-list_queues_online_and_offline
+ make ct-many_node_ha
+ make ct-metrics
+}
+
+function test_group_5
+{
+ make ct-mirrored_supervisor
+ make ct-msg_store
+ # TODO FUTURE HACK
+ # This suite fails frequently on Travis CI
+ # make ct-partitions
+ make ct-peer_discovery_dns
+}
+
+function test_group_6
+{
+ make ct-per_user_connection_tracking
+ make ct-per_vhost_connection_limit_partitions
+ make ct-per_vhost_connection_limit
+ make ct-per_vhost_msg_store
+}
+
+function test_group_7
+{
+ make ct-per_vhost_queue_limit
+ make ct-plugin_versioning
+ make ct-policy
+ make ct-priority_queue_recovery
+}
+
+function test_group_8
+{
+ make ct-priority_queue
+ make ct-proxy_protocol
+ make ct-queue_master_location
+ make ct-rabbit_core_metrics_gc
+}
+
+function test_group_9
+{
+ make ct-rabbitmqctl_integration
+ make ct-rabbitmqctl_shutdown
+ make ct-simple_ha
+ make ct-sup_delayed_restart
+}
+
+function test_group_10
+{
+ make ct-sync_detection
+ make ct-term_to_binary_compat_prop
+ make ct-topic_permission
+ make ct-unit_inbroker_non_parallel
+}
+
+function test_group_11
+{
+ make ct-unit_inbroker_parallel
+ make ct-unit
+ make ct-worker_pool
+}
+
+function maybe_run_tests
+{
+ if [[ $script_arg == 'tests' ]]
+ then
+ # Note: Travis env specifies test suite number
+ local -ri group="${2:-999}"
+
+ local -r test_func="test_group_$group"
+ "$test_func"
+
+ # Only doing tests, so early exit
+ exit 0
+ fi
+}
+
+function kiex_cleanup
+{
+ rm -vf "$HOME"/.kiex/bin/*.bak*
+ rm -vf "$HOME"/.kiex/elixirs/.*.old
+ rm -vf "$HOME"/.kiex/elixirs/*.old
+ rm -vf "$HOME"/.kiex/scripts/*.bak*
+
+ if [[ $script_arg == 'kiex_cleanup' ]]
+ then
+ # Only doing cleanup, so early exit
+ exit 0
+ fi
+}
+
+
+function ensure_directories
+{
+ set +o errexit
+ mkdir "$HOME/otp"
+ mkdir "$HOME/bin"
+ set -o errexit
+ export PATH="$HOME/bin:$PATH"
+}
+
+function ensure_kerl
+{
+ curl -Lo "$HOME/bin/kerl" https://raw.githubusercontent.com/kerl/kerl/master/kerl
+ chmod 755 "$HOME/bin/kerl"
+}
+
+function ensure_kiex
+{
+ curl -sSL https://raw.githubusercontent.com/taylor/kiex/master/install | /usr/bin/env bash -s
+ local -r kiex_script="$HOME/.kiex/scripts/kiex"
+ if [[ -s $kiex_script ]]
+ then
+ source "$kiex_script"
+ # Note: this produces a lot of output but without running
+ # "list known" first, kiex install ... sometimes fails
+ kiex list known
+ kiex_cleanup
+ else
+ echo "Did not find kiex at $kiex_script" 1>&2
+ exit 1
+ fi
+}
+
+function ensure_make
+{
+ # GNU Make build variables
+ local -r make_install_dir="$HOME/gmake"
+ local -r make_bin_dir="$make_install_dir/bin"
+
+ export PATH="$make_bin_dir:$PATH"
+
+ if [[ -x $make_bin_dir/make ]]
+ then
+ echo "Found GNU Make installation at $make_install_dir"
+ else
+ mkdir -p "$make_install_dir"
+ curl -sLO http://ftp.gnu.org/gnu/make/make-4.2.1.tar.gz
+ tar xf make-4.2.1.tar.gz
+ pushd make-4.2.1
+ ./configure --prefix="$make_install_dir"
+ make
+ make install
+ popd
+ fi
+}
+
+function build_ticker
+{
+ local status
+
+ status=$(< "$tmp_file")
+ while [[ $status == 'true' ]]
+ do
+ echo '------------------------------------------------------------------------------------------------------------------------------------------------'
+ echo "$(date) building $otp_tag_name ..."
+ if ls "$otp_build_log_dir"/otp_build*.log > /dev/null
+ then
+ tail "$otp_build_log_dir"/otp_build*.log
+ fi
+ sleep 10
+ status=$(< "$tmp_file")
+ done
+ echo '.'
+}
+
+function ensure_otp
+{
+ # OTP build variables
+ local -r otp_tag_name="$script_arg"
+ local -r otp_build_log_dir="$HOME/.kerl/builds/$otp_tag_name"
+ local -r otp_install_dir="$HOME/otp/$otp_tag_name"
+ if [[ -s $otp_install_dir/activate ]]
+ then
+ echo "Found OTP installation at $otp_install_dir"
+ else
+ export KERL_CONFIGURE_OPTIONS='--enable-hipe --enable-smp-support --enable-threads --enable-kernel-poll'
+ rm -rf "$otp_install_dir"
+ mkdir -p "$otp_install_dir"
+
+ echo -n 'true' > "$tmp_file"
+ build_ticker &
+ kerl build git https://github.com/erlang/otp.git "$otp_tag_name" "$otp_tag_name"
+ echo -n 'false' > "$tmp_file"
+ wait
+
+ kerl install "$otp_tag_name" "$otp_install_dir"
+ fi
+}
+
+main "$@"
diff --git a/.travis.yml b/.travis.yml
index 7a46f085b5..41f45de163 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,32 +1,34 @@
# vim:sw=2:et:
+sudo: false
-# Use a real VM so we can install all the packages we want.
-sudo: required
+language: generic
-language: erlang
-notifications:
- email:
- - alerts@rabbitmq.com
addons:
apt:
- sources:
- - sourceline: deb https://packages.erlang-solutions.com/ubuntu precise contrib
- key_url: https://packages.erlang-solutions.com/ubuntu/erlang_solutions.asc
packages:
- # Use Elixir from Erlang Solutions. The provided Elixir is
- # installed with kiex but is old. By using an prebuilt Debian
- # package, we save the compilation time.
- - elixir
- - xsltproc
+ - unixodbc
+ - unixodbc-dev
+ - libwxgtk2.8-dev
+
otp_release:
- - "19.2"
- - "19.3"
+ - "20.0"
+
services:
- docker
+
env:
- matrix:
- - GROUP=1
- - GROUP=2
+ - OTP_TAG_NAME=OTP-20.0 TEST_SUITE=0
+ - OTP_TAG_NAME=OTP-20.0 TEST_SUITE=1
+ - OTP_TAG_NAME=OTP-20.0 TEST_SUITE=2
+ - OTP_TAG_NAME=OTP-20.0 TEST_SUITE=3
+ - OTP_TAG_NAME=OTP-20.0 TEST_SUITE=4
+ - OTP_TAG_NAME=OTP-20.0 TEST_SUITE=5
+ - OTP_TAG_NAME=OTP-20.0 TEST_SUITE=6
+ - OTP_TAG_NAME=OTP-20.0 TEST_SUITE=7
+ - OTP_TAG_NAME=OTP-20.0 TEST_SUITE=8
+ - OTP_TAG_NAME=OTP-20.0 TEST_SUITE=9
+ - OTP_TAG_NAME=OTP-20.0 TEST_SUITE=10
+ - OTP_TAG_NAME=OTP-20.0 TEST_SUITE=11
before_script:
# The checkout made by Travis is a "detached HEAD" and branches
@@ -40,13 +42,26 @@ before_script:
git remote add upstream https://github.com/$TRAVIS_REPO_SLUG.git
git fetch upstream stable:stable || :
git fetch upstream master:master || :
- # Remove all kiex installations. This makes sure that the Erlang
- # Solutions one is picked: it's after the kiex installations in $PATH.
- - echo YES | kiex implode
+ # Install kerl; build gmake 4.2.1 and OTP
+ - $TRAVIS_BUILD_DIR/.travis.sh $OTP_TAG_NAME
+ - export PATH="$HOME/bin:$HOME/gmake/bin:$PATH"
+ - source "$HOME/otp/$OTP_TAG_NAME/activate"
+ - kerl active
+ - test -s "$HOME/.kiex/scripts/kiex" && source "$HOME/.kiex/scripts/kiex"
+ - test -x "$HOME/.kiex/elixirs/elixir-1.4.5/bin/elixir" || kiex install 1.4.5
+ - kiex use 1.4.5 --default
+ - mix local.hex --force
+ - make --version
script:
- - if test "${GROUP}" = '1'; then make tests; fi
- - if test "${GROUP}" = '2'; then sh ./scripts/travis_test_ocf_ra.sh; fi
+ - $TRAVIS_BUILD_DIR/.travis.sh tests $TEST_SUITE
+
+before_cache:
+ - $TRAVIS_BUILD_DIR/.travis.sh kiex_cleanup
cache:
- apt: true
+ directories:
+ - "$HOME/otp"
+ - "$HOME/.kiex"
+ - "$HOME/gmake"
+ - "$HOME/bin"
diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl
index ff57593374..5537634144 100644
--- a/src/rabbit_amqqueue.erl
+++ b/src/rabbit_amqqueue.erl
@@ -40,6 +40,7 @@
-export([update_mirroring/1, sync_mirrors/1, cancel_sync_mirrors/1, is_mirrored/1]).
-export([pid_of/1, pid_of/2]).
+-export([mark_local_durable_queues_stopped/1]).
%% internal
-export([internal_declare/2, internal_delete/2, run_backing_queue/3,
@@ -255,6 +256,15 @@ start(Qs) ->
[Pid ! {self(), go} || #amqqueue{pid = Pid} <- Qs],
ok.
+mark_local_durable_queues_stopped(VHost) ->
+ Qs = find_durable_queues(VHost),
+ rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ [ store_queue(Q#amqqueue{ state = stopped })
+ || Q = #amqqueue{ state = State } <- Qs,
+ State =/= stopped ]
+ end).
+
find_durable_queues(VHost) ->
Node = node(),
mnesia:async_dirty(
@@ -452,6 +462,9 @@ with(Name, F, E, RetriesLeft) ->
E({absent, Q, timeout});
{ok, Q = #amqqueue{state = crashed}} ->
E({absent, Q, crashed});
+ {ok, Q = #amqqueue{state = stopped}} ->
+ %% The queue process was stopped by the supervisor
+ E({absent, Q, stopped});
{ok, Q = #amqqueue{pid = QPid}} ->
%% We check is_process_alive(QPid) in case we receive a
%% nodedown (for example) in F() that has nothing to do
@@ -642,10 +655,13 @@ info_keys() -> rabbit_amqqueue_process:info_keys().
map(Qs, F) -> rabbit_misc:filter_exit_map(F, Qs).
info(Q = #amqqueue{ state = crashed }) -> info_down(Q, crashed);
+info(Q = #amqqueue{ state = stopped }) -> info_down(Q, stopped);
info(#amqqueue{ pid = QPid }) -> delegate:invoke(QPid, {gen_server2, call, [info, infinity]}).
info(Q = #amqqueue{ state = crashed }, Items) ->
info_down(Q, Items, crashed);
+info(Q = #amqqueue{ state = stopped }, Items) ->
+ info_down(Q, Items, stopped);
info(#amqqueue{ pid = QPid }, Items) ->
case delegate:invoke(QPid, {gen_server2, call, [{info, Items}, infinity]}) of
{ok, Res} -> Res;
diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl
index 4e43104de2..678f1136c3 100644
--- a/src/rabbit_amqqueue_process.erl
+++ b/src/rabbit_amqqueue_process.erl
@@ -265,9 +265,18 @@ init_with_backing_queue_state(Q = #amqqueue{exclusive_owner = Owner}, BQ, BQS,
notify_decorators(startup, State3),
State3.
-terminate(shutdown = R, State = #q{backing_queue = BQ}) ->
+terminate(shutdown = R, State = #q{backing_queue = BQ, q = #amqqueue{ name = QName }}) ->
rabbit_core_metrics:queue_deleted(qname(State)),
- terminate_shutdown(fun (BQS) -> BQ:terminate(R, BQS) end, State);
+ terminate_shutdown(
+ fun (BQS) ->
+ rabbit_misc:execute_mnesia_transaction(
+ fun() ->
+ [Q] = mnesia:read({rabbit_queue, QName}),
+ Q2 = Q#amqqueue{state = stopped},
+ rabbit_amqqueue:store_queue(Q2)
+ end),
+ BQ:terminate(R, BQS)
+ end, State);
terminate({shutdown, missing_owner} = Reason, State) ->
%% if the owner was missing then there will be no queue, so don't emit stats
terminate_shutdown(terminate_delete(false, Reason, State), State);
diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl
index 00a6607dfb..c69a27d57c 100644
--- a/src/rabbit_channel.erl
+++ b/src/rabbit_channel.erl
@@ -2142,6 +2142,8 @@ handle_method(#'queue.delete'{queue = QueueNameBin,
fun (not_found) -> {ok, 0};
({absent, Q, crashed}) -> rabbit_amqqueue:delete_crashed(Q, Username),
{ok, 0};
+ ({absent, Q, stopped}) -> rabbit_amqqueue:delete_crashed(Q, Username),
+ {ok, 0};
({absent, Q, Reason}) -> rabbit_misc:absent(Q, Reason)
end) of
{error, in_use} ->
diff --git a/src/rabbit_vhost_process.erl b/src/rabbit_vhost_process.erl
index e3c815a727..f6e4a83daa 100644
--- a/src/rabbit_vhost_process.erl
+++ b/src/rabbit_vhost_process.erl
@@ -55,6 +55,7 @@ init([VHost]) ->
timer:send_interval(Interval, check_vhost),
{ok, VHost}
catch _:Reason ->
+ rabbit_amqqueue:mark_local_durable_queues_stopped(VHost),
rabbit_log:error("Unable to recover vhost ~p data. Reason ~p~n"
" Stacktrace ~p",
[VHost, Reason, erlang:get_stacktrace()]),
diff --git a/test/rabbitmqctl_integration_SUITE.erl b/test/rabbitmqctl_integration_SUITE.erl
index 535725d585..71b74ea104 100644
--- a/test/rabbitmqctl_integration_SUITE.erl
+++ b/test/rabbitmqctl_integration_SUITE.erl
@@ -31,6 +31,7 @@
-export([list_queues_local/1
,list_queues_offline/1
,list_queues_online/1
+ ,list_queues_stopped/1
]).
all() ->
@@ -44,6 +45,7 @@ groups() ->
[list_queues_local
,list_queues_online
,list_queues_offline
+ ,list_queues_stopped
]}
].
@@ -96,13 +98,19 @@ end_per_group(list_queues, Config0) ->
rabbit_ct_helpers:run_steps(Config1,
rabbit_ct_client_helpers:teardown_steps() ++
rabbit_ct_broker_helpers:teardown_steps());
-end_per_group(global_parameters, Config) ->
- rabbit_ct_helpers:run_teardown_steps(Config,
- rabbit_ct_client_helpers:teardown_steps() ++
- rabbit_ct_broker_helpers:teardown_steps());
end_per_group(_, Config) ->
Config.
+init_per_testcase(list_queues_stopped, Config0) ->
+  %% Start node 3 to crash its queues
+ rabbit_ct_broker_helpers:start_node(Config0, 2),
+ %% Make vhost "down" on nodes 2 and 3
+ rabbit_ct_broker_helpers:force_vhost_failure(Config0, 1, <<"/">>),
+ rabbit_ct_broker_helpers:force_vhost_failure(Config0, 2, <<"/">>),
+
+ rabbit_ct_broker_helpers:stop_node(Config0, 2),
+ rabbit_ct_helpers:testcase_started(Config0, list_queues_stopped);
+
init_per_testcase(Testcase, Config0) ->
rabbit_ct_helpers:testcase_started(Config0, Testcase).
@@ -134,6 +142,23 @@ list_queues_offline(Config) ->
assert_ctl_queues(Config, 1, ["--offline"], OfflineQueues),
ok.
+list_queues_stopped(Config) ->
+ Node1Queues = lists:sort(lists:nth(1, ?config(per_node_queues, Config))),
+ Node2Queues = lists:sort(lists:nth(2, ?config(per_node_queues, Config))),
+ Node3Queues = lists:sort(lists:nth(3, ?config(per_node_queues, Config))),
+
+ %% All queues are listed
+ ListedQueues =
+ [ {Name, State}
+ || [Name, State] <- rabbit_ct_broker_helpers:rabbitmqctl_list(
+ Config, 0, ["list_queues", "name", "state"]) ],
+
+ [ <<"running">> = proplists:get_value(Q, ListedQueues) || Q <- Node1Queues ],
+ %% Node is running. Vhost is down
+ [ <<"stopped">> = proplists:get_value(Q, ListedQueues) || Q <- Node2Queues ],
+ %% Node is not running. Vhost is down
+ [ <<"down">> = proplists:get_value(Q, ListedQueues) || Q <- Node3Queues ].
+
%%----------------------------------------------------------------------------
%% Helpers
%%----------------------------------------------------------------------------
diff --git a/test/vhost_SUITE.erl b/test/vhost_SUITE.erl
index 1e1c03bd72..0418312afb 100644
--- a/test/vhost_SUITE.erl
+++ b/test/vhost_SUITE.erl
@@ -87,7 +87,7 @@ init_per_group(cluster_size_2_direct, Config) ->
Config1 = rabbit_ct_helpers:set_config(Config, [{connection_type, direct}]),
init_per_multinode_group(cluster_size_2_direct, Config1, 2).
-init_per_multinode_group(Group, Config, NodeCount) ->
+init_per_multinode_group(_Group, Config, NodeCount) ->
Suffix = rabbit_ct_helpers:testcase_absname(Config, "", "-"),
Config1 = rabbit_ct_helpers:set_config(Config, [
{rmq_nodes_count, NodeCount},
@@ -115,12 +115,18 @@ end_per_testcase(Testcase, Config) ->
cluster_vhost_deletion_forces_connection_closure -> ok;
single_node_vhost_deletion_forces_connection_closure -> ok;
_ ->
- ok = rabbit_ct_broker_helpers:delete_vhost(Config, VHost2)
+ delete_vhost(Config, VHost2)
end,
- ok = rabbit_ct_broker_helpers:delete_vhost(Config, VHost1),
+ delete_vhost(Config, VHost1),
clear_all_connection_tracking_tables(Config),
rabbit_ct_helpers:testcase_finished(Config, Testcase).
+delete_vhost(Config, VHost) ->
+ case rabbit_ct_broker_helpers:delete_vhost(Config, VHost) of
+ ok -> ok;
+ {error, {no_such_vhost, _}} -> ok
+ end.
+
clear_all_connection_tracking_tables(Config) ->
[rabbit_ct_broker_helpers:rpc(Config,
N,
@@ -131,6 +137,7 @@ clear_all_connection_tracking_tables(Config) ->
%% -------------------------------------------------------------------
%% Test cases.
%% -------------------------------------------------------------------
+
single_node_vhost_deletion_forces_connection_closure(Config) ->
VHost1 = <<"vhost1">>,
VHost2 = <<"vhost2">>,
@@ -252,7 +259,10 @@ dead_vhost_connection_refused_on_failure_node(Config) ->
%% Can open connections to vhost2 on node 1
[_Conn21] = open_connections(Config, [{1, VHost2}]),
- ?assertEqual(1, count_connections_in(Config, VHost2)).
+ ?assertEqual(1, count_connections_in(Config, VHost2)),
+
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost2),
+ rabbit_ct_broker_helpers:delete_vhost(Config, VHost1).
cluster_vhost_deletion_forces_connection_closure(Config) ->
VHost1 = <<"vhost1">>,
@@ -380,7 +390,7 @@ open_connections(Config, NodesAndVHosts) ->
(Node) ->
rabbit_ct_client_helpers:OpenConnectionFun(Config, Node)
end, NodesAndVHosts),
- timer:sleep(500),
+ timer:sleep(700),
Conns.
close_connections(Conns) ->
@@ -388,12 +398,12 @@ close_connections(Conns) ->
(Conn) ->
rabbit_ct_client_helpers:close_connection(Conn)
end, Conns),
- timer:sleep(500).
+ timer:sleep(700).
count_connections_in(Config, VHost) ->
count_connections_in(Config, VHost, 0).
count_connections_in(Config, VHost, NodeIndex) ->
- timer:sleep(200),
+ timer:sleep(300),
rabbit_ct_broker_helpers:rpc(Config, NodeIndex,
rabbit_connection_tracking,
count_connections_in, [VHost]).