summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMichael Klishin <mklishin@pivotal.io>2016-06-02 14:54:44 +0300
committerMichael Klishin <mklishin@pivotal.io>2016-06-02 14:54:44 +0300
commit1ca18072ac3c036e89069733a10f957b0f6df707 (patch)
treee5e50c26e4aca77de4778f6385bd161354b659da
parent270997fd46286aa69c9da5fb30124f8e93bc7512 (diff)
parent45641a62675b1b67b01a5db76056a7d22de68175 (diff)
downloadrabbitmq-server-git-1ca18072ac3c036e89069733a10f957b0f6df707.tar.gz
Merge branch 'master' into rabbitmq-delayed-message-exchange-3
-rw-r--r--.gitignore2
-rw-r--r--CODE_OF_CONDUCT.md44
-rw-r--r--CONTRIBUTING.md17
-rw-r--r--Makefile25
-rw-r--r--build.config43
-rw-r--r--docs/rabbitmqctl.1.xml27
-rw-r--r--erlang.mk495
-rw-r--r--packaging/Makefile4
-rw-r--r--packaging/RPMS/Fedora/rabbitmq-server.spec3
-rw-r--r--packaging/debs/Debian/debian/changelog6
-rw-r--r--packaging/debs/Debian/debian/control3
-rw-r--r--packaging/debs/Debian/debian/rabbitmq-server.service18
-rwxr-xr-xpackaging/debs/Debian/debian/rules2
-rw-r--r--packaging/debs/apt-repository/Makefile4
-rw-r--r--rabbitmq-components.mk4
-rwxr-xr-x[-rw-r--r--]scripts/rabbitmq-env1
-rwxr-xr-xscripts/rabbitmq-server6
-rw-r--r--src/rabbit_amqqueue_process.erl92
-rw-r--r--src/rabbit_control_main.erl167
-rw-r--r--src/rabbit_file.erl10
-rw-r--r--src/rabbit_hipe.erl89
-rw-r--r--src/rabbit_mirror_queue_master.erl16
-rw-r--r--src/rabbit_mirror_queue_misc.erl17
-rw-r--r--src/rabbit_policy.erl4
-rw-r--r--src/rabbit_priority_queue.erl68
-rw-r--r--src/rabbit_upgrade_functions.erl19
-rw-r--r--src/truncate.erl75
-rw-r--r--test/channel_interceptor_SUITE.erl113
-rw-r--r--test/channel_operation_timeout_SUITE.erl196
-rw-r--r--test/channel_operation_timeout_test_queue.erl2443
-rw-r--r--test/cluster_rename_SUITE.erl304
-rw-r--r--test/clustering_management_SUITE.erl728
-rw-r--r--test/config_schema_SUITE.erl143
-rw-r--r--test/config_schema_SUITE_data/certs/cacert.pem1
-rw-r--r--test/config_schema_SUITE_data/certs/cert.pem1
-rw-r--r--test/config_schema_SUITE_data/certs/key.pem1
-rw-r--r--test/config_schema_SUITE_data/rabbit-mgmt/access.log0
-rw-r--r--test/config_schema_SUITE_data/schema/rabbitmq.schema961
-rw-r--r--test/config_schema_SUITE_data/schema/rabbitmq_amqp1_0.schema31
-rw-r--r--test/config_schema_SUITE_data/schema/rabbitmq_auth_backend_amqp.schema27
-rw-r--r--test/config_schema_SUITE_data/schema/rabbitmq_auth_backend_http.schema15
-rw-r--r--test/config_schema_SUITE_data/schema/rabbitmq_auth_backend_ldap.schema183
-rw-r--r--test/config_schema_SUITE_data/schema/rabbitmq_clusterer.schema58
-rw-r--r--test/config_schema_SUITE_data/schema/rabbitmq_management.schema203
-rw-r--r--test/config_schema_SUITE_data/schema/rabbitmq_metronome.schema9
-rw-r--r--test/config_schema_SUITE_data/schema/rabbitmq_mqtt.schema235
-rw-r--r--test/config_schema_SUITE_data/schema/rabbitmq_stomp.schema110
-rw-r--r--test/config_schema_SUITE_data/schema/rabbitmq_web_mqtt.schema44
-rw-r--r--test/config_schema_SUITE_data/schema/rabbitmq_web_stomp.schema64
-rw-r--r--test/config_schema_SUITE_data/snippets.config714
-rw-r--r--test/crashing_queues_SUITE.erl269
-rw-r--r--test/dummy_event_receiver.erl58
-rw-r--r--test/dummy_interceptor.erl26
-rw-r--r--test/dummy_runtime_parameters.erl72
-rw-r--r--test/dummy_supervisor2.erl41
-rw-r--r--test/dynamic_ha_SUITE.erl438
-rw-r--r--test/eager_sync_SUITE.erl278
-rw-r--r--test/gm_SUITE.erl205
-rw-r--r--test/inet_proxy_dist.erl201
-rw-r--r--test/inet_tcp_proxy.erl134
-rw-r--r--test/inet_tcp_proxy_manager.erl107
-rw-r--r--test/lazy_queue_SUITE.erl224
-rw-r--r--test/many_node_ha_SUITE.erl117
-rw-r--r--test/mirrored_supervisor_SUITE.erl335
-rw-r--r--test/mirrored_supervisor_SUITE_gs.erl66
-rw-r--r--test/msg_store_SUITE.erl62
-rw-r--r--test/partitions_SUITE.erl438
-rw-r--r--test/plugin_versioning_SUITE.erl177
-rw-r--r--test/priority_queue_SUITE.erl558
-rw-r--r--test/queue_master_location_SUITE.erl271
-rw-r--r--test/rabbit_ha_test_consumer.erl114
-rw-r--r--test/rabbit_ha_test_producer.erl119
-rw-r--r--test/simple_ha_SUITE.erl216
-rw-r--r--test/sup_delayed_restart_SUITE.erl91
-rw-r--r--test/sync_detection_SUITE.erl252
-rw-r--r--test/unit_SUITE.erl736
-rw-r--r--test/unit_inbroker_SUITE.erl3802
77 files changed, 16731 insertions, 521 deletions
diff --git a/.gitignore b/.gitignore
index a42a3ab53f..e2b46ecc33 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,7 @@
.sw?
.*.sw?
*.beam
+*.coverdata
.erlang.mk/
cover/
debug/
@@ -12,6 +13,7 @@ ebin/
etc/
logs/
plugins/
+test/ct.cover.spec
PACKAGES/
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..1f6ef1c576
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing other's private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](http://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](http://contributor-covenant.org/version/1/3/0/)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 69a4b4a437..45bbcbe62e 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -20,22 +20,9 @@ If what you are going to work on is a substantial change, please first ask the c
of their opinion on [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
-## (Brief) Code of Conduct
+## Code of Conduct
-In one line: don't be a dick.
-
-Be respectful to the maintainers and other contributors. Open source
-contributors put long hours into developing projects and doing user
-support. Those projects and user support are available for free. We
-believe this deserves some respect.
-
-Be respectful to people of all races, genders, religious beliefs and
-political views. Regardless of how brilliant a pull request is
-technically, we will not tolerate disrespectful or aggressive
-behaviour.
-
-Contributors who violate this straightforward Code of Conduct will see
-their pull requests closed and locked.
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
## Contributor Agreement
diff --git a/Makefile b/Makefile
index fa03c3f9d1..a27e0cdc6b 100644
--- a/Makefile
+++ b/Makefile
@@ -26,6 +26,8 @@ DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \
rabbit_common/mk/rabbitmq-dist.mk \
rabbit_common/mk/rabbitmq-tools.mk
+CT_OPTS += -ct_hooks cth_surefire
+
# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
# reviewed and merged.
@@ -42,6 +44,7 @@ DISTRIBUTED_DEPS := rabbitmq_amqp1_0 \
rabbitmq_event_exchange \
rabbitmq_federation \
rabbitmq_federation_management \
+ rabbitmq_jms_topic_exchange \
rabbitmq_management \
rabbitmq_management_agent \
rabbitmq_management_visualiser \
@@ -52,6 +55,7 @@ DISTRIBUTED_DEPS := rabbitmq_amqp1_0 \
rabbitmq_shovel_management \
rabbitmq_stomp \
rabbitmq_tracing \
+ rabbitmq_trust_store \
rabbitmq_web_dispatch \
rabbitmq_web_stomp \
rabbitmq_web_stomp_examples
@@ -65,6 +69,9 @@ DEPS += $(DISTRIBUTED_DEPS)
endif
endif
+# FIXME: Remove rabbitmq_test as TEST_DEPS from here for now.
+TEST_DEPS := amqp_client meck proper $(filter-out rabbitmq_test,$(TEST_DEPS))
+
include erlang.mk
# --------------------------------------------------------------------
@@ -102,24 +109,6 @@ clean-extra-sources:
$(gen_verbose) rm -f $(EXTRA_SOURCES)
# --------------------------------------------------------------------
-# Tests.
-# --------------------------------------------------------------------
-
-TARGETS_IN_RABBITMQ_TEST = $(patsubst %,%-in-rabbitmq_test,\
- tests full unit lite conformance16 lazy-vq-tests)
-
-.PHONY: $(TARGETS_IN_RABBITMQ_TEST)
-
-tests:: tests-in-rabbitmq_test
-
-$(TARGETS_IN_RABBITMQ_TEST): $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
- test-build $(DEPS_DIR)/rabbitmq_test
- $(MAKE) -C $(DEPS_DIR)/rabbitmq_test \
- IS_DEP=1 \
- RABBITMQ_BROKER_DIR=$(RABBITMQ_BROKER_DIR) \
- $(patsubst %-in-rabbitmq_test,%,$@)
-
-# --------------------------------------------------------------------
# Documentation.
# --------------------------------------------------------------------
diff --git a/build.config b/build.config
deleted file mode 100644
index b1430689a1..0000000000
--- a/build.config
+++ /dev/null
@@ -1,43 +0,0 @@
-# Do *not* comment or remove core modules
-# unless you know what you are doing.
-#
-# Feel free to comment plugins out however.
-
-# Core modules.
-core/core
-index/*
-core/index
-core/deps
-
-# Plugins that must run before Erlang code gets compiled.
-plugins/erlydtl
-plugins/protobuffs
-
-# Core modules, continued.
-core/erlc
-core/docs
-core/rel
-core/test
-core/compat
-
-# Plugins.
-plugins/asciidoc
-plugins/bootstrap
-plugins/c_src
-plugins/ci
-plugins/ct
-plugins/dialyzer
-# plugins/edoc
-plugins/elvis
-plugins/escript
-plugins/eunit
-plugins/relx
-plugins/shell
-plugins/triq
-plugins/xref
-
-# Plugins enhancing the functionality of other plugins.
-plugins/cover
-
-# Core modules which can use variables from plugins.
-core/deps-tools
diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml
index 55b557a81a..363748e046 100644
--- a/docs/rabbitmqctl.1.xml
+++ b/docs/rabbitmqctl.1.xml
@@ -285,6 +285,33 @@
</para>
</listitem>
</varlistentry>
+
+ <varlistentry>
+ <term><cmdsynopsis><command>hipe_compile</command> <arg choice="req"><replaceable>directory</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <para>
+ Performs HiPE-compilation and caches resulting
+ .beam-files in the given directory.
+ </para>
+ <para>
+ Parent directories are created if necessary. Any
+ existing <command>.beam</command> files from the
+ directory are automatically deleted prior to
+ compilation.
+ </para>
+ <para>
+ To use this precompiled files, you should set
+ <command>RABBITMQ_SERVER_CODE_PATH</command> environment
+ variable to directory specified in
+ <command>hipe_compile</command> invokation.
+ </para>
+ <para role="example-prefix">For example:</para>
+ <screen role="example">rabbitmqctl hipe_compile /tmp/rabbit-hipe/ebin</screen>
+ <para role="example">
+ HiPE-compiles modules and stores them to /tmp/rabbit-hipe/ebin directory.
+ </para>
+ </listitem>
+ </varlistentry>
</variablelist>
</refsect2>
diff --git a/erlang.mk b/erlang.mk
index fc2d806f2e..efbcf5cd11 100644
--- a/erlang.mk
+++ b/erlang.mk
@@ -16,7 +16,7 @@
ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
-ERLANG_MK_VERSION = 2.0.0-pre.2-16-gb52203c-dirty
+ERLANG_MK_VERSION = 2.0.0-pre.2-76-g427cfb8
# Core configuration.
@@ -84,7 +84,7 @@ all:: deps app rel
rel::
$(verbose) :
-check:: clean app tests
+check:: tests
clean:: clean-crashdump
@@ -421,6 +421,14 @@ pkg_boss_db_fetch = git
pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
pkg_boss_db_commit = master
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
PACKAGES += bson
pkg_bson_name = bson
pkg_bson_description = BSON documents in Erlang, see bsonspec.org
@@ -885,14 +893,6 @@ pkg_dh_date_fetch = git
pkg_dh_date_repo = https://github.com/daleharvey/dh_date
pkg_dh_date_commit = master
-PACKAGES += dhtcrawler
-pkg_dhtcrawler_name = dhtcrawler
-pkg_dhtcrawler_description = dhtcrawler is a DHT crawler written in erlang. It can join a DHT network and crawl many P2P torrents.
-pkg_dhtcrawler_homepage = https://github.com/kevinlynx/dhtcrawler
-pkg_dhtcrawler_fetch = git
-pkg_dhtcrawler_repo = https://github.com/kevinlynx/dhtcrawler
-pkg_dhtcrawler_commit = master
-
PACKAGES += dirbusterl
pkg_dirbusterl_name = dirbusterl
pkg_dirbusterl_description = DirBuster successor in Erlang
@@ -1139,7 +1139,7 @@ pkg_elvis_description = Erlang Style Reviewer
pkg_elvis_homepage = https://github.com/inaka/elvis
pkg_elvis_fetch = git
pkg_elvis_repo = https://github.com/inaka/elvis
-pkg_elvis_commit = 0.2.4
+pkg_elvis_commit = master
PACKAGES += emagick
pkg_emagick_name = emagick
@@ -1781,6 +1781,14 @@ pkg_geef_fetch = git
pkg_geef_repo = https://github.com/carlosmn/geef
pkg_geef_commit = master
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
PACKAGES += gen_cycle
pkg_gen_cycle_name = gen_cycle
pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
@@ -1981,6 +1989,14 @@ pkg_hyper_fetch = git
pkg_hyper_repo = https://github.com/GameAnalytics/hyper
pkg_hyper_commit = master
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
PACKAGES += ibrowse
pkg_ibrowse_name = ibrowse
pkg_ibrowse_description = Erlang HTTP client
@@ -2501,6 +2517,14 @@ pkg_merl_fetch = git
pkg_merl_repo = https://github.com/richcarl/merl
pkg_merl_commit = master
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
PACKAGES += mimetypes
pkg_mimetypes_name = mimetypes
pkg_mimetypes_description = Erlang MIME types library
@@ -2733,14 +2757,6 @@ pkg_oauth2_fetch = git
pkg_oauth2_repo = https://github.com/kivra/oauth2
pkg_oauth2_commit = master
-PACKAGES += oauth2c
-pkg_oauth2c_name = oauth2c
-pkg_oauth2c_description = Erlang OAuth2 Client
-pkg_oauth2c_homepage = https://github.com/kivra/oauth2_client
-pkg_oauth2c_fetch = git
-pkg_oauth2c_repo = https://github.com/kivra/oauth2_client
-pkg_oauth2c_commit = master
-
PACKAGES += octopus
pkg_octopus_name = octopus
pkg_octopus_description = Small and flexible pool manager written in Erlang
@@ -3533,6 +3549,14 @@ pkg_stripe_fetch = git
pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
pkg_stripe_commit = v1
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
PACKAGES += surrogate
pkg_surrogate_name = surrogate
pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
@@ -3907,7 +3931,7 @@ pkg_xref_runner_description = Erlang Xref Runner (inspired in rebar xref)
pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
pkg_xref_runner_fetch = git
pkg_xref_runner_repo = https://github.com/inaka/xref_runner
-pkg_xref_runner_commit = 0.2.0
+pkg_xref_runner_commit = 0.2.3
PACKAGES += yamerl
pkg_yamerl_name = yamerl
@@ -4092,7 +4116,10 @@ endif
# While Makefile file could be GNUmakefile or makefile,
# in practice only Makefile is needed so far.
define dep_autopatch
- if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+ if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+ $(call dep_autopatch_erlang_mk,$(1)); \
+ elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
if [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
$(call dep_autopatch2,$(1)); \
elif [ 0 != `grep -ci rebar $(DEPS_DIR)/$(1)/Makefile` ]; then \
@@ -4100,12 +4127,7 @@ define dep_autopatch
elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i rebar '{}' \;`" ]; then \
$(call dep_autopatch2,$(1)); \
else \
- if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
- $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
- $(call dep_autopatch_erlang_mk,$(1)); \
- else \
- $(call erlang,$(call dep_autopatch_app.erl,$(1))); \
- fi \
+ $(call erlang,$(call dep_autopatch_app.erl,$(1))); \
fi \
else \
if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
@@ -4117,8 +4139,11 @@ define dep_autopatch
endef
define dep_autopatch2
+ if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+ $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+ fi; \
$(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
- if [ -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script ]; then \
+ if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script ]; then \
$(call dep_autopatch_fetch_rebar); \
$(call dep_autopatch_rebar,$(1)); \
else \
@@ -4256,57 +4281,6 @@ define dep_autopatch_rebar.erl
Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
end
end(),
- FindFirst = fun(F, Fd) ->
- case io:parse_erl_form(Fd, undefined) of
- {ok, {attribute, _, compile, {parse_transform, PT}}, _} ->
- [PT, F(F, Fd)];
- {ok, {attribute, _, compile, CompileOpts}, _} when is_list(CompileOpts) ->
- case proplists:get_value(parse_transform, CompileOpts) of
- undefined -> [F(F, Fd)];
- PT -> [PT, F(F, Fd)]
- end;
- {ok, {attribute, _, include, Hrl}, _} ->
- case file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]) of
- {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
- _ ->
- case file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ Hrl, [read]) of
- {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
- _ -> [F(F, Fd)]
- end
- end;
- {ok, {attribute, _, include_lib, "$(1)/include/" ++ Hrl}, _} ->
- {ok, HrlFd} = file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]),
- [F(F, HrlFd), F(F, Fd)];
- {ok, {attribute, _, include_lib, Hrl}, _} ->
- case file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]) of
- {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
- _ -> [F(F, Fd)]
- end;
- {ok, {attribute, _, import, {Imp, _}}, _} ->
- case file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(Imp) ++ ".erl", [read]) of
- {ok, ImpFd} -> [Imp, F(F, ImpFd), F(F, Fd)];
- _ -> [F(F, Fd)]
- end;
- {eof, _} ->
- file:close(Fd),
- [];
- _ ->
- F(F, Fd)
- end
- end,
- fun() ->
- ErlFiles = filelib:wildcard("$(call core_native_path,$(DEPS_DIR)/$1/src/)*.erl"),
- First0 = lists:usort(lists:flatten([begin
- {ok, Fd} = file:open(F, [read]),
- FindFirst(FindFirst, Fd)
- end || F <- ErlFiles])),
- First = lists:flatten([begin
- {ok, Fd} = file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", [read]),
- FindFirst(FindFirst, Fd)
- end || M <- First0, lists:member("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", ErlFiles)]) ++ First0,
- Write(["COMPILE_FIRST +=", [[" ", atom_to_list(M)] || M <- First,
- lists:member("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", ErlFiles)], "\n"])
- end(),
Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
Write("\npreprocess::\n"),
Write("\npre-deps::\n"),
@@ -4419,9 +4393,10 @@ define dep_autopatch_rebar.erl
Output, ": $$\(foreach ext,.c .C .cc .cpp,",
"$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
"\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
- case filename:extension(Output) of
- [] -> "\n";
- _ -> " -shared\n"
+ case {filename:extension(Output), $(PLATFORM)} of
+ {[], _} -> "\n";
+ {_, darwin} -> "\n";
+ _ -> " -shared\n"
end])
end,
[PortSpec(S) || S <- PortSpecs]
@@ -4490,6 +4465,15 @@ define dep_autopatch_app.erl
halt()
endef
+define dep_autopatch_appsrc_script.erl
+ AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcScript = AppSrc ++ ".script",
+ Bindings = erl_eval:new_bindings(),
+ {ok, Conf} = file:script(AppSrcScript, Bindings),
+ ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+ halt()
+endef
+
define dep_autopatch_appsrc.erl
AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
@@ -4576,10 +4560,11 @@ $(DEPS_DIR)/$(call dep_name,$1):
exit 17; \
fi
$(verbose) mkdir -p $(DEPS_DIR)
- $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$1)),$1)
- $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure.ac -o -f $(DEPS_DIR)/$(DEP_NAME)/configure.in ]; then \
- echo " AUTO " $(DEP_STR); \
- cd $(DEPS_DIR)/$(DEP_NAME) && autoreconf -Wall -vif -I m4; \
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+ $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+ && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+ echo " AUTO " $(1); \
+ cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
fi
- $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
echo " CONF " $(DEP_STR); \
@@ -4672,28 +4657,10 @@ dtl_verbose = $(dtl_verbose_$(V))
# Core targets.
-define erlydtl_compile.erl
- [begin
- Module0 = case "$(strip $(DTL_FULL_PATH))" of
- "" ->
- filename:basename(F, ".dtl");
- _ ->
- "$(DTL_PATH)" ++ F2 = filename:rootname(F, ".dtl"),
- re:replace(F2, "/", "_", [{return, list}, global])
- end,
- Module = list_to_atom(string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
- case erlydtl:compile(F, Module, [{out_dir, "ebin/"}, return_errors, {doc_root, "templates"}]) of
- ok -> ok;
- {ok, _} -> ok
- end
- end || F <- string:tokens("$(1)", " ")],
- halt().
-endef
-
-ifneq ($(wildcard src/),)
-
DTL_FILES = $(sort $(call core_find,$(DTL_PATH),*.dtl))
+ifneq ($(DTL_FILES),)
+
ifdef DTL_FULL_PATH
BEAM_FILES += $(addprefix ebin/,$(patsubst %.dtl,%_dtl.beam,$(subst /,_,$(DTL_FILES:$(DTL_PATH)%=%))))
else
@@ -4701,7 +4668,7 @@ BEAM_FILES += $(addprefix ebin/,$(patsubst %.dtl,%_dtl.beam,$(notdir $(DTL_FILES
endif
ifneq ($(words $(DTL_FILES)),0)
-# Rebuild everything when the Makefile changes.
+# Rebuild templates when the Makefile changes.
$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST)
@mkdir -p $(ERLANG_MK_TMP)
@if test -f $@; then \
@@ -4712,9 +4679,28 @@ $(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST)
ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
endif
-ebin/$(PROJECT).app:: $(DTL_FILES)
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(DTL_PATH)" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom(string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [{out_dir, "ebin/"}, return_errors, {doc_root, "templates"}]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
$(if $(strip $?),\
- $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$?,-pa ebin/ $(DEPS_DIR)/erlydtl/ebin/)))
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$?),-pa ebin/ $(DEPS_DIR)/erlydtl/ebin/))
+
endif
# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
@@ -4888,51 +4874,79 @@ $(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
# Erlang and Core Erlang files.
define makedep.erl
+ E = ets:new(makedep, [bag]),
+ G = digraph:new([acyclic]),
ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
- Modules = [{filename:basename(F, ".erl"), F} || F <- ErlFiles],
- Add = fun (Dep, Acc) ->
- case lists:keyfind(atom_to_list(Dep), 1, Modules) of
- {_, DepFile} -> [DepFile|Acc];
- false -> Acc
+ Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+ Add = fun (Mod, Dep) ->
+ case lists:keyfind(Dep, 1, Modules) of
+ false -> ok;
+ {_, DepFile} ->
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile}),
+ digraph:add_vertex(G, Mod),
+ digraph:add_vertex(G, Dep),
+ digraph:add_edge(G, Mod, Dep)
end
end,
- AddHd = fun (Dep, Acc) ->
- case {Dep, lists:keymember(Dep, 2, Modules)} of
- {"src/" ++ _, false} -> [Dep|Acc];
- {"include/" ++ _, false} -> [Dep|Acc];
- _ -> Acc
+ AddHd = fun (F, Mod, DepFile) ->
+ case file:open(DepFile, [read]) of
+ {error, enoent} -> ok;
+ {ok, Fd} ->
+ F(F, Fd, Mod),
+ {_, ModFile} = lists:keyfind(Mod, 1, Modules),
+ ets:insert(E, {ModFile, DepFile})
end
end,
- CompileFirst = fun (Deps) ->
- First0 = [case filename:extension(D) of
- ".erl" -> filename:basename(D, ".erl");
- _ -> []
- end || D <- Deps],
- case lists:usort(First0) of
- [] -> [];
- [[]] -> [];
- First -> ["COMPILE_FIRST +=", [[" ", F] || F <- First], "\n"]
- end
+ Attr = fun
+ (F, Mod, behavior, Dep) -> Add(Mod, Dep);
+ (F, Mod, behaviour, Dep) -> Add(Mod, Dep);
+ (F, Mod, compile, {parse_transform, Dep}) -> Add(Mod, Dep);
+ (F, Mod, compile, Opts) when is_list(Opts) ->
+ case proplists:get_value(parse_transform, Opts) of
+ undefined -> ok;
+ Dep -> Add(Mod, Dep)
+ end;
+ (F, Mod, include, Hrl) ->
+ case filelib:is_file("include/" ++ Hrl) of
+ true -> AddHd(F, Mod, "include/" ++ Hrl);
+ false ->
+ case filelib:is_file("src/" ++ Hrl) of
+ true -> AddHd(F, Mod, "src/" ++ Hrl);
+ false -> false
+ end
+ end;
+ (F, Mod, include_lib, "$1/include/" ++ Hrl) -> AddHd(F, Mod, "include/" ++ Hrl);
+ (F, Mod, include_lib, Hrl) -> AddHd(F, Mod, "include/" ++ Hrl);
+ (F, Mod, import, {Imp, _}) ->
+ case filelib:is_file("src/" ++ atom_to_list(Imp) ++ ".erl") of
+ false -> ok;
+ true -> Add(Mod, Imp)
+ end;
+ (_, _, _, _) -> ok
end,
- Depend = [begin
- case epp:parse_file(F, ["include/"], []) of
- {ok, Forms} ->
- Deps = lists:usort(lists:foldl(fun
- ({attribute, _, behavior, Dep}, Acc) -> Add(Dep, Acc);
- ({attribute, _, behaviour, Dep}, Acc) -> Add(Dep, Acc);
- ({attribute, _, compile, {parse_transform, Dep}}, Acc) -> Add(Dep, Acc);
- ({attribute, _, file, {Dep, _}}, Acc) -> AddHd(Dep, Acc);
- (_, Acc) -> Acc
- end, [], Forms)),
- case Deps of
- [] -> "";
- _ -> [F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n", CompileFirst(Deps)]
- end;
- {error, enoent} ->
- []
+ MakeDepend = fun(F, Fd, Mod) ->
+ case io:parse_erl_form(Fd, undefined) of
+ {ok, {attribute, _, Key, Value}, _} ->
+ Attr(F, Mod, Key, Value),
+ F(F, Fd, Mod);
+ {eof, _} ->
+ file:close(Fd);
+ _ ->
+ F(F, Fd, Mod)
end
+ end,
+ [begin
+ Mod = list_to_atom(filename:basename(F, ".erl")),
+ {ok, Fd} = file:open(F, [read]),
+ MakeDepend(MakeDepend, Fd, Mod)
end || F <- ErlFiles],
- ok = file:write_file("$(1)", Depend),
+ Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+ CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+ ok = file:write_file("$(1)", [
+ [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+ "\nCOMPILE_FIRST +=", [[" ", atom_to_list(CF)] || CF <- CompileFirst], "\n"
+ ]),
halt()
endef
@@ -5069,6 +5083,11 @@ test-dir:
$(call core_find,$(TEST_DIR)/,*.erl) -pa ebin/
endif
+ifeq ($(wildcard src),)
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: clean deps test-deps
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(TEST_ERLC_OPTS)"
+else
ifeq ($(wildcard ebin/test),)
test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
test-build:: clean deps test-deps $(PROJECT).d
@@ -5086,6 +5105,7 @@ clean-test-dir:
ifneq ($(wildcard $(TEST_DIR)/*.beam),)
$(gen_verbose) rm -f $(TEST_DIR)/*.beam
endif
+endif
# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
# This file is part of erlang.mk and subject to the terms of the ISC License.
@@ -5103,11 +5123,14 @@ $(if $(filter-out -Werror,$1),\
$(shell echo $1 | cut -b 2-)))
endef
+define compat_erlc_opts_to_list
+ [$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
define compat_rebar_config
{deps, [$(call comma_list,$(foreach d,$(DEPS),\
{$(call dep_name,$d),".*",{git,"$(call dep_repo,$d)","$(call dep_commit,$d)"}}))]}.
-{erl_opts, [$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$(ERLC_OPTS)),\
- $(call compat_convert_erlc_opts,$o)))]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
endef
$(eval _compat_rebar_config = $$(compat_rebar_config))
@@ -5126,12 +5149,12 @@ MAN_SECTIONS ?= 3 7
docs:: asciidoc
-asciidoc: distclean-asciidoc doc-deps asciidoc-guide asciidoc-manual
+asciidoc: asciidoc-guide asciidoc-manual
ifeq ($(wildcard doc/src/guide/book.asciidoc),)
asciidoc-guide:
else
-asciidoc-guide:
+asciidoc-guide: distclean-asciidoc doc-deps
a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
endif
@@ -5139,7 +5162,7 @@ endif
ifeq ($(wildcard doc/src/manual/*.asciidoc),)
asciidoc-manual:
else
-asciidoc-manual:
+asciidoc-manual: distclean-asciidoc doc-deps
for f in doc/src/manual/*.asciidoc ; do \
a2x -v -f manpage $$f ; \
done
@@ -5154,7 +5177,7 @@ install-docs:: install-asciidoc
install-asciidoc: asciidoc-manual
for s in $(MAN_SECTIONS); do \
mkdir -p $(MAN_INSTALL_PATH)/man$$s/ ; \
- install -g 0 -o 0 -m 0644 doc/man$$s/*.gz $(MAN_INSTALL_PATH)/man$$s/ ; \
+ install -g `id -u` -o `id -g` -m 0644 doc/man$$s/*.gz $(MAN_INSTALL_PATH)/man$$s/ ; \
done
endif
@@ -5214,6 +5237,8 @@ define bs_appsrc_lib
]}.
endef
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
ifdef SP
define bs_Makefile
PROJECT = $p
@@ -5223,17 +5248,21 @@ PROJECT_VERSION = 0.0.1
# Whitespace to be used when creating files from templates.
SP = $(SP)
-include erlang.mk
endef
else
define bs_Makefile
PROJECT = $p
-include erlang.mk
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.0.1
+
endef
endif
define bs_apps_Makefile
PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.0.1
+
include $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)/erlang.mk
endef
@@ -5527,6 +5556,7 @@ endif
$(eval p := $(PROJECT))
$(eval n := $(PROJECT)_sup)
$(call render_template,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
$(verbose) mkdir src/
ifdef LEGACY
$(call render_template,bs_appsrc,src/$(PROJECT).app.src)
@@ -5540,6 +5570,7 @@ ifneq ($(wildcard src/),)
endif
$(eval p := $(PROJECT))
$(call render_template,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
$(verbose) mkdir src/
ifdef LEGACY
$(call render_template,bs_appsrc_lib,src/$(PROJECT).app.src)
@@ -5620,12 +5651,32 @@ list-templates:
C_SRC_DIR ?= $(CURDIR)/c_src
C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
-C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT).so
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
C_SRC_TYPE ?= shared
# System type and C compiler/flags.
-ifeq ($(PLATFORM),darwin)
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
CC ?= cc
CFLAGS ?= -O3 -std=c99 -arch x86_64 -finline-functions -Wall -Wmissing-prototypes
CXXFLAGS ?= -O3 -arch x86_64 -finline-functions -Wall
@@ -5640,10 +5691,15 @@ else ifeq ($(PLATFORM),linux)
CXXFLAGS ?= -O3 -finline-functions -Wall
endif
-CFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
-CXXFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
-LDLIBS += -L $(ERL_INTERFACE_LIB_DIR) -lerl_interface -lei
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lerl_interface -lei
# Verbosity.
@@ -5680,15 +5736,15 @@ OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
-app:: $(C_SRC_ENV) $(C_SRC_OUTPUT)
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
-test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT)
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
-$(C_SRC_OUTPUT): $(OBJECTS)
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
$(verbose) mkdir -p priv/
$(link_verbose) $(CC) $(OBJECTS) \
$(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
- -o $(C_SRC_OUTPUT)
+ -o $(C_SRC_OUTPUT_FILE)
%.o: %.c
$(COMPILE_C) $(OUTPUT_OPTION) $<
@@ -5705,13 +5761,13 @@ $(C_SRC_OUTPUT): $(OBJECTS)
clean:: clean-c_src
clean-c_src:
- $(gen_verbose) rm -f $(C_SRC_OUTPUT) $(OBJECTS)
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
endif
ifneq ($(wildcard $(C_SRC_DIR)),)
$(C_SRC_ENV):
- $(verbose) $(ERL) -eval "file:write_file(\"$(C_SRC_ENV)\", \
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
io_lib:format( \
\"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
\"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
@@ -5889,7 +5945,7 @@ endif
# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
# This file is part of erlang.mk and subject to the terms of the ISC License.
-.PHONY: ct distclean-ct
+.PHONY: ct apps-ct distclean-ct
# Configuration.
@@ -5924,17 +5980,33 @@ CT_RUN = ct_run \
-logdir $(CURDIR)/logs
ifeq ($(CT_SUITES),)
-ct:
+ct: $(if $(IS_APP),,apps-ct)
else
-ct: test-build
+ct: test-build $(if $(IS_APP),,apps-ct)
$(verbose) mkdir -p $(CURDIR)/logs/
$(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
endif
+ifneq ($(ALL_APPS_DIRS),)
+apps-ct:
+ $(verbose) for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app ct IS_APP=1; done
+endif
+
+ifndef t
+CT_EXTRA =
+else
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+endif
+
define ct_suite_target
ct-$(1): test-build
$(verbose) mkdir -p $(CURDIR)/logs/
- $(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(1)) $(CT_OPTS)
+ $(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
endef
$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
@@ -5953,9 +6025,8 @@ DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
export DIALYZER_PLT
PLT_APPS ?=
-DIALYZER_DIRS ?= --src -r src
-DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions \
- -Wunmatched_returns # -Wunderspecs
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
# Core targets.
@@ -5971,6 +6042,18 @@ help::
# Plugin-specific targets.
+define filter_opts.erl
+ Opts = binary:split(<<"$1">>, <<"-">>, [global]),
+ Filtered = lists:reverse(lists:foldl(fun
+ (O = <<"pa ", _/bits>>, Acc) -> [O|Acc];
+ (O = <<"D ", _/bits>>, Acc) -> [O|Acc];
+ (O = <<"I ", _/bits>>, Acc) -> [O|Acc];
+ (_, Acc) -> Acc
+ end, [], Opts)),
+ io:format("~s~n", [[["-", O] || O <- Filtered]]),
+ halt().
+endef
+
$(DIALYZER_PLT): deps app
$(verbose) dialyzer --build_plt --apps erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS)
@@ -5984,47 +6067,32 @@ dialyze:
else
dialyze: $(DIALYZER_PLT)
endif
- $(verbose) dialyzer --no_native $(DIALYZER_DIRS) $(DIALYZER_OPTS)
+ $(verbose) dialyzer --no_native `$(call erlang,$(call filter_opts.erl,$(ERLC_OPTS)))` $(DIALYZER_DIRS) $(DIALYZER_OPTS)
-# Copyright (c) 2015, Erlang Solutions Ltd.
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
# This file is part of erlang.mk and subject to the terms of the ISC License.
-.PHONY: elvis distclean-elvis
+.PHONY: distclean-edoc edoc
# Configuration.
-ELVIS_CONFIG ?= $(CURDIR)/elvis.config
-
-ELVIS ?= $(CURDIR)/elvis
-export ELVIS
-
-ELVIS_URL ?= https://github.com/inaka/elvis/releases/download/0.2.5/elvis
-ELVIS_CONFIG_URL ?= https://github.com/inaka/elvis/releases/download/0.2.5/elvis.config
-ELVIS_OPTS ?=
+EDOC_OPTS ?=
# Core targets.
-help::
- $(verbose) printf "%s\n" "" \
- "Elvis targets:" \
- " elvis Run Elvis using the local elvis.config or download the default otherwise"
+ifneq ($(wildcard doc/overview.edoc),)
+docs:: edoc
+endif
-distclean:: distclean-elvis
+distclean:: distclean-edoc
# Plugin-specific targets.
-$(ELVIS):
- $(gen_verbose) $(call core_http_get,$(ELVIS),$(ELVIS_URL))
- $(verbose) chmod +x $(ELVIS)
-
-$(ELVIS_CONFIG):
- $(verbose) $(call core_http_get,$(ELVIS_CONFIG),$(ELVIS_CONFIG_URL))
-
-elvis: $(ELVIS) $(ELVIS_CONFIG)
- $(verbose) $(ELVIS) rock -c $(ELVIS_CONFIG) $(ELVIS_OPTS)
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(ERL) -eval 'edoc:application($(PROJECT), ".", [$(EDOC_OPTS)]), halt().'
-distclean-elvis:
- $(gen_verbose) rm -rf $(ELVIS)
+distclean-edoc:
+ $(gen_verbose) rm -f doc/*.css doc/*.html doc/*.png doc/edoc-info
# Copyright (c) 2014 Dave Cottlehuber <dch@skunkwerks.at>
# This file is part of erlang.mk and subject to the terms of the ISC License.
@@ -6095,11 +6163,12 @@ distclean-escript:
# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
# This file is contributed to erlang.mk and subject to the terms of the ISC License.
-.PHONY: eunit
+.PHONY: eunit apps-eunit
# Configuration
EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
# Core targets.
@@ -6121,7 +6190,7 @@ define eunit.erl
_ -> ok
end
end,
- case eunit:test([$(call comma_list,$(1))], [$(EUNIT_OPTS)]) of
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
ok -> ok;
error -> halt(2)
end,
@@ -6133,14 +6202,30 @@ define eunit.erl
halt()
endef
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(DEPS_DIR)/*/ebin $(APPS_DIR)/*/ebin ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
EUNIT_EBIN_MODS = $(notdir $(basename $(call core_find,ebin/,*.beam)))
EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.beam)))
EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
- $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),{module,'$(mod)'})
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
-eunit: test-build
- $(gen_verbose) $(ERL) -pa $(TEST_DIR) $(DEPS_DIR)/*/ebin ebin \
- -eval "$(subst $(newline),,$(subst ",\",$(call eunit.erl,$(EUNIT_MODS))))"
+eunit: test-build $(if $(IS_APP),,apps-eunit)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit:
+ $(verbose) for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; done
+endif
+endif
# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
# This file is part of erlang.mk and subject to the terms of the ISC License.
diff --git a/packaging/Makefile b/packaging/Makefile
index da3dcccb60..31b85d9c1f 100644
--- a/packaging/Makefile
+++ b/packaging/Makefile
@@ -62,9 +62,7 @@ endif
VARS = SOURCE_DIST_FILE="$(abspath $(SOURCE_DIST_FILE))" \
PACKAGES_DIR="$(abspath $(PACKAGES_DIR))" \
- SIGNING_KEY="$(SIGNING_KEY)" \
- SIGNING_USER_ID="$(SIGNING_USER_ID)" \
- SIGNING_USER_EMAIL="$(SIGNING_USER_EMAIL)"
+ SIGNING_KEY="$(SIGNING_KEY)"
packages: package-deb package-rpm package-windows package-generic-unix
@:
diff --git a/packaging/RPMS/Fedora/rabbitmq-server.spec b/packaging/RPMS/Fedora/rabbitmq-server.spec
index 04676b0802..5905747903 100644
--- a/packaging/RPMS/Fedora/rabbitmq-server.spec
+++ b/packaging/RPMS/Fedora/rabbitmq-server.spec
@@ -126,6 +126,9 @@ done
rm -rf %{buildroot}
%changelog
+* Thu May 19 2016 michael@rabbitmq.com 3.6.2-1
+- New Upstream Release
+
* Tue Mar 1 2016 michael@rabbitmq.com 3.6.1-1
- New Upstream Release
diff --git a/packaging/debs/Debian/debian/changelog b/packaging/debs/Debian/debian/changelog
index adf8ce5aa5..52459181a6 100644
--- a/packaging/debs/Debian/debian/changelog
+++ b/packaging/debs/Debian/debian/changelog
@@ -1,3 +1,9 @@
+rabbitmq-server (3.6.2-1) unstable; urgency=low
+
+ * New Upstream Release
+
+ -- Michael Klishin <michael@rabbitmq.com> Thu, 19 May 2016 09:20:06 +0100
+
rabbitmq-server (3.6.1-1) unstable; urgency=low
* New Upstream Release
diff --git a/packaging/debs/Debian/debian/control b/packaging/debs/Debian/debian/control
index 9cf494ab87..29ea81049c 100644
--- a/packaging/debs/Debian/debian/control
+++ b/packaging/debs/Debian/debian/control
@@ -7,11 +7,12 @@ Uploaders: Alvaro Videla <alvaro@rabbitmq.com>,
Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>,
Giuseppe Privitera <giuseppe@rabbitmq.com>
Build-Depends: debhelper (>= 9),
+ dh-systemd (>= 1.5),
erlang-dev,
python-simplejson,
xmlto,
xsltproc,
- erlang-nox (>= 1:16.b.3),
+ erlang-nox (>= 1:16.b.3) | esl-erlang,
zip,
rsync
Standards-Version: 3.9.4
diff --git a/packaging/debs/Debian/debian/rabbitmq-server.service b/packaging/debs/Debian/debian/rabbitmq-server.service
new file mode 100644
index 0000000000..1aa6549b64
--- /dev/null
+++ b/packaging/debs/Debian/debian/rabbitmq-server.service
@@ -0,0 +1,18 @@
+# systemd unit example
+[Unit]
+Description=RabbitMQ broker
+After=network.target epmd@0.0.0.0.socket
+Wants=network.target epmd@0.0.0.0.socket
+
+[Service]
+Type=notify
+User=rabbitmq
+Group=rabbitmq
+NotifyAccess=all
+TimeoutStartSec=3600
+WorkingDirectory=/var/lib/rabbitmq
+ExecStart=/usr/lib/rabbitmq/bin/rabbitmq-server
+ExecStop=/usr/lib/rabbitmq/bin/rabbitmqctl stop
+
+[Install]
+WantedBy=multi-user.target
diff --git a/packaging/debs/Debian/debian/rules b/packaging/debs/Debian/debian/rules
index 053df18115..770eeb0ea5 100755
--- a/packaging/debs/Debian/debian/rules
+++ b/packaging/debs/Debian/debian/rules
@@ -8,7 +8,7 @@ DEB_DESTDIR = debian/rabbitmq-server
VERSION = $(shell dpkg-parsechangelog | awk '/^Version:/ {version=$$0; sub(/Version: /, "", version); sub(/-.*/, "", version); print version;}')
%:
- dh $@ --parallel
+ dh $@ --parallel --with systemd
override_dh_auto_clean:
$(MAKE) clean distclean-manpages
diff --git a/packaging/debs/apt-repository/Makefile b/packaging/debs/apt-repository/Makefile
index bbddc15a4e..dbf8871a51 100644
--- a/packaging/debs/apt-repository/Makefile
+++ b/packaging/debs/apt-repository/Makefile
@@ -1,7 +1,7 @@
PACKAGES_DIR ?= ../../../PACKAGES
REPO_DIR ?= debian
-SIGNING_USER_EMAIL ?= info@rabbitmq.com
+SIGNING_KEY ?= default
ifeq "$(UNOFFICIAL_RELEASE)" ""
HOME_ARG = HOME=$(GNUPG_PATH)
@@ -18,7 +18,7 @@ debian_apt_repository: clean
mkdir -p $(REPO_DIR)/conf
cp -a distributions $(REPO_DIR)/conf
ifeq "$(UNOFFICIAL_RELEASE)" ""
- echo SignWith: $(SIGNING_USER_EMAIL) >> $(REPO_DIR)/conf/distributions
+ echo SignWith: $(SIGNING_KEY) >> $(REPO_DIR)/conf/distributions
endif
for FILE in $(PACKAGES_DIR)/*.changes ; do \
$(HOME_ARG) reprepro --ignore=wrongdistribution \
diff --git a/rabbitmq-components.mk b/rabbitmq-components.mk
index 920a67b121..4d61002052 100644
--- a/rabbitmq-components.mk
+++ b/rabbitmq-components.mk
@@ -54,7 +54,9 @@ dep_rabbitmq_management_visualiser = git_rmq rabbitmq-management-visualiser $
dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
@@ -111,7 +113,9 @@ RABBITMQ_COMPONENTS = amqp_client \
rabbitmq_message_timestamp \
rabbitmq_metronome \
rabbitmq_mqtt \
+ rabbitmq_objc_client \
rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
rabbitmq_rtopic_exchange \
rabbitmq_sharding \
rabbitmq_shovel \
diff --git a/scripts/rabbitmq-env b/scripts/rabbitmq-env
index b59e323e66..62cff0b248 100644..100755
--- a/scripts/rabbitmq-env
+++ b/scripts/rabbitmq-env
@@ -177,6 +177,7 @@ DEFAULT_NODE_PORT=5672
[ "x" = "x$RABBITMQ_MNESIA_BASE" ] && RABBITMQ_MNESIA_BASE=${MNESIA_BASE}
[ "x" = "x$RABBITMQ_SERVER_START_ARGS" ] && RABBITMQ_SERVER_START_ARGS=${SERVER_START_ARGS}
[ "x" = "x$RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS" ] && RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS=${SERVER_ADDITIONAL_ERL_ARGS}
+[ "x" = "x$RABBITMQ_SERVER_CODE_PATH" ] && RABBITMQ_SERVER_CODE_PATH=${SERVER_CODE_PATH}
[ "x" = "x$RABBITMQ_MNESIA_DIR" ] && RABBITMQ_MNESIA_DIR=${MNESIA_DIR}
[ "x" = "x$RABBITMQ_MNESIA_DIR" ] && RABBITMQ_MNESIA_DIR=${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME}
[ "x" = "x$RABBITMQ_GENERATED_CONFIG_DIR" ] && RABBITMQ_GENERATED_CONFIG_DIR=${GENERATED_CONFIG_DIR}
diff --git a/scripts/rabbitmq-server b/scripts/rabbitmq-server
index 8a6279dc65..6d82588ee2 100755
--- a/scripts/rabbitmq-server
+++ b/scripts/rabbitmq-server
@@ -171,11 +171,15 @@ ensure_thread_pool_size() {
}
start_rabbitmq_server() {
+ # "-pa ${RABBITMQ_SERVER_CODE_PATH}" should be the very first
+ # command-line argument. In case of using cached HiPE-compilation,
+ # this will allow for compiled versions of erlang built-in modules
+ # (e.g. lists) to be loaded.
ensure_thread_pool_size
check_start_params &&
RABBITMQ_CONFIG_FILE=$RABBITMQ_CONFIG_FILE \
exec ${ERL_DIR}erl \
- -pa ${RABBITMQ_EBIN_ROOT} \
+ -pa ${RABBITMQ_SERVER_CODE_PATH} ${RABBITMQ_EBIN_ROOT} \
${RABBITMQ_START_RABBIT} \
${RABBITMQ_NAME_TYPE} ${RABBITMQ_NODENAME} \
-boot "${SASL_BOOT_FILE}" \
diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl
index 3ee14e4f7d..2db86391a5 100644
--- a/src/rabbit_amqqueue_process.erl
+++ b/src/rabbit_amqqueue_process.erl
@@ -54,6 +54,7 @@
max_length,
max_bytes,
args_policy_version,
+ mirroring_policy_version = 0,
status
}).
@@ -702,7 +703,7 @@ handle_ch_down(DownPid, State = #q{consumers = Consumers,
exclusive_consumer = Holder1},
notify_decorators(State2),
case should_auto_delete(State2) of
- true ->
+ true ->
log_auto_delete(
io_lib:format(
"because all of its consumers (~p) were on a channel that was closed",
@@ -1071,11 +1072,11 @@ handle_call({basic_cancel, ChPid, ConsumerTag, OkMsg}, _From,
notify_decorators(State1),
case should_auto_delete(State1) of
false -> reply(ok, ensure_expiry_timer(State1));
- true ->
+ true ->
log_auto_delete(
io_lib:format(
"because its last consumer with tag '~s' was cancelled",
- [ConsumerTag]),
+ [ConsumerTag]),
State),
stop(ok, State1)
end
@@ -1207,22 +1208,15 @@ handle_cast({set_maximum_since_use, Age}, State) ->
ok = file_handle_cache:set_maximum_since_use(Age),
noreply(State);
-handle_cast(start_mirroring, State = #q{backing_queue = BQ,
- backing_queue_state = BQS}) ->
- %% lookup again to get policy for init_with_existing_bq
- {ok, Q} = rabbit_amqqueue:lookup(qname(State)),
- true = BQ =/= rabbit_mirror_queue_master, %% assertion
- BQ1 = rabbit_mirror_queue_master,
- BQS1 = BQ1:init_with_existing_bq(Q, BQ, BQS),
- noreply(State#q{backing_queue = BQ1,
- backing_queue_state = BQS1});
-
-handle_cast(stop_mirroring, State = #q{backing_queue = BQ,
- backing_queue_state = BQS}) ->
- BQ = rabbit_mirror_queue_master, %% assertion
- {BQ1, BQS1} = BQ:stop_mirroring(BQS),
- noreply(State#q{backing_queue = BQ1,
- backing_queue_state = BQS1});
+handle_cast(update_mirroring, State = #q{q = Q,
+ mirroring_policy_version = Version}) ->
+ case needs_update_mirroring(Q, Version) of
+ false ->
+ noreply(State);
+ {Policy, NewVersion} ->
+ State1 = State#q{mirroring_policy_version = NewVersion},
+ noreply(update_mirroring(Policy, State1))
+ end;
handle_cast({credit, ChPid, CTag, Credit, Drain},
State = #q{consumers = Consumers,
@@ -1371,19 +1365,67 @@ log_delete_exclusive({ConPid, _ConRef}, State) ->
log_delete_exclusive(ConPid, State);
log_delete_exclusive(ConPid, #q{ q = #amqqueue{ name = Resource } }) ->
#resource{ name = QName, virtual_host = VHost } = Resource,
- rabbit_queue:debug("Deleting exclusive queue '~s' in vhost '~s' " ++
- " because its declaring connection ~p was closed",
- [QName, VHost, ConPid]).
+ rabbit_log_queue:debug("Deleting exclusive queue '~s' in vhost '~s' " ++
+ "because its declaring connection ~p was closed",
+ [QName, VHost, ConPid]).
log_auto_delete(Reason, #q{ q = #amqqueue{ name = Resource } }) ->
#resource{ name = QName, virtual_host = VHost } = Resource,
- rabbit_queue:debug("Deleting auto-delete queue '~s' in vhost '~s' " ++
- Reason,
- [QName, VHost]).
+ rabbit_log_queue:debug("Deleting auto-delete queue '~s' in vhost '~s' " ++
+ Reason,
+ [QName, VHost]).
+
+needs_update_mirroring(Q, Version) ->
+ {ok, UpQ} = rabbit_amqqueue:lookup(Q#amqqueue.name),
+ DBVersion = UpQ#amqqueue.policy_version,
+ case DBVersion > Version of
+ true -> {rabbit_policy:get(<<"ha-mode">>, UpQ), DBVersion};
+ false -> false
+ end.
+update_mirroring(Policy, State = #q{backing_queue = BQ}) ->
+ case update_to(Policy, BQ) of
+ start_mirroring ->
+ start_mirroring(State);
+ stop_mirroring ->
+ stop_mirroring(State);
+ ignore ->
+ State;
+ update_ha_mode ->
+ update_ha_mode(State)
+ end.
+update_to(undefined, rabbit_mirror_queue_master) ->
+ stop_mirroring;
+update_to(_, rabbit_mirror_queue_master) ->
+ update_ha_mode;
+update_to(undefined, BQ) when BQ =/= rabbit_mirror_queue_master ->
+ ignore;
+update_to(_, BQ) when BQ =/= rabbit_mirror_queue_master ->
+ start_mirroring.
+
+start_mirroring(State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ %% lookup again to get policy for init_with_existing_bq
+ {ok, Q} = rabbit_amqqueue:lookup(qname(State)),
+ true = BQ =/= rabbit_mirror_queue_master, %% assertion
+ BQ1 = rabbit_mirror_queue_master,
+ BQS1 = BQ1:init_with_existing_bq(Q, BQ, BQS),
+ State#q{backing_queue = BQ1,
+ backing_queue_state = BQS1}.
+stop_mirroring(State = #q{backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ BQ = rabbit_mirror_queue_master, %% assertion
+ {BQ1, BQS1} = BQ:stop_mirroring(BQS),
+ State#q{backing_queue = BQ1,
+ backing_queue_state = BQS1}.
+
+update_ha_mode(State) ->
+ {ok, Q} = rabbit_amqqueue:lookup(qname(State)),
+ ok = rabbit_mirror_queue_misc:update_mirrors(Q),
+ State.
diff --git a/src/rabbit_control_main.erl b/src/rabbit_control_main.erl
index 7f653c3780..bf50fd6c49 100644
--- a/src/rabbit_control_main.erl
+++ b/src/rabbit_control_main.erl
@@ -23,7 +23,7 @@
sync_queue/1, cancel_sync_queue/1, become/1,
purge_queue/1]).
--import(rabbit_misc, [rpc_call/4, rpc_call/5, rpc_call/7]).
+-import(rabbit_misc, [rpc_call/4, rpc_call/5]).
-define(EXTERNAL_CHECK_INTERVAL, 1000).
@@ -37,6 +37,7 @@
reset,
force_reset,
rotate_logs,
+ hipe_compile,
{join_cluster, [?RAM_DEF]},
change_cluster_node_type,
@@ -113,7 +114,7 @@
[stop, stop_app, start_app, wait, reset, force_reset, rotate_logs,
join_cluster, change_cluster_node_type, update_cluster_nodes,
forget_cluster_node, rename_cluster_node, cluster_status, status,
- environment, eval, force_boot, help, node_health_check]).
+ environment, eval, force_boot, help, node_health_check, hipe_compile]).
-define(COMMANDS_WITH_TIMEOUT,
[list_user_permissions, list_policies, list_queues, list_exchanges,
@@ -380,6 +381,16 @@ action(rotate_logs, Node, [], _Opts, Inform) ->
Inform("Rotating logs for node ~p", [Node]),
call(Node, {rabbit, rotate_logs, []});
+action(hipe_compile, _Node, [TargetDir], _Opts, _Inform) ->
+ ok = application:load(rabbit),
+ case rabbit_hipe:can_hipe_compile() of
+ true ->
+ {ok, _, _} = rabbit_hipe:compile_to_directory(TargetDir),
+ ok;
+ false ->
+ {error, "HiPE compilation is not supported"}
+ end;
+
action(close_connection, Node, [PidStr, Explanation], _Opts, Inform) ->
Inform("Closing connection \"~s\"", [PidStr]),
rpc_call(Node, rabbit_networking, close_connection,
@@ -579,56 +590,74 @@ action(purge_queue, Node, [Q], Opts, Inform, Timeout) ->
action(list_users, Node, [], _Opts, Inform, Timeout) ->
Inform("Listing users", []),
- call(Node, {rabbit_auth_backend_internal, list_users, []},
- rabbit_auth_backend_internal:user_info_keys(), true, Timeout);
+ call_emitter(Node, {rabbit_auth_backend_internal, list_users, []},
+ rabbit_auth_backend_internal:user_info_keys(),
+ [{timeout, Timeout}, to_bin_utf8]);
action(list_permissions, Node, [], Opts, Inform, Timeout) ->
VHost = proplists:get_value(?VHOST_OPT, Opts),
Inform("Listing permissions in vhost \"~s\"", [VHost]),
- call(Node, {rabbit_auth_backend_internal, list_vhost_permissions, [VHost]},
- rabbit_auth_backend_internal:vhost_perms_info_keys(), true, Timeout,
- true);
+ call_emitter(Node, {rabbit_auth_backend_internal, list_vhost_permissions, [VHost]},
+ rabbit_auth_backend_internal:vhost_perms_info_keys(),
+ [{timeout, Timeout}, to_bin_utf8, is_escaped]);
action(list_parameters, Node, [], Opts, Inform, Timeout) ->
VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
Inform("Listing runtime parameters", []),
- call(Node, {rabbit_runtime_parameters, list_formatted, [VHostArg]},
- rabbit_runtime_parameters:info_keys(), Timeout);
+ call_emitter(Node, {rabbit_runtime_parameters, list_formatted, [VHostArg]},
+ rabbit_runtime_parameters:info_keys(),
+ [{timeout, Timeout}]);
action(list_policies, Node, [], Opts, Inform, Timeout) ->
VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
Inform("Listing policies", []),
- call(Node, {rabbit_policy, list_formatted, [VHostArg]},
- rabbit_policy:info_keys(), Timeout);
+ call_emitter(Node, {rabbit_policy, list_formatted, [VHostArg]},
+ rabbit_policy:info_keys(),
+ [{timeout, Timeout}]);
action(list_vhosts, Node, Args, _Opts, Inform, Timeout) ->
Inform("Listing vhosts", []),
ArgAtoms = default_if_empty(Args, [name]),
- call(Node, {rabbit_vhost, info_all, []}, ArgAtoms, true, Timeout);
+ call_emitter(Node, {rabbit_vhost, info_all, []}, ArgAtoms,
+ [{timeout, Timeout}, to_bin_utf8]);
action(list_user_permissions, _Node, _Args = [], _Opts, _Inform, _Timeout) ->
{error_string,
"list_user_permissions expects a username argument, but none provided."};
action(list_user_permissions, Node, Args = [_Username], _Opts, Inform, Timeout) ->
Inform("Listing permissions for user ~p", Args),
- call(Node, {rabbit_auth_backend_internal, list_user_permissions, Args},
- rabbit_auth_backend_internal:user_perms_info_keys(), true, Timeout,
- true);
+ call_emitter(Node, {rabbit_auth_backend_internal, list_user_permissions, Args},
+ rabbit_auth_backend_internal:user_perms_info_keys(),
+ [{timeout, Timeout}, to_bin_utf8, is_escaped]);
action(list_queues, Node, Args, Opts, Inform, Timeout) ->
- [Online, Offline] = rabbit_cli:filter_opts(Opts, [?ONLINE_OPT, ?OFFLINE_OPT]),
Inform("Listing queues", []),
+ %% User options
+ [Online, Offline] = rabbit_cli:filter_opts(Opts, [?ONLINE_OPT, ?OFFLINE_OPT]),
VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
ArgAtoms = default_if_empty(Args, [name, messages]),
- call(Node, {rabbit_amqqueue, info_all, [VHostArg, ArgAtoms, Online, Offline]},
- ArgAtoms, Timeout);
+
+ %% Data for emission
+ Nodes = nodes_in_cluster(Node, Timeout),
+ OnlineChunks = if Online -> length(Nodes); true -> 0 end,
+ OfflineChunks = if Offline -> 1; true -> 0 end,
+ ChunksOpt = {chunks, OnlineChunks + OfflineChunks},
+ TimeoutOpt = {timeout, Timeout},
+ EmissionRef = make_ref(),
+ EmissionRefOpt = {ref, EmissionRef},
+
+ _ = Online andalso start_emission(Node, {rabbit_amqqueue, emit_info_all, [Nodes, VHostArg, ArgAtoms]},
+ [TimeoutOpt, EmissionRefOpt]),
+ _ = Offline andalso start_emission(Node, {rabbit_amqqueue, emit_info_down, [VHostArg, ArgAtoms]},
+ [TimeoutOpt, EmissionRefOpt]),
+ display_emission_result(EmissionRef, ArgAtoms, [ChunksOpt, TimeoutOpt]);
action(list_exchanges, Node, Args, Opts, Inform, Timeout) ->
Inform("Listing exchanges", []),
VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
ArgAtoms = default_if_empty(Args, [name, type]),
- call(Node, {rabbit_exchange, info_all, [VHostArg, ArgAtoms]},
- ArgAtoms, Timeout);
+ call_emitter(Node, {rabbit_exchange, info_all, [VHostArg, ArgAtoms]},
+ ArgAtoms, [{timeout, Timeout}]);
action(list_bindings, Node, Args, Opts, Inform, Timeout) ->
Inform("Listing bindings", []),
@@ -636,27 +665,31 @@ action(list_bindings, Node, Args, Opts, Inform, Timeout) ->
ArgAtoms = default_if_empty(Args, [source_name, source_kind,
destination_name, destination_kind,
routing_key, arguments]),
- call(Node, {rabbit_binding, info_all, [VHostArg, ArgAtoms]},
- ArgAtoms, Timeout);
+ call_emitter(Node, {rabbit_binding, info_all, [VHostArg, ArgAtoms]},
+ ArgAtoms, [{timeout, Timeout}]);
action(list_connections, Node, Args, _Opts, Inform, Timeout) ->
Inform("Listing connections", []),
ArgAtoms = default_if_empty(Args, [user, peer_host, peer_port, state]),
- call(Node, {rabbit_networking, connection_info_all, [ArgAtoms]},
- ArgAtoms, Timeout);
+ Nodes = nodes_in_cluster(Node, Timeout),
+ call_emitter(Node, {rabbit_networking, emit_connection_info_all, [Nodes, ArgAtoms]},
+ ArgAtoms, [{timeout, Timeout}, {chunks, length(Nodes)}]);
action(list_channels, Node, Args, _Opts, Inform, Timeout) ->
Inform("Listing channels", []),
ArgAtoms = default_if_empty(Args, [pid, user, consumer_count,
messages_unacknowledged]),
- call(Node, {rabbit_channel, info_all, [ArgAtoms]},
- ArgAtoms, Timeout);
+ Nodes = nodes_in_cluster(Node, Timeout),
+ call_emitter(Node, {rabbit_channel, emit_info_all, [Nodes, ArgAtoms]}, ArgAtoms,
+ [{timeout, Timeout}, {chunks, length(Nodes)}]);
action(list_consumers, Node, _Args, Opts, Inform, Timeout) ->
Inform("Listing consumers", []),
VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- call(Node, {rabbit_amqqueue, consumers_all, [VHostArg]},
- rabbit_amqqueue:consumer_info_keys(), Timeout).
+ Nodes = nodes_in_cluster(Node, Timeout),
+ call_emitter(Node, {rabbit_amqqueue, emit_consumers_all, [Nodes, VHostArg]},
+ rabbit_amqqueue:consumer_info_keys(),
+ [{timeout, Timeout}, {chunks, length(Nodes)}]).
format_parse_error({_Line, Mod, Err}) -> lists:flatten(Mod:format_error(Err)).
@@ -766,17 +799,18 @@ display_info_message_row(IsEscaped, Result, InfoItemKeys) ->
{X, Value} -> Value
end, IsEscaped) || X <- InfoItemKeys]).
-display_info_message(IsEscaped) ->
+display_info_message(IsEscaped, InfoItemKeys) ->
fun ([], _) ->
ok;
- ([FirstResult|_] = List, InfoItemKeys) when is_list(FirstResult) ->
+ ([FirstResult|_] = List, _) when is_list(FirstResult) ->
lists:foreach(fun(Result) ->
display_info_message_row(IsEscaped, Result, InfoItemKeys)
end,
List),
ok;
- (Result, InfoItemKeys) ->
- display_info_message_row(IsEscaped, Result, InfoItemKeys)
+ (Result, _) ->
+ display_info_message_row(IsEscaped, Result, InfoItemKeys),
+ ok
end.
display_info_list(Results, InfoItemKeys) when is_list(Results) ->
@@ -833,7 +867,10 @@ display_call_result(Node, MFA) ->
end.
unsafe_rpc(Node, Mod, Fun, Args) ->
- case rpc_call(Node, Mod, Fun, Args) of
+ unsafe_rpc(Node, Mod, Fun, Args, ?RPC_TIMEOUT).
+
+unsafe_rpc(Node, Mod, Fun, Args, Timeout) ->
+ case rpc_call(Node, Mod, Fun, Args, Timeout) of
{badrpc, _} = Res -> throw(Res);
Normal -> Normal
end.
@@ -852,33 +889,42 @@ ensure_app_running(Node) ->
call(Node, {Mod, Fun, Args}) ->
rpc_call(Node, Mod, Fun, lists:map(fun list_to_binary_utf8/1, Args)).
-call(Node, {Mod, Fun, Args}, InfoKeys, Timeout) ->
- call(Node, {Mod, Fun, Args}, InfoKeys, false, Timeout, false).
+call_emitter(Node, {Mod, Fun, Args}, InfoKeys, Opts) ->
+ Ref = start_emission(Node, {Mod, Fun, Args}, Opts),
+ display_emission_result(Ref, InfoKeys, Opts).
+
+start_emission(Node, {Mod, Fun, Args}, Opts) ->
+ ToBinUtf8 = proplists:get_value(to_bin_utf8, Opts, false),
+ Timeout = proplists:get_value(timeout, Opts, infinity),
+ Ref = proplists:get_value(ref, Opts, make_ref()),
+ rabbit_control_misc:spawn_emitter_caller(
+ Node, Mod, Fun, prepare_call_args(Args, ToBinUtf8),
+ Ref, self(), Timeout),
+ Ref.
+
+display_emission_result(Ref, InfoKeys, Opts) ->
+ IsEscaped = proplists:get_value(is_escaped, Opts, false),
+ Chunks = proplists:get_value(chunks, Opts, 1),
+ Timeout = proplists:get_value(timeout, Opts, infinity),
+ EmissionStatus = rabbit_control_misc:wait_for_info_messages(
+ self(), Ref, display_info_message(IsEscaped, InfoKeys), ok, Timeout, Chunks),
+ emission_to_action_result(EmissionStatus).
+
+%% Convert rabbit_control_misc:wait_for_info_messages/6 return value
+%% into form expected by rabbit_cli:main/3.
+emission_to_action_result({ok, ok}) ->
+ ok;
+emission_to_action_result({error, Error}) ->
+ Error.
-call(Node, {Mod, Fun, Args}, InfoKeys, ToBinUtf8, Timeout) ->
- call(Node, {Mod, Fun, Args}, InfoKeys, ToBinUtf8, Timeout, false).
+prepare_call_args(Args, ToBinUtf8) ->
+ case ToBinUtf8 of
+ true -> valid_utf8_args(Args);
+ false -> Args
+ end.
-call(Node, {Mod, Fun, Args}, InfoKeys, ToBinUtf8, Timeout, IsEscaped) ->
- Args0 = case ToBinUtf8 of
- true -> lists:map(fun list_to_binary_utf8/1, Args);
- false -> Args
- end,
- Ref = make_ref(),
- Pid = self(),
- spawn_link(
- fun () ->
- case rabbit_cli:rpc_call(Node, Mod, Fun, Args0,
- Ref, Pid, Timeout) of
- {error, _} = Error ->
- Pid ! {error, Error};
- {bad_argument, _} = Error ->
- Pid ! {error, Error};
- _ ->
- ok
- end
- end),
- rabbit_control_misc:wait_for_info_messages(
- Pid, Ref, InfoKeys, display_info_message(IsEscaped), Timeout).
+valid_utf8_args(Args) ->
+ lists:map(fun list_to_binary_utf8/1, Args).
list_to_binary_utf8(L) ->
B = list_to_binary(L),
@@ -928,7 +974,10 @@ split_list([_]) -> exit(even_list_needed);
split_list([A, B | T]) -> [{A, B} | split_list(T)].
nodes_in_cluster(Node) ->
- unsafe_rpc(Node, rabbit_mnesia, cluster_nodes, [running]).
+ unsafe_rpc(Node, rabbit_mnesia, cluster_nodes, [running], ?RPC_TIMEOUT).
+
+nodes_in_cluster(Node, Timeout) ->
+ unsafe_rpc(Node, rabbit_mnesia, cluster_nodes, [running], Timeout).
alarms_by_node(Name) ->
Status = unsafe_rpc(Name, rabbit, status, []),
diff --git a/src/rabbit_file.erl b/src/rabbit_file.erl
index 3dd0421485..d2d37f0ec0 100644
--- a/src/rabbit_file.erl
+++ b/src/rabbit_file.erl
@@ -24,6 +24,7 @@
-export([rename/2, delete/1, recursive_delete/1, recursive_copy/2]).
-export([lock_file/1]).
-export([read_file_info/1]).
+-export([filename_as_a_directory/1]).
-import(file_handle_cache, [with_handle/1, with_handle/2]).
@@ -59,6 +60,7 @@
(file:filename(), file:filename())
-> rabbit_types:ok_or_error({file:filename(), file:filename(), any()})).
-spec(lock_file/1 :: (file:filename()) -> rabbit_types:ok_or_error('eexist')).
+-spec(filename_as_a_directory/1 :: (file:filename()) -> file:filename()).
-endif.
@@ -306,3 +308,11 @@ lock_file(Path) ->
ok = prim_file:close(Lock)
end)
end.
+
+filename_as_a_directory(FileName) ->
+ case lists:last(FileName) of
+ "/" ->
+ FileName;
+ _ ->
+ FileName ++ "/"
+ end.
diff --git a/src/rabbit_hipe.erl b/src/rabbit_hipe.erl
index cbd9181e6a..6957d85cb4 100644
--- a/src/rabbit_hipe.erl
+++ b/src/rabbit_hipe.erl
@@ -5,15 +5,15 @@
%% practice 2 processes seems just as fast as any other number > 1,
%% and keeps the progress bar realistic-ish.
-define(HIPE_PROCESSES, 2).
--export([maybe_hipe_compile/0, log_hipe_result/1]).
-%% HiPE compilation happens before we have log handlers - so we have
-%% to io:format/2, it's all we can do.
+-export([maybe_hipe_compile/0, log_hipe_result/1]).
+-export([compile_to_directory/1]).
+-export([can_hipe_compile/0]).
+%% Compile and load during server startup sequence
maybe_hipe_compile() ->
{ok, Want} = application:get_env(rabbit, hipe_compile),
- Can = code:which(hipe) =/= non_existing,
- case {Want, Can} of
+ case {Want, can_hipe_compile()} of
{true, true} -> hipe_compile();
{true, false} -> false;
{false, _} -> {ok, disabled}
@@ -33,38 +33,49 @@ log_hipe_result(false) ->
rabbit_log:warning(
"Not HiPE compiling: HiPE not found in this Erlang installation.~n").
+hipe_compile() ->
+ hipe_compile(fun compile_and_load/1, false).
+
+compile_to_directory(Dir0) ->
+ Dir = rabbit_file:filename_as_a_directory(Dir0),
+ ok = prepare_ebin_directory(Dir),
+ hipe_compile(fun (Mod) -> compile_and_save(Mod, Dir) end, true).
+
+needs_compilation(Mod, Force) ->
+ Exists = code:which(Mod) =/= non_existing,
+ %% We skip modules already natively compiled. This
+ %% happens when RabbitMQ is stopped (just the
+ %% application, not the entire node) and started
+ %% again.
+ NotYetCompiled = not already_hipe_compiled(Mod),
+ NotVersioned = not compiled_with_version_support(Mod),
+ Exists andalso (Force orelse (NotYetCompiled andalso NotVersioned)).
+
%% HiPE compilation happens before we have log handlers and can take a
%% long time, so make an exception to our no-stdout policy and display
%% progress via stdout.
-hipe_compile() ->
+hipe_compile(CompileFun, Force) ->
{ok, HipeModulesAll} = application:get_env(rabbit, hipe_modules),
- HipeModules = [HM || HM <- HipeModulesAll,
- code:which(HM) =/= non_existing andalso
- %% We skip modules already natively compiled. This
- %% happens when RabbitMQ is stopped (just the
- %% application, not the entire node) and started
- %% again.
- already_hipe_compiled(HM)
- andalso (not compiled_with_version_support(HM))],
+ HipeModules = lists:filter(fun(Mod) -> needs_compilation(Mod, Force) end, HipeModulesAll),
case HipeModules of
[] -> {ok, already_compiled};
- _ -> do_hipe_compile(HipeModules)
+ _ -> do_hipe_compile(HipeModules, CompileFun)
end.
already_hipe_compiled(Mod) ->
try
%% OTP 18.x or later
- Mod:module_info(native) =:= false
+ Mod:module_info(native) =:= true
%% OTP prior to 18.x
catch error:badarg ->
- code:is_module_native(Mod) =:= false
+ code:is_module_native(Mod) =:= true
end.
compiled_with_version_support(Mod) ->
proplists:get_value(erlang_version_support, Mod:module_info(attributes))
=/= undefined.
-do_hipe_compile(HipeModules) ->
+do_hipe_compile(HipeModules, CompileFun) ->
Count = length(HipeModules),
io:format("~nHiPE compiling: |~s|~n |",
[string:copies("-", Count)]),
@@ -79,11 +90,7 @@ do_hipe_compile(HipeModules) ->
%% advanced API does not load automatically the code, except if the
%% 'load' option is set.
PidMRefs = [spawn_monitor(fun () -> [begin
- {M, Beam, _} =
- code:get_object_code(M),
- {ok, _} =
- hipe:compile(M, [], Beam,
- [o3, load]),
+ CompileFun(M),
io:format("#")
end || M <- Ms]
end) ||
@@ -101,3 +108,39 @@ split(L, N) -> split0(L, [[] || _ <- lists:seq(1, N)]).
split0([], Ls) -> Ls;
split0([I | Is], [L | Ls]) -> split0(Is, Ls ++ [[I | L]]).
+
+prepare_ebin_directory(Dir) ->
+ ok = rabbit_file:ensure_dir(Dir),
+ ok = delete_beam_files(Dir),
+ ok.
+
+delete_beam_files(Dir) ->
+ {ok, Files} = file:list_dir(Dir),
+ lists:foreach(fun(File) ->
+ case filename:extension(File) of
+ ".beam" ->
+ ok = file:delete(filename:join([Dir, File]));
+ _ ->
+ ok
+ end
+ end,
+ Files).
+
+compile_and_load(Mod) ->
+ {Mod, Beam, _} = code:get_object_code(Mod),
+ {ok, _} = hipe:compile(Mod, [], Beam, [o3, load]).
+
+compile_and_save(Module, Dir) ->
+ {Module, BeamCode, _} = code:get_object_code(Module),
+ BeamName = filename:join([Dir, atom_to_list(Module) ++ ".beam"]),
+ {ok, {Architecture, NativeCode}} = hipe:compile(Module, [], BeamCode, [o3]),
+ {ok, _, Chunks0} = beam_lib:all_chunks(BeamCode),
+ ChunkName = hipe_unified_loader:chunk_name(Architecture),
+ Chunks1 = lists:keydelete(ChunkName, 1, Chunks0),
+ Chunks = Chunks1 ++ [{ChunkName,NativeCode}],
+ {ok, BeamPlusNative} = beam_lib:build_module(Chunks),
+ ok = file:write_file(BeamName, BeamPlusNative),
+ BeamName.
+
+can_hipe_compile() ->
+ code:which(hipe) =/= non_existing.
diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl
index e447e9de82..9674a4ef2c 100644
--- a/src/rabbit_mirror_queue_master.erl
+++ b/src/rabbit_mirror_queue_master.erl
@@ -363,7 +363,7 @@ fetch(AckRequired, State = #state { backing_queue = BQ,
State1 = State #state { backing_queue_state = BQS1 },
{Result, case Result of
empty -> State1;
- {_MsgId, _IsDelivered, AckTag} -> drop_one(AckTag, State1)
+ {_MsgId, _IsDelivered, _AckTag} -> drop_one(AckRequired, State1)
end}.
drop(AckRequired, State = #state { backing_queue = BQ,
@@ -372,7 +372,7 @@ drop(AckRequired, State = #state { backing_queue = BQ,
State1 = State #state { backing_queue_state = BQS1 },
{Result, case Result of
empty -> State1;
- {_MsgId, AckTag} -> drop_one(AckTag, State1)
+ {_MsgId, _AckTag} -> drop_one(AckRequired, State1)
end}.
ack(AckTags, State = #state { gm = GM,
@@ -518,6 +518,7 @@ promote_backing_queue_state(QName, CPid, BQ, BQS, GM, AckTags, Seen, KS) ->
Depth = BQ:depth(BQS1),
true = Len == Depth, %% ASSERTION: everything must have been requeued
ok = gm:broadcast(GM, {depth, Depth}),
+ WaitTimeout = rabbit_misc:get_env(rabbit, slave_wait_timeout, 15000),
#state { name = QName,
gm = GM,
coordinator = CPid,
@@ -525,7 +526,8 @@ promote_backing_queue_state(QName, CPid, BQ, BQS, GM, AckTags, Seen, KS) ->
backing_queue_state = BQS1,
seen_status = Seen,
confirmed = [],
- known_senders = sets:from_list(KS) }.
+ known_senders = sets:from_list(KS),
+ wait_timeout = WaitTimeout }.
sender_death_fun() ->
Self = self(),
@@ -556,10 +558,10 @@ depth_fun() ->
%% Helpers
%% ---------------------------------------------------------------------------
-drop_one(AckTag, State = #state { gm = GM,
- backing_queue = BQ,
- backing_queue_state = BQS }) ->
- ok = gm:broadcast(GM, {drop, BQ:len(BQS), 1, AckTag =/= undefined}),
+drop_one(AckRequired, State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ ok = gm:broadcast(GM, {drop, BQ:len(BQS), 1, AckRequired}),
State.
drop(PrevLen, AckRequired, State = #state { gm = GM,
diff --git a/src/rabbit_mirror_queue_misc.erl b/src/rabbit_mirror_queue_misc.erl
index 849efa3611..b188298a9b 100644
--- a/src/rabbit_mirror_queue_misc.erl
+++ b/src/rabbit_mirror_queue_misc.erl
@@ -20,7 +20,7 @@
-export([remove_from_queue/3, on_node_up/0, add_mirrors/3,
report_deaths/4, store_updated_slaves/1,
initial_queue_node/2, suggested_queue_nodes/1,
- is_mirrored/1, update_mirrors/2, validate_policy/1,
+ is_mirrored/1, update_mirrors/2, update_mirrors/1, validate_policy/1,
maybe_auto_sync/1, maybe_drop_master_after_sync/1,
sync_batch_size/1, log_info/3, log_warning/3]).
@@ -64,6 +64,8 @@
-spec(is_mirrored/1 :: (rabbit_types:amqqueue()) -> boolean()).
-spec(update_mirrors/2 ::
(rabbit_types:amqqueue(), rabbit_types:amqqueue()) -> 'ok').
+-spec(update_mirrors/1 ::
+ (rabbit_types:amqqueue()) -> 'ok').
-spec(maybe_drop_master_after_sync/1 :: (rabbit_types:amqqueue()) -> 'ok').
-spec(maybe_auto_sync/1 :: (rabbit_types:amqqueue()) -> 'ok').
-spec(log_info/3 :: (rabbit_amqqueue:name(), string(), [any()]) -> 'ok').
@@ -384,15 +386,12 @@ update_mirrors(OldQ = #amqqueue{pid = QPid},
NewQ = #amqqueue{pid = QPid}) ->
case {is_mirrored(OldQ), is_mirrored(NewQ)} of
{false, false} -> ok;
- {true, false} -> rabbit_amqqueue:stop_mirroring(QPid);
- {false, true} -> rabbit_amqqueue:start_mirroring(QPid);
- {true, true} -> update_mirrors0(OldQ, NewQ)
+ _ -> rabbit_amqqueue:update_mirroring(QPid)
end.
-update_mirrors0(OldQ = #amqqueue{name = QName},
- NewQ = #amqqueue{name = QName}) ->
- {OldMNode, OldSNodes, _} = actual_queue_nodes(OldQ),
- {NewMNode, NewSNodes} = suggested_queue_nodes(NewQ),
+update_mirrors(Q = #amqqueue{name = QName}) ->
+ {OldMNode, OldSNodes, _} = actual_queue_nodes(Q),
+ {NewMNode, NewSNodes} = suggested_queue_nodes(Q),
OldNodes = [OldMNode | OldSNodes],
NewNodes = [NewMNode | NewSNodes],
%% When a mirror dies, remove_from_queue/2 might have to add new
@@ -406,7 +405,7 @@ update_mirrors0(OldQ = #amqqueue{name = QName},
drop_mirrors(QName, OldNodes -- NewNodes),
%% This is for the case where no extra nodes were added but we changed to
%% a policy requiring auto-sync.
- maybe_auto_sync(NewQ),
+ maybe_auto_sync(Q),
ok.
%% The arrival of a newly synced slave may cause the master to die if
diff --git a/src/rabbit_policy.erl b/src/rabbit_policy.erl
index eb8cf63327..a9caadf972 100644
--- a/src/rabbit_policy.erl
+++ b/src/rabbit_policy.erl
@@ -276,7 +276,9 @@ update_queue(Q = #amqqueue{name = QName, policy = OldPolicy}, Policies) ->
NewPolicy -> case rabbit_amqqueue:update(
QName, fun(Q1) ->
rabbit_queue_decorator:set(
- Q1#amqqueue{policy = NewPolicy})
+ Q1#amqqueue{policy = NewPolicy,
+ policy_version =
+ Q1#amqqueue.policy_version + 1 })
end) of
#amqqueue{} = Q1 -> {Q, Q1};
not_found -> {Q, Q }
diff --git a/src/rabbit_priority_queue.erl b/src/rabbit_priority_queue.erl
index 6141796f7b..a3bfb5cdfa 100644
--- a/src/rabbit_priority_queue.erl
+++ b/src/rabbit_priority_queue.erl
@@ -205,8 +205,8 @@ publish(Msg, MsgProps, IsDelivered, ChPid, Flow,
State = #passthrough{bq = BQ, bqs = BQS}) ->
?passthrough1(publish(Msg, MsgProps, IsDelivered, ChPid, Flow, BQS)).
-batch_publish(Publishes, ChPid, Flow, State = #state{bq = BQ}) ->
- PubDict = partition_publish_batch(Publishes),
+batch_publish(Publishes, ChPid, Flow, State = #state{bq = BQ, bqss = [{MaxP, _} |_]}) ->
+ PubDict = partition_publish_batch(Publishes, MaxP),
lists:foldl(
fun ({Priority, Pubs}, St) ->
pick1(fun (_P, BQSN) ->
@@ -227,8 +227,8 @@ publish_delivered(Msg, MsgProps, ChPid, Flow,
State = #passthrough{bq = BQ, bqs = BQS}) ->
?passthrough2(publish_delivered(Msg, MsgProps, ChPid, Flow, BQS)).
-batch_publish_delivered(Publishes, ChPid, Flow, State = #state{bq = BQ}) ->
- PubDict = partition_publish_delivered_batch(Publishes),
+batch_publish_delivered(Publishes, ChPid, Flow, State = #state{bq = BQ, bqss = [{MaxP, _} |_]}) ->
+ PubDict = partition_publish_delivered_batch(Publishes, MaxP),
{PrioritiesAndAcks, State1} =
lists:foldl(
fun ({Priority, Pubs}, {PriosAndAcks, St}) ->
@@ -404,7 +404,6 @@ msg_rates(#state{bq = BQ, bqss = BQSs}) ->
end, {0.0, 0.0}, BQSs);
msg_rates(#passthrough{bq = BQ, bqs = BQS}) ->
BQ:msg_rates(BQS).
-
info(backing_queue_status, #state{bq = BQ, bqss = BQSs}) ->
fold0(fun (P, BQSN, Acc) ->
combine_status(P, BQ:info(backing_queue_status, BQSN), Acc)
@@ -433,8 +432,8 @@ set_queue_mode(Mode, State = #state{bq = BQ}) ->
set_queue_mode(Mode, State = #passthrough{bq = BQ, bqs = BQS}) ->
?passthrough1(set_queue_mode(Mode, BQS)).
-zip_msgs_and_acks(Msgs, AckTags, Accumulator, #state{}) ->
- MsgsByPriority = partition_publish_delivered_batch(Msgs),
+zip_msgs_and_acks(Msgs, AckTags, Accumulator, #state{bqss = [{MaxP, _} |_]}) ->
+ MsgsByPriority = partition_publish_delivered_batch(Msgs, MaxP),
lists:foldl(fun (Acks, MAs) ->
{P, _AckTag} = hd(Acks),
Pubs = orddict:fetch(P, MsgsByPriority),
@@ -484,13 +483,14 @@ foreach1(_Fun, [], BQSAcc) ->
%% For a given thing, just go to its BQ
pick1(Fun, Prioritisable, #state{bqss = BQSs} = State) ->
- {P, BQSN} = priority(Prioritisable, BQSs),
+ {P, BQSN} = priority_bq(Prioritisable, BQSs),
a(State#state{bqss = bq_store(P, Fun(P, BQSN), BQSs)}).
%% Fold over results
fold2(Fun, Acc, State = #state{bqss = BQSs}) ->
{Res, BQSs1} = fold2(Fun, Acc, BQSs, []),
{Res, a(State#state{bqss = BQSs1})}.
+
fold2(Fun, Acc, [{P, BQSN} | Rest], BQSAcc) ->
{Acc1, BQSN1} = Fun(P, BQSN, Acc),
fold2(Fun, Acc1, Rest, [{P, BQSN1} | BQSAcc]);
@@ -532,7 +532,7 @@ fold_by_acktags2(Fun, AckTags, State) ->
%% For a given thing, just go to its BQ
pick2(Fun, Prioritisable, #state{bqss = BQSs} = State) ->
- {P, BQSN} = priority(Prioritisable, BQSs),
+ {P, BQSN} = priority_bq(Prioritisable, BQSs),
{Res, BQSN1} = Fun(P, BQSN),
{Res, a(State#state{bqss = bq_store(P, BQSN1, BQSs)})}.
@@ -564,8 +564,7 @@ findfold3(_Fun, Acc, NotFound, [], BQSAcc) ->
{NotFound, Acc, lists:reverse(BQSAcc)}.
bq_fetch(P, []) -> exit({not_found, P});
-bq_fetch(P, [{P, BQSN} | _]) -> BQSN;
-bq_fetch(P, [{P1, BQSN} | _]) when P > P1 -> BQSN;
+bq_fetch(P, [{P, BQSN} | _]) -> {P, BQSN};
bq_fetch(P, [{_, _BQSN} | T]) -> bq_fetch(P, T).
bq_store(P, BQS, BQSs) ->
@@ -583,41 +582,36 @@ a(State = #state{bqss = BQSs}) ->
end.
%%----------------------------------------------------------------------------
-partition_publish_batch(Publishes) ->
+partition_publish_batch(Publishes, MaxP) ->
partition_publishes(
- Publishes, fun ({Msg, _, _}) -> Msg end).
+ Publishes, fun ({Msg, _, _}) -> Msg end, MaxP).
-partition_publish_delivered_batch(Publishes) ->
+partition_publish_delivered_batch(Publishes, MaxP) ->
partition_publishes(
- Publishes, fun ({Msg, _}) -> Msg end).
+ Publishes, fun ({Msg, _}) -> Msg end, MaxP).
-partition_publishes(Publishes, ExtractMsg) ->
+partition_publishes(Publishes, ExtractMsg, MaxP) ->
lists:foldl(fun (Pub, Dict) ->
Msg = ExtractMsg(Pub),
- rabbit_misc:orddict_cons(priority2(Msg), Pub, Dict)
+ rabbit_misc:orddict_cons(priority(Msg, MaxP), Pub, Dict)
end, orddict:new(), Publishes).
-priority(P, BQSs) when is_integer(P) ->
- {P, bq_fetch(P, BQSs)};
-priority(#basic_message{content = Content}, BQSs) ->
- priority1(rabbit_binary_parser:ensure_content_decoded(Content), BQSs).
-
-priority1(_Content, [{P, BQSN}]) ->
- {P, BQSN};
-priority1(Content, [{P, BQSN} | Rest]) ->
- case priority2(Content) >= P of
- true -> {P, BQSN};
- false -> priority1(Content, Rest)
- end.
-
-priority2(#basic_message{content = Content}) ->
- priority2(rabbit_binary_parser:ensure_content_decoded(Content));
-priority2(#content{properties = Props}) ->
+priority_bq(Priority, [{MaxP, _} | _] = BQSs) ->
+ bq_fetch(priority(Priority, MaxP), BQSs).
+
+%% Messages with a priority which is higher than the queue's maximum are treated
+%% as if they were published with the maximum priority.
+priority(undefined, _MaxP) ->
+ 0;
+priority(Priority, MaxP) when is_integer(Priority), Priority =< MaxP ->
+ Priority;
+priority(Priority, MaxP) when is_integer(Priority), Priority > MaxP ->
+ MaxP;
+priority(#basic_message{content = Content}, MaxP) ->
+ priority(rabbit_binary_parser:ensure_content_decoded(Content), MaxP);
+priority(#content{properties = Props}, MaxP) ->
#'P_basic'{priority = Priority0} = Props,
- case Priority0 of
- undefined -> 0;
- _ when is_integer(Priority0) -> Priority0
- end.
+ priority(Priority0, MaxP).
add_maybe_infinity(infinity, _) -> infinity;
add_maybe_infinity(_, infinity) -> infinity;
diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl
index b99a1d12ee..0f55b9e4a9 100644
--- a/src/rabbit_upgrade_functions.erl
+++ b/src/rabbit_upgrade_functions.erl
@@ -52,6 +52,7 @@
-rabbit_upgrade({down_slave_nodes, mnesia, [queue_decorators]}).
-rabbit_upgrade({queue_state, mnesia, [down_slave_nodes]}).
-rabbit_upgrade({recoverable_slaves, mnesia, [queue_state]}).
+-rabbit_upgrade({policy_version, mnesia, [recoverable_slaves]}).
-rabbit_upgrade({user_password_hashing, mnesia, [hash_passwords]}).
%% -------------------------------------------------------------------
@@ -447,6 +448,24 @@ recoverable_slaves(Table) ->
sync_slave_pids, recoverable_slaves, policy, gm_pids, decorators,
state]).
+policy_version() ->
+ ok = policy_version(rabbit_queue),
+ ok = policy_version(rabbit_durable_queue).
+
+policy_version(Table) ->
+ transform(
+ Table,
+ fun ({amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, GmPids, Decorators,
+ State}) ->
+ {amqqueue, Name, Durable, AutoDelete, ExclusiveOwner, Arguments,
+ Pid, SlavePids, SyncSlavePids, DSN, Policy, GmPids, Decorators,
+ State, 0}
+ end,
+ [name, durable, auto_delete, exclusive_owner, arguments, pid, slave_pids,
+ sync_slave_pids, recoverable_slaves, policy, gm_pids, decorators, state,
+ policy_version]).
+
%% Prior to 3.6.0, passwords were hashed using MD5, this populates
%% existing records with said default. Users created with 3.6.0+ will
%% have internal_user.hashing_algorithm populated by the internal
diff --git a/src/truncate.erl b/src/truncate.erl
index 1c9b08ed27..a1586b0cb0 100644
--- a/src/truncate.erl
+++ b/src/truncate.erl
@@ -21,8 +21,10 @@
-record(params, {content, struct, content_dec, struct_dec}).
-export([log_event/2, term/2]).
-%% exported for testing
--export([test/0]).
+
+-ifdef(TEST).
+-export([term_size/3]).
+-endif.
log_event({Type, GL, {Pid, Format, Args}}, Params)
when Type =:= error orelse
@@ -123,72 +125,3 @@ tuple_term_size(_T, M, I, S, _W) when I > S ->
M;
tuple_term_size(T, M, I, S, W) ->
tuple_term_size(T, lim(term_size(element(I, T), M, W), 2 * W), I + 1, S, W).
-
-%%----------------------------------------------------------------------------
-
-test() ->
- test_short_examples_exactly(),
- test_term_limit(),
- test_large_examples_for_size(),
- ok.
-
-test_short_examples_exactly() ->
- F = fun (Term, Exp) ->
- Exp = term(Term, {1, {10, 10, 5, 5}}),
- Term = term(Term, {100000, {10, 10, 5, 5}})
- end,
- FSmall = fun (Term, Exp) ->
- Exp = term(Term, {1, {2, 2, 2, 2}}),
- Term = term(Term, {100000, {2, 2, 2, 2}})
- end,
- F([], []),
- F("h", "h"),
- F("hello world", "hello w..."),
- F([[h,e,l,l,o,' ',w,o,r,l,d]], [[h,e,l,l,o,'...']]),
- F([a|b], [a|b]),
- F(<<"hello">>, <<"hello">>),
- F([<<"hello world">>], [<<"he...">>]),
- F(<<1:1>>, <<1:1>>),
- F(<<1:81>>, <<0:56, "...">>),
- F({{{{a}}},{b},c,d,e,f,g,h,i,j,k}, {{{'...'}},{b},c,d,e,f,g,h,i,j,'...'}),
- FSmall({a,30,40,40,40,40}, {a,30,'...'}),
- FSmall([a,30,40,40,40,40], [a,30,'...']),
- P = spawn(fun() -> receive die -> ok end end),
- F([0, 0.0, <<1:1>>, F, P], [0, 0.0, <<1:1>>, F, P]),
- P ! die,
- R = make_ref(),
- F([R], [R]),
- ok.
-
-test_term_limit() ->
- W = erlang:system_info(wordsize),
- S = <<"abc">>,
- 1 = term_size(S, 4, W),
- limit_exceeded = term_size(S, 3, W),
- case 100 - term_size([S, S], 100, W) of
- 22 -> ok; %% 32 bit
- 38 -> ok %% 64 bit
- end,
- case 100 - term_size([S, [S]], 100, W) of
- 30 -> ok; %% ditto
- 54 -> ok
- end,
- limit_exceeded = term_size([S, S], 6, W),
- ok.
-
-test_large_examples_for_size() ->
- %% Real world values
- Shrink = fun(Term) -> term(Term, {1, {1000, 100, 50, 5}}) end,
- TestSize = fun(Term) ->
- true = 5000000 < size(term_to_binary(Term)),
- true = 500000 > size(term_to_binary(Shrink(Term)))
- end,
- TestSize(lists:seq(1, 5000000)),
- TestSize(recursive_list(1000, 10)),
- TestSize(recursive_list(5000, 20)),
- TestSize(gb_sets:from_list([I || I <- lists:seq(1, 1000000)])),
- TestSize(gb_trees:from_orddict([{I, I} || I <- lists:seq(1, 1000000)])),
- ok.
-
-recursive_list(S, 0) -> lists:seq(1, S);
-recursive_list(S, N) -> [recursive_list(S div N, N-1) || _ <- lists:seq(1, S)].
diff --git a/test/channel_interceptor_SUITE.erl b/test/channel_interceptor_SUITE.erl
new file mode 100644
index 0000000000..0e4948ea3c
--- /dev/null
+++ b/test/channel_interceptor_SUITE.erl
@@ -0,0 +1,113 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2016 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(channel_interceptor_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ register_interceptor
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+register_interceptor(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, register_interceptor1, [Config]).
+
+register_interceptor1(Config) ->
+ PredefinedChannels = rabbit_channel:list(),
+
+ Ch1 = rabbit_ct_client_helpers:open_channel(Config, 0),
+
+ QName = <<"register_interceptor-q">>,
+ amqp_channel:call(Ch1, #'queue.declare'{queue = QName}),
+
+ [ChannelProc] = rabbit_channel:list() -- PredefinedChannels,
+
+ [{interceptors, []}] = rabbit_channel:info(ChannelProc, [interceptors]),
+
+ check_send_receive(Ch1, QName, <<"bar">>, <<"bar">>),
+
+ ok = rabbit_registry:register(channel_interceptor,
+ <<"dummy interceptor">>,
+ dummy_interceptor),
+ [{interceptors, [{dummy_interceptor, undefined}]}] =
+ rabbit_channel:info(ChannelProc, [interceptors]),
+
+ check_send_receive(Ch1, QName, <<"bar">>, <<"">>),
+
+ ok = rabbit_registry:unregister(channel_interceptor,
+ <<"dummy interceptor">>),
+ [{interceptors, []}] = rabbit_channel:info(ChannelProc, [interceptors]),
+
+ check_send_receive(Ch1, QName, <<"bar">>, <<"bar">>),
+ passed.
+
+
+check_send_receive(Ch1, QName, Send, Receive) ->
+ amqp_channel:call(Ch1,
+ #'basic.publish'{routing_key = QName},
+ #amqp_msg{payload = Send}),
+
+ {#'basic.get_ok'{}, #amqp_msg{payload = Receive}} =
+ amqp_channel:call(Ch1, #'basic.get'{queue = QName,
+ no_ack = true}).
diff --git a/test/channel_operation_timeout_SUITE.erl b/test/channel_operation_timeout_SUITE.erl
new file mode 100644
index 0000000000..7b41b9c225
--- /dev/null
+++ b/test/channel_operation_timeout_SUITE.erl
@@ -0,0 +1,196 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(channel_operation_timeout_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile([export_all]).
+
+-import(rabbit_misc, [pget/2]).
+
+-define(CONFIG, [cluster_ab]).
+-define(DEFAULT_VHOST, <<"/">>).
+-define(QRESOURCE(Q), rabbit_misc:r(?DEFAULT_VHOST, queue, Q)).
+-define(TIMEOUT_TEST_MSG, <<"timeout_test_msg!">>).
+-define(DELAY, 25).
+
+all() ->
+ [
+ notify_down_all
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = 2,
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, ClusterSize},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+notify_down_all(Config) ->
+ Rabbit = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ RabbitCh = rabbit_ct_client_helpers:open_channel(Config, 0),
+ HareCh = rabbit_ct_client_helpers:open_channel(Config, 1),
+
+ %% success
+ set_channel_operation_timeout_config(Config, 1000),
+ configure_bq(Config),
+ QCfg0 = qconfig(RabbitCh, <<"q0">>, <<"ex0">>, true, false),
+ declare(QCfg0),
+ %% Testing rabbit_amqqueue:notify_down_all via rabbit_channel.
+ %% Consumer count = 0 after correct channel termination and
+ %% notification of queues via delegate:call/3
+ true = (0 =/= length(get_consumers(Config, Rabbit, ?DEFAULT_VHOST))),
+ rabbit_ct_client_helpers:close_channel(RabbitCh),
+ 0 = length(get_consumers(Config, Rabbit, ?DEFAULT_VHOST)),
+ false = is_process_alive(RabbitCh),
+
+ %% fail
+ set_channel_operation_timeout_config(Config, 10),
+ QCfg2 = qconfig(HareCh, <<"q1">>, <<"ex1">>, true, false),
+ declare(QCfg2),
+ publish(QCfg2, ?TIMEOUT_TEST_MSG),
+ timer:sleep(?DELAY),
+ rabbit_ct_client_helpers:close_channel(HareCh),
+ timer:sleep(?DELAY),
+ false = is_process_alive(HareCh),
+
+ pass.
+
+%% -------------------------
+%% Internal helper functions
+%% -------------------------
+
+set_channel_operation_timeout_config(Config, Timeout) ->
+ [ok = Ret
+ || Ret <- rabbit_ct_broker_helpers:rpc_all(Config,
+ application, set_env, [rabbit, channel_operation_timeout, Timeout])],
+ ok.
+
+set_channel_operation_backing_queue(Config) ->
+ [ok = Ret
+ || Ret <- rabbit_ct_broker_helpers:rpc_all(Config,
+ application, set_env,
+ [rabbit, backing_queue_module, channel_operation_timeout_test_queue])],
+ ok.
+
+re_enable_priority_queue(Config) ->
+ [ok = Ret
+ || Ret <- rabbit_ct_broker_helpers:rpc_all(Config,
+ rabbit_priority_queue, enable, [])],
+ ok.
+
+declare(QCfg) ->
+ QDeclare = #'queue.declare'{queue = Q = pget(name, QCfg), durable = true},
+ #'queue.declare_ok'{} = amqp_channel:call(Ch = pget(ch, QCfg), QDeclare),
+
+ ExDeclare = #'exchange.declare'{exchange = Ex = pget(ex, QCfg)},
+ #'exchange.declare_ok'{} = amqp_channel:call(Ch, ExDeclare),
+
+ #'queue.bind_ok'{} =
+ amqp_channel:call(Ch, #'queue.bind'{queue = Q,
+ exchange = Ex,
+ routing_key = Q}),
+ maybe_subscribe(QCfg).
+
+maybe_subscribe(QCfg) ->
+ case pget(consume, QCfg) of
+ true ->
+ Sub = #'basic.consume'{queue = pget(name, QCfg)},
+ Ch = pget(ch, QCfg),
+ Del = pget(deliver, QCfg),
+ amqp_channel:subscribe(Ch, Sub,
+ spawn(fun() -> consume(Ch, Del) end));
+ _ -> ok
+ end.
+
+consume(_Ch, false) -> receive_nothing();
+consume(Ch, Deliver = true) ->
+ receive
+ {#'basic.deliver'{}, _Msg} ->
+ consume(Ch, Deliver)
+ end.
+
+publish(QCfg, Msg) ->
+ Publish = #'basic.publish'{exchange = pget(ex, QCfg),
+ routing_key = pget(name, QCfg)},
+ amqp_channel:call(pget(ch, QCfg), Publish,
+ #amqp_msg{payload = Msg}).
+
+get_consumers(Config, Node, VHost) when is_atom(Node),
+ is_binary(VHost) ->
+ rabbit_ct_broker_helpers:rpc(Config, Node,
+ rabbit_amqqueue, consumers_all, [VHost]).
+
+get_amqqueue(Q, []) -> throw({not_found, Q});
+get_amqqueue(Q, [AMQQ = #amqqueue{name = Q} | _]) -> AMQQ;
+get_amqqueue(Q, [_| Rem]) -> get_amqqueue(Q, Rem).
+
+qconfig(Ch, Name, Ex, Consume, Deliver) ->
+ [{ch, Ch}, {name, Name}, {ex,Ex}, {consume, Consume}, {deliver, Deliver}].
+
+receive_nothing() ->
+ receive
+ after infinity -> void
+ end.
+
+unhandled_req(Fun) ->
+ try
+ Fun()
+ catch
+ exit:{{shutdown,{_, ?NOT_FOUND, _}}, _} -> ok;
+ _:Reason -> {error, Reason}
+ end.
+
+configure_bq(Config) ->
+ ok = set_channel_operation_backing_queue(Config),
+ ok = re_enable_priority_queue(Config),
+ ok = rabbit_ct_broker_helpers:add_code_path_to_all_nodes(Config,
+ ?MODULE).
diff --git a/test/channel_operation_timeout_test_queue.erl b/test/channel_operation_timeout_test_queue.erl
new file mode 100644
index 0000000000..87c33bea87
--- /dev/null
+++ b/test/channel_operation_timeout_test_queue.erl
@@ -0,0 +1,2443 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(channel_operation_timeout_test_queue).
+
+-export([init/3, terminate/2, delete_and_terminate/2, delete_crashed/1,
+ purge/1, purge_acks/1,
+ publish/6, publish_delivered/5,
+ batch_publish/4, batch_publish_delivered/4,
+ discard/4, drain_confirmed/1,
+ dropwhile/2, fetchwhile/4, fetch/2, drop/2, ack/2, requeue/2,
+ ackfold/4, fold/3, len/1, is_empty/1, depth/1,
+ set_ram_duration_target/2, ram_duration/1, needs_timeout/1, timeout/1,
+ handle_pre_hibernate/1, resume/1, msg_rates/1,
+ info/2, invoke/3, is_duplicate/2, set_queue_mode/2,
+ zip_msgs_and_acks/4, multiple_routing_keys/0]).
+
+-export([start/1, stop/0]).
+
+%% exported for testing only
+-export([start_msg_store/2, stop_msg_store/0, init/6]).
+
+%%----------------------------------------------------------------------------
+%% This test backing queue follows the variable queue implementation, with
+%% the exception that it will introduce infinite delays on some operations if
+%% the test message has been published, and is awaiting acknowledgement in the
+%% queue index. Test message is "timeout_test_msg!".
+%%
+%%----------------------------------------------------------------------------
+
+-behaviour(rabbit_backing_queue).
+
+-record(vqstate,
+ { q1,
+ q2,
+ delta,
+ q3,
+ q4,
+ next_seq_id,
+ ram_pending_ack, %% msgs using store, still in RAM
+ disk_pending_ack, %% msgs in store, paged out
+ qi_pending_ack, %% msgs using qi, *can't* be paged out
+ index_state,
+ msg_store_clients,
+ durable,
+ transient_threshold,
+ qi_embed_msgs_below,
+
+ len, %% w/o unacked
+ bytes, %% w/o unacked
+ unacked_bytes,
+ persistent_count, %% w unacked
+ persistent_bytes, %% w unacked
+
+ target_ram_count,
+ ram_msg_count, %% w/o unacked
+ ram_msg_count_prev,
+ ram_ack_count_prev,
+ ram_bytes, %% w unacked
+ out_counter,
+ in_counter,
+ rates,
+ msgs_on_disk,
+ msg_indices_on_disk,
+ unconfirmed,
+ confirmed,
+ ack_out_counter,
+ ack_in_counter,
+ %% Unlike the other counters these two do not feed into
+ %% #rates{} and get reset
+ disk_read_count,
+ disk_write_count,
+
+ io_batch_size,
+
+ %% default queue or lazy queue
+ mode
+ }).
+
+-record(rates, { in, out, ack_in, ack_out, timestamp }).
+
+-record(msg_status,
+ { seq_id,
+ msg_id,
+ msg,
+ is_persistent,
+ is_delivered,
+ msg_in_store,
+ index_on_disk,
+ persist_to,
+ msg_props
+ }).
+
+-record(delta,
+ { start_seq_id, %% start_seq_id is inclusive
+ count,
+ end_seq_id %% end_seq_id is exclusive
+ }).
+
+-define(HEADER_GUESS_SIZE, 100). %% see determine_persist_to/2
+-define(PERSISTENT_MSG_STORE, msg_store_persistent).
+-define(TRANSIENT_MSG_STORE, msg_store_transient).
+-define(QUEUE, lqueue).
+-define(TIMEOUT_TEST_MSG, <<"timeout_test_msg!">>).
+
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+%%----------------------------------------------------------------------------
+
+-rabbit_upgrade({multiple_routing_keys, local, []}).
+
+-ifdef(use_specs).
+
+-type(seq_id() :: non_neg_integer()).
+
+-type(rates() :: #rates { in :: float(),
+ out :: float(),
+ ack_in :: float(),
+ ack_out :: float(),
+ timestamp :: rabbit_types:timestamp()}).
+
+-type(delta() :: #delta { start_seq_id :: non_neg_integer(),
+ count :: non_neg_integer(),
+ end_seq_id :: non_neg_integer() }).
+
+%% The compiler (rightfully) complains that ack() and state() are
+%% unused. For this reason we duplicate a -spec from
+%% rabbit_backing_queue with the only intent being to remove
+%% warnings. The problem here is that we can't parameterise the BQ
+%% behaviour by these two types as we would like to. We still leave
+%% these here for documentation purposes.
+-type(ack() :: seq_id()).
+-type(state() :: #vqstate {
+ q1 :: ?QUEUE:?QUEUE(),
+ q2 :: ?QUEUE:?QUEUE(),
+ delta :: delta(),
+ q3 :: ?QUEUE:?QUEUE(),
+ q4 :: ?QUEUE:?QUEUE(),
+ next_seq_id :: seq_id(),
+ ram_pending_ack :: gb_trees:tree(),
+ disk_pending_ack :: gb_trees:tree(),
+ qi_pending_ack :: gb_trees:tree(),
+ index_state :: any(),
+ msg_store_clients :: 'undefined' | {{any(), binary()},
+ {any(), binary()}},
+ durable :: boolean(),
+ transient_threshold :: non_neg_integer(),
+ qi_embed_msgs_below :: non_neg_integer(),
+
+ len :: non_neg_integer(),
+ bytes :: non_neg_integer(),
+ unacked_bytes :: non_neg_integer(),
+
+ persistent_count :: non_neg_integer(),
+ persistent_bytes :: non_neg_integer(),
+
+ target_ram_count :: non_neg_integer() | 'infinity',
+ ram_msg_count :: non_neg_integer(),
+ ram_msg_count_prev :: non_neg_integer(),
+ ram_ack_count_prev :: non_neg_integer(),
+ ram_bytes :: non_neg_integer(),
+ out_counter :: non_neg_integer(),
+ in_counter :: non_neg_integer(),
+ rates :: rates(),
+ msgs_on_disk :: gb_sets:set(),
+ msg_indices_on_disk :: gb_sets:set(),
+ unconfirmed :: gb_sets:set(),
+ confirmed :: gb_sets:set(),
+ ack_out_counter :: non_neg_integer(),
+ ack_in_counter :: non_neg_integer(),
+ disk_read_count :: non_neg_integer(),
+ disk_write_count :: non_neg_integer(),
+
+ io_batch_size :: pos_integer(),
+ mode :: 'default' | 'lazy' }).
+%% Duplicated from rabbit_backing_queue
+-spec(ack/2 :: ([ack()], state()) -> {[rabbit_guid:guid()], state()}).
+
+-spec(multiple_routing_keys/0 :: () -> 'ok').
+
+-endif.
+
+-define(BLANK_DELTA, #delta { start_seq_id = undefined,
+ count = 0,
+ end_seq_id = undefined }).
+-define(BLANK_DELTA_PATTERN(Z), #delta { start_seq_id = Z,
+ count = 0,
+ end_seq_id = Z }).
+
+-define(MICROS_PER_SECOND, 1000000.0).
+
+%% We're sampling every 5s for RAM duration; a half life that is of
+%% the same order of magnitude is probably about right.
+-define(RATE_AVG_HALF_LIFE, 5.0).
+
+%% We will recalculate the #rates{} every time we get asked for our
+%% RAM duration, or every N messages published, whichever is
+%% sooner. We do this since the priority calculations in
+%% rabbit_amqqueue_process need fairly fresh rates.
+-define(MSGS_PER_RATE_CALC, 100).
+
+%%----------------------------------------------------------------------------
+%% Public API
+%%----------------------------------------------------------------------------
+
+%% Recover the queue index for DurableQueues, then start both message
+%% stores; persistent client refs are harvested from each queue's clean
+%% recovery terms (queues that shut down uncleanly contribute none).
+start(DurableQueues) ->
+    {AllTerms, StartFunState} = rabbit_queue_index:start(DurableQueues),
+    start_msg_store(
+      [Ref || Terms <- AllTerms,
+              Terms /= non_clean_shutdown,
+              begin
+                  Ref = proplists:get_value(persistent_ref, Terms),
+                  Ref =/= undefined
+              end],
+      StartFunState),
+    {ok, AllTerms}.
+
+%% Stop the message stores before the queue index (reverse of start/1).
+stop() ->
+    ok = stop_msg_store(),
+    ok = rabbit_queue_index:stop().
+
+%% Start transient and persistent msg_store children under rabbit_sup.
+%% The transient store needs no recovery, hence the trivial recovery fun.
+start_msg_store(Refs, StartFunState) ->
+    ok = rabbit_sup:start_child(?TRANSIENT_MSG_STORE, rabbit_msg_store,
+                                [?TRANSIENT_MSG_STORE, rabbit_mnesia:dir(),
+                                 undefined, {fun (ok) -> finished end, ok}]),
+    ok = rabbit_sup:start_child(?PERSISTENT_MSG_STORE, rabbit_msg_store,
+                                [?PERSISTENT_MSG_STORE, rabbit_mnesia:dir(),
+                                 Refs, StartFunState]).
+
+stop_msg_store() ->
+    ok = rabbit_sup:stop_child(?PERSISTENT_MSG_STORE),
+    ok = rabbit_sup:stop_child(?TRANSIENT_MSG_STORE).
+
+%% BQ callback: wrap init/6 with the three on-disk confirm callbacks.
+init(Queue, Recover, Callback) ->
+    init(
+      Queue, Recover, Callback,
+      fun (MsgIds, ActionTaken) ->
+              msgs_written_to_disk(Callback, MsgIds, ActionTaken)
+      end,
+      fun (MsgIds) -> msg_indices_written_to_disk(Callback, MsgIds) end,
+      fun (MsgIds) -> msgs_and_indices_written_to_disk(Callback, MsgIds) end).
+
+%% Fresh (non-recovering) init: empty index, zero counts; a persistent
+%% msg_store client only for durable queues.
+init(#amqqueue { name = QueueName, durable = IsDurable }, new,
+     AsyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun) ->
+    IndexState = rabbit_queue_index:init(QueueName,
+                                         MsgIdxOnDiskFun, MsgAndIdxOnDiskFun),
+    init(IsDurable, IndexState, 0, 0, [],
+         case IsDurable of
+             true  -> msg_store_client_init(?PERSISTENT_MSG_STORE,
+                                            MsgOnDiskFun, AsyncCallback);
+             false -> undefined
+         end,
+         msg_store_client_init(?TRANSIENT_MSG_STORE, undefined, AsyncCallback));
+
+%% We can be recovering a transient queue if it crashed
+init(#amqqueue { name = QueueName, durable = IsDurable }, Terms,
+     AsyncCallback, MsgOnDiskFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun) ->
+    {PRef, RecoveryTerms} = process_recovery_terms(Terms),
+    {PersistentClient, ContainsCheckFun} =
+        case IsDurable of
+            true  -> C = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef,
+                                               MsgOnDiskFun, AsyncCallback),
+                     {C, fun (MsgId) when is_binary(MsgId) ->
+                                 rabbit_msg_store:contains(MsgId, C);
+                             (#basic_message{is_persistent = Persistent}) ->
+                                 Persistent
+                         end};
+            false -> {undefined, fun(_MsgId) -> false end}
+        end,
+    TransientClient  = msg_store_client_init(?TRANSIENT_MSG_STORE,
+                                             undefined, AsyncCallback),
+    {DeltaCount, DeltaBytes, IndexState} =
+        rabbit_queue_index:recover(
+          QueueName, RecoveryTerms,
+          rabbit_msg_store:successfully_recovered_state(?PERSISTENT_MSG_STORE),
+          ContainsCheckFun, MsgIdxOnDiskFun, MsgAndIdxOnDiskFun),
+    init(IsDurable, IndexState, DeltaCount, DeltaBytes, RecoveryTerms,
+         PersistentClient, TransientClient).
+
+%% On unclean shutdown the old persistent ref is unusable: mint a fresh
+%% one and discard the terms; otherwise reuse the stored ref if present.
+process_recovery_terms(Terms=non_clean_shutdown) ->
+    {rabbit_guid:gen(), Terms};
+process_recovery_terms(Terms) ->
+    case proplists:get_value(persistent_ref, Terms) of
+        undefined -> {rabbit_guid:gen(), []};
+        PRef      -> {PRef, Terms}
+    end.
+
+%% Clean shutdown: push pending acks to disk, close the persistent
+%% client (keeping its ref for recovery), delete the transient client's
+%% contents, and persist recovery terms into the index.
+terminate(_Reason, State) ->
+    State1 = #vqstate { persistent_count  = PCount,
+                        persistent_bytes  = PBytes,
+                        index_state       = IndexState,
+                        msg_store_clients = {MSCStateP, MSCStateT} } =
+        purge_pending_ack(true, State),
+    PRef = case MSCStateP of
+               undefined -> undefined;
+               _         -> ok = rabbit_msg_store:client_terminate(MSCStateP),
+                            rabbit_msg_store:client_ref(MSCStateP)
+           end,
+    ok = rabbit_msg_store:client_delete_and_terminate(MSCStateT),
+    Terms = [{persistent_ref,   PRef},
+             {persistent_count, PCount},
+             {persistent_bytes, PBytes}],
+    a(State1 #vqstate { index_state       = rabbit_queue_index:terminate(
+                                              Terms, IndexState),
+                        msg_store_clients = undefined }).
+
+%% the only difference between purge and delete is that delete also
+%% needs to delete everything that's been delivered and not ack'd.
+delete_and_terminate(_Reason, State) ->
+    %% Normally when we purge messages we interact with the qi by
+    %% issues delivers and acks for every purged message. In this case
+    %% we don't need to do that, so we just delete the qi.
+    State1 = purge_and_index_reset(State),
+    State2 = #vqstate { msg_store_clients = {MSCStateP, MSCStateT} } =
+        purge_pending_ack_delete_and_terminate(State1),
+    case MSCStateP of
+        undefined -> ok;
+        _         -> rabbit_msg_store:client_delete_and_terminate(MSCStateP)
+    end,
+    rabbit_msg_store:client_delete_and_terminate(MSCStateT),
+    a(State2 #vqstate { msg_store_clients = undefined }).
+
+%% Erase the on-disk index of a queue that crashed (no live state).
+delete_crashed(#amqqueue{name = QName}) ->
+    ok = rabbit_queue_index:erase(QName).
+
+%% Purge all ready messages, returning the old length. maybe_delay/1
+%% injects this module's test delay when the timeout test message is
+%% pending ack in the queue index (see module header).
+purge(State = #vqstate { len = Len, qi_pending_ack= QPA }) ->
+    maybe_delay(QPA),
+    case is_pending_ack_empty(State) of
+        true ->
+            {Len, purge_and_index_reset(State)};
+        false ->
+            {Len, purge_when_pending_acks(State)}
+    end.
+
+%% Drop all pending acks without requeueing them.
+purge_acks(State) -> a(purge_pending_ack(false, State)).
+
+%% BQ publish: enqueue one message, then refresh rates and possibly
+%% page memory out.
+publish(Msg, MsgProps, IsDelivered, ChPid, Flow, State) ->
+    State1 =
+        publish1(Msg, MsgProps, IsDelivered, ChPid, Flow,
+                 fun maybe_write_to_disk/4,
+                 State),
+    a(reduce_memory_use(maybe_update_rates(State1))).
+
+%% Batched publish; ui/1 flushes the accumulated index state afterwards.
+batch_publish(Publishes, ChPid, Flow, State) ->
+    {ChPid, Flow, State1} =
+        lists:foldl(fun batch_publish1/2, {ChPid, Flow, State}, Publishes),
+    State2 = ui(State1),
+    a(reduce_memory_use(maybe_update_rates(State2))).
+
+%% Publish a message that is immediately considered delivered (e.g.
+%% delivered straight to a consumer); returns its ack tag (SeqId).
+publish_delivered(Msg, MsgProps, ChPid, Flow, State) ->
+    {SeqId, State1} =
+        publish_delivered1(Msg, MsgProps, ChPid, Flow,
+                           fun maybe_write_to_disk/4,
+                           State),
+    {SeqId, a(reduce_memory_use(maybe_update_rates(State1)))}.
+
+%% Batched variant of publish_delivered/5; SeqIds are accumulated in
+%% reverse and restored to publish order before returning.
+batch_publish_delivered(Publishes, ChPid, Flow, State) ->
+    {ChPid, Flow, SeqIds, State1} =
+        lists:foldl(fun batch_publish_delivered1/2,
+                    {ChPid, Flow, [], State}, Publishes),
+    State2 = ui(State1),
+    {lists:reverse(SeqIds), a(reduce_memory_use(maybe_update_rates(State2)))}.
+
+discard(_MsgId, _ChPid, _Flow, State) -> State.
+
+%% Hand back, and clear, the set of msg ids confirmed since last call.
+drain_confirmed(State = #vqstate { confirmed = C }) ->
+    case gb_sets:is_empty(C) of
+        true  -> {[], State}; %% common case
+        false -> {gb_sets:to_list(C), State #vqstate {
+                                        confirmed = gb_sets:new() }}
+    end.
+
+dropwhile(Pred, State) ->
+    {MsgProps, State1} =
+        remove_by_predicate(Pred, State),
+    {MsgProps, a(State1)}.
+
+fetchwhile(Pred, Fun, Acc, State) ->
+    {MsgProps, Acc1, State1} =
+        fetch_by_predicate(Pred, Fun, Acc, State),
+    {MsgProps, Acc1, a(State1)}.
+
+%% Pop the head message, loading its body from disk if necessary.
+%% Returns empty, or {Msg, IsDelivered, AckTag}.
+fetch(AckRequired, State) ->
+    case queue_out(State) of
+        {empty, State1} ->
+            {empty, a(State1)};
+        {{value, MsgStatus}, State1} ->
+            %% it is possible that the message wasn't read from disk
+            %% at this point, so read it in.
+            {Msg, State2} = read_msg(MsgStatus, State1),
+            {AckTag, State3} = remove(AckRequired, MsgStatus, State2),
+            {{Msg, MsgStatus#msg_status.is_delivered, AckTag}, a(State3)}
+    end.
+
+%% Like fetch/2 but discards the body: only msg_id and ack tag returned.
+drop(AckRequired, State) ->
+    case queue_out(State) of
+        {empty, State1} ->
+            {empty, a(State1)};
+        {{value, MsgStatus}, State1} ->
+            {AckTag, State2} = remove(AckRequired, MsgStatus, State1),
+            {{MsgStatus#msg_status.msg_id, AckTag}, a(State2)}
+    end.
+
+%% Acknowledge messages by ack tag (SeqId): drop the pending-ack entry,
+%% ack in the index when it was on disk, and remove from the msg store
+%% when the body was stored there. Returns the acked msg ids.
+ack([], State) ->
+    {[], State};
+%% optimisation: this head is essentially a partial evaluation of the
+%% general case below, for the single-ack case.
+ack([SeqId], State) ->
+    {#msg_status { msg_id        = MsgId,
+                   is_persistent = IsPersistent,
+                   msg_in_store  = MsgInStore,
+                   index_on_disk = IndexOnDisk },
+     State1 = #vqstate { index_state       = IndexState,
+                         msg_store_clients = MSCState,
+                         ack_out_counter   = AckOutCount }} =
+        remove_pending_ack(true, SeqId, State),
+    IndexState1 = case IndexOnDisk of
+                      true  -> rabbit_queue_index:ack([SeqId], IndexState);
+                      false -> IndexState
+                  end,
+    case MsgInStore of
+        true  -> ok = msg_store_remove(MSCState, IsPersistent, [MsgId]);
+        false -> ok
+    end,
+    {[MsgId],
+     a(State1 #vqstate { index_state      = IndexState1,
+                         ack_out_counter  = AckOutCount + 1 })};
+ack(AckTags, State) ->
+    {{IndexOnDiskSeqIds, MsgIdsByStore, AllMsgIds},
+     State1 = #vqstate { index_state       = IndexState,
+                         msg_store_clients = MSCState,
+                         ack_out_counter   = AckOutCount }} =
+        lists:foldl(
+          fun (SeqId, {Acc, State2}) ->
+                  {MsgStatus, State3} = remove_pending_ack(true, SeqId, State2),
+                  {accumulate_ack(MsgStatus, Acc), State3}
+          end, {accumulate_ack_init(), State}, AckTags),
+    IndexState1 = rabbit_queue_index:ack(IndexOnDiskSeqIds, IndexState),
+    remove_msgs_by_id(MsgIdsByStore, MSCState),
+    {lists:reverse(AllMsgIds),
+     a(State1 #vqstate { index_state      = IndexState1,
+                         ack_out_counter  = AckOutCount + length(AckTags) })}.
+
+%% Requeue previously-delivered messages by ack tag, merging them back
+%% into q4/q3/delta by seq id. maybe_delay/1 injects the test delay when
+%% the timeout test message is pending ack in the qi (module header).
+requeue(AckTags, #vqstate { mode       = default,
+                            delta      = Delta,
+                            q3         = Q3,
+                            q4         = Q4,
+                            in_counter = InCounter,
+                            len        = Len,
+                            qi_pending_ack = QPA } = State) ->
+    maybe_delay(QPA),
+    {SeqIds,  Q4a, MsgIds,  State1} = queue_merge(lists:sort(AckTags), Q4, [],
+                                                  beta_limit(Q3),
+                                                  fun publish_alpha/2, State),
+    {SeqIds1, Q3a, MsgIds1, State2} = queue_merge(SeqIds, Q3, MsgIds,
+                                                  delta_limit(Delta),
+                                                  fun publish_beta/2, State1),
+    {Delta1, MsgIds2, State3}       = delta_merge(SeqIds1, Delta, MsgIds1,
+                                                  State2),
+    MsgCount = length(MsgIds2),
+    {MsgIds2, a(reduce_memory_use(
+                  maybe_update_rates(
+                    State3 #vqstate { delta      = Delta1,
+                                      q3         = Q3a,
+                                      q4         = Q4a,
+                                      in_counter = InCounter + MsgCount,
+                                      len        = Len + MsgCount })))};
+%% Lazy-mode requeue: q4 is always empty, so only q3 and delta merge.
+requeue(AckTags, #vqstate { mode       = lazy,
+                            delta      = Delta,
+                            q3         = Q3,
+                            in_counter = InCounter,
+                            len        = Len,
+                            qi_pending_ack = QPA } = State) ->
+    maybe_delay(QPA),
+    {SeqIds, Q3a, MsgIds, State1} = queue_merge(lists:sort(AckTags), Q3, [],
+                                                delta_limit(Delta),
+                                                fun publish_beta/2, State),
+    {Delta1, MsgIds1, State2}     = delta_merge(SeqIds, Delta, MsgIds,
+                                                State1),
+    MsgCount = length(MsgIds1),
+    {MsgIds1, a(reduce_memory_use(
+                  maybe_update_rates(
+                    State2 #vqstate { delta      = Delta1,
+                                      q3         = Q3a,
+                                      in_counter = InCounter + MsgCount,
+                                      len        = Len + MsgCount })))}.
+
+%% Fold MsgFun over pending-ack messages identified by AckTags, reading
+%% bodies in from disk where needed.
+ackfold(MsgFun, Acc, State, AckTags) ->
+    {AccN, StateN} =
+        lists:foldl(fun(SeqId, {Acc0, State0}) ->
+                            MsgStatus = lookup_pending_ack(SeqId, State0),
+                            {Msg, State1} = read_msg(MsgStatus, State0),
+                            {MsgFun(Msg, SeqId, Acc0), State1}
+                    end, {Acc, State}, AckTags),
+    {AccN, a(StateN)}.
+
+%% Fold Fun over every message (ready and pending-ack) via per-source
+%% iterators merged in ifold/4.
+fold(Fun, Acc, State = #vqstate{index_state = IndexState}) ->
+    {Its, IndexState1} = lists:foldl(fun inext/2, {[], IndexState},
+                                     [msg_iterator(State),
+                                      disk_ack_iterator(State),
+                                      ram_ack_iterator(State),
+                                      qi_ack_iterator(State)]),
+    ifold(Fun, Acc, Its, State#vqstate{index_state = IndexState1}).
+
+%% Ready-message count; also a test hook: delays when the timeout test
+%% message is pending ack in the qi (see module header).
+len(#vqstate { len = Len, qi_pending_ack = QPA }) ->
+    maybe_delay(QPA),
+    Len.
+
+is_empty(State) -> 0 == len(State).
+
+%% Ready plus unacknowledged messages.
+depth(State) ->
+    len(State) + count_pending_acks(State).
+
+%% Translate a RAM-duration target (seconds) into a message-count
+%% target using current rates; only page out (reduce_memory_use) when
+%% the target shrank.
+set_ram_duration_target(
+  DurationTarget, State = #vqstate {
+                    rates = #rates { in      = AvgIngressRate,
+                                     out     = AvgEgressRate,
+                                     ack_in  = AvgAckIngressRate,
+                                     ack_out = AvgAckEgressRate },
+                    target_ram_count = TargetRamCount }) ->
+    Rate =
+        AvgEgressRate + AvgIngressRate + AvgAckEgressRate + AvgAckIngressRate,
+    TargetRamCount1 =
+        case DurationTarget of
+            infinity  -> infinity;
+            _         -> trunc(DurationTarget * Rate) %% msgs = sec * msgs/sec
+        end,
+    State1 = State #vqstate { target_ram_count = TargetRamCount1 },
+    a(case TargetRamCount1 == infinity orelse
+          (TargetRamCount =/= infinity andalso
+           TargetRamCount1 >= TargetRamCount) of
+          true  -> State1;
+          false -> reduce_memory_use(State1)
+      end).
+
+%% Recalculate rates every ?MSGS_PER_RATE_CALC in+out messages so the
+%% amqqueue process sees reasonably fresh figures.
+maybe_update_rates(State = #vqstate{ in_counter  = InCount,
+                                     out_counter = OutCount })
+  when InCount + OutCount > ?MSGS_PER_RATE_CALC ->
+    update_rates(State);
+maybe_update_rates(State) ->
+    State.
+
+%% Fold the four counters into exponentially-smoothed rates and zero
+%% the counters; timestamp is monotonic (duration, not wall clock).
+update_rates(State = #vqstate{ in_counter      =     InCount,
+                               out_counter     =    OutCount,
+                               ack_in_counter  =  AckInCount,
+                               ack_out_counter = AckOutCount,
+                               rates = #rates{ in        =     InRate,
+                                               out       =    OutRate,
+                                               ack_in    =  AckInRate,
+                                               ack_out   = AckOutRate,
+                                               timestamp = TS }}) ->
+    Now = erlang:monotonic_time(),
+
+    Rates = #rates { in        = update_rate(Now, TS,     InCount,     InRate),
+                     out       = update_rate(Now, TS,    OutCount,    OutRate),
+                     ack_in    = update_rate(Now, TS,  AckInCount,  AckInRate),
+                     ack_out   = update_rate(Now, TS, AckOutCount, AckOutRate),
+                     timestamp = Now },
+
+    State#vqstate{ in_counter      = 0,
+                   out_counter     = 0,
+                   ack_in_counter  = 0,
+                   ack_out_counter = 0,
+                   rates           = Rates }.
+
+%% Moving-average update; keep the old rate unchanged when no time has
+%% elapsed (avoids division by zero).
+update_rate(Now, TS, Count, Rate) ->
+    Time = erlang:convert_time_unit(Now - TS, native, micro_seconds) /
+        ?MICROS_PER_SECOND,
+    if
+        Time == 0 -> Rate;
+        true      -> rabbit_misc:moving_average(Time, ?RATE_AVG_HALF_LIFE,
+                                                Count / Time, Rate)
+    end.
+
+%% Estimate how long (seconds) the current RAM-resident msgs+acks would
+%% last at current rates; infinity when all rates are negligible.
+ram_duration(State) ->
+    State1 = #vqstate { rates = #rates { in      = AvgIngressRate,
+                                         out     = AvgEgressRate,
+                                         ack_in  = AvgAckIngressRate,
+                                         ack_out = AvgAckEgressRate },
+                        ram_msg_count      = RamMsgCount,
+                        ram_msg_count_prev = RamMsgCountPrev,
+                        ram_pending_ack    = RPA,
+                        qi_pending_ack     = QPA,
+                        ram_ack_count_prev = RamAckCountPrev } =
+        update_rates(State),
+
+    RamAckCount = gb_trees:size(RPA) + gb_trees:size(QPA),
+
+    Duration = %% msgs+acks / (msgs+acks/sec) == sec
+        case lists:all(fun (X) -> X < 0.01 end,
+                       [AvgEgressRate, AvgIngressRate,
+                        AvgAckEgressRate, AvgAckIngressRate]) of
+            true  -> infinity;
+            false -> (RamMsgCountPrev + RamMsgCount +
+                          RamAckCount + RamAckCountPrev) /
+                         (4 * (AvgEgressRate + AvgIngressRate +
+                                   AvgAckEgressRate + AvgAckIngressRate))
+        end,
+
+    {Duration, State1}.
+
+%% Map the index's sync requirement onto the BQ timeout protocol.
+needs_timeout(#vqstate { index_state = IndexState }) ->
+    case rabbit_queue_index:needs_sync(IndexState) of
+        confirms -> timed;
+        other    -> idle;
+        false    -> false
+    end.
+
+timeout(State = #vqstate { index_state = IndexState }) ->
+    State #vqstate { index_state = rabbit_queue_index:sync(IndexState) }.
+
+%% Flush the index before hibernating.
+handle_pre_hibernate(State = #vqstate { index_state = IndexState }) ->
+    State #vqstate { index_state = rabbit_queue_index:flush(IndexState) }.
+
+%% Resume paging after a credit_flow bump.
+resume(State) -> a(reduce_memory_use(State)).
+
+msg_rates(#vqstate { rates = #rates { in  = AvgIngressRate,
+                                      out = AvgEgressRate } }) ->
+    {AvgIngressRate, AvgEgressRate}.
+
+%% BQ info/2: one clause per queryable item; unknown items throw
+%% {bad_argument, Item}.
+info(messages_ready_ram, #vqstate{ram_msg_count = RamMsgCount}) ->
+    RamMsgCount;
+info(messages_unacknowledged_ram, #vqstate{ram_pending_ack = RPA,
+                                           qi_pending_ack  = QPA}) ->
+    gb_trees:size(RPA) + gb_trees:size(QPA);
+info(messages_ram, State) ->
+    info(messages_ready_ram, State) + info(messages_unacknowledged_ram, State);
+info(messages_persistent, #vqstate{persistent_count = PersistentCount}) ->
+    PersistentCount;
+info(message_bytes, #vqstate{bytes         = Bytes,
+                             unacked_bytes = UBytes}) ->
+    Bytes + UBytes;
+info(message_bytes_ready, #vqstate{bytes = Bytes}) ->
+    Bytes;
+info(message_bytes_unacknowledged, #vqstate{unacked_bytes = UBytes}) ->
+    UBytes;
+info(message_bytes_ram, #vqstate{ram_bytes = RamBytes}) ->
+    RamBytes;
+info(message_bytes_persistent, #vqstate{persistent_bytes = PersistentBytes}) ->
+    PersistentBytes;
+info(head_message_timestamp, #vqstate{
+          q3               = Q3,
+          q4               = Q4,
+          ram_pending_ack  = RPA,
+          qi_pending_ack   = QPA}) ->
+    head_message_timestamp(Q3, Q4, RPA, QPA);
+info(disk_reads, #vqstate{disk_read_count = Count}) ->
+    Count;
+info(disk_writes, #vqstate{disk_write_count = Count}) ->
+    Count;
+info(backing_queue_status, #vqstate {
+          q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4,
+          mode             = Mode,
+          len              = Len,
+          target_ram_count = TargetRamCount,
+          next_seq_id      = NextSeqId,
+          rates            = #rates { in      = AvgIngressRate,
+                                      out     = AvgEgressRate,
+                                      ack_in  = AvgAckIngressRate,
+                                      ack_out = AvgAckEgressRate }}) ->
+
+    [ {mode                , Mode},
+      {q1                  , ?QUEUE:len(Q1)},
+      {q2                  , ?QUEUE:len(Q2)},
+      {delta               , Delta},
+      {q3                  , ?QUEUE:len(Q3)},
+      {q4                  , ?QUEUE:len(Q4)},
+      {len                 , Len},
+      {target_ram_count    , TargetRamCount},
+      {next_seq_id         , NextSeqId},
+      {avg_ingress_rate    , AvgIngressRate},
+      {avg_egress_rate     , AvgEgressRate},
+      {avg_ack_ingress_rate, AvgAckIngressRate},
+      {avg_ack_egress_rate , AvgAckEgressRate} ];
+info(Item, _) ->
+    throw({bad_argument, Item}).
+
+%% Only invoke funs addressed to this module; anything else is ignored.
+invoke(?MODULE, Fun, State) -> Fun(?MODULE, State);
+invoke(      _,   _, State) -> State.
+
+is_duplicate(_Msg, State) -> {false, State}.
+
+%% Switch between default and lazy modes; no-op when already in Mode.
+set_queue_mode(Mode, State = #vqstate { mode = Mode }) ->
+    State;
+set_queue_mode(lazy, State = #vqstate {
+                                target_ram_count = TargetRamCount }) ->
+    %% To become a lazy queue we need to page everything to disk first.
+    State1 = convert_to_lazy(State),
+    %% restore the original target_ram_count
+    a(State1 #vqstate { mode = lazy, target_ram_count = TargetRamCount });
+set_queue_mode(default, State) ->
+    %% becoming a default queue means loading messages from disk like
+    %% when a queue is recovered.
+    a(maybe_deltas_to_betas(State #vqstate { mode = default }));
+set_queue_mode(_, State) ->
+    State.
+
+%% Pair each published message's id with its ack tag, prepended onto
+%% Accumulator (hence reversed relative to input order).
+zip_msgs_and_acks(Msgs, AckTags, Accumulator, _State) ->
+    lists:foldl(fun ({{#basic_message{ id = Id }, _Props}, AckTag}, Acc) ->
+                        [{Id, AckTag} | Acc]
+                end, Accumulator, lists:zip(Msgs, AckTags)).
+
+%% Page everything to disk (target RAM duration 0) and loop until
+%% delta+q3 account for the whole queue, waiting out msg_store credit
+%% blocks in between.
+convert_to_lazy(State) ->
+    State1 = #vqstate { delta = Delta, q3 = Q3, len = Len } =
+        set_ram_duration_target(0, State),
+    case Delta#delta.count + ?QUEUE:len(Q3) == Len of
+        true ->
+            State1;
+        false ->
+            %% When pushing messages to disk, we might have been
+            %% blocked by the msg_store, so we need to see if we have
+            %% to wait for more credit, and then keep paging messages.
+            %%
+            %% The amqqueue_process could have taken care of this, but
+            %% between the time it receives the bump_credit msg and
+            %% calls BQ:resume to keep paging messages to disk, some
+            %% other request may arrive to the BQ which at this moment
+            %% is not in a proper state for a lazy BQ (unless all
+            %% messages have been paged to disk already).
+            wait_for_msg_store_credit(),
+            convert_to_lazy(State1)
+    end.
+
+%% Block until credit_flow unblocks us (if blocked at all).
+wait_for_msg_store_credit() ->
+    case credit_flow:blocked() of
+        true  -> receive
+                     {bump_credit, Msg} ->
+                         credit_flow:handle_bump_msg(Msg)
+                 end;
+        false -> ok
+    end.
+
+%% Get the Timestamp property of the first msg, if present. This is
+%% the one with the oldest timestamp among the heads of the pending
+%% acks and unread queues. We can't check disk_pending_acks as these
+%% are paged out - we assume some will soon be paged in rather than
+%% forcing it to happen. Pending ack msgs are included as they are
+%% regarded as unprocessed until acked, this also prevents the result
+%% apparently oscillating during repeated rejects. Q3 is only checked
+%% when Q4 is empty as any Q4 msg will be earlier.
+%%
+%% Returns '' (empty atom) when no head message carries a timestamp.
+head_message_timestamp(Q3, Q4, RPA, QPA) ->
+    HeadMsgs = [ HeadMsgStatus#msg_status.msg ||
+                   HeadMsgStatus <-
+                       [ get_qs_head([Q4, Q3]),
+                         get_pa_head(RPA),
+                         get_pa_head(QPA) ],
+                   HeadMsgStatus /= undefined,
+                   HeadMsgStatus#msg_status.msg /= undefined ],
+
+    Timestamps =
+        [Timestamp || HeadMsg <- HeadMsgs,
+                      Timestamp <- [rabbit_basic:extract_timestamp(
+                                      HeadMsg#basic_message.content)],
+                      Timestamp /= undefined
+        ],
+
+    case Timestamps == [] of
+        true  -> '';
+        false -> lists:min(Timestamps)
+    end.
+
+%% Head of the first non-empty queue in Qs; throw/catch is used as an
+%% early exit from the fold.
+get_qs_head(Qs) ->
+    catch lists:foldl(
+            fun (Q, Acc) ->
+                    case get_q_head(Q) of
+                        undefined -> Acc;
+                        Val       -> throw(Val)
+                    end
+            end, undefined, Qs).
+
+get_q_head(Q) ->
+    get_collection_head(Q, fun ?QUEUE:is_empty/1, fun ?QUEUE:peek/1).
+
+%% Smallest-keyed pending ack, i.e. the oldest by seq id.
+get_pa_head(PA) ->
+    get_collection_head(PA, fun gb_trees:is_empty/1, fun gb_trees:smallest/1).
+
+%% Generic "head or undefined" over a collection, given emptiness and
+%% head-accessor funs.
+get_collection_head(Col, IsEmpty, GetVal) ->
+    case IsEmpty(Col) of
+        false ->
+            {_, MsgStatus} = GetVal(Col),
+            MsgStatus;
+        true  -> undefined
+    end.
+
+%%----------------------------------------------------------------------------
+%% Minor helpers
+%%----------------------------------------------------------------------------
+%% 'a' = assert: check the vqstate invariants for the current mode and
+%% return State unchanged. A violated invariant crashes with badmatch.
+a(State = #vqstate { q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4,
+                     mode             = default,
+                     len              = Len,
+                     bytes            = Bytes,
+                     unacked_bytes    = UnackedBytes,
+                     persistent_count = PersistentCount,
+                     persistent_bytes = PersistentBytes,
+                     ram_msg_count    = RamMsgCount,
+                     ram_bytes        = RamBytes}) ->
+    E1 = ?QUEUE:is_empty(Q1),
+    E2 = ?QUEUE:is_empty(Q2),
+    ED = Delta#delta.count == 0,
+    E3 = ?QUEUE:is_empty(Q3),
+    E4 = ?QUEUE:is_empty(Q4),
+    LZ = Len == 0,
+
+    %% if q1 has messages then q3 cannot be empty. See publish/6.
+    true = E1 or not E3,
+    %% if q2 has messages then we have messages in delta (paged to
+    %% disk). See push_alphas_to_betas/2.
+    true = E2 or not ED,
+    %% if delta has messages then q3 cannot be empty. This is enforced
+    %% by paging, where min([?SEGMENT_ENTRY_COUNT, len(q3)]) messages
+    %% are always kept on RAM.
+    true = ED or not E3,
+    %% if the queue length is 0, then q3 and q4 must be empty.
+    true = LZ == (E3 and E4),
+
+    true = Len             >= 0,
+    true = Bytes           >= 0,
+    true = UnackedBytes    >= 0,
+    true = PersistentCount >= 0,
+    true = PersistentBytes >= 0,
+    true = RamMsgCount     >= 0,
+    true = RamMsgCount     =< Len,
+    true = RamBytes        >= 0,
+    true = RamBytes        =< Bytes + UnackedBytes,
+
+    State;
+a(State = #vqstate { q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4,
+                     mode             = lazy,
+                     len              = Len,
+                     bytes            = Bytes,
+                     unacked_bytes    = UnackedBytes,
+                     persistent_count = PersistentCount,
+                     persistent_bytes = PersistentBytes,
+                     ram_msg_count    = RamMsgCount,
+                     ram_bytes        = RamBytes}) ->
+    E1 = ?QUEUE:is_empty(Q1),
+    E2 = ?QUEUE:is_empty(Q2),
+    ED = Delta#delta.count == 0,
+    E3 = ?QUEUE:is_empty(Q3),
+    E4 = ?QUEUE:is_empty(Q4),
+    LZ = Len == 0,
+    L3 = ?QUEUE:len(Q3),
+
+    %% q1 must always be empty, since q1 only gets messages during
+    %% publish, but for lazy queues messages go straight to delta.
+    true = E1,
+
+    %% q2 only gets messages from q1 when push_alphas_to_betas is
+    %% called for a non empty delta, which won't be the case for a
+    %% lazy queue. This means q2 must always be empty.
+    true = E2,
+
+    %% q4 must always be empty, since q1 only gets messages during
+    %% publish, but for lazy queues messages go straight to delta.
+    true = E4,
+
+    %% if the queue is empty, then delta is empty and q3 is empty.
+    true = LZ == (ED and E3),
+
+    %% There should be no messages in q1, q2, and q4
+    true = Delta#delta.count + L3 == Len,
+
+    true = Len             >= 0,
+    true = Bytes           >= 0,
+    true = UnackedBytes    >= 0,
+    true = PersistentCount >= 0,
+    true = PersistentBytes >= 0,
+    true = RamMsgCount     >= 0,
+    true = RamMsgCount     =< Len,
+    true = RamBytes        >= 0,
+    true = RamBytes        =< Bytes + UnackedBytes,
+
+    State.
+
+%% 'd' = delta assertion: a well-formed delta has start + count =< end.
+d(Delta = #delta { start_seq_id = Start, count = Count, end_seq_id = End })
+  when Start + Count =< End ->
+    Delta.
+
+%% 'm' = msg_status assertion: persistent msgs must be indexed on disk,
+%% and the body must be either in RAM or in the store.
+m(MsgStatus = #msg_status { is_persistent = IsPersistent,
+                            msg_in_store  = MsgInStore,
+                            index_on_disk = IndexOnDisk }) ->
+    true = (not IsPersistent) or IndexOnDisk,
+    true = msg_in_ram(MsgStatus) or MsgInStore,
+    MsgStatus.
+
+%% Boolean-to-count helpers used in accounting arithmetic.
+one_if(true ) -> 1;
+one_if(false) -> 0.
+
+cons_if(true,   E, L) -> [E | L];
+cons_if(false, _E, L) -> L.
+
+gb_sets_maybe_insert(false, _Val, Set) -> Set;
+gb_sets_maybe_insert(true,   Val, Set) -> gb_sets:add(Val, Set).
+
+%% Build a fresh #msg_status{} for a newly published message; nothing is
+%% on disk yet, and persist_to decides store vs queue-index embedding.
+msg_status(IsPersistent, IsDelivered, SeqId,
+           Msg = #basic_message {id = MsgId}, MsgProps, IndexMaxSize) ->
+    #msg_status{seq_id        = SeqId,
+                msg_id        = MsgId,
+                msg           = Msg,
+                is_persistent = IsPersistent,
+                is_delivered  = IsDelivered,
+                msg_in_store  = false,
+                index_on_disk = false,
+                persist_to    = determine_persist_to(Msg, MsgProps,
+                                                     IndexMaxSize),
+                msg_props     = MsgProps}.
+
+%% #msg_status{} for a beta read back from the index. First clause: the
+%% body was embedded in the index entry; second: only the id was, so
+%% the body lives in the msg store.
+beta_msg_status({Msg = #basic_message{id = MsgId},
+                 SeqId, MsgProps, IsPersistent, IsDelivered}) ->
+    MS0 = beta_msg_status0(SeqId, MsgProps, IsPersistent, IsDelivered),
+    MS0#msg_status{msg_id       = MsgId,
+                   msg          = Msg,
+                   persist_to   = queue_index,
+                   msg_in_store = false};
+
+beta_msg_status({MsgId, SeqId, MsgProps, IsPersistent, IsDelivered}) ->
+    MS0 = beta_msg_status0(SeqId, MsgProps, IsPersistent, IsDelivered),
+    MS0#msg_status{msg_id       = MsgId,
+                   msg          = undefined,
+                   persist_to   = msg_store,
+                   msg_in_store = true}.
+
+%% Shared fields for both beta_msg_status/1 clauses.
+beta_msg_status0(SeqId, MsgProps, IsPersistent, IsDelivered) ->
+  #msg_status{seq_id        = SeqId,
+              msg           = undefined,
+              is_persistent = IsPersistent,
+              is_delivered  = IsDelivered,
+              index_on_disk = true,
+              msg_props     = MsgProps}.
+
+%% Drop the in-RAM body when it is (also) held in the msg store; bodies
+%% persisted via the queue index must stay in RAM.
+trim_msg_status(MsgStatus) ->
+    case persist_to(MsgStatus) of
+        msg_store   -> MsgStatus#msg_status{msg = undefined};
+        queue_index -> MsgStatus
+    end.
+
+%% Thread Fun through the persistent (true) or transient (false) msg
+%% store client, returning the possibly-updated client pair.
+with_msg_store_state({MSCStateP, MSCStateT},  true, Fun) ->
+    {Result, MSCStateP1} = Fun(MSCStateP),
+    {Result, {MSCStateP1, MSCStateT}};
+with_msg_store_state({MSCStateP, MSCStateT}, false, Fun) ->
+    {Result, MSCStateT1} = Fun(MSCStateT),
+    {Result, {MSCStateP, MSCStateT1}}.
+
+%% As above but asserts the client state is unchanged by Fun.
+with_immutable_msg_store_state(MSCState, IsPersistent, Fun) ->
+    {Res, MSCState} = with_msg_store_state(MSCState, IsPersistent,
+                                           fun (MSCState1) ->
+                                                   {Fun(MSCState1), MSCState1}
+                                           end),
+    Res.
+
+msg_store_client_init(MsgStore, MsgOnDiskFun, Callback) ->
+    msg_store_client_init(MsgStore, rabbit_guid:gen(), MsgOnDiskFun,
+                          Callback).
+
+%% Initialise a msg_store client whose close-FDs callback is routed back
+%% through this module via Callback.
+msg_store_client_init(MsgStore, Ref, MsgOnDiskFun, Callback) ->
+    CloseFDsFun = msg_store_close_fds_fun(MsgStore =:= ?PERSISTENT_MSG_STORE),
+    rabbit_msg_store:client_init(MsgStore, Ref, MsgOnDiskFun,
+                                 fun () -> Callback(?MODULE, CloseFDsFun) end).
+
+msg_store_write(MSCState, IsPersistent, MsgId, Msg) ->
+    with_immutable_msg_store_state(
+      MSCState, IsPersistent,
+      fun (MSCState1) ->
+              rabbit_msg_store:write_flow(MsgId, Msg, MSCState1)
+      end).
+
+msg_store_read(MSCState, IsPersistent, MsgId) ->
+    with_msg_store_state(
+      MSCState, IsPersistent,
+      fun (MSCState1) ->
+              rabbit_msg_store:read(MsgId, MSCState1)
+      end).
+
+msg_store_remove(MSCState, IsPersistent, MsgIds) ->
+    with_immutable_msg_store_state(
+      MSCState, IsPersistent,
+      fun (MCSState1) ->
+              rabbit_msg_store:remove(MsgIds, MCSState1)
+      end).
+
+msg_store_close_fds(MSCState, IsPersistent) ->
+    with_msg_store_state(
+      MSCState, IsPersistent,
+      fun (MSCState1) -> rabbit_msg_store:close_all_indicated(MSCState1) end).
+
+%% Returns a fun suitable for invoke/3 that closes indicated FDs on the
+%% chosen store.
+msg_store_close_fds_fun(IsPersistent) ->
+    fun (?MODULE, State = #vqstate { msg_store_clients = MSCState }) ->
+            {ok, MSCState1} = msg_store_close_fds(MSCState, IsPersistent),
+            State #vqstate { msg_store_clients = MSCState1 }
+    end.
+
+%% Record a delivery in the index only when one actually happened.
+maybe_write_delivered(false, _SeqId, IndexState) ->
+    IndexState;
+maybe_write_delivered(true, SeqId, IndexState) ->
+    rabbit_queue_index:deliver([SeqId], IndexState).
+
+%% Turn queue-index entries into beta msg_statuses. Transient entries
+%% below TransientThreshold predate the last restart: instead of being
+%% requeued they are delivered (if not already) and acked via
+%% DelsAndAcksFun. Entries already pending ack are skipped entirely.
+%% Returns {BetaQueue, RamReadyCount, RamBytes, NewState}; foldr keeps
+%% in_r insertion producing the original (ascending SeqId) order.
+betas_from_index_entries(List, TransientThreshold, DelsAndAcksFun, State) ->
+    {Filtered, Delivers, Acks, RamReadyCount, RamBytes} =
+        lists:foldr(
+          fun ({_MsgOrId, SeqId, _MsgProps, IsPersistent, IsDelivered} = M,
+               {Filtered1, Delivers1, Acks1, RRC, RB} = Acc) ->
+                  case SeqId < TransientThreshold andalso not IsPersistent of
+                      true  -> {Filtered1,
+                                cons_if(not IsDelivered, SeqId, Delivers1),
+                                [SeqId | Acks1], RRC, RB};
+                      false -> MsgStatus = m(beta_msg_status(M)),
+                               HaveMsg = msg_in_ram(MsgStatus),
+                               Size = msg_size(MsgStatus),
+                               case is_msg_in_pending_acks(SeqId, State) of
+                                   false -> {?QUEUE:in_r(MsgStatus, Filtered1),
+                                             Delivers1, Acks1,
+                                             RRC + one_if(HaveMsg),
+                                             RB + one_if(HaveMsg) * Size};
+                                   true  -> Acc %% [0]
+                               end
+                  end
+          end, {?QUEUE:new(), [], [], 0, 0}, List),
+    {Filtered, RamReadyCount, RamBytes, DelsAndAcksFun(Delivers, Acks, State)}.
+%% [0] We don't increase RamBytes here, even though it pertains to
+%% unacked messages too, since if HaveMsg then the message must have
+%% been stored in the QI, thus the message must have been in
+%% qi_pending_ack, thus it must already have been in RAM.
+
+%% True when SeqId is awaiting acknowledgement in any of the three
+%% pending-ack trees (body in RAM, body on disk, or embedded in the
+%% queue index). lists:any/2 short-circuits like the original orelse.
+is_msg_in_pending_acks(SeqId, #vqstate { ram_pending_ack  = RPA,
+                                         disk_pending_ack = DPA,
+                                         qi_pending_ack   = QPA }) ->
+    lists:any(fun (Tree) -> gb_trees:is_defined(SeqId, Tree) end,
+              [RPA, DPA, QPA]).
+
+%% Grow a delta to cover one more SeqId. A blank delta becomes a
+%% single-element one; a SeqId outside the current [start, end) bounds
+%% stretches the corresponding bound; the final clause handles a SeqId
+%% already inside the bounds, where only the count grows. end_seq_id is
+%% exclusive, hence the "+ 1"s.
+expand_delta(SeqId, ?BLANK_DELTA_PATTERN(X)) ->
+    d(#delta { start_seq_id = SeqId, count = 1, end_seq_id = SeqId + 1 });
+expand_delta(SeqId, #delta { start_seq_id = StartSeqId,
+                             count        = Count } = Delta)
+  when SeqId < StartSeqId ->
+    d(Delta #delta { start_seq_id = SeqId, count = Count + 1 });
+expand_delta(SeqId, #delta { count        = Count,
+                             end_seq_id   = EndSeqId } = Delta)
+  when SeqId >= EndSeqId ->
+    d(Delta #delta { count = Count + 1, end_seq_id = SeqId + 1 });
+expand_delta(_SeqId, #delta { count = Count } = Delta) ->
+    d(Delta #delta { count = Count + 1 }).
+
+%%----------------------------------------------------------------------------
+%% Internal major helpers for Public API
+%%----------------------------------------------------------------------------
+
+%% Build the initial #vqstate once the queue index has been recovered.
+%% Terms is either the proplist of recovery terms saved at clean
+%% shutdown (supplying persistent_count / persistent_bytes) or the atom
+%% non_clean_shutdown, in which case the caller-provided counts stand.
+%% The delta initially spans everything between the index bounds; a
+%% final maybe_deltas_to_betas/1 pulls the first batch into q3.
+init(IsDurable, IndexState, DeltaCount, DeltaBytes, Terms,
+     PersistentClient, TransientClient) ->
+    {LowSeqId, NextSeqId, IndexState1} = rabbit_queue_index:bounds(IndexState),
+
+    {DeltaCount1, DeltaBytes1} =
+        case Terms of
+            non_clean_shutdown -> {DeltaCount, DeltaBytes};
+            _                  -> {proplists:get_value(persistent_count,
+                                                       Terms, DeltaCount),
+                                   proplists:get_value(persistent_bytes,
+                                                       Terms, DeltaBytes)}
+        end,
+    %% A zero count (when actually known, i.e. not 'undefined') means
+    %% nothing is left on disk, so start from the blank delta.
+    Delta = case DeltaCount1 == 0 andalso DeltaCount /= undefined of
+                true  -> ?BLANK_DELTA;
+                false -> d(#delta { start_seq_id = LowSeqId,
+                                    count        = DeltaCount1,
+                                    end_seq_id   = NextSeqId })
+            end,
+    Now = erlang:monotonic_time(),
+    IoBatchSize = rabbit_misc:get_env(rabbit, msg_store_io_batch_size,
+                                      ?IO_BATCH_SIZE),
+
+    {ok, IndexMaxSize} = application:get_env(
+                           rabbit, queue_index_embed_msgs_below),
+    State = #vqstate {
+      q1                  = ?QUEUE:new(),
+      q2                  = ?QUEUE:new(),
+      delta               = Delta,
+      q3                  = ?QUEUE:new(),
+      q4                  = ?QUEUE:new(),
+      next_seq_id         = NextSeqId,
+      ram_pending_ack     = gb_trees:empty(),
+      disk_pending_ack    = gb_trees:empty(),
+      qi_pending_ack      = gb_trees:empty(),
+      index_state         = IndexState1,
+      msg_store_clients   = {PersistentClient, TransientClient},
+      durable             = IsDurable,
+      %% everything below NextSeqId predates this incarnation
+      transient_threshold = NextSeqId,
+      qi_embed_msgs_below = IndexMaxSize,
+
+      len                 = DeltaCount1,
+      persistent_count    = DeltaCount1,
+      bytes               = DeltaBytes1,
+      persistent_bytes    = DeltaBytes1,
+
+      target_ram_count    = infinity,
+      ram_msg_count       = 0,
+      ram_msg_count_prev  = 0,
+      ram_ack_count_prev  = 0,
+      ram_bytes           = 0,
+      unacked_bytes       = 0,
+      out_counter         = 0,
+      in_counter          = 0,
+      rates               = blank_rates(Now),
+      msgs_on_disk        = gb_sets:new(),
+      msg_indices_on_disk = gb_sets:new(),
+      unconfirmed         = gb_sets:new(),
+      confirmed           = gb_sets:new(),
+      ack_out_counter     = 0,
+      ack_in_counter      = 0,
+      disk_read_count     = 0,
+      disk_write_count    = 0,
+
+      io_batch_size       = IoBatchSize,
+
+      mode                = default },
+    a(maybe_deltas_to_betas(State)).
+
+%% A fresh #rates record: every rate zeroed, stamped with the supplied
+%% (monotonic) timestamp.
+blank_rates(Timestamp) ->
+    #rates { in        = 0.0,
+             out       = 0.0,
+             ack_in    = 0.0,
+             ack_out   = 0.0,
+             timestamp = Timestamp }.
+
+%% Re-insert a msg_status at the *front* of the queue (used when a
+%% fetched message turns out not to be wanted, e.g. by
+%% collect_by_predicate). Default mode: a bodyless message goes in
+%% front of q3 when q4 is empty; otherwise it must go in front of q4,
+%% which requires reading its body back first (q4 holds alphas).
+in_r(MsgStatus = #msg_status { msg = undefined },
+     State = #vqstate { mode = default, q3 = Q3, q4 = Q4 }) ->
+    case ?QUEUE:is_empty(Q4) of
+        true  -> State #vqstate { q3 = ?QUEUE:in_r(MsgStatus, Q3) };
+        false -> {Msg, State1 = #vqstate { q4 = Q4a }} =
+                     read_msg(MsgStatus, State),
+                 MsgStatus1 = MsgStatus#msg_status{msg = Msg},
+                 stats(ready0, {MsgStatus, MsgStatus1},
+                       State1 #vqstate { q4 = ?QUEUE:in_r(MsgStatus1, Q4a) })
+    end;
+in_r(MsgStatus,
+     State = #vqstate { mode = default, q4 = Q4 }) ->
+    State #vqstate { q4 = ?QUEUE:in_r(MsgStatus, Q4) };
+%% lazy queues: when q3 is empty the message is pushed back to disk and
+%% folded into the delta; otherwise it goes in front of q3.
+in_r(MsgStatus = #msg_status { seq_id = SeqId },
+     State = #vqstate { mode = lazy, q3 = Q3, delta = Delta}) ->
+    case ?QUEUE:is_empty(Q3) of
+        true  ->
+            {_MsgStatus1, State1} =
+                maybe_write_to_disk(true, true, MsgStatus, State),
+            State2 = stats(ready0, {MsgStatus, none}, State1),
+            Delta1 = expand_delta(SeqId, Delta),
+            State2 #vqstate{ delta = Delta1 };
+        false ->
+            State #vqstate { q3 = ?QUEUE:in_r(MsgStatus, Q3) }
+    end.
+
+%% Pop the next ready message. Default mode: q4 first, falling back to
+%% q3 (which may itself pull from the delta). Lazy mode: q3 only.
+%% Returns {{value, MsgStatus}, State} or {empty, State}.
+queue_out(State = #vqstate { mode = default, q4 = Q4 }) ->
+    case ?QUEUE:out(Q4) of
+        {empty, _Q4} ->
+            case fetch_from_q3(State) of
+                {empty, _State1} = Result     -> Result;
+                {loaded, {MsgStatus, State1}} -> {{value, MsgStatus}, State1}
+            end;
+        {{value, MsgStatus}, Q4a} ->
+            {{value, MsgStatus}, State #vqstate { q4 = Q4a }}
+    end;
+%% lazy queues
+queue_out(State = #vqstate { mode = lazy }) ->
+    case fetch_from_q3(State) of
+        {empty, _State1} = Result     -> Result;
+        {loaded, {MsgStatus, State1}} -> {{value, MsgStatus}, State1}
+    end.
+
+%% Fetch the message body for a msg_status, hitting the message store
+%% only when the body is not already in RAM.
+read_msg(#msg_status{msg = undefined,
+                     msg_id = MsgId,
+                     is_persistent = IsPersistent}, State) ->
+    read_msg(MsgId, IsPersistent, State);
+read_msg(#msg_status{msg = Msg}, State) ->
+    {Msg, State}.
+
+%% Read MsgId from the appropriate store; the match on
+%% {ok, #basic_message{}} crashes if the read fails. Bumps
+%% disk_read_count and threads the updated store client back.
+read_msg(MsgId, IsPersistent, State = #vqstate{msg_store_clients = MSCState,
+                                               disk_read_count = Count}) ->
+    {{ok, Msg = #basic_message {}}, MSCState1} =
+        msg_store_read(MSCState, IsPersistent, MsgId),
+    {Msg, State #vqstate {msg_store_clients = MSCState1,
+                          disk_read_count = Count + 1}}.
+
+%% Update the queue's counters/byte totals for one message transition.
+%% Signs describes the ready/unacked count deltas, Statuses the
+%% before/after msg_status (for RAM residency and size).
+stats(Signs, Statuses, State) ->
+    stats0(expand_signs(Signs), expand_statuses(Statuses), State).
+
+%% Expand shorthand signs to {DeltaReady, DeltaUnacked, ReadyMsgPaged}.
+%% ready0: in-place update of a ready message (paging only);
+%% lazy_pub: a publish straight to the delta.
+expand_signs(ready0)   -> {0, 0, true};
+expand_signs(lazy_pub) -> {1, 0, true};
+expand_signs({A, B})   -> {A, B, false}.
+
+%% Expand before/after statuses to {InRamBefore, InRamAfter, MsgStatus};
+%% 'none' marks a side where the message does not exist, 'lazy' a
+%% publish where the body never enters RAM.
+expand_statuses({none, A})    -> {false,         msg_in_ram(A), A};
+expand_statuses({B,    none}) -> {msg_in_ram(B), false,         B};
+expand_statuses({lazy, A})    -> {false        , false,         A};
+expand_statuses({B,    A})    -> {msg_in_ram(B), msg_in_ram(A), B}.
+
+%% In this function at least, we are religious: the variable name
+%% contains "Ready" or "Unacked" iff that is what it counts. If
+%% neither is present it counts both.
+%%
+%% Applies the expanded deltas from stats/3 to len, bytes, ram and
+%% persistent counters in one pass. Byte deltas are the count deltas
+%% scaled by this message's size.
+stats0({DeltaReady, DeltaUnacked, ReadyMsgPaged},
+       {InRamBefore, InRamAfter, MsgStatus},
+       State = #vqstate{len              = ReadyCount,
+                        bytes            = ReadyBytes,
+                        ram_msg_count    = RamReadyCount,
+                        persistent_count = PersistentCount,
+                        unacked_bytes    = UnackedBytes,
+                        ram_bytes        = RamBytes,
+                        persistent_bytes = PersistentBytes}) ->
+    S = msg_size(MsgStatus),
+    DeltaTotal = DeltaReady + DeltaUnacked,
+    %% net change in RAM residency for this message
+    DeltaRam = case {InRamBefore, InRamAfter} of
+                   {false, false} ->  0;
+                   {false, true}  ->  1;
+                   {true,  false} -> -1;
+                   {true,  true}  ->  0
+               end,
+    %% ram_msg_count tracks *ready* messages in RAM only; a pure paging
+    %% event (DeltaReady =:= 0, ReadyMsgPaged) still moves it.
+    DeltaRamReady = case DeltaReady of
+                        1                    -> one_if(InRamAfter);
+                        -1                   -> -one_if(InRamBefore);
+                        0 when ReadyMsgPaged -> DeltaRam;
+                        0                    -> 0
+                    end,
+    DeltaPersistent = DeltaTotal * one_if(MsgStatus#msg_status.is_persistent),
+    State#vqstate{len              = ReadyCount      + DeltaReady,
+                  ram_msg_count    = RamReadyCount   + DeltaRamReady,
+                  persistent_count = PersistentCount + DeltaPersistent,
+                  bytes            = ReadyBytes      + DeltaReady       * S,
+                  unacked_bytes    = UnackedBytes    + DeltaUnacked     * S,
+                  ram_bytes        = RamBytes        + DeltaRam         * S,
+                  persistent_bytes = PersistentBytes + DeltaPersistent  * S}.
+
+%% Payload size recorded in the message properties.
+msg_size(#msg_status{msg_props = #message_properties{size = Size}}) -> Size.
+
+%% True when the message body is currently held in RAM.
+msg_in_ram(#msg_status{msg = Msg}) -> Msg =/= undefined.
+
+%% first param: AckRequired
+%%
+%% Remove one message from the queue proper. With AckRequired = true
+%% the message moves to the pending-ack tables and its SeqId is
+%% returned so the channel can ack it later; with false it is erased
+%% from the message store / queue index outright and 'undefined' is
+%% returned.
+remove(true, MsgStatus = #msg_status {
+               seq_id        = SeqId,
+               is_delivered  = IsDelivered,
+               index_on_disk = IndexOnDisk },
+       State = #vqstate {out_counter       = OutCount,
+                         index_state       = IndexState}) ->
+    %% Mark it delivered if necessary
+    IndexState1 = maybe_write_delivered(
+                    IndexOnDisk andalso not IsDelivered,
+                    SeqId, IndexState),
+
+    State1 = record_pending_ack(
+               MsgStatus #msg_status {
+                 is_delivered = true }, State),
+
+    %% one fewer ready, one more unacked
+    State2 = stats({-1, 1}, {MsgStatus, MsgStatus}, State1),
+
+    {SeqId, maybe_update_rates(
+              State2 #vqstate {out_counter = OutCount + 1,
+                               index_state = IndexState1})};
+
+%% This function body has the same behaviour as remove_queue_entries/3
+%% but instead of removing messages based on a ?QUEUE, this removes
+%% just one message, the one referenced by the MsgStatus provided.
+remove(false, MsgStatus = #msg_status {
+                seq_id        = SeqId,
+                msg_id        = MsgId,
+                is_persistent = IsPersistent,
+                is_delivered  = IsDelivered,
+                msg_in_store  = MsgInStore,
+                index_on_disk = IndexOnDisk },
+       State = #vqstate {out_counter       = OutCount,
+                         index_state       = IndexState,
+                         msg_store_clients = MSCState}) ->
+    %% Mark it delivered if necessary
+    IndexState1 = maybe_write_delivered(
+                    IndexOnDisk andalso not IsDelivered,
+                    SeqId, IndexState),
+
+    %% Remove from msg_store and queue index, if necessary
+    case MsgInStore of
+        true  -> ok = msg_store_remove(MSCState, IsPersistent, [MsgId]);
+        false -> ok
+    end,
+
+    IndexState2 =
+        case IndexOnDisk of
+            true  -> rabbit_queue_index:ack([SeqId], IndexState1);
+            false -> IndexState1
+        end,
+
+    %% one fewer ready, no new unacked
+    State1 = stats({-1, 0}, {MsgStatus, none}, State),
+
+    {undefined, maybe_update_rates(
+                  State1 #vqstate {out_counter = OutCount + 1,
+                                   index_state = IndexState2})}.
+
+%% This function exists as a way to improve dropwhile/2
+%% performance. The idea of having this function is to optimise calls
+%% to rabbit_queue_index by batching delivers and acks, instead of
+%% sending them one by one.
+%%
+%% Instead of removing every message as their are popped from the
+%% queue, it first accumulates them and then removes them by calling
+%% remove_queue_entries/3, since the behaviour of
+%% remove_queue_entries/3 when used with
+%% process_delivers_and_acks_fun(deliver_and_ack) is the same as
+%% calling remove(false, MsgStatus, State).
+%%
+%% remove/3 also updates the out_counter in every call, but here we do
+%% it just once at the end.
+%% Pop-and-drop messages while Pred(MsgProps) holds (see the comment
+%% block above). Returns the first failing message's MsgProps (or
+%% undefined if the queue drained) and the updated state.
+remove_by_predicate(Pred, State = #vqstate {out_counter = OutCount}) ->
+    {MsgProps, QAcc, State1} =
+        collect_by_predicate(Pred, ?QUEUE:new(), State),
+    State2 =
+        remove_queue_entries(
+          QAcc, process_delivers_and_acks_fun(deliver_and_ack), State1),
+    %% maybe_update_rates/1 is called in remove/2 for every
+    %% message. Since we update out_counter only once, we call it just
+    %% there.
+    {MsgProps, maybe_update_rates(
+                 State2 #vqstate {
+                   out_counter = OutCount + ?QUEUE:len(QAcc)})}.
+
+%% This function exists as a way to improve fetchwhile/4
+%% performance. The idea of having this function is to optimise calls
+%% to rabbit_queue_index by batching delivers, instead of sending them
+%% one by one.
+%%
+%% Fun is the function passed to fetchwhile/4 that's
+%% applied to every fetched message and used to build the fetchwhile/4
+%% result accumulator FetchAcc.
+%% Pop messages while Pred(MsgProps) holds, applying Fun to each to
+%% build FetchAcc (see the comment block above). Delivers are issued to
+%% the queue index in a single batched call.
+fetch_by_predicate(Pred, Fun, FetchAcc,
+                   State = #vqstate {
+                              index_state = IndexState,
+                              out_counter = OutCount}) ->
+    {MsgProps, QAcc, State1} =
+        collect_by_predicate(Pred, ?QUEUE:new(), State),
+
+    {Delivers, FetchAcc1, State2} =
+        process_queue_entries(QAcc, Fun, FetchAcc, State1),
+
+    IndexState1 = rabbit_queue_index:deliver(Delivers, IndexState),
+
+    {MsgProps, FetchAcc1, maybe_update_rates(
+                            State2 #vqstate {
+                              index_state = IndexState1,
+                              out_counter = OutCount + ?QUEUE:len(QAcc)})}.
+
+%% We try to do here the same as what remove(true, State) does but
+%% processing several messages at the same time. The idea is to
+%% optimize rabbit_queue_index:deliver/2 calls by sending a list of
+%% SeqIds instead of one by one, thus process_queue_entries1 will
+%% accumulate the required deliveries, will record_pending_ack for
+%% each message, and will update stats, like remove/2 does.
+%%
+%% For the meaning of Fun and FetchAcc arguments see
+%% fetch_by_predicate/4 above.
+%% Fold process_queue_entries1/3 over the collected queue, accumulating
+%% {DeliverSeqIds, FetchAcc, State} (see the comment block above).
+process_queue_entries(Q, Fun, FetchAcc, State) ->
+    ?QUEUE:foldl(fun (MsgStatus, Acc) ->
+                         process_queue_entries1(MsgStatus, Fun, Acc)
+                 end,
+                 {[], FetchAcc, State}, Q).
+
+%% Per-message step: read the body, record it as pending ack, add its
+%% SeqId to the deliver batch when the index needs one, and apply the
+%% caller's Fun — mirroring remove(true, ...) minus the per-message
+%% index writes.
+process_queue_entries1(
+  #msg_status { seq_id = SeqId, is_delivered = IsDelivered,
+                index_on_disk = IndexOnDisk} = MsgStatus,
+  Fun,
+  {Delivers, FetchAcc, State}) ->
+    {Msg, State1} = read_msg(MsgStatus, State),
+    State2 = record_pending_ack(
+               MsgStatus #msg_status {
+                 is_delivered = true }, State1),
+    {cons_if(IndexOnDisk andalso not IsDelivered, SeqId, Delivers),
+     Fun(Msg, SeqId, FetchAcc),
+     stats({-1, 1}, {MsgStatus, MsgStatus}, State2)}.
+
+%% Keep popping the queue while Pred(MsgProps) holds, accumulating the
+%% matches in QAcc. The first non-matching message is pushed back in
+%% front via in_r/2 and its MsgProps returned; 'undefined' when the
+%% queue empties first.
+collect_by_predicate(Pred, QAcc, State) ->
+    case queue_out(State) of
+        {empty, State1} ->
+            {undefined, QAcc, State1};
+        {{value, MsgStatus = #msg_status { msg_props = MsgProps }}, State1} ->
+            case Pred(MsgProps) of
+                true  -> collect_by_predicate(Pred, ?QUEUE:in(MsgStatus, QAcc),
+                                              State1);
+                false -> {MsgProps, QAcc, in_r(MsgStatus, State1)}
+            end
+    end.
+
+%%----------------------------------------------------------------------------
+%% Helpers for Public API purge/1 function
+%%----------------------------------------------------------------------------
+
+%% The difference between purge_when_pending_acks/1
+%% vs. purge_and_index_reset/1 is that the first one issues a deliver
+%% and an ack to the queue index for every message that's being
+%% removed, while the later just resets the queue index state.
+%% Purge while acks are outstanding: the index cannot simply be reset
+%% (it still holds the pending-ack entries), so each removed message is
+%% delivered/acked individually.
+purge_when_pending_acks(State) ->
+    State1 = purge1(process_delivers_and_acks_fun(deliver_and_ack), State),
+    a(State1).
+
+%% Purge with no pending acks: nothing in the index needs preserving,
+%% so skip per-message index writes and reset the index wholesale.
+purge_and_index_reset(State) ->
+    State1 = purge1(process_delivers_and_acks_fun(none), State),
+    a(reset_qi_state(State1)).
+
+%% This function removes messages from each of {q1, q2, q3, q4}.
+%%
+%% With remove_queue_entries/3 q1 and q4 are emptied, while q2 and q3
+%% are specially handled by purge_betas_and_deltas/2.
+%%
+%% purge_betas_and_deltas/2 loads messages from the queue index,
+%% filling up q3 and in some cases moving messages form q2 to q3 while
+%% reseting q2 to an empty queue (see maybe_deltas_to_betas/2). The
+%% messages loaded into q3 are removed by calling
+%% remove_queue_entries/3 until there are no more messages to be read
+%% from the queue index. Messages are read in batches from the queue
+%% index.
+%% Core of purge (see the comment block above): drain q4, then the
+%% betas/deltas (q2/q3/delta), then q1, applying AfterFun to each
+%% batch's delivers and acks.
+purge1(AfterFun, State = #vqstate { q4 = Q4}) ->
+    State1 = remove_queue_entries(Q4, AfterFun, State),
+
+    State2 = #vqstate {q1 = Q1} =
+        purge_betas_and_deltas(AfterFun, State1#vqstate{q4 = ?QUEUE:new()}),
+
+    State3 = remove_queue_entries(Q1, AfterFun, State2),
+
+    a(State3#vqstate{q1 = ?QUEUE:new()}).
+
+%% Swap in a freshly reset queue index state.
+reset_qi_state(State = #vqstate{index_state = QIState}) ->
+    QIState1 = rabbit_queue_index:reset_state(QIState),
+    State#vqstate{index_state = QIState1}.
+
+%% True when no message is awaiting acknowledgement.
+is_pending_ack_empty(State) ->
+    0 =:= count_pending_acks(State).
+
+%% Total pending acks across the three pending-ack trees.
+count_pending_acks(#vqstate { ram_pending_ack  = RPA,
+                              disk_pending_ack = DPA,
+                              qi_pending_ack   = QPA }) ->
+    lists:sum([gb_trees:size(T) || T <- [RPA, DPA, QPA]]).
+
+%% Repeatedly load index entries into q3 (via maybe_deltas_to_betas)
+%% and remove them, until the index yields nothing more. In lazy mode
+%% q3 starts empty, so an initial load is needed before the loop.
+purge_betas_and_deltas(DelsAndAcksFun, State = #vqstate { mode = Mode }) ->
+    State0 = #vqstate { q3 = Q3 } =
+        case Mode of
+            lazy -> maybe_deltas_to_betas(DelsAndAcksFun, State);
+            _    -> State
+        end,
+
+    case ?QUEUE:is_empty(Q3) of
+        true  -> State0;
+        false -> State1 = remove_queue_entries(Q3, DelsAndAcksFun, State0),
+                 purge_betas_and_deltas(DelsAndAcksFun,
+                                        maybe_deltas_to_betas(
+                                          DelsAndAcksFun,
+                                          State1#vqstate{q3 = ?QUEUE:new()}))
+    end.
+
+%% Remove every message in Q: batch message-store removals per store
+%% (the orddict is keyed on IsPersistent) and hand the accumulated
+%% deliver/ack SeqIds to DelsAndAcksFun in one call.
+remove_queue_entries(Q, DelsAndAcksFun,
+                     State = #vqstate{msg_store_clients = MSCState}) ->
+    {MsgIdsByStore, Delivers, Acks, State1} =
+        ?QUEUE:foldl(fun remove_queue_entries1/2,
+                     {orddict:new(), [], [], State}, Q),
+    remove_msgs_by_id(MsgIdsByStore, MSCState),
+    DelsAndAcksFun(Delivers, Acks, State1).
+
+%% Per-message accumulator step: collect store removals and index
+%% deliver/ack SeqIds, and drop the message from the ready stats.
+remove_queue_entries1(
+  #msg_status { msg_id = MsgId, seq_id = SeqId, is_delivered = IsDelivered,
+                msg_in_store = MsgInStore, index_on_disk = IndexOnDisk,
+                is_persistent = IsPersistent} = MsgStatus,
+  {MsgIdsByStore, Delivers, Acks, State}) ->
+    {case MsgInStore of
+         true  -> rabbit_misc:orddict_cons(IsPersistent, MsgId, MsgIdsByStore);
+         false -> MsgIdsByStore
+     end,
+     cons_if(IndexOnDisk andalso not IsDelivered, SeqId, Delivers),
+     cons_if(IndexOnDisk, SeqId, Acks),
+     stats({-1, 0}, {MsgStatus, none}, State)}.
+
+%% Build the DelsAndAcksFun used by the batch-removal helpers:
+%% deliver_and_ack writes the batched delivers then acks to the queue
+%% index; any other tag is a no-op (used when the index will be reset).
+process_delivers_and_acks_fun(deliver_and_ack) ->
+    fun (Delivers, Acks, State = #vqstate { index_state = IndexState }) ->
+            IndexState1 =
+                rabbit_queue_index:ack(
+                  Acks, rabbit_queue_index:deliver(Delivers, IndexState)),
+            State #vqstate { index_state = IndexState1 }
+    end;
+process_delivers_and_acks_fun(_) ->
+    fun (_, _, State) ->
+            State
+    end.
+
+%%----------------------------------------------------------------------------
+%% Internal gubbins for publishing
+%%----------------------------------------------------------------------------
+
+%% Enqueue one message. Default mode: optionally persist (PersistFun
+%% decides, unforced), then append to q1 if q3 is non-empty (ordering:
+%% q3 holds older messages) or to q4 otherwise. A message is only
+%% treated as persistent when the queue itself is durable.
+publish1(Msg = #basic_message { is_persistent = IsPersistent, id = MsgId },
+         MsgProps = #message_properties { needs_confirming = NeedsConfirming },
+         IsDelivered, _ChPid, _Flow, PersistFun,
+         State = #vqstate { q1 = Q1, q3 = Q3, q4 = Q4,
+                            mode                = default,
+                            qi_embed_msgs_below = IndexMaxSize,
+                            next_seq_id         = SeqId,
+                            in_counter          = InCount,
+                            durable             = IsDurable,
+                            unconfirmed         = UC }) ->
+    IsPersistent1 = IsDurable andalso IsPersistent,
+    MsgStatus = msg_status(IsPersistent1, IsDelivered, SeqId, Msg, MsgProps, IndexMaxSize),
+    {MsgStatus1, State1} = PersistFun(false, false, MsgStatus, State),
+    State2 = case ?QUEUE:is_empty(Q3) of
+                 false -> State1 #vqstate { q1 = ?QUEUE:in(m(MsgStatus1), Q1) };
+                 true  -> State1 #vqstate { q4 = ?QUEUE:in(m(MsgStatus1), Q4) }
+             end,
+    InCount1 = InCount + 1,
+    UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
+    stats({1, 0}, {none, MsgStatus1},
+          State2#vqstate{ next_seq_id = SeqId + 1,
+                          in_counter  = InCount1,
+                          unconfirmed = UC1 });
+%% Lazy mode: force the message and its index entry to disk and fold
+%% the SeqId into the delta — the body never stays in RAM.
+publish1(Msg = #basic_message { is_persistent = IsPersistent, id = MsgId },
+         MsgProps = #message_properties { needs_confirming = NeedsConfirming },
+         IsDelivered, _ChPid, _Flow, PersistFun,
+         State = #vqstate { mode                = lazy,
+                            qi_embed_msgs_below = IndexMaxSize,
+                            next_seq_id         = SeqId,
+                            in_counter          = InCount,
+                            durable             = IsDurable,
+                            unconfirmed         = UC,
+                            delta               = Delta }) ->
+    IsPersistent1 = IsDurable andalso IsPersistent,
+    MsgStatus = msg_status(IsPersistent1, IsDelivered, SeqId, Msg, MsgProps, IndexMaxSize),
+    {MsgStatus1, State1} = PersistFun(true, true, MsgStatus, State),
+    Delta1 = expand_delta(SeqId, Delta),
+    UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
+    stats(lazy_pub, {lazy, m(MsgStatus1)},
+          State1#vqstate{ delta       = Delta1,
+                          next_seq_id = SeqId + 1,
+                          in_counter  = InCount + 1,
+                          unconfirmed = UC1 }).
+
+%% Fold step for batch publishing: publish one message using the
+%% batching (pre_publish) index path, threading ChPid/Flow through.
+batch_publish1({Msg, MsgProps, IsDelivered}, {ChPid, Flow, State}) ->
+    {ChPid, Flow, publish1(Msg, MsgProps, IsDelivered, ChPid, Flow,
+                           fun maybe_prepare_write_to_disk/4, State)}.
+
+%% Publish a message that is already considered delivered (e.g. to be
+%% acked later without ever being fetched): it goes straight into the
+%% pending-ack tables rather than any queue. The two clauses differ
+%% only in the force flags passed to PersistFun (lazy forces both the
+%% body and index entry to disk).
+publish_delivered1(Msg = #basic_message { is_persistent = IsPersistent,
+                                          id = MsgId },
+                   MsgProps = #message_properties {
+                     needs_confirming = NeedsConfirming },
+                   _ChPid, _Flow, PersistFun,
+                   State = #vqstate { mode                = default,
+                                      qi_embed_msgs_below = IndexMaxSize,
+                                      next_seq_id         = SeqId,
+                                      out_counter         = OutCount,
+                                      in_counter          = InCount,
+                                      durable             = IsDurable,
+                                      unconfirmed         = UC }) ->
+    IsPersistent1 = IsDurable andalso IsPersistent,
+    MsgStatus = msg_status(IsPersistent1, true, SeqId, Msg, MsgProps, IndexMaxSize),
+    {MsgStatus1, State1} = PersistFun(false, false, MsgStatus, State),
+    State2 = record_pending_ack(m(MsgStatus1), State1),
+    UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
+    %% zero ready, one unacked
+    State3 = stats({0, 1}, {none, MsgStatus1},
+                   State2 #vqstate { next_seq_id = SeqId + 1,
+                                     out_counter = OutCount + 1,
+                                     in_counter  = InCount  + 1,
+                                     unconfirmed = UC1 }),
+    {SeqId, State3};
+publish_delivered1(Msg = #basic_message { is_persistent = IsPersistent,
+                                          id = MsgId },
+                   MsgProps = #message_properties {
+                     needs_confirming = NeedsConfirming },
+                   _ChPid, _Flow, PersistFun,
+                   State = #vqstate { mode                = lazy,
+                                      qi_embed_msgs_below = IndexMaxSize,
+                                      next_seq_id         = SeqId,
+                                      out_counter         = OutCount,
+                                      in_counter          = InCount,
+                                      durable             = IsDurable,
+                                      unconfirmed         = UC }) ->
+    IsPersistent1 = IsDurable andalso IsPersistent,
+    MsgStatus = msg_status(IsPersistent1, true, SeqId, Msg, MsgProps, IndexMaxSize),
+    {MsgStatus1, State1} = PersistFun(true, true, MsgStatus, State),
+    State2 = record_pending_ack(m(MsgStatus1), State1),
+    UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
+    State3 = stats({0, 1}, {none, MsgStatus1},
+                   State2 #vqstate { next_seq_id = SeqId + 1,
+                                     out_counter = OutCount + 1,
+                                     in_counter  = InCount  + 1,
+                                     unconfirmed = UC1 }),
+    {SeqId, State3}.
+
+%% Fold step for batch publish-delivered: accumulates the SeqIds of
+%% the published messages (in reverse publish order).
+batch_publish_delivered1({Msg, MsgProps}, {ChPid, Flow, SeqIds, State}) ->
+    {SeqId, State1} =
+        publish_delivered1(Msg, MsgProps, ChPid, Flow,
+                           fun maybe_prepare_write_to_disk/4,
+                           State),
+    {ChPid, Flow, [SeqId | SeqIds], State1}.
+
+%% Write the message body to the message store when Force is set or the
+%% message is persistent — but only when it persists to the msg_store
+%% (queue_index-embedded bodies are written with the index entry
+%% instead). Idempotent: a body already in the store is left alone.
+maybe_write_msg_to_disk(_Force, MsgStatus = #msg_status {
+                                  msg_in_store = true }, State) ->
+    {MsgStatus, State};
+maybe_write_msg_to_disk(Force, MsgStatus = #msg_status {
+                                 msg = Msg, msg_id = MsgId,
+                                 is_persistent = IsPersistent },
+                        State = #vqstate{ msg_store_clients = MSCState,
+                                          disk_write_count  = Count})
+  when Force orelse IsPersistent ->
+    case persist_to(MsgStatus) of
+        msg_store   -> ok = msg_store_write(MSCState, IsPersistent, MsgId,
+                                            prepare_to_store(Msg)),
+                       {MsgStatus#msg_status{msg_in_store = true},
+                        State#vqstate{disk_write_count = Count + 1}};
+        queue_index -> {MsgStatus, State}
+    end;
+maybe_write_msg_to_disk(_Force, MsgStatus, State) ->
+    {MsgStatus, State}.
+
+%% Due to certain optimizations made inside
+%% rabbit_queue_index:pre_publish/7 we need to have two separate
+%% functions for index persistence. This one is only used when paging
+%% during memory pressure. We didn't want to modify
+%% maybe_write_index_to_disk/3 because that function is used in other
+%% places.
+%% Batched variant of maybe_write_index_to_disk/3 (see comment block
+%% above): uses rabbit_queue_index:pre_publish/7 and defers the
+%% 'delivered' marker to the index's own batching. Idempotent when the
+%% entry is already on disk; a no-op unless forced or persistent.
+maybe_batch_write_index_to_disk(_Force,
+                                MsgStatus = #msg_status {
+                                  index_on_disk = true }, State) ->
+    {MsgStatus, State};
+maybe_batch_write_index_to_disk(Force,
+                                MsgStatus = #msg_status {
+                                  msg           = Msg,
+                                  msg_id        = MsgId,
+                                  seq_id        = SeqId,
+                                  is_persistent = IsPersistent,
+                                  is_delivered  = IsDelivered,
+                                  msg_props     = MsgProps},
+                                State = #vqstate {
+                                           target_ram_count = TargetRamCount,
+                                           disk_write_count = DiskWriteCount,
+                                           index_state      = IndexState})
+  when Force orelse IsPersistent ->
+    %% embedded messages are written (and counted) with the index entry
+    {MsgOrId, DiskWriteCount1} =
+        case persist_to(MsgStatus) of
+            msg_store   -> {MsgId, DiskWriteCount};
+            queue_index -> {prepare_to_store(Msg), DiskWriteCount + 1}
+        end,
+    IndexState1 = rabbit_queue_index:pre_publish(
+                    MsgOrId, SeqId, MsgProps, IsPersistent, IsDelivered,
+                    TargetRamCount, IndexState),
+    {MsgStatus#msg_status{index_on_disk = true},
+     State#vqstate{index_state      = IndexState1,
+                   disk_write_count = DiskWriteCount1}};
+maybe_batch_write_index_to_disk(_Force, MsgStatus, State) ->
+    {MsgStatus, State}.
+
+%% Write this message's queue index entry (and, for queue_index
+%% persistence, its embedded body) when forced or persistent, also
+%% recording 'delivered' immediately if applicable. Idempotent when
+%% the entry is already on disk.
+maybe_write_index_to_disk(_Force, MsgStatus = #msg_status {
+                                    index_on_disk = true }, State) ->
+    {MsgStatus, State};
+maybe_write_index_to_disk(Force, MsgStatus = #msg_status {
+                                   msg           = Msg,
+                                   msg_id        = MsgId,
+                                   seq_id        = SeqId,
+                                   is_persistent = IsPersistent,
+                                   is_delivered  = IsDelivered,
+                                   msg_props     = MsgProps},
+                          State = #vqstate{target_ram_count = TargetRamCount,
+                                           disk_write_count = DiskWriteCount,
+                                           index_state      = IndexState})
+  when Force orelse IsPersistent ->
+    {MsgOrId, DiskWriteCount1} =
+        case persist_to(MsgStatus) of
+            msg_store   -> {MsgId, DiskWriteCount};
+            queue_index -> {prepare_to_store(Msg), DiskWriteCount + 1}
+        end,
+    IndexState1 = rabbit_queue_index:publish(
+                    MsgOrId, SeqId, MsgProps, IsPersistent, TargetRamCount,
+                    IndexState),
+    IndexState2 = maybe_write_delivered(IsDelivered, SeqId, IndexState1),
+    {MsgStatus#msg_status{index_on_disk = true},
+     State#vqstate{index_state      = IndexState2,
+                   disk_write_count = DiskWriteCount1}};
+
+maybe_write_index_to_disk(_Force, MsgStatus, State) ->
+    {MsgStatus, State}.
+
+%% Persist body then index entry, each under its own force flag
+%% (immediate index publish path).
+maybe_write_to_disk(ForceMsg, ForceIndex, MsgStatus, State) ->
+    {MsgStatus1, State1} = maybe_write_msg_to_disk(ForceMsg, MsgStatus, State),
+    maybe_write_index_to_disk(ForceIndex, MsgStatus1, State1).
+
+%% As above, but using the batching (pre_publish) index path.
+maybe_prepare_write_to_disk(ForceMsg, ForceIndex, MsgStatus, State) ->
+    {MsgStatus1, State1} = maybe_write_msg_to_disk(ForceMsg, MsgStatus, State),
+    maybe_batch_write_index_to_disk(ForceIndex, MsgStatus1, State1).
+
+%% Decide whether a message's body should live in the message store or
+%% be embedded in the queue index, based on its (estimated) encoded
+%% size versus the queue_index_embed_msgs_below limit.
+determine_persist_to(#basic_message{
+                        content = #content{properties     = Props,
+                                           properties_bin = PropsBin}},
+                     #message_properties{size = BodySize},
+                     IndexMaxSize) ->
+    %% The >= is so that you can set the env to 0 and never persist
+    %% to the index.
+    %%
+    %% We want this to be fast, so we avoid size(term_to_binary())
+    %% here, or using the term size estimation from truncate.erl, both
+    %% of which are too slow. So instead, if the message body size
+    %% goes over the limit then we avoid any other checks.
+    %%
+    %% If it doesn't we need to decide if the properties will push
+    %% it past the limit. If we have the encoded properties (usual
+    %% case) we can just check their size. If we don't (message came
+    %% via the direct client), we make a guess based on the number of
+    %% headers.
+    case BodySize >= IndexMaxSize of
+        true  -> msg_store;
+        false -> Est = case is_binary(PropsBin) of
+                           true  -> BodySize + size(PropsBin);
+                           false -> #'P_basic'{headers = Hs} = Props,
+                                    case Hs of
+                                        undefined -> 0;
+                                        _         -> length(Hs)
+                                    end * ?HEADER_GUESS_SIZE + BodySize
+                       end,
+                 case Est >= IndexMaxSize of
+                     true  -> msg_store;
+                     false -> queue_index
+                 end
+    end.
+
+%% The persistence target (msg_store | queue_index) chosen for this
+%% message at msg_status creation time.
+persist_to(#msg_status{persist_to = To}) -> To.
+
+%% Strip the message down before writing it to disk.
+prepare_to_store(Msg) ->
+    Msg#basic_message{
+      %% don't persist any recoverable decoded properties
+      content = rabbit_binary_parser:clear_decoded_content(
+                  Msg #basic_message.content)}.
+
+%%----------------------------------------------------------------------------
+%% Internal gubbins for acks
+%%----------------------------------------------------------------------------
+
+%% File a msg_status into exactly one of the three pending-ack trees:
+%% disk_pending_ack when the body is not in RAM, qi_pending_ack when
+%% the body is embedded in the queue index, ram_pending_ack otherwise.
+record_pending_ack(#msg_status { seq_id = SeqId } = MsgStatus,
+                   State = #vqstate { ram_pending_ack  = RPA,
+                                      disk_pending_ack = DPA,
+                                      qi_pending_ack   = QPA,
+                                      ack_in_counter   = AckInCount}) ->
+    Insert = fun (Tree) -> gb_trees:insert(SeqId, MsgStatus, Tree) end,
+    {RPA1, DPA1, QPA1} =
+        case {msg_in_ram(MsgStatus), persist_to(MsgStatus)} of
+            {false, _}           -> {RPA, Insert(DPA), QPA};
+            {_,     queue_index} -> {RPA, DPA, Insert(QPA)};
+            {_,     msg_store}   -> {Insert(RPA), DPA, QPA}
+        end,
+    State #vqstate { ram_pending_ack  = RPA1,
+                     disk_pending_ack = DPA1,
+                     qi_pending_ack   = QPA1,
+                     ack_in_counter   = AckInCount + 1}.
+
+%% Find the msg_status for a pending-ack SeqId, trying the RAM, disk
+%% and queue-index trees in turn; crashes (gb_trees:get) if absent
+%% from all three.
+lookup_pending_ack(SeqId, #vqstate { ram_pending_ack  = RPA,
+                                     disk_pending_ack = DPA,
+                                     qi_pending_ack   = QPA}) ->
+    case gb_trees:lookup(SeqId, RPA) of
+        {value, V} -> V;
+        none       -> case gb_trees:lookup(SeqId, DPA) of
+                          {value, V} -> V;
+                          none       -> gb_trees:get(SeqId, QPA)
+                      end
+    end.
+
+%% First parameter = UpdateStats
+remove_pending_ack(true, SeqId, State) ->
+ {MsgStatus, State1} = remove_pending_ack(false, SeqId, State),
+ {MsgStatus, stats({0, -1}, {MsgStatus, none}, State1)};
+remove_pending_ack(false, SeqId, State = #vqstate{ram_pending_ack = RPA,
+ disk_pending_ack = DPA,
+ qi_pending_ack = QPA}) ->
+ case gb_trees:lookup(SeqId, RPA) of
+ {value, V} -> RPA1 = gb_trees:delete(SeqId, RPA),
+ {V, State #vqstate { ram_pending_ack = RPA1 }};
+ none -> case gb_trees:lookup(SeqId, DPA) of
+ {value, V} ->
+ DPA1 = gb_trees:delete(SeqId, DPA),
+ {V, State#vqstate{disk_pending_ack = DPA1}};
+ none ->
+ QPA1 = gb_trees:delete(SeqId, QPA),
+ {gb_trees:get(SeqId, QPA),
+ State#vqstate{qi_pending_ack = QPA1}}
+ end
+ end.
+
+%% Drop all pending acks. With KeepPersistent = true only transient
+%% message bodies are removed from the store and the index is left
+%% untouched; otherwise on-disk index entries are acked and all bodies
+%% removed.
+purge_pending_ack(KeepPersistent,
+                  State = #vqstate { index_state       = IndexState,
+                                     msg_store_clients = MSCState }) ->
+    {IndexOnDiskSeqIds, MsgIdsByStore, State1} = purge_pending_ack1(State),
+    case KeepPersistent of
+        true  -> remove_transient_msgs_by_id(MsgIdsByStore, MSCState),
+                 State1;
+        false -> IndexState1 =
+                     rabbit_queue_index:ack(IndexOnDiskSeqIds, IndexState),
+                 remove_msgs_by_id(MsgIdsByStore, MSCState),
+                 State1 #vqstate { index_state = IndexState1 }
+    end.
+
+%% As above but for queue deletion: the whole index is deleted, so no
+%% per-SeqId acks are needed.
+purge_pending_ack_delete_and_terminate(
+  State = #vqstate { index_state       = IndexState,
+                     msg_store_clients = MSCState }) ->
+    {_, MsgIdsByStore, State1} = purge_pending_ack1(State),
+    IndexState1 = rabbit_queue_index:delete_and_terminate(IndexState),
+    remove_msgs_by_id(MsgIdsByStore, MSCState),
+    State1 #vqstate { index_state = IndexState1 }.
+
+%% Fold accumulate_ack/2 over all three pending-ack trees, emptying
+%% them and returning the on-disk SeqIds plus per-store msg ids.
+purge_pending_ack1(State = #vqstate { ram_pending_ack   = RPA,
+                                      disk_pending_ack  = DPA,
+                                      qi_pending_ack    = QPA }) ->
+    F = fun (_SeqId, MsgStatus, Acc) -> accumulate_ack(MsgStatus, Acc) end,
+    {IndexOnDiskSeqIds, MsgIdsByStore, _AllMsgIds} =
+        rabbit_misc:gb_trees_fold(
+          F, rabbit_misc:gb_trees_fold(
+               F,  rabbit_misc:gb_trees_fold(
+                     F, accumulate_ack_init(), RPA), DPA), QPA),
+    State1 = State #vqstate { ram_pending_ack  = gb_trees:empty(),
+                              disk_pending_ack = gb_trees:empty(),
+                              qi_pending_ack   = gb_trees:empty()},
+    {IndexOnDiskSeqIds, MsgIdsByStore, State1}.
+
+%% MsgIdsByStore is an orddict with two keys:
+%%
+%% true: holds a list of Persistent Message Ids.
+%% false: holds a list of Transient Message Ids.
+%%
+%% When we call orddict:to_list/1 we get two sets of msg ids, where
+%% IsPersistent is either true for persistent messages or false for
+%% transient ones. The msg_store_remove/3 function takes this boolean
+%% flag to determine from which store the messages should be removed
+%% from.
+%% Remove every accumulated message id from its store (see the comment
+%% block above for the MsgIdsByStore orddict layout).
+remove_msgs_by_id(MsgIdsByStore, MSCState) ->
+    [ok = msg_store_remove(MSCState, IsPersistent, MsgIds)
+     || {IsPersistent, MsgIds} <- orddict:to_list(MsgIdsByStore)].
+
+%% As above, but touching only the transient store (key 'false').
+remove_transient_msgs_by_id(MsgIdsByStore, MSCState) ->
+    case orddict:find(false, MsgIdsByStore) of
+        error        -> ok;
+        {ok, MsgIds} -> ok = msg_store_remove(MSCState, false, MsgIds)
+    end.
+
+%% Empty accumulator: {IndexOnDiskSeqIds, MsgIdsByStore, AllMsgIds}.
+accumulate_ack_init() -> {[], orddict:new(), []}.
+
+%% Fold step: collect the SeqId (if its index entry is on disk), the
+%% msg id per store (if the body is in a store), and every msg id.
+accumulate_ack(#msg_status { seq_id        = SeqId,
+                             msg_id        = MsgId,
+                             is_persistent = IsPersistent,
+                             msg_in_store  = MsgInStore,
+                             index_on_disk = IndexOnDisk },
+               {IndexOnDiskSeqIdsAcc, MsgIdsByStore, AllMsgIds}) ->
+    {cons_if(IndexOnDisk, SeqId, IndexOnDiskSeqIdsAcc),
+     case MsgInStore of
+         true  -> rabbit_misc:orddict_cons(IsPersistent, MsgId, MsgIdsByStore);
+         false -> MsgIdsByStore
+     end,
+     [MsgId | AllMsgIds]}.
+
+%%----------------------------------------------------------------------------
+%% Internal plumbing for confirms (aka publisher acks)
+%%----------------------------------------------------------------------------
+
+%% Move MsgIdSet from the awaiting-confirm sets into 'confirmed':
+%% the ids are removed from the on-disk tracking sets and from
+%% 'unconfirmed', and added to 'confirmed' for later emission.
+record_confirms(MsgIdSet, State = #vqstate { msgs_on_disk        = MOD,
+                                             msg_indices_on_disk = MIOD,
+                                             unconfirmed         = UC,
+                                             confirmed           = C }) ->
+    State #vqstate {
+      msgs_on_disk        = rabbit_misc:gb_sets_difference(MOD,  MsgIdSet),
+      msg_indices_on_disk = rabbit_misc:gb_sets_difference(MIOD, MsgIdSet),
+      unconfirmed         = rabbit_misc:gb_sets_difference(UC,   MsgIdSet),
+      confirmed           = gb_sets:union(C, MsgIdSet) }.
+
+%% Message-store write notification. 'ignored' (write not needed, e.g.
+%% already present) confirms immediately; 'written' confirms only the
+%% ids whose index entries are also on disk, remembering the rest in
+%% msgs_on_disk. Runs via Callback in the queue process context.
+msgs_written_to_disk(Callback, MsgIdSet, ignored) ->
+    Callback(?MODULE,
+             fun (?MODULE, State) -> record_confirms(MsgIdSet, State) end);
+msgs_written_to_disk(Callback, MsgIdSet, written) ->
+    Callback(?MODULE,
+             fun (?MODULE, State = #vqstate { msgs_on_disk        = MOD,
+                                              msg_indices_on_disk = MIOD,
+                                              unconfirmed         = UC }) ->
+                     Confirmed = gb_sets:intersection(UC, MsgIdSet),
+                     record_confirms(gb_sets:intersection(MsgIdSet, MIOD),
+                                     State #vqstate {
+                                       msgs_on_disk =
+                                           gb_sets:union(MOD, Confirmed) })
+             end).
+
+%% Queue-index write notification: the mirror image of the 'written'
+%% clause above — confirm ids whose bodies are already on disk and
+%% remember the rest in msg_indices_on_disk.
+msg_indices_written_to_disk(Callback, MsgIdSet) ->
+    Callback(?MODULE,
+             fun (?MODULE, State = #vqstate { msgs_on_disk        = MOD,
+                                              msg_indices_on_disk = MIOD,
+                                              unconfirmed         = UC }) ->
+                     Confirmed = gb_sets:intersection(UC, MsgIdSet),
+                     record_confirms(gb_sets:intersection(MsgIdSet, MOD),
+                                     State #vqstate {
+                                       msg_indices_on_disk =
+                                           gb_sets:union(MIOD, Confirmed) })
+             end).
+
+%% Both body and index entry written in one go (embedded messages):
+%% confirm immediately.
+msgs_and_indices_written_to_disk(Callback, MsgIdSet) ->
+    Callback(?MODULE,
+             fun (?MODULE, State) -> record_confirms(MsgIdSet, State) end).
+
+%%----------------------------------------------------------------------------
+%% Internal plumbing for requeue
+%%----------------------------------------------------------------------------
+
+%% Requeue a message as an alpha (fully in RAM), reading the body from
+%% disk first if it is not already in memory. stats({1, -1}, ...)
+%% moves one message from unacked back to ready.
+publish_alpha(#msg_status { msg = undefined } = MsgStatus, State) ->
+ {Msg, State1} = read_msg(MsgStatus, State),
+ MsgStatus1 = MsgStatus#msg_status { msg = Msg },
+ {MsgStatus1, stats({1, -1}, {MsgStatus, MsgStatus1}, State1)};
+publish_alpha(MsgStatus, State) ->
+ {MsgStatus, stats({1, -1}, {MsgStatus, MsgStatus}, State)}.
+
+%% Requeue a message as a beta: ensure it is persisted, then trim the
+%% in-RAM body via trim_msg_status/1.
+publish_beta(MsgStatus, State) ->
+ {MsgStatus1, State1} = maybe_write_to_disk(true, false, MsgStatus, State),
+ MsgStatus2 = m(trim_msg_status(MsgStatus1)),
+ {MsgStatus2, stats({1, -1}, {MsgStatus, MsgStatus2}, State1)}.
+
+%% Rebuild queue, inserting sequence ids to maintain ordering
+queue_merge(SeqIds, Q, MsgIds, Limit, PubFun, State) ->
+ queue_merge(SeqIds, Q, ?QUEUE:new(), MsgIds,
+ Limit, PubFun, State).
+
+%% Merge the sorted SeqIds (pending acks being requeued) with the
+%% existing queue Q, up to Limit; Front accumulates the merged result.
+queue_merge([SeqId | Rest] = SeqIds, Q, Front, MsgIds,
+ Limit, PubFun, State)
+ when Limit == undefined orelse SeqId < Limit ->
+ case ?QUEUE:out(Q) of
+ {{value, #msg_status { seq_id = SeqIdQ } = MsgStatus}, Q1}
+ when SeqIdQ < SeqId ->
+ %% enqueue from the remaining queue
+ queue_merge(SeqIds, Q1, ?QUEUE:in(MsgStatus, Front), MsgIds,
+ Limit, PubFun, State);
+ {_, _Q1} ->
+ %% enqueue from the remaining list of sequence ids
+ {MsgStatus, State1} = msg_from_pending_ack(SeqId, State),
+ {#msg_status { msg_id = MsgId } = MsgStatus1, State2} =
+ PubFun(MsgStatus, State1),
+ queue_merge(Rest, Q, ?QUEUE:in(MsgStatus1, Front), [MsgId | MsgIds],
+ Limit, PubFun, State2)
+ end;
+queue_merge(SeqIds, Q, Front, MsgIds,
+ _Limit, _PubFun, State) ->
+ {SeqIds, ?QUEUE:join(Front, Q), MsgIds, State}.
+
+%% Fold requeued seq ids into delta, writing each message (body and
+%% index) to disk since deltas live entirely on disk.
+delta_merge([], Delta, MsgIds, State) ->
+ {Delta, MsgIds, State};
+delta_merge(SeqIds, Delta, MsgIds, State) ->
+ lists:foldl(fun (SeqId, {Delta0, MsgIds0, State0}) ->
+ {#msg_status { msg_id = MsgId } = MsgStatus, State1} =
+ msg_from_pending_ack(SeqId, State0),
+ {_MsgStatus, State2} =
+ maybe_write_to_disk(true, true, MsgStatus, State1),
+ {expand_delta(SeqId, Delta0), [MsgId | MsgIds0],
+ stats({1, -1}, {MsgStatus, none}, State2)}
+ end, {Delta, MsgIds, State}, SeqIds).
+
+%% Mostly opposite of record_pending_ack/2
+msg_from_pending_ack(SeqId, State) ->
+ {#msg_status { msg_props = MsgProps } = MsgStatus, State1} =
+ remove_pending_ack(false, SeqId, State),
+ {MsgStatus #msg_status {
+ msg_props = MsgProps #message_properties { needs_confirming = false } },
+ State1}.
+
+%% Smallest seq id in Q, or 'undefined' when Q is empty.
+beta_limit(Q) ->
+ case ?QUEUE:peek(Q) of
+ {value, #msg_status { seq_id = SeqId }} -> SeqId;
+ empty -> undefined
+ end.
+
+%% Start seq id of a non-blank delta, or 'undefined' for a blank one.
+delta_limit(?BLANK_DELTA_PATTERN(_X)) -> undefined;
+delta_limit(#delta { start_seq_id = StartSeqId }) -> StartSeqId.
+
+%%----------------------------------------------------------------------------
+%% Iterator
+%%----------------------------------------------------------------------------
+
+%% Iterators over the three pending-ack trees; each yields
+%% {ack, GbTreesIterator} consumed by next/2 below.
+ram_ack_iterator(State) ->
+ {ack, gb_trees:iterator(State#vqstate.ram_pending_ack)}.
+
+disk_ack_iterator(State) ->
+ {ack, gb_trees:iterator(State#vqstate.disk_pending_ack)}.
+
+qi_ack_iterator(State) ->
+ {ack, gb_trees:iterator(State#vqstate.qi_pending_ack)}.
+
+msg_iterator(State) -> istate(start, State).
+
+%% Iteration order over the sub-queues: q4, q3, delta, q2, q1.
+istate(start, State) -> {q4, State#vqstate.q4, State};
+istate(q4, State) -> {q3, State#vqstate.q3, State};
+istate(q3, State) -> {delta, State#vqstate.delta, State};
+istate(delta, State) -> {q2, State#vqstate.q2, State};
+istate(q2, State) -> {q1, State#vqstate.q1, State};
+istate(q1, _State) -> done.
+
+%% Advance an iterator, returning {value, MsgStatus, Unacked, Next,
+%% IndexState} or {empty, IndexState}. Delta entries are read from the
+%% queue index one segment-bounded chunk at a time, skipping messages
+%% that are currently pending ack.
+next({ack, It}, IndexState) ->
+ case gb_trees:next(It) of
+ none -> {empty, IndexState};
+ {_SeqId, MsgStatus, It1} -> Next = {ack, It1},
+ {value, MsgStatus, true, Next, IndexState}
+ end;
+next(done, IndexState) -> {empty, IndexState};
+next({delta, #delta{start_seq_id = SeqId,
+ end_seq_id = SeqId}, State}, IndexState) ->
+ next(istate(delta, State), IndexState);
+next({delta, #delta{start_seq_id = SeqId,
+ end_seq_id = SeqIdEnd} = Delta, State}, IndexState) ->
+ SeqIdB = rabbit_queue_index:next_segment_boundary(SeqId),
+ SeqId1 = lists:min([SeqIdB, SeqIdEnd]),
+ {List, IndexState1} = rabbit_queue_index:read(SeqId, SeqId1, IndexState),
+ next({delta, Delta#delta{start_seq_id = SeqId1}, List, State}, IndexState1);
+next({delta, Delta, [], State}, IndexState) ->
+ next({delta, Delta, State}, IndexState);
+next({delta, Delta, [{_, SeqId, _, _, _} = M | Rest], State}, IndexState) ->
+ case is_msg_in_pending_acks(SeqId, State) of
+ false -> Next = {delta, Delta, Rest, State},
+ {value, beta_msg_status(M), false, Next, IndexState};
+ true -> next({delta, Delta, Rest, State}, IndexState)
+ end;
+next({Key, Q, State}, IndexState) ->
+ case ?QUEUE:out(Q) of
+ {empty, _Q} -> next(istate(Key, State), IndexState);
+ {{value, MsgStatus}, QN} -> Next = {Key, QN, State},
+ {value, MsgStatus, false, Next, IndexState}
+ end.
+
+%% Step one iterator and, if it yields a value, push it onto the list
+%% of live iterators used by ifold/4.
+inext(It, {Its, IndexState}) ->
+ case next(It, IndexState) of
+ {empty, IndexState1} ->
+ {Its, IndexState1};
+ {value, MsgStatus1, Unacked, It1, IndexState1} ->
+ {[{MsgStatus1, Unacked, It1} | Its], IndexState1}
+ end.
+
+%% Fold Fun over all live iterators in ascending seq-id order; Fun
+%% returns {stop, Acc} or {cont, Acc}. Note the sort is O(n log n) per
+%% step but n is the (small) number of iterators, not messages.
+ifold(_Fun, Acc, [], State) ->
+ {Acc, State};
+ifold(Fun, Acc, Its, State) ->
+ [{MsgStatus, Unacked, It} | Rest] =
+ lists:sort(fun ({#msg_status{seq_id = SeqId1}, _, _},
+ {#msg_status{seq_id = SeqId2}, _, _}) ->
+ SeqId1 =< SeqId2
+ end, Its),
+ {Msg, State1} = read_msg(MsgStatus, State),
+ case Fun(Msg, MsgStatus#msg_status.msg_props, Unacked, Acc) of
+ {stop, Acc1} ->
+ {Acc1, State};
+ {cont, Acc1} ->
+ {Its1, IndexState1} = inext(It, {Rest, State1#vqstate.index_state}),
+ ifold(Fun, Acc1, Its1, State1#vqstate{index_state = IndexState1})
+ end.
+
+%%----------------------------------------------------------------------------
+%% Phase changes
+%%----------------------------------------------------------------------------
+
+%% Push messages and pending acks out of RAM until we are within
+%% target_ram_count. No-op when the target is 'infinity'.
+reduce_memory_use(State = #vqstate { target_ram_count = infinity }) ->
+ State;
+reduce_memory_use(State = #vqstate {
+ mode = default,
+ ram_pending_ack = RPA,
+ ram_msg_count = RamMsgCount,
+ target_ram_count = TargetRamCount,
+ io_batch_size = IoBatchSize,
+ rates = #rates { in = AvgIngress,
+ out = AvgEgress,
+ ack_in = AvgAckIngress,
+ ack_out = AvgAckEgress } }) ->
+
+ State1 = #vqstate { q2 = Q2, q3 = Q3 } =
+ case chunk_size(RamMsgCount + gb_trees:size(RPA), TargetRamCount) of
+ 0 -> State;
+ %% Reduce memory of pending acks and alphas. The order is
+ %% determined based on which is growing faster. Whichever
+ %% comes second may very well get a quota of 0 if the
+ %% first manages to push out the max number of messages.
+ S1 -> Funs = case ((AvgAckIngress - AvgAckEgress) >
+ (AvgIngress - AvgEgress)) of
+ true -> [fun limit_ram_acks/2,
+ fun push_alphas_to_betas/2];
+ false -> [fun push_alphas_to_betas/2,
+ fun limit_ram_acks/2]
+ end,
+ {_, State2} = lists:foldl(fun (ReduceFun, {QuotaN, StateN}) ->
+ ReduceFun(QuotaN, StateN)
+ end, {S1, State}, Funs),
+ State2
+ end,
+
+ %% Only push betas to deltas when the batch is worth the I/O.
+ State3 =
+ case chunk_size(?QUEUE:len(Q2) + ?QUEUE:len(Q3),
+ permitted_beta_count(State1)) of
+ S2 when S2 >= IoBatchSize ->
+ %% There is an implicit, but subtle, upper bound here. We
+ %% may shuffle a lot of messages from Q2/3 into delta, but
+ %% the number of these that require any disk operation,
+ %% namely index writing, i.e. messages that are genuine
+ %% betas and not gammas, is bounded by the credit_flow
+ %% limiting of the alpha->beta conversion above.
+ push_betas_to_deltas(S2, State1);
+ _ ->
+ State1
+ end,
+ %% See rabbitmq-server-290 for the reasons behind this GC call.
+ garbage_collect(),
+ State3;
+%% When using lazy queues, there are no alphas, so we don't need to
+%% call push_alphas_to_betas/2.
+reduce_memory_use(State = #vqstate {
+ mode = lazy,
+ ram_pending_ack = RPA,
+ ram_msg_count = RamMsgCount,
+ target_ram_count = TargetRamCount }) ->
+ State1 = #vqstate { q3 = Q3 } =
+ case chunk_size(RamMsgCount + gb_trees:size(RPA), TargetRamCount) of
+ 0 -> State;
+ S1 -> {_, State2} = limit_ram_acks(S1, State),
+ State2
+ end,
+
+ State3 =
+ case chunk_size(?QUEUE:len(Q3),
+ permitted_beta_count(State1)) of
+ 0 ->
+ State1;
+ S2 ->
+ push_betas_to_deltas(S2, State1)
+ end,
+ garbage_collect(),
+ State3.
+
+%% Move up to Quota pending acks from RAM (ram_pending_ack) to disk
+%% (disk_pending_ack), largest seq ids first. ui/1 flushes the queue
+%% index batch cache when we stop.
+limit_ram_acks(0, State) ->
+ {0, ui(State)};
+limit_ram_acks(Quota, State = #vqstate { ram_pending_ack = RPA,
+ disk_pending_ack = DPA }) ->
+ case gb_trees:is_empty(RPA) of
+ true ->
+ {Quota, ui(State)};
+ false ->
+ {SeqId, MsgStatus, RPA1} = gb_trees:take_largest(RPA),
+ {MsgStatus1, State1} =
+ maybe_prepare_write_to_disk(true, false, MsgStatus, State),
+ MsgStatus2 = m(trim_msg_status(MsgStatus1)),
+ DPA1 = gb_trees:insert(SeqId, MsgStatus2, DPA),
+ limit_ram_acks(Quota - 1,
+ stats({0, 0}, {MsgStatus, MsgStatus2},
+ State1 #vqstate { ram_pending_ack = RPA1,
+ disk_pending_ack = DPA1 }))
+ end.
+
+%% How many betas we are allowed to keep; 'infinity' for an empty
+%% queue, and a segment-boundary-bounded heuristic otherwise.
+permitted_beta_count(#vqstate { len = 0 }) ->
+ infinity;
+permitted_beta_count(#vqstate { mode = lazy,
+ target_ram_count = TargetRamCount}) ->
+ TargetRamCount;
+permitted_beta_count(#vqstate { target_ram_count = 0, q3 = Q3 }) ->
+ lists:min([?QUEUE:len(Q3), rabbit_queue_index:next_segment_boundary(0)]);
+permitted_beta_count(#vqstate { q1 = Q1,
+ q4 = Q4,
+ target_ram_count = TargetRamCount,
+ len = Len }) ->
+ BetaDelta = Len - ?QUEUE:len(Q1) - ?QUEUE:len(Q4),
+ lists:max([rabbit_queue_index:next_segment_boundary(0),
+ BetaDelta - ((BetaDelta * BetaDelta) div
+ (BetaDelta + TargetRamCount))]).
+
+%% Excess of Current over Permitted; 0 when within the limit.
+chunk_size(Current, Permitted)
+ when Permitted =:= infinity orelse Permitted >= Current ->
+ 0;
+chunk_size(Current, Permitted) ->
+ Current - Permitted.
+
+%% Take the next message from q3, maintaining the q1..q4/delta
+%% invariants afterwards. Returns {loaded, {MsgStatus, State}} or
+%% {empty, State}.
+fetch_from_q3(State = #vqstate { mode = default,
+ q1 = Q1,
+ q2 = Q2,
+ delta = #delta { count = DeltaCount },
+ q3 = Q3,
+ q4 = Q4 }) ->
+ case ?QUEUE:out(Q3) of
+ {empty, _Q3} ->
+ {empty, State};
+ {{value, MsgStatus}, Q3a} ->
+ State1 = State #vqstate { q3 = Q3a },
+ State2 = case {?QUEUE:is_empty(Q3a), 0 == DeltaCount} of
+ {true, true} ->
+ %% q3 is now empty, it wasn't before;
+ %% delta is still empty. So q2 must be
+ %% empty, and we know q4 is empty
+ %% otherwise we wouldn't be loading from
+ %% q3. As such, we can just set q4 to Q1.
+ true = ?QUEUE:is_empty(Q2), %% ASSERTION
+ true = ?QUEUE:is_empty(Q4), %% ASSERTION
+ State1 #vqstate { q1 = ?QUEUE:new(), q4 = Q1 };
+ {true, false} ->
+ maybe_deltas_to_betas(State1);
+ {false, _} ->
+ %% q3 still isn't empty, we've not
+ %% touched delta, so the invariants
+ %% between q1, q2, delta and q3 are
+ %% maintained
+ State1
+ end,
+ {loaded, {MsgStatus, State2}}
+ end;
+%% lazy queues
+fetch_from_q3(State = #vqstate { mode = lazy,
+ delta = #delta { count = DeltaCount },
+ q3 = Q3 }) ->
+ case ?QUEUE:out(Q3) of
+ {empty, _Q3} when DeltaCount =:= 0 ->
+ {empty, State};
+ {empty, _Q3} ->
+ %% q3 empty but delta is not: page a chunk in and retry.
+ fetch_from_q3(maybe_deltas_to_betas(State));
+ {{value, MsgStatus}, Q3a} ->
+ State1 = State #vqstate { q3 = Q3a },
+ {loaded, {MsgStatus, State1}}
+ end.
+
+%% Page one segment-bounded chunk of delta back into q3 as betas.
+maybe_deltas_to_betas(State) ->
+ AfterFun = process_delivers_and_acks_fun(deliver_and_ack),
+ maybe_deltas_to_betas(AfterFun, State).
+
+maybe_deltas_to_betas(_DelsAndAcksFun,
+ State = #vqstate {delta = ?BLANK_DELTA_PATTERN(X) }) ->
+ State;
+maybe_deltas_to_betas(DelsAndAcksFun,
+ State = #vqstate {
+ q2 = Q2,
+ delta = Delta,
+ q3 = Q3,
+ index_state = IndexState,
+ ram_msg_count = RamMsgCount,
+ ram_bytes = RamBytes,
+ disk_read_count = DiskReadCount,
+ transient_threshold = TransientThreshold }) ->
+ #delta { start_seq_id = DeltaSeqId,
+ count = DeltaCount,
+ end_seq_id = DeltaSeqIdEnd } = Delta,
+ %% Read at most up to the next index segment boundary.
+ DeltaSeqId1 =
+ lists:min([rabbit_queue_index:next_segment_boundary(DeltaSeqId),
+ DeltaSeqIdEnd]),
+ {List, IndexState1} = rabbit_queue_index:read(DeltaSeqId, DeltaSeqId1,
+ IndexState),
+ {Q3a, RamCountsInc, RamBytesInc, State1} =
+ betas_from_index_entries(List, TransientThreshold,
+ DelsAndAcksFun,
+ State #vqstate { index_state = IndexState1 }),
+ State2 = State1 #vqstate { ram_msg_count = RamMsgCount + RamCountsInc,
+ ram_bytes = RamBytes + RamBytesInc,
+ disk_read_count = DiskReadCount + RamCountsInc },
+ case ?QUEUE:len(Q3a) of
+ 0 ->
+ %% we ignored every message in the segment due to it being
+ %% transient and below the threshold
+ maybe_deltas_to_betas(
+ DelsAndAcksFun,
+ State2 #vqstate {
+ delta = d(Delta #delta { start_seq_id = DeltaSeqId1 })});
+ Q3aLen ->
+ Q3b = ?QUEUE:join(Q3, Q3a),
+ case DeltaCount - Q3aLen of
+ 0 ->
+ %% delta is now empty, but it wasn't before, so
+ %% can now join q2 onto q3
+ State2 #vqstate { q2 = ?QUEUE:new(),
+ delta = ?BLANK_DELTA,
+ q3 = ?QUEUE:join(Q3b, Q2) };
+ N when N > 0 ->
+ Delta1 = d(#delta { start_seq_id = DeltaSeqId1,
+ count = N,
+ end_seq_id = DeltaSeqIdEnd }),
+ State2 #vqstate { delta = Delta1,
+ q3 = Q3b }
+ end
+ end.
+
+%% Convert up to Quota alphas to betas: head of q1 goes to q3 (when
+%% delta is empty) or q2; tail of q4 goes to the front of q3.
+push_alphas_to_betas(Quota, State) ->
+ {Quota1, State1} =
+ push_alphas_to_betas(
+ fun ?QUEUE:out/1,
+ fun (MsgStatus, Q1a,
+ State0 = #vqstate { q3 = Q3, delta = #delta { count = 0 } }) ->
+ State0 #vqstate { q1 = Q1a, q3 = ?QUEUE:in(MsgStatus, Q3) };
+ (MsgStatus, Q1a, State0 = #vqstate { q2 = Q2 }) ->
+ State0 #vqstate { q1 = Q1a, q2 = ?QUEUE:in(MsgStatus, Q2) }
+ end, Quota, State #vqstate.q1, State),
+ {Quota2, State2} =
+ push_alphas_to_betas(
+ fun ?QUEUE:out_r/1,
+ fun (MsgStatus, Q4a, State0 = #vqstate { q3 = Q3 }) ->
+ State0 #vqstate { q3 = ?QUEUE:in_r(MsgStatus, Q3), q4 = Q4a }
+ end, Quota1, State1 #vqstate.q4, State1),
+ {Quota2, State2}.
+
+%% Worker clause: stop when the quota is spent, no target is set, or
+%% we are already within the RAM target; ui/1 flushes index batches.
+push_alphas_to_betas(_Generator, _Consumer, Quota, _Q,
+ State = #vqstate { ram_msg_count = RamMsgCount,
+ target_ram_count = TargetRamCount })
+ when Quota =:= 0 orelse
+ TargetRamCount =:= infinity orelse
+ TargetRamCount >= RamMsgCount ->
+ {Quota, ui(State)};
+push_alphas_to_betas(Generator, Consumer, Quota, Q, State) ->
+ %% We consume credits from the message_store whenever we need to
+ %% persist a message to disk. See:
+ %% rabbit_variable_queue:msg_store_write/4. So perhaps the
+ %% msg_store is trying to throttle down our queue.
+ case credit_flow:blocked() of
+ true -> {Quota, ui(State)};
+ false -> case Generator(Q) of
+ {empty, _Q} ->
+ {Quota, ui(State)};
+ {{value, MsgStatus}, Qa} ->
+ {MsgStatus1, State1} =
+ maybe_prepare_write_to_disk(true, false, MsgStatus,
+ State),
+ MsgStatus2 = m(trim_msg_status(MsgStatus1)),
+ State2 = stats(
+ ready0, {MsgStatus, MsgStatus2}, State1),
+ State3 = Consumer(MsgStatus2, Qa, State2),
+ push_alphas_to_betas(Generator, Consumer, Quota - 1,
+ Qa, State3)
+ end
+ end.
+
+%% Convert up to Quota betas into delta: q3 is drained from its tail
+%% (but never past the next segment boundary), q2 from its head.
+push_betas_to_deltas(Quota, State = #vqstate { mode = default,
+ q2 = Q2,
+ delta = Delta,
+ q3 = Q3}) ->
+ PushState = {Quota, Delta, State},
+ {Q3a, PushState1} = push_betas_to_deltas(
+ fun ?QUEUE:out_r/1,
+ fun rabbit_queue_index:next_segment_boundary/1,
+ Q3, PushState),
+ {Q2a, PushState2} = push_betas_to_deltas(
+ fun ?QUEUE:out/1,
+ fun (Q2MinSeqId) -> Q2MinSeqId end,
+ Q2, PushState1),
+ {_, Delta1, State1} = PushState2,
+ State1 #vqstate { q2 = Q2a,
+ delta = Delta1,
+ q3 = Q3a };
+%% In the case of lazy queues we want to page as many messages as
+%% possible from q3.
+push_betas_to_deltas(Quota, State = #vqstate { mode = lazy,
+ delta = Delta,
+ q3 = Q3}) ->
+ PushState = {Quota, Delta, State},
+ {Q3a, PushState1} = push_betas_to_deltas(
+ fun ?QUEUE:out_r/1,
+ fun (Q2MinSeqId) -> Q2MinSeqId end,
+ Q3, PushState),
+ {_, Delta1, State1} = PushState1,
+ State1 #vqstate { delta = Delta1,
+ q3 = Q3a }.
+
+
+%% Skip the push entirely when Q is empty or wholly below the limit
+%% computed from its minimum seq id.
+push_betas_to_deltas(Generator, LimitFun, Q, PushState) ->
+ case ?QUEUE:is_empty(Q) of
+ true ->
+ {Q, PushState};
+ false ->
+ {value, #msg_status { seq_id = MinSeqId }} = ?QUEUE:peek(Q),
+ {value, #msg_status { seq_id = MaxSeqId }} = ?QUEUE:peek_r(Q),
+ Limit = LimitFun(MinSeqId),
+ case MaxSeqId < Limit of
+ true -> {Q, PushState};
+ false -> push_betas_to_deltas1(Generator, Limit, Q, PushState)
+ end
+ end.
+
+%% Push entries one at a time until the quota runs out, the queue is
+%% drained, or we cross the limit; each push batches an index write.
+push_betas_to_deltas1(_Generator, _Limit, Q, {0, Delta, State}) ->
+ {Q, {0, Delta, ui(State)}};
+push_betas_to_deltas1(Generator, Limit, Q, {Quota, Delta, State}) ->
+ case Generator(Q) of
+ {empty, _Q} ->
+ {Q, {Quota, Delta, ui(State)}};
+ {{value, #msg_status { seq_id = SeqId }}, _Qa}
+ when SeqId < Limit ->
+ {Q, {Quota, Delta, ui(State)}};
+ {{value, MsgStatus = #msg_status { seq_id = SeqId }}, Qa} ->
+ {#msg_status { index_on_disk = true }, State1} =
+ maybe_batch_write_index_to_disk(true, MsgStatus, State),
+ State2 = stats(ready0, {MsgStatus, none}, State1),
+ Delta1 = expand_delta(SeqId, Delta),
+ push_betas_to_deltas1(Generator, Limit, Qa,
+ {Quota - 1, Delta1, State2})
+ end.
+
+%% Flushes queue index batch caches and updates queue index state.
+ui(#vqstate{index_state = IndexState,
+ target_ram_count = TargetRamCount} = State) ->
+ IndexState1 = rabbit_queue_index:flush_pre_publish_cache(
+ TargetRamCount, IndexState),
+ State#vqstate{index_state = IndexState1}.
+
+%% Delay
+%% Test-only hook: if any pending-ack message carries the
+%% ?TIMEOUT_TEST_MSG payload, block forever (until shutdown) so that
+%% channel operation timeouts can be exercised.
+maybe_delay(QPA) ->
+ case is_timeout_test(gb_trees:values(QPA)) of
+ true -> receive
+ %% The queue received an EXIT message, it's probably the
+ %% node being stopped with "rabbitmqctl stop". Thus, abort
+ %% the wait and requeue the EXIT message.
+ {'EXIT', _, shutdown} = ExitMsg -> self() ! ExitMsg,
+ void
+ after infinity -> void
+ end;
+ _ -> void
+ end.
+
+%% True iff any message's payload fragments contain the timeout-test
+%% marker; entries without a body in RAM are skipped.
+is_timeout_test([]) -> false;
+is_timeout_test([#msg_status{
+ msg = #basic_message{
+ content = #content{
+ payload_fragments_rev = PFR}}}|Rem]) ->
+ case lists:member(?TIMEOUT_TEST_MSG, PFR) of
+ T = true -> T;
+ _ -> is_timeout_test(Rem)
+ end;
+is_timeout_test([_|Rem]) -> is_timeout_test(Rem).
+
+%%----------------------------------------------------------------------------
+%% Upgrading
+%%----------------------------------------------------------------------------
+
+%% Upgrade step: rewrite stored basic_message records so the routing
+%% key field becomes a list of routing keys.
+multiple_routing_keys() ->
+ transform_storage(
+ fun ({basic_message, ExchangeName, Routing_Key, Content,
+ MsgId, Persistent}) ->
+ {ok, {basic_message, ExchangeName, [Routing_Key], Content,
+ MsgId, Persistent}};
+ (_) -> {error, corrupt_message}
+ end),
+ ok.
+
+
+%% Assumes message store is not running
+transform_storage(TransformFun) ->
+ transform_store(?PERSISTENT_MSG_STORE, TransformFun),
+ transform_store(?TRANSIENT_MSG_STORE, TransformFun).
+
+%% Force-recover a store's directory, then rewrite every message in it.
+transform_store(Store, TransformFun) ->
+ rabbit_msg_store:force_recovery(rabbit_mnesia:dir(), Store),
+ rabbit_msg_store:transform_dir(rabbit_mnesia:dir(), Store, TransformFun).
diff --git a/test/cluster_rename_SUITE.erl b/test/cluster_rename_SUITE.erl
new file mode 100644
index 0000000000..8ce29a6695
--- /dev/null
+++ b/test/cluster_rename_SUITE.erl
@@ -0,0 +1,304 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(cluster_rename_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+%% Common Test entry point: which groups this suite runs.
+all() ->
+ [
+ {group, cluster_size_2},
+ {group, cluster_size_3}
+ ].
+
+%% Testcases per cluster size; rename tests needing three nodes live
+%% in cluster_size_3.
+groups() ->
+ [
+ {cluster_size_2, [], [
+ % XXX post_change_nodename,
+ abortive_rename,
+ rename_fail,
+ rename_twice_fail
+ ]},
+ {cluster_size_3, [], [
+ rename_cluster_one_by_one,
+ rename_cluster_big_bang,
+ partial_one_by_one,
+ partial_big_bang
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+%% Standard rabbit_ct_helpers suite setup/teardown.
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_2, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 2} %% Replaced with a list of node names later.
+ ]);
+init_per_group(cluster_size_3, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 3} %% Replaced with a list of node names later.
+ ]).
+
+end_per_group(_, Config) ->
+ Config.
+
+%% Generate per-testcase node names ("<testcase>-<n>") so renamed
+%% nodes from one case cannot clash with the next, then start a
+%% pre-clustered broker set.
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ Nodenames = [
+ list_to_atom(rabbit_misc:format("~s-~b", [Testcase, I]))
+ || I <- lists:seq(1, ClusterSize)
+ ],
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, Nodenames},
+ {rmq_nodes_clustered, true}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+%% Testcases that renamed nodes return {save_config, Config}; pick
+%% that up so teardown targets the post-rename node names.
+end_per_testcase(Testcase, Config) ->
+ Config1 = case rabbit_ct_helpers:get_config(Config, save_config) of
+ undefined -> Config;
+ C -> C
+ end,
+ Config2 = rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config2, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%% Rolling rename of a cluster, each node should do a secondary rename.
+%% Rolling rename of a cluster, each node should do a secondary rename.
+rename_cluster_one_by_one(Config) ->
+ [Node1, Node2, Node3] = rabbit_ct_broker_helpers:get_node_configs(
+ Config, nodename),
+ publish_all(Config,
+ [{Node1, <<"1">>}, {Node2, <<"2">>}, {Node3, <<"3">>}]),
+
+ Config1 = stop_rename_start(Config, Node1, [Node1, jessica]),
+ Config2 = stop_rename_start(Config1, Node2, [Node2, hazel]),
+ Config3 = stop_rename_start(Config2, Node3, [Node3, flopsy]),
+
+ %% Messages published under the old names must survive the rename.
+ [Jessica, Hazel, Flopsy] = rabbit_ct_broker_helpers:get_node_configs(
+ Config3, nodename),
+ consume_all(Config3,
+ [{Jessica, <<"1">>}, {Hazel, <<"2">>}, {Flopsy, <<"3">>}]),
+ {save_config, Config3}.
+
+%% Big bang rename of a cluster, Node1 should do a primary rename.
+rename_cluster_big_bang(Config) ->
+ [Node1, Node2, Node3] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ publish_all(Config,
+ [{Node1, <<"1">>}, {Node2, <<"2">>}, {Node3, <<"3">>}]),
+
+ %% Stop the whole cluster, then rename every node while down.
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node3),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node2),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node1),
+
+ Map = [Node1, jessica, Node2, hazel, Node3, flopsy],
+ Config1 = rename_node(Config, Node1, Map),
+ Config2 = rename_node(Config1, Node2, Map),
+ Config3 = rename_node(Config2, Node3, Map),
+
+ [Jessica, Hazel, Flopsy] = rabbit_ct_broker_helpers:get_node_configs(
+ Config3, nodename),
+ ok = rabbit_ct_broker_helpers:start_node(Config3, Jessica),
+ ok = rabbit_ct_broker_helpers:start_node(Config3, Hazel),
+ ok = rabbit_ct_broker_helpers:start_node(Config3, Flopsy),
+
+ consume_all(Config3,
+ [{Jessica, <<"1">>}, {Hazel, <<"2">>}, {Flopsy, <<"3">>}]),
+ {save_config, Config3}.
+
+%% Here we test that Node1 copes with things being renamed around it.
+partial_one_by_one(Config) ->
+ [Node1, Node2, Node3] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ publish_all(Config,
+ [{Node1, <<"1">>}, {Node2, <<"2">>}, {Node3, <<"3">>}]),
+
+ %% Only two of the three nodes are renamed; Node3 keeps its name.
+ Config1 = stop_rename_start(Config, Node1, [Node1, jessica]),
+ Config2 = stop_rename_start(Config1, Node2, [Node2, hazel]),
+
+ [Jessica, Hazel, Node3] = rabbit_ct_broker_helpers:get_node_configs(
+ Config2, nodename),
+ consume_all(Config2,
+ [{Jessica, <<"1">>}, {Hazel, <<"2">>}, {Node3, <<"3">>}]),
+ {save_config, Config2}.
+
+%% Here we test that Node1 copes with things being renamed around it.
+partial_big_bang(Config) ->
+ [Node1, Node2, Node3] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ publish_all(Config,
+ [{Node1, <<"1">>}, {Node2, <<"2">>}, {Node3, <<"3">>}]),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node3),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node2),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node1),
+
+ %% Node1 is not in the rename map: it must cope with its peers
+ %% having been renamed while everything was down.
+ Map = [Node2, hazel, Node3, flopsy],
+ Config1 = rename_node(Config, Node2, Map),
+ Config2 = rename_node(Config1, Node3, Map),
+
+ [Node1, Hazel, Flopsy] = rabbit_ct_broker_helpers:get_node_configs(Config2,
+ nodename),
+ ok = rabbit_ct_broker_helpers:start_node(Config2, Node1),
+ ok = rabbit_ct_broker_helpers:start_node(Config2, Hazel),
+ ok = rabbit_ct_broker_helpers:start_node(Config2, Flopsy),
+
+ consume_all(Config2,
+ [{Node1, <<"1">>}, {Hazel, <<"2">>}, {Flopsy, <<"3">>}]),
+ {save_config, Config2}.
+
+% XXX %% We should be able to specify the -n parameter on ctl with either
+% XXX %% the before or after name for the local node (since in real cases
+% XXX %% one might want to invoke the command before or after the hostname
+% XXX %% has changed) - usually we test before so here we test after.
+% XXX post_change_nodename([Node1, _Bigwig]) ->
+% XXX publish(Node1, <<"Node1">>),
+% XXX
+% XXX Bugs1 = rabbit_test_configs:stop_node(Node1),
+% XXX Bugs2 = [{nodename, jessica} | proplists:delete(nodename, Bugs1)],
+% XXX Jessica0 = rename_node(Bugs2, jessica, [Node1, jessica]),
+% XXX Jessica = rabbit_test_configs:start_node(Jessica0),
+% XXX
+% XXX consume(Jessica, <<"Node1">>),
+% XXX stop_all([Jessica]),
+% XXX ok.
+
+%% If we invoke rename but the node name does not actually change, we
+%% should roll back.
+%% If we invoke rename but the node name does not actually change, we
+%% should roll back.
+abortive_rename(Config) ->
+ Node1 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ publish(Config, Node1, <<"Node1">>),
+
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node1),
+ _Config1 = rename_node(Config, Node1, [Node1, jessica]),
+ %% Restart under the ORIGINAL name: the rename must have rolled
+ %% back and the message must still be there.
+ ok = rabbit_ct_broker_helpers:start_node(Config, Node1),
+
+ consume(Config, Node1, <<"Node1">>),
+ ok.
+
+%% And test some ways the command can fail.
+rename_fail(Config) ->
+ [Node1, Node2] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node1),
+ %% Rename from a node that does not exist
+ ok = rename_node_fail(Config, Node1, [bugzilla, jessica]),
+ %% Rename to a node which does
+ ok = rename_node_fail(Config, Node1, [Node1, Node2]),
+ %% Rename two nodes to the same thing
+ ok = rename_node_fail(Config, Node1, [Node1, jessica, Node2, jessica]),
+ %% Rename while impersonating a node not in the cluster
+ Config1 = rabbit_ct_broker_helpers:set_node_config(Config, Node1,
+ {nodename, 'rabbit@localhost'}),
+ ok = rename_node_fail(Config1, Node1, [Node1, jessica]),
+ ok.
+
+%% A second rename before the first has been completed (node started)
+%% must be refused. NOTE(review): the second call deliberately uses
+%% the pre-rename Config so ctl targets the original node directory.
+rename_twice_fail(Config) ->
+ Node1 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Node1),
+ Config1 = rename_node(Config, Node1, [Node1, indecisive]),
+ ok = rename_node_fail(Config, Node1, [indecisive, jessica]),
+ {save_config, Config1}.
+
+%% ----------------------------------------------------------------------------
+
+%% Stop a node, rename it according to Map, and start it again under
+%% the new name; returns the updated Config.
+stop_rename_start(Config, Nodename, Map) ->
+ ok = rabbit_ct_broker_helpers:stop_node(Config, Nodename),
+ Config1 = rename_node(Config, Nodename, Map),
+ ok = rabbit_ct_broker_helpers:start_node(Config1, Nodename),
+ Config1.
+
+%% Rename expected to succeed — crashes on error.
+rename_node(Config, Nodename, Map) ->
+ {ok, Config1} = do_rename_node(Config, Nodename, Map),
+ Config1.
+
+%% Rename expected to fail — crashes if it succeeds.
+rename_node_fail(Config, Nodename, Map) ->
+ error = do_rename_node(Config, Nodename, Map),
+ ok.
+
+%% Run "rabbitmqctl rename_cluster_node" with Map ([Old1, New1, ...]).
+%% Bare names (no '@') are expanded to full node names on localhost.
+do_rename_node(Config, Nodename, Map) ->
+ Map1 = [
+ begin
+ NStr = atom_to_list(N),
+ case lists:member($@, NStr) of
+ true -> N;
+ false -> rabbit_nodes:make({NStr, "localhost"})
+ end
+ end
+ || N <- Map
+ ],
+ Ret = rabbit_ct_broker_helpers:rabbitmqctl(Config, Nodename,
+ ["rename_cluster_node" | Map1]),
+ case Ret of
+ {ok, _} ->
+ Config1 = update_config_after_rename(Config, Map1),
+ {ok, Config1};
+ {error, _, _} ->
+ error
+ end.
+
+%% Walk the [Old, New, ...] pairs, updating each node's configured name.
+update_config_after_rename(Config, [Old, New | Rest]) ->
+ Config1 = rabbit_ct_broker_helpers:set_node_config(Config, Old,
+ {nodename, New}),
+ update_config_after_rename(Config1, Rest);
+update_config_after_rename(Config, []) ->
+ Config.
+
+%% Publish one persistent message (payload = queue name) to a durable
+%% queue on Node, waiting for publisher confirm before returning.
+publish(Config, Node, Q) ->
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Node),
+ amqp_channel:call(Ch, #'confirm.select'{}),
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q, durable = true}),
+ amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = Q}),
+ amqp_channel:wait_for_confirms(Ch),
+ rabbit_ct_client_helpers:close_channels_and_connection(Config, Node).
+
+%% Assert the message published by publish/3 is retrievable on Node.
+consume(Config, Node, Q) ->
+ Ch = rabbit_ct_client_helpers:open_channel(Config, Node),
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q, durable = true}),
+ {#'basic.get_ok'{}, #amqp_msg{payload = Q}} =
+ amqp_channel:call(Ch, #'basic.get'{queue = Q}),
+ rabbit_ct_client_helpers:close_channels_and_connection(Config, Node).
+
+
+publish_all(Config, Nodes) ->
+ [publish(Config, Node, Key) || {Node, Key} <- Nodes].
+
+consume_all(Config, Nodes) ->
+ [consume(Config, Node, Key) || {Node, Key} <- Nodes].
+
+%% NOTE(review): unused legacy helper from the pre-Common-Test version
+%% of this suite — candidate for removal.
+set_node(Nodename, Cfg) ->
+ [{nodename, Nodename} | proplists:delete(nodename, Cfg)].
diff --git a/test/clustering_management_SUITE.erl b/test/clustering_management_SUITE.erl
new file mode 100644
index 0000000000..00ddfa48a2
--- /dev/null
+++ b/test/clustering_management_SUITE.erl
@@ -0,0 +1,728 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(clustering_management_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(LOOP_RECURSION_DELAY, 100).
+
%% Two top-level group trees: tests starting from unclustered nodes,
%% and tests starting from an already-formed cluster.
all() ->
    [
      {group, unclustered},
      {group, clustered}
    ].
+
%% Testcases grouped by initial clustering state and cluster size
%% (the cluster_size_N subgroup sets rmq_nodes_count in
%% init_per_group/2).
groups() ->
    [
      {unclustered, [], [
          {cluster_size_2, [], [
              erlang_config
            ]},
          {cluster_size_3, [], [
              join_and_part_cluster,
              join_cluster_bad_operations,
              join_to_start_interval,
              forget_cluster_node,
              change_cluster_node_type,
              change_cluster_when_node_offline,
              update_cluster_nodes,
              force_reset_node
            ]}
        ]},
      {clustered, [], [
          {cluster_size_2, [], [
              forget_removes_things,
              reset_removes_things,
              forget_offline_removes_things,
              force_boot,
              status_with_alarm
            ]},
          {cluster_size_4, [], [
              forget_promotes_offline_slave
            ]}
        ]}
    ].
+
suite() ->
    [
      %% If a test hangs, no need to wait for 30 minutes.
      {timetrap, {minutes, 5}}
    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
%% Standard rabbitmq-ct-helpers suite setup.
init_per_suite(Config) ->
    rabbit_ct_helpers:log_environment(),
    rabbit_ct_helpers:run_setup_steps(Config).
+
%% Standard rabbitmq-ct-helpers suite teardown.
end_per_suite(Config) ->
    rabbit_ct_helpers:run_teardown_steps(Config).
+
%% Top-level groups decide whether the broker helpers pre-cluster the
%% nodes; the size subgroups set how many nodes each testcase gets.
init_per_group(unclustered, Config) ->
    rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, false}]);
init_per_group(clustered, Config) ->
    rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, true}]);
init_per_group(cluster_size_2, Config) ->
    rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]);
init_per_group(cluster_size_3, Config) ->
    rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]);
init_per_group(cluster_size_4, Config) ->
    rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 4}]).
+
%% Nothing to undo per group; config changes are per-run.
end_per_group(_, Config) ->
    Config.
+
%% Start a fresh set of broker nodes for every testcase. The TCP port
%% base is offset by testcase number * cluster size so concurrent or
%% repeated runs do not collide on listener ports.
init_per_testcase(Testcase, Config) ->
    rabbit_ct_helpers:testcase_started(Config, Testcase),
    ClusterSize = ?config(rmq_nodes_count, Config),
    TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
    Config1 = rabbit_ct_helpers:set_config(Config, [
        {rmq_nodename_suffix, Testcase},
        {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
      ]),
    rabbit_ct_helpers:run_steps(Config1,
      rabbit_ct_broker_helpers:setup_steps() ++
      rabbit_ct_client_helpers:setup_steps()).
+
%% Tear down clients first, then the broker nodes.
end_per_testcase(Testcase, Config) ->
    Config1 = rabbit_ct_helpers:run_steps(Config,
      rabbit_ct_client_helpers:teardown_steps() ++
      rabbit_ct_broker_helpers:teardown_steps()),
    rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
%% Form a 3-node cluster (Hare as a RAM node), then dismantle it with
%% reset, checking the {All, Disc, Running} status triple at each step.
join_and_part_cluster(Config) ->
    [Rabbit, Hare, Bunny] = cluster_members(Config),
    assert_not_clustered(Rabbit),
    assert_not_clustered(Hare),
    assert_not_clustered(Bunny),

    stop_join_start(Rabbit, Bunny),
    assert_clustered([Rabbit, Bunny]),

    %% Hare joins as a RAM node, so it is absent from the disc list.
    stop_join_start(Hare, Bunny, true),
    assert_cluster_status(
      {[Bunny, Hare, Rabbit], [Bunny, Rabbit], [Bunny, Hare, Rabbit]},
      [Rabbit, Hare, Bunny]),

    %% Allow clustering with already clustered node
    ok = stop_app(Rabbit),
    {ok, already_member} = join_cluster(Rabbit, Hare),
    ok = start_app(Rabbit),

    stop_reset_start(Rabbit),
    assert_not_clustered(Rabbit),
    assert_cluster_status({[Bunny, Hare], [Bunny], [Bunny, Hare]},
                          [Hare, Bunny]),

    stop_reset_start(Hare),
    assert_not_clustered(Hare),
    assert_not_clustered(Bunny).
+
%% Exercise the ways join_cluster/reset are expected to be rejected:
%% unknown peer, mnesia still running, self-join, and operations that
%% would leave a cluster without a disc node.
join_cluster_bad_operations(Config) ->
    [Rabbit, Hare, Bunny] = cluster_members(Config),

    %% Non-existent node
    ok = stop_app(Rabbit),
    assert_failure(fun () -> join_cluster(Rabbit, non@existant) end),
    ok = start_app(Rabbit),
    assert_not_clustered(Rabbit),

    %% Trying to cluster with mnesia running
    assert_failure(fun () -> join_cluster(Rabbit, Bunny) end),
    assert_not_clustered(Rabbit),

    %% Trying to cluster the node with itself
    ok = stop_app(Rabbit),
    assert_failure(fun () -> join_cluster(Rabbit, Rabbit) end),
    ok = start_app(Rabbit),
    assert_not_clustered(Rabbit),

    %% Do not let the node leave the cluster or reset if it's the only
    %% ram node
    stop_join_start(Hare, Rabbit, true),
    assert_cluster_status({[Rabbit, Hare], [Rabbit], [Rabbit, Hare]},
                          [Rabbit, Hare]),
    ok = stop_app(Hare),
    assert_failure(fun () -> join_cluster(Rabbit, Bunny) end),
    assert_failure(fun () -> reset(Rabbit) end),
    ok = start_app(Hare),
    assert_cluster_status({[Rabbit, Hare], [Rabbit], [Rabbit, Hare]},
                          [Rabbit, Hare]),

    %% Cannot start RAM-only node first
    ok = stop_app(Rabbit),
    ok = stop_app(Hare),
    assert_failure(fun () -> start_app(Hare) end),
    ok = start_app(Rabbit),
    ok = start_app(Hare),
    ok.
+
%% This tests that the nodes in the cluster are notified immediately of a node
%% join, and not just after the app is started.
join_to_start_interval(Config) ->
    [Rabbit, Hare, _Bunny] = cluster_members(Config),

    ok = stop_app(Rabbit),
    ok = join_cluster(Rabbit, Hare),
    %% Rabbit is already listed as a member even though its app is down.
    assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
                          [Rabbit, Hare]),
    ok = start_app(Rabbit),
    assert_clustered([Rabbit, Hare]).
+
%% Exercise forget_cluster_node: rejected for non-members, online
%% targets, and misuse of --offline; then the full offline-removal
%% path via `rabbitmqctl forget_cluster_node --offline'.
forget_cluster_node(Config) ->
    [Rabbit, Hare, Bunny] = cluster_members(Config),

    %% Trying to remove a node not in the cluster should fail
    assert_failure(fun () -> forget_cluster_node(Hare, Rabbit) end),

    stop_join_start(Rabbit, Hare),
    assert_clustered([Rabbit, Hare]),

    %% Trying to remove an online node should fail
    assert_failure(fun () -> forget_cluster_node(Hare, Rabbit) end),

    ok = stop_app(Rabbit),
    %% We're passing the --offline flag, but Hare is online
    assert_failure(fun () -> forget_cluster_node(Hare, Rabbit, true) end),
    %% Removing some non-existent node will fail
    assert_failure(fun () -> forget_cluster_node(Hare, non@existant) end),
    ok = forget_cluster_node(Hare, Rabbit),
    assert_not_clustered(Hare),
    %% Rabbit's own (stale) view still lists both nodes.
    assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
                          [Rabbit]),

    %% Now we can't start Rabbit since it thinks that it's still in the cluster
    %% with Hare, while Hare disagrees.
    assert_failure(fun () -> start_app(Rabbit) end),

    ok = reset(Rabbit),
    ok = start_app(Rabbit),
    assert_not_clustered(Rabbit),

    %% Now we remove Rabbit from an offline node.
    stop_join_start(Bunny, Hare),
    stop_join_start(Rabbit, Hare),
    assert_clustered([Rabbit, Hare, Bunny]),
    ok = stop_app(Hare),
    ok = stop_app(Rabbit),
    ok = stop_app(Bunny),
    %% This is fine but we need the flag
    assert_failure(fun () -> forget_cluster_node(Hare, Bunny) end),
    %% Also fails because hare node is still running
    assert_failure(fun () -> forget_cluster_node(Hare, Bunny, true) end),
    %% But this works
    ok = rabbit_ct_broker_helpers:stop_node(Config, Hare),
    {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Hare,
      ["forget_cluster_node", "--offline", Bunny]),
    ok = rabbit_ct_broker_helpers:start_node(Config, Hare),
    ok = start_app(Rabbit),
    %% Bunny still thinks its clustered with Rabbit and Hare
    assert_failure(fun () -> start_app(Bunny) end),
    ok = reset(Bunny),
    ok = start_app(Bunny),
    assert_not_clustered(Bunny),
    assert_clustered([Rabbit, Hare]).
+
%% Losing Rabbit via forget_cluster_node must delete its queues.
forget_removes_things(Config) ->
    test_removes_things(Config, fun (R, H) -> ok = forget_cluster_node(H, R) end).
+
%% Losing Rabbit via reset must delete its queues as well.
reset_removes_things(Config) ->
    test_removes_things(Config, fun (R, _H) -> ok = reset(R) end).
+
%% Shared body for forget/reset_removes_things: declare an unmirrored
%% queue on Rabbit, stop Rabbit, check a passive redeclare on Hare
%% fails (404 closes the channel), then "lose" Rabbit via LoseRabbit
%% and verify the queue name is free to declare again on Hare.
test_removes_things(Config, LoseRabbit) ->
    Unmirrored = <<"unmirrored-queue">>,
    [Rabbit, Hare] = cluster_members(Config),
    RCh = rabbit_ct_client_helpers:open_channel(Config, Rabbit),
    declare(RCh, Unmirrored),
    ok = stop_app(Rabbit),

    HCh = rabbit_ct_client_helpers:open_channel(Config, Hare),
    {'EXIT',{{shutdown,{server_initiated_close,404,_}}, _}} =
        (catch declare(HCh, Unmirrored)),

    ok = LoseRabbit(Rabbit, Hare),
    %% The 404 above closed HCh; we need a fresh channel.
    HCh2 = rabbit_ct_client_helpers:open_channel(Config, Hare),
    declare(HCh2, Unmirrored),
    ok.
+
%% Like forget_removes_things, but the forgetting node is itself down
%% and uses `forget_cluster_node --offline'. Also checks that the
%% auto-delete exchange bound only to Rabbit's queue disappears.
forget_offline_removes_things(Config) ->
    [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
      nodename),
    Unmirrored = <<"unmirrored-queue">>,
    X = <<"X">>,
    RCh = rabbit_ct_client_helpers:open_channel(Config, Rabbit),
    declare(RCh, Unmirrored),

    amqp_channel:call(RCh, #'exchange.declare'{durable     = true,
                                               exchange    = X,
                                               auto_delete = true}),
    amqp_channel:call(RCh, #'queue.bind'{queue    = Unmirrored,
                                         exchange = X}),
    ok = rabbit_ct_broker_helpers:stop_broker(Config, Rabbit),

    HCh = rabbit_ct_client_helpers:open_channel(Config, Hare),
    {'EXIT',{{shutdown,{server_initiated_close,404,_}}, _}} =
        (catch declare(HCh, Unmirrored)),

    ok = rabbit_ct_broker_helpers:stop_node(Config, Hare),
    ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit),
    {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Hare,
      ["forget_cluster_node", "--offline", Rabbit]),
    ok = rabbit_ct_broker_helpers:start_node(Config, Hare),

    HCh2 = rabbit_ct_client_helpers:open_channel(Config, Hare),
    declare(HCh2, Unmirrored),
    %% Passive declare: the auto-delete exchange must be gone too.
    {'EXIT',{{shutdown,{server_initiated_close,404,_}}, _}} =
        (catch amqp_channel:call(HCh2,#'exchange.declare'{durable     = true,
                                                          exchange    = X,
                                                          auto_delete = true,
                                                          passive     = true})),
    ok.
+
%% Check that forgetting a down master promotes a recoverable (also
%% down) slave that still holds the message, rather than a running
%% slave that wiped its copy.
forget_promotes_offline_slave(Config) ->
    [A, B, C, D] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
    ACh = rabbit_ct_client_helpers:open_channel(Config, A),
    Q = <<"mirrored-queue">>,
    declare(ACh, Q),
    set_ha_policy(Config, Q, A, [B, C]),
    set_ha_policy(Config, Q, A, [C, D]), %% Test add and remove from recoverable_slaves

    %% Publish and confirm
    amqp_channel:call(ACh, #'confirm.select'{}),
    amqp_channel:cast(ACh, #'basic.publish'{routing_key = Q},
                      #amqp_msg{props = #'P_basic'{delivery_mode = 2}}),
    amqp_channel:wait_for_confirms(ACh),

    %% We kill nodes rather than stop them in order to make sure
    %% that we aren't dependent on anything that happens as they shut
    %% down (see bug 26467).
    ok = rabbit_ct_broker_helpers:kill_node(Config, D),
    ok = rabbit_ct_broker_helpers:kill_node(Config, C),
    ok = rabbit_ct_broker_helpers:kill_node(Config, B),
    ok = rabbit_ct_broker_helpers:kill_node(Config, A),

    {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, C,
      ["force_boot"]),

    ok = rabbit_ct_broker_helpers:start_node(Config, C),

    %% We should now have the following dramatis personae:
    %% A - down, master
    %% B - down, used to be slave, no longer is, never had the message
    %% C - running, should be slave, but has wiped the message on restart
    %% D - down, recoverable slave, contains message
    %%
    %% So forgetting A should offline-promote the queue to D, keeping
    %% the message.

    {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, C,
      ["forget_cluster_node", A]),

    ok = rabbit_ct_broker_helpers:start_node(Config, D),
    DCh2 = rabbit_ct_client_helpers:open_channel(Config, D),
    #'queue.declare_ok'{message_count = 1} = declare(DCh2, Q),
    ok.
+
%% Pin Q's mirrors to exactly [Master | Slaves] via a "nodes" HA
%% policy and block until the mirrors have actually moved.
set_ha_policy(Config, Q, Master, Slaves) ->
    Nodes = [list_to_binary(atom_to_list(N)) || N <- [Master | Slaves]],
    rabbit_ct_broker_helpers:set_ha_policy(Config, Master, Q,
      {<<"nodes">>, Nodes}),
    await_slaves(Q, Master, Slaves).
+
%% Poll the master node until the queue's actual master/slave pids
%% match the requested layout. NOTE(review): this loop has no retry
%% bound of its own — it relies on the suite timetrap to abort if the
%% mirrors never settle.
await_slaves(Q, Master, Slaves) ->
    {ok, #amqqueue{pid        = MPid,
                   slave_pids = SPids}} =
        rpc:call(Master, rabbit_amqqueue, lookup,
                 [rabbit_misc:r(<<"/">>, queue, Q)]),
    ActMaster = node(MPid),
    ActSlaves = lists:usort([node(P) || P <- SPids]),
    case {Master, lists:usort(Slaves)} of
        {ActMaster, ActSlaves} -> ok;
        _                      -> timer:sleep(100),
                                  await_slaves(Q, Master, Slaves)
    end.
+
%% force_boot is refused while the node runs; after both nodes stop,
%% Rabbit cannot boot (it was not the last node down) until force_boot
%% overrides that check.
force_boot(Config) ->
    [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
      nodename),
    {error, _, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit,
      ["force_boot"]),
    ok = rabbit_ct_broker_helpers:stop_node(Config, Rabbit),
    ok = rabbit_ct_broker_helpers:stop_node(Config, Hare),
    {error, _} = rabbit_ct_broker_helpers:start_node(Config, Rabbit),
    {ok, _} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit,
      ["force_boot"]),
    ok = rabbit_ct_broker_helpers:start_node(Config, Rabbit),
    ok.
+
%% Toggle a node between ram and disc membership, and check the
%% operations that must be refused (unclustered node, last disc node).
change_cluster_node_type(Config) ->
    [Rabbit, Hare, _Bunny] = cluster_members(Config),

    %% Trying to change the ram node when not clustered should always fail
    ok = stop_app(Rabbit),
    assert_failure(fun () -> change_cluster_node_type(Rabbit, ram) end),
    assert_failure(fun () -> change_cluster_node_type(Rabbit, disc) end),
    ok = start_app(Rabbit),

    ok = stop_app(Rabbit),
    join_cluster(Rabbit, Hare),
    assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
                          [Rabbit, Hare]),
    change_cluster_node_type(Rabbit, ram),
    assert_cluster_status({[Rabbit, Hare], [Hare], [Hare]},
                          [Rabbit, Hare]),
    change_cluster_node_type(Rabbit, disc),
    assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
                          [Rabbit, Hare]),
    change_cluster_node_type(Rabbit, ram),
    ok = start_app(Rabbit),
    assert_cluster_status({[Rabbit, Hare], [Hare], [Hare, Rabbit]},
                          [Rabbit, Hare]),

    %% Changing to ram when you're the only ram node should fail
    ok = stop_app(Hare),
    assert_failure(fun () -> change_cluster_node_type(Hare, ram) end),
    ok = start_app(Hare).
+
%% Change cluster membership while one member is offline and check
%% everybody converges once it comes back, for both disc and ram
%% flavours of the offline node.
change_cluster_when_node_offline(Config) ->
    [Rabbit, Hare, Bunny] = cluster_members(Config),

    %% Cluster the three nodes
    stop_join_start(Rabbit, Hare),
    assert_clustered([Rabbit, Hare]),

    stop_join_start(Bunny, Hare),
    assert_clustered([Rabbit, Hare, Bunny]),

    %% Bring down Rabbit, and remove Bunny from the cluster while
    %% Rabbit is offline
    ok = stop_app(Rabbit),
    ok = stop_app(Bunny),
    ok = reset(Bunny),
    assert_cluster_status({[Bunny], [Bunny], []}, [Bunny]),
    assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]}, [Hare]),
    %% Rabbit still has the stale three-node view.
    assert_cluster_status(
      {[Rabbit, Hare, Bunny], [Rabbit, Hare, Bunny], [Hare, Bunny]}, [Rabbit]),

    %% Bring Rabbit back up
    ok = start_app(Rabbit),
    assert_clustered([Rabbit, Hare]),
    ok = start_app(Bunny),
    assert_not_clustered(Bunny),

    %% Now the same, but Rabbit is a RAM node, and we bring up Bunny
    %% before
    ok = stop_app(Rabbit),
    ok = change_cluster_node_type(Rabbit, ram),
    ok = start_app(Rabbit),
    stop_join_start(Bunny, Hare),
    assert_cluster_status(
      {[Rabbit, Hare, Bunny], [Hare, Bunny], [Rabbit, Hare, Bunny]},
      [Rabbit, Hare, Bunny]),
    ok = stop_app(Rabbit),
    ok = stop_app(Bunny),
    ok = reset(Bunny),
    ok = start_app(Bunny),
    assert_not_clustered(Bunny),
    assert_cluster_status({[Rabbit, Hare], [Hare], [Hare]}, [Hare]),
    assert_cluster_status(
      {[Rabbit, Hare, Bunny], [Hare, Bunny], [Hare, Bunny]},
      [Rabbit]),
    ok = start_app(Rabbit),
    assert_cluster_status({[Rabbit, Hare], [Hare], [Rabbit, Hare]},
                          [Rabbit, Hare]),
    assert_not_clustered(Bunny).
+
%% A node that missed a cluster change while down can be pointed at a
%% consistent peer with update_cluster_nodes; bogus or inconsistent
%% peers must be rejected.
update_cluster_nodes(Config) ->
    [Rabbit, Hare, Bunny] = cluster_members(Config),

    %% Mnesia is running...
    assert_failure(fun () -> update_cluster_nodes(Rabbit, Hare) end),

    ok = stop_app(Rabbit),
    ok = join_cluster(Rabbit, Hare),
    ok = stop_app(Bunny),
    ok = join_cluster(Bunny, Hare),
    ok = start_app(Bunny),
    %% Hare leaves while Rabbit is down, so Rabbit's view is stale.
    stop_reset_start(Hare),
    assert_failure(fun () -> start_app(Rabbit) end),
    %% Bogus node
    assert_failure(fun () -> update_cluster_nodes(Rabbit, non@existant) end),
    %% Inconsistent node
    assert_failure(fun () -> update_cluster_nodes(Rabbit, Hare) end),
    ok = update_cluster_nodes(Rabbit, Bunny),
    ok = start_app(Rabbit),
    assert_not_clustered(Hare),
    assert_clustered([Rabbit, Bunny]).
+
%% Exercise clustering driven by the {cluster_nodes, {Nodes, Type}}
%% application environment value, including every malformed variant,
%% which must prevent the node from starting.
erlang_config(Config) ->
    [Rabbit, Hare] = cluster_members(Config),

    ok = stop_app(Hare),
    ok = reset(Hare),
    ok = rpc:call(Hare, application, set_env,
                  [rabbit, cluster_nodes, {[Rabbit], disc}]),
    ok = start_app(Hare),
    assert_clustered([Rabbit, Hare]),

    ok = stop_app(Hare),
    ok = reset(Hare),
    ok = rpc:call(Hare, application, set_env,
                  [rabbit, cluster_nodes, {[Rabbit], ram}]),
    ok = start_app(Hare),
    assert_cluster_status({[Rabbit, Hare], [Rabbit], [Rabbit, Hare]},
                          [Rabbit, Hare]),

    %% Check having a stop_app'ed node around doesn't break completely.
    ok = stop_app(Hare),
    ok = reset(Hare),
    ok = stop_app(Rabbit),
    ok = rpc:call(Hare, application, set_env,
                  [rabbit, cluster_nodes, {[Rabbit], disc}]),
    ok = start_app(Hare),
    ok = start_app(Rabbit),
    assert_not_clustered(Hare),
    assert_not_clustered(Rabbit),

    %% We get a warning but we start anyway
    ok = stop_app(Hare),
    ok = reset(Hare),
    ok = rpc:call(Hare, application, set_env,
                  [rabbit, cluster_nodes, {[non@existent], disc}]),
    ok = start_app(Hare),
    assert_not_clustered(Hare),
    assert_not_clustered(Rabbit),

    %% If we use a legacy config file, the node fails to start.
    ok = stop_app(Hare),
    ok = reset(Hare),
    ok = rpc:call(Hare, application, set_env,
                  [rabbit, cluster_nodes, [Rabbit]]),
    assert_failure(fun () -> start_app(Hare) end),
    assert_not_clustered(Rabbit),

    %% If we use an invalid node name, the node fails to start.
    ok = stop_app(Hare),
    ok = reset(Hare),
    ok = rpc:call(Hare, application, set_env,
                  [rabbit, cluster_nodes, {["Mike's computer"], disc}]),
    assert_failure(fun () -> start_app(Hare) end),
    assert_not_clustered(Rabbit),

    %% If we use an invalid node type, the node fails to start.
    ok = stop_app(Hare),
    ok = reset(Hare),
    ok = rpc:call(Hare, application, set_env,
                  [rabbit, cluster_nodes, {[Rabbit], blue}]),
    assert_failure(fun () -> start_app(Hare) end),
    assert_not_clustered(Rabbit),

    %% If we use an invalid cluster_nodes conf, the node fails to start.
    ok = stop_app(Hare),
    ok = reset(Hare),
    ok = rpc:call(Hare, application, set_env,
                  [rabbit, cluster_nodes, true]),
    assert_failure(fun () -> start_app(Hare) end),
    assert_not_clustered(Rabbit),

    ok = stop_app(Hare),
    ok = reset(Hare),
    ok = rpc:call(Hare, application, set_env,
                  [rabbit, cluster_nodes, "Yes, please"]),
    assert_failure(fun () -> start_app(Hare) end),
    assert_not_clustered(Rabbit).
+
%% force_reset wipes the local node unilaterally; the peer keeps a
%% stale view until the node rejoins via update_cluster_nodes.
force_reset_node(Config) ->
    [Rabbit, Hare, _Bunny] = cluster_members(Config),

    stop_join_start(Rabbit, Hare),
    stop_app(Rabbit),
    force_reset(Rabbit),
    %% Hare thinks that Rabbit is still clustered
    assert_cluster_status({[Rabbit, Hare], [Rabbit, Hare], [Hare]},
                          [Hare]),
    %% ...but it isn't
    assert_cluster_status({[Rabbit], [Rabbit], []}, [Rabbit]),
    %% We can rejoin Rabbit and Hare
    update_cluster_nodes(Rabbit, Hare),
    start_app(Rabbit),
    assert_clustered([Rabbit, Hare]).
+
%% Raise a memory alarm on one node and a disk alarm on the other,
%% then check `cluster_status' run from either node reports both.
status_with_alarm(Config) ->
    [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
      nodename),

    %% Given: an alarm is raised each node.
    rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit,
      ["set_vm_memory_high_watermark", "0.000000001"]),
    rabbit_ct_broker_helpers:rabbitmqctl(Config, Hare,
      ["set_disk_free_limit", "2048G"]),

    %% When: we ask for cluster status.
    {ok, S} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Rabbit,
      ["cluster_status"]),
    {ok, R} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Hare,
      ["cluster_status"]),

    %% Then: both nodes have printed alarm information for each other.
    ok = alarm_information_on_each_node(S, Rabbit, Hare),
    ok = alarm_information_on_each_node(R, Rabbit, Hare).
+
+
+%% ----------------------------------------------------------------------------
+%% Internal utils
+
%% Node names of all broker nodes started for the current testcase.
cluster_members(Config) ->
    rabbit_ct_broker_helpers:get_node_configs(Config, nodename).
+
%% Assert every node in Nodes reports the {All, Disc, Running} status
%% triple (order-insensitive), retrying until it matches or times out.
assert_cluster_status(Status0, Nodes) ->
    Status = {AllNodes, _, _} = sort_cluster_status(Status0),
    wait_for_cluster_status(Status, AllNodes, Nodes).
+
%% Poll for up to 10 seconds, one attempt per ?LOOP_RECURSION_DELAY ms.
%% The retry counter is an integer compared with `>=', so compute the
%% bound with integer division (`/' yielded an accidental float; the
%% comparison behaved the same, but `div' is the idiomatic form).
wait_for_cluster_status(Status, AllNodes, Nodes) ->
    Max = 10000 div ?LOOP_RECURSION_DELAY,
    wait_for_cluster_status(0, Max, Status, AllNodes, Nodes).
+
%% Retry loop: error out with diagnostic details once Max attempts are
%% exhausted, otherwise re-check all nodes after a short sleep.
wait_for_cluster_status(N, Max, Status, _AllNodes, Nodes) when N >= Max ->
    erlang:error({cluster_status_max_tries_failed,
                  [{nodes, Nodes},
                   {expected_status, Status},
                   {max_tried, Max}]});
wait_for_cluster_status(N, Max, Status, AllNodes, Nodes) ->
    case lists:all(fun (Node) ->
                       verify_status_equal(Node, Status, AllNodes)
                   end, Nodes) of
        true  -> ok;
        false -> timer:sleep(?LOOP_RECURSION_DELAY),
                 wait_for_cluster_status(N + 1, Max, Status, AllNodes, Nodes)
    end.
+
%% A node's status matches if its sorted triple equals the expected one
%% AND rabbit_mnesia:is_clustered/0 agrees (true exactly when the
%% expected membership is more than just the node itself).
verify_status_equal(Node, Status, AllNodes) ->
    NodeStatus = sort_cluster_status(cluster_status(Node)),
    (AllNodes =/= [Node]) =:= rpc:call(Node, rabbit_mnesia, is_clustered, [])
        andalso NodeStatus =:= Status.
+
%% {AllMembers, DiscMembers, RunningMembers} as reported by the node.
cluster_status(Node) ->
    {rpc:call(Node, rabbit_mnesia, cluster_nodes, [all]),
     rpc:call(Node, rabbit_mnesia, cluster_nodes, [disc]),
     rpc:call(Node, rabbit_mnesia, cluster_nodes, [running])}.
+
%% Normalise a status triple by sorting each node list so two statuses
%% can be compared with =:= regardless of reporting order.
sort_cluster_status(Status) ->
    list_to_tuple([lists:sort(Members) || Members <- tuple_to_list(Status)]).
+
%% All Nodes are disc members of one fully-running cluster.
assert_clustered(Nodes) ->
    assert_cluster_status({Nodes, Nodes, Nodes}, Nodes).
+
%% Node is a standalone running disc node.
assert_not_clustered(Node) ->
    assert_cluster_status({[Node], [Node], [Node]}, [Node]).
+
%% Run Fun and require it to fail, accepting any of the error shapes
%% the rabbitmqctl/rpc plumbing produces; any other outcome (including
%% success) exits with {expected_failure, Result}. The old-style
%% `catch' is deliberate here: thrown error tuples are matched the
%% same way as returned ones.
assert_failure(Fun) ->
    case catch Fun() of
        {error, Reason}                -> Reason;
        {error_string, Reason}         -> Reason;
        {badrpc, {'EXIT', Reason}}     -> Reason;
        {badrpc_multi, Reason, _Nodes} -> Reason;
        Other                          -> exit({expected_failure, Other})
    end.
+
%% Stop the rabbit application on Node (the Erlang VM keeps running).
stop_app(Node) ->
    control_action(stop_app, Node).
+
%% Start the rabbit application on Node.
start_app(Node) ->
    control_action(start_app, Node).
+
%% Join Node to To's cluster as a disc node.
join_cluster(Node, To) ->
    join_cluster(Node, To, false).
+
%% Join Node to To's cluster; Ram = true requests RAM membership.
join_cluster(Node, To, Ram) ->
    control_action(join_cluster, Node, [atom_to_list(To)], [{"--ram", Ram}]).
+
%% Reset Node (requires the app to be stopped and peers to agree).
reset(Node) ->
    control_action(reset, Node).
+
%% Reset Node unconditionally, ignoring what the cluster thinks.
force_reset(Node) ->
    control_action(force_reset, Node).
+
%% Ask Node to forget Removee; RemoveWhenOffline maps to --offline.
forget_cluster_node(Node, Removee, RemoveWhenOffline) ->
    control_action(forget_cluster_node, Node, [atom_to_list(Removee)],
                   [{"--offline", RemoveWhenOffline}]).
+
%% Ask Node to forget Removee (online removal, no --offline flag).
forget_cluster_node(Node, Removee) ->
    forget_cluster_node(Node, Removee, false).
+
%% Switch Node's membership type (ram | disc).
change_cluster_node_type(Node, Type) ->
    control_action(change_cluster_node_type, Node, [atom_to_list(Type)]).
+
%% Refresh Node's view of the cluster from DiscoveryNode.
update_cluster_nodes(Node, DiscoveryNode) ->
    control_action(update_cluster_nodes, Node, [atom_to_list(DiscoveryNode)]).
+
%% stop_app / join_cluster / start_app in one go; each step must
%% return ok.
stop_join_start(Node, ClusterTo, Ram) ->
    ok = stop_app(Node),
    ok = join_cluster(Node, ClusterTo, Ram),
    ok = start_app(Node).
+
%% Disc-node variant of stop_join_start/3.
stop_join_start(Node, ClusterTo) ->
    stop_join_start(Node, ClusterTo, false).
+
%% Cleanly leave whatever cluster Node is in and restart standalone.
stop_reset_start(Node) ->
    ok = stop_app(Node),
    ok = reset(Node),
    ok = start_app(Node).
+
%% Run a rabbitmqctl command with no arguments or options.
control_action(Command, Node) ->
    control_action(Command, Node, [], []).
+
%% Run a rabbitmqctl command with arguments but no options.
control_action(Command, Node, Args) ->
    control_action(Command, Node, Args, []).
+
%% Invoke rabbit_control_main:action/5 directly on the target node via
%% rpc, printing its output with io:format/2 on the remote side.
control_action(Command, Node, Args, Opts) ->
    rpc:call(Node, rabbit_control_main, action,
             [Command, Node, Args, Opts,
              fun io:format/2]).
+
%% Declare a durable queue, bind it to amq.fanout, and return the
%% #'queue.declare_ok'{} result (callers inspect message_count).
declare(Ch, Name) ->
    Res = amqp_channel:call(Ch, #'queue.declare'{durable = true,
                                                 queue   = Name}),
    amqp_channel:call(Ch, #'queue.bind'{queue    = Name,
                                        exchange = <<"amq.fanout">>}),
    Res.
+
%% Check a `cluster_status' output string reports Rabbit's memory
%% alarm and Hare's disk alarm.
alarm_information_on_each_node(Output, Rabbit, Hare) ->

    A = string:str(Output, "alarms"), true = A > 0,

    %% Test that names are printed after `alarms': this counts on
    %% output with a `{Name, Value}' kind of format, for listing
    %% alarms, so that we can miss any node names in preamble text.
    Alarms = string:substr(Output, A),
    RabbitStr = atom_to_list(Rabbit),
    HareStr = atom_to_list(Hare),
    %% Node names may or may not be single-quoted, hence the '? in the
    %% patterns.
    match = re:run(Alarms, "\\{'?" ++ RabbitStr ++ "'?,\\[memory\\]\\}",
                   [{capture, none}]),
    match = re:run(Alarms, "\\{'?" ++ HareStr ++ "'?,\\[disk\\]\\}",
                   [{capture, none}]),

    ok.
diff --git a/test/config_schema_SUITE.erl b/test/config_schema_SUITE.erl
new file mode 100644
index 0000000000..79e7220e98
--- /dev/null
+++ b/test/config_schema_SUITE.erl
@@ -0,0 +1,143 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2016 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(config_schema_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
%% Single serial group; the snippet runner cannot run in parallel.
all() ->
    [
      {group, non_parallel_tests}
    ].
+
groups() ->
    [
      {non_parallel_tests, [], [
          run_snippets
        ]}
    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
%% Standard rabbitmq-ct-helpers suite setup.
init_per_suite(Config) ->
    rabbit_ct_helpers:log_environment(),
    rabbit_ct_helpers:run_setup_steps(Config).
+
%% Standard rabbitmq-ct-helpers suite teardown.
end_per_suite(Config) ->
    rabbit_ct_helpers:run_teardown_steps(Config).
+
%% No per-group setup needed.
init_per_group(_, Config) ->
    Config.
+
%% No per-group teardown needed.
end_per_group(_, Config) ->
    Config.
+
%% For run_snippets, resolve the schema dir and snippets file under
%% data_dir, create a fresh results dir under priv_dir, then start a
%% broker node. The `case Testcase of' has no catch-all on purpose:
%% an unknown testcase name should crash here.
init_per_testcase(Testcase, Config) ->
    rabbit_ct_helpers:testcase_started(Config, Testcase),
    Config1 = rabbit_ct_helpers:set_config(Config, [
        {rmq_nodename_suffix, Testcase}
      ]),
    Config2 = case Testcase of
        run_snippets ->
            SchemaDir = filename:join(?config(data_dir, Config1), "schema"),
            ResultsDir = filename:join(?config(priv_dir, Config1), "results"),
            Snippets = filename:join(?config(data_dir, Config1),
                                     "snippets.config"),
            ok = file:make_dir(ResultsDir),
            rabbit_ct_helpers:set_config(Config1, [
                {schema_dir, SchemaDir},
                {results_dir, ResultsDir},
                {conf_snippets, Snippets}
              ])
    end,
    rabbit_ct_helpers:run_steps(Config2,
      rabbit_ct_broker_helpers:setup_steps() ++
      rabbit_ct_client_helpers:setup_steps()).
+
%% Tear down clients first, then the broker node.
end_per_testcase(Testcase, Config) ->
    Config1 = rabbit_ct_helpers:run_steps(Config,
      rabbit_ct_client_helpers:teardown_steps() ++
      rabbit_ct_broker_helpers:teardown_steps()),
    rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
%% Run the snippet checks on the broker node itself (node 0), since
%% config generation needs the broker's scripts and schema on disk.
run_snippets(Config) ->
    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
      ?MODULE, run_snippets1, [Config]).
+
%% Read the snippet list and check each one. Snippets come in two
%% shapes: {N, Conf, Expected, Plugins} and
%% {N, Conf, Advanced, Expected, Plugins} (with an advanced.config
%% part). N is turned into a string to name the per-snippet dir.
run_snippets1(Config) ->
    {ok, [Snippets]} = file:consult(?config(conf_snippets, Config)),
    lists:map(
        fun({N, S, C, P})    -> ok = test_snippet(Config, {integer_to_list(N), S, []}, C, P);
           ({N, S, A, C, P}) -> ok = test_snippet(Config, {integer_to_list(N), S, A}, C, P)
        end,
        Snippets),
    passed.
+
%% Write the snippet to disk, run the config generator over it, and
%% compare the generated terms with the expected ones. Both sides are
%% deep-sorted first so ordering differences don't cause false
%% mismatches.
test_snippet(Config, Snippet, Expected, _Plugins) ->
    {ConfFile, AdvancedFile} = write_snippet(Config, Snippet),
    {ok, GeneratedFile} = generate_config(Config, ConfFile, AdvancedFile),
    {ok, [Generated]} = file:consult(GeneratedFile),
    Gen = deepsort(Generated),
    Exp = deepsort(Expected),
    case Exp of
        Gen -> ok;
        _   ->
            error({config_mismatch, Snippet, Exp, Gen})
    end.
+
%% Write a snippet to disk: the Cuttlefish-style config goes to
%% "config.conf" and the raw Erlang terms to "advanced.config", both
%% under a per-snippet subdirectory of the results dir. Returns
%% {ConfFile, AdvancedFile}.
write_snippet(Config, {Name, Conf, Advanced}) ->
    ResultsDir = ?config(results_dir, Config),
    SnippetDir = filename:join(ResultsDir, Name),
    %% The directory may already exist from a previous run of the same
    %% snippet; anything else is a real error.
    case file:make_dir(SnippetDir) of
        ok              -> ok;
        {error, eexist} -> ok
    end,
    ConfFile = filename:join(SnippetDir, "config.conf"),
    AdvancedFile = filename:join(SnippetDir, "advanced.config"),

    %% Fail here, loudly, if the files cannot be written, instead of
    %% letting a later file:consult/1 fail with a confusing reason.
    ok = file:write_file(ConfFile, Conf),
    ok = rabbit_file:write_term_file(AdvancedFile, [Advanced]),
    {ConfFile, AdvancedFile}.
+
%% Run the broker's config generator over ConfFile + AdvancedFile
%% using the schema dir from the testcase config; the script dir is
%% derived from the rabbitmqctl path. Returns {ok, GeneratedFile}.
generate_config(Config, ConfFile, AdvancedFile) ->
    SchemaDir = ?config(schema_dir, Config),
    ResultsDir = ?config(results_dir, Config),
    Rabbitmqctl = ?config(rabbitmqctl_cmd, Config),
    ScriptDir = filename:dirname(Rabbitmqctl),
    ct:pal("ConfFile=~p ScriptDir=~p SchemaDir=~p AdvancedFile=~p", [ConfFile, ScriptDir, SchemaDir, AdvancedFile]),
    rabbit_config:generate_config_file([ConfFile], ResultsDir, ScriptDir,
                                       SchemaDir, AdvancedFile).
+
%% Recursively normalise a config term for comparison: proplists are
%% keysorted with their values deep-sorted, plain lists are sorted,
%% and any other term is returned unchanged.
deepsort(Term) ->
    case {is_proplist(Term), is_list(Term)} of
        {true, _} ->
            Normalised = [case Entry of
                              {Key, Value} -> {Key, deepsort(Value)};
                              Other        -> Other
                          end || Entry <- Term],
            lists:keysort(1, Normalised);
        {false, true} ->
            lists:sort(Term);
        {false, false} ->
            Term
    end.
+
%% A term counts as a proplist here only if it is a non-empty list and
%% every element is a 2-tuple (the empty list is NOT a proplist, so
%% deepsort/1 treats it as a plain list).
is_proplist([{_, _} | _] = List) ->
    lists:all(fun(Element) ->
                  case Element of
                      {_, _} -> true;
                      _      -> false
                  end
              end, List);
is_proplist(_) ->
    false.
diff --git a/test/config_schema_SUITE_data/certs/cacert.pem b/test/config_schema_SUITE_data/certs/cacert.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/test/config_schema_SUITE_data/certs/cacert.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/test/config_schema_SUITE_data/certs/cert.pem b/test/config_schema_SUITE_data/certs/cert.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/test/config_schema_SUITE_data/certs/cert.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/test/config_schema_SUITE_data/certs/key.pem b/test/config_schema_SUITE_data/certs/key.pem
new file mode 100644
index 0000000000..eaf6b67806
--- /dev/null
+++ b/test/config_schema_SUITE_data/certs/key.pem
@@ -0,0 +1 @@
+I'm not a certificate
diff --git a/test/config_schema_SUITE_data/rabbit-mgmt/access.log b/test/config_schema_SUITE_data/rabbit-mgmt/access.log
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/config_schema_SUITE_data/rabbit-mgmt/access.log
diff --git a/test/config_schema_SUITE_data/schema/rabbitmq.schema b/test/config_schema_SUITE_data/schema/rabbitmq.schema
new file mode 100644
index 0000000000..19040da409
--- /dev/null
+++ b/test/config_schema_SUITE_data/schema/rabbitmq.schema
@@ -0,0 +1,961 @@
+% ==============================
+% Rabbit app section
+% ==============================
+
+%%
+%% Network Connectivity
+%% ====================
+%%
+
+%% By default, RabbitMQ will listen on all interfaces, using
+%% the standard (reserved) AMQP port.
+%%
+%% {tcp_listeners, [5672]},
+%% To listen on a specific interface, provide a tuple of {IpAddress, Port}.
+%% For example, to listen only on localhost for both IPv4 and IPv6:
+%%
+%% {tcp_listeners, [{"127.0.0.1", 5672},
+%% {"[::1]", 5672}]},
+
+{mapping, "listeners.tcp", "rabbit.tcp_listeners",[
+ {datatype, {enum, [none]}}
+]}.
+
+{mapping, "listeners.tcp.$name", "rabbit.tcp_listeners",[
+ {datatype, [integer, ip]}
+]}.
+
+{translation, "rabbit.tcp_listeners",
+fun(Conf) ->
+ case cuttlefish:conf_get("listeners.tcp", Conf, undefined) of
+ none -> [];
+ _ ->
+ Settings = cuttlefish_variable:filter_by_prefix("listeners.tcp", Conf),
+ [ V || {_, V} <- Settings ]
+ end
+end}.
+
+%% SSL listeners are configured in the same fashion as TCP listeners,
+%% including the option to control the choice of interface.
+%%
+%% {ssl_listeners, [5671]},
+
+{mapping, "listeners.ssl", "rabbit.ssl_listeners",[
+ {datatype, {enum, [none]}}
+]}.
+
+{mapping, "listeners.ssl.$name", "rabbit.ssl_listeners",[
+ {datatype, [integer, ip]}
+]}.
+
+{translation, "rabbit.ssl_listeners",
+fun(Conf) ->
+ case cuttlefish:conf_get("listeners.ssl", Conf, undefined) of
+ none -> [];
+ _ ->
+ Settings = cuttlefish_variable:filter_by_prefix("listeners.ssl", Conf),
+ [ V || {_, V} <- Settings ]
+ end
+end}.
+
+%% Number of Erlang processes that will accept connections for the TCP
+%% and SSL listeners.
+%%
+%% {num_tcp_acceptors, 10},
+%% {num_ssl_acceptors, 1},
+
+{mapping, "num_acceptors.ssl", "rabbit.num_ssl_acceptors", [
+ {datatype, integer}
+]}.
+
+{mapping, "num_acceptors.tcp", "rabbit.num_tcp_acceptors", [
+ {datatype, integer}
+]}.
+
+
+%% Maximum time for AMQP 0-8/0-9/0-9-1 handshake (after socket connection
+%% and SSL handshake), in milliseconds.
+%%
+%% {handshake_timeout, 10000},
+
+{mapping, "handshake_timeout", "rabbit.handshake_timeout", [
+ {datatype, integer}
+]}.
+
+%% Set to 'true' to perform reverse DNS lookups when accepting a
+%% connection. Hostnames will then be shown instead of IP addresses
+%% in rabbitmqctl and the management plugin.
+%%
+%% {reverse_dns_lookups, true},
+
+{mapping, "reverse_dns_lookups", "rabbit.reverse_dns_lookups", [
+ {datatype, {enum, [true, false]}}
+]}.
+
+{mapping, "erlang.K", "vm_args.+K", [
+ {default, "true"},
+ {level, advanced}
+]}.
+
+%%
+%% Security / AAA
+%% ==============
+%%
+
+%% The default "guest" user is only permitted to access the server
+%% via a loopback interface (e.g. localhost).
+%% {loopback_users, [<<"guest">>]},
+%%
+%% Uncomment the following line if you want to allow access to the
+%% guest user from anywhere on the network.
+%% {loopback_users, []},
+
+{mapping, "loopback_users", "rabbit.loopback_users", [
+ {datatype, {enum, [none]}}
+]}.
+
+{mapping, "loopback_users.$user", "rabbit.loopback_users", [
+ {datatype, atom}
+]}.
+
+{translation, "rabbit.loopback_users",
+fun(Conf) ->
+ None = cuttlefish:conf_get("loopback_users", Conf, undefined),
+ case None of
+ none -> [];
+ _ ->
+ Settings = cuttlefish_variable:filter_by_prefix("loopback_users", Conf),
+ [ list_to_binary(U) || {["loopback_users", U], V} <- Settings, V == true ]
+ end
+end}.
+
+%% Configuring SSL.
+%% See http://www.rabbitmq.com/ssl.html for full documentation.
+%%
+%% {ssl_options, [{cacertfile, "/path/to/testca/cacert.pem"},
+%% {certfile, "/path/to/server/cert.pem"},
+%% {keyfile, "/path/to/server/key.pem"},
+%% {verify, verify_peer},
+%% {fail_if_no_peer_cert, false}]},
+
+%% SSL options section ========================================================
+
+{mapping, "ssl_allow_poodle_attack", "rabbit.ssl_allow_poodle_attack",
+[{datatype, {enum, [true, false]}}]}.
+
+{mapping, "ssl_options", "rabbit.ssl_options", [
+ {datatype, {enum, [none]}}
+]}.
+
+{translation, "rabbit.ssl_options",
+fun(Conf) ->
+ case cuttlefish:conf_get("ssl_options", Conf, undefined) of
+ none -> [];
+ _ -> cuttlefish:invalid("Invalid ssl_options")
+ end
+end}.
+
+{mapping, "ssl_options.verify", "rabbit.ssl_options.verify", [
+ {datatype, {enum, [verify_peer, verify_none]}}]}.
+
+{mapping, "ssl_options.fail_if_no_peer_cert", "rabbit.ssl_options.fail_if_no_peer_cert", [
+ {datatype, {enum, [true, false]}}]}.
+
+{mapping, "ssl_options.cacertfile", "rabbit.ssl_options.cacertfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "ssl_options.certfile", "rabbit.ssl_options.certfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "ssl_options.cacerts.$name", "rabbit.ssl_options.cacerts",
+ [{datatype, string}]}.
+
+{translation, "rabbit.ssl_options.cacerts",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("ssl_options.cacerts", Conf),
+ [ list_to_binary(V) || {_, V} <- Settings ]
+end}.
+
+{mapping, "ssl_options.cert", "rabbit.ssl_options.cert",
+ [{datatype, string}]}.
+
+{translation, "rabbit.ssl_options.cert",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("ssl_options.cert", Conf))
+end}.
+
+{mapping, "ssl_options.client_renegotiation", "rabbit.ssl_options.client_renegotiation",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "ssl_options.crl_check", "rabbit.ssl_options.crl_check",
+ [{datatype, [{enum, [true, false, peer, best_effort]}]}]}.
+
+{mapping, "ssl_options.depth", "rabbit.ssl_options.depth",
+ [{datatype, integer}, {validators, ["byte"]}]}.
+
+{mapping, "ssl_options.dh", "rabbit.ssl_options.dh",
+ [{datatype, string}]}.
+
+{translation, "rabbit.ssl_options.dh",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("ssl_options.dh", Conf))
+end}.
+
+{mapping, "ssl_options.dhfile", "rabbit.ssl_options.dhfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "ssl_options.honor_cipher_order", "rabbit.ssl_options.honor_cipher_order",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "ssl_options.key.RSAPrivateKey", "rabbit.ssl_options.key",
+ [{datatype, string}]}.
+
+{mapping, "ssl_options.key.DSAPrivateKey", "rabbit.ssl_options.key",
+ [{datatype, string}]}.
+
+{mapping, "ssl_options.key.PrivateKeyInfo", "rabbit.ssl_options.key",
+ [{datatype, string}]}.
+
+{translation, "rabbit.ssl_options.key",
+fun(Conf) ->
+ case cuttlefish_variable:filter_by_prefix("ssl_options.key", Conf) of
+ [{[_,_,Key], Val}|_] -> {list_to_atom(Key), list_to_binary(Val)};
+ _ -> undefined
+ end
+end}.
+
+{mapping, "ssl_options.keyfile", "rabbit.ssl_options.keyfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "ssl_options.log_alert", "rabbit.ssl_options.log_alert",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "ssl_options.password", "rabbit.ssl_options.password",
+ [{datatype, string}]}.
+
+{mapping, "ssl_options.psk_identity", "rabbit.ssl_options.psk_identity",
+ [{datatype, string}]}.
+
+{mapping, "ssl_options.reuse_sessions", "rabbit.ssl_options.reuse_sessions",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "ssl_options.secure_renegotiate", "rabbit.ssl_options.secure_renegotiate",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "ssl_options.versions.$version", "rabbit.ssl_options.versions",
+ [{datatype, atom}]}.
+
+{translation, "rabbit.ssl_options.versions",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("ssl_options.versions", Conf),
+ [ V || {_, V} <- Settings ]
+end}.
+
+%% ===========================================================================
+
+%% Choose the available SASL mechanism(s) to expose.
+%% The two default (built in) mechanisms are 'PLAIN' and
+%% 'AMQPLAIN'. Additional mechanisms can be added via
+%% plugins.
+%%
+%% See http://www.rabbitmq.com/authentication.html for more details.
+%%
+%% {auth_mechanisms, ['PLAIN', 'AMQPLAIN']},
+
+{mapping, "auth_mechanisms.$name", "rabbit.auth_mechanisms", [
+ {datatype, atom}]}.
+
+{translation, "rabbit.auth_mechanisms",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("auth_mechanisms", Conf),
+ [ V || {_, V} <- Settings ]
+end}.
+
+
+%% Select an authentication backend to use. RabbitMQ provides an
+%% internal backend in the core.
+%%
+%% {auth_backends, [rabbit_auth_backend_internal]},
+
+{translation, "rabbit.auth_backends",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("auth_backends", Conf),
+ BackendModule = fun
+ (internal) -> rabbit_auth_backend_internal;
+ (ldap) -> rabbit_auth_backend_ldap;
+ (http) -> rabbit_auth_backend_http;
+ (amqp) -> rabbit_auth_backend_amqp;
+ (dummy) -> rabbit_auth_backend_dummy;
+ (Other) when is_atom(Other) -> Other;
+ (_) -> cuttlefish:invalid("Unknown/unsupported auth backend")
+ end,
+ AuthBackends = [{Num, {default, BackendModule(V)}} || {["auth_backends", Num], V} <- Settings],
+ AuthNBackends = [{Num, {authn, BackendModule(V)}} || {["auth_backends", Num, "authn"], V} <- Settings],
+ AuthZBackends = [{Num, {authz, BackendModule(V)}} || {["auth_backends", Num, "authz"], V} <- Settings],
+ Backends = lists:foldl(
+ fun({NumStr, {Type, V}}, Acc) ->
+ Num = case catch list_to_integer(NumStr) of
+ N when is_integer(N) -> N;
+ Err ->
+ cuttlefish:invalid(
+ iolist_to_binary(io_lib:format(
+ "Auth backend position in the chain should be an integer ~p", [Err])))
+ end,
+ NewVal = case dict:find(Num, Acc) of
+ {ok, {AuthN, AuthZ}} ->
+ case {Type, AuthN, AuthZ} of
+ {authn, undefined, _} ->
+ {V, AuthZ};
+ {authz, _, undefined} ->
+ {AuthN, V};
+ _ ->
+ cuttlefish:invalid(
+ iolist_to_binary(
+ io_lib:format(
+ "Auth backend already defined for the ~pth ~p backend",
+ [Num, Type])))
+ end;
+ error ->
+ case Type of
+ authn -> {V, undefined};
+ authz -> {undefined, V};
+ default -> {V, V}
+ end
+ end,
+ dict:store(Num, NewVal, Acc)
+ end,
+ dict:new(),
+ AuthBackends ++ AuthNBackends ++ AuthZBackends),
+ lists:map(
+ fun
+ ({Num, {undefined, AuthZ}}) ->
+ cuttlefish:warn(
+ io_lib:format(
+ "Auth backend undefined for the ~pth authz backend. Using ~p",
+ [Num, AuthZ])),
+ {AuthZ, AuthZ};
+ ({Num, {AuthN, undefined}}) ->
+ cuttlefish:warn(
+ io_lib:format(
+ "Authz backend undefined for the ~pth authn backend. Using ~p",
+ [Num, AuthN])),
+ {AuthN, AuthN};
+ ({_Num, {Auth, Auth}}) -> Auth;
+ ({_Num, {AuthN, AuthZ}}) -> {AuthN, AuthZ}
+ end,
+ lists:keysort(1, dict:to_list(Backends)))
+end}.
+
+{mapping, "auth_backends.$num", "rabbit.auth_backends", [
+ {datatype, atom}
+]}.
+
+{mapping, "auth_backends.$num.authn", "rabbit.auth_backends",[
+ {datatype, atom}
+]}.
+
+{mapping, "auth_backends.$num.authz", "rabbit.auth_backends",[
+ {datatype, atom}
+]}.
+
+%% This pertains to both the rabbitmq_auth_mechanism_ssl plugin and
+%% STOMP ssl_cert_login configurations. See the rabbitmq_stomp
+%% configuration section later in this file and the README in
+%% https://github.com/rabbitmq/rabbitmq-auth-mechanism-ssl for further
+%% details.
+%%
+%% To use the SSL cert's CN instead of its DN as the username
+%%
+%% {ssl_cert_login_from, common_name},
+
+{mapping, "ssl_cert_login_from", "rabbit.ssl_cert_login_from", [
+ {datatype, {enum, [distinguished_name, common_name]}}
+]}.
+
+%% SSL handshake timeout, in milliseconds.
+%%
+%% {ssl_handshake_timeout, 5000},
+
+{mapping, "ssl_handshake_timeout", "rabbit.ssl_handshake_timeout", [
+ {datatype, integer}
+]}.
+
+%% Password hashing implementation. Will only affect newly
+%% created users. To recalculate hash for an existing user
+%% it's necessary to update her password.
+%%
+%% When importing definitions exported from versions earlier
+%% than 3.6.0, it is possible to go back to MD5 (only do this
+%% as a temporary measure!) by setting this to rabbit_password_hashing_md5.
+%%
+%% To use SHA-512, set to rabbit_password_hashing_sha512.
+%%
+%% {password_hashing_module, rabbit_password_hashing_sha256},
+
+{mapping, "password_hashing_module", "rabbit.password_hashing_module", [
+ {datatype, atom}
+]}.
+
+%%
+%% Default User / VHost
+%% ====================
+%%
+
+%% On first start RabbitMQ will create a vhost and a user. These
+%% config items control what gets created. See
+%% http://www.rabbitmq.com/access-control.html for further
+%% information about vhosts and access control.
+%%
+%% {default_vhost, <<"/">>},
+%% {default_user, <<"guest">>},
+%% {default_pass, <<"guest">>},
+%% {default_permissions, [<<".*">>, <<".*">>, <<".*">>]},
+
+{mapping, "default_vhost", "rabbit.default_vhost", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.default_vhost",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("default_vhost", Conf))
+end}.
+
+{mapping, "default_user", "rabbit.default_user", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.default_user",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("default_user", Conf))
+end}.
+
+{mapping, "default_pass", "rabbit.default_pass", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.default_pass",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("default_pass", Conf))
+end}.
+
+{mapping, "default_permissions.configure", "rabbit.default_permissions", [
+ {datatype, string}
+]}.
+
+{mapping, "default_permissions.read", "rabbit.default_permissions", [
+ {datatype, string}
+]}.
+
+{mapping, "default_permissions.write", "rabbit.default_permissions", [
+ {datatype, string}
+]}.
+
+{translation, "rabbit.default_permissions",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("default_permissions", Conf),
+ Configure = proplists:get_value(["default_permissions", "configure"], Settings),
+ Read = proplists:get_value(["default_permissions", "read"], Settings),
+ Write = proplists:get_value(["default_permissions", "write"], Settings),
+ [list_to_binary(Configure), list_to_binary(Read), list_to_binary(Write)]
+end}.
+
+%% Tags for default user
+%%
+%% For more details about tags, see the documentation for the
+%% Management Plugin at http://www.rabbitmq.com/management.html.
+%%
+%% {default_user_tags, [administrator]},
+
+{mapping, "default_user_tags.$tag", "rabbit.default_user_tags",
+ [{datatype, {enum, [true, false]}}]}.
+
+{translation, "rabbit.default_user_tags",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("default_user_tags", Conf),
+ [ list_to_atom(Key) || {[_,Key], Val} <- Settings, Val == true ]
+end}.
+
+%%
+%% Additional network and protocol related configuration
+%% =====================================================
+%%
+
+%% Set the default AMQP heartbeat delay (in seconds).
+%%
+%% {heartbeat, 600},
+
+{mapping, "heartbeat", "rabbit.heartbeat", [{datatype, integer}]}.
+
+%% Set the max permissible size of an AMQP frame (in bytes).
+%%
+%% {frame_max, 131072},
+
+{mapping, "frame_max", "rabbit.frame_max", [{datatype, bytesize}]}.
+
+%% Set the max frame size the server will accept before connection
+%% tuning occurs
+%%
+%% {initial_frame_max, 4096},
+
+{mapping, "initial_frame_max", "rabbit.initial_frame_max", [{datatype, bytesize}]}.
+
+%% Set the max permissible number of channels per connection.
+%% 0 means "no limit".
+%%
+%% {channel_max, 128},
+
+{mapping, "channel_max", "rabbit.channel_max", [{datatype, integer}]}.
+
+%% Customising Socket Options.
+%%
+%% See (http://www.erlang.org/doc/man/inet.html#setopts-2) for
+%% further documentation.
+%%
+%% {tcp_listen_options, [{backlog, 128},
+%% {nodelay, true},
+%% {exit_on_close, false}]},
+
+%% TCP listener section ======================================================
+
+{mapping, "tcp_listen_options", "rabbit.tcp_listen_options", [
+ {datatype, {enum, [none]}}]}.
+
+{translation, "rabbit.tcp_listen_options",
+fun(Conf) ->
+    case cuttlefish:conf_get("tcp_listen_options", Conf, undefined) of
+ none -> [];
+ _ -> cuttlefish:invalid("Invalid tcp_listen_options")
+ end
+end}.
+
+{mapping, "tcp_listen_options.backlog", "rabbit.tcp_listen_options.backlog", [
+ {datatype, integer}
+]}.
+
+{mapping, "tcp_listen_options.nodelay", "rabbit.tcp_listen_options.nodelay", [
+ {datatype, {enum, [true, false]}}
+]}.
+
+{mapping, "tcp_listen_options.buffer", "rabbit.tcp_listen_options.buffer",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.delay_send", "rabbit.tcp_listen_options.delay_send",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "tcp_listen_options.dontroute", "rabbit.tcp_listen_options.dontroute",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "tcp_listen_options.exit_on_close", "rabbit.tcp_listen_options.exit_on_close",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "tcp_listen_options.fd", "rabbit.tcp_listen_options.fd",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.high_msgq_watermark", "rabbit.tcp_listen_options.high_msgq_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.high_watermark", "rabbit.tcp_listen_options.high_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.keepalive", "rabbit.tcp_listen_options.keepalive",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "tcp_listen_options.low_msgq_watermark", "rabbit.tcp_listen_options.low_msgq_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.low_watermark", "rabbit.tcp_listen_options.low_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.port", "rabbit.tcp_listen_options.port",
+ [{datatype, integer}, {validators, ["port"]}]}.
+
+{mapping, "tcp_listen_options.priority", "rabbit.tcp_listen_options.priority",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.recbuf", "rabbit.tcp_listen_options.recbuf",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.send_timeout", "rabbit.tcp_listen_options.send_timeout",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.send_timeout_close", "rabbit.tcp_listen_options.send_timeout_close",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "tcp_listen_options.sndbuf", "rabbit.tcp_listen_options.sndbuf",
+ [{datatype, integer}]}.
+
+{mapping, "tcp_listen_options.tos", "rabbit.tcp_listen_options.tos",
+ [{datatype, integer}]}.
+
+%% ==========================================================================
+
+%%
+%% Resource Limits & Flow Control
+%% ==============================
+%%
+%% See http://www.rabbitmq.com/memory.html for full details.
+
+%% Memory-based Flow Control threshold.
+%%
+%% {vm_memory_high_watermark, 0.4},
+
+%% Alternatively, we can set a limit (in bytes) of RAM used by the node.
+%%
+%% {vm_memory_high_watermark, {absolute, 1073741824}},
+%%
+%% Or you can set absolute value using memory units (with RabbitMQ 3.6.0+).
+%%
+%% {vm_memory_high_watermark, {absolute, "1024M"}},
+%%
+%% Supported units suffixes:
+%%
+%% kb, KB: kibibytes (2^10 bytes)
+%% mb, MB: mebibytes (2^20)
+%% gb, GB: gibibytes (2^30)
+
+{mapping, "vm_memory_high_watermark.relative", "rabbit.vm_memory_high_watermark", [
+ {datatype, float}]}.
+
+{mapping, "vm_memory_high_watermark.absolute", "rabbit.vm_memory_high_watermark", [
+ {datatype, [integer, string]}]}.
+
+
+{translation, "rabbit.vm_memory_high_watermark",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("vm_memory_high_watermark", Conf),
+ Absolute = proplists:get_value(["vm_memory_high_watermark", "absolute"], Settings),
+ Relative = proplists:get_value(["vm_memory_high_watermark", "relative"], Settings),
+ case {Absolute, Relative} of
+ {undefined, undefined} -> cuttlefish:invalid("No vm watermark defined");
+ {_, undefined} -> {absolute, Absolute};
+ _ -> Relative
+ end
+end}.
+
+%% Fraction of the high watermark limit at which queues start to
+%% page message out to disc in order to free up memory.
+%%
+%% Values greater than 0.9 can be dangerous and should be used carefully.
+%%
+%% {vm_memory_high_watermark_paging_ratio, 0.5},
+
+{mapping, "vm_memory_high_watermark_paging_ratio",
+ "rabbit.vm_memory_high_watermark_paging_ratio",
+ [{datatype, float}, {validators, ["less_than_1"]}]}.
+
+%% Interval (in milliseconds) at which we perform the check of the memory
+%% levels against the watermarks.
+%%
+%% {memory_monitor_interval, 2500},
+
+{mapping, "memory_monitor_interval", "rabbit.memory_monitor_interval",
+ [{datatype, integer}]}.
+
+%% Set disk free limit (in bytes). Once free disk space reaches this
+%% lower bound, a disk alarm will be set - see the documentation
+%% listed above for more details.
+%%
+%% {disk_free_limit, 50000000},
+%%
+%% Or you can set it using memory units (same as in vm_memory_high_watermark)
+%% with RabbitMQ 3.6.0+.
+%% {disk_free_limit, "50MB"},
+%% {disk_free_limit, "50000kB"},
+%% {disk_free_limit, "2GB"},
+
+%% Alternatively, we can set a limit relative to total available RAM.
+%%
+%% Values lower than 1.0 can be dangerous and should be used carefully.
+%% {disk_free_limit, {mem_relative, 2.0}},
+
+{mapping, "disk_free_limit.relative", "rabbit.disk_free_limit", [
+ {datatype, float}]}.
+
+{mapping, "disk_free_limit.absolute", "rabbit.disk_free_limit", [
+ {datatype, [integer, string]}]}.
+
+
+{translation, "rabbit.disk_free_limit",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("disk_free_limit", Conf),
+ Absolute = proplists:get_value(["disk_free_limit", "absolute"], Settings),
+ Relative = proplists:get_value(["disk_free_limit", "relative"], Settings),
+ case {Absolute, Relative} of
+ {undefined, undefined} -> cuttlefish:invalid("No disk limit defined");
+ {_, undefined} -> Absolute;
+ _ -> {mem_relative, Relative}
+ end
+end}.
+
+%%
+%% Clustering
+%% =====================
+%%
+
+%% How to respond to cluster partitions.
+%% See http://www.rabbitmq.com/partitions.html for further details.
+%%
+%% {cluster_partition_handling, ignore},
+
+{mapping, "cluster_partition_handling", "rabbit.cluster_partition_handling",
+ [{datatype, {enum, [ignore, pause_minority, autoheal, pause_if_all_down]}}]}.
+
+{mapping, "cluster_partition_handling.pause_if_all_down.recover",
+ "rabbit.cluster_partition_handling",
+ [{datatype, {enum, [ignore, autoheal]}}]}.
+
+{mapping, "cluster_partition_handling.pause_if_all_down.nodes.$name",
+ "rabbit.cluster_partition_handling",
+ [{datatype, atom}]}.
+
+{translation, "rabbit.cluster_partition_handling",
+fun(Conf) ->
+ case cuttlefish:conf_get("cluster_partition_handling", Conf) of
+ pause_if_all_down ->
+ PauseIfAllDownNodes = cuttlefish_variable:filter_by_prefix(
+ "cluster_partition_handling.pause_if_all_down.nodes",
+ Conf),
+ case PauseIfAllDownNodes of
+ [] ->
+ cuttlefish:invalid("Nodes required for pause_if_all_down");
+ _ ->
+                    Nodes = [ V || {_K, V} <- PauseIfAllDownNodes ],
+ PauseIfAllDownRecover = cuttlefish:conf_get(
+ "cluster_partition_handling.pause_if_all_down.recover",
+ Conf),
+ case PauseIfAllDownRecover of
+ Recover when Recover == ignore; Recover == autoheal ->
+ {pause_if_all_down, Nodes, Recover};
+                        _Invalid ->
+ cuttlefish:invalid("Recover strategy required for pause_if_all_down")
+ end
+ end;
+ Other -> Other
+ end
+end}.
+
+%% Mirror sync batch size, in messages. Increasing this will speed
+%% up syncing but total batch size in bytes must not exceed 2 GiB.
+%% Available in RabbitMQ 3.6.0 or later.
+%%
+%% {mirroring_sync_batch_size, 4096},
+
+{mapping, "mirroring_sync_batch_size", "rabbit.mirroring_sync_batch_size",
+ [{datatype, bytesize}, {validators, ["size_less_than_2G"]}]}.
+
+%% Make clustering happen *automatically* at startup - only applied
+%% to nodes that have just been reset or started for the first time.
+%% See http://www.rabbitmq.com/clustering.html#auto-config for
+%% further details.
+%%
+%% {cluster_nodes, {['rabbit@my.host.com'], disc}},
+
+{mapping, "cluster_nodes.disc.$node", "rabbit.cluster_nodes",
+ [{datatype, atom}]}.
+
+{mapping, "cluster_nodes.ram.$node", "rabbit.cluster_nodes",
+ [{datatype, atom}]}.
+
+{translation, "rabbit.cluster_nodes",
+fun(Conf) ->
+ DiskNodes = [ V || {_, V} <- cuttlefish_variable:filter_by_prefix("cluster_nodes.disc", Conf)],
+ RamNodes = [ V || {_, V} <- cuttlefish_variable:filter_by_prefix("cluster_nodes.ram", Conf)],
+
+ case {DiskNodes, RamNodes} of
+ {_, []} -> {DiskNodes, disc};
+ {[], _} -> {RamNodes, ram}
+ end
+end}.
+
+
+%% Interval (in milliseconds) at which we send keepalive messages
+%% to other cluster members. Note that this is not the same thing
+%% as net_ticktime; missed keepalive messages will not cause nodes
+%% to be considered down.
+%%
+%% {cluster_keepalive_interval, 10000},
+
+{mapping, "cluster_keepalive_interval", "rabbit.cluster_keepalive_interval",
+ [{datatype, integer}]}.
+
+
+{mapping, "queue_master_locator", "rabbit.queue_master_locator",
+ [{datatype, string}]}.
+
+{translation, "rabbit.queue_master_locator",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("queue_master_locator", Conf))
+end}.
+
+%%
+%% Statistics Collection
+%% =====================
+%%
+
+%% Set (internal) statistics collection granularity.
+%%
+%% {collect_statistics, none},
+
+{mapping, "collect_statistics", "rabbit.collect_statistics",
+ [{datatype, {enum, [none, coarse, fine]}}]}.
+
+%% Statistics collection interval (in milliseconds). Increasing
+%% this will reduce the load on management database.
+%%
+%% {collect_statistics_interval, 5000},
+
+{mapping, "collect_statistics_interval", "rabbit.collect_statistics_interval",
+ [{datatype, integer}]}.
+
+%%
+%% Misc/Advanced Options
+%% =====================
+%%
+%% NB: Change these only if you understand what you are doing!
+%%
+
+%% Explicitly enable/disable hipe compilation.
+%%
+%% {hipe_compile, true},
+
+{mapping, "hipe_compile", "rabbit.hipe_compile",
+ [{datatype, {enum, [true, false]}}]}.
+
+%% Timeout used when waiting for Mnesia tables in a cluster to
+%% become available.
+%%
+%% {mnesia_table_loading_timeout, 30000},
+
+{mapping, "mnesia_table_loading_timeout", "rabbit.mnesia_table_loading_timeout",
+ [{datatype, integer}]}.
+
+%% Size in bytes below which to embed messages in the queue index. See
+%% http://www.rabbitmq.com/persistence-conf.html
+%%
+%% {queue_index_embed_msgs_below, 4096}
+
+{mapping, "queue_index_embed_msgs_below", "rabbit.queue_index_embed_msgs_below",
+ [{datatype, bytesize}]}.
+
+% ==========================
+% Lager section
+% ==========================
+
+{mapping, "log.dir", "lager.log_root", [
+ {datatype, string},
+ {validators, ["dir_writable"]}]}.
+
+{mapping, "log.console", "lager.handlers", [
+ {datatype, {enum, [true, false]}}
+]}.
+
+{mapping, "log.syslog", "lager.handlers", [
+ {datatype, {enum, [true, false]}}
+]}.
+{mapping, "log.file", "lager.handlers", [
+ {datatype, [{enum, [false]}, string]}
+]}.
+
+{mapping, "log.file.level", "lager.handlers", [
+ {datatype, {enum, [debug, info, warning, error]}}
+]}.
+{mapping, "log.$handler.level", "lager.handlers", [
+ {datatype, {enum, [debug, info, warning, error]}}
+]}.
+{mapping, "log.file.rotation.date", "lager.handlers", [
+ {datatype, string}
+]}.
+{mapping, "log.file.rotation.size", "lager.handlers", [
+ {datatype, integer}
+]}.
+{mapping, "log.file.rotation.count", "lager.handlers", [
+ {datatype, integer}
+]}.
+
+{mapping, "log.syslog.identity", "lager.handlers", [
+ {datatype, string}
+]}.
+{mapping, "log.syslog.facility", "lager.handlers", [
+ {datatype, atom}
+]}.
+
+{translation, "lager.handlers",
+fun(Conf) ->
+ ConsoleHandler = case cuttlefish:conf_get("log.console", Conf, false) of
+ true ->
+ ConsoleLevel = cuttlefish:conf_get("log.console.level", Conf, info),
+ [{lager_console_backend, ConsoleLevel}];
+ false -> []
+ end,
+ FileHandler = case cuttlefish:conf_get("log.file", Conf, false) of
+ false -> [];
+ File ->
+ FileLevel = cuttlefish:conf_get("log.file.level", Conf, info),
+ RotationDate = cuttlefish:conf_get("log.file.rotation.date", Conf, ""),
+ RotationSize = cuttlefish:conf_get("log.file.rotation.size", Conf, 0),
+ RotationCount = cuttlefish:conf_get("log.file.rotation.count", Conf, 10),
+ [{lager_file_backend, [{file, File},
+ {level, FileLevel},
+ {date, RotationDate},
+ {size, RotationSize},
+ {count, RotationCount}]}]
+ end,
+ SyslogHandler = case cuttlefish:conf_get("log.syslog", Conf, false) of
+ false -> [];
+ true ->
+ SyslogLevel = cuttlefish:conf_get("log.syslog.level", Conf, info),
+ Identity = cuttlefish:conf_get("log.syslog.identity", Conf),
+ Facility = cuttlefish:conf_get("log.syslog.facility", Conf),
+ [{lager_syslog_backend, [Identity, Facility, SyslogLevel]}]
+ end,
+ case ConsoleHandler ++ FileHandler ++ SyslogHandler of
+ [] -> undefined;
+ Other -> Other
+ end
+end}.
+
+
+% ===============================
+% Validators
+% ===============================
+
+{validator, "size_less_than_2G", "Byte size should be less than 2G and greater than 0",
+fun(Size) when is_integer(Size) ->
+ Size > 0 andalso Size < 2147483648
+end}.
+
+{validator, "less_than_1", "Float is not between 0 and 1",
+fun(Float) when is_float(Float) ->
+ Float > 0 andalso Float < 1
+end}.
+
+{validator, "port", "Invalid port number",
+fun(Port) when is_integer(Port) ->
+ Port > 0 andalso Port < 65535
+end}.
+
+{validator, "byte", "Integer is not 0<i<255",
+fun(Int) when is_integer(Int) ->
+ Int > 0 andalso Int < 255
+end}.
+
+{validator, "dir_writable", "Cannot create file in dir",
+fun(Dir) ->
+ TestFile = filename:join(Dir, "test_file"),
+ file:delete(TestFile),
+ Res = ok == file:write_file(TestFile, <<"test">>),
+ file:delete(TestFile),
+ Res
+end}.
+
+{validator, "file_accessible", "file doesn't exist or is inaccessible",
+fun(File) ->
+ ReadFile = file:read_file_info(File),
+ element(1, ReadFile) == ok
+end}.
+
+{validator, "is_ip", "string is a valid IP address",
+fun(IpStr) ->
+ Res = inet:parse_address(IpStr),
+ element(1, Res) == ok
+end}.
diff --git a/test/config_schema_SUITE_data/schema/rabbitmq_amqp1_0.schema b/test/config_schema_SUITE_data/schema/rabbitmq_amqp1_0.schema
new file mode 100644
index 0000000000..e6cfb68262
--- /dev/null
+++ b/test/config_schema_SUITE_data/schema/rabbitmq_amqp1_0.schema
@@ -0,0 +1,31 @@
+%% ----------------------------------------------------------------------------
+%% RabbitMQ AMQP 1.0 Support
+%%
+%% See https://github.com/rabbitmq/rabbitmq-amqp1.0/blob/stable/README.md
+%% for details
+%% ----------------------------------------------------------------------------
+
+% {rabbitmq_amqp1_0,[
+%% Connections that are not authenticated with SASL will connect as this
+%% account. See the README for more information.
+%%
+%% Please note that setting this will allow clients to connect without
+%% authenticating!
+%%
+%% {default_user, "guest"},
+{mapping, "amqp1_0.default_user", "rabbitmq_amqp1_0.default_user",
+ [{datatype, [{enum, [none]}, string]}]}.
+%% Enable protocol strict mode. See the README for more information.
+%%
+%% {protocol_strict_mode, false}
+% ]},
+{mapping, "amqp1_0.protocol_strict_mode", "rabbitmq_amqp1_0.protocol_strict_mode",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "amqp1_0.default_vhost", "rabbitmq_amqp1_0.default_vhost",
+ [{datatype, string}]}.
+
+{translation , "rabbitmq_amqp1_0.default_vhost",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("amqp1_0.default_vhost", Conf))
+end}. \ No newline at end of file
diff --git a/test/config_schema_SUITE_data/schema/rabbitmq_auth_backend_amqp.schema b/test/config_schema_SUITE_data/schema/rabbitmq_auth_backend_amqp.schema
new file mode 100644
index 0000000000..a30efb6c03
--- /dev/null
+++ b/test/config_schema_SUITE_data/schema/rabbitmq_auth_backend_amqp.schema
@@ -0,0 +1,27 @@
+{mapping, "rabbitmq_auth_backend_amqp.username", "rabbitmq_auth_backend_amqp.username",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_auth_backend_amqp.username",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("rabbitmq_auth_backend_amqp.username", Conf))
+end}.
+
+{mapping, "rabbitmq_auth_backend_amqp.vhost", "rabbitmq_auth_backend_amqp.vhost",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_auth_backend_amqp.vhost",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("rabbitmq_auth_backend_amqp.vhost", Conf))
+end}.
+
+{mapping, "rabbitmq_auth_backend_amqp.exchange", "rabbitmq_auth_backend_amqp.exchange",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_auth_backend_amqp.exchange",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("rabbitmq_auth_backend_amqp.exchange", Conf))
+end}.
+
+
+{mapping, "rabbitmq_auth_backend_amqp.timeout", "rabbitmq_auth_backend_amqp.timeout",
+ [{datatype, [{enum, [infinity]}, integer]}]}.
diff --git a/test/config_schema_SUITE_data/schema/rabbitmq_auth_backend_http.schema b/test/config_schema_SUITE_data/schema/rabbitmq_auth_backend_http.schema
new file mode 100644
index 0000000000..f10eb6710b
--- /dev/null
+++ b/test/config_schema_SUITE_data/schema/rabbitmq_auth_backend_http.schema
@@ -0,0 +1,15 @@
+
+%% ==========================================================================
+%% ----------------------------------------------------------------------------
+%% RabbitMQ HTTP Authorization
+%%
+%% ----------------------------------------------------------------------------
+
+{mapping, "rabbitmq_auth_backend_http.user_path", "rabbitmq_auth_backend_http.user_path",
+ [{datatype, string}, {validators, ["uri"]}]}.
+
+{mapping, "rabbitmq_auth_backend_http.vhost_path", "rabbitmq_auth_backend_http.vhost_path",
+ [{datatype, string}, {validators, ["uri"]}]}.
+
+{mapping, "rabbitmq_auth_backend_http.resource_path", "rabbitmq_auth_backend_http.resource_path",
+ [{datatype, string}, {validators, ["uri"]}]}.
diff --git a/test/config_schema_SUITE_data/schema/rabbitmq_auth_backend_ldap.schema b/test/config_schema_SUITE_data/schema/rabbitmq_auth_backend_ldap.schema
new file mode 100644
index 0000000000..334fd014c1
--- /dev/null
+++ b/test/config_schema_SUITE_data/schema/rabbitmq_auth_backend_ldap.schema
@@ -0,0 +1,183 @@
+%% ----------------------------------------------------------------------------
+%% RabbitMQ LDAP Plugin
+%%
+%% See http://www.rabbitmq.com/ldap.html for details.
+%%
+%% ----------------------------------------------------------------------------
+
+% {rabbitmq_auth_backend_ldap,
+% [
+%%
+%% Connecting to the LDAP server(s)
+%% ================================
+%%
+
+%% Specify servers to bind to. You *must* set this in order for the plugin
+%% to work properly.
+%%
+%% {servers, ["your-server-name-goes-here"]},
+
+{mapping, "rabbitmq_auth_backend_ldap.servers", "rabbitmq_auth_backend_ldap.servers",
+ [{datatype, {enum, [none]}}]}.
+
+{mapping, "rabbitmq_auth_backend_ldap.servers.$server", "rabbitmq_auth_backend_ldap.servers",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_auth_backend_ldap.servers",
+fun(Conf) ->
+ case cuttlefish:conf_get("rabbitmq_auth_backend_ldap.servers", Conf, undefined) of
+ none -> [];
+ _ ->
+ Settings = cuttlefish_variable:filter_by_prefix("rabbitmq_auth_backend_ldap.servers", Conf),
+ [ V || {_, V} <- Settings ]
+ end
+end}.
+
+%% Connect to the LDAP server using SSL
+%%
+%% {use_ssl, false},
+
+{mapping, "rabbitmq_auth_backend_ldap.use_ssl", "rabbitmq_auth_backend_ldap.use_ssl",
+ [{datatype, {enum, [true, false]}}]}.
+
+%% Specify the LDAP port to connect to
+%%
+%% {port, 389},
+
+{mapping, "rabbitmq_auth_backend_ldap.port", "rabbitmq_auth_backend_ldap.port",
+ [{datatype, integer}]}.
+
+%% LDAP connection timeout, in milliseconds or 'infinity'
+%%
+%% {timeout, infinity},
+
+{mapping, "rabbitmq_auth_backend_ldap.timeout", "rabbitmq_auth_backend_ldap.timeout",
+  [{datatype, [integer, {enum, [infinity]}]}]}.
+
+%% Enable logging of LDAP queries.
+%% One of
+%% - false (no logging is performed)
+%% - true (verbose logging of the logic used by the plugin)
+%% - network (as true, but additionally logs LDAP network traffic)
+%%
+%% Defaults to false.
+%%
+%% {log, false},
+
+{mapping, "rabbitmq_auth_backend_ldap.log", "rabbitmq_auth_backend_ldap.log",
+ [{datatype, {enum, [true, false, network]}}]}.
+
+%%
+%% Authentication
+%% ==============
+%%
+
+%% Pattern to convert the username given through AMQP to a DN before
+%% binding
+%%
+%% {user_dn_pattern, "cn=${username},ou=People,dc=example,dc=com"},
+
+{mapping, "rabbitmq_auth_backend_ldap.user_dn_pattern", "rabbitmq_auth_backend_ldap.user_dn_pattern",
+ [{datatype, string}]}.
+
+%% Alternatively, you can convert a username to a Distinguished
+%% Name via an LDAP lookup after binding. See the documentation for
+%% full details.
+
+%% When converting a username to a dn via a lookup, set these to
+%% the name of the attribute that represents the user name, and the
+%% base DN for the lookup query.
+%%
+%% {dn_lookup_attribute, "userPrincipalName"},
+%% {dn_lookup_base, "DC=gopivotal,DC=com"},
+
+{mapping, "rabbitmq_auth_backend_ldap.dn_lookup_attribute", "rabbitmq_auth_backend_ldap.dn_lookup_attribute",
+ [{datatype, [{enum, [none]}, string]}]}.
+
+{mapping, "rabbitmq_auth_backend_ldap.dn_lookup_base", "rabbitmq_auth_backend_ldap.dn_lookup_base",
+ [{datatype, [{enum, [none]}, string]}]}.
+
+{mapping, "rabbitmq_auth_backend_ldap.dn_lookup_bind", "rabbitmq_auth_backend_ldap.dn_lookup_bind",
+ [{datatype, [{enum, [as_user]}]}]}.
+
+{mapping, "rabbitmq_auth_backend_ldap.dn_lookup_bind.user_dn", "rabbitmq_auth_backend_ldap.dn_lookup_bind",
+ [{datatype, [string]}]}.
+
+{mapping, "rabbitmq_auth_backend_ldap.dn_lookup_bind.password", "rabbitmq_auth_backend_ldap.dn_lookup_bind",
+ [{datatype, [string]}]}.
+
+{translation, "rabbitmq_auth_backend_ldap.dn_lookup_bind",
+fun(Conf) ->
+ case cuttlefish:conf_get("rabbitmq_auth_backend_ldap.dn_lookup_bind", Conf, undefined) of
+ as_user -> as_user;
+ _ ->
+ User = cuttlefish:conf_get("rabbitmq_auth_backend_ldap.dn_lookup_bind.user_dn", Conf),
+ Pass = cuttlefish:conf_get("rabbitmq_auth_backend_ldap.dn_lookup_bind.password", Conf),
+ case {User, Pass} of
+ {undefined, _} -> as_user;
+ {_, undefined} -> as_user;
+ _ -> {User, Pass}
+ end
+ end
+end}.
+
+%% Controls how to bind for authorisation queries and also to
+%% retrieve the details of users logging in without presenting a
+%% password (e.g., SASL EXTERNAL).
+%% One of
+%% - as_user (to bind as the authenticated user - requires a password)
+%% - anon (to bind anonymously)
+%% - {UserDN, Password} (to bind with a specified user name and password)
+%%
+%% Defaults to 'as_user'.
+%%
+%% {other_bind, as_user},
+
+{mapping, "rabbitmq_auth_backend_ldap.other_bind", "rabbitmq_auth_backend_ldap.other_bind",
+ [{datatype, {enum, [as_user, anon]}}]}.
+
+{mapping, "rabbitmq_auth_backend_ldap.other_bind.user_dn", "rabbitmq_auth_backend_ldap.other_bind",
+ [{datatype, string}]}.
+
+{mapping, "rabbitmq_auth_backend_ldap.other_bind.password", "rabbitmq_auth_backend_ldap.other_bind",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_auth_backend_ldap.other_bind",
+fun(Conf) ->
+ case cuttlefish:conf_get("rabbitmq_auth_backend_ldap.other_bind", Conf, undefined) of
+ as_user -> as_user;
+ anon -> anon;
+ _ ->
+ User = cuttlefish:conf_get("rabbitmq_auth_backend_ldap.other_bind.user_dn", Conf),
+ Pass = cuttlefish:conf_get("rabbitmq_auth_backend_ldap.other_bind.password", Conf),
+ case {User, Pass} of
+ {undefined, _} -> as_user;
+ {_, undefined} -> as_user;
+ _ -> {User, Pass}
+ end
+ end
+end}.
+
+%%
+%% Authorisation
+%% =============
+%%
+
+%% The LDAP plugin can perform a variety of queries against your
+%% LDAP server to determine questions of authorisation. See
+%% http://www.rabbitmq.com/ldap.html#authorisation for more
+%% information.
+
+%% Set the query to use when determining vhost access
+%%
+%% {vhost_access_query, {in_group,
+%% "ou=${vhost}-users,ou=vhosts,dc=example,dc=com"}},
+
+%% Set the query to use when determining resource (e.g., queue) access
+%%
+%% {resource_access_query, {constant, true}},
+
+%% Set queries to determine which tags a user has
+%%
+%% {tag_queries, []}
+% ]},
diff --git a/test/config_schema_SUITE_data/schema/rabbitmq_clusterer.schema b/test/config_schema_SUITE_data/schema/rabbitmq_clusterer.schema
new file mode 100644
index 0000000000..ba127f00c1
--- /dev/null
+++ b/test/config_schema_SUITE_data/schema/rabbitmq_clusterer.schema
@@ -0,0 +1,58 @@
+{mapping, "clusterer.config", "rabbitmq_clusterer.config",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{translation, "rabbitmq_clusterer.config",
+fun(Conf) ->
+ case cuttlefish:conf_get("clusterer.config", Conf, undefined) of
+ String when is_list(String) ->
+ case cuttlefish_variable:filter_by_prefix("clusterer", Conf) of
+ [{["clusterer", "config"], String}] -> String;
+ _ -> cuttlefish:invalid("Config for clusterer defined in "++
+ String ++ " file. " ++
+ "All other clusterer configurations should be removed")
+ end;
+ _ -> []
+ end
+end}.
+
+{mapping, "clusterer.version", "rabbitmq_clusterer.config.version",
+ [{datatype, integer}]}.
+
+{mapping, "clusterer.nodes.$node", "rabbitmq_clusterer.config.nodes",
+ [{datatype, atom}]}.
+
+{mapping, "clusterer.nodes.ram.$node", "rabbitmq_clusterer.config.nodes",
+ [{datatype, atom}]}.
+
+{mapping, "clusterer.nodes.disk.$node", "rabbitmq_clusterer.config.nodes",
+ [{datatype, atom}]}.
+
+{mapping, "clusterer.nodes.disc.$node", "rabbitmq_clusterer.config.nodes",
+ [{datatype, atom}]}.
+
+{translation, "rabbitmq_clusterer.config.nodes",
+fun(Conf) ->
+ DiskNodes = cuttlefish_variable:filter_by_prefix("clusterer.nodes", Conf)
+ ++ cuttlefish_variable:filter_by_prefix("clusterer.nodes.disk", Conf)
+ ++ cuttlefish_variable:filter_by_prefix("clusterer.nodes.disc", Conf),
+ RamNodes = cuttlefish_variable:filter_by_prefix("clusterer.nodes.ram", Conf),
+    [{Node, disk} || {_, Node} <- DiskNodes] ++ [{Node, ram} || {_, Node} <- RamNodes]
+end}.
+
+{mapping, "clusterer.gospel", "rabbitmq_clusterer.config.gospel",
+ [{datatype, {enum, [reset]}}]}.
+
+{mapping, "clusterer.gospel.node", "rabbitmq_clusterer.config.gospel",
+ [{datatype, atom}]}.
+
+{translation, "rabbitmq_clusterer.config.gospel",
+fun(Conf) ->
+ case cuttlefish:conf_get("clusterer.gospel", Conf, undefined) of
+ reset -> reset;
+ _ ->
+ {node, cuttlefish:conf_get("clusterer.gospel.node", Conf)}
+ end
+end}.
+
+
+
diff --git a/test/config_schema_SUITE_data/schema/rabbitmq_management.schema b/test/config_schema_SUITE_data/schema/rabbitmq_management.schema
new file mode 100644
index 0000000000..7ac6d21b93
--- /dev/null
+++ b/test/config_schema_SUITE_data/schema/rabbitmq_management.schema
@@ -0,0 +1,203 @@
+%% ----------------------------------------------------------------------------
+%% RabbitMQ Management Plugin
+%%
+%% See http://www.rabbitmq.com/management.html for details
+%% ----------------------------------------------------------------------------
+
+ % {rabbitmq_management,
+ % [%% Pre-Load schema definitions from the following JSON file. See
+%% http://www.rabbitmq.com/management.html#load-definitions
+%%
+%% {load_definitions, "/path/to/schema.json"},
+{mapping, "management.load_definitions", "rabbitmq_management.load_definitions",
+ [{datatype, string},
+ {validators, ["file_accessible"]}]}.
+
+%% Log all requests to the management HTTP API to a file.
+%%
+%% {http_log_dir, "/path/to/access.log"},
+
+{mapping, "management.http_log_dir", "rabbitmq_management.http_log_dir",
+ [{datatype, string}]}.
+
+
+%% Change the port on which the HTTP listener listens,
+%% specifying an interface for the web server to bind to.
+%% Also set the listener to use SSL and provide SSL options.
+%%
+%% {listener, [{port, 12345},
+%% {ip, "127.0.0.1"},
+%% {ssl, true},
+%% {ssl_opts, [{cacertfile, "/path/to/cacert.pem"},
+%% {certfile, "/path/to/cert.pem"},
+%% {keyfile, "/path/to/key.pem"}]}]},
+
+{mapping, "management.listener.port", "rabbitmq_management.listener.port",
+ [{datatype, integer}]}.
+
+{mapping, "management.listener.ip", "rabbitmq_management.listener.ip",
+ [{datatype, string},
+ {validators, ["is_ip"]}]}.
+
+{mapping, "management.listener.ssl", "rabbitmq_management.listener.ssl",
+ [{datatype, {enum, [true, false]}}]}.
+
+
+%% SSL options section ========================================================
+
+{mapping, "management.listener.ssl_opts", "rabbitmq_management.listener.ssl_opts", [
+ {datatype, {enum, [none]}}
+]}.
+
+{translation, "rabbitmq_management.listener.ssl_opts",
+fun(Conf) ->
+ case cuttlefish:conf_get("management.listener.ssl_opts", Conf, undefined) of
+ none -> [];
+ _ -> cuttlefish:invalid("Invalid management.listener.ssl_opts")
+ end
+end}.
+
+{mapping, "management.listener.ssl_opts.verify", "rabbitmq_management.listener.ssl_opts.verify", [
+ {datatype, {enum, [verify_peer, verify_none]}}]}.
+
+{mapping, "management.listener.ssl_opts.fail_if_no_peer_cert", "rabbitmq_management.listener.ssl_opts.fail_if_no_peer_cert", [
+ {datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.listener.ssl_opts.cacertfile", "rabbitmq_management.listener.ssl_opts.cacertfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "management.listener.ssl_opts.certfile", "rabbitmq_management.listener.ssl_opts.certfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "management.listener.ssl_opts.cacerts.$name", "rabbitmq_management.listener.ssl_opts.cacerts",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_management.listener.ssl_opts.cacerts",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("management.listener.ssl_opts.cacerts", Conf),
+ [ list_to_binary(V) || {_, V} <- Settings ]
+end}.
+
+{mapping, "management.listener.ssl_opts.cert", "rabbitmq_management.listener.ssl_opts.cert",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_management.listener.ssl_opts.cert",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("management.listener.ssl_opts.cert", Conf))
+end}.
+
+{mapping, "management.listener.ssl_opts.client_renegotiation", "rabbitmq_management.listener.ssl_opts.client_renegotiation",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.listener.ssl_opts.crl_check", "rabbitmq_management.listener.ssl_opts.crl_check",
+ [{datatype, [{enum, [true, false, peer, best_effort]}]}]}.
+
+{mapping, "management.listener.ssl_opts.depth", "rabbitmq_management.listener.ssl_opts.depth",
+ [{datatype, integer}, {validators, ["byte"]}]}.
+
+{mapping, "management.listener.ssl_opts.dh", "rabbitmq_management.listener.ssl_opts.dh",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_management.listener.ssl_opts.dh",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("management.listener.ssl_opts.dh", Conf))
+end}.
+
+{mapping, "management.listener.ssl_opts.dhfile", "rabbitmq_management.listener.ssl_opts.dhfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "management.listener.ssl_opts.honor_cipher_order", "rabbitmq_management.listener.ssl_opts.honor_cipher_order",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.listener.ssl_opts.key.RSAPrivateKey", "rabbitmq_management.listener.ssl_opts.key",
+ [{datatype, string}]}.
+
+{mapping, "management.listener.ssl_opts.key.DSAPrivateKey", "rabbitmq_management.listener.ssl_opts.key",
+ [{datatype, string}]}.
+
+{mapping, "management.listener.ssl_opts.key.PrivateKeyInfo", "rabbitmq_management.listener.ssl_opts.key",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_management.listener.ssl_opts.key",
+fun(Conf) ->
+ case cuttlefish_variable:filter_by_prefix("management.listener.ssl_opts.key", Conf) of
+        [{[_,_,_,_,Key], Val}|_] -> {list_to_atom(Key), list_to_binary(Val)};
+ _ -> undefined
+ end
+end}.
+
+{mapping, "management.listener.ssl_opts.keyfile", "rabbitmq_management.listener.ssl_opts.keyfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+
+{mapping, "management.listener.ssl_opts.log_alert", "rabbitmq_management.listener.ssl_opts.log_alert",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.listener.ssl_opts.password", "rabbitmq_management.listener.ssl_opts.password",
+ [{datatype, string}]}.
+
+{mapping, "management.listener.ssl_opts.psk_identity", "rabbitmq_management.listener.ssl_opts.psk_identity",
+ [{datatype, string}]}.
+
+{mapping, "management.listener.ssl_opts.reuse_sessions", "rabbitmq_management.listener.ssl_opts.reuse_sessions",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.listener.ssl_opts.secure_renegotiate", "rabbitmq_management.listener.ssl_opts.secure_renegotiate",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "management.listener.ssl_opts.versions.$version", "rabbitmq_management.listener.ssl_opts.versions",
+ [{datatype, atom}]}.
+
+{translation, "rabbitmq_management.listener.ssl_opts.versions",
+fun(Conf) ->
+ Settings = cuttlefish_variable:filter_by_prefix("management.listener.ssl_opts.versions", Conf),
+ [ V || {_, V} <- Settings ]
+end}.
+
+%% ===========================================================================
+
+
+%% One of 'basic', 'detailed' or 'none'. See
+%% http://www.rabbitmq.com/management.html#fine-stats for more details.
+%% {rates_mode, basic},
+{mapping, "management.rates_mode", "rabbitmq_management.rates_mode",
+ [{datatype, {enum, [basic, detailed, none]}}]}.
+
+%% Configure how long aggregated data (such as message rates and queue
+%% lengths) is retained. Please read the plugin's documentation in
+%% http://www.rabbitmq.com/management.html#configuration for more
+%% details.
+%%
+%% {sample_retention_policies,
+%% [{global, [{60, 5}, {3600, 60}, {86400, 1200}]},
+%% {basic, [{60, 5}, {3600, 60}]},
+%% {detailed, [{10, 5}]}]}
+% ]},
+
+{mapping, "management.sample_retention_policies.$section.$interval",
+ "rabbitmq_management.sample_retention_policies",
+ [{datatype, integer}]}.
+
+{translation, "rabbitmq_management.sample_retention_policies",
+fun(Conf) ->
+ Global = cuttlefish_variable:filter_by_prefix("management.sample_retention_policies.global", Conf),
+ Basic = cuttlefish_variable:filter_by_prefix("management.sample_retention_policies.basic", Conf),
+ Detailed = cuttlefish_variable:filter_by_prefix("management.sample_retention_policies.detailed", Conf),
+ TranslateKey = fun("minute") -> 60;
+ ("hour") -> 3600;
+ ("day") -> 86400;
+ (Other) -> list_to_integer(Other)
+ end,
+ TranslatePolicy = fun(Section) ->
+ [ {TranslateKey(Key), Val} || {[_,_,_,Key], Val} <- Section ]
+ end,
+ [{global, TranslatePolicy(Global)},
+ {basic, TranslatePolicy(Basic)},
+ {detailed, TranslatePolicy(Detailed)}]
+end}.
+
+
+{validator, "is_dir", "is not directory",
+fun(File) ->
+ ReadFile = file:list_dir(File),
+ element(1, ReadFile) == ok
+end}.
diff --git a/test/config_schema_SUITE_data/schema/rabbitmq_metronome.schema b/test/config_schema_SUITE_data/schema/rabbitmq_metronome.schema
new file mode 100644
index 0000000000..53cf8f003e
--- /dev/null
+++ b/test/config_schema_SUITE_data/schema/rabbitmq_metronome.schema
@@ -0,0 +1,9 @@
+
+{mapping, "metronome.exchange", "rabbitmq_metronome.exchange",
+ [{datatype, string}]}.
+
+{translation, "rabbitmq_metronome.exchange",
+fun(Conf) ->
+ Exchange = cuttlefish:conf_get("metronome.exchange", Conf),
+ list_to_binary(Exchange)
+end}. \ No newline at end of file
diff --git a/test/config_schema_SUITE_data/schema/rabbitmq_mqtt.schema b/test/config_schema_SUITE_data/schema/rabbitmq_mqtt.schema
new file mode 100644
index 0000000000..1daab5423d
--- /dev/null
+++ b/test/config_schema_SUITE_data/schema/rabbitmq_mqtt.schema
@@ -0,0 +1,235 @@
+%% ----------------------------------------------------------------------------
+%% RabbitMQ MQTT Adapter
+%%
+%% See https://github.com/rabbitmq/rabbitmq-mqtt/blob/stable/README.md
+%% for details
+%% ----------------------------------------------------------------------------
+
+% {rabbitmq_mqtt,
+% [%% Set the default user name and password. Will be used as the default login
+%% if a connecting client provides no other login details.
+%%
+%% Please note that setting this will allow clients to connect without
+%% authenticating!
+%%
+%% {default_user, <<"guest">>},
+%% {default_pass, <<"guest">>},
+
+{mapping, "mqtt.default_user", "rabbitmq_mqtt.default_user", [
+ {datatype, string}
+]}.
+
+{mapping, "mqtt.default_pass", "rabbitmq_mqtt.default_pass", [
+ {datatype, string}
+]}.
+
+{translation, "rabbitmq_mqtt.default_user",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("mqtt.default_user", Conf))
+end}.
+
+{translation, "rabbitmq_mqtt.default_pass",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("mqtt.default_pass", Conf))
+end}.
+
+%% Enable anonymous access. If this is set to false, clients MUST provide
+%% login information in order to connect. See the default_user/default_pass
+%% configuration elements for managing logins without authentication.
+%%
+%% {allow_anonymous, true},
+
+{mapping, "mqtt.allow_anonymous", "rabbitmq_mqtt.allow_anonymous",
+ [{datatype, {enum, [true, false]}}]}.
+
+%% If you have multiple vhosts, specify the one to which the
+%% adapter connects.
+%%
+%% {vhost, <<"/">>},
+
+{mapping, "mqtt.vhost", "rabbitmq_mqtt.vhost", [{datatype, string}]}.
+
+{translation, "rabbitmq_mqtt.vhost",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("mqtt.vhost", Conf))
+end}.
+
+%% Specify the exchange to which messages from MQTT clients are published.
+%%
+%% {exchange, <<"amq.topic">>},
+
+{mapping, "mqtt.exchange", "rabbitmq_mqtt.exchange", [{datatype, string}]}.
+
+{translation, "rabbitmq_mqtt.exchange",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("mqtt.exchange", Conf))
+end}.
+
+%% Specify TTL (time to live) to control the lifetime of non-clean sessions.
+%%
+%% {subscription_ttl, 1800000},
+{mapping, "mqtt.subscription_ttl", "rabbitmq_mqtt.subscription_ttl", [
+ {datatype, [{enum, [undefined, infinity]}, integer]}
+]}.
+
+{translation, "rabbitmq_mqtt.subscription_ttl",
+fun(Conf) ->
+ case cuttlefish:conf_get("mqtt.subscription_ttl", Conf, undefined) of
+ undefined -> undefined;
+ infinity -> undefined;
+ Ms -> Ms
+ end
+end}.
+
+%% Set the prefetch count (governing the maximum number of unacknowledged
+%% messages that will be delivered).
+%%
+%% {prefetch, 10},
+{mapping, "mqtt.prefetch", "rabbitmq_mqtt.prefetch",
+ [{datatype, integer}]}.
+
+
+{mapping, "mqtt.retained_message_store", "rabbitmq_mqtt.retained_message_store",
+ [{datatype, atom}]}.
+
+{mapping, "mqtt.retained_message_store_dets_sync_interval", "rabbitmq_mqtt.retained_message_store_dets_sync_interval",
+ [{datatype, integer}]}.
+
+
+
+%% TCP/SSL Configuration (as per the broker configuration).
+%%
+%% {tcp_listeners, [1883]},
+%% {ssl_listeners, []},
+
+{mapping, "mqtt.listeners.tcp", "rabbitmq_mqtt.tcp_listeners",[
+ {datatype, {enum, [none]}}
+]}.
+
+{mapping, "mqtt.listeners.tcp.$name", "rabbitmq_mqtt.tcp_listeners",[
+ {datatype, [integer, ip]}
+]}.
+
+{translation, "rabbitmq_mqtt.tcp_listeners",
+fun(Conf) ->
+ case cuttlefish:conf_get("mqtt.listeners.tcp", Conf, undefined) of
+ none -> [];
+ _ ->
+ Settings = cuttlefish_variable:filter_by_prefix("mqtt.listeners.tcp", Conf),
+ [ V || {_, V} <- Settings ]
+ end
+end}.
+
+{mapping, "mqtt.listeners.ssl", "rabbitmq_mqtt.ssl_listeners",[
+ {datatype, {enum, [none]}}
+]}.
+
+{mapping, "mqtt.listeners.ssl.$name", "rabbitmq_mqtt.ssl_listeners",[
+ {datatype, [integer, ip]}
+]}.
+
+{translation, "rabbitmq_mqtt.ssl_listeners",
+fun(Conf) ->
+ case cuttlefish:conf_get("mqtt.listeners.ssl", Conf, undefined) of
+ none -> [];
+ _ ->
+ Settings = cuttlefish_variable:filter_by_prefix("mqtt.listeners.ssl", Conf),
+ [ V || {_, V} <- Settings ]
+ end
+end}.
+
+%% Number of Erlang processes that will accept connections for the TCP
+%% and SSL listeners.
+%%
+%% {num_tcp_acceptors, 10},
+%% {num_ssl_acceptors, 1},
+
+{mapping, "mqtt.num_acceptors.ssl", "rabbitmq_mqtt.num_ssl_acceptors", [
+ {datatype, integer}
+]}.
+
+{mapping, "mqtt.num_acceptors.tcp", "rabbitmq_mqtt.num_tcp_acceptors", [
+ {datatype, integer}
+]}.
+
+{mapping, "mqtt.ssl_cert_login", "rabbitmq_mqtt.ssl_cert_login", [
+ {datatype, {enum, [true, false]}}]}.
+
+
+%% TCP/Socket options (as per the broker configuration).
+%%
+%% {tcp_listen_options, [{backlog, 128},
+%% {nodelay, true}]}
+% ]},
+
+%% TCP listener section ======================================================
+
+{mapping, "mqtt.tcp_listen_options", "rabbitmq_mqtt.rabbit.tcp_listen_options", [
+ {datatype, {enum, [none]}}]}.
+
+{translation, "rabbitmq_mqtt.rabbit.tcp_listen_options",
+fun(Conf) ->
+    case cuttlefish:conf_get("mqtt.tcp_listen_options", Conf, undefined) of
+ none -> [];
+ _ -> cuttlefish:invalid("Invalid mqtt.tcp_listen_options")
+ end
+end}.
+
+{mapping, "mqtt.tcp_listen_options.backlog", "rabbitmq_mqtt.tcp_listen_options.backlog", [
+ {datatype, integer}
+]}.
+
+{mapping, "mqtt.tcp_listen_options.nodelay", "rabbitmq_mqtt.tcp_listen_options.nodelay", [
+ {datatype, {enum, [true, false]}}
+]}.
+
+{mapping, "mqtt.tcp_listen_options.buffer", "rabbitmq_mqtt.tcp_listen_options.buffer",
+ [{datatype, integer}]}.
+
+{mapping, "mqtt.tcp_listen_options.delay_send", "rabbitmq_mqtt.tcp_listen_options.delay_send",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "mqtt.tcp_listen_options.dontroute", "rabbitmq_mqtt.tcp_listen_options.dontroute",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "mqtt.tcp_listen_options.exit_on_close", "rabbitmq_mqtt.tcp_listen_options.exit_on_close",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "mqtt.tcp_listen_options.fd", "rabbitmq_mqtt.tcp_listen_options.fd",
+ [{datatype, integer}]}.
+
+{mapping, "mqtt.tcp_listen_options.high_msgq_watermark", "rabbitmq_mqtt.tcp_listen_options.high_msgq_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "mqtt.tcp_listen_options.high_watermark", "rabbitmq_mqtt.tcp_listen_options.high_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "mqtt.tcp_listen_options.keepalive", "rabbitmq_mqtt.tcp_listen_options.keepalive",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "mqtt.tcp_listen_options.low_msgq_watermark", "rabbitmq_mqtt.tcp_listen_options.low_msgq_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "mqtt.tcp_listen_options.low_watermark", "rabbitmq_mqtt.tcp_listen_options.low_watermark",
+ [{datatype, integer}]}.
+
+{mapping, "mqtt.tcp_listen_options.port", "rabbitmq_mqtt.tcp_listen_options.port",
+ [{datatype, integer}, {validators, ["port"]}]}.
+
+{mapping, "mqtt.tcp_listen_options.priority", "rabbitmq_mqtt.tcp_listen_options.priority",
+ [{datatype, integer}]}.
+
+{mapping, "mqtt.tcp_listen_options.recbuf", "rabbitmq_mqtt.tcp_listen_options.recbuf",
+ [{datatype, integer}]}.
+
+{mapping, "mqtt.tcp_listen_options.send_timeout", "rabbitmq_mqtt.tcp_listen_options.send_timeout",
+ [{datatype, integer}]}.
+
+{mapping, "mqtt.tcp_listen_options.send_timeout_close", "rabbitmq_mqtt.tcp_listen_options.send_timeout_close",
+ [{datatype, {enum, [true, false]}}]}.
+
+{mapping, "mqtt.tcp_listen_options.sndbuf", "rabbitmq_mqtt.tcp_listen_options.sndbuf",
+ [{datatype, integer}]}.
+
+{mapping, "mqtt.tcp_listen_options.tos", "rabbitmq_mqtt.tcp_listen_options.tos",
+ [{datatype, integer}]}.
diff --git a/test/config_schema_SUITE_data/schema/rabbitmq_stomp.schema b/test/config_schema_SUITE_data/schema/rabbitmq_stomp.schema
new file mode 100644
index 0000000000..b7619f0b28
--- /dev/null
+++ b/test/config_schema_SUITE_data/schema/rabbitmq_stomp.schema
@@ -0,0 +1,110 @@
+%% ==========================================================================
+%% ----------------------------------------------------------------------------
+%% RabbitMQ Stomp Adapter
+%%
+%% See http://www.rabbitmq.com/stomp.html for details
+%% ----------------------------------------------------------------------------
+
+% {rabbitmq_stomp,
+% [%% Network Configuration - the format is generally the same as for the broker
+
+%% Listen only on localhost (ipv4 & ipv6) on a specific port.
+%% {tcp_listeners, [{"127.0.0.1", 61613},
+%% {"::1", 61613}]},
+
+{mapping, "stomp.listeners.tcp", "rabbitmq_stomp.tcp_listeners",[
+ {datatype, {enum, [none]}}
+]}.
+
+{mapping, "stomp.listeners.tcp.$name", "rabbitmq_stomp.tcp_listeners",[
+ {datatype, [integer, ip]}
+]}.
+
+{translation, "rabbitmq_stomp.tcp_listeners",
+fun(Conf) ->
+ case cuttlefish:conf_get("stomp.listeners.tcp", Conf, undefined) of
+ none -> [];
+ _ ->
+ Settings = cuttlefish_variable:filter_by_prefix("stomp.listeners.tcp", Conf),
+ [ V || {_, V} <- Settings ]
+ end
+end}.
+
+{mapping, "stomp.listeners.ssl", "rabbitmq_stomp.ssl_listeners",[
+ {datatype, {enum, [none]}}
+]}.
+
+{mapping, "stomp.listeners.ssl.$name", "rabbitmq_stomp.ssl_listeners",[
+ {datatype, [integer, ip]}
+]}.
+
+{translation, "rabbitmq_stomp.ssl_listeners",
+fun(Conf) ->
+ case cuttlefish:conf_get("stomp.listeners.ssl", Conf, undefined) of
+ none -> [];
+ _ ->
+ Settings = cuttlefish_variable:filter_by_prefix("stomp.listeners.ssl", Conf),
+ [ V || {_, V} <- Settings ]
+ end
+end}.
+
+%% Number of Erlang processes that will accept connections for the TCP
+%% and SSL listeners.
+%%
+%% {num_tcp_acceptors, 10},
+%% {num_ssl_acceptors, 1},
+
+{mapping, "stomp.num_acceptors.ssl", "rabbitmq_stomp.num_ssl_acceptors", [
+ {datatype, integer}
+]}.
+
+{mapping, "stomp.num_acceptors.tcp", "rabbitmq_stomp.num_tcp_acceptors", [
+ {datatype, integer}
+]}.
+
+%% Additional SSL options
+
+%% Extract a name from the client's certificate when using SSL.
+%%
+%% {ssl_cert_login, true},
+
+{mapping, "stomp.ssl_cert_login", "rabbitmq_stomp.ssl_cert_login",
+ [{datatype, {enum, [true, false]}}]}.
+
+%% Set a default user name and password. This is used as the default login
+%% whenever a CONNECT frame omits the login and passcode headers.
+%%
+%% Please note that setting this will allow clients to connect without
+%% authenticating!
+%%
+%% {default_user, [{login, "guest"},
+%% {passcode, "guest"}]},
+
+{mapping, "stomp.default_vhost", "rabbitmq_stomp.default_vhost", [
+ {datatype, string}
+]}.
+
+{translation, "rabbitmq_stomp.default_vhost",
+fun(Conf) ->
+ list_to_binary(cuttlefish:conf_get("stomp.default_vhost", Conf, "/"))
+end}.
+
+{mapping, "stomp.default_user", "rabbitmq_stomp.default_user.login", [
+ {datatype, string}
+]}.
+
+{mapping, "stomp.default_pass", "rabbitmq_stomp.default_user.passcode", [
+ {datatype, string}
+]}.
+
+%% If a default user is configured, or you have configured use SSL client
+%% certificate based authentication, you can choose to allow clients to
+%% omit the CONNECT frame entirely. If set to true, the client is
+%% automatically connected as the default user or user supplied in the
+%% SSL certificate whenever the first frame sent on a session is not a
+%% CONNECT frame.
+%%
+%% {implicit_connect, true}
+% ]},
+{mapping, "stomp.implicit_connect", "rabbitmq_stomp.implicit_connect",
+ [{datatype, {enum, [true, false]}}]}.
diff --git a/test/config_schema_SUITE_data/schema/rabbitmq_web_mqtt.schema b/test/config_schema_SUITE_data/schema/rabbitmq_web_mqtt.schema
new file mode 100644
index 0000000000..acdab62c32
--- /dev/null
+++ b/test/config_schema_SUITE_data/schema/rabbitmq_web_mqtt.schema
@@ -0,0 +1,44 @@
+{mapping, "web_mqtt.num_acceptors.tcp", "rabbitmq_web_mqtt.num_tcp_acceptors",
+ [{datatype, integer}]}.
+{mapping, "web_mqtt.num_acceptors.ssl", "rabbitmq_web_mqtt.num_ssl_acceptors",
+ [{datatype, integer}]}.
+
+{mapping, "web_mqtt.tcp.port", "rabbitmq_web_mqtt.tcp_config.port",
+ [{datatype, integer}]}.
+{mapping, "web_mqtt.tcp.backlog", "rabbitmq_web_mqtt.tcp_config.backlog",
+ [{datatype, integer}]}.
+{mapping, "web_mqtt.tcp.ip", "rabbitmq_web_mqtt.tcp_config.ip",
+ [{datatype, string}, {validators, ["is_ip"]}]}.
+
+
+{mapping, "web_mqtt.ssl.port", "rabbitmq_web_mqtt.ssl_config.port",
+ [{datatype, integer}]}.
+{mapping, "web_mqtt.ssl.backlog", "rabbitmq_web_mqtt.ssl_config.backlog",
+ [{datatype, integer}]}.
+{mapping, "web_mqtt.ssl.ip", "rabbitmq_web_mqtt.ssl_config.ip",
+ [{datatype, string}, {validators, ["is_ip"]}]}.
+{mapping, "web_mqtt.ssl.certfile", "rabbitmq_web_mqtt.ssl_config.certfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+{mapping, "web_mqtt.ssl.keyfile", "rabbitmq_web_mqtt.ssl_config.keyfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+{mapping, "web_mqtt.ssl.cacertfile", "rabbitmq_web_mqtt.ssl_config.cacertfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+{mapping, "web_mqtt.ssl.password", "rabbitmq_web_mqtt.ssl_config.password",
+ [{datatype, string}]}.
+
+
+{mapping, "web_mqtt.cowboy_opts.max_empty_lines", "rabbitmq_web_mqtt.cowboy_opts.max_empty_lines",
+ [{datatype, integer}]}.
+{mapping, "web_mqtt.cowboy_opts.max_header_name_length", "rabbitmq_web_mqtt.cowboy_opts.max_header_name_length",
+ [{datatype, integer}]}.
+{mapping, "web_mqtt.cowboy_opts.max_header_value_length", "rabbitmq_web_mqtt.cowboy_opts.max_header_value_length",
+ [{datatype, integer}]}.
+{mapping, "web_mqtt.cowboy_opts.max_headers", "rabbitmq_web_mqtt.cowboy_opts.max_headers",
+ [{datatype, integer}]}.
+{mapping, "web_mqtt.cowboy_opts.max_keepalive", "rabbitmq_web_mqtt.cowboy_opts.max_keepalive",
+ [{datatype, integer}]}.
+{mapping, "web_mqtt.cowboy_opts.max_request_line_length", "rabbitmq_web_mqtt.cowboy_opts.max_request_line_length",
+ [{datatype, integer}]}.
+{mapping, "web_mqtt.cowboy_opts.timeout", "rabbitmq_web_mqtt.cowboy_opts.timeout",
+ [{datatype, integer}]}.
+
diff --git a/test/config_schema_SUITE_data/schema/rabbitmq_web_stomp.schema b/test/config_schema_SUITE_data/schema/rabbitmq_web_stomp.schema
new file mode 100644
index 0000000000..389da07d14
--- /dev/null
+++ b/test/config_schema_SUITE_data/schema/rabbitmq_web_stomp.schema
@@ -0,0 +1,64 @@
+{mapping, "web_stomp.port", "rabbitmq_web_stomp.port",
+ [{datatype, integer}]}.
+
+{mapping, "web_stomp.ws_frame", "rabbitmq_web_stomp.ws_frame",
+ [{datatype, {enum, [binary, text]}}]}.
+
+{mapping, "web_stomp.num_acceptors.tcp", "rabbitmq_web_stomp.num_tcp_acceptors",
+ [{datatype, integer}]}.
+
+{mapping, "web_stomp.num_acceptors.ssl", "rabbitmq_web_stomp.num_ssl_acceptors",
+ [{datatype, integer}]}.
+
+{mapping, "web_stomp.tcp.port", "rabbitmq_web_stomp.tcp_config.port",
+ [{datatype, integer}]}.
+{mapping, "web_stomp.tcp.backlog", "rabbitmq_web_stomp.tcp_config.backlog",
+ [{datatype, integer}]}.
+{mapping, "web_stomp.tcp.ip", "rabbitmq_web_stomp.tcp_config.ip",
+ [{datatype, string}, {validators, ["is_ip"]}]}.
+
+
+{mapping, "web_stomp.ssl.port", "rabbitmq_web_stomp.ssl_config.port",
+ [{datatype, integer}]}.
+{mapping, "web_stomp.ssl.backlog", "rabbitmq_web_stomp.ssl_config.backlog",
+ [{datatype, integer}]}.
+{mapping, "web_stomp.ssl.ip", "rabbitmq_web_stomp.ssl_config.ip",
+ [{datatype, string}, {validators, ["is_ip"]}]}.
+{mapping, "web_stomp.ssl.certfile", "rabbitmq_web_stomp.ssl_config.certfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+{mapping, "web_stomp.ssl.keyfile", "rabbitmq_web_stomp.ssl_config.keyfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+{mapping, "web_stomp.ssl.cacertfile", "rabbitmq_web_stomp.ssl_config.cacertfile",
+ [{datatype, string}, {validators, ["file_accessible"]}]}.
+{mapping, "web_stomp.ssl.password", "rabbitmq_web_stomp.ssl_config.password",
+ [{datatype, string}]}.
+
+
+{mapping, "web_stomp.cowboy_opts.max_empty_lines", "rabbitmq_web_stomp.cowboy_opts.max_empty_lines",
+ [{datatype, integer}]}.
+{mapping, "web_stomp.cowboy_opts.max_header_name_length", "rabbitmq_web_stomp.cowboy_opts.max_header_name_length",
+ [{datatype, integer}]}.
+{mapping, "web_stomp.cowboy_opts.max_header_value_length", "rabbitmq_web_stomp.cowboy_opts.max_header_value_length",
+ [{datatype, integer}]}.
+{mapping, "web_stomp.cowboy_opts.max_headers", "rabbitmq_web_stomp.cowboy_opts.max_headers",
+ [{datatype, integer}]}.
+{mapping, "web_stomp.cowboy_opts.max_keepalive", "rabbitmq_web_stomp.cowboy_opts.max_keepalive",
+ [{datatype, integer}]}.
+{mapping, "web_stomp.cowboy_opts.max_request_line_length", "rabbitmq_web_stomp.cowboy_opts.max_request_line_length",
+ [{datatype, integer}]}.
+{mapping, "web_stomp.cowboy_opts.timeout", "rabbitmq_web_stomp.cowboy_opts.timeout",
+ [{datatype, integer}]}.
+
+
+{mapping, "web_stomp.sockjs_opts.url", "rabbitmq_web_stomp.sockjs_opts.sockjs_url",
+ [{datatype, string}]}.
+{mapping, "web_stomp.sockjs_opts.websocket", "rabbitmq_web_stomp.sockjs_opts.websocket",
+ [{datatype, {enum, [true, false]}}]}.
+{mapping, "web_stomp.sockjs_opts.cookie_needed", "rabbitmq_web_stomp.sockjs_opts.cookie_needed",
+ [{datatype, {enum, [true, false]}}]}.
+{mapping, "web_stomp.sockjs_opts.heartbeat_delay", "rabbitmq_web_stomp.sockjs_opts.heartbeat_delay",
+ [{datatype, integer}]}.
+{mapping, "web_stomp.sockjs_opts.disconnect_delay", "rabbitmq_web_stomp.sockjs_opts.disconnect_delay",
+ [{datatype, integer}]}.
+{mapping, "web_stomp.sockjs_opts.response_limit", "rabbitmq_web_stomp.sockjs_opts.response_limit",
+ [{datatype, integer}]}.
diff --git a/test/config_schema_SUITE_data/snippets.config b/test/config_schema_SUITE_data/snippets.config
new file mode 100644
index 0000000000..22c9f2b7cd
--- /dev/null
+++ b/test/config_schema_SUITE_data/snippets.config
@@ -0,0 +1,714 @@
+[
+{1,
+"auth_backends.1 = internal",
+[{rabbit, [{auth_backends, [rabbit_auth_backend_internal]}]}],[]}
+,
+{2,
+"auth_backends.1 = ldap",
+[{rabbit, [{auth_backends, [rabbit_auth_backend_ldap]}]}],[]}
+,
+
+{3,
+"auth_backends.1 = ldap
+auth_backends.2 = internal",
+
+[{rabbit, [
+ {auth_backends, [rabbit_auth_backend_ldap, rabbit_auth_backend_internal]}
+ ]
+ }],[]}
+
+,
+
+{4,
+"auth_backends.1 = ldap
+# uses module name instead of a short alias, \"http\"
+auth_backends.2 = rabbit_auth_backend_http",
+
+[{rabbit, [{auth_backends, [rabbit_auth_backend_ldap, rabbit_auth_backend_http]}]}],[]}
+
+,
+
+{5,
+"auth_backends.1.authn = internal
+# uses module name because this backend is from a 3rd party
+auth_backends.1.authz = rabbit_auth_backend_ip_range",
+[{rabbit, [{auth_backends, [{rabbit_auth_backend_internal, rabbit_auth_backend_ip_range}]}]}],[]}
+,
+{6,
+"auth_backends.1.authn = ldap
+auth_backends.1.authz = internal",
+[{rabbit, [{auth_backends, [{rabbit_auth_backend_ldap, rabbit_auth_backend_internal}]}]}],[]}
+,
+
+{7,
+"auth_backends.1.authn = ldap
+auth_backends.1.authz = internal
+auth_backends.2 = internal",
+[{rabbit, [
+ {auth_backends, [{rabbit_auth_backend_ldap, rabbit_auth_backend_internal},
+ rabbit_auth_backend_internal]}
+ ]
+ }],[]}
+,
+
+
+{8,
+"ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ssl_options.verify = verify_peer
+ssl_options.fail_if_no_peer_cert = true",
+[
+ {rabbit, [{ssl_options, [{cacertfile, "test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile, "test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile, "test/config_schema_SUITE_data/certs/key.pem"},
+ {verify, verify_peer},
+ {fail_if_no_peer_cert, true}]}
+ ]}],[]}
+,
+
+{9,
+"listeners.tcp.default = 5673",
+[{rabbit, [{tcp_listeners, [5673]}]}],[]}
+,
+
+{10,
+"listeners.ssl = none",
+[{rabbit, [{ssl_listeners, []}]}],[]}
+,
+{11,
+"num_acceptors.ssl = 1",
+[{rabbit, [{num_ssl_acceptors, 1}]}],[]}
+,
+{12,
+"default_user = guest
+default_pass = guest
+default_user_tags.administrator = true
+default_permissions.configure = .*
+default_permissions.read = .*
+default_permissions.write = .*",
+[{rabbit, [
+{default_user, <<"guest">>},
+{default_pass, <<"guest">>},
+{default_user_tags, [administrator]},
+{default_permissions, [<<".*">>, <<".*">>, <<".*">>]}]}],[]}
+,
+{13,
+"cluster_nodes.disc.1 = rabbit@hostname1
+cluster_nodes.disc.2 = rabbit@hostname2",
+[{rabbit, [
+ {cluster_nodes, {[rabbit@hostname2,rabbit@hostname1], disc}}
+]}],[]}
+,
+{14,
+"tcp_listen_options.backlog = 128
+tcp_listen_options.nodelay = true
+tcp_listen_options.exit_on_close = false",
+[{rabbit, [{tcp_listen_options, [{backlog, 128},
+{nodelay, true},
+{exit_on_close, false}]}]}],[]}
+,
+{15,
+"auth_backends.1.authn = ldap
+auth_backends.1.authz = internal
+auth_backends.2 = internal",
+[{rabbit,[{auth_backends, [{rabbit_auth_backend_ldap, rabbit_auth_backend_internal},
+ rabbit_auth_backend_internal]}]}],[]}
+,
+{16,
+"rabbitmq_auth_backend_ldap.servers.1 = some_server
+ rabbitmq_auth_backend_ldap.servers.2 = some_other_server",
+[{rabbitmq_auth_backend_ldap, [{servers, ["some_server", "some_other_server"]}]}],
+[rabbitmq_auth_backend_ldap]}
+,
+{17,
+"rabbitmq_auth_backend_ldap.dn_lookup_attribute = userPrincipalName
+rabbitmq_auth_backend_ldap.dn_lookup_base = DC=gopivotal,DC=com
+rabbitmq_auth_backend_ldap.dn_lookup_bind = as_user",
+[{rabbitmq_auth_backend_ldap, [{dn_lookup_attribute, "userPrincipalName"},
+{dn_lookup_base, "DC=gopivotal,DC=com"},
+{dn_lookup_bind, as_user}]}],
+[rabbitmq_auth_backend_ldap]}
+,
+{18,
+"rabbitmq_auth_backend_ldap.dn_lookup_bind.user_dn = username
+rabbitmq_auth_backend_ldap.dn_lookup_bind.password = password",
+[{rabbitmq_auth_backend_ldap, [
+{dn_lookup_bind, {"username", "password"}}]}],
+[rabbitmq_auth_backend_ldap]}
+,
+{19,
+"rabbitmq_auth_backend_ldap.other_bind = anon",
+[{rabbitmq_auth_backend_ldap, [{other_bind, anon}]}],
+[rabbitmq_auth_backend_ldap]}
+,
+{20,
+"rabbitmq_auth_backend_ldap.other_bind = as_user",
+[{rabbitmq_auth_backend_ldap, [{other_bind, as_user}]}],
+[rabbitmq_auth_backend_ldap]}
+,
+{21,
+"rabbitmq_auth_backend_ldap.other_bind.user_dn = username
+rabbitmq_auth_backend_ldap.other_bind.password = password",
+[{rabbitmq_auth_backend_ldap, [{other_bind, {"username", "password"}}]}],
+[rabbitmq_auth_backend_ldap]}
+,
+{22,
+"listeners.tcp.default = 5672
+collect_statistics_interval = 10000
+management.http_log_dir = test/config_schema_SUITE_data/rabbit-mgmt
+management.rates_mode = basic",
+[{rabbit, [ {tcp_listeners, [5672]},
+ {collect_statistics_interval, 10000}]},
+ {rabbitmq_management, [ {http_log_dir, "test/config_schema_SUITE_data/rabbit-mgmt"},
+ {rates_mode, basic}]}
+],
+[rabbitmq_management]}
+,
+{23,
+"management.listener.port = 12345",
+[{rabbitmq_management, [{listener, [{port, 12345}]}]}],
+[rabbitmq_management]}
+,
+{24,
+"management.listener.port = 15671
+management.listener.ssl = true
+management.listener.ssl_opts.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+management.listener.ssl_opts.certfile = test/config_schema_SUITE_data/certs/cert.pem
+management.listener.ssl_opts.keyfile = test/config_schema_SUITE_data/certs/key.pem",
+[{rabbitmq_management,
+ [{listener, [{port, 15671},
+ {ssl, true},
+ {ssl_opts, [{cacertfile, "test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile, "test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile, "test/config_schema_SUITE_data/certs/key.pem"}]}
+ ]}
+ ]}
+],
+[rabbitmq_management]}
+,
+{25,
+"management.sample_retention_policies.global.minute = 5
+management.sample_retention_policies.global.hour = 60
+management.sample_retention_policies.global.day = 1200
+
+management.sample_retention_policies.basic.minute = 5
+management.sample_retention_policies.basic.hour = 60
+
+management.sample_retention_policies.detailed.10 = 5",
+[{rabbitmq_management,[
+ {sample_retention_policies,
+ %% List of {MaxAgeInSeconds, SampleEveryNSeconds}
+ [{global, [{60, 5}, {3600, 60}, {86400, 1200}]},
+ {basic, [{60, 5}, {3600, 60}]},
+ {detailed, [{10, 5}]}]}
+]}],
+[rabbitmq_management]}
+,
+{26,
+"vm_memory_high_watermark.absolute = 1073741824",
+[{rabbit, [{vm_memory_high_watermark, {absolute, 1073741824}}]}],[]}
+,
+{27,
+"vm_memory_high_watermark.absolute = 1024MB",
+[{rabbit, [{vm_memory_high_watermark, {absolute, "1024MB"}}]}],[]}
+,
+{28,
+"vm_memory_high_watermark_paging_ratio = 0.75
+vm_memory_high_watermark.relative = 0.4",
+[{rabbit, [{vm_memory_high_watermark_paging_ratio, 0.75},
+ {vm_memory_high_watermark, 0.4}]}],[]}
+,
+{29,
+"listeners.tcp.default = 5672
+mqtt.default_user = guest
+mqtt.default_pass = guest
+mqtt.allow_anonymous = true
+mqtt.vhost = /
+mqtt.exchange = amq.topic
+mqtt.subscription_ttl = 1800000
+mqtt.prefetch = 10
+mqtt.listeners.ssl = none
+## Default MQTT with TLS port is 8883
+# mqtt.listeners.ssl.default = 8883
+mqtt.listeners.tcp.default = 1883
+mqtt.tcp_listen_options.backlog = 128
+mqtt.tcp_listen_options.nodelay = true",
+[{rabbit, [{tcp_listeners, [5672]}]},
+ {rabbitmq_mqtt, [{default_user, <<"guest">>},
+ {default_pass, <<"guest">>},
+ {allow_anonymous, true},
+ {vhost, <<"/">>},
+ {exchange, <<"amq.topic">>},
+ {subscription_ttl, 1800000},
+ {prefetch, 10},
+ {ssl_listeners, []},
+ %% Default MQTT with TLS port is 8883
+ %% {ssl_listeners, [8883]}
+ {tcp_listeners, [1883]},
+ {tcp_listen_options, [{backlog, 128},
+ {nodelay, true}]}]}
+ ],
+[rabbitmq_mqtt]}
+,
+{30,
+"ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ssl_options.verify = verify_peer
+ssl_options.fail_if_no_peer_cert = true
+
+mqtt.listeners.ssl.default = 8883
+mqtt.listeners.tcp.default = 1883",
+[{rabbit, [
+ {ssl_options, [{cacertfile, "test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile, "test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile, "test/config_schema_SUITE_data/certs/key.pem"},
+ {verify, verify_peer},
+ {fail_if_no_peer_cert, true}]}
+ ]},
+ {rabbitmq_mqtt, [
+ {ssl_listeners, [8883]},
+ {tcp_listeners, [1883]}
+ ]}
+ ],
+[rabbitmq_mqtt]}
+,
+{31,
+"mqtt.ssl_cert_login = true",
+[{rabbitmq_mqtt, [{ssl_cert_login, true}]}], [rabbitmq_mqtt]}
+,
+
+{32,
+"ssl_cert_login_from = common_name",
+[{rabbit, [{ssl_cert_login_from, common_name}]}], [rabbitmq_mqtt]}
+,
+
+
+{33,
+"listeners.tcp.default = 5672
+mqtt.default_user = guest
+mqtt.default_pass = guest
+mqtt.allow_anonymous = true
+mqtt.vhost = /
+mqtt.exchange = amq.topic
+mqtt.subscription_ttl = undefined
+mqtt.prefetch = 10",
+[{rabbit, [{tcp_listeners, [5672]}]},
+ {rabbitmq_mqtt, [{default_user, <<"guest">>},
+ {default_pass, <<"guest">>},
+ {allow_anonymous, true},
+ {vhost, <<"/">>},
+ {exchange, <<"amq.topic">>},
+ {subscription_ttl, undefined},
+ {prefetch, 10}]}
+ ],
+[rabbitmq_mqtt]}
+,
+{34,
+"mqtt.default_user = guest
+mqtt.default_pass = guest
+mqtt.allow_anonymous = true
+mqtt.vhost = /
+mqtt.exchange = amq.topic
+mqtt.subscription_ttl = 1800000
+mqtt.prefetch = 10
+## use DETS (disk-based) store for retained messages
+mqtt.retained_message_store = rabbit_mqtt_retained_msg_store_dets
+## only used by DETS store
+mqtt.retained_message_store_dets_sync_interval = 2000
+
+mqtt.listeners.ssl = none
+mqtt.listeners.tcp.default = 1883",
+[{rabbitmq_mqtt, [{default_user, <<"guest">>},
+ {default_pass, <<"guest">>},
+ {allow_anonymous, true},
+ {vhost, <<"/">>},
+ {exchange, <<"amq.topic">>},
+ {subscription_ttl, 1800000},
+ {prefetch, 10},
+ %% use DETS (disk-based) store for retained messages
+ {retained_message_store, rabbit_mqtt_retained_msg_store_dets},
+ %% only used by DETS store
+ {retained_message_store_dets_sync_interval, 2000},
+ {ssl_listeners, []},
+ {tcp_listeners, [1883]}]}
+ ],
+[rabbitmq_mqtt]}
+,
+
+{35,
+"listeners.tcp.1 = 192.168.1.99:5672",
+[
+ {rabbit, [
+ {tcp_listeners, [{"192.168.1.99", 5672}]}
+ ]}
+], []}
+,
+{36,
+"listeners.tcp.1 = 127.0.0.1:5672
+listeners.tcp.2 = ::1:5672",
+[
+ {rabbit, [
+ {tcp_listeners, [{"127.0.0.1", 5672},
+ {"::1", 5672}]}
+ ]}
+], []}
+,
+{37,
+"listeners.tcp.1 = :::5672",
+[
+ {rabbit, [
+ {tcp_listeners, [{"::", 5672}]}
+ ]}
+], []}
+,
+{38,
+"listeners.tcp.1 = 192.168.1.99:5672",
+[
+ {rabbit, [
+ {tcp_listeners, [{"192.168.1.99", 5672}]}
+ ]}
+], []}
+,
+{39,
+"listeners.tcp.1 = fe80::2acf:e9ff:fe17:f97b:5672",
+[
+ {rabbit, [
+ {tcp_listeners, [{"fe80::2acf:e9ff:fe17:f97b", 5672}]}
+ ]}
+], []}
+,
+{40,
+"tcp_listen_options.backlog = 128
+tcp_listen_options.nodelay = true
+tcp_listen_options.sndbuf = 196608
+tcp_listen_options.recbuf = 196608",
+[
+ {rabbit, [
+ {tcp_listen_options, [
+ {backlog, 128},
+ {nodelay, true},
+ {sndbuf, 196608},
+ {recbuf, 196608}
+ ]}
+ ]}
+], []}
+,
+
+{42,
+"tcp_listen_options.backlog = 4096
+tcp_listen_options.nodelay = true",
+[
+ {kernel, [
+ {inet_default_connect_options, [{nodelay, true}]},
+ {inet_default_listen_options, [{nodelay, true}]}
+ ]}]
+,
+[
+ {kernel, [
+ {inet_default_connect_options, [{nodelay, true}]},
+ {inet_default_listen_options, [{nodelay, true}]}
+ ]},
+ {rabbit, [
+ {tcp_listen_options, [
+ {backlog, 4096},
+ {nodelay, true}
+ ]}
+ ]}
+], []}
+,
+
+{43,
+"tcp_listen_options.backlog = 4096
+tcp_listen_options.nodelay = true",
+[
+ {rabbit, [
+ {tcp_listen_options, [
+ {backlog, 4096},
+ {nodelay, true}
+ ]}
+ ]}
+], []}
+,
+
+{44,
+"ssl_handshake_timeout = 10000",
+[
+ {rabbit, [
+ %% 10 seconds
+ {ssl_handshake_timeout, 10000}
+ ]}
+], []}
+,
+
+{45,
+"cluster_partition_handling = pause_if_all_down
+
+## Recover strategy. Can be either 'autoheal' or 'ignore'
+cluster_partition_handling.pause_if_all_down.recover = ignore
+
+## Node names to check
+cluster_partition_handling.pause_if_all_down.nodes.1 = rabbit@myhost1
+cluster_partition_handling.pause_if_all_down.nodes.2 = rabbit@myhost2",
+[{rabbit, [{cluster_partition_handling, {pause_if_all_down, [rabbit@myhost2, rabbit@myhost1], ignore}}]}], []}
+,
+{46,
+"cluster_partition_handling = autoheal",
+[{rabbit, [{cluster_partition_handling, autoheal}]}], []}
+,
+{47,
+"password_hashing_module = rabbit_password_hashing_sha512",
+[
+ {rabbit, [{password_hashing_module, rabbit_password_hashing_sha512}]}
+],[]}
+,
+
+{48,
+"listeners.ssl.1 = 5671
+ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ssl_options.verify = verify_peer
+ssl_options.fail_if_no_peer_cert = false"
+,
+[
+ {rabbit, [
+ {ssl_listeners, [5671]},
+ {ssl_options, [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert,false}]}
+ ]}
+],[]}
+,
+
+
+{49,
+"listeners.ssl.1 = 5671
+ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ssl_options.password = t0p$3kRe7",
+[
+ {rabbit, [
+ {ssl_listeners, [5671]},
+ {ssl_options, [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile, "test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile, "test/config_schema_SUITE_data/certs/key.pem"},
+ {password, "t0p$3kRe7"}
+ ]}
+ ]}
+],[]}
+,
+
+{50,
+"listeners.ssl.1 = 5671
+ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ssl_options.versions.tls1_2 = tlsv1.2
+ssl_options.versions.tls1_1 = tlsv1.1
+ssl_options.versions.tls1 = tlsv1",
+[{ssl, [{versions, ['tlsv1.2', 'tlsv1.1', tlsv1]}]}],
+[{ssl, [{versions, ['tlsv1.2', 'tlsv1.1', tlsv1]}]},
+ {rabbit, [
+ {ssl_listeners, [5671]},
+ {ssl_options, [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile, "test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile, "test/config_schema_SUITE_data/certs/key.pem"},
+ {versions, ['tlsv1.2', 'tlsv1.1', tlsv1]}
+ ]}
+ ]}
+],[]}
+,
+{51,
+"listeners.ssl.1 = 5671
+ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ssl_options.versions.tls1_2 = tlsv1.2
+ssl_options.versions.tls1_1 = tlsv1.1",
+[{ssl, [{versions, ['tlsv1.2', 'tlsv1.1']}]}],
+[
+ {ssl, [{versions, ['tlsv1.2', 'tlsv1.1']}]},
+ {rabbit, [
+ {ssl_listeners, [5671]},
+ {ssl_options, [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile, "test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile, "test/config_schema_SUITE_data/certs/key.pem"},
+ {versions, ['tlsv1.2', 'tlsv1.1']}
+ ]}
+ ]}
+],[]}
+,
+{52,
+"listeners.ssl.1 = 5671
+ssl_allow_poodle_attack = true
+ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ssl_options.verify = verify_peer
+ssl_options.fail_if_no_peer_cert = false",
+[
+ {rabbit, [
+ {ssl_listeners, [5671]},
+ {ssl_allow_poodle_attack, true},
+ {ssl_options, [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert,false}]}
+ ]}
+],[]}
+,
+{53,
+"listeners.ssl.1 = 5671
+ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ssl_options.depth = 2
+ssl_options.verify = verify_peer
+ssl_options.fail_if_no_peer_cert = false",
+[
+ {rabbit, [
+ {ssl_listeners, [5671]},
+ {ssl_options, [{cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile,"test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile,"test/config_schema_SUITE_data/certs/key.pem"},
+ {depth, 2},
+ {verify,verify_peer},
+ {fail_if_no_peer_cert,false}]}
+ ]}
+],[]}
+,
+{54,
+"stomp.listeners.tcp.1 = 12345",
+[{rabbitmq_stomp, [{tcp_listeners, [12345]}]}],[rabbitmq_stomp]}
+,
+{55,
+"stomp.listeners.tcp.1 = 127.0.0.1:61613
+stomp.listeners.tcp.2 = ::1:61613",
+[{rabbitmq_stomp, [{tcp_listeners, [{"127.0.0.1", 61613},
+ {"::1", 61613}]}]}],[rabbitmq_stomp]}
+,
+{56,
+"ssl_options.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+ssl_options.certfile = test/config_schema_SUITE_data/certs/cert.pem
+ssl_options.keyfile = test/config_schema_SUITE_data/certs/key.pem
+ssl_options.verify = verify_peer
+ssl_options.fail_if_no_peer_cert = true
+
+stomp.listeners.tcp.1 = 61613
+stomp.listeners.ssl.1 = 61614",
+[{rabbit,[
+{ssl_options, [{cacertfile, "test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile, "test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile, "test/config_schema_SUITE_data/certs/key.pem"},
+ {verify, verify_peer},
+ {fail_if_no_peer_cert, true}]}]},
+{rabbitmq_stomp, [{tcp_listeners, [61613]},
+{ssl_listeners, [61614]}]}
+ ],[]}
+,
+
+{57,
+"stomp.default_user = guest
+stomp.default_pass = guest",
+[{rabbitmq_stomp, [{default_user, [{login, "guest"},{passcode, "guest"}]}]}],
+[rabbitmq_stomp]}
+,
+{58,
+"stomp.ssl_cert_login = true",
+[{rabbitmq_stomp, [{ssl_cert_login, true}]}],
+[rabbitmq_stomp]}
+,
+{59,
+"ssl_cert_login_from = common_name",
+[{rabbit, [{ssl_cert_login_from, common_name}]}], []}
+,
+{60,
+"stomp.default_user = guest
+stomp.default_pass = guest
+stomp.implicit_connect = true",
+[{rabbitmq_stomp, [{default_user,[{login, "guest"}, {passcode, "guest"}]},{implicit_connect, true}]}],
+[rabbitmq_stomp]}
+,
+{61,
+"stomp.default_vhost = /",
+[{rabbitmq_stomp, [{default_vhost, <<"/">>}]}],
+[rabbitmq_stomp]}
+,
+{62,
+"management.listener.port = 15672
+management.listener.ip = 127.0.0.1",
+[{rabbitmq_management,
+ [{listener, [{port, 15672},
+ {ip, "127.0.0.1"}
+ ]}
+ ]}
+],
+[rabbitmq_management]}
+,
+{63,
+"management.listener.port = 15672
+management.listener.ssl = true
+
+management.listener.ssl_opts.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+management.listener.ssl_opts.certfile = test/config_schema_SUITE_data/certs/cert.pem
+management.listener.ssl_opts.keyfile = test/config_schema_SUITE_data/certs/key.pem",
+[{rabbitmq_management,
+ [{listener, [{port, 15672},
+ {ssl, true},
+ {ssl_opts, [{cacertfile, "test/config_schema_SUITE_data/certs/cacert.pem"},
+ {certfile, "test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile, "test/config_schema_SUITE_data/certs/key.pem"}]}
+ ]}
+ ]}
+],
+[rabbitmq_management]},
+{64,
+"web_stomp.port = 12345",
+[{rabbitmq_web_stomp, [{port, 12345}]}],
+[rabbitmq_web_stomp]},
+{65,
+"web_stomp.ssl.port = 15671
+web_stomp.ssl.backlog = 1024
+web_stomp.ssl.certfile = test/config_schema_SUITE_data/certs/cert.pem
+web_stomp.ssl.keyfile = test/config_schema_SUITE_data/certs/key.pem
+web_stomp.ssl.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem
+web_stomp.ssl.password = changeme",
+[{rabbitmq_web_stomp,
+ [{ssl_config, [{port, 15671},
+ {backlog, 1024},
+ {certfile, "test/config_schema_SUITE_data/certs/cert.pem"},
+ {keyfile, "test/config_schema_SUITE_data/certs/key.pem"},
+ {cacertfile, "test/config_schema_SUITE_data/certs/cacert.pem"},
+ {password, "changeme"}]}]}],
+[rabbitmq_web_stomp]},
+{66,
+"web_stomp.ws_frame = binary",
+[{rabbitmq_web_stomp, [{ws_frame, binary}]}],
+[rabbitmq_web_stomp]},
+{67,
+"web_stomp.cowboy_opts.max_keepalive = 10",
+[{rabbitmq_web_stomp,[{cowboy_opts, [{max_keepalive, 10}]}]}],
+[rabbitmq_web_stomp]},
+{68,
+"web_stomp.sockjs_opts.url = https://cdn.jsdelivr.net/sockjs/0.3.4/sockjs.min.js",
+[{rabbitmq_web_stomp,
+ [{sockjs_opts, [{sockjs_url, "https://cdn.jsdelivr.net/sockjs/0.3.4/sockjs.min.js"}]}]}],
+[rabbitmq_web_stomp]},
+{69,
+"auth_backends.1 = http
+rabbitmq_auth_backend_http.user_path = http://some-server/auth/user
+rabbitmq_auth_backend_http.vhost_path = http://some-server/auth/vhost
+rabbitmq_auth_backend_http.resource_path = http://some-server/auth/resource",
+[{rabbit, [{auth_backends, [rabbit_auth_backend_http]}]},
+ {rabbitmq_auth_backend_http,
+ [{user_path, "http://some-server/auth/user"},
+ {vhost_path, "http://some-server/auth/vhost"},
+ {resource_path, "http://some-server/auth/resource"}]}],
+[rabbitmq_auth_backend_http]}
+].
diff --git a/test/crashing_queues_SUITE.erl b/test/crashing_queues_SUITE.erl
new file mode 100644
index 0000000000..872b771811
--- /dev/null
+++ b/test/crashing_queues_SUITE.erl
@@ -0,0 +1,269 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(crashing_queues_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, cluster_size_2}
+ ].
+
+groups() ->
+ [
+ {cluster_size_2, [], [
+ crashing_unmirrored,
+ crashing_mirrored,
+ give_up_after_repeated_crashes
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_2, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 2}
+ ]).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+crashing_unmirrored(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ChA = rabbit_ct_client_helpers:open_channel(Config, A),
+ ConnB = rabbit_ct_client_helpers:open_connection(Config, B),
+ QName = <<"crashing_unmirrored-q">>,
+ amqp_channel:call(ChA, #'confirm.select'{}),
+ test_queue_failure(A, ChA, ConnB, 1, 0,
+ #'queue.declare'{queue = QName, durable = true}),
+ test_queue_failure(A, ChA, ConnB, 0, 0,
+ #'queue.declare'{queue = QName, durable = false}),
+ ok.
+
+crashing_mirrored(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<".*">>, <<"all">>),
+ ChA = rabbit_ct_client_helpers:open_channel(Config, A),
+ ConnB = rabbit_ct_client_helpers:open_connection(Config, B),
+ QName = <<"crashing_mirrored-q">>,
+ amqp_channel:call(ChA, #'confirm.select'{}),
+ test_queue_failure(A, ChA, ConnB, 2, 1,
+ #'queue.declare'{queue = QName, durable = true}),
+ ok.
+
+test_queue_failure(Node, Ch, RaceConn, MsgCount, SlaveCount, Decl) ->
+ #'queue.declare_ok'{queue = QName} = amqp_channel:call(Ch, Decl),
+ try
+ publish(Ch, QName, transient),
+ publish(Ch, QName, durable),
+ Racer = spawn_declare_racer(RaceConn, Decl),
+ kill_queue(Node, QName),
+ assert_message_count(MsgCount, Ch, QName),
+ assert_slave_count(SlaveCount, Node, QName),
+ stop_declare_racer(Racer)
+ after
+ amqp_channel:call(Ch, #'queue.delete'{queue = QName})
+ end.
+
+give_up_after_repeated_crashes(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ChA = rabbit_ct_client_helpers:open_channel(Config, A),
+ ChB = rabbit_ct_client_helpers:open_channel(Config, B),
+ QName = <<"give_up_after_repeated_crashes-q">>,
+ amqp_channel:call(ChA, #'confirm.select'{}),
+ amqp_channel:call(ChA, #'queue.declare'{queue = QName,
+ durable = true}),
+ await_state(A, QName, running),
+ publish(ChA, QName, durable),
+ kill_queue_hard(A, QName),
+ {'EXIT', _} = (catch amqp_channel:call(
+ ChA, #'queue.declare'{queue = QName,
+ durable = true})),
+ await_state(A, QName, crashed),
+ amqp_channel:call(ChB, #'queue.delete'{queue = QName}),
+ amqp_channel:call(ChB, #'queue.declare'{queue = QName,
+ durable = true}),
+ await_state(A, QName, running),
+
+ %% Since it's convenient, also test absent queue status here.
+ rabbit_ct_broker_helpers:stop_node(Config, B),
+ await_state(A, QName, down),
+ ok.
+
+
+publish(Ch, QName, DelMode) ->
+ Publish = #'basic.publish'{exchange = <<>>, routing_key = QName},
+ Msg = #amqp_msg{props = #'P_basic'{delivery_mode = del_mode(DelMode)}},
+ amqp_channel:cast(Ch, Publish, Msg),
+ amqp_channel:wait_for_confirms(Ch).
+
+del_mode(transient) -> 1;
+del_mode(durable) -> 2.
+
+spawn_declare_racer(Conn, Decl) ->
+ Self = self(),
+ spawn_link(fun() -> declare_racer_loop(Self, Conn, Decl) end).
+
+stop_declare_racer(Pid) ->
+ Pid ! stop,
+ MRef = erlang:monitor(process, Pid),
+ receive
+ {'DOWN', MRef, process, Pid, _} -> ok
+ end.
+
+declare_racer_loop(Parent, Conn, Decl) ->
+ receive
+ stop -> unlink(Parent)
+ after 0 ->
+ %% Catch here because we might happen to catch the queue
+ %% while it is in the middle of recovering and thus
+ %% explode with NOT_FOUND because crashed. Doesn't matter,
+ %% we are only in this loop to try to fool the recovery
+ %% code anyway.
+ try
+ case amqp_connection:open_channel(Conn) of
+ {ok, Ch} -> amqp_channel:call(Ch, Decl);
+ closing -> ok
+ end
+ catch
+ exit:_ ->
+ ok
+ end,
+ declare_racer_loop(Parent, Conn, Decl)
+ end.
+
+await_state(Node, QName, State) ->
+ await_state(Node, QName, State, 30000).
+
+await_state(Node, QName, State, Time) ->
+ case state(Node, QName) of
+ State ->
+ ok;
+ Other ->
+ case Time of
+ 0 -> exit({timeout_awaiting_state, State, Other});
+ _ -> timer:sleep(100),
+ await_state(Node, QName, State, Time - 100)
+ end
+ end.
+
+state(Node, QName) ->
+ V = <<"/">>,
+ Res = rabbit_misc:r(V, queue, QName),
+ Infos = rpc:call(Node, rabbit_amqqueue, info_all, [V, [name, state]]),
+ case Infos of
+ [] -> undefined;
+ [[{name, Res}, {state, State}]] -> State
+ end.
+
+kill_queue_hard(Node, QName) ->
+ case kill_queue(Node, QName) of
+ crashed -> ok;
+ _NewPid -> timer:sleep(100),
+ kill_queue_hard(Node, QName)
+ end.
+
+kill_queue(Node, QName) ->
+ Pid1 = queue_pid(Node, QName),
+ exit(Pid1, boom),
+ await_new_pid(Node, QName, Pid1).
+
+queue_pid(Node, QName) ->
+ #amqqueue{pid = QPid,
+ state = State} = lookup(Node, QName),
+ case State of
+ crashed -> case sup_child(Node, rabbit_amqqueue_sup_sup) of
+ {ok, _} -> QPid; %% restarting
+ {error, no_child} -> crashed %% given up
+ end;
+ _ -> QPid
+ end.
+
+sup_child(Node, Sup) ->
+ case rpc:call(Node, supervisor2, which_children, [Sup]) of
+ [{_, Child, _, _}] -> {ok, Child};
+ [] -> {error, no_child};
+ {badrpc, {'EXIT', {noproc, _}}} -> {error, no_sup}
+ end.
+
+lookup(Node, QName) ->
+ {ok, Q} = rpc:call(Node, rabbit_amqqueue, lookup,
+ [rabbit_misc:r(<<"/">>, queue, QName)]),
+ Q.
+
+await_new_pid(Node, QName, OldPid) ->
+ case queue_pid(Node, QName) of
+ OldPid -> timer:sleep(10),
+ await_new_pid(Node, QName, OldPid);
+ New -> New
+ end.
+
+assert_message_count(Count, Ch, QName) ->
+ #'queue.declare_ok'{message_count = Count} =
+ amqp_channel:call(Ch, #'queue.declare'{queue = QName,
+ passive = true}).
+
+assert_slave_count(Count, Node, QName) ->
+ Q = lookup(Node, QName),
+ [{_, Pids}] = rpc:call(Node, rabbit_amqqueue, info, [Q, [slave_pids]]),
+ RealCount = case Pids of
+ '' -> 0;
+ _ -> length(Pids)
+ end,
+ case RealCount of
+ Count ->
+ ok;
+ _ when RealCount < Count ->
+ timer:sleep(10),
+ assert_slave_count(Count, Node, QName);
+ _ ->
+ exit({too_many_slaves, Count, RealCount})
+ end.
diff --git a/test/dummy_event_receiver.erl b/test/dummy_event_receiver.erl
new file mode 100644
index 0000000000..75db3678ce
--- /dev/null
+++ b/test/dummy_event_receiver.erl
@@ -0,0 +1,58 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(dummy_event_receiver).
+
+-export([start/3, stop/0]).
+
+-export([init/1, handle_call/2, handle_event/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-include("rabbit.hrl").
+
+%% Installs this module as a rabbit_event handler on every node in Nodes;
+%% matching events (by type) are forwarded to Pid. The Oks match asserts that
+%% add_handler returned ok on every node and that no node was unreachable.
+%% NOTE(review): stop/0 below only removes the handler on the local node —
+%% presumably callers run it per node or rely on node teardown; confirm.
+start(Pid, Nodes, Types) ->
+ Oks = [ok || _ <- Nodes],
+ {Oks, _} = rpc:multicall(Nodes, gen_event, add_handler,
+ [rabbit_event, ?MODULE, [Pid, Types]]).
+
+%% Removes the handler from the local node's rabbit_event manager only.
+stop() ->
+ gen_event:delete_handler(rabbit_event, ?MODULE, []).
+
+%%----------------------------------------------------------------------------
+
+%% gen_event callbacks. State is simply {ReceiverPid, TypesOfInterest}.
+init([Pid, Types]) ->
+ {ok, {Pid, Types}}.
+
+handle_call(_Request, State) ->
+ {ok, not_understood, State}.
+
+%% Forward the raw #event{} to the receiver when its type is subscribed to;
+%% silently drop everything else.
+handle_event(Event = #event{type = Type}, State = {Pid, Types}) ->
+ case lists:member(Type, Types) of
+ true -> Pid ! Event;
+ false -> ok
+ end,
+ {ok, State}.
+
+handle_info(_Info, State) ->
+ {ok, State}.
+
+terminate(_Arg, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%----------------------------------------------------------------------------
diff --git a/test/dummy_interceptor.erl b/test/dummy_interceptor.erl
new file mode 100644
index 0000000000..6d510a3073
--- /dev/null
+++ b/test/dummy_interceptor.erl
@@ -0,0 +1,26 @@
+-module(dummy_interceptor).
+
+-behaviour(rabbit_channel_interceptor).
+
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+
+
+-compile(export_all).
+
+%% rabbit_channel_interceptor callbacks: this interceptor carries no state.
+init(_Ch) ->
+ undefined.
+
+description() ->
+ [{description,
+ <<"Empties payload on publish">>}].
+
+%% Strip the payload from every basic.publish; all other methods pass
+%% through untouched.
+intercept(#'basic.publish'{} = Method, Content, _IState) ->
+ Content2 = Content#content{payload_fragments_rev = []},
+ {Method, Content2};
+
+%% NOTE(review): this third argument is the interceptor state (as in the
+%% clause above), not a vhost — the _VHost name is misleading.
+intercept(Method, Content, _VHost) ->
+ {Method, Content}.
+
+applies_to() ->
+ ['basic.publish'].
diff --git a/test/dummy_runtime_parameters.erl b/test/dummy_runtime_parameters.erl
new file mode 100644
index 0000000000..d80ec785d0
--- /dev/null
+++ b/test/dummy_runtime_parameters.erl
@@ -0,0 +1,72 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(dummy_runtime_parameters).
+-behaviour(rabbit_runtime_parameter).
+-behaviour(rabbit_policy_validator).
+
+-include("rabbit.hrl").
+
+-export([validate/5, notify/4, notify_clear/3]).
+-export([register/0, unregister/0]).
+-export([validate_policy/1]).
+-export([register_policy_validator/0, unregister_policy_validator/0]).
+
+%----------------------------------------------------------------------------
+
+%% Register/unregister this module as the validator for the <<"test">>
+%% runtime-parameter component.
+register() ->
+ rabbit_registry:register(runtime_parameter, <<"test">>, ?MODULE).
+
+unregister() ->
+ rabbit_registry:unregister(runtime_parameter, <<"test">>).
+
+%% Validation matrix used by the tests:
+%%   "good"  -> any term is accepted;
+%%   "maybe" -> only the term <<"good">> is accepted;
+%%   "admin" -> accepted with no user, or a user tagged 'administrator';
+%%   anything else under <<"test">> is rejected.
+validate(_, <<"test">>, <<"good">>, _Term, _User) -> ok;
+validate(_, <<"test">>, <<"maybe">>, <<"good">>, _User) -> ok;
+validate(_, <<"test">>, <<"admin">>, _Term, none) -> ok;
+validate(_, <<"test">>, <<"admin">>, _Term, User) ->
+ case lists:member(administrator, User#user.tags) of
+ true -> ok;
+ false -> {error, "meh", []}
+ end;
+validate(_, <<"test">>, _, _, _) -> {error, "meh", []}.
+
+%% Change notifications are deliberately no-ops for this dummy component.
+notify(_, _, _, _) -> ok.
+notify_clear(_, _, _) -> ok.
+
+%----------------------------------------------------------------------------
+
+%% Register/unregister this module as validator for two policy keys:
+%% <<"testeven">> and <<"testpos">>.
+register_policy_validator() ->
+ rabbit_registry:register(policy_validator, <<"testeven">>, ?MODULE),
+ rabbit_registry:register(policy_validator, <<"testpos">>, ?MODULE).
+
+unregister_policy_validator() ->
+ rabbit_registry:unregister(policy_validator, <<"testeven">>),
+ rabbit_registry:unregister(policy_validator, <<"testpos">>).
+
+%% "testeven": the term list must have an even number of elements.
+validate_policy([{<<"testeven">>, Terms}]) when is_list(Terms) ->
+ case length(Terms) rem 2 =:= 0 of
+ true -> ok;
+ false -> {error, "meh", []}
+ end;
+
+%% "testpos": every term must be a strictly positive integer.
+validate_policy([{<<"testpos">>, Terms}]) when is_list(Terms) ->
+ case lists:all(fun (N) -> is_integer(N) andalso N > 0 end, Terms) of
+ true -> ok;
+ false -> {error, "meh", []}
+ end;
+
+%% Anything else (wrong key, non-list terms, multiple entries) is rejected.
+validate_policy(_) ->
+ {error, "meh", []}.
diff --git a/test/dummy_supervisor2.erl b/test/dummy_supervisor2.erl
new file mode 100644
index 0000000000..9ca3f6329c
--- /dev/null
+++ b/test/dummy_supervisor2.erl
@@ -0,0 +1,41 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2016 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(dummy_supervisor2).
+
+-behaviour(supervisor2).
+
+-export([
+ start_link/0,
+ init/1
+ ]).
+
+%% Deliberately NOT a real supervisor: spawns a stand-in process that traps
+%% exits and blocks until it receives 'stop'. Presumably this lets tests
+%% exercise supervisor2's shutdown handling of a child that must be killed —
+%% confirm against the suite that starts it.
+start_link() ->
+ Pid = spawn_link(fun () ->
+ process_flag(trap_exit, true),
+ receive stop -> ok end
+ end),
+ {ok, Pid}.
+
+%% With [Timeout]: one_for_one with a single transient child supervisor
+%% (itself, started via supervisor2) whose shutdown value is Timeout.
+init([Timeout]) ->
+ {ok, {{one_for_one, 0, 1},
+ [{test_sup, {supervisor2, start_link,
+ [{local, ?MODULE}, ?MODULE, []]},
+ transient, Timeout, supervisor, [?MODULE]}]}};
+%% With []: simple_one_for_one spawning temporary workers via start_link/0.
+init([]) ->
+ {ok, {{simple_one_for_one, 0, 1},
+ [{test_worker, {?MODULE, start_link, []},
+ temporary, 1000, worker, [?MODULE]}]}}.
diff --git a/test/dynamic_ha_SUITE.erl b/test/dynamic_ha_SUITE.erl
new file mode 100644
index 0000000000..c54e4c2994
--- /dev/null
+++ b/test/dynamic_ha_SUITE.erl
@@ -0,0 +1,438 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(dynamic_ha_SUITE).
+
+%% rabbit_tests:test_dynamic_mirroring() is a unit test which should
+%% test the logic of what all the policies decide to do, so we don't
+%% need to exhaustively test that here. What we need to test is that:
+%%
+%% * Going from non-mirrored to mirrored works and vice versa
+%% * Changing policy can add / remove mirrors and change the master
+%% * Adding a node will create a new mirror when there are not enough nodes
+%% for the policy
+%% * Removing a node will not create a new mirror even if the policy
+%% logic wants it (since this gives us a good way to lose messages
+%% on cluster shutdown, by repeated failover to new nodes)
+%%
+%% The first two are change_policy, the last two are change_cluster
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("proper/include/proper.hrl").
+-include_lib("eunit/include/eunit.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(QNAME, <<"ha.test">>).
+-define(POLICY, <<"^ha.test$">>). %% " emacs
+-define(VHOST, <<"/">>).
+
+%% CT suite layout: unclustered tests build their cluster mid-test
+%% (change_cluster), clustered tests start with a pre-formed cluster.
+all() ->
+ [
+ {group, unclustered},
+ {group, clustered}
+ ].
+
+groups() ->
+ [
+ {unclustered, [], [
+ {cluster_size_5, [], [
+ change_cluster
+ ]}
+ ]},
+ {clustered, [], [
+ {cluster_size_2, [], [
+ vhost_deletion,
+ promote_on_shutdown
+ ]},
+ {cluster_size_3, [], [
+ change_policy,
+ rapid_change,
+ random_policy
+ ]}
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+%% Each group level stacks one config key; the broker itself only starts in
+%% init_per_testcase once both keys are known.
+init_per_group(unclustered, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, false}]);
+init_per_group(clustered, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_clustered, true}]);
+init_per_group(cluster_size_2, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]);
+init_per_group(cluster_size_3, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]);
+init_per_group(cluster_size_5, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 5}]).
+
+end_per_group(_, Config) ->
+ Config.
+
+%% Starts a fresh broker cluster per testcase; tcp_ports_base is offset by
+%% testcase number so parallel/sequential runs never collide on ports.
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps()).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%% Walks a queue through every policy transition: unmirrored -> "all" ->
+%% "nodes" (shrink) -> "nodes" (swap mirror) -> unmirrored -> mirror away
+%% from the master's node. assert_slaves/3,4 blocks until each state (or a
+%% permitted intermediate) is reached.
+change_policy(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ %% When we first declare a queue with no policy, it's not HA.
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME}),
+ assert_slaves(A, ?QNAME, {A, ''}),
+
+ %% Give it policy "all", it becomes HA and gets all mirrors
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY, <<"all">>),
+ assert_slaves(A, ?QNAME, {A, [B, C]}),
+
+ %% Give it policy "nodes", it gets specific mirrors
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY,
+ {<<"nodes">>, [rabbit_misc:atom_to_binary(A),
+ rabbit_misc:atom_to_binary(B)]}),
+ assert_slaves(A, ?QNAME, {A, [B]}),
+
+ %% Now explicitly change the mirrors
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY,
+ {<<"nodes">>, [rabbit_misc:atom_to_binary(A),
+ rabbit_misc:atom_to_binary(C)]}),
+ assert_slaves(A, ?QNAME, {A, [C]}, [{A, [B, C]}]),
+
+ %% Clear the policy, and we go back to non-mirrored
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, A, ?POLICY),
+ assert_slaves(A, ?QNAME, {A, ''}),
+
+ %% Test switching "away" from an unmirrored node
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY,
+ {<<"nodes">>, [rabbit_misc:atom_to_binary(B),
+ rabbit_misc:atom_to_binary(C)]}),
+ assert_slaves(A, ?QNAME, {A, [B, C]}, [{A, [B]}, {A, [C]}]),
+
+ ok.
+
+%% Starts with 5 unclustered nodes, clusters [A,B,C], then checks that
+%% growing the cluster adds a mirror for an "exactly 4" policy but stopping a
+%% mirror node lets the remaining candidate join in.
+change_cluster(Config) ->
+ [A, B, C, D, E] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ rabbit_ct_broker_helpers:cluster_nodes(Config, [A, B, C]),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME}),
+ assert_slaves(A, ?QNAME, {A, ''}),
+
+ %% Give it policy exactly 4, it should mirror to all 3 nodes
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, ?POLICY,
+ {<<"exactly">>, 4}),
+ assert_slaves(A, ?QNAME, {A, [B, C]}),
+
+ %% Add D and E, D joins in
+ rabbit_ct_broker_helpers:cluster_nodes(Config, [A, D, E]),
+ assert_slaves(A, ?QNAME, {A, [B, C, D]}),
+
+ %% Remove D, E joins in
+ rabbit_ct_broker_helpers:stop_node(Config, D),
+ assert_slaves(A, ?QNAME, {A, [B, C, E]}),
+
+ ok.
+
+%% Runs 100 declare/publish/consume/delete cycles in a monitored process
+%% while rapid_loop/3 churns the HA policy on/off underneath them.
+rapid_change(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ {_Pid, MRef} = spawn_monitor(
+ fun() ->
+ [rapid_amqp_ops(ACh, I) || I <- lists:seq(1, 100)]
+ end),
+ rapid_loop(Config, A, MRef),
+ ok.
+
+%% One full queue lifecycle: declare, publish I, consume it back (no_ack),
+%% delete. Blocks until both consume_ok and the delivery arrive.
+rapid_amqp_ops(Ch, I) ->
+ Payload = list_to_binary(integer_to_list(I)),
+ amqp_channel:call(Ch, #'queue.declare'{queue = ?QNAME}),
+ amqp_channel:cast(Ch, #'basic.publish'{exchange = <<"">>,
+ routing_key = ?QNAME},
+ #amqp_msg{payload = Payload}),
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = ?QNAME,
+ no_ack = true}, self()),
+ receive #'basic.consume_ok'{} -> ok
+ end,
+ receive {#'basic.deliver'{}, #amqp_msg{payload = Payload}} ->
+ ok
+ end,
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?QNAME}).
+
+%% 'after 0' makes this a deliberate busy loop: set and clear the HA policy
+%% as fast as possible until the AMQP worker exits. A non-normal exit is
+%% propagated as amqp_ops_died.
+rapid_loop(Config, Node, MRef) ->
+ receive
+ {'DOWN', MRef, process, _Pid, normal} ->
+ ok;
+ {'DOWN', MRef, process, _Pid, Reason} ->
+ exit({amqp_ops_died, Reason})
+ after 0 ->
+ rabbit_ct_broker_helpers:set_ha_policy(Config, Node, ?POLICY,
+ <<"all">>),
+ ok = rabbit_ct_broker_helpers:clear_policy(Config, Node, ?POLICY),
+ rapid_loop(Config, Node, MRef)
+ end.
+
+%% Vhost deletion needs to successfully tear down policies and queues
+%% with policies. At least smoke-test that it doesn't blow up.
+vhost_deletion(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ rabbit_ct_broker_helpers:set_ha_policy_all(Config),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ amqp_channel:call(ACh, #'queue.declare'{queue = <<"vhost_deletion-q">>}),
+ ok = rpc:call(A, rabbit_vhost, delete, [<<"/">>]),
+ ok.
+
+%% With ha-promote-on-shutdown=always an unsynchronised mirror is promoted
+%% when the master shuts down (queue survives, possibly empty); with the
+%% default it is not (declare fails with 404 until the master returns, at
+%% which point all 10 messages are still there).
+promote_on_shutdown(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"^ha.promote">>,
+ <<"all">>, [{<<"ha-promote-on-shutdown">>, <<"always">>}]),
+ rabbit_ct_broker_helpers:set_ha_policy(Config, A, <<"^ha.nopromote">>,
+ <<"all">>),
+
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ [begin
+ amqp_channel:call(ACh, #'queue.declare'{queue = Q,
+ durable = true}),
+ rabbit_ct_client_helpers:publish(ACh, Q, 10)
+ end || Q <- [<<"ha.promote.test">>, <<"ha.nopromote.test">>]],
+ ok = rabbit_ct_broker_helpers:restart_node(Config, B),
+ ok = rabbit_ct_broker_helpers:stop_node(Config, A),
+ BCh = rabbit_ct_client_helpers:open_channel(Config, B),
+ #'queue.declare_ok'{message_count = 0} =
+ amqp_channel:call(
+ BCh, #'queue.declare'{queue = <<"ha.promote.test">>,
+ durable = true}),
+ ?assertExit(
+ {{shutdown, {server_initiated_close, 404, _}}, _},
+ amqp_channel:call(
+ BCh, #'queue.declare'{queue = <<"ha.nopromote.test">>,
+ durable = true})),
+ ok = rabbit_ct_broker_helpers:start_node(Config, A),
+ ACh2 = rabbit_ct_client_helpers:open_channel(Config, A),
+ #'queue.declare_ok'{message_count = 10} =
+ amqp_channel:call(
+ ACh2, #'queue.declare'{queue = <<"ha.nopromote.test">>,
+ durable = true}),
+ ok.
+
+%% Property-based test: apply random policy sequences (see prop_random_policy)
+%% and verify the final mirror layout.
+random_policy(Config) ->
+ run_proper(fun prop_random_policy/1, [Config]).
+
+%%----------------------------------------------------------------------------
+
+%% Blocks until the queue's {MasterNode, SlaveNodes} matches Exp
+%% ('' = unmirrored). Uses the process dictionary to remember the previously
+%% asserted state, which is always a permitted intermediate for the next call
+%% (the cluster transitions asynchronously).
+assert_slaves(RPCNode, QName, Exp) ->
+ assert_slaves(RPCNode, QName, Exp, []).
+
+assert_slaves(RPCNode, QName, Exp, PermittedIntermediate) ->
+ assert_slaves0(RPCNode, QName, Exp,
+ [{get(previous_exp_m_node), get(previous_exp_s_nodes)} |
+ PermittedIntermediate]).
+
+%% Poll loop: fail fast via ct:fail if the observed state is neither the
+%% expected one nor a permitted intermediate; otherwise retry every 100ms.
+assert_slaves0(RPCNode, QName, {ExpMNode, ExpSNodes}, PermittedIntermediate) ->
+ Q = find_queue(QName, RPCNode),
+ Pid = proplists:get_value(pid, Q),
+ SPids = proplists:get_value(slave_pids, Q),
+ ActMNode = node(Pid),
+ ActSNodes = case SPids of
+ '' -> '';
+ _ -> [node(SPid) || SPid <- SPids]
+ end,
+ case ExpMNode =:= ActMNode andalso equal_list(ExpSNodes, ActSNodes) of
+ false ->
+ %% It's an async change, so if nothing has changed let's
+ %% just wait - of course this means if something does not
+ %% change when expected then we time out the test which is
+ %% a bit tedious
+ case [found || {PermMNode, PermSNodes} <- PermittedIntermediate,
+ PermMNode =:= ActMNode,
+ equal_list(PermSNodes, ActSNodes)] of
+ [] -> ct:fail("Expected ~p / ~p, got ~p / ~p~nat ~p~n",
+ [ExpMNode, ExpSNodes, ActMNode, ActSNodes,
+ get_stacktrace()]);
+ _ -> timer:sleep(100),
+ assert_slaves0(RPCNode, QName, {ExpMNode, ExpSNodes},
+ PermittedIntermediate)
+ end;
+ true ->
+ put(previous_exp_m_node, ExpMNode),
+ put(previous_exp_s_nodes, ExpSNodes),
+ ok
+ end.
+
+%% Order-insensitive list equality that also treats the atom '' (RabbitMQ's
+%% "not mirrored" marker) as only equal to itself.
+equal_list('', '') -> true;
+equal_list('', _Act) -> false;
+equal_list(_Exp, '') -> false;
+equal_list([], []) -> true;
+equal_list(_Exp, []) -> false;
+equal_list([], _Act) -> false;
+equal_list([H|T], Act) -> case lists:member(H, Act) of
+ true -> equal_list(T, Act -- [H]);
+ false -> false
+ end.
+
+%% Polls info_all every 100ms until the queue exists, then returns its
+%% info proplist.
+find_queue(QName, RPCNode) ->
+ Qs = rpc:call(RPCNode, rabbit_amqqueue, info_all, [?VHOST], infinity),
+ case find_queue0(QName, Qs) of
+ did_not_find_queue -> timer:sleep(100),
+ find_queue(QName, RPCNode);
+ Q -> Q
+ end.
+
+find_queue0(QName, Qs) ->
+ case [Q || Q <- Qs, proplists:get_value(name, Q) =:=
+ rabbit_misc:r(?VHOST, queue, QName)] of
+ [R] -> R;
+ [] -> did_not_find_queue
+ end.
+
+%% Throw/catch dance purely to capture the current stacktrace for failure
+%% reports. NOTE(review): erlang:get_stacktrace/0 is deprecated from OTP 21
+%% and removed in OTP 24; would need the catch-clause stacktrace variable on
+%% modern OTP.
+get_stacktrace() ->
+ try
+ throw(e)
+ catch
+ _:e ->
+ erlang:get_stacktrace()
+ end.
+
+%%----------------------------------------------------------------------------
+%% Runs a PropEr property (25 cases, output to the CT user console) and exits
+%% with the counterexample on failure so the testcase fails loudly.
+run_proper(Fun, Args) ->
+ case proper:counterexample(erlang:apply(Fun, Args),
+ [{numtests, 25},
+ {on_output, fun(F, A) ->
+ io:format(user, F, A)
+ end}]) of
+ true ->
+ true;
+ Value ->
+ exit(Value)
+ end.
+
+%% Property: after applying a random sequence of policies concurrently from
+%% every node, the queue's final mirror layout matches the LAST policy in
+%% the sequence.
+prop_random_policy(Config) ->
+ [NodeA, _, _] = Nodes = rabbit_ct_broker_helpers:get_node_configs(
+ Config, nodename),
+ ?FORALL(
+ Policies, non_empty(list(policy_gen(Nodes))),
+ begin
+ Ch = rabbit_ct_client_helpers:open_channel(Config, NodeA),
+ amqp_channel:call(Ch, #'queue.declare'{queue = ?QNAME}),
+ %% Add some load so mirrors can be busy synchronising
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, 100000),
+ %% Apply policies in parallel on all nodes
+ apply_in_parallel(Config, Nodes, Policies),
+ %% The last policy is the final state
+ Last = lists:last(Policies),
+ %% Give it some time to generate all internal notifications
+ timer:sleep(2000),
+ %% Ensure the owner/master is able to process a call request,
+ %% which means that all pending casts have been processed.
+ %% Use the information returned by owner/master to verify the
+ %% test result
+ Info = find_queue(?QNAME, NodeA),
+ %% Gets owner/master
+ Pid = proplists:get_value(pid, Info),
+ FinalInfo = rpc:call(node(Pid), gen_server, call, [Pid, info], 5000),
+ %% Check the result
+ Result = verify_policy(Last, FinalInfo),
+ %% Cleanup
+ amqp_channel:call(Ch, #'queue.delete'{queue = ?QNAME}),
+ _ = rabbit_ct_broker_helpers:clear_policy(Config, NodeA, ?POLICY),
+ Result
+ end).
+
+%% One spawned worker per node applies the whole policy sequence; wait for
+%% exactly one done-message per worker before returning.
+apply_in_parallel(Config, Nodes, Policies) ->
+ Self = self(),
+ [spawn_link(fun() ->
+ [begin
+ apply_policy(Config, N, Policy)
+ end || Policy <- Policies],
+ Self ! parallel_task_done
+ end) || N <- Nodes],
+ [receive
+ parallel_task_done ->
+ ok
+ end || _ <- Nodes].
+
+%% Proper generators
+%% 'undefined' (clear the policy) is weighted 3x the others on purpose.
+policy_gen(Nodes) ->
+ %% Stop mirroring needs to be called often to trigger rabbitmq-server#803
+ frequency([{3, undefined},
+ {1, all},
+ {1, {nodes, nodes_gen(Nodes)}},
+ {1, {exactly, choose(1, 3)}}
+ ]).
+
+%% Non-empty, de-duplicated subset of the cluster's node names.
+nodes_gen(Nodes) ->
+ ?LET(List, non_empty(list(oneof(Nodes))),
+ sets:to_list(sets:from_list(List))).
+
+%% Checks
+%% Maps each final policy to the mirror layout it should produce on a
+%% 3-node cluster (hence the hard-coded 2 for 'all').
+verify_policy(undefined, Info) ->
+ %% If the queue is not mirrored, it returns ''
+ '' == proplists:get_value(slave_pids, Info);
+verify_policy(all, Info) ->
+ 2 == length(proplists:get_value(slave_pids, Info));
+verify_policy({exactly, 1}, Info) ->
+ %% If the queue is mirrored, it returns a list
+ [] == proplists:get_value(slave_pids, Info);
+verify_policy({exactly, N}, Info) ->
+ (N - 1) == length(proplists:get_value(slave_pids, Info));
+verify_policy({nodes, Nodes}, Info) ->
+ Master = node(proplists:get_value(pid, Info)),
+ Slaves = [node(P) || P <- proplists:get_value(slave_pids, Info)],
+ lists:sort(Nodes) == lists:sort([Master | Slaves]).
+
+%% Policies
+%% Translates a generated policy term into a broker-helper call; all
+%% mirroring policies use automatic sync so mirrors actually catch up.
+apply_policy(Config, N, undefined) ->
+ _ = rabbit_ct_broker_helpers:clear_policy(Config, N, ?POLICY);
+apply_policy(Config, N, all) ->
+ rabbit_ct_broker_helpers:set_ha_policy(
+ Config, N, ?POLICY, <<"all">>,
+ [{<<"ha-sync-mode">>, <<"automatic">>}]);
+apply_policy(Config, N, {nodes, Nodes}) ->
+ NNodes = [rabbit_misc:atom_to_binary(Node) || Node <- Nodes],
+ rabbit_ct_broker_helpers:set_ha_policy(
+ Config, N, ?POLICY, {<<"nodes">>, NNodes},
+ [{<<"ha-sync-mode">>, <<"automatic">>}]);
+apply_policy(Config, N, {exactly, Exactly}) ->
+ rabbit_ct_broker_helpers:set_ha_policy(
+ Config, N, ?POLICY, {<<"exactly">>, Exactly},
+ [{<<"ha-sync-mode">>, <<"automatic">>}]).
diff --git a/test/eager_sync_SUITE.erl b/test/eager_sync_SUITE.erl
new file mode 100644
index 0000000000..93b308b6c5
--- /dev/null
+++ b/test/eager_sync_SUITE.erl
@@ -0,0 +1,278 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(eager_sync_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(QNAME, <<"ha.two.test">>).
+-define(QNAME_AUTO, <<"ha.auto.test">>).
+-define(MESSAGE_COUNT, 2000).
+
+%% Sync tests share broker state heavily, so everything runs sequentially.
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ eager_sync,
+ eager_sync_cancel,
+ eager_sync_auto,
+ eager_sync_auto_on_policy_change,
+ eager_sync_requeue
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+%% Fresh 3-node cluster per testcase, plus the two-mirror HA policies
+%% ("ha.two." manual sync, "ha.auto." automatic sync) installed as extra
+%% setup steps.
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = 3,
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, ClusterSize},
+ {rmq_nodes_clustered, true},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++ [
+ fun rabbit_ct_broker_helpers:set_ha_policy_two_pos/1,
+ fun rabbit_ct_broker_helpers:set_ha_policy_two_pos_batch_sync/1
+ ]).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%% Core manual-sync behaviour: without an explicit sync, restarting both
+%% mirror-holding nodes loses the unsynchronised messages; with sync (before
+%% the second restart) they survive; syncing an already-synced queue is a
+%% no-op; and unacked-but-fetched messages survive a sync/restart cycle too.
+eager_sync(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ %% Queue is on AB but not C.
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, C),
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME,
+ durable = true}),
+
+ %% Don't sync, lose messages
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ restart(Config, A),
+ restart(Config, B),
+ rabbit_ct_client_helpers:consume(Ch, ?QNAME, 0),
+
+ %% Sync, keep messages
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ restart(Config, A),
+ ok = sync(C, ?QNAME),
+ restart(Config, B),
+ rabbit_ct_client_helpers:consume(Ch, ?QNAME, ?MESSAGE_COUNT),
+
+ %% Check the no-need-to-sync path
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ ok = sync(C, ?QNAME),
+ rabbit_ct_client_helpers:consume(Ch, ?QNAME, ?MESSAGE_COUNT),
+
+ %% keep unacknowledged messages
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ rabbit_ct_client_helpers:fetch(Ch, ?QNAME, 2),
+ restart(Config, A),
+ rabbit_ct_client_helpers:fetch(Ch, ?QNAME, 3),
+ sync(C, ?QNAME),
+ restart(Config, B),
+ rabbit_ct_client_helpers:consume(Ch, ?QNAME, ?MESSAGE_COUNT),
+
+ ok.
+
+%% Cancelling an in-flight sync: batch size is forced to 1 on every node so
+%% the sync is slow enough to be caught mid-flight (see
+%% set_app_sync_batch_size). Cancelling when not syncing must be idempotent
+%% ({ok, not_syncing}).
+eager_sync_cancel(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ %% Queue is on AB but not C.
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, C),
+
+ set_app_sync_batch_size(A),
+ set_app_sync_batch_size(B),
+ set_app_sync_batch_size(C),
+
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME,
+ durable = true}),
+ {ok, not_syncing} = sync_cancel(C, ?QNAME), %% Idempotence
+ eager_sync_cancel_test2(Config, A, B, C, Ch).
+
+%% Inherently racy: the sync may complete before we observe 'syncing' or
+%% before the cancel lands. Both races are detected and the whole attempt is
+%% retried after purging the queue, rather than failing the test.
+eager_sync_cancel_test2(Config, A, B, C, Ch) ->
+ %% Sync then cancel
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ restart(Config, A),
+ set_app_sync_batch_size(A),
+ spawn_link(fun() -> ok = sync_nowait(C, ?QNAME) end),
+ case wait_for_syncing(C, ?QNAME, 1) of
+ ok ->
+ case sync_cancel(C, ?QNAME) of
+ ok ->
+ wait_for_running(C, ?QNAME),
+ restart(Config, B),
+ set_app_sync_batch_size(B),
+ rabbit_ct_client_helpers:consume(Ch, ?QNAME, 0),
+
+ {ok, not_syncing} = sync_cancel(C, ?QNAME), %% Idempotence
+ ok;
+ {ok, not_syncing} ->
+ %% Damn. Syncing finished between wait_for_syncing/3 and
+ %% sync_cancel/2 above. Start again.
+ amqp_channel:call(Ch, #'queue.purge'{queue = ?QNAME}),
+ eager_sync_cancel_test2(Config, A, B, C, Ch)
+ end;
+ synced_already ->
+ %% Damn. Syncing finished before wait_for_syncing/3. Start again.
+ amqp_channel:call(Ch, #'queue.purge'{queue = ?QNAME}),
+ eager_sync_cancel_test2(Config, A, B, C, Ch)
+ end.
+
+%% With the ha-sync-mode=automatic policy (?QNAME_AUTO matches it), mirrors
+%% resync on their own after each restart — no explicit sync call, no loss.
+eager_sync_auto(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, C),
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME_AUTO,
+ durable = true}),
+
+ %% Sync automatically, don't lose messages
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME_AUTO, ?MESSAGE_COUNT),
+ restart(Config, A),
+ wait_for_sync(C, ?QNAME_AUTO),
+ restart(Config, B),
+ wait_for_sync(C, ?QNAME_AUTO),
+ rabbit_ct_client_helpers:consume(Ch, ?QNAME_AUTO, ?MESSAGE_COUNT),
+
+ ok.
+
+%% Re-setting the policy with ha-sync-mode=automatic on an unsynchronised
+%% queue must itself trigger a sync.
+eager_sync_auto_on_policy_change(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ %% Queue is on AB but not C.
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, C),
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME,
+ durable = true}),
+
+ %% Sync automatically once the policy is changed to tell us to.
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ restart(Config, A),
+ Params = [rabbit_misc:atom_to_binary(N) || N <- [A, B]],
+ rabbit_ct_broker_helpers:set_ha_policy(Config,
+ A, <<"^ha.two.">>, {<<"nodes">>, Params},
+ [{<<"ha-sync-mode">>, <<"automatic">>}]),
+ wait_for_sync(C, ?QNAME),
+
+ ok.
+
+%% Requeued (rejected) messages must survive sync: reject one before the
+%% sync and one after, then consume both back.
+eager_sync_requeue(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ %% Queue is on AB but not C.
+ ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, C),
+ amqp_channel:call(ACh, #'queue.declare'{queue = ?QNAME,
+ durable = true}),
+
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, 2),
+ {#'basic.get_ok'{delivery_tag = TagA}, _} =
+ amqp_channel:call(Ch, #'basic.get'{queue = ?QNAME}),
+ {#'basic.get_ok'{delivery_tag = TagB}, _} =
+ amqp_channel:call(Ch, #'basic.get'{queue = ?QNAME}),
+ amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = TagA, requeue = true}),
+ restart(Config, B),
+ ok = sync(C, ?QNAME),
+ amqp_channel:cast(Ch, #'basic.reject'{delivery_tag = TagB, requeue = true}),
+ rabbit_ct_client_helpers:consume(Ch, ?QNAME, 2),
+
+ ok.
+
+%% ---------------------------------------------------------------------------
+%% Helpers: all operate on vhost "/" and drive syncs through rabbitmqctl
+%% (control_action) so the CLI code path is exercised too.
+%% ---------------------------------------------------------------------------
+
+restart(Config, Node) ->
+ rabbit_ct_broker_helpers:restart_broker(Config, Node).
+
+%% Kick off a sync and block until the mirrors report synchronised;
+%% non-ok results from the CLI are passed through unchanged.
+sync(Node, QName) ->
+ case sync_nowait(Node, QName) of
+ ok -> wait_for_sync(Node, QName),
+ ok;
+ R -> R
+ end.
+
+sync_nowait(Node, QName) -> action(Node, sync_queue, QName).
+sync_cancel(Node, QName) -> action(Node, cancel_sync_queue, QName).
+
+wait_for_sync(Node, QName) ->
+ sync_detection_SUITE:wait_for_sync_status(true, Node, QName).
+
+%% Run a rabbitmqctl queue action against vhost "/".
+action(Node, Action, QName) ->
+ rabbit_ct_broker_helpers:control_action(
+ Action, Node, [binary_to_list(QName)], [{"-p", "/"}]).
+
+%% Fetch the #amqqueue{} record; asserts the queue exists.
+queue(Node, QName) ->
+ QNameRes = rabbit_misc:r(<<"/">>, queue, QName),
+ {ok, Q} = rpc:call(Node, rabbit_amqqueue, lookup, [QNameRes]),
+ Q.
+
+%% Poll until a sync is observed in flight ('ok'); if instead the queue is
+%% already running with Target synchronised mirrors, report 'synced_already'
+%% so the caller can retry the whole scenario.
+wait_for_syncing(Node, QName, Target) ->
+ case state(Node, QName) of
+ {{syncing, _}, _} -> ok;
+ {running, Target} -> synced_already;
+ _ -> timer:sleep(100),
+ wait_for_syncing(Node, QName, Target)
+ end.
+
+wait_for_running(Node, QName) ->
+ case state(Node, QName) of
+ {running, _} -> ok;
+ _ -> timer:sleep(100),
+ wait_for_running(Node, QName)
+ end.
+
+%% Returns {QueueState, NumberOfSynchronisedSlaves}.
+state(Node, QName) ->
+ [{state, State}, {synchronised_slave_pids, Pids}] =
+ rpc:call(Node, rabbit_amqqueue, info,
+ [queue(Node, QName), [state, synchronised_slave_pids]]),
+ {State, length(Pids)}.
+
+%% eager_sync_cancel_test needs a batch size that's < ?MESSAGE_COUNT
+%% in order to pass, because a SyncBatchSize >= ?MESSAGE_COUNT will
+%% always finish before the test is able to cancel the sync.
+set_app_sync_batch_size(Node) ->
+ rabbit_ct_broker_helpers:control_action(
+ eval, Node,
+ ["application:set_env(rabbit, mirroring_sync_batch_size, 1)."]).
diff --git a/test/gm_SUITE.erl b/test/gm_SUITE.erl
new file mode 100644
index 0000000000..f5ccf75b70
--- /dev/null
+++ b/test/gm_SUITE.erl
@@ -0,0 +1,205 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
+%%
+
+%% Tests for gm (guaranteed multicast): group membership, broadcast
+%% ordering and death notification, run entirely in-process against a
+%% local mnesia.
+-module(gm_SUITE).
+
+-behaviour(gm).
+
+-include_lib("common_test/include/ct.hrl").
+
+-include("gm_specs.hrl").
+
+-compile(export_all).
+
+%% Wait up to 1s for a message matching Body, assert guard Bool over
+%% the variables Body bound, and yield 'passed'; throw Error on
+%% timeout. Body may bind free variables used by Bool.
+-define(RECEIVE_OR_THROW(Body, Bool, Error),
+ receive Body ->
+ true = Bool,
+ passed
+ after 1000 ->
+ throw(Error)
+ end).
+
+all() ->
+ [
+ join_leave,
+ broadcast,
+ confirmed_broadcast,
+ member_death,
+ receive_in_order
+ ].
+
+%% gm needs mnesia (for its group tables) plus the file_handle_cache
+%% and worker pool; start them once per suite and unlink so they
+%% outlive this process.
+init_per_suite(Config) ->
+ ok = application:set_env(mnesia, dir, ?config(priv_dir, Config)),
+ ok = application:start(mnesia),
+ {ok, FHC} = file_handle_cache:start_link(),
+ unlink(FHC),
+ {ok, WPS} = worker_pool_sup:start_link(),
+ unlink(WPS),
+ rabbit_ct_helpers:set_config(Config, [
+ {file_handle_cache_pid, FHC},
+ {worker_pool_sup_pid, WPS}
+ ]).
+
+%% Tear down the helper processes and mnesia started in init_per_suite.
+end_per_suite(Config) ->
+ exit(?config(worker_pool_sup_pid, Config), shutdown),
+ exit(?config(file_handle_cache_pid, Config), shutdown),
+ ok = application:stop(mnesia),
+ Config.
+
+%% ---------------------------------------------------------------------------
+%% Functional tests
+%% ---------------------------------------------------------------------------
+
+%% Joining and leaving alone must produce clean joined/birth/death
+%% notifications — the trivial callback does nothing in between.
+join_leave(_Config) ->
+ passed = with_two_members(fun (_Pid, _Pid2) -> passed end).
+
+broadcast(_Config) ->
+ passed = do_broadcast(fun gm:broadcast/2).
+
+confirmed_broadcast(_Config) ->
+ passed = do_broadcast(fun gm:confirmed_broadcast/2).
+
+%% A third member is added, then killed outright; both survivors must
+%% eventually observe its death.
+member_death(_Config) ->
+ passed = with_two_members(
+ fun (Pid, Pid2) ->
+ {ok, Pid3} = gm:start_link(
+ ?MODULE, ?MODULE, self(),
+ fun rabbit_misc:execute_mnesia_transaction/1),
+ passed = receive_joined(Pid3, [Pid, Pid2, Pid3],
+ timeout_joining_gm_group_3),
+ passed = receive_birth(Pid, Pid3, timeout_waiting_for_birth_3_1),
+ passed = receive_birth(Pid2, Pid3, timeout_waiting_for_birth_3_2),
+
+ unlink(Pid3),
+ exit(Pid3, kill),
+
+ %% Have to do some broadcasts to ensure that all members
+ %% find out about the death.
+ passed = (broadcast_fun(fun gm:confirmed_broadcast/2))(
+ Pid, Pid2),
+
+ passed = receive_death(Pid, Pid3, timeout_waiting_for_death_3_1),
+ passed = receive_death(Pid2, Pid3, timeout_waiting_for_death_3_2),
+
+ passed
+ end).
+
+%% Interleave 1000 broadcasts from each member and check every
+%% (receiver, sender) pair sees the numbers in the original order.
+receive_in_order(_Config) ->
+ passed = with_two_members(
+ fun (Pid, Pid2) ->
+ Numbers = lists:seq(1,1000),
+ [begin ok = gm:broadcast(Pid, N), ok = gm:broadcast(Pid2, N) end
+ || N <- Numbers],
+ passed = receive_numbers(
+ Pid, Pid, {timeout_for_msgs, Pid, Pid}, Numbers),
+ passed = receive_numbers(
+ Pid, Pid2, {timeout_for_msgs, Pid, Pid2}, Numbers),
+ passed = receive_numbers(
+ Pid2, Pid, {timeout_for_msgs, Pid2, Pid}, Numbers),
+ passed = receive_numbers(
+ Pid2, Pid2, {timeout_for_msgs, Pid2, Pid2}, Numbers),
+ passed
+ end).
+
+do_broadcast(Fun) ->
+ with_two_members(broadcast_fun(Fun)).
+
+%% Returns a fun that broadcasts via Fun from the first member and
+%% asserts both members receive the message.
+broadcast_fun(Fun) ->
+ fun (Pid, Pid2) ->
+ ok = Fun(Pid, magic_message),
+ passed = receive_or_throw({msg, Pid, Pid, magic_message},
+ timeout_waiting_for_msg),
+ passed = receive_or_throw({msg, Pid2, Pid, magic_message},
+ timeout_waiting_for_msg)
+ end.
+
+%% Test harness: create the gm tables, start two group members (with
+%% this process as the callback target), run Fun(Pid, Pid2), then make
+%% both leave cleanly and assert the mailbox is empty at the end.
+with_two_members(Fun) ->
+ ok = gm:create_tables(),
+
+ {ok, Pid} = gm:start_link(?MODULE, ?MODULE, self(),
+ fun rabbit_misc:execute_mnesia_transaction/1),
+ passed = receive_joined(Pid, [Pid], timeout_joining_gm_group_1),
+
+ {ok, Pid2} = gm:start_link(?MODULE, ?MODULE, self(),
+ fun rabbit_misc:execute_mnesia_transaction/1),
+ passed = receive_joined(Pid2, [Pid, Pid2], timeout_joining_gm_group_2),
+ passed = receive_birth(Pid, Pid2, timeout_waiting_for_birth_2),
+
+ passed = Fun(Pid, Pid2),
+
+ ok = gm:leave(Pid),
+ passed = receive_death(Pid2, Pid, timeout_waiting_for_death_1),
+ passed =
+ receive_termination(Pid, normal, timeout_waiting_for_termination_1),
+
+ ok = gm:leave(Pid2),
+ passed =
+ receive_termination(Pid2, normal, timeout_waiting_for_termination_2),
+
+ %% after 0: fail fast if anything unexpected is still queued
+ receive X -> throw({unexpected_message, X})
+ after 0 -> passed
+ end.
+
+%% The following helpers all expand ?RECEIVE_OR_THROW; the receive
+%% pattern binds variables (Birth, Death, ...) which the guard then
+%% checks, so they cannot be ordinary funs.
+receive_or_throw(Pattern, Error) ->
+ ?RECEIVE_OR_THROW(Pattern, true, Error).
+
+receive_birth(From, Born, Error) ->
+ ?RECEIVE_OR_THROW({members_changed, From, Birth, Death},
+ ([Born] == Birth) andalso ([] == Death),
+ Error).
+
+receive_death(From, Died, Error) ->
+ ?RECEIVE_OR_THROW({members_changed, From, Birth, Death},
+ ([] == Birth) andalso ([Died] == Death),
+ Error).
+
+receive_joined(From, Members, Error) ->
+ ?RECEIVE_OR_THROW({joined, From, Members1},
+ lists:usort(Members) == lists:usort(Members1),
+ Error).
+
+receive_termination(From, Reason, Error) ->
+ ?RECEIVE_OR_THROW({termination, From, Reason1},
+ Reason == Reason1,
+ Error).
+
+%% Assert the Numbers arrive from Sender at Pid in exactly list order.
+receive_numbers(_Pid, _Sender, _Error, []) ->
+ passed;
+receive_numbers(Pid, Sender, Error, [N | Numbers]) ->
+ ?RECEIVE_OR_THROW({msg, Pid, Sender, M},
+ M == N,
+ Error),
+ receive_numbers(Pid, Sender, Error, Numbers).
+
+%% -------------------------------------------------------------------
+%% gm behavior callbacks.
+%% -------------------------------------------------------------------
+
+%% Each callback simply forwards the event to the test process (Pid,
+%% the third argument given to gm:start_link/4), tagged with the gm
+%% member pid (self()) so tests can tell members apart.
+joined(Pid, Members) ->
+ Pid ! {joined, self(), Members},
+ ok.
+
+members_changed(Pid, Births, Deaths) ->
+ Pid ! {members_changed, self(), Births, Deaths},
+ ok.
+
+handle_msg(Pid, From, Msg) ->
+ Pid ! {msg, self(), From, Msg},
+ ok.
+
+handle_terminate(Pid, Reason) ->
+ Pid ! {termination, self(), Reason},
+ ok.
diff --git a/test/inet_proxy_dist.erl b/test/inet_proxy_dist.erl
new file mode 100644
index 0000000000..32b7641a79
--- /dev/null
+++ b/test/inet_proxy_dist.erl
@@ -0,0 +1,201 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved.
+%%
+-module(inet_proxy_dist).
+
+%% A distribution plugin that uses the usual inet_tcp_dist but allows
+%% insertion of a proxy at the receiving end.
+
+%% inet_*_dist "behaviour"
+-export([listen/1, accept/1, accept_connection/5,
+ setup/5, close/1, select/1, is_node_name/1]).
+
+%% For copypasta from inet_tcp_dist
+-export([do_setup/6]).
+-import(error_logger,[error_msg/2]).
+
+-define(REAL, inet_tcp_dist).
+
+%%----------------------------------------------------------------------------
+
+%% Everything except connection setup is delegated verbatim to the
+%% real inet_tcp_dist; only the outgoing connect path is modified.
+listen(Name) -> ?REAL:listen(Name).
+select(Node) -> ?REAL:select(Node).
+accept(Listen) -> ?REAL:accept(Listen).
+close(Socket) -> ?REAL:close(Socket).
+is_node_name(Node) -> ?REAL:is_node_name(Node).
+
+accept_connection(AcceptPid, Socket, MyNode, Allowed, SetupTime) ->
+ ?REAL:accept_connection(AcceptPid, Socket, MyNode, Allowed, SetupTime).
+
+%% This is copied from inet_tcp_dist, in order to change the
+%% output of erl_epmd:port_please/2.
+
+-include_lib("kernel/include/net_address.hrl").
+-include_lib("kernel/include/dist_util.hrl").
+
+%% Spawn the handshake worker at max priority, as inet_tcp_dist does.
+setup(Node, Type, MyNode, LongOrShortNames,SetupTime) ->
+ spawn_opt(?MODULE, do_setup,
+ [self(), Node, Type, MyNode, LongOrShortNames, SetupTime],
+ [link, {priority, max}]).
+
+%% Outgoing-connection handshake, copied from inet_tcp_dist with one
+%% change: if the proxy is enabled, connect to the proxy port mapped
+%% from the real distribution port (kernel env
+%% 'dist_and_proxy_ports_map') and register the source port with the
+%% proxy manager so the receiving proxy can identify us. Everything
+%% between the START/END markers is ours; the rest should be kept in
+%% sync with the OTP original.
+do_setup(Kernel, Node, Type, MyNode, LongOrShortNames,SetupTime) ->
+ ?trace("~p~n",[{inet_tcp_dist,self(),setup,Node}]),
+ [Name, Address] = splitnode(Node, LongOrShortNames),
+ case inet:getaddr(Address, inet) of
+ {ok, Ip} ->
+ Timer = dist_util:start_timer(SetupTime),
+ case erl_epmd:port_please(Name, Ip) of
+ {port, TcpPort, Version} ->
+ ?trace("port_please(~p) -> version ~p~n",
+ [Node,Version]),
+ dist_util:reset_timer(Timer),
+ %% Modification START
+ Ret = application:get_env(kernel,
+ dist_and_proxy_ports_map),
+ PortsMap = case Ret of
+ {ok, M} -> M;
+ undefined -> []
+ end,
+ ProxyPort = case inet_tcp_proxy:is_enabled() of
+ true -> proplists:get_value(TcpPort, PortsMap, TcpPort);
+ false -> TcpPort
+ end,
+ case inet_tcp:connect(Ip, ProxyPort,
+ [{active, false},
+ {packet,2}]) of
+ {ok, Socket} ->
+ {ok, {_, SrcPort}} = inet:sockname(Socket),
+ ok = inet_tcp_proxy_manager:register(
+ node(), Node, SrcPort, TcpPort, ProxyPort),
+ %% Modification END
+ %% #hs_data{} below is unchanged from inet_tcp_dist.
+ HSData = #hs_data{
+ kernel_pid = Kernel,
+ other_node = Node,
+ this_node = MyNode,
+ socket = Socket,
+ timer = Timer,
+ this_flags = 0,
+ other_version = Version,
+ f_send = fun inet_tcp:send/2,
+ f_recv = fun inet_tcp:recv/3,
+ f_setopts_pre_nodeup =
+ fun(S) ->
+ inet:setopts
+ (S,
+ [{active, false},
+ {packet, 4},
+ nodelay()])
+ end,
+ f_setopts_post_nodeup =
+ fun(S) ->
+ inet:setopts
+ (S,
+ [{active, true},
+ {deliver, port},
+ {packet, 4},
+ nodelay()])
+ end,
+ f_getll = fun inet:getll/1,
+ f_address =
+ fun(_,_) ->
+ #net_address{
+ address = {Ip,TcpPort},
+ host = Address,
+ protocol = tcp,
+ family = inet}
+ end,
+ mf_tick = fun tick/1,
+ mf_getstat = fun inet_tcp_dist:getstat/1,
+ request_type = Type
+ },
+ dist_util:handshake_we_started(HSData);
+ R ->
+ io:format("~p failed! ~p~n", [node(), R]),
+ %% Other Node may have closed since
+ %% port_please !
+ ?trace("other node (~p) "
+ "closed since port_please.~n",
+ [Node]),
+ ?shutdown(Node)
+ end;
+ _ ->
+ ?trace("port_please (~p) "
+ "failed.~n", [Node]),
+ ?shutdown(Node)
+ end;
+ _Other ->
+ ?trace("inet_getaddr(~p) "
+ "failed (~p).~n", [Node,_Other]),
+ ?shutdown(Node)
+ end.
+
+%% If Node is illegal terminate the connection setup!!
+%% Split 'name@host' into [Name, Host], enforcing the long/short name
+%% policy (copied verbatim from inet_tcp_dist).
+splitnode(Node, LongOrShortNames) ->
+ case split_node(atom_to_list(Node), $@, []) of
+ [Name|Tail] when Tail =/= [] ->
+ Host = lists:append(Tail),
+ case split_node(Host, $., []) of
+ [_] when LongOrShortNames =:= longnames ->
+ error_msg("** System running to use "
+ "fully qualified "
+ "hostnames **~n"
+ "** Hostname ~s is illegal **~n",
+ [Host]),
+ ?shutdown(Node);
+ L when length(L) > 1, LongOrShortNames =:= shortnames ->
+ error_msg("** System NOT running to use fully qualified "
+ "hostnames **~n"
+ "** Hostname ~s is illegal **~n",
+ [Host]),
+ ?shutdown(Node);
+ _ ->
+ [Name, Host]
+ end;
+ [_] ->
+ error_msg("** Nodename ~p illegal, no '@' character **~n",
+ [Node]),
+ ?shutdown(Node);
+ _ ->
+ error_msg("** Nodename ~p illegal **~n", [Node]),
+ ?shutdown(Node)
+ end.
+
+%% Split a string on every occurrence of Chr (like string tokenising,
+%% but keeping empty segments).
+split_node([Chr|T], Chr, Ack) -> [lists:reverse(Ack)|split_node(T, Chr, [])];
+split_node([H|T], Chr, Ack) -> split_node(T, Chr, [H|Ack]);
+split_node([], _, Ack) -> [lists:reverse(Ack)].
+
+%% Distribution sockets use TCP_NODELAY unless the kernel parameter
+%% 'dist_nodelay' is explicitly set to false; an unset or any other
+%% value keeps the default of true (we may not always want nodelay
+%% for performance reasons).
+nodelay() ->
+ case application:get_env(kernel, dist_nodelay) of
+ {ok, false} -> {nodelay, false};
+ _ -> {nodelay, true}
+ end.
+
+%% Distribution tick: a forced zero-byte send. When the socket turns
+%% out to be closed we notify ourselves with a 'tcp_closed' message
+%% (mirroring inet_tcp_dist) so the connection handler notices.
+tick(Socket) ->
+ Result = inet_tcp:send(Socket, [], [force]),
+ case Result of
+ {error, closed} ->
+ self() ! {tcp_closed, Socket},
+ {error, closed};
+ _ ->
+ Result
+ end.
diff --git a/test/inet_tcp_proxy.erl b/test/inet_tcp_proxy.erl
new file mode 100644
index 0000000000..4498b8f952
--- /dev/null
+++ b/test/inet_tcp_proxy.erl
@@ -0,0 +1,134 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved.
+%%
+-module(inet_tcp_proxy).
+
+%% A TCP proxy for insertion into the Erlang distribution mechanism,
+%% which allows us to simulate network partitions.
+
+-export([start/3, reconnect/1, is_enabled/0, allow/1, block/1]).
+
+-define(TABLE, ?MODULE).
+
+%% This can't start_link because there's no supervision hierarchy we
+%% can easily fit it into (we need to survive all application
+%% restarts). So we have to do some horrible error handling.
+
+%% Start the proxy process on this node. ManagerNode hosts the central
+%% inet_tcp_proxy_manager; DistPort is the real distribution port and
+%% ProxyPort the port the proxy listens on. Returns ok once the
+%% listener is accepting, or {error, Reason} if the proxy dies during
+%% startup.
+start(ManagerNode, DistPort, ProxyPort) ->
+ application:set_env(kernel, inet_tcp_proxy_manager_node, ManagerNode),
+ Parent = self(),
+ Pid = spawn(error_handler(fun() -> go(Parent, DistPort, ProxyPort) end)),
+ MRef = erlang:monitor(process, Pid),
+ receive
+ ready ->
+ %% [flush] so a 'DOWN' that raced with 'ready' cannot be
+ %% left behind in our mailbox after we demonitor.
+ erlang:demonitor(MRef, [flush]),
+ ok;
+ {'DOWN', MRef, _, _, Reason} ->
+ {error, Reason}
+ end.
+
+%% Force the distribution connections to the given nodes to be torn
+%% down; they will be re-established (through the proxy) on next use.
+reconnect(Nodes) ->
+ [erlang:disconnect_node(N) || N <- Nodes, N =/= node()],
+ ok.
+
+%% The proxy is considered enabled iff its named ETS table exists.
+is_enabled() ->
+ lists:member(?TABLE, ets:all()).
+
+%% Remove/insert the block marker for Node in the ETS table; the
+%% run_loop consults it on every received chunk of traffic.
+allow(Node) ->
+ rabbit_log:info("(~s) Allowing distribution between ~s and ~s~n",
+ [?MODULE, node(), Node]),
+ ets:delete(?TABLE, Node).
+block(Node) ->
+ rabbit_log:info("(~s) BLOCKING distribution between ~s and ~s~n",
+ [?MODULE, node(), Node]),
+ ets:insert(?TABLE, {Node, block}).
+
+%%----------------------------------------------------------------------------
+
+%% Wrap Thunk so expected shutdown ({{nodedown,_},_} from the test
+%% runner going away) dies quietly, while anything else is printed and
+%% halts the whole node — see the "horrible error handling" note above.
+error_handler(Thunk) ->
+ fun () ->
+ try
+ Thunk()
+ catch _:{{nodedown, _}, _} ->
+ %% The only other node we ever talk to is the test
+ %% runner; if that's down then the test is nearly
+ %% over; die quietly.
+ ok;
+ _:X ->
+ %% NOTE(review): erlang:get_stacktrace/0 is
+ %% deprecated/removed in modern OTP; fine for the
+ %% OTP this code targets.
+ io:format(user, "TCP proxy died with ~p~n At ~p~n",
+ [X, erlang:get_stacktrace()]),
+ erlang:halt(1)
+ end
+ end.
+
+%% Proxy main: create the (public, named) block table, listen on the
+%% proxy port, tell the parent we are ready, then accept forever.
+go(Parent, Port, ProxyPort) ->
+ ets:new(?TABLE, [public, named_table]),
+ {ok, Sock} = gen_tcp:listen(ProxyPort, [inet,
+ {reuseaddr, true}]),
+ Parent ! ready,
+ accept_loop(Sock, Port).
+
+%% One spawned relay per accepted connection; the relay owns the socket.
+accept_loop(ListenSock, Port) ->
+ {ok, Sock} = gen_tcp:accept(ListenSock),
+ Proxy = spawn(error_handler(fun() -> run_it(Sock, Port) end)),
+ ok = gen_tcp:controlling_process(Sock, Proxy),
+ accept_loop(ListenSock, Port).
+
+%% Identify the remote node via the manager (keyed on the peer's
+%% source port), open the onward connection to the real distribution
+%% port, and start relaying.
+run_it(SockIn, Port) ->
+ case {inet:peername(SockIn), inet:sockname(SockIn)} of
+ {{ok, {_Addr, SrcPort}}, {ok, {Addr, _OtherPort}}} ->
+ {ok, Remote, This} = inet_tcp_proxy_manager:lookup(SrcPort),
+ case node() of
+ This -> ok;
+ _ -> exit({not_me, node(), This})
+ end,
+ {ok, SockOut} = gen_tcp:connect(Addr, Port, [inet]),
+ run_loop({SockIn, SockOut}, Remote, []);
+ _ ->
+ ok
+ end.
+
+%% Relay loop. While the remote node is blocked, data is buffered (in
+%% reverse order) instead of forwarded; on unblock the buffer is
+%% flushed in one send. Block state transitions are logged via the
+%% process dictionary flag 'dist_was_blocked'.
+%% NOTE(review): a block that happens before any unblock was recorded
+%% ({true, undefined}) falls through to the silent clause, so the
+%% first BLOCKED transition may go unlogged — confirm if intended.
+run_loop(Sockets, RemoteNode, Buf0) ->
+ Block = [{RemoteNode, block}] =:= ets:lookup(?TABLE, RemoteNode),
+ receive
+ {tcp, Sock, Data} ->
+ Buf = [Data | Buf0],
+ case {Block, get(dist_was_blocked)} of
+ {true, false} ->
+ put(dist_was_blocked, Block),
+ rabbit_log:warning(
+ "(~s) Distribution BLOCKED between ~s and ~s~n",
+ [?MODULE, node(), RemoteNode]);
+ {false, S} when S =:= true orelse S =:= undefined ->
+ put(dist_was_blocked, Block),
+ rabbit_log:warning(
+ "(~s) Distribution allowed between ~s and ~s~n",
+ [?MODULE, node(), RemoteNode]);
+ _ ->
+ ok
+ end,
+ case Block of
+ false -> gen_tcp:send(other(Sock, Sockets), lists:reverse(Buf)),
+ run_loop(Sockets, RemoteNode, []);
+ true -> run_loop(Sockets, RemoteNode, Buf)
+ end;
+ {tcp_closed, Sock} ->
+ gen_tcp:close(other(Sock, Sockets));
+ X ->
+ exit({weirdness, X})
+ end.
+
+%% Given one socket of the pair, return the other.
+other(A, {A, B}) -> B;
+other(B, {A, B}) -> A.
diff --git a/test/inet_tcp_proxy_manager.erl b/test/inet_tcp_proxy_manager.erl
new file mode 100644
index 0000000000..18255b8d48
--- /dev/null
+++ b/test/inet_tcp_proxy_manager.erl
@@ -0,0 +1,107 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved.
+%%
+-module(inet_tcp_proxy_manager).
+
+%% The TCP proxies need to decide whether to block based on the node
+%% they're running on, and the node connecting to them. The trouble
+%% is, they don't have an easy way to determine the latter. Therefore
+%% when A connects to B we register the source port used by A here, so
+%% that B can later look it up and find out who A is without having to
+%% sniff the distribution protocol.
+%%
+%% That does unfortunately mean that we need a central control
+%% thing. We assume here it's running on the node called
+%% 'standalone_test' since that's where tests are orchestrated from.
+%%
+%% Yes, this leaks. For its intended lifecycle, that's fine.
+
+-behaviour(gen_server).
+
+-export([start/0, register/5, lookup/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+%% NOTE(review): ?NODE appears unused in this module — candidate for
+%% removal; confirm nothing includes/parses it.
+-define(NODE, ct).
+
+%% ports: dict of SrcPort -> {FromNode, ToNode};
+%% pending: [{SrcPort, GenServerFrom}] lookups awaiting registration.
+-record(state, {ports, pending}).
+
+start() ->
+ gen_server:start({local, ?MODULE}, ?MODULE, [], []).
+
+%% Record that FromNode connects to ToNode from SrcPort. When the
+%% proxy port equals the real port there is no proxy in the path, so
+%% there is nothing to register.
+register(_From, _To, _SrcPort, Port, Port) ->
+ %% No proxy, don't register
+ ok;
+register(From, To, SrcPort, _Port, _ProxyPort) ->
+ gen_server:call(name(), {register, From, To, SrcPort}, infinity).
+
+%% Resolve SrcPort -> {ok, FromNode, ToNode}; blocks until the
+%% corresponding register arrives (see pending handling in handle_call).
+lookup(SrcPort) ->
+ gen_server:call(name(), {lookup, SrcPort}, infinity).
+
+%% The manager runs on a single controller node, configured in the
+%% kernel env by inet_tcp_proxy:start/3.
+controller_node() ->
+ {ok, ManagerNode} = application:get_env(kernel,
+ inet_tcp_proxy_manager_node),
+ ManagerNode.
+
+name() ->
+ {?MODULE, controller_node()}.
+
+%%----------------------------------------------------------------------------
+
+%% Subscribe to nodeup/nodedown so dead nodes' entries can be pruned.
+init([]) ->
+ net_kernel:monitor_nodes(true),
+ {ok, #state{ports = dict:new(),
+ pending = []}}.
+
+%% A registration wakes up any lookup that was already waiting on the
+%% same source port, then stores the mapping.
+handle_call({register, FromNode, ToNode, SrcPort}, _From,
+ State = #state{ports = Ports,
+ pending = Pending}) ->
+ {Notify, Pending2} =
+ lists:partition(fun ({P, _}) -> P =:= SrcPort end, Pending),
+ [gen_server:reply(From, {ok, FromNode, ToNode}) || {_, From} <- Notify],
+ {reply, ok,
+ State#state{ports = dict:store(SrcPort, {FromNode, ToNode}, Ports),
+ pending = Pending2}};
+
+%% A lookup for an unknown port is parked (noreply) until the matching
+%% register call arrives; the caller blocks with timeout 'infinity'.
+handle_call({lookup, SrcPort}, From,
+ State = #state{ports = Ports, pending = Pending}) ->
+ case dict:find(SrcPort, Ports) of
+ {ok, {FromNode, ToNode}} ->
+ {reply, {ok, FromNode, ToNode}, State};
+ error ->
+ {noreply, State#state{pending = [{SrcPort, From} | Pending]}}
+ end;
+
+handle_call(_Req, _From, State) ->
+ {reply, unknown_request, State}.
+
+handle_cast(_C, State) ->
+ {noreply, State}.
+
+%% Drop all mappings that involve a node which just went down.
+handle_info({nodedown, Node}, State = #state{ports = Ports}) ->
+ Ports1 = dict:filter(
+ fun (_, {From, To}) ->
+ Node =/= From andalso Node =/= To
+ end, Ports),
+ {noreply, State#state{ports = Ports1}};
+
+handle_info(_I, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_, State, _) -> {ok, State}.
diff --git a/test/lazy_queue_SUITE.erl b/test/lazy_queue_SUITE.erl
new file mode 100644
index 0000000000..fe105cddd0
--- /dev/null
+++ b/test/lazy_queue_SUITE.erl
@@ -0,0 +1,224 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved.
+%%
+
+%% Tests for lazy queues: the x-queue-mode declare argument, the
+%% queue-mode policy, and publish/consume while flipping modes.
+-module(lazy_queue_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(QNAME, <<"queue.mode.test">>).
+-define(MESSAGE_COUNT, 2000).
+
+all() ->
+ [
+ {group, non_parallel_tests}
+ ].
+
+groups() ->
+ [
+ {non_parallel_tests, [], [
+ declare_args,
+ queue_mode_policy,
+ publish_consume
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+%% Each testcase gets a fresh 2-node cluster with an HA-all policy;
+%% tcp_ports_base is offset per testcase so suites can run in parallel.
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = 2,
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, ClusterSize},
+ {rmq_nodes_clustered, true},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++ [
+ fun rabbit_ct_broker_helpers:set_ha_policy_all/1
+ ]).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%% x-queue-mode declare argument selects the mode; absent or "default"
+%% both yield default mode.
+declare_args(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+ LQ = <<"lazy-q">>,
+ declare(Ch, LQ, [{<<"x-queue-mode">>, longstr, <<"lazy">>}]),
+ assert_queue_mode(A, LQ, lazy),
+
+ DQ = <<"default-q">>,
+ declare(Ch, DQ, [{<<"x-queue-mode">>, longstr, <<"default">>}]),
+ assert_queue_mode(A, DQ, default),
+
+ DQ2 = <<"default-q2">>,
+ declare(Ch, DQ2),
+ assert_queue_mode(A, DQ2, default),
+
+ passed.
+
+%% The queue-mode policy applies only to queues without an explicit
+%% x-queue-mode argument; argument always wins over policy.
+queue_mode_policy(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ set_ha_mode_policy(Config, A, <<"lazy">>),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+
+ LQ = <<"lazy-q">>,
+ declare(Ch, LQ, [{<<"x-queue-mode">>, longstr, <<"lazy">>}]),
+ assert_queue_mode(A, LQ, lazy),
+
+ LQ2 = <<"lazy-q-2">>,
+ declare(Ch, LQ2),
+ assert_queue_mode(A, LQ2, lazy),
+
+ DQ = <<"default-q">>,
+ declare(Ch, DQ, [{<<"x-queue-mode">>, longstr, <<"default">>}]),
+ assert_queue_mode(A, DQ, default),
+
+ set_ha_mode_policy(Config, A, <<"default">>),
+
+ %% LQ keeps lazy (declare arg); LQ2 follows the policy change.
+ ok = wait_for_queue_mode(A, LQ, lazy, 5000),
+ ok = wait_for_queue_mode(A, LQ2, default, 5000),
+ ok = wait_for_queue_mode(A, DQ, default, 5000),
+
+ passed.
+
+%% Publish and consume across several policy flips; deliveries must
+%% keep arriving regardless of the mode transitions in either direction.
+publish_consume(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+ declare(Ch, ?QNAME),
+
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ consume(Ch, ?QNAME, ack),
+ [assert_delivered(Ch, ack, P) || P <- lists:seq(1, ?MESSAGE_COUNT)],
+
+ set_ha_mode_policy(Config, A, <<"lazy">>),
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ [assert_delivered(Ch, ack, P) || P <- lists:seq(1, ?MESSAGE_COUNT)],
+
+ set_ha_mode_policy(Config, A, <<"default">>),
+ [assert_delivered(Ch, ack, P) || P <- lists:seq(1, ?MESSAGE_COUNT)],
+
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ set_ha_mode_policy(Config, A, <<"lazy">>),
+ rabbit_ct_client_helpers:publish(Ch, ?QNAME, ?MESSAGE_COUNT),
+ set_ha_mode_policy(Config, A, <<"default">>),
+ [assert_delivered(Ch, ack, P) || P <- lists:seq(1, ?MESSAGE_COUNT)],
+
+ set_ha_mode_policy(Config, A, <<"lazy">>),
+ [assert_delivered(Ch, ack, P) || P <- lists:seq(1, ?MESSAGE_COUNT)],
+
+ cancel(Ch),
+
+ passed.
+
+%%----------------------------------------------------------------------------
+
+declare(Ch, Q) ->
+ declare(Ch, Q, []).
+
+%% Declare a durable queue with the given declare arguments.
+declare(Ch, Q, Args) ->
+ amqp_channel:call(Ch, #'queue.declare'{queue = Q,
+ durable = true,
+ arguments = Args}).
+
+%% Subscribe with fixed tag <<"ctag">>; no_ack only when Ack =:= no_ack.
+consume(Ch, Q, Ack) ->
+ amqp_channel:subscribe(Ch, #'basic.consume'{queue = Q,
+ no_ack = Ack =:= no_ack,
+ consumer_tag = <<"ctag">>},
+ self()),
+ receive
+ #'basic.consume_ok'{consumer_tag = <<"ctag">>} ->
+ ok
+ end.
+
+cancel(Ch) ->
+ amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = <<"ctag">>}).
+
+%% Wait for the next delivery and assert its payload matches.
+%% NOTE(review): no 'after' clause — hangs the testcase (until CT
+%% times it out) if the delivery never arrives.
+assert_delivered(Ch, Ack, Payload) ->
+ PBin = payload2bin(Payload),
+ receive
+ {#'basic.deliver'{delivery_tag = DTag}, #amqp_msg{payload = PBin2}} ->
+ PBin = PBin2,
+ maybe_ack(Ch, Ack, DTag)
+ end.
+
+%% Only the atom 'do_ack' triggers a basic.ack. NOTE(review): callers
+%% in this suite pass 'ack', which therefore never acks — presumably
+%% intentional (deliveries stay unacked on the channel); confirm.
+maybe_ack(Ch, do_ack, DTag) ->
+ amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag}),
+ DTag;
+maybe_ack(_Ch, _, DTag) ->
+ DTag.
+
+payload2bin(Int) -> list_to_binary(integer_to_list(Int)).
+
+%% Apply an HA-all policy carrying the given queue-mode to all queues.
+set_ha_mode_policy(Config, Node, Mode) ->
+ ok = rabbit_ct_broker_helpers:set_ha_policy(Config, Node, <<".*">>, <<"all">>,
+ [{<<"queue-mode">>, Mode}]).
+
+
+%% Poll (100ms) until the queue reports Mode or the Max ms budget is
+%% exhausted; returns 'fail' (not ok) on timeout, which callers turn
+%% into a badmatch via ok = ...
+wait_for_queue_mode(_Node, _Q, _Mode, Max) when Max < 0 ->
+ fail;
+wait_for_queue_mode(Node, Q, Mode, Max) ->
+ case get_queue_mode(Node, Q) of
+ Mode -> ok;
+ _ -> timer:sleep(100),
+ wait_for_queue_mode(Node, Q, Mode, Max - 100)
+ end.
+
+assert_queue_mode(Node, Q, Expected) ->
+ Actual = get_queue_mode(Node, Q),
+ Expected = Actual.
+
+%% Read the queue's mode out of its backing_queue_status proplist.
+get_queue_mode(Node, Q) ->
+ QNameRes = rabbit_misc:r(<<"/">>, queue, Q),
+ {ok, AMQQueue} =
+ rpc:call(Node, rabbit_amqqueue, lookup, [QNameRes]),
+ [{backing_queue_status, Status}] =
+ rpc:call(Node, rabbit_amqqueue, info,
+ [AMQQueue, [backing_queue_status]]),
+ proplists:get_value(mode, Status).
diff --git a/test/many_node_ha_SUITE.erl b/test/many_node_ha_SUITE.erl
new file mode 100644
index 0000000000..22b39e7a3d
--- /dev/null
+++ b/test/many_node_ha_SUITE.erl
@@ -0,0 +1,117 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved.
+%%
+
+%% HA test on a 6-node cluster: kill several nodes mid-flight and
+%% check no messages are lost.
+-module(many_node_ha_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, cluster_size_6}
+ ].
+
+groups() ->
+ [
+ {cluster_size_6, [], [
+ kill_intermediate
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_6, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 6}
+ ]).
+
+end_per_group(_, Config) ->
+ Config.
+
+%% Fresh clustered brokers per testcase, with an HA-all policy applied
+%% as a final setup step.
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_clustered, true},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++ [
+ fun rabbit_ct_broker_helpers:set_ha_policy_all/1
+ ]).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%% Produce and consume concurrently on nodes F and E while killing the
+%% first four nodes (A-D) shortly after start; the consumer must still
+%% receive every message.
+kill_intermediate(Config) ->
+ [A, B, C, D, E, F] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ Msgs = rabbit_ct_helpers:cover_work_factor(Config, 20000),
+ MasterChannel = rabbit_ct_client_helpers:open_channel(Config, A),
+ ConsumerChannel = rabbit_ct_client_helpers:open_channel(Config, E),
+ ProducerChannel = rabbit_ct_client_helpers:open_channel(Config, F),
+ Queue = <<"test">>,
+ amqp_channel:call(MasterChannel, #'queue.declare'{queue = Queue,
+ auto_delete = false}),
+
+ %% TODO: this seems *highly* timing dependant - the assumption being
+ %% that the kill will work quickly enough that there will still be
+ %% some messages in-flight that we *must* receive despite the intervening
+ %% node deaths. It would be nice if we could find a means to do this
+ %% in a way that is not actually timing dependent.
+
+ %% Worse still, it assumes that killing the master will cause a
+ %% failover to Slave1, and so on. Nope.
+
+ ConsumerPid = rabbit_ha_test_consumer:create(ConsumerChannel,
+ Queue, self(), false, Msgs),
+
+ ProducerPid = rabbit_ha_test_producer:create(ProducerChannel,
+ Queue, self(), false, Msgs),
+
+ %% create a killer for the master and the first 3 slaves
+ [rabbit_ct_broker_helpers:kill_node_after(Config, Node, Time) ||
+ {Node, Time} <- [{A, 50},
+ {B, 50},
+ {C, 100},
+ {D, 100}]],
+
+ %% verify that the consumer got all msgs, or die, or time out
+ rabbit_ha_test_producer:await_response(ProducerPid),
+ rabbit_ha_test_consumer:await_response(ConsumerPid),
+ ok.
+
diff --git a/test/mirrored_supervisor_SUITE.erl b/test/mirrored_supervisor_SUITE.erl
new file mode 100644
index 0000000000..5ed17c90bb
--- /dev/null
+++ b/test/mirrored_supervisor_SUITE.erl
@@ -0,0 +1,335 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2015 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(mirrored_supervisor_SUITE).
+
+-behaviour(mirrored_supervisor).
+
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+-define(MS, mirrored_supervisor).
+-define(SERVER, mirrored_supervisor_SUITE_gs).
+
+all() ->
+ [
+ migrate,
+ migrate_twice,
+ already_there,
+ delete_restart,
+ which_children,
+ large_group,
+ childspecs_at_init,
+ anonymous_supervisors,
+ no_migration_on_shutdown,
+ start_idempotence,
+ unsupported,
+ ignore,
+ startup_failure
+ ].
+
+init_per_suite(Config) ->
+ ok = application:set_env(mnesia, dir, ?config(priv_dir, Config)),
+ ok = application:start(mnesia),
+ lists:foreach(
+ fun ({Tab, TabDef}) ->
+ TabDef1 = proplists:delete(match, TabDef),
+ case mnesia:create_table(Tab, TabDef1) of
+ {atomic, ok} ->
+ ok;
+ {aborted, Reason} ->
+ throw({error,
+ {table_creation_failed, Tab, TabDef1, Reason}})
+ end
+ end, mirrored_supervisor:table_definitions()),
+ Config.
+
+end_per_suite(Config) ->
+ ok = application:stop(mnesia),
+ Config.
+
+%% ---------------------------------------------------------------------------
+%% Functional tests
+%% ---------------------------------------------------------------------------
+
+%% Simplest test
+migrate(_Config) ->
+ passed = with_sups(
+ fun([A, _]) ->
+ {ok, _} = ?MS:start_child(a, childspec(worker)),
+ Pid1 = pid_of(worker),
+ kill_registered(A, Pid1),
+ Pid2 = pid_of(worker),
+ false = (Pid1 =:= Pid2)
+ end, [a, b]).
+
+%% Is migration transitive?
+migrate_twice(_Config) ->
+ passed = with_sups(
+ fun([A, B]) ->
+ {ok, _} = ?MS:start_child(a, childspec(worker)),
+ Pid1 = pid_of(worker),
+ kill_registered(A, Pid1),
+ {ok, C} = start_sup(c),
+ Pid2 = pid_of(worker),
+ kill_registered(B, Pid2),
+ Pid3 = pid_of(worker),
+ false = (Pid1 =:= Pid3),
+ kill(C)
+ end, [a, b]).
+
+%% Can't start the same child twice
+already_there(_Config) ->
+ passed = with_sups(
+ fun([_, _]) ->
+ S = childspec(worker),
+ {ok, Pid} = ?MS:start_child(a, S),
+ {error, {already_started, Pid}} = ?MS:start_child(b, S)
+ end, [a, b]).
+
+%% Deleting and restarting should work as per a normal supervisor
+delete_restart(_Config) ->
+ passed = with_sups(
+ fun([_, _]) ->
+ S = childspec(worker),
+ {ok, Pid1} = ?MS:start_child(a, S),
+ {error, running} = ?MS:delete_child(a, worker),
+ ok = ?MS:terminate_child(a, worker),
+ ok = ?MS:delete_child(a, worker),
+ {ok, Pid2} = ?MS:start_child(b, S),
+ false = (Pid1 =:= Pid2),
+ ok = ?MS:terminate_child(b, worker),
+ {ok, Pid3} = ?MS:restart_child(b, worker),
+ Pid3 = pid_of(worker),
+ false = (Pid2 =:= Pid3),
+ %% Not the same supervisor as the worker is on
+ ok = ?MS:terminate_child(a, worker),
+ ok = ?MS:delete_child(a, worker),
+ {ok, Pid4} = ?MS:start_child(a, S),
+ false = (Pid3 =:= Pid4)
+ end, [a, b]).
+
+which_children(_Config) ->
+ passed = with_sups(
+ fun([A, B] = Both) ->
+ ?MS:start_child(A, childspec(worker)),
+ assert_wc(Both, fun ([C]) -> true = is_pid(wc_pid(C)) end),
+ ok = ?MS:terminate_child(a, worker),
+ assert_wc(Both, fun ([C]) -> undefined = wc_pid(C) end),
+ {ok, _} = ?MS:restart_child(a, worker),
+ assert_wc(Both, fun ([C]) -> true = is_pid(wc_pid(C)) end),
+ ?MS:start_child(B, childspec(worker2)),
+ assert_wc(Both, fun (C) -> 2 = length(C) end)
+ end, [a, b]).
+
+assert_wc(Sups, Fun) ->
+ [Fun(?MS:which_children(Sup)) || Sup <- Sups].
+
+wc_pid(Child) ->
+ {worker, Pid, worker, [?MODULE]} = Child,
+ Pid.
+
+%% Not all the members of the group should actually do the failover
+large_group(_Config) ->
+ passed = with_sups(
+ fun([A, _, _, _]) ->
+ {ok, _} = ?MS:start_child(a, childspec(worker)),
+ Pid1 = pid_of(worker),
+ kill_registered(A, Pid1),
+ Pid2 = pid_of(worker),
+ false = (Pid1 =:= Pid2)
+ end, [a, b, c, d]).
+
+%% Do childspecs work when returned from init?
+childspecs_at_init(_Config) ->
+ S = childspec(worker),
+ passed = with_sups(
+ fun([A, _]) ->
+ Pid1 = pid_of(worker),
+ kill_registered(A, Pid1),
+ Pid2 = pid_of(worker),
+ false = (Pid1 =:= Pid2)
+ end, [{a, [S]}, {b, [S]}]).
+
+anonymous_supervisors(_Config) ->
+ passed = with_sups(
+ fun([A, _B]) ->
+ {ok, _} = ?MS:start_child(A, childspec(worker)),
+ Pid1 = pid_of(worker),
+ kill_registered(A, Pid1),
+ Pid2 = pid_of(worker),
+ false = (Pid1 =:= Pid2)
+ end, [anon, anon]).
+
+%% When a mirrored_supervisor terminates, we should not migrate, but
+%% the whole supervisor group should shut down. To test this we set up
+%% a situation where the gen_server will only fail if it's running
+%% under the supervisor called 'evil'. It should not migrate to
+%% 'good' and survive, rather the whole group should go away.
+no_migration_on_shutdown(_Config) ->
+ passed = with_sups(
+ fun([Evil, _]) ->
+ {ok, _} = ?MS:start_child(Evil, childspec(worker)),
+ try
+ call(worker, ping, 1000, 100),
+ exit(worker_should_not_have_migrated)
+ catch exit:{timeout_waiting_for_server, _, _} ->
+ ok
+ end
+ end, [evil, good]).
+
+start_idempotence(_Config) ->
+ passed = with_sups(
+ fun([_]) ->
+ CS = childspec(worker),
+ {ok, Pid} = ?MS:start_child(a, CS),
+ {error, {already_started, Pid}} = ?MS:start_child(a, CS),
+ ?MS:terminate_child(a, worker),
+ {error, already_present} = ?MS:start_child(a, CS)
+ end, [a]).
+
+unsupported(_Config) ->
+ try
+ ?MS:start_link({global, foo}, get_group(group), fun tx_fun/1, ?MODULE,
+ {one_for_one, []}),
+ exit(no_global)
+ catch error:badarg ->
+ ok
+ end,
+ try
+ {ok, _} = ?MS:start_link({local, foo}, get_group(group),
+ fun tx_fun/1, ?MODULE, {simple_one_for_one, []}),
+ exit(no_sofo)
+ catch error:badarg ->
+ ok
+ end.
+
+%% Just test we don't blow up
+ignore(_Config) ->
+ ?MS:start_link({local, foo}, get_group(group), fun tx_fun/1, ?MODULE,
+ {fake_strategy_for_ignore, []}).
+
+startup_failure(_Config) ->
+ [test_startup_failure(F) || F <- [want_error, want_exit]].
+
+test_startup_failure(Fail) ->
+ process_flag(trap_exit, true),
+ ?MS:start_link(get_group(group), fun tx_fun/1, ?MODULE,
+ {one_for_one, [childspec(Fail)]}),
+ receive
+ {'EXIT', _, shutdown} ->
+ ok
+ after 1000 ->
+ exit({did_not_exit, Fail})
+ end,
+ process_flag(trap_exit, false).
+
+%% ---------------------------------------------------------------------------
+
+with_sups(Fun, Sups) ->
+ inc_group(),
+ Pids = [begin {ok, Pid} = start_sup(Sup), Pid end || Sup <- Sups],
+ Fun(Pids),
+ [kill(Pid) || Pid <- Pids, is_process_alive(Pid)],
+ timer:sleep(500),
+ passed.
+
+start_sup(Spec) ->
+ start_sup(Spec, group).
+
+start_sup({Name, ChildSpecs}, Group) ->
+ {ok, Pid} = start_sup0(Name, get_group(Group), ChildSpecs),
+    %% We are not a supervisor; when we kill the supervisor we do not
+    %% want to die!
+ unlink(Pid),
+ {ok, Pid};
+
+start_sup(Name, Group) ->
+ start_sup({Name, []}, Group).
+
+start_sup0(anon, Group, ChildSpecs) ->
+ ?MS:start_link(Group, fun tx_fun/1, ?MODULE,
+ {one_for_one, ChildSpecs});
+
+start_sup0(Name, Group, ChildSpecs) ->
+ ?MS:start_link({local, Name}, Group, fun tx_fun/1, ?MODULE,
+ {one_for_one, ChildSpecs}).
+
+childspec(Id) ->
+ {Id,{?SERVER, start_link, [Id]}, transient, 16#ffffffff, worker, [?MODULE]}.
+
+pid_of(Id) ->
+ {received, Pid, ping} = call(Id, ping),
+ Pid.
+
+tx_fun(Fun) ->
+ case mnesia:sync_transaction(Fun) of
+ {atomic, Result} -> Result;
+ {aborted, Reason} -> throw({error, Reason})
+ end.
+
+inc_group() ->
+ Count = case get(counter) of
+ undefined -> 0;
+ C -> C
+ end + 1,
+ put(counter, Count).
+
+get_group(Group) ->
+ {Group, get(counter)}.
+
+call(Id, Msg) -> call(Id, Msg, 10*1000, 100).
+
+call(Id, Msg, 0, _Decr) ->
+ exit({timeout_waiting_for_server, {Id, Msg}, erlang:get_stacktrace()});
+
+call(Id, Msg, MaxDelay, Decr) ->
+ try
+ gen_server:call(Id, Msg, infinity)
+ catch exit:_ -> timer:sleep(Decr),
+ call(Id, Msg, MaxDelay - Decr, Decr)
+ end.
+
+kill(Pid) -> kill(Pid, []).
+kill(Pid, Wait) when is_pid(Wait) -> kill(Pid, [Wait]);
+kill(Pid, Waits) ->
+ erlang:monitor(process, Pid),
+ [erlang:monitor(process, P) || P <- Waits],
+ exit(Pid, bang),
+ kill_wait(Pid),
+ [kill_wait(P) || P <- Waits].
+
+kill_registered(Pid, Child) ->
+ {registered_name, Name} = erlang:process_info(Child, registered_name),
+ kill(Pid, Child),
+ false = (Child =:= whereis(Name)),
+ ok.
+
+kill_wait(Pid) ->
+ receive
+ {'DOWN', _Ref, process, Pid, _Reason} ->
+ ok
+ end.
+
+%% ---------------------------------------------------------------------------
+
+init({fake_strategy_for_ignore, _ChildSpecs}) ->
+ ignore;
+
+init({Strategy, ChildSpecs}) ->
+ {ok, {{Strategy, 0, 1}, ChildSpecs}}.
+
diff --git a/test/mirrored_supervisor_SUITE_gs.erl b/test/mirrored_supervisor_SUITE_gs.erl
new file mode 100644
index 0000000000..867754b4a2
--- /dev/null
+++ b/test/mirrored_supervisor_SUITE_gs.erl
@@ -0,0 +1,66 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2015 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(mirrored_supervisor_SUITE_gs).
+
+%% Dumb gen_server we can supervise
+
+-export([start_link/1]).
+
+-export([init/1, handle_call/3, handle_info/2, terminate/2, code_change/3,
+ handle_cast/2]).
+
+-behaviour(gen_server).
+
+-define(MS, mirrored_supervisor).
+
+start_link(want_error) ->
+ {error, foo};
+
+start_link(want_exit) ->
+ exit(foo);
+
+start_link(Id) ->
+ gen_server:start_link({local, Id}, ?MODULE, [], []).
+
+%% ---------------------------------------------------------------------------
+
+init([]) ->
+ {ok, state}.
+
+handle_call(Msg, _From, State) ->
+ die_if_my_supervisor_is_evil(),
+ {reply, {received, self(), Msg}, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+die_if_my_supervisor_is_evil() ->
+ try lists:keysearch(self(), 2, ?MS:which_children(evil)) of
+ false -> ok;
+ _ -> exit(doooom)
+ catch
+ exit:{noproc, _} -> ok
+ end.
diff --git a/test/msg_store_SUITE.erl b/test/msg_store_SUITE.erl
new file mode 100644
index 0000000000..f63f6cb745
--- /dev/null
+++ b/test/msg_store_SUITE.erl
@@ -0,0 +1,62 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2015 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(msg_store_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+
+-compile(export_all).
+
+-define(T(Fun, Args), (catch apply(rabbit, Fun, Args))).
+
+all() ->
+ [
+ parameter_validation
+ ].
+
+parameter_validation(_Config) ->
+ %% make sure it works with default values
+ ok = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [?CREDIT_DISC_BOUND, ?IO_BATCH_SIZE]),
+
+ %% IO_BATCH_SIZE must be greater than CREDIT_DISC_BOUND initial credit
+ ok = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [{2000, 500}, 3000]),
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [{2000, 500}, 1500]),
+
+ %% All values must be integers
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [{2000, 500}, "1500"]),
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [{"2000", 500}, abc]),
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [{2000, "500"}, 2048]),
+
+ %% CREDIT_DISC_BOUND must be a tuple
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [[2000, 500], 1500]),
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [2000, 1500]),
+
+ %% config values can't be smaller than default values
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [{1999, 500}, 2048]),
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [{2000, 499}, 2048]),
+ {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound,
+ [{2000, 500}, 2047]).
diff --git a/test/partitions_SUITE.erl b/test/partitions_SUITE.erl
new file mode 100644
index 0000000000..b93e1ea9dd
--- /dev/null
+++ b/test/partitions_SUITE.erl
@@ -0,0 +1,438 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2016 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(partitions_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+%% We set the net ticktime to 1s and the setup time is 7s, so an 8s
+%% delay makes sure the tick timeout has passed...
+-define(DELAY, 8000).
+
+all() ->
+ [
+ {group, net_ticktime_1},
+ {group, net_ticktime_10}
+ ].
+
+groups() ->
+ [
+ {net_ticktime_1, [], [
+ {cluster_size_2, [], [
+ ctl_ticktime_sync,
+ prompt_disconnect_detection
+ ]},
+ {cluster_size_3, [], [
+ autoheal,
+ autoheal_after_pause_if_all_down,
+ ignore,
+ pause_if_all_down_on_blocked,
+ pause_if_all_down_on_down,
+ pause_minority_on_blocked,
+ pause_minority_on_down,
+ partial_false_positive,
+ partial_to_full,
+ partial_pause_minority,
+ partial_pause_if_all_down
+ ]}
+ ]},
+ {net_ticktime_10, [], [
+ {cluster_size_2, [], [
+ pause_if_all_down_false_promises_mirrored,
+ pause_if_all_down_false_promises_unmirrored,
+ pause_minority_false_promises_mirrored,
+ pause_minority_false_promises_unmirrored
+ ]}
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config, [
+ fun enable_dist_proxy_manager/1
+ ]).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(net_ticktime_1, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{net_ticktime, 1}]);
+init_per_group(net_ticktime_10, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{net_ticktime, 10}]);
+init_per_group(cluster_size_2, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]);
+init_per_group(cluster_size_3, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_clustered, false},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++ [
+ fun enable_dist_proxy/1,
+ fun rabbit_ct_broker_helpers:cluster_nodes/1
+ ]).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+enable_dist_proxy_manager(Config) ->
+ inet_tcp_proxy_manager:start(),
+ rabbit_ct_helpers:set_config(Config,
+ {erlang_dist_module, inet_proxy_dist}).
+
+enable_dist_proxy(Config) ->
+ NodeConfigs = rabbit_ct_broker_helpers:get_node_configs(Config),
+ Nodes = [?config(nodename, NodeConfig) || NodeConfig <- NodeConfigs],
+ ManagerNode = node(),
+ ok = lists:foreach(
+ fun(NodeConfig) ->
+ ok = rabbit_ct_broker_helpers:rpc(Config,
+ ?config(nodename, NodeConfig),
+ ?MODULE, enable_dist_proxy_on_node,
+ [NodeConfig, ManagerNode, Nodes])
+ end, NodeConfigs),
+ Config.
+
+enable_dist_proxy_on_node(NodeConfig, ManagerNode, Nodes) ->
+ Nodename = ?config(nodename, NodeConfig),
+ DistPort = ?config(tcp_port_erlang_dist, NodeConfig),
+ ProxyPort = ?config(tcp_port_erlang_dist_proxy, NodeConfig),
+ ok = inet_tcp_proxy:start(ManagerNode, DistPort, ProxyPort),
+ ok = inet_tcp_proxy:reconnect(Nodes -- [Nodename]).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+ignore(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ block_unblock([{A, B}, {A, C}]),
+ timer:sleep(?DELAY),
+ [B, C] = partitions(A),
+ [A] = partitions(B),
+ [A] = partitions(C),
+ ok.
+
+pause_minority_on_down(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ set_mode(Config, pause_minority),
+
+ true = is_running(A),
+
+ rabbit_ct_broker_helpers:kill_node(Config, B),
+ timer:sleep(?DELAY),
+ true = is_running(A),
+
+ rabbit_ct_broker_helpers:kill_node(Config, C),
+ await_running(A, false),
+ ok.
+
+pause_minority_on_blocked(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ set_mode(Config, pause_minority),
+ pause_on_blocked(A, B, C).
+
+pause_if_all_down_on_down(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ set_mode(Config, {pause_if_all_down, [C], ignore}),
+ [(true = is_running(N)) || N <- [A, B, C]],
+
+ rabbit_ct_broker_helpers:kill_node(Config, B),
+ timer:sleep(?DELAY),
+ [(true = is_running(N)) || N <- [A, C]],
+
+ rabbit_ct_broker_helpers:kill_node(Config, C),
+ timer:sleep(?DELAY),
+ await_running(A, false),
+ ok.
+
+pause_if_all_down_on_blocked(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ set_mode(Config, {pause_if_all_down, [C], ignore}),
+ pause_on_blocked(A, B, C).
+
+pause_on_blocked(A, B, C) ->
+ [(true = is_running(N)) || N <- [A, B, C]],
+ block([{A, B}, {A, C}]),
+ await_running(A, false),
+ [await_running(N, true) || N <- [B, C]],
+ unblock([{A, B}, {A, C}]),
+ [await_running(N, true) || N <- [A, B, C]],
+ Status = rpc:call(B, rabbit_mnesia, status, []),
+ [] = rabbit_misc:pget(partitions, Status),
+ ok.
+
+%%% Make sure we do not confirm any messages after a partition has
+%%% happened but before we pause, since any such confirmations would be
+%%% lies.
+%%%
+%%% This test has to use an AB cluster (not ABC) since GM ends up
+%%% taking longer to detect down slaves when there are more nodes and
+%%% we close the window by mistake.
+%%%
+%%% In general there are quite a few ways to accidentally cause this
+%%% test to pass since there are a lot of things in the broker that can
+%%% suddenly take several seconds to time out when TCP connections
+%%% won't establish.
+
+pause_minority_false_promises_mirrored(Config) ->
+ rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<".*">>, <<"all">>),
+ pause_false_promises(Config, pause_minority).
+
+pause_minority_false_promises_unmirrored(Config) ->
+ pause_false_promises(Config, pause_minority).
+
+pause_if_all_down_false_promises_mirrored(Config) ->
+ rabbit_ct_broker_helpers:set_ha_policy(Config, 0, <<".*">>, <<"all">>),
+ B = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
+ pause_false_promises(Config, {pause_if_all_down, [B], ignore}).
+
+pause_if_all_down_false_promises_unmirrored(Config) ->
+ B = rabbit_ct_broker_helpers:get_node_config(Config, 1, nodename),
+ pause_false_promises(Config, {pause_if_all_down, [B], ignore}).
+
+pause_false_promises(Config, ClusterPartitionHandling) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ set_mode(Config, [A], ClusterPartitionHandling),
+ ChA = rabbit_ct_client_helpers:open_channel(Config, A),
+ ChB = rabbit_ct_client_helpers:open_channel(Config, B),
+ amqp_channel:call(ChB, #'queue.declare'{queue = <<"test">>,
+ durable = true}),
+ amqp_channel:call(ChA, #'confirm.select'{}),
+ amqp_channel:register_confirm_handler(ChA, self()),
+
+ %% Cause a partition after 1s
+ Self = self(),
+ spawn_link(fun () ->
+ timer:sleep(1000),
+ %%io:format(user, "~p BLOCK~n", [calendar:local_time()]),
+ block([{A, B}]),
+ unlink(Self)
+ end),
+
+ %% Publish large no of messages, see how many we get confirmed
+ [amqp_channel:cast(ChA, #'basic.publish'{routing_key = <<"test">>},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 1}}) ||
+ _ <- lists:seq(1, 100000)],
+ %%io:format(user, "~p finish publish~n", [calendar:local_time()]),
+
+ %% Time for the partition to be detected. We don't put this sleep
+ %% in receive_acks since otherwise we'd have another similar sleep
+ %% at the end.
+ timer:sleep(30000),
+ Confirmed = receive_acks(0),
+ %%io:format(user, "~p got acks~n", [calendar:local_time()]),
+ await_running(A, false),
+ %%io:format(user, "~p A stopped~n", [calendar:local_time()]),
+
+ unblock([{A, B}]),
+ await_running(A, true),
+
+ %% But how many made it onto the rest of the cluster?
+ #'queue.declare_ok'{message_count = Survived} =
+ amqp_channel:call(ChB, #'queue.declare'{queue = <<"test">>,
+ durable = true}),
+ %%io:format(user, "~p queue declared~n", [calendar:local_time()]),
+ case Confirmed > Survived of
+ true -> io:format("Confirmed=~p Survived=~p~n", [Confirmed, Survived]);
+ false -> ok
+ end,
+ true = (Confirmed =< Survived),
+
+ rabbit_ct_client_helpers:close_channel(ChB),
+ rabbit_ct_client_helpers:close_channel(ChA),
+ ok.
+
+receive_acks(Max) ->
+ receive
+ #'basic.ack'{delivery_tag = DTag} ->
+ receive_acks(DTag)
+ after ?DELAY ->
+ Max
+ end.
+
+prompt_disconnect_detection(Config) ->
+ [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ ChB = rabbit_ct_client_helpers:open_channel(Config, B),
+ [amqp_channel:call(ChB, #'queue.declare'{}) || _ <- lists:seq(1, 100)],
+ block([{A, B}]),
+ timer:sleep(?DELAY),
+ %% We want to make sure we do not end up waiting for setuptime *
+ %% no of queues. Unfortunately that means we need a timeout...
+ [] = rabbit_ct_broker_helpers:rpc(Config, A,
+ rabbit_amqqueue, info_all, [<<"/">>], ?DELAY),
+ rabbit_ct_client_helpers:close_channel(ChB),
+ ok.
+
+ctl_ticktime_sync(Config) ->
+ %% Server has 1s net_ticktime, make sure ctl doesn't get disconnected
+ Cmd = ["eval", "timer:sleep(5000)."],
+ {ok, "ok\n"} = rabbit_ct_broker_helpers:rabbitmqctl(Config, 0, Cmd).
+
+%% NB: we test full and partial partitions here.
+autoheal(Config) ->
+ set_mode(Config, autoheal),
+ do_autoheal(Config).
+
+autoheal_after_pause_if_all_down(Config) ->
+ [_, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ set_mode(Config, {pause_if_all_down, [B, C], autoheal}),
+ do_autoheal(Config).
+
+do_autoheal(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Test = fun (Pairs) ->
+ block_unblock(Pairs),
+ %% Sleep to make sure all the partitions are noticed
+ %% ?DELAY for the net_tick timeout
+ timer:sleep(?DELAY),
+ [await_listening(N, true) || N <- [A, B, C]],
+ [await_partitions(N, []) || N <- [A, B, C]]
+ end,
+ Test([{B, C}]),
+ Test([{A, C}, {B, C}]),
+ Test([{A, B}, {A, C}, {B, C}]),
+ ok.
+
+partial_false_positive(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ block([{A, B}]),
+ timer:sleep(1000),
+ block([{A, C}]),
+ timer:sleep(?DELAY),
+ unblock([{A, B}, {A, C}]),
+ timer:sleep(?DELAY),
+ %% When B times out A's connection, it will check with C. C will
+ %% not have timed out A yet, but already it can't talk to it. We
+ %% need to not consider this a partial partition; B and C should
+ %% still talk to each other.
+ [B, C] = partitions(A),
+ [A] = partitions(B),
+ [A] = partitions(C),
+ ok.
+
+partial_to_full(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ block_unblock([{A, B}]),
+ timer:sleep(?DELAY),
+ %% There are several valid ways this could go, depending on how
+ %% the DOWN messages race: either A gets disconnected first and BC
+ %% stay together, or B gets disconnected first and AC stay
+ %% together, or both make it through and all three get
+ %% disconnected.
+ case {partitions(A), partitions(B), partitions(C)} of
+ {[B, C], [A], [A]} -> ok;
+ {[B], [A, C], [B]} -> ok;
+ {[B, C], [A, C], [A, B]} -> ok;
+ Partitions -> exit({partitions, Partitions})
+ end.
+
+partial_pause_minority(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ set_mode(Config, pause_minority),
+ block([{A, B}]),
+ [await_running(N, false) || N <- [A, B]],
+ await_running(C, true),
+ unblock([{A, B}]),
+ [await_listening(N, true) || N <- [A, B, C]],
+ [await_partitions(N, []) || N <- [A, B, C]],
+ ok.
+
+partial_pause_if_all_down(Config) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ set_mode(Config, {pause_if_all_down, [B], ignore}),
+ block([{A, B}]),
+ await_running(A, false),
+ [await_running(N, true) || N <- [B, C]],
+ unblock([{A, B}]),
+ [await_listening(N, true) || N <- [A, B, C]],
+ [await_partitions(N, []) || N <- [A, B, C]],
+ ok.
+
+set_mode(Config, Mode) ->
+ rabbit_ct_broker_helpers:rpc_all(Config,
+ application, set_env, [rabbit, cluster_partition_handling, Mode]).
+
+set_mode(Config, Nodes, Mode) ->
+ rabbit_ct_broker_helpers:rpc(Config, Nodes,
+ application, set_env, [rabbit, cluster_partition_handling, Mode]).
+
+block_unblock(Pairs) ->
+ block(Pairs),
+ timer:sleep(?DELAY),
+ unblock(Pairs).
+
+block(Pairs) -> [block(X, Y) || {X, Y} <- Pairs].
+unblock(Pairs) -> [allow(X, Y) || {X, Y} <- Pairs].
+
+partitions(Node) ->
+ case rpc:call(Node, rabbit_node_monitor, partitions, []) of
+ {badrpc, {'EXIT', E}} = R -> case rabbit_misc:is_abnormal_exit(E) of
+ true -> R;
+ false -> timer:sleep(1000),
+ partitions(Node)
+ end;
+ Partitions -> Partitions
+ end.
+
+block(X, Y) ->
+ rpc:call(X, inet_tcp_proxy, block, [Y]),
+ rpc:call(Y, inet_tcp_proxy, block, [X]).
+
+allow(X, Y) ->
+ rpc:call(X, inet_tcp_proxy, allow, [Y]),
+ rpc:call(Y, inet_tcp_proxy, allow, [X]).
+
+await_running (Node, Bool) -> await(Node, Bool, fun is_running/1).
+await_listening (Node, Bool) -> await(Node, Bool, fun is_listening/1).
+await_partitions(Node, Parts) -> await(Node, Parts, fun partitions/1).
+
+await(Node, Res, Fun) ->
+ case Fun(Node) of
+ Res -> ok;
+ _ -> timer:sleep(100),
+ await(Node, Res, Fun)
+ end.
+
+is_running(Node) -> rpc:call(Node, rabbit, is_running, []).
+
+is_listening(Node) ->
+ case rpc:call(Node, rabbit_networking, node_listeners, [Node]) of
+ [] -> false;
+ [_|_] -> true;
+ _ -> false
+ end.
diff --git a/test/plugin_versioning_SUITE.erl b/test/plugin_versioning_SUITE.erl
new file mode 100644
index 0000000000..7fcfe433e0
--- /dev/null
+++ b/test/plugin_versioning_SUITE.erl
@@ -0,0 +1,177 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2016 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(plugin_versioning_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ version_support,
+ plugin_validation
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+version_support(_Config) ->
+ Examples = [
+ {[], "any version", true} %% anything goes
+ ,{[], "0.0.0", true} %% ditto
+ ,{[], "3.5.6", true} %% ditto
+ ,{["something"], "something", true} %% equal values match
+ ,{["3.5.4"], "something", false}
+ ,{["3.4.5", "3.6.0"], "0.0.0", true} %% zero version always match
+ ,{["3.4.5", "3.6.0"], "", true} %% empty version always match
+ ,{["something", "3.5.6"], "3.5.7", true} %% 3.5.7 matches ~> 3.5.6
+ ,{["3.4.0", "3.5.6"], "3.6.1", false} %% 3.6.x isn't supported
+ ,{["3.5.2", "3.6.1", "3.7.1"], "3.5.2", true} %% 3.5.2 matches ~> 3.5.2
+ ,{["3.5.2", "3.6.1", "3.7.1"], "3.5.1", false} %% lesser than the lower boundary
+ ,{["3.5.2", "3.6.1", "3.7.1"], "3.6.2", true} %% 3.6.2 matches ~> 3.6.1
+ ,{["3.5.2", "3.6.1", "3.6.8"], "3.6.2", true} %% 3.6.2 still matches ~> 3.6.1
+ ,{["3.5", "3.6", "3.7"], "3.5.1", false} %% x.y values are not supported
+ ,{["3"], "3.5.1", false} %% x values are not supported
+ ,{["3.5.2", "3.6.1"], "3.6.2.999", true} %% x.y.z.p values are supported
+ ,{["3.5.2", "3.6.2.333"], "3.6.2.999", true} %% x.y.z.p values are supported
+ ,{["3.5.2", "3.6.2.333"], "3.6.2.222", false} %% x.y.z.p values are supported
+ ],
+
+ lists:foreach(
+ fun({Versions, RabbitVersion, Expected}) ->
+ {Expected, RabbitVersion, Versions} =
+ {rabbit_plugins:is_version_supported(RabbitVersion, Versions),
+ RabbitVersion, Versions}
+ end,
+ Examples),
+ ok.
+
+-record(validation_example, {rabbit_version, plugins, errors, valid}).
+
+plugin_validation(_Config) ->
+ Examples = [
+ #validation_example{
+ rabbit_version = "3.7.1",
+ plugins =
+ [{plugin_a, "3.7.2", ["3.5.6", "3.7.1"], []},
+ {plugin_b, "3.7.2", ["3.7.0"], [{plugin_a, ["3.6.3", "3.7.1"]}]}],
+ errors = [],
+ valid = [plugin_a, plugin_b]},
+
+ #validation_example{
+ rabbit_version = "3.7.1",
+ plugins =
+ [{plugin_a, "3.7.1", ["3.7.6"], []},
+ {plugin_b, "3.7.2", ["3.7.0"], [{plugin_a, ["3.6.3", "3.7.0"]}]}],
+ errors =
+ [{plugin_a, [{broker_version_mismatch, "3.7.1", ["3.7.6"]}]},
+ {plugin_b, [{missing_dependency, plugin_a}]}],
+ valid = []
+ },
+
+ #validation_example{
+ rabbit_version = "3.7.1",
+ plugins =
+ [{plugin_a, "3.7.1", ["3.7.6"], []},
+ {plugin_b, "3.7.2", ["3.7.0"], [{plugin_a, ["3.7.0"]}]},
+ {plugin_c, "3.7.2", ["3.7.0"], [{plugin_b, ["3.7.3"]}]}],
+ errors =
+ [{plugin_a, [{broker_version_mismatch, "3.7.1", ["3.7.6"]}]},
+ {plugin_b, [{missing_dependency, plugin_a}]},
+ {plugin_c, [{missing_dependency, plugin_b}]}],
+ valid = []
+ },
+
+ #validation_example{
+ rabbit_version = "3.7.1",
+ plugins =
+ [{plugin_a, "3.7.1", ["3.7.1"], []},
+ {plugin_b, "3.7.2", ["3.7.0"], [{plugin_a, ["3.7.3"]}]},
+ {plugin_d, "3.7.2", ["3.7.0"], [{plugin_c, ["3.7.3"]}]}],
+ errors =
+ [{plugin_b, [{{dependency_version_mismatch, "3.7.1", ["3.7.3"]}, plugin_a}]},
+ {plugin_d, [{missing_dependency, plugin_c}]}],
+ valid = [plugin_a]
+ },
+ #validation_example{
+ rabbit_version = "0.0.0",
+ plugins =
+ [{plugin_a, "", ["3.7.1"], []},
+ {plugin_b, "3.7.2", ["3.7.0"], [{plugin_a, ["3.7.3"]}]}],
+ errors = [],
+ valid = [plugin_a, plugin_b]
+ }],
+ lists:foreach(
+ fun(#validation_example{rabbit_version = RabbitVersion,
+ plugins = PluginsExamples,
+ errors = Errors,
+ valid = ExpectedValid}) ->
+ Plugins = make_plugins(PluginsExamples),
+ {Valid, Invalid} = rabbit_plugins:validate_plugins(Plugins,
+ RabbitVersion),
+ Errors = lists:reverse(Invalid),
+ ExpectedValid = lists:reverse(lists:map(fun(#plugin{name = Name}) ->
+ Name
+ end,
+ Valid))
+ end,
+ Examples),
+ ok.
+
+make_plugins(Plugins) ->
+ lists:map(
+ fun({Name, Version, RabbitVersions, PluginsVersions}) ->
+ Deps = [K || {K,_V} <- PluginsVersions],
+ #plugin{name = Name,
+ version = Version,
+ dependencies = Deps,
+ broker_version_requirements = RabbitVersions,
+ dependency_version_requirements = PluginsVersions}
+ end,
+ Plugins).
diff --git a/test/priority_queue_SUITE.erl b/test/priority_queue_SUITE.erl
new file mode 100644
index 0000000000..5df5686090
--- /dev/null
+++ b/test/priority_queue_SUITE.erl
@@ -0,0 +1,558 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2016 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(priority_queue_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+%% Suite entry point: every case runs inside one of the clustered groups.
+all() ->
+    [
+      {group, cluster_size_2},
+      {group, cluster_size_3}
+    ].
+
+%% Most cases run in parallel against a 2-node cluster. 'recovery' restarts
+%% the broker, so it must run alone; mirror_queue_auto_ack needs 3 nodes.
+groups() ->
+    [
+      {cluster_size_2, [], [
+          {parallel_tests, [parallel], [
+              ackfold,
+              drop,
+              dropwhile_fetchwhile,
+              info_head_message_timestamp,
+              matching,
+              mirror_queue_sync,
+              mirror_queue_sync_priority_above_max,
+              mirror_queue_sync_priority_above_max_pending_ack,
+              purge,
+              requeue,
+              resume,
+              simple_order,
+              straight_through
+            ]},
+          {non_parallel_tests, [], [
+              recovery %% Restart RabbitMQ.
+            ]}
+        ]},
+      {cluster_size_3, [], [
+          {parallel_tests, [parallel], [
+              mirror_queue_auto_ack
+            ]}
+        ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+%% Log the environment once, then run the common suite setup steps.
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config).
+
+%% Undo whatever run_setup_steps/1 installed.
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+%% Start a clustered broker sized according to the group name. The two
+%% original clauses were identical except for the node count, so they are
+%% collapsed into one; any other group (the nested parallel_tests /
+%% non_parallel_tests groups) passes Config through untouched.
+init_per_group(Group, Config)
+  when Group =:= cluster_size_2; Group =:= cluster_size_3 ->
+    NodesCount = case Group of
+                     cluster_size_2 -> 2;
+                     cluster_size_3 -> 3
+                 end,
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodes_count, NodesCount}
+      ]),
+    rabbit_ct_helpers:run_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps());
+init_per_group(_, Config) ->
+    Config.
+
+%% Stop clients and brokers for the clustered groups; nested groups fall
+%% through unchanged.
+end_per_group(ClusterSizeGroup, Config)
+when ClusterSizeGroup =:= cluster_size_2
+orelse ClusterSizeGroup =:= cluster_size_3 ->
+    rabbit_ct_helpers:run_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps());
+end_per_group(_, Config) ->
+    Config.
+
+%% Per-testcase bookkeeping only; no broker state is touched here.
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+end_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%% The BQ API is used in all sorts of places in all sorts of
+%% ways. Therefore we have to jump through a few different hoops
+%% in order to integration-test it.
+%%
+%% * start/1, stop/0, init/3, terminate/2, delete_and_terminate/2
+%% - starting and stopping rabbit. durable queues / persistent msgs needed
+%% to test recovery
+%%
+%% * publish/5, drain_confirmed/1, fetch/2, ack/2, is_duplicate/2, msg_rates/1,
+%% needs_timeout/1, timeout/1, invoke/3, resume/1 [0]
+%% - regular publishing and consuming, with confirms and acks and durability
+%%
+%% * publish_delivered/4 - publish with acks straight through
+%% * discard/3 - publish without acks straight through
+%% * dropwhile/2 - expire messages without DLX
+%% * fetchwhile/4 - expire messages with DLX
+%% * ackfold/4 - reject messages with DLX
+%% * requeue/2 - reject messages without DLX
+%% * drop/2 - maxlen messages without DLX
+%% * purge/1 - issue AMQP queue.purge
+%% * purge_acks/1 - mirror queue explicit sync with unacked msgs
+%% * fold/3 - mirror queue explicit sync
+%% * depth/1 - mirror queue implicit sync detection
+%% * len/1, is_empty/1 - info items
+%% * handle_pre_hibernate/1 - hibernation
+%%
+%% * set_ram_duration_target/2, ram_duration/1, status/1
+%% - maybe need unit testing?
+%%
+%% [0] publish enough to get credit flow from msg store
+
+%% A durable priority queue must restore messages and their priority order
+%% across a broker restart.
+recovery(Config) ->
+    {Conn, Ch} = open(Config),
+    Q = <<"recovery-queue">>,
+    declare(Ch, Q, 3),
+    publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]),
+    amqp_connection:close(Conn),
+
+    %% TODO This terminates the automatically open connection and breaks
+    %% coverage.
+    rabbit_ct_broker_helpers:restart_broker(Config, 0),
+
+    {Conn2, Ch2} = open(Config),
+    get_all(Ch2, Q, do_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]),
+    delete(Ch2, Q),
+    amqp_connection:close(Conn2),
+    passed.
+
+%% Messages come back highest-priority-first regardless of publish order,
+%% in both ack and no-ack consumption modes.
+simple_order(Config) ->
+    Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
+    Q = <<"simple_order-queue">>,
+    declare(Ch, Q, 3),
+    publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]),
+    get_all(Ch, Q, do_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]),
+    publish(Ch, Q, [2, 3, 1, 2, 3, 1, 2, 3, 1]),
+    get_all(Ch, Q, no_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]),
+    publish(Ch, Q, [3, 1, 2, 3, 1, 2, 3, 1, 2]),
+    get_all(Ch, Q, do_ack, [3, 3, 3, 2, 2, 2, 1, 1, 1]),
+    delete(Ch, Q),
+    rabbit_ct_client_helpers:close_channel(Ch),
+    passed.
+
+%% Priorities outside the declared range: per the expected order below,
+%% 10 is delivered together with 5 (the max) and 'undefined' with 0.
+matching(Config) ->
+    Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
+    Q = <<"matching-queue">>,
+    declare(Ch, Q, 5),
+    %% We round priority down, and 0 is the default
+    publish(Ch, Q, [undefined, 0, 5, 10, undefined]),
+    get_all(Ch, Q, do_ack, [5, 10, undefined, 0, undefined]),
+    delete(Ch, Q),
+    rabbit_ct_client_helpers:close_channel(Ch),
+    passed.
+
+%% Publish enough confirmed messages to exercise credit-flow resume/1 in
+%% the backing queue (see [0] in the suite header); the queue.purge call
+%% doubles as an existence assertion.
+resume(Config) ->
+    Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
+    Q = <<"resume-queue">>,
+    declare(Ch, Q, 5),
+    amqp_channel:call(Ch, #'confirm.select'{}),
+    publish_many(Ch, Q, 10000),
+    amqp_channel:wait_for_confirms(Ch),
+    amqp_channel:call(Ch, #'queue.purge'{queue = Q}), %% Assert it exists
+    delete(Ch, Q),
+    rabbit_ct_client_helpers:close_channel(Ch),
+    passed.
+
+%% With an active consumer each message is delivered straight through,
+%% so priorities never reorder anything; the queue ends up empty.
+straight_through(Config) ->
+    Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
+    Q = <<"straight_through-queue">>,
+    declare(Ch, Q, 3),
+    [begin
+         consume(Ch, Q, Ack),
+         [begin
+              publish1(Ch, Q, P),
+              assert_delivered(Ch, Ack, P)
+          end || P <- [1, 2, 3]],
+         cancel(Ch)
+     end || Ack <- [do_ack, no_ack]],
+    get_empty(Ch, Q),
+    delete(Ch, Q),
+    rabbit_ct_client_helpers:close_channel(Ch),
+    passed.
+
+%% A 1ms per-message TTL expires everything: dropwhile (no DLX) and
+%% fetchwhile (with DLX) must both leave the queue empty.
+dropwhile_fetchwhile(Config) ->
+    Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
+    Q = <<"dropwhile_fetchwhile-queue">>,
+    [begin
+         declare(Ch, Q, Args ++ arguments(3)),
+         publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]),
+         timer:sleep(10),
+         get_empty(Ch, Q),
+         delete(Ch, Q)
+     end ||
+        Args <- [[{<<"x-message-ttl">>, long, 1}],
+                 [{<<"x-message-ttl">>, long, 1},
+                  {<<"x-dead-letter-exchange">>, longstr, <<"amq.fanout">>}]
+                ]],
+    rabbit_ct_client_helpers:close_channel(Ch),
+    passed.
+
+%% Nack-with-requeue=false dead-letters messages (via ackfold/4) into Q2
+%% in priority order. NOTE(review): "ackfolq-queue1" looks like a typo of
+%% "ackfold-queue1"; harmless, as the name is only used locally.
+ackfold(Config) ->
+    Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
+    Q = <<"ackfolq-queue1">>,
+    Q2 = <<"ackfold-queue2">>,
+    declare(Ch, Q,
+            [{<<"x-dead-letter-exchange">>, longstr, <<>>},
+             {<<"x-dead-letter-routing-key">>, longstr, Q2}
+             | arguments(3)]),
+    declare(Ch, Q2, none),
+    publish(Ch, Q, [1, 2, 3]),
+    [_, _, DTag] = get_all(Ch, Q, manual_ack, [3, 2, 1]),
+    amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag,
+                                        multiple     = true,
+                                        requeue      = false}),
+    timer:sleep(100),
+    get_all(Ch, Q2, do_ack, [3, 2, 1]),
+    delete(Ch, Q),
+    delete(Ch, Q2),
+    rabbit_ct_client_helpers:close_channel(Ch),
+    passed.
+
+%% Nack-with-requeue=true puts messages back; they are re-delivered in
+%% priority order again.
+requeue(Config) ->
+    Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
+    Q = <<"requeue-queue">>,
+    declare(Ch, Q, 3),
+    publish(Ch, Q, [1, 2, 3]),
+    [_, _, DTag] = get_all(Ch, Q, manual_ack, [3, 2, 1]),
+    amqp_channel:cast(Ch, #'basic.nack'{delivery_tag = DTag,
+                                        multiple     = true,
+                                        requeue      = true}),
+    get_all(Ch, Q, do_ack, [3, 2, 1]),
+    delete(Ch, Q),
+    rabbit_ct_client_helpers:close_channel(Ch),
+    passed.
+
+%% x-max-length drops overflow from the head of the priority queue.
+drop(Config) ->
+    Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
+    Q = <<"drop-queue">>,
+    declare(Ch, Q, [{<<"x-max-length">>, long, 4} | arguments(3)]),
+    publish(Ch, Q, [1, 2, 3, 1, 2, 3, 1, 2, 3]),
+    %% We drop from the head, so this is according to the "spec" even
+    %% if not likely to be what the user wants.
+    get_all(Ch, Q, do_ack, [2, 1, 1, 1]),
+    delete(Ch, Q),
+    rabbit_ct_client_helpers:close_channel(Ch),
+    passed.
+
+%% queue.purge empties all priority levels at once.
+purge(Config) ->
+    Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
+    Q = <<"purge-queue">>,
+    declare(Ch, Q, 3),
+    publish(Ch, Q, [1, 2, 3]),
+    amqp_channel:call(Ch, #'queue.purge'{queue = Q}),
+    get_empty(Ch, Q),
+    delete(Ch, Q),
+    rabbit_ct_client_helpers:close_channel(Ch),
+    passed.
+
+%% info(head_message_timestamp) must track the message at the head of the
+%% queue. The check drives the backing-queue API directly, so it runs
+%% inside the broker via RPC.
+info_head_message_timestamp(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, info_head_message_timestamp1, [Config]).
+
+%% Runs in the broker process space; threads BQS state explicitly through
+%% each rabbit_priority_queue call.
+info_head_message_timestamp1(_Config) ->
+    QName = rabbit_misc:r(<<"/">>, queue,
+      <<"info_head_message_timestamp-queue">>),
+    Q0 = rabbit_amqqueue:pseudo_queue(QName, self()),
+    Q = Q0#amqqueue{arguments = [{<<"x-max-priority">>, long, 2}]},
+    PQ = rabbit_priority_queue,
+    BQS1 = PQ:init(Q, new, fun(_, _) -> ok end),
+    %% The queue is empty: no timestamp.
+    true = PQ:is_empty(BQS1),
+    '' = PQ:info(head_message_timestamp, BQS1),
+    %% Publish one message with timestamp 1000.
+    Msg1 = #basic_message{
+      id = msg1,
+      content = #content{
+        properties = #'P_basic'{
+          priority = 1,
+          timestamp = 1000
+        }},
+      is_persistent = false
+    },
+    BQS2 = PQ:publish(Msg1, #message_properties{size = 0}, false, self(),
+      noflow, BQS1),
+    1000 = PQ:info(head_message_timestamp, BQS2),
+    %% Publish a higher priority message with no timestamp.
+    Msg2 = #basic_message{
+      id = msg2,
+      content = #content{
+        properties = #'P_basic'{
+          priority = 2
+        }},
+      is_persistent = false
+    },
+    BQS3 = PQ:publish(Msg2, #message_properties{size = 0}, false, self(),
+      noflow, BQS2),
+    '' = PQ:info(head_message_timestamp, BQS3),
+    %% Consume message with no timestamp.
+    {{Msg2, _, _}, BQS4} = PQ:fetch(false, BQS3),
+    1000 = PQ:info(head_message_timestamp, BQS4),
+    %% Consume message with timestamp 1000, but do not acknowledge it
+    %% yet. The goal is to verify that the unacknowledged message's
+    %% timestamp is returned.
+    {{Msg1, _, AckTag}, BQS5} = PQ:fetch(true, BQS4),
+    1000 = PQ:info(head_message_timestamp, BQS5),
+    %% Ack message. The queue is empty now.
+    {[msg1], BQS6} = PQ:ack([AckTag], BQS5),
+    true = PQ:is_empty(BQS6),
+    '' = PQ:info(head_message_timestamp, BQS6),
+    PQ:delete_and_terminate(a_whim, BQS6),
+    passed.
+
+%% Smoke-test ram_duration/1 and set_ram_duration_target/2 on a priority
+%% queue BQS: only checks the calls complete, not the returned durations.
+ram_duration(_Config) ->
+    QName = rabbit_misc:r(<<"/">>, queue, <<"ram_duration-queue">>),
+    Q0 = rabbit_amqqueue:pseudo_queue(QName, self()),
+    Q = Q0#amqqueue{arguments = [{<<"x-max-priority">>, long, 5}]},
+    PQ = rabbit_priority_queue,
+    BQS1 = PQ:init(Q, new, fun(_, _) -> ok end),
+    {_Duration1, BQS2} = PQ:ram_duration(BQS1),
+    BQS3 = PQ:set_ram_duration_target(infinity, BQS2),
+    BQS4 = PQ:set_ram_duration_target(1, BQS3),
+    {_Duration2, BQS5} = PQ:ram_duration(BQS4),
+    PQ:delete_and_terminate(a_whim, BQS5),
+    passed.
+
+%% Explicit sync_queue must synchronise a mirrored priority queue when the
+%% slave is behind the master and some messages are pending ack.
+mirror_queue_sync(Config) ->
+    Ch = rabbit_ct_client_helpers:open_channel(Config, 0),
+    Q = <<"mirror_queue_sync-queue">>,
+    declare(Ch, Q, 3),
+    publish(Ch, Q, [1, 2, 3]),
+    ok = rabbit_ct_broker_helpers:set_ha_policy(Config, 0,
+      <<"^mirror_queue_sync-queue$">>, <<"all">>),
+    publish(Ch, Q, [1, 2, 3, 1, 2, 3]),
+    %% master now has 9, slave 6.
+    get_partial(Ch, Q, manual_ack, [3, 3, 3, 2, 2, 2]),
+    %% So some but not all are unacked at the slave
+    Nodename0 = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    rabbit_ct_broker_helpers:control_action(sync_queue, Nodename0,
+      [binary_to_list(Q)], [{"-p", "/"}]),
+    wait_for_sync(Config, Nodename0, rabbit_misc:r(<<"/">>, queue, Q)),
+    %% Clean up the durable queue, as every sibling testcase does;
+    %% previously it was left behind after the test.
+    delete(Ch, Q),
+    passed.
+
+%% Regression for rabbitmq-server-795: syncing a mirror when published
+%% priorities exceed the declared max must not loop forever.
+mirror_queue_sync_priority_above_max(Config) ->
+    A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    %% Tests synchronisation of slaves when priority is higher than max priority.
+    %% This causes an infinity loop (and test timeout) before rabbitmq-server-795
+    Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+    Q = <<"mirror_queue_sync_priority_above_max-queue">>,
+    declare(Ch, Q, 3),
+    publish(Ch, Q, [5, 5, 5]),
+    ok = rabbit_ct_broker_helpers:set_ha_policy(Config, A,
+      <<".*">>, <<"all">>),
+    rabbit_ct_broker_helpers:control_action(sync_queue, A,
+      [binary_to_list(Q)], [{"-p", "/"}]),
+    wait_for_sync(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+    delete(Ch, Q),
+    passed.
+
+%% Same regression as above, but with unacknowledged messages outstanding
+%% at sync time; also asserts both nodes report the full message count.
+mirror_queue_sync_priority_above_max_pending_ack(Config) ->
+    [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    %% Tests synchronisation of slaves when priority is higher than max priority
+    %% and there are pending acks.
+    %% This causes an infinity loop (and test timeout) before rabbitmq-server-795
+    Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+    Q = <<"mirror_queue_sync_priority_above_max_pending_ack-queue">>,
+    declare(Ch, Q, 3),
+    publish(Ch, Q, [5, 5, 5]),
+    %% Consume but 'forget' to acknowledge
+    get_without_ack(Ch, Q),
+    get_without_ack(Ch, Q),
+    ok = rabbit_ct_broker_helpers:set_ha_policy(Config, A,
+      <<".*">>, <<"all">>),
+    rabbit_ct_broker_helpers:control_action(sync_queue, A,
+      [binary_to_list(Q)], [{"-p", "/"}]),
+    wait_for_sync(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+    synced_msgs(Config, A, rabbit_misc:r(<<"/">>, queue, Q), 3),
+    synced_msgs(Config, B, rabbit_misc:r(<<"/">>, queue, Q), 3),
+    delete(Ch, Q),
+    passed.
+
+%% Regression for rabbitmq-server-687: auto-ack deliveries must not be
+%% reported to slaves as requiring acks, or their depth calculation breaks.
+mirror_queue_auto_ack(Config) ->
+    A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    %% Check correct use of AckRequired in the notifications to the slaves.
+    %% If slaves are notified with AckRequired == true when it is false,
+    %% the slaves will crash with the depth notification as they will not
+    %% match the master delta.
+    %% Bug rabbitmq-server 687
+    Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+    Q = <<"mirror_queue_auto_ack-queue">>,
+    declare(Ch, Q, 3),
+    publish(Ch, Q, [1, 2, 3]),
+    ok = rabbit_ct_broker_helpers:set_ha_policy(Config, A,
+      <<".*">>, <<"all">>),
+    get_partial(Ch, Q, no_ack, [3, 2, 1]),
+
+    %% Retrieve slaves
+    SPids = slave_pids(Config, A, rabbit_misc:r(<<"/">>, queue, Q)),
+    [{SNode1, _SPid1}, {SNode2, SPid2}] = nodes_and_pids(SPids),
+
+    %% Restart one of the slaves so `request_depth` is triggered
+    rabbit_ct_broker_helpers:restart_node(Config, SNode1),
+
+    %% The alive slave must have the same pid after its neighbour is restarted
+    timer:sleep(3000), %% ugly but we can't know when the `depth` instruction arrives
+    Slaves = nodes_and_pids(slave_pids(Config, A, rabbit_misc:r(<<"/">>, queue, Q))),
+    SPid2 = proplists:get_value(SNode2, Slaves),
+
+    delete(Ch, Q),
+    passed.
+
+%%----------------------------------------------------------------------------
+
+%% Open a dedicated connection/channel pair on node 0; callers that need
+%% to survive a broker restart use this instead of the shared channel.
+open(Config) ->
+    {rabbit_ct_client_helpers:open_connection(Config, 0),
+     rabbit_ct_client_helpers:open_channel(Config, 0)}.
+
+%% Declare a durable queue. The third argument is either an explicit
+%% argument table or a max priority to be expanded via arguments/1.
+declare(Ch, Q, Args) when is_list(Args) ->
+    amqp_channel:call(Ch, #'queue.declare'{queue     = Q,
+                                           durable   = true,
+                                           arguments = Args});
+declare(Ch, Q, MaxPriority) ->
+    declare(Ch, Q, arguments(MaxPriority)).
+
+%% Delete the queue (synchronous).
+delete(Ch, Q) ->
+    amqp_channel:call(Ch, #'queue.delete'{queue = Q}).
+
+%% Publish one message per priority in Ps and wait for publisher confirms,
+%% so the messages are safely in the queue before the test proceeds.
+publish(Ch, Q, Ps) ->
+    amqp_channel:call(Ch, #'confirm.select'{}),
+    [publish1(Ch, Q, P) || P <- Ps],
+    amqp_channel:wait_for_confirms(Ch).
+
+%% Publish N messages with random priorities 1..5. Callers handle confirm
+%% mode themselves. Uses the 'rand' module rather than the deprecated
+%% 'random' module (rand self-seeds per process, so no seeding needed).
+publish_many(_Ch, _Q, 0) -> ok;
+publish_many( Ch, Q, N) -> publish1(Ch, Q, rand:uniform(5)),
+                           publish_many(Ch, Q, N - 1).
+
+%% Fire-and-forget publish of one message whose payload encodes its
+%% priority (see priority2bin/1), via the default exchange.
+publish1(Ch, Q, P) ->
+    amqp_channel:cast(Ch, #'basic.publish'{routing_key = Q},
+                      #amqp_msg{props = props(P),
+                                payload = priority2bin(P)}).
+
+%% Message properties for a given priority; 'undefined' publishes without
+%% a priority header. All test messages are persistent (delivery_mode 2).
+props(undefined) ->
+    #'P_basic'{delivery_mode = 2};
+props(Priority) ->
+    #'P_basic'{delivery_mode = 2,
+               priority      = Priority}.
+
+%% Subscribe with the fixed consumer tag <<"ctag">> and block until the
+%% broker confirms the subscription.
+consume(Ch, Q, Ack) ->
+    amqp_channel:subscribe(Ch, #'basic.consume'{queue        = Q,
+                                                no_ack       = Ack =:= no_ack,
+                                                consumer_tag = <<"ctag">>},
+                           self()),
+    receive
+        #'basic.consume_ok'{consumer_tag = <<"ctag">>} ->
+            ok
+    end.
+
+%% Cancel the <<"ctag">> subscription created by consume/3.
+cancel(Ch) ->
+    amqp_channel:call(Ch, #'basic.cancel'{consumer_tag = <<"ctag">>}).
+
+%% Wait for the next delivery and assert its payload matches priority P.
+%% NOTE(review): the receive has no 'after' clause — presumably the CT
+%% timetrap bounds a hang; confirm before reusing outside CT.
+assert_delivered(Ch, Ack, P) ->
+    PBin = priority2bin(P),
+    receive
+        {#'basic.deliver'{delivery_tag = DTag}, #amqp_msg{payload = PBin2}} ->
+            PBin = PBin2,
+            maybe_ack(Ch, Ack, DTag)
+    end.
+
+%% Fetch exactly the messages in Ps (in order) and assert the queue is
+%% then empty; returns the delivery tags.
+get_all(Ch, Q, Ack, Ps) ->
+    DTags = get_partial(Ch, Q, Ack, Ps),
+    get_empty(Ch, Q),
+    DTags.
+
+%% Fetch the messages in Ps (in order) without requiring the queue to be
+%% drained afterwards.
+get_partial(Ch, Q, Ack, Ps) ->
+    [get_ok(Ch, Q, Ack, P) || P <- Ps].
+
+%% Assert the queue has no more messages.
+get_empty(Ch, Q) ->
+    #'basic.get_empty'{} = amqp_channel:call(Ch, #'basic.get'{queue = Q}).
+
+%% basic.get one message and assert its payload encodes priority P;
+%% returns the delivery tag (acked or not per Ack).
+get_ok(Ch, Q, Ack, P) ->
+    PBin = priority2bin(P),
+    {#'basic.get_ok'{delivery_tag = DTag}, #amqp_msg{payload = PBin2}} =
+        amqp_channel:call(Ch, #'basic.get'{queue = Q,
+                                           no_ack = Ack =:= no_ack}),
+    PBin = PBin2,
+    maybe_ack(Ch, Ack, DTag).
+
+%% basic.get one message and deliberately leave it unacknowledged.
+get_without_ack(Ch, Q) ->
+    {#'basic.get_ok'{}, _} =
+        amqp_channel:call(Ch, #'basic.get'{queue = Q, no_ack = false}),
+
+%% Ack only in do_ack mode; always return the delivery tag.
+maybe_ack(Ch, do_ack, DTag) ->
+    amqp_channel:cast(Ch, #'basic.ack'{delivery_tag = DTag}),
+    DTag;
+maybe_ack(_Ch, _, DTag) ->
+    DTag.
+
+%% Queue-declare argument table for a maximum priority; 'none' means a
+%% plain (non-priority) queue.
+arguments(none) ->
+    [];
+arguments(MaxPriority) ->
+    [{<<"x-max-priority">>, byte, MaxPriority}].
+
+%% Render a priority as the binary payload used to tag test messages.
+%% Uses the integer_to_binary/1 BIF instead of going via a char list.
+priority2bin(undefined) -> <<"undefined">>;
+priority2bin(Int)       -> integer_to_binary(Int).
+
+%%----------------------------------------------------------------------------
+
+%% Poll every 100ms until the queue reports one synchronised slave.
+%% Unbounded loop — the CT timetrap is what stops a never-syncing queue.
+wait_for_sync(Config, Nodename, Q) ->
+    case synced(Config, Nodename, Q) of
+        true  -> ok;
+        false -> timer:sleep(100),
+                 wait_for_sync(Config, Nodename, Q)
+    end.
+
+%% True when queue Q has exactly one synchronised slave pid.
+synced(Config, Nodename, Q) ->
+    Info = rabbit_ct_broker_helpers:rpc(Config, Nodename,
+      rabbit_amqqueue, info_all, [<<"/">>, [name, synchronised_slave_pids]]),
+    [SSPids] = [Pids || [{name, Q1}, {synchronised_slave_pids, Pids}] <- Info,
+                        Q =:= Q1],
+    length(SSPids) =:= 1.
+
+%% Assert that queue Q on Nodename holds exactly Expected messages.
+%% The original returned the comparison as a boolean, which every caller
+%% discarded — a mismatch went unnoticed. Match assertively instead so a
+%% wrong count crashes the testcase; still returns true on success.
+synced_msgs(Config, Nodename, Q, Expected) ->
+    Info = rabbit_ct_broker_helpers:rpc(Config, Nodename,
+      rabbit_amqqueue, info_all, [<<"/">>, [name, messages]]),
+    [Messages] = [M || [{name, Q1}, {messages, M}] <- Info, Q =:= Q1],
+    Expected = Messages,
+    true.
+
+%% Pair each slave pid with the node it lives on: [{Node, Pid}].
+nodes_and_pids(SPids) ->
+    [{node(SPid), SPid} || SPid <- SPids].
+
+%% Return the slave pids of queue Q as reported by Nodename; crashes if
+%% the queue is not found (single-element match).
+slave_pids(Config, Nodename, Q) ->
+    Info = rabbit_ct_broker_helpers:rpc(Config, Nodename,
+      rabbit_amqqueue, info_all, [<<"/">>, [name, slave_pids]]),
+    [SPids] = [SPids || [{name, Q1}, {slave_pids, SPids}] <- Info,
+                        Q =:= Q1],
+    SPids.
+
+%%----------------------------------------------------------------------------
diff --git a/test/queue_master_location_SUITE.erl b/test/queue_master_location_SUITE.erl
new file mode 100644
index 0000000000..e77f27f14b
--- /dev/null
+++ b/test/queue_master_location_SUITE.erl
@@ -0,0 +1,271 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2015 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(queue_master_location_SUITE).
+
+%% These tests use an ABC cluster with each node initialised with
+%% a different number of queues. When a queue is declared, different
+%% strategies can be applied to determine the queue's master node. Queue
+%% location strategies can be applied in the following ways;
+%% 1. As policy,
+%% 2. As config (in rabbitmq.config),
+%% 3. or as part of the queue's declare arguments.
+%%
+%% Currently supported strategies are;
+%% min-masters : The queue master node is calculated as the one with the
+%% least bound queues in the cluster.
+%% client-local: The queue master node is the local node from which
+%% the declaration is being carried out from
+%% random : The queue master node is randomly selected.
+%%
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(DEFAULT_VHOST_PATH, (<<"/">>)).
+-define(POLICY, <<"^qm.location$">>).
+
+%% All cases need a 3-node cluster so the location strategies have a
+%% meaningful choice of master node.
+all() ->
+    [
+      {group, cluster_size_3}
+    ].
+
+groups() ->
+    [
+      {cluster_size_3, [], [
+          declare_args,
+          declare_policy,
+          declare_config,
+          calculate_min_master,
+          calculate_random,
+          calculate_client_local
+        ]}
+    ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+%% Standard CT helper bootstrap; brokers start per-testcase, not here.
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+%% Record the cluster size only; init_per_testcase expands it into
+%% per-testcase node names.
+init_per_group(cluster_size_3, Config) ->
+    rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodes_count, 3} %% Replaced with a list of node names later.
+      ]).
+
+end_per_group(_, Config) ->
+    Config.
+
+%% Start a fresh, fully clustered broker set per testcase, with testcase-
+%% derived node names and non-overlapping TCP port ranges.
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase),
+    ClusterSize = ?config(rmq_nodes_count, Config),
+    Nodenames = [
+      list_to_atom(rabbit_misc:format("~s-~b", [Testcase, I]))
+      || I <- lists:seq(1, ClusterSize)
+    ],
+    TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+    Config1 = rabbit_ct_helpers:set_config(Config, [
+        {rmq_nodes_count, Nodenames},
+        {rmq_nodes_clustered, true},
+        {rmq_nodename_suffix, Testcase},
+        {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+      ]),
+    rabbit_ct_helpers:run_steps(Config1,
+      rabbit_ct_broker_helpers:setup_steps() ++
+      rabbit_ct_client_helpers:setup_steps()).
+
+%% Tear the per-testcase cluster back down.
+end_per_testcase(Testcase, Config) ->
+    Config1 = rabbit_ct_helpers:run_steps(Config,
+      rabbit_ct_client_helpers:teardown_steps() ++
+      rabbit_ct_broker_helpers:teardown_steps()),
+    rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+%%
+%% Queue 'declarations'
+%%
+
+%% Strategy supplied via the x-queue-master-locator declare argument.
+declare_args(Config) ->
+    setup_test_environment(Config),
+    unset_location_config(Config),
+    QueueName = rabbit_misc:r(<<"/">>, queue, Q= <<"qm.test">>),
+    Args = [{<<"x-queue-master-locator">>, <<"min-masters">>}],
+    declare(Config, QueueName, false, false, Args, none),
+    verify_min_master(Config, Q).
+
+%% Strategy supplied via a queue-master-locator policy.
+declare_policy(Config) ->
+    setup_test_environment(Config),
+    unset_location_config(Config),
+    set_location_policy(Config, ?POLICY, <<"min-masters">>),
+    QueueName = rabbit_misc:r(<<"/">>, queue, Q= <<"qm.test">>),
+    declare(Config, QueueName, false, false, _Args=[], none),
+    verify_min_master(Config, Q).
+
+%% Strategy supplied via the queue_master_locator application env.
+declare_config(Config) ->
+    setup_test_environment(Config),
+    set_location_config(Config, <<"min-masters">>),
+    QueueName = rabbit_misc:r(<<"/">>, queue, Q= <<"qm.test">>),
+    declare(Config, QueueName, false, false, _Args=[], none),
+    verify_min_master(Config, Q),
+    unset_location_config(Config),
+    ok.
+
+%%
+%% Test 'calculations'
+%%
+
+%% min-masters must land the new queue on the node with fewest queues
+%% (node index 2, see distribute_queues/2).
+calculate_min_master(Config) ->
+    setup_test_environment(Config),
+    QueueName = rabbit_misc:r(<<"/">>, queue, Q= <<"qm.test">>),
+    Args = [{<<"x-queue-master-locator">>, <<"min-masters">>}],
+    declare(Config, QueueName, false, false, Args, none),
+    verify_min_master(Config, Q),
+    ok.
+
+%% random must pick some node of the cluster (membership only is checked).
+calculate_random(Config) ->
+    setup_test_environment(Config),
+    QueueName = rabbit_misc:r(<<"/">>, queue, Q= <<"qm.test">>),
+    Args = [{<<"x-queue-master-locator">>, <<"random">>}],
+    declare(Config, QueueName, false, false, Args, none),
+    verify_random(Config, Q),
+    ok.
+
+%% client-local must pick the node the declaration was issued on (node 0).
+calculate_client_local(Config) ->
+    setup_test_environment(Config),
+    QueueName = rabbit_misc:r(<<"/">>, queue, Q= <<"qm.test">>),
+    Args = [{<<"x-queue-master-locator">>, <<"client-local">>}],
+    declare(Config, QueueName, false, false, Args, none),
+    verify_client_local(Config, Q),
+    ok.
+
+%%
+%% Setup environment
+%%
+
+%% Seed every node with a different number of queues (15/8/1) so that
+%% min-masters has an unambiguous winner.
+setup_test_environment(Config)  ->
+    Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    [distribute_queues(Config, Node) || Node <- Nodes],
+    ok.
+
+%% Clear any leftover master-location config on Node, then declare a
+%% node-specific number of queues (node 0: 15, node 1: 8, node 2: 1).
+distribute_queues(Config, Node) ->
+    %% BUG FIX: the app env key used by set/unset_location_config/1 is
+    %% queue_master_locator; the original unset the misspelled key
+    %% queue_master_location, so the setting was never actually cleared.
+    ok = rpc:call(Node, application, unset_env, [rabbit, queue_master_locator]),
+    Count = case rabbit_ct_broker_helpers:nodename_to_index(Config, Node) of
+        0 -> 15;
+        1 -> 8;
+        2 -> 1
+    end,
+
+    Channel = rabbit_ct_client_helpers:open_channel(Config, Node),
+    ok = declare_queues(Channel, declare_fun(), Count),
+    ok = create_e2e_binding(Channel, [<< "ex_1" >>, << "ex_2" >>]),
+    {ok, Channel}.
+
+%%
+%% Internal queue handling
+%%
+
+%% Apply DeclareFun to Channel exactly N times (N >= 1). The N > 1 guard
+%% fixes the original's unbounded recursion for non-positive counts, which
+%% would recurse past 1 forever; now such input fails fast (function_clause).
+declare_queues(Channel, DeclareFun, 1) -> DeclareFun(Channel);
+declare_queues(Channel, DeclareFun, N) when N > 1 ->
+    DeclareFun(Channel),
+    declare_queues(Channel, DeclareFun, N-1).
+
+%% Declare a (default-type) exchange, returning its name for chaining.
+declare_exchange(Channel, Ex) ->
+    #'exchange.declare_ok'{} =
+        amqp_channel:call(Channel, #'exchange.declare'{exchange = Ex}),
+    {ok, Ex}.
+
+%% Install an exchange-to-exchange binding; asserts the bind succeeded.
+declare_binding(Channel, Binding) ->
+    #'exchange.bind_ok'{} = amqp_channel:call(Channel, Binding),
+    ok.
+
+%% A closure declaring one server-named exclusive queue per invocation.
+declare_fun() ->
+    fun(Channel) ->
+            #'queue.declare_ok'{} = amqp_channel:call(Channel, get_random_queue_declare()),
+            ok
+    end.
+
+%% Bind the first named exchange to the second (exchange-to-exchange).
+create_e2e_binding(Channel, ExNamesBin) ->
+    [{ok, Ex1}, {ok, Ex2}] = [declare_exchange(Channel, Ex) || Ex <- ExNamesBin],
+    Binding = #'exchange.bind'{source = Ex1, destination = Ex2},
+    ok = declare_binding(Channel, Binding).
+
+%% queue.declare for a server-named ("random") exclusive queue; all the
+%% other fields are spelled out explicitly for clarity.
+get_random_queue_declare() ->
+    #'queue.declare'{passive     = false,
+                     durable     = false,
+                     exclusive   = true,
+                     auto_delete = false,
+                     nowait      = false,
+                     arguments   = []}.
+
+%%
+%% Internal helper functions
+%%
+
+%% NOTE(review): appears unused within this suite; kept for callers
+%% outside this file, if any — confirm before removing.
+get_cluster() -> [node()|nodes()].
+
+%% The expected min-masters winner: the last configured node, which
+%% distribute_queues/2 seeds with only one queue.
+min_master_node(Config) ->
+    hd(lists:reverse(
+      rabbit_ct_broker_helpers:get_node_configs(Config, nodename))).
+
+%% Set the queue_master_locator application env on every node.
+set_location_config(Config, Strategy) ->
+    lists:foreach(
+      fun(Node) ->
+              ok = rpc:call(Node, application, set_env,
+                            [rabbit, queue_master_locator, Strategy])
+      end,
+      rabbit_ct_broker_helpers:get_node_configs(Config, nodename)),
+    ok.
+
+%% Remove the queue_master_locator application env from every node.
+unset_location_config(Config) ->
+    lists:foreach(
+      fun(Node) ->
+              ok = rpc:call(Node, application, unset_env,
+                            [rabbit, queue_master_locator])
+      end,
+      rabbit_ct_broker_helpers:get_node_configs(Config, nodename)),
+    ok.
+
+%% Declare QueueName via RPC on node 0; asserts the queue is newly created.
+declare(Config, QueueName, Durable, AutoDelete, Args, Owner) ->
+    Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    {new, Queue} = rpc:call(Node, rabbit_amqqueue, declare,
+                            [QueueName, Durable, AutoDelete, Args, Owner]),
+    Queue.
+
+%% Assert the queue's master is the min-masters winner (min_master_node/1).
+verify_min_master(Config, Q) ->
+    Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    MinMaster = min_master_node(Config),
+    {ok, MinMaster} = rpc:call(Node, rabbit_queue_master_location_misc,
+                               lookup_master, [Q, ?DEFAULT_VHOST_PATH]).
+
+%% Assert the queue's master is some member of the cluster (random cannot
+%% be pinned to a specific node).
+verify_random(Config, Q) ->
+    [Node | _] = Nodes = rabbit_ct_broker_helpers:get_node_configs(Config,
+      nodename),
+    {ok, Master} = rpc:call(Node, rabbit_queue_master_location_misc,
+                            lookup_master, [Q, ?DEFAULT_VHOST_PATH]),
+    true = lists:member(Master, Nodes).
+
+%% Assert the queue's master is the declaring node itself (node 0).
+verify_client_local(Config, Q) ->
+    Node = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    {ok, Node} = rpc:call(Node, rabbit_queue_master_location_misc,
+                          lookup_master, [Q, ?DEFAULT_VHOST_PATH]).
+
+%% Install a queue-master-locator policy matching every queue.
+set_location_policy(Config, Name, Strategy) ->
+    ok = rabbit_ct_broker_helpers:set_policy(Config, 0,
+      Name, <<".*">>, <<"queues">>, [{<<"queue-master-locator">>, Strategy}]).
diff --git a/test/rabbit_ha_test_consumer.erl b/test/rabbit_ha_test_consumer.erl
new file mode 100644
index 0000000000..f374863f6a
--- /dev/null
+++ b/test/rabbit_ha_test_consumer.erl
@@ -0,0 +1,114 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved.
+%%
+-module(rabbit_ha_test_consumer).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-export([await_response/1, create/5, start/6]).
+
+%% Block until the consumer process reports back; return ok on success or
+%% crash with the consumer's error reason. Any other reply shape raises
+%% case_clause, exactly as before.
+await_response(ConsumerPid) ->
+    Response = receive {ConsumerPid, R} -> R end,
+    case Response of
+        {error, Reason} -> erlang:error(Reason);
+        ok              -> ok
+    end.
+
+%% Spawn a linked consumer process and subscribe it to Queue. LowestSeen
+%% starts at ExpectingMsgs + 1 because message numbers count down.
+create(Channel, Queue, TestPid, CancelOnFailover, ExpectingMsgs) ->
+    ConsumerPid = spawn_link(?MODULE, start,
+                             [TestPid, Channel, Queue, CancelOnFailover,
+                              ExpectingMsgs + 1, ExpectingMsgs]),
+    amqp_channel:subscribe(
+      Channel, consume_method(Queue, CancelOnFailover), ConsumerPid),
+    ConsumerPid.
+
+%% Consumer process entry point (also re-entered after resubscribe/6).
+start(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume) ->
+    error_logger:info_msg("consumer ~p on ~p awaiting ~w messages "
+                          "(lowest seen = ~w, cancel-on-failover = ~w)~n",
+                          [self(), Channel, MsgsToConsume, LowestSeen,
+                           CancelOnFailover]),
+    run(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume).
+
+%% Consumer loop: messages carry descending integer payloads; count a
+%% message only the first time it is seen (redeliveries after failover may
+%% arrive in any order). Fixes the "redeliverd" typo in the log message.
+run(TestPid, _Channel, _Queue, _CancelOnFailover, _LowestSeen, 0) ->
+    consumer_reply(TestPid, ok);
+run(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume) ->
+    receive
+        #'basic.consume_ok'{} ->
+            run(TestPid, Channel, Queue,
+                CancelOnFailover, LowestSeen, MsgsToConsume);
+        {Delivery = #'basic.deliver'{ redelivered = Redelivered },
+         #amqp_msg{payload = Payload}} ->
+            MsgNum = list_to_integer(binary_to_list(Payload)),
+
+            ack(Delivery, Channel),
+
+            %% we can receive any message we've already seen and,
+            %% because of the possibility of multiple requeuings, we
+            %% might see these messages in any order. If we are seeing
+            %% a message again, we don't decrement the MsgsToConsume
+            %% counter.
+            if
+                MsgNum + 1 == LowestSeen ->
+                    run(TestPid, Channel, Queue,
+                        CancelOnFailover, MsgNum, MsgsToConsume - 1);
+                MsgNum >= LowestSeen ->
+                    error_logger:info_msg(
+                      "consumer ~p on ~p ignoring redelivered msg ~p~n",
+                      [self(), Channel, MsgNum]),
+                    true = Redelivered, %% ASSERTION
+                    run(TestPid, Channel, Queue,
+                        CancelOnFailover, LowestSeen, MsgsToConsume);
+                true ->
+                    %% We received a message we haven't seen before,
+                    %% but it is not the next message in the expected
+                    %% sequence.
+                    consumer_reply(TestPid,
+                                   {error, {unexpected_message, MsgNum}})
+            end;
+        #'basic.cancel'{} when CancelOnFailover ->
+            error_logger:info_msg("consumer ~p on ~p received basic.cancel: "
+                                  "resubscribing to ~p on ~p~n",
+                                  [self(), Channel, Queue, Channel]),
+            resubscribe(TestPid, Channel, Queue, CancelOnFailover,
+                        LowestSeen, MsgsToConsume);
+        #'basic.cancel'{} ->
+            exit(cancel_received_without_cancel_on_failover)
+    end.
+
+%%
+%% Private API
+%%
+
+%% Re-issue basic.consume after a failover-triggered cancel, wait for the
+%% new consume_ok, then resume the consumer loop. Fixes the
+%% "re-subscripting" typo and adds the missing trailing newline to match
+%% the module's other log messages.
+resubscribe(TestPid, Channel, Queue, CancelOnFailover, LowestSeen,
+            MsgsToConsume) ->
+    amqp_channel:subscribe(
+      Channel, consume_method(Queue, CancelOnFailover), self()),
+    ok = receive #'basic.consume_ok'{} -> ok
+         end,
+    error_logger:info_msg("re-subscribing consumer ~p on ~p complete "
+                          "(received basic.consume_ok)~n",
+                          [self(), Channel]),
+    start(TestPid, Channel, Queue, CancelOnFailover, LowestSeen, MsgsToConsume).
+
+%% basic.consume method asking the broker to send basic.cancel on
+%% mirrored-queue failover when CancelOnFailover is true.
+consume_method(Queue, CancelOnFailover) ->
+    Args = [{<<"x-cancel-on-ha-failover">>, bool, CancelOnFailover}],
+    #'basic.consume'{queue     = Queue,
+                     arguments = Args}.
+
+%% Ack a single delivery (synchronous).
+ack(#'basic.deliver'{delivery_tag = DeliveryTag}, Channel) ->
+    amqp_channel:call(Channel, #'basic.ack'{delivery_tag = DeliveryTag}),
+    ok.
+
+%% Report a result back to the test process, tagged with our pid so
+%% await_response/1 can match it.
+consumer_reply(TestPid, Reply) ->
+    TestPid ! {self(), Reply}.
diff --git a/test/rabbit_ha_test_producer.erl b/test/rabbit_ha_test_producer.erl
new file mode 100644
index 0000000000..66dee3f7a3
--- /dev/null
+++ b/test/rabbit_ha_test_producer.erl
@@ -0,0 +1,119 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved.
+%%
+-module(rabbit_ha_test_producer).
+
+-export([await_response/1, start/5, create/5]).
+
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+await_response(ProducerPid) ->
+ error_logger:info_msg("waiting for producer pid ~p~n", [ProducerPid]),
+ case receive {ProducerPid, Response} -> Response end of
+ ok -> ok;
+ {error, _} = Else -> exit(Else);
+ Else -> exit({weird_response, Else})
+ end.
+
+create(Channel, Queue, TestPid, Confirm, MsgsToSend) ->
+ ProducerPid = spawn_link(?MODULE, start, [Channel, Queue, TestPid,
+ Confirm, MsgsToSend]),
+ receive
+ {ProducerPid, started} -> ProducerPid
+ end.
+
+start(Channel, Queue, TestPid, Confirm, MsgsToSend) ->
+ ConfirmState =
+ case Confirm of
+ true -> amqp_channel:register_confirm_handler(Channel, self()),
+ #'confirm.select_ok'{} =
+ amqp_channel:call(Channel, #'confirm.select'{}),
+ gb_trees:empty();
+ false -> none
+ end,
+ TestPid ! {self(), started},
+ error_logger:info_msg("publishing ~w msgs on ~p~n", [MsgsToSend, Channel]),
+ producer(Channel, Queue, TestPid, ConfirmState, MsgsToSend).
+
+%%
+%% Private API
+%%
+
+producer(_Channel, _Queue, TestPid, none, 0) ->
+ TestPid ! {self(), ok};
+producer(Channel, _Queue, TestPid, ConfirmState, 0) ->
+ error_logger:info_msg("awaiting confirms on channel ~p~n", [Channel]),
+ Msg = case drain_confirms(no_nacks, ConfirmState) of
+ no_nacks -> ok;
+ nacks -> {error, received_nacks};
+ {Nacks, CS} -> {error, {missing_confirms, Nacks,
+ lists:sort(gb_trees:keys(CS))}}
+ end,
+ TestPid ! {self(), Msg};
+
+producer(Channel, Queue, TestPid, ConfirmState, MsgsToSend) ->
+ Method = #'basic.publish'{exchange = <<"">>,
+ routing_key = Queue,
+ mandatory = false,
+ immediate = false},
+
+ ConfirmState1 = maybe_record_confirm(ConfirmState, Channel, MsgsToSend),
+
+ amqp_channel:call(Channel, Method,
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2},
+ payload = list_to_binary(
+ integer_to_list(MsgsToSend))}),
+
+ producer(Channel, Queue, TestPid, ConfirmState1, MsgsToSend - 1).
+
+maybe_record_confirm(none, _, _) ->
+ none;
+maybe_record_confirm(ConfirmState, Channel, MsgsToSend) ->
+ SeqNo = amqp_channel:next_publish_seqno(Channel),
+ gb_trees:insert(SeqNo, MsgsToSend, ConfirmState).
+
+drain_confirms(Nacks, ConfirmState) ->
+ case gb_trees:is_empty(ConfirmState) of
+ true -> Nacks;
+ false -> receive
+ #'basic.ack'{delivery_tag = DeliveryTag,
+ multiple = IsMulti} ->
+ drain_confirms(Nacks,
+ delete_confirms(DeliveryTag, IsMulti,
+ ConfirmState));
+ #'basic.nack'{delivery_tag = DeliveryTag,
+ multiple = IsMulti} ->
+ drain_confirms(nacks,
+ delete_confirms(DeliveryTag, IsMulti,
+ ConfirmState))
+ after
+ 60000 -> {Nacks, ConfirmState}
+ end
+ end.
+
+delete_confirms(DeliveryTag, false, ConfirmState) ->
+ gb_trees:delete(DeliveryTag, ConfirmState);
+delete_confirms(DeliveryTag, true, ConfirmState) ->
+ multi_confirm(DeliveryTag, ConfirmState).
+
+multi_confirm(DeliveryTag, ConfirmState) ->
+ case gb_trees:is_empty(ConfirmState) of
+ true -> ConfirmState;
+ false -> {Key, _, ConfirmState1} = gb_trees:take_smallest(ConfirmState),
+ case Key =< DeliveryTag of
+ true -> multi_confirm(DeliveryTag, ConfirmState1);
+ false -> ConfirmState
+ end
+ end.
diff --git a/test/simple_ha_SUITE.erl b/test/simple_ha_SUITE.erl
new file mode 100644
index 0000000000..af85ad6d3b
--- /dev/null
+++ b/test/simple_ha_SUITE.erl
@@ -0,0 +1,216 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(simple_ha_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, cluster_size_2},
+ {group, cluster_size_3}
+ ].
+
+groups() ->
+ [
+ {cluster_size_2, [], [
+ rapid_redeclare,
+ declare_synchrony
+ ]},
+ {cluster_size_3, [], [
+ consume_survives_stop,
+ consume_survives_sigkill,
+ consume_survives_policy,
+ auto_resume,
+ auto_resume_no_ccn_client,
+ confirms_survive_stop,
+ confirms_survive_sigkill,
+ confirms_survive_policy
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_2, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 2}
+ ]);
+init_per_group(cluster_size_3, Config) ->
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, 3}
+ ]).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_clustered, true},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++ [
+ fun rabbit_ct_broker_helpers:set_ha_policy_all/1
+ ]).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+rapid_redeclare(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+ Ch = rabbit_ct_client_helpers:open_channel(Config, A),
+ Queue = <<"test">>,
+ [begin
+ amqp_channel:call(Ch, #'queue.declare'{queue = Queue,
+ durable = true}),
+ amqp_channel:call(Ch, #'queue.delete'{queue = Queue})
+ end || _I <- lists:seq(1, 20)],
+ ok.
+
+%% Check that by the time we get a declare-ok back, the slaves are up
+%% and in Mnesia.
+declare_synchrony(Config) ->
+ [Rabbit, Hare] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ RabbitCh = rabbit_ct_client_helpers:open_channel(Config, Rabbit),
+ HareCh = rabbit_ct_client_helpers:open_channel(Config, Hare),
+ Q = <<"mirrored-queue">>,
+ declare(RabbitCh, Q),
+ amqp_channel:call(RabbitCh, #'confirm.select'{}),
+ amqp_channel:cast(RabbitCh, #'basic.publish'{routing_key = Q},
+ #amqp_msg{props = #'P_basic'{delivery_mode = 2}}),
+ amqp_channel:wait_for_confirms(RabbitCh),
+ rabbit_ct_broker_helpers:kill_node(Config, Rabbit),
+
+ #'queue.declare_ok'{message_count = 1} = declare(HareCh, Q),
+ ok.
+
+declare(Ch, Name) ->
+ amqp_channel:call(Ch, #'queue.declare'{durable = true, queue = Name}).
+
+consume_survives_stop(Cf) -> consume_survives(Cf, fun stop/2, true).
+consume_survives_sigkill(Cf) -> consume_survives(Cf, fun sigkill/2, true).
+consume_survives_policy(Cf) -> consume_survives(Cf, fun policy/2, true).
+auto_resume(Cf) -> consume_survives(Cf, fun sigkill/2, false).
+auto_resume_no_ccn_client(Cf) -> consume_survives(Cf, fun sigkill/2, false,
+ false).
+
+confirms_survive_stop(Cf) -> confirms_survive(Cf, fun stop/2).
+confirms_survive_sigkill(Cf) -> confirms_survive(Cf, fun sigkill/2).
+confirms_survive_policy(Cf) -> confirms_survive(Cf, fun policy/2).
+
+%%----------------------------------------------------------------------------
+
+consume_survives(Config, DeathFun, CancelOnFailover) ->
+ consume_survives(Config, DeathFun, CancelOnFailover, true).
+
+consume_survives(Config,
+ DeathFun, CancelOnFailover, CCNSupported) ->
+ [A, B, C] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Msgs = rabbit_ct_helpers:cover_work_factor(Config, 20000),
+ Channel1 = rabbit_ct_client_helpers:open_channel(Config, A),
+ Channel2 = rabbit_ct_client_helpers:open_channel(Config, B),
+ Channel3 = rabbit_ct_client_helpers:open_channel(Config, C),
+
+ %% declare the queue on the master, mirrored to the two slaves
+ Queue = <<"test">>,
+ amqp_channel:call(Channel1, #'queue.declare'{queue = Queue,
+ auto_delete = false}),
+
+ %% start up a consumer
+ ConsCh = case CCNSupported of
+ true -> Channel2;
+ false -> Port = rabbit_ct_broker_helpers:get_node_config(
+ Config, B, tcp_port_amqp),
+ open_incapable_channel(Port)
+ end,
+ ConsumerPid = rabbit_ha_test_consumer:create(
+ ConsCh, Queue, self(), CancelOnFailover, Msgs),
+
+ %% send a bunch of messages from the producer
+ ProducerPid = rabbit_ha_test_producer:create(Channel3, Queue,
+ self(), false, Msgs),
+ DeathFun(Config, A),
+ %% verify that the consumer got all msgs, or die - the await_response
+ %% calls throw an exception if anything goes wrong....
+ rabbit_ha_test_consumer:await_response(ConsumerPid),
+ rabbit_ha_test_producer:await_response(ProducerPid),
+ ok.
+
+confirms_survive(Config, DeathFun) ->
+ [A, B, _] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ Msgs = rabbit_ct_helpers:cover_work_factor(Config, 20000),
+ Node1Channel = rabbit_ct_client_helpers:open_channel(Config, A),
+ Node2Channel = rabbit_ct_client_helpers:open_channel(Config, B),
+
+ %% declare the queue on the master, mirrored to the two slaves
+ Queue = <<"test">>,
+ amqp_channel:call(Node1Channel,#'queue.declare'{queue = Queue,
+ auto_delete = false,
+ durable = true}),
+
+ %% send a bunch of messages from the producer
+ ProducerPid = rabbit_ha_test_producer:create(Node2Channel, Queue,
+ self(), true, Msgs),
+ DeathFun(Config, A),
+ rabbit_ha_test_producer:await_response(ProducerPid),
+ ok.
+
+stop(Config, Node) ->
+ rabbit_ct_broker_helpers:stop_node_after(Config, Node, 50).
+
+sigkill(Config, Node) ->
+ rabbit_ct_broker_helpers:kill_node_after(Config, Node, 50).
+
+policy(Config, Node)->
+ Nodes = [
+ rabbit_misc:atom_to_binary(N)
+ || N <- rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+ N =/= Node],
+ rabbit_ct_broker_helpers:set_ha_policy(Config, Node, <<".*">>,
+ {<<"nodes">>, Nodes}).
+
+open_incapable_channel(NodePort) ->
+ Props = [{<<"capabilities">>, table, []}],
+ {ok, ConsConn} =
+ amqp_connection:start(#amqp_params_network{port = NodePort,
+ client_properties = Props}),
+ {ok, Ch} = amqp_connection:open_channel(ConsConn),
+ Ch.
diff --git a/test/sup_delayed_restart_SUITE.erl b/test/sup_delayed_restart_SUITE.erl
new file mode 100644
index 0000000000..e495f57d0e
--- /dev/null
+++ b/test/sup_delayed_restart_SUITE.erl
@@ -0,0 +1,91 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(sup_delayed_restart_SUITE).
+
+-behaviour(supervisor2).
+
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ delayed_restart
+ ].
+
+%%----------------------------------------------------------------------------
+%% Public API
+%%----------------------------------------------------------------------------
+
+delayed_restart(_Config) ->
+ passed = with_sup(simple_one_for_one,
+ fun (SupPid) ->
+ {ok, _ChildPid} =
+ supervisor2:start_child(SupPid, []),
+ test_supervisor_delayed_restart(SupPid)
+ end),
+ passed = with_sup(one_for_one, fun test_supervisor_delayed_restart/1).
+
+test_supervisor_delayed_restart(SupPid) ->
+ ok = ping_child(SupPid),
+ ok = exit_child(SupPid),
+ timer:sleep(100),
+ ok = ping_child(SupPid),
+ ok = exit_child(SupPid),
+ timer:sleep(100),
+ timeout = ping_child(SupPid),
+ timer:sleep(1010),
+ ok = ping_child(SupPid),
+ passed.
+
+with_sup(RestartStrategy, Fun) ->
+ {ok, SupPid} = supervisor2:start_link(?MODULE, [RestartStrategy]),
+ Res = Fun(SupPid),
+ unlink(SupPid),
+ exit(SupPid, shutdown),
+ Res.
+
+init([RestartStrategy]) ->
+ {ok, {{RestartStrategy, 1, 1},
+ [{test, {?MODULE, start_child, []}, {permanent, 1},
+ 16#ffffffff, worker, [?MODULE]}]}}.
+
+start_child() ->
+ {ok, proc_lib:spawn_link(fun run_child/0)}.
+
+ping_child(SupPid) ->
+ Ref = make_ref(),
+ with_child_pid(SupPid, fun(ChildPid) -> ChildPid ! {ping, Ref, self()} end),
+ receive {pong, Ref} -> ok
+ after 1000 -> timeout
+ end.
+
+exit_child(SupPid) ->
+ with_child_pid(SupPid, fun(ChildPid) -> exit(ChildPid, abnormal) end),
+ ok.
+
+with_child_pid(SupPid, Fun) ->
+ case supervisor2:which_children(SupPid) of
+ [{_Id, undefined, worker, [?MODULE]}] -> ok;
+ [{_Id, ChildPid, worker, [?MODULE]}] -> Fun(ChildPid);
+ [] -> ok
+ end.
+
+run_child() ->
+ receive {ping, Ref, Pid} -> Pid ! {pong, Ref},
+ run_child()
+ end.
diff --git a/test/sync_detection_SUITE.erl b/test/sync_detection_SUITE.erl
new file mode 100644
index 0000000000..1e0a66e8fd
--- /dev/null
+++ b/test/sync_detection_SUITE.erl
@@ -0,0 +1,252 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(sync_detection_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(LOOP_RECURSION_DELAY, 100).
+
+all() ->
+ [
+ {group, cluster_size_2},
+ {group, cluster_size_3}
+ ].
+
+groups() ->
+ [
+ {cluster_size_2, [], [
+ slave_synchronization
+ ]},
+ {cluster_size_3, [], [
+ slave_synchronization_ttl
+ ]}
+ ].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ rabbit_ct_helpers:log_environment(),
+ rabbit_ct_helpers:run_setup_steps(Config).
+
+end_per_suite(Config) ->
+ rabbit_ct_helpers:run_teardown_steps(Config).
+
+init_per_group(cluster_size_2, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]);
+init_per_group(cluster_size_3, Config) ->
+ rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 3}]).
+
+end_per_group(_, Config) ->
+ Config.
+
+init_per_testcase(Testcase, Config) ->
+ rabbit_ct_helpers:testcase_started(Config, Testcase),
+ ClusterSize = ?config(rmq_nodes_count, Config),
+ TestNumber = rabbit_ct_helpers:testcase_number(Config, ?MODULE, Testcase),
+ Config1 = rabbit_ct_helpers:set_config(Config, [
+ {rmq_nodes_count, ClusterSize},
+ {rmq_nodes_clustered, true},
+ {rmq_nodename_suffix, Testcase},
+ {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}
+ ]),
+ rabbit_ct_helpers:run_steps(Config1,
+ rabbit_ct_broker_helpers:setup_steps() ++
+ rabbit_ct_client_helpers:setup_steps() ++ [
+ fun rabbit_ct_broker_helpers:set_ha_policy_two_pos/1,
+ fun rabbit_ct_broker_helpers:set_ha_policy_two_pos_batch_sync/1
+ ]).
+
+end_per_testcase(Testcase, Config) ->
+ Config1 = rabbit_ct_helpers:run_steps(Config,
+ rabbit_ct_client_helpers:teardown_steps() ++
+ rabbit_ct_broker_helpers:teardown_steps()),
+ rabbit_ct_helpers:testcase_finished(Config1, Testcase).
+
+%% -------------------------------------------------------------------
+%% Testcases.
+%% -------------------------------------------------------------------
+
+slave_synchronization(Config) ->
+ [Master, Slave] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ Channel = rabbit_ct_client_helpers:open_channel(Config, Master),
+ Queue = <<"ha.two.test">>,
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = Queue,
+ auto_delete = false}),
+
+ %% The comments on the right are the queue length and the pending acks on
+ %% the master.
+ rabbit_ct_broker_helpers:stop_broker(Config, Slave),
+
+ %% We get and ack one message when the slave is down, and check that when we
+ %% start the slave it's not marked as synced until ack the message. We also
+ %% publish another message when the slave is up.
+ send_dummy_message(Channel, Queue), % 1 - 0
+ {#'basic.get_ok'{delivery_tag = Tag1}, _} =
+ amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 0 - 1
+
+ rabbit_ct_broker_helpers:start_broker(Config, Slave),
+
+ slave_unsynced(Master, Queue),
+ send_dummy_message(Channel, Queue), % 1 - 1
+ slave_unsynced(Master, Queue),
+
+ amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag1}), % 1 - 0
+
+ slave_synced(Master, Queue),
+
+ %% We restart the slave and we send a message, so that the slave will only
+ %% have one of the messages.
+ rabbit_ct_broker_helpers:stop_broker(Config, Slave),
+ rabbit_ct_broker_helpers:start_broker(Config, Slave),
+
+ send_dummy_message(Channel, Queue), % 2 - 0
+
+ slave_unsynced(Master, Queue),
+
+ %% We reject the message that the slave doesn't have, and verify that it's
+ %% still unsynced
+ {#'basic.get_ok'{delivery_tag = Tag2}, _} =
+ amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 1 - 1
+ slave_unsynced(Master, Queue),
+ amqp_channel:cast(Channel, #'basic.reject'{ delivery_tag = Tag2,
+ requeue = true }), % 2 - 0
+ slave_unsynced(Master, Queue),
+ {#'basic.get_ok'{delivery_tag = Tag3}, _} =
+ amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 1 - 1
+ amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag3}), % 1 - 0
+ slave_synced(Master, Queue),
+ {#'basic.get_ok'{delivery_tag = Tag4}, _} =
+ amqp_channel:call(Channel, #'basic.get'{queue = Queue}), % 0 - 1
+ amqp_channel:cast(Channel, #'basic.ack'{delivery_tag = Tag4}), % 0 - 0
+ slave_synced(Master, Queue).
+
+slave_synchronization_ttl(Config) ->
+ [Master, Slave, DLX] = rabbit_ct_broker_helpers:get_node_configs(Config,
+ nodename),
+ Channel = rabbit_ct_client_helpers:open_channel(Config, Master),
+ DLXChannel = rabbit_ct_client_helpers:open_channel(Config, DLX),
+
+ %% We declare a DLX queue to wait for messages to be TTL'ed
+ DLXQueue = <<"dlx-queue">>,
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = DLXQueue,
+ auto_delete = false}),
+
+ TestMsgTTL = 5000,
+ Queue = <<"ha.two.test">>,
+ %% Sadly we need fairly high numbers for the TTL because starting/stopping
+ %% nodes takes a fair amount of time.
+ Args = [{<<"x-message-ttl">>, long, TestMsgTTL},
+ {<<"x-dead-letter-exchange">>, longstr, <<>>},
+ {<<"x-dead-letter-routing-key">>, longstr, DLXQueue}],
+ #'queue.declare_ok'{} =
+ amqp_channel:call(Channel, #'queue.declare'{queue = Queue,
+ auto_delete = false,
+ arguments = Args}),
+
+ slave_synced(Master, Queue),
+
+ %% All unknown
+ rabbit_ct_broker_helpers:stop_broker(Config, Slave),
+ send_dummy_message(Channel, Queue),
+ send_dummy_message(Channel, Queue),
+ rabbit_ct_broker_helpers:start_broker(Config, Slave),
+ slave_unsynced(Master, Queue),
+ wait_for_messages(DLXQueue, DLXChannel, 2),
+ slave_synced(Master, Queue),
+
+ %% 1 unknown, 1 known
+ rabbit_ct_broker_helpers:stop_broker(Config, Slave),
+ send_dummy_message(Channel, Queue),
+ rabbit_ct_broker_helpers:start_broker(Config, Slave),
+ slave_unsynced(Master, Queue),
+ send_dummy_message(Channel, Queue),
+ slave_unsynced(Master, Queue),
+ wait_for_messages(DLXQueue, DLXChannel, 2),
+ slave_synced(Master, Queue),
+
+    %% both known
+ send_dummy_message(Channel, Queue),
+ send_dummy_message(Channel, Queue),
+ slave_synced(Master, Queue),
+ wait_for_messages(DLXQueue, DLXChannel, 2),
+ slave_synced(Master, Queue),
+
+ ok.
+
+send_dummy_message(Channel, Queue) ->
+ Payload = <<"foo">>,
+ Publish = #'basic.publish'{exchange = <<>>, routing_key = Queue},
+ amqp_channel:cast(Channel, Publish, #amqp_msg{payload = Payload}).
+
+slave_pids(Node, Queue) ->
+ {ok, Q} = rpc:call(Node, rabbit_amqqueue, lookup,
+ [rabbit_misc:r(<<"/">>, queue, Queue)]),
+ SSP = synchronised_slave_pids,
+ [{SSP, Pids}] = rpc:call(Node, rabbit_amqqueue, info, [Q, [SSP]]),
+ case Pids of
+ '' -> [];
+ _ -> Pids
+ end.
+
+%% The Mnesia synchronization takes a while, but we don't want to wait for the
+%% test to fail, since the timetrap is quite high.
+wait_for_sync_status(Status, Node, Queue) ->
+ Max = 10000 / ?LOOP_RECURSION_DELAY,
+ wait_for_sync_status(0, Max, Status, Node, Queue).
+
+wait_for_sync_status(N, Max, Status, Node, Queue) when N >= Max ->
+ erlang:error({sync_status_max_tries_failed,
+ [{queue, Queue},
+ {node, Node},
+ {expected_status, Status},
+ {max_tried, Max}]});
+wait_for_sync_status(N, Max, Status, Node, Queue) ->
+ Synced = length(slave_pids(Node, Queue)) =:= 1,
+ case Synced =:= Status of
+ true -> ok;
+ false -> timer:sleep(?LOOP_RECURSION_DELAY),
+ wait_for_sync_status(N + 1, Max, Status, Node, Queue)
+ end.
+
+slave_synced(Node, Queue) ->
+ wait_for_sync_status(true, Node, Queue).
+
+slave_unsynced(Node, Queue) ->
+ wait_for_sync_status(false, Node, Queue).
+
+wait_for_messages(Queue, Channel, N) ->
+ Sub = #'basic.consume'{queue = Queue},
+ #'basic.consume_ok'{consumer_tag = CTag} = amqp_channel:call(Channel, Sub),
+ receive
+ #'basic.consume_ok'{} -> ok
+ end,
+ lists:foreach(
+ fun (_) -> receive
+ {#'basic.deliver'{delivery_tag = Tag}, _Content} ->
+ amqp_channel:cast(Channel,
+ #'basic.ack'{delivery_tag = Tag})
+ end
+ end, lists:seq(1, N)),
+ amqp_channel:call(Channel, #'basic.cancel'{consumer_tag = CTag}).
diff --git a/test/unit_SUITE.erl b/test/unit_SUITE.erl
new file mode 100644
index 0000000000..30bf4d937c
--- /dev/null
+++ b/test/unit_SUITE.erl
@@ -0,0 +1,736 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2016 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(unit_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("rabbit_common/include/rabbit.hrl").
+-include_lib("rabbit_common/include/rabbit_framing.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, parallel_tests}
+ ].
+
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ arguments_parser,
+ filtering_flags_parsing,
+ {basic_header_handling, [parallel], [
+ write_table_with_invalid_existing_type,
+ invalid_existing_headers,
+ disparate_invalid_header_entries_accumulate_separately,
+ corrupt_or_invalid_headers_are_overwritten,
+ invalid_same_header_entry_accumulation
+ ]},
+ content_framing,
+ content_transcoding,
+ pg_local,
+ pmerge,
+ plmerge,
+ priority_queue,
+ {resource_monitor, [parallel], [
+ parse_information_unit
+ ]},
+ {supervisor2, [], [
+ check_shutdown_stop,
+ check_shutdown_ignored
+ ]},
+ table_codec,
+ {truncate, [parallel], [
+ short_examples_exactly,
+ term_limit,
+ large_examples_for_size
+ ]},
+ unfold,
+ version_equivalance,
+ {vm_memory_monitor, [parallel], [
+ parse_line_linux
+ ]}
+ ]}
+ ].
+
+init_per_group(_, Config) -> Config.
+end_per_group(_, Config) -> Config.
+
+%% -------------------------------------------------------------------
+%% Argument parsing.
+%% -------------------------------------------------------------------
+
+arguments_parser(_Config) ->
+ GlobalOpts1 = [{"-f1", flag}, {"-o1", {option, "foo"}}],
+ Commands1 = [command1, {command2, [{"-f2", flag}, {"-o2", {option, "bar"}}]}],
+
+ GetOptions =
+ fun (Args) ->
+ rabbit_cli:parse_arguments(Commands1, GlobalOpts1, "-n", Args)
+ end,
+
+ check_parse_arguments(no_command, GetOptions, []),
+ check_parse_arguments(no_command, GetOptions, ["foo", "bar"]),
+ check_parse_arguments(
+ {ok, {command1, [{"-f1", false}, {"-o1", "foo"}], []}},
+ GetOptions, ["command1"]),
+ check_parse_arguments(
+ {ok, {command1, [{"-f1", false}, {"-o1", "blah"}], []}},
+ GetOptions, ["command1", "-o1", "blah"]),
+ check_parse_arguments(
+ {ok, {command1, [{"-f1", true}, {"-o1", "foo"}], []}},
+ GetOptions, ["command1", "-f1"]),
+ check_parse_arguments(
+ {ok, {command1, [{"-f1", false}, {"-o1", "blah"}], []}},
+ GetOptions, ["-o1", "blah", "command1"]),
+ check_parse_arguments(
+ {ok, {command1, [{"-f1", false}, {"-o1", "blah"}], ["quux"]}},
+ GetOptions, ["-o1", "blah", "command1", "quux"]),
+ check_parse_arguments(
+ {ok, {command1, [{"-f1", true}, {"-o1", "blah"}], ["quux", "baz"]}},
+ GetOptions, ["command1", "quux", "-f1", "-o1", "blah", "baz"]),
+ %% For duplicate flags, the last one counts
+ check_parse_arguments(
+ {ok, {command1, [{"-f1", false}, {"-o1", "second"}], []}},
+ GetOptions, ["-o1", "first", "command1", "-o1", "second"]),
+ %% If the flag "eats" the command, the command won't be recognised
+ check_parse_arguments(no_command, GetOptions,
+ ["-o1", "command1", "quux"]),
+ %% If a flag eats another flag, the eaten flag won't be recognised
+ check_parse_arguments(
+ {ok, {command1, [{"-f1", false}, {"-o1", "-f1"}], []}},
+ GetOptions, ["command1", "-o1", "-f1"]),
+
+ %% Now for some command-specific flags...
+ check_parse_arguments(
+ {ok, {command2, [{"-f1", false}, {"-f2", false},
+ {"-o1", "foo"}, {"-o2", "bar"}], []}},
+ GetOptions, ["command2"]),
+
+ check_parse_arguments(
+ {ok, {command2, [{"-f1", false}, {"-f2", true},
+ {"-o1", "baz"}, {"-o2", "bar"}], ["quux", "foo"]}},
+ GetOptions, ["-f2", "command2", "quux", "-o1", "baz", "foo"]),
+
+ passed.
+
+check_parse_arguments(ExpRes, Fun, As) ->
+ SortRes =
+ fun (no_command) -> no_command;
+ ({ok, {C, KVs, As1}}) -> {ok, {C, lists:sort(KVs), As1}}
+ end,
+
+ true = SortRes(ExpRes) =:= SortRes(Fun(As)).
+
+filtering_flags_parsing(_Config) ->
+ Cases = [{[], [], []}
+ ,{[{"--online", true}], ["--offline", "--online", "--third-option"], [false, true, false]}
+ ,{[{"--online", true}, {"--third-option", true}, {"--offline", true}], ["--offline", "--online", "--third-option"], [true, true, true]}
+ ,{[], ["--offline", "--online", "--third-option"], [true, true, true]}
+ ],
+ lists:foreach(fun({Vals, Opts, Expect}) ->
+ case rabbit_cli:filter_opts(Vals, Opts) of
+ Expect ->
+ ok;
+ Got ->
+ exit({no_match, Got, Expect, {args, Vals, Opts}})
+ end
+ end,
+ Cases).
+
+%% -------------------------------------------------------------------
+%% basic_header_handling.
+%% -------------------------------------------------------------------
+
+-define(XDEATH_TABLE,
+ [{<<"reason">>, longstr, <<"blah">>},
+ {<<"queue">>, longstr, <<"foo.bar.baz">>},
+ {<<"exchange">>, longstr, <<"my-exchange">>},
+ {<<"routing-keys">>, array, []}]).
+
+-define(ROUTE_TABLE, [{<<"redelivered">>, bool, <<"true">>}]).
+
+-define(BAD_HEADER(K), {<<K>>, longstr, <<"bad ", K>>}).
+-define(BAD_HEADER2(K, Suf), {<<K>>, longstr, <<"bad ", K, Suf>>}).
+-define(FOUND_BAD_HEADER(K), {<<K>>, array, [{longstr, <<"bad ", K>>}]}).
+
+write_table_with_invalid_existing_type(_Config) ->
+ prepend_check(<<"header1">>, ?XDEATH_TABLE, [?BAD_HEADER("header1")]).
+
+invalid_existing_headers(_Config) ->
+ Headers =
+ prepend_check(<<"header2">>, ?ROUTE_TABLE, [?BAD_HEADER("header2")]),
+ {array, [{table, ?ROUTE_TABLE}]} =
+ rabbit_misc:table_lookup(Headers, <<"header2">>),
+ passed.
+
+disparate_invalid_header_entries_accumulate_separately(_Config) ->
+ BadHeaders = [?BAD_HEADER("header2")],
+ Headers = prepend_check(<<"header2">>, ?ROUTE_TABLE, BadHeaders),
+ Headers2 = prepend_check(<<"header1">>, ?XDEATH_TABLE,
+ [?BAD_HEADER("header1") | Headers]),
+ {table, [?FOUND_BAD_HEADER("header1"),
+ ?FOUND_BAD_HEADER("header2")]} =
+ rabbit_misc:table_lookup(Headers2, ?INVALID_HEADERS_KEY),
+ passed.
+
+corrupt_or_invalid_headers_are_overwritten(_Config) ->
+ Headers0 = [?BAD_HEADER("header1"),
+ ?BAD_HEADER("x-invalid-headers")],
+ Headers1 = prepend_check(<<"header1">>, ?XDEATH_TABLE, Headers0),
+ {table,[?FOUND_BAD_HEADER("header1"),
+ ?FOUND_BAD_HEADER("x-invalid-headers")]} =
+ rabbit_misc:table_lookup(Headers1, ?INVALID_HEADERS_KEY),
+ passed.
+
+invalid_same_header_entry_accumulation(_Config) ->
+ BadHeader1 = ?BAD_HEADER2("header1", "a"),
+ Headers = prepend_check(<<"header1">>, ?ROUTE_TABLE, [BadHeader1]),
+ Headers2 = prepend_check(<<"header1">>, ?ROUTE_TABLE,
+ [?BAD_HEADER2("header1", "b") | Headers]),
+ {table, InvalidHeaders} =
+ rabbit_misc:table_lookup(Headers2, ?INVALID_HEADERS_KEY),
+ {array, [{longstr,<<"bad header1b">>},
+ {longstr,<<"bad header1a">>}]} =
+ rabbit_misc:table_lookup(InvalidHeaders, <<"header1">>),
+ passed.
+
+prepend_check(HeaderKey, HeaderTable, Headers) ->
+ Headers1 = rabbit_basic:prepend_table_header(
+ HeaderKey, HeaderTable, Headers),
+ {table, Invalid} =
+ rabbit_misc:table_lookup(Headers1, ?INVALID_HEADERS_KEY),
+ {Type, Value} = rabbit_misc:table_lookup(Headers, HeaderKey),
+ {array, [{Type, Value} | _]} =
+ rabbit_misc:table_lookup(Invalid, HeaderKey),
+ Headers1.
+
+%% -------------------------------------------------------------------
+%% pg_local.
+%% -------------------------------------------------------------------
+
+pg_local(_Config) ->
+ [P, Q] = [spawn(fun () -> receive X -> X end end) || _ <- [x, x]],
+ check_pg_local(ok, [], []),
+ check_pg_local(pg_local:join(a, P), [P], []),
+ check_pg_local(pg_local:join(b, P), [P], [P]),
+ check_pg_local(pg_local:join(a, P), [P, P], [P]),
+ check_pg_local(pg_local:join(a, Q), [P, P, Q], [P]),
+ check_pg_local(pg_local:join(b, Q), [P, P, Q], [P, Q]),
+ check_pg_local(pg_local:join(b, Q), [P, P, Q], [P, Q, Q]),
+ check_pg_local(pg_local:leave(a, P), [P, Q], [P, Q, Q]),
+ check_pg_local(pg_local:leave(b, P), [P, Q], [Q, Q]),
+ check_pg_local(pg_local:leave(a, P), [Q], [Q, Q]),
+ check_pg_local(pg_local:leave(a, P), [Q], [Q, Q]),
+ [begin X ! done,
+ Ref = erlang:monitor(process, X),
+ receive {'DOWN', Ref, process, X, _Info} -> ok end
+ end || X <- [P, Q]],
+ check_pg_local(ok, [], []),
+ passed.
+
+%% After forcing a pg_local sync, asserts that groups 'a' and 'b'
+%% contain exactly the expected pids (order-insensitive, but with
+%% multiplicity, so duplicate memberships are checked too).
+check_pg_local(ok, APids, BPids) ->
+    ok = pg_local:sync(),
+    SameMembers =
+        fun ({Group, Pids}) ->
+                lists:sort(Pids) == lists:sort(pg_local:get_members(Group))
+        end,
+    [true, true] = lists:map(SameMembers, [{a, APids}, {b, BPids}]).
+
+%% -------------------------------------------------------------------
+%% priority_queue.
+%% -------------------------------------------------------------------
+
+%% End-to-end check of the priority_queue module: construction, ordering
+%% by priority (including negative and 'infinity' priorities), joins of
+%% queues in every priority combination, and stability of same-priority
+%% elements. Each numbered Qn builds on earlier ones; the expected tuple
+%% is {is_queue, is_empty, len, to_list, drained-in-order}.
+priority_queue(_Config) ->
+
+    false = priority_queue:is_queue(not_a_queue),
+
+    %% empty Q
+    Q = priority_queue:new(),
+    {true, true, 0, [], []} = test_priority_queue(Q),
+
+    %% 1-4 element no-priority Q
+    true = lists:all(fun (X) -> X =:= passed end,
+                     lists:map(fun test_simple_n_element_queue/1,
+                               lists:seq(1, 4))),
+
+    %% 1-element priority Q
+    Q1 = priority_queue:in(foo, 1, priority_queue:new()),
+    {true, false, 1, [{1, foo}], [foo]} =
+        test_priority_queue(Q1),
+
+    %% 2-element same-priority Q
+    Q2 = priority_queue:in(bar, 1, Q1),
+    {true, false, 2, [{1, foo}, {1, bar}], [foo, bar]} =
+        test_priority_queue(Q2),
+
+    %% 2-element different-priority Q
+    Q3 = priority_queue:in(bar, 2, Q1),
+    {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} =
+        test_priority_queue(Q3),
+
+    %% 1-element negative priority Q
+    Q4 = priority_queue:in(foo, -1, priority_queue:new()),
+    {true, false, 1, [{-1, foo}], [foo]} = test_priority_queue(Q4),
+
+    %% merge 2 * 1-element no-priority Qs
+    Q5 = priority_queue:join(priority_queue:in(foo, Q),
+                             priority_queue:in(bar, Q)),
+    {true, false, 2, [{0, foo}, {0, bar}], [foo, bar]} =
+        test_priority_queue(Q5),
+
+    %% merge 1-element no-priority Q with 1-element priority Q
+    Q6 = priority_queue:join(priority_queue:in(foo, Q),
+                             priority_queue:in(bar, 1, Q)),
+    {true, false, 2, [{1, bar}, {0, foo}], [bar, foo]} =
+        test_priority_queue(Q6),
+
+    %% merge 1-element priority Q with 1-element no-priority Q
+    Q7 = priority_queue:join(priority_queue:in(foo, 1, Q),
+                             priority_queue:in(bar, Q)),
+    {true, false, 2, [{1, foo}, {0, bar}], [foo, bar]} =
+        test_priority_queue(Q7),
+
+    %% merge 2 * 1-element same-priority Qs
+    Q8 = priority_queue:join(priority_queue:in(foo, 1, Q),
+                             priority_queue:in(bar, 1, Q)),
+    {true, false, 2, [{1, foo}, {1, bar}], [foo, bar]} =
+        test_priority_queue(Q8),
+
+    %% merge 2 * 1-element different-priority Qs
+    Q9 = priority_queue:join(priority_queue:in(foo, 1, Q),
+                             priority_queue:in(bar, 2, Q)),
+    {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} =
+        test_priority_queue(Q9),
+
+    %% merge 2 * 1-element different-priority Qs (other way around)
+    Q10 = priority_queue:join(priority_queue:in(bar, 2, Q),
+                              priority_queue:in(foo, 1, Q)),
+    {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} =
+        test_priority_queue(Q10),
+
+    %% merge 2 * 2-element multi-different-priority Qs
+    Q11 = priority_queue:join(Q6, Q5),
+    {true, false, 4, [{1, bar}, {0, foo}, {0, foo}, {0, bar}],
+     [bar, foo, foo, bar]} = test_priority_queue(Q11),
+
+    %% and the other way around
+    Q12 = priority_queue:join(Q5, Q6),
+    {true, false, 4, [{1, bar}, {0, foo}, {0, bar}, {0, foo}],
+     [bar, foo, bar, foo]} = test_priority_queue(Q12),
+
+    %% merge with negative priorities
+    Q13 = priority_queue:join(Q4, Q5),
+    {true, false, 3, [{0, foo}, {0, bar}, {-1, foo}], [foo, bar, foo]} =
+        test_priority_queue(Q13),
+
+    %% and the other way around
+    Q14 = priority_queue:join(Q5, Q4),
+    {true, false, 3, [{0, foo}, {0, bar}, {-1, foo}], [foo, bar, foo]} =
+        test_priority_queue(Q14),
+
+    %% joins with empty queues:
+    Q1 = priority_queue:join(Q, Q1),
+    Q1 = priority_queue:join(Q1, Q),
+
+    %% insert with priority into non-empty zero-priority queue
+    Q15 = priority_queue:in(baz, 1, Q5),
+    {true, false, 3, [{1, baz}, {0, foo}, {0, bar}], [baz, foo, bar]} =
+        test_priority_queue(Q15),
+
+    %% 1-element infinity priority Q
+    Q16 = priority_queue:in(foo, infinity, Q),
+    {true, false, 1, [{infinity, foo}], [foo]} = test_priority_queue(Q16),
+
+    %% add infinity to 0-priority Q
+    Q17 = priority_queue:in(foo, infinity, priority_queue:in(bar, Q)),
+    {true, false, 2, [{infinity, foo}, {0, bar}], [foo, bar]} =
+        test_priority_queue(Q17),
+
+    %% and the other way around
+    Q18 = priority_queue:in(bar, priority_queue:in(foo, infinity, Q)),
+    {true, false, 2, [{infinity, foo}, {0, bar}], [foo, bar]} =
+        test_priority_queue(Q18),
+
+    %% add infinity to mixed-priority Q
+    Q19 = priority_queue:in(qux, infinity, Q3),
+    {true, false, 3, [{infinity, qux}, {2, bar}, {1, foo}], [qux, bar, foo]} =
+        test_priority_queue(Q19),
+
+    %% merge the above with a negative priority Q
+    Q20 = priority_queue:join(Q19, Q4),
+    {true, false, 4, [{infinity, qux}, {2, bar}, {1, foo}, {-1, foo}],
+     [qux, bar, foo, foo]} = test_priority_queue(Q20),
+
+    %% merge two infinity priority queues
+    Q21 = priority_queue:join(priority_queue:in(foo, infinity, Q),
+                              priority_queue:in(bar, infinity, Q)),
+    {true, false, 2, [{infinity, foo}, {infinity, bar}], [foo, bar]} =
+        test_priority_queue(Q21),
+
+    %% merge two mixed priority with infinity queues
+    Q22 = priority_queue:join(Q18, Q20),
+    {true, false, 6, [{infinity, foo}, {infinity, qux}, {2, bar}, {1, foo},
+                      {0, bar}, {-1, foo}], [foo, qux, bar, foo, bar, foo]} =
+        test_priority_queue(Q22),
+
+    passed.
+
+%% Inserts every element of L into Q at the default priority, in order.
+priority_queue_in_all(Q, []) ->
+    Q;
+priority_queue_in_all(Q, [X | Rest]) ->
+    priority_queue_in_all(priority_queue:in(X, Q), Rest).
+
+%% Drains Q completely, returning its elements in dequeue order.
+priority_queue_out_all(Q) ->
+    priority_queue_out_all(Q, []).
+
+%% Tail-recursive worker: accumulate in reverse, flip at the end.
+priority_queue_out_all(Q, Acc) ->
+    case priority_queue:out(Q) of
+        {empty, _}       -> lists:reverse(Acc);
+        {{value, V}, Q1} -> priority_queue_out_all(Q1, [V | Acc])
+    end.
+
+%% Snapshots every observable property of a priority queue in one tuple:
+%% {is_queue, is_empty, length, priority-tagged list, drained elements}.
+test_priority_queue(Q) ->
+    IsQueue = priority_queue:is_queue(Q),
+    IsEmpty = priority_queue:is_empty(Q),
+    Length  = priority_queue:len(Q),
+    Tagged  = priority_queue:to_list(Q),
+    Drained = priority_queue_out_all(Q),
+    {IsQueue, IsEmpty, Length, Tagged, Drained}.
+
+%% Inserts 1..N at default priority (0) and checks FIFO order and the
+%% {0, X} tagging in to_list/1.
+test_simple_n_element_queue(N) ->
+    Items = lists:seq(1, N),
+    Q = priority_queue_in_all(priority_queue:new(), Items),
+    ToListRes = [{0, X} || X <- Items],
+    {true, false, N, ToListRes, Items} = test_priority_queue(Q),
+    passed.
+
+%% ---------------------------------------------------------------------------
+%% resource_monitor.
+%% ---------------------------------------------------------------------------
+
+%% Table-driven check of memory-size string parsing: decimal suffixes
+%% (kB/MB/GB) are powers of 1000, binary suffixes (kiB/MiB/GiB) and the
+%% bare single-letter forms (k/M/G, case-insensitive) are powers of
+%% 1024; malformed input yields {error, parse_error}.
+parse_information_unit(_Config) ->
+    lists:foreach(fun ({S, V}) ->
+                          V = rabbit_resource_monitor_misc:parse_information_unit(S)
+                  end,
+                  [
+                   {"1000", {ok, 1000}},
+
+                   {"10kB", {ok, 10000}},
+                   {"10MB", {ok, 10000000}},
+                   {"10GB", {ok, 10000000000}},
+
+                   {"10kiB", {ok, 10240}},
+                   {"10MiB", {ok, 10485760}},
+                   {"10GiB", {ok, 10737418240}},
+
+                   {"10k", {ok, 10240}},
+                   {"10M", {ok, 10485760}},
+                   {"10G", {ok, 10737418240}},
+
+                   {"10KB", {ok, 10000}},
+                   {"10K", {ok, 10240}},
+                   {"10m", {ok, 10485760}},
+                   {"10Mb", {ok, 10000000}},
+
+                   {"0MB",  {ok, 0}},
+
+                   %% Rejected: embedded space, missing number, empty
+                   %% string, fractions and unsupported suffixes.
+                   {"10 k", {error, parse_error}},
+                   {"MB", {error, parse_error}},
+                   {"", {error, parse_error}},
+                   {"0.5GB", {error, parse_error}},
+                   {"10TB", {error, parse_error}}
+                  ]),
+    passed.
+
+%% ---------------------------------------------------------------------------
+%% supervisor2.
+%% ---------------------------------------------------------------------------
+
+%% Children that exit on 'stop': many iterations, many children, and a
+%% generous supervisor shutdown timeout must all shut down cleanly.
+check_shutdown_stop(_Config) ->
+    ok = check_shutdown(stop,    200, 200, 2000).
+
+%% Children that ignore the stop signal: the supervisor must still
+%% shut down (by brutal kill after its timeout).
+check_shutdown_ignored(_Config) ->
+    ok = check_shutdown(ignored,   1,   2, 2000).
+
+%% Core of the supervisor2 shutdown checks. For each of Iterations
+%% rounds: start ChildCount children under the dummy_supervisor2 test
+%% supervisor, send them SigStop, terminate and restart the test_sup
+%% child, and require the monitored supervisor to go down with reason
+%% 'shutdown'. Any other DOWN reason aborts the fold with that reason.
+%% Finally the top-level supervisor is shut down and awaited.
+check_shutdown(SigStop, Iterations, ChildCount, SupTimeout) ->
+    {ok, Sup} = supervisor2:start_link(dummy_supervisor2, [SupTimeout]),
+    Res = lists:foldl(
+            fun (I, ok) ->
+                    TestSupPid = erlang:whereis(dummy_supervisor2),
+                    ChildPids =
+                        [begin
+                             {ok, ChildPid} =
+                                 supervisor2:start_child(TestSupPid, []),
+                             ChildPid
+                         end || _ <- lists:seq(1, ChildCount)],
+                    MRef = erlang:monitor(process, TestSupPid),
+                    [P ! SigStop || P <- ChildPids],
+                    ok = supervisor2:terminate_child(Sup, test_sup),
+                    {ok, _} = supervisor2:restart_child(Sup, test_sup),
+                    receive
+                        {'DOWN', MRef, process, TestSupPid, shutdown} ->
+                            ok;
+                        {'DOWN', MRef, process, TestSupPid, Reason} ->
+                            {error, {I, Reason}}
+                    end;
+                (_, R) ->
+                    %% A previous round failed; skip the remaining rounds
+                    %% and propagate the first failure.
+                    R
+            end, ok, lists:seq(1, Iterations)),
+    %% Unlink before killing so this test process is not taken down too.
+    unlink(Sup),
+    MSupRef = erlang:monitor(process, Sup),
+    exit(Sup, shutdown),
+    receive
+        {'DOWN', MSupRef, process, Sup, _Reason} ->
+            ok
+    end,
+    Res.
+
+%% ---------------------------------------------------------------------------
+%% truncate.
+%% ---------------------------------------------------------------------------
+
+%% Pinpoint checks of truncate:term/2. With a tiny size budget ({1, ...})
+%% each term must truncate to exactly Exp; with a huge budget it must
+%% come back unchanged. F uses per-type limits {10,10,5,5}, FSmall the
+%% tighter {2,2,2,2}.
+short_examples_exactly(_Config) ->
+    F = fun (Term, Exp) ->
+                Exp = truncate:term(Term, {1, {10, 10, 5, 5}}),
+                Term = truncate:term(Term, {100000, {10, 10, 5, 5}})
+        end,
+    FSmall = fun (Term, Exp) ->
+                     Exp = truncate:term(Term, {1, {2, 2, 2, 2}}),
+                     Term = truncate:term(Term, {100000, {2, 2, 2, 2}})
+             end,
+    F([], []),
+    F("h", "h"),
+    F("hello world", "hello w..."),
+    F([[h,e,l,l,o,' ',w,o,r,l,d]], [[h,e,l,l,o,'...']]),
+    %% Improper lists survive truncation untouched.
+    F([a|b], [a|b]),
+    F(<<"hello">>, <<"hello">>),
+    F([<<"hello world">>], [<<"he...">>]),
+    F(<<1:1>>, <<1:1>>),
+    F(<<1:81>>, <<0:56, "...">>),
+    F({{{{a}}},{b},c,d,e,f,g,h,i,j,k}, {{{'...'}},{b},c,d,e,f,g,h,i,j,'...'}),
+    FSmall({a,30,40,40,40,40}, {a,30,'...'}),
+    FSmall([a,30,40,40,40,40], [a,30,'...']),
+    %% Atomic-ish terms (numbers, bitstrings, funs, pids, refs) pass
+    %% through unchanged.
+    P = spawn(fun() -> receive die -> ok end end),
+    F([0, 0.0, <<1:1>>, F, P], [0, 0.0, <<1:1>>, F, P]),
+    P ! die,
+    R = make_ref(),
+    F([R], [R]),
+    ok.
+
+%% Checks truncate:term_size/3 budget accounting: remaining budget when
+%% a term fits, 'limit_exceeded' when it does not. The two accepted
+%% remainders per case reflect 32- vs 64-bit word sizes.
+term_limit(_Config) ->
+    W = erlang:system_info(wordsize),
+    S = <<"abc">>,
+    1 = truncate:term_size(S, 4, W),
+    limit_exceeded = truncate:term_size(S, 3, W),
+    case 100 - truncate:term_size([S, S], 100, W) of
+        22 -> ok; %% 32 bit
+        38 -> ok  %% 64 bit
+    end,
+    case 100 - truncate:term_size([S, [S]], 100, W) of
+        30 -> ok; %% ditto
+        54 -> ok
+    end,
+    limit_exceeded = truncate:term_size([S, S], 6, W),
+    ok.
+
+%% Checks that truncation shrinks realistically large terms by at least
+%% an order of magnitude: each input serialises to > 5 MB, and its
+%% truncated form to < 500 kB.
+large_examples_for_size(_Config) ->
+    %% Real world values
+    Shrink = fun(Term) -> truncate:term(Term, {1, {1000, 100, 50, 5}}) end,
+    TestSize = fun(Term) ->
+                       true = 5000000 < size(term_to_binary(Term)),
+                       true = 500000 > size(term_to_binary(Shrink(Term)))
+               end,
+    TestSize(lists:seq(1, 5000000)),
+    TestSize(recursive_list(1000, 10)),
+    TestSize(recursive_list(5000, 20)),
+    TestSize(gb_sets:from_list([I || I <- lists:seq(1, 1000000)])),
+    TestSize(gb_trees:from_orddict([{I, I} || I <- lists:seq(1, 1000000)])),
+    ok.
+
+%% Builds a deeply nested list fixture for the truncation size tests.
+%% Depth 0 is lists:seq(1, S); at depth N the result is S elements,
+%% each independently built one level down with S shrunk by a factor N.
+recursive_list(S, 0) ->
+    lists:seq(1, S);
+recursive_list(S, N) ->
+    lists:map(fun (_) -> recursive_list(S div N, N - 1) end,
+              lists:seq(1, S)).
+
+%% ---------------------------------------------------------------------------
+%% vm_memory_monitor.
+%% ---------------------------------------------------------------------------
+
+%% Checks /proc/meminfo line parsing: values with a "kB" unit are scaled
+%% to bytes (* 1024), bare numbers pass through, and both the trailing
+%% colon on the key and trailing whitespace are optional.
+parse_line_linux(_Config) ->
+    lists:foreach(fun ({S, {K, V}}) ->
+                          {K, V} = vm_memory_monitor:parse_line_linux(S)
+                  end,
+                  [{"MemTotal:      0 kB",        {'MemTotal', 0}},
+                   {"MemTotal:      502968 kB  ", {'MemTotal', 515039232}},
+                   {"MemFree:         178232 kB", {'MemFree',  182509568}},
+                   {"MemTotal:         50296888", {'MemTotal', 50296888}},
+                   {"MemTotal         502968 kB", {'MemTotal', 515039232}},
+                   {"MemTotal     50296866   ",   {'MemTotal', 50296866}}]),
+    ok.
+
+%% ---------------------------------------------------------------------------
+%% Unordered tests (originally from rabbit_tests.erl).
+%% ---------------------------------------------------------------------------
+
+%% Test that content frames don't exceed frame-max
+content_framing(_Config) ->
+ %% no content
+ passed = test_content_framing(4096, <<>>),
+ %% easily fit in one frame
+ passed = test_content_framing(4096, <<"Easy">>),
+ %% exactly one frame (empty frame = 8 bytes)
+ passed = test_content_framing(11, <<"One">>),
+ %% more than one frame
+ passed = test_content_framing(11, <<"More than one frame">>),
+ passed.
+
+%% Builds AMQP 0-9-1 content frames for BodyBin on channel 1 with the
+%% given FrameMax, then asserts (a) the header frame carries the total
+%% body size and (b) every body frame is well-formed (0xCE terminator)
+%% and no larger than FrameMax.
+test_content_framing(FrameMax, BodyBin) ->
+    [Header | Frames] =
+        rabbit_binary_generator:build_simple_content_frames(
+          1,
+          rabbit_binary_generator:ensure_content_encoded(
+            rabbit_basic:build_content(#'P_basic'{}, BodyBin),
+            rabbit_framing_amqp_0_9_1),
+          FrameMax,
+          rabbit_framing_amqp_0_9_1),
+    %% header is formatted correctly and the size is the total of the
+    %% fragments
+    <<_FrameHeader:7/binary, _ClassAndWeight:4/binary,
+      BodySize:64/unsigned, _Rest/binary>> = list_to_binary(Header),
+    BodySize = size(BodyBin),
+    true = lists:all(
+             fun (ContentFrame) ->
+                     FrameBinary = list_to_binary(ContentFrame),
+                     %% assert
+                     <<_TypeAndChannel:3/binary,
+                       Size:32/unsigned, _Payload:Size/binary, 16#CE>> =
+                         FrameBinary,
+                     size(FrameBinary) =< FrameMax
+             end, Frames),
+    passed.
+
+%% Checks that the decode/encode/clear operations on #content{} records
+%% never fail in any interleaving, and that ensure_* leave the relevant
+%% field populated.
+content_transcoding(_Config) ->
+    %% there are no guarantees provided by 'clear' - it's just a hint
+    ClearDecoded = fun rabbit_binary_parser:clear_decoded_content/1,
+    ClearEncoded = fun rabbit_binary_generator:clear_encoded_content/1,
+    EnsureDecoded =
+        fun (C0) ->
+                C1 = rabbit_binary_parser:ensure_content_decoded(C0),
+                true = C1#content.properties =/= none,
+                C1
+        end,
+    EnsureEncoded =
+        fun (Protocol) ->
+                fun (C0) ->
+                        C1 = rabbit_binary_generator:ensure_content_encoded(
+                               C0, Protocol),
+                        true = C1#content.properties_bin =/= none,
+                        C1
+                end
+        end,
+    %% Beyond the assertions in Ensure*, the only testable guarantee
+    %% is that the operations should never fail.
+    %%
+    %% If we were using quickcheck we'd simply stuff all the above
+    %% into a generator for sequences of operations. In the absence of
+    %% quickcheck we pick particularly interesting sequences that:
+    %%
+    %% - execute every op twice since they are idempotent
+    %% - invoke clear_decoded, clear_encoded, decode and transcode
+    %%   with one or both of decoded and encoded content present
+    [begin
+         sequence_with_content([Op]),
+         sequence_with_content([ClearEncoded, Op]),
+         sequence_with_content([ClearDecoded, Op])
+     end || Op <- [ClearDecoded, ClearEncoded, EnsureDecoded,
+                   EnsureEncoded(rabbit_framing_amqp_0_9_1),
+                   EnsureEncoded(rabbit_framing_amqp_0_8)]],
+    passed.
+
+%% Applies each operation in Sequence twice (F(F(V)), checking
+%% idempotence) to a freshly encoded empty content record.
+sequence_with_content(Sequence) ->
+    lists:foldl(fun (F, V) -> F(F(V)) end,
+                rabbit_binary_generator:ensure_content_encoded(
+                  rabbit_basic:build_content(#'P_basic'{}, <<>>),
+                  rabbit_framing_amqp_0_9_1),
+                Sequence).
+
+%% rabbit_misc:pmerge/3 adds {Key, Value} only when Key is absent.
+pmerge(_Config) ->
+    P = [{a, 1}, {b, 2}],
+    P = rabbit_misc:pmerge(a, 3, P),
+    [{c, 3} | P] = rabbit_misc:pmerge(c, 3, P),
+    passed.
+
+%% rabbit_misc:plmerge/2 merges two proplists; on key collision the
+%% first list's value wins.
+plmerge(_Config) ->
+    P1 = [{a, 1}, {b, 2}, {c, 3}],
+    P2 = [{a, 2}, {d, 4}],
+    [{a, 1}, {b, 2}, {c, 3}, {d, 4}] = rabbit_misc:plmerge(P1, P2),
+    passed.
+
+%% Round-trip check of the AMQP field-table codec: 'Table' must encode
+%% to exactly 'Binary' and decode back to 'Table'. The binary literal
+%% spells out the wire format (length-prefixed key, one-byte type tag,
+%% typed value) so it doubles as codec documentation.
+table_codec(_Config) ->
+    %% FIXME this does not test inexact numbers (double and float) yet,
+    %% because they won't pass the equality assertions
+    Table = [{<<"longstr">>,   longstr,   <<"Here is a long string">>},
+             {<<"signedint">>, signedint, 12345},
+             {<<"decimal">>,   decimal,   {3, 123456}},
+             {<<"timestamp">>, timestamp, 109876543209876},
+             {<<"table">>,     table,     [{<<"one">>, signedint, 54321},
+                                           {<<"two">>, longstr,
+                                            <<"A long string">>}]},
+             {<<"byte">>,      byte,      -128},
+             {<<"long">>,      long,      1234567890},
+             {<<"short">>,     short,     655},
+             {<<"bool">>,      bool,      true},
+             {<<"binary">>,    binary,    <<"a binary string">>},
+             {<<"unsignedbyte">>, unsignedbyte, 250},
+             {<<"unsignedshort">>, unsignedshort, 65530},
+             {<<"unsignedint">>, unsignedint, 4294967290},
+             {<<"void">>,      void,      undefined},
+             {<<"array">>,     array,     [{signedint, 54321},
+                                           {longstr, <<"A long string">>}]}
+            ],
+    Binary = <<
+               7,"longstr",   "S", 21:32, "Here is a long string",
+               9,"signedint", "I", 12345:32/signed,
+               7,"decimal",   "D", 3, 123456:32,
+               9,"timestamp", "T", 109876543209876:64,
+               5,"table",     "F", 31:32, % length of table
+               3,"one",       "I", 54321:32,
+               3,"two",       "S", 13:32, "A long string",
+               4,"byte",      "b", -128:8/signed,
+               4,"long",      "l", 1234567890:64,
+               5,"short",     "s", 655:16,
+               4,"bool",      "t", 1,
+               6,"binary",    "x", 15:32, "a binary string",
+               12,"unsignedbyte", "B", 250:8/unsigned,
+               13,"unsignedshort", "u", 65530:16/unsigned,
+               11,"unsignedint", "i", 4294967290:32/unsigned,
+               4,"void",      "V",
+               5,"array",     "A", 23:32,
+               "I", 54321:32,
+               "S", 13:32, "A long string"
+             >>,
+    Binary = rabbit_binary_generator:generate_table(Table),
+    Table  = rabbit_binary_parser:parse_table(Binary),
+    passed.
+
+%% rabbit_misc:unfold/2 generates a list until the fun returns false,
+%% also returning the final seed value.
+unfold(_Config) ->
+    {[], test} = rabbit_misc:unfold(fun (_V) -> false end, test),
+    List = lists:seq(2,20,2),
+    {List, 0} = rabbit_misc:unfold(fun (0) -> false;
+                                       (N) -> {true, N*2, N-1}
+                                   end, 10),
+    passed.
+
+%% Versions are "minor equivalent" when they share the same major.minor
+%% prefix; the patch (and optional 4th) component is ignored, while
+%% malformed or shorter version strings never match.
+%% NOTE(review): the function name misspells "equivalence"; left as-is
+%% because testcase names are referenced from the suite's group lists.
+version_equivalance(_Config) ->
+    true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.0"),
+    true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.1"),
+    true = rabbit_misc:version_minor_equivalent("%%VSN%%", "%%VSN%%"),
+    % Support for 4-number versions
+    true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.0.1"),
+    false = rabbit_misc:version_minor_equivalent("3.0.0", "3.1.0"),
+    false = rabbit_misc:version_minor_equivalent("3.0.0", "3.0"),
+    false = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.foo"),
+    passed.
diff --git a/test/unit_inbroker_SUITE.erl b/test/unit_inbroker_SUITE.erl
new file mode 100644
index 0000000000..c26154ff69
--- /dev/null
+++ b/test/unit_inbroker_SUITE.erl
@@ -0,0 +1,3802 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2011-2016 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(unit_inbroker_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/file.hrl").
+-include_lib("amqp_client/include/amqp_client.hrl").
+
+-compile(export_all).
+
+-define(PERSISTENT_MSG_STORE, msg_store_persistent).
+-define(TRANSIENT_MSG_STORE, msg_store_transient).
+
+-define(TIMEOUT_LIST_OPS_PASS, 5000).
+-define(TIMEOUT, 30000).
+
+-define(CLEANUP_QUEUE_NAME, <<"cleanup-queue">>).
+
+-define(VARIABLE_QUEUE_TESTCASES, [
+ variable_queue_dynamic_duration_change,
+ variable_queue_partial_segments_delta_thing,
+ variable_queue_all_the_bits_not_covered_elsewhere_A,
+ variable_queue_all_the_bits_not_covered_elsewhere_B,
+ variable_queue_drop,
+ variable_queue_fold_msg_on_disk,
+ variable_queue_dropfetchwhile,
+ variable_queue_dropwhile_varying_ram_duration,
+ variable_queue_fetchwhile_varying_ram_duration,
+ variable_queue_ack_limiting,
+ variable_queue_purge,
+ variable_queue_requeue,
+ variable_queue_requeue_ram_beta,
+ variable_queue_fold,
+ variable_queue_batch_publish,
+ variable_queue_batch_publish_delivered
+ ]).
+
+-define(BACKING_QUEUE_TESTCASES, [
+ bq_queue_index,
+ bq_queue_index_props,
+ {variable_queue_default, [], ?VARIABLE_QUEUE_TESTCASES},
+ {variable_queue_lazy, [], ?VARIABLE_QUEUE_TESTCASES ++
+ [variable_queue_mode_change]},
+ bq_variable_queue_delete_msg_store_files_callback,
+ bq_queue_recover
+ ]).
+
+-define(CLUSTER_TESTCASES, [
+ delegates_async,
+ delegates_sync,
+ queue_cleanup,
+ declare_on_dead_queue,
+ refresh_events
+ ]).
+
+%% Common Test entry point: top-level groups run by this suite. The
+%% last three groups were previously multi-node targets and get their
+%% own broker setup (see init_per_group/2).
+all() ->
+    [
+      {group, parallel_tests},
+      {group, non_parallel_tests},
+      {group, backing_queue_tests},
+      {group, cluster_tests},
+
+      {group, disconnect_detected_during_alarm},
+      {group, list_consumers_sanity_check},
+      {group, list_queues_online_and_offline}
+    ].
+
+%% Group tree. parallel_tests are side-effect free; non_parallel_tests
+%% each mutate shared broker state (reason noted per case); the backing
+%% queue and cluster groups expand the ?*_TESTCASES macros.
+groups() ->
+    [
+      {parallel_tests, [parallel], [
+          amqp_connection_refusal,
+          configurable_server_properties,
+          confirms,
+          credit_flow_settings,
+          dynamic_mirroring,
+          gen_server2_with_state,
+          list_operations_timeout_pass,
+          mcall,
+          {password_hashing, [], [
+              password_hashing,
+              change_password
+            ]},
+          {policy_validation, [parallel, {repeat, 20}], [
+              ha_policy_validation,
+              policy_validation,
+              policy_opts_validation,
+              queue_master_location_policy_validation,
+              queue_modes_policy_validation,
+              vhost_removed_while_updating_policy
+            ]},
+          runtime_parameters,
+          set_disk_free_limit_command,
+          topic_matching,
+          user_management
+        ]},
+      {non_parallel_tests, [], [
+          app_management, %% Restart RabbitMQ.
+          channel_statistics, %% Expect specific statistics.
+          disk_monitor, %% Replace rabbit_misc module.
+          file_handle_cache, %% Change FHC limit.
+          head_message_timestamp_statistics, %% Expect specific statistics.
+          log_management, %% Check log files.
+          log_management_during_startup, %% Check log files.
+          memory_high_watermark, %% Trigger alarm.
+          externally_rotated_logs_are_automatically_reopened, %% Check log files.
+          server_status %% Trigger alarm.
+        ]},
+      {backing_queue_tests, [], [
+          msg_store,
+          {backing_queue_embed_limit_0, [], ?BACKING_QUEUE_TESTCASES},
+          {backing_queue_embed_limit_1024, [], ?BACKING_QUEUE_TESTCASES}
+        ]},
+      {cluster_tests, [], [
+          {from_cluster_node1, [], ?CLUSTER_TESTCASES},
+          {from_cluster_node2, [], ?CLUSTER_TESTCASES}
+        ]},
+
+      %% Test previously executed with the multi-node target.
+      {disconnect_detected_during_alarm, [], [
+          disconnect_detected_during_alarm %% Trigger alarm.
+        ]},
+      {list_consumers_sanity_check, [], [
+          list_consumers_sanity_check
+        ]},
+      {list_queues_online_and_offline, [], [
+          list_queues_online_and_offline %% Stop node B.
+        ]}
+    ].
+
+%% Per-group Common Test properties; only the backing queue group needs
+%% an extended timetrap.
+group(backing_queue_tests) ->
+    [
+      %% Several tests based on lazy queues may take more than 30 minutes.
+      {timetrap, {hours, 1}}
+    ];
+group(_) ->
+    [].
+
+%% -------------------------------------------------------------------
+%% Testsuite setup/teardown.
+%% -------------------------------------------------------------------
+
+%% Standard rabbit_ct_helpers suite setup (logs the environment first).
+init_per_suite(Config) ->
+    rabbit_ct_helpers:log_environment(),
+    rabbit_ct_helpers:run_setup_steps(Config).
+
+%% Standard rabbit_ct_helpers suite teardown.
+end_per_suite(Config) ->
+    rabbit_ct_helpers:run_teardown_steps(Config).
+
+%% Top-level groups (those listed in all/0) get a broker cluster of
+%% their own — size 1 for the two single-node alarm/consumer groups,
+%% 2 otherwise — plus client helpers and the FHC limit tweak. Nested
+%% groups only run their group-specific init.
+init_per_group(Group, Config) ->
+    case lists:member({group, Group}, all()) of
+        true ->
+            ClusterSize = case Group of
+                disconnect_detected_during_alarm -> 1;
+                list_consumers_sanity_check      -> 1;
+                _                                -> 2
+            end,
+            Config1 = rabbit_ct_helpers:set_config(Config, [
+                {rmq_nodename_suffix, Group},
+                {rmq_nodes_count, ClusterSize}
+              ]),
+            rabbit_ct_helpers:run_steps(Config1,
+              rabbit_ct_broker_helpers:setup_steps() ++
+              rabbit_ct_client_helpers:setup_steps() ++ [
+                fun(C) -> init_per_group1(Group, C) end,
+                fun setup_file_handle_cache/1
+              ]);
+        false ->
+            rabbit_ct_helpers:run_steps(Config, [
+                fun(C) -> init_per_group1(Group, C) end
+              ])
+    end.
+
+%% Group-specific initialisation. The backing queue group is skipped
+%% unless the broker uses rabbit_priority_queue as its backing queue
+%% module; the embed-limit groups set queue_index_embed_msgs_below on
+%% the broker; the remaining clauses just record the variant to test.
+init_per_group1(backing_queue_tests, Config) ->
+    Module = rabbit_ct_broker_helpers:rpc(Config, 0,
+      application, get_env, [rabbit, backing_queue_module]),
+    case Module of
+        {ok, rabbit_priority_queue} ->
+            rabbit_ct_broker_helpers:rpc(Config, 0,
+              ?MODULE, setup_backing_queue_test_group, [Config]);
+        _ ->
+            {skip, rabbit_misc:format(
+               "Backing queue module not supported by this test group: ~p~n",
+               [Module])}
+    end;
+init_per_group1(backing_queue_embed_limit_0, Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+      application, set_env, [rabbit, queue_index_embed_msgs_below, 0]),
+    Config;
+init_per_group1(backing_queue_embed_limit_1024, Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+      application, set_env, [rabbit, queue_index_embed_msgs_below, 1024]),
+    Config;
+init_per_group1(variable_queue_default, Config) ->
+    rabbit_ct_helpers:set_config(Config, {variable_queue_type, default});
+init_per_group1(variable_queue_lazy, Config) ->
+    rabbit_ct_helpers:set_config(Config, {variable_queue_type, lazy});
+init_per_group1(from_cluster_node1, Config) ->
+    rabbit_ct_helpers:set_config(Config, {test_direction, {0, 1}});
+init_per_group1(from_cluster_node2, Config) ->
+    rabbit_ct_helpers:set_config(Config, {test_direction, {1, 0}});
+init_per_group1(_, Config) ->
+    Config.
+
+%% Applies the file-handle-cache limit on broker node 0 via RPC.
+setup_file_handle_cache(Config) ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, setup_file_handle_cache1, []),
+    Config.
+
+%% Runs on the broker node: caps the FHC at 10 handles so tests that
+%% depend on handle exhaustion behave deterministically.
+setup_file_handle_cache1() ->
+    %% FIXME: Why are we doing this?
+    application:set_env(rabbit, file_handles_high_watermark, 10),
+    ok = file_handle_cache:set_limit(10),
+    ok.
+
+%% Mirror of init_per_group/2: top-level groups tear down their broker
+%% and client helpers after the group-specific cleanup; nested groups
+%% run only the group-specific cleanup (via end_per_group1 being a
+%% no-op fallthrough).
+end_per_group(Group, Config) ->
+    case lists:member({group, Group}, all()) of
+        true ->
+            rabbit_ct_helpers:run_steps(Config,
+              [fun(C) -> end_per_group1(Group, C) end] ++
+              rabbit_ct_client_helpers:teardown_steps() ++
+              rabbit_ct_broker_helpers:teardown_steps());
+        false ->
+            Config
+    end.
+
+%% Group-specific cleanup: the backing queue group tears down its test
+%% fixtures on the broker; the embed-limit groups restore the original
+%% queue_index_embed_msgs_below value saved in Config.
+end_per_group1(backing_queue_tests, Config) ->
+    rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, teardown_backing_queue_test_group, [Config]);
+end_per_group1(Group, Config)
+when Group =:= backing_queue_embed_limit_0
+orelse Group =:= backing_queue_embed_limit_1024 ->
+    ok = rabbit_ct_broker_helpers:rpc(Config, 0,
+      application, set_env, [rabbit, queue_index_embed_msgs_below,
+        ?config(rmq_queue_index_embed_msgs_below, Config)]),
+    Config;
+end_per_group1(_, Config) ->
+    Config.
+
+%% Per-testcase bookkeeping hook (timing/logging only).
+init_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_started(Config, Testcase).
+
+%% Per-testcase bookkeeping hook (timing/logging only).
+end_per_testcase(Testcase, Config) ->
+    rabbit_ct_helpers:testcase_finished(Config, Testcase).
+
+%% -------------------------------------------------------------------
+%% Application management.
+%% -------------------------------------------------------------------
+
+%% Runs app_management1/1 directly on broker node 0, since the test
+%% drives rabbitmqctl-style control actions from inside the broker.
+app_management(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, app_management1, [Config]).
+
+%% Exercises start/stop/status control actions on the broker node.
+%% stop_app and start_app are each issued twice to confirm they are
+%% idempotent; status/cluster_status/environment must work in both the
+%% stopped and started states.
+app_management1(_Config) ->
+    control_action(wait, [os:getenv("RABBITMQ_PID_FILE")]),
+    %% Starting, stopping and diagnostics.  Note that we don't try
+    %% 'report' when the rabbit app is stopped and that we enable
+    %% tracing for the duration of this function.
+    ok = control_action(trace_on, []),
+    ok = control_action(stop_app, []),
+    ok = control_action(stop_app, []),
+    ok = control_action(status, []),
+    ok = control_action(cluster_status, []),
+    ok = control_action(environment, []),
+    ok = control_action(start_app, []),
+    ok = control_action(start_app, []),
+    ok = control_action(status, []),
+    ok = control_action(report, []),
+    ok = control_action(cluster_status, []),
+    ok = control_action(environment, []),
+    ok = control_action(trace_off, []),
+    passed.
+
+%% -------------------------------------------------------------------
+%% Message store.
+%% -------------------------------------------------------------------
+
+%% Runs the message store test body on broker node 0 via RPC.
+msg_store(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, msg_store1, [Config]).
+
+%% Broad message-store scenario, run on the broker node. Covers: write
+%% confirms, duplicate writes from a second client (ref-count/caching
+%% path), reads from cache and disk, selective recovery across restart,
+%% a large-volume write/remove cycle that forces file GC, and
+%% client_delete_and_terminate. Relies on the helper functions below;
+%% the inline comments narrate each phase.
+msg_store1(_Config) ->
+    restart_msg_store_empty(),
+    MsgIds = [msg_id_bin(M) || M <- lists:seq(1,100)],
+    {MsgIds1stHalf, MsgIds2ndHalf} = lists:split(length(MsgIds) div 2, MsgIds),
+    Ref = rabbit_guid:gen(),
+    {Cap, MSCState} = msg_store_client_init_capture(
+                        ?PERSISTENT_MSG_STORE, Ref),
+    Ref2 = rabbit_guid:gen(),
+    {Cap2, MSC2State} = msg_store_client_init_capture(
+                          ?PERSISTENT_MSG_STORE, Ref2),
+    %% check we don't contain any of the msgs we're about to publish
+    false = msg_store_contains(false, MsgIds, MSCState),
+    %% test confirm logic
+    passed = test_msg_store_confirms([hd(MsgIds)], Cap, MSCState),
+    %% check we don't contain any of the msgs we're about to publish
+    false = msg_store_contains(false, MsgIds, MSCState),
+    %% publish the first half
+    ok = msg_store_write(MsgIds1stHalf, MSCState),
+    %% sync on the first half
+    ok = on_disk_await(Cap, MsgIds1stHalf),
+    %% publish the second half
+    ok = msg_store_write(MsgIds2ndHalf, MSCState),
+    %% check they're all in there
+    true = msg_store_contains(true, MsgIds, MSCState),
+    %% publish the latter half twice so we hit the caching and ref
+    %% count code. We need to do this through a 2nd client since a
+    %% single client is not supposed to write the same message more
+    %% than once without first removing it.
+    ok = msg_store_write(MsgIds2ndHalf, MSC2State),
+    %% check they're still all in there
+    true = msg_store_contains(true, MsgIds, MSCState),
+    %% sync on the 2nd half
+    ok = on_disk_await(Cap2, MsgIds2ndHalf),
+    %% cleanup
+    ok = on_disk_stop(Cap2),
+    ok = rabbit_msg_store:client_delete_and_terminate(MSC2State),
+    ok = on_disk_stop(Cap),
+    %% read them all
+    MSCState1 = msg_store_read(MsgIds, MSCState),
+    %% read them all again - this will hit the cache, not disk
+    MSCState2 = msg_store_read(MsgIds, MSCState1),
+    %% remove them all
+    ok = msg_store_remove(MsgIds, MSCState2),
+    %% check first half doesn't exist
+    false = msg_store_contains(false, MsgIds1stHalf, MSCState2),
+    %% check second half does exist
+    true = msg_store_contains(true, MsgIds2ndHalf, MSCState2),
+    %% read the second half again
+    MSCState3 = msg_store_read(MsgIds2ndHalf, MSCState2),
+    %% read the second half again, just for fun (aka code coverage)
+    MSCState4 = msg_store_read(MsgIds2ndHalf, MSCState3),
+    ok = rabbit_msg_store:client_terminate(MSCState4),
+    %% stop and restart, preserving every other msg in 2nd half
+    ok = rabbit_variable_queue:stop_msg_store(),
+    ok = rabbit_variable_queue:start_msg_store(
+           [], {fun ([]) -> finished;
+                    ([MsgId|MsgIdsTail])
+                      when length(MsgIdsTail) rem 2 == 0 ->
+                        {MsgId, 1, MsgIdsTail};
+                    ([MsgId|MsgIdsTail]) ->
+                        {MsgId, 0, MsgIdsTail}
+                end, MsgIds2ndHalf}),
+    MSCState5 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
+    %% check we have the right msgs left
+    lists:foldl(
+      fun (MsgId, Bool) ->
+              not(Bool = rabbit_msg_store:contains(MsgId, MSCState5))
+      end, false, MsgIds2ndHalf),
+    ok = rabbit_msg_store:client_terminate(MSCState5),
+    %% restart empty
+    restart_msg_store_empty(),
+    MSCState6 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
+    %% check we don't contain any of the msgs
+    false = msg_store_contains(false, MsgIds, MSCState6),
+    %% publish the first half again
+    ok = msg_store_write(MsgIds1stHalf, MSCState6),
+    %% this should force some sort of sync internally otherwise misread
+    ok = rabbit_msg_store:client_terminate(
+           msg_store_read(MsgIds1stHalf, MSCState6)),
+    MSCState7 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
+    ok = msg_store_remove(MsgIds1stHalf, MSCState7),
+    ok = rabbit_msg_store:client_terminate(MSCState7),
+    %% restart empty
+    restart_msg_store_empty(), %% now safe to reuse msg_ids
+    %% push a lot of msgs in... at least 100 files worth
+    {ok, FileSize} = application:get_env(rabbit, msg_store_file_size_limit),
+    PayloadSizeBits = 65536,
+    BigCount = trunc(100 * FileSize / (PayloadSizeBits div 8)),
+    MsgIdsBig = [msg_id_bin(X) || X <- lists:seq(1, BigCount)],
+    Payload = << 0:PayloadSizeBits >>,
+    ok = with_msg_store_client(
+           ?PERSISTENT_MSG_STORE, Ref,
+           fun (MSCStateM) ->
+                   [ok = rabbit_msg_store:write(MsgId, Payload, MSCStateM) ||
+                       MsgId <- MsgIdsBig],
+                   MSCStateM
+           end),
+    %% now read them to ensure we hit the fast client-side reading
+    ok = foreach_with_msg_store_client(
+           ?PERSISTENT_MSG_STORE, Ref,
+           fun (MsgId, MSCStateM) ->
+                   {{ok, Payload}, MSCStateN} = rabbit_msg_store:read(
+                                                  MsgId, MSCStateM),
+                   MSCStateN
+           end, MsgIdsBig),
+    %% .., then 3s by 1...
+    ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref,
+                          [msg_id_bin(X) || X <- lists:seq(BigCount, 1, -3)]),
+    %% .., then remove 3s by 2, from the young end first. This hits
+    %% GC (under 50% good data left, but no empty files. Must GC).
+    ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref,
+                          [msg_id_bin(X) || X <- lists:seq(BigCount-1, 1, -3)]),
+    %% .., then remove 3s by 3, from the young end first. This hits
+    %% GC...
+    ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref,
+                          [msg_id_bin(X) || X <- lists:seq(BigCount-2, 1, -3)]),
+    %% ensure empty
+    ok = with_msg_store_client(
+           ?PERSISTENT_MSG_STORE, Ref,
+           fun (MSCStateM) ->
+                   false = msg_store_contains(false, MsgIdsBig, MSCStateM),
+                   MSCStateM
+           end),
+    %%
+    passed = test_msg_store_client_delete_and_terminate(),
+    %% restart empty
+    restart_msg_store_empty(),
+    passed.
+
+%% Restarts the message store with no recovery refs, i.e. empty.
+restart_msg_store_empty() ->
+    ok = rabbit_variable_queue:stop_msg_store(),
+    ok = rabbit_variable_queue:start_msg_store(
+           undefined, {fun (ok) -> finished end, ok}).
+
+%% Deterministically derives a 16-byte message id from any Erlang term.
+msg_id_bin(X) ->
+    Serialized = term_to_binary(X),
+    erlang:md5(Serialized).
+
+%% Entry loop of the on-disk confirm capture process: idle until a
+%% caller registers the MsgIds it is awaiting, or until told to stop.
+on_disk_capture() ->
+    receive
+        {await, MsgIds, Pid} -> on_disk_capture([], MsgIds, Pid);
+        stop                 -> done
+    end.
+
+%% Active capture loop. State: OnDisk = confirms received that were not
+%% awaited (any such surplus is an error), Awaiting = ids still
+%% expected. Once Awaiting empties, a 200ms quiet period guards against
+%% late surplus confirms before reporting 'arrived'; otherwise a full
+%% ?TIMEOUT wait ends in 'timeout'.
+on_disk_capture([_|_], _Awaiting, Pid) ->
+    Pid ! {self(), surplus};
+on_disk_capture(OnDisk, Awaiting, Pid) ->
+    receive
+        {on_disk, MsgIdsS} ->
+            MsgIds = gb_sets:to_list(MsgIdsS),
+            on_disk_capture(OnDisk ++ (MsgIds -- Awaiting), Awaiting -- MsgIds,
+                            Pid);
+        stop ->
+            done
+    after (case Awaiting of [] -> 200; _ -> ?TIMEOUT end) ->
+            case Awaiting of
+                [] -> Pid ! {self(), arrived}, on_disk_capture();
+                _  -> Pid ! {self(), timeout}
+            end
+    end.
+
+%% Ask the capture process to wait for MsgIds to be confirmed on disk.
+%% Returns ok when they all arrived, or the failure reason reported by
+%% the capture process (surplus | timeout).
+on_disk_await(CapturePid, MsgIds) when is_list(MsgIds) ->
+    CapturePid ! {await, MsgIds, self()},
+    receive
+        {CapturePid, arrived} -> ok;
+        {CapturePid, Outcome} -> Outcome
+    end.
+
+%% Tell the capture process to stop and block until it has terminated,
+%% using a monitor so this also returns if it is already dead.
+on_disk_stop(CapturePid) ->
+    Ref = erlang:monitor(process, CapturePid),
+    CapturePid ! stop,
+    receive
+        {'DOWN', Ref, process, CapturePid, _Reason} -> ok
+    end.
+
+%% Initialise a msg_store client whose confirm callback forwards every
+%% on-disk notification to a freshly spawned capture process. Returns
+%% {CapturePid, ClientState}.
+msg_store_client_init_capture(MsgStore, Ref) ->
+ Pid = spawn(fun on_disk_capture/0),
+ {Pid, rabbit_msg_store:client_init(
+ MsgStore, Ref, fun (MsgIds, _ActionTaken) ->
+ Pid ! {on_disk, MsgIds}
+ end, undefined)}.
+
+%% Assert that contains/2 returns Atom (true or false) for every id in
+%% MsgIds. The fold fun's guard makes a non-matching result crash with
+%% function_clause, so this doubles as the assertion itself.
+msg_store_contains(Atom, MsgIds, MSCState) ->
+ Atom = lists:foldl(
+ fun (MsgId, Atom1) when Atom1 =:= Atom ->
+ rabbit_msg_store:contains(MsgId, MSCState) end,
+ Atom, MsgIds).
+
+%% Read every id from the store, asserting that each message's payload
+%% equals its id (the convention used by msg_store_write/2). Returns
+%% the threaded client state.
+msg_store_read(MsgIds, MSCState) ->
+    ReadOne = fun (MsgId, StateIn) ->
+                  {{ok, MsgId}, StateOut} =
+                      rabbit_msg_store:read(MsgId, StateIn),
+                  StateOut
+              end,
+    lists:foldl(ReadOne, MSCState, MsgIds).
+
+%% Write each id to the store using the id itself as the payload,
+%% asserting every write returns ok. Returns ok.
+msg_store_write(MsgIds, MSCState) ->
+    lists:foreach(
+      fun (MsgId) ->
+              ok = rabbit_msg_store:write(MsgId, MsgId, MSCState)
+      end, MsgIds).
+
+%% As msg_store_write/2 but via write_flow/3, i.e. subject to credit
+%% flow control. Returns ok.
+msg_store_write_flow(MsgIds, MSCState) ->
+    lists:foreach(
+      fun (MsgId) ->
+              ok = rabbit_msg_store:write_flow(MsgId, MsgId, MSCState)
+      end, MsgIds).
+
+%% Remove the given ids from the store via an existing client.
+msg_store_remove(MsgIds, MSCState) ->
+ rabbit_msg_store:remove(MsgIds, MSCState).
+
+%% Remove the given ids using a throwaway client for (MsgStore, Ref),
+%% terminating the client afterwards.
+msg_store_remove(MsgStore, Ref, MsgIds) ->
+    Remove = fun (MSCState) ->
+                 ok = msg_store_remove(MsgIds, MSCState),
+                 MSCState
+             end,
+    with_msg_store_client(MsgStore, Ref, Remove).
+
+%% Run Fun with a fresh msg_store client and terminate the client state
+%% Fun returns.
+with_msg_store_client(MsgStore, Ref, Fun) ->
+    Client0 = msg_store_client_init(MsgStore, Ref),
+    Client1 = Fun(Client0),
+    rabbit_msg_store:client_terminate(Client1).
+
+%% Fold Fun(MsgId, ClientState) over L with a fresh msg_store client,
+%% then terminate the final client state.
+%% (Fun already has the fold fun's arity, so it is passed to foldl
+%% directly instead of through a redundant wrapper fun.)
+foreach_with_msg_store_client(MsgStore, Ref, Fun, L) ->
+    rabbit_msg_store:client_terminate(
+      lists:foldl(Fun, msg_store_client_init(MsgStore, Ref), L)).
+
+%% Exercise every write/remove interleaving and assert, via the capture
+%% process Cap, exactly which confirms reach the client callback.
+test_msg_store_confirms(MsgIds, Cap, MSCState) ->
+ %% write -> confirmed
+ ok = msg_store_write(MsgIds, MSCState),
+ ok = on_disk_await(Cap, MsgIds),
+ %% remove -> _
+ ok = msg_store_remove(MsgIds, MSCState),
+ ok = on_disk_await(Cap, []),
+ %% write, remove -> confirmed
+ ok = msg_store_write(MsgIds, MSCState),
+ ok = msg_store_remove(MsgIds, MSCState),
+ ok = on_disk_await(Cap, MsgIds),
+ %% write, remove, write -> confirmed, confirmed
+ ok = msg_store_write(MsgIds, MSCState),
+ ok = msg_store_remove(MsgIds, MSCState),
+ ok = msg_store_write(MsgIds, MSCState),
+ ok = on_disk_await(Cap, MsgIds ++ MsgIds),
+ %% remove, write -> confirmed
+ ok = msg_store_remove(MsgIds, MSCState),
+ ok = msg_store_write(MsgIds, MSCState),
+ ok = on_disk_await(Cap, MsgIds),
+ %% remove, write, remove -> confirmed
+ ok = msg_store_remove(MsgIds, MSCState),
+ ok = msg_store_write(MsgIds, MSCState),
+ ok = msg_store_remove(MsgIds, MSCState),
+ ok = on_disk_await(Cap, MsgIds),
+ %% confirmation on timer-based sync
+ passed = test_msg_store_confirm_timer(),
+ passed.
+
+%% Check that a confirm for MsgId eventually arrives via the store's
+%% timer-based sync, while keeping the store busy with unrelated writes
+%% (see msg_store_keep_busy_until_confirm/3).
+test_msg_store_confirm_timer() ->
+ Ref = rabbit_guid:gen(),
+ MsgId = msg_id_bin(1),
+ Self = self(),
+ MSCState = rabbit_msg_store:client_init(
+ ?PERSISTENT_MSG_STORE, Ref,
+ %% Only the confirm for our MsgId matters; busy-work confirms
+ %% for msg_id_bin(2) are ignored.
+ fun (MsgIds, _ActionTaken) ->
+ case gb_sets:is_member(MsgId, MsgIds) of
+ true -> Self ! on_disk;
+ false -> ok
+ end
+ end, undefined),
+ ok = msg_store_write([MsgId], MSCState),
+ ok = msg_store_keep_busy_until_confirm([msg_id_bin(2)], MSCState, false),
+ ok = msg_store_remove([MsgId], MSCState),
+ ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
+ passed.
+
+%% Loop writing+removing MsgIds until the 'on_disk' confirm arrives,
+%% cooperating with credit_flow: when blocked we wait up to ?MAX_WAIT
+%% for bump-credit messages instead of issuing more writes.
+msg_store_keep_busy_until_confirm(MsgIds, MSCState, Blocked) ->
+ After = case Blocked of
+ false -> 0;
+ true -> ?MAX_WAIT
+ end,
+ Recurse = fun () -> msg_store_keep_busy_until_confirm(
+ MsgIds, MSCState, credit_flow:blocked()) end,
+ receive
+ on_disk -> ok;
+ {bump_credit, Msg} -> credit_flow:handle_bump_msg(Msg),
+ Recurse()
+ after After ->
+ ok = msg_store_write_flow(MsgIds, MSCState),
+ ok = msg_store_remove(MsgIds, MSCState),
+ Recurse()
+ end.
+
+%% Write some messages and then delete-and-terminate the client,
+%% exercising the store's 'dying client' fast path for writes.
+test_msg_store_client_delete_and_terminate() ->
+ restart_msg_store_empty(),
+ MsgIds = [msg_id_bin(M) || M <- lists:seq(1, 10)],
+ Ref = rabbit_guid:gen(),
+ MSCState = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
+ ok = msg_store_write(MsgIds, MSCState),
+ %% test the 'dying client' fast path for writes
+ ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
+ passed.
+
+%% -------------------------------------------------------------------
+%% Backing queue.
+%% -------------------------------------------------------------------
+
+%% Group setup: shrink the journal limit so tests hit segment/journal
+%% boundaries quickly, and stash the original values in Config so
+%% teardown can restore them.
+setup_backing_queue_test_group(Config) ->
+ {ok, FileSizeLimit} =
+ application:get_env(rabbit, msg_store_file_size_limit),
+ application:set_env(rabbit, msg_store_file_size_limit, 512),
+ {ok, MaxJournal} =
+ application:get_env(rabbit, queue_index_max_journal_entries),
+ application:set_env(rabbit, queue_index_max_journal_entries, 128),
+ %% NOTE(review): msg_store_file_size_limit is restored to its original
+ %% value here, immediately after being set to 512 above, with nothing
+ %% reading it in between — verify this is intentional (the teardown's
+ %% FIXME suggests the setup/teardown split is incomplete).
+ application:set_env(rabbit, msg_store_file_size_limit,
+ FileSizeLimit),
+ {ok, Bytes} =
+ application:get_env(rabbit, queue_index_embed_msgs_below),
+ rabbit_ct_helpers:set_config(Config, [
+ {rmq_queue_index_max_journal_entries, MaxJournal},
+ {rmq_queue_index_embed_msgs_below, Bytes}
+ ]).
+
+%% Group teardown: restore the journal limit saved in setup and restart
+%% the rabbit application to get a clean supervision tree.
+teardown_backing_queue_test_group(Config) ->
+ %% FIXME: Undo all the setup function did.
+ application:set_env(rabbit, queue_index_max_journal_entries,
+ ?config(rmq_queue_index_max_journal_entries, Config)),
+ %% We will have restarted the message store, and thus changed
+ %% the order of the children of rabbit_sup. This will cause
+ %% problems if there are subsequent failures - see bug 24262.
+ ok = restart_app(),
+ Config.
+
+%% CT entry point: run the queue-index checks on broker node 0 via RPC.
+bq_queue_index(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, bq_queue_index1, [Config]).
+
+%% Core queue-index checks, run on the broker node: publish/deliver/
+%% ack/restart cycles across segment boundaries, plus the segment-file
+%% auto-deletion and journal/segment-combination paths.
+bq_queue_index1(_Config) ->
+ SegmentSize = rabbit_queue_index:next_segment_boundary(0),
+ TwoSegs = SegmentSize + SegmentSize,
+ MostOfASegment = trunc(SegmentSize*0.75),
+ SeqIdsA = lists:seq(0, MostOfASegment-1),
+ SeqIdsB = lists:seq(MostOfASegment, 2*MostOfASegment),
+ SeqIdsC = lists:seq(0, trunc(SegmentSize/2)),
+ SeqIdsD = lists:seq(0, SegmentSize*4),
+
+ %% Transient vs persistent publishes across restarts.
+ with_empty_test_queue(
+ fun (Qi0) ->
+ {0, 0, Qi1} = rabbit_queue_index:bounds(Qi0),
+ {Qi2, SeqIdsMsgIdsA} = queue_index_publish(SeqIdsA, false, Qi1),
+ {0, SegmentSize, Qi3} = rabbit_queue_index:bounds(Qi2),
+ {ReadA, Qi4} = rabbit_queue_index:read(0, SegmentSize, Qi3),
+ ok = verify_read_with_published(false, false, ReadA,
+ lists:reverse(SeqIdsMsgIdsA)),
+ %% should get length back as 0, as all the msgs were transient
+ {0, 0, Qi6} = restart_test_queue(Qi4),
+ {0, 0, Qi7} = rabbit_queue_index:bounds(Qi6),
+ {Qi8, SeqIdsMsgIdsB} = queue_index_publish(SeqIdsB, true, Qi7),
+ {0, TwoSegs, Qi9} = rabbit_queue_index:bounds(Qi8),
+ {ReadB, Qi10} = rabbit_queue_index:read(0, SegmentSize, Qi9),
+ ok = verify_read_with_published(false, true, ReadB,
+ lists:reverse(SeqIdsMsgIdsB)),
+ %% should get length back as MostOfASegment
+ LenB = length(SeqIdsB),
+ BytesB = LenB * 10,
+ {LenB, BytesB, Qi12} = restart_test_queue(Qi10),
+ {0, TwoSegs, Qi13} = rabbit_queue_index:bounds(Qi12),
+ Qi14 = rabbit_queue_index:deliver(SeqIdsB, Qi13),
+ {ReadC, Qi15} = rabbit_queue_index:read(0, SegmentSize, Qi14),
+ ok = verify_read_with_published(true, true, ReadC,
+ lists:reverse(SeqIdsMsgIdsB)),
+ Qi16 = rabbit_queue_index:ack(SeqIdsB, Qi15),
+ Qi17 = rabbit_queue_index:flush(Qi16),
+ %% Everything will have gone now because #pubs == #acks
+ {0, 0, Qi18} = rabbit_queue_index:bounds(Qi17),
+ %% should get length back as 0 because all persistent
+ %% msgs have been acked
+ {0, 0, Qi19} = restart_test_queue(Qi18),
+ Qi19
+ end),
+
+ %% These next bits are just to hit the auto deletion of segment files.
+ %% First, partials:
+ %% a) partial pub+del+ack, then move to new segment
+ with_empty_test_queue(
+ fun (Qi0) ->
+ {Qi1, _SeqIdsMsgIdsC} = queue_index_publish(SeqIdsC,
+ false, Qi0),
+ Qi2 = rabbit_queue_index:deliver(SeqIdsC, Qi1),
+ Qi3 = rabbit_queue_index:ack(SeqIdsC, Qi2),
+ Qi4 = rabbit_queue_index:flush(Qi3),
+ {Qi5, _SeqIdsMsgIdsC1} = queue_index_publish([SegmentSize],
+ false, Qi4),
+ Qi5
+ end),
+
+ %% b) partial pub+del, then move to new segment, then ack all in old segment
+ with_empty_test_queue(
+ fun (Qi0) ->
+ {Qi1, _SeqIdsMsgIdsC2} = queue_index_publish(SeqIdsC,
+ false, Qi0),
+ Qi2 = rabbit_queue_index:deliver(SeqIdsC, Qi1),
+ {Qi3, _SeqIdsMsgIdsC3} = queue_index_publish([SegmentSize],
+ false, Qi2),
+ Qi4 = rabbit_queue_index:ack(SeqIdsC, Qi3),
+ rabbit_queue_index:flush(Qi4)
+ end),
+
+ %% c) just fill up several segments of all pubs, then +dels, then +acks
+ with_empty_test_queue(
+ fun (Qi0) ->
+ {Qi1, _SeqIdsMsgIdsD} = queue_index_publish(SeqIdsD,
+ false, Qi0),
+ Qi2 = rabbit_queue_index:deliver(SeqIdsD, Qi1),
+ Qi3 = rabbit_queue_index:ack(SeqIdsD, Qi2),
+ rabbit_queue_index:flush(Qi3)
+ end),
+
+ %% d) get messages in all states to a segment, then flush, then do
+ %% the same again, don't flush and read. This will hit all
+ %% possibilities in combining the segment with the journal.
+ with_empty_test_queue(
+ fun (Qi0) ->
+ {Qi1, [Seven,Five,Four|_]} = queue_index_publish([0,1,2,4,5,7],
+ false, Qi0),
+ Qi2 = rabbit_queue_index:deliver([0,1,4], Qi1),
+ Qi3 = rabbit_queue_index:ack([0], Qi2),
+ Qi4 = rabbit_queue_index:flush(Qi3),
+ {Qi5, [Eight,Six|_]} = queue_index_publish([3,6,8], false, Qi4),
+ Qi6 = rabbit_queue_index:deliver([2,3,5,6], Qi5),
+ Qi7 = rabbit_queue_index:ack([1,2,3], Qi6),
+ {[], Qi8} = rabbit_queue_index:read(0, 4, Qi7),
+ {ReadD, Qi9} = rabbit_queue_index:read(4, 7, Qi8),
+ ok = verify_read_with_published(true, false, ReadD,
+ [Four, Five, Six]),
+ {ReadE, Qi10} = rabbit_queue_index:read(7, 9, Qi9),
+ ok = verify_read_with_published(false, false, ReadE,
+ [Seven, Eight]),
+ Qi10
+ end),
+
+ %% e) as for (d), but use terminate instead of read, which will
+ %% exercise journal_minus_segment, not segment_plus_journal.
+ with_empty_test_queue(
+ fun (Qi0) ->
+ {Qi1, _SeqIdsMsgIdsE} = queue_index_publish([0,1,2,4,5,7],
+ true, Qi0),
+ Qi2 = rabbit_queue_index:deliver([0,1,4], Qi1),
+ Qi3 = rabbit_queue_index:ack([0], Qi2),
+ {5, 50, Qi4} = restart_test_queue(Qi3),
+ {Qi5, _SeqIdsMsgIdsF} = queue_index_publish([3,6,8], true, Qi4),
+ Qi6 = rabbit_queue_index:deliver([2,3,5,6], Qi5),
+ Qi7 = rabbit_queue_index:ack([1,2,3], Qi6),
+ {5, 50, Qi8} = restart_test_queue(Qi7),
+ Qi8
+ end),
+
+ %% Restart cleanly so subsequent tests see a fresh variable queue.
+ ok = rabbit_variable_queue:stop(),
+ {ok, _} = rabbit_variable_queue:start([]),
+
+ passed.
+
+%% CT entry point: run the message-properties round-trip on node 0.
+bq_queue_index_props(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, bq_queue_index_props1, [Config]).
+
+%% Publish one message with explicit properties and assert they come
+%% back intact from a read.
+bq_queue_index_props1(_Config) ->
+ with_empty_test_queue(
+ fun(Qi0) ->
+ MsgId = rabbit_guid:gen(),
+ Props = #message_properties{expiry=12345, size = 10},
+ Qi1 = rabbit_queue_index:publish(
+ MsgId, 1, Props, true, infinity, Qi0),
+ {[{MsgId, 1, Props, _, _}], Qi2} =
+ rabbit_queue_index:read(1, 2, Qi1),
+ Qi2
+ end),
+
+ ok = rabbit_variable_queue:stop(),
+ {ok, _} = rabbit_variable_queue:start([]),
+
+ passed.
+
+%% CT entry point: run the msg-store file-deletion-callback check on
+%% broker node 0 via RPC.
+bq_variable_queue_delete_msg_store_files_callback(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, bq_variable_queue_delete_msg_store_files_callback1, [Config]).
+
+%% Fill a durable queue with large payloads, page it to disk, then get
+%% and purge so the msg store's close_fds callback fires before the
+%% queue is deleted.
+bq_variable_queue_delete_msg_store_files_callback1(Config) ->
+ ok = restart_msg_store_empty(),
+ {new, #amqqueue { pid = QPid, name = QName } = Q} =
+ rabbit_amqqueue:declare(
+ queue_name(Config,
+ <<"bq_variable_queue_delete_msg_store_files_callback-q">>),
+ true, false, [], none),
+ Payload = <<0:8388608>>, %% 1MB
+ Count = 30,
+ publish_and_confirm(Q, Payload, Count),
+
+ %% Force everything to disk (ram duration target 0).
+ rabbit_amqqueue:set_ram_duration_target(QPid, 0),
+
+ {ok, Limiter} = rabbit_limiter:start_link(no_id),
+
+ CountMinusOne = Count - 1,
+ {ok, CountMinusOne, {QName, QPid, _AckTag, false, _Msg}} =
+ rabbit_amqqueue:basic_get(Q, self(), true, Limiter),
+ {ok, CountMinusOne} = rabbit_amqqueue:purge(Q),
+
+ %% give the queue a second to receive the close_fds callback msg
+ timer:sleep(1000),
+
+ rabbit_amqqueue:delete(Q, false, false),
+ passed.
+
+%% CT entry point: run the queue-recovery check on broker node 0.
+bq_queue_recover(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, bq_queue_recover1, [Config]).
+
+%% Kill a durable queue (and its supervisor) after publishing, recover
+%% the vhost, and assert all messages survive, marked redelivered.
+bq_queue_recover1(Config) ->
+ Count = 2 * rabbit_queue_index:next_segment_boundary(0),
+ {new, #amqqueue { pid = QPid, name = QName } = Q} =
+ rabbit_amqqueue:declare(queue_name(Config, <<"bq_queue_recover-q">>),
+ true, false, [], none),
+ publish_and_confirm(Q, <<>>, Count),
+
+ %% Kill the supervisor too so the queue is not simply restarted.
+ SupPid = rabbit_ct_broker_helpers:get_queue_sup_pid(QPid),
+ true = is_pid(SupPid),
+ exit(SupPid, kill),
+ exit(QPid, kill),
+ MRef = erlang:monitor(process, QPid),
+ receive {'DOWN', MRef, process, QPid, _Info} -> ok
+ after 10000 -> exit(timeout_waiting_for_queue_death)
+ end,
+ rabbit_amqqueue:stop(),
+ rabbit_amqqueue:start(rabbit_amqqueue:recover()),
+ {ok, Limiter} = rabbit_limiter:start_link(no_id),
+ rabbit_amqqueue:with_or_die(
+ QName,
+ fun (Q1 = #amqqueue { pid = QPid1 }) ->
+ CountMinusOne = Count - 1,
+ %% 'true' here asserts the message is flagged redelivered.
+ {ok, CountMinusOne, {QName, QPid1, _AckTag, true, _Msg}} =
+ rabbit_amqqueue:basic_get(Q1, self(), false, Limiter),
+ exit(QPid1, shutdown),
+ VQ1 = variable_queue_init(Q, true),
+ {{_Msg1, true, _AckTag1}, VQ2} =
+ rabbit_variable_queue:fetch(true, VQ1),
+ CountMinusOne = rabbit_variable_queue:len(VQ2),
+ _VQ3 = rabbit_variable_queue:delete_and_terminate(shutdown, VQ2),
+ ok = rabbit_amqqueue:internal_delete(QName)
+ end),
+ passed.
+
+%% CT entry point: run on node 0, then against a fresh variable queue
+%% of the configured type.
+variable_queue_dynamic_duration_change(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_dynamic_duration_change1, [Config]).
+
+variable_queue_dynamic_duration_change1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_dynamic_duration_change2/1,
+ ?config(variable_queue_type, Config)).
+
+%% Vary the ram duration target through several values while churning
+%% publishes/fetches/acks, then drain and assert the queue empties.
+variable_queue_dynamic_duration_change2(VQ0) ->
+ SegmentSize = rabbit_queue_index:next_segment_boundary(0),
+
+ %% start by sending in a couple of segments worth
+ Len = 2*SegmentSize,
+ VQ1 = variable_queue_publish(false, Len, VQ0),
+ %% squeeze and relax queue
+ Churn = Len div 32,
+ VQ2 = publish_fetch_and_ack(Churn, Len, VQ1),
+
+ {Duration, VQ3} = rabbit_variable_queue:ram_duration(VQ2),
+ VQ7 = lists:foldl(
+ fun (Duration1, VQ4) ->
+ {_Duration, VQ5} = rabbit_variable_queue:ram_duration(VQ4),
+ VQ6 = variable_queue_set_ram_duration_target(
+ Duration1, VQ5),
+ publish_fetch_and_ack(Churn, Len, VQ6)
+ end, VQ3, [Duration / 4, 0, Duration / 4, infinity]),
+
+ %% drain
+ {VQ8, AckTags} = variable_queue_fetch(Len, false, false, Len, VQ7),
+ {_Guids, VQ9} = rabbit_variable_queue:ack(AckTags, VQ8),
+ {empty, VQ10} = rabbit_variable_queue:fetch(true, VQ9),
+
+ VQ10.
+
+%% CT entry point: run on node 0, then against a fresh variable queue
+%% of the configured type.
+variable_queue_partial_segments_delta_thing(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_partial_segments_delta_thing1, [Config]).
+
+variable_queue_partial_segments_delta_thing1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_partial_segments_delta_thing2/1,
+ ?config(variable_queue_type, Config)).
+
+%% Push 1.5 segments of persistent messages to disk and assert the
+%% expected split between q3 and delta at each stage.
+variable_queue_partial_segments_delta_thing2(VQ0) ->
+ SegmentSize = rabbit_queue_index:next_segment_boundary(0),
+ HalfSegment = SegmentSize div 2,
+ OneAndAHalfSegment = SegmentSize + HalfSegment,
+ VQ1 = variable_queue_publish(true, OneAndAHalfSegment, VQ0),
+ {_Duration, VQ2} = rabbit_variable_queue:ram_duration(VQ1),
+ VQ3 = check_variable_queue_status(
+ variable_queue_set_ram_duration_target(0, VQ2),
+ %% one segment in q3, and half a segment in delta
+ [{delta, {delta, SegmentSize, HalfSegment, OneAndAHalfSegment}},
+ {q3, SegmentSize},
+ {len, SegmentSize + HalfSegment}]),
+ VQ4 = variable_queue_set_ram_duration_target(infinity, VQ3),
+ VQ5 = check_variable_queue_status(
+ variable_queue_publish(true, 1, VQ4),
+ %% one alpha, but it's in the same segment as the deltas
+ [{q1, 1},
+ {delta, {delta, SegmentSize, HalfSegment, OneAndAHalfSegment}},
+ {q3, SegmentSize},
+ {len, SegmentSize + HalfSegment + 1}]),
+ {VQ6, AckTags} = variable_queue_fetch(SegmentSize, true, false,
+ SegmentSize + HalfSegment + 1, VQ5),
+ VQ7 = check_variable_queue_status(
+ VQ6,
+ %% the half segment should now be in q3
+ [{q1, 1},
+ {delta, {delta, undefined, 0, undefined}},
+ {q3, HalfSegment},
+ {len, HalfSegment + 1}]),
+ {VQ8, AckTags1} = variable_queue_fetch(HalfSegment + 1, true, false,
+ HalfSegment + 1, VQ7),
+ {_Guids, VQ9} = rabbit_variable_queue:ack(AckTags ++ AckTags1, VQ8),
+ %% should be empty now
+ {empty, VQ10} = rabbit_variable_queue:fetch(true, VQ9),
+ VQ10.
+
+%% CT entry point: run on node 0, then against a fresh variable queue
+%% of the configured type.
+variable_queue_all_the_bits_not_covered_elsewhere_A(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_all_the_bits_not_covered_elsewhere_A1, [Config]).
+
+variable_queue_all_the_bits_not_covered_elsewhere_A1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_all_the_bits_not_covered_elsewhere_A2/1,
+ ?config(variable_queue_type, Config)).
+
+%% Mixed persistent/transient publishes, terminate, then re-init with
+%% recovery and check only the persistent messages survive.
+variable_queue_all_the_bits_not_covered_elsewhere_A2(VQ0) ->
+ Count = 2 * rabbit_queue_index:next_segment_boundary(0),
+ VQ1 = variable_queue_publish(true, Count, VQ0),
+ VQ2 = variable_queue_publish(false, Count, VQ1),
+ VQ3 = variable_queue_set_ram_duration_target(0, VQ2),
+ {VQ4, _AckTags} = variable_queue_fetch(Count, true, false,
+ Count + Count, VQ3),
+ {VQ5, _AckTags1} = variable_queue_fetch(Count, false, false,
+ Count, VQ4),
+ _VQ6 = rabbit_variable_queue:terminate(shutdown, VQ5),
+ VQ7 = variable_queue_init(test_amqqueue(true), true),
+ {{_Msg1, true, _AckTag1}, VQ8} = rabbit_variable_queue:fetch(true, VQ7),
+ Count1 = rabbit_variable_queue:len(VQ8),
+ VQ9 = variable_queue_publish(false, 1, VQ8),
+ VQ10 = variable_queue_set_ram_duration_target(0, VQ9),
+ {VQ11, _AckTags2} = variable_queue_fetch(Count1, true, true, Count, VQ10),
+ {VQ12, _AckTags3} = variable_queue_fetch(1, false, false, 1, VQ11),
+ VQ12.
+
+%% CT entry point: run on node 0, then against a fresh variable queue
+%% of the configured type.
+variable_queue_all_the_bits_not_covered_elsewhere_B(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_all_the_bits_not_covered_elsewhere_B1, [Config]).
+
+variable_queue_all_the_bits_not_covered_elsewhere_B1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_all_the_bits_not_covered_elsewhere_B2/1,
+ ?config(variable_queue_type, Config)).
+
+%% Transient publishes + requeue + timeout, then terminate and recover:
+%% nothing should survive (all messages were transient).
+variable_queue_all_the_bits_not_covered_elsewhere_B2(VQ0) ->
+ VQ1 = variable_queue_set_ram_duration_target(0, VQ0),
+ VQ2 = variable_queue_publish(false, 4, VQ1),
+ {VQ3, AckTags} = variable_queue_fetch(2, false, false, 4, VQ2),
+ {_Guids, VQ4} =
+ rabbit_variable_queue:requeue(AckTags, VQ3),
+ VQ5 = rabbit_variable_queue:timeout(VQ4),
+ _VQ6 = rabbit_variable_queue:terminate(shutdown, VQ5),
+ VQ7 = variable_queue_init(test_amqqueue(true), true),
+ {empty, VQ8} = rabbit_variable_queue:fetch(false, VQ7),
+ VQ8.
+
+%% CT entry point: run on node 0, then against a fresh variable queue
+%% of the configured type.
+variable_queue_drop(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_drop1, [Config]).
+
+variable_queue_drop1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_drop2/1,
+ ?config(variable_queue_type, Config)).
+
+%% Exercise drop/2 with and without AckRequired, including requeue of a
+%% dropped-but-acked message.
+variable_queue_drop2(VQ0) ->
+    %% start by sending a message
+    VQ1 = variable_queue_publish(false, 1, VQ0),
+    %% drop message with AckRequired = true
+    {{MsgId, AckTag}, VQ2} = rabbit_variable_queue:drop(true, VQ1),
+    true = rabbit_variable_queue:is_empty(VQ2),
+    %% Fixed: previously compared against the misspelled atom
+    %% 'undefinded', which made this assertion vacuously true.
+    true = AckTag =/= undefined,
+    %% drop again -> empty
+    {empty, VQ3} = rabbit_variable_queue:drop(false, VQ2),
+    %% requeue
+    {[MsgId], VQ4} = rabbit_variable_queue:requeue([AckTag], VQ3),
+    %% drop message with AckRequired = false
+    {{MsgId, undefined}, VQ5} = rabbit_variable_queue:drop(false, VQ4),
+    true = rabbit_variable_queue:is_empty(VQ5),
+    VQ5.
+
+%% CT entry point: run on node 0, then against a fresh variable queue
+%% of the configured type.
+variable_queue_fold_msg_on_disk(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_fold_msg_on_disk1, [Config]).
+
+variable_queue_fold_msg_on_disk1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_fold_msg_on_disk2/1,
+ ?config(variable_queue_type, Config)).
+
+%% ackfold over a persistent, already-fetched message (i.e. one whose
+%% body lives in the msg store, not in RAM).
+variable_queue_fold_msg_on_disk2(VQ0) ->
+ VQ1 = variable_queue_publish(true, 1, VQ0),
+ {VQ2, AckTags} = variable_queue_fetch(1, true, false, 1, VQ1),
+ {ok, VQ3} = rabbit_variable_queue:ackfold(fun (_M, _A, ok) -> ok end,
+ ok, VQ2, AckTags),
+ VQ3.
+
+%% CT entry point: run on node 0, then against a fresh variable queue
+%% of the configured type.
+variable_queue_dropfetchwhile(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_dropfetchwhile1, [Config]).
+
+variable_queue_dropfetchwhile1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_dropfetchwhile2/1,
+ ?config(variable_queue_type, Config)).
+
+%% fetchwhile/dropwhile over messages with sequential expiry values:
+%% both must stop at the first message whose expiry exceeds 5.
+variable_queue_dropfetchwhile2(VQ0) ->
+ Count = 10,
+
+ %% add messages with sequential expiry
+ VQ1 = variable_queue_publish(
+ false, 1, Count,
+ fun (N, Props) -> Props#message_properties{expiry = N} end,
+ fun erlang:term_to_binary/1, VQ0),
+
+ %% fetch the first 5 messages
+ {#message_properties{expiry = 6}, {Msgs, AckTags}, VQ2} =
+ rabbit_variable_queue:fetchwhile(
+ fun (#message_properties{expiry = Expiry}) -> Expiry =< 5 end,
+ fun (Msg, AckTag, {MsgAcc, AckAcc}) ->
+ {[Msg | MsgAcc], [AckTag | AckAcc]}
+ end, {[], []}, VQ1),
+ true = lists:seq(1, 5) == [msg2int(M) || M <- lists:reverse(Msgs)],
+
+ %% requeue them
+ {_MsgIds, VQ3} = rabbit_variable_queue:requeue(AckTags, VQ2),
+
+ %% drop the first 5 messages
+ {#message_properties{expiry = 6}, VQ4} =
+ rabbit_variable_queue:dropwhile(
+ fun (#message_properties {expiry = Expiry}) -> Expiry =< 5 end, VQ3),
+
+ %% fetch 5
+ VQ5 = lists:foldl(fun (N, VQN) ->
+ {{Msg, _, _}, VQM} =
+ rabbit_variable_queue:fetch(false, VQN),
+ true = msg2int(Msg) == N,
+ VQM
+ end, VQ4, lists:seq(6, Count)),
+
+ %% should be empty now
+ true = rabbit_variable_queue:is_empty(VQ5),
+
+ VQ5.
+
+%% CT entry point: run on node 0, then against a fresh variable queue
+%% of the configured type.
+variable_queue_dropwhile_varying_ram_duration(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_dropwhile_varying_ram_duration1, [Config]).
+
+variable_queue_dropwhile_varying_ram_duration1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_dropwhile_varying_ram_duration2/1,
+ ?config(variable_queue_type, Config)).
+
+%% dropwhile with an always-false predicate, under both 0 and infinity
+%% ram duration targets.
+variable_queue_dropwhile_varying_ram_duration2(VQ0) ->
+ test_dropfetchwhile_varying_ram_duration(
+ fun (VQ1) ->
+ {_, VQ2} = rabbit_variable_queue:dropwhile(
+ fun (_) -> false end, VQ1),
+ VQ2
+ end, VQ0).
+
+%% CT entry point: run on node 0, then against a fresh variable queue
+%% of the configured type.
+variable_queue_fetchwhile_varying_ram_duration(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_fetchwhile_varying_ram_duration1, [Config]).
+
+variable_queue_fetchwhile_varying_ram_duration1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_fetchwhile_varying_ram_duration2/1,
+ ?config(variable_queue_type, Config)).
+
+%% fetchwhile with an always-false predicate, under both 0 and infinity
+%% ram duration targets.
+variable_queue_fetchwhile_varying_ram_duration2(VQ0) ->
+ test_dropfetchwhile_varying_ram_duration(
+ fun (VQ1) ->
+ {_, ok, VQ2} = rabbit_variable_queue:fetchwhile(
+ fun (_) -> false end,
+ fun (_, _, A) -> A end,
+ ok, VQ1),
+ VQ2
+ end, VQ0).
+
+%% Apply Fun to a single-message queue twice: once with everything
+%% paged out (target 0) and once fully in RAM (target infinity).
+test_dropfetchwhile_varying_ram_duration(Fun, VQ0) ->
+    Steps = [fun (VQ) -> variable_queue_publish(false, 1, VQ) end,
+             fun (VQ) -> variable_queue_set_ram_duration_target(0, VQ) end,
+             Fun,
+             fun (VQ) -> variable_queue_set_ram_duration_target(infinity, VQ) end,
+             fun (VQ) -> variable_queue_publish(false, 1, VQ) end,
+             Fun],
+    lists:foldl(fun (Step, VQ) -> Step(VQ) end, VQ0, Steps).
+
+%% CT entry point: run on node 0, then against a fresh variable queue
+%% of the configured type.
+variable_queue_ack_limiting(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_ack_limiting1, [Config]).
+
+variable_queue_ack_limiting1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_ack_limiting2/1,
+ ?config(variable_queue_type, Config)).
+
+%% Check the RAM-resident message/ack counters, before and after forcing
+%% everything (including pending acks) to disk with a 0 duration target.
+variable_queue_ack_limiting2(VQ0) ->
+ %% start by sending in a bunch of messages
+ Len = 1024,
+ VQ1 = variable_queue_publish(false, Len, VQ0),
+
+ %% squeeze and relax queue
+ Churn = Len div 32,
+ VQ2 = publish_fetch_and_ack(Churn, Len, VQ1),
+
+ %% update stats for duration
+ {_Duration, VQ3} = rabbit_variable_queue:ram_duration(VQ2),
+
+ %% fetch half the messages
+ {VQ4, _AckTags} = variable_queue_fetch(Len div 2, false, false, Len, VQ3),
+
+ VQ5 = check_variable_queue_status(
+ VQ4, [{len, Len div 2},
+ {messages_unacknowledged_ram, Len div 2},
+ {messages_ready_ram, Len div 2},
+ {messages_ram, Len}]),
+
+ %% ensure all acks go to disk on 0 duration target
+ VQ6 = check_variable_queue_status(
+ variable_queue_set_ram_duration_target(0, VQ5),
+ [{len, Len div 2},
+ {target_ram_count, 0},
+ {messages_unacknowledged_ram, 0},
+ {messages_ready_ram, 0},
+ {messages_ram, 0}]),
+
+ VQ6.
+
+%% CT entry point: run on node 0, then against a fresh variable queue
+%% of the configured type.
+variable_queue_purge(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_purge1, [Config]).
+
+variable_queue_purge1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_purge2/1,
+ ?config(variable_queue_type, Config)).
+
+%% purge/1 removes ready messages only (len drops, depth keeps pending
+%% acks); purge_acks/1 then drops the pending acks from depth too.
+variable_queue_purge2(VQ0) ->
+ LenDepth = fun (VQ) ->
+ {rabbit_variable_queue:len(VQ),
+ rabbit_variable_queue:depth(VQ)}
+ end,
+ VQ1 = variable_queue_publish(false, 10, VQ0),
+ {VQ2, Acks} = variable_queue_fetch(6, false, false, 10, VQ1),
+ {4, VQ3} = rabbit_variable_queue:purge(VQ2),
+ {0, 6} = LenDepth(VQ3),
+ {_, VQ4} = rabbit_variable_queue:requeue(lists:sublist(Acks, 2), VQ3),
+ {2, 6} = LenDepth(VQ4),
+ VQ5 = rabbit_variable_queue:purge_acks(VQ4),
+ {2, 2} = LenDepth(VQ5),
+ VQ5.
+
+%% CT entry point: run on node 0, then against a fresh variable queue
+%% of the configured type.
+variable_queue_requeue(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_requeue1, [Config]).
+
+variable_queue_requeue1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_requeue2/1,
+ ?config(variable_queue_type, Config)).
+
+%% After variable_queue_with_holes/1, fetching must yield requeued
+%% messages (flagged redelivered) before fresh ones, in order.
+variable_queue_requeue2(VQ0) ->
+ {_PendingMsgs, RequeuedMsgs, FreshMsgs, VQ1} =
+ variable_queue_with_holes(VQ0),
+ Msgs =
+ lists:zip(RequeuedMsgs,
+ lists:duplicate(length(RequeuedMsgs), true)) ++
+ lists:zip(FreshMsgs,
+ lists:duplicate(length(FreshMsgs), false)),
+ VQ2 = lists:foldl(fun ({I, Requeued}, VQa) ->
+ {{M, MRequeued, _}, VQb} =
+ rabbit_variable_queue:fetch(true, VQa),
+ Requeued = MRequeued, %% assertion
+ I = msg2int(M), %% assertion
+ VQb
+ end, VQ1, Msgs),
+ {empty, VQ3} = rabbit_variable_queue:fetch(true, VQ2),
+ VQ3.
+
+%% requeue from ram_pending_ack into q3, move to delta and then empty queue
+%% CT entry point: run on node 0, then against a fresh variable queue
+%% of the configured type.
+variable_queue_requeue_ram_beta(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_requeue_ram_beta1, [Config]).
+
+variable_queue_requeue_ram_beta1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_requeue_ram_beta2/1,
+ ?config(variable_queue_type, Config)).
+
+%% Requeue acks in two halves around a 0 duration target so requeues
+%% land in both RAM (beta) and on-disk (delta) states, then drain.
+variable_queue_requeue_ram_beta2(VQ0) ->
+ Count = rabbit_queue_index:next_segment_boundary(0)*2 + 2,
+ VQ1 = variable_queue_publish(false, Count, VQ0),
+ {VQ2, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ1),
+ {Back, Front} = lists:split(Count div 2, AcksR),
+ {_, VQ3} = rabbit_variable_queue:requeue(erlang:tl(Back), VQ2),
+ VQ4 = variable_queue_set_ram_duration_target(0, VQ3),
+ {_, VQ5} = rabbit_variable_queue:requeue([erlang:hd(Back)], VQ4),
+ VQ6 = requeue_one_by_one(Front, VQ5),
+ {VQ7, AcksAll} = variable_queue_fetch(Count, false, true, Count, VQ6),
+ {_, VQ8} = rabbit_variable_queue:ack(AcksAll, VQ7),
+ VQ8.
+
+%% CT entry point: run on node 0, then against a fresh variable queue
+%% of the configured type.
+variable_queue_fold(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_fold1, [Config]).
+
+variable_queue_fold1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_fold2/1,
+ ?config(variable_queue_type, Config)).
+
+%% Run test_variable_queue_fold/4 with cut points at, around, and beyond
+%% the queue depth, over a queue with requeued "holes".
+variable_queue_fold2(VQ0) ->
+ {PendingMsgs, RequeuedMsgs, FreshMsgs, VQ1} =
+ variable_queue_with_holes(VQ0),
+ Count = rabbit_variable_queue:depth(VQ1),
+ Msgs = lists:sort(PendingMsgs ++ RequeuedMsgs ++ FreshMsgs),
+ lists:foldl(fun (Cut, VQ2) ->
+ test_variable_queue_fold(Cut, Msgs, PendingMsgs, VQ2)
+ end, VQ1, [0, 1, 2, Count div 2,
+ Count - 1, Count, Count + 1, Count * 2]).
+
+%% fold/4 over the queue, stopping at the first message > Cut; asserts
+%% the visited set equals the sorted prefix of Msgs up to Cut, and that
+%% the Pending flag matches membership in PendingMsgs.
+test_variable_queue_fold(Cut, Msgs, PendingMsgs, VQ0) ->
+ {Acc, VQ1} = rabbit_variable_queue:fold(
+ fun (M, _, Pending, A) ->
+ MInt = msg2int(M),
+ Pending = lists:member(MInt, PendingMsgs), %% assert
+ case MInt =< Cut of
+ true -> {cont, [MInt | A]};
+ false -> {stop, A}
+ end
+ end, [], VQ0),
+ Expected = lists:takewhile(fun (I) -> I =< Cut end, Msgs),
+ Expected = lists:reverse(Acc), %% assertion
+ VQ1.
+
+%% CT entry point: run on node 0, then against a fresh variable queue
+%% of the configured type.
+variable_queue_batch_publish(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_batch_publish1, [Config]).
+
+variable_queue_batch_publish1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_batch_publish2/1,
+ ?config(variable_queue_type, Config)).
+
+%% batch_publish of Count messages must leave len == Count.
+variable_queue_batch_publish2(VQ) ->
+ Count = 10,
+ VQ1 = variable_queue_batch_publish(true, Count, VQ),
+ Count = rabbit_variable_queue:len(VQ1),
+ VQ1.
+
+%% CT entry point: run on node 0, then against a fresh variable queue
+%% of the configured type.
+variable_queue_batch_publish_delivered(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_batch_publish_delivered1, [Config]).
+
+variable_queue_batch_publish_delivered1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_batch_publish_delivered2/1,
+ ?config(variable_queue_type, Config)).
+
+%% batch_publish_delivered of Count messages must leave depth == Count
+%% (they are pending acks, so len would be 0 but depth counts them).
+variable_queue_batch_publish_delivered2(VQ) ->
+ Count = 10,
+ VQ1 = variable_queue_batch_publish_delivered(true, Count, VQ),
+ Count = rabbit_variable_queue:depth(VQ1),
+ VQ1.
+
+%% same as test_variable_queue_requeue_ram_beta but randomly changing
+%% the queue mode after every step.
+%% CT entry point: run on node 0, then against a fresh variable queue
+%% of the configured type.
+variable_queue_mode_change(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, variable_queue_mode_change1, [Config]).
+
+variable_queue_mode_change1(Config) ->
+ with_fresh_variable_queue(
+ fun variable_queue_mode_change2/1,
+ ?config(variable_queue_type, Config)).
+
+%% The requeue_ram_beta scenario, interleaved with random lazy/default
+%% mode switches after every step.
+variable_queue_mode_change2(VQ0) ->
+ Count = rabbit_queue_index:next_segment_boundary(0)*2 + 2,
+ VQ1 = variable_queue_publish(false, Count, VQ0),
+ VQ2 = maybe_switch_queue_mode(VQ1),
+ {VQ3, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ2),
+ VQ4 = maybe_switch_queue_mode(VQ3),
+ {Back, Front} = lists:split(Count div 2, AcksR),
+ {_, VQ5} = rabbit_variable_queue:requeue(erlang:tl(Back), VQ4),
+ VQ6 = maybe_switch_queue_mode(VQ5),
+ VQ7 = variable_queue_set_ram_duration_target(0, VQ6),
+ VQ8 = maybe_switch_queue_mode(VQ7),
+ {_, VQ9} = rabbit_variable_queue:requeue([erlang:hd(Back)], VQ8),
+ VQ10 = maybe_switch_queue_mode(VQ9),
+ VQ11 = requeue_one_by_one(Front, VQ10),
+ VQ12 = maybe_switch_queue_mode(VQ11),
+ {VQ13, AcksAll} = variable_queue_fetch(Count, false, true, Count, VQ12),
+ VQ14 = maybe_switch_queue_mode(VQ13),
+ {_, VQ15} = rabbit_variable_queue:ack(AcksAll, VQ14),
+ VQ16 = maybe_switch_queue_mode(VQ15),
+ VQ16.
+
+%% Switch the queue to a randomly chosen mode (lazy or default).
+maybe_switch_queue_mode(VQ) ->
+ Mode = random_queue_mode(),
+ set_queue_mode(Mode, VQ).
+
+%% Pick one of the queue modes uniformly at random.
+%% Uses the 'rand' module; the previously used 'random' module is
+%% deprecated (and requires explicit per-process seeding for
+%% independent sequences).
+random_queue_mode() ->
+    Modes = [lazy, default],
+    lists:nth(rand:uniform(length(Modes)), Modes).
+
+%% Normalise a publish result: some publish variants return {Extra, VQ}
+%% pairs, others the bare VQ state; always return the VQ state.
+pub_res(Result) ->
+    case Result of
+        {_, VQS} -> VQS;
+        VQS      -> VQS
+    end.
+
+%% Build a {Msg, Props, ChPid-flag} publish triple for batch_publish:
+%% delivery_mode 2 for persistent, 1 for transient.
+make_publish(IsPersistent, PayloadFun, PropFun, N) ->
+ {rabbit_basic:message(
+ rabbit_misc:r(<<>>, exchange, <<>>),
+ <<>>, #'P_basic'{delivery_mode = case IsPersistent of
+ true -> 2;
+ false -> 1
+ end},
+ PayloadFun(N)),
+ PropFun(N, #message_properties{size = 10}),
+ false}.
+
+%% As make_publish/4 but for batch_publish_delivered, which takes
+%% {Msg, Props} pairs (no delivered flag).
+make_publish_delivered(IsPersistent, PayloadFun, PropFun, N) ->
+ {rabbit_basic:message(
+ rabbit_misc:r(<<>>, exchange, <<>>),
+ <<>>, #'P_basic'{delivery_mode = case IsPersistent of
+ true -> 2;
+ false -> 1
+ end},
+ PayloadFun(N)),
+ PropFun(N, #message_properties{size = 10})}.
+
+%% Build a queue resource name qualified by the current testcase name.
+queue_name(Config, Name) ->
+ Name1 = rabbit_ct_helpers:config_to_testcase_name(Config, Name),
+ queue_name(Name1).
+
+%% Queue resource in the default vhost <<"/">>.
+queue_name(Name) ->
+ rabbit_misc:r(<<"/">>, queue, Name).
+
+%% The fixed queue name used by the queue-index helpers below.
+test_queue() ->
+ queue_name(<<"test">>).
+
+%% Recover the test queue's index, using a throwaway persistent-store
+%% client to answer the "does this msg still exist" recovery callback.
+%% Returns the result of rabbit_queue_index:recover/6.
+init_test_queue() ->
+ TestQueue = test_queue(),
+ PRef = rabbit_guid:gen(),
+ PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef),
+ Res = rabbit_queue_index:recover(
+ TestQueue, [], false,
+ fun (MsgId) ->
+ rabbit_msg_store:contains(MsgId, PersistentClient)
+ end,
+ fun nop/1, fun nop/1),
+ ok = rabbit_msg_store:client_delete_and_terminate(PersistentClient),
+ Res.
+
+%% Terminate the index, restart the variable queue subsystem telling it
+%% the test queue exists, and recover the index again.
+restart_test_queue(Qi) ->
+ _ = rabbit_queue_index:terminate([], Qi),
+ ok = rabbit_variable_queue:stop(),
+ {ok, _} = rabbit_variable_queue:start([test_queue()]),
+ init_test_queue().
+
+%% Restart the variable queue subsystem with no recoverable queues and
+%% wipe the test queue's index, asserting it recovers empty first.
+empty_test_queue() ->
+ ok = rabbit_variable_queue:stop(),
+ {ok, _} = rabbit_variable_queue:start([]),
+ {0, 0, Qi} = init_test_queue(),
+ _ = rabbit_queue_index:delete_and_terminate(Qi),
+ ok.
+
+%% Run Fun against a freshly initialised, empty queue index, deleting
+%% the index (and its on-disk state) afterwards.
+with_empty_test_queue(Fun) ->
+    ok = empty_test_queue(),
+    {0, 0, Qi} = init_test_queue(),
+    rabbit_queue_index:delete_and_terminate(Fun(Qi)).
+
+%% Bounce the whole rabbit application.
+restart_app() ->
+    rabbit:stop(),
+    rabbit:start().
+
+%% Publish one freshly generated message per entry in SeqIds into the
+%% queue index Qi and into the persistent or transient message store.
+%% Returns {Qi1, [{SeqId, MsgId}]} with the pairs in reverse publish
+%% order (the foldl conses onto the accumulator).
+queue_index_publish(SeqIds, Persistent, Qi) ->
+    Ref = rabbit_guid:gen(),
+    MsgStore = case Persistent of
+                   true  -> ?PERSISTENT_MSG_STORE;
+                   false -> ?TRANSIENT_MSG_STORE
+               end,
+    MSCState = msg_store_client_init(MsgStore, Ref),
+    {A, B = [{_SeqId, LastMsgIdWritten} | _]} =
+        lists:foldl(
+          fun (SeqId, {QiN, SeqIdsMsgIdsAcc}) ->
+                  MsgId = rabbit_guid:gen(),
+                  QiM = rabbit_queue_index:publish(
+                          MsgId, SeqId, #message_properties{size = 10},
+                          Persistent, infinity, QiN),
+                  %% the MsgId doubles as the message body here
+                  ok = rabbit_msg_store:write(MsgId, MsgId, MSCState),
+                  {QiM, [{SeqId, MsgId} | SeqIdsMsgIdsAcc]}
+          end, {Qi, []}, SeqIds),
+    %% do this just to force all of the publishes through to the msg_store:
+    true = rabbit_msg_store:contains(LastMsgIdWritten, MSCState),
+    ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
+    {A, B}.
+
+%% Check that every entry read from the queue index matches, in order,
+%% the corresponding published {SeqId, MsgId} pair and carries the
+%% expected Persistent/Delivered flags. Returns ok on success and the
+%% sentinel 'ko' on any mismatch or length difference (callers are
+%% expected to match on ok).
+verify_read_with_published(_Delivered, _Persistent, [], _) ->
+    ok;
+verify_read_with_published(Delivered, Persistent,
+                           [{MsgId, SeqId, _Props, Persistent, Delivered}|Read],
+                           [{SeqId, MsgId}|Published]) ->
+    verify_read_with_published(Delivered, Persistent, Read, Published);
+verify_read_with_published(_Delivered, _Persistent, _Read, _Published) ->
+    ko.
+
+%% No-op callbacks of arity 1 and 2, passed where the queue index and
+%% variable queue APIs require callback funs.
+nop(_) -> ok.
+nop(_, _) -> ok.
+
+%% Open a message store client with no confirm/close callbacks.
+msg_store_client_init(MsgStore, Ref) ->
+    rabbit_msg_store:client_init(MsgStore, Ref, undefined, undefined).
+
+%% Initialise a variable queue for Q, either brand new or recovering
+%% from a non-clean shutdown; all callbacks are no-ops.
+variable_queue_init(Q, Recover) ->
+    rabbit_variable_queue:init(
+      Q, case Recover of
+             true  -> non_clean_shutdown;
+             false -> new
+         end, fun nop/2, fun nop/2, fun nop/1, fun nop/1).
+
+%% Deliver Count persistent messages (confirm = true, seq nos 1..Count)
+%% to queue Q and block until a confirm arrives for every seq no.
+publish_and_confirm(Q, Payload, Count) ->
+    Seqs = lists:seq(1, Count),
+    [begin
+         Msg = rabbit_basic:message(rabbit_misc:r(<<>>, exchange, <<>>),
+                                    <<>>, #'P_basic'{delivery_mode = 2},
+                                    Payload),
+         Delivery = #delivery{mandatory = false, sender = self(),
+                              confirm = true, message = Msg, msg_seq_no = Seq,
+                              flow = noflow},
+         _QPids = rabbit_amqqueue:deliver([Q], Delivery)
+     end || Seq <- Seqs],
+    wait_for_confirms(gb_sets:from_list(Seqs)).
+
+%% Drain {confirm, SeqNos, _} casts until the Unconfirmed set is empty;
+%% exits if no confirm arrives within ?TIMEOUT.
+wait_for_confirms(Unconfirmed) ->
+    case gb_sets:is_empty(Unconfirmed) of
+        true  -> ok;
+        false -> receive {'$gen_cast', {confirm, Confirmed, _}} ->
+                         wait_for_confirms(
+                           rabbit_misc:gb_sets_difference(
+                             Unconfirmed, gb_sets:from_list(Confirmed)))
+                 after ?TIMEOUT -> exit(timeout_waiting_for_confirm)
+                 end
+    end.
+
+%% Initialise a fresh, empty variable queue in the given Mode, run Fun
+%% on it and delete it afterwards. If Fun crashes, this process exits
+%% with {Type, Error, Stacktrace}.
+with_fresh_variable_queue(Fun, Mode) ->
+    Ref = make_ref(),
+    Me = self(),
+    %% Run in a separate process since rabbit_msg_store will send
+    %% bump_credit messages and we want to ignore them
+    spawn_link(fun() ->
+                       ok = empty_test_queue(),
+                       VQ = variable_queue_init(test_amqqueue(true), false),
+                       S0 = variable_queue_status(VQ),
+                       %% a fresh queue must start out completely empty
+                       assert_props(S0, [{q1, 0}, {q2, 0},
+                                         {delta,
+                                          {delta, undefined, 0, undefined}},
+                                         {q3, 0}, {q4, 0},
+                                         {len, 0}]),
+                       VQ1 = set_queue_mode(Mode, VQ),
+                       try
+                           _ = rabbit_variable_queue:delete_and_terminate(
+                                 shutdown, Fun(VQ1)),
+                           Me ! Ref
+                       catch
+                           Type:Error ->
+                               %% erlang:get_stacktrace/0 is the
+                               %% pre-OTP-21 idiom
+                               Me ! {Ref, Type, Error, erlang:get_stacktrace()}
+                       end
+               end),
+    receive
+        Ref                    -> ok;
+        {Ref, Type, Error, ST} -> exit({Type, Error, ST})
+    end,
+    passed.
+
+%% Switch the queue to Mode and assert that the backing queue status
+%% reports the new mode before returning the updated state.
+set_queue_mode(Mode, VQ) ->
+    VQ1 = rabbit_variable_queue:set_queue_mode(Mode, VQ),
+    S1 = variable_queue_status(VQ1),
+    assert_props(S1, [{mode, Mode}]),
+    VQ1.
+
+%% Publish Count messages with default properties and empty payloads.
+variable_queue_publish(IsPersistent, Count, VQ) ->
+    variable_queue_publish(IsPersistent, Count, fun (_N, P) -> P end, VQ).
+
+%% Publish Count messages numbered from 1, with empty payloads.
+variable_queue_publish(IsPersistent, Count, PropFun, VQ) ->
+    variable_queue_publish(IsPersistent, 1, Count, PropFun,
+                           fun (_N) -> <<>> end, VQ).
+
+%% Publish messages numbered Start..Start+Count-1, deriving properties
+%% and payload from PropFun(N)/PayloadFun(N), then wait for any
+%% credit-flow induced shuffling to finish before returning the state.
+variable_queue_publish(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) ->
+    variable_queue_wait_for_shuffling_end(
+      lists:foldl(
+        fun (N, VQN) ->
+                rabbit_variable_queue:publish(
+                  rabbit_basic:message(
+                    rabbit_misc:r(<<>>, exchange, <<>>),
+                    <<>>, #'P_basic'{delivery_mode = case IsPersistent of
+                                                         true  -> 2;
+                                                         false -> 1
+                                                     end},
+                    PayloadFun(N)),
+                  PropFun(N, #message_properties{size = 10}),
+                  false, self(), noflow, VQN)
+        end, VQ, lists:seq(Start, Start + Count - 1))).
+
+%% Batch-publish Count messages with default properties and empty
+%% payloads.
+variable_queue_batch_publish(IsPersistent, Count, VQ) ->
+    variable_queue_batch_publish(IsPersistent, Count, fun (_N, P) -> P end, VQ).
+
+variable_queue_batch_publish(IsPersistent, Count, PropFun, VQ) ->
+    variable_queue_batch_publish(IsPersistent, 1, Count, PropFun,
+                                 fun (_N) -> <<>> end, VQ).
+
+%% Batch-publish via rabbit_variable_queue:batch_publish/4, building
+%% the publish tuples with make_publish/4.
+variable_queue_batch_publish(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) ->
+    variable_queue_batch_publish0(IsPersistent, Start, Count, PropFun,
+                                  PayloadFun, fun make_publish/4,
+                                  fun rabbit_variable_queue:batch_publish/4,
+                                  VQ).
+
+%% As variable_queue_batch_publish/3 but messages are published
+%% already-delivered (batch_publish_delivered/4).
+variable_queue_batch_publish_delivered(IsPersistent, Count, VQ) ->
+    variable_queue_batch_publish_delivered(IsPersistent, Count, fun (_N, P) -> P end, VQ).
+
+variable_queue_batch_publish_delivered(IsPersistent, Count, PropFun, VQ) ->
+    variable_queue_batch_publish_delivered(IsPersistent, 1, Count, PropFun,
+                                           fun (_N) -> <<>> end, VQ).
+
+variable_queue_batch_publish_delivered(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) ->
+    variable_queue_batch_publish0(IsPersistent, Start, Count, PropFun,
+                                  PayloadFun, fun make_publish_delivered/4,
+                                  fun rabbit_variable_queue:batch_publish_delivered/4,
+                                  VQ).
+
+%% Common driver: build publishes numbered Start..Start+Count-1 with
+%% MakePubFun, hand them to PubFun in one call, normalise the result
+%% with pub_res/1 and wait for shuffling to end.
+variable_queue_batch_publish0(IsPersistent, Start, Count, PropFun, PayloadFun,
+                              MakePubFun, PubFun, VQ) ->
+    Publishes =
+        [MakePubFun(IsPersistent, PayloadFun, PropFun, N)
+         || N <- lists:seq(Start, Start + Count - 1)],
+    Res = PubFun(Publishes, self(), noflow, VQ),
+    VQ1 = pub_res(Res),
+    variable_queue_wait_for_shuffling_end(VQ1).
+
+%% Fetch Count messages, asserting each message's persistence and
+%% delivered flags and that the queue length drops by one per fetch
+%% (starting from Len). Returns {VQ1, AckTags} with the tags in
+%% reverse fetch order.
+variable_queue_fetch(Count, IsPersistent, IsDelivered, Len, VQ) ->
+    lists:foldl(fun (N, {VQN, AckTagsAcc}) ->
+                        Rem = Len - N,
+                        {{#basic_message { is_persistent = IsPersistent },
+                          IsDelivered, AckTagN}, VQM} =
+                            rabbit_variable_queue:fetch(true, VQN),
+                        %% length must have decreased accordingly
+                        Rem = rabbit_variable_queue:len(VQM),
+                        {VQM, [AckTagN | AckTagsAcc]}
+                end, {VQ, []}, lists:seq(1, Count)).
+
+%% A minimal #amqqueue record for the test queue, owned by the calling
+%% process, with the requested durability.
+test_amqqueue(Durable) ->
+    (rabbit_amqqueue:pseudo_queue(test_queue(), self()))
+        #amqqueue { durable = Durable }.
+
+%% Assert that proplist List maps Prop to exactly Value.
+%% Previously a mismatch merely *returned* {exit, Prop, exp, Value,
+%% List}, which assert_props/2 then discarded, so the assertion could
+%% never fail a test. Now a mismatch exits so callers crash loudly.
+assert_prop(List, Prop, Value) ->
+    case proplists:get_value(Prop, List) of
+        Value -> ok;
+        Other -> exit({exit, Prop, exp, Value, got, Other, List})
+    end.
+
+%% Assert a batch of {Prop, Value} pairs against List; exits on the
+%% first mismatch (via assert_prop/3).
+assert_props(List, PropVals) ->
+    [assert_prop(List, Prop, Value) || {Prop, Value} <- PropVals].
+
+%% Set the ram duration target and wait for any resulting message
+%% shuffling (paging) to complete before returning the state.
+variable_queue_set_ram_duration_target(Duration, VQ) ->
+    variable_queue_wait_for_shuffling_end(
+      rabbit_variable_queue:set_ram_duration_target(Duration, VQ)).
+
+%% Publish, fetch and ack a single transient message, N times over,
+%% asserting the queue length returns to Len after every fetch.
+publish_fetch_and_ack(0, _Len, VQ0) ->
+    VQ0;
+publish_fetch_and_ack(N, Len, VQ0) ->
+    VQ1 = variable_queue_publish(false, 1, VQ0),
+    {{_Msg, false, AckTag}, VQ2} = rabbit_variable_queue:fetch(true, VQ1),
+    Len = rabbit_variable_queue:len(VQ2),
+    {_Guids, VQ3} = rabbit_variable_queue:ack([AckTag], VQ2),
+    publish_fetch_and_ack(N-1, Len, VQ3).
+
+%% Flatten the backing queue info into a single proplist: every info
+%% key except backing_queue_status, plus the status proplist itself.
+variable_queue_status(VQ) ->
+    Keys = rabbit_backing_queue:info_keys() -- [backing_queue_status],
+    [{K, rabbit_variable_queue:info(K, VQ)} || K <- Keys] ++
+        rabbit_variable_queue:info(backing_queue_status, VQ).
+
+%% While credit_flow reports us blocked, consume bump_credit messages
+%% and resume the queue; return the (possibly updated) state once
+%% unblocked.
+variable_queue_wait_for_shuffling_end(VQ) ->
+    case credit_flow:blocked() of
+        false -> VQ;
+        true  -> receive
+                     {bump_credit, Msg} ->
+                         credit_flow:handle_bump_msg(Msg),
+                         variable_queue_wait_for_shuffling_end(
+                           rabbit_variable_queue:resume(VQ))
+                 end
+    end.
+
+%% Decode a message payload (built with term_to_binary/1 by the
+%% publish helpers) back into the term it carries; payload fragments
+%% are stored in reverse order.
+msg2int(#basic_message{content = #content{ payload_fragments_rev = P}}) ->
+    binary_to_term(list_to_binary(lists:reverse(P))).
+
+%% Keep every {AckTag, N} pair whose (N + Rem) is a multiple of
+%% Interval, i.e. take every Interval-th entry with offset Rem.
+ack_subset(AckSeqs, Interval, Rem) ->
+    [Pair || {_Ack, N} = Pair <- AckSeqs, (N + Rem) rem Interval =:= 0].
+
+%% Requeue each ack tag in its own requeue/2 call (rather than one
+%% batched call), threading the queue state through.
+requeue_one_by_one([], VQ) ->
+    VQ;
+requeue_one_by_one([AckTag | Rest], VQ) ->
+    {_MsgIds, VQ1} = rabbit_variable_queue:requeue([AckTag], VQ),
+    requeue_one_by_one(Rest, VQ1).
+
+%% Create a vq with messages in q1, delta, and q3, and holes (in the
+%% form of pending acks) in the latter two.
+%%
+%% Returns {PendingAckSeqs, RequeuedSeqs, TailSeqs, VQ}: the message
+%% numbers left as pending acks, those requeued, the freshly published
+%% q1 tail, and the resulting queue state.
+variable_queue_with_holes(VQ0) ->
+    Interval = 2048, %% should match vq:IO_BATCH_SIZE
+    Count = rabbit_queue_index:next_segment_boundary(0)*2 + 2 * Interval,
+    Seq = lists:seq(1, Count),
+    %% ram duration 0 forces messages to be paged out
+    VQ1 = variable_queue_set_ram_duration_target(0, VQ0),
+    VQ2 = variable_queue_publish(
+            false, 1, Count,
+            fun (_, P) -> P end, fun erlang:term_to_binary/1, VQ1),
+    {VQ3, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ2),
+    Acks = lists:reverse(AcksR),
+    AckSeqs = lists:zip(Acks, Seq),
+    [{Subset1, _Seq1}, {Subset2, _Seq2}, {Subset3, Seq3}] =
+        [lists:unzip(ack_subset(AckSeqs, Interval, I)) || I <- [0, 1, 2]],
+    %% we requeue in three phases in order to exercise requeuing logic
+    %% in various vq states
+    {_MsgIds, VQ4} = rabbit_variable_queue:requeue(
+                       Acks -- (Subset1 ++ Subset2 ++ Subset3), VQ3),
+    VQ5 = requeue_one_by_one(Subset1, VQ4),
+    %% by now we have some messages (and holes) in delta
+    VQ6 = requeue_one_by_one(Subset2, VQ5),
+    VQ7 = variable_queue_set_ram_duration_target(infinity, VQ6),
+    %% add the q1 tail
+    VQ8 = variable_queue_publish(
+            true, Count + 1, Interval,
+            fun (_, P) -> P end, fun erlang:term_to_binary/1, VQ7),
+    %% assertions
+    Status = variable_queue_status(VQ8),
+    vq_with_holes_assertions(VQ8, proplists:get_value(mode, Status)),
+    Depth = Count + Interval,
+    Depth = rabbit_variable_queue:depth(VQ8),
+    %% Subset3 remains un-requeued, hence unacked and excluded from len
+    Len = Depth - length(Subset3),
+    Len = rabbit_variable_queue:len(VQ8),
+    {Seq3, Seq -- Seq3, lists:seq(Count + 1, Count + Interval), VQ8}.
+
+%% In default mode, q1, delta and q3 must all be non-empty; in lazy
+%% mode only delta is required to be non-empty.
+vq_with_holes_assertions(VQ, default) ->
+    [false =
+         case V of
+             {delta, _, 0, _} -> true;
+             0                -> true;
+             _                -> false
+         end || {K, V} <- variable_queue_status(VQ),
+                lists:member(K, [q1, delta, q3])];
+vq_with_holes_assertions(VQ, lazy) ->
+    [false =
+         case V of
+             {delta, _, 0, _} -> true;
+             _                -> false
+         end || {K, V} <- variable_queue_status(VQ),
+                lists:member(K, [delta])].
+
+%% Wait for any shuffling to finish, then assert the given props
+%% against the queue status; returns the settled state.
+check_variable_queue_status(VQ0, Props) ->
+    VQ1 = variable_queue_wait_for_shuffling_end(VQ0),
+    S = variable_queue_status(VQ1),
+    assert_props(S, Props),
+    VQ1.
+
+%% ---------------------------------------------------------------------------
+%% Credit flow.
+%% ---------------------------------------------------------------------------
+
+%% CT entry point: run credit_flow_settings1/1 on broker node 0.
+credit_flow_settings(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, credit_flow_settings1, [Config]).
+
+%% Check that the {InitialCredit, MoreCreditAfter} pair seen by new
+%% processes tracks the credit_flow_default_credit app setting, and
+%% reverts to the default {200, 50} when the setting is unset.
+credit_flow_settings1(_Config) ->
+    %% default values
+    passed = test_proc(200, 50),
+
+    application:set_env(rabbit, credit_flow_default_credit, {100, 20}),
+    passed = test_proc(100, 20),
+
+    application:unset_env(rabbit, credit_flow_default_credit),
+
+    % back to defaults
+    passed = test_proc(200, 50),
+    passed.
+
+%% Spawn a process that opens credit flow to itself and reports the
+%% default-credit value from its process dictionary; assert it equals
+%% {InitialCredit, MoreCreditAfter}.
+test_proc(InitialCredit, MoreCreditAfter) ->
+    Pid = spawn(fun dummy/0),
+    Pid ! {credit, self()},
+    {InitialCredit, MoreCreditAfter} =
+        receive
+            {credit, Val} -> Val
+        end,
+    passed.
+
+%% credit_flow:send/1 is expected to populate
+%% 'credit_flow_default_credit' in the process dictionary; echo that
+%% value back to whoever asks for it.
+dummy() ->
+    credit_flow:send(self()),
+    receive
+        {credit, From} ->
+            From ! {credit, get(credit_flow_default_credit)};
+        _ ->
+            dummy()
+    end.
+
+%% -------------------------------------------------------------------
+%% dynamic_mirroring.
+%% -------------------------------------------------------------------
+
+%% CT entry point: run dynamic_mirroring1/1 on broker node 0.
+dynamic_mirroring(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, dynamic_mirroring1, [Config]).
+
+dynamic_mirroring1(_Config) ->
+    %% Just unit tests of the node selection logic, see multi node
+    %% tests for the rest...
+    %%
+    %% Test({ExpNewMaster, ExpNewSlaves, AllowedExtraSlaves}, Policy,
+    %%      PolicyParams, {Master, Slaves, SyncedSlaves}, AllNodes)
+    Test = fun ({NewM, NewSs, ExtraSs}, Policy, Params,
+                {MNode, SNodes, SSNodes}, All) ->
+                   {ok, M} = rabbit_mirror_queue_misc:module(Policy),
+                   {NewM, NewSs0} = M:suggested_queue_nodes(
+                                      Params, MNode, SNodes, SSNodes, All),
+                   NewSs1 = lists:sort(NewSs0),
+                   case dm_list_match(NewSs, NewSs1, ExtraSs) of
+                       ok    -> ok;
+                       error -> exit({no_match, NewSs, NewSs1, ExtraSs})
+                   end
+           end,
+
+    Test({a,[b,c],0},<<"all">>,'_',{a,[],   []},   [a,b,c]),
+    Test({a,[b,c],0},<<"all">>,'_',{a,[b,c],[b,c]},[a,b,c]),
+    Test({a,[b,c],0},<<"all">>,'_',{a,[d],  [d]},  [a,b,c]),
+
+    N = fun (Atoms) -> [list_to_binary(atom_to_list(A)) || A <- Atoms] end,
+
+    %% Add a node
+    Test({a,[b,c],0},<<"nodes">>,N([a,b,c]),{a,[b],[b]},[a,b,c,d]),
+    Test({b,[a,c],0},<<"nodes">>,N([a,b,c]),{b,[a],[a]},[a,b,c,d]),
+    %% Add two nodes and drop one
+    Test({a,[b,c],0},<<"nodes">>,N([a,b,c]),{a,[d],[d]},[a,b,c,d]),
+    %% Don't try to include nodes that are not running
+    Test({a,[b],  0},<<"nodes">>,N([a,b,f]),{a,[b],[b]},[a,b,c,d]),
+    %% If we can't find any of the nodes listed then just keep the master
+    Test({a,[],   0},<<"nodes">>,N([f,g,h]),{a,[b],[b]},[a,b,c,d]),
+    %% And once that's happened, still keep the master even when not listed,
+    %% if nothing is synced
+    Test({a,[b,c],0},<<"nodes">>,N([b,c]),  {a,[],  []}, [a,b,c,d]),
+    Test({a,[b,c],0},<<"nodes">>,N([b,c]),  {a,[b],[]},  [a,b,c,d]),
+    %% But if something is synced we can lose the master - but make
+    %% sure we pick the new master from the nodes which are synced!
+    Test({b,[c],  0},<<"nodes">>,N([b,c]),  {a,[b],[b]},[a,b,c,d]),
+    Test({b,[c],  0},<<"nodes">>,N([c,b]),  {a,[b],[b]},[a,b,c,d]),
+
+    Test({a,[],   1},<<"exactly">>,2,{a,[],   []},   [a,b,c,d]),
+    Test({a,[],   2},<<"exactly">>,3,{a,[],   []},   [a,b,c,d]),
+    Test({a,[c],  0},<<"exactly">>,2,{a,[c],  [c]},  [a,b,c,d]),
+    Test({a,[c],  1},<<"exactly">>,3,{a,[c],  [c]},  [a,b,c,d]),
+    Test({a,[c],  0},<<"exactly">>,2,{a,[c,d],[c,d]},[a,b,c,d]),
+    Test({a,[c,d],0},<<"exactly">>,3,{a,[c,d],[c,d]},[a,b,c,d]),
+
+    passed.
+
+%% Does the first list match the second where the second is required
+%% to have exactly Extra superfluous items? The lists must agree on
+%% relative order; each skipped second-list element costs one Extra.
+dm_list_match([], [], 0) ->
+    ok;
+dm_list_match(_Remaining, [], _Extra) ->
+    error;
+dm_list_match([Same | RestA], [Same | RestB], Extra) ->
+    dm_list_match(RestA, RestB, Extra);
+dm_list_match(ListA, [_Skipped | RestB], Extra) ->
+    dm_list_match(ListA, RestB, Extra - 1).
+
+%% ---------------------------------------------------------------------------
+%% file_handle_cache.
+%% ---------------------------------------------------------------------------
+
+%% CT entry point: run file_handle_cache1/1 on broker node 0.
+file_handle_cache(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, file_handle_cache1, [Config]).
+
+%% Exercise file_handle_cache copying under a tiny handle limit, and
+%% check that the fhc tidies up its pending queue when a process that
+%% is blocked waiting for a handle is killed.
+file_handle_cache1(_Config) ->
+    %% test copying when there is just one spare handle
+    Limit = file_handle_cache:get_limit(),
+    ok = file_handle_cache:set_limit(5), %% 1 or 2 sockets, 2 msg_stores
+    TmpDir = filename:join(rabbit_mnesia:dir(), "tmp"),
+    ok = filelib:ensure_dir(filename:join(TmpDir, "nothing")),
+    [Src1, Dst1, Src2, Dst2] = Files =
+        [filename:join(TmpDir, Str) || Str <- ["file1", "file2", "file3", "file4"]],
+    Content = <<"foo">>,
+    %% write Content to Src with prim_file, then copy Src to Dst
+    %% through the file_handle_cache and delete both
+    CopyFun = fun (Src, Dst) ->
+                      {ok, Hdl} = prim_file:open(Src, [binary, write]),
+                      ok = prim_file:write(Hdl, Content),
+                      ok = prim_file:sync(Hdl),
+                      prim_file:close(Hdl),
+
+                      {ok, SrcHdl} = file_handle_cache:open(Src, [read], []),
+                      {ok, DstHdl} = file_handle_cache:open(Dst, [write], []),
+                      Size = size(Content),
+                      {ok, Size} = file_handle_cache:copy(SrcHdl, DstHdl, Size),
+                      ok = file_handle_cache:delete(SrcHdl),
+                      ok = file_handle_cache:delete(DstHdl)
+              end,
+    Pid = spawn(fun () -> {ok, Hdl} = file_handle_cache:open(
+                                        filename:join(TmpDir, "file5"),
+                                        [write], []),
+                          receive {next, Pid1} -> Pid1 ! {next, self()} end,
+                          file_handle_cache:delete(Hdl),
+                          %% This will block and never return, so we
+                          %% exercise the fhc tidying up the pending
+                          %% queue on the death of a process.
+                          ok = CopyFun(Src1, Dst1)
+                end),
+    ok = CopyFun(Src1, Dst1),
+    ok = file_handle_cache:set_limit(2),
+    Pid ! {next, self()},
+    receive {next, Pid} -> ok end,
+    timer:sleep(100),
+    Pid1 = spawn(fun () -> CopyFun(Src2, Dst2) end),
+    timer:sleep(100),
+    erlang:monitor(process, Pid),
+    erlang:monitor(process, Pid1),
+    exit(Pid, kill),
+    exit(Pid1, kill),
+    receive {'DOWN', _MRef, process, Pid, _Reason} -> ok end,
+    receive {'DOWN', _MRef1, process, Pid1, _Reason1} -> ok end,
+    [file:delete(File) || File <- Files],
+    ok = file_handle_cache:set_limit(Limit),
+    passed.
+
+%% -------------------------------------------------------------------
+%% Log management.
+%% -------------------------------------------------------------------
+
+%% CT entry point: run log_management1/1 on broker node 0.
+log_management(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, log_management1, [Config]).
+
+%% Exercise log rotation: plain rotation, rotation of empty/missing
+%% files, rotation when files are not writable, and switching the
+%% lager handler between tty, off, and back to file.
+log_management1(_Config) ->
+    [LogFile] = rabbit:log_locations(),
+    Suffix = ".0",
+
+    ok = test_logs_working([LogFile]),
+
+    %% prepare basic logs
+    file:delete(LogFile ++ Suffix),
+    ok = test_logs_working([LogFile]),
+
+    %% simple log rotation
+    ok = control_action(rotate_logs, []),
+    %% FIXME: rabbit:rotate_logs/0 is asynchronous due to a limitation
+    %% in Lager. Therefore, we have no choice but to wait an arbitrary
+    %% amount of time.
+    timer:sleep(2000),
+    [true, true] = non_empty_files([LogFile ++ Suffix, LogFile]),
+    ok = test_logs_working([LogFile]),
+
+    %% log rotation on empty files
+    ok = clean_logs([LogFile], Suffix),
+    ok = control_action(rotate_logs, []),
+    timer:sleep(2000),
+    [{error, enoent}, true] = non_empty_files([LogFile ++ Suffix, LogFile]),
+
+    %% logs with suffix are not writable
+    ok = control_action(rotate_logs, []),
+    timer:sleep(2000),
+    ok = make_files_non_writable([LogFile ++ Suffix]),
+    ok = control_action(rotate_logs, []),
+    timer:sleep(2000),
+    ok = test_logs_working([LogFile]),
+
+    %% rotate when original log files are not writable
+    ok = make_files_non_writable([LogFile]),
+    ok = control_action(rotate_logs, []),
+    timer:sleep(2000),
+
+    %% logging directed to tty (first, remove handlers)
+    ok = control_action(stop_app, []),
+    ok = clean_logs([LogFile], Suffix),
+    ok = application:set_env(rabbit, lager_handler, tty),
+    application:unset_env(lager, handlers),
+    application:unset_env(lager, extra_sinks),
+    ok = control_action(start_app, []),
+    timer:sleep(200),
+    rabbit_log:info("test info"),
+    %% the log file must not have been recreated
+    [{error, enoent}] = empty_files([LogFile]),
+
+    %% rotate logs when logging is turned off
+    ok = control_action(stop_app, []),
+    ok = clean_logs([LogFile], Suffix),
+    ok = application:set_env(rabbit, lager_handler, false),
+    application:unset_env(lager, handlers),
+    application:unset_env(lager, extra_sinks),
+    ok = control_action(start_app, []),
+    timer:sleep(200),
+    rabbit_log:error("test error"),
+    timer:sleep(200),
+    [{error, enoent}] = empty_files([LogFile]),
+
+    %% cleanup
+    ok = control_action(stop_app, []),
+    ok = clean_logs([LogFile], Suffix),
+    ok = application:set_env(rabbit, lager_handler, LogFile),
+    application:unset_env(lager, handlers),
+    application:unset_env(lager, extra_sinks),
+    ok = control_action(start_app, []),
+    ok = test_logs_working([LogFile]),
+    passed.
+
+%% CT entry point: run log_management_during_startup1/1 on node 0.
+log_management_during_startup(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, log_management_during_startup1, [Config]).
+
+%% Start the app with various lager handler configurations: tty, a
+%% creatable non-existing path (must succeed), and unwritable paths
+%% (must fail with cannot_log_to_file).
+log_management_during_startup1(_Config) ->
+    [LogFile] = rabbit:log_locations(),
+    Suffix = ".0",
+
+    %% start application with simple tty logging
+    ok = control_action(stop_app, []),
+    ok = clean_logs([LogFile], Suffix),
+    ok = application:set_env(rabbit, lager_handler, tty),
+    application:unset_env(lager, handlers),
+    application:unset_env(lager, extra_sinks),
+    ok = control_action(start_app, []),
+
+    %% start application with logging to non-existing directory
+    NonExistent = "/tmp/non-existent/test.log",
+    delete_file(NonExistent),
+    delete_file(filename:dirname(NonExistent)),
+    ok = control_action(stop_app, []),
+    ok = application:set_env(rabbit, lager_handler, NonExistent),
+    application:unset_env(lager, handlers),
+    application:unset_env(lager, extra_sinks),
+    ok = control_action(start_app, []),
+
+    %% start application with logging to directory with no
+    %% write permissions
+    ok = control_action(stop_app, []),
+    NoPermission1 = "/var/empty/test.log",
+    delete_file(NoPermission1),
+    delete_file(filename:dirname(NoPermission1)),
+    ok = control_action(stop_app, []),
+    ok = application:set_env(rabbit, lager_handler, NoPermission1),
+    application:unset_env(lager, handlers),
+    application:unset_env(lager, extra_sinks),
+    ok = case control_action(start_app, []) of
+             ok -> exit({got_success_but_expected_failure,
+                         log_rotation_no_write_permission_dir_test});
+             {badrpc,
+              {'EXIT', {error, {cannot_log_to_file, _, Reason1}}}}
+               when Reason1 =:= enoent orelse Reason1 =:= eacces -> ok;
+             {badrpc,
+              {'EXIT',
+               {error, {cannot_log_to_file, _,
+                        {cannot_create_parent_dirs, _, Reason1}}}}}
+               when Reason1 =:= eperm orelse
+                    Reason1 =:= eacces orelse
+                    Reason1 =:= enoent-> ok
+         end,
+
+    %% start application with logging to a subdirectory which
+    %% parent directory has no write permissions
+    NoPermission2 = "/var/empty/non-existent/test.log",
+    delete_file(NoPermission2),
+    delete_file(filename:dirname(NoPermission2)),
+    case control_action(stop_app, []) of
+        ok                          -> ok;
+        %% the previous failed start may have left lager stopped
+        {error, lager_not_running}  -> ok
+    end,
+    ok = application:set_env(rabbit, lager_handler, NoPermission2),
+    application:unset_env(lager, handlers),
+    application:unset_env(lager, extra_sinks),
+    ok = case control_action(start_app, []) of
+             ok -> exit({got_success_but_expected_failure,
+                         log_rotatation_parent_dirs_test});
+             {badrpc,
+              {'EXIT', {error, {cannot_log_to_file, _, Reason2}}}}
+               when Reason2 =:= enoent orelse Reason2 =:= eacces -> ok;
+             {badrpc,
+              {'EXIT',
+               {error, {cannot_log_to_file, _,
+                        {cannot_create_parent_dirs, _, Reason2}}}}}
+               when Reason2 =:= eperm orelse
+                    Reason2 =:= eacces orelse
+                    Reason2 =:= enoent-> ok
+         end,
+
+    %% cleanup
+    ok = application:set_env(rabbit, lager_handler, LogFile),
+    application:unset_env(lager, handlers),
+    application:unset_env(lager, extra_sinks),
+    ok = control_action(start_app, []),
+    passed.
+
+%% CT entry point: run the check on broker node 0.
+externally_rotated_logs_are_automatically_reopened(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, externally_rotated_logs_are_automatically_reopened1, [Config]).
+
+%% Rename the active log file out from under the logger and verify
+%% that logging transparently recreates (and writes to) a new file.
+externally_rotated_logs_are_automatically_reopened1(_Config) ->
+    [LogFile] = rabbit:log_locations(),
+
+    %% Make sure log file is opened
+    ok = test_logs_working([LogFile]),
+
+    %% Move it away - i.e. external log rotation happened
+    file:rename(LogFile, [LogFile, ".rotation_test"]),
+
+    %% New files should be created - test_logs_working/1 will check that
+    %% LogFile is not empty after doing some logging. And it's exactly
+    %% what we need to check here.
+    ok = test_logs_working([LogFile]),
+    passed.
+
+%% For each file: true if it is empty or does not exist, false if it
+%% has content, or the file:read_file_info/1 error for other failures.
+empty_or_nonexist_files(Files) ->
+    [case file:read_file_info(File) of
+         {ok, FInfo}     -> FInfo#file_info.size == 0;
+         {error, enoent} -> true;
+         Error           -> Error
+     end || File <- Files].
+
+%% For each file: true if empty, false if non-empty, or the
+%% file:read_file_info/1 error (including {error, enoent}).
+empty_files(Files) ->
+    [case file:read_file_info(File) of
+         {ok, FInfo} -> FInfo#file_info.size == 0;
+         Error       -> Error
+     end || File <- Files].
+
+%% Invert empty_files/1: true for non-empty files, false for empty
+%% ones, passing {error, _} results through untouched.
+non_empty_files(Files) ->
+    lists:map(fun ({error, _} = Error) -> Error;
+                  (IsEmpty)            -> not IsEmpty
+              end, empty_files(Files)).
+
+%% Log a test message and verify every file in LogFiles is non-empty
+%% afterwards. The previous version computed the lists:all/2 check but
+%% discarded its result, so this helper could never fail; the result is
+%% now asserted (a failed check crashes with badmatch).
+test_logs_working(LogFiles) ->
+    ok = rabbit_log:error("Log a test message"),
+    %% give the error loggers some time to catch up
+    timer:sleep(200),
+    true = lists:all(fun(LogFile) -> [true] =:= non_empty_files([LogFile]) end,
+                     LogFiles),
+    ok.
+
+%% Set the file mode bits of Path, passing read_file_info errors
+%% through to the caller.
+set_permissions(Path, Mode) ->
+    case file:read_file_info(Path) of
+        {ok, FInfo} -> file:write_file_info(
+                         Path,
+                         FInfo#file_info{mode=Mode});
+        Error       -> Error
+    end.
+
+%% Delete each log file and its rotated sibling (File ++ Suffix);
+%% missing files are tolerated (see delete_file/1).
+clean_logs(Files, Suffix) ->
+    [begin
+         ok = delete_file(File),
+         ok = delete_file([File, Suffix])
+     end || File <- Files],
+    ok.
+
+%% Exit with 'not_ram_node' unless this node is a RAM node.
+assert_ram_node() ->
+    case rabbit_mnesia:node_type() of
+        disc -> exit('not_ram_node');
+        ram  -> ok
+    end.
+
+%% Exit with 'not_disc_node' unless this node is a disc node.
+assert_disc_node() ->
+    case rabbit_mnesia:node_type() of
+        disc -> ok;
+        ram  -> exit('not_disc_node')
+    end.
+
+%% Delete File, treating "already gone" ({error, enoent}) as success;
+%% any other error is returned to the caller.
+delete_file(File) ->
+    case file:delete(File) of
+        Result when Result =:= ok;
+                    Result =:= {error, enoent} -> ok;
+        Error                                  -> Error
+    end.
+
+%% Set each file's mode to read-only (0444); crashes if any
+%% write_file_info call fails.
+make_files_non_writable(Files) ->
+    [ok = file:write_file_info(File, #file_info{mode=8#444}) ||
+        File <- Files],
+    ok.
+
+%% Install each {Handler, Args} pair as an error_logger report
+%% handler, crashing if any installation fails.
+add_log_handlers(Handlers) ->
+    lists:foreach(
+      fun ({Handler, Args}) ->
+              ok = error_logger:add_report_handler(Handler, Args)
+      end, Handlers),
+    ok.
+
+%% Remove the given error_logger report handlers, tolerating both
+%% possible success return values:
+%%
+%% sasl_report_file_h returns [] during terminate;
+%% error_logger_file_h returns ok since OTP 18.1, see:
+%% https://github.com/erlang/otp/blob/maint/lib/stdlib/src/error_logger_file_h.erl#L98
+delete_log_handlers(Handlers) ->
+    [ok_or_empty_list(error_logger:delete_report_handler(Handler))
+     || Handler <- Handlers],
+    ok.
+
+%% Accept (and return) either of the two handler-deletion success
+%% values, ok or []; anything else raises function_clause.
+ok_or_empty_list(Result) when Result =:= []; Result =:= ok ->
+    Result.
+
+%% ---------------------------------------------------------------------------
+%% Password hashing.
+%% ---------------------------------------------------------------------------
+
+%% CT entry point: run password_hashing1/1 on broker node 0.
+password_hashing(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, password_hashing1, [Config]).
+
+%% Check hashing-module resolution: the app env drives the default,
+%% an explicit module is passed through, and 'undefined' (legacy
+%% users) falls back to MD5.
+password_hashing1(_Config) ->
+    rabbit_password_hashing_sha256 = rabbit_password:hashing_mod(),
+    application:set_env(rabbit, password_hashing_module,
+                        rabbit_password_hashing_md5),
+    rabbit_password_hashing_md5    = rabbit_password:hashing_mod(),
+    application:set_env(rabbit, password_hashing_module,
+                        rabbit_password_hashing_sha256),
+    rabbit_password_hashing_sha256 = rabbit_password:hashing_mod(),
+
+    rabbit_password_hashing_sha256 =
+        rabbit_password:hashing_mod(rabbit_password_hashing_sha256),
+    rabbit_password_hashing_md5    =
+        rabbit_password:hashing_mod(rabbit_password_hashing_md5),
+    %% unknown/unset module falls back to MD5
+    rabbit_password_hashing_md5    =
+        rabbit_password:hashing_mod(undefined),
+
+    %% per-user resolution: missing or undefined algorithm -> MD5
+    rabbit_password_hashing_md5    =
+        rabbit_auth_backend_internal:hashing_module_for_user(
+          #internal_user{}),
+    rabbit_password_hashing_md5    =
+        rabbit_auth_backend_internal:hashing_module_for_user(
+          #internal_user{
+             hashing_algorithm = undefined
+            }),
+    rabbit_password_hashing_md5    =
+        rabbit_auth_backend_internal:hashing_module_for_user(
+          #internal_user{
+             hashing_algorithm = rabbit_password_hashing_md5
+            }),
+
+    rabbit_password_hashing_sha256 =
+        rabbit_auth_backend_internal:hashing_module_for_user(
+          #internal_user{
+             hashing_algorithm = rabbit_password_hashing_sha256
+            }),
+
+    passed.
+
+%% CT entry point: run change_password1/1 on broker node 0.
+change_password(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, change_password1, [Config]).
+
+%% A user created under MD5 hashing must still authenticate after the
+%% hashing module switches to SHA-256; changing the password must
+%% invalidate the old one.
+change_password1(_Config) ->
+    UserName = <<"test_user">>,
+    Password = <<"test_password">>,
+    %% start from a clean slate
+    case rabbit_auth_backend_internal:lookup_user(UserName) of
+        {ok, _} -> rabbit_auth_backend_internal:delete_user(UserName);
+        _       -> ok
+    end,
+    ok = application:set_env(rabbit, password_hashing_module,
+                             rabbit_password_hashing_md5),
+    ok = rabbit_auth_backend_internal:add_user(UserName, Password),
+    {ok, #auth_user{username = UserName}} =
+        rabbit_auth_backend_internal:user_login_authentication(
+          UserName, [{password, Password}]),
+    ok = application:set_env(rabbit, password_hashing_module,
+                             rabbit_password_hashing_sha256),
+    %% old MD5-hashed credential still works after the switch
+    {ok, #auth_user{username = UserName}} =
+        rabbit_auth_backend_internal:user_login_authentication(
+          UserName, [{password, Password}]),
+
+    NewPassword = <<"test_password1">>,
+    ok = rabbit_auth_backend_internal:change_password(UserName, NewPassword),
+    {ok, #auth_user{username = UserName}} =
+        rabbit_auth_backend_internal:user_login_authentication(
+          UserName, [{password, NewPassword}]),
+
+    %% the old password must no longer be accepted
+    {refused, _, [UserName]} =
+        rabbit_auth_backend_internal:user_login_authentication(
+          UserName, [{password, Password}]),
+    passed.
+
+%% -------------------------------------------------------------------
+%% rabbitmqctl.
+%% -------------------------------------------------------------------
+
+%% CT entry point: run list_operations_timeout_pass1/1 on node 0.
+list_operations_timeout_pass(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, list_operations_timeout_pass1, [Config]).
+
+%% Create users, vhosts, parameters, policies, queues, consumers and
+%% connections, then run every rabbitmqctl list_* command with the
+%% ?TIMEOUT_LIST_OPS_PASS timeout and check each succeeds in time.
+list_operations_timeout_pass1(Config) ->
+    %% create a few things so there is some useful information to list
+    {_Writer1, Limiter1, Ch1} = rabbit_ct_broker_helpers:test_channel(),
+    {_Writer2, Limiter2, Ch2} = rabbit_ct_broker_helpers:test_channel(),
+
+    [Q, Q2] = [Queue || Name <- [<<"list_operations_timeout_pass-q1">>,
+                                 <<"list_operations_timeout_pass-q2">>],
+                        {new, Queue = #amqqueue{}} <-
+                            [rabbit_amqqueue:declare(
+                               rabbit_misc:r(<<"/">>, queue, Name),
+                               false, false, [], none)]],
+
+    ok = rabbit_amqqueue:basic_consume(
+           Q, true, Ch1, Limiter1, false, 0, <<"ctag1">>, true, [],
+           undefined),
+    ok = rabbit_amqqueue:basic_consume(
+           Q2, true, Ch2, Limiter2, false, 0, <<"ctag2">>, true, [],
+           undefined),
+
+    %% list users
+    ok = control_action(add_user,
+      ["list_operations_timeout_pass-user",
+       "list_operations_timeout_pass-password"]),
+    {error, {user_already_exists, _}} =
+        control_action(add_user,
+          ["list_operations_timeout_pass-user",
+           "list_operations_timeout_pass-password"]),
+    ok = control_action_t(list_users, [], ?TIMEOUT_LIST_OPS_PASS),
+
+    %% list parameters
+    ok = dummy_runtime_parameters:register(),
+    ok = control_action(set_parameter, ["test", "good", "123"]),
+    ok = control_action_t(list_parameters, [], ?TIMEOUT_LIST_OPS_PASS),
+    ok = control_action(clear_parameter, ["test", "good"]),
+    dummy_runtime_parameters:unregister(),
+
+    %% list vhosts
+    ok = control_action(add_vhost, ["/list_operations_timeout_pass-vhost"]),
+    {error, {vhost_already_exists, _}} =
+        control_action(add_vhost, ["/list_operations_timeout_pass-vhost"]),
+    ok = control_action_t(list_vhosts, [], ?TIMEOUT_LIST_OPS_PASS),
+
+    %% list permissions
+    ok = control_action(set_permissions,
+      ["list_operations_timeout_pass-user", ".*", ".*", ".*"],
+      [{"-p", "/list_operations_timeout_pass-vhost"}]),
+    ok = control_action_t(list_permissions, [],
+      [{"-p", "/list_operations_timeout_pass-vhost"}],
+      ?TIMEOUT_LIST_OPS_PASS),
+
+    %% list user permissions
+    ok = control_action_t(list_user_permissions,
+      ["list_operations_timeout_pass-user"],
+      ?TIMEOUT_LIST_OPS_PASS),
+
+    %% list policies
+    ok = control_action_opts(
+      ["set_policy", "list_operations_timeout_pass-policy", ".*",
+       "{\"ha-mode\":\"all\"}"]),
+    ok = control_action_t(list_policies, [], ?TIMEOUT_LIST_OPS_PASS),
+    ok = control_action(clear_policy, ["list_operations_timeout_pass-policy"]),
+
+    %% list queues
+    ok = info_action_t(list_queues,
+      rabbit_amqqueue:info_keys(), false,
+      ?TIMEOUT_LIST_OPS_PASS),
+
+    %% list exchanges
+    ok = info_action_t(list_exchanges,
+      rabbit_exchange:info_keys(), true,
+      ?TIMEOUT_LIST_OPS_PASS),
+
+    %% list bindings
+    ok = info_action_t(list_bindings,
+      rabbit_binding:info_keys(), true,
+      ?TIMEOUT_LIST_OPS_PASS),
+
+    %% list connections: open two raw AMQP 0-9-1 connections so there
+    %% is something to list
+    H = ?config(rmq_hostname, Config),
+    P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+    {ok, C1} = gen_tcp:connect(H, P, [binary, {active, false}]),
+    gen_tcp:send(C1, <<"AMQP", 0, 0, 9, 1>>),
+    {ok, <<1,0,0>>} = gen_tcp:recv(C1, 3, 100),
+
+    {ok, C2} = gen_tcp:connect(H, P, [binary, {active, false}]),
+    gen_tcp:send(C2, <<"AMQP", 0, 0, 9, 1>>),
+    {ok, <<1,0,0>>} = gen_tcp:recv(C2, 3, 100),
+
+    ok = info_action_t(
+      list_connections, rabbit_networking:connection_info_keys(), false,
+      ?TIMEOUT_LIST_OPS_PASS),
+
+    %% list consumers
+    ok = info_action_t(
+      list_consumers, rabbit_amqqueue:consumer_info_keys(), false,
+      ?TIMEOUT_LIST_OPS_PASS),
+
+    %% list channels
+    ok = info_action_t(
+      list_channels, rabbit_channel:info_keys(), false,
+      ?TIMEOUT_LIST_OPS_PASS),
+
+    %% do some cleaning up
+    ok = control_action(delete_user, ["list_operations_timeout_pass-user"]),
+    {error, {no_such_user, _}} =
+        control_action(delete_user, ["list_operations_timeout_pass-user"]),
+
+    ok = control_action(delete_vhost, ["/list_operations_timeout_pass-vhost"]),
+    {error, {no_such_vhost, _}} =
+        control_action(delete_vhost, ["/list_operations_timeout_pass-vhost"]),
+
+    %% close_connection
+    Conns = rabbit_ct_broker_helpers:get_connection_pids([C1, C2]),
+    [ok, ok] = [ok = control_action(
+        close_connection, [rabbit_misc:pid_to_string(ConnPid), "go away"])
+     || ConnPid <- Conns],
+
+    %% cleanup queues
+    [{ok, _} = rabbit_amqqueue:delete(QR, false, false) || QR <- [Q, Q2]],
+
+    [begin
+         unlink(Chan),
+         ok = rabbit_channel:shutdown(Chan)
+     end || Chan <- [Ch1, Ch2]],
+    passed.
+
+%% CT entry point: run user_management1/1 on broker node 0.
+user_management(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, user_management1, [Config]).
+
+user_management1(_Config) ->
+
+ %% lots if stuff that should fail
+ {error, {no_such_user, _}} =
+ control_action(delete_user,
+ ["user_management-user"]),
+ {error, {no_such_user, _}} =
+ control_action(change_password,
+ ["user_management-user", "user_management-password"]),
+ {error, {no_such_vhost, _}} =
+ control_action(delete_vhost,
+ ["/user_management-vhost"]),
+ {error, {no_such_user, _}} =
+ control_action(set_permissions,
+ ["user_management-user", ".*", ".*", ".*"]),
+ {error, {no_such_user, _}} =
+ control_action(clear_permissions,
+ ["user_management-user"]),
+ {error, {no_such_user, _}} =
+ control_action(list_user_permissions,
+ ["user_management-user"]),
+ {error, {no_such_vhost, _}} =
+ control_action(list_permissions, [],
+ [{"-p", "/user_management-vhost"}]),
+ {error, {invalid_regexp, _, _}} =
+ control_action(set_permissions,
+ ["guest", "+foo", ".*", ".*"]),
+ {error, {no_such_user, _}} =
+ control_action(set_user_tags,
+ ["user_management-user", "bar"]),
+
+ %% user creation
+ ok = control_action(add_user,
+ ["user_management-user", "user_management-password"]),
+ {error, {user_already_exists, _}} =
+ control_action(add_user,
+ ["user_management-user", "user_management-password"]),
+ ok = control_action(clear_password,
+ ["user_management-user"]),
+ ok = control_action(change_password,
+ ["user_management-user", "user_management-newpassword"]),
+
+ TestTags = fun (Tags) ->
+ Args = ["user_management-user" | [atom_to_list(T) || T <- Tags]],
+ ok = control_action(set_user_tags, Args),
+ {ok, #internal_user{tags = Tags}} =
+ rabbit_auth_backend_internal:lookup_user(
+ <<"user_management-user">>),
+ ok = control_action(list_users, [])
+ end,
+ TestTags([foo, bar, baz]),
+ TestTags([administrator]),
+ TestTags([]),
+
+ %% user authentication
+ ok = control_action(authenticate_user,
+ ["user_management-user", "user_management-newpassword"]),
+ {refused, _User, _Format, _Params} =
+ control_action(authenticate_user,
+ ["user_management-user", "user_management-password"]),
+
+ %% vhost creation
+ ok = control_action(add_vhost,
+ ["/user_management-vhost"]),
+ {error, {vhost_already_exists, _}} =
+ control_action(add_vhost,
+ ["/user_management-vhost"]),
+ ok = control_action(list_vhosts, []),
+
+ %% user/vhost mapping
+ ok = control_action(set_permissions,
+ ["user_management-user", ".*", ".*", ".*"],
+ [{"-p", "/user_management-vhost"}]),
+ ok = control_action(set_permissions,
+ ["user_management-user", ".*", ".*", ".*"],
+ [{"-p", "/user_management-vhost"}]),
+ ok = control_action(set_permissions,
+ ["user_management-user", ".*", ".*", ".*"],
+ [{"-p", "/user_management-vhost"}]),
+ ok = control_action(list_permissions, [],
+ [{"-p", "/user_management-vhost"}]),
+ ok = control_action(list_permissions, [],
+ [{"-p", "/user_management-vhost"}]),
+ ok = control_action(list_user_permissions,
+ ["user_management-user"]),
+
+ %% user/vhost unmapping
+ ok = control_action(clear_permissions,
+ ["user_management-user"], [{"-p", "/user_management-vhost"}]),
+ ok = control_action(clear_permissions,
+ ["user_management-user"], [{"-p", "/user_management-vhost"}]),
+
+ %% vhost deletion
+ ok = control_action(delete_vhost,
+ ["/user_management-vhost"]),
+ {error, {no_such_vhost, _}} =
+ control_action(delete_vhost,
+ ["/user_management-vhost"]),
+
+ %% deleting a populated vhost
+ ok = control_action(add_vhost,
+ ["/user_management-vhost"]),
+ ok = control_action(set_permissions,
+ ["user_management-user", ".*", ".*", ".*"],
+ [{"-p", "/user_management-vhost"}]),
+ {new, _} = rabbit_amqqueue:declare(
+ rabbit_misc:r(<<"/user_management-vhost">>, queue,
+ <<"user_management-vhost-queue">>),
+ true, false, [], none),
+ ok = control_action(delete_vhost,
+ ["/user_management-vhost"]),
+
+ %% user deletion
+ ok = control_action(delete_user,
+ ["user_management-user"]),
+ {error, {no_such_user, _}} =
+ control_action(delete_user,
+ ["user_management-user"]),
+
+ passed.
+
+%% Validates runtime parameter handling via set_parameter /
+%% clear_parameter / list_parameters: JSON acceptance and rejection,
+%% the component validation hook, and clearing a parameter after its
+%% component has been unregistered. Runs on broker node 0 via RPC.
+runtime_parameters(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, runtime_parameters1, [Config]).
+
+runtime_parameters1(_Config) ->
+    dummy_runtime_parameters:register(),
+    %% Good: set_parameter must return ok; Bad: must return an error string
+    Good = fun(L) -> ok = control_action(set_parameter, L) end,
+    Bad = fun(L) -> {error_string, _} = control_action(set_parameter, L) end,
+
+    %% Acceptable for bijection: all JSON scalar/object forms round-trip
+    Good(["test", "good", "\"ignore\""]),
+    Good(["test", "good", "123"]),
+    Good(["test", "good", "true"]),
+    Good(["test", "good", "false"]),
+    Good(["test", "good", "null"]),
+    Good(["test", "good", "{\"key\": \"value\"}"]),
+
+    %% Invalid json must be rejected
+    Bad(["test", "good", "atom"]),
+    Bad(["test", "good", "{\"foo\": \"bar\""]),
+    Bad(["test", "good", "{foo: \"bar\"}"]),
+
+    %% Test actual validation hook provided by dummy_runtime_parameters
+    Good(["test", "maybe", "\"good\""]),
+    Bad(["test", "maybe", "\"bad\""]),
+    Good(["test", "admin", "\"ignore\""]), %% ctl means 'user' -> none
+
+    ok = control_action(list_parameters, []),
+
+    ok = control_action(clear_parameter, ["test", "good"]),
+    ok = control_action(clear_parameter, ["test", "maybe"]),
+    ok = control_action(clear_parameter, ["test", "admin"]),
+    {error_string, _} =
+        control_action(clear_parameter, ["test", "neverexisted"]),
+
+    %% We can delete for a component that no longer exists
+    Good(["test", "good", "\"ignore\""]),
+    dummy_runtime_parameters:unregister(),
+    ok = control_action(clear_parameter, ["test", "good"]),
+    passed.
+
+%% Checks that policy definitions are run through the registered policy
+%% validator (dummy_runtime_parameters): valid definitions are stored
+%% and visible via does_policy_exist/2, invalid ones are rejected.
+policy_validation(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, policy_validation1, [Config]).
+
+policy_validation1(_Config) ->
+    PolicyName = "runtime_parameters-policy",
+    dummy_runtime_parameters:register_policy_validator(),
+    %% set a one-key policy definition, formatted as JSON
+    SetPol = fun (Key, Val) ->
+                 control_action_opts(
+                   ["set_policy", PolicyName, ".*",
+                    rabbit_misc:format("{\"~s\":~p}", [Key, Val])])
+             end,
+    %% OK: setting must succeed AND the stored definition must match
+    OK     = fun (Key, Val) ->
+                 ok = SetPol(Key, Val),
+                 true = does_policy_exist(PolicyName,
+                   [{definition, [{list_to_binary(Key), Val}]}])
+             end,
+
+    %% "testeven" accepts even-length lists, "testpos" positive numbers
+    OK("testeven", []),
+    OK("testeven", [1, 2]),
+    OK("testeven", [1, 2, 3, 4]),
+    OK("testpos",  [2, 5, 5678]),
+
+    {error_string, _} = SetPol("testpos",  [-1, 0, 1]),
+    {error_string, _} = SetPol("testeven", [ 1, 2, 3]),
+
+    ok = control_action(clear_policy, [PolicyName]),
+    dummy_runtime_parameters:unregister_policy_validator(),
+    passed.
+
+%% Checks validation of the set_policy command-line options --priority
+%% and --apply-to (defaults, valid values, invalid values, and missing
+%% option arguments), plus rejection of the unrelated --offline flag.
+policy_opts_validation(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, policy_opts_validation1, [Config]).
+
+policy_opts_validation1(_Config) ->
+    PolicyName = "policy_opts_validation-policy",
+    %% run set_policy with a fixed pattern/definition plus extra options
+    Set  = fun (Extra) -> control_action_opts(
+                            ["set_policy", PolicyName,
+                             ".*", "{\"ha-mode\":\"all\"}"
+                             | Extra]) end,
+    OK   = fun (Extra, Props) ->
+                 ok = Set(Extra),
+                 true = does_policy_exist(PolicyName, Props)
+           end,
+    %% Failure takes different shapes depending on how parsing breaks:
+    %% {error_string, _} for bad values, no_command when an option is
+    %% left without its argument, and an 'EXIT'/function_clause for the
+    %% unsupported --offline flag
+    Fail = fun (Extra) ->
+               case Set(Extra) of
+                   {error_string, _} -> ok;
+                   no_command when Extra =:= ["--priority"] -> ok;
+                   no_command when Extra =:= ["--apply-to"] -> ok;
+                   {'EXIT',
+                    {function_clause,
+                     [{rabbit_control_main,action, _, _} | _]}}
+                     when Extra =:= ["--offline"] -> ok
+               end
+           end,
+
+    %% defaults: priority 0, apply-to "all"
+    OK  ([], [{priority, 0}, {'apply-to', <<"all">>}]),
+
+    OK  (["--priority", "0"], [{priority, 0}]),
+    OK  (["--priority", "3"], [{priority, 3}]),
+    Fail(["--priority", "banana"]),
+    Fail(["--priority"]),
+
+    OK  (["--apply-to", "all"],    [{'apply-to', <<"all">>}]),
+    OK  (["--apply-to", "queues"], [{'apply-to', <<"queues">>}]),
+    Fail(["--apply-to", "bananas"]),
+    Fail(["--apply-to"]),
+
+    OK  (["--priority", "3", "--apply-to", "queues"], [{priority, 3}, {'apply-to', <<"queues">>}]),
+    Fail(["--priority", "banana", "--apply-to", "queues"]),
+    Fail(["--priority", "3", "--apply-to", "bananas"]),
+
+    Fail(["--offline"]),
+
+    ok = control_action(clear_policy, [PolicyName]),
+    passed.
+
+%% Checks validation of HA (mirroring) policy definitions: the allowed
+%% ha-mode values, the ha-params each mode requires, and ha-sync-mode;
+%% also that ha-params / ha-sync-mode are rejected without an ha-mode.
+ha_policy_validation(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, ha_policy_validation1, [Config]).
+
+ha_policy_validation1(_Config) ->
+    PolicyName = "ha_policy_validation-policy",
+    Set  = fun (JSON) -> control_action_opts(
+                           ["set_policy", PolicyName,
+                            ".*", JSON]) end,
+    %% OK: policy is accepted and stored with the expected definition
+    OK   = fun (JSON, Def) ->
+               ok = Set(JSON),
+               true = does_policy_exist(PolicyName, [{definition, Def}])
+           end,
+    Fail = fun (JSON) -> {error_string, _} = Set(JSON) end,
+
+    OK  ("{\"ha-mode\":\"all\"}", [{<<"ha-mode">>, <<"all">>}]),
+    Fail("{\"ha-mode\":\"made_up\"}"),
+
+    %% "nodes" mode requires a non-empty list of node name strings
+    Fail("{\"ha-mode\":\"nodes\"}"),
+    Fail("{\"ha-mode\":\"nodes\",\"ha-params\":2}"),
+    Fail("{\"ha-mode\":\"nodes\",\"ha-params\":[\"a\",2]}"),
+    OK  ("{\"ha-mode\":\"nodes\",\"ha-params\":[\"a\",\"b\"]}",
+         [{<<"ha-mode">>, <<"nodes">>}, {<<"ha-params">>, [<<"a">>, <<"b">>]}]),
+    Fail("{\"ha-params\":[\"a\",\"b\"]}"),
+
+    %% "exactly" mode requires an integer mirror count
+    Fail("{\"ha-mode\":\"exactly\"}"),
+    Fail("{\"ha-mode\":\"exactly\",\"ha-params\":[\"a\",\"b\"]}"),
+    OK  ("{\"ha-mode\":\"exactly\",\"ha-params\":2}",
+         [{<<"ha-mode">>, <<"exactly">>}, {<<"ha-params">>, 2}]),
+    Fail("{\"ha-params\":2}"),
+
+    %% ha-sync-mode is only valid alongside an ha-mode
+    OK  ("{\"ha-mode\":\"all\",\"ha-sync-mode\":\"manual\"}",
+         [{<<"ha-mode">>, <<"all">>}, {<<"ha-sync-mode">>, <<"manual">>}]),
+    OK  ("{\"ha-mode\":\"all\",\"ha-sync-mode\":\"automatic\"}",
+         [{<<"ha-mode">>, <<"all">>}, {<<"ha-sync-mode">>, <<"automatic">>}]),
+    Fail("{\"ha-mode\":\"all\",\"ha-sync-mode\":\"made_up\"}"),
+    Fail("{\"ha-sync-mode\":\"manual\"}"),
+    Fail("{\"ha-sync-mode\":\"automatic\"}"),
+
+    ok = control_action(clear_policy, [PolicyName]),
+    passed.
+
+%% Checks validation of the queue-master-locator policy key: the three
+%% supported strategies are accepted, anything else is rejected.
+queue_master_location_policy_validation(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, queue_master_location_policy_validation1, [Config]).
+
+queue_master_location_policy_validation1(_Config) ->
+    PolicyName = "queue_master_location_policy_validation-policy",
+    Set  = fun (JSON) ->
+               control_action_opts(
+                 ["set_policy", PolicyName, ".*", JSON])
+           end,
+    OK   = fun (JSON, Def) ->
+               ok = Set(JSON),
+               true = does_policy_exist(PolicyName, [{definition, Def}])
+           end,
+    Fail = fun (JSON) -> {error_string, _} = Set(JSON) end,
+
+    OK  ("{\"queue-master-locator\":\"min-masters\"}",
+         [{<<"queue-master-locator">>, <<"min-masters">>}]),
+    OK  ("{\"queue-master-locator\":\"client-local\"}",
+         [{<<"queue-master-locator">>, <<"client-local">>}]),
+    OK  ("{\"queue-master-locator\":\"random\"}",
+         [{<<"queue-master-locator">>, <<"random">>}]),
+    Fail("{\"queue-master-locator\":\"made_up\"}"),
+
+    ok = control_action(clear_policy, [PolicyName]),
+    passed.
+
+%% Checks validation of the queue-mode policy key: "lazy" and "default"
+%% are accepted, any other value is rejected.
+queue_modes_policy_validation(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, queue_modes_policy_validation1, [Config]).
+
+queue_modes_policy_validation1(_Config) ->
+    PolicyName = "queue_modes_policy_validation-policy",
+    Set  = fun (JSON) ->
+               control_action_opts(
+                 ["set_policy", PolicyName, ".*", JSON])
+           end,
+    OK   = fun (JSON, Def) ->
+               ok = Set(JSON),
+               true = does_policy_exist(PolicyName, [{definition, Def}])
+           end,
+    Fail = fun (JSON) -> {error_string, _} = Set(JSON) end,
+
+    OK  ("{\"queue-mode\":\"lazy\"}",
+         [{<<"queue-mode">>, <<"lazy">>}]),
+    OK  ("{\"queue-mode\":\"default\"}",
+         [{<<"queue-mode">>, <<"default">>}]),
+    Fail("{\"queue-mode\":\"wrong\"}"),
+
+    ok = control_action(clear_policy, [PolicyName]),
+    passed.
+
+%% Regression test: deleting a vhost that still carries a policy must
+%% succeed and remove the policy along with it.
+vhost_removed_while_updating_policy(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, vhost_removed_while_updating_policy1, [Config]).
+
+vhost_removed_while_updating_policy1(_Config) ->
+    VHost = "/vhost_removed_while_updating_policy-vhost",
+    PolicyName = "vhost_removed_while_updating_policy-policy",
+
+    ok = control_action(add_vhost, [VHost]),
+    ok = control_action_opts(
+      ["set_policy", "-p", VHost, PolicyName, ".*", "{\"ha-mode\":\"all\"}"]),
+    true = does_policy_exist(PolicyName, []),
+
+    %% Removing the vhost triggers the deletion of the policy. Once
+    %% the policy and the vhost are actually removed, RabbitMQ calls
+    %% update_policies() which lists policies on the given vhost. This
+    %% obviously fails because the vhost is gone, but the call should
+    %% still succeed.
+    ok = control_action(delete_vhost, [VHost]),
+    false = does_policy_exist(PolicyName, []),
+
+    passed.
+
+%% Returns true iff exactly one policy named PolicyName exists and it
+%% carries every property in Props (checked via check_policy_props/2).
+%% Zero matches — and, deliberately, more than one match — yield false.
+does_policy_exist(PolicyName, Props) ->
+    PolicyNameBin = list_to_binary(PolicyName),
+    %% keep only policies whose proplist contains {name, PolicyNameBin}
+    Policies = lists:filter(
+                 fun(Policy) ->
+                     lists:member({name, PolicyNameBin}, Policy)
+                 end, rabbit_policy:list()),
+    case Policies of
+        [Policy] -> check_policy_props(Policy, Props);
+        []       -> false;
+        _        -> false
+    end.
+
+%% Returns true iff every {Key, Value} pair in the second argument is a
+%% member of the Policy proplist; an empty list of expectations is true.
+check_policy_props(Policy, [Prop | Rest]) ->
+    case lists:member(Prop, Policy) of
+        true  -> check_policy_props(Policy, Rest);
+        false -> false
+    end;
+check_policy_props(_Policy, []) ->
+    true.
+
+%% Smoke-tests the "server status" family of rabbitmqctl commands:
+%% list_queues/exchanges/bindings/connections/channels/consumers,
+%% close_connection, set_vm_memory_high_watermark and eval, against
+%% freshly created queues, a consumer and a raw AMQP connection.
+server_status(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, server_status1, [Config]).
+
+server_status1(Config) ->
+    %% create a few things so there is some useful information to list
+    {_Writer, Limiter, Ch} = rabbit_ct_broker_helpers:test_channel(),
+    %% q1 has no owner (shared); q2 is exclusive to this process
+    [Q, Q2] = [Queue || {Name, Owner} <- [{<<"server_status-q1">>, none},
+                                          {<<"server_status-q2">>, self()}],
+                        {new, Queue = #amqqueue{}} <-
+                            [rabbit_amqqueue:declare(
+                               rabbit_misc:r(<<"/">>, queue, Name),
+                               false, false, [], Owner)]],
+    ok = rabbit_amqqueue:basic_consume(
+           Q, true, Ch, Limiter, false, 0, <<"ctag">>, true, [], undefined),
+
+    %% list queues
+    ok = info_action(list_queues,
+                     rabbit_amqqueue:info_keys(), true),
+
+    %% as we have no way to collect output of
+    %% info_action/3 call, the only way we
+    %% can test individual queueinfoitems is by directly calling
+    %% rabbit_amqqueue:info/2
+    [{exclusive, false}] = rabbit_amqqueue:info(Q, [exclusive]),
+    [{exclusive, true}] = rabbit_amqqueue:info(Q2, [exclusive]),
+
+    %% list exchanges
+    ok = info_action(list_exchanges,
+                     rabbit_exchange:info_keys(), true),
+
+    %% list bindings
+    ok = info_action(list_bindings,
+                     rabbit_binding:info_keys(), true),
+    %% misc binding listing APIs; default exchange binds both queues,
+    %% each queue has exactly one binding to it
+    [_|_] = rabbit_binding:list_for_source(
+              rabbit_misc:r(<<"/">>, exchange, <<"">>)),
+    [_] = rabbit_binding:list_for_destination(
+            rabbit_misc:r(<<"/">>, queue, <<"server_status-q1">>)),
+    [_] = rabbit_binding:list_for_source_and_destination(
+            rabbit_misc:r(<<"/">>, exchange, <<"">>),
+            rabbit_misc:r(<<"/">>, queue, <<"server_status-q1">>)),
+
+    %% list connections: open a raw socket and send the AMQP 0-9-1 header
+    H = ?config(rmq_hostname, Config),
+    P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+    {ok, C} = gen_tcp:connect(H, P, []),
+    gen_tcp:send(C, <<"AMQP", 0, 0, 9, 1>>),
+    timer:sleep(100),
+    ok = info_action(list_connections,
+                     rabbit_networking:connection_info_keys(), false),
+    %% close_connection
+    [ConnPid] = rabbit_ct_broker_helpers:get_connection_pids([C]),
+    ok = control_action(close_connection,
+                        [rabbit_misc:pid_to_string(ConnPid), "go away"]),
+
+    %% list channels
+    ok = info_action(list_channels, rabbit_channel:info_keys(), false),
+
+    %% list consumers
+    ok = control_action(list_consumers, []),
+
+    %% set vm memory high watermark; remember the current value so it
+    %% can be restored afterwards
+    HWM = vm_memory_monitor:get_vm_memory_high_watermark(),
+    ok = control_action(set_vm_memory_high_watermark, ["1"]),
+    ok = control_action(set_vm_memory_high_watermark, ["1.0"]),
+    %% this will trigger an alarm
+    ok = control_action(set_vm_memory_high_watermark, ["0.0"]),
+    %% reset
+    ok = control_action(set_vm_memory_high_watermark, [float_to_list(HWM)]),
+
+    %% eval: malformed expressions are reported, well-formed ones run
+    {error_string, _} = control_action(eval, ["\""]),
+    {error_string, _} = control_action(eval, ["a("]),
+    ok = control_action(eval, ["a."]),
+
+    %% cleanup
+    [{ok, _} = rabbit_amqqueue:delete(QR, false, false) || QR <- [Q, Q2]],
+
+    unlink(Ch),
+    ok = rabbit_channel:shutdown(Ch),
+
+    passed.
+
+%% Verifies that the broker refuses connections that send a bad protocol
+%% header (wrong version bytes or wrong magic), responding with the
+%% correct "AMQP 0-9-1" header before closing.
+amqp_connection_refusal(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, amqp_connection_refusal1, [Config]).
+
+amqp_connection_refusal1(Config) ->
+    H = ?config(rmq_hostname, Config),
+    P = rabbit_ct_broker_helpers:get_node_config(Config, 0, tcp_port_amqp),
+    [passed = test_amqp_connection_refusal(H, P, V) ||
+        V <- [<<"AMQP",9,9,9,9>>, <<"AMQP",0,1,0,0>>, <<"XXXX",0,0,9,1>>]],
+    passed.
+
+%% Send the given (invalid) header and expect the server to reply with
+%% the supported-protocol header <<"AMQP",0,0,9,1>> within 100ms.
+test_amqp_connection_refusal(H, P, Header) ->
+    {ok, C} = gen_tcp:connect(H, P, [binary, {active, false}]),
+    ok = gen_tcp:send(C, Header),
+    {ok, <<"AMQP",0,0,9,1>>} = gen_tcp:recv(C, 8, 100),
+    ok = gen_tcp:close(C),
+    passed.
+
+%% Sanity-checks `rabbitmqctl list_consumers`: no consumers are listed
+%% before any are created, two consumers on one channel are both
+%% reported, and the same consumer tags appear via the raw rabbitmqctl
+%% output parsed here (shared code path with `rabbitmqctl report`).
+list_consumers_sanity_check(Config) ->
+    A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+    Chan = rabbit_ct_client_helpers:open_channel(Config, A),
+    %% this queue is not cleaned up because the entire node is
+    %% reset between tests
+    QName = <<"list_consumers_q">>,
+    #'queue.declare_ok'{} = amqp_channel:call(Chan, #'queue.declare'{queue = QName}),
+
+    %% No consumers even if we have some queues
+    [] = rabbitmqctl_list_consumers(Config, A),
+
+    %% Several consumers on single channel should be correctly reported
+    #'basic.consume_ok'{consumer_tag = CTag1} = amqp_channel:call(Chan, #'basic.consume'{queue = QName}),
+    #'basic.consume_ok'{consumer_tag = CTag2} = amqp_channel:call(Chan, #'basic.consume'{queue = QName}),
+    true = (lists:sort([CTag1, CTag2]) =:=
+            lists:sort(rabbitmqctl_list_consumers(Config, A))),
+
+    %% `rabbitmqctl report` shares some code with `list_consumers`, so
+    %% check that it also reports both channels
+    {ok, ReportStdOut} = rabbit_ct_broker_helpers:rabbitmqctl(Config, A,
+      ["list_consumers"]),
+    ReportLines = re:split(ReportStdOut, <<"\n">>, [trim]),
+    %% consumer tag is the third tab-separated column of each queue row
+    ReportCTags = [lists:nth(3, re:split(Row, <<"\t">>)) || <<"list_consumers_q", _/binary>> = Row <- ReportLines],
+    true = (lists:sort([CTag1, CTag2]) =:=
+            lists:sort(ReportCTags)).
+
+%% Runs `rabbitmqctl list_consumers` on Node and returns the consumer
+%% tags (third tab-separated column of each row after the
+%% "Listing consumers" banner line).
+rabbitmqctl_list_consumers(Config, Node) ->
+    {ok, StdOut} = rabbit_ct_broker_helpers:rabbitmqctl(Config, Node,
+      ["list_consumers"]),
+    [<<"Listing consumers", _/binary>> | ConsumerRows] = re:split(StdOut, <<"\n">>, [trim]),
+    CTags = [ lists:nth(3, re:split(Row, <<"\t">>)) || Row <- ConsumerRows ],
+    CTags.
+
+%% Declares two durable queues on each of two nodes, stops node B, and
+%% checks that `list_queues --online` shows only A's queues,
+%% `--offline` shows only B's, and the unfiltered listing shows all.
+list_queues_online_and_offline(Config) ->
+    [A, B] = rabbit_ct_broker_helpers:get_node_configs(Config, nodename),
+    ACh = rabbit_ct_client_helpers:open_channel(Config, A),
+    %% Node B will be stopped
+    BCh = rabbit_ct_client_helpers:open_channel(Config, B),
+    #'queue.declare_ok'{} = amqp_channel:call(ACh, #'queue.declare'{queue = <<"q_a_1">>, durable = true}),
+    #'queue.declare_ok'{} = amqp_channel:call(ACh, #'queue.declare'{queue = <<"q_a_2">>, durable = true}),
+    #'queue.declare_ok'{} = amqp_channel:call(BCh, #'queue.declare'{queue = <<"q_b_1">>, durable = true}),
+    #'queue.declare_ok'{} = amqp_channel:call(BCh, #'queue.declare'{queue = <<"q_b_2">>, durable = true}),
+
+    rabbit_ct_broker_helpers:rabbitmqctl(Config, B, ["stop"]),
+
+    GotUp = lists:sort(rabbit_ct_broker_helpers:rabbitmqctl_list(Config, A,
+      ["list_queues", "--online", "name"])),
+    ExpectUp = [[<<"q_a_1">>], [<<"q_a_2">>]],
+    ExpectUp = GotUp,
+
+    GotDown = lists:sort(rabbit_ct_broker_helpers:rabbitmqctl_list(Config, A,
+      ["list_queues", "--offline", "name"])),
+    ExpectDown = [[<<"q_b_1">>], [<<"q_b_2">>]],
+    ExpectDown = GotDown,
+
+    GotAll = lists:sort(rabbit_ct_broker_helpers:rabbitmqctl_list(Config, A,
+      ["list_queues", "name"])),
+    ExpectAll = ExpectUp ++ ExpectDown,
+    ExpectAll = GotAll,
+
+    ok.
+
+%% -------------------------------------------------------------------
+%% Statistics.
+%% -------------------------------------------------------------------
+
+%% Checks fine-grained channel statistics events: queue, exchange and
+%% queue-exchange stats start empty, reflect a publish + basic.get, and
+%% drop queue-related entries once the queue is deleted.
+channel_statistics(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, channel_statistics1, [Config]).
+
+channel_statistics1(_Config) ->
+    application:set_env(rabbit, collect_statistics, fine),
+
+    %% ATM this just tests the queue / exchange stats in channels. That's
+    %% by far the most complex code though.
+
+    %% Set up a channel and queue
+    {_Writer, Ch} = test_spawn(),
+    rabbit_channel:do(Ch, #'queue.declare'{}),
+    %% server-generated queue name arrives in queue.declare_ok
+    QName = receive #'queue.declare_ok'{queue = Q0} -> Q0
+            after ?TIMEOUT -> throw(failed_to_receive_queue_declare_ok)
+            end,
+    QRes = rabbit_misc:r(<<"/">>, queue, QName),
+    X = rabbit_misc:r(<<"/">>, exchange, <<"">>),
+
+    dummy_event_receiver:start(self(), [node()], [channel_stats]),
+
+    %% Check stats empty
+    Event = test_ch_statistics_receive_event(Ch, fun (_) -> true end),
+    [] = proplists:get_value(channel_queue_stats, Event),
+    [] = proplists:get_value(channel_exchange_stats, Event),
+    [] = proplists:get_value(channel_queue_exchange_stats, Event),
+
+    %% Publish and get a message
+    rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"">>,
+                                           routing_key = QName},
+                      rabbit_basic:build_content(#'P_basic'{}, <<"">>)),
+    rabbit_channel:do(Ch, #'basic.get'{queue = QName}),
+
+    %% Check the stats reflect that: one get on the queue, one publish
+    %% on the default exchange, one publish on the (queue, exchange) pair
+    Event2 = test_ch_statistics_receive_event(
+               Ch,
+               fun (E) ->
+                   length(proplists:get_value(
+                            channel_queue_exchange_stats, E)) > 0
+               end),
+    [{QRes, [{get,1}]}] = proplists:get_value(channel_queue_stats, Event2),
+    [{X,[{publish,1}]}] = proplists:get_value(channel_exchange_stats, Event2),
+    [{{QRes,X},[{publish,1}]}] =
+        proplists:get_value(channel_queue_exchange_stats, Event2),
+
+    %% Check the stats remove stuff on queue deletion
+    rabbit_channel:do(Ch, #'queue.delete'{queue = QName}),
+    Event3 = test_ch_statistics_receive_event(
+               Ch,
+               fun (E) ->
+                   length(proplists:get_value(
+                            channel_queue_exchange_stats, E)) == 0
+               end),
+
+    [] = proplists:get_value(channel_queue_stats, Event3),
+    [{X,[{publish,1}]}] = proplists:get_value(channel_exchange_stats, Event3),
+    [] = proplists:get_value(channel_queue_exchange_stats, Event3),
+
+    rabbit_channel:shutdown(Ch),
+    dummy_event_receiver:stop(),
+    passed.
+
+%% Asks channel Ch to emit stats, then waits (up to ?TIMEOUT) for a
+%% channel_stats event whose props satisfy Matcher; non-matching events
+%% are discarded and waiting continues.
+test_ch_statistics_receive_event(Ch, Matcher) ->
+    %% flush first so the emit_stats is processed after prior commands
+    rabbit_channel:flush(Ch),
+    Ch ! emit_stats,
+    test_ch_statistics_receive_event1(Ch, Matcher).
+
+test_ch_statistics_receive_event1(Ch, Matcher) ->
+    receive #event{type = channel_stats, props = Props} ->
+            case Matcher(Props) of
+                true -> Props;
+                _    -> test_ch_statistics_receive_event1(Ch, Matcher)
+            end
+    after ?TIMEOUT -> throw(failed_to_receive_event)
+    end.
+
+%% Checks the head_message_timestamp queue statistic: empty ('') on an
+%% empty queue, equal to the first message's timestamp after publishing
+%% two messages, advancing to the second after one get, and empty again
+%% once the queue is drained.
+head_message_timestamp_statistics(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, head_message_timestamp1, [Config]).
+
+head_message_timestamp1(_Config) ->
+    %% Can't find a way to receive the ack here so can't test pending acks status
+
+    application:set_env(rabbit, collect_statistics, fine),
+
+    %% Set up a channel and queue
+    {_Writer, Ch} = test_spawn(),
+    rabbit_channel:do(Ch, #'queue.declare'{}),
+    QName = receive #'queue.declare_ok'{queue = Q0} -> Q0
+            after ?TIMEOUT -> throw(failed_to_receive_queue_declare_ok)
+            end,
+    QRes = rabbit_misc:r(<<"/">>, queue, QName),
+
+    {ok, Q1} = rabbit_amqqueue:lookup(QRes),
+    QPid = Q1#amqqueue.pid,
+
+    %% Set up event receiver for queue
+    dummy_event_receiver:start(self(), [node()], [queue_stats]),
+
+    %% Check timestamp is empty when queue is empty
+    Event1 = test_queue_statistics_receive_event(QPid, fun (E) -> proplists:get_value(name, E) == QRes end),
+    '' = proplists:get_value(head_message_timestamp, Event1),
+
+    %% Publish two messages and check timestamp is that of first message
+    rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"">>,
+                                           routing_key = QName},
+                      rabbit_basic:build_content(#'P_basic'{timestamp = 1}, <<"">>)),
+    rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"">>,
+                                           routing_key = QName},
+                      rabbit_basic:build_content(#'P_basic'{timestamp = 2}, <<"">>)),
+    Event2 = test_queue_statistics_receive_event(QPid, fun (E) -> proplists:get_value(name, E) == QRes end),
+    1 = proplists:get_value(head_message_timestamp, Event2),
+
+    %% Get first message and check timestamp is that of second message
+    rabbit_channel:do(Ch, #'basic.get'{queue = QName, no_ack = true}),
+    Event3 = test_queue_statistics_receive_event(QPid, fun (E) -> proplists:get_value(name, E) == QRes end),
+    2 = proplists:get_value(head_message_timestamp, Event3),
+
+    %% Get second message and check timestamp is empty again
+    rabbit_channel:do(Ch, #'basic.get'{queue = QName, no_ack = true}),
+    Event4 = test_queue_statistics_receive_event(QPid, fun (E) -> proplists:get_value(name, E) == QRes end),
+    '' = proplists:get_value(head_message_timestamp, Event4),
+
+    %% Teardown
+    rabbit_channel:do(Ch, #'queue.delete'{queue = QName}),
+    rabbit_channel:shutdown(Ch),
+    dummy_event_receiver:stop(),
+
+    passed.
+
+%% Waits (up to ?TIMEOUT) for a queue_stats event whose props satisfy
+%% Matcher, discarding non-matching events. Relies on the queue's own
+%% periodic stats emission (the explicit trigger is commented out).
+test_queue_statistics_receive_event(Q, Matcher) ->
+    %% Q ! emit_stats,
+    test_queue_statistics_receive_event1(Q, Matcher).
+
+test_queue_statistics_receive_event1(Q, Matcher) ->
+    receive #event{type = queue_stats, props = Props} ->
+            case Matcher(Props) of
+                true -> Props;
+                _    -> test_queue_statistics_receive_event1(Q, Matcher)
+            end
+    after ?TIMEOUT -> throw(failed_to_receive_event)
+    end.
+
+%% Opens a test channel (via rabbit_ct_broker_helpers:test_channel/0)
+%% and completes the channel.open handshake; returns {Writer, Channel}.
+test_spawn() ->
+    {Writer, _Limiter, Ch} = rabbit_ct_broker_helpers:test_channel(),
+    ok = rabbit_channel:do(Ch, #'channel.open'{}),
+    receive #'channel.open_ok'{} -> ok
+    after ?TIMEOUT -> throw(failed_to_receive_channel_open_ok)
+    end,
+    {Writer, Ch}.
+
+%% Same as test_spawn/0 but runs on the given remote Node.
+test_spawn(Node) ->
+    rpc:call(Node, ?MODULE, test_spawn_remote, []).
+
+%% Spawn an arbitrary long lived process, so we don't end up linking
+%% the channel to the short-lived process (RPC, here) spun up by the
+%% RPC server.
+test_spawn_remote() ->
+    RPC = self(),
+    spawn(fun () ->
+              {Writer, Ch} = test_spawn(),
+              RPC ! {Writer, Ch},
+              link(Ch),
+              %% keep this process alive so the channel link survives
+              receive
+                  _ -> ok
+              end
+          end),
+    receive Res -> Res
+    after ?TIMEOUT -> throw(failed_to_receive_result)
+    end.
+
+%% -------------------------------------------------------------------
+%% Topic matching.
+%% -------------------------------------------------------------------
+
+%% Exhaustive test of topic exchange routing: builds 28 bindings with
+%% '*'/'#' wildcard patterns (including two on the same key that differ
+%% only in arguments), checks expected matches for a range of routing
+%% keys, removes a subset of bindings and re-checks, then deletes the
+%% exchange and verifies nothing matches any more.
+topic_matching(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, topic_matching1, [Config]).
+
+topic_matching1(_Config) ->
+    XName = #resource{virtual_host = <<"/">>,
+                      kind = exchange,
+                      name = <<"topic_matching-exchange">>},
+    X0 = #exchange{name = XName, type = topic, durable = false,
+                   auto_delete = false, arguments = []},
+    X = rabbit_exchange_decorator:set(X0),
+    %% create
+    rabbit_exchange_type_topic:validate(X),
+    exchange_op_callback(X, create, []),
+
+    %% add some bindings
+    Bindings = [#binding{source = XName,
+                         key = list_to_binary(Key),
+                         destination = #resource{virtual_host = <<"/">>,
+                                                 kind = queue,
+                                                 name = list_to_binary(Q)},
+                         args = Args} ||
+                   {Key, Q, Args} <- [{"a.b.c",         "t1",  []},
+                                      {"a.*.c",         "t2",  []},
+                                      {"a.#.b",         "t3",  []},
+                                      {"a.b.b.c",       "t4",  []},
+                                      {"#",             "t5",  []},
+                                      {"#.#",           "t6",  []},
+                                      {"#.b",           "t7",  []},
+                                      {"*.*",           "t8",  []},
+                                      {"a.*",           "t9",  []},
+                                      {"*.b.c",         "t10", []},
+                                      {"a.#",           "t11", []},
+                                      {"a.#.#",         "t12", []},
+                                      {"b.b.c",         "t13", []},
+                                      {"a.b.b",         "t14", []},
+                                      {"a.b",           "t15", []},
+                                      {"b.c",           "t16", []},
+                                      {"",              "t17", []},
+                                      {"*.*.*",         "t18", []},
+                                      {"vodka.martini", "t19", []},
+                                      {"a.b.c",         "t20", []},
+                                      {"*.#",           "t21", []},
+                                      {"#.*.#",         "t22", []},
+                                      {"*.#.#",         "t23", []},
+                                      {"#.#.#",         "t24", []},
+                                      {"*",             "t25", []},
+                                      {"#.b.#",         "t26", []},
+                                      {"args-test",     "t27",
+                                       [{<<"foo">>, longstr, <<"bar">>}]},
+                                      {"args-test",     "t27", %% Note aliasing
+                                       [{<<"foo">>, longstr, <<"baz">>}]}]],
+    lists:foreach(fun (B) -> exchange_op_callback(X, add_binding, [B]) end,
+                  Bindings),
+
+    %% test some matches
+    test_topic_expect_match(
+      X, [{"a.b.c",               ["t1", "t2", "t5", "t6", "t10", "t11", "t12",
+                                   "t18", "t20", "t21", "t22", "t23", "t24",
+                                   "t26"]},
+          {"a.b",                 ["t3", "t5", "t6", "t7", "t8", "t9", "t11",
+                                   "t12", "t15", "t21", "t22", "t23", "t24",
+                                   "t26"]},
+          {"a.b.b",               ["t3", "t5", "t6", "t7", "t11", "t12", "t14",
+                                   "t18", "t21", "t22", "t23", "t24", "t26"]},
+          {"",                    ["t5", "t6", "t17", "t24"]},
+          {"b.c.c",               ["t5", "t6", "t18", "t21", "t22", "t23",
+                                   "t24", "t26"]},
+          {"a.a.a.a.a",           ["t5", "t6", "t11", "t12", "t21", "t22",
+                                   "t23", "t24"]},
+          {"vodka.gin",           ["t5", "t6", "t8", "t21", "t22", "t23",
+                                   "t24"]},
+          {"vodka.martini",       ["t5", "t6", "t8", "t19", "t21", "t22", "t23",
+                                   "t24"]},
+          {"b.b.c",               ["t5", "t6", "t10", "t13", "t18", "t21",
+                                   "t22", "t23", "t24", "t26"]},
+          {"nothing.here.at.all", ["t5", "t6", "t21", "t22", "t23", "t24"]},
+          {"oneword",             ["t5", "t6", "t21", "t22", "t23", "t24",
+                                   "t25"]},
+          {"args-test",           ["t5", "t6", "t21", "t22", "t23", "t24",
+                                   "t25", "t27"]}]),
+    %% remove some bindings (t1, t5, t11, t19, t21 and the second
+    %% args-test binding) and compute the remainder
+    RemovedBindings = [lists:nth(1, Bindings), lists:nth(5, Bindings),
+                       lists:nth(11, Bindings), lists:nth(19, Bindings),
+                       lists:nth(21, Bindings), lists:nth(28, Bindings)],
+    exchange_op_callback(X, remove_bindings, [RemovedBindings]),
+    RemainingBindings = ordsets:to_list(
+                          ordsets:subtract(ordsets:from_list(Bindings),
+                                           ordsets:from_list(RemovedBindings))),
+
+    %% test some matches
+    test_topic_expect_match(
+      X,
+      [{"a.b.c",               ["t2", "t6", "t10", "t12", "t18", "t20", "t22",
+                                "t23", "t24", "t26"]},
+       {"a.b",                 ["t3", "t6", "t7", "t8", "t9", "t12", "t15",
+                                "t22", "t23", "t24", "t26"]},
+       {"a.b.b",               ["t3", "t6", "t7", "t12", "t14", "t18", "t22",
+                                "t23", "t24", "t26"]},
+       {"",                    ["t6", "t17", "t24"]},
+       {"b.c.c",               ["t6", "t18", "t22", "t23", "t24", "t26"]},
+       {"a.a.a.a.a",           ["t6", "t12", "t22", "t23", "t24"]},
+       {"vodka.gin",           ["t6", "t8", "t22", "t23", "t24"]},
+       {"vodka.martini",       ["t6", "t8", "t22", "t23", "t24"]},
+       {"b.b.c",               ["t6", "t10", "t13", "t18", "t22", "t23",
+                                "t24", "t26"]},
+       {"nothing.here.at.all", ["t6", "t22", "t23", "t24"]},
+       {"oneword",             ["t6", "t22", "t23", "t24", "t25"]},
+       {"args-test",           ["t6", "t22", "t23", "t24", "t25", "t27"]}]),
+
+    %% remove the entire exchange
+    exchange_op_callback(X, delete, [RemainingBindings]),
+    %% none should match now
+    test_topic_expect_match(X, [{"a.b.c", []}, {"b.b.c", []}, {"", []}]),
+    passed.
+
+%% Invokes exchange-type callback Fun twice, as rabbit_exchange does:
+%% once inside an mnesia transaction (tx phase) and once outside of it
+%% (the 'none' post-commit phase), with X prepended to Args.
+exchange_op_callback(X, Fun, Args) ->
+    rabbit_misc:execute_mnesia_transaction(
+      fun () -> rabbit_exchange:callback(X, Fun, transaction, [X] ++ Args) end),
+    rabbit_exchange:callback(X, Fun, none, [X] ++ Args).
+
+%% For each {RoutingKey, ExpectedQueueNames} pair, routes a message with
+%% that key through topic exchange X and asserts the routed destinations
+%% equal the expected queue resources (order-insensitive, deduplicated
+%% via lists:usort/1 on both sides).
+test_topic_expect_match(X, List) ->
+    lists:foreach(
+      fun ({Key, Expected}) ->
+          BinKey = list_to_binary(Key),
+          Message = rabbit_basic:message(X#exchange.name, BinKey,
+                                         #'P_basic'{}, <<>>),
+          Res = rabbit_exchange_type_topic:route(
+                  X, #delivery{mandatory = false,
+                               sender    = self(),
+                               message   = Message}),
+          ExpectedRes = lists:map(
+                          fun (Q) -> #resource{virtual_host = <<"/">>,
+                                               kind = queue,
+                                               name = list_to_binary(Q)}
+                          end, Expected),
+          true = (lists:usort(ExpectedRes) =:= lists:usort(Res))
+      end, List).
+
+%% ---------------------------------------------------------------------------
+%% Unordered tests (originally from rabbit_tests.erl).
+%% ---------------------------------------------------------------------------
+
+%% Publisher-confirms test: publishes a persistent message routed to two
+%% durable queues, crashes one queue process, and expects a basic.nack
+%% (and no stray basic.ack) on the confirming channel.
+confirms(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, confirms1, [Config]).
+
+confirms1(_Config) ->
+    {_Writer, Ch} = test_spawn(),
+    %% declare a server-named durable queue and bind it to amq.direct
+    %% with the "confirms-magic" routing key; returns the queue name
+    DeclareBindDurableQueue =
+        fun() ->
+            rabbit_channel:do(Ch, #'queue.declare'{durable = true}),
+            receive #'queue.declare_ok'{queue = Q0} ->
+                    rabbit_channel:do(Ch, #'queue.bind'{
+                                            queue = Q0,
+                                            exchange = <<"amq.direct">>,
+                                            routing_key = "confirms-magic" }),
+                    receive #'queue.bind_ok'{} -> Q0
+                    after ?TIMEOUT -> throw(failed_to_bind_queue)
+                    end
+            after ?TIMEOUT -> throw(failed_to_declare_queue)
+            end
+        end,
+    %% Declare and bind two queues
+    QName1 = DeclareBindDurableQueue(),
+    QName2 = DeclareBindDurableQueue(),
+    %% Get the first one's pid (we'll crash it later)
+    {ok, Q1} = rabbit_amqqueue:lookup(rabbit_misc:r(<<"/">>, queue, QName1)),
+    QPid1 = Q1#amqqueue.pid,
+    %% Enable confirms
+    rabbit_channel:do(Ch, #'confirm.select'{}),
+    receive
+        #'confirm.select_ok'{} -> ok
+    after ?TIMEOUT -> throw(failed_to_enable_confirms)
+    end,
+    %% Publish a message
+    rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"amq.direct">>,
+                                           routing_key = "confirms-magic"
+                                          },
+                      rabbit_basic:build_content(
+                        #'P_basic'{delivery_mode = 2}, <<"">>)),
+    %% We must not kill the queue before the channel has processed the
+    %% 'publish'.
+    ok = rabbit_channel:flush(Ch),
+    %% Crash the queue
+    QPid1 ! boom,
+    %% Wait for a nack
+    receive
+        #'basic.nack'{} -> ok;
+        #'basic.ack'{}  -> throw(received_ack_instead_of_nack)
+    after ?TIMEOUT-> throw(did_not_receive_nack)
+    end,
+    %% no late ack may follow the nack
+    receive
+        #'basic.ack'{} -> throw(received_ack_when_none_expected)
+    after 1000 -> ok
+    end,
+    %% Cleanup
+    rabbit_channel:do(Ch, #'queue.delete'{queue = QName2}),
+    receive
+        #'queue.delete_ok'{} -> ok
+    after ?TIMEOUT -> throw(failed_to_cleanup_queue)
+    end,
+    unlink(Ch),
+    ok = rabbit_channel:shutdown(Ch),
+
+    passed.
+
+%% Checks gen_server2:with_state/2 by peeking at the file_handle_cache
+%% server's state record tag (its first tuple element is 'fhc_state').
+gen_server2_with_state(Config) ->
+    passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+      ?MODULE, gen_server2_with_state1, [Config]).
+
+gen_server2_with_state1(_Config) ->
+    fhc_state = gen_server2:with_state(file_handle_cache,
+                                       fun (S) -> element(1, S) end),
+    passed.
+
+%% Test gen_server2:mcall/1 against a mix of targets: live pids, dead
+%% pids, registered and unregistered names (local and global), and an
+%% unreachable node. Good targets must reply; each bad target must
+%% yield its specific error reason.
+mcall(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, mcall1, [Config]).
+
+mcall1(_Config) ->
+ P1 = spawn(fun gs2_test_listener/0),
+ register(foo, P1),
+ global:register_name(gfoo, P1),
+
+ P2 = spawn(fun() -> exit(bang) end),
+ %% ensure P2 is dead (ignore the race setting up the monitor)
+ await_exit(P2),
+
+ P3 = spawn(fun gs2_test_crasher/0),
+
+ %% since P2 crashes almost immediately and P3 after receiving its first
+ %% message, we have to spawn a few more processes to handle the additional
+ %% cases we're interested in here
+ register(baz, spawn(fun gs2_test_crasher/0)),
+ register(bog, spawn(fun gs2_test_crasher/0)),
+ global:register_name(gbaz, spawn(fun gs2_test_crasher/0)),
+
+ NoNode = rabbit_nodes:make("nonode"),
+
+ Targets =
+ %% pids
+ [P1, P2, P3]
+ ++
+ %% registered names
+ [foo, bar, baz]
+ ++
+ %% {Name, Node} pairs
+ [{foo, node()}, {bar, node()}, {bog, node()}, {foo, NoNode}]
+ ++
+ %% {global, Name}
+ [{global, gfoo}, {global, gbar}, {global, gbaz}],
+
+ GoodResults = [{D, goodbye} || D <- [P1, foo,
+ {foo, node()},
+ {global, gfoo}]],
+
+ BadResults = [{P2, noproc}, % died before use
+ {P3, boom}, % died on first use
+ {bar, noproc}, % never registered
+ {baz, boom}, % died on first use
+ {{bar, node()}, noproc}, % never registered
+ {{bog, node()}, boom}, % died on first use
+ {{foo, NoNode}, nodedown}, % invalid node
+ {{global, gbar}, noproc}, % never registered globally
+ {{global, gbaz}, boom}], % died on first use
+
+ {Replies, Errors} = gen_server2:mcall([{T, hello} || T <- Targets]),
+ %% mcall's reply order is unspecified; compare as sorted sets.
+ true = lists:sort(Replies) == lists:sort(GoodResults),
+ true = lists:sort(Errors) == lists:sort(BadResults),
+
+ %% cleanup (ignore the race setting up the monitor)
+ P1 ! stop,
+ await_exit(P1),
+ passed.
+
+%% Block until Pid has terminated (for any reason), using a monitor so
+%% an already-dead pid is handled too.
+await_exit(Pid) ->
+    Ref = erlang:monitor(process, Pid),
+    receive
+        {'DOWN', Ref, process, Pid, _Reason} ->
+            ok
+    end.
+
+%% Helper process body: terminate with reason 'boom' upon receiving the
+%% first gen_server-style call of 'hello'.
+gs2_test_crasher() ->
+ receive
+ {'$gen_call', _From, hello} -> exit(boom)
+ end.
+
+%% Helper process body: a minimal server loop that replies 'goodbye' to
+%% every gen_server-style 'hello' call and terminates on 'stop'.
+gs2_test_listener() ->
+ receive
+ {'$gen_call', From, hello} ->
+ gen_server2:reply(From, goodbye),
+ gs2_test_listener();
+ stop ->
+ ok
+ end.
+
+%% Test that rabbit_reader:server_properties/1 reflects additions to and
+%% overrides of the 'server_properties' application environment, in both
+%% the simplified {KeyAtom, StringValue} and full {Key, Type, Value}
+%% forms. Runs on broker node 0 via RPC.
+configurable_server_properties(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, configurable_server_properties1, [Config]).
+
+configurable_server_properties1(_Config) ->
+ %% Names of the built-in properties we expect to find
+ BuiltInPropNames = [<<"product">>, <<"version">>, <<"platform">>,
+ <<"copyright">>, <<"information">>],
+
+ Protocol = rabbit_framing_amqp_0_9_1,
+
+ %% Verify that the built-in properties are initially present
+ ActualPropNames = [Key || {Key, longstr, _} <-
+ rabbit_reader:server_properties(Protocol)],
+ true = lists:all(fun (X) -> lists:member(X, ActualPropNames) end,
+ BuiltInPropNames),
+
+ %% Get the initial server properties configured in the environment
+ %% (restored at the end of the test).
+ {ok, ServerProperties} = application:get_env(rabbit, server_properties),
+
+ %% Helper functions
+ ConsProp = fun (X) -> application:set_env(rabbit,
+ server_properties,
+ [X | ServerProperties]) end,
+ IsPropPresent =
+ fun (X) ->
+ lists:member(X, rabbit_reader:server_properties(Protocol))
+ end,
+
+ %% Add a wholly new property of the simplified {KeyAtom, StringValue} form
+ NewSimplifiedProperty = {NewHareKey, NewHareVal} = {hare, "soup"},
+ ConsProp(NewSimplifiedProperty),
+ %% Do we find hare soup, appropriately formatted in the generated properties?
+ ExpectedHareImage = {list_to_binary(atom_to_list(NewHareKey)),
+ longstr,
+ list_to_binary(NewHareVal)},
+ true = IsPropPresent(ExpectedHareImage),
+
+ %% Add a wholly new property of the {BinaryKey, Type, Value} form
+ %% and check for it
+ NewProperty = {<<"new-bin-key">>, signedint, -1},
+ ConsProp(NewProperty),
+ %% Do we find the new property?
+ true = IsPropPresent(NewProperty),
+
+ %% Add a property that clobbers a built-in, and verify correct clobbering
+ {NewVerKey, NewVerVal} = NewVersion = {version, "X.Y.Z."},
+ {BinNewVerKey, BinNewVerVal} = {list_to_binary(atom_to_list(NewVerKey)),
+ list_to_binary(NewVerVal)},
+ ConsProp(NewVersion),
+ ClobberedServerProps = rabbit_reader:server_properties(Protocol),
+ %% Is the clobbering insert present?
+ true = IsPropPresent({BinNewVerKey, longstr, BinNewVerVal}),
+ %% Is the clobbering insert the only thing with the clobbering key?
+ [{BinNewVerKey, longstr, BinNewVerVal}] =
+ [E || {K, longstr, _V} = E <- ClobberedServerProps, K =:= BinNewVerKey],
+
+ %% Restore the original environment so later tests are unaffected.
+ application:set_env(rabbit, server_properties, ServerProperties),
+ passed.
+
+%% Test that setting a tiny absolute vm_memory_high_watermark raises a
+%% memory resource alarm; the saved watermark is restored afterwards.
+memory_high_watermark(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, memory_high_watermark1, [Config]).
+
+memory_high_watermark1(_Config) ->
+ %% set vm memory high watermark
+ HWM = vm_memory_monitor:get_vm_memory_high_watermark(),
+ %% this will trigger an alarm
+ ok = control_action(set_vm_memory_high_watermark,
+ ["absolute", "2000"]),
+ [{{resource_limit,memory,_},[]}] = rabbit_alarm:get_alarms(),
+ %% reset
+ ok = control_action(set_vm_memory_high_watermark,
+ [float_to_list(HWM)]),
+
+ passed.
+
+%% Test 'rabbitmqctl set_disk_free_limit' with an absolute value
+%% ("2000kiB") and with a mem_relative factor, then set a sane absolute
+%% limit ("50MB") for subsequent tests.
+set_disk_free_limit_command(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, set_disk_free_limit_command1, [Config]).
+
+set_disk_free_limit_command1(_Config) ->
+ ok = control_action(set_disk_free_limit,
+ ["2000kiB"]),
+ 2048000 = rabbit_disk_monitor:get_disk_free_limit(),
+ ok = control_action(set_disk_free_limit,
+ ["mem_relative", "1.1"]),
+ ExpectedLimit = 1.1 * vm_memory_monitor:get_total_memory(),
+ % Total memory is unstable, so checking order
+ true = ExpectedLimit/rabbit_disk_monitor:get_disk_free_limit() < 1.2,
+ true = ExpectedLimit/rabbit_disk_monitor:get_disk_free_limit() > 0.98,
+ ok = control_action(set_disk_free_limit, ["50MB"]),
+ passed.
+
+%% Regression test for rabbitmq-server #91: the disk monitor must start
+%% even when the disk-space shell command yields unusable output
+%% (mocked here to return just "\n").
+disk_monitor(Config) ->
+ passed = rabbit_ct_broker_helpers:rpc(Config, 0,
+ ?MODULE, disk_monitor1, [Config]).
+
+disk_monitor1(_Config) ->
+ %% Issue: rabbitmq-server #91
+ %% os module could be mocked using 'unstick', however it may have undesired
+ %% side effects in following tests. Thus, we mock at rabbit_misc level
+ ok = meck:new(rabbit_misc, [passthrough]),
+ ok = meck:expect(rabbit_misc, os_cmd, fun(_) -> "\n" end),
+ ok = rabbit_sup:stop_child(rabbit_disk_monitor_sup),
+ ok = rabbit_sup:start_delayed_restartable_child(rabbit_disk_monitor, [1000]),
+ meck:unload(rabbit_misc),
+ passed.
+
+%% Test that a connection which dies while blocked by a memory alarm is
+%% still detected (via missed heartbeats) and removed from the broker's
+%% connection list.
+disconnect_detected_during_alarm(Config) ->
+ A = rabbit_ct_broker_helpers:get_node_config(Config, 0, nodename),
+
+ %% Set a low memory high watermark.
+ rabbit_ct_broker_helpers:rabbitmqctl(Config, A,
+ ["set_vm_memory_high_watermark", "0.000000001"]),
+
+ %% Open a connection and a channel.
+ Port = rabbit_ct_broker_helpers:get_node_config(Config, A, tcp_port_amqp),
+ Heartbeat = 1,
+ {ok, Conn} = amqp_connection:start(
+ #amqp_params_network{port = Port,
+ heartbeat = Heartbeat}),
+ {ok, Ch} = amqp_connection:open_channel(Conn),
+
+ %% Publishing triggers the (tiny) memory alarm, blocking us.
+ amqp_connection:register_blocked_handler(Conn, self()),
+ Publish = #'basic.publish'{routing_key = <<"nowhere-to-go">>},
+ amqp_channel:cast(Ch, Publish, #amqp_msg{payload = <<"foobar">>}),
+ receive
+ % Check that connection was indeed blocked
+ #'connection.blocked'{} -> ok
+ after
+ 1000 -> exit(connection_was_not_blocked)
+ end,
+
+ %% Connection is blocked, now we should forcefully kill it
+ {'EXIT', _} = (catch amqp_connection:close(Conn, 10)),
+
+ ListConnections =
+ fun() ->
+ rpc:call(A, rabbit_networking, connection_info_all, [])
+ end,
+
+ %% We've already disconnected, but the blocked connection should
+ %% still linger on.
+ [SingleConn] = ListConnections(),
+ blocked = rabbit_misc:pget(state, SingleConn),
+
+ %% It should definitely go away after 2 heartbeat intervals.
+ timer:sleep(round(2.5 * 1000 * Heartbeat)),
+ [] = ListConnections(),
+
+ passed.
+
+%% ---------------------------------------------------------------------------
+%% Cluster-dependent tests.
+%% ---------------------------------------------------------------------------
+
+%% Test delegate:invoke_no_result/2 (fire-and-forget) against single
+%% local and remote pids and against a mixed batch of 20 pids; every
+%% responder must call back, proving delivery. Which node runs the test
+%% vs. hosts the remote pids is given by the test_direction config.
+delegates_async(Config) ->
+ {I, J} = ?config(test_direction, Config),
+ From = rabbit_ct_broker_helpers:get_node_config(Config, I, nodename),
+ To = rabbit_ct_broker_helpers:get_node_config(Config, J, nodename),
+ rabbit_ct_broker_helpers:add_code_path_to_node(To, ?MODULE),
+ passed = rabbit_ct_broker_helpers:rpc(Config,
+ From, ?MODULE, delegates_async1, [Config, To]).
+
+delegates_async1(_Config, SecondaryNode) ->
+ Self = self(),
+ Sender = fun (Pid) -> Pid ! {invoked, Self} end,
+
+ %% Each responder sends 'response' back to this process when invoked.
+ Responder = make_responder(fun ({invoked, Pid}) -> Pid ! response end),
+
+ ok = delegate:invoke_no_result(spawn(Responder), Sender),
+ ok = delegate:invoke_no_result(spawn(SecondaryNode, Responder), Sender),
+ await_response(2),
+
+ LocalPids = spawn_responders(node(), Responder, 10),
+ RemotePids = spawn_responders(SecondaryNode, Responder, 10),
+ ok = delegate:invoke_no_result(LocalPids ++ RemotePids, Sender),
+ await_response(20),
+
+ passed.
+
+%% Test delegate:invoke/2 (synchronous) against good responders, bad
+%% senders (which exit), and pids on a nonexistent node; successes and
+%% failures must be partitioned correctly and cover exactly the pids
+%% invoked. Direction comes from the test_direction config.
+delegates_sync(Config) ->
+ {I, J} = ?config(test_direction, Config),
+ From = rabbit_ct_broker_helpers:get_node_config(Config, I, nodename),
+ To = rabbit_ct_broker_helpers:get_node_config(Config, J, nodename),
+ rabbit_ct_broker_helpers:add_code_path_to_node(To, ?MODULE),
+ passed = rabbit_ct_broker_helpers:rpc(Config,
+ From, ?MODULE, delegates_sync1, [Config, To]).
+
+delegates_sync1(_Config, SecondaryNode) ->
+ Sender = fun (Pid) -> gen_server:call(Pid, invoked, infinity) end,
+ BadSender = fun (_Pid) -> exit(exception) end,
+
+ Responder = make_responder(fun ({'$gen_call', From, invoked}) ->
+ gen_server:reply(From, response)
+ end),
+
+ BadResponder = make_responder(fun ({'$gen_call', From, invoked}) ->
+ gen_server:reply(From, response)
+ end, bad_responder_died),
+
+ response = delegate:invoke(spawn(Responder), Sender),
+ response = delegate:invoke(spawn(SecondaryNode, Responder), Sender),
+
+ must_exit(fun () -> delegate:invoke(spawn(BadResponder), BadSender) end),
+ must_exit(fun () ->
+ delegate:invoke(spawn(SecondaryNode, BadResponder), BadSender) end),
+
+ LocalGoodPids = spawn_responders(node(), Responder, 2),
+ RemoteGoodPids = spawn_responders(SecondaryNode, Responder, 2),
+ LocalBadPids = spawn_responders(node(), BadResponder, 2),
+ RemoteBadPids = spawn_responders(SecondaryNode, BadResponder, 2),
+
+ %% NB: the single-clause funs below are deliberately assertive --
+ %% any unexpected element crashes with function_clause.
+ {GoodRes, []} = delegate:invoke(LocalGoodPids ++ RemoteGoodPids, Sender),
+ true = lists:all(fun ({_, response}) -> true end, GoodRes),
+ GoodResPids = [Pid || {Pid, _} <- GoodRes],
+
+ Good = lists:usort(LocalGoodPids ++ RemoteGoodPids),
+ Good = lists:usort(GoodResPids),
+
+ {[], BadRes} = delegate:invoke(LocalBadPids ++ RemoteBadPids, BadSender),
+ true = lists:all(fun ({_, {exit, exception, _}}) -> true end, BadRes),
+ BadResPids = [Pid || {Pid, _} <- BadRes],
+
+ Bad = lists:usort(LocalBadPids ++ RemoteBadPids),
+ Bad = lists:usort(BadResPids),
+
+ %% Pids on a nonexistent node must all fail with nodedown.
+ MagicalPids = [rabbit_misc:string_to_pid(Str) ||
+ Str <- ["<nonode@nohost.0.1.0>", "<nonode@nohost.0.2.0>"]],
+ {[], BadNodes} = delegate:invoke(MagicalPids, Sender),
+ true = lists:all(
+ fun ({_, {exit, {nodedown, nonode@nohost}, _Stack}}) -> true end,
+ BadNodes),
+ BadNodesPids = [Pid || {Pid, _} <- BadNodes],
+
+ Magical = lists:usort(MagicalPids),
+ Magical = lists:usort(BadNodesPids),
+
+ passed.
+
+%% Test that a transient (non-durable) queue does not survive a broker
+%% restart: after rabbit:stop/start, a passive re-declare must fail
+%% with a NOT_FOUND channel error.
+queue_cleanup(Config) ->
+ {I, J} = ?config(test_direction, Config),
+ From = rabbit_ct_broker_helpers:get_node_config(Config, I, nodename),
+ To = rabbit_ct_broker_helpers:get_node_config(Config, J, nodename),
+ rabbit_ct_broker_helpers:add_code_path_to_node(To, ?MODULE),
+ passed = rabbit_ct_broker_helpers:rpc(Config,
+ From, ?MODULE, queue_cleanup1, [Config, To]).
+
+queue_cleanup1(_Config, _SecondaryNode) ->
+ {_Writer, Ch} = test_spawn(),
+ rabbit_channel:do(Ch, #'queue.declare'{ queue = ?CLEANUP_QUEUE_NAME }),
+ receive #'queue.declare_ok'{queue = ?CLEANUP_QUEUE_NAME} ->
+ ok
+ after ?TIMEOUT -> throw(failed_to_receive_queue_declare_ok)
+ end,
+ rabbit_channel:shutdown(Ch),
+ %% Restart the broker; the transient queue must not come back.
+ rabbit:stop(),
+ rabbit:start(),
+ {_Writer2, Ch2} = test_spawn(),
+ rabbit_channel:do(Ch2, #'queue.declare'{ passive = true,
+ queue = ?CLEANUP_QUEUE_NAME }),
+ receive
+ #'channel.close'{reply_code = ?NOT_FOUND} ->
+ ok
+ after ?TIMEOUT -> throw(failed_to_receive_channel_exit)
+ end,
+ rabbit_channel:shutdown(Ch2),
+ passed.
+
+%% Test that re-declaring a queue whose process has just been killed
+%% eventually yields a fresh, live queue process (see dead_queue_loop/2),
+%% which is then deleted. The kill happens from the secondary node.
+declare_on_dead_queue(Config) ->
+ {I, J} = ?config(test_direction, Config),
+ From = rabbit_ct_broker_helpers:get_node_config(Config, I, nodename),
+ To = rabbit_ct_broker_helpers:get_node_config(Config, J, nodename),
+ rabbit_ct_broker_helpers:add_code_path_to_node(To, ?MODULE),
+ passed = rabbit_ct_broker_helpers:rpc(Config,
+ From, ?MODULE, declare_on_dead_queue1, [Config, To]).
+
+declare_on_dead_queue1(_Config, SecondaryNode) ->
+ QueueName = rabbit_misc:r(<<"/">>, queue, ?CLEANUP_QUEUE_NAME),
+ Self = self(),
+ %% Declare the queue on the secondary node, kill its process, and
+ %% report the dead pid back to us.
+ Pid = spawn(SecondaryNode,
+ fun () ->
+ {new, #amqqueue{name = QueueName, pid = QPid}} =
+ rabbit_amqqueue:declare(QueueName, false, false, [],
+ none),
+ exit(QPid, kill),
+ Self ! {self(), killed, QPid}
+ end),
+ receive
+ {Pid, killed, OldPid} ->
+ Q = dead_queue_loop(QueueName, OldPid),
+ {ok, 0} = rabbit_amqqueue:delete(Q, false, false),
+ passed
+ after ?TIMEOUT -> throw(failed_to_create_and_kill_queue)
+ end.
+
+%% Test that channel_created and queue_created events are emitted on
+%% creation AND re-emitted on rabbit:force_event_refresh/1, for both
+%% local and remote channels (see expect_events/3).
+refresh_events(Config) ->
+ {I, J} = ?config(test_direction, Config),
+ From = rabbit_ct_broker_helpers:get_node_config(Config, I, nodename),
+ To = rabbit_ct_broker_helpers:get_node_config(Config, J, nodename),
+ rabbit_ct_broker_helpers:add_code_path_to_node(To, ?MODULE),
+ passed = rabbit_ct_broker_helpers:rpc(Config,
+ From, ?MODULE, refresh_events1, [Config, To]).
+
+refresh_events1(Config, SecondaryNode) ->
+ %% Forward matching events from both nodes to this process.
+ dummy_event_receiver:start(self(), [node(), SecondaryNode],
+ [channel_created, queue_created]),
+
+ {_Writer, Ch} = test_spawn(),
+ expect_events(pid, Ch, channel_created),
+ rabbit_channel:shutdown(Ch),
+
+ {_Writer2, Ch2} = test_spawn(SecondaryNode),
+ expect_events(pid, Ch2, channel_created),
+ rabbit_channel:shutdown(Ch2),
+
+ {new, #amqqueue{name = QName} = Q} =
+ rabbit_amqqueue:declare(queue_name(Config, <<"refresh_events-q">>),
+ false, false, [], none),
+ expect_events(name, QName, queue_created),
+ rabbit_amqqueue:delete(Q, false, false),
+
+ dummy_event_receiver:stop(),
+ passed.
+
+%% Build a fun suitable for spawning: it waits for a single message and
+%% hands it to FMsg; if none arrives within ?TIMEOUT it throws Throw
+%% (default: 'timeout').
+make_responder(FMsg) ->
+    make_responder(FMsg, timeout).
+
+make_responder(FMsg, Throw) ->
+    fun () ->
+            receive
+                Msg -> FMsg(Msg)
+            after ?TIMEOUT ->
+                    throw(Throw)
+            end
+    end.
+
+%% Spawn Count copies of the Responder fun on Node; return their pids.
+spawn_responders(Node, Responder, Count) ->
+    lists:map(fun (_) -> spawn(Node, Responder) end, lists:seq(1, Count)).
+
+%% Block until Count 'response' messages have arrived in our mailbox;
+%% throw 'timeout' if any single one takes longer than ?TIMEOUT.
+await_response(0) ->
+    ok;
+await_response(Count) ->
+    receive
+        %% (the original had a dead "ok," expression here; removed)
+        response -> await_response(Count - 1)
+    after ?TIMEOUT -> throw(timeout)
+    end.
+
+%% Assert that Fun() terminates with an exit exception. If Fun returns
+%% normally, 'exit_not_thrown' is thrown from the unprotected 'of'
+%% clause and escapes, failing the caller.
+must_exit(Fun) ->
+    try Fun() of
+        _ -> throw(exit_not_thrown)
+    catch
+        exit:_ -> ok
+    end.
+
+%% Re-declare QueueName (polling every 25ms) until the declare reports
+%% a pid other than OldPid -- i.e. the queue process has been replaced
+%% -- then assert the new pid is alive and return the queue record.
+dead_queue_loop(QueueName, OldPid) ->
+ {existing, Q} = rabbit_amqqueue:declare(QueueName, false, false, [], none),
+ case Q#amqqueue.pid of
+ OldPid -> timer:sleep(25),
+ dead_queue_loop(QueueName, OldPid);
+ _ -> true = rabbit_misc:is_process_alive(Q#amqqueue.pid),
+ Q
+ end.
+
+%% Wait for an event of Type whose property Tag equals Key, then force
+%% an event refresh and expect the same event to be emitted again.
+expect_events(Tag, Key, Type) ->
+ expect_event(Tag, Key, Type),
+ rabbit:force_event_refresh(make_ref()),
+ expect_event(Tag, Key, Type).
+
+%% Drain events of Type until one carries Props in which Tag maps to
+%% Key; other events of the same type are silently discarded.
+expect_event(Tag, Key, Type) ->
+ receive #event{type = Type, props = Props} ->
+ case rabbit_misc:pget(Tag, Props) of
+ Key -> ok;
+ _ -> expect_event(Tag, Key, Type)
+ end
+ after ?TIMEOUT -> throw({failed_to_receive_event, Type})
+ end.
+
+%% ---------------------------------------------------------------------------
+%% rabbitmqctl helpers.
+%% ---------------------------------------------------------------------------
+
+%% Run a rabbitmqctl command on the local node with default options.
+control_action(Command, Args) ->
+ control_action(Command, node(), Args, default_options()).
+
+%% As control_action/2, but with NewOpts merged over the defaults
+%% (NewOpts wins on key collisions -- see expand_options/2).
+control_action(Command, Args, NewOpts) ->
+ control_action(Command, node(), Args,
+ expand_options(default_options(), NewOpts)).
+
+%% Invoke rabbit_control_main:action/5, printing progress like the real
+%% CLI. Returns ok on success or the failure term otherwise.
+%% NOTE(review): old-style 'catch' means exceptions come back as
+%% {'EXIT', ...} in Other and are reported, not re-raised -- callers
+%% pattern-match on the returned term.
+control_action(Command, Node, Args, Opts) ->
+ case catch rabbit_control_main:action(
+ Command, Node, Args, Opts,
+ fun (Format, Args1) ->
+ io:format(Format ++ " ...~n", Args1)
+ end) of
+ ok ->
+ io:format("done.~n"),
+ ok;
+ {ok, Result} ->
+ rabbit_control_misc:print_cmd_result(Command, Result),
+ ok;
+ Other ->
+ io:format("failed: ~p~n", [Other]),
+ Other
+ end.
+
+%% Timeout-carrying variants of control_action/2,3,4: identical except
+%% that the Timeout is passed through to rabbit_control_main:action/6.
+control_action_t(Command, Args, Timeout) when is_number(Timeout) ->
+ control_action_t(Command, node(), Args, default_options(), Timeout).
+
+control_action_t(Command, Args, NewOpts, Timeout) when is_number(Timeout) ->
+ control_action_t(Command, node(), Args,
+ expand_options(default_options(), NewOpts),
+ Timeout).
+
+%% See control_action/4 for the result convention; the old-style
+%% 'catch' returns {'EXIT', ...} terms instead of raising.
+control_action_t(Command, Node, Args, Opts, Timeout) when is_number(Timeout) ->
+ case catch rabbit_control_main:action(
+ Command, Node, Args, Opts,
+ fun (Format, Args1) ->
+ io:format(Format ++ " ...~n", Args1)
+ end, Timeout) of
+ ok ->
+ io:format("done.~n"),
+ ok;
+ {ok, Result} ->
+ rabbit_control_misc:print_cmd_result(Command, Result),
+ ok;
+ Other ->
+ io:format("failed: ~p~n", [Other]),
+ Other
+ end.
+
+%% Parse a raw rabbitmqctl argument list and run the resulting command
+%% on the local node. Returns 'ok' on success, or the parse/execution
+%% error term otherwise.
+control_action_opts(Raw) ->
+    NodeStr = atom_to_list(node()),
+    case rabbit_control_main:parse_arguments(Raw, NodeStr) of
+        {ok, {Cmd, Opts, Args}} ->
+            %% control_action/4 already returns ok | Error, so the
+            %% identity inner case of the original is unnecessary.
+            control_action(Cmd, node(), Args, Opts);
+        Error ->
+            Error
+    end.
+
+%% Exercise an "info"-style rabbitmqctl command four ways: with no
+%% arguments, (optionally) with a vhost flag, with explicit column
+%% names, and with a bogus column name that must be rejected.
+info_action(Command, Args, CheckVHost) ->
+    ok = control_action(Command, []),
+    case CheckVHost of
+        true -> ok = control_action(Command, [], ["-p", "/"]);
+        _    -> ok
+    end,
+    ok = control_action(Command, [atom_to_list(A) || A <- Args]),
+    {bad_argument, dummy} = control_action(Command, ["dummy"]),
+    ok.
+
+%% Timeout-carrying variant of info_action/3 (without the no-argument
+%% invocation).
+info_action_t(Command, Args, CheckVHost, Timeout) when is_number(Timeout) ->
+    case CheckVHost of
+        true -> ok = control_action_t(Command, [], ["-p", "/"], Timeout);
+        _    -> ok
+    end,
+    ok = control_action_t(Command, [atom_to_list(A) || A <- Args], Timeout),
+    ok.
+
+%% Default rabbitmqctl options: vhost "/", quiet mode off.
+default_options() -> [{"-p", "/"}, {"-q", "false"}].
+
+%% Merge option defaults As into Bs: each {K, V} from As is prepended
+%% only when K is not already present, so values in Bs take precedence.
+expand_options([{K, _} = Default | Rest], Merged) ->
+    case proplists:is_defined(K, Merged) of
+        true  -> expand_options(Rest, Merged);
+        false -> expand_options(Rest, [Default | Merged])
+    end;
+expand_options([], Merged) ->
+    Merged.