summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.gitignore64
-rw-r--r--.travis.yml24
-rw-r--r--Makefile791
-rw-r--r--README2
-rw-r--r--README.md47
-rw-r--r--build.config43
-rwxr-xr-xcalculate-relative45
-rw-r--r--codegen.py595
-rw-r--r--docs/README-for-packages (renamed from packaging/common/README)0
-rw-r--r--docs/rabbitmq.config.example49
-rw-r--r--docs/rabbitmqctl.1.xml47
-rw-r--r--erlang.mk6617
-rw-r--r--generate_app16
-rw-r--r--generate_deps57
-rw-r--r--include/rabbit.hrl152
-rw-r--r--include/rabbit_cli.hrl11
-rw-r--r--packaging/Makefile103
-rw-r--r--packaging/RPMS/Fedora/Makefile50
-rw-r--r--packaging/RPMS/Fedora/rabbitmq-server.init17
-rw-r--r--packaging/RPMS/Fedora/rabbitmq-server.spec57
-rw-r--r--packaging/debs/Debian/.gitignore3
-rw-r--r--packaging/debs/Debian/Makefile65
-rw-r--r--packaging/debs/Debian/debian/changelog6
-rw-r--r--packaging/debs/Debian/debian/compat2
-rw-r--r--packaging/debs/Debian/debian/control18
-rw-r--r--packaging/debs/Debian/debian/copyright52
-rw-r--r--packaging/debs/Debian/debian/rabbitmq-server.docs1
-rw-r--r--packaging/debs/Debian/debian/rabbitmq-server.init12
-rw-r--r--packaging/debs/Debian/debian/rabbitmq-server.manpages4
-rwxr-xr-x[-rw-r--r--]packaging/debs/Debian/debian/rules74
-rw-r--r--packaging/debs/Debian/debian/source/format1
-rw-r--r--packaging/debs/apt-repository/Makefile21
-rw-r--r--packaging/generic-unix/Makefile57
-rw-r--r--packaging/macports/Makefile58
-rw-r--r--packaging/macports/Portfile.in123
-rwxr-xr-xpackaging/macports/make-checksums.sh14
-rwxr-xr-xpackaging/macports/make-port-diff.sh29
-rw-r--r--packaging/macports/patch-org.macports.rabbitmq-server.plist.diff10
-rw-r--r--packaging/standalone/Makefile58
-rw-r--r--packaging/standalone/erl.diff5
-rw-r--r--packaging/standalone/src/rabbit_release.erl12
-rw-r--r--packaging/windows-exe/Makefile23
-rw-r--r--packaging/windows-exe/plugins/ExecDos.dllbin0 -> 6656 bytes
-rw-r--r--packaging/windows-exe/rabbitmq_nsi.in128
-rw-r--r--packaging/windows/Makefile76
-rw-r--r--packaging/windows/README-etc14
-rw-r--r--rabbitmq-components.mk331
-rw-r--r--scripts/rabbitmq-defaults6
-rw-r--r--scripts/rabbitmq-defaults.bat80
-rw-r--r--[-rwxr-xr-x]scripts/rabbitmq-echopid.bat110
-rw-r--r--[-rwxr-xr-x]scripts/rabbitmq-env175
-rw-r--r--scripts/rabbitmq-env.bat595
-rwxr-xr-xscripts/rabbitmq-plugins5
-rw-r--r--[-rwxr-xr-x]scripts/rabbitmq-plugins.bat7
-rw-r--r--scripts/rabbitmq-script-wrapper (renamed from packaging/common/rabbitmq-script-wrapper)4
-rwxr-xr-xscripts/rabbitmq-server-ha.ocf (renamed from packaging/common/rabbitmq-server-ha.ocf)19
-rw-r--r--[-rwxr-xr-x]scripts/rabbitmq-server.bat8
-rwxr-xr-xscripts/rabbitmq-server.ocf (renamed from packaging/common/rabbitmq-server.ocf)0
-rw-r--r--[-rwxr-xr-x]scripts/rabbitmq-service.bat3
-rwxr-xr-xscripts/rabbitmqctl5
-rw-r--r--[-rwxr-xr-x]scripts/rabbitmqctl.bat7
-rw-r--r--scripts/set_rabbitmq_policy.sh (renamed from packaging/common/set_rabbitmq_policy.sh)0
-rw-r--r--src/app_utils.erl127
-rw-r--r--src/background_gc.erl3
-rw-r--r--src/credit_flow.erl196
-rw-r--r--src/file_handle_cache.erl38
-rw-r--r--src/file_handle_cache_stats.erl9
-rw-r--r--src/gen_server2.erl1357
-rw-r--r--src/gm.erl9
-rw-r--r--src/mirrored_supervisor.erl517
-rw-r--r--src/mochijson2.erl893
-rw-r--r--src/pg2_fixed.erl4
-rw-r--r--src/pmon.erl109
-rw-r--r--src/priority_queue.erl227
-rw-r--r--src/rabbit.app.src (renamed from ebin/rabbit_app.in)15
-rw-r--r--src/rabbit.erl181
-rw-r--r--src/rabbit_access_control.erl10
-rw-r--r--src/rabbit_alarm.erl105
-rw-r--r--src/rabbit_amqqueue.erl902
-rw-r--r--src/rabbit_amqqueue_process.erl26
-rw-r--r--src/rabbit_auth_backend_internal.erl123
-rw-r--r--src/rabbit_auth_mechanism.erl56
-rw-r--r--src/rabbit_authn_backend.erl49
-rw-r--r--src/rabbit_authz_backend.erl76
-rw-r--r--src/rabbit_backing_queue.erl269
-rw-r--r--src/rabbit_basic.erl321
-rw-r--r--src/rabbit_binary_generator.erl241
-rw-r--r--src/rabbit_binary_parser.erl161
-rw-r--r--src/rabbit_binding.erl8
-rw-r--r--src/rabbit_boot_steps.erl97
-rw-r--r--src/rabbit_channel.erl1897
-rw-r--r--src/rabbit_channel_interceptor.erl91
-rw-r--r--src/rabbit_channel_sup.erl10
-rw-r--r--src/rabbit_channel_sup_sup.erl5
-rw-r--r--src/rabbit_cli.erl50
-rw-r--r--src/rabbit_command_assembler.erl137
-rw-r--r--src/rabbit_connection_helper_sup.erl9
-rw-r--r--src/rabbit_connection_sup.erl18
-rw-r--r--src/rabbit_control_main.erl133
-rw-r--r--src/rabbit_dead_letter.erl2
-rw-r--r--src/rabbit_diagnostics.erl35
-rw-r--r--src/rabbit_direct.erl12
-rw-r--r--src/rabbit_disk_monitor.erl59
-rw-r--r--src/rabbit_error_logger.erl2
-rw-r--r--src/rabbit_event.erl164
-rw-r--r--src/rabbit_exchange.erl9
-rw-r--r--src/rabbit_exchange_decorator.erl128
-rw-r--r--src/rabbit_exchange_type.erl81
-rw-r--r--src/rabbit_heartbeat.erl166
-rw-r--r--src/rabbit_hipe.erl98
-rw-r--r--src/rabbit_memory_monitor.erl8
-rw-r--r--src/rabbit_mirror_queue_master.erl60
-rw-r--r--src/rabbit_mirror_queue_misc.erl75
-rw-r--r--src/rabbit_mirror_queue_mode_exactly.erl5
-rw-r--r--src/rabbit_mirror_queue_slave.erl54
-rw-r--r--src/rabbit_mirror_queue_sync.erl164
-rw-r--r--src/rabbit_misc.erl1159
-rw-r--r--src/rabbit_mnesia.erl2
-rw-r--r--src/rabbit_msg_store.erl103
-rw-r--r--src/rabbit_msg_store_index.erl59
-rw-r--r--src/rabbit_net.erl246
-rw-r--r--src/rabbit_networking.erl608
-rw-r--r--src/rabbit_node_monitor.erl26
-rw-r--r--src/rabbit_nodes.erl221
-rw-r--r--src/rabbit_password.erl64
-rw-r--r--src/rabbit_password_hashing_md5.erl (renamed from src/rabbit_ctl_misc.erl)19
-rw-r--r--src/rabbit_password_hashing_sha256.erl (renamed from include/rabbit_msg_store.hrl)13
-rw-r--r--src/rabbit_password_hashing_sha512.erl (renamed from src/rabbit_policy_validator.erl)25
-rw-r--r--src/rabbit_plugins.erl24
-rw-r--r--src/rabbit_policies.erl12
-rw-r--r--src/rabbit_policy.erl6
-rw-r--r--src/rabbit_priority_queue.erl114
-rw-r--r--src/rabbit_queue_collector.erl92
-rw-r--r--src/rabbit_queue_consumers.erl18
-rw-r--r--src/rabbit_queue_decorator.erl80
-rw-r--r--src/rabbit_queue_index.erl2
-rw-r--r--src/rabbit_queue_location_client_local.erl40
-rw-r--r--src/rabbit_queue_location_min_masters.erl77
-rw-r--r--src/rabbit_queue_location_random.erl44
-rw-r--r--src/rabbit_queue_location_validator.erl69
-rw-r--r--src/rabbit_queue_master_location_misc.erl95
-rw-r--r--src/rabbit_reader.erl1296
-rw-r--r--src/rabbit_registry.erl3
-rw-r--r--src/rabbit_resource_monitor_misc.erl51
-rw-r--r--src/rabbit_runtime_parameter.erl42
-rw-r--r--src/rabbit_runtime_parameters.erl10
-rw-r--r--src/rabbit_types.erl3
-rw-r--r--src/rabbit_upgrade.erl9
-rw-r--r--src/rabbit_upgrade_functions.erl23
-rw-r--r--src/rabbit_variable_queue.erl504
-rw-r--r--src/rabbit_vhost.erl9
-rw-r--r--src/rabbit_vm.erl13
-rw-r--r--src/rabbit_writer.erl354
-rw-r--r--src/ssl_compat.erl75
-rw-r--r--src/supervisor2.erl1566
-rw-r--r--src/tcp_acceptor.erl105
-rw-r--r--src/tcp_acceptor_sup.erl43
-rw-r--r--src/tcp_listener.erl83
-rw-r--r--src/tcp_listener_sup.erl67
-rw-r--r--src/time_compat.erl305
-rw-r--r--src/vm_memory_monitor.erl19
-rw-r--r--src/worker_pool.erl39
-rw-r--r--src/worker_pool_sup.erl23
-rw-r--r--src/worker_pool_worker.erl22
-rw-r--r--test/src/credit_flow_test.erl49
-rw-r--r--test/src/gm_qc.erl384
-rw-r--r--test/src/gm_soak_test.erl133
-rw-r--r--test/src/gm_speed_test.erl83
-rw-r--r--test/src/gm_tests.erl186
-rw-r--r--test/src/mirrored_supervisor_tests.erl307
-rw-r--r--test/src/mirrored_supervisor_tests_gs.erl66
-rw-r--r--test/src/on_disk_store_tunable_parameter_validation_test.erl47
-rw-r--r--test/src/rabbit_backing_queue_qc.erl473
-rw-r--r--test/src/rabbit_runtime_parameters_test.erl72
-rw-r--r--test/src/rabbit_tests.erl3094
-rw-r--r--test/src/rabbit_tests_event_receiver.erl58
-rw-r--r--test/src/supervisor2_tests.erl75
-rw-r--r--test/src/test_sup.erl93
-rw-r--r--test/src/vm_memory_monitor_tests.erl35
-rwxr-xr-xtest/temp/head_message_timestamp_tests.py131
-rwxr-xr-xtest/temp/rabbitmqadmin.py944
-rw-r--r--version.mk1
182 files changed, 12290 insertions, 22432 deletions
diff --git a/.gitignore b/.gitignore
index 05857b2388..b30162c1e3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,39 +2,43 @@
.sw?
.*.sw?
*.beam
-*.orig
-*.patch
-*.plt
-*.tmp
-\#*
-.#*
-TAGS
-erl_crash.dump
-/ebin/rabbit.app
/.erlang.mk/
-/deps/
-/rabbit.d
-/build/
/cover/
-/dist/
+/deps/
+/doc/
+/ebin/
/etc/
+/logs/
/plugins/
-/priv/plugins/
-/deps.mk
-/include/rabbit_framing.hrl
-/include/rabbit_framing_spec.hrl
-/src/rabbit_framing_amqp*.erl
-/src/*_usage.erl
-/packaging/RPMS/Fedora/{BUILD,RPMS,SOURCES,SPECS,SRPMS}/
-/packaging/debs/Debian/rabbitmq-server_*.{dsc,diff.gz,tar.gz,deb,changes}
-/packaging/debs/apt-repository/debian
-/packaging/macports/macports
-/packaging/generic-unix/rabbitmq-server-generic-unix-*.tar.gz
-/packaging/windows/rabbitmq-server-windows-*.zip
-/packaging/windows-exe/rabbitmq_server-*
-/packaging/windows-exe/rabbitmq-*.nsi
-/packaging/windows-exe/rabbitmq-server-*.exe
+/rabbit.d
+
+# Generated sources files.
+/src/rabbit_ctl_usage.erl
+/src/rabbit_plugins_usage.erl
+
+# Generated documentation.
+/docs/rabbitmq-echopid.man.xml
+/docs/rabbitmq-env.conf.5
+/docs/rabbitmq-env.conf.5.man.xml
+/docs/rabbitmq-plugins.1
+/docs/rabbitmq-plugins.1.man.xml
+/docs/rabbitmq-server.1
+/docs/rabbitmq-server.1.man.xml
+/docs/rabbitmq-service.man.xml
+/docs/rabbitmqctl.1
+/docs/rabbitmqctl.1.man.xml
+
+# Source distribution.
+/rabbitmq-server-*/
+/rabbitmq-server-*.tar.gz
+/rabbitmq-server-*.tar.bz2
+/rabbitmq-server-*.tar.xz
+/rabbitmq-server-*.zip
+
+# Dialyzer
+*.plt
-/docs/*.[15].gz
-/docs/*.man.xml
+# Tracing tools
+*-ttb
+*.ti \ No newline at end of file
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000000..6d9dbc0b71
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,24 @@
+sudo: false
+language: erlang
+notifications:
+ email:
+ - alerts@rabbitmq.com
+addons:
+ apt:
+ packages:
+ - xsltproc
+otp_release:
+ - "R16B03-1"
+ - "17.5"
+ - "18.0"
+
+# The checkout made by Travis is a "detached HEAD". We switch back
+# to a tag or a branch. This pleases our git_rmq fetch method in
+# rabbitmq-components.mk and the proper tag/branch is selected in
+# dependencies too.
+before_script: (test "$TRAVIS_TAG" && git checkout "$TRAVIS_TAG") || (test "$TRAVIS_BRANCH" && git checkout "$TRAVIS_BRANCH")
+
+script: travis_wait make tests
+
+cache:
+ apt: true
diff --git a/Makefile b/Makefile
index c2cae4a161..fb5155f077 100644
--- a/Makefile
+++ b/Makefile
@@ -1,437 +1,454 @@
-TMPDIR ?= /tmp
-
-RABBITMQ_NODENAME ?= rabbit
-RABBITMQ_SERVER_START_ARGS ?=
-RABBITMQ_MNESIA_DIR ?= $(TMPDIR)/rabbitmq-$(RABBITMQ_NODENAME)-mnesia
-RABBITMQ_PLUGINS_EXPAND_DIR ?= $(TMPDIR)/rabbitmq-$(RABBITMQ_NODENAME)-plugins-scratch
-RABBITMQ_LOG_BASE ?= $(TMPDIR)
-
-DEPS_FILE=deps.mk
-SOURCE_DIR=src
-TEST_DIR=test/src
-EBIN_DIR=ebin
-TEST_EBIN_DIR=test/ebin
-INCLUDE_DIR=include
-DOCS_DIR=docs
-INCLUDES=$(wildcard $(INCLUDE_DIR)/*.hrl) $(INCLUDE_DIR)/rabbit_framing.hrl
-SOURCES=$(wildcard $(SOURCE_DIR)/*.erl) $(SOURCE_DIR)/rabbit_framing_amqp_0_9_1.erl $(SOURCE_DIR)/rabbit_framing_amqp_0_8.erl $(USAGES_ERL)
-TEST_SOURCES=$(wildcard $(TEST_DIR)/*.erl)
-BEAM_TARGETS=$(patsubst $(SOURCE_DIR)/%.erl, $(EBIN_DIR)/%.beam, $(SOURCES))
-TEST_BEAM_TARGETS=$(patsubst $(TEST_DIR)/%.erl, $(TEST_EBIN_DIR)/%.beam, $(TEST_SOURCES))
-TARGETS=$(EBIN_DIR)/rabbit.app $(INCLUDE_DIR)/rabbit_framing.hrl $(BEAM_TARGETS) plugins
-TEST_TARGETS=$(TEST_BEAM_TARGETS)
-WEB_URL=http://www.rabbitmq.com/
-MANPAGES=$(patsubst %.xml, %.gz, $(wildcard $(DOCS_DIR)/*.[0-9].xml))
-WEB_MANPAGES=$(patsubst %.xml, %.man.xml, $(wildcard $(DOCS_DIR)/*.[0-9].xml) $(DOCS_DIR)/rabbitmq-service.xml $(DOCS_DIR)/rabbitmq-echopid.xml)
-USAGES_XML=$(DOCS_DIR)/rabbitmqctl.1.xml $(DOCS_DIR)/rabbitmq-plugins.1.xml
-USAGES_ERL=$(foreach XML, $(USAGES_XML), $(call usage_xml_to_erl, $(XML)))
-
-ifeq ($(shell python -c 'import simplejson' 2>/dev/null && echo yes),yes)
-PYTHON=python
-else
-ifeq ($(shell python2.7 -c 'import json' 2>/dev/null && echo yes),yes)
-PYTHON=python2.7
-else
-ifeq ($(shell python2.6 -c 'import simplejson' 2>/dev/null && echo yes),yes)
-PYTHON=python2.6
-else
-ifeq ($(shell python2.5 -c 'import simplejson' 2>/dev/null && echo yes),yes)
-PYTHON=python2.5
-else
-# Hmm. Missing simplejson?
-PYTHON=python
-endif
-endif
-endif
-endif
+PROJECT = rabbit
+VERSION ?= $(call get_app_version,src/$(PROJECT).app.src)
-BASIC_PLT=basic.plt
-RABBIT_PLT=rabbit.plt
+# Release artifacts are put in $(PACKAGES_DIR).
+PACKAGES_DIR ?= $(abspath PACKAGES)
-ifndef USE_PROPER_QC
-# PropEr needs to be installed for property checking
-# http://proper.softlab.ntua.gr/
-USE_PROPER_QC=$(shell erl -noshell -eval 'io:format({module, proper} =:= code:ensure_loaded(proper)), halt().')
-endif
+DEPS = ranch $(PLUGINS)
-#other args: +native +"{hipe,[o3,verbose]}" -Ddebug=true +debug_info +no_strict_record_tests
-ERLC_OPTS=-I $(INCLUDE_DIR) -Wall +warn_export_vars -v +debug_info $(call boolean_macro,$(USE_SPECS),use_specs) $(call boolean_macro,$(USE_PROPER_QC),use_proper_qc)
-
-# Our type specs rely on dict:dict/0 etc, which are only available in
-# 17.0 upwards.
-define compare_version
-$(shell awk 'BEGIN {
- split("$(1)", v1, "\.");
- version1 = v1[1] * 1000000 + v1[2] * 10000 + v1[3] * 100 + v1[4];
-
- split("$(2)", v2, "\.");
- version2 = v2[1] * 1000000 + v2[2] * 10000 + v2[3] * 100 + v2[4];
-
- if (version1 $(3) version2) {
- print "true";
- } else {
- print "false";
- }
-}')
+define usage_xml_to_erl
+$(subst __,_,$(patsubst $(DOCS_DIR)/rabbitmq%.1.xml, src/rabbit_%_usage.erl, $(subst -,_,$(1))))
endef
-ERTS_VER = $(shell erl -version 2>&1 | sed -E 's/.* version //')
-USE_SPECS_MIN_ERTS_VER = 5.11
-ifeq ($(call compare_version,$(ERTS_VER),$(USE_SPECS_MIN_ERTS_VER),>=),true)
-ERLC_OPTS += -Duse_specs
+DOCS_DIR = docs
+MANPAGES = $(patsubst %.xml, %, $(wildcard $(DOCS_DIR)/*.[0-9].xml))
+WEB_MANPAGES = $(patsubst %.xml, %.man.xml, $(wildcard $(DOCS_DIR)/*.[0-9].xml) $(DOCS_DIR)/rabbitmq-service.xml $(DOCS_DIR)/rabbitmq-echopid.xml)
+USAGES_XML = $(DOCS_DIR)/rabbitmqctl.1.xml $(DOCS_DIR)/rabbitmq-plugins.1.xml
+USAGES_ERL = $(foreach XML, $(USAGES_XML), $(call usage_xml_to_erl, $(XML)))
+
+EXTRA_SOURCES += $(USAGES_ERL)
+
+.DEFAULT_GOAL = all
+$(PROJECT).d:: $(EXTRA_SOURCES)
+
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-run.mk \
+ rabbit_common/mk/rabbitmq-dist.mk \
+ rabbit_common/mk/rabbitmq-tools.mk
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+include rabbitmq-components.mk
+
+# List of plugins to include in a RabbitMQ release.
+DISTRIBUTED_DEPS := rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_visualiser \
+ rabbitmq_mqtt \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_tracing \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples
+
+ifneq ($(IS_DEP),1)
+ifneq ($(filter source-dist packages package-%,$(MAKECMDGOALS)),)
+DEPS += $(DISTRIBUTED_DEPS)
+endif
+ifneq ($(wildcard git-revisions.txt),)
+DEPS += $(DISTRIBUTED_DEPS)
endif
-
-ifdef INSTRUMENT_FOR_QC
-ERLC_OPTS += -DINSTR_MOD=gm_qc
-else
-ERLC_OPTS += -DINSTR_MOD=gm
endif
-include version.mk
-
-PLUGINS_SRC_DIR?=$(shell [ -d "plugins-src" ] && echo "plugins-src" || echo )
-PLUGINS_DIR=plugins
-TARBALL_NAME=rabbitmq-server-$(VERSION)
-TARGET_SRC_DIR=dist/$(TARBALL_NAME)
-
-SIBLING_CODEGEN_DIR=../rabbitmq-codegen/
-AMQP_CODEGEN_DIR=$(shell [ -d $(SIBLING_CODEGEN_DIR) ] && echo $(SIBLING_CODEGEN_DIR) || echo codegen)
-AMQP_SPEC_JSON_FILES_0_9_1=$(AMQP_CODEGEN_DIR)/amqp-rabbitmq-0.9.1.json $(AMQP_CODEGEN_DIR)/credit_extension.json
-AMQP_SPEC_JSON_FILES_0_8=$(AMQP_CODEGEN_DIR)/amqp-rabbitmq-0.8.json
-
-ERL_CALL=erl_call -sname $(RABBITMQ_NODENAME) -e
-
-ERL_EBIN=erl -noinput -pa $(EBIN_DIR)
+include erlang.mk
-define usage_xml_to_erl
- $(subst __,_,$(patsubst $(DOCS_DIR)/rabbitmq%.1.xml, $(SOURCE_DIR)/rabbit_%_usage.erl, $(subst -,_,$(1))))
-endef
+# --------------------------------------------------------------------
+# Compilation.
+# --------------------------------------------------------------------
-define usage_dep
- $(call usage_xml_to_erl, $(1)): $(1) $(DOCS_DIR)/usage.xsl
-endef
-
-define boolean_macro
-$(if $(filter true,$(1)),-D$(2))
-endef
+RMQ_ERLC_OPTS += -I $(DEPS_DIR)/rabbit_common/include
-ifneq "$(SBIN_DIR)" ""
-ifneq "$(TARGET_DIR)" ""
-SCRIPTS_REL_PATH=$(shell ./calculate-relative $(TARGET_DIR)/sbin $(SBIN_DIR))
+ifdef INSTRUMENT_FOR_QC
+RMQ_ERLC_OPTS += -DINSTR_MOD=gm_qc
+else
+RMQ_ERLC_OPTS += -DINSTR_MOD=gm
endif
+
+ifdef CREDIT_FLOW_TRACING
+RMQ_ERLC_OPTS += -DCREDIT_FLOW_TRACING=true
endif
-# Versions prior to this are not supported
-NEED_MAKE := 3.80
-ifneq "$(NEED_MAKE)" "$(firstword $(sort $(NEED_MAKE) $(MAKE_VERSION)))"
-$(error Versions of make prior to $(NEED_MAKE) are not supported)
+ERTS_VER := $(shell erl -version 2>&1 | sed -E 's/.* version //')
+USE_SPECS_MIN_ERTS_VER = 5.11
+ifeq ($(call compare_version,$(ERTS_VER),$(USE_SPECS_MIN_ERTS_VER),>=),true)
+RMQ_ERLC_OPTS += -Duse_specs
endif
-# .DEFAULT_GOAL introduced in 3.81
-DEFAULT_GOAL_MAKE := 3.81
-ifneq "$(DEFAULT_GOAL_MAKE)" "$(firstword $(sort $(DEFAULT_GOAL_MAKE) $(MAKE_VERSION)))"
-.DEFAULT_GOAL=all
+ifndef USE_PROPER_QC
+# PropEr needs to be installed for property checking
+# http://proper.softlab.ntua.gr/
+USE_PROPER_QC := $(shell $(ERL) -eval 'io:format({module, proper} =:= code:ensure_loaded(proper)), halt().')
+RMQ_ERLC_OPTS += $(if $(filter true,$(USE_PROPER_QC)),-Duse_proper_qc)
endif
-all: $(TARGETS) $(TEST_TARGETS)
+ERLC_OPTS += $(RMQ_ERLC_OPTS)
-.PHONY: plugins check-xref
-ifneq "$(PLUGINS_SRC_DIR)" ""
-plugins:
- [ -d "$(PLUGINS_SRC_DIR)/rabbitmq-server" ] || ln -s "$(CURDIR)" "$(PLUGINS_SRC_DIR)/rabbitmq-server"
- mkdir -p $(PLUGINS_DIR)
- PLUGINS_SRC_DIR="" $(MAKE) -C "$(PLUGINS_SRC_DIR)" plugins-dist PLUGINS_DIST_DIR="$(CURDIR)/$(PLUGINS_DIR)" VERSION=$(VERSION)
- echo "Put your EZs here and use rabbitmq-plugins to enable them." > $(PLUGINS_DIR)/README
- rm -f $(PLUGINS_DIR)/rabbit_common*.ez
+clean:: clean-extra-sources
-# add -q to remove printout of warnings....
-check-xref: $(BEAM_TARGETS) $(PLUGINS_DIR)
- rm -rf lib
- ./check_xref $(PLUGINS_DIR) -q
+clean-extra-sources:
+ $(gen_verbose) rm -f $(EXTRA_SOURCES)
-else
-plugins:
-# Not building plugins
+# --------------------------------------------------------------------
+# Tests.
+# --------------------------------------------------------------------
-check-xref:
- $(info xref checks are disabled as there is no plugins-src directory)
+TARGETS_IN_RABBITMQ_TEST = $(patsubst %,%-in-rabbitmq_test,\
+ tests full unit lite conformance16 lazy-vq-tests)
-endif
+.PHONY: $(TARGETS_IN_RABBITMQ_TEST)
-$(DEPS_FILE): $(SOURCES) $(INCLUDES)
- rm -f $@
- echo $(subst : ,:,$(foreach FILE,$^,$(FILE):)) | escript generate_deps $@ $(EBIN_DIR)
-
-$(EBIN_DIR)/rabbit.app: $(EBIN_DIR)/rabbit_app.in $(SOURCES) generate_app
- escript generate_app $< $@ $(SOURCE_DIR)
-
-$(EBIN_DIR)/%.beam: $(SOURCE_DIR)/%.erl | $(DEPS_FILE)
- erlc -o $(EBIN_DIR) $(ERLC_OPTS) -pa $(EBIN_DIR) $<
-
-$(TEST_EBIN_DIR)/%.beam: $(TEST_DIR)/%.erl | $(TEST_EBIN_DIR)
- erlc -o $(TEST_EBIN_DIR) $(ERLC_OPTS) -pa $(EBIN_DIR) -pa $(TEST_EBIN_DIR) $<
-
-$(TEST_EBIN_DIR):
- mkdir -p $(TEST_EBIN_DIR)
-
-$(INCLUDE_DIR)/rabbit_framing.hrl: codegen.py $(AMQP_CODEGEN_DIR)/amqp_codegen.py $(AMQP_SPEC_JSON_FILES_0_9_1) $(AMQP_SPEC_JSON_FILES_0_8)
- $(PYTHON) codegen.py --ignore-conflicts header $(AMQP_SPEC_JSON_FILES_0_9_1) $(AMQP_SPEC_JSON_FILES_0_8) $@
-
-$(SOURCE_DIR)/rabbit_framing_amqp_0_9_1.erl: codegen.py $(AMQP_CODEGEN_DIR)/amqp_codegen.py $(AMQP_SPEC_JSON_FILES_0_9_1)
- $(PYTHON) codegen.py body $(AMQP_SPEC_JSON_FILES_0_9_1) $@
-
-$(SOURCE_DIR)/rabbit_framing_amqp_0_8.erl: codegen.py $(AMQP_CODEGEN_DIR)/amqp_codegen.py $(AMQP_SPEC_JSON_FILES_0_8)
- $(PYTHON) codegen.py body $(AMQP_SPEC_JSON_FILES_0_8) $@
-
-dialyze: $(BEAM_TARGETS) $(BASIC_PLT)
- dialyzer --plt $(BASIC_PLT) --no_native --fullpath \
- $(BEAM_TARGETS)
-
-# rabbit.plt is used by rabbitmq-erlang-client's dialyze make target
-create-plt: $(RABBIT_PLT)
-
-$(RABBIT_PLT): $(BEAM_TARGETS) $(BASIC_PLT)
- dialyzer --plt $(BASIC_PLT) --output_plt $@ --no_native \
- --add_to_plt $(BEAM_TARGETS)
-
-$(BASIC_PLT): $(BEAM_TARGETS)
- if [ -f $@ ]; then \
- touch $@; \
- else \
- dialyzer --output_plt $@ --build_plt \
- --apps erts kernel stdlib compiler sasl os_mon mnesia tools \
- public_key crypto ssl xmerl; \
- fi
-
-clean:
- rm -f $(EBIN_DIR)/*.beam
- rm -f $(EBIN_DIR)/rabbit.app $(EBIN_DIR)/rabbit.boot $(EBIN_DIR)/rabbit.script $(EBIN_DIR)/rabbit.rel
- rm -rf $(TEST_EBIN_DIR)
- rm -f $(PLUGINS_DIR)/*.ez
- [ -d "$(PLUGINS_SRC_DIR)" ] && PLUGINS_SRC_DIR="" PRESERVE_CLONE_DIR=1 make -C $(PLUGINS_SRC_DIR) clean || true
- rm -f $(INCLUDE_DIR)/rabbit_framing.hrl $(SOURCE_DIR)/rabbit_framing_amqp_*.erl codegen.pyc
- rm -f $(DOCS_DIR)/*.[0-9].gz $(DOCS_DIR)/*.man.xml $(DOCS_DIR)/*.erl $(USAGES_ERL)
- rm -f $(RABBIT_PLT)
- rm -f $(DEPS_FILE)
-
-cleandb:
- rm -rf $(RABBITMQ_MNESIA_DIR)/*
-
-############ various tasks to interact with RabbitMQ ###################
-
-BASIC_SCRIPT_ENVIRONMENT_SETTINGS=\
- RABBITMQ_NODE_IP_ADDRESS="$(RABBITMQ_NODE_IP_ADDRESS)" \
- RABBITMQ_NODE_PORT="$(RABBITMQ_NODE_PORT)" \
- RABBITMQ_LOG_BASE="$(RABBITMQ_LOG_BASE)" \
- RABBITMQ_MNESIA_DIR="$(RABBITMQ_MNESIA_DIR)" \
- RABBITMQ_PLUGINS_EXPAND_DIR="$(RABBITMQ_PLUGINS_EXPAND_DIR)"
-
-run: all
- $(BASIC_SCRIPT_ENVIRONMENT_SETTINGS) \
- RABBITMQ_ALLOW_INPUT=true \
- RABBITMQ_SERVER_START_ARGS="$(RABBITMQ_SERVER_START_ARGS)" \
- ./scripts/rabbitmq-server
-
-run-background: all
- $(BASIC_SCRIPT_ENVIRONMENT_SETTINGS) \
- RABBITMQ_SERVER_START_ARGS="$(RABBITMQ_SERVER_START_ARGS)" \
- ./scripts/rabbitmq-server -detached
-
-run-node: all
- $(BASIC_SCRIPT_ENVIRONMENT_SETTINGS) \
- RABBITMQ_NODE_ONLY=true \
- RABBITMQ_ALLOW_INPUT=true \
- RABBITMQ_SERVER_START_ARGS="$(RABBITMQ_SERVER_START_ARGS)" \
- ./scripts/rabbitmq-server
-
-run-background-node: all
- $(BASIC_SCRIPT_ENVIRONMENT_SETTINGS) \
- RABBITMQ_NODE_ONLY=true \
- RABBITMQ_SERVER_START_ARGS="$(RABBITMQ_SERVER_START_ARGS)" \
- ./scripts/rabbitmq-server -detached
-
-run-tests: all
- echo 'code:add_path("$(TEST_EBIN_DIR)").' | $(ERL_CALL)
- echo 'code:add_path("$(TEST_EBIN_DIR)").' | $(ERL_CALL) -n hare || true
- OUT=$$(echo "rabbit_tests:all_tests()." | $(ERL_CALL)) ; \
- echo $$OUT ; echo $$OUT | grep '^{ok, passed}$$' > /dev/null
-
-run-qc: all
- echo 'code:add_path("$(TEST_EBIN_DIR)").' | $(ERL_CALL)
- ./quickcheck $(RABBITMQ_NODENAME) rabbit_backing_queue_qc 100 40
- ./quickcheck $(RABBITMQ_NODENAME) gm_qc 1000 200
-
-start-background-node: all
- -rm -f $(RABBITMQ_MNESIA_DIR).pid
- mkdir -p $(RABBITMQ_MNESIA_DIR)
- $(BASIC_SCRIPT_ENVIRONMENT_SETTINGS) \
- RABBITMQ_NODE_ONLY=true \
- RABBITMQ_SERVER_START_ARGS="$(RABBITMQ_SERVER_START_ARGS)" \
- ./scripts/rabbitmq-server \
- > $(RABBITMQ_MNESIA_DIR)/startup_log \
- 2> $(RABBITMQ_MNESIA_DIR)/startup_err &
- ./scripts/rabbitmqctl -n $(RABBITMQ_NODENAME) wait $(RABBITMQ_MNESIA_DIR).pid kernel
-
-start-rabbit-on-node: all
- echo "rabbit:start()." | $(ERL_CALL)
- ./scripts/rabbitmqctl -n $(RABBITMQ_NODENAME) wait $(RABBITMQ_MNESIA_DIR).pid
-
-stop-rabbit-on-node: all
- echo "rabbit:stop()." | $(ERL_CALL)
-
-set-resource-alarm: all
- echo "rabbit_alarm:set_alarm({{resource_limit, $(SOURCE), node()}, []})." | \
- $(ERL_CALL)
-
-clear-resource-alarm: all
- echo "rabbit_alarm:clear_alarm({resource_limit, $(SOURCE), node()})." | \
- $(ERL_CALL)
-
-stop-node:
- -( \
- pid=$$(./scripts/rabbitmqctl -n $(RABBITMQ_NODENAME) eval 'os:getpid().') && \
- $(ERL_CALL) -q && \
- while ps -p $$pid >/dev/null 2>&1; do sleep 1; done \
- )
-
-# code coverage will be created for subdirectory "ebin" of COVER_DIR
-COVER_DIR=.
-
-start-cover: all
- echo "rabbit_misc:start_cover([\"rabbit\", \"hare\"])." | $(ERL_CALL)
- echo "rabbit_misc:enable_cover([\"$(COVER_DIR)\"])." | $(ERL_CALL)
-
-stop-cover: all
- echo "rabbit_misc:report_cover(), cover:stop()." | $(ERL_CALL)
- cat cover/summary.txt
-
-########################################################################
-
-srcdist: distclean
- mkdir -p $(TARGET_SRC_DIR)/codegen
- cp -r ebin src include LICENSE LICENSE-MPL-RabbitMQ INSTALL README $(TARGET_SRC_DIR)
- sed 's/%%VSN%%/$(VERSION)/' $(TARGET_SRC_DIR)/ebin/rabbit_app.in > $(TARGET_SRC_DIR)/ebin/rabbit_app.in.tmp && \
- mv $(TARGET_SRC_DIR)/ebin/rabbit_app.in.tmp $(TARGET_SRC_DIR)/ebin/rabbit_app.in
-
- cp -r $(AMQP_CODEGEN_DIR)/* $(TARGET_SRC_DIR)/codegen/
- cp codegen.py Makefile generate_app generate_deps calculate-relative $(TARGET_SRC_DIR)
-
- echo "VERSION?=${VERSION}" > $(TARGET_SRC_DIR)/version.mk
-
- cp -r scripts $(TARGET_SRC_DIR)
- cp -r $(DOCS_DIR) $(TARGET_SRC_DIR)
- chmod 0755 $(TARGET_SRC_DIR)/scripts/*
-
-ifneq "$(PLUGINS_SRC_DIR)" ""
- cp -r $(PLUGINS_SRC_DIR) $(TARGET_SRC_DIR)/plugins-src
- rm $(TARGET_SRC_DIR)/LICENSE
- cat packaging/common/LICENSE.head >> $(TARGET_SRC_DIR)/LICENSE
- cat $(AMQP_CODEGEN_DIR)/license_info >> $(TARGET_SRC_DIR)/LICENSE
- find $(PLUGINS_SRC_DIR)/licensing -name "license_info_*" -exec cat '{}' >> $(TARGET_SRC_DIR)/LICENSE \;
- cat packaging/common/LICENSE.tail >> $(TARGET_SRC_DIR)/LICENSE
- find $(PLUGINS_SRC_DIR)/licensing -name "LICENSE-*" -exec cp '{}' $(TARGET_SRC_DIR) \;
- rm -rf $(TARGET_SRC_DIR)/licensing
-else
- @echo No plugins source distribution found
-endif
+TEST_ERLC_OPTS += $(RMQ_ERLC_OPTS)
- (cd dist; tar -zchf $(TARBALL_NAME).tar.gz $(TARBALL_NAME))
- (cd dist; zip -q -r $(TARBALL_NAME).zip $(TARBALL_NAME))
- rm -rf $(TARGET_SRC_DIR)
+tests:: tests-in-rabbitmq_test
-distclean: clean
- $(MAKE) -C $(AMQP_CODEGEN_DIR) distclean
- rm -rf dist
- find . -regex '.*\(~\|#\|\.swp\|\.dump\)' -exec rm {} \;
+$(TARGETS_IN_RABBITMQ_TEST): $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ test-build $(DEPS_DIR)/rabbitmq_test
+ $(MAKE) -C $(DEPS_DIR)/rabbitmq_test \
+ IS_DEP=1 \
+ RABBITMQ_BROKER_DIR=$(RABBITMQ_BROKER_DIR) \
+ $(patsubst %-in-rabbitmq_test,%,$@)
+
+# --------------------------------------------------------------------
+# Documentation.
+# --------------------------------------------------------------------
# xmlto can not read from standard input, so we mess with a tmp file.
-%.gz: %.xml $(DOCS_DIR)/examples-to-end.xsl
- xmlto --version | grep -E '^xmlto version 0\.0\.([0-9]|1[1-8])$$' >/dev/null || opt='--stringparam man.indent.verbatims=0' ; \
- xsltproc --novalid $(DOCS_DIR)/examples-to-end.xsl $< > $<.tmp && \
- xmlto -o $(DOCS_DIR) $$opt man $<.tmp && \
- gzip -f $(DOCS_DIR)/`basename $< .xml`
- rm -f $<.tmp
+%: %.xml $(DOCS_DIR)/examples-to-end.xsl
+ $(gen_verbose) xmlto --version | \
+ grep -E '^xmlto version 0\.0\.([0-9]|1[1-8])$$' >/dev/null || \
+ opt='--stringparam man.indent.verbatims=0' ; \
+ xsltproc --novalid $(DOCS_DIR)/examples-to-end.xsl $< > $<.tmp && \
+ (xmlto -o $(DOCS_DIR) $$opt man $< 2>&1 | (grep -qv '^Note: Writing' || :)) && \
+ rm $<.tmp
# Use tmp files rather than a pipeline so that we get meaningful errors
# Do not fold the cp into previous line, it's there to stop the file being
# generated but empty if we fail
-$(SOURCE_DIR)/%_usage.erl:
- xsltproc --novalid --stringparam modulename "`basename $@ .erl`" \
- $(DOCS_DIR)/usage.xsl $< > $@.tmp
- sed -e 's/"/\\"/g' -e 's/%QUOTE%/"/g' $@.tmp > $@.tmp2
- fold -s $@.tmp2 > $@.tmp3
- mv $@.tmp3 $@
- rm $@.tmp $@.tmp2
+define usage_dep
+$(call usage_xml_to_erl, $(1)):: $(1) $(DOCS_DIR)/usage.xsl
+ $$(gen_verbose) xsltproc --novalid --stringparam modulename "`basename $$@ .erl`" \
+ $(DOCS_DIR)/usage.xsl $$< > $$@.tmp && \
+ sed -e 's/"/\\"/g' -e 's/%QUOTE%/"/g' $$@.tmp > $$@.tmp2 && \
+ fold -s $$@.tmp2 > $$@.tmp3 && \
+ mv $$@.tmp3 $$@ && \
+ rm $$@.tmp $$@.tmp2
+endef
+
+$(foreach XML,$(USAGES_XML),$(eval $(call usage_dep, $(XML))))
# We rename the file before xmlto sees it since xmlto will use the name of
# the file to make internal links.
%.man.xml: %.xml $(DOCS_DIR)/html-to-website-xml.xsl
- cp $< `basename $< .xml`.xml && \
- xmlto xhtml-nochunks `basename $< .xml`.xml ; rm `basename $< .xml`.xml
+ $(gen_verbose) cp $< `basename $< .xml`.xml && \
+ xmlto xhtml-nochunks `basename $< .xml`.xml ; \
+ rm `basename $< .xml`.xml && \
cat `basename $< .xml`.html | \
xsltproc --novalid $(DOCS_DIR)/remove-namespaces.xsl - | \
- xsltproc --novalid --stringparam original `basename $<` $(DOCS_DIR)/html-to-website-xml.xsl - | \
- xmllint --format - > $@
+ xsltproc --novalid --stringparam original `basename $<` $(DOCS_DIR)/html-to-website-xml.xsl - | \
+ xmllint --format - > $@ && \
rm `basename $< .xml`.html
-docs_all: $(MANPAGES) $(WEB_MANPAGES)
-
-install: install_bin install_docs
-
-install_bin: all install_dirs
- cp -r ebin include LICENSE* INSTALL $(TARGET_DIR)
-
- chmod 0755 scripts/*
- for script in rabbitmq-env rabbitmq-server rabbitmqctl rabbitmq-plugins rabbitmq-defaults; do \
- cp scripts/$$script $(TARGET_DIR)/sbin; \
- [ -e $(SBIN_DIR)/$$script ] || ln -s $(SCRIPTS_REL_PATH)/$$script $(SBIN_DIR)/$$script; \
+.PHONY: manpages web-manpages distclean-manpages
+
+docs:: manpages web-manpages
+
+manpages: $(MANPAGES)
+ @:
+
+web-manpages: $(WEB_MANPAGES)
+ @:
+
+distclean:: distclean-manpages
+
+distclean-manpages::
+ $(gen_verbose) rm -f $(MANPAGES) $(WEB_MANPAGES)
+
+# --------------------------------------------------------------------
+# Distribution.
+# --------------------------------------------------------------------
+
+.PHONY: source-dist clean-source-dist
+
+SOURCE_DIST_BASE ?= rabbitmq-server
+SOURCE_DIST_SUFFIXES ?= tar.xz zip
+SOURCE_DIST ?= $(PACKAGES_DIR)/$(SOURCE_DIST_BASE)-$(VERSION)
+
+# The first source distribution file is used by packages: if the archive
+# type changes, you must update all packages' Makefile.
+SOURCE_DIST_FILES = $(addprefix $(SOURCE_DIST).,$(SOURCE_DIST_SUFFIXES))
+
+.PHONY: $(SOURCE_DIST_FILES)
+
+source-dist: $(SOURCE_DIST_FILES)
+ @:
+
+RSYNC ?= rsync
+RSYNC_V_0 =
+RSYNC_V_1 = -v
+RSYNC_V_2 = -v
+RSYNC_V = $(RSYNC_V_$(V))
+RSYNC_FLAGS += -a $(RSYNC_V) \
+ --exclude '.sw?' --exclude '.*.sw?' \
+ --exclude '*.beam' \
+ --exclude '*.pyc' \
+ --exclude '.git*' \
+ --exclude '.hg*' \
+ --exclude '.travis.yml' \
+ --exclude '.*.plt' \
+ --exclude '$(notdir $(ERLANG_MK_TMP))' \
+ --exclude 'ebin' \
+ --exclude 'packaging' \
+ --exclude 'erl_crash.dump' \
+ --exclude 'MnesiaCore.*' \
+ --exclude 'cover/' \
+ --exclude 'deps/' \
+ --exclude '$(notdir $(DEPS_DIR))/' \
+ --exclude 'plugins/' \
+ --exclude '$(notdir $(DIST_DIR))/' \
+ --exclude '/$(notdir $(PACKAGES_DIR))/' \
+ --exclude '/cowboy/doc/' \
+ --exclude '/cowboy/examples/' \
+ --exclude '/rabbitmq_amqp1_0/test/swiftmq/build/'\
+ --exclude '/rabbitmq_amqp1_0/test/swiftmq/swiftmq*'\
+ --exclude '/rabbitmq_mqtt/test/build/' \
+ --exclude '/rabbitmq_mqtt/test/test_client/'\
+ --delete \
+ --delete-excluded
+
+TAR ?= tar
+TAR_V_0 =
+TAR_V_1 = -v
+TAR_V_2 = -v
+TAR_V = $(TAR_V_$(V))
+
+GZIP ?= gzip
+BZIP2 ?= bzip2
+XZ ?= xz
+
+ZIP ?= zip
+ZIP_V_0 = -q
+ZIP_V_1 =
+ZIP_V_2 =
+ZIP_V = $(ZIP_V_$(V))
+
+.PHONY: $(SOURCE_DIST)
+
+$(SOURCE_DIST): $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+ $(verbose) mkdir -p $(dir $@)
+ $(gen_verbose) $(RSYNC) $(RSYNC_FLAGS) ./ $@/
+ $(verbose) sed -E -i.bak \
+ -e 's/[{]vsn[[:blank:]]*,[^}]+}/{vsn, "$(VERSION)"}/' \
+ $@/src/$(PROJECT).app.src && \
+ rm $@/src/$(PROJECT).app.src.bak
+ $(verbose) cat packaging/common/LICENSE.head > $@/LICENSE
+ $(verbose) mkdir -p $@/deps/licensing
+ $(verbose) for dep in $$(cat $(ERLANG_MK_RECURSIVE_DEPS_LIST) | grep -v '/$(PROJECT)$$' | LC_COLLATE=C sort); do \
+ $(RSYNC) $(RSYNC_FLAGS) \
+ $$dep \
+ $@/deps; \
+ if test -f $@/deps/$$(basename $$dep)/erlang.mk && \
+ test "$$(wc -l $@/deps/$$(basename $$dep)/erlang.mk | awk '{print $$1;}')" = "1" && \
+ grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk)$$" $@/deps/$$(basename $$dep)/erlang.mk; then \
+ echo "include ../../erlang.mk" > $@/deps/$$(basename $$dep)/erlang.mk; \
+ fi; \
+ sed -E -i.bak "s|^[[:blank:]]*include[[:blank:]]+\.\./.*erlang.mk$$|include ../../erlang.mk|" \
+ $@/deps/$$(basename $$dep)/Makefile && \
+ rm $@/deps/$$(basename $$dep)/Makefile.bak; \
+ if test -f "$$dep/license_info"; then \
+ cp "$$dep/license_info" "$@/deps/licensing/license_info_$$(basename "$$dep")"; \
+ cat "$$dep/license_info" >> $@/LICENSE; \
+ fi; \
+ find "$$dep" -maxdepth 1 -name 'LICENSE-*' -exec cp '{}' $@/deps/licensing \; ; \
+ done
+ $(verbose) cat packaging/common/LICENSE.tail >> $@/LICENSE
+ $(verbose) find $@/deps/licensing -name 'LICENSE-*' -exec cp '{}' $@ \;
+ $(verbose) for file in $$(find $@ -name '*.app.src'); do \
+ sed -E -i.bak -e 's/[{]vsn[[:blank:]]*,[[:blank:]]*""[[:blank:]]*}/{vsn, "$(VERSION)"}/' $$file; \
+ rm $$file.bak; \
+ done
+ $(verbose) echo "$(PROJECT) $$(git rev-parse HEAD) $$(git describe --tags --exact-match 2>/dev/null || git symbolic-ref -q --short HEAD)" > $@/git-revisions.txt
+ $(verbose) for dep in $$(cat $(ERLANG_MK_RECURSIVE_DEPS_LIST)); do \
+ (cd $$dep; echo "$$(basename "$$dep") $$(git rev-parse HEAD) $$(git describe --tags --exact-match 2>/dev/null || git symbolic-ref -q --short HEAD)") >> $@/git-revisions.txt; \
done
- mkdir -p $(TARGET_DIR)/$(PLUGINS_DIR)
- [ -d "$(PLUGINS_DIR)" ] && cp $(PLUGINS_DIR)/*.ez $(PLUGINS_DIR)/README $(TARGET_DIR)/$(PLUGINS_DIR) || true
+# TODO: Fix file timestamps to have reproducible source archives.
+# $(verbose) find $@ -not -name 'git-revisions.txt' -print0 | xargs -0 touch -r $@/git-revisions.txt
+
+$(SOURCE_DIST).tar.gz: $(SOURCE_DIST)
+ $(gen_verbose) cd $(dir $(SOURCE_DIST)) && \
+ find $(notdir $(SOURCE_DIST)) -print0 | LC_COLLATE=C sort -z | \
+ xargs -0 $(TAR) $(TAR_V) --no-recursion -cf - | \
+ $(GZIP) --best > $@
+
+$(SOURCE_DIST).tar.bz2: $(SOURCE_DIST)
+ $(gen_verbose) cd $(dir $(SOURCE_DIST)) && \
+ find $(notdir $(SOURCE_DIST)) -print0 | LC_COLLATE=C sort -z | \
+ xargs -0 $(TAR) $(TAR_V) --no-recursion -cf - | \
+ $(BZIP2) > $@
+
+$(SOURCE_DIST).tar.xz: $(SOURCE_DIST)
+ $(gen_verbose) cd $(dir $(SOURCE_DIST)) && \
+ find $(notdir $(SOURCE_DIST)) -print0 | LC_COLLATE=C sort -z | \
+ xargs -0 $(TAR) $(TAR_V) --no-recursion -cf - | \
+ $(XZ) > $@
+
+$(SOURCE_DIST).zip: $(SOURCE_DIST)
+ $(verbose) rm -f $@
+ $(gen_verbose) cd $(dir $(SOURCE_DIST)) && \
+ find $(notdir $(SOURCE_DIST)) -print0 | LC_COLLATE=C sort -z | \
+ xargs -0 $(ZIP) $(ZIP_V) $@
+
+clean:: clean-source-dist
+
+clean-source-dist:
+ $(gen_verbose) rm -rf -- $(SOURCE_DIST_BASE)-*
+
+# --------------------------------------------------------------------
+# Installation.
+# --------------------------------------------------------------------
+
+.PHONY: install install-erlapp install-scripts install-bin install-man
+.PHONY: install-windows install-windows-erlapp install-windows-scripts install-windows-docs
+
+DESTDIR ?=
+
+PREFIX ?= /usr/local
+WINDOWS_PREFIX ?= rabbitmq-server-windows-$(VERSION)
+
+MANDIR ?= $(PREFIX)/share/man
+RMQ_ROOTDIR ?= $(PREFIX)/lib/erlang
+RMQ_BINDIR ?= $(RMQ_ROOTDIR)/bin
+RMQ_LIBDIR ?= $(RMQ_ROOTDIR)/lib
+RMQ_ERLAPP_DIR ?= $(RMQ_LIBDIR)/rabbitmq_server-$(VERSION)
+
+SCRIPTS = rabbitmq-defaults \
+ rabbitmq-env \
+ rabbitmq-server \
+ rabbitmqctl \
+ rabbitmq-plugins
+
+WINDOWS_SCRIPTS = rabbitmq-defaults.bat \
+ rabbitmq-echopid.bat \
+ rabbitmq-env.bat \
+ rabbitmq-plugins.bat \
+ rabbitmq-server.bat \
+ rabbitmq-service.bat \
+ rabbitmqctl.bat
+
+UNIX_TO_DOS ?= todos
+
+inst_verbose_0 = @echo " INST " $@;
+inst_verbose = $(inst_verbose_$(V))
+
+install: install-erlapp install-scripts
+
+install-erlapp: dist
+ $(verbose) mkdir -p $(DESTDIR)$(RMQ_ERLAPP_DIR)
+ $(inst_verbose) cp -r include ebin plugins LICENSE* INSTALL \
+ $(DESTDIR)$(RMQ_ERLAPP_DIR)
+ $(verbose) echo "Put your EZs here and use rabbitmq-plugins to enable them." \
+ > $(DESTDIR)$(RMQ_ERLAPP_DIR)/plugins/README
+
+ @# rabbitmq-common provides headers too: copy them to
+ @# rabbitmq_server/include.
+ $(verbose) cp -r $(DEPS_DIR)/rabbit_common/include $(DESTDIR)$(RMQ_ERLAPP_DIR)
+
+install-scripts:
+ $(verbose) mkdir -p $(DESTDIR)$(RMQ_ERLAPP_DIR)/sbin
+ $(inst_verbose) for script in $(SCRIPTS); do \
+ cp "scripts/$$script" "$(DESTDIR)$(RMQ_ERLAPP_DIR)/sbin"; \
+ chmod 0755 "$(DESTDIR)$(RMQ_ERLAPP_DIR)/sbin/$$script"; \
+ done
-install_docs: docs_all install_dirs
- for section in 1 5; do \
- mkdir -p $(MAN_DIR)/man$$section; \
- for manpage in $(DOCS_DIR)/*.$$section.gz; do \
- cp $$manpage $(MAN_DIR)/man$$section; \
- done; \
+# FIXME: We do symlinks to scripts in $(RMQ_ERLAPP_DIR))/sbin but this
+# code assumes a certain hierarchy to make relative symlinks.
+install-bin: install-scripts
+ $(verbose) mkdir -p $(DESTDIR)$(RMQ_BINDIR)
+ $(inst_verbose) for script in $(SCRIPTS); do \
+ test -e $(DESTDIR)$(RMQ_BINDIR)/$$script || \
+ ln -sf ../lib/$(notdir $(RMQ_ERLAPP_DIR))/sbin/$$script \
+ $(DESTDIR)$(RMQ_BINDIR)/$$script; \
done
- if test "$(DOC_INSTALL_DIR)"; then \
- cp $(DOCS_DIR)/rabbitmq.config.example $(DOC_INSTALL_DIR)/rabbitmq.config.example; \
- fi
-
-install_dirs:
- @ OK=true && \
- { [ -n "$(TARGET_DIR)" ] || { echo "Please set TARGET_DIR."; OK=false; }; } && \
- { [ -n "$(SBIN_DIR)" ] || { echo "Please set SBIN_DIR."; OK=false; }; } && \
- { [ -n "$(MAN_DIR)" ] || { echo "Please set MAN_DIR."; OK=false; }; } && $$OK
-
- mkdir -p $(TARGET_DIR)/sbin
- mkdir -p $(SBIN_DIR)
- mkdir -p $(MAN_DIR)
- if test "$(DOC_INSTALL_DIR)"; then \
- mkdir -p $(DOC_INSTALL_DIR); \
- fi
-$(foreach XML,$(USAGES_XML),$(eval $(call usage_dep, $(XML))))
+install-man: manpages
+ $(inst_verbose) sections=$$(ls -1 docs/*.[1-9] \
+ | sed -E 's/.*\.([1-9])$$/\1/' | uniq | sort); \
+ for section in $$sections; do \
+ mkdir -p $(DESTDIR)$(MANDIR)/man$$section; \
+ for manpage in $(DOCS_DIR)/*.$$section; do \
+ gzip < $$manpage \
+ > $(DESTDIR)$(MANDIR)/man$$section/$$(basename $$manpage).gz; \
+ done; \
+ done
+
+install-windows: install-windows-erlapp install-windows-scripts install-windows-docs
+
+install-windows-erlapp: dist
+ $(verbose) mkdir -p $(DESTDIR)$(WINDOWS_PREFIX)
+ $(inst_verbose) cp -r include ebin plugins LICENSE* INSTALL \
+ $(DESTDIR)$(WINDOWS_PREFIX)
+ $(verbose) echo "Put your EZs here and use rabbitmq-plugins.bat to enable them." \
+ > $(DESTDIR)$(WINDOWS_PREFIX)/plugins/README.txt
+ $(verbose) $(UNIX_TO_DOS) $(DESTDIR)$(WINDOWS_PREFIX)/plugins/README.txt
+
+# rabbitmq-common provides headers too: copy them to
+# rabbitmq_server/include.
+ $(verbose) cp -r $(DEPS_DIR)/rabbit_common/include $(DESTDIR)$(WINDOWS_PREFIX)
+
+install-windows-scripts:
+ $(verbose) mkdir -p $(DESTDIR)$(WINDOWS_PREFIX)/sbin
+ $(inst_verbose) for script in $(WINDOWS_SCRIPTS); do \
+ cp "scripts/$$script" "$(DESTDIR)$(WINDOWS_PREFIX)/sbin"; \
+ chmod 0755 "$(DESTDIR)$(WINDOWS_PREFIX)/sbin/$$script"; \
+ done
-# Note that all targets which depend on clean must have clean in their
-# name. Also any target that doesn't depend on clean should not have
-# clean in its name, unless you know that you don't need any of the
-# automatic dependency generation for that target (e.g. cleandb).
+install-windows-docs: install-windows-erlapp
+ $(verbose) mkdir -p $(DESTDIR)$(WINDOWS_PREFIX)/etc
+ $(inst_verbose) xmlto -o . xhtml-nochunks docs/rabbitmq-service.xml
+ $(verbose) elinks -dump -no-references -no-numbering rabbitmq-service.html \
+ > $(DESTDIR)$(WINDOWS_PREFIX)/readme-service.txt
+ $(verbose) rm rabbitmq-service.html
+ $(verbose) cp docs/rabbitmq.config.example $(DESTDIR)$(WINDOWS_PREFIX)/etc
+ $(verbose) for file in $(DESTDIR)$(WINDOWS_PREFIX)/readme-service.txt \
+ $(DESTDIR)$(WINDOWS_PREFIX)/LICENSE* $(DESTDIR)$(WINDOWS_PREFIX)/INSTALL \
+ $(DESTDIR)$(WINDOWS_PREFIX)/etc/rabbitmq.config.example; do \
+ $(UNIX_TO_DOS) "$$file"; \
+ case "$$file" in \
+ *.txt) ;; \
+ *.example) ;; \
+ *) mv "$$file" "$$file.txt" ;; \
+ esac; \
+ done
-# We want to load the dep file if *any* target *doesn't* contain
-# "clean" - i.e. if removing all clean-like targets leaves something.
+# --------------------------------------------------------------------
+# Packaging.
+# --------------------------------------------------------------------
-ifeq "$(MAKECMDGOALS)" ""
-TESTABLEGOALS:=$(.DEFAULT_GOAL)
-else
-TESTABLEGOALS:=$(MAKECMDGOALS)
-endif
+.PHONY: packages package-deb \
+ package-rpm package-rpm-fedora package-rpm-suse \
+ package-windows package-standalone-macosx \
+ package-generic-unix
-ifneq "$(strip $(patsubst clean%,,$(patsubst %clean,,$(TESTABLEGOALS))))" ""
-include $(DEPS_FILE)
-endif
+# This variable is exported so sub-make instances know where to find the
+# archive.
+PACKAGES_SOURCE_DIST_FILE ?= $(firstword $(SOURCE_DIST_FILES))
-.PHONY: run-qc
+packages package-deb package-rpm package-rpm-fedora \
+package-rpm-suse package-windows package-standalone-macosx \
+package-generic-unix: $(PACKAGES_SOURCE_DIST_FILE)
+ $(verbose) $(MAKE) -C packaging $@ \
+ SOURCE_DIST_FILE=$(abspath $(PACKAGES_SOURCE_DIST_FILE))
diff --git a/README b/README
index 67e3a66ad6..43bfe006f6 100644
--- a/README
+++ b/README
@@ -1 +1 @@
-Please see http://www.rabbitmq.com/build-server.html for build instructions.
+See http://rabbitmq.com and https://github.com/rabbitmq/rabbitmq-server.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000..d64ab34a16
--- /dev/null
+++ b/README.md
@@ -0,0 +1,47 @@
+# RabbitMQ Server
+
+[RabbitMQ](http://rabbitmq.com) is a [feature rich](http://www.rabbitmq.com/features.html), multi-protocol messaging broker. It supports:
+
+ * AMQP 0-9-1
+ * STOMP 1.0 through 1.2
+ * MQTT 3.1.1
+ * AMQP 1.0
+
+
+## Installation
+
+ * [Installation guides](http://www.rabbitmq.com/download.html) for various platforms
+
+
+## Tutorials & Documentation
+
+ * [RabbitMQ tutorials](http://www.rabbitmq.com/getstarted.html)
+ * [Documentation guides](http://www.rabbitmq.com/documentation.html)
+ * [Client libraries and tools](http://www.rabbitmq.com/devtools.html)
+
+
+## Getting Help
+
+ * [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users)
+ * `#rabbitmq` on Freenode
+ * [Commercial RabbitMQ support](http://www.rabbitmq.com/services.html) from [Pivotal](http://pivotal.io)
+
+
+## Contributing
+
+See [CONTRIBUTING.md](./CONTRIBUTING.md) and our [development process overview](http://www.rabbitmq.com/github.html).
+
+
+## License
+
+RabbitMQ server is [licensed under the MPL](LICENSE-MPL-RabbitMQ).
+
+
+## Building From Source
+
+See [building RabbitMQ server from source](http://www.rabbitmq.com/build-server.html).
+
+
+## Copyright
+
+(c) Pivotal Software Inc., 2007-2015.
diff --git a/build.config b/build.config
new file mode 100644
index 0000000000..b1430689a1
--- /dev/null
+++ b/build.config
@@ -0,0 +1,43 @@
+# Do *not* comment or remove core modules
+# unless you know what you are doing.
+#
+# Feel free to comment plugins out however.
+
+# Core modules.
+core/core
+index/*
+core/index
+core/deps
+
+# Plugins that must run before Erlang code gets compiled.
+plugins/erlydtl
+plugins/protobuffs
+
+# Core modules, continued.
+core/erlc
+core/docs
+core/rel
+core/test
+core/compat
+
+# Plugins.
+plugins/asciidoc
+plugins/bootstrap
+plugins/c_src
+plugins/ci
+plugins/ct
+plugins/dialyzer
+# plugins/edoc
+plugins/elvis
+plugins/escript
+plugins/eunit
+plugins/relx
+plugins/shell
+plugins/triq
+plugins/xref
+
+# Plugins enhancing the functionality of other plugins.
+plugins/cover
+
+# Core modules which can use variables from plugins.
+core/deps-tools
diff --git a/calculate-relative b/calculate-relative
deleted file mode 100755
index 3af18e8ff8..0000000000
--- a/calculate-relative
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python
-#
-# relpath.py
-# R.Barran 30/08/2004
-# Retrieved from http://code.activestate.com/recipes/302594/
-
-import os
-import sys
-
-def relpath(target, base=os.curdir):
- """
- Return a relative path to the target from either the current dir or an optional base dir.
- Base can be a directory specified either as absolute or relative to current dir.
- """
-
- if not os.path.exists(target):
- raise OSError, 'Target does not exist: '+target
-
- if not os.path.isdir(base):
- raise OSError, 'Base is not a directory or does not exist: '+base
-
- base_list = (os.path.abspath(base)).split(os.sep)
- target_list = (os.path.abspath(target)).split(os.sep)
-
- # On the windows platform the target may be on a completely different drive from the base.
- if os.name in ['nt','dos','os2'] and base_list[0] <> target_list[0]:
- raise OSError, 'Target is on a different drive to base. Target: '+target_list[0].upper()+', base: '+base_list[0].upper()
-
- # Starting from the filepath root, work out how much of the filepath is
- # shared by base and target.
- for i in range(min(len(base_list), len(target_list))):
- if base_list[i] <> target_list[i]: break
- else:
- # If we broke out of the loop, i is pointing to the first differing path elements.
- # If we didn't break out of the loop, i is pointing to identical path elements.
- # Increment i so that in all cases it points to the first differing path elements.
- i+=1
-
- rel_list = [os.pardir] * (len(base_list)-i) + target_list[i:]
- if (len(rel_list) == 0):
- return "."
- return os.path.join(*rel_list)
-
-if __name__ == "__main__":
- print(relpath(sys.argv[1], sys.argv[2]))
diff --git a/codegen.py b/codegen.py
deleted file mode 100644
index fbc6f61f8a..0000000000
--- a/codegen.py
+++ /dev/null
@@ -1,595 +0,0 @@
-## The contents of this file are subject to the Mozilla Public License
-## Version 1.1 (the "License"); you may not use this file except in
-## compliance with the License. You may obtain a copy of the License
-## at http://www.mozilla.org/MPL/
-##
-## Software distributed under the License is distributed on an "AS IS"
-## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-## the License for the specific language governing rights and
-## limitations under the License.
-##
-## The Original Code is RabbitMQ.
-##
-## The Initial Developer of the Original Code is Pivotal Software, Inc.
-## Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-##
-
-from __future__ import nested_scopes
-
-import sys
-sys.path.append("../rabbitmq-codegen") # in case we're next to an experimental revision
-sys.path.append("codegen") # in case we're building from a distribution package
-
-from amqp_codegen import *
-import string
-import re
-
-# Coming up with a proper encoding of AMQP tables in JSON is too much
-# hassle at this stage. Given that the only default value we are
-# interested in is for the empty table, we only support that.
-def convertTable(d):
- if len(d) == 0:
- return "[]"
- else:
- raise Exception('Non-empty table defaults not supported ' + d)
-
-erlangDefaultValueTypeConvMap = {
- bool : lambda x: str(x).lower(),
- str : lambda x: "<<\"" + x + "\">>",
- int : lambda x: str(x),
- float : lambda x: str(x),
- dict: convertTable,
- unicode: lambda x: "<<\"" + x.encode("utf-8") + "\">>"
-}
-
-def erlangize(s):
- s = s.replace('-', '_')
- s = s.replace(' ', '_')
- return s
-
-AmqpMethod.erlangName = lambda m: "'" + erlangize(m.klass.name) + '.' + erlangize(m.name) + "'"
-
-AmqpClass.erlangName = lambda c: "'" + erlangize(c.name) + "'"
-
-def erlangConstantName(s):
- return '_'.join(re.split('[- ]', s.upper()))
-
-class PackedMethodBitField:
- def __init__(self, index):
- self.index = index
- self.domain = 'bit'
- self.contents = []
-
- def extend(self, f):
- self.contents.append(f)
-
- def count(self):
- return len(self.contents)
-
- def full(self):
- return self.count() == 8
-
-def multiLineFormat(things, prologue, separator, lineSeparator, epilogue, thingsPerLine = 4):
- r = [prologue]
- i = 0
- for t in things:
- if i != 0:
- if i % thingsPerLine == 0:
- r += [lineSeparator]
- else:
- r += [separator]
- r += [t]
- i += 1
- r += [epilogue]
- return "".join(r)
-
-def prettyType(typeName, subTypes, typesPerLine = 4):
- """Pretty print a type signature made up of many alternative subtypes"""
- sTs = multiLineFormat(subTypes,
- "( ", " | ", "\n | ", " )",
- thingsPerLine = typesPerLine)
- return "-type(%s ::\n %s)." % (typeName, sTs)
-
-def printFileHeader():
- print """%% Autogenerated code. Do not edit.
-%%
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is Pivotal Software, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%"""
-
-def genErl(spec):
- def erlType(domain):
- return erlangize(spec.resolveDomain(domain))
-
- def fieldTypeList(fields):
- return '[' + ', '.join([erlType(f.domain) for f in fields]) + ']'
-
- def fieldNameList(fields):
- return '[' + ', '.join([erlangize(f.name) for f in fields]) + ']'
-
- def fieldTempList(fields):
- return '[' + ', '.join(['F' + str(f.index) for f in fields]) + ']'
-
- def fieldMapList(fields):
- return ', '.join([erlangize(f.name) + " = F" + str(f.index) for f in fields])
-
- def genLookupMethodName(m):
- print "lookup_method_name({%d, %d}) -> %s;" % (m.klass.index, m.index, m.erlangName())
-
- def genLookupClassName(c):
- print "lookup_class_name(%d) -> %s;" % (c.index, c.erlangName())
-
- def genMethodId(m):
- print "method_id(%s) -> {%d, %d};" % (m.erlangName(), m.klass.index, m.index)
-
- def genMethodHasContent(m):
- print "method_has_content(%s) -> %s;" % (m.erlangName(), str(m.hasContent).lower())
-
- def genMethodIsSynchronous(m):
- hasNoWait = "nowait" in fieldNameList(m.arguments)
- if m.isSynchronous and hasNoWait:
- print "is_method_synchronous(#%s{nowait = NoWait}) -> not(NoWait);" % (m.erlangName())
- else:
- print "is_method_synchronous(#%s{}) -> %s;" % (m.erlangName(), str(m.isSynchronous).lower())
-
- def genMethodFieldTypes(m):
- """Not currently used - may be useful in future?"""
- print "method_fieldtypes(%s) -> %s;" % (m.erlangName(), fieldTypeList(m.arguments))
-
- def genMethodFieldNames(m):
- print "method_fieldnames(%s) -> %s;" % (m.erlangName(), fieldNameList(m.arguments))
-
- def packMethodFields(fields):
- packed = []
- bitfield = None
- for f in fields:
- if erlType(f.domain) == 'bit':
- if not(bitfield) or bitfield.full():
- bitfield = PackedMethodBitField(f.index)
- packed.append(bitfield)
- bitfield.extend(f)
- else:
- bitfield = None
- packed.append(f)
- return packed
-
- def methodFieldFragment(f):
- type = erlType(f.domain)
- p = 'F' + str(f.index)
- if type == 'shortstr':
- return p+'Len:8/unsigned, '+p+':'+p+'Len/binary'
- elif type == 'longstr':
- return p+'Len:32/unsigned, '+p+':'+p+'Len/binary'
- elif type == 'octet':
- return p+':8/unsigned'
- elif type == 'short':
- return p+':16/unsigned'
- elif type == 'long':
- return p+':32/unsigned'
- elif type == 'longlong':
- return p+':64/unsigned'
- elif type == 'timestamp':
- return p+':64/unsigned'
- elif type == 'bit':
- return p+'Bits:8'
- elif type == 'table':
- return p+'Len:32/unsigned, '+p+'Tab:'+p+'Len/binary'
-
- def genFieldPostprocessing(packed, hasContent):
- for f in packed:
- type = erlType(f.domain)
- if type == 'bit':
- for index in range(f.count()):
- print " F%d = ((F%dBits band %d) /= 0)," % \
- (f.index + index,
- f.index,
- 1 << index)
- elif type == 'table':
- print " F%d = rabbit_binary_parser:parse_table(F%dTab)," % \
- (f.index, f.index)
- # We skip the check on content-bearing methods for
- # speed. This is a sanity check, not a security thing.
- elif type == 'shortstr' and not hasContent:
- print " rabbit_binary_parser:assert_utf8(F%d)," % (f.index)
- else:
- pass
-
- def genMethodRecord(m):
- print "method_record(%s) -> #%s{};" % (m.erlangName(), m.erlangName())
-
- def genDecodeMethodFields(m):
- packedFields = packMethodFields(m.arguments)
- binaryPattern = ', '.join([methodFieldFragment(f) for f in packedFields])
- if binaryPattern:
- restSeparator = ', '
- else:
- restSeparator = ''
- recordConstructorExpr = '#%s{%s}' % (m.erlangName(), fieldMapList(m.arguments))
- print "decode_method_fields(%s, <<%s>>) ->" % (m.erlangName(), binaryPattern)
- genFieldPostprocessing(packedFields, m.hasContent)
- print " %s;" % (recordConstructorExpr,)
-
- def genDecodeProperties(c):
- def presentBin(fields):
- ps = ', '.join(['P' + str(f.index) + ':1' for f in fields])
- return '<<' + ps + ', _:%d, R0/binary>>' % (16 - len(fields),)
- def writePropFieldLine(field):
- i = str(field.index)
- if field.domain == 'bit':
- print " {F%s, R%s} = {P%s =/= 0, R%s}," % \
- (i, str(field.index + 1), i, i)
- else:
- print " {F%s, R%s} = if P%s =:= 0 -> {undefined, R%s}; true -> ?%s_VAL(R%s, L%s, V%s, X%s) end," % \
- (i, str(field.index + 1), i, i, erlType(field.domain).upper(), i, i, i, i)
-
- if len(c.fields) == 0:
- print "decode_properties(%d, <<>>) ->" % (c.index,)
- else:
- print ("decode_properties(%d, %s) ->" %
- (c.index, presentBin(c.fields)))
- for field in c.fields:
- writePropFieldLine(field)
- print " <<>> = %s," % ('R' + str(len(c.fields)))
- print " #'P_%s'{%s};" % (erlangize(c.name), fieldMapList(c.fields))
-
- def genFieldPreprocessing(packed):
- for f in packed:
- type = erlType(f.domain)
- if type == 'bit':
- print " F%dBits = (%s)," % \
- (f.index,
- ' bor '.join(['(bitvalue(F%d) bsl %d)' % (x.index, x.index - f.index)
- for x in f.contents]))
- elif type == 'table':
- print " F%dTab = rabbit_binary_generator:generate_table(F%d)," % (f.index, f.index)
- print " F%dLen = size(F%dTab)," % (f.index, f.index)
- elif type == 'shortstr':
- print " F%dLen = shortstr_size(F%d)," % (f.index, f.index)
- elif type == 'longstr':
- print " F%dLen = size(F%d)," % (f.index, f.index)
- else:
- pass
-
- def genEncodeMethodFields(m):
- packedFields = packMethodFields(m.arguments)
- print "encode_method_fields(#%s{%s}) ->" % (m.erlangName(), fieldMapList(m.arguments))
- genFieldPreprocessing(packedFields)
- print " <<%s>>;" % (', '.join([methodFieldFragment(f) for f in packedFields]))
-
- def genEncodeProperties(c):
- def presentBin(fields):
- ps = ', '.join(['P' + str(f.index) + ':1' for f in fields])
- return '<<' + ps + ', 0:%d>>' % (16 - len(fields),)
- def writePropFieldLine(field):
- i = str(field.index)
- if field.domain == 'bit':
- print " {P%s, R%s} = {F%s =:= 1, R%s}," % \
- (i, str(field.index + 1), i, i)
- else:
- print " {P%s, R%s} = if F%s =:= undefined -> {0, R%s}; true -> {1, [?%s_PROP(F%s, L%s) | R%s]} end," % \
- (i, str(field.index + 1), i, i, erlType(field.domain).upper(), i, i, i)
-
- print "encode_properties(#'P_%s'{%s}) ->" % (erlangize(c.name), fieldMapList(c.fields))
- if len(c.fields) == 0:
- print " <<>>;"
- else:
- print " R0 = [<<>>],"
- for field in c.fields:
- writePropFieldLine(field)
- print " list_to_binary([%s | lists:reverse(R%s)]);" % \
- (presentBin(c.fields), str(len(c.fields)))
-
- def messageConstantClass(cls):
- # We do this because 0.8 uses "soft error" and 8.1 uses "soft-error".
- return erlangConstantName(cls)
-
- def genLookupException(c,v,cls):
- mCls = messageConstantClass(cls)
- if mCls == 'SOFT_ERROR': genLookupException1(c,'false')
- elif mCls == 'HARD_ERROR': genLookupException1(c, 'true')
- elif mCls == '': pass
- else: raise Exception('Unknown constant class' + cls)
-
- def genLookupException1(c,hardErrorBoolStr):
- n = erlangConstantName(c)
- print 'lookup_amqp_exception(%s) -> {%s, ?%s, <<"%s">>};' % \
- (n.lower(), hardErrorBoolStr, n, n)
-
- def genAmqpException(c,v,cls):
- n = erlangConstantName(c)
- print 'amqp_exception(?%s) -> %s;' % \
- (n, n.lower())
-
- methods = spec.allMethods()
-
- printFileHeader()
- module = "rabbit_framing_amqp_%d_%d" % (spec.major, spec.minor)
- if spec.revision != 0:
- module = "%s_%d" % (module, spec.revision)
- if module == "rabbit_framing_amqp_8_0":
- module = "rabbit_framing_amqp_0_8"
- print "-module(%s)." % module
- print """-include("rabbit_framing.hrl").
-
--export([version/0]).
--export([lookup_method_name/1]).
--export([lookup_class_name/1]).
-
--export([method_id/1]).
--export([method_has_content/1]).
--export([is_method_synchronous/1]).
--export([method_record/1]).
--export([method_fieldnames/1]).
--export([decode_method_fields/2]).
--export([decode_properties/2]).
--export([encode_method_fields/1]).
--export([encode_properties/1]).
--export([lookup_amqp_exception/1]).
--export([amqp_exception/1]).
-
-"""
- print "%% Various types"
- print "-ifdef(use_specs)."
-
- print """-export_type([amqp_field_type/0, amqp_property_type/0,
- amqp_table/0, amqp_array/0, amqp_value/0,
- amqp_method_name/0, amqp_method/0, amqp_method_record/0,
- amqp_method_field_name/0, amqp_property_record/0,
- amqp_exception/0, amqp_exception_code/0, amqp_class_id/0]).
-
--type(amqp_field_type() ::
- 'longstr' | 'signedint' | 'decimal' | 'timestamp' |
- 'table' | 'byte' | 'double' | 'float' | 'long' |
- 'short' | 'bool' | 'binary' | 'void' | 'array').
--type(amqp_property_type() ::
- 'shortstr' | 'longstr' | 'octet' | 'short' | 'long' |
- 'longlong' | 'timestamp' | 'bit' | 'table').
-
--type(amqp_table() :: [{binary(), amqp_field_type(), amqp_value()}]).
--type(amqp_array() :: [{amqp_field_type(), amqp_value()}]).
--type(amqp_value() :: binary() | % longstr
- integer() | % signedint
- {non_neg_integer(), non_neg_integer()} | % decimal
- amqp_table() |
- amqp_array() |
- byte() | % byte
- float() | % double
- integer() | % long
- integer() | % short
- boolean() | % bool
- binary() | % binary
- 'undefined' | % void
- non_neg_integer() % timestamp
- ).
-"""
-
- print prettyType("amqp_method_name()",
- [m.erlangName() for m in methods])
- print prettyType("amqp_method()",
- ["{%s, %s}" % (m.klass.index, m.index) for m in methods],
- 6)
- print prettyType("amqp_method_record()",
- ["#%s{}" % (m.erlangName()) for m in methods])
- fieldNames = set()
- for m in methods:
- fieldNames.update(m.arguments)
- fieldNames = [erlangize(f.name) for f in fieldNames]
- print prettyType("amqp_method_field_name()",
- fieldNames)
- print prettyType("amqp_property_record()",
- ["#'P_%s'{}" % erlangize(c.name) for c in spec.allClasses()])
- print prettyType("amqp_exception()",
- ["'%s'" % erlangConstantName(c).lower() for (c, v, cls) in spec.constants])
- print prettyType("amqp_exception_code()",
- ["%i" % v for (c, v, cls) in spec.constants])
- classIds = set()
- for m in spec.allMethods():
- classIds.add(m.klass.index)
- print prettyType("amqp_class_id()",
- ["%i" % ci for ci in classIds])
- print prettyType("amqp_class_name()",
- ["%s" % c.erlangName() for c in spec.allClasses()])
- print "-endif. % use_specs"
-
- print """
-%% Method signatures
--ifdef(use_specs).
--spec(version/0 :: () -> {non_neg_integer(), non_neg_integer(), non_neg_integer()}).
--spec(lookup_method_name/1 :: (amqp_method()) -> amqp_method_name()).
--spec(lookup_class_name/1 :: (amqp_class_id()) -> amqp_class_name()).
--spec(method_id/1 :: (amqp_method_name()) -> amqp_method()).
--spec(method_has_content/1 :: (amqp_method_name()) -> boolean()).
--spec(is_method_synchronous/1 :: (amqp_method_record()) -> boolean()).
--spec(method_record/1 :: (amqp_method_name()) -> amqp_method_record()).
--spec(method_fieldnames/1 :: (amqp_method_name()) -> [amqp_method_field_name()]).
--spec(decode_method_fields/2 ::
- (amqp_method_name(), binary()) -> amqp_method_record() | rabbit_types:connection_exit()).
--spec(decode_properties/2 :: (non_neg_integer(), binary()) -> amqp_property_record()).
--spec(encode_method_fields/1 :: (amqp_method_record()) -> binary()).
--spec(encode_properties/1 :: (amqp_property_record()) -> binary()).
--spec(lookup_amqp_exception/1 :: (amqp_exception()) -> {boolean(), amqp_exception_code(), binary()}).
--spec(amqp_exception/1 :: (amqp_exception_code()) -> amqp_exception()).
--endif. % use_specs
-
-bitvalue(true) -> 1;
-bitvalue(false) -> 0;
-bitvalue(undefined) -> 0.
-
-shortstr_size(S) ->
- case size(S) of
- Len when Len =< 255 -> Len;
- _ -> exit(method_field_shortstr_overflow)
- end.
-
--define(SHORTSTR_VAL(R, L, V, X),
- begin
- <<L:8/unsigned, V:L/binary, X/binary>> = R,
- {V, X}
- end).
-
--define(LONGSTR_VAL(R, L, V, X),
- begin
- <<L:32/unsigned, V:L/binary, X/binary>> = R,
- {V, X}
- end).
-
--define(SHORT_VAL(R, L, V, X),
- begin
- <<V:8/unsigned, X/binary>> = R,
- {V, X}
- end).
-
--define(LONG_VAL(R, L, V, X),
- begin
- <<V:32/unsigned, X/binary>> = R,
- {V, X}
- end).
-
--define(LONGLONG_VAL(R, L, V, X),
- begin
- <<V:64/unsigned, X/binary>> = R,
- {V, X}
- end).
-
--define(OCTET_VAL(R, L, V, X),
- begin
- <<V:8/unsigned, X/binary>> = R,
- {V, X}
- end).
-
--define(TABLE_VAL(R, L, V, X),
- begin
- <<L:32/unsigned, V:L/binary, X/binary>> = R,
- {rabbit_binary_parser:parse_table(V), X}
- end).
-
--define(TIMESTAMP_VAL(R, L, V, X),
- begin
- <<V:64/unsigned, X/binary>> = R,
- {V, X}
- end).
-
--define(SHORTSTR_PROP(X, L),
- begin
- L = size(X),
- if L < 256 -> <<L:8, X:L/binary>>;
- true -> exit(content_properties_shortstr_overflow)
- end
- end).
-
--define(LONGSTR_PROP(X, L),
- begin
- L = size(X),
- <<L:32, X:L/binary>>
- end).
-
--define(OCTET_PROP(X, L), <<X:8/unsigned>>).
--define(SHORT_PROP(X, L), <<X:16/unsigned>>).
--define(LONG_PROP(X, L), <<X:32/unsigned>>).
--define(LONGLONG_PROP(X, L), <<X:64/unsigned>>).
--define(TIMESTAMP_PROP(X, L), <<X:64/unsigned>>).
-
--define(TABLE_PROP(X, T),
- begin
- T = rabbit_binary_generator:generate_table(X),
- <<(size(T)):32, T/binary>>
- end).
-"""
- version = "{%d, %d, %d}" % (spec.major, spec.minor, spec.revision)
- if version == '{8, 0, 0}': version = '{0, 8, 0}'
- print "version() -> %s." % (version)
-
- for m in methods: genLookupMethodName(m)
- print "lookup_method_name({_ClassId, _MethodId} = Id) -> exit({unknown_method_id, Id})."
-
- for c in spec.allClasses(): genLookupClassName(c)
- print "lookup_class_name(ClassId) -> exit({unknown_class_id, ClassId})."
-
- for m in methods: genMethodId(m)
- print "method_id(Name) -> exit({unknown_method_name, Name})."
-
- for m in methods: genMethodHasContent(m)
- print "method_has_content(Name) -> exit({unknown_method_name, Name})."
-
- for m in methods: genMethodIsSynchronous(m)
- print "is_method_synchronous(Name) -> exit({unknown_method_name, Name})."
-
- for m in methods: genMethodRecord(m)
- print "method_record(Name) -> exit({unknown_method_name, Name})."
-
- for m in methods: genMethodFieldNames(m)
- print "method_fieldnames(Name) -> exit({unknown_method_name, Name})."
-
- for m in methods: genDecodeMethodFields(m)
- print "decode_method_fields(Name, BinaryFields) ->"
- print " rabbit_misc:frame_error(Name, BinaryFields)."
-
- for c in spec.allClasses(): genDecodeProperties(c)
- print "decode_properties(ClassId, _BinaryFields) -> exit({unknown_class_id, ClassId})."
-
- for m in methods: genEncodeMethodFields(m)
- print "encode_method_fields(Record) -> exit({unknown_method_name, element(1, Record)})."
-
- for c in spec.allClasses(): genEncodeProperties(c)
- print "encode_properties(Record) -> exit({unknown_properties_record, Record})."
-
- for (c,v,cls) in spec.constants: genLookupException(c,v,cls)
- print "lookup_amqp_exception(Code) ->"
- print " rabbit_log:warning(\"Unknown AMQP error code '~p'~n\", [Code]),"
- print " {true, ?INTERNAL_ERROR, <<\"INTERNAL_ERROR\">>}."
-
- for(c,v,cls) in spec.constants: genAmqpException(c,v,cls)
- print "amqp_exception(_Code) -> undefined."
-
-def genHrl(spec):
- def fieldNameList(fields):
- return ', '.join([erlangize(f.name) for f in fields])
-
- def fieldNameListDefaults(fields):
- def fillField(field):
- result = erlangize(f.name)
- if field.defaultvalue != None:
- conv_fn = erlangDefaultValueTypeConvMap[type(field.defaultvalue)]
- result += ' = ' + conv_fn(field.defaultvalue)
- return result
- return ', '.join([fillField(f) for f in fields])
-
- methods = spec.allMethods()
-
- printFileHeader()
- print "-define(PROTOCOL_PORT, %d)." % (spec.port)
-
- for (c,v,cls) in spec.constants:
- print "-define(%s, %s)." % (erlangConstantName(c), v)
-
- print "%% Method field records."
- for m in methods:
- print "-record(%s, {%s})." % (m.erlangName(), fieldNameListDefaults(m.arguments))
-
- print "%% Class property records."
- for c in spec.allClasses():
- print "-record('P_%s', {%s})." % (erlangize(c.name), fieldNameList(c.fields))
-
-
-def generateErl(specPath):
- genErl(AmqpSpec(specPath))
-
-def generateHrl(specPath):
- genHrl(AmqpSpec(specPath))
-
-if __name__ == "__main__":
- do_main_dict({"header": generateHrl,
- "body": generateErl})
-
diff --git a/packaging/common/README b/docs/README-for-packages
index 35a1523ac3..35a1523ac3 100644
--- a/packaging/common/README
+++ b/docs/README-for-packages
diff --git a/docs/rabbitmq.config.example b/docs/rabbitmq.config.example
index a8e5016524..55c5736994 100644
--- a/docs/rabbitmq.config.example
+++ b/docs/rabbitmq.config.example
@@ -120,6 +120,12 @@
%%
%% {ssl_handshake_timeout, 5000},
+ %% Password hashing implementation. Will only affect newly
+ %% created users. To recalculate hash for an existing user
+ %% it's necessary to update her password.
+ %%
+ %% {password_hashing_module, rabbit_password_hashing_sha256},
+
%%
%% Default User / VHost
%% ====================
@@ -155,6 +161,11 @@
%%
%% {frame_max, 131072},
+ %% Set the max frame size the server will accept before connection
+ %% tuning occurs
+ %%
+ %% {initial_frame_max, 4096},
+
%% Set the max permissible number of channels per connection.
%% 0 means "no limit".
%%
@@ -165,10 +176,7 @@
%% See (http://www.erlang.org/doc/man/inet.html#setopts-2) for
%% further documentation.
%%
- %% {tcp_listen_options, [binary,
- %% {packet, raw},
- %% {reuseaddr, true},
- %% {backlog, 128},
+ %% {tcp_listen_options, [{backlog, 128},
%% {nodelay, true},
%% {exit_on_close, false}]},
@@ -185,21 +193,47 @@
%% Alternatively, we can set a limit (in bytes) of RAM used by the node.
%%
%% {vm_memory_high_watermark, {absolute, 1073741824}},
+ %%
+ %% Or you can set absolute value using memory units.
+ %%
+ %% {vm_memory_high_watermark, {absolute, "1024M"}},
+ %%
+ %% Supported units suffixes:
+ %%
+ %% k, kiB: kibibytes (2^10 bytes)
+ %% M, MiB: mebibytes (2^20)
+ %% G, GiB: gibibytes (2^30)
+ %% kB: kilobytes (10^3)
+ %% MB: megabytes (10^6)
+ %% GB: gigabytes (10^9)
%% Fraction of the high watermark limit at which queues start to
%% page message out to disc in order to free up memory.
%%
+ %% Values greater than 0.9 can be dangerous and should be used carefully.
+ %%
%% {vm_memory_high_watermark_paging_ratio, 0.5},
+ %% Interval (in milliseconds) at which we perform the check of the memory
+ %% levels against the watermarks.
+ %%
+ %% {memory_monitor_interval, 2500},
+
%% Set disk free limit (in bytes). Once free disk space reaches this
%% lower bound, a disk alarm will be set - see the documentation
%% listed above for more details.
%%
%% {disk_free_limit, 50000000},
+ %%
+ %% Or you can set it using memory units (same as in vm_memory_high_watermark)
+ %% {disk_free_limit, "50MB"},
+ %% {disk_free_limit, "50000kB"},
+ %% {disk_free_limit, "2GB"},
%% Alternatively, we can set a limit relative to total available RAM.
%%
- %% {disk_free_limit, {mem_relative, 1.0}},
+ %% Values lower than 1.0 can be dangerous and should be used carefully.
+ %% {disk_free_limit, {mem_relative, 2.0}},
%%
%% Misc/Advanced Options
@@ -467,10 +501,7 @@
%% TCP/Socket options (as per the broker configuration).
%%
- %% {tcp_listen_options, [binary,
- %% {packet, raw},
- %% {reuseaddr, true},
- %% {backlog, 128},
+ %% {tcp_listen_options, [{backlog, 128},
%% {nodelay, true}]}
]},
diff --git a/docs/rabbitmqctl.1.xml b/docs/rabbitmqctl.1.xml
index 508cc7a585..fb4959b276 100644
--- a/docs/rabbitmqctl.1.xml
+++ b/docs/rabbitmqctl.1.xml
@@ -1322,6 +1322,10 @@
<listitem><para>Like <command>message_bytes</command> but counting only those messages which are persistent.</para></listitem>
</varlistentry>
<varlistentry>
+ <term>head_message_timestamp</term>
+ <listitem><para>The timestamp property of the first message in the queue, if present. Timestamps of messages only appear when they are in the paged-in state.</para></listitem>
+ </varlistentry>
+ <varlistentry>
<term>disk_reads</term>
<listitem><para>Total number of times messages have been read from disk by this queue since it started.</para></listitem>
</varlistentry>
@@ -1954,15 +1958,52 @@
</listitem>
</varlistentry>
<varlistentry>
- <term><cmdsynopsis><command>set_vm_memory_high_watermark absolute</command> <arg choice="req"><replaceable>memory_limit_in_bytes</replaceable></arg></cmdsynopsis></term>
+ <term><cmdsynopsis><command>set_vm_memory_high_watermark absolute</command> <arg choice="req"><replaceable>memory_limit</replaceable></arg></cmdsynopsis></term>
<listitem>
<variablelist>
<varlistentry>
- <term>memory_limit_in_bytes</term>
+ <term>memory_limit</term>
<listitem><para>
The new memory limit at which flow control is
triggered, expressed in bytes as an integer number
- greater than or equal to 0.
+ greater than or equal to 0 or as a string with memory units
+ (e.g. 512M or 1G). Available units are:
+ k, kiB: kibibytes (2^10 bytes)
+ M, MiB: mebibytes (2^20)
+ G, GiB: gibibytes (2^30)
+ kB: kilobytes (10^3)
+ MB: megabytes (10^6)
+ GB: gigabytes (10^9)
+ </para></listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><cmdsynopsis><command>set_disk_free_limit</command> <arg choice="req"><replaceable>disk_limit</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>disk_limit</term>
+ <listitem><para>
+ Lower bound limit as an integer in bytes or a string with memory units (see vm_memory_high_watermark),
+ e.g. 512M or 1G. Once free disk space reaches the limit, a disk alarm will be set.
+ </para></listitem>
+ </varlistentry>
+ </variablelist>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><cmdsynopsis><command>set_disk_free_limit mem_relative</command> <arg choice="req"><replaceable>fraction</replaceable></arg></cmdsynopsis></term>
+ <listitem>
+ <variablelist>
+ <varlistentry>
+ <term>fraction</term>
+ <listitem><para>
+                  Limit relative to the total amount of available RAM
+ as a non-negative floating point number.
+ Values lower than 1.0 can be dangerous and
+ should be used carefully.
</para></listitem>
</varlistentry>
</variablelist>
diff --git a/erlang.mk b/erlang.mk
new file mode 100644
index 0000000000..fc2d806f2e
--- /dev/null
+++ b/erlang.mk
@@ -0,0 +1,6617 @@
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app deps search rel docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+
+ERLANG_MK_VERSION = 2.0.0-pre.2-16-gb52203c-dirty
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A0 -noinput -boot start_clean
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+check:: clean app tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2015 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies (if needed) without compiling them" \
+ " list-deps Fetch dependencies (if needed) and list them" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+# Adding erlang.mk to make Erlang scripts who call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $(2) -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(subst ",\",$(1)))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(subst \,\\\\,$(shell cygpath -w $1))
+else
+core_native_path = $1
+endif
+
+ifeq ($(shell which wget 2>/dev/null | wc -l), 1)
+define core_http_get
+ wget --no-check-certificate -O $(1) $(2)|| rm $(1)
+endef
+else
+define core_http_get.erl
+ ssl:start(),
+ inets:start(),
+ case httpc:request(get, {"$(2)", []}, [{autoredirect, true}], []) of
+ {ok, {{_, 200, _}, _, Body}} ->
+ case file:write_file("$(1)", Body) of
+ ok -> ok;
+ {error, R1} -> halt(R1)
+ end;
+ {error, R2} ->
+ halt(R2)
+ end,
+ halt(0).
+endef
+
+define core_http_get
+ $(call erlang,$(call core_http_get.erl,$(call core_native_path,$1),$2))
+endef
+endif
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) -type f -name $(subst *,\*,$2)))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk:
+ git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ifdef ERLANG_MK_COMMIT
+ cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+endif
+ if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(MAKE) -C $(ERLANG_MK_BUILD_DIR)
+ cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ rm -rf $(ERLANG_MK_BUILD_DIR)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = 1.0.4
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/riverrun/branglecrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/riverrun/branglecrypt
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = master
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = v0.1.2
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += classifier
+pkg_classifier_name = classifier
+pkg_classifier_description = An Erlang Bayesian Filter and Text Classifier
+pkg_classifier_homepage = https://github.com/inaka/classifier
+pkg_classifier_fetch = git
+pkg_classifier_repo = https://github.com/inaka/classifier
+pkg_classifier_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.1
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.1
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dhtcrawler
+pkg_dhtcrawler_name = dhtcrawler
+pkg_dhtcrawler_description = dhtcrawler is a DHT crawler written in erlang. It can join a DHT network and crawl many P2P torrents.
+pkg_dhtcrawler_homepage = https://github.com/kevinlynx/dhtcrawler
+pkg_dhtcrawler_fetch = git
+pkg_dhtcrawler_repo = https://github.com/kevinlynx/dhtcrawler
+pkg_dhtcrawler_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dtl
+pkg_dtl_name = dtl
+pkg_dtl_description = Django Template Language: A full-featured port of the Django template engine to Erlang.
+pkg_dtl_homepage = https://github.com/oinksoft/dtl
+pkg_dtl_fetch = git
+pkg_dtl_repo = https://github.com/oinksoft/dtl
+pkg_dtl_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += eganglia
+pkg_eganglia_name = eganglia
+pkg_eganglia_description = Erlang library to interact with Ganglia
+pkg_eganglia_homepage = https://github.com/inaka/eganglia
+pkg_eganglia_fetch = git
+pkg_eganglia_repo = https://github.com/inaka/eganglia
+pkg_eganglia_commit = v0.9.1
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = 2.0.4
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/knutin/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/knutin/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = 0.2.4
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple non intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is Pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elastic Search's rest interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = 0.1.1
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = exec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = 1.2
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = v1.4.6
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - a Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = The guards and for Erlang parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gossiperl
+pkg_gossiperl_name = gossiperl
+pkg_gossiperl_description = Gossip middleware in Erlang
+pkg_gossiperl_homepage = http://gossiperl.com/
+pkg_gossiperl_fetch = git
+pkg_gossiperl_repo = https://github.com/gossiperl/gossiperl
+pkg_gossiperl_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = v4.1.1
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = 0.6.0
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/klarna/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/klarna/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = 0.3.3
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = 0.3
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficient decode and encode JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-erlang, open-source, implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/basho/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/basho/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/basho/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/basho/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = 0.1.0
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = Erlang MySQL Driver (from code.google.com)
+pkg_mysql_homepage = https://github.com/dizzyd/erlang-mysql-driver
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/dizzyd/erlang-mysql-driver
+pkg_mysql_commit = master
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang Oauth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += oauth2c
+pkg_oauth2c_name = oauth2c
+pkg_oauth2c_description = Erlang OAuth2 Client
+pkg_oauth2c_homepage = https://github.com/kivra/oauth2_client
+pkg_oauth2c_fetch = git
+pkg_oauth2c_repo = https://github.com/kivra/oauth2_client
+pkg_oauth2c_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = 1.0.0
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = 0.3
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in Erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = 0.4.0
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.1.0
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in- and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = 2.2.1
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang Rest Client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = 0.1.0
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global process registry for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an id generator for message service.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX style broken HTML parser in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://github.com/krestenkrab/triq
+pkg_triq_fetch = git
+pkg_triq_repo = https://github.com/krestenkrab/triq
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = 0.3.0
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = v1.4.0
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtention for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = 1.0.3
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired in rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = 0.2.0
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = zab protocol implemented in erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang. Read more on zippers
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+# Print the metadata of one package index entry; $(1) is the package key.
+# The "Pkg name" line is emitted only when the package key differs from the
+# application name stored in pkg_$(1)_name.
+# NOTE: no comments inside the define -- it is expanded inside a recipe.
+define pkg_print
+	$(verbose) printf "%s\n" \
+		$(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+		"App name: $(pkg_$(1)_name)" \
+		"Description: $(pkg_$(1)_description)" \
+		"Home page: $(pkg_$(1)_homepage)" \
+		"Fetch with: $(pkg_$(1)_fetch)" \
+		"Repository: $(pkg_$(1)_repo)" \
+		"Commit: $(pkg_$(1)_commit)" \
+		""
+
+endef
+
+# Usage: make search [q=string].  With q set, a case-insensitive substring
+# match is done against each package's name and description; without q,
+# every package in the index is printed.
+search:
+ifdef q
+	$(foreach p,$(PACKAGES), \
+		$(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+			$(call pkg_print,$(p))))
+else
+	$(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps
+
+# Configuration.
+
+# OTP_DEPS was replaced by LOCAL_DEPS; warn users still setting it.
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+# Dependencies listed in IGNORE_DEPS are filtered out of ALL_DEPS_DIRS below.
+# Exported so recursive $(MAKE) invocations see the same setting.
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+# Rebar-based dependencies are fetched into the same directory as ours.
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+# dep_* resolution order: a user-defined dep_$(1) wins, then the package
+# index (pkg_$(1)_*), then the bare name is used as-is.
+dep_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+# git:// GitHub URLs are rewritten to their https:// equivalent.
+dep_repo = $(patsubst git://github.com/%,https://github.com/%, \
+	$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo)))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+
+# First-level application directories (excluding APPS_DIR itself) and the
+# directories of all non-ignored build/runtime dependencies.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# Append APPS_DIR and DEPS_DIR to ERL_LIBS unless the user already put
+# them there (checked by splitting ERL_LIBS on ':').
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+	ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+	ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+# V=0 prints a short " DEP name" line; V=2 traces the shell commands.
+dep_verbose_0 = @echo " DEP " $(1);
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
+
+# Core targets.
+
+# deps:: builds every local application and dependency; setting SKIP_DEPS
+# turns it into a no-op.
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS)
+ifndef IS_APP
+# Build local applications first, each with IS_APP=1 so they do not
+# recurse into the apps themselves.
+	$(verbose) for dep in $(ALL_APPS_DIRS) ; do \
+		$(MAKE) -C $$dep IS_APP=1 || exit $$?; \
+	done
+endif
+ifneq ($(IS_DEP),1)
+# Top-level invocation: start a fresh log of already-built dependencies.
+	$(verbose) rm -f $(ERLANG_MK_TMP)/deps.log
+endif
+	$(verbose) mkdir -p $(ERLANG_MK_TMP)
+# Build each dependency exactly once: deps.log records the directories a
+# deeper recursive make has already handled.  A dep without any kind of
+# Makefile is a hard error (exit 2).
+	$(verbose) for dep in $(ALL_DEPS_DIRS) ; do \
+		if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+			:; \
+		else \
+			echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+			if [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+				$(MAKE) -C $$dep IS_DEP=1 || exit $$?; \
+			else \
+				echo "Error: No Makefile to build dependency $$dep."; \
+				exit 2; \
+			fi \
+		fi \
+	done
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While Makefile file could be GNUmakefile or makefile,
+# in practice only Makefile is needed so far.
+# Decide how to autopatch dependency $(1):
+#  - a Makefile that includes a parent .mk file, or that mentions rebar
+#    (directly or via any non-erlang.mk *.mk file), goes through
+#    dep_autopatch2 (full conversion);
+#  - an erlang.mk-based dep only gets its .app.src refreshed and its
+#    erlang.mk replaced;
+#  - any other Makefile only gets its .app file generated;
+#  - no Makefile at all: generate one (a noop one when there is no src/).
+define dep_autopatch
+	if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+		if [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+			$(call dep_autopatch2,$(1)); \
+		elif [ 0 != `grep -ci rebar $(DEPS_DIR)/$(1)/Makefile` ]; then \
+			$(call dep_autopatch2,$(1)); \
+		elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i rebar '{}' \;`" ]; then \
+			$(call dep_autopatch2,$(1)); \
+		else \
+			if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+				$(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+				$(call dep_autopatch_erlang_mk,$(1)); \
+			else \
+				$(call erlang,$(call dep_autopatch_app.erl,$(1))); \
+			fi \
+		fi \
+	else \
+		if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+			$(call dep_autopatch_noop,$(1)); \
+		else \
+			$(call dep_autopatch2,$(1)); \
+		fi \
+	fi
+
+# Full conversion of dependency $(1): regenerate its .app.src, then patch
+# through rebar when a rebar config is present, otherwise generate a plain
+# erlang.mk Makefile for it.
+define dep_autopatch2
+	$(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+	if [ -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script ]; then \
+		$(call dep_autopatch_fetch_rebar); \
+		$(call dep_autopatch_rebar,$(1)); \
+	else \
+		$(call dep_autopatch_gen,$(1)); \
+	fi
+
+# Replace the dependency's Makefile with one whose only target does nothing
+# (used when the dep has no Makefile and no src/ directory).
+define dep_autopatch_noop
+	printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+
+# Overwrite erlang.mk with the current file by default.
+# The dependency's bundled erlang.mk is replaced with a one-line include of
+# our own copy, so every dep builds with the same erlang.mk version.  Set
+# NO_AUTOPATCH_ERLANG_MK (non-empty) to keep the dependency's bundled copy.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+	echo "include $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(DEPS_DIR)/app)/erlang.mk" \
+		> $(DEPS_DIR)/$1/erlang.mk
+endef
+else
+define dep_autopatch_erlang_mk
+	:
+endef
+endif
+
+# Generate a minimal erlang.mk-based Makefile for dependency $(1).
+define dep_autopatch_gen
+	printf "%s\n" \
+		"ERLC_OPTS = +debug_info" \
+		"include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+
+# Clone and build a pinned rebar (used only for autopatching) into
+# $(ERLANG_MK_TMP)/rebar the first time it is needed; skipped when the
+# directory already exists.
+define dep_autopatch_fetch_rebar
+	mkdir -p $(ERLANG_MK_TMP); \
+	if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+		git clone -q -n -- https://github.com/rebar/rebar $(ERLANG_MK_TMP)/rebar; \
+		cd $(ERLANG_MK_TMP)/rebar; \
+		git checkout -q 791db716b5a3a7671e0b351f95ddf24b848ee173; \
+		$(MAKE); \
+		cd -; \
+	fi
+
+# Convert rebar-based dependency $(1): keep its original Makefile aside as
+# Makefile.orig.mk, generate an erlang.mk Makefile from its rebar config
+# (dep_autopatch_rebar.erl writes it), and delete any prebuilt .app file so
+# it is regenerated on build.
+define dep_autopatch_rebar
+	if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+		mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+	fi; \
+	$(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+	rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+
+define dep_autopatch_rebar.erl
+ application:load(rebar),
+ application:set_env(rebar, log_level, debug),
+ Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+ {ok, Conf0} -> Conf0;
+ _ -> []
+ end,
+ {Conf, OsEnv} = fun() ->
+ case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+ false -> {Conf1, []};
+ true ->
+ Bindings0 = erl_eval:new_bindings(),
+ Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+ Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+ Before = os:getenv(),
+ {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+ {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+ end
+ end(),
+ Write = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+ end,
+ Escape = fun (Text) ->
+ re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+ end,
+ Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+ "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+ Write("C_SRC_DIR = /path/do/not/exist\n"),
+ Write("C_SRC_TYPE = rebar\n"),
+ Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+ Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+ fun() ->
+ Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+ case lists:keyfind(erl_opts, 1, Conf) of
+ false -> ok;
+ {_, ErlOpts} ->
+ lists:foreach(fun
+ ({d, D}) ->
+ Write("ERLC_OPTS += -D" ++ atom_to_list(D) ++ "=1\n");
+ ({i, I}) ->
+ Write(["ERLC_OPTS += -I ", I, "\n"]);
+ ({platform_define, Regex, D}) ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("ERLC_OPTS += -D" ++ atom_to_list(D) ++ "=1\n");
+ false -> ok
+ end;
+ ({parse_transform, PT}) ->
+ Write("ERLC_OPTS += +'{parse_transform, " ++ atom_to_list(PT) ++ "}'\n");
+ (_) -> ok
+ end, ErlOpts)
+ end,
+ Write("\n")
+ end(),
+ fun() ->
+ File = case lists:keyfind(deps, 1, Conf) of
+ false -> [];
+ {_, Deps} ->
+ [begin case case Dep of
+ {N, S} when is_atom(N), is_list(S) -> {N, {hex, S}};
+ {N, S} when is_tuple(S) -> {N, S};
+ {N, _, S} -> {N, S};
+ {N, _, S, _} -> {N, S};
+ _ -> false
+ end of
+ false -> ok;
+ {Name, Source} ->
+ {Method, Repo, Commit} = case Source of
+ {hex, V} -> {hex, V, undefined};
+ {git, R} -> {git, R, master};
+ {M, R, {branch, C}} -> {M, R, C};
+ {M, R, {ref, C}} -> {M, R, C};
+ {M, R, {tag, C}} -> {M, R, C};
+ {M, R, C} -> {M, R, C}
+ end,
+ Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+ end end || Dep <- Deps]
+ end
+ end(),
+ fun() ->
+ case lists:keyfind(erl_first_files, 1, Conf) of
+ false -> ok;
+ {_, Files} ->
+ Names = [[" ", case lists:reverse(F) of
+ "lre." ++ Elif -> lists:reverse(Elif);
+ Elif -> lists:reverse(Elif)
+ end] || "src/" ++ F <- Files],
+ Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+ end
+ end(),
+ FindFirst = fun(F, Fd) ->
+ case io:parse_erl_form(Fd, undefined) of
+ {ok, {attribute, _, compile, {parse_transform, PT}}, _} ->
+ [PT, F(F, Fd)];
+ {ok, {attribute, _, compile, CompileOpts}, _} when is_list(CompileOpts) ->
+ case proplists:get_value(parse_transform, CompileOpts) of
+ undefined -> [F(F, Fd)];
+ PT -> [PT, F(F, Fd)]
+ end;
+ {ok, {attribute, _, include, Hrl}, _} ->
+ case file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]) of
+ {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
+ _ ->
+ case file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ Hrl, [read]) of
+ {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
+ _ -> [F(F, Fd)]
+ end
+ end;
+ {ok, {attribute, _, include_lib, "$(1)/include/" ++ Hrl}, _} ->
+ {ok, HrlFd} = file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]),
+ [F(F, HrlFd), F(F, Fd)];
+ {ok, {attribute, _, include_lib, Hrl}, _} ->
+ case file:open("$(call core_native_path,$(DEPS_DIR)/$1/include/)" ++ Hrl, [read]) of
+ {ok, HrlFd} -> [F(F, HrlFd), F(F, Fd)];
+ _ -> [F(F, Fd)]
+ end;
+ {ok, {attribute, _, import, {Imp, _}}, _} ->
+ case file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(Imp) ++ ".erl", [read]) of
+ {ok, ImpFd} -> [Imp, F(F, ImpFd), F(F, Fd)];
+ _ -> [F(F, Fd)]
+ end;
+ {eof, _} ->
+ file:close(Fd),
+ [];
+ _ ->
+ F(F, Fd)
+ end
+ end,
+ fun() ->
+ ErlFiles = filelib:wildcard("$(call core_native_path,$(DEPS_DIR)/$1/src/)*.erl"),
+ First0 = lists:usort(lists:flatten([begin
+ {ok, Fd} = file:open(F, [read]),
+ FindFirst(FindFirst, Fd)
+ end || F <- ErlFiles])),
+ First = lists:flatten([begin
+ {ok, Fd} = file:open("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", [read]),
+ FindFirst(FindFirst, Fd)
+ end || M <- First0, lists:member("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", ErlFiles)]) ++ First0,
+ Write(["COMPILE_FIRST +=", [[" ", atom_to_list(M)] || M <- First,
+ lists:member("$(call core_native_path,$(DEPS_DIR)/$1/src/)" ++ atom_to_list(M) ++ ".erl", ErlFiles)], "\n"])
+ end(),
+ Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+ Write("\npreprocess::\n"),
+ Write("\npre-deps::\n"),
+ Write("\npre-app::\n"),
+ PatchHook = fun(Cmd) ->
+ case Cmd of
+ "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+ "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+ _ -> Escape(Cmd)
+ end
+ end,
+ fun() ->
+ case lists:keyfind(pre_hooks, 1, Conf) of
+ false -> ok;
+ {_, Hooks} ->
+ [case H of
+ {'get-deps', Cmd} ->
+ Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+ {compile, Cmd} ->
+ Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ {Regex, compile, Cmd} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+ false -> ok
+ end;
+ _ -> ok
+ end || H <- Hooks]
+ end
+ end(),
+ ShellToMk = fun(V) ->
+ re:replace(re:replace(V, "(\\\\$$)(\\\\w*)", "\\\\1(\\\\2)", [global]),
+ "-Werror\\\\b", "", [{return, list}, global])
+ end,
+ PortSpecs = fun() ->
+ case lists:keyfind(port_specs, 1, Conf) of
+ false ->
+ case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+ false -> [];
+ true ->
+ [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+ proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+ end;
+ {_, Specs} ->
+ lists:flatten([case S of
+ {Output, Input} -> {ShellToMk(Output), Input, []};
+ {Regex, Output, Input} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, []};
+ false -> []
+ end;
+ {Regex, Output, Input, [{env, Env}]} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {ShellToMk(Output), Input, Env};
+ false -> []
+ end
+ end || S <- Specs])
+ end
+ end(),
+ PortSpecWrite = fun (Text) ->
+ file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+ end,
+ case PortSpecs of
+ [] -> ok;
+ _ ->
+ Write("\npre-app::\n\t$$\(MAKE) -f c_src/Makefile.erlang.mk\n"),
+ PortSpecWrite(io_lib:format("ERL_CFLAGS = -finline-functions -Wall -fPIC -I ~s/erts-~s/include -I ~s\n",
+ [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+ PortSpecWrite(io_lib:format("ERL_LDFLAGS = -L ~s -lerl_interface -lei\n",
+ [code:lib_dir(erl_interface, lib)])),
+ [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+ FilterEnv = fun(Env) ->
+ lists:flatten([case E of
+ {_, _} -> E;
+ {Regex, K, V} ->
+ case rebar_utils:is_arch(Regex) of
+ true -> {K, V};
+ false -> []
+ end
+ end || E <- Env])
+ end,
+ MergeEnv = fun(Env) ->
+ lists:foldl(fun ({K, V}, Acc) ->
+ case lists:keyfind(K, 1, Acc) of
+ false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+ {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+ end
+ end, [], Env)
+ end,
+ PortEnv = case lists:keyfind(port_env, 1, Conf) of
+ false -> [];
+ {_, PortEnv0} -> FilterEnv(PortEnv0)
+ end,
+ PortSpec = fun ({Output, Input0, Env}) ->
+ filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+ Input = [[" ", I] || I <- Input0],
+ PortSpecWrite([
+ [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+ case $(PLATFORM) of
+ darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+ _ -> ""
+ end,
+ "\n\nall:: ", Output, "\n\n",
+ "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+ [[Output, ": ", K, " = ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+ Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+ "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+ "\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+ case filename:extension(Output) of
+ [] -> "\n";
+ _ -> " -shared\n"
+ end])
+ end,
+ [PortSpec(S) || S <- PortSpecs]
+ end,
+ Write("\ninclude $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(DEPS_DIR)/app)/erlang.mk"),
+ RunPlugin = fun(Plugin, Step) ->
+ case erlang:function_exported(Plugin, Step, 2) of
+ false -> ok;
+ true ->
+ c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+ Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+ dict:store(base_dir, "", dict:new())}, undefined),
+ io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+ end
+ end,
+ fun() ->
+ case lists:keyfind(plugins, 1, Conf) of
+ false -> ok;
+ {_, Plugins} ->
+ [begin
+ case lists:keyfind(deps, 1, Conf) of
+ false -> ok;
+ {_, Deps} ->
+ case lists:keyfind(P, 1, Deps) of
+ false -> ok;
+ _ ->
+ Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+ io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+ io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+ code:add_patha(Path ++ "/ebin")
+ end
+ end
+ end || P <- Plugins],
+ [case code:load_file(P) of
+ {module, P} -> ok;
+ _ ->
+ case lists:keyfind(plugin_dir, 1, Conf) of
+ false -> ok;
+ {_, PluginsDir} ->
+ ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+ {ok, P, Bin} = compile:file(ErlFile, [binary]),
+ {module, P} = code:load_binary(P, ErlFile, Bin)
+ end
+ end || P <- Plugins],
+ [RunPlugin(P, preprocess) || P <- Plugins],
+ [RunPlugin(P, pre_compile) || P <- Plugins],
+ [RunPlugin(P, compile) || P <- Plugins]
+ end
+ end(),
+ halt()
+endef
+
+# dep_autopatch_app.erl: embedded Erlang snippet ($1 = dep name). Rewrites
+# the dependency's ebin/$1.app so its {modules, ...} entry lists every
+# module found under the dep's src/ directory; no-op if the .app file
+# does not exist.
+define dep_autopatch_app.erl
+ UpdateModules = fun(App) ->
+ case filelib:is_regular(App) of
+ false -> ok;
+ true ->
+ {ok, [{application, '$(1)', L0}]} = file:consult(App),
+ Mods = filelib:fold_files("$(call core_native_path,$(DEPS_DIR)/$1/src)", "\\\\.erl$$", true,
+ fun (F, Acc) -> [list_to_atom(filename:rootname(filename:basename(F)))|Acc] end, []),
+ L = lists:keystore(modules, 1, L0, {modules, Mods}),
+ ok = file:write_file(App, io_lib:format("~p.~n", [{application, '$(1)', L}]))
+ end
+ end,
+ UpdateModules("$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"),
+ halt()
+endef
+
+# dep_autopatch_appsrc.erl: normalizes a dependency's src/$1.app.src
+# (falling back to ebin/$1.app as input when no .app.src exists):
+# empties the modules list, rewrites {vsn, git} to {vsn, "git"}, adds a
+# missing {registered, []} entry, then deletes the input if it differed
+# from the output path.
+define dep_autopatch_appsrc.erl
+ AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+ AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+ case filelib:is_regular(AppSrcIn) of
+ false -> ok;
+ true ->
+ {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+ L1 = lists:keystore(modules, 1, L0, {modules, []}),
+ L2 = case lists:keyfind(vsn, 1, L1) of {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, "git"}); _ -> L1 end,
+ L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+ ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+ case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+ end,
+ halt()
+endef
+
+# dep_fetch_METHOD templates: one per fetch method; $1 is the dependency
+# name, repo/commit come from the dep_$1 variable via dep_repo/dep_commit.
+define dep_fetch_git
+ git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_git-submodule
+ git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+ hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+ svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_cp
+ cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Embedded Erlang: downloads the $(1)-$(2) package tarball from the Hex
+# CDN over HTTPS and extracts the inner contents.tar.gz into the dep dir.
+define dep_fetch_hex.erl
+ ssl:start(),
+ inets:start(),
+ {ok, {{_, 200, _}, _, Body}} = httpc:request(get,
+ {"https://s3.amazonaws.com/s3.hex.pm/tarballs/$(1)-$(2).tar", []},
+ [], [{body_format, binary}]),
+ {ok, Files} = erl_tar:extract({binary, Body}, [memory]),
+ {_, Source} = lists:keyfind("contents.tar.gz", 1, Files),
+ ok = erl_tar:extract({binary, Source}, [{cwd, "$(call core_native_path,$(DEPS_DIR)/$1)"}, compressed]),
+ halt()
+endef
+
+# Hex only has a package version. No need to look in the Erlang.mk packages.
+define dep_fetch_hex
+ $(call erlang,$(call dep_fetch_hex.erl,$(1),$(strip $(word 2,$(dep_$(1))))));
+endef
+
+# Exit code 78 (EX_CONFIG) signals a configuration error for dep $(1).
+define dep_fetch_fail
+ echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+ exit 78;
+endef
+
+# Kept for compatibility purposes with older Erlang.mk configuration.
+define dep_fetch_legacy
+ $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+ git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+ cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
+
+# dep_fetch: resolves the fetch method name for dep $(1): first word of
+# dep_$(1) when it names a known method, "legacy" for old-style deps
+# fetched as sub-dependencies, the package index method for known
+# packages, and "fail" otherwise.
+define dep_fetch
+ $(if $(dep_$(1)), \
+ $(if $(dep_fetch_$(word 1,$(dep_$(1)))), \
+ $(word 1,$(dep_$(1))), \
+ $(if $(IS_DEP),legacy,fail)), \
+ $(if $(filter $(1),$(PACKAGES)), \
+ $(pkg_$(1)_fetch), \
+ fail))
+endef
+
+# dep_target: evaluated once per dependency; generates the rule that
+# fetches dep $1 into $(DEPS_DIR), runs autoreconf/configure when
+# autotools files are present, and (unless listed in NO_AUTOPATCH)
+# applies autopatching. amqp_client/rabbit get special-case handling
+# when the RabbitMQ patch variables are set.
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1):
+ $(eval DEP_NAME := $(call dep_name,$1))
+ $(eval DEP_STR := $(if $(filter-out $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+ $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+ echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)."; \
+ exit 17; \
+ fi
+ $(verbose) mkdir -p $(DEPS_DIR)
+ $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$1)),$1)
+ $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure.ac -o -f $(DEPS_DIR)/$(DEP_NAME)/configure.in ]; then \
+ echo " AUTO " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && autoreconf -Wall -vif -I m4; \
+ fi
+ - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+ echo " CONF " $(DEP_STR); \
+ cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+ fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+ $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi; \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+ echo " PATCH Downloading rabbitmq-server"; \
+ git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+ fi; \
+ ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+ elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+ if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+ echo " PATCH Downloading rabbitmq-codegen"; \
+ git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+ fi \
+ else \
+ $$(call dep_autopatch,$(DEP_NAME)) \
+ fi
+endif
+endef
+
+# Instantiate one fetch rule per build/runtime dependency.
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+# Recurse clean/distclean into every application directory, except when
+# already invoked from within an app (IS_APP set by the parent make).
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+ $(verbose) for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep clean IS_APP=1 || exit $$?; \
+ done
+
+distclean:: distclean-apps
+
+distclean-apps:
+ $(verbose) for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep distclean IS_APP=1 || exit $$?; \
+ done
+endif
+
+# Removing the whole deps directory is skipped when SKIP_DEPS is set.
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+ $(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/list-deps.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/list-doc-deps.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/list-rel-deps.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/list-test-deps.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/list-shell-deps.log
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+# core_dep_plugin: includes plugin makefile $(1) from the deps dir, with
+# an empty rule making it depend on dep $(2) being fetched first.
+define core_dep_plugin
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endef
+
+# A plugin spec containing "/" names an explicit file inside a dep;
+# otherwise "dep/plugins.mk" is assumed.
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_SUFFIX ?= _dtl
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+# Embedded Erlang ($1 = list of .dtl files): compiles each template to a
+# module named after its basename (or full path with "/" -> "_" when
+# DTL_FULL_PATH is set), lowercased, with DTL_SUFFIX appended.
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(DTL_PATH)" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom(string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [{out_dir, "ebin/"}, return_errors, {doc_root, "templates"}]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ifneq ($(wildcard src/),)
+
+DTL_FILES = $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifdef DTL_FULL_PATH
+BEAM_FILES += $(addprefix ebin/,$(patsubst %.dtl,%_dtl.beam,$(subst /,_,$(DTL_FILES:$(DTL_PATH)%=%))))
+else
+BEAM_FILES += $(addprefix ebin/,$(patsubst %.dtl,%_dtl.beam,$(notdir $(DTL_FILES))))
+endif
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST)
+ @mkdir -p $(ERLANG_MK_TMP)
+ @if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ @touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+# $? limits compilation to templates newer than the target.
+ebin/$(PROJECT).app:: $(DTL_FILES)
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$?,-pa ebin/ $(DEPS_DIR)/erlydtl/ebin/)))
+endif
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+# compile_proto ($1 = .proto files): generates .erl/.hrl via
+# protobuffs, compiles the generated sources into ebin/, then removes
+# the intermediate .erl files.
+define compile_proto
+ $(verbose) mkdir -p ebin/ include/
+ $(proto_verbose) $(call erlang,$(call compile_proto.erl,$(1)))
+ $(proto_verbose) erlc +debug_info -o ebin/ ebin/*.erl
+ $(verbose) rm ebin/*.erl
+endef
+
+# Embedded Erlang: runs protobuffs_compile per file, emitting headers to
+# <dir>/include and sources to <dir>/ebin.
+define compile_proto.erl
+ [begin
+ Dir = filename:dirname(filename:dirname(F)),
+ protobuffs_compile:generate_source(F,
+ [{output_include_dir, Dir ++ "/include"},
+ {output_src_dir, Dir ++ "/ebin"}])
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ifneq ($(wildcard src/),)
+ebin/$(PROJECT).app:: $(sort $(call core_find,src/,*.proto))
+ $(if $(strip $?),$(call compile_proto,$?))
+endif
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+ +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+# Modules that must be compiled before the rest (names without .erl).
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+# Modules excluded from compilation (names without .erl).
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity: _0 prints a terse label (V=0), _2 traces commands (V=2).
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+ $(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+# ebin/test marks a test build; its presence forces a clean first so
+# test-compiled beams are not mixed into a regular build.
+ifeq ($(wildcard ebin/test),)
+app:: deps $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+else
+app:: clean deps $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build
+endif
+
+# app_file ($1 = git describe, $2 = module list): template for the
+# generated .app resource. The library variant (no src/$(PROJECT)_app.erl)
+# omits registered names and the {mod, ...} callback entry.
+ifeq ($(wildcard src/$(PROJECT)_app.erl),)
+define app_file
+{application, $(PROJECT), [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, []},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS))]}
+]}.
+endef
+else
+define app_file
+{application, $(PROJECT), [
+ {description, "$(PROJECT_DESCRIPTION)"},
+ {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+ {id$(comma)$(space)"$(1)"}$(comma))
+ {modules, [$(call comma_list,$(2))]},
+ {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+ {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS))]},
+ {mod, {$(PROJECT)_app, []}}
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+ $(verbose) :
+
+# Source files.
+
+ERL_FILES = $(sort $(call core_find,src/,*.erl))
+CORE_FILES = $(sort $(call core_find,src/,*.core))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+# compile_asn1 ($1 = .asn1 files): erlc emits .erl/.hrl/.asn1db next to
+# the sources, which are then moved into src/ and include/.
+define compile_asn1
+ $(verbose) mkdir -p include/
+ $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(1)
+ $(verbose) mv asn1/*.erl src/
+ $(verbose) mv asn1/*.hrl include/
+ $(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+ $(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+ $(verbose) mkdir -p include/ priv/mibs/
+ $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+ $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES = $(sort $(call core_find,src/,*.xrl))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES = $(sort $(call core_find,src/,*.yrl))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+ $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $?)
+
+# Erlang and Core Erlang files.
+
+# Embedded Erlang ($1 = output dependency file): parses every .erl file
+# with epp to collect behaviour, parse_transform and header dependencies,
+# writing make rules (and COMPILE_FIRST additions) to $(PROJECT).d.
+define makedep.erl
+ ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+ Modules = [{filename:basename(F, ".erl"), F} || F <- ErlFiles],
+ Add = fun (Dep, Acc) ->
+ case lists:keyfind(atom_to_list(Dep), 1, Modules) of
+ {_, DepFile} -> [DepFile|Acc];
+ false -> Acc
+ end
+ end,
+ AddHd = fun (Dep, Acc) ->
+ case {Dep, lists:keymember(Dep, 2, Modules)} of
+ {"src/" ++ _, false} -> [Dep|Acc];
+ {"include/" ++ _, false} -> [Dep|Acc];
+ _ -> Acc
+ end
+ end,
+ CompileFirst = fun (Deps) ->
+ First0 = [case filename:extension(D) of
+ ".erl" -> filename:basename(D, ".erl");
+ _ -> []
+ end || D <- Deps],
+ case lists:usort(First0) of
+ [] -> [];
+ [[]] -> [];
+ First -> ["COMPILE_FIRST +=", [[" ", F] || F <- First], "\n"]
+ end
+ end,
+ Depend = [begin
+ case epp:parse_file(F, ["include/"], []) of
+ {ok, Forms} ->
+ Deps = lists:usort(lists:foldl(fun
+ ({attribute, _, behavior, Dep}, Acc) -> Add(Dep, Acc);
+ ({attribute, _, behaviour, Dep}, Acc) -> Add(Dep, Acc);
+ ({attribute, _, compile, {parse_transform, Dep}}, Acc) -> Add(Dep, Acc);
+ ({attribute, _, file, {Dep, _}}, Acc) -> AddHd(Dep, Acc);
+ (_, Acc) -> Acc
+ end, [], Forms)),
+ case Deps of
+ [] -> "";
+ _ -> [F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n", CompileFirst(Deps)]
+ end;
+ {error, enoent} ->
+ []
+ end
+ end || F <- ErlFiles],
+ ok = file:write_file("$(1)", Depend),
+ halt()
+endef
+
+# Regenerate $(PROJECT).d unless NO_MAKEDEP is set and a .d file exists.
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+ $(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST)
+ @mkdir -p $(ERLANG_MK_TMP)
+ @if test -f $@; then \
+ touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+ touch -c $(PROJECT).d; \
+ fi
+ @touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+
+-include $(PROJECT).d
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+ $(verbose) mkdir -p ebin/
+
+# compile_erl ($1 = changed sources): -Werror is stripped when building
+# as a dependency so downstream projects don't fail on warnings.
+define compile_erl
+ $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+ -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+# Build beams, then generate ebin/$(PROJECT).app either from the
+# app_file template or by filling in modules/id in src/$(PROJECT).app.src.
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+ $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+ $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+ $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null || true))
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+ $(app_verbose) printf "$(subst $(newline),\n,$(subst ",\",$(call app_file,$(GITDESCRIBE),$(MODULES))))" \
+ > ebin/$(PROJECT).app
+else
+ $(verbose) if [ -z "$$(grep -E '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+ echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk README for instructions." >&2; \
+ exit 1; \
+ fi
+ $(appsrc_verbose) cat src/$(PROJECT).app.src \
+ | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+ | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(GITDESCRIBE)\"}/" \
+ > ebin/$(PROJECT).app
+endif
+
+clean:: clean-app
+
+clean-app:
+ $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+ $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+ $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+ $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+# doc-deps is a no-op when SKIP_DEPS is set; otherwise fetch and build
+# every documentation dependency.
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+ $(verbose) for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+# Same SKIP_DEPS behavior as doc-deps, for release dependencies.
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+ $(verbose) for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+ $(verbose) for dep in $(ALL_TEST_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir:
+ $(gen_verbose) erlc -v $(TEST_ERLC_OPTS) -I include/ -o $(TEST_DIR) \
+ $(call core_find,$(TEST_DIR)/,*.erl) -pa ebin/
+endif
+
+# test-build compiles the app and test suites with TEST_ERLC_OPTS; when
+# ebin/test is absent a clean is forced first so regular beams are not
+# reused, and the marker file is created to record the test build.
+ifeq ($(wildcard ebin/test),)
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: clean deps test-deps $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build test-dir ERLC_OPTS="$(TEST_ERLC_OPTS)"
+ $(gen_verbose) touch ebin/test
+else
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: deps test-deps $(PROJECT).d
+ $(verbose) $(MAKE) --no-print-directory app-build test-dir ERLC_OPTS="$(TEST_ERLC_OPTS)"
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam
+endif
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+# Normalize comma spacing so erlc options split cleanly into words.
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/')
+
+# Convert a single +option erlc flag to rebar erl_opts form; -Werror and
+# non-"+" flags are dropped.
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+ $(if $(findstring +,$1),\
+ $(shell echo $1 | cut -b 2-)))
+endef
+
+# Template for a rebar.config equivalent to this project's DEPS/ERLC_OPTS.
+define compat_rebar_config
+{deps, [$(call comma_list,$(foreach d,$(DEPS),\
+ {$(call dep_name,$d),".*",{git,"$(call dep_repo,$d)","$(call dep_commit,$d)"}}))]}.
+{erl_opts, [$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$(ERLC_OPTS)),\
+ $(call compat_convert_erlc_opts,$o)))]}.
+endef
+
+# Export the rendered config through the environment so the shell can
+# write it verbatim, newlines included.
+$(eval _compat_rebar_config = $$(compat_rebar_config))
+$(eval export _compat_rebar_config)
+
+rebar.config:
+ $(gen_verbose) echo "$${_compat_rebar_config}" > rebar.config
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+
+docs:: asciidoc
+
+asciidoc: distclean-asciidoc doc-deps asciidoc-guide asciidoc-manual
+
+# Guide targets are no-ops when doc/src/guide/book.asciidoc is absent.
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+asciidoc-guide:
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+endif
+
+# Manual targets (and install-asciidoc) are only defined when manual
+# pages exist; each section's pages are gzipped into doc/manN/.
+ifeq ($(wildcard doc/src/manual/*.asciidoc),)
+asciidoc-manual:
+else
+asciidoc-manual:
+ for f in doc/src/manual/*.asciidoc ; do \
+ a2x -v -f manpage $$f ; \
+ done
+ for s in $(MAN_SECTIONS); do \
+ mkdir -p doc/man$$s/ ; \
+ mv doc/src/manual/*.$$s doc/man$$s/ ; \
+ gzip doc/man$$s/*.$$s ; \
+ done
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ for s in $(MAN_SECTIONS); do \
+ mkdir -p $(MAN_INSTALL_PATH)/man$$s/ ; \
+ install -g 0 -o 0 -m 0644 doc/man$$s/*.gz $(MAN_INSTALL_PATH)/man$$s/ ; \
+ done
+endif
+
+distclean:: distclean-asciidoc
+
+distclean-asciidoc:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf doc/man3/ doc/man7/
+
+# Copyright (c) 2014-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+# Append the bootstrap/template target descriptions to the help output.
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app n=NAME Create a new local OTP application NAME" \
+ " new-lib n=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates. $p is the project name supplied by the bootstrap
+# targets; each define below is a whole generated file.
+
+# .app.src skeleton for an OTP application (has an application callback).
+define bs_appsrc
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]},
+ {mod, {$p_app, []}},
+ {env, []}
+]}.
+endef
+
+# .app.src skeleton for an OTP library (no callback, no env).
+define bs_appsrc_lib
+{application, $p, [
+ {description, ""},
+ {vsn, "0.1.0"},
+ {id, "git"},
+ {modules, []},
+ {registered, []},
+ {applications, [
+ kernel,
+ stdlib
+ ]}
+]}.
+endef
+
+# Top-level Makefile skeleton; the SP variant records the requested
+# whitespace setting in the generated file.
+ifdef SP
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.0.1
+
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+
+include erlang.mk
+endef
+else
+define bs_Makefile
+PROJECT = $p
+include erlang.mk
+endef
+endif
+
+# Makefile skeleton for an application inside $(APPS_DIR).
+define bs_apps_Makefile
+PROJECT = $p
+include $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)/erlang.mk
+endef
+
+# application callback module skeleton.
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+ $p_sup:start_link().
+
+stop(_State) ->
+ ok.
+endef
+
+# relx.config skeleton for bootstrap-rel.
+define bs_relx_config
+{release, {$p_release, "1"}, [$p]}.
+{extended_start_script, true}.
+{sys_config, "rel/sys.config"}.
+{vm_args, "rel/vm.args"}.
+endef
+
+# rel/sys.config skeleton.
+define bs_sys_config
+[
+].
+endef
+
+# rel/vm.args skeleton.
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+ supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+ Procs = [],
+ {ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+ {ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+ {reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+ {noreply, State}.
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+endef
+
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+ {ok, Req2} = cowboy_req:reply(200, Req),
+ {ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+ gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+ {ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+ {next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+ {reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+ {reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+ {next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+ ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+ {ok, StateName, StateData}.
+endef
+
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+ {loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+ {loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+ {upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+ {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+ {<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+ {upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+ Req2 = cowboy_req:compact(Req),
+ {ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+ {reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+ {reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+ {ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+ {ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+ ok.
+endef
+
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+ socket :: inet:socket(),
+ transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+ Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+ {ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+ ok = ranch:accept_ack(Ref),
+ loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+ loop(State).
+endef
+
+# Plugin-specific targets.
+
+define render_template
+ $(verbose) printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
+
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(eval n := $(PROJECT)_sup)
+ $(call render_template,bs_Makefile,Makefile)
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(call render_template,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(call render_template,bs_app,src/$(PROJECT)_app.erl)
+ $(call render_template,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(call render_template,bs_Makefile,Makefile)
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(call render_template,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard rel/),)
+ $(error Error: rel/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(call render_template,bs_relx_config,relx.config)
+ $(verbose) mkdir rel/
+ $(call render_template,bs_sys_config,rel/sys.config)
+ $(call render_template,bs_vm_args,rel/vm.args)
+
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(call render_template,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(call render_template,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(call render_template,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(call render_template,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(call render_template,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(call render_template,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef tpl_$(t)
+ $(error Unknown template)
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new t=$t n=$n in=
+else
+ $(call render_template,tpl_$(t),src/$(n).erl)
+endif
+
+list-templates:
+ $(verbose) echo Available templates: $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT).so
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -finline-functions -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+CFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
+CXXFLAGS += -fPIC -I $(ERTS_INCLUDE_DIR) -I $(ERL_INTERFACE_INCLUDE_DIR)
+
+LDLIBS += -L $(ERL_INTERFACE_LIB_DIR) -lerl_interface -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(?F);
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(?F);
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT)
+
+$(C_SRC_OUTPUT): $(OBJECTS)
+ $(verbose) mkdir -p priv/
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(C_SRC_ENV)\", \
+ io_lib:format( \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+endif
+
+# Templates.
+
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+ /* Initialize private data. */
+ *priv_data = NULL;
+
+ loads++;
+
+ return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+ /* Convert the private data to the new version. */
+ *priv_data = *old_priv_data;
+
+ loads++;
+
+ return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+ if (loads == 1) {
+ /* Destroy the private data. */
+ }
+
+ loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+ if (enif_is_atom(env, argv[0])) {
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "hello"),
+ argv[0]);
+ }
+
+ return enif_make_tuple2(env,
+ enif_make_atom(env, "error"),
+ enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+ {"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+ PrivDir = case code:priv_dir(?MODULE) of
+ {error, _} ->
+ AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+ filename:join(AppPath, "priv");
+ Path ->
+ Path
+ end,
+ erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+ erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+new-nif:
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(call render_template,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(call render_template,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-setup distclean-kerl
+
+KERL ?= $(CURDIR)/kerl
+export KERL
+
+KERL_URL ?= https://raw.githubusercontent.com/yrashk/kerl/master/kerl
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+CI_INSTALL_DIR ?= $(HOME)/erlang
+CI_OTP ?=
+
+ifeq ($(strip $(CI_OTP)),)
+ci::
+else
+ci:: $(addprefix ci-,$(CI_OTP))
+
+ci-prepare: $(addprefix $(CI_INSTALL_DIR)/,$(CI_OTP))
+
+ci-setup::
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+define ci_target
+ci-$(1): $(CI_INSTALL_DIR)/$(1)
+ $(ci_verbose) \
+ PATH="$(CI_INSTALL_DIR)/$(1)/bin:$(PATH)" \
+ CI_OTP_RELEASE="$(1)" \
+ CT_OPTS="-label $(1)" \
+ $(MAKE) clean ci-setup tests
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp))))
+
+define ci_otp_target
+ifeq ($(wildcard $(CI_INSTALL_DIR)/$(1)),)
+$(CI_INSTALL_DIR)/$(1): $(KERL)
+ $(KERL) build git $(OTP_GIT) $(1) $(1)
+ $(KERL) install $(1) $(CI_INSTALL_DIR)/$(1)
+endif
+endef
+
+$(foreach otp,$(CI_OTP),$(eval $(call ci_otp_target,$(otp))))
+
+$(KERL):
+ $(gen_verbose) $(call core_http_get,$(KERL),$(KERL_URL))
+ $(verbose) chmod +x $(KERL)
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL)
+endif
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+ifneq ($(wildcard $(TEST_DIR)),)
+ CT_SUITES ?= $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+else
+ CT_SUITES ?=
+endif
+
+# Core targets.
+
+tests:: ct
+
+distclean:: distclean-ct
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+	"A suite named http_SUITE can be run using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(DEPS_DIR)/*/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CURDIR)/logs
+
+ifeq ($(CT_SUITES),)
+ct:
+else
+ct: test-build
+ $(verbose) mkdir -p $(CURDIR)/logs/
+ $(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+
+define ct_suite_target
+ct-$(1): test-build
+ $(verbose) mkdir -p $(CURDIR)/logs/
+ $(gen_verbose) $(CT_RUN) -suite $(addsuffix _SUITE,$(1)) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CURDIR)/logs/
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r src
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions \
+ -Wunmatched_returns # -Wunderspecs
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+$(DIALYZER_PLT): deps app
+ $(verbose) dialyzer --build_plt --apps erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS)
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze:
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native $(DIALYZER_DIRS) $(DIALYZER_OPTS)
+
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: elvis distclean-elvis
+
+# Configuration.
+
+ELVIS_CONFIG ?= $(CURDIR)/elvis.config
+
+ELVIS ?= $(CURDIR)/elvis
+export ELVIS
+
+ELVIS_URL ?= https://github.com/inaka/elvis/releases/download/0.2.5/elvis
+ELVIS_CONFIG_URL ?= https://github.com/inaka/elvis/releases/download/0.2.5/elvis.config
+ELVIS_OPTS ?=
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Elvis targets:" \
+ " elvis Run Elvis using the local elvis.config or download the default otherwise"
+
+distclean:: distclean-elvis
+
+# Plugin-specific targets.
+
+$(ELVIS):
+ $(gen_verbose) $(call core_http_get,$(ELVIS),$(ELVIS_URL))
+ $(verbose) chmod +x $(ELVIS)
+
+$(ELVIS_CONFIG):
+ $(verbose) $(call core_http_get,$(ELVIS_CONFIG),$(ELVIS_CONFIG_URL))
+
+elvis: $(ELVIS) $(ELVIS_CONFIG)
+ $(verbose) $(ELVIS) rock -c $(ELVIS_CONFIG) $(ELVIS_OPTS)
+
+distclean-elvis:
+ $(gen_verbose) rm -rf $(ELVIS)
+
+# Copyright (c) 2014 Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+
+ESCRIPT_BEAMS ?= "ebin/*", "deps/*/ebin/*"
+ESCRIPT_SYS_CONFIG ?= "rel/sys.config"
+ESCRIPT_EMU_ARGS ?= -pa . \
+ -sasl errlog_type error \
+ -escript main $(ESCRIPT_NAME)
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_STATIC ?= "deps/*/priv/**", "priv/**"
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+
+# Plugin-specific targets.
+
+# Based on https://github.com/synrc/mad/blob/master/src/mad_bundle.erl
+# Copyright (c) 2013 Maxim Sokhatsky, Synrc Research Center
+# Modified MIT License, https://github.com/synrc/mad/blob/master/LICENSE :
+# Software may only be used for the great good and the true happiness of all
+# sentient beings.
+
+define ESCRIPT_RAW
+'Read = fun(F) -> {ok, B} = file:read_file(filename:absname(F)), B end,'\
+'Files = fun(L) -> A = lists:concat([filelib:wildcard(X)||X<- L ]),'\
+' [F || F <- A, not filelib:is_dir(F) ] end,'\
+'Squash = fun(L) -> [{filename:basename(F), Read(F) } || F <- L ] end,'\
+'Zip = fun(A, L) -> {ok,{_,Z}} = zip:create(A, L, [{compress,all},memory]), Z end,'\
+'Ez = fun(Escript) ->'\
+' Static = Files([$(ESCRIPT_STATIC)]),'\
+' Beams = Squash(Files([$(ESCRIPT_BEAMS), $(ESCRIPT_SYS_CONFIG)])),'\
+' Archive = Beams ++ [{ "static.gz", Zip("static.gz", Static)}],'\
+' escript:create(Escript, [ $(ESCRIPT_OPTIONS)'\
+' {archive, Archive, [memory]},'\
+' {shebang, "$(ESCRIPT_SHEBANG)"},'\
+' {comment, "$(ESCRIPT_COMMENT)"},'\
+' {emu_args, " $(ESCRIPT_EMU_ARGS)"}'\
+' ]),'\
+' file:change_mode(Escript, 8#755)'\
+'end,'\
+'Ez("$(ESCRIPT_NAME)"),'\
+'halt().'
+endef
+
+ESCRIPT_COMMAND = $(subst ' ',,$(ESCRIPT_RAW))
+
+escript:: distclean-escript deps app
+ $(gen_verbose) $(ERL) -eval $(ESCRIPT_COMMAND)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_NAME)
+
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ case "$(COVER)" of
+ "" -> ok;
+ _ ->
+ case cover:compile_beam_directory("ebin") of
+ {error, _} -> halt(1);
+ _ -> ok
+ end
+ end,
+ case eunit:test([$(call comma_list,$(1))], [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ case "$(COVER)" of
+ "" -> ok;
+ _ ->
+ cover:export("eunit.coverdata")
+ end,
+ halt()
+endef
+
+EUNIT_EBIN_MODS = $(notdir $(basename $(call core_find,ebin/,*.beam)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.beam)))
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),{module,'$(mod)'})
+
+eunit: test-build
+ $(gen_verbose) $(ERL) -pa $(TEST_DIR) $(DEPS_DIR)/*/ebin ebin \
+ -eval "$(subst $(newline),,$(subst ",\",$(call eunit.erl,$(EUNIT_MODS))))"
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel distclean-relx-rel distclean-relx run
+
+# Configuration.
+
+RELX ?= $(CURDIR)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://github.com/erlware/relx/releases/download/v3.5.0/relx
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+endif
+endif
+
+distclean:: distclean-relx-rel distclean-relx
+
+# Plugin-specific targets.
+
+$(RELX):
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) -c $(RELX_CONFIG) $(RELX_OPTS)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+distclean-relx:
+ $(gen_verbose) rm -rf $(RELX)
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run:
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(RELX_CONFIG)"),
+ {release, {Name, _}, _} = lists:keyfind(release, 1, Config),
+ io:format("~s", [Name]),
+ halt(0).
+endef
+
+RELX_RELEASE = `$(call erlang,$(get_relx_release.erl))`
+
+run: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_RELEASE)/bin/$(RELX_RELEASE) console
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(APPS_DIR)/*/ebin $(DEPS_DIR)/*/ebin
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+ " shell Run an erlang shell with SHELL_OPTS or reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) for dep in $(ALL_SHELL_DEPS_DIRS) ; do $(MAKE) -C $$dep ; done
+
+shell: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright (c) 2015, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ code:add_pathsa(["$(CURDIR)/ebin", "$(DEPS_DIR)/*/ebin"]),
+ try
+ case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end
+ of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module~n"),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename $(wildcard ebin/*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREF_ARGS :=
+else
+ XREF_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/0.2.2/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Xref targets:" \
+	"  xref        Run Xrefr using XREF_CONFIG as config file if defined"
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+	$(gen_verbose) $(XREFR) $(XREF_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR = cover
+
+# Hook in coverage to ct
+
+ifdef COVER
+ifdef CT_RUN
+# All modules in 'ebin'
+COVER_MODS = $(notdir $(basename $(call core_ls,ebin/*.beam)))
+
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec:
+ $(verbose) echo Cover mods: $(COVER_MODS)
+ $(gen_verbose) printf "%s\n" \
+ '{incl_mods,[$(subst $(space),$(comma),$(COVER_MODS))]}.' \
+ '{export,"$(CURDIR)/ct.coverdata"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+ " cover-report Generate a HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge {eunit,ct}.coverdata into one coverdata file." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+ "target tests additionally generates a HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out all.coverdata,$(wildcard *.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f *.coverdata ct.cover.spec
+
+# Merge all coverdata files into one.
+all.coverdata: $(COVERDATA)
+ $(gen_verbose) $(ERL) -eval ' \
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),) \
+ cover:export("$@"), halt(0).'
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ TotalPerc = round(100 * TotalY / (TotalY + TotalN)),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, round(100 * Y / (Y + N))]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(gen_verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+ifneq ($(SKIP_DEPS),)
+fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps fetch-shell-deps:
+ @:
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter the type of requested dependencies.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+fetch-deps: $(ALL_DEPS_DIRS)
+fetch-doc-deps: $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+fetch-rel-deps: $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+fetch-test-deps: $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+fetch-shell-deps: $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow to use fetch-deps and $(DEP_TYPES) to fetch multiple types of
+# dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+fetch-deps: $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+fetch-deps: $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+fetch-deps: $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+fetch-deps: $(ALL_SHELL_DEPS_DIRS)
+endif
+
+fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps fetch-shell-deps:
+ifndef IS_APP
+ $(verbose) for dep in $(ALL_APPS_DIRS) ; do \
+ $(MAKE) -C $$dep $@ IS_APP=1 || exit $$?; \
+ done
+endif
+ifneq ($(IS_DEP),1)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/$@.log
+endif
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+ $(verbose) for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/$@.log; then \
+ echo $$dep >> $(ERLANG_MK_TMP)/$@.log; \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps IS_DEP=1 || exit $$?; \
+ fi \
+ fi \
+ done
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+LIST_DIRS = $(ALL_DEPS_DIRS)
+LIST_DEPS = $(BUILD_DEPS) $(DEPS)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): fetch-deps
+
+ifneq ($(IS_DEP),1)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): LIST_DIRS += $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): LIST_DEPS += $(DOC_DEPS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): fetch-doc-deps
+else
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): fetch-deps
+endif
+
+ifneq ($(IS_DEP),1)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): LIST_DIRS += $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): LIST_DEPS += $(REL_DEPS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): fetch-rel-deps
+else
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): fetch-deps
+endif
+
+ifneq ($(IS_DEP),1)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): LIST_DIRS += $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): LIST_DEPS += $(TEST_DEPS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): fetch-test-deps
+else
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): fetch-deps
+endif
+
+ifneq ($(IS_DEP),1)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): LIST_DIRS += $(ALL_SHELL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): LIST_DEPS += $(SHELL_DEPS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): fetch-shell-deps
+else
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): fetch-deps
+endif
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ifneq ($(IS_DEP),1)
+ $(verbose) rm -f $@.orig
+endif
+ifndef IS_APP
+ $(verbose) for app in $(filter-out $(CURDIR),$(ALL_APPS_DIRS)); do \
+ $(MAKE) -C "$$app" --no-print-directory $@ IS_APP=1 || :; \
+ done
+endif
+ $(verbose) for dep in $(filter-out $(CURDIR),$(LIST_DIRS)); do \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C "$$dep" --no-print-directory $@ IS_DEP=1; \
+ fi; \
+ done
+ $(verbose) for dep in $(LIST_DEPS); do \
+ echo $(DEPS_DIR)/$$dep; \
+ done >> $@.orig
+ifndef IS_APP
+ifneq ($(IS_DEP),1)
+ $(verbose) sort < $@.orig | uniq > $@
+ $(verbose) rm -f $@.orig
+endif
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+ifneq ($(SKIP_DEPS),)
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ @:
+else
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+# Allow to use fetch-deps and $(DEP_TYPES) to fetch multiple types of
+# dependencies with a single target.
+ifneq ($(IS_DEP),1)
+ifneq ($(filter doc,$(DEP_TYPES)),)
+list-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+list-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+list-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+list-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+endif
+endif
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^ | sort | uniq
+endif # ifneq ($(SKIP_DEPS),)
diff --git a/generate_app b/generate_app
deleted file mode 100644
index fb0eb1ea62..0000000000
--- a/generate_app
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
-
-main([InFile, OutFile | SrcDirs]) ->
- Modules = [list_to_atom(filename:basename(F, ".erl")) ||
- SrcDir <- SrcDirs,
- F <- filelib:wildcard("*.erl", SrcDir)],
- {ok, [{application, Application, Properties}]} = file:consult(InFile),
- NewProperties =
- case proplists:get_value(modules, Properties) of
- [] -> lists:keyreplace(modules, 1, Properties, {modules, Modules});
- _ -> Properties
- end,
- file:write_file(
- OutFile,
- io_lib:format("~p.~n", [{application, Application, NewProperties}])).
diff --git a/generate_deps b/generate_deps
deleted file mode 100644
index ddfca816b4..0000000000
--- a/generate_deps
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/usr/bin/env escript
-%% -*- erlang -*-
--mode(compile).
-
-%% We expect the list of Erlang source and header files to arrive on
-%% stdin, with the entries colon-separated.
-main([TargetFile, EbinDir]) ->
- ErlsAndHrls = [ string:strip(S,left) ||
- S <- string:tokens(io:get_line(""), ":\n")],
- ErlFiles = [F || F <- ErlsAndHrls, lists:suffix(".erl", F)],
- Modules = sets:from_list(
- [list_to_atom(filename:basename(FileName, ".erl")) ||
- FileName <- ErlFiles]),
- HrlFiles = [F || F <- ErlsAndHrls, lists:suffix(".hrl", F)],
- IncludeDirs = lists:usort([filename:dirname(Path) || Path <- HrlFiles]),
- Headers = sets:from_list(HrlFiles),
- Deps = lists:foldl(
- fun (Path, Deps1) ->
- dict:store(Path, detect_deps(IncludeDirs, EbinDir,
- Modules, Headers, Path),
- Deps1)
- end, dict:new(), ErlFiles),
- {ok, Hdl} = file:open(TargetFile, [write, delayed_write]),
- dict:fold(
- fun (_Path, [], ok) ->
- ok;
- (Path, Dep, ok) ->
- Module = filename:basename(Path, ".erl"),
- ok = file:write(Hdl, [EbinDir, "/", Module, ".beam: ",
- Path]),
- ok = sets:fold(fun (E, ok) -> file:write(Hdl, [" ", E]) end,
- ok, Dep),
- file:write(Hdl, ["\n"])
- end, ok, Deps),
- ok = file:write(Hdl, [TargetFile, ": ", escript:script_name(), "\n"]),
- ok = file:sync(Hdl),
- ok = file:close(Hdl).
-
-detect_deps(IncludeDirs, EbinDir, Modules, Headers, Path) ->
- {ok, Forms} = epp:parse_file(Path, IncludeDirs, [{use_specs, true}]),
- lists:foldl(
- fun ({attribute, _LineNumber, Attribute, Behaviour}, Deps)
- when Attribute =:= behaviour orelse Attribute =:= behavior ->
- case sets:is_element(Behaviour, Modules) of
- true -> sets:add_element(
- [EbinDir, "/", atom_to_list(Behaviour), ".beam"],
- Deps);
- false -> Deps
- end;
- ({attribute, _LineNumber, file, {FileName, _LineNumber1}}, Deps) ->
- case sets:is_element(FileName, Headers) of
- true -> sets:add_element(FileName, Deps);
- false -> Deps
- end;
- (_Form, Deps) ->
- Deps
- end, sets:new(), Forms).
diff --git a/include/rabbit.hrl b/include/rabbit.hrl
deleted file mode 100644
index 5b90956122..0000000000
--- a/include/rabbit.hrl
+++ /dev/null
@@ -1,152 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is Pivotal Software, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
-%% Passed around most places
--record(user, {username,
- tags,
- authz_backends}). %% List of {Module, AuthUserImpl} pairs
-
-%% Passed to auth backends
--record(auth_user, {username,
- tags,
- impl}).
-
-%% Implementation for the internal auth backend
--record(internal_user, {username, password_hash, tags}).
--record(permission, {configure, write, read}).
--record(user_vhost, {username, virtual_host}).
--record(user_permission, {user_vhost, permission}).
-
--record(vhost, {virtual_host, dummy}).
-
--record(content,
- {class_id,
- properties, %% either 'none', or a decoded record/tuple
- properties_bin, %% either 'none', or an encoded properties binary
- %% Note: at most one of properties and properties_bin can be
- %% 'none' at once.
- protocol, %% The protocol under which properties_bin was encoded
- payload_fragments_rev %% list of binaries, in reverse order (!)
- }).
-
--record(resource, {virtual_host, kind, name}).
-
-%% fields described as 'transient' here are cleared when writing to
-%% rabbit_durable_<thing>
--record(exchange, {
- name, type, durable, auto_delete, internal, arguments, %% immutable
- scratches, %% durable, explicitly updated via update_scratch/3
- policy, %% durable, implicitly updated when policy changes
- decorators}). %% transient, recalculated in store/1 (i.e. recovery)
-
--record(amqqueue, {
- name, durable, auto_delete, exclusive_owner = none, %% immutable
- arguments, %% immutable
- pid, %% durable (just so we know home node)
- slave_pids, sync_slave_pids, %% transient
- recoverable_slaves, %% durable
- policy, %% durable, implicit update as above
- gm_pids, %% transient
- decorators, %% transient, recalculated as above
- state}). %% durable (have we crashed?)
-
--record(exchange_serial, {name, next}).
-
-%% mnesia doesn't like unary records, so we add a dummy 'value' field
--record(route, {binding, value = const}).
--record(reverse_route, {reverse_binding, value = const}).
-
--record(binding, {source, key, destination, args = []}).
--record(reverse_binding, {destination, key, source, args = []}).
-
--record(topic_trie_node, {trie_node, edge_count, binding_count}).
--record(topic_trie_edge, {trie_edge, node_id}).
--record(topic_trie_binding, {trie_binding, value = const}).
-
--record(trie_node, {exchange_name, node_id}).
--record(trie_edge, {exchange_name, node_id, word}).
--record(trie_binding, {exchange_name, node_id, destination, arguments}).
-
--record(listener, {node, protocol, host, ip_address, port}).
-
--record(runtime_parameters, {key, value}).
-
--record(basic_message, {exchange_name, routing_keys = [], content, id,
- is_persistent}).
-
--record(ssl_socket, {tcp, ssl}).
--record(delivery, {mandatory, confirm, sender, message, msg_seq_no, flow}).
--record(amqp_error, {name, explanation = "", method = none}).
-
--record(event, {type, props, reference = undefined, timestamp}).
-
--record(message_properties, {expiry, needs_confirming = false, size}).
-
--record(plugin, {name, %% atom()
- version, %% string()
- description, %% string()
- type, %% 'ez' or 'dir'
- dependencies, %% [{atom(), string()}]
- location}). %% string()
-
-%%----------------------------------------------------------------------------
-
--define(COPYRIGHT_MESSAGE, "Copyright (C) 2007-2015 Pivotal Software, Inc.").
--define(INFORMATION_MESSAGE, "Licensed under the MPL. See http://www.rabbitmq.com/").
--define(ERTS_MINIMUM, "5.6.3").
-
-%% EMPTY_FRAME_SIZE, 8 = 1 + 2 + 4 + 1
-%% - 1 byte of frame type
-%% - 2 bytes of channel number
-%% - 4 bytes of frame payload length
-%% - 1 byte of payload trailer FRAME_END byte
-%% See rabbit_binary_generator:check_empty_frame_size/0, an assertion
-%% called at startup.
--define(EMPTY_FRAME_SIZE, 8).
-
--define(MAX_WAIT, 16#ffffffff).
-
--define(HIBERNATE_AFTER_MIN, 1000).
--define(DESIRED_HIBERNATE, 10000).
--define(CREDIT_DISC_BOUND, {2000, 500}).
-%% When we discover that we should write some indices to disk for some
-%% betas, the IO_BATCH_SIZE sets the number of betas that we must be
-%% due to write indices for before we do any work at all.
--define(IO_BATCH_SIZE, 2048). %% next power-of-2 after ?CREDIT_DISC_BOUND
-
--define(INVALID_HEADERS_KEY, <<"x-invalid-headers">>).
--define(ROUTING_HEADERS, [<<"CC">>, <<"BCC">>]).
--define(DELETED_HEADER, <<"BCC">>).
-
-%% Trying to send a term across a cluster larger than 2^31 bytes will
-%% cause the VM to exit with "Absurdly large distribution output data
-%% buffer". So we limit the max message size to 2^31 - 10^6 bytes (1MB
-%% to allow plenty of leeway for the #basic_message{} and #content{}
-%% wrapping the message body).
--define(MAX_MSG_SIZE, 2147383648).
-
-%% First number is maximum size in bytes before we start to
-%% truncate. The following 4-tuple is:
-%%
-%% 1) Maximum size of printable lists and binaries.
-%% 2) Maximum size of any structural term.
-%% 3) Amount to decrease 1) every time we descend while truncating.
-%% 4) Amount to decrease 2) every time we descend while truncating.
-%%
-%% Whole thing feeds into truncate:log_event/2.
--define(LOG_TRUNC, {100000, {2000, 100, 50, 5}}).
-
--define(store_proc_name(N), rabbit_misc:store_proc_name(?MODULE, N)).
diff --git a/include/rabbit_cli.hrl b/include/rabbit_cli.hrl
index 737bb4ea3d..2e687e2eb8 100644
--- a/include/rabbit_cli.hrl
+++ b/include/rabbit_cli.hrl
@@ -48,3 +48,14 @@
-define(ONLINE_DEF, {?ONLINE_OPT, flag}).
-define(RPC_TIMEOUT, infinity).
+
+%% Subset of standardized exit codes from sysexits.h, see
+%% https://github.com/rabbitmq/rabbitmq-server/issues/396 for discussion.
+-define(EX_OK , 0).
+-define(EX_USAGE , 64). % Bad command-line arguments.
+-define(EX_DATAERR , 65). % Wrong data in command-line arguments.
+-define(EX_NOUSER , 67). % The user specified does not exist.
+-define(EX_UNAVAILABLE, 69). % Could not connect to the target node.
+-define(EX_SOFTWARE , 70). % Failed to execute command.
+-define(EX_TEMPFAIL , 75). % Temporary error (e.g. something has timed out).
+-define(EX_CONFIG     , 78). % Misconfiguration detected.
diff --git a/packaging/Makefile b/packaging/Makefile
new file mode 100644
index 0000000000..da3dcccb60
--- /dev/null
+++ b/packaging/Makefile
@@ -0,0 +1,103 @@
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+endif
+
+all: packages
+ @:
+
+# --------------------------------------------------------------------
+# Packaging.
+# --------------------------------------------------------------------
+
+.PHONY: packages package-deb \
+ package-rpm package-rpm-fedora package-rpm-suse \
+ package-windows package-standalone-macosx \
+ package-generic-unix
+
+PACKAGES_DIR ?= ../PACKAGES
+SOURCE_DIST_FILE ?= $(wildcard $(PACKAGES_DIR)/rabbitmq-server-*.tar.xz)
+
+ifneq ($(filter-out clean,$(MAKECMDGOALS)),)
+ifeq ($(SOURCE_DIST_FILE),)
+$(error Cannot find source archive; please specify SOURCE_DIST_FILE)
+endif
+ifneq ($(words $(SOURCE_DIST_FILE)),1)
+$(error Multiple source archives found; please specify SOURCE_DIST_FILE)
+endif
+ifeq ($(filter %.tar.xz %.txz,$(SOURCE_DIST_FILE)),)
+$(error The source archive must be a tar.xz archive)
+endif
+ifeq ($(wildcard $(SOURCE_DIST_FILE)),)
+$(error The source archive must exist)
+endif
+endif
+
+ifndef NO_CLEAN
+DO_CLEAN := clean
+endif
+
+VARS = SOURCE_DIST_FILE="$(abspath $(SOURCE_DIST_FILE))" \
+ PACKAGES_DIR="$(abspath $(PACKAGES_DIR))" \
+ SIGNING_KEY="$(SIGNING_KEY)" \
+ SIGNING_USER_ID="$(SIGNING_USER_ID)" \
+ SIGNING_USER_EMAIL="$(SIGNING_USER_EMAIL)"
+
+packages: package-deb package-rpm package-windows package-generic-unix
+ @:
+
+package-deb: $(SOURCE_DIST_FILE)
+ $(gen_verbose) $(MAKE) -C debs/Debian $(VARS) all $(DO_CLEAN)
+
+package-rpm: package-rpm-fedora package-rpm-suse
+ @:
+
+package-rpm-fedora: $(SOURCE_DIST_FILE)
+ $(gen_verbose) $(MAKE) -C RPMS/Fedora $(VARS) all $(DO_CLEAN)
+
+package-rpm-suse: $(SOURCE_DIST_FILE)
+ $(gen_verbose) $(MAKE) -C RPMS/Fedora $(VARS) RPM_OS=suse all $(DO_CLEAN)
+
+package-windows: $(SOURCE_DIST_FILE)
+ $(gen_verbose) $(MAKE) -C windows $(VARS) all $(DO_CLEAN)
+ $(verbose) $(MAKE) -C windows-exe $(VARS) all $(DO_CLEAN)
+
+package-generic-unix: $(SOURCE_DIST_FILE)
+ $(gen_verbose) $(MAKE) -C generic-unix $(VARS) all $(DO_CLEAN)
+
+ifeq ($(PLATFORM),darwin)
+packages: package-standalone-macosx
+
+package-standalone-macosx: $(SOURCE_DIST_FILE)
+ $(gen_verbose) $(MAKE) -C standalone $(VARS) OS=mac all $(DO_CLEAN)
+endif
+
+.PHONY: clean
+
+clean:
+ for subdir in debs/Debian RPMS/Fedora windows windows-exe generic-unix standalone; do \
+ $(MAKE) -C "$$subdir" clean; \
+ done
diff --git a/packaging/RPMS/Fedora/Makefile b/packaging/RPMS/Fedora/Makefile
index 4f5f13278d..5763cadf04 100644
--- a/packaging/RPMS/Fedora/Makefile
+++ b/packaging/RPMS/Fedora/Makefile
@@ -1,15 +1,27 @@
-TARBALL_DIR=../../../dist
-TARBALL=$(notdir $(wildcard $(TARBALL_DIR)/rabbitmq-server-[0-9.]*.tar.gz))
-COMMON_DIR=../../common
-VERSION=$(shell echo $(TARBALL) | sed -e 's:rabbitmq-server-\(.*\)\.tar\.gz:\1:g')
+SOURCE_DIST_FILE ?= $(wildcard ../../../rabbitmq-server-*.tar.xz)
-TOP_DIR=$(shell pwd)
-#Under debian we do not want to check build dependencies, since that
-#only checks build-dependencies using rpms, not debs
-DEFINES=--define '_topdir $(TOP_DIR)' --define '_tmppath $(TOP_DIR)/tmp' --define '_sysconfdir /etc' --define '_localstatedir /var'
+ifneq ($(filter-out clean,$(MAKECMDGOALS)),)
+ifeq ($(SOURCE_DIST_FILE),)
+$(error Cannot find source archive; please specify SOURCE_DIST_FILE)
+endif
+ifneq ($(words $(SOURCE_DIST_FILE)),1)
+$(error Multiple source archives found; please specify SOURCE_DIST_FILE)
+endif
+
+VERSION ?= $(patsubst rabbitmq-server-%.tar.xz,%,$(notdir $(SOURCE_DIST_FILE)))
+ifeq ($(VERSION),)
+$(error Cannot determine version; please specify VERSION)
+endif
+endif
+
+TOP_DIR = $(shell pwd)
+# Under debian we do not want to check build dependencies, since that
+# only checks build-dependencies using rpms, not debs
+DEFINES = --define '_topdir $(TOP_DIR)' --define '_tmppath $(TOP_DIR)/tmp' \
+ --define '_sysconfdir /etc' --define '_localstatedir /var'
ifndef RPM_OS
-RPM_OS=fedora
+RPM_OS = fedora
endif
ifeq "$(RPM_OS)" "suse"
@@ -26,16 +38,21 @@ SPEC_DEFINES=--define 'group_tag Development/Libraries'
START_PROG=daemon
endif
-rpms: clean server
+unexport DEPS_DIR
+unexport ERL_LIBS
+
+.PHONY: all prepare server clean
+
+all: clean server
+ @:
prepare:
mkdir -p BUILD SOURCES SPECS SRPMS RPMS tmp
- cp $(TARBALL_DIR)/$(TARBALL) SOURCES
+ cp $(SOURCE_DIST_FILE) SOURCES
cp rabbitmq-server.spec SPECS
sed -i 's|%%VERSION%%|$(VERSION)|;s|%%REQUIRES%%|$(REQUIRES)|' \
SPECS/rabbitmq-server.spec
- cp ${COMMON_DIR}/* SOURCES/
cp rabbitmq-server.init SOURCES/rabbitmq-server.init
sed -i \
-e 's|^START_PROG=.*$$|START_PROG="$(START_PROG)"|' \
@@ -46,13 +63,16 @@ ifeq "$(RPM_OS)" "fedora"
sed -i -e '/^# Default-Start:/d;/^# Default-Stop:/d' \
SOURCES/rabbitmq-server.init
endif
- sed -i -e 's|@SU_RABBITMQ_SH_C@|su rabbitmq -s /bin/sh -c|' \
- -e 's|@STDOUT_STDERR_REDIRECTION@||' \
- SOURCES/rabbitmq-script-wrapper
+
cp rabbitmq-server.logrotate SOURCES/rabbitmq-server.logrotate
server: prepare
rpmbuild -ba --nodeps SPECS/rabbitmq-server.spec $(DEFINES) $(OS_DEFINES) $(SPEC_DEFINES)
+ if test "$(PACKAGES_DIR)"; then \
+ mkdir -p "$(PACKAGES_DIR)"; \
+ mv SRPMS/*.rpm RPMS/noarch/*.rpm "$(PACKAGES_DIR)"; \
+ fi
+
clean:
rm -rf SOURCES SPECS RPMS SRPMS BUILD tmp
diff --git a/packaging/RPMS/Fedora/rabbitmq-server.init b/packaging/RPMS/Fedora/rabbitmq-server.init
index 3e48147b63..15929108b5 100644
--- a/packaging/RPMS/Fedora/rabbitmq-server.init
+++ b/packaging/RPMS/Fedora/rabbitmq-server.init
@@ -25,8 +25,8 @@ CONTROL=/usr/sbin/rabbitmqctl
DESC=rabbitmq-server
USER=rabbitmq
ROTATE_SUFFIX=
-INIT_LOG_DIR=/var/log/rabbitmq
PID_FILE=/var/run/rabbitmq/pid
+RABBITMQ_ENV=/usr/lib/rabbitmq/bin/rabbitmq-env
START_PROG= # Set when building package
LOCK_FILE=/var/lock/subsys/$NAME
@@ -39,6 +39,9 @@ set -e
[ -f /etc/default/${NAME} ] && . /etc/default/${NAME}
+RABBITMQ_SCRIPTS_DIR=$(dirname "$RABBITMQ_ENV")
+. "$RABBITMQ_ENV"
+
ensure_pid_dir () {
PID_DIR=`dirname ${PID_FILE}`
if [ ! -d ${PID_DIR} ] ; then
@@ -62,8 +65,8 @@ start_rabbitmq () {
ensure_pid_dir
set +e
RABBITMQ_PID_FILE=$PID_FILE $START_PROG $DAEMON \
- > "${INIT_LOG_DIR}/startup_log" \
- 2> "${INIT_LOG_DIR}/startup_err" \
+ > "${RABBITMQ_LOG_BASE}/startup_log" \
+ 2> "${RABBITMQ_LOG_BASE}/startup_err" \
0<&- &
$CONTROL wait $PID_FILE >/dev/null 2>&1
RETVAL=$?
@@ -77,7 +80,7 @@ start_rabbitmq () {
;;
*)
remove_pid
- echo FAILED - check ${INIT_LOG_DIR}/startup_\{log, _err\}
+ echo FAILED - check ${RABBITMQ_LOG_BASE}/startup_\{log, _err\}
RETVAL=1
;;
esac
@@ -88,7 +91,9 @@ stop_rabbitmq () {
status_rabbitmq quiet
if [ $RETVAL = 0 ] ; then
set +e
- $CONTROL stop ${PID_FILE} > ${INIT_LOG_DIR}/shutdown_log 2> ${INIT_LOG_DIR}/shutdown_err
+ $CONTROL stop ${PID_FILE} \
+ > ${RABBITMQ_LOG_BASE}/shutdown_log \
+ 2> ${RABBITMQ_LOG_BASE}/shutdown_err
RETVAL=$?
set -e
if [ $RETVAL = 0 ] ; then
@@ -97,7 +102,7 @@ stop_rabbitmq () {
rm -f $LOCK_FILE
fi
else
- echo FAILED - check ${INIT_LOG_DIR}/shutdown_log, _err
+ echo FAILED - check ${RABBITMQ_LOG_BASE}/shutdown_log, _err
fi
else
echo RabbitMQ is not running
diff --git a/packaging/RPMS/Fedora/rabbitmq-server.spec b/packaging/RPMS/Fedora/rabbitmq-server.spec
index ce7a949bf3..7cf3177400 100644
--- a/packaging/RPMS/Fedora/rabbitmq-server.spec
+++ b/packaging/RPMS/Fedora/rabbitmq-server.spec
@@ -5,36 +5,28 @@ Version: %%VERSION%%
Release: 1%{?dist}
License: MPLv1.1 and MIT and ASL 2.0 and BSD
Group: %{group_tag}
-Source: http://www.rabbitmq.com/releases/rabbitmq-server/v%{version}/%{name}-%{version}.tar.gz
+Source: http://www.rabbitmq.com/releases/rabbitmq-server/v%{version}/%{name}-%{version}.tar.xz
Source1: rabbitmq-server.init
-Source2: rabbitmq-script-wrapper
-Source3: rabbitmq-server.logrotate
-Source4: rabbitmq-server.ocf
-Source5: README
-Source6: rabbitmq-server-ha.ocf
-Source7: set_rabbitmq_policy.sh
+Source2: rabbitmq-server.logrotate
URL: http://www.rabbitmq.com/
BuildArch: noarch
-BuildRequires: erlang >= R13B-03, python-simplejson, xmlto, libxslt, gzip, sed, zip
-Requires: erlang >= R13B-03, logrotate
+BuildRequires: erlang >= R16B-03, python-simplejson, xmlto, libxslt, gzip, sed, zip, rsync
+Requires: erlang >= R16B-03, logrotate
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-%{_arch}-root
Summary: The RabbitMQ server
Requires(post): %%REQUIRES%%
Requires(pre): %%REQUIRES%%
%description
-RabbitMQ is an implementation of AMQP, the emerging standard for high
-performance enterprise messaging. The RabbitMQ server is a robust and
-scalable implementation of an AMQP broker.
+RabbitMQ is an open source multi-protocol messaging broker.
# We want to install into /usr/lib, even on 64-bit platforms
%define _rabbit_libdir %{_exec_prefix}/lib/rabbitmq
%define _rabbit_erllibdir %{_rabbit_libdir}/lib/rabbitmq_server-%{version}
-%define _rabbit_wrapper %{_builddir}/`basename %{S:2}`
-%define _rabbit_server_ocf %{_builddir}/`basename %{S:4}`
+%define _rabbit_server_ocf scripts/rabbitmq-server.ocf
%define _plugins_state_dir %{_localstatedir}/lib/rabbitmq/plugins
-%define _rabbit_server_ha_ocf %{_builddir}/`basename %{S:6}`
-%define _set_rabbitmq_policy_sh %{_builddir}/`basename %{S:7}`
+%define _rabbit_server_ha_ocf scripts/rabbitmq-server-ha.ocf
+%define _set_rabbitmq_policy_sh scripts/set_rabbitmq_policy.sh
%define _maindir %{buildroot}%{_rabbit_erllibdir}
@@ -44,36 +36,38 @@ scalable implementation of an AMQP broker.
%setup -q
%build
-cp %{S:2} %{_rabbit_wrapper}
-cp %{S:4} %{_rabbit_server_ocf}
-cp %{S:5} %{_builddir}/rabbitmq-server-%{version}/README
-cp %{S:6} %{_rabbit_server_ha_ocf}
-cp %{S:7} %{_set_rabbitmq_policy_sh}
-make %{?_smp_mflags}
+cp -a docs/README-for-packages %{_builddir}/rabbitmq-server-%{version}/README
+make %{?_smp_mflags} dist manpages
%install
rm -rf %{buildroot}
-make install TARGET_DIR=%{_maindir} \
- SBIN_DIR=%{buildroot}%{_rabbit_libdir}/bin \
- MAN_DIR=%{buildroot}%{_mandir}
+make install install-bin install-man DESTDIR=%{buildroot} PREFIX=%{_exec_prefix} RMQ_ROOTDIR=%{_rabbit_libdir} MANDIR=%{_mandir}
mkdir -p %{buildroot}%{_localstatedir}/lib/rabbitmq/mnesia
mkdir -p %{buildroot}%{_localstatedir}/log/rabbitmq
#Copy all necessary lib files etc.
install -p -D -m 0755 %{S:1} %{buildroot}%{_initrddir}/rabbitmq-server
-install -p -D -m 0755 %{_rabbit_wrapper} %{buildroot}%{_sbindir}/rabbitmqctl
-install -p -D -m 0755 %{_rabbit_wrapper} %{buildroot}%{_sbindir}/rabbitmq-server
-install -p -D -m 0755 %{_rabbit_wrapper} %{buildroot}%{_sbindir}/rabbitmq-plugins
install -p -D -m 0755 %{_rabbit_server_ocf} %{buildroot}%{_exec_prefix}/lib/ocf/resource.d/rabbitmq/rabbitmq-server
install -p -D -m 0755 %{_rabbit_server_ha_ocf} %{buildroot}%{_exec_prefix}/lib/ocf/resource.d/rabbitmq/rabbitmq-server-ha
install -p -D -m 0644 %{_set_rabbitmq_policy_sh} %{buildroot}%{_exec_prefix}/lib/ocf/resource.d/rabbitmq/set_rabbitmq_policy.sh.example
-install -p -D -m 0644 %{S:3} %{buildroot}%{_sysconfdir}/logrotate.d/rabbitmq-server
+install -p -D -m 0644 %{S:2} %{buildroot}%{_sysconfdir}/logrotate.d/rabbitmq-server
mkdir -p %{buildroot}%{_sysconfdir}/rabbitmq
-rm %{_maindir}/LICENSE %{_maindir}/LICENSE-MPL-RabbitMQ %{_maindir}/INSTALL
+mkdir -p %{buildroot}%{_sbindir}
+sed -e 's|@SU_RABBITMQ_SH_C@|su rabbitmq -s /bin/sh -c|' \
+ -e 's|@STDOUT_STDERR_REDIRECTION@||' \
+ < scripts/rabbitmq-script-wrapper \
+ > %{buildroot}%{_sbindir}/rabbitmqctl
+chmod 0755 %{buildroot}%{_sbindir}/rabbitmqctl
+for script in rabbitmq-server rabbitmq-plugins; do \
+ cp -a %{buildroot}%{_sbindir}/rabbitmqctl \
+ %{buildroot}%{_sbindir}/$script; \
+done
+
+rm %{_maindir}/LICENSE* %{_maindir}/INSTALL
#Build the list of files
echo '%defattr(-,root,root, -)' >%{_builddir}/%{name}.files
@@ -136,6 +130,9 @@ done
rm -rf %{buildroot}
%changelog
+* Tue Dec 22 2015 michael@rabbitmq.com 3.6.0-1
+- New Upstream Release
+
* Tue Dec 15 2015 michael@rabbitmq.com 3.5.7-1
- New Upstream Release
diff --git a/packaging/debs/Debian/.gitignore b/packaging/debs/Debian/.gitignore
new file mode 100644
index 0000000000..6a4aec11b5
--- /dev/null
+++ b/packaging/debs/Debian/.gitignore
@@ -0,0 +1,3 @@
+/debian/postrm
+/debian/stamp-makefile-build
+/rabbitmq-server_*
diff --git a/packaging/debs/Debian/Makefile b/packaging/debs/Debian/Makefile
index bbebad3604..df01eee588 100644
--- a/packaging/debs/Debian/Makefile
+++ b/packaging/debs/Debian/Makefile
@@ -1,42 +1,57 @@
-TARBALL_DIR=../../../dist
-TARBALL=$(notdir $(wildcard $(TARBALL_DIR)/rabbitmq-server-[0-9.]*.tar.gz))
-COMMON_DIR=../../common
-VERSION=$(shell echo $(TARBALL) | sed -e 's:rabbitmq-server-\(.*\)\.tar\.gz:\1:g')
+SOURCE_DIST_FILE ?= $(wildcard ../../../rabbitmq-server-*.tar.xz)
-DEBIAN_ORIG_TARBALL=$(shell echo $(TARBALL) | sed -e 's:\(.*\)-\(.*\)\(\.tar\.gz\):\1_\2\.orig\3:g')
-UNPACKED_DIR=rabbitmq-server-$(VERSION)
-PACKAGENAME=rabbitmq-server
-SIGNING_KEY_ID=056E8E56
+ifneq ($(filter-out clean,$(MAKECMDGOALS)),)
+ifeq ($(SOURCE_DIST_FILE),)
+$(error Cannot find source archive; please specify SOURCE_DIST_FILE)
+endif
+ifneq ($(words $(SOURCE_DIST_FILE)),1)
+$(error Multiple source archives found; please specify SOURCE_DIST_FILE)
+endif
+
+VERSION ?= $(patsubst rabbitmq-server-%.tar.xz,%,$(notdir $(SOURCE_DIST_FILE)))
+ifeq ($(VERSION),)
+$(error Cannot determine version; please specify VERSION)
+endif
+endif
+
+DEBIAN_ORIG_TARBALL = rabbitmq-server_$(VERSION).orig.tar.xz
+UNPACKED_DIR = rabbitmq-server-$(VERSION)
+PACKAGENAME = rabbitmq-server
ifneq "$(UNOFFICIAL_RELEASE)" ""
SIGNING=-us -uc
else
- SIGNING=-k$(SIGNING_KEY_ID)
+ SIGNING=-k$(SIGNING_KEY)
endif
-all:
- @echo 'Please choose a target from the Makefile.'
+unexport DEPS_DIR
+unexport ERL_LIBS
+
+all: package
+ @:
package: clean
- cp $(TARBALL_DIR)/$(TARBALL) $(DEBIAN_ORIG_TARBALL)
- tar -zxf $(DEBIAN_ORIG_TARBALL)
- cp -r debian $(UNPACKED_DIR)
- cp $(COMMON_DIR)/* $(UNPACKED_DIR)/debian/
- sed -i -e 's|@SU_RABBITMQ_SH_C@|su rabbitmq -s /bin/sh -c|' \
- -e 's|@STDOUT_STDERR_REDIRECTION@| > "/var/log/rabbitmq/startup_log" 2> "/var/log/rabbitmq/startup_err"|' \
- $(UNPACKED_DIR)/debian/rabbitmq-script-wrapper
- chmod a+x $(UNPACKED_DIR)/debian/rules
- echo "This package was debianized by Tony Garnock-Jones <tonyg@rabbitmq.com> on\nWed, 3 Jan 2007 15:43:44 +0000.\n\nIt was downloaded from http://www.rabbitmq.com/\n\n" > $(UNPACKED_DIR)/debian/copyright
- cat $(UNPACKED_DIR)/LICENSE >> $(UNPACKED_DIR)/debian/copyright
- echo "\n\nThe Debian packaging is (C) 2007-2013, GoPivotal, Inc. and is licensed\nunder the MPL 1.1, see above.\n" >> $(UNPACKED_DIR)/debian/copyright
+ cp -a $(SOURCE_DIST_FILE) $(DEBIAN_ORIG_TARBALL)
+ xzcat $(DEBIAN_ORIG_TARBALL) | tar -xf -
+ cp -a debian $(UNPACKED_DIR)
+ rsync -a \
+ --exclude '.sw?' --exclude '.*.sw?' \
+ --exclude '.git*' \
+ --delete --delete-excluded \
+ debian/ $(UNPACKED_DIR)/debian/
UNOFFICIAL_RELEASE=$(UNOFFICIAL_RELEASE) VERSION=$(VERSION) ./check-changelog.sh rabbitmq-server $(UNPACKED_DIR)
- cd $(UNPACKED_DIR); GNUPGHOME=$(GNUPG_PATH)/.gnupg dpkg-buildpackage -sa -rfakeroot $(SIGNING)
+ cd $(UNPACKED_DIR); GNUPGHOME=$(GNUPG_PATH)/.gnupg dpkg-buildpackage -sa $(SIGNING)
rm -rf $(UNPACKED_DIR)
+ if test "$(PACKAGES_DIR)"; then \
+ mkdir -p "$(PACKAGES_DIR)"; \
+ mv $(PACKAGENAME)_$(VERSION)* "$(PACKAGES_DIR)"; \
+ fi
+
clean:
rm -rf $(UNPACKED_DIR)
- rm -f $(PACKAGENAME)_*.tar.gz
- rm -f $(PACKAGENAME)_*.diff.gz
+ rm -f $(DEBIAN_ORIG_TARBALL)
+ rm -f $(PACKAGENAME)_*.debian.tar.gz
rm -f $(PACKAGENAME)_*.dsc
rm -f $(PACKAGENAME)_*_*.changes
rm -f $(PACKAGENAME)_*_*.deb
diff --git a/packaging/debs/Debian/debian/changelog b/packaging/debs/Debian/debian/changelog
index c19b98579f..372afa8258 100644
--- a/packaging/debs/Debian/debian/changelog
+++ b/packaging/debs/Debian/debian/changelog
@@ -1,3 +1,9 @@
+rabbitmq-server (3.6.0-1) unstable; urgency=low
+
+ * New Upstream Release
+
+ -- Michael Klishin <michael@rabbitmq.com> Tue, 22 Dec 2015 13:21:56 +0000
+
rabbitmq-server (3.5.7-1) unstable; urgency=low
* New Upstream Release
diff --git a/packaging/debs/Debian/debian/compat b/packaging/debs/Debian/debian/compat
index 7ed6ff82de..ec635144f6 100644
--- a/packaging/debs/Debian/debian/compat
+++ b/packaging/debs/Debian/debian/compat
@@ -1 +1 @@
-5
+9
diff --git a/packaging/debs/Debian/debian/control b/packaging/debs/Debian/debian/control
index 9075258340..56acaa948e 100644
--- a/packaging/debs/Debian/debian/control
+++ b/packaging/debs/Debian/debian/control
@@ -2,13 +2,23 @@ Source: rabbitmq-server
Section: net
Priority: extra
Maintainer: RabbitMQ Team <info@rabbitmq.com>
-Uploaders: RabbitMQ Team <info@rabbitmq.com>
-Build-Depends: cdbs, debhelper (>= 5), erlang-dev, python-simplejson, xmlto, xsltproc, erlang-nox (>= 1:13.b.3), erlang-src (>= 1:13.b.3), unzip, zip
-Standards-Version: 3.9.2
+Uploaders: Alvaro Videla <alvaro@rabbitmq.com>,
+ Michael Klishin <michael@rabbitmq.com>,
+ Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>,
+ Giuseppe Privitera <giuseppe@rabbitmq.com>
+Build-Depends: debhelper (>= 9),
+ erlang-dev,
+ python-simplejson,
+ xmlto,
+ xsltproc,
+ erlang-nox (>= 1:16.b.3),
+ zip,
+ rsync
+Standards-Version: 3.9.4
Package: rabbitmq-server
Architecture: all
-Depends: erlang-nox (>= 1:13.b.3) | esl-erlang, adduser, logrotate, ${misc:Depends}
+Depends: erlang-nox (>= 1:16.b.3) | esl-erlang, adduser, logrotate, ${misc:Depends}
Description: Multi-protocol messaging broker
RabbitMQ is an open source multi-protocol messaging broker.
Homepage: http://www.rabbitmq.com/
diff --git a/packaging/debs/Debian/debian/copyright b/packaging/debs/Debian/debian/copyright
new file mode 100644
index 0000000000..521b903754
--- /dev/null
+++ b/packaging/debs/Debian/debian/copyright
@@ -0,0 +1,52 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: rabbitmq-server
+Upstream-Contact: Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+Source: https://github.com/rabbitmq/rabbitmq-server
+
+Files: *
+Copyright: 2007-2015 Pivotal Software, Inc.
+License: MPL-1.1
+
+Files: src/mochinum.erl deps/rabbit_common/src/mochijson2.erl
+Copyright: 2007 Mochi Media, Inc.
+License: MIT
+
+License: MPL-1.1
+ The contents of this file are subject to the Mozilla Public License
+ Version 1.1 (the "License"); you may not use this file except in
+ compliance with the License. You may obtain a copy of the License at
+ http://www.mozilla.org/MPL/
+ .
+ Software distributed under the License is distributed on an "AS IS"
+ basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ the License for the specific language governing rights and limitations
+ under the License
+ .
+ The Original Code is RabbitMQ
+ .
+ The Initial Developer of the Original Code is Pivotal Software, Inc.
+ Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
+
+License: MIT
+ This is the MIT license
+ .
+ Copyright (c) 2007 Mochi Media, Inc
+ .
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions
+ :
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software
+ .
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/packaging/debs/Debian/debian/rabbitmq-server.docs b/packaging/debs/Debian/debian/rabbitmq-server.docs
new file mode 100644
index 0000000000..40d4f2dc81
--- /dev/null
+++ b/packaging/debs/Debian/debian/rabbitmq-server.docs
@@ -0,0 +1 @@
+docs/rabbitmq.config.example
diff --git a/packaging/debs/Debian/debian/rabbitmq-server.init b/packaging/debs/Debian/debian/rabbitmq-server.init
index 1019ea91c8..fce2d16401 100644
--- a/packaging/debs/Debian/debian/rabbitmq-server.init
+++ b/packaging/debs/Debian/debian/rabbitmq-server.init
@@ -23,9 +23,8 @@ CONTROL=/usr/sbin/rabbitmqctl
DESC="message broker"
USER=rabbitmq
ROTATE_SUFFIX=
-INIT_LOG_DIR=/var/log/rabbitmq
PID_FILE=/var/run/rabbitmq/pid
-
+RABBITMQ_ENV=/usr/lib/rabbitmq/bin/rabbitmq-env
test -x $DAEMON || exit 0
test -x $CONTROL || exit 0
@@ -35,6 +34,9 @@ set -e
[ -f /etc/default/${NAME} ] && . /etc/default/${NAME}
+RABBITMQ_SCRIPTS_DIR=$(dirname "$RABBITMQ_ENV")
+. "$RABBITMQ_ENV"
+
. /lib/lsb/init-functions
. /lib/init/vars.sh
@@ -76,7 +78,9 @@ stop_rabbitmq () {
status_rabbitmq quiet
if [ $RETVAL = 0 ] ; then
set +e
- $CONTROL stop ${PID_FILE} > ${INIT_LOG_DIR}/shutdown_log 2> ${INIT_LOG_DIR}/shutdown_err
+ $CONTROL stop ${PID_FILE} \
+ > ${RABBITMQ_LOG_BASE}/shutdown_log \
+ 2> ${RABBITMQ_LOG_BASE}/shutdown_err
RETVAL=$?
set -e
if [ $RETVAL = 0 ] ; then
@@ -143,7 +147,7 @@ start_stop_end() {
RETVAL=0
;;
*)
- log_warning_msg "FAILED - check ${INIT_LOG_DIR}/startup_\{log, _err\}"
+ log_warning_msg "FAILED - check ${RABBITMQ_LOG_BASE}/startup_\{log, _err\}"
log_end_msg 1
;;
esac
diff --git a/packaging/debs/Debian/debian/rabbitmq-server.manpages b/packaging/debs/Debian/debian/rabbitmq-server.manpages
new file mode 100644
index 0000000000..e0220b47c3
--- /dev/null
+++ b/packaging/debs/Debian/debian/rabbitmq-server.manpages
@@ -0,0 +1,4 @@
+docs/rabbitmq-env.conf.5
+docs/rabbitmq-plugins.1
+docs/rabbitmq-server.1
+docs/rabbitmqctl.1
diff --git a/packaging/debs/Debian/debian/rules b/packaging/debs/Debian/debian/rules
index 37bf5f8a6c..669e30c177 100644..100755
--- a/packaging/debs/Debian/debian/rules
+++ b/packaging/debs/Debian/debian/rules
@@ -1,27 +1,59 @@
#!/usr/bin/make -f
+# -*- makefile -*-
-include /usr/share/cdbs/1/rules/debhelper.mk
-include /usr/share/cdbs/1/class/makefile.mk
+# Uncomment this to turn on verbose mode.
+#export DH_VERBOSE=1
-RABBIT_LIB=$(DEB_DESTDIR)usr/lib/rabbitmq/lib/rabbitmq_server-$(DEB_UPSTREAM_VERSION)/
-RABBIT_BIN=$(DEB_DESTDIR)usr/lib/rabbitmq/bin/
+DEB_DESTDIR = debian/rabbitmq-server
+VERSION = $(shell dpkg-parsechangelog | awk '/^Version:/ {version=$$0; sub(/Version: /, "", version); sub(/-.*/, "", version); print version;}')
-DOCDIR=$(DEB_DESTDIR)usr/share/doc/rabbitmq-server/
-DEB_MAKE_INSTALL_TARGET := install TARGET_DIR=$(RABBIT_LIB) SBIN_DIR=$(RABBIT_BIN) DOC_INSTALL_DIR=$(DOCDIR) MAN_DIR=$(DEB_DESTDIR)usr/share/man/
-DEB_MAKE_CLEAN_TARGET:= distclean
-DEB_INSTALL_DOCS_ALL=debian/README
+%:
+ dh $@ --parallel
-install/rabbitmq-server::
- mkdir -p $(DOCDIR)
- rm $(RABBIT_LIB)LICENSE* $(RABBIT_LIB)INSTALL*
- for script in rabbitmqctl rabbitmq-server rabbitmq-plugins; do \
- install -p -D -m 0755 debian/rabbitmq-script-wrapper $(DEB_DESTDIR)usr/sbin/$$script; \
+override_dh_auto_clean:
+ $(MAKE) clean distclean-manpages
+
+override_dh_auto_build:
+ $(MAKE) dist manpages
+
+override_dh_auto_test:
+ @:
+
+export PREFIX RMQ_ROOTDIR
+
+override_dh_auto_install: PREFIX = /usr
+override_dh_auto_install: RMQ_ROOTDIR = $(PREFIX)/lib/rabbitmq
+override_dh_auto_install: RMQ_ERLAPP_DIR = $(RMQ_ROOTDIR)/lib/rabbitmq_server-$(VERSION)
+override_dh_auto_install:
+ dh_auto_install
+
+ $(MAKE) install-bin DESTDIR=$(DEB_DESTDIR)
+
+ sed -e 's|@RABBIT_LIB@|$(RMQ_ERLAPP_DIR)|g' \
+ < debian/postrm.in > debian/postrm
+
+ sed -e 's|@SU_RABBITMQ_SH_C@|su rabbitmq -s /bin/sh -c|' \
+ -e 's|@STDOUT_STDERR_REDIRECTION@|> "$$RABBITMQ_LOG_BASE/startup_log" 2> "$$RABBITMQ_LOG_BASE/startup_err"|' \
+ < scripts/rabbitmq-script-wrapper \
+ > $(DEB_DESTDIR)$(PREFIX)/sbin/rabbitmqctl
+ chmod 0755 $(DEB_DESTDIR)$(PREFIX)/sbin/rabbitmqctl
+ for script in rabbitmq-server rabbitmq-plugins; do \
+ cp -a $(DEB_DESTDIR)$(PREFIX)/sbin/rabbitmqctl \
+ $(DEB_DESTDIR)$(PREFIX)/sbin/$$script; \
done
- sed -e 's|@RABBIT_LIB@|/usr/lib/rabbitmq/lib/rabbitmq_server-$(DEB_UPSTREAM_VERSION)|g' <debian/postrm.in >debian/postrm
- install -p -D -m 0755 debian/rabbitmq-server.ocf $(DEB_DESTDIR)usr/lib/ocf/resource.d/rabbitmq/rabbitmq-server
- install -p -D -m 0644 debian/set_rabbitmq_policy.sh $(DEB_DESTDIR)usr/lib/ocf/resource.d/rabbitmq/set_rabbitmq_policy.sh.example
- install -p -D -m 0755 debian/rabbitmq-server-ha.ocf $(DEB_DESTDIR)usr/lib/ocf/resource.d/rabbitmq/rabbitmq-server-ha
- install -p -D -m 0644 debian/rabbitmq-server.default $(DEB_DESTDIR)etc/default/rabbitmq-server
-
-clean::
- rm -f plugins-src/rabbitmq-server debian/postrm plugins/README
+
+ install -p -D -m 0644 debian/rabbitmq-server.default \
+ $(DEB_DESTDIR)/etc/default/rabbitmq-server
+
+ install -p -D -m 0755 scripts/rabbitmq-server.ocf \
+ $(DEB_DESTDIR)$(PREFIX)/lib/ocf/resource.d/rabbitmq/rabbitmq-server
+ install -p -D -m 0755 scripts/rabbitmq-server-ha.ocf \
+ $(DEB_DESTDIR)$(PREFIX)/lib/ocf/resource.d/rabbitmq/rabbitmq-server-ha
+ install -p -D -m 0644 scripts/set_rabbitmq_policy.sh \
+ $(DEB_DESTDIR)$(PREFIX)/lib/ocf/resource.d/rabbitmq/set_rabbitmq_policy.sh.example
+
+ rm $(DEB_DESTDIR)$(RMQ_ERLAPP_DIR)/LICENSE* \
+ $(DEB_DESTDIR)$(RMQ_ERLAPP_DIR)/INSTALL
+
+ rmdir $(DEB_DESTDIR)$(PREFIX)/lib/erlang/lib \
+ $(DEB_DESTDIR)$(PREFIX)/lib/erlang
diff --git a/packaging/debs/Debian/debian/source/format b/packaging/debs/Debian/debian/source/format
new file mode 100644
index 0000000000..163aaf8d82
--- /dev/null
+++ b/packaging/debs/Debian/debian/source/format
@@ -0,0 +1 @@
+3.0 (quilt)
diff --git a/packaging/debs/apt-repository/Makefile b/packaging/debs/apt-repository/Makefile
index ce4347bcb4..bbddc15a4e 100644
--- a/packaging/debs/apt-repository/Makefile
+++ b/packaging/debs/apt-repository/Makefile
@@ -1,27 +1,30 @@
-SIGNING_USER_EMAIL=info@rabbitmq.com
+PACKAGES_DIR ?= ../../../PACKAGES
+REPO_DIR ?= debian
+
+SIGNING_USER_EMAIL ?= info@rabbitmq.com
ifeq "$(UNOFFICIAL_RELEASE)" ""
-HOME_ARG=HOME=$(GNUPG_PATH)
+HOME_ARG = HOME=$(GNUPG_PATH)
endif
all: debian_apt_repository
clean:
- rm -rf debian
+ rm -rf $(REPO_DIR)
CAN_HAS_REPREPRO=$(shell [ -f /usr/bin/reprepro ] && echo true)
ifeq ($(CAN_HAS_REPREPRO), true)
debian_apt_repository: clean
- mkdir -p debian/conf
- cp -a distributions debian/conf
+ mkdir -p $(REPO_DIR)/conf
+ cp -a distributions $(REPO_DIR)/conf
ifeq "$(UNOFFICIAL_RELEASE)" ""
- echo SignWith: $(SIGNING_USER_EMAIL) >> debian/conf/distributions
+ echo SignWith: $(SIGNING_USER_EMAIL) >> $(REPO_DIR)/conf/distributions
endif
- for FILE in ../Debian/*.changes ; do \
+ for FILE in $(PACKAGES_DIR)/*.changes ; do \
$(HOME_ARG) reprepro --ignore=wrongdistribution \
- -Vb debian include kitten $${FILE} ; \
+ -Vb $(REPO_DIR) include kitten $${FILE} ; \
done
- reprepro -Vb debian createsymlinks
+ reprepro -Vb $(REPO_DIR) createsymlinks
else
debian_apt_repository:
@echo Not building APT repository as reprepro could not be found
diff --git a/packaging/generic-unix/Makefile b/packaging/generic-unix/Makefile
index ddad8c09fa..66aeff9071 100644
--- a/packaging/generic-unix/Makefile
+++ b/packaging/generic-unix/Makefile
@@ -1,17 +1,39 @@
-VERSION=0.0.0
-SOURCE_DIR=rabbitmq-server-$(VERSION)
-TARGET_DIR=rabbitmq_server-$(VERSION)
-TARGET_TARBALL=rabbitmq-server-generic-unix-$(VERSION)
+SOURCE_DIST_FILE ?= $(wildcard ../../../rabbitmq-server-*.tar.xz)
+
+ifneq ($(filter-out clean,$(MAKECMDGOALS)),)
+ifeq ($(SOURCE_DIST_FILE),)
+$(error Cannot find source archive; please specify SOURCE_DIST_FILE)
+endif
+ifneq ($(words $(SOURCE_DIST_FILE)),1)
+$(error Multiple source archives found; please specify SOURCE_DIST_FILE)
+endif
+
+VERSION ?= $(patsubst rabbitmq-server-%.tar.xz,%,$(notdir $(SOURCE_DIST_FILE)))
+ifeq ($(VERSION),)
+$(error Cannot determine version; please specify VERSION)
+endif
+endif
+
+SOURCE_DIR = rabbitmq-server-$(VERSION)
+TARGET_DIR = rabbitmq_server-$(VERSION)
+TARGET_TARBALL = rabbitmq-server-generic-unix-$(VERSION)
+
+unexport DEPS_DIR
+unexport ERL_LIBS
+
+all: dist
+ @:
dist:
- tar -zxf ../../dist/$(SOURCE_DIR).tar.gz
+ xzcat $(SOURCE_DIST_FILE) | tar -xf -
+# web-manpages are not used by generic-unix but by `make release` in the
+# Umbrella. Those manpages are copied to www.rabbitmq.com
$(MAKE) -C $(SOURCE_DIR) \
- TARGET_DIR=`pwd`/$(TARGET_DIR) \
- SBIN_DIR=`pwd`/$(TARGET_DIR)/sbin \
- MAN_DIR=`pwd`/$(TARGET_DIR)/share/man \
- DOC_INSTALL_DIR=`pwd`/$(TARGET_DIR)/etc/rabbitmq \
- install
+ PREFIX= RMQ_ROOTDIR= \
+ RMQ_ERLAPP_DIR=`pwd`/$(TARGET_DIR) \
+ MANDIR=`pwd`/$(TARGET_DIR)/share/man \
+ manpages web-manpages install install-man
sed -e 's:^SYS_PREFIX=$$:SYS_PREFIX=\$${RABBITMQ_HOME}:' \
$(TARGET_DIR)/sbin/rabbitmq-defaults >$(TARGET_DIR)/sbin/rabbitmq-defaults.tmp \
@@ -20,12 +42,17 @@ dist:
mkdir -p $(TARGET_DIR)/etc/rabbitmq
- tar -zcf $(TARGET_TARBALL).tar.gz $(TARGET_DIR)
- rm -rf $(SOURCE_DIR) $(TARGET_DIR)
+ find $(TARGET_DIR) -print0 | LC_COLLATE=C sort -z | \
+ xargs -0 tar --no-recursion -cf - | \
+ xz > $(CURDIR)/$(TARGET_TARBALL).tar.xz
+
+ if test "$(PACKAGES_DIR)"; then \
+ mkdir -p "$(PACKAGES_DIR)"; \
+ mv $(TARGET_TARBALL).tar.xz "$(PACKAGES_DIR)"; \
+ fi
clean: clean_partial
- rm -f rabbitmq-server-generic-unix-*.tar.gz
+ rm -f rabbitmq-server-generic-unix-*.tar.xz
clean_partial:
- rm -rf $(SOURCE_DIR)
- rm -rf $(TARGET_DIR)
+ rm -rf rabbitmq-server-* rabbitmq_server-*
diff --git a/packaging/macports/Makefile b/packaging/macports/Makefile
deleted file mode 100644
index 897fc18327..0000000000
--- a/packaging/macports/Makefile
+++ /dev/null
@@ -1,58 +0,0 @@
-TARBALL_SRC_DIR=../../dist
-TARBALL_BIN_DIR=../../packaging/generic-unix/
-TARBALL_SRC=$(wildcard $(TARBALL_SRC_DIR)/rabbitmq-server-[0-9.]*.tar.gz)
-TARBALL_BIN=$(wildcard $(TARBALL_BIN_DIR)/rabbitmq-server-generic-unix-[0-9.]*.tar.gz)
-COMMON_DIR=../common
-VERSION=$(shell echo $(TARBALL_SRC) | sed -e 's:rabbitmq-server-\(.*\)\.tar\.gz:\1:g')
-
-# The URL at which things really get deployed
-REAL_WEB_URL=http://www.rabbitmq.com/
-
-# The user@host for an OSX machine with macports installed, which is
-# used to generate the macports index files. That step will be
-# skipped if this variable is not set. If you do set it, you might
-# also want to set SSH_OPTS, which allows adding ssh options, e.g. to
-# specify a key that will get into the OSX machine without a
-# passphrase.
-MACPORTS_USERHOST=
-
-MACPORTS_DIR=macports
-DEST=$(MACPORTS_DIR)/net/rabbitmq-server
-
-all: macports
-
-dirs:
- mkdir -p $(DEST)/files
-
-$(DEST)/Portfile: Portfile.in
- ./make-checksums.sh $(TARBALL_SRC) $(TARBALL_BIN) > checksums.sed
- sed -e "s|@VERSION@|$(VERSION)|g;s|@BASE_URL@|$(REAL_WEB_URL)|g" \
- -f checksums.sed <$^ >$@
- rm checksums.sed
-
-# The purpose of the intricate substitution below is to set up similar
-# environment vars to the ones that su will on Linux. On OS X, we
-# have to use the -m option to su in order to be able to set the shell
-# (which for the rabbitmq user would otherwise be /dev/null). But the
-# -m option means that *all* environment vars get preserved. Erlang
-# needs vars such as HOME to be set. So we have to set them
-# explicitly.
-macports: dirs $(DEST)/Portfile
- sed -e 's|@SU_RABBITMQ_SH_C@|SHELL=/bin/sh HOME=/var/lib/rabbitmq USER=rabbitmq LOGNAME=rabbitmq PATH="$$(eval `PATH=@MACPORTS_PREFIX@/bin /usr/libexec/path_helper -s`; echo $$PATH)" su -m rabbitmq -c|' \
- $(COMMON_DIR)/rabbitmq-script-wrapper >$(DEST)/files/rabbitmq-script-wrapper
- cp patch-org.macports.rabbitmq-server.plist.diff $(DEST)/files
- if [ -n "$(MACPORTS_USERHOST)" ] ; then \
- tar cf - -C $(MACPORTS_DIR) . | ssh $(SSH_OPTS) $(MACPORTS_USERHOST) ' \
- d="/tmp/mkportindex.$$$$" ; \
- mkdir $$d \
- && cd $$d \
- && tar xf - \
- && /opt/local/bin/portindex -a -o . >/dev/null \
- && tar cf - . \
- && cd \
- && rm -rf $$d' \
- | tar xf - -C $(MACPORTS_DIR) ; \
- fi
-
-clean:
- rm -rf $(MACPORTS_DIR) checksums.sed
diff --git a/packaging/macports/Portfile.in b/packaging/macports/Portfile.in
deleted file mode 100644
index 82c1fb0cac..0000000000
--- a/packaging/macports/Portfile.in
+++ /dev/null
@@ -1,123 +0,0 @@
-# -*- coding: utf-8; mode: tcl; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- vim:fenc=utf-8:filetype=tcl:et:sw=4:ts=4:sts=4
-# $Id$
-
-PortSystem 1.0
-name rabbitmq-server
-version @VERSION@
-categories net
-maintainers paperplanes.de:meyer openmaintainer
-platforms darwin
-supported_archs noarch
-
-description The RabbitMQ AMQP Server
-long_description \
- RabbitMQ is an implementation of AMQP, the emerging standard for \
- high performance enterprise messaging. The RabbitMQ server is a \
- robust and scalable implementation of an AMQP broker.
-
-
-homepage @BASE_URL@
-master_sites @BASE_URL@releases/rabbitmq-server/v${version}/
-
-distfiles ${name}-${version}${extract.suffix} \
- ${name}-generic-unix-${version}${extract.suffix}
-
-checksums \
- ${name}-${version}${extract.suffix} \
- sha1 @sha1-src@ \
- rmd160 @rmd160-src@ \
- ${name}-generic-unix-${version}${extract.suffix} \
- sha1 @sha1-bin@ \
- rmd160 @rmd160-bin@
-
-depends_lib port:erlang
-depends_build port:libxslt
-
-platform darwin 8 {
- depends_build-append port:py26-simplejson
- build.args PYTHON=${prefix}/bin/python2.6
-}
-platform darwin 9 {
- depends_build-append port:py26-simplejson
- build.args PYTHON=${prefix}/bin/python2.6
-}
-# no need for simplejson on Snow Leopard or higher
-
-
-set serveruser rabbitmq
-set servergroup rabbitmq
-set serverhome ${prefix}/var/lib/rabbitmq
-set logdir ${prefix}/var/log/rabbitmq
-set confdir ${prefix}/etc/rabbitmq
-set mnesiadbdir ${prefix}/var/lib/rabbitmq/mnesia
-set plistloc ${prefix}/etc/LaunchDaemons/org.macports.rabbitmq-server
-set sbindir ${destroot}${prefix}/lib/rabbitmq/bin
-set wrappersbin ${destroot}${prefix}/sbin
-set realsbin ${destroot}${prefix}/lib/rabbitmq/lib/rabbitmq_server-${version}/sbin
-set mansrc ${workpath}/rabbitmq_server-${version}/share/man
-set mandest ${destroot}${prefix}/share/man
-
-use_configure no
-
-use_parallel_build no
-
-build.env-append HOME=${workpath}
-
-build.env-append VERSION=${version}
-
-destroot.env-append VERSION=${version}
-
-destroot.target install_bin
-
-destroot.destdir \
- TARGET_DIR=${destroot}${prefix}/lib/rabbitmq/lib/rabbitmq_server-${version} \
- SBIN_DIR=${sbindir} \
- MAN_DIR=${destroot}${prefix}/share/man
-
-destroot.keepdirs \
- ${destroot}${confdir} \
- ${destroot}${logdir} \
- ${destroot}${mnesiadbdir}
-
-pre-destroot {
- addgroup ${servergroup}
- adduser ${serveruser} gid=[existsgroup ${servergroup}] realname=RabbitMQ\ Server home=${serverhome}
-}
-
-post-destroot {
- xinstall -d -m 775 ${destroot}${confdir}
- xinstall -d -g [existsgroup ${servergroup}] -m 775 ${destroot}${logdir}
- xinstall -d -g [existsgroup ${servergroup}] -m 775 ${destroot}${serverhome}
- xinstall -d -g [existsgroup ${servergroup}] -m 775 ${destroot}${mnesiadbdir}
-
- reinplace -E "s:^SYS_PREFIX=\${RABBITMQ_HOME}$:SYS_PREFIX=${prefix}:" \
- ${realsbin}/rabbitmq-defaults
- reinplace -E "s:^SYS_PREFIX=$:SYS_PREFIX=${prefix}:" \
- ${realsbin}/rabbitmq-defaults
-
- xinstall -m 555 ${filespath}/rabbitmq-script-wrapper \
- ${wrappersbin}/rabbitmq-server
- reinplace -E "s:@MACPORTS_PREFIX@:${prefix}:g" \
- ${wrappersbin}/rabbitmq-server
- reinplace -E "s:/usr/lib/rabbitmq/bin/:${prefix}/lib/rabbitmq/bin/:g" \
- ${wrappersbin}/rabbitmq-server
- reinplace -E "s:/var/lib/rabbitmq:${prefix}/var/lib/rabbitmq:g" \
- ${wrappersbin}/rabbitmq-server
-
- file copy ${wrappersbin}/rabbitmq-server ${wrappersbin}/rabbitmqctl
- file copy ${wrappersbin}/rabbitmq-server ${wrappersbin}/rabbitmq-plugins
-
- xinstall -m 644 -W ${mansrc}/man1 rabbitmq-server.1.gz rabbitmqctl.1.gz rabbitmq-plugins.1.gz \
- ${mandest}/man1/
- xinstall -m 644 -W ${mansrc}/man5 rabbitmq-env.conf.5.gz ${mandest}/man5/
-}
-
-pre-install {
- system "cd ${destroot}${plistloc}; patch <${filespath}/patch-org.macports.rabbitmq-server.plist.diff"
-}
-
-startupitem.create yes
-startupitem.init "PATH=${prefix}/bin:${prefix}/sbin:\$PATH; export PATH"
-startupitem.start "rabbitmq-server 2>&1"
-startupitem.stop "rabbitmqctl stop 2>&1"
-startupitem.logfile ${prefix}/var/log/rabbitmq/startupitem.log
diff --git a/packaging/macports/make-checksums.sh b/packaging/macports/make-checksums.sh
deleted file mode 100755
index 891de6ba65..0000000000
--- a/packaging/macports/make-checksums.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/bash
-# NB: this script requires bash
-tarball_src=$1
-tarball_bin=$2
-for type in src bin
-do
- tarball_var=tarball_${type}
- tarball=${!tarball_var}
- for algo in sha1 rmd160
- do
- checksum=$(openssl $algo ${tarball} | awk '{print $NF}')
- echo "s|@$algo-$type@|$checksum|g"
- done
-done
diff --git a/packaging/macports/make-port-diff.sh b/packaging/macports/make-port-diff.sh
deleted file mode 100755
index ac3afa4ee5..0000000000
--- a/packaging/macports/make-port-diff.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-
-# This script grabs the latest rabbitmq-server bits from the main
-# macports subversion repo, and from the rabbitmq.com macports repo,
-# and produces a diff from the former to the latter for submission
-# through the macports trac.
-
-set -e
-
-dir=/tmp/$(basename $0).$$
-mkdir -p $dir/macports $dir/rabbitmq
-
-# Get the files from the macports subversion repo
-cd $dir/macports
-svn checkout http://svn.macports.org/repository/macports/trunk/dports/net/rabbitmq-server/ 2>&1 >/dev/null
-
-# Clear out the svn $id tag from the Portfile (and avoid using -i)
-portfile=rabbitmq-server/Portfile
-sed -e 's|^# \$.*$|# $Id$|' ${portfile} > ${portfile}.new
-mv ${portfile}.new ${portfile}
-
-# Get the files from the rabbitmq.com macports repo
-cd ../rabbitmq
-curl -s http://www.rabbitmq.com/releases/macports/net/rabbitmq-server.tgz | tar xzf -
-
-cd ..
-diff -Naur --exclude=.svn macports rabbitmq
-cd /
-rm -rf $dir
diff --git a/packaging/macports/patch-org.macports.rabbitmq-server.plist.diff b/packaging/macports/patch-org.macports.rabbitmq-server.plist.diff
deleted file mode 100644
index 45b4949616..0000000000
--- a/packaging/macports/patch-org.macports.rabbitmq-server.plist.diff
+++ /dev/null
@@ -1,10 +0,0 @@
---- org.macports.rabbitmq-server.plist.old 2009-02-26 08:00:31.000000000 -0800
-+++ org.macports.rabbitmq-server.plist 2009-02-26 08:01:27.000000000 -0800
-@@ -22,6 +22,7 @@
- <string>;</string>
- <string>--pid=none</string>
- </array>
-+<key>UserName</key><string>rabbitmq</string>
- <key>Debug</key><false/>
- <key>Disabled</key><true/>
- <key>OnDemand</key><false/>
diff --git a/packaging/standalone/Makefile b/packaging/standalone/Makefile
index 903458836c..b86af40ca6 100644
--- a/packaging/standalone/Makefile
+++ b/packaging/standalone/Makefile
@@ -1,4 +1,19 @@
-VERSION=0.0.0
+SOURCE_DIST_FILE ?= $(wildcard ../../rabbitmq-server-*.tar.xz)
+
+ifneq ($(filter-out clean,$(MAKECMDGOALS)),)
+ifeq ($(SOURCE_DIST_FILE),)
+$(error Cannot find source archive; please specify SOURCE_DIST_FILE)
+endif
+ifneq ($(words $(SOURCE_DIST_FILE)),1)
+$(error Multiple source archives found; please specify SOURCE_DIST_FILE)
+endif
+
+VERSION ?= $(patsubst rabbitmq-server-%.tar.xz,%,$(notdir $(SOURCE_DIST_FILE)))
+ifeq ($(VERSION),)
+$(error Cannot determine version; please specify VERSION)
+endif
+endif
+
SOURCE_DIR=rabbitmq-server-$(VERSION)
TARGET_DIR=rabbitmq_server-$(VERSION)
TARGET_TARBALL=rabbitmq-server-$(OS)-standalone-$(VERSION)
@@ -18,15 +33,24 @@ RABBITMQ_DEFAULTS=$(TARGET_DIR)/sbin/rabbitmq-defaults
fix_defaults = sed -e $(1) $(RABBITMQ_DEFAULTS) > $(RABBITMQ_DEFAULTS).tmp \
&& mv $(RABBITMQ_DEFAULTS).tmp $(RABBITMQ_DEFAULTS)
+unexport DEPS_DIR
+unexport ERL_LIBS
+
+all: dist
+ @:
+
dist:
- tar -zxf ../../dist/$(SOURCE_DIR).tar.gz
+ rm -rf $(SOURCE_DIR) $(TARGET_DIR)
+ xzcat $(SOURCE_DIST_FILE) | tar -xf -
$(MAKE) -C $(SOURCE_DIR) \
- TARGET_DIR=`pwd`/$(TARGET_DIR) \
- SBIN_DIR=`pwd`/$(TARGET_DIR)/sbin \
- MAN_DIR=`pwd`/$(TARGET_DIR)/share/man \
- DOC_INSTALL_DIR=`pwd`/$(TARGET_DIR)/etc/rabbitmq \
- install
+ PREFIX= RMQ_ROOTDIR= \
+ RMQ_ERLAPP_DIR=$(abspath $(TARGET_DIR)) \
+ MANDIR=$(abspath $(TARGET_DIR))/share/man \
+ manpages install install-man
+
+ mkdir -p $(TARGET_DIR)/etc/rabbitmq
+ cp $(SOURCE_DIR)/docs/rabbitmq.config.example $(TARGET_DIR)/etc/rabbitmq
## Here we set the RABBITMQ_HOME variable,
## then we make ERL_DIR point to our released erl
@@ -66,16 +90,20 @@ dist:
# fix Erlang ROOTDIR
patch -o $(RLS_DIR)/erts-$(ERTS_VSN)/bin/erl $(RLS_DIR)/erts-$(ERTS_VSN)/bin/erl.src < erl.diff
+ rm -f $(RLS_DIR)/erts-$(ERTS_VSN)/bin/erl.orig
- tar -zcf $(TARGET_TARBALL).tar.gz -C $(TARGET_DIR)/release $(TARGET_DIR)
- rm -rf $(SOURCE_DIR) $(TARGET_DIR)
+ cd $(TARGET_DIR)/release && \
+ find $(TARGET_DIR) -print0 | LC_COLLATE=C sort -z | \
+ xargs -0 tar --no-recursion -cf - | \
+ xz > $(CURDIR)/$(TARGET_TARBALL).tar.xz
-clean: clean_partial
- rm -f rabbitmq-server-$(OS)-standalone-*.tar.gz
+ if test "$(PACKAGES_DIR)"; then \
+ mkdir -p "$(PACKAGES_DIR)"; \
+ mv $(TARGET_TARBALL).tar.xz "$(PACKAGES_DIR)"; \
+ fi
-clean_partial:
- rm -rf $(SOURCE_DIR)
- rm -rf $(TARGET_DIR)
+clean:
+ rm -rf rabbitmq-server-* rabbitmq_server-*
.PHONY : generate_release
generate_release:
@@ -83,6 +111,7 @@ generate_release:
-I $(TARGET_DIR)/include/ -o src -Wall \
-v +debug_info -Duse_specs -Duse_proper_qc \
-pa $(TARGET_DIR)/ebin/ src/rabbit_release.erl
+ ERL_LIBS="$(TARGET_DIR)/plugins:$$ERL_LIBS" \
erl \
-pa "$(RABBITMQ_EBIN_ROOT)" \
-pa src \
@@ -90,4 +119,5 @@ generate_release:
-hidden \
-s rabbit_release \
-extra "$(RABBITMQ_PLUGINS_DIR)" "$(RABBITMQ_PLUGINS_EXPAND_DIR)" "$(RABBITMQ_HOME)"
+ test -f $(RABBITMQ_HOME)/rabbit.tar.gz
rm src/rabbit_release.beam
diff --git a/packaging/standalone/erl.diff b/packaging/standalone/erl.diff
index c51bfe2213..13b7d328d6 100644
--- a/packaging/standalone/erl.diff
+++ b/packaging/standalone/erl.diff
@@ -1,5 +1,4 @@
-20c20,21
+21c21
< ROOTDIR="%FINAL_ROOTDIR%"
---
-> realpath() { [[ $1 = /* ]] && echo "$1" || echo "$PWD/${1#./}" ; }
-> ROOTDIR="$(dirname `realpath $0`)/../.."
+> ROOTDIR="$(cd $(dirname "$0") && pwd)/../.."
diff --git a/packaging/standalone/src/rabbit_release.erl b/packaging/standalone/src/rabbit_release.erl
index f5e1ecf8a1..9eed1a59fa 100644
--- a/packaging/standalone/src/rabbit_release.erl
+++ b/packaging/standalone/src/rabbit_release.erl
@@ -41,7 +41,7 @@ start() ->
add_plugins_to_path(UnpackedPluginDir),
PluginAppNames = [P#plugin.name ||
- P <- rabbit_plugins:list(PluginsDistDir)],
+ P <- rabbit_plugins:list(PluginsDistDir, false)],
%% Build the entire set of dependencies - this will load the
%% applications along the way
@@ -54,9 +54,15 @@ start() ->
end,
%% we need a list of ERTS apps we need to ship with rabbit
+ RabbitMQAppNames = [rabbit | [P#plugin.name ||
+ P <- rabbit_plugins:list(PluginsDistDir, true)]]
+ -- PluginAppNames,
{ok, SslAppsConfig} = application:get_env(rabbit, ssl_apps),
- BaseApps = SslAppsConfig ++ AllApps -- PluginAppNames,
+ BaseApps = lists:umerge([
+ lists:sort(RabbitMQAppNames),
+ lists:sort(SslAppsConfig),
+ lists:sort(AllApps -- PluginAppNames)]),
AppVersions = [determine_version(App) || App <- BaseApps],
RabbitVersion = proplists:get_value(rabbit, AppVersions),
@@ -109,7 +115,7 @@ prepare_plugins(PluginsDistDir, DestDir) ->
end,
[prepare_plugin(Plugin, DestDir) ||
- Plugin <- rabbit_plugins:list(PluginsDistDir)].
+ Plugin <- rabbit_plugins:list(PluginsDistDir, true)].
prepare_plugin(#plugin{type = ez, location = Location}, PluginDestDir) ->
zip:unzip(Location, [{cwd, PluginDestDir}]);
diff --git a/packaging/windows-exe/Makefile b/packaging/windows-exe/Makefile
index ab50e30b1d..26ef4585c3 100644
--- a/packaging/windows-exe/Makefile
+++ b/packaging/windows-exe/Makefile
@@ -1,16 +1,33 @@
-VERSION=0.0.0
-ZIP=../windows/rabbitmq-server-windows-$(VERSION)
+ifeq ($(PACKAGES_DIR),)
+ZIP_DIR = ../windows
+else
+ZIP_DIR = $(PACKAGES_DIR)
+endif
+ZIP = $(notdir $(wildcard $(ZIP_DIR)/rabbitmq-server-windows-*.zip))
+
+VERSION = $(patsubst rabbitmq-server-windows-%.zip,%,$(ZIP))
+
+unexport DEPS_DIR
+unexport ERL_LIBS
+
+all: dist
+ @:
dist: rabbitmq-$(VERSION).nsi rabbitmq_server-$(VERSION)
makensis -V2 rabbitmq-$(VERSION).nsi
+ if test "$(PACKAGES_DIR)"; then \
+ mkdir -p "$(PACKAGES_DIR)"; \
+ mv rabbitmq-server-$(VERSION).exe "$(PACKAGES_DIR)"; \
+ fi
+
rabbitmq-$(VERSION).nsi: rabbitmq_nsi.in
sed \
-e 's|%%VERSION%%|$(VERSION)|' \
$< > $@
rabbitmq_server-$(VERSION):
- unzip -q $(ZIP)
+ unzip -q $(ZIP_DIR)/$(ZIP)
clean:
rm -rf rabbitmq-*.nsi rabbitmq_server-* rabbitmq-server-*.exe
diff --git a/packaging/windows-exe/plugins/ExecDos.dll b/packaging/windows-exe/plugins/ExecDos.dll
new file mode 100644
index 0000000000..0d8a871a9d
--- /dev/null
+++ b/packaging/windows-exe/plugins/ExecDos.dll
Binary files differ
diff --git a/packaging/windows-exe/rabbitmq_nsi.in b/packaging/windows-exe/rabbitmq_nsi.in
index 153ff0ef93..3c868b91e7 100644
--- a/packaging/windows-exe/rabbitmq_nsi.in
+++ b/packaging/windows-exe/rabbitmq_nsi.in
@@ -4,11 +4,65 @@
!include WinMessages.nsh
!include FileFunc.nsh
!include WordFunc.nsh
+!include x64.nsh
+
+!addplugindir plugins
!define env_hklm 'HKLM "SYSTEM\CurrentControlSet\Control\Session Manager\Environment"'
!define uninstall "Software\Microsoft\Windows\CurrentVersion\Uninstall\RabbitMQ"
;--------------------------------
+; Third-party functions
+; StrContains
+; This function does a case-sensitive search for an occurrence of a substring in a string.
+; It returns the substring if it is found.
+; Otherwise it returns null("").
+; Written by kenglish_hi
+; Adapted from StrReplace written by dandaman32
+
+
+Var STR_HAYSTACK
+Var STR_NEEDLE
+Var STR_CONTAINS_VAR_1
+Var STR_CONTAINS_VAR_2
+Var STR_CONTAINS_VAR_3
+Var STR_CONTAINS_VAR_4
+Var STR_RETURN_VAR
+
+Function un.StrContains
+ Exch $STR_NEEDLE
+ Exch 1
+ Exch $STR_HAYSTACK
+ ; Uncomment to debug
+ ;MessageBox MB_OK 'STR_NEEDLE = $STR_NEEDLE STR_HAYSTACK = $STR_HAYSTACK '
+ StrCpy $STR_RETURN_VAR ""
+ StrCpy $STR_CONTAINS_VAR_1 -1
+ StrLen $STR_CONTAINS_VAR_2 $STR_NEEDLE
+ StrLen $STR_CONTAINS_VAR_4 $STR_HAYSTACK
+ loop:
+ IntOp $STR_CONTAINS_VAR_1 $STR_CONTAINS_VAR_1 + 1
+ StrCpy $STR_CONTAINS_VAR_3 $STR_HAYSTACK $STR_CONTAINS_VAR_2 $STR_CONTAINS_VAR_1
+ StrCmp $STR_CONTAINS_VAR_3 $STR_NEEDLE found
+ StrCmp $STR_CONTAINS_VAR_1 $STR_CONTAINS_VAR_4 done
+ Goto loop
+ found:
+ StrCpy $STR_RETURN_VAR $STR_NEEDLE
+ Goto done
+ done:
+ Pop $STR_NEEDLE ;Prevent "invalid opcode" errors and keep the
+ Exch $STR_RETURN_VAR
+FunctionEnd
+
+!macro _un.StrContainsConstructor OUT NEEDLE HAYSTACK
+ Push `${HAYSTACK}`
+ Push `${NEEDLE}`
+ Call un.StrContains
+ Pop `${OUT}`
+!macroend
+
+!define un.StrContains '!insertmacro "_un.StrContainsConstructor"'
+
+;--------------------------------
; The name of the installer
Name "RabbitMQ Server %%VERSION%%"
@@ -19,8 +73,10 @@ OutFile "rabbitmq-server-%%VERSION%%.exe"
; Icons
!define MUI_ICON "rabbitmq.ico"
-; The default installation directory
-InstallDir "$PROGRAMFILES\RabbitMQ Server"
+; The default installation directory is empty. The .onInit function
+; below takes care of selecting the appropriate (32-bit vs. 64-bit)
+; "Program Files".
+InstallDir ""
; Registry key to check for directory (so if you install again, it will
; overwrite the old one automatically)
@@ -31,16 +87,6 @@ RequestExecutionLevel admin
SetCompressor /solid lzma
-VIProductVersion "%%VERSION%%.0"
-VIAddVersionKey /LANG=${LANG_ENGLISH} "ProductVersion" "%%VERSION%%"
-VIAddVersionKey /LANG=${LANG_ENGLISH} "ProductName" "RabbitMQ Server"
-;VIAddVersionKey /LANG=${LANG_ENGLISH} "Comments" ""
-VIAddVersionKey /LANG=${LANG_ENGLISH} "CompanyName" "Pivotal Software, Inc"
-;VIAddVersionKey /LANG=${LANG_ENGLISH} "LegalTrademarks" "" ; TODO ?
-VIAddVersionKey /LANG=${LANG_ENGLISH} "LegalCopyright" "Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved."
-VIAddVersionKey /LANG=${LANG_ENGLISH} "FileDescription" "RabbitMQ Server"
-VIAddVersionKey /LANG=${LANG_ENGLISH} "FileVersion" "%%VERSION%%"
-
;--------------------------------
; Pages
@@ -64,6 +110,16 @@ VIAddVersionKey /LANG=${LANG_ENGLISH} "FileVersion" "%%VERSION%%"
;--------------------------------
+VIProductVersion "%%VERSION%%.0"
+VIAddVersionKey /LANG=${LANG_ENGLISH} "ProductVersion" "%%VERSION%%"
+VIAddVersionKey /LANG=${LANG_ENGLISH} "ProductName" "RabbitMQ Server"
+;VIAddVersionKey /LANG=${LANG_ENGLISH} "Comments" ""
+VIAddVersionKey /LANG=${LANG_ENGLISH} "CompanyName" "Pivotal Software, Inc"
+;VIAddVersionKey /LANG=${LANG_ENGLISH} "LegalTrademarks" "" ; TODO ?
+VIAddVersionKey /LANG=${LANG_ENGLISH} "LegalCopyright" "Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved."
+VIAddVersionKey /LANG=${LANG_ENGLISH} "FileDescription" "RabbitMQ Server"
+VIAddVersionKey /LANG=${LANG_ENGLISH} "FileVersion" "%%VERSION%%"
+
; The stuff to install
Section "RabbitMQ Server (required)" Rabbit
@@ -86,9 +142,9 @@ Section "RabbitMQ Server (required)" Rabbit
WriteRegStr HKLM "SOFTWARE\VMware, Inc.\RabbitMQ Server" "Install_Dir" "$INSTDIR"
; Write the uninstall keys for Windows
- WriteRegStr HKLM ${uninstall} "DisplayName" "RabbitMQ Server"
+ WriteRegStr HKLM ${uninstall} "DisplayName" "RabbitMQ Server %%VERSION%%"
WriteRegStr HKLM ${uninstall} "UninstallString" "$INSTDIR\uninstall.exe"
- WriteRegStr HKLM ${uninstall} "DisplayIcon" "$INSTDIR\uninstall.exe,0"
+ WriteRegStr HKLM ${uninstall} "DisplayIcon" "$INSTDIR\rabbitmq.ico"
WriteRegStr HKLM ${uninstall} "Publisher" "Pivotal Software, Inc."
WriteRegStr HKLM ${uninstall} "DisplayVersion" "%%VERSION%%"
WriteRegDWORD HKLM ${uninstall} "NoModify" 1
@@ -104,9 +160,10 @@ SectionEnd
;--------------------------------
Section "RabbitMQ Service" RabbitService
- ExpandEnvStrings $0 %COMSPEC%
- ExecWait '"$0" /C "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" install'
- ExecWait '"$0" /C "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" start'
+ DetailPrint "Installing RabbitMQ service..."
+ ExecDos::exec /DETAILED '"$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" install' ""
+ DetailPrint "Starting RabbitMQ service..."
+ ExecDos::exec /DETAILED '"$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" start' ""
ReadEnvStr $1 "HOMEDRIVE"
ReadEnvStr $2 "HOMEPATH"
CopyFiles "$WINDIR\.erlang.cookie" "$1$2\.erlang.cookie"
@@ -155,25 +212,36 @@ LangString DESC_RabbitStartMenu ${LANG_ENGLISH} "Add some useful links to the st
Section "Uninstall"
+ ; Check if reinstall will occur immediately - don't remove ERLANG_HOME
+ Var /GLOBAL REINSTALLFLAG
+ Var /GLOBAL ISREINSTALL
+ ${GetParameters} $REINSTALLFLAG
+ ${un.StrContains} $ISREINSTALL "reinstall" $REINSTALLFLAG
+
; Remove registry keys
DeleteRegKey HKLM ${uninstall}
DeleteRegKey HKLM "SOFTWARE\VMware, Inc.\RabbitMQ Server"
; TODO these will fail if the service is not installed - do we care?
- ExpandEnvStrings $0 %COMSPEC%
- ExecWait '"$0" /C "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" stop'
- ExecWait '"$0" /C "$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" remove'
+ DetailPrint "Stopping RabbitMQ service..."
+ ExecDos::exec /DETAILED '"$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" stop' ""
+ DetailPrint "Removing RabbitMQ service..."
+ ExecDos::exec /DETAILED '"$INSTDIR\rabbitmq_server-%%VERSION%%\sbin\rabbitmq-service.bat" remove' ""
; Remove files and uninstaller
RMDir /r "$INSTDIR\rabbitmq_server-%%VERSION%%"
Delete "$INSTDIR\rabbitmq.ico"
Delete "$INSTDIR\uninstall.exe"
+ RMDir "$INSTDIR"
; Remove start menu items
RMDir /r "$SMPROGRAMS\RabbitMQ Server"
- DeleteRegValue ${env_hklm} ERLANG_HOME
- SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000
+ ; If reinstalling immediately (e.g. program update) don't remove ERLANG_HOME environment variable
+ ${If} $ISREINSTALL == ""
+ DeleteRegValue ${env_hklm} ERLANG_HOME
+ SendMessage ${HWND_BROADCAST} ${WM_WININICHANGE} 0 "STR:Environment" /TIMEOUT=5000
+ ${EndIf}
SectionEnd
@@ -182,11 +250,21 @@ SectionEnd
; Functions
Function .onInit
+ ; By default, always install in "\Program Files", no matter if we run
+ ; on a 32-bit or 64-bit Windows.
+ ${If} $INSTDIR == "";
+ ${If} ${RunningX64}
+ StrCpy $INSTDIR "$PROGRAMFILES64\RabbitMQ Server"
+ ${Else}
+ StrCpy $INSTDIR "$PROGRAMFILES\RabbitMQ Server"
+ ${EndIf}
+ ${EndIf}
+
Call findErlang
ReadRegStr $0 HKLM ${uninstall} "UninstallString"
${If} $0 != ""
- MessageBox MB_OKCANCEL|MB_ICONEXCLAMATION "RabbitMQ is already installed. $\n$\nClick 'OK' to remove the previous version or 'Cancel' to cancel this installation." IDOK rununinstall IDCANCEL norun
+ MessageBox MB_OKCANCEL|MB_ICONEXCLAMATION "RabbitMQ is already installed. $\n$\nClick 'OK' to remove the previous version or 'Cancel' to cancel this installation." /SD IDOK IDOK rununinstall IDCANCEL norun
norun:
Abort
@@ -194,7 +272,9 @@ Function .onInit
rununinstall:
;Run the uninstaller
ClearErrors
- ExecWait "$INSTDIR\uninstall.exe /S"
+ ExecWait '"$INSTDIR\uninstall.exe" /S _?=$INSTDIR'
+ Delete "$INSTDIR\uninstall.exe"
+ RMDir "$INSTDIR"
${EndIf}
FunctionEnd
diff --git a/packaging/windows/Makefile b/packaging/windows/Makefile
index 5dc802a8c2..52b6531c3c 100644
--- a/packaging/windows/Makefile
+++ b/packaging/windows/Makefile
@@ -1,43 +1,43 @@
-VERSION=0.0.0
-SOURCE_DIR=rabbitmq-server-$(VERSION)
-TARGET_DIR=rabbitmq_server-$(VERSION)
-TARGET_ZIP=rabbitmq-server-windows-$(VERSION)
+SOURCE_DIST_FILE ?= $(wildcard ../../../rabbitmq-server-*.tar.xz)
+
+ifneq ($(filter-out clean,$(MAKECMDGOALS)),)
+ifeq ($(SOURCE_DIST_FILE),)
+$(error Cannot find source archive; please specify SOURCE_DIST_FILE)
+endif
+ifneq ($(words $(SOURCE_DIST_FILE)),1)
+$(error Multiple source archives found; please specify SOURCE_DIST_FILE)
+endif
+
+VERSION ?= $(patsubst rabbitmq-server-%.tar.xz,%,$(notdir $(SOURCE_DIST_FILE)))
+ifeq ($(VERSION),)
+$(error Cannot determine version; please specify VERSION)
+endif
+endif
+
+SOURCE_DIR = rabbitmq-server-$(VERSION)
+TARGET_DIR = rabbitmq_server-$(VERSION)
+TARGET_ZIP = rabbitmq-server-windows-$(VERSION)
+
+unexport DEPS_DIR
+unexport ERL_LIBS
+
+all: dist
+ @:
dist:
- tar -zxf ../../dist/$(SOURCE_DIR).tar.gz
- $(MAKE) -C $(SOURCE_DIR)
-
- mkdir -p $(SOURCE_DIR)/sbin
- mv $(SOURCE_DIR)/scripts/*.bat $(SOURCE_DIR)/sbin
- mkdir -p $(SOURCE_DIR)/etc
- cp $(SOURCE_DIR)/docs/rabbitmq.config.example $(SOURCE_DIR)/etc/rabbitmq.config.example
- cp README-etc $(SOURCE_DIR)/etc/README.txt
- rm -rf $(SOURCE_DIR)/scripts
- rm -rf $(SOURCE_DIR)/codegen* $(SOURCE_DIR)/Makefile $(SOURCE_DIR)/*mk
- rm -f $(SOURCE_DIR)/README
- rm -rf $(SOURCE_DIR)/docs
- rm -rf $(SOURCE_DIR)/src
- rm -rf $(SOURCE_DIR)/dist
-
- mv $(SOURCE_DIR) $(TARGET_DIR)
- mkdir -p $(TARGET_DIR)
- mv $(TARGET_DIR)/plugins/README $(TARGET_DIR)/plugins/README.txt
- xmlto -o . xhtml-nochunks ../../docs/rabbitmq-service.xml
- elinks -dump -no-references -no-numbering rabbitmq-service.html \
- > $(TARGET_DIR)/readme-service.txt
- todos $(TARGET_DIR)/readme-service.txt
- todos $(TARGET_DIR)/INSTALL
- todos $(TARGET_DIR)/LICENSE*
- todos $(TARGET_DIR)/plugins/README.txt
- todos $(TARGET_DIR)/etc/rabbitmq.config.example
- todos $(TARGET_DIR)/etc/README.txt
- rm -rf $(TARGET_DIR)/plugins-src
+ xzcat $(SOURCE_DIST_FILE) | tar -xf -
+ $(MAKE) -C $(SOURCE_DIR) install-windows \
+ DESTDIR=$(abspath $(TARGET_DIR)) \
+ WINDOWS_PREFIX=
+
+ cp -a README-etc $(TARGET_DIR)/etc/README.txt
+
zip -q -r $(TARGET_ZIP).zip $(TARGET_DIR)
- rm -rf $(TARGET_DIR) rabbitmq-service.html
-clean: clean_partial
- rm -f rabbitmq-server-windows-*.zip
+ if test "$(PACKAGES_DIR)"; then \
+ mkdir -p "$(PACKAGES_DIR)"; \
+ mv $(TARGET_ZIP).zip "$(PACKAGES_DIR)"; \
+ fi
-clean_partial:
- rm -rf $(SOURCE_DIR)
- rm -rf $(TARGET_DIR)
+clean:
+ rm -rf rabbitmq-server-* rabbitmq_server-*
diff --git a/packaging/windows/README-etc b/packaging/windows/README-etc
index 807698e81f..b431247c6b 100644
--- a/packaging/windows/README-etc
+++ b/packaging/windows/README-etc
@@ -1,7 +1,7 @@
-In this directory you can find an example configuration file for RabbitMQ.
-
-Note that this directory is *not* where the real RabbitMQ
-configuration lives. The default location for the real configuration
-file is %APPDATA%\RabbitMQ\rabbitmq.config.
-
-%APPDATA% usually expands to C:\Users\%USERNAME%\AppData\Roaming or similar.
+In this directory you can find an example configuration file for RabbitMQ.
+
+Note that this directory is *not* where the real RabbitMQ
+configuration lives. The default location for the real configuration
+file is %APPDATA%\RabbitMQ\rabbitmq.config.
+
+%APPDATA% usually expands to C:\Users\%USERNAME%\AppData\Roaming or similar.
diff --git a/rabbitmq-components.mk b/rabbitmq-components.mk
new file mode 100644
index 0000000000..eed26fdac8
--- /dev/null
+++ b/rabbitmq-components.mk
@@ -0,0 +1,331 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define default goal to `all` because this file defines some targets
+# before the inclusion of erlang.mk leading to the wrong target becoming
+# the default.
+.DEFAULT_GOAL = all
+endif
+
+# Automatically add rabbitmq-common to the dependencies, at least for
+# the Makefiles.
+ifneq ($(PROJECT),rabbit_common)
+ifneq ($(PROJECT),rabbitmq_public_umbrella)
+ifeq ($(filter rabbit_common,$(DEPS)),)
+DEPS += rabbit_common
+endif
+endif
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to checkout branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch or fallback to `stable` or `master` whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_clusterer = git_rmq rabbitmq-clusterer $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc = git_rmq rabbitmq-lvc-plugin $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_visualiser = git_rmq rabbitmq-management-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_sockjs = git_rmq sockjs-erlang $(current_rmq_ref) $(base_rmq_ref) master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# FIXME: As of 2015-11-20, we depend on Ranch 1.2.1, but erlang.mk
+# defaults to Ranch 1.1.0. All projects depending indirectly on Ranch
+# need to add "ranch" as a BUILD_DEPS. The list of projects needing
+# this workaround is:
+# o rabbitmq-web-stomp
+dep_ranch = git https://github.com/ninenines/ranch 1.2.1
+
+RABBITMQ_COMPONENTS = amqp_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_clusterer \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_lvc \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_management_visualiser \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_test \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q stable >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) stable && \
+ echo stable) || \
+ echo master)
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name is replaced by the
+# target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fallback to RabbitMQ
+# upstream which is GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespaces in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Run a RabbitMQ node (moved from rabbitmq-run.mk as a workaround).
+# --------------------------------------------------------------------
+
+# Add "rabbit" to the build dependencies when the user wants to start
+# a broker or to the test dependencies when the user wants to test a
+# project.
+#
+# NOTE: This should belong to rabbitmq-run.mk. Unfortunately, it is
+# loaded *after* erlang.mk which is too late to add a dependency. That's
+# why rabbitmq-components.mk knows the list of targets which start a
+# broker and add "rabbit" to the dependencies in this case.
+
+ifneq ($(PROJECT),rabbit)
+ifeq ($(filter rabbit,$(DEPS) $(BUILD_DEPS)),)
+RUN_RMQ_TARGETS = run-broker \
+ run-background-broker \
+ run-node \
+ run-background-node \
+ start-background-node
+
+ifneq ($(filter $(RUN_RMQ_TARGETS),$(MAKECMDGOALS)),)
+BUILD_DEPS += rabbit
+endif
+endif
+
+ifeq ($(filter rabbit,$(DEPS) $(BUILD_DEPS) $(TEST_DEPS)),)
+ifneq ($(filter check tests tests-with-broker test,$(MAKECMDGOALS)),)
+TEST_DEPS += rabbit
+endif
+endif
+endif
+
+ifeq ($(filter rabbit_public_umbrella amqp_client rabbit_common rabbitmq_test,$(PROJECT)),)
+ifeq ($(filter rabbitmq_test,$(DEPS) $(BUILD_DEPS) $(TEST_DEPS)),)
+TEST_DEPS += rabbitmq_test
+endif
+endif
+
+# --------------------------------------------------------------------
+# rabbitmq-components.mk checks.
+# --------------------------------------------------------------------
+
+ifeq ($(PROJECT),rabbit_common)
+else ifdef SKIP_RMQCOMP_CHECK
+else ifeq ($(IS_DEP),1)
+else ifneq ($(filter co up,$(MAKECMDGOALS)),)
+else
+# In all other cases, rabbitmq-components.mk must be in sync.
+deps:: check-rabbitmq-components.mk
+fetch-deps: check-rabbitmq-components.mk
+endif
+
+# If this project is under the Umbrella project, we override $(DEPS_DIR)
+# to point to the Umbrella's one. We also disable `make distclean` so
+# $(DEPS_DIR) is not accidentally removed.
+
+ifneq ($(wildcard ../../UMBRELLA.md),)
+UNDER_UMBRELLA = 1
+else ifneq ($(wildcard UMBRELLA.md),)
+UNDER_UMBRELLA = 1
+endif
+
+ifeq ($(UNDER_UMBRELLA),1)
+ifneq ($(PROJECT),rabbitmq_public_umbrella)
+DEPS_DIR ?= $(abspath ..)
+
+distclean:: distclean-components
+ @:
+
+distclean-components:
+endif
+
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
+
+UPSTREAM_RMQ_COMPONENTS_MK = $(DEPS_DIR)/rabbit_common/mk/rabbitmq-components.mk
+
+check-rabbitmq-components.mk:
+ $(verbose) cmp -s rabbitmq-components.mk \
+ $(UPSTREAM_RMQ_COMPONENTS_MK) || \
+ (echo "error: rabbitmq-components.mk must be updated!" 1>&2; \
+ false)
+
+ifeq ($(PROJECT),rabbit_common)
+rabbitmq-components-mk:
+ @:
+else
+rabbitmq-components-mk:
+ $(gen_verbose) cp -a $(UPSTREAM_RMQ_COMPONENTS_MK) .
+ifeq ($(DO_COMMIT),yes)
+ $(verbose) git diff --quiet rabbitmq-components.mk \
+ || git commit -m 'Update rabbitmq-components.mk' rabbitmq-components.mk
+endif
+endif
diff --git a/scripts/rabbitmq-defaults b/scripts/rabbitmq-defaults
index 26f6af7cff..c5d87822a2 100644
--- a/scripts/rabbitmq-defaults
+++ b/scripts/rabbitmq-defaults
@@ -24,6 +24,12 @@ ERL_DIR=
CLEAN_BOOT_FILE=start_clean
SASL_BOOT_FILE=start_sasl
+if [ -f "${RABBITMQ_HOME}/erlang.mk" ]; then
+ # RabbitMQ is executed from its source directory. The plugins
+ # directory and ERL_LIBS are tuned based on this.
+ RABBITMQ_DEV_ENV=1
+fi
+
## Set default values
BOOT_MODULE="rabbit"
diff --git a/scripts/rabbitmq-defaults.bat b/scripts/rabbitmq-defaults.bat
index d1e3b4141b..2125af68f1 100644
--- a/scripts/rabbitmq-defaults.bat
+++ b/scripts/rabbitmq-defaults.bat
@@ -1,37 +1,43 @@
-@echo off
-
-REM ### next line potentially updated in package install steps
-REM set SYS_PREFIX=
-
-REM ### next line will be updated when generating a standalone release
-REM ERL_DIR=
-set ERL_DIR=
-
-REM These boot files don't appear to be referenced in the batch scripts
-REM set CLEAN_BOOT_FILE=start_clean
-REM set SASL_BOOT_FILE=start_sasl
-
-REM ## Set default values
-
-if "!RABBITMQ_BASE!"=="" (
- set RABBITMQ_BASE=!APPDATA!\RabbitMQ
-)
-
-REM BOOT_MODULE="rabbit"
-REM CONFIG_FILE=${SYS_PREFIX}/etc/rabbitmq/rabbitmq
-REM LOG_BASE=${SYS_PREFIX}/var/log/rabbitmq
-REM MNESIA_BASE=${SYS_PREFIX}/var/lib/rabbitmq/mnesia
-REM ENABLED_PLUGINS_FILE=${SYS_PREFIX}/etc/rabbitmq/enabled_plugins
-set BOOT_MODULE=rabbit
-set CONFIG_FILE=!RABBITMQ_BASE!\rabbitmq
-set LOG_BASE=!RABBITMQ_BASE!\log
-set MNESIA_BASE=!RABBITMQ_BASE!\db
-set ENABLED_PLUGINS_FILE=!RABBITMQ_BASE!\enabled_plugins
-
-REM PLUGINS_DIR="${RABBITMQ_HOME}/plugins"
-set PLUGINS_DIR=!TDP0!..\plugins
-
-REM CONF_ENV_FILE=${SYS_PREFIX}/etc/rabbitmq/rabbitmq-env.conf
-if "!RABBITMQ_CONF_ENV_FILE!"=="" (
- set RABBITMQ_CONF_ENV_FILE=!RABBITMQ_BASE!\rabbitmq-env-conf.bat
-)
+@echo off
+
+REM ### next line potentially updated in package install steps
+REM set SYS_PREFIX=
+
+REM ### next line will be updated when generating a standalone release
+REM ERL_DIR=
+set ERL_DIR=
+
+REM These boot files don't appear to be referenced in the batch scripts
+REM set CLEAN_BOOT_FILE=start_clean
+REM set SASL_BOOT_FILE=start_sasl
+
+if exist "%RABBITMQ_HOME%\erlang.mk" (
+ REM RabbitMQ is executed from its source directory. The plugins
+ REM directory and ERL_LIBS are tuned based on this.
+ set RABBITMQ_DEV_ENV=1
+)
+
+REM ## Set default values
+
+if "!RABBITMQ_BASE!"=="" (
+ set RABBITMQ_BASE=!APPDATA!\RabbitMQ
+)
+
+REM BOOT_MODULE="rabbit"
+REM CONFIG_FILE=${SYS_PREFIX}/etc/rabbitmq/rabbitmq
+REM LOG_BASE=${SYS_PREFIX}/var/log/rabbitmq
+REM MNESIA_BASE=${SYS_PREFIX}/var/lib/rabbitmq/mnesia
+REM ENABLED_PLUGINS_FILE=${SYS_PREFIX}/etc/rabbitmq/enabled_plugins
+set BOOT_MODULE=rabbit
+set CONFIG_FILE=!RABBITMQ_BASE!\rabbitmq
+set LOG_BASE=!RABBITMQ_BASE!\log
+set MNESIA_BASE=!RABBITMQ_BASE!\db
+set ENABLED_PLUGINS_FILE=!RABBITMQ_BASE!\enabled_plugins
+
+REM PLUGINS_DIR="${RABBITMQ_HOME}/plugins"
+set PLUGINS_DIR=!TDP0!..\plugins
+
+REM CONF_ENV_FILE=${SYS_PREFIX}/etc/rabbitmq/rabbitmq-env.conf
+if "!RABBITMQ_CONF_ENV_FILE!"=="" (
+ set RABBITMQ_CONF_ENV_FILE=!RABBITMQ_BASE!\rabbitmq-env-conf.bat
+)
diff --git a/scripts/rabbitmq-echopid.bat b/scripts/rabbitmq-echopid.bat
index 6262a1638f..650fcc5202 100755..100644
--- a/scripts/rabbitmq-echopid.bat
+++ b/scripts/rabbitmq-echopid.bat
@@ -1,55 +1,55 @@
-@echo off
-
-REM Usage: rabbitmq-echopid.bat <rabbitmq_nodename>
-REM
-REM <rabbitmq_nodename> (s)name of the erlang node to connect to (required)
-
-setlocal
-
-set TDP0=%~dp0
-
-REM Get default settings with user overrides for (RABBITMQ_)<var_name>
-REM Non-empty defaults should be set in rabbitmq-env
-call "!TDP0!\rabbitmq-env.bat"
-
-if "%1"=="" goto fail
-
-:: set timeout vars ::
-set TIMEOUT=10
-set TIMER=1
-
-:: check that wmic exists ::
-set WMIC_PATH=%SYSTEMROOT%\System32\Wbem\wmic.exe
-if not exist "%WMIC_PATH%" (
- goto fail
-)
-
-:getpid
-for /f "usebackq tokens=* skip=1" %%P IN (`%%WMIC_PATH%% process where "name='erl.exe' and commandline like '%%%RABBITMQ_NAME_TYPE% %1%%'" get processid 2^>nul`) do (
- set PID=%%P
- goto echopid
-)
-
-:echopid
-:: check for pid not found ::
-if "%PID%" == "" (
- PING 127.0.0.1 -n 2 > nul
- set /a TIMER+=1
- if %TIMEOUT%==%TIMER% goto fail
- goto getpid
-)
-
-:: show pid ::
-echo %PID%
-
-:: all done ::
-:ok
-endlocal
-EXIT /B 0
-
-:: something went wrong ::
-:fail
-endlocal
-EXIT /B 1
-
-
+@echo off
+
+REM Usage: rabbitmq-echopid.bat <rabbitmq_nodename>
+REM
+REM <rabbitmq_nodename> (s)name of the erlang node to connect to (required)
+
+setlocal
+
+set TDP0=%~dp0
+
+REM Get default settings with user overrides for (RABBITMQ_)<var_name>
+REM Non-empty defaults should be set in rabbitmq-env
+call "!TDP0!\rabbitmq-env.bat" %~n0
+
+if "%1"=="" goto fail
+
+:: set timeout vars ::
+set TIMEOUT=10
+set TIMER=1
+
+:: check that wmic exists ::
+set WMIC_PATH=%SYSTEMROOT%\System32\Wbem\wmic.exe
+if not exist "%WMIC_PATH%" (
+ goto fail
+)
+
+:getpid
+for /f "usebackq tokens=* skip=1" %%P IN (`%%WMIC_PATH%% process where "name='erl.exe' and commandline like '%%%RABBITMQ_NAME_TYPE% %1%%'" get processid 2^>nul`) do (
+ set PID=%%P
+ goto echopid
+)
+
+:echopid
+:: check for pid not found ::
+if "%PID%" == "" (
+ PING 127.0.0.1 -n 2 > nul
+ set /a TIMER+=1
+ if %TIMEOUT%==%TIMER% goto fail
+ goto getpid
+)
+
+:: show pid ::
+echo %PID%
+
+:: all done ::
+:ok
+endlocal
+EXIT /B 0
+
+:: something went wrong ::
+:fail
+endlocal
+EXIT /B 1
+
+
diff --git a/scripts/rabbitmq-env b/scripts/rabbitmq-env
index a5bf52ab6a..0014643260 100755..100644
--- a/scripts/rabbitmq-env
+++ b/scripts/rabbitmq-env
@@ -15,33 +15,52 @@
## Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
##
-# We set +e here since since our test for "readlink -f" below needs to
-# be able to fail.
-set +e
-# Determine where this script is really located (if this script is
-# invoked from another script, this is the location of the caller)
-SCRIPT_PATH="$0"
-while [ -h "$SCRIPT_PATH" ] ; do
- # Determine if readlink -f is supported at all. TODO clean this up.
- FULL_PATH=`readlink -f $SCRIPT_PATH 2>/dev/null`
- if [ "$?" != "0" ]; then
- REL_PATH=`readlink $SCRIPT_PATH`
- if expr "$REL_PATH" : '/.*' > /dev/null; then
- SCRIPT_PATH="$REL_PATH"
- else
- SCRIPT_PATH="`dirname "$SCRIPT_PATH"`/$REL_PATH"
- fi
+if [ "$RABBITMQ_ENV_LOADED" = 1 ]; then
+ return 0;
+fi
+
+if [ -z "$RABBITMQ_SCRIPTS_DIR" ]; then
+ # We set +e here since our test for "readlink -f" below needs to
+ # be able to fail.
+ set +e
+ # Determine where this script is really located (if this script is
+ # invoked from another script, this is the location of the caller)
+ SCRIPT_PATH="$0"
+ while [ -h "$SCRIPT_PATH" ] ; do
+ # Determine if readlink -f is supported at all. TODO clean this up.
+ FULL_PATH=`readlink -f $SCRIPT_PATH 2>/dev/null`
+ if [ "$?" != "0" ]; then
+ REL_PATH=`readlink $SCRIPT_PATH`
+ if expr "$REL_PATH" : '/.*' > /dev/null; then
+ SCRIPT_PATH="$REL_PATH"
+ else
+ SCRIPT_PATH="`dirname "$SCRIPT_PATH"`/$REL_PATH"
+ fi
+ else
+ SCRIPT_PATH=$FULL_PATH
+ fi
+ done
+ set -e
+
+ RABBITMQ_SCRIPTS_DIR=`dirname $SCRIPT_PATH`
+fi
+
+rmq_realpath() {
+ local path=$1
+
+ if [ -d "$path" ]; then
+ cd "$path" && pwd
+ elif [ -f "$path" ]; then
+ cd "$(dirname "$path")" && echo $(pwd)/$(basename "$path")
else
- SCRIPT_PATH=$FULL_PATH
+ echo "$path"
fi
-done
-set -e
+}
-SCRIPT_DIR=`dirname $SCRIPT_PATH`
-RABBITMQ_HOME="${SCRIPT_DIR}/.."
+RABBITMQ_HOME="$(rmq_realpath "${RABBITMQ_SCRIPTS_DIR}/..")"
## Set defaults
-. ${SCRIPT_DIR}/rabbitmq-defaults
+. ${RABBITMQ_SCRIPTS_DIR}/rabbitmq-defaults
## Common defaults
SERVER_ERL_ARGS="+P 1048576"
@@ -84,20 +103,36 @@ fi
##--- Set environment vars RABBITMQ_<var_name> to defaults if not set
-rmq_realpath() {
+rmq_normalize_path() {
local path=$1
- if [ -d "$path" ]; then
- cd "$path" && pwd
- elif [ -f "$path" ]; then
- cd "$(dirname "$path")" && echo $(pwd)/$(basename "$path")
- else
- echo "$path"
- fi
+ echo "$path" | sed -E -e 's,//+,/,g' -e 's,(.)/$,\1,'
+}
+
+rmq_normalize_path_var() {
+ local var warning
+
+ local prefix="WARNING:"
+
+ for var in "$@"; do
+ local path=$(eval "echo \"\$$var\"")
+ case "$path" in
+ */)
+ warning=1
+ echo "$prefix Removing trailing slash from $var" 1>&2
+ ;;
+ esac
+
+ eval "$var=$(rmq_normalize_path "$path")"
+
+ if [ "x$warning" = "x1" ]; then
+ prefix=" "
+ fi
+ done
}
rmq_check_if_shared_with_mnesia() {
- local var
+ local var warning
local mnesia_dir=$(rmq_realpath "${RABBITMQ_MNESIA_DIR}")
local prefix="WARNING:"
@@ -149,17 +184,29 @@ DEFAULT_NODE_PORT=5672
[ "x" = "x$RABBITMQ_MNESIA_DIR" ] && RABBITMQ_MNESIA_DIR=${MNESIA_DIR}
[ "x" = "x$RABBITMQ_MNESIA_DIR" ] && RABBITMQ_MNESIA_DIR=${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME}
+rmq_normalize_path_var \
+ RABBITMQ_CONFIG_FILE \
+ RABBITMQ_LOG_BASE \
+ RABBITMQ_MNESIA_BASE \
+ RABBITMQ_MNESIA_DIR
+
[ "x" = "x$RABBITMQ_PID_FILE" ] && RABBITMQ_PID_FILE=${PID_FILE}
[ "x" = "x$RABBITMQ_PID_FILE" ] && RABBITMQ_PID_FILE=${RABBITMQ_MNESIA_DIR}.pid
+rmq_normalize_path_var RABBITMQ_PID_FILE
[ "x" = "x$RABBITMQ_BOOT_MODULE" ] && RABBITMQ_BOOT_MODULE=${BOOT_MODULE}
[ "x" = "x$RABBITMQ_PLUGINS_EXPAND_DIR" ] && RABBITMQ_PLUGINS_EXPAND_DIR=${PLUGINS_EXPAND_DIR}
[ "x" = "x$RABBITMQ_PLUGINS_EXPAND_DIR" ] && RABBITMQ_PLUGINS_EXPAND_DIR=${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME}-plugins-expand
+rmq_normalize_path_var RABBITMQ_PLUGINS_EXPAND_DIR
+[ "x" != "x$RABBITMQ_ENABLED_PLUGINS_FILE" ] && RABBITMQ_ENABLED_PLUGINS_FILE_source=environment
[ "x" = "x$RABBITMQ_ENABLED_PLUGINS_FILE" ] && RABBITMQ_ENABLED_PLUGINS_FILE=${ENABLED_PLUGINS_FILE}
+rmq_normalize_path_var RABBITMQ_ENABLED_PLUGINS_FILE
+[ "x" != "x$RABBITMQ_PLUGINS_DIR" ] && RABBITMQ_PLUGINS_DIR_source=environment
[ "x" = "x$RABBITMQ_PLUGINS_DIR" ] && RABBITMQ_PLUGINS_DIR=${PLUGINS_DIR}
+rmq_normalize_path_var RABBITMQ_PLUGINS_DIR
## Log rotation
[ "x" = "x$RABBITMQ_LOGS" ] && RABBITMQ_LOGS=${LOGS}
@@ -167,6 +214,10 @@ DEFAULT_NODE_PORT=5672
[ "x" = "x$RABBITMQ_SASL_LOGS" ] && RABBITMQ_SASL_LOGS=${SASL_LOGS}
[ "x" = "x$RABBITMQ_SASL_LOGS" ] && RABBITMQ_SASL_LOGS="${RABBITMQ_LOG_BASE}/${RABBITMQ_NODENAME}-sasl.log"
+rmq_normalize_path_var \
+ RABBITMQ_LOGS \
+ RABBITMQ_SASL_LOGS
+
[ "x" = "x$RABBITMQ_CTL_ERL_ARGS" ] && RABBITMQ_CTL_ERL_ARGS=${CTL_ERL_ARGS}
# Check if files and directories non-related to Mnesia are configured
@@ -186,5 +237,67 @@ rmq_check_if_shared_with_mnesia \
##--- End of overridden <var_name> variables
+## Development-specific environment.
+if [ "${RABBITMQ_DEV_ENV}" ]; then
+ if [ "$(basename "$0")" = 'rabbitmq-plugins' -a \( \
+ "$RABBITMQ_PLUGINS_DIR_source" != 'environment' -o \
+ "$RABBITMQ_ENABLED_PLUGINS_FILE_source" != 'environment' \) ]; then
+ # We need to query the running node for the plugins directory
+ # and the "enabled plugins" file.
+ eval $( (${RABBITMQ_SCRIPTS_DIR}/rabbitmqctl eval \
+ '{ok, P} = application:get_env(rabbit, plugins_dir),
+ {ok, E} = application:get_env(rabbit, enabled_plugins_file),
+ io:format(
+ "plugins_dir=\"~s\"~n"
+ "enabled_plugins_file=\"~s\"~n", [P, E]).' \
+ 2>/dev/null | head -n 2) || :)
+ if [ "${plugins_dir}" -a \
+ "$RABBITMQ_PLUGINS_DIR_source" != 'environment' ]; then
+ RABBITMQ_PLUGINS_DIR="${plugins_dir}"
+ fi
+ if [ "${enabled_plugins_file}" -a \
+ "$RABBITMQ_ENABLED_PLUGINS_FILE_source" != 'environment' ]; then
+ RABBITMQ_ENABLED_PLUGINS_FILE="${enabled_plugins_file}"
+ fi
+ fi
+
+ if [ -d "${RABBITMQ_PLUGINS_DIR}" ]; then
+ # RabbitMQ was started with "make run-broker" from its own
+ # source tree. Take rabbit_common from the plugins directory.
+ ERL_LIBS="${RABBITMQ_PLUGINS_DIR}:${ERL_LIBS}"
+ else
+ # RabbitMQ runs from a testsuite or a plugin. The .ez files are
+ # not available under RabbitMQ source tree. We need to look at
+ # $DEPS_DIR and default locations.
+
+ if [ "${DEPS_DIR}" -a -d "${DEPS_DIR}/rabbit_common/ebin" ]; then
+ # $DEPS_DIR is set, and it contains rabbitmq-common, use
+ # this.
+ DEPS_DIR_norm="${DEPS_DIR}"
+ elif [ -f "${RABBITMQ_SCRIPTS_DIR}/../../../erlang.mk" -a \
+ -d "${RABBITMQ_SCRIPTS_DIR}/../../rabbit_common/ebin" ]; then
+ # Look at default locations: "deps" subdirectory inside a
+ # plugin or the Umbrella.
+ DEPS_DIR_norm="${RABBITMQ_SCRIPTS_DIR}/../.."
+ fi
+ DEPS_DIR_norm=$(rmq_realpath "${DEPS_DIR_norm}")
+
+ ERL_LIBS="${DEPS_DIR_norm}:${ERL_LIBS}"
+ fi
+else
+ if [ -d "${RABBITMQ_PLUGINS_DIR}" ]; then
+ # RabbitMQ was started from its install directory. Take
+ # rabbit_common from the plugins directory.
+ ERL_LIBS="${RABBITMQ_PLUGINS_DIR}:${ERL_LIBS}"
+ fi
+fi
+
+ERL_LIBS=${ERL_LIBS%:}
+if [ "$ERL_LIBS" ]; then
+ export ERL_LIBS
+fi
+
+RABBITMQ_ENV_LOADED=1
+
# Since we source this elsewhere, don't accidentally stop execution
true
diff --git a/scripts/rabbitmq-env.bat b/scripts/rabbitmq-env.bat
index 2c1a38b7ec..b50468a909 100644
--- a/scripts/rabbitmq-env.bat
+++ b/scripts/rabbitmq-env.bat
@@ -1,257 +1,338 @@
-@echo off
-
-REM Scopes the variables to the current batch file
-REM setlocal
-
-rem Preserve values that might contain exclamation marks before
-rem enabling delayed expansion
-set TDP0=%~dp0
-REM setlocal enabledelayedexpansion
-
-REM SCRIPT_DIR=`dirname $SCRIPT_PATH`
-REM RABBITMQ_HOME="${SCRIPT_DIR}/.."
-set SCRIPT_DIR=%TDP0%
-set RABBITMQ_HOME=%SCRIPT_DIR%..
-
-REM ## Set defaults
-REM . ${SCRIPT_DIR}/rabbitmq-defaults
-call "%SCRIPT_DIR%\rabbitmq-defaults.bat"
-
-REM These common defaults aren't referenced in the batch scripts
-REM ## Common defaults
-REM SERVER_ERL_ARGS="+P 1048576"
-REM
-REM # warn about old rabbitmq.conf file, if no new one
-REM if [ -f /etc/rabbitmq/rabbitmq.conf ] && \
-REM [ ! -f ${CONF_ENV_FILE} ] ; then
-REM echo -n "WARNING: ignoring /etc/rabbitmq/rabbitmq.conf -- "
-REM echo "location has moved to ${CONF_ENV_FILE}"
-REM fi
-
-REM ERL_ARGS aren't referenced in the batch scripts
-REM Common defaults
-REM set SERVER_ERL_ARGS=+P 1048576
-
-REM ## Get configuration variables from the configure environment file
-REM [ -f ${CONF_ENV_FILE} ] && . ${CONF_ENV_FILE} || true
-if exist "!RABBITMQ_CONF_ENV_FILE!" (
- call "!RABBITMQ_CONF_ENV_FILE!"
-)
-
-REM Check for the short names here too
-if "!RABBITMQ_USE_LONGNAME!"=="" (
- if "!USE_LONGNAME!"=="" (
- set RABBITMQ_NAME_TYPE="-sname"
- )
-)
-
-if "!RABBITMQ_USE_LONGNAME!"=="true" (
- if "!USE_LONGNAME!"=="true" (
- set RABBITMQ_NAME_TYPE="-name"
- )
-)
-
-if "!COMPUTERNAME!"=="" (
- set COMPUTERNAME=localhost
-)
-
-REM [ "x" = "x$RABBITMQ_NODENAME" ] && RABBITMQ_NODENAME=${NODENAME}
-if "!RABBITMQ_NODENAME!"=="" (
- if "!NODENAME!"=="" (
- set RABBITMQ_NODENAME=rabbit@!COMPUTERNAME!
- ) else (
- set RABBITMQ_NODENAME=!NODENAME!
- )
-)
-
-REM
-REM ##--- Set environment vars RABBITMQ_<var_name> to defaults if not set
-REM
-REM DEFAULT_NODE_IP_ADDRESS=auto
-REM DEFAULT_NODE_PORT=5672
-REM [ "x" = "x$RABBITMQ_NODE_IP_ADDRESS" ] && RABBITMQ_NODE_IP_ADDRESS=${NODE_IP_ADDRESS}
-REM [ "x" = "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_NODE_PORT=${NODE_PORT}
-REM [ "x" = "x$RABBITMQ_NODE_IP_ADDRESS" ] && [ "x" != "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_NODE_IP_ADDRESS=${DEFAULT_NODE_IP_ADDRESS}
-REM [ "x" != "x$RABBITMQ_NODE_IP_ADDRESS" ] && [ "x" = "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_NODE_PORT=${DEFAULT_NODE_PORT}
-
-REM if "!RABBITMQ_NODE_IP_ADDRESS!"=="" (
-REM if not "!RABBITMQ_NODE_PORT!"=="" (
-REM set RABBITMQ_NODE_IP_ADDRESS=auto
-REM )
-REM ) else (
-REM if "!RABBITMQ_NODE_PORT!"=="" (
-REM set RABBITMQ_NODE_PORT=5672
-REM )
-REM )
-
-if "!RABBITMQ_NODE_IP_ADDRESS!"=="" (
- if not "!NODE_IP_ADDRESS!"=="" (
- set RABBITMQ_NODE_IP_ADDRESS=!NODE_IP_ADDRESS!
- )
-)
-
-if "!RABBITMQ_NODE_PORT!"=="" (
- if not "!NODE_PORT!"=="" (
- set RABBITMQ_NODE_PORT=!NODE_PORT!
- )
-)
-
-if "!RABBITMQ_NODE_IP_ADDRESS!"=="" (
- if not "!RABBITMQ_NODE_PORT!"=="" (
- set RABBITMQ_NODE_IP_ADDRESS=auto
- )
-) else (
- if "!RABBITMQ_NODE_PORT!"=="" (
- set RABBITMQ_NODE_PORT=5672
- )
-)
-
-REM [ "x" = "x$RABBITMQ_DIST_PORT" ] && RABBITMQ_DIST_PORT=${DIST_PORT}
-REM [ "x" = "x$RABBITMQ_DIST_PORT" ] && [ "x" = "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_DIST_PORT=$((${DEFAULT_NODE_PORT} + 20000))
-REM [ "x" = "x$RABBITMQ_DIST_PORT" ] && [ "x" != "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_DIST_PORT=$((${RABBITMQ_NODE_PORT} + 20000))
-
-if "!RABBITMQ_DIST_PORT!"=="" (
- if "!DIST_PORT!"=="" (
- if "!RABBITMQ_NODE_PORT!"=="" (
- set RABBITMQ_DIST_PORT=25672
- ) else (
- set /a RABBITMQ_DIST_PORT=20000+!RABBITMQ_NODE_PORT!
- )
- ) else (
- set RABBITMQ_DIST_PORT=!DIST_PORT!
- )
-)
-
-REM [ "x" = "x$RABBITMQ_SERVER_ERL_ARGS" ] && RABBITMQ_SERVER_ERL_ARGS=${SERVER_ERL_ARGS}
-REM No Windows equivalent
-
-REM [ "x" = "x$RABBITMQ_CONFIG_FILE" ] && RABBITMQ_CONFIG_FILE=${CONFIG_FILE}
-if "!RABBITMQ_CONFIG_FILE!"=="" (
- if "!CONFIG_FILE!"=="" (
- set RABBITMQ_CONFIG_FILE=!RABBITMQ_BASE!\rabbitmq
- ) else (
- set RABBITMQ_CONFIG_FILE=!CONFIG_FILE!
- )
-)
-
-REM [ "x" = "x$RABBITMQ_LOG_BASE" ] && RABBITMQ_LOG_BASE=${LOG_BASE}
-if "!RABBITMQ_LOG_BASE!"=="" (
- if "!LOG_BASE!"=="" (
- set RABBITMQ_LOG_BASE=!RABBITMQ_BASE!\log
- ) else (
- set RABBITMQ_LOG_BASE=!LOG_BASE!
- )
-)
-
-REM [ "x" = "x$RABBITMQ_MNESIA_BASE" ] && RABBITMQ_MNESIA_BASE=${MNESIA_BASE}
-if "!RABBITMQ_MNESIA_BASE!"=="" (
- if "!MNESIA_BASE!"=="" (
- set RABBITMQ_MNESIA_BASE=!RABBITMQ_BASE!\db
- ) else (
- set RABBITMQ_MNESIA_BASE=!MNESIA_BASE!
- )
-)
-
-REM [ "x" = "x$RABBITMQ_SERVER_START_ARGS" ] && RABBITMQ_SERVER_START_ARGS=${SERVER_START_ARGS}
-REM No Windows equivalent
-
-REM [ "x" = "x$RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS" ] && RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS=${SERVER_ADDITIONAL_ERL_ARGS}
-REM No Windows equivalent
-
-REM [ "x" = "x$RABBITMQ_MNESIA_DIR" ] && RABBITMQ_MNESIA_DIR=${MNESIA_DIR}
-REM [ "x" = "x$RABBITMQ_MNESIA_DIR" ] && RABBITMQ_MNESIA_DIR=${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME}
-if "!RABBITMQ_MNESIA_DIR!"=="" (
- if "!MNESIA_DIR!"=="" (
- set RABBITMQ_MNESIA_DIR=!RABBITMQ_MNESIA_BASE!/!RABBITMQ_NODENAME!-mnesia
- ) else (
- set RABBITMQ_MNESIA_DIR=!MNESIA_DIR!
- )
-)
-
-REM [ "x" = "x$RABBITMQ_PID_FILE" ] && RABBITMQ_PID_FILE=${PID_FILE}
-REM [ "x" = "x$RABBITMQ_PID_FILE" ] && RABBITMQ_PID_FILE=${RABBITMQ_MNESIA_DIR}.pid
-REM No Windows equivalent
-
-REM [ "x" = "x$RABBITMQ_BOOT_MODULE" ] && RABBITMQ_BOOT_MODULE=${BOOT_MODULE}
-if "!RABBITMQ_BOOT_MODULE!"=="" (
- if "!BOOT_MODULE!"=="" (
- set RABBITMQ_BOOT_MODULE=rabbit
- ) else (
- set RABBITMQ_BOOT_MODULE=!BOOT_MODULE!
- )
-)
-
-REM [ "x" = "x$RABBITMQ_PLUGINS_EXPAND_DIR" ] && RABBITMQ_PLUGINS_EXPAND_DIR=${PLUGINS_EXPAND_DIR}
-REM [ "x" = "x$RABBITMQ_PLUGINS_EXPAND_DIR" ] && RABBITMQ_PLUGINS_EXPAND_DIR=${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME}-plugins-expand
-if "!RABBITMQ_PLUGINS_EXPAND_DIR!"=="" (
- if "!PLUGINS_EXPAND_DIR!"=="" (
- set RABBITMQ_PLUGINS_EXPAND_DIR=!RABBITMQ_MNESIA_BASE!/!RABBITMQ_NODENAME!-plugins-expand
- ) else (
- set RABBITMQ_PLUGINS_EXPAND_DIR=!PLUGINS_EXPAND_DIR!
- )
-)
-
-REM [ "x" = "x$RABBITMQ_ENABLED_PLUGINS_FILE" ] && RABBITMQ_ENABLED_PLUGINS_FILE=${ENABLED_PLUGINS_FILE}
-if "!RABBITMQ_ENABLED_PLUGINS_FILE!"=="" (
- if "!ENABLED_PLUGINS_FILE!"=="" (
- set RABBITMQ_ENABLED_PLUGINS_FILE=!RABBITMQ_BASE!\enabled_plugins
- ) else (
- set RABBITMQ_ENABLED_PLUGINS_FILE=!ENABLED_PLUGINS_FILE!
- )
-)
-
-REM [ "x" = "x$RABBITMQ_PLUGINS_DIR" ] && RABBITMQ_PLUGINS_DIR=${PLUGINS_DIR}
-if "!RABBITMQ_PLUGINS_DIR!"=="" (
- if "!PLUGINS_DIR!"=="" (
- set RABBITMQ_PLUGINS_DIR=!RABBITMQ_HOME!\plugins
- ) else (
- set RABBITMQ_PLUGINS_DIR=!PLUGINS_DIR!
- )
-)
-
-REM ## Log rotation
-REM [ "x" = "x$RABBITMQ_LOGS" ] && RABBITMQ_LOGS=${LOGS}
-REM [ "x" = "x$RABBITMQ_LOGS" ] && RABBITMQ_LOGS="${RABBITMQ_LOG_BASE}/${RABBITMQ_NODENAME}.log"
-if "!RABBITMQ_LOGS!"=="" (
- if "!LOGS!"=="" (
- set RABBITMQ_LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!.log
- ) else (
- set RABBITMQ_LOGS=!LOGS!
- )
-)
-
-REM [ "x" = "x$RABBITMQ_SASL_LOGS" ] && RABBITMQ_SASL_LOGS=${SASL_LOGS}
-REM [ "x" = "x$RABBITMQ_SASL_LOGS" ] && RABBITMQ_SASL_LOGS="${RABBITMQ_LOG_BASE}/${RABBITMQ_NODENAME}-sasl.log"
-if "!RABBITMQ_SASL_LOGS!"=="" (
- if "!SASL_LOGS!"=="" (
- set RABBITMQ_SASL_LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!-sasl.log
- ) else (
- set RABBITMQ_SASL_LOGS=!SASL_LOGS!
- )
-)
-
-REM [ "x" = "x$RABBITMQ_CTL_ERL_ARGS" ] && RABBITMQ_CTL_ERL_ARGS=${CTL_ERL_ARGS}
-if "!$RABBITMQ_CTL_ERL_ARGS!"=="" (
- if not "!CTL_ERL_ARGS!"=="" (
- set RABBITMQ_CTL_ERL_ARGS=!CTL_ERL_ARGS!
- )
-)
-
-REM ADDITIONAL WINDOWS ONLY CONFIG ITEMS
-REM rabbitmq-plugins.bat
-REM if "!RABBITMQ_SERVICENAME!"=="" (
-REM set RABBITMQ_SERVICENAME=RabbitMQ
-REM )
-
-if "!RABBITMQ_SERVICENAME!"=="" (
- if "!SERVICENAME!"=="" (
- set RABBITMQ_SERVICENAME=RabbitMQ
- ) else (
- set RABBITMQ_SERVICENAME=!SERVICENAME!
- )
-)
-
-REM ##--- End of overridden <var_name> variables
-REM
-REM # Since we source this elsewhere, don't accidentally stop execution
-REM true
+@echo off
+
+REM Scopes the variables to the current batch file
+REM setlocal
+
+rem Preserve values that might contain exclamation marks before
+rem enabling delayed expansion
+set TDP0=%~dp0
+REM setlocal enabledelayedexpansion
+
+REM SCRIPT_DIR=`dirname $SCRIPT_PATH`
+REM RABBITMQ_HOME="${SCRIPT_DIR}/.."
+set SCRIPT_DIR=%TDP0%
+set SCRIPT_NAME=%1
+set RABBITMQ_HOME=%SCRIPT_DIR%..
+
+REM If ERLANG_HOME is not defined, check if "erl.exe" is available in
+REM the path and use that.
+if not defined ERLANG_HOME (
+ for /f "delims=" %%F in ('where.exe erl.exe') do @set ERL_PATH=%%F
+ if exist "!ERL_PATH!" (
+ for /f "delims=" %%F in ("!ERL_PATH!") do set ERL_DIRNAME=%%~dpF
+ for /f "delims=" %%F in ('realpath "!ERL_DIRNAME!\.."') do @set ERLANG_HOME=%%F
+ )
+ set ERL_PATH=
+ set ERL_DIRNAME=
+)
+
+REM ## Set defaults
+REM . ${SCRIPT_DIR}/rabbitmq-defaults
+call "%SCRIPT_DIR%\rabbitmq-defaults.bat"
+
+REM These common defaults aren't referenced in the batch scripts
+REM ## Common defaults
+REM SERVER_ERL_ARGS="+P 1048576"
+REM
+REM # warn about old rabbitmq.conf file, if no new one
+REM if [ -f /etc/rabbitmq/rabbitmq.conf ] && \
+REM [ ! -f ${CONF_ENV_FILE} ] ; then
+REM echo -n "WARNING: ignoring /etc/rabbitmq/rabbitmq.conf -- "
+REM echo "location has moved to ${CONF_ENV_FILE}"
+REM fi
+
+REM ERL_ARGS aren't referenced in the batch scripts
+REM Common defaults
+REM set SERVER_ERL_ARGS=+P 1048576
+
+REM ## Get configuration variables from the configure environment file
+REM [ -f ${CONF_ENV_FILE} ] && . ${CONF_ENV_FILE} || true
+if exist "!RABBITMQ_CONF_ENV_FILE!" (
+ call "!RABBITMQ_CONF_ENV_FILE!"
+)
+
+REM Check for the short names here too
+if "!RABBITMQ_USE_LONGNAME!"=="" (
+ if "!USE_LONGNAME!"=="" (
+ set RABBITMQ_NAME_TYPE="-sname"
+ set NAMETYPE=shortnames
+ )
+)
+
+if "!RABBITMQ_USE_LONGNAME!"=="true" (
+ if "!USE_LONGNAME!"=="true" (
+ set RABBITMQ_NAME_TYPE="-name"
+ set NAMETYPE=longnames
+ )
+)
+
+REM [ "x" = "x$RABBITMQ_NODENAME" ] && RABBITMQ_NODENAME=${NODENAME}
+if "!RABBITMQ_NODENAME!"=="" (
+ if "!NODENAME!"=="" (
+ REM We use Erlang to query the local hostname because
+ REM !COMPUTERNAME! and Erlang may return different results.
+ for /f "delims=" %%F in ('call "%ERLANG_HOME%\bin\erl.exe" -A0 -noinput -boot start_clean -eval "net_kernel:start([list_to_atom(""rabbit-gethostname-"" ++ os:getpid()), %NAMETYPE%]), [_, H] = string:tokens(atom_to_list(node()), ""@""), io:format(""~s~n"", [H]), init:stop()."') do @set HOSTNAME=%%F
+ set RABBITMQ_NODENAME=rabbit@!HOSTNAME!
+ set HOSTNAME=
+ ) else (
+ set RABBITMQ_NODENAME=!NODENAME!
+ )
+)
+set NAMETYPE=
+
+REM
+REM ##--- Set environment vars RABBITMQ_<var_name> to defaults if not set
+REM
+REM DEFAULT_NODE_IP_ADDRESS=auto
+REM DEFAULT_NODE_PORT=5672
+REM [ "x" = "x$RABBITMQ_NODE_IP_ADDRESS" ] && RABBITMQ_NODE_IP_ADDRESS=${NODE_IP_ADDRESS}
+REM [ "x" = "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_NODE_PORT=${NODE_PORT}
+REM [ "x" = "x$RABBITMQ_NODE_IP_ADDRESS" ] && [ "x" != "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_NODE_IP_ADDRESS=${DEFAULT_NODE_IP_ADDRESS}
+REM [ "x" != "x$RABBITMQ_NODE_IP_ADDRESS" ] && [ "x" = "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_NODE_PORT=${DEFAULT_NODE_PORT}
+
+REM if "!RABBITMQ_NODE_IP_ADDRESS!"=="" (
+REM if not "!RABBITMQ_NODE_PORT!"=="" (
+REM set RABBITMQ_NODE_IP_ADDRESS=auto
+REM )
+REM ) else (
+REM if "!RABBITMQ_NODE_PORT!"=="" (
+REM set RABBITMQ_NODE_PORT=5672
+REM )
+REM )
+
+if "!RABBITMQ_NODE_IP_ADDRESS!"=="" (
+ if not "!NODE_IP_ADDRESS!"=="" (
+ set RABBITMQ_NODE_IP_ADDRESS=!NODE_IP_ADDRESS!
+ )
+)
+
+if "!RABBITMQ_NODE_PORT!"=="" (
+ if not "!NODE_PORT!"=="" (
+ set RABBITMQ_NODE_PORT=!NODE_PORT!
+ )
+)
+
+if "!RABBITMQ_NODE_IP_ADDRESS!"=="" (
+ if not "!RABBITMQ_NODE_PORT!"=="" (
+ set RABBITMQ_NODE_IP_ADDRESS=auto
+ )
+) else (
+ if "!RABBITMQ_NODE_PORT!"=="" (
+ set RABBITMQ_NODE_PORT=5672
+ )
+)
+
+REM [ "x" = "x$RABBITMQ_DIST_PORT" ] && RABBITMQ_DIST_PORT=${DIST_PORT}
+REM [ "x" = "x$RABBITMQ_DIST_PORT" ] && [ "x" = "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_DIST_PORT=$((${DEFAULT_NODE_PORT} + 20000))
+REM [ "x" = "x$RABBITMQ_DIST_PORT" ] && [ "x" != "x$RABBITMQ_NODE_PORT" ] && RABBITMQ_DIST_PORT=$((${RABBITMQ_NODE_PORT} + 20000))
+
+if "!RABBITMQ_DIST_PORT!"=="" (
+ if "!DIST_PORT!"=="" (
+ if "!RABBITMQ_NODE_PORT!"=="" (
+ set RABBITMQ_DIST_PORT=25672
+ ) else (
+ set /a RABBITMQ_DIST_PORT=20000+!RABBITMQ_NODE_PORT!
+ )
+ ) else (
+ set RABBITMQ_DIST_PORT=!DIST_PORT!
+ )
+)
+
+REM [ "x" = "x$RABBITMQ_SERVER_ERL_ARGS" ] && RABBITMQ_SERVER_ERL_ARGS=${SERVER_ERL_ARGS}
+REM No Windows equivalent
+
+REM [ "x" = "x$RABBITMQ_CONFIG_FILE" ] && RABBITMQ_CONFIG_FILE=${CONFIG_FILE}
+if "!RABBITMQ_CONFIG_FILE!"=="" (
+ if "!CONFIG_FILE!"=="" (
+ set RABBITMQ_CONFIG_FILE=!RABBITMQ_BASE!\rabbitmq
+ ) else (
+ set RABBITMQ_CONFIG_FILE=!CONFIG_FILE!
+ )
+)
+
+REM [ "x" = "x$RABBITMQ_LOG_BASE" ] && RABBITMQ_LOG_BASE=${LOG_BASE}
+if "!RABBITMQ_LOG_BASE!"=="" (
+ if "!LOG_BASE!"=="" (
+ set RABBITMQ_LOG_BASE=!RABBITMQ_BASE!\log
+ ) else (
+ set RABBITMQ_LOG_BASE=!LOG_BASE!
+ )
+)
+
+REM [ "x" = "x$RABBITMQ_MNESIA_BASE" ] && RABBITMQ_MNESIA_BASE=${MNESIA_BASE}
+if "!RABBITMQ_MNESIA_BASE!"=="" (
+ if "!MNESIA_BASE!"=="" (
+ set RABBITMQ_MNESIA_BASE=!RABBITMQ_BASE!\db
+ ) else (
+ set RABBITMQ_MNESIA_BASE=!MNESIA_BASE!
+ )
+)
+
+REM [ "x" = "x$RABBITMQ_SERVER_START_ARGS" ] && RABBITMQ_SERVER_START_ARGS=${SERVER_START_ARGS}
+REM No Windows equivalent
+
+REM [ "x" = "x$RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS" ] && RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS=${SERVER_ADDITIONAL_ERL_ARGS}
+REM No Windows equivalent
+
+REM [ "x" = "x$RABBITMQ_MNESIA_DIR" ] && RABBITMQ_MNESIA_DIR=${MNESIA_DIR}
+REM [ "x" = "x$RABBITMQ_MNESIA_DIR" ] && RABBITMQ_MNESIA_DIR=${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME}
+if "!RABBITMQ_MNESIA_DIR!"=="" (
+ if "!MNESIA_DIR!"=="" (
+ set RABBITMQ_MNESIA_DIR=!RABBITMQ_MNESIA_BASE!\!RABBITMQ_NODENAME!-mnesia
+ ) else (
+ set RABBITMQ_MNESIA_DIR=!MNESIA_DIR!
+ )
+)
+
+REM [ "x" = "x$RABBITMQ_PID_FILE" ] && RABBITMQ_PID_FILE=${PID_FILE}
+REM [ "x" = "x$RABBITMQ_PID_FILE" ] && RABBITMQ_PID_FILE=${RABBITMQ_MNESIA_DIR}.pid
+REM No Windows equivalent
+
+REM [ "x" = "x$RABBITMQ_BOOT_MODULE" ] && RABBITMQ_BOOT_MODULE=${BOOT_MODULE}
+if "!RABBITMQ_BOOT_MODULE!"=="" (
+ if "!BOOT_MODULE!"=="" (
+ set RABBITMQ_BOOT_MODULE=rabbit
+ ) else (
+ set RABBITMQ_BOOT_MODULE=!BOOT_MODULE!
+ )
+)
+
+REM [ "x" = "x$RABBITMQ_PLUGINS_EXPAND_DIR" ] && RABBITMQ_PLUGINS_EXPAND_DIR=${PLUGINS_EXPAND_DIR}
+REM [ "x" = "x$RABBITMQ_PLUGINS_EXPAND_DIR" ] && RABBITMQ_PLUGINS_EXPAND_DIR=${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME}-plugins-expand
+if "!RABBITMQ_PLUGINS_EXPAND_DIR!"=="" (
+ if "!PLUGINS_EXPAND_DIR!"=="" (
+ set RABBITMQ_PLUGINS_EXPAND_DIR=!RABBITMQ_MNESIA_BASE!\!RABBITMQ_NODENAME!-plugins-expand
+ ) else (
+ set RABBITMQ_PLUGINS_EXPAND_DIR=!PLUGINS_EXPAND_DIR!
+ )
+)
+
+REM [ "x" = "x$RABBITMQ_ENABLED_PLUGINS_FILE" ] && RABBITMQ_ENABLED_PLUGINS_FILE=${ENABLED_PLUGINS_FILE}
+if "!RABBITMQ_ENABLED_PLUGINS_FILE!"=="" (
+ if "!ENABLED_PLUGINS_FILE!"=="" (
+ set RABBITMQ_ENABLED_PLUGINS_FILE=!RABBITMQ_BASE!\enabled_plugins
+ ) else (
+ set RABBITMQ_ENABLED_PLUGINS_FILE=!ENABLED_PLUGINS_FILE!
+ )
+) else (
+ set RABBITMQ_ENABLED_PLUGINS_FILE_source=environment
+)
+
+REM [ "x" = "x$RABBITMQ_PLUGINS_DIR" ] && RABBITMQ_PLUGINS_DIR=${PLUGINS_DIR}
+if "!RABBITMQ_PLUGINS_DIR!"=="" (
+ if "!PLUGINS_DIR!"=="" (
+ set RABBITMQ_PLUGINS_DIR=!RABBITMQ_HOME!\plugins
+ ) else (
+ set RABBITMQ_PLUGINS_DIR=!PLUGINS_DIR!
+ )
+) else (
+ set RABBITMQ_PLUGINS_DIR_source=environment
+)
+
+REM ## Log rotation
+REM [ "x" = "x$RABBITMQ_LOGS" ] && RABBITMQ_LOGS=${LOGS}
+REM [ "x" = "x$RABBITMQ_LOGS" ] && RABBITMQ_LOGS="${RABBITMQ_LOG_BASE}/${RABBITMQ_NODENAME}.log"
+if "!RABBITMQ_LOGS!"=="" (
+ if "!LOGS!"=="" (
+ set RABBITMQ_LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!.log
+ ) else (
+ set RABBITMQ_LOGS=!LOGS!
+ )
+)
+
+REM [ "x" = "x$RABBITMQ_SASL_LOGS" ] && RABBITMQ_SASL_LOGS=${SASL_LOGS}
+REM [ "x" = "x$RABBITMQ_SASL_LOGS" ] && RABBITMQ_SASL_LOGS="${RABBITMQ_LOG_BASE}/${RABBITMQ_NODENAME}-sasl.log"
+if "!RABBITMQ_SASL_LOGS!"=="" (
+ if "!SASL_LOGS!"=="" (
+ set RABBITMQ_SASL_LOGS=!RABBITMQ_LOG_BASE!\!RABBITMQ_NODENAME!-sasl.log
+ ) else (
+ set RABBITMQ_SASL_LOGS=!SASL_LOGS!
+ )
+)
+
+REM [ "x" = "x$RABBITMQ_CTL_ERL_ARGS" ] && RABBITMQ_CTL_ERL_ARGS=${CTL_ERL_ARGS}
+if "!RABBITMQ_CTL_ERL_ARGS!"=="" (
+ if not "!CTL_ERL_ARGS!"=="" (
+ set RABBITMQ_CTL_ERL_ARGS=!CTL_ERL_ARGS!
+ )
+)
+
+REM ADDITIONAL WINDOWS ONLY CONFIG ITEMS
+REM rabbitmq-plugins.bat
+REM if "!RABBITMQ_SERVICENAME!"=="" (
+REM set RABBITMQ_SERVICENAME=RabbitMQ
+REM )
+
+if "!RABBITMQ_SERVICENAME!"=="" (
+ if "!SERVICENAME!"=="" (
+ set RABBITMQ_SERVICENAME=RabbitMQ
+ ) else (
+ set RABBITMQ_SERVICENAME=!SERVICENAME!
+ )
+)
+
+REM Development-specific environment.
+if defined RABBITMQ_DEV_ENV (
+ if "!SCRIPT_NAME!" == "rabbitmq-plugins" (
+ REM We may need to query the running node for the plugins directory
+ REM and the "enabled plugins" file.
+ if not "%RABBITMQ_PLUGINS_DIR_source%" == "environment" (
+ for /f "delims=" %%F in ('!SCRIPT_DIR!\rabbitmqctl eval "{ok, P} = application:get_env(rabbit, plugins_dir), io:format(""~s~n"", [P])."') do @set plugins_dir=%%F
+ if exist "!plugins_dir!" (
+ set RABBITMQ_PLUGINS_DIR=!plugins_dir!
+ )
+ REM set plugins_dir=
+ )
+ if not "%RABBITMQ_ENABLED_PLUGINS_FILE_source%" == "environment" (
+ for /f "delims=" %%F in ('!SCRIPT_DIR!\rabbitmqctl eval "{ok, P} = application:get_env(rabbit, enabled_plugins_file), io:format(""~s~n"", [P])."') do @set enabled_plugins_file=%%F
+ if exist "!enabled_plugins_file!" (
+ set RABBITMQ_ENABLED_PLUGINS_FILE=!enabled_plugins_file!
+ )
+ REM set enabled_plugins_file=
+ )
+ )
+
+ if exist "!RABBITMQ_PLUGINS_DIR!" (
+ REM RabbitMQ was started with "make run-broker" from its own
+ REM source tree. Take rabbit_common from the plugins directory.
+ set ERL_LIBS=!RABBITMQ_PLUGINS_DIR!;!ERL_LIBS!
+ ) else (
+ REM RabbitMQ runs from a testsuite or a plugin. The .ez files are
+ REM not available under RabbitMQ source tree. We need to look at
+ REM $DEPS_DIR and default locations.
+
+ if not "!DEPS_DIR!" == "" (
+ if exist "!DEPS_DIR!\rabbit_common\ebin" (
+ REM $DEPS_DIR is set, and it contains rabbitmq-common, use
+ REM this.
+ set DEPS_DIR_norm=!DEPS_DIR!
+ ) else (
+ if exist "!SCRIPT_DIR!\..\..\..\erlang.mk" (
+ if exist "!SCRIPT_DIR!\..\..\rabbit_common\ebin" (
+ REM Look at default locations: "deps" subdirectory
+ REM inside a plugin or the Umbrella.
+ set DEPS_DIR_norm=!SCRIPT_DIR!\..\..
+ )
+ )
+ )
+ )
+ for /f "delims=" %%F in ('realpath "!DEPS_DIR_norm!"') do @set DEPS_DIR_norm=%%F
+
+ set ERL_LIBS=!DEPS_DIR_norm!;!ERL_LIBS!
+ )
+) else (
+ if exist "!RABBITMQ_PLUGINS_DIR!" (
+ REM RabbitMQ was started from its install directory. Take
+ REM rabbit_common from the plugins directory.
+ set ERL_LIBS=!RABBITMQ_PLUGINS_DIR!;!ERL_LIBS!
+ )
+)
+
+if "!ERL_LIBS!" == ";" (
+ set ERL_LIBS=
+)
+
+REM ##--- End of overridden <var_name> variables
+REM
+REM # Since we source this elsewhere, don't accidentally stop execution
+REM true
diff --git a/scripts/rabbitmq-plugins b/scripts/rabbitmq-plugins
index e8b6c9e92b..d72df8ad86 100755
--- a/scripts/rabbitmq-plugins
+++ b/scripts/rabbitmq-plugins
@@ -19,6 +19,11 @@
# Non-empty defaults should be set in rabbitmq-env
. `dirname $0`/rabbitmq-env
+# Disable erl_crash.dump by default for control scripts.
+if [ -z "$ERL_CRASH_DUMP_SECONDS" ]; then
+ export ERL_CRASH_DUMP_SECONDS=0
+fi
+
RABBITMQ_USE_LONGNAME=${RABBITMQ_USE_LONGNAME} \
exec ${ERL_DIR}erl \
-pa "${RABBITMQ_HOME}/ebin" \
diff --git a/scripts/rabbitmq-plugins.bat b/scripts/rabbitmq-plugins.bat
index 6fb2f4f546..be938f0785 100755..100644
--- a/scripts/rabbitmq-plugins.bat
+++ b/scripts/rabbitmq-plugins.bat
@@ -26,7 +26,7 @@ setlocal enabledelayedexpansion
REM Get default settings with user overrides for (RABBITMQ_)<var_name>
REM Non-empty defaults should be set in rabbitmq-env
-call "!TDP0!\rabbitmq-env.bat"
+call "!TDP0!\rabbitmq-env.bat" %~n0
if not exist "!ERLANG_HOME!\bin\erl.exe" (
echo.
@@ -40,6 +40,11 @@ if not exist "!ERLANG_HOME!\bin\erl.exe" (
exit /B 1
)
+REM Disable erl_crash.dump by default for control scripts.
+if not defined ERL_CRASH_DUMP_SECONDS (
+ set ERL_CRASH_DUMP_SECONDS=0
+)
+
"!ERLANG_HOME!\bin\erl.exe" ^
-pa "!TDP0!..\ebin" ^
-noinput ^
diff --git a/packaging/common/rabbitmq-script-wrapper b/scripts/rabbitmq-script-wrapper
index 79967538e5..ed4c276e53 100644
--- a/packaging/common/rabbitmq-script-wrapper
+++ b/scripts/rabbitmq-script-wrapper
@@ -31,6 +31,10 @@ cd /var/lib/rabbitmq
SCRIPT=`basename $0`
if [ `id -u` = `id -u rabbitmq` -a "$SCRIPT" = "rabbitmq-server" ] ; then
+ RABBITMQ_ENV=/usr/lib/rabbitmq/bin/rabbitmq-env
+ RABBITMQ_SCRIPTS_DIR=$(dirname "$RABBITMQ_ENV")
+ . "$RABBITMQ_ENV"
+
exec /usr/lib/rabbitmq/bin/rabbitmq-server "$@" @STDOUT_STDERR_REDIRECTION@
elif [ `id -u` = `id -u rabbitmq` -o "$SCRIPT" = "rabbitmq-plugins" ] ; then
if [ -f $PWD/.erlang.cookie ] ; then
diff --git a/packaging/common/rabbitmq-server-ha.ocf b/scripts/rabbitmq-server-ha.ocf
index 02f2a1780a..76757b220c 100755
--- a/packaging/common/rabbitmq-server-ha.ocf
+++ b/scripts/rabbitmq-server-ha.ocf
@@ -263,7 +263,7 @@ Erlang cookie file path where the cookie will be put, if requested
Either to use FQDN or a shortname for the rabbitmq node
</longdesc>
<shortdesc lang="en">Use FQDN</shortdesc>
-<content type="boolean" default="${OCF_RESKEY_erlang_cookie_file_default}" />
+<content type="boolean" default="${OCF_RESKEY_use_fqdn_default}" />
</parameter>
<parameter name="max_rabbitmqctl_timeouts" unique="0" required="0">
@@ -592,9 +592,22 @@ check_need_join_to() {
# Update erlang cookie, if it has been specified
update_cookie() {
+ local cookie_file_content
if [ "${OCF_RESKEY_erlang_cookie}" != 'false' ] ; then
- echo "${OCF_RESKEY_erlang_cookie}" > "${OCF_RESKEY_erlang_cookie_file}" && \
- chown ${OCF_RESKEY_username}:${OCF_RESKEY_groupname} "${OCF_RESKEY_erlang_cookie_file}" && \
+ if [ -f "${OCF_RESKEY_erlang_cookie_file}" ]; then
+ # First line of cookie file without newline
+ cookie_file_content=$(head -n1 "${OCF_RESKEY_erlang_cookie_file}" | perl -pe chomp)
+ fi
+ # As there is a brief period of time when the file is empty
+ # (shell redirection has already opened and truncated file,
+ # and echo hasn't finished its job), we are doing this write
+ # only when cookie has changed.
+ if [ "${OCF_RESKEY_erlang_cookie}" != "${cookie_file_content}" ]; then
+ echo "${OCF_RESKEY_erlang_cookie}" > "${OCF_RESKEY_erlang_cookie_file}"
+ fi
+    # And these are idempotent operations, so we don't have to
+ # check any preconditions for running them.
+ chown ${OCF_RESKEY_username}:${OCF_RESKEY_groupname} "${OCF_RESKEY_erlang_cookie_file}"
chmod 600 "${OCF_RESKEY_erlang_cookie_file}"
fi
return $OCF_SUCCESS
diff --git a/scripts/rabbitmq-server.bat b/scripts/rabbitmq-server.bat
index 62da2f6256..c1aa42c65f 100755..100644
--- a/scripts/rabbitmq-server.bat
+++ b/scripts/rabbitmq-server.bat
@@ -25,7 +25,7 @@ setlocal enabledelayedexpansion
REM Get default settings with user overrides for (RABBITMQ_)<var_name>
REM Non-empty defaults should be set in rabbitmq-env
-call "%TDP0%\rabbitmq-env.bat"
+call "%TDP0%\rabbitmq-env.bat" %~n0
if not exist "!ERLANG_HOME!\bin\erl.exe" (
echo.
@@ -72,8 +72,11 @@ if not "!RABBITMQ_NODE_IP_ADDRESS!"=="" (
)
set RABBITMQ_START_RABBIT=
+if "!RABBITMQ_ALLOW_INPUT!"=="" (
+ set RABBITMQ_START_RABBIT=!RABBITMQ_START_RABBIT! -noinput
+)
if "!RABBITMQ_NODE_ONLY!"=="" (
- set RABBITMQ_START_RABBIT=-s "!RABBITMQ_BOOT_MODULE!" boot
+ set RABBITMQ_START_RABBIT=!RABBITMQ_START_RABBIT! -s "!RABBITMQ_BOOT_MODULE!" boot
)
if "!RABBITMQ_IO_THREAD_POOL_SIZE!"=="" (
@@ -82,7 +85,6 @@ if "!RABBITMQ_IO_THREAD_POOL_SIZE!"=="" (
"!ERLANG_HOME!\bin\erl.exe" ^
-pa "!RABBITMQ_EBIN_ROOT!" ^
--noinput ^
-boot start_sasl ^
!RABBITMQ_START_RABBIT! ^
!RABBITMQ_CONFIG_ARG! ^
diff --git a/packaging/common/rabbitmq-server.ocf b/scripts/rabbitmq-server.ocf
index 804e65423d..804e65423d 100755
--- a/packaging/common/rabbitmq-server.ocf
+++ b/scripts/rabbitmq-server.ocf
diff --git a/scripts/rabbitmq-service.bat b/scripts/rabbitmq-service.bat
index afc34b4e83..389dd7eea1 100755..100644
--- a/scripts/rabbitmq-service.bat
+++ b/scripts/rabbitmq-service.bat
@@ -26,7 +26,7 @@ setlocal enabledelayedexpansion
REM Get default settings with user overrides for (RABBITMQ_)<var_name>
REM Non-empty defaults should be set in rabbitmq-env
-call "%TDP0%\rabbitmq-env.bat"
+call "%TDP0%\rabbitmq-env.bat" %~n0
set STARVAR=
shift
@@ -187,6 +187,7 @@ set ERLANG_SERVICE_ARGUMENTS=!ERLANG_SERVICE_ARGUMENTS:"=\"!
"!ERLANG_SERVICE_MANAGER_PATH!\erlsrv" set !RABBITMQ_SERVICENAME! ^
-machine "!ERLANG_SERVICE_MANAGER_PATH!\erl.exe" ^
-env ERL_CRASH_DUMP="!RABBITMQ_BASE:\=/!/erl_crash.dump" ^
+-env ERL_LIBS="!ERL_LIBS!" ^
-workdir "!RABBITMQ_BASE!" ^
-stopaction "rabbit:stop_and_halt()." ^
!RABBITMQ_NAME_TYPE! !RABBITMQ_NODENAME! ^
diff --git a/scripts/rabbitmqctl b/scripts/rabbitmqctl
index 03f8765e27..3705b9a979 100755
--- a/scripts/rabbitmqctl
+++ b/scripts/rabbitmqctl
@@ -19,6 +19,11 @@
# Non-empty defaults should be set in rabbitmq-env
. `dirname $0`/rabbitmq-env
+# Disable erl_crash.dump by default for control scripts.
+if [ -z "$ERL_CRASH_DUMP_SECONDS" ]; then
+ export ERL_CRASH_DUMP_SECONDS=0
+fi
+
# We specify Mnesia dir and sasl error logger since some actions
# (e.g. forget_cluster_node --offline) require us to impersonate the
# real node.
diff --git a/scripts/rabbitmqctl.bat b/scripts/rabbitmqctl.bat
index 9ad855bf87..174f61ba23 100755..100644
--- a/scripts/rabbitmqctl.bat
+++ b/scripts/rabbitmqctl.bat
@@ -26,7 +26,7 @@ setlocal enabledelayedexpansion
REM Get default settings with user overrides for (RABBITMQ_)<var_name>
REM Non-empty defaults should be set in rabbitmq-env
-call "%TDP0%\rabbitmq-env.bat"
+call "%TDP0%\rabbitmq-env.bat" %~n0
if not exist "!ERLANG_HOME!\bin\erl.exe" (
echo.
@@ -40,6 +40,11 @@ if not exist "!ERLANG_HOME!\bin\erl.exe" (
exit /B 1
)
+REM Disable erl_crash.dump by default for control scripts.
+if not defined ERL_CRASH_DUMP_SECONDS (
+ set ERL_CRASH_DUMP_SECONDS=0
+)
+
"!ERLANG_HOME!\bin\erl.exe" ^
-pa "!TDP0!..\ebin" ^
-noinput ^
diff --git a/packaging/common/set_rabbitmq_policy.sh b/scripts/set_rabbitmq_policy.sh
index a88b0c417a..a88b0c417a 100644
--- a/packaging/common/set_rabbitmq_policy.sh
+++ b/scripts/set_rabbitmq_policy.sh
diff --git a/src/app_utils.erl b/src/app_utils.erl
deleted file mode 100644
index bab327eab6..0000000000
--- a/src/app_utils.erl
+++ /dev/null
@@ -1,127 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
--module(app_utils).
-
--export([load_applications/1, start_applications/1, start_applications/2,
- stop_applications/1, stop_applications/2, app_dependency_order/2,
- app_dependencies/1]).
-
--ifdef(use_specs).
-
--type error_handler() :: fun((atom(), any()) -> 'ok').
-
--spec load_applications([atom()]) -> 'ok'.
--spec start_applications([atom()]) -> 'ok'.
--spec stop_applications([atom()]) -> 'ok'.
--spec start_applications([atom()], error_handler()) -> 'ok'.
--spec stop_applications([atom()], error_handler()) -> 'ok'.
--spec app_dependency_order([atom()], boolean()) -> [digraph:vertex()].
--spec app_dependencies(atom()) -> [atom()].
-
--endif.
-
-%%---------------------------------------------------------------------------
-%% Public API
-
-load_applications(Apps) ->
- load_applications(queue:from_list(Apps), sets:new()),
- ok.
-
-start_applications(Apps) ->
- start_applications(
- Apps, fun (App, Reason) ->
- throw({error, {cannot_start_application, App, Reason}})
- end).
-
-stop_applications(Apps) ->
- stop_applications(
- Apps, fun (App, Reason) ->
- throw({error, {cannot_stop_application, App, Reason}})
- end).
-
-start_applications(Apps, ErrorHandler) ->
- manage_applications(fun lists:foldl/3,
- fun application:start/1,
- fun application:stop/1,
- already_started,
- ErrorHandler,
- Apps).
-
-stop_applications(Apps, ErrorHandler) ->
- manage_applications(fun lists:foldr/3,
- fun application:stop/1,
- fun application:start/1,
- not_started,
- ErrorHandler,
- Apps).
-
-app_dependency_order(RootApps, StripUnreachable) ->
- {ok, G} = rabbit_misc:build_acyclic_graph(
- fun ({App, _Deps}) -> [{App, App}] end,
- fun ({App, Deps}) -> [{Dep, App} || Dep <- Deps] end,
- [{App, app_dependencies(App)} ||
- {App, _Desc, _Vsn} <- application:loaded_applications()]),
- try
- case StripUnreachable of
- true -> digraph:del_vertices(G, digraph:vertices(G) --
- digraph_utils:reachable(RootApps, G));
- false -> ok
- end,
- digraph_utils:topsort(G)
- after
- true = digraph:delete(G)
- end.
-
-%%---------------------------------------------------------------------------
-%% Private API
-
-load_applications(Worklist, Loaded) ->
- case queue:out(Worklist) of
- {empty, _WorkList} ->
- ok;
- {{value, App}, Worklist1} ->
- case sets:is_element(App, Loaded) of
- true -> load_applications(Worklist1, Loaded);
- false -> case application:load(App) of
- ok -> ok;
- {error, {already_loaded, App}} -> ok;
- Error -> throw(Error)
- end,
- load_applications(
- queue:join(Worklist1,
- queue:from_list(app_dependencies(App))),
- sets:add_element(App, Loaded))
- end
- end.
-
-app_dependencies(App) ->
- case application:get_key(App, applications) of
- undefined -> [];
- {ok, Lst} -> Lst
- end.
-
-manage_applications(Iterate, Do, Undo, SkipError, ErrorHandler, Apps) ->
- Iterate(fun (App, Acc) ->
- case Do(App) of
- ok -> [App | Acc];
- {error, {SkipError, _}} -> Acc;
- {error, Reason} ->
- lists:foreach(Undo, Acc),
- ErrorHandler(App, Reason)
- end
- end, [], Apps),
- ok.
-
diff --git a/src/background_gc.erl b/src/background_gc.erl
index 0dafde6dc2..6cf397324d 100644
--- a/src/background_gc.erl
+++ b/src/background_gc.erl
@@ -26,6 +26,7 @@
-define(MAX_RATIO, 0.01).
-define(IDEAL_INTERVAL, 60000).
+-define(MAX_INTERVAL, 240000).
-record(state, {last_interval}).
@@ -70,7 +71,7 @@ terminate(_Reason, State) -> State.
interval_gc(State = #state{last_interval = LastInterval}) ->
{ok, Interval} = rabbit_misc:interval_operation(
{?MODULE, gc, []},
- ?MAX_RATIO, ?IDEAL_INTERVAL, LastInterval),
+ ?MAX_RATIO, ?MAX_INTERVAL, ?IDEAL_INTERVAL, LastInterval),
erlang:send_after(Interval, self(), run),
State#state{last_interval = Interval}.
diff --git a/src/credit_flow.erl b/src/credit_flow.erl
deleted file mode 100644
index 8b630929cd..0000000000
--- a/src/credit_flow.erl
+++ /dev/null
@@ -1,196 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(credit_flow).
-
-%% Credit flow is controlled by a credit specification - a
-%% {InitialCredit, MoreCreditAfter} tuple. For the message sender,
-%% credit starts at InitialCredit and is decremented with every
-%% message sent. The message receiver grants more credit to the sender
-%% by sending it a {bump_credit, ...} control message after receiving
-%% MoreCreditAfter messages. The sender should pass this message in to
-%% handle_bump_msg/1. The sender should block when it goes below 0
-%% (check by invoking blocked/0). If a process is both a sender and a
-%% receiver it will not grant any more credit to its senders when it
-%% is itself blocked - thus the only processes that need to check
-%% blocked/0 are ones that read from network sockets.
-%%
-%% Credit flows left to right when process send messags down the
-%% chain, starting at the rabbit_reader, ending at the msg_store:
-%% reader -> channel -> queue_process -> msg_store.
-%%
-%% If the message store has a back log, then it will block the
-%% queue_process, which will block the channel, and finally the reader
-%% will be blocked, throttling down publishers.
-%%
-%% Once a process is unblocked, it will grant credits up the chain,
-%% possibly unblocking other processes:
-%% reader <--grant channel <--grant queue_process <--grant msg_store.
-%%
-%% Grepping the project files for `credit_flow` will reveal the places
-%% where this module is currently used, with extra comments on what's
-%% going on at each instance. Note that credit flow between mirrors
-%% synchronization has not been documented, since this doesn't affect
-%% client publishes.
-
--define(DEFAULT_INITIAL_CREDIT, 200).
--define(DEFAULT_MORE_CREDIT_AFTER, 50).
-
--define(DEFAULT_CREDIT,
- case get(credit_flow_default_credit) of
- undefined ->
- Val = rabbit_misc:get_env(rabbit, credit_flow_default_credit,
- {?DEFAULT_INITIAL_CREDIT,
- ?DEFAULT_MORE_CREDIT_AFTER}),
- put(credit_flow_default_credit, Val),
- Val;
- Val -> Val
- end).
-
--export([send/1, send/2, ack/1, ack/2, handle_bump_msg/1, blocked/0, state/0]).
--export([peer_down/1]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([bump_msg/0]).
-
--opaque(bump_msg() :: {pid(), non_neg_integer()}).
--type(credit_spec() :: {non_neg_integer(), non_neg_integer()}).
-
--spec(send/1 :: (pid()) -> 'ok').
--spec(send/2 :: (pid(), credit_spec()) -> 'ok').
--spec(ack/1 :: (pid()) -> 'ok').
--spec(ack/2 :: (pid(), credit_spec()) -> 'ok').
--spec(handle_bump_msg/1 :: (bump_msg()) -> 'ok').
--spec(blocked/0 :: () -> boolean()).
--spec(peer_down/1 :: (pid()) -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-%% process dict update macro - eliminates the performance-hurting
-%% closure creation a HOF would introduce
--define(UPDATE(Key, Default, Var, Expr),
- begin
- %% We deliberately allow Var to escape from the case here
- %% to be used in Expr. Any temporary var we introduced
- %% would also escape, and might conflict.
- Var = case get(Key) of
- undefined -> Default;
- V -> V
- end,
- put(Key, Expr)
- end).
-
-%% If current process was blocked by credit flow in the last
-%% STATE_CHANGE_INTERVAL milliseconds, state/0 will report it as "in
-%% flow".
--define(STATE_CHANGE_INTERVAL, 1000000).
-
-%%----------------------------------------------------------------------------
-
-%% There are two "flows" here; of messages and of credit, going in
-%% opposite directions. The variable names "From" and "To" refer to
-%% the flow of credit, but the function names refer to the flow of
-%% messages. This is the clearest I can make it (since the function
-%% names form the API and want to make sense externally, while the
-%% variable names are used in credit bookkeeping and want to make
-%% sense internally).
-
-%% For any given pair of processes, ack/2 and send/2 must always be
-%% called with the same credit_spec().
-
-send(From) -> send(From, ?DEFAULT_CREDIT).
-
-send(From, {InitialCredit, _MoreCreditAfter}) ->
- ?UPDATE({credit_from, From}, InitialCredit, C,
- if C == 1 -> block(From),
- 0;
- true -> C - 1
- end).
-
-ack(To) -> ack(To, ?DEFAULT_CREDIT).
-
-ack(To, {_InitialCredit, MoreCreditAfter}) ->
- ?UPDATE({credit_to, To}, MoreCreditAfter, C,
- if C == 1 -> grant(To, MoreCreditAfter),
- MoreCreditAfter;
- true -> C - 1
- end).
-
-handle_bump_msg({From, MoreCredit}) ->
- ?UPDATE({credit_from, From}, 0, C,
- if C =< 0 andalso C + MoreCredit > 0 -> unblock(From),
- C + MoreCredit;
- true -> C + MoreCredit
- end).
-
-blocked() -> case get(credit_blocked) of
- undefined -> false;
- [] -> false;
- _ -> true
- end.
-
-state() -> case blocked() of
- true -> flow;
- false -> case get(credit_blocked_at) of
- undefined -> running;
- B -> Diff = timer:now_diff(erlang:now(), B),
- case Diff < ?STATE_CHANGE_INTERVAL of
- true -> flow;
- false -> running
- end
- end
- end.
-
-peer_down(Peer) ->
- %% In theory we could also remove it from credit_deferred here, but it
- %% doesn't really matter; at some point later we will drain
- %% credit_deferred and thus send messages into the void...
- unblock(Peer),
- erase({credit_from, Peer}),
- erase({credit_to, Peer}),
- ok.
-
-%% --------------------------------------------------------------------------
-
-grant(To, Quantity) ->
- Msg = {bump_credit, {self(), Quantity}},
- case blocked() of
- false -> To ! Msg;
- true -> ?UPDATE(credit_deferred, [], Deferred, [{To, Msg} | Deferred])
- end.
-
-block(From) ->
- case blocked() of
- false -> put(credit_blocked_at, erlang:now());
- true -> ok
- end,
- ?UPDATE(credit_blocked, [], Blocks, [From | Blocks]).
-
-unblock(From) ->
- ?UPDATE(credit_blocked, [], Blocks, Blocks -- [From]),
- case blocked() of
- false -> case erase(credit_deferred) of
- undefined -> ok;
- Credits -> _ = [To ! Msg || {To, Msg} <- Credits],
- ok
- end;
- true -> ok
- end.
diff --git a/src/file_handle_cache.erl b/src/file_handle_cache.erl
index c65b6f21b1..a7dfd748ac 100644
--- a/src/file_handle_cache.erl
+++ b/src/file_handle_cache.erl
@@ -175,6 +175,7 @@
-record(handle,
{ hdl,
+ ref,
offset,
is_dirty,
write_buffer_size,
@@ -536,12 +537,15 @@ clear(Ref) ->
end).
set_maximum_since_use(MaximumAge) ->
- Now = now(),
+ Now = time_compat:monotonic_time(),
case lists:foldl(
fun ({{Ref, fhc_handle},
Handle = #handle { hdl = Hdl, last_used_at = Then }}, Rep) ->
case Hdl =/= closed andalso
- timer:now_diff(Now, Then) >= MaximumAge of
+ time_compat:convert_time_unit(Now - Then,
+ native,
+ micro_seconds)
+ >= MaximumAge of
true -> soft_close(Ref, Handle) orelse Rep;
false -> Rep
end;
@@ -710,7 +714,8 @@ get_or_reopen(RefNewOrReopens) ->
{OpenHdls, []} ->
{ok, [Handle || {_Ref, Handle} <- OpenHdls]};
{OpenHdls, ClosedHdls} ->
- Oldest = oldest(get_age_tree(), fun () -> now() end),
+ Oldest = oldest(get_age_tree(),
+ fun () -> time_compat:monotonic_time() end),
case gen_server2:call(?SERVER, {open, self(), length(ClosedHdls),
Oldest}, infinity) of
ok ->
@@ -746,14 +751,14 @@ reopen([{Ref, NewOrReopen, Handle = #handle { hdl = closed,
end,
case prim_file:open(Path, Mode) of
{ok, Hdl} ->
- Now = now(),
+ Now = time_compat:monotonic_time(),
{{ok, _Offset}, Handle1} =
maybe_seek(Offset, reset_read_buffer(
Handle#handle{hdl = Hdl,
offset = 0,
last_used_at = Now})),
put({Ref, fhc_handle}, Handle1),
- reopen(RefNewOrReopenHdls, gb_trees:insert(Now, Ref, Tree),
+ reopen(RefNewOrReopenHdls, gb_trees:insert({Now, Ref}, true, Tree),
[{Ref, Handle1} | RefHdls]);
Error ->
%% NB: none of the handles in ToOpen are in the age tree
@@ -782,7 +787,7 @@ sort_handles([{Ref, _} | RefHdls], RefHdlsA, [{Ref, Handle} | RefHdlsB], Acc) ->
sort_handles(RefHdls, RefHdlsA, RefHdlsB, [Handle | Acc]).
put_handle(Ref, Handle = #handle { last_used_at = Then }) ->
- Now = now(),
+ Now = time_compat:monotonic_time(),
age_tree_update(Then, Now, Ref),
put({Ref, fhc_handle}, Handle #handle { last_used_at = Now }).
@@ -799,13 +804,14 @@ put_age_tree(Tree) -> put(fhc_age_tree, Tree).
age_tree_update(Then, Now, Ref) ->
with_age_tree(
fun (Tree) ->
- gb_trees:insert(Now, Ref, gb_trees:delete_any(Then, Tree))
+ gb_trees:insert({Now, Ref}, true,
+ gb_trees:delete_any({Then, Ref}, Tree))
end).
-age_tree_delete(Then) ->
+age_tree_delete(Then, Ref) ->
with_age_tree(
fun (Tree) ->
- Tree1 = gb_trees:delete_any(Then, Tree),
+ Tree1 = gb_trees:delete_any({Then, Ref}, Tree),
Oldest = oldest(Tree1, fun () -> undefined end),
gen_server2:cast(?SERVER, {close, self(), Oldest}),
Tree1
@@ -816,7 +822,7 @@ age_tree_change() ->
fun (Tree) ->
case gb_trees:is_empty(Tree) of
true -> Tree;
- false -> {Oldest, _Ref} = gb_trees:smallest(Tree),
+ false -> {{Oldest, _Ref}, _} = gb_trees:smallest(Tree),
gen_server2:cast(?SERVER, {update, self(), Oldest}),
Tree
end
@@ -825,7 +831,7 @@ age_tree_change() ->
oldest(Tree, DefaultFun) ->
case gb_trees:is_empty(Tree) of
true -> DefaultFun();
- false -> {Oldest, _Ref} = gb_trees:smallest(Tree),
+ false -> {{Oldest, _Ref}, _} = gb_trees:smallest(Tree),
Oldest
end.
@@ -851,6 +857,7 @@ new_closed_handle(Path, Mode, Options) ->
end,
Ref = make_ref(),
put({Ref, fhc_handle}, #handle { hdl = closed,
+ ref = Ref,
offset = 0,
is_dirty = false,
write_buffer_size = 0,
@@ -885,6 +892,7 @@ soft_close(Handle = #handle { hdl = closed }) ->
soft_close(Handle) ->
case write_buffer(Handle) of
{ok, #handle { hdl = Hdl,
+ ref = Ref,
is_dirty = IsDirty,
last_used_at = Then } = Handle1 } ->
ok = case IsDirty of
@@ -892,7 +900,7 @@ soft_close(Handle) ->
false -> ok
end,
ok = prim_file:close(Hdl),
- age_tree_delete(Then),
+ age_tree_delete(Then, Ref),
{ok, Handle1 #handle { hdl = closed,
is_dirty = false,
last_used_at = undefined }};
@@ -1421,17 +1429,19 @@ reduce(State = #fhc_state { open_pending = OpenPending,
elders = Elders,
clients = Clients,
timer_ref = TRef }) ->
- Now = now(),
+ Now = time_compat:monotonic_time(),
{CStates, Sum, ClientCount} =
ets:foldl(fun ({Pid, Eldest}, {CStatesAcc, SumAcc, CountAcc} = Accs) ->
[#cstate { pending_closes = PendingCloses,
opened = Opened,
blocked = Blocked } = CState] =
ets:lookup(Clients, Pid),
+ TimeDiff = time_compat:convert_time_unit(
+ Now - Eldest, native, micro_seconds),
case Blocked orelse PendingCloses =:= Opened of
true -> Accs;
false -> {[CState | CStatesAcc],
- SumAcc + timer:now_diff(Now, Eldest),
+ SumAcc + TimeDiff,
CountAcc + 1}
end
end, {[], 0, 0}, Elders),
diff --git a/src/file_handle_cache_stats.erl b/src/file_handle_cache_stats.erl
index 5f6926b5d2..b54d22ef61 100644
--- a/src/file_handle_cache_stats.erl
+++ b/src/file_handle_cache_stats.erl
@@ -58,10 +58,9 @@ update(Op) ->
get() ->
lists:sort(ets:tab2list(?TABLE)).
-%% TODO timer:tc/1 was introduced in R14B03; use that function once we
-%% require that version.
timer_tc(Thunk) ->
- T1 = os:timestamp(),
+ T1 = time_compat:monotonic_time(),
Res = Thunk(),
- T2 = os:timestamp(),
- {timer:now_diff(T2, T1), Res}.
+ T2 = time_compat:monotonic_time(),
+ Diff = time_compat:convert_time_unit(T2 - T1, native, micro_seconds),
+ {Diff, Res}.
diff --git a/src/gen_server2.erl b/src/gen_server2.erl
deleted file mode 100644
index ffc075da7f..0000000000
--- a/src/gen_server2.erl
+++ /dev/null
@@ -1,1357 +0,0 @@
-%% This file is a copy of gen_server.erl from the R13B-1 Erlang/OTP
-%% distribution, with the following modifications:
-%%
-%% 1) the module name is gen_server2
-%%
-%% 2) more efficient handling of selective receives in callbacks
-%% gen_server2 processes drain their message queue into an internal
-%% buffer before invoking any callback module functions. Messages are
-%% dequeued from the buffer for processing. Thus the effective message
-%% queue of a gen_server2 process is the concatenation of the internal
-%% buffer and the real message queue.
-%% As a result of the draining, any selective receive invoked inside a
-%% callback is less likely to have to scan a large message queue.
-%%
-%% 3) gen_server2:cast is guaranteed to be order-preserving
-%% The original code could reorder messages when communicating with a
-%% process on a remote node that was not currently connected.
-%%
-%% 4) The callback module can optionally implement prioritise_call/4,
-%% prioritise_cast/3 and prioritise_info/3. These functions take
-%% Message, From, Length and State or just Message, Length and State
-%% (where Length is the current number of messages waiting to be
-%% processed) and return a single integer representing the priority
-%% attached to the message, or 'drop' to ignore it (for
-%% prioritise_cast/3 and prioritise_info/3 only). Messages with
-%% higher priorities are processed before requests with lower
-%% priorities. The default priority is 0.
-%%
-%% 5) The callback module can optionally implement
-%% handle_pre_hibernate/1 and handle_post_hibernate/1. These will be
-%% called immediately prior to and post hibernation, respectively. If
-%% handle_pre_hibernate returns {hibernate, NewState} then the process
-%% will hibernate. If the module does not implement
-%% handle_pre_hibernate/1 then the default action is to hibernate.
-%%
-%% 6) init can return a 4th arg, {backoff, InitialTimeout,
-%% MinimumTimeout, DesiredHibernatePeriod} (all in milliseconds,
-%% 'infinity' does not make sense here). Then, on all callbacks which
-%% can return a timeout (including init), timeout can be
-%% 'hibernate'. When this is the case, the current timeout value will
-%% be used (initially, the InitialTimeout supplied from init). After
-%% this timeout has occurred, hibernation will occur as normal. Upon
-%% awaking, a new current timeout value will be calculated.
-%%
-%% The purpose is that the gen_server2 takes care of adjusting the
-%% current timeout value such that the process will increase the
-%% timeout value repeatedly if it is unable to sleep for the
-%% DesiredHibernatePeriod. If it is able to sleep for the
-%% DesiredHibernatePeriod it will decrease the current timeout down to
-%% the MinimumTimeout, so that the process is put to sleep sooner (and
-%% hopefully stays asleep for longer). In short, should a process
-%% using this receive a burst of messages, it should not hibernate
-%% between those messages, but as the messages become less frequent,
-%% the process will not only hibernate, it will do so sooner after
-%% each message.
-%%
-%% When using this backoff mechanism, normal timeout values (i.e. not
-%% 'hibernate') can still be used, and if they are used then the
-%% handle_info(timeout, State) will be called as normal. In this case,
-%% returning 'hibernate' from handle_info(timeout, State) will not
-%% hibernate the process immediately, as it would if backoff wasn't
-%% being used. Instead it'll wait for the current timeout as described
-%% above.
-%%
-%% 7) The callback module can return from any of the handle_*
-%% functions, a {become, Module, State} triple, or a {become, Module,
-%% State, Timeout} quadruple. This allows the gen_server to
-%% dynamically change the callback module. The State is the new state
-%% which will be passed into any of the callback functions in the new
-%% module. Note there is no form also encompassing a reply, thus if
-%% you wish to reply in handle_call/3 and change the callback module,
-%% you need to use gen_server2:reply/2 to issue the reply
-%% manually. The init function can similarly return a 5th argument,
-%% Module, in order to dynamically decide the callback module on init.
-%%
-%% 8) The callback module can optionally implement
-%% format_message_queue/2 which is the equivalent of format_status/2
-%% but where the second argument is specifically the priority_queue
-%% which contains the prioritised message_queue.
-%%
-%% 9) The function with_state/2 can be used to debug a process with
-%% heavyweight state (without needing to copy the entire state out of
-%% process as sys:get_status/1 would). Pass through a function which
-%% can be invoked on the state, get back the result. The state is not
-%% modified.
-%%
-%% 10) an mcall/1 function has been added for performing multiple
-%% call/3 in parallel. Unlike multi_call, which sends the same request
-%% to same-named processes residing on a supplied list of nodes, it
-%% operates on name/request pairs, where name is anything accepted by
-%% call/3, i.e. a pid, global name, local name, or local name on a
-%% particular node.
-%%
-
-%% All modifications are (C) 2009-2013 GoPivotal, Inc.
-
-%% ``The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved via the world wide web at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
-%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
-%% AB. All Rights Reserved.''
-%%
-%% $Id$
-%%
--module(gen_server2).
-
-%%% ---------------------------------------------------
-%%%
-%%% The idea behind THIS server is that the user module
-%%% provides (different) functions to handle different
-%%% kind of inputs.
-%%% If the Parent process terminates the Module:terminate/2
-%%% function is called.
-%%%
-%%% The user module should export:
-%%%
-%%% init(Args)
-%%% ==> {ok, State}
-%%% {ok, State, Timeout}
-%%% {ok, State, Timeout, Backoff}
-%%% {ok, State, Timeout, Backoff, Module}
-%%% ignore
-%%% {stop, Reason}
-%%%
-%%% handle_call(Msg, {From, Tag}, State)
-%%%
-%%% ==> {reply, Reply, State}
-%%% {reply, Reply, State, Timeout}
-%%% {noreply, State}
-%%% {noreply, State, Timeout}
-%%% {stop, Reason, Reply, State}
-%%% Reason = normal | shutdown | Term terminate(State) is called
-%%%
-%%% handle_cast(Msg, State)
-%%%
-%%% ==> {noreply, State}
-%%% {noreply, State, Timeout}
-%%% {stop, Reason, State}
-%%% Reason = normal | shutdown | Term terminate(State) is called
-%%%
-%%% handle_info(Info, State) Info is e.g. {'EXIT', P, R}, {nodedown, N}, ...
-%%%
-%%% ==> {noreply, State}
-%%% {noreply, State, Timeout}
-%%% {stop, Reason, State}
-%%% Reason = normal | shutdown | Term, terminate(State) is called
-%%%
-%%% terminate(Reason, State) Let the user module clean up
-%%% Reason = normal | shutdown | {shutdown, Term} | Term
-%%% always called when server terminates
-%%%
-%%% ==> ok | Term
-%%%
-%%% handle_pre_hibernate(State)
-%%%
-%%% ==> {hibernate, State}
-%%% {stop, Reason, State}
-%%% Reason = normal | shutdown | Term, terminate(State) is called
-%%%
-%%% handle_post_hibernate(State)
-%%%
-%%% ==> {noreply, State}
-%%% {stop, Reason, State}
-%%% Reason = normal | shutdown | Term, terminate(State) is called
-%%%
-%%% The work flow (of the server) can be described as follows:
-%%%
-%%% User module Generic
-%%% ----------- -------
-%%% start -----> start
-%%% init <----- .
-%%%
-%%% loop
-%%% handle_call <----- .
-%%% -----> reply
-%%%
-%%% handle_cast <----- .
-%%%
-%%% handle_info <----- .
-%%%
-%%% terminate <----- .
-%%%
-%%% -----> reply
-%%%
-%%%
-%%% ---------------------------------------------------
-
-%% API
--export([start/3, start/4,
- start_link/3, start_link/4,
- call/2, call/3,
- cast/2, reply/2,
- abcast/2, abcast/3,
- multi_call/2, multi_call/3, multi_call/4,
- mcall/1,
- with_state/2,
- enter_loop/3, enter_loop/4, enter_loop/5, enter_loop/6, wake_hib/1]).
-
-%% System exports
--export([system_continue/3,
- system_terminate/4,
- system_code_change/4,
- format_status/2]).
-
-%% Internal exports
--export([init_it/6]).
-
--import(error_logger, [format/2]).
-
-%% State record
--record(gs2_state, {parent, name, state, mod, time,
- timeout_state, queue, debug, prioritisers}).
-
--ifdef(use_specs).
-
-%%%=========================================================================
-%%% Specs. These exist only to shut up dialyzer's warnings
-%%%=========================================================================
-
--type(gs2_state() :: #gs2_state{}).
-
--spec(handle_common_termination/3 ::
- (any(), atom(), gs2_state()) -> no_return()).
--spec(hibernate/1 :: (gs2_state()) -> no_return()).
--spec(pre_hibernate/1 :: (gs2_state()) -> no_return()).
--spec(system_terminate/4 :: (_, _, _, gs2_state()) -> no_return()).
-
--type(millis() :: non_neg_integer()).
-
-%%%=========================================================================
-%%% API
-%%%=========================================================================
-
--callback init(Args :: term()) ->
- {ok, State :: term()} |
- {ok, State :: term(), timeout() | hibernate} |
- {ok, State :: term(), timeout() | hibernate,
- {backoff, millis(), millis(), millis()}} |
- {ok, State :: term(), timeout() | hibernate,
- {backoff, millis(), millis(), millis()}, atom()} |
- ignore |
- {stop, Reason :: term()}.
--callback handle_call(Request :: term(), From :: {pid(), Tag :: term()},
- State :: term()) ->
- {reply, Reply :: term(), NewState :: term()} |
- {reply, Reply :: term(), NewState :: term(), timeout() | hibernate} |
- {noreply, NewState :: term()} |
- {noreply, NewState :: term(), timeout() | hibernate} |
- {stop, Reason :: term(),
- Reply :: term(), NewState :: term()}.
--callback handle_cast(Request :: term(), State :: term()) ->
- {noreply, NewState :: term()} |
- {noreply, NewState :: term(), timeout() | hibernate} |
- {stop, Reason :: term(), NewState :: term()}.
--callback handle_info(Info :: term(), State :: term()) ->
- {noreply, NewState :: term()} |
- {noreply, NewState :: term(), timeout() | hibernate} |
- {stop, Reason :: term(), NewState :: term()}.
--callback terminate(Reason :: (normal | shutdown | {shutdown, term()} | term()),
- State :: term()) ->
- ok | term().
--callback code_change(OldVsn :: (term() | {down, term()}), State :: term(),
- Extra :: term()) ->
- {ok, NewState :: term()} | {error, Reason :: term()}.
-
-%% It's not possible to define "optional" -callbacks, so putting specs
-%% for handle_pre_hibernate/1 and handle_post_hibernate/1 will result
-%% in warnings (the same applied for the behaviour_info before).
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
- [{init,1},{handle_call,3},{handle_cast,2},{handle_info,2},
- {terminate,2},{code_change,3}];
-behaviour_info(_Other) ->
- undefined.
-
--endif.
-
-%%% -----------------------------------------------------------------
-%%% Starts a generic server.
-%%% start(Mod, Args, Options)
-%%% start(Name, Mod, Args, Options)
-%%% start_link(Mod, Args, Options)
-%%% start_link(Name, Mod, Args, Options) where:
-%%% Name ::= {local, atom()} | {global, atom()}
-%%% Mod ::= atom(), callback module implementing the 'real' server
-%%% Args ::= term(), init arguments (to Mod:init/1)
-%%% Options ::= [{timeout, Timeout} | {debug, [Flag]}]
-%%% Flag ::= trace | log | {logfile, File} | statistics | debug
-%%% (debug == log && statistics)
-%%% Returns: {ok, Pid} |
-%%% {error, {already_started, Pid}} |
-%%% {error, Reason}
-%%% -----------------------------------------------------------------
-start(Mod, Args, Options) ->
- gen:start(?MODULE, nolink, Mod, Args, Options).
-
-start(Name, Mod, Args, Options) ->
- gen:start(?MODULE, nolink, Name, Mod, Args, Options).
-
-start_link(Mod, Args, Options) ->
- gen:start(?MODULE, link, Mod, Args, Options).
-
-start_link(Name, Mod, Args, Options) ->
- gen:start(?MODULE, link, Name, Mod, Args, Options).
-
-
-%% -----------------------------------------------------------------
-%% Make a call to a generic server.
-%% If the server is located at another node, that node will
-%% be monitored.
-%% If the client is trapping exits and is linked server termination
-%% is handled here (? Shall we do that here (or rely on timeouts) ?).
-%% -----------------------------------------------------------------
-call(Name, Request) ->
- case catch gen:call(Name, '$gen_call', Request) of
- {ok,Res} ->
- Res;
- {'EXIT',Reason} ->
- exit({Reason, {?MODULE, call, [Name, Request]}})
- end.
-
-call(Name, Request, Timeout) ->
- case catch gen:call(Name, '$gen_call', Request, Timeout) of
- {ok,Res} ->
- Res;
- {'EXIT',Reason} ->
- exit({Reason, {?MODULE, call, [Name, Request, Timeout]}})
- end.
-
-%% -----------------------------------------------------------------
-%% Make a cast to a generic server.
-%% -----------------------------------------------------------------
-cast({global,Name}, Request) ->
- catch global:send(Name, cast_msg(Request)),
- ok;
-cast({Name,Node}=Dest, Request) when is_atom(Name), is_atom(Node) ->
- do_cast(Dest, Request);
-cast(Dest, Request) when is_atom(Dest) ->
- do_cast(Dest, Request);
-cast(Dest, Request) when is_pid(Dest) ->
- do_cast(Dest, Request).
-
-do_cast(Dest, Request) ->
- do_send(Dest, cast_msg(Request)),
- ok.
-
-cast_msg(Request) -> {'$gen_cast',Request}.
-
-%% -----------------------------------------------------------------
-%% Send a reply to the client.
-%% -----------------------------------------------------------------
-reply({To, Tag}, Reply) ->
- catch To ! {Tag, Reply}.
-
-%% -----------------------------------------------------------------
-%% Asyncronous broadcast, returns nothing, it's just send'n pray
-%% -----------------------------------------------------------------
-abcast(Name, Request) when is_atom(Name) ->
- do_abcast([node() | nodes()], Name, cast_msg(Request)).
-
-abcast(Nodes, Name, Request) when is_list(Nodes), is_atom(Name) ->
- do_abcast(Nodes, Name, cast_msg(Request)).
-
-do_abcast([Node|Nodes], Name, Msg) when is_atom(Node) ->
- do_send({Name,Node},Msg),
- do_abcast(Nodes, Name, Msg);
-do_abcast([], _,_) -> abcast.
-
-%%% -----------------------------------------------------------------
-%%% Make a call to servers at several nodes.
-%%% Returns: {[Replies],[BadNodes]}
-%%% A Timeout can be given
-%%%
-%%% A middleman process is used in case late answers arrives after
-%%% the timeout. If they would be allowed to glog the callers message
-%%% queue, it would probably become confused. Late answers will
-%%% now arrive to the terminated middleman and so be discarded.
-%%% -----------------------------------------------------------------
-multi_call(Name, Req)
- when is_atom(Name) ->
- do_multi_call([node() | nodes()], Name, Req, infinity).
-
-multi_call(Nodes, Name, Req)
- when is_list(Nodes), is_atom(Name) ->
- do_multi_call(Nodes, Name, Req, infinity).
-
-multi_call(Nodes, Name, Req, infinity) ->
- do_multi_call(Nodes, Name, Req, infinity);
-multi_call(Nodes, Name, Req, Timeout)
- when is_list(Nodes), is_atom(Name), is_integer(Timeout), Timeout >= 0 ->
- do_multi_call(Nodes, Name, Req, Timeout).
-
-%%% -----------------------------------------------------------------
-%%% Make multiple calls to multiple servers, given pairs of servers
-%%% and messages.
-%%% Returns: {[{Dest, Reply}], [{Dest, Error}]}
-%%%
-%%% Dest can be pid() | RegName :: atom() |
-%%% {Name :: atom(), Node :: atom()} | {global, Name :: atom()}
-%%%
-%%% A middleman process is used to avoid clogging up the callers
-%%% message queue.
-%%% -----------------------------------------------------------------
-mcall(CallSpecs) ->
- Tag = make_ref(),
- {_, MRef} = spawn_monitor(
- fun() ->
- Refs = lists:foldl(
- fun ({Dest, _Request}=S, Dict) ->
- dict:store(do_mcall(S), Dest, Dict)
- end, dict:new(), CallSpecs),
- collect_replies(Tag, Refs, [], [])
- end),
- receive
- {'DOWN', MRef, _, _, {Tag, Result}} -> Result;
- {'DOWN', MRef, _, _, Reason} -> exit(Reason)
- end.
-
-do_mcall({{global,Name}=Dest, Request}) ->
- %% whereis_name is simply an ets lookup, and is precisely what
- %% global:send/2 does, yet we need a Ref to put in the call to the
- %% server, so invoking whereis_name makes a lot more sense here.
- case global:whereis_name(Name) of
- Pid when is_pid(Pid) ->
- MRef = erlang:monitor(process, Pid),
- catch msend(Pid, MRef, Request),
- MRef;
- undefined ->
- Ref = make_ref(),
- self() ! {'DOWN', Ref, process, Dest, noproc},
- Ref
- end;
-do_mcall({{Name,Node}=Dest, Request}) when is_atom(Name), is_atom(Node) ->
- {_Node, MRef} = start_monitor(Node, Name), %% NB: we don't handle R6
- catch msend(Dest, MRef, Request),
- MRef;
-do_mcall({Dest, Request}) when is_atom(Dest); is_pid(Dest) ->
- MRef = erlang:monitor(process, Dest),
- catch msend(Dest, MRef, Request),
- MRef.
-
-msend(Dest, MRef, Request) ->
- erlang:send(Dest, {'$gen_call', {self(), MRef}, Request}, [noconnect]).
-
-collect_replies(Tag, Refs, Replies, Errors) ->
- case dict:size(Refs) of
- 0 -> exit({Tag, {Replies, Errors}});
- _ -> receive
- {MRef, Reply} ->
- {Refs1, Replies1} = handle_call_result(MRef, Reply,
- Refs, Replies),
- collect_replies(Tag, Refs1, Replies1, Errors);
- {'DOWN', MRef, _, _, Reason} ->
- Reason1 = case Reason of
- noconnection -> nodedown;
- _ -> Reason
- end,
- {Refs1, Errors1} = handle_call_result(MRef, Reason1,
- Refs, Errors),
- collect_replies(Tag, Refs1, Replies, Errors1)
- end
- end.
-
-handle_call_result(MRef, Result, Refs, AccList) ->
- %% we avoid the mailbox scanning cost of a call to erlang:demonitor/{1,2}
- %% here, so we must cope with MRefs that we've already seen and erased
- case dict:find(MRef, Refs) of
- {ok, Pid} -> {dict:erase(MRef, Refs), [{Pid, Result}|AccList]};
- _ -> {Refs, AccList}
- end.
-
-%% -----------------------------------------------------------------
-%% Apply a function to a generic server's state.
-%% -----------------------------------------------------------------
-with_state(Name, Fun) ->
- case catch gen:call(Name, '$with_state', Fun, infinity) of
- {ok,Res} ->
- Res;
- {'EXIT',Reason} ->
- exit({Reason, {?MODULE, with_state, [Name, Fun]}})
- end.
-
-%%-----------------------------------------------------------------
-%% enter_loop(Mod, Options, State, <ServerName>, <TimeOut>, <Backoff>) ->_
-%%
-%% Description: Makes an existing process into a gen_server.
-%% The calling process will enter the gen_server receive
-%% loop and become a gen_server process.
-%% The process *must* have been started using one of the
-%% start functions in proc_lib, see proc_lib(3).
-%% The user is responsible for any initialization of the
-%% process, including registering a name for it.
-%%-----------------------------------------------------------------
-enter_loop(Mod, Options, State) ->
- enter_loop(Mod, Options, State, self(), infinity, undefined).
-
-enter_loop(Mod, Options, State, Backoff = {backoff, _, _ , _}) ->
- enter_loop(Mod, Options, State, self(), infinity, Backoff);
-
-enter_loop(Mod, Options, State, ServerName = {_, _}) ->
- enter_loop(Mod, Options, State, ServerName, infinity, undefined);
-
-enter_loop(Mod, Options, State, Timeout) ->
- enter_loop(Mod, Options, State, self(), Timeout, undefined).
-
-enter_loop(Mod, Options, State, ServerName, Backoff = {backoff, _, _, _}) ->
- enter_loop(Mod, Options, State, ServerName, infinity, Backoff);
-
-enter_loop(Mod, Options, State, ServerName, Timeout) ->
- enter_loop(Mod, Options, State, ServerName, Timeout, undefined).
-
-enter_loop(Mod, Options, State, ServerName, Timeout, Backoff) ->
- Name = get_proc_name(ServerName),
- Parent = get_parent(),
- Debug = debug_options(Name, Options),
- Queue = priority_queue:new(),
- Backoff1 = extend_backoff(Backoff),
- loop(find_prioritisers(
- #gs2_state { parent = Parent, name = Name, state = State,
- mod = Mod, time = Timeout, timeout_state = Backoff1,
- queue = Queue, debug = Debug })).
-
-%%%========================================================================
-%%% Gen-callback functions
-%%%========================================================================
-
-%%% ---------------------------------------------------
-%%% Initiate the new process.
-%%% Register the name using the Rfunc function
-%%% Calls the Mod:init/Args function.
-%%% Finally an acknowledge is sent to Parent and the main
-%%% loop is entered.
-%%% ---------------------------------------------------
-init_it(Starter, self, Name, Mod, Args, Options) ->
- init_it(Starter, self(), Name, Mod, Args, Options);
-init_it(Starter, Parent, Name0, Mod, Args, Options) ->
- Name = name(Name0),
- Debug = debug_options(Name, Options),
- Queue = priority_queue:new(),
- GS2State = find_prioritisers(
- #gs2_state { parent = Parent,
- name = Name,
- mod = Mod,
- queue = Queue,
- debug = Debug }),
- case catch Mod:init(Args) of
- {ok, State} ->
- proc_lib:init_ack(Starter, {ok, self()}),
- loop(GS2State #gs2_state { state = State,
- time = infinity,
- timeout_state = undefined });
- {ok, State, Timeout} ->
- proc_lib:init_ack(Starter, {ok, self()}),
- loop(GS2State #gs2_state { state = State,
- time = Timeout,
- timeout_state = undefined });
- {ok, State, Timeout, Backoff = {backoff, _, _, _}} ->
- Backoff1 = extend_backoff(Backoff),
- proc_lib:init_ack(Starter, {ok, self()}),
- loop(GS2State #gs2_state { state = State,
- time = Timeout,
- timeout_state = Backoff1 });
- {ok, State, Timeout, Backoff = {backoff, _, _, _}, Mod1} ->
- Backoff1 = extend_backoff(Backoff),
- proc_lib:init_ack(Starter, {ok, self()}),
- loop(find_prioritisers(
- GS2State #gs2_state { mod = Mod1,
- state = State,
- time = Timeout,
- timeout_state = Backoff1 }));
- {stop, Reason} ->
- %% For consistency, we must make sure that the
- %% registered name (if any) is unregistered before
- %% the parent process is notified about the failure.
- %% (Otherwise, the parent process could get
- %% an 'already_started' error if it immediately
- %% tried starting the process again.)
- unregister_name(Name0),
- proc_lib:init_ack(Starter, {error, Reason}),
- exit(Reason);
- ignore ->
- unregister_name(Name0),
- proc_lib:init_ack(Starter, ignore),
- exit(normal);
- {'EXIT', Reason} ->
- unregister_name(Name0),
- proc_lib:init_ack(Starter, {error, Reason}),
- exit(Reason);
- Else ->
- Error = {bad_return_value, Else},
- proc_lib:init_ack(Starter, {error, Error}),
- exit(Error)
- end.
-
-name({local,Name}) -> Name;
-name({global,Name}) -> Name;
-%% name(Pid) when is_pid(Pid) -> Pid;
-%% when R12 goes away, drop the line beneath and uncomment the line above
-name(Name) -> Name.
-
-unregister_name({local,Name}) ->
- _ = (catch unregister(Name));
-unregister_name({global,Name}) ->
- _ = global:unregister_name(Name);
-unregister_name(Pid) when is_pid(Pid) ->
- Pid;
-%% Under R12 let's just ignore it, as we have a single term as Name.
-%% On R13 it will never get here, as we get tuple with 'local/global' atom.
-unregister_name(_Name) -> ok.
-
-extend_backoff(undefined) ->
- undefined;
-extend_backoff({backoff, InitialTimeout, MinimumTimeout, DesiredHibPeriod}) ->
- {backoff, InitialTimeout, MinimumTimeout, DesiredHibPeriod, now()}.
-
-%%%========================================================================
-%%% Internal functions
-%%%========================================================================
-%%% ---------------------------------------------------
-%%% The MAIN loop.
-%%% ---------------------------------------------------
-loop(GS2State = #gs2_state { time = hibernate,
- timeout_state = undefined,
- queue = Queue }) ->
- case priority_queue:is_empty(Queue) of
- true ->
- pre_hibernate(GS2State);
- false ->
- process_next_msg(GS2State)
- end;
-
-loop(GS2State) ->
- process_next_msg(drain(GS2State)).
-
-drain(GS2State) ->
- receive
- Input -> drain(in(Input, GS2State))
- after 0 -> GS2State
- end.
-
-process_next_msg(GS2State = #gs2_state { time = Time,
- timeout_state = TimeoutState,
- queue = Queue }) ->
- case priority_queue:out(Queue) of
- {{value, Msg}, Queue1} ->
- process_msg(Msg, GS2State #gs2_state { queue = Queue1 });
- {empty, Queue1} ->
- {Time1, HibOnTimeout}
- = case {Time, TimeoutState} of
- {hibernate, {backoff, Current, _Min, _Desired, _RSt}} ->
- {Current, true};
- {hibernate, _} ->
- %% wake_hib/7 will set Time to hibernate. If
- %% we were woken and didn't receive a msg
- %% then we will get here and need a sensible
- %% value for Time1, otherwise we crash.
- %% R13B1 always waits infinitely when waking
- %% from hibernation, so that's what we do
- %% here too.
- {infinity, false};
- _ -> {Time, false}
- end,
- receive
- Input ->
- %% Time could be 'hibernate' here, so *don't* call loop
- process_next_msg(
- drain(in(Input, GS2State #gs2_state { queue = Queue1 })))
- after Time1 ->
- case HibOnTimeout of
- true ->
- pre_hibernate(
- GS2State #gs2_state { queue = Queue1 });
- false ->
- process_msg(timeout,
- GS2State #gs2_state { queue = Queue1 })
- end
- end
- end.
-
-wake_hib(GS2State = #gs2_state { timeout_state = TS }) ->
- TimeoutState1 = case TS of
- undefined ->
- undefined;
- {SleptAt, TimeoutState} ->
- adjust_timeout_state(SleptAt, now(), TimeoutState)
- end,
- post_hibernate(
- drain(GS2State #gs2_state { timeout_state = TimeoutState1 })).
-
-hibernate(GS2State = #gs2_state { timeout_state = TimeoutState }) ->
- TS = case TimeoutState of
- undefined -> undefined;
- {backoff, _, _, _, _} -> {now(), TimeoutState}
- end,
- proc_lib:hibernate(?MODULE, wake_hib,
- [GS2State #gs2_state { timeout_state = TS }]).
-
-pre_hibernate(GS2State = #gs2_state { state = State,
- mod = Mod }) ->
- case erlang:function_exported(Mod, handle_pre_hibernate, 1) of
- true ->
- case catch Mod:handle_pre_hibernate(State) of
- {hibernate, NState} ->
- hibernate(GS2State #gs2_state { state = NState } );
- Reply ->
- handle_common_termination(Reply, pre_hibernate, GS2State)
- end;
- false ->
- hibernate(GS2State)
- end.
-
-post_hibernate(GS2State = #gs2_state { state = State,
- mod = Mod }) ->
- case erlang:function_exported(Mod, handle_post_hibernate, 1) of
- true ->
- case catch Mod:handle_post_hibernate(State) of
- {noreply, NState} ->
- process_next_msg(GS2State #gs2_state { state = NState,
- time = infinity });
- {noreply, NState, Time} ->
- process_next_msg(GS2State #gs2_state { state = NState,
- time = Time });
- Reply ->
- handle_common_termination(Reply, post_hibernate, GS2State)
- end;
- false ->
- %% use hibernate here, not infinity. This matches
- %% R13B. The key is that we should be able to get through
- %% to process_msg calling sys:handle_system_msg with Time
- %% still set to hibernate, iff that msg is the very msg
- %% that woke us up (or the first msg we receive after
- %% waking up).
- process_next_msg(GS2State #gs2_state { time = hibernate })
- end.
-
-adjust_timeout_state(SleptAt, AwokeAt, {backoff, CurrentTO, MinimumTO,
- DesiredHibPeriod, RandomState}) ->
- NapLengthMicros = timer:now_diff(AwokeAt, SleptAt),
- CurrentMicros = CurrentTO * 1000,
- MinimumMicros = MinimumTO * 1000,
- DesiredHibMicros = DesiredHibPeriod * 1000,
- GapBetweenMessagesMicros = NapLengthMicros + CurrentMicros,
- Base =
- %% If enough time has passed between the last two messages then we
- %% should consider sleeping sooner. Otherwise stay awake longer.
- case GapBetweenMessagesMicros > (MinimumMicros + DesiredHibMicros) of
- true -> lists:max([MinimumTO, CurrentTO div 2]);
- false -> CurrentTO
- end,
- {Extra, RandomState1} = random:uniform_s(Base, RandomState),
- CurrentTO1 = Base + Extra,
- {backoff, CurrentTO1, MinimumTO, DesiredHibPeriod, RandomState1}.
-
-in({'$gen_cast', Msg} = Input,
- GS2State = #gs2_state { prioritisers = {_, F, _} }) ->
- in(Input, F(Msg, GS2State), GS2State);
-in({'$gen_call', From, Msg} = Input,
- GS2State = #gs2_state { prioritisers = {F, _, _} }) ->
- in(Input, F(Msg, From, GS2State), GS2State);
-in({'$with_state', _From, _Fun} = Input, GS2State) ->
- in(Input, 0, GS2State);
-in({'EXIT', Parent, _R} = Input, GS2State = #gs2_state { parent = Parent }) ->
- in(Input, infinity, GS2State);
-in({system, _From, _Req} = Input, GS2State) ->
- in(Input, infinity, GS2State);
-in(Input, GS2State = #gs2_state { prioritisers = {_, _, F} }) ->
- in(Input, F(Input, GS2State), GS2State).
-
-in(_Input, drop, GS2State) ->
- GS2State;
-
-in(Input, Priority, GS2State = #gs2_state { queue = Queue }) ->
- GS2State # gs2_state { queue = priority_queue:in(Input, Priority, Queue) }.
-
-process_msg({system, From, Req},
- GS2State = #gs2_state { parent = Parent, debug = Debug }) ->
- %% gen_server puts Hib on the end as the 7th arg, but that version
- %% of the fun seems not to be documented so leaving out for now.
- sys:handle_system_msg(Req, From, Parent, ?MODULE, Debug, GS2State);
-process_msg({'$with_state', From, Fun},
- GS2State = #gs2_state{state = State}) ->
- reply(From, catch Fun(State)),
- loop(GS2State);
-process_msg({'EXIT', Parent, Reason} = Msg,
- GS2State = #gs2_state { parent = Parent }) ->
- terminate(Reason, Msg, GS2State);
-process_msg(Msg, GS2State = #gs2_state { debug = [] }) ->
- handle_msg(Msg, GS2State);
-process_msg(Msg, GS2State = #gs2_state { name = Name, debug = Debug }) ->
- Debug1 = sys:handle_debug(Debug, fun print_event/3, Name, {in, Msg}),
- handle_msg(Msg, GS2State #gs2_state { debug = Debug1 }).
-
-%%% ---------------------------------------------------
-%%% Send/recive functions
-%%% ---------------------------------------------------
-do_send(Dest, Msg) ->
- catch erlang:send(Dest, Msg).
-
-do_multi_call(Nodes, Name, Req, infinity) ->
- Tag = make_ref(),
- Monitors = send_nodes(Nodes, Name, Tag, Req),
- rec_nodes(Tag, Monitors, Name, undefined);
-do_multi_call(Nodes, Name, Req, Timeout) ->
- Tag = make_ref(),
- Caller = self(),
- Receiver =
- spawn(
- fun () ->
- %% Middleman process. Should be unsensitive to regular
- %% exit signals. The sychronization is needed in case
- %% the receiver would exit before the caller started
- %% the monitor.
- process_flag(trap_exit, true),
- Mref = erlang:monitor(process, Caller),
- receive
- {Caller,Tag} ->
- Monitors = send_nodes(Nodes, Name, Tag, Req),
- TimerId = erlang:start_timer(Timeout, self(), ok),
- Result = rec_nodes(Tag, Monitors, Name, TimerId),
- exit({self(),Tag,Result});
- {'DOWN',Mref,_,_,_} ->
- %% Caller died before sending us the go-ahead.
- %% Give up silently.
- exit(normal)
- end
- end),
- Mref = erlang:monitor(process, Receiver),
- Receiver ! {self(),Tag},
- receive
- {'DOWN',Mref,_,_,{Receiver,Tag,Result}} ->
- Result;
- {'DOWN',Mref,_,_,Reason} ->
- %% The middleman code failed. Or someone did
- %% exit(_, kill) on the middleman process => Reason==killed
- exit(Reason)
- end.
-
-send_nodes(Nodes, Name, Tag, Req) ->
- send_nodes(Nodes, Name, Tag, Req, []).
-
-send_nodes([Node|Tail], Name, Tag, Req, Monitors)
- when is_atom(Node) ->
- Monitor = start_monitor(Node, Name),
- %% Handle non-existing names in rec_nodes.
- catch {Name, Node} ! {'$gen_call', {self(), {Tag, Node}}, Req},
- send_nodes(Tail, Name, Tag, Req, [Monitor | Monitors]);
-send_nodes([_Node|Tail], Name, Tag, Req, Monitors) ->
- %% Skip non-atom Node
- send_nodes(Tail, Name, Tag, Req, Monitors);
-send_nodes([], _Name, _Tag, _Req, Monitors) ->
- Monitors.
-
-%% Against old nodes:
-%% If no reply has been delivered within 2 secs. (per node) check that
-%% the server really exists and wait for ever for the answer.
-%%
-%% Against contemporary nodes:
-%% Wait for reply, server 'DOWN', or timeout from TimerId.
-
-rec_nodes(Tag, Nodes, Name, TimerId) ->
- rec_nodes(Tag, Nodes, Name, [], [], 2000, TimerId).
-
-rec_nodes(Tag, [{N,R}|Tail], Name, Badnodes, Replies, Time, TimerId ) ->
- receive
- {'DOWN', R, _, _, _} ->
- rec_nodes(Tag, Tail, Name, [N|Badnodes], Replies, Time, TimerId);
- {{Tag, N}, Reply} -> %% Tag is bound !!!
- unmonitor(R),
- rec_nodes(Tag, Tail, Name, Badnodes,
- [{N,Reply}|Replies], Time, TimerId);
- {timeout, TimerId, _} ->
- unmonitor(R),
- %% Collect all replies that already have arrived
- rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies)
- end;
-rec_nodes(Tag, [N|Tail], Name, Badnodes, Replies, Time, TimerId) ->
- %% R6 node
- receive
- {nodedown, N} ->
- monitor_node(N, false),
- rec_nodes(Tag, Tail, Name, [N|Badnodes], Replies, 2000, TimerId);
- {{Tag, N}, Reply} -> %% Tag is bound !!!
- receive {nodedown, N} -> ok after 0 -> ok end,
- monitor_node(N, false),
- rec_nodes(Tag, Tail, Name, Badnodes,
- [{N,Reply}|Replies], 2000, TimerId);
- {timeout, TimerId, _} ->
- receive {nodedown, N} -> ok after 0 -> ok end,
- monitor_node(N, false),
- %% Collect all replies that already have arrived
- rec_nodes_rest(Tag, Tail, Name, [N | Badnodes], Replies)
- after Time ->
- case rpc:call(N, erlang, whereis, [Name]) of
- Pid when is_pid(Pid) -> % It exists try again.
- rec_nodes(Tag, [N|Tail], Name, Badnodes,
- Replies, infinity, TimerId);
- _ -> % badnode
- receive {nodedown, N} -> ok after 0 -> ok end,
- monitor_node(N, false),
- rec_nodes(Tag, Tail, Name, [N|Badnodes],
- Replies, 2000, TimerId)
- end
- end;
-rec_nodes(_, [], _, Badnodes, Replies, _, TimerId) ->
- case catch erlang:cancel_timer(TimerId) of
- false -> % It has already sent it's message
- receive
- {timeout, TimerId, _} -> ok
- after 0 ->
- ok
- end;
- _ -> % Timer was cancelled, or TimerId was 'undefined'
- ok
- end,
- {Replies, Badnodes}.
-
-%% Collect all replies that already have arrived
-rec_nodes_rest(Tag, [{N,R}|Tail], Name, Badnodes, Replies) ->
- receive
- {'DOWN', R, _, _, _} ->
- rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies);
- {{Tag, N}, Reply} -> %% Tag is bound !!!
- unmonitor(R),
- rec_nodes_rest(Tag, Tail, Name, Badnodes, [{N,Reply}|Replies])
- after 0 ->
- unmonitor(R),
- rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies)
- end;
-rec_nodes_rest(Tag, [N|Tail], Name, Badnodes, Replies) ->
- %% R6 node
- receive
- {nodedown, N} ->
- monitor_node(N, false),
- rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies);
- {{Tag, N}, Reply} -> %% Tag is bound !!!
- receive {nodedown, N} -> ok after 0 -> ok end,
- monitor_node(N, false),
- rec_nodes_rest(Tag, Tail, Name, Badnodes, [{N,Reply}|Replies])
- after 0 ->
- receive {nodedown, N} -> ok after 0 -> ok end,
- monitor_node(N, false),
- rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies)
- end;
-rec_nodes_rest(_Tag, [], _Name, Badnodes, Replies) ->
- {Replies, Badnodes}.
-
-
-%%% ---------------------------------------------------
-%%% Monitor functions
-%%% ---------------------------------------------------
-
-start_monitor(Node, Name) when is_atom(Node), is_atom(Name) ->
- if node() =:= nonode@nohost, Node =/= nonode@nohost ->
- Ref = make_ref(),
- self() ! {'DOWN', Ref, process, {Name, Node}, noconnection},
- {Node, Ref};
- true ->
- case catch erlang:monitor(process, {Name, Node}) of
- {'EXIT', _} ->
- %% Remote node is R6
- monitor_node(Node, true),
- Node;
- Ref when is_reference(Ref) ->
- {Node, Ref}
- end
- end.
-
-%% Cancels a monitor started with Ref=erlang:monitor(_, _).
-unmonitor(Ref) when is_reference(Ref) ->
- erlang:demonitor(Ref),
- receive
- {'DOWN', Ref, _, _, _} ->
- true
- after 0 ->
- true
- end.
-
-%%% ---------------------------------------------------
-%%% Message handling functions
-%%% ---------------------------------------------------
-
-dispatch({'$gen_cast', Msg}, Mod, State) ->
- Mod:handle_cast(Msg, State);
-dispatch(Info, Mod, State) ->
- Mod:handle_info(Info, State).
-
-common_reply(_Name, From, Reply, _NState, [] = _Debug) ->
- reply(From, Reply),
- [];
-common_reply(Name, {To, _Tag} = From, Reply, NState, Debug) ->
- reply(From, Reply),
- sys:handle_debug(Debug, fun print_event/3, Name, {out, Reply, To, NState}).
-
-common_noreply(_Name, _NState, [] = _Debug) ->
- [];
-common_noreply(Name, NState, Debug) ->
- sys:handle_debug(Debug, fun print_event/3, Name, {noreply, NState}).
-
-common_become(_Name, _Mod, _NState, [] = _Debug) ->
- [];
-common_become(Name, Mod, NState, Debug) ->
- sys:handle_debug(Debug, fun print_event/3, Name, {become, Mod, NState}).
-
-handle_msg({'$gen_call', From, Msg}, GS2State = #gs2_state { mod = Mod,
- state = State,
- name = Name,
- debug = Debug }) ->
- case catch Mod:handle_call(Msg, From, State) of
- {reply, Reply, NState} ->
- Debug1 = common_reply(Name, From, Reply, NState, Debug),
- loop(GS2State #gs2_state { state = NState,
- time = infinity,
- debug = Debug1 });
- {reply, Reply, NState, Time1} ->
- Debug1 = common_reply(Name, From, Reply, NState, Debug),
- loop(GS2State #gs2_state { state = NState,
- time = Time1,
- debug = Debug1});
- {stop, Reason, Reply, NState} ->
- {'EXIT', R} =
- (catch terminate(Reason, Msg,
- GS2State #gs2_state { state = NState })),
- common_reply(Name, From, Reply, NState, Debug),
- exit(R);
- Other ->
- handle_common_reply(Other, Msg, GS2State)
- end;
-handle_msg(Msg, GS2State = #gs2_state { mod = Mod, state = State }) ->
- Reply = (catch dispatch(Msg, Mod, State)),
- handle_common_reply(Reply, Msg, GS2State).
-
-handle_common_reply(Reply, Msg, GS2State = #gs2_state { name = Name,
- debug = Debug}) ->
- case Reply of
- {noreply, NState} ->
- Debug1 = common_noreply(Name, NState, Debug),
- loop(GS2State #gs2_state {state = NState,
- time = infinity,
- debug = Debug1});
- {noreply, NState, Time1} ->
- Debug1 = common_noreply(Name, NState, Debug),
- loop(GS2State #gs2_state {state = NState,
- time = Time1,
- debug = Debug1});
- {become, Mod, NState} ->
- Debug1 = common_become(Name, Mod, NState, Debug),
- loop(find_prioritisers(
- GS2State #gs2_state { mod = Mod,
- state = NState,
- time = infinity,
- debug = Debug1 }));
- {become, Mod, NState, Time1} ->
- Debug1 = common_become(Name, Mod, NState, Debug),
- loop(find_prioritisers(
- GS2State #gs2_state { mod = Mod,
- state = NState,
- time = Time1,
- debug = Debug1 }));
- _ ->
- handle_common_termination(Reply, Msg, GS2State)
- end.
-
-handle_common_termination(Reply, Msg, GS2State) ->
- case Reply of
- {stop, Reason, NState} ->
- terminate(Reason, Msg, GS2State #gs2_state { state = NState });
- {'EXIT', What} ->
- terminate(What, Msg, GS2State);
- _ ->
- terminate({bad_return_value, Reply}, Msg, GS2State)
- end.
-
-%%-----------------------------------------------------------------
-%% Callback functions for system messages handling.
-%%-----------------------------------------------------------------
-system_continue(Parent, Debug, GS2State) ->
- loop(GS2State #gs2_state { parent = Parent, debug = Debug }).
-
-system_terminate(Reason, _Parent, Debug, GS2State) ->
- terminate(Reason, [], GS2State #gs2_state { debug = Debug }).
-
-system_code_change(GS2State = #gs2_state { mod = Mod,
- state = State },
- _Module, OldVsn, Extra) ->
- case catch Mod:code_change(OldVsn, State, Extra) of
- {ok, NewState} ->
- NewGS2State = find_prioritisers(
- GS2State #gs2_state { state = NewState }),
- {ok, [NewGS2State]};
- Else ->
- Else
- end.
-
-%%-----------------------------------------------------------------
-%% Format debug messages. Print them as the call-back module sees
-%% them, not as the real erlang messages. Use trace for that.
-%%-----------------------------------------------------------------
-print_event(Dev, {in, Msg}, Name) ->
- case Msg of
- {'$gen_call', {From, _Tag}, Call} ->
- io:format(Dev, "*DBG* ~p got call ~p from ~w~n",
- [Name, Call, From]);
- {'$gen_cast', Cast} ->
- io:format(Dev, "*DBG* ~p got cast ~p~n",
- [Name, Cast]);
- _ ->
- io:format(Dev, "*DBG* ~p got ~p~n", [Name, Msg])
- end;
-print_event(Dev, {out, Msg, To, State}, Name) ->
- io:format(Dev, "*DBG* ~p sent ~p to ~w, new state ~w~n",
- [Name, Msg, To, State]);
-print_event(Dev, {noreply, State}, Name) ->
- io:format(Dev, "*DBG* ~p new state ~w~n", [Name, State]);
-print_event(Dev, Event, Name) ->
- io:format(Dev, "*DBG* ~p dbg ~p~n", [Name, Event]).
-
-
-%%% ---------------------------------------------------
-%%% Terminate the server.
-%%% ---------------------------------------------------
-
-terminate(Reason, Msg, #gs2_state { name = Name,
- mod = Mod,
- state = State,
- debug = Debug }) ->
- case catch Mod:terminate(Reason, State) of
- {'EXIT', R} ->
- error_info(R, Reason, Name, Msg, State, Debug),
- exit(R);
- _ ->
- case Reason of
- normal ->
- exit(normal);
- shutdown ->
- exit(shutdown);
- {shutdown,_}=Shutdown ->
- exit(Shutdown);
- _ ->
- error_info(Reason, undefined, Name, Msg, State, Debug),
- exit(Reason)
- end
- end.
-
-error_info(_Reason, _RootCause, application_controller, _Msg, _State, _Debug) ->
- %% OTP-5811 Don't send an error report if it's the system process
- %% application_controller which is terminating - let init take care
- %% of it instead
- ok;
-error_info(Reason, RootCause, Name, Msg, State, Debug) ->
- Reason1 = error_reason(Reason),
- Fmt =
- "** Generic server ~p terminating~n"
- "** Last message in was ~p~n"
- "** When Server state == ~p~n"
- "** Reason for termination == ~n** ~p~n",
- case RootCause of
- undefined -> format(Fmt, [Name, Msg, State, Reason1]);
- _ -> format(Fmt ++ "** In 'terminate' callback "
- "with reason ==~n** ~p~n",
- [Name, Msg, State, Reason1,
- error_reason(RootCause)])
- end,
- sys:print_log(Debug),
- ok.
-
-error_reason({undef,[{M,F,A}|MFAs]} = Reason) ->
- case code:is_loaded(M) of
- false -> {'module could not be loaded',[{M,F,A}|MFAs]};
- _ -> case erlang:function_exported(M, F, length(A)) of
- true -> Reason;
- false -> {'function not exported',[{M,F,A}|MFAs]}
- end
- end;
-error_reason(Reason) ->
- Reason.
-
-%%% ---------------------------------------------------
-%%% Misc. functions.
-%%% ---------------------------------------------------
-
-opt(Op, [{Op, Value}|_]) ->
- {ok, Value};
-opt(Op, [_|Options]) ->
- opt(Op, Options);
-opt(_, []) ->
- false.
-
-debug_options(Name, Opts) ->
- case opt(debug, Opts) of
- {ok, Options} -> dbg_options(Name, Options);
- _ -> dbg_options(Name, [])
- end.
-
-dbg_options(Name, []) ->
- Opts =
- case init:get_argument(generic_debug) of
- error ->
- [];
- _ ->
- [log, statistics]
- end,
- dbg_opts(Name, Opts);
-dbg_options(Name, Opts) ->
- dbg_opts(Name, Opts).
-
-dbg_opts(Name, Opts) ->
- case catch sys:debug_options(Opts) of
- {'EXIT',_} ->
- format("~p: ignoring erroneous debug options - ~p~n",
- [Name, Opts]),
- [];
- Dbg ->
- Dbg
- end.
-
-get_proc_name(Pid) when is_pid(Pid) ->
- Pid;
-get_proc_name({local, Name}) ->
- case process_info(self(), registered_name) of
- {registered_name, Name} ->
- Name;
- {registered_name, _Name} ->
- exit(process_not_registered);
- [] ->
- exit(process_not_registered)
- end;
-get_proc_name({global, Name}) ->
- case whereis_name(Name) of
- undefined ->
- exit(process_not_registered_globally);
- Pid when Pid =:= self() ->
- Name;
- _Pid ->
- exit(process_not_registered_globally)
- end.
-
-get_parent() ->
- case get('$ancestors') of
- [Parent | _] when is_pid(Parent)->
- Parent;
- [Parent | _] when is_atom(Parent)->
- name_to_pid(Parent);
- _ ->
- exit(process_was_not_started_by_proc_lib)
- end.
-
-name_to_pid(Name) ->
- case whereis(Name) of
- undefined ->
- case whereis_name(Name) of
- undefined ->
- exit(could_not_find_registerd_name);
- Pid ->
- Pid
- end;
- Pid ->
- Pid
- end.
-
-whereis_name(Name) ->
- case ets:lookup(global_names, Name) of
- [{_Name, Pid, _Method, _RPid, _Ref}] ->
- if node(Pid) == node() ->
- case is_process_alive(Pid) of
- true -> Pid;
- false -> undefined
- end;
- true ->
- Pid
- end;
- [] -> undefined
- end.
-
-find_prioritisers(GS2State = #gs2_state { mod = Mod }) ->
- PCall = function_exported_or_default(Mod, 'prioritise_call', 4,
- fun (_Msg, _From, _State) -> 0 end),
- PCast = function_exported_or_default(Mod, 'prioritise_cast', 3,
- fun (_Msg, _State) -> 0 end),
- PInfo = function_exported_or_default(Mod, 'prioritise_info', 3,
- fun (_Msg, _State) -> 0 end),
- GS2State #gs2_state { prioritisers = {PCall, PCast, PInfo} }.
-
-function_exported_or_default(Mod, Fun, Arity, Default) ->
- case erlang:function_exported(Mod, Fun, Arity) of
- true -> case Arity of
- 3 -> fun (Msg, GS2State = #gs2_state { queue = Queue,
- state = State }) ->
- Length = priority_queue:len(Queue),
- case catch Mod:Fun(Msg, Length, State) of
- drop ->
- drop;
- Res when is_integer(Res) ->
- Res;
- Err ->
- handle_common_termination(Err, Msg, GS2State)
- end
- end;
- 4 -> fun (Msg, From, GS2State = #gs2_state { queue = Queue,
- state = State }) ->
- Length = priority_queue:len(Queue),
- case catch Mod:Fun(Msg, From, Length, State) of
- Res when is_integer(Res) ->
- Res;
- Err ->
- handle_common_termination(Err, Msg, GS2State)
- end
- end
- end;
- false -> Default
- end.
-
-%%-----------------------------------------------------------------
-%% Status information
-%%-----------------------------------------------------------------
-format_status(Opt, StatusData) ->
- [PDict, SysState, Parent, Debug,
- #gs2_state{name = Name, state = State, mod = Mod, queue = Queue}] =
- StatusData,
- NameTag = if is_pid(Name) ->
- pid_to_list(Name);
- is_atom(Name) ->
- Name
- end,
- Header = lists:concat(["Status for generic server ", NameTag]),
- Log = sys:get_debug(log, Debug, []),
- Specfic = callback(Mod, format_status, [Opt, [PDict, State]],
- fun () -> [{data, [{"State", State}]}] end),
- Messages = callback(Mod, format_message_queue, [Opt, Queue],
- fun () -> priority_queue:to_list(Queue) end),
- [{header, Header},
- {data, [{"Status", SysState},
- {"Parent", Parent},
- {"Logged events", Log},
- {"Queued messages", Messages}]} |
- Specfic].
-
-callback(Mod, FunName, Args, DefaultThunk) ->
- case erlang:function_exported(Mod, FunName, length(Args)) of
- true -> case catch apply(Mod, FunName, Args) of
- {'EXIT', _} -> DefaultThunk();
- Success -> Success
- end;
- false -> DefaultThunk()
- end.
diff --git a/src/gm.erl b/src/gm.erl
index 0ca0d8d9da..61a8f3eef7 100644
--- a/src/gm.erl
+++ b/src/gm.erl
@@ -551,8 +551,9 @@ forget_group(GroupName) ->
init([GroupName, Module, Args, TxnFun]) ->
put(process_name, {?MODULE, GroupName}),
- {MegaSecs, Secs, MicroSecs} = now(),
- _ = random:seed(MegaSecs, Secs, MicroSecs),
+ _ = random:seed(erlang:phash2([node()]),
+ time_compat:monotonic_time(),
+ time_compat:unique_integer()),
Self = make_member(GroupName),
gen_server2:cast(self(), join),
{ok, #state { self = Self,
@@ -712,6 +713,10 @@ handle_info(flush, State) ->
handle_info(timeout, State) ->
noreply(flush_broadcast_buffer(State));
+handle_info({'DOWN', _MRef, process, _Pid, _Reason},
+ State = #state { shutting_down =
+ {true, {shutdown, ring_shutdown}} }) ->
+ noreply(State);
handle_info({'DOWN', MRef, process, _Pid, Reason},
State = #state { self = Self,
left = Left,
diff --git a/src/mirrored_supervisor.erl b/src/mirrored_supervisor.erl
deleted file mode 100644
index 8df90391a0..0000000000
--- a/src/mirrored_supervisor.erl
+++ /dev/null
@@ -1,517 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(mirrored_supervisor).
-
-%% Mirrored Supervisor
-%% ===================
-%%
-%% This module implements a new type of supervisor. It acts like a
-%% normal supervisor, but at creation time you also provide the name
-%% of a process group to join. All the supervisors within the
-%% process group act like a single large distributed supervisor:
-%%
-%% * A process with a given child_id will only exist on one
-%% supervisor within the group.
-%%
-%% * If one supervisor fails, children may migrate to surviving
-%% supervisors within the group.
-%%
-%% In almost all cases you will want to use the module name for the
-%% process group. Using multiple process groups with the same module
-%% name is supported. Having multiple module names for the same
-%% process group will lead to undefined behaviour.
-%%
-%% Motivation
-%% ----------
-%%
-%% Sometimes you have processes which:
-%%
-%% * Only need to exist once per cluster.
-%%
-%% * Does not contain much state (or can reconstruct its state easily).
-%%
-%% * Needs to be restarted elsewhere should it be running on a node
-%% which fails.
-%%
-%% By creating a mirrored supervisor group with one supervisor on
-%% each node, that's what you get.
-%%
-%%
-%% API use
-%% -------
-%%
-%% This is basically the same as for supervisor, except that:
-%%
-%% 1) start_link(Module, Args) becomes
-%% start_link(Group, TxFun, Module, Args).
-%%
-%% 2) start_link({local, Name}, Module, Args) becomes
-%% start_link({local, Name}, Group, TxFun, Module, Args).
-%%
-%% 3) start_link({global, Name}, Module, Args) is not available.
-%%
-%% 4) The restart strategy simple_one_for_one is not available.
-%%
-%% 5) Mnesia is used to hold global state. At some point your
-%% application should invoke create_tables() (or table_definitions()
-%% if it wants to manage table creation itself).
-%%
-%% The TxFun parameter to start_link/{4,5} is a function which the
-%% mirrored supervisor can use to execute Mnesia transactions. In the
-%% RabbitMQ server this goes via a worker pool; in other cases a
-%% function like:
-%%
-%% tx_fun(Fun) ->
-%% case mnesia:sync_transaction(Fun) of
-%% {atomic, Result} -> Result;
-%% {aborted, Reason} -> throw({error, Reason})
-%% end.
-%%
-%% could be used.
-%%
-%% Internals
-%% ---------
-%%
-%% Each mirrored_supervisor consists of three processes - the overall
-%% supervisor, the delegate supervisor and the mirroring server. The
-%% overall supervisor supervises the other two processes. Its pid is
-%% the one returned from start_link; the pids of the other two
-%% processes are effectively hidden in the API.
-%%
-%% The delegate supervisor is in charge of supervising all the child
-%% processes that are added to the supervisor as usual.
-%%
-%% The mirroring server intercepts calls to the supervisor API
-%% (directed at the overall supervisor), does any special handling,
-%% and forwards everything to the delegate supervisor.
-%%
-%% This module implements all three, hence init/1 is somewhat overloaded.
-%%
-%% The mirroring server creates and joins a process group on
-%% startup. It monitors all the existing members of this group, and
-%% broadcasts a "hello" message to them so that they can monitor it in
-%% turn. When it receives a 'DOWN' message, it checks to see if it's
-%% the "first" server in the group and restarts all the child
-%% processes from the dead supervisor if so.
-%%
-%% In the future we might load balance this.
-%%
-%% Startup is slightly fiddly. The mirroring server needs to know the
-%% Pid of the overall supervisor, but we don't have that until it has
-%% started. Therefore we set this after the fact. We also start any
-%% children we found in Module:init() at this point, since starting
-%% children requires knowing the overall supervisor pid.
-
--define(SUPERVISOR, supervisor2).
--define(GEN_SERVER, gen_server2).
--define(PG2, pg2_fixed).
--define(SUP_MODULE, mirrored_supervisor_sups).
-
--define(TABLE, mirrored_sup_childspec).
--define(TABLE_DEF,
- {?TABLE,
- [{record_name, mirrored_sup_childspec},
- {type, ordered_set},
- {attributes, record_info(fields, mirrored_sup_childspec)}]}).
--define(TABLE_MATCH, {match, #mirrored_sup_childspec{ _ = '_' }}).
-
--export([start_link/4, start_link/5,
- start_child/2, restart_child/2,
- delete_child/2, terminate_child/2,
- which_children/1, count_children/1, check_childspecs/1]).
-
--behaviour(?GEN_SERVER).
-
--export([init/1, handle_call/3, handle_info/2, terminate/2, code_change/3,
- handle_cast/2]).
-
--export([start_internal/3]).
--export([create_tables/0, table_definitions/0]).
-
--record(mirrored_sup_childspec, {key, mirroring_pid, childspec}).
-
--record(state, {overall,
- delegate,
- group,
- tx_fun,
- initial_childspecs}).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
-%%--------------------------------------------------------------------------
-%% Callback behaviour
-%%--------------------------------------------------------------------------
-
--callback init(Args :: term()) ->
- {ok, {{RestartStrategy :: supervisor2:strategy(),
- MaxR :: non_neg_integer(),
- MaxT :: non_neg_integer()},
- [ChildSpec :: supervisor2:child_spec()]}}
- | ignore.
-
-%%--------------------------------------------------------------------------
-%% Specs
-%%--------------------------------------------------------------------------
-
--type startlink_err() :: {'already_started', pid()} | 'shutdown' | term().
--type startlink_ret() :: {'ok', pid()} | 'ignore' | {'error', startlink_err()}.
-
--type group_name() :: any().
-
--type(tx_fun() :: fun((fun(() -> A)) -> A)).
-
--spec start_link(GroupName, TxFun, Module, Args) -> startlink_ret() when
- GroupName :: group_name(),
- TxFun :: tx_fun(),
- Module :: module(),
- Args :: term().
-
--spec start_link(SupName, GroupName, TxFun, Module, Args) ->
- startlink_ret() when
- SupName :: supervisor2:sup_name(),
- GroupName :: group_name(),
- TxFun :: tx_fun(),
- Module :: module(),
- Args :: term().
-
--spec start_internal(Group, TxFun, ChildSpecs) -> Result when
- Group :: group_name(),
- TxFun :: tx_fun(),
- ChildSpecs :: [supervisor2:child_spec()],
- Result :: {'ok', pid()} | {'error', term()}.
-
--spec create_tables() -> Result when
- Result :: 'ok'.
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) -> [{init,1}];
-behaviour_info(_Other) -> undefined.
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_link(Group, TxFun, Mod, Args) ->
- start_link0([], Group, TxFun, init(Mod, Args)).
-
-start_link({local, SupName}, Group, TxFun, Mod, Args) ->
- start_link0([{local, SupName}], Group, TxFun, init(Mod, Args));
-
-start_link({global, _SupName}, _Group, _TxFun, _Mod, _Args) ->
- erlang:error(badarg).
-
-start_link0(Prefix, Group, TxFun, Init) ->
- case apply(?SUPERVISOR, start_link,
- Prefix ++ [?SUP_MODULE, {overall, Group, TxFun, Init}]) of
- {ok, Pid} -> case catch call(Pid, {init, Pid}) of
- ok -> {ok, Pid};
- E -> E
- end;
- Other -> Other
- end.
-
-init(Mod, Args) ->
- case Mod:init(Args) of
- {ok, {{Bad, _, _}, _ChildSpecs}} when
- Bad =:= simple_one_for_one -> erlang:error(badarg);
- Init -> Init
- end.
-
-start_child(Sup, ChildSpec) -> call(Sup, {start_child, ChildSpec}).
-delete_child(Sup, Id) -> find_call(Sup, Id, {delete_child, Id}).
-restart_child(Sup, Id) -> find_call(Sup, Id, {msg, restart_child, [Id]}).
-terminate_child(Sup, Id) -> find_call(Sup, Id, {msg, terminate_child, [Id]}).
-which_children(Sup) -> fold(which_children, Sup, fun lists:append/2).
-count_children(Sup) -> fold(count_children, Sup, fun add_proplists/2).
-check_childspecs(Specs) -> ?SUPERVISOR:check_childspecs(Specs).
-
-call(Sup, Msg) -> ?GEN_SERVER:call(mirroring(Sup), Msg, infinity).
-cast(Sup, Msg) -> with_exit_handler(
- fun() -> ok end,
- fun() -> ?GEN_SERVER:cast(mirroring(Sup), Msg) end).
-
-find_call(Sup, Id, Msg) ->
- Group = call(Sup, group),
- MatchHead = #mirrored_sup_childspec{mirroring_pid = '$1',
- key = {Group, Id},
- _ = '_'},
- %% If we did this inside a tx we could still have failover
- %% immediately after the tx - we can't be 100% here. So we may as
- %% well dirty_select.
- case mnesia:dirty_select(?TABLE, [{MatchHead, [], ['$1']}]) of
- [Mirror] -> call(Mirror, Msg);
- [] -> {error, not_found}
- end.
-
-fold(FunAtom, Sup, AggFun) ->
- Group = call(Sup, group),
- lists:foldl(AggFun, [],
- [apply(?SUPERVISOR, FunAtom, [D]) ||
- M <- ?PG2:get_members(Group),
- D <- [delegate(M)]]).
-
-child(Sup, Id) ->
- [Pid] = [Pid || {Id1, Pid, _, _} <- ?SUPERVISOR:which_children(Sup),
- Id1 =:= Id],
- Pid.
-
-delegate(Sup) -> child(Sup, delegate).
-mirroring(Sup) -> child(Sup, mirroring).
-
-%%----------------------------------------------------------------------------
-
-start_internal(Group, TxFun, ChildSpecs) ->
- ?GEN_SERVER:start_link(?MODULE, {Group, TxFun, ChildSpecs},
- [{timeout, infinity}]).
-
-%%----------------------------------------------------------------------------
-
-init({Group, TxFun, ChildSpecs}) ->
- {ok, #state{group = Group,
- tx_fun = TxFun,
- initial_childspecs = ChildSpecs}}.
-
-handle_call({init, Overall}, _From,
- State = #state{overall = undefined,
- delegate = undefined,
- group = Group,
- tx_fun = TxFun,
- initial_childspecs = ChildSpecs}) ->
- process_flag(trap_exit, true),
- ?PG2:create(Group),
- ok = ?PG2:join(Group, Overall),
- Rest = ?PG2:get_members(Group) -- [Overall],
- case Rest of
- [] -> TxFun(fun() -> delete_all(Group) end);
- _ -> ok
- end,
- [begin
- ?GEN_SERVER:cast(mirroring(Pid), {ensure_monitoring, Overall}),
- erlang:monitor(process, Pid)
- end || Pid <- Rest],
- Delegate = delegate(Overall),
- erlang:monitor(process, Delegate),
- State1 = State#state{overall = Overall, delegate = Delegate},
- case errors([maybe_start(Group, TxFun, Overall, Delegate, S)
- || S <- ChildSpecs]) of
- [] -> {reply, ok, State1};
- Errors -> {stop, {shutdown, Errors}, State1}
- end;
-
-handle_call({start_child, ChildSpec}, _From,
- State = #state{overall = Overall,
- delegate = Delegate,
- group = Group,
- tx_fun = TxFun}) ->
- {reply, case maybe_start(Group, TxFun, Overall, Delegate, ChildSpec) of
- already_in_mnesia -> {error, already_present};
- {already_in_mnesia, Pid} -> {error, {already_started, Pid}};
- Else -> Else
- end, State};
-
-handle_call({delete_child, Id}, _From, State = #state{delegate = Delegate,
- group = Group,
- tx_fun = TxFun}) ->
- {reply, stop(Group, TxFun, Delegate, Id), State};
-
-handle_call({msg, F, A}, _From, State = #state{delegate = Delegate}) ->
- {reply, apply(?SUPERVISOR, F, [Delegate | A]), State};
-
-handle_call(group, _From, State = #state{group = Group}) ->
- {reply, Group, State};
-
-handle_call(Msg, _From, State) ->
- {stop, {unexpected_call, Msg}, State}.
-
-handle_cast({ensure_monitoring, Pid}, State) ->
- erlang:monitor(process, Pid),
- {noreply, State};
-
-handle_cast({die, Reason}, State = #state{group = Group}) ->
- _ = tell_all_peers_to_die(Group, Reason),
- {stop, Reason, State};
-
-handle_cast(Msg, State) ->
- {stop, {unexpected_cast, Msg}, State}.
-
-handle_info({'DOWN', _Ref, process, Pid, Reason},
- State = #state{delegate = Pid, group = Group}) ->
- %% Since the delegate is temporary, its death won't cause us to
- %% die. Since the overall supervisor kills processes in reverse
- %% order when shutting down "from above" and we started after the
- %% delegate, if we see the delegate die then that means it died
- %% "from below" i.e. due to the behaviour of its children, not
- %% because the whole app was being torn down.
- %%
- %% Therefore if we get here we know we need to cause the entire
- %% mirrored sup to shut down, not just fail over.
- _ = tell_all_peers_to_die(Group, Reason),
- {stop, Reason, State};
-
-handle_info({'DOWN', _Ref, process, Pid, _Reason},
- State = #state{delegate = Delegate,
- group = Group,
- tx_fun = TxFun,
- overall = O}) ->
- %% TODO load balance this
- %% No guarantee pg2 will have received the DOWN before us.
- R = case lists:sort(?PG2:get_members(Group)) -- [Pid] of
- [O | _] -> ChildSpecs =
- TxFun(fun() -> update_all(O, Pid) end),
- [start(Delegate, ChildSpec) || ChildSpec <- ChildSpecs];
- _ -> []
- end,
- case errors(R) of
- [] -> {noreply, State};
- Errors -> {stop, {shutdown, Errors}, State}
- end;
-
-handle_info(Info, State) ->
- {stop, {unexpected_info, Info}, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-%%----------------------------------------------------------------------------
-
-tell_all_peers_to_die(Group, Reason) ->
- [cast(P, {die, Reason}) || P <- ?PG2:get_members(Group) -- [self()]].
-
-maybe_start(Group, TxFun, Overall, Delegate, ChildSpec) ->
- try TxFun(fun() -> check_start(Group, Overall, Delegate, ChildSpec) end) of
- start -> start(Delegate, ChildSpec);
- undefined -> already_in_mnesia;
- Pid -> {already_in_mnesia, Pid}
- catch
- %% If we are torn down while in the transaction...
- {error, E} -> {error, E}
- end.
-
-check_start(Group, Overall, Delegate, ChildSpec) ->
- case mnesia:wread({?TABLE, {Group, id(ChildSpec)}}) of
- [] -> _ = write(Group, Overall, ChildSpec),
- start;
- [S] -> #mirrored_sup_childspec{key = {Group, Id},
- mirroring_pid = Pid} = S,
- case Overall of
- Pid -> child(Delegate, Id);
- _ -> case supervisor(Pid) of
- dead -> _ = write(Group, Overall, ChildSpec),
- start;
- Delegate0 -> child(Delegate0, Id)
- end
- end
- end.
-
-supervisor(Pid) -> with_exit_handler(fun() -> dead end,
- fun() -> delegate(Pid) end).
-
-write(Group, Overall, ChildSpec) ->
- S = #mirrored_sup_childspec{key = {Group, id(ChildSpec)},
- mirroring_pid = Overall,
- childspec = ChildSpec},
- ok = mnesia:write(?TABLE, S, write),
- ChildSpec.
-
-delete(Group, Id) ->
- ok = mnesia:delete({?TABLE, {Group, Id}}).
-
-start(Delegate, ChildSpec) ->
- apply(?SUPERVISOR, start_child, [Delegate, ChildSpec]).
-
-stop(Group, TxFun, Delegate, Id) ->
- try TxFun(fun() -> check_stop(Group, Delegate, Id) end) of
- deleted -> apply(?SUPERVISOR, delete_child, [Delegate, Id]);
- running -> {error, running}
- catch
- {error, E} -> {error, E}
- end.
-
-check_stop(Group, Delegate, Id) ->
- case child(Delegate, Id) of
- undefined -> delete(Group, Id),
- deleted;
- _ -> running
- end.
-
-id({Id, _, _, _, _, _}) -> Id.
-
-update_all(Overall, OldOverall) ->
- MatchHead = #mirrored_sup_childspec{mirroring_pid = OldOverall,
- key = '$1',
- childspec = '$2',
- _ = '_'},
- [write(Group, Overall, C) ||
- [{Group, _Id}, C] <- mnesia:select(?TABLE, [{MatchHead, [], ['$$']}])].
-
-delete_all(Group) ->
- MatchHead = #mirrored_sup_childspec{key = {Group, '_'},
- childspec = '$1',
- _ = '_'},
- [delete(Group, id(C)) ||
- C <- mnesia:select(?TABLE, [{MatchHead, [], ['$1']}])].
-
-errors(Results) -> [E || {error, E} <- Results].
-
-%%----------------------------------------------------------------------------
-
-create_tables() -> create_tables([?TABLE_DEF]).
-
-create_tables([]) ->
- ok;
-create_tables([{Table, Attributes} | Ts]) ->
- case mnesia:create_table(Table, Attributes) of
- {atomic, ok} -> create_tables(Ts);
- {aborted, {already_exists, ?TABLE}} -> create_tables(Ts);
- Err -> Err
- end.
-
-table_definitions() ->
- {Name, Attributes} = ?TABLE_DEF,
- [{Name, [?TABLE_MATCH | Attributes]}].
-
-%%----------------------------------------------------------------------------
-
-with_exit_handler(Handler, Thunk) ->
- try
- Thunk()
- catch
- exit:{R, _} when R =:= noproc; R =:= nodedown;
- R =:= normal; R =:= shutdown ->
- Handler();
- exit:{{R, _}, _} when R =:= nodedown; R =:= shutdown ->
- Handler()
- end.
-
-add_proplists(P1, P2) ->
- add_proplists(lists:keysort(1, P1), lists:keysort(1, P2), []).
-add_proplists([], P2, Acc) -> P2 ++ Acc;
-add_proplists(P1, [], Acc) -> P1 ++ Acc;
-add_proplists([{K, V1} | P1], [{K, V2} | P2], Acc) ->
- add_proplists(P1, P2, [{K, V1 + V2} | Acc]);
-add_proplists([{K1, _} = KV | P1], [{K2, _} | _] = P2, Acc) when K1 < K2 ->
- add_proplists(P1, P2, [KV | Acc]);
-add_proplists(P1, [KV | P2], Acc) ->
- add_proplists(P1, P2, [KV | Acc]).
diff --git a/src/mochijson2.erl b/src/mochijson2.erl
deleted file mode 100644
index bddb52cc6f..0000000000
--- a/src/mochijson2.erl
+++ /dev/null
@@ -1,893 +0,0 @@
-%% This file is a copy of `mochijson2.erl' from mochiweb, revision
-%% d541e9a0f36c00dcadc2e589f20e47fbf46fc76f. For the license, see
-%% `LICENSE-MIT-Mochi'.
-
-%% @author Bob Ippolito <bob@mochimedia.com>
-%% @copyright 2007 Mochi Media, Inc.
-
-%% @doc Yet another JSON (RFC 4627) library for Erlang. mochijson2 works
-%% with binaries as strings, arrays as lists (without an {array, _})
-%% wrapper and it only knows how to decode UTF-8 (and ASCII).
-%%
-%% JSON terms are decoded as follows (javascript -> erlang):
-%% <ul>
-%% <li>{"key": "value"} ->
-%% {struct, [{&lt;&lt;"key">>, &lt;&lt;"value">>}]}</li>
-%% <li>["array", 123, 12.34, true, false, null] ->
-%% [&lt;&lt;"array">>, 123, 12.34, true, false, null]
-%% </li>
-%% </ul>
-%% <ul>
-%% <li>Strings in JSON decode to UTF-8 binaries in Erlang</li>
-%% <li>Objects decode to {struct, PropList}</li>
-%% <li>Numbers decode to integer or float</li>
-%% <li>true, false, null decode to their respective terms.</li>
-%% </ul>
-%% The encoder will accept the same format that the decoder will produce,
-%% but will also allow additional cases for leniency:
-%% <ul>
-%% <li>atoms other than true, false, null will be considered UTF-8
-%% strings (even as a proplist key)
-%% </li>
-%% <li>{json, IoList} will insert IoList directly into the output
-%% with no validation
-%% </li>
-%% <li>{array, Array} will be encoded as Array
-%% (legacy mochijson style)
-%% </li>
-%% <li>A non-empty raw proplist will be encoded as an object as long
-%% as the first pair does not have an atom key of json, struct,
-%% or array
-%% </li>
-%% </ul>
-
--module(mochijson2).
--author('bob@mochimedia.com').
--export([encoder/1, encode/1]).
--export([decoder/1, decode/1, decode/2]).
-
-%% This is a macro to placate syntax highlighters..
--define(Q, $\").
--define(ADV_COL(S, N), S#decoder{offset=N+S#decoder.offset,
- column=N+S#decoder.column}).
--define(INC_COL(S), S#decoder{offset=1+S#decoder.offset,
- column=1+S#decoder.column}).
--define(INC_LINE(S), S#decoder{offset=1+S#decoder.offset,
- column=1,
- line=1+S#decoder.line}).
--define(INC_CHAR(S, C),
- case C of
- $\n ->
- S#decoder{column=1,
- line=1+S#decoder.line,
- offset=1+S#decoder.offset};
- _ ->
- S#decoder{column=1+S#decoder.column,
- offset=1+S#decoder.offset}
- end).
--define(IS_WHITESPACE(C),
- (C =:= $\s orelse C =:= $\t orelse C =:= $\r orelse C =:= $\n)).
-
-%% @type json_string() = atom | binary()
-%% @type json_number() = integer() | float()
-%% @type json_array() = [json_term()]
-%% @type json_object() = {struct, [{json_string(), json_term()}]}
-%% @type json_eep18_object() = {[{json_string(), json_term()}]}
-%% @type json_iolist() = {json, iolist()}
-%% @type json_term() = json_string() | json_number() | json_array() |
-%% json_object() | json_eep18_object() | json_iolist()
-
--record(encoder, {handler=null,
- utf8=false}).
-
--record(decoder, {object_hook=null,
- offset=0,
- line=1,
- column=1,
- state=null}).
-
-%% @spec encoder([encoder_option()]) -> function()
-%% @doc Create an encoder/1 with the given options.
-%% @type encoder_option() = handler_option() | utf8_option()
-%% @type utf8_option() = boolean(). Emit unicode as utf8 (default - false)
-encoder(Options) ->
- State = parse_encoder_options(Options, #encoder{}),
- fun (O) -> json_encode(O, State) end.
-
-%% @spec encode(json_term()) -> iolist()
-%% @doc Encode the given as JSON to an iolist.
-encode(Any) ->
- json_encode(Any, #encoder{}).
-
-%% @spec decoder([decoder_option()]) -> function()
-%% @doc Create a decoder/1 with the given options.
-decoder(Options) ->
- State = parse_decoder_options(Options, #decoder{}),
- fun (O) -> json_decode(O, State) end.
-
-%% @spec decode(iolist(), [{format, proplist | eep18 | struct}]) -> json_term()
-%% @doc Decode the given iolist to Erlang terms using the given object format
-%% for decoding, where proplist returns JSON objects as [{binary(), json_term()}]
-%% proplists, eep18 returns JSON objects as {[binary(), json_term()]}, and struct
-%% returns them as-is.
-decode(S, Options) ->
- json_decode(S, parse_decoder_options(Options, #decoder{})).
-
-%% @spec decode(iolist()) -> json_term()
-%% @doc Decode the given iolist to Erlang terms.
-decode(S) ->
- json_decode(S, #decoder{}).
-
-%% Internal API
-
-parse_encoder_options([], State) ->
- State;
-parse_encoder_options([{handler, Handler} | Rest], State) ->
- parse_encoder_options(Rest, State#encoder{handler=Handler});
-parse_encoder_options([{utf8, Switch} | Rest], State) ->
- parse_encoder_options(Rest, State#encoder{utf8=Switch}).
-
-parse_decoder_options([], State) ->
- State;
-parse_decoder_options([{object_hook, Hook} | Rest], State) ->
- parse_decoder_options(Rest, State#decoder{object_hook=Hook});
-parse_decoder_options([{format, Format} | Rest], State)
- when Format =:= struct orelse Format =:= eep18 orelse Format =:= proplist ->
- parse_decoder_options(Rest, State#decoder{object_hook=Format}).
-
-json_encode(true, _State) ->
- <<"true">>;
-json_encode(false, _State) ->
- <<"false">>;
-json_encode(null, _State) ->
- <<"null">>;
-json_encode(I, _State) when is_integer(I) ->
- integer_to_list(I);
-json_encode(F, _State) when is_float(F) ->
- mochinum:digits(F);
-json_encode(S, State) when is_binary(S); is_atom(S) ->
- json_encode_string(S, State);
-json_encode([{K, _}|_] = Props, State) when (K =/= struct andalso
- K =/= array andalso
- K =/= json) ->
- json_encode_proplist(Props, State);
-json_encode({struct, Props}, State) when is_list(Props) ->
- json_encode_proplist(Props, State);
-json_encode({Props}, State) when is_list(Props) ->
- json_encode_proplist(Props, State);
-json_encode({}, State) ->
- json_encode_proplist([], State);
-json_encode(Array, State) when is_list(Array) ->
- json_encode_array(Array, State);
-json_encode({array, Array}, State) when is_list(Array) ->
- json_encode_array(Array, State);
-json_encode({json, IoList}, _State) ->
- IoList;
-json_encode(Bad, #encoder{handler=null}) ->
- exit({json_encode, {bad_term, Bad}});
-json_encode(Bad, State=#encoder{handler=Handler}) ->
- json_encode(Handler(Bad), State).
-
-json_encode_array([], _State) ->
- <<"[]">>;
-json_encode_array(L, State) ->
- F = fun (O, Acc) ->
- [$,, json_encode(O, State) | Acc]
- end,
- [$, | Acc1] = lists:foldl(F, "[", L),
- lists:reverse([$\] | Acc1]).
-
-json_encode_proplist([], _State) ->
- <<"{}">>;
-json_encode_proplist(Props, State) ->
- F = fun ({K, V}, Acc) ->
- KS = json_encode_string(K, State),
- VS = json_encode(V, State),
- [$,, VS, $:, KS | Acc]
- end,
- [$, | Acc1] = lists:foldl(F, "{", Props),
- lists:reverse([$\} | Acc1]).
-
-json_encode_string(A, State) when is_atom(A) ->
- L = atom_to_list(A),
- case json_string_is_safe(L) of
- true ->
- [?Q, L, ?Q];
- false ->
- json_encode_string_unicode(xmerl_ucs:from_utf8(L), State, [?Q])
- end;
-json_encode_string(B, State) when is_binary(B) ->
- case json_bin_is_safe(B) of
- true ->
- [?Q, B, ?Q];
- false ->
- json_encode_string_unicode(xmerl_ucs:from_utf8(B), State, [?Q])
- end;
-json_encode_string(I, _State) when is_integer(I) ->
- [?Q, integer_to_list(I), ?Q];
-json_encode_string(L, State) when is_list(L) ->
- case json_string_is_safe(L) of
- true ->
- [?Q, L, ?Q];
- false ->
- json_encode_string_unicode(L, State, [?Q])
- end.
-
-json_string_is_safe([]) ->
- true;
-json_string_is_safe([C | Rest]) ->
- case C of
- ?Q ->
- false;
- $\\ ->
- false;
- $\b ->
- false;
- $\f ->
- false;
- $\n ->
- false;
- $\r ->
- false;
- $\t ->
- false;
- C when C >= 0, C < $\s; C >= 16#7f, C =< 16#10FFFF ->
- false;
- C when C < 16#7f ->
- json_string_is_safe(Rest);
- _ ->
- false
- end.
-
-json_bin_is_safe(<<>>) ->
- true;
-json_bin_is_safe(<<C, Rest/binary>>) ->
- case C of
- ?Q ->
- false;
- $\\ ->
- false;
- $\b ->
- false;
- $\f ->
- false;
- $\n ->
- false;
- $\r ->
- false;
- $\t ->
- false;
- C when C >= 0, C < $\s; C >= 16#7f ->
- false;
- C when C < 16#7f ->
- json_bin_is_safe(Rest)
- end.
-
-json_encode_string_unicode([], _State, Acc) ->
- lists:reverse([$\" | Acc]);
-json_encode_string_unicode([C | Cs], State, Acc) ->
- Acc1 = case C of
- ?Q ->
- [?Q, $\\ | Acc];
- %% Escaping solidus is only useful when trying to protect
- %% against "</script>" injection attacks which are only
- %% possible when JSON is inserted into a HTML document
- %% in-line. mochijson2 does not protect you from this, so
- %% if you do insert directly into HTML then you need to
- %% uncomment the following case or escape the output of encode.
- %%
- %% $/ ->
- %% [$/, $\\ | Acc];
- %%
- $\\ ->
- [$\\, $\\ | Acc];
- $\b ->
- [$b, $\\ | Acc];
- $\f ->
- [$f, $\\ | Acc];
- $\n ->
- [$n, $\\ | Acc];
- $\r ->
- [$r, $\\ | Acc];
- $\t ->
- [$t, $\\ | Acc];
- C when C >= 0, C < $\s ->
- [unihex(C) | Acc];
- C when C >= 16#7f, C =< 16#10FFFF, State#encoder.utf8 ->
- [xmerl_ucs:to_utf8(C) | Acc];
- C when C >= 16#7f, C =< 16#10FFFF, not State#encoder.utf8 ->
- [unihex(C) | Acc];
- C when C < 16#7f ->
- [C | Acc];
- _ ->
- exit({json_encode, {bad_char, C}})
- end,
- json_encode_string_unicode(Cs, State, Acc1).
-
-hexdigit(C) when C >= 0, C =< 9 ->
- C + $0;
-hexdigit(C) when C =< 15 ->
- C + $a - 10.
-
-unihex(C) when C < 16#10000 ->
- <<D3:4, D2:4, D1:4, D0:4>> = <<C:16>>,
- Digits = [hexdigit(D) || D <- [D3, D2, D1, D0]],
- [$\\, $u | Digits];
-unihex(C) when C =< 16#10FFFF ->
- N = C - 16#10000,
- S1 = 16#d800 bor ((N bsr 10) band 16#3ff),
- S2 = 16#dc00 bor (N band 16#3ff),
- [unihex(S1), unihex(S2)].
-
-json_decode(L, S) when is_list(L) ->
- json_decode(iolist_to_binary(L), S);
-json_decode(B, S) ->
- {Res, S1} = decode1(B, S),
- {eof, _} = tokenize(B, S1#decoder{state=trim}),
- Res.
-
-decode1(B, S=#decoder{state=null}) ->
- case tokenize(B, S#decoder{state=any}) of
- {{const, C}, S1} ->
- {C, S1};
- {start_array, S1} ->
- decode_array(B, S1);
- {start_object, S1} ->
- decode_object(B, S1)
- end.
-
-make_object(V, #decoder{object_hook=N}) when N =:= null orelse N =:= struct ->
- V;
-make_object({struct, P}, #decoder{object_hook=eep18}) ->
- {P};
-make_object({struct, P}, #decoder{object_hook=proplist}) ->
- P;
-make_object(V, #decoder{object_hook=Hook}) ->
- Hook(V).
-
-decode_object(B, S) ->
- decode_object(B, S#decoder{state=key}, []).
-
-decode_object(B, S=#decoder{state=key}, Acc) ->
- case tokenize(B, S) of
- {end_object, S1} ->
- V = make_object({struct, lists:reverse(Acc)}, S1),
- {V, S1#decoder{state=null}};
- {{const, K}, S1} ->
- {colon, S2} = tokenize(B, S1),
- {V, S3} = decode1(B, S2#decoder{state=null}),
- decode_object(B, S3#decoder{state=comma}, [{K, V} | Acc])
- end;
-decode_object(B, S=#decoder{state=comma}, Acc) ->
- case tokenize(B, S) of
- {end_object, S1} ->
- V = make_object({struct, lists:reverse(Acc)}, S1),
- {V, S1#decoder{state=null}};
- {comma, S1} ->
- decode_object(B, S1#decoder{state=key}, Acc)
- end.
-
-decode_array(B, S) ->
- decode_array(B, S#decoder{state=any}, []).
-
-decode_array(B, S=#decoder{state=any}, Acc) ->
- case tokenize(B, S) of
- {end_array, S1} ->
- {lists:reverse(Acc), S1#decoder{state=null}};
- {start_array, S1} ->
- {Array, S2} = decode_array(B, S1),
- decode_array(B, S2#decoder{state=comma}, [Array | Acc]);
- {start_object, S1} ->
- {Array, S2} = decode_object(B, S1),
- decode_array(B, S2#decoder{state=comma}, [Array | Acc]);
- {{const, Const}, S1} ->
- decode_array(B, S1#decoder{state=comma}, [Const | Acc])
- end;
-decode_array(B, S=#decoder{state=comma}, Acc) ->
- case tokenize(B, S) of
- {end_array, S1} ->
- {lists:reverse(Acc), S1#decoder{state=null}};
- {comma, S1} ->
- decode_array(B, S1#decoder{state=any}, Acc)
- end.
-
-tokenize_string(B, S=#decoder{offset=O}) ->
- case tokenize_string_fast(B, O) of
- {escape, O1} ->
- Length = O1 - O,
- S1 = ?ADV_COL(S, Length),
- <<_:O/binary, Head:Length/binary, _/binary>> = B,
- tokenize_string(B, S1, lists:reverse(binary_to_list(Head)));
- O1 ->
- Length = O1 - O,
- <<_:O/binary, String:Length/binary, ?Q, _/binary>> = B,
- {{const, String}, ?ADV_COL(S, Length + 1)}
- end.
-
-tokenize_string_fast(B, O) ->
- case B of
- <<_:O/binary, ?Q, _/binary>> ->
- O;
- <<_:O/binary, $\\, _/binary>> ->
- {escape, O};
- <<_:O/binary, C1, _/binary>> when C1 < 128 ->
- tokenize_string_fast(B, 1 + O);
- <<_:O/binary, C1, C2, _/binary>> when C1 >= 194, C1 =< 223,
- C2 >= 128, C2 =< 191 ->
- tokenize_string_fast(B, 2 + O);
- <<_:O/binary, C1, C2, C3, _/binary>> when C1 >= 224, C1 =< 239,
- C2 >= 128, C2 =< 191,
- C3 >= 128, C3 =< 191 ->
- tokenize_string_fast(B, 3 + O);
- <<_:O/binary, C1, C2, C3, C4, _/binary>> when C1 >= 240, C1 =< 244,
- C2 >= 128, C2 =< 191,
- C3 >= 128, C3 =< 191,
- C4 >= 128, C4 =< 191 ->
- tokenize_string_fast(B, 4 + O);
- _ ->
- throw(invalid_utf8)
- end.
-
-tokenize_string(B, S=#decoder{offset=O}, Acc) ->
- case B of
- <<_:O/binary, ?Q, _/binary>> ->
- {{const, iolist_to_binary(lists:reverse(Acc))}, ?INC_COL(S)};
- <<_:O/binary, "\\\"", _/binary>> ->
- tokenize_string(B, ?ADV_COL(S, 2), [$\" | Acc]);
- <<_:O/binary, "\\\\", _/binary>> ->
- tokenize_string(B, ?ADV_COL(S, 2), [$\\ | Acc]);
- <<_:O/binary, "\\/", _/binary>> ->
- tokenize_string(B, ?ADV_COL(S, 2), [$/ | Acc]);
- <<_:O/binary, "\\b", _/binary>> ->
- tokenize_string(B, ?ADV_COL(S, 2), [$\b | Acc]);
- <<_:O/binary, "\\f", _/binary>> ->
- tokenize_string(B, ?ADV_COL(S, 2), [$\f | Acc]);
- <<_:O/binary, "\\n", _/binary>> ->
- tokenize_string(B, ?ADV_COL(S, 2), [$\n | Acc]);
- <<_:O/binary, "\\r", _/binary>> ->
- tokenize_string(B, ?ADV_COL(S, 2), [$\r | Acc]);
- <<_:O/binary, "\\t", _/binary>> ->
- tokenize_string(B, ?ADV_COL(S, 2), [$\t | Acc]);
- <<_:O/binary, "\\u", C3, C2, C1, C0, Rest/binary>> ->
- C = erlang:list_to_integer([C3, C2, C1, C0], 16),
- if C > 16#D7FF, C < 16#DC00 ->
- %% coalesce UTF-16 surrogate pair
- <<"\\u", D3, D2, D1, D0, _/binary>> = Rest,
- D = erlang:list_to_integer([D3,D2,D1,D0], 16),
- [CodePoint] = xmerl_ucs:from_utf16be(<<C:16/big-unsigned-integer,
- D:16/big-unsigned-integer>>),
- Acc1 = lists:reverse(xmerl_ucs:to_utf8(CodePoint), Acc),
- tokenize_string(B, ?ADV_COL(S, 12), Acc1);
- true ->
- Acc1 = lists:reverse(xmerl_ucs:to_utf8(C), Acc),
- tokenize_string(B, ?ADV_COL(S, 6), Acc1)
- end;
- <<_:O/binary, C1, _/binary>> when C1 < 128 ->
- tokenize_string(B, ?INC_CHAR(S, C1), [C1 | Acc]);
- <<_:O/binary, C1, C2, _/binary>> when C1 >= 194, C1 =< 223,
- C2 >= 128, C2 =< 191 ->
- tokenize_string(B, ?ADV_COL(S, 2), [C2, C1 | Acc]);
- <<_:O/binary, C1, C2, C3, _/binary>> when C1 >= 224, C1 =< 239,
- C2 >= 128, C2 =< 191,
- C3 >= 128, C3 =< 191 ->
- tokenize_string(B, ?ADV_COL(S, 3), [C3, C2, C1 | Acc]);
- <<_:O/binary, C1, C2, C3, C4, _/binary>> when C1 >= 240, C1 =< 244,
- C2 >= 128, C2 =< 191,
- C3 >= 128, C3 =< 191,
- C4 >= 128, C4 =< 191 ->
- tokenize_string(B, ?ADV_COL(S, 4), [C4, C3, C2, C1 | Acc]);
- _ ->
- throw(invalid_utf8)
- end.
-
-tokenize_number(B, S) ->
- case tokenize_number(B, sign, S, []) of
- {{int, Int}, S1} ->
- {{const, list_to_integer(Int)}, S1};
- {{float, Float}, S1} ->
- {{const, list_to_float(Float)}, S1}
- end.
-
-tokenize_number(B, sign, S=#decoder{offset=O}, []) ->
- case B of
- <<_:O/binary, $-, _/binary>> ->
- tokenize_number(B, int, ?INC_COL(S), [$-]);
- _ ->
- tokenize_number(B, int, S, [])
- end;
-tokenize_number(B, int, S=#decoder{offset=O}, Acc) ->
- case B of
- <<_:O/binary, $0, _/binary>> ->
- tokenize_number(B, frac, ?INC_COL(S), [$0 | Acc]);
- <<_:O/binary, C, _/binary>> when C >= $1 andalso C =< $9 ->
- tokenize_number(B, int1, ?INC_COL(S), [C | Acc])
- end;
-tokenize_number(B, int1, S=#decoder{offset=O}, Acc) ->
- case B of
- <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
- tokenize_number(B, int1, ?INC_COL(S), [C | Acc]);
- _ ->
- tokenize_number(B, frac, S, Acc)
- end;
-tokenize_number(B, frac, S=#decoder{offset=O}, Acc) ->
- case B of
- <<_:O/binary, $., C, _/binary>> when C >= $0, C =< $9 ->
- tokenize_number(B, frac1, ?ADV_COL(S, 2), [C, $. | Acc]);
- <<_:O/binary, E, _/binary>> when E =:= $e orelse E =:= $E ->
- tokenize_number(B, esign, ?INC_COL(S), [$e, $0, $. | Acc]);
- _ ->
- {{int, lists:reverse(Acc)}, S}
- end;
-tokenize_number(B, frac1, S=#decoder{offset=O}, Acc) ->
- case B of
- <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
- tokenize_number(B, frac1, ?INC_COL(S), [C | Acc]);
- <<_:O/binary, E, _/binary>> when E =:= $e orelse E =:= $E ->
- tokenize_number(B, esign, ?INC_COL(S), [$e | Acc]);
- _ ->
- {{float, lists:reverse(Acc)}, S}
- end;
-tokenize_number(B, esign, S=#decoder{offset=O}, Acc) ->
- case B of
- <<_:O/binary, C, _/binary>> when C =:= $- orelse C=:= $+ ->
- tokenize_number(B, eint, ?INC_COL(S), [C | Acc]);
- _ ->
- tokenize_number(B, eint, S, Acc)
- end;
-tokenize_number(B, eint, S=#decoder{offset=O}, Acc) ->
- case B of
- <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
- tokenize_number(B, eint1, ?INC_COL(S), [C | Acc])
- end;
-tokenize_number(B, eint1, S=#decoder{offset=O}, Acc) ->
- case B of
- <<_:O/binary, C, _/binary>> when C >= $0 andalso C =< $9 ->
- tokenize_number(B, eint1, ?INC_COL(S), [C | Acc]);
- _ ->
- {{float, lists:reverse(Acc)}, S}
- end.
-
-tokenize(B, S=#decoder{offset=O}) ->
- case B of
- <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C) ->
- tokenize(B, ?INC_CHAR(S, C));
- <<_:O/binary, "{", _/binary>> ->
- {start_object, ?INC_COL(S)};
- <<_:O/binary, "}", _/binary>> ->
- {end_object, ?INC_COL(S)};
- <<_:O/binary, "[", _/binary>> ->
- {start_array, ?INC_COL(S)};
- <<_:O/binary, "]", _/binary>> ->
- {end_array, ?INC_COL(S)};
- <<_:O/binary, ",", _/binary>> ->
- {comma, ?INC_COL(S)};
- <<_:O/binary, ":", _/binary>> ->
- {colon, ?INC_COL(S)};
- <<_:O/binary, "null", _/binary>> ->
- {{const, null}, ?ADV_COL(S, 4)};
- <<_:O/binary, "true", _/binary>> ->
- {{const, true}, ?ADV_COL(S, 4)};
- <<_:O/binary, "false", _/binary>> ->
- {{const, false}, ?ADV_COL(S, 5)};
- <<_:O/binary, "\"", _/binary>> ->
- tokenize_string(B, ?INC_COL(S));
- <<_:O/binary, C, _/binary>> when (C >= $0 andalso C =< $9)
- orelse C =:= $- ->
- tokenize_number(B, S);
- <<_:O/binary>> ->
- trim = S#decoder.state,
- {eof, S}
- end.
-%%
-%% Tests
-%%
--ifdef(TEST).
--include_lib("eunit/include/eunit.hrl").
-
-
-%% testing constructs borrowed from the Yaws JSON implementation.
-
-%% Create an object from a list of Key/Value pairs.
-
-obj_new() ->
- {struct, []}.
-
-is_obj({struct, Props}) ->
- F = fun ({K, _}) when is_binary(K) -> true end,
- lists:all(F, Props).
-
-obj_from_list(Props) ->
- Obj = {struct, Props},
- ?assert(is_obj(Obj)),
- Obj.
-
-%% Test for equivalence of Erlang terms.
-%% Due to arbitrary order of construction, equivalent objects might
-%% compare unequal as erlang terms, so we need to carefully recurse
-%% through aggregates (tuples and objects).
-
-equiv({struct, Props1}, {struct, Props2}) ->
- equiv_object(Props1, Props2);
-equiv(L1, L2) when is_list(L1), is_list(L2) ->
- equiv_list(L1, L2);
-equiv(N1, N2) when is_number(N1), is_number(N2) -> N1 == N2;
-equiv(B1, B2) when is_binary(B1), is_binary(B2) -> B1 == B2;
-equiv(A, A) when A =:= true orelse A =:= false orelse A =:= null -> true.
-
-%% Object representation and traversal order is unknown.
-%% Use the sledgehammer and sort property lists.
-
-equiv_object(Props1, Props2) ->
- L1 = lists:keysort(1, Props1),
- L2 = lists:keysort(1, Props2),
- Pairs = lists:zip(L1, L2),
- true = lists:all(fun({{K1, V1}, {K2, V2}}) ->
- equiv(K1, K2) and equiv(V1, V2)
- end, Pairs).
-
-%% Recursively compare tuple elements for equivalence.
-
-equiv_list([], []) ->
- true;
-equiv_list([V1 | L1], [V2 | L2]) ->
- equiv(V1, V2) andalso equiv_list(L1, L2).
-
-decode_test() ->
- [1199344435545.0, 1] = decode(<<"[1199344435545.0,1]">>),
- <<16#F0,16#9D,16#9C,16#95>> = decode([34,"\\ud835","\\udf15",34]).
-
-e2j_vec_test() ->
- test_one(e2j_test_vec(utf8), 1).
-
-test_one([], _N) ->
- %% io:format("~p tests passed~n", [N-1]),
- ok;
-test_one([{E, J} | Rest], N) ->
- %% io:format("[~p] ~p ~p~n", [N, E, J]),
- true = equiv(E, decode(J)),
- true = equiv(E, decode(encode(E))),
- test_one(Rest, 1+N).
-
-e2j_test_vec(utf8) ->
- [
- {1, "1"},
- {3.1416, "3.14160"}, %% text representation may truncate, trail zeroes
- {-1, "-1"},
- {-3.1416, "-3.14160"},
- {12.0e10, "1.20000e+11"},
- {1.234E+10, "1.23400e+10"},
- {-1.234E-10, "-1.23400e-10"},
- {10.0, "1.0e+01"},
- {123.456, "1.23456E+2"},
- {10.0, "1e1"},
- {<<"foo">>, "\"foo\""},
- {<<"foo", 5, "bar">>, "\"foo\\u0005bar\""},
- {<<"">>, "\"\""},
- {<<"\n\n\n">>, "\"\\n\\n\\n\""},
- {<<"\" \b\f\r\n\t\"">>, "\"\\\" \\b\\f\\r\\n\\t\\\"\""},
- {obj_new(), "{}"},
- {obj_from_list([{<<"foo">>, <<"bar">>}]), "{\"foo\":\"bar\"}"},
- {obj_from_list([{<<"foo">>, <<"bar">>}, {<<"baz">>, 123}]),
- "{\"foo\":\"bar\",\"baz\":123}"},
- {[], "[]"},
- {[[]], "[[]]"},
- {[1, <<"foo">>], "[1,\"foo\"]"},
-
- %% json array in a json object
- {obj_from_list([{<<"foo">>, [123]}]),
- "{\"foo\":[123]}"},
-
- %% json object in a json object
- {obj_from_list([{<<"foo">>, obj_from_list([{<<"bar">>, true}])}]),
- "{\"foo\":{\"bar\":true}}"},
-
- %% fold evaluation order
- {obj_from_list([{<<"foo">>, []},
- {<<"bar">>, obj_from_list([{<<"baz">>, true}])},
- {<<"alice">>, <<"bob">>}]),
- "{\"foo\":[],\"bar\":{\"baz\":true},\"alice\":\"bob\"}"},
-
- %% json object in a json array
- {[-123, <<"foo">>, obj_from_list([{<<"bar">>, []}]), null],
- "[-123,\"foo\",{\"bar\":[]},null]"}
- ].
-
-%% test utf8 encoding
-encoder_utf8_test() ->
- %% safe conversion case (default)
- [34,"\\u0001","\\u0442","\\u0435","\\u0441","\\u0442",34] =
- encode(<<1,"\321\202\320\265\321\201\321\202">>),
-
- %% raw utf8 output (optional)
- Enc = mochijson2:encoder([{utf8, true}]),
- [34,"\\u0001",[209,130],[208,181],[209,129],[209,130],34] =
- Enc(<<1,"\321\202\320\265\321\201\321\202">>).
-
-input_validation_test() ->
- Good = [
- {16#00A3, <<?Q, 16#C2, 16#A3, ?Q>>}, %% pound
- {16#20AC, <<?Q, 16#E2, 16#82, 16#AC, ?Q>>}, %% euro
- {16#10196, <<?Q, 16#F0, 16#90, 16#86, 16#96, ?Q>>} %% denarius
- ],
- lists:foreach(fun({CodePoint, UTF8}) ->
- Expect = list_to_binary(xmerl_ucs:to_utf8(CodePoint)),
- Expect = decode(UTF8)
- end, Good),
-
- Bad = [
- %% 2nd, 3rd, or 4th byte of a multi-byte sequence w/o leading byte
- <<?Q, 16#80, ?Q>>,
- %% missing continuations, last byte in each should be 80-BF
- <<?Q, 16#C2, 16#7F, ?Q>>,
- <<?Q, 16#E0, 16#80,16#7F, ?Q>>,
- <<?Q, 16#F0, 16#80, 16#80, 16#7F, ?Q>>,
- %% we don't support code points > 10FFFF per RFC 3629
- <<?Q, 16#F5, 16#80, 16#80, 16#80, ?Q>>,
- %% escape characters trigger a different code path
- <<?Q, $\\, $\n, 16#80, ?Q>>
- ],
- lists:foreach(
- fun(X) ->
- ok = try decode(X) catch invalid_utf8 -> ok end,
- %% could be {ucs,{bad_utf8_character_code}} or
- %% {json_encode,{bad_char,_}}
- {'EXIT', _} = (catch encode(X))
- end, Bad).
-
-inline_json_test() ->
- ?assertEqual(<<"\"iodata iodata\"">>,
- iolist_to_binary(
- encode({json, [<<"\"iodata">>, " iodata\""]}))),
- ?assertEqual({struct, [{<<"key">>, <<"iodata iodata">>}]},
- decode(
- encode({struct,
- [{key, {json, [<<"\"iodata">>, " iodata\""]}}]}))),
- ok.
-
-big_unicode_test() ->
- UTF8Seq = list_to_binary(xmerl_ucs:to_utf8(16#0001d120)),
- ?assertEqual(
- <<"\"\\ud834\\udd20\"">>,
- iolist_to_binary(encode(UTF8Seq))),
- ?assertEqual(
- UTF8Seq,
- decode(iolist_to_binary(encode(UTF8Seq)))),
- ok.
-
-custom_decoder_test() ->
- ?assertEqual(
- {struct, [{<<"key">>, <<"value">>}]},
- (decoder([]))("{\"key\": \"value\"}")),
- F = fun ({struct, [{<<"key">>, <<"value">>}]}) -> win end,
- ?assertEqual(
- win,
- (decoder([{object_hook, F}]))("{\"key\": \"value\"}")),
- ok.
-
-atom_test() ->
- %% JSON native atoms
- [begin
- ?assertEqual(A, decode(atom_to_list(A))),
- ?assertEqual(iolist_to_binary(atom_to_list(A)),
- iolist_to_binary(encode(A)))
- end || A <- [true, false, null]],
- %% Atom to string
- ?assertEqual(
- <<"\"foo\"">>,
- iolist_to_binary(encode(foo))),
- ?assertEqual(
- <<"\"\\ud834\\udd20\"">>,
- iolist_to_binary(encode(list_to_atom(xmerl_ucs:to_utf8(16#0001d120))))),
- ok.
-
-key_encode_test() ->
- %% Some forms are accepted as keys that would not be strings in other
- %% cases
- ?assertEqual(
- <<"{\"foo\":1}">>,
- iolist_to_binary(encode({struct, [{foo, 1}]}))),
- ?assertEqual(
- <<"{\"foo\":1}">>,
- iolist_to_binary(encode({struct, [{<<"foo">>, 1}]}))),
- ?assertEqual(
- <<"{\"foo\":1}">>,
- iolist_to_binary(encode({struct, [{"foo", 1}]}))),
- ?assertEqual(
- <<"{\"foo\":1}">>,
- iolist_to_binary(encode([{foo, 1}]))),
- ?assertEqual(
- <<"{\"foo\":1}">>,
- iolist_to_binary(encode([{<<"foo">>, 1}]))),
- ?assertEqual(
- <<"{\"foo\":1}">>,
- iolist_to_binary(encode([{"foo", 1}]))),
- ?assertEqual(
- <<"{\"\\ud834\\udd20\":1}">>,
- iolist_to_binary(
- encode({struct, [{[16#0001d120], 1}]}))),
- ?assertEqual(
- <<"{\"1\":1}">>,
- iolist_to_binary(encode({struct, [{1, 1}]}))),
- ok.
-
-unsafe_chars_test() ->
- Chars = "\"\\\b\f\n\r\t",
- [begin
- ?assertEqual(false, json_string_is_safe([C])),
- ?assertEqual(false, json_bin_is_safe(<<C>>)),
- ?assertEqual(<<C>>, decode(encode(<<C>>)))
- end || C <- Chars],
- ?assertEqual(
- false,
- json_string_is_safe([16#0001d120])),
- ?assertEqual(
- false,
- json_bin_is_safe(list_to_binary(xmerl_ucs:to_utf8(16#0001d120)))),
- ?assertEqual(
- [16#0001d120],
- xmerl_ucs:from_utf8(
- binary_to_list(
- decode(encode(list_to_atom(xmerl_ucs:to_utf8(16#0001d120))))))),
- ?assertEqual(
- false,
- json_string_is_safe([16#110000])),
- ?assertEqual(
- false,
- json_bin_is_safe(list_to_binary(xmerl_ucs:to_utf8([16#110000])))),
- %% solidus can be escaped but isn't unsafe by default
- ?assertEqual(
- <<"/">>,
- decode(<<"\"\\/\"">>)),
- ok.
-
-int_test() ->
- ?assertEqual(0, decode("0")),
- ?assertEqual(1, decode("1")),
- ?assertEqual(11, decode("11")),
- ok.
-
-large_int_test() ->
- ?assertEqual(<<"-2147483649214748364921474836492147483649">>,
- iolist_to_binary(encode(-2147483649214748364921474836492147483649))),
- ?assertEqual(<<"2147483649214748364921474836492147483649">>,
- iolist_to_binary(encode(2147483649214748364921474836492147483649))),
- ok.
-
-float_test() ->
- ?assertEqual(<<"-2147483649.0">>, iolist_to_binary(encode(-2147483649.0))),
- ?assertEqual(<<"2147483648.0">>, iolist_to_binary(encode(2147483648.0))),
- ok.
-
-handler_test() ->
- ?assertEqual(
- {'EXIT',{json_encode,{bad_term,{x,y}}}},
- catch encode({x,y})),
- F = fun ({x,y}) -> [] end,
- ?assertEqual(
- <<"[]">>,
- iolist_to_binary((encoder([{handler, F}]))({x, y}))),
- ok.
-
-encode_empty_test_() ->
- [{A, ?_assertEqual(<<"{}">>, iolist_to_binary(encode(B)))}
- || {A, B} <- [{"eep18 {}", {}},
- {"eep18 {[]}", {[]}},
- {"{struct, []}", {struct, []}}]].
-
-encode_test_() ->
- P = [{<<"k">>, <<"v">>}],
- JSON = iolist_to_binary(encode({struct, P})),
- [{atom_to_list(F),
- ?_assertEqual(JSON, iolist_to_binary(encode(decode(JSON, [{format, F}]))))}
- || F <- [struct, eep18, proplist]].
-
-format_test_() ->
- P = [{<<"k">>, <<"v">>}],
- JSON = iolist_to_binary(encode({struct, P})),
- [{atom_to_list(F),
- ?_assertEqual(A, decode(JSON, [{format, F}]))}
- || {F, A} <- [{struct, {struct, P}},
- {eep18, {P}},
- {proplist, P}]].
-
--endif.
diff --git a/src/pg2_fixed.erl b/src/pg2_fixed.erl
index 7b6533e851..222a0bc849 100644
--- a/src/pg2_fixed.erl
+++ b/src/pg2_fixed.erl
@@ -146,14 +146,14 @@ get_closest_pid(Name) ->
[Pid] ->
Pid;
[] ->
- {_,_,X} = erlang:now(),
case get_members(Name) of
[] -> {error, {no_process, Name}};
Members ->
+ X = time_compat:erlang_system_time(micro_seconds),
lists:nth((X rem length(Members))+1, Members)
end;
Members when is_list(Members) ->
- {_,_,X} = erlang:now(),
+ X = time_compat:erlang_system_time(micro_seconds),
lists:nth((X rem length(Members))+1, Members);
Else ->
Else
diff --git a/src/pmon.erl b/src/pmon.erl
deleted file mode 100644
index cdfdc1c0c0..0000000000
--- a/src/pmon.erl
+++ /dev/null
@@ -1,109 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(pmon).
-
-%% Process Monitor
-%% ================
-%%
-%% This module monitors processes so that every process has at most
-%% 1 monitor.
-%% Processes monitored can be dynamically added and removed.
-%%
-%% Unlike erlang:[de]monitor* functions, this module
-%% provides basic querying capability and avoids contacting down nodes.
-%%
-%% It is used to monitor nodes, queue mirrors, and by
-%% the queue collector, among other things.
-
--export([new/0, new/1, monitor/2, monitor_all/2, demonitor/2,
- is_monitored/2, erase/2, monitored/1, is_empty/1]).
-
--compile({no_auto_import, [monitor/2]}).
-
--record(state, {dict, module}).
-
--ifdef(use_specs).
-
-%%----------------------------------------------------------------------------
-
--export_type([?MODULE/0]).
-
--opaque(?MODULE() :: #state{dict :: dict:dict(),
- module :: atom()}).
-
--type(item() :: pid() | {atom(), node()}).
-
--spec(new/0 :: () -> ?MODULE()).
--spec(new/1 :: ('erlang' | 'delegate') -> ?MODULE()).
--spec(monitor/2 :: (item(), ?MODULE()) -> ?MODULE()).
--spec(monitor_all/2 :: ([item()], ?MODULE()) -> ?MODULE()).
--spec(demonitor/2 :: (item(), ?MODULE()) -> ?MODULE()).
--spec(is_monitored/2 :: (item(), ?MODULE()) -> boolean()).
--spec(erase/2 :: (item(), ?MODULE()) -> ?MODULE()).
--spec(monitored/1 :: (?MODULE()) -> [item()]).
--spec(is_empty/1 :: (?MODULE()) -> boolean()).
-
--endif.
-
-new() -> new(erlang).
-
-new(Module) -> #state{dict = dict:new(),
- module = Module}.
-
-monitor(Item, S = #state{dict = M, module = Module}) ->
- case dict:is_key(Item, M) of
- true -> S;
- false -> case node_alive_shortcut(Item) of
- true -> Ref = Module:monitor(process, Item),
- S#state{dict = dict:store(Item, Ref, M)};
- false -> self() ! {'DOWN', fake_ref, process, Item,
- nodedown},
- S
- end
- end.
-
-monitor_all([], S) -> S; %% optimisation
-monitor_all([Item], S) -> monitor(Item, S); %% optimisation
-monitor_all(Items, S) -> lists:foldl(fun monitor/2, S, Items).
-
-demonitor(Item, S = #state{dict = M, module = Module}) ->
- case dict:find(Item, M) of
- {ok, MRef} -> Module:demonitor(MRef),
- S#state{dict = dict:erase(Item, M)};
- error -> S
- end.
-
-is_monitored(Item, #state{dict = M}) -> dict:is_key(Item, M).
-
-erase(Item, S = #state{dict = M}) -> S#state{dict = dict:erase(Item, M)}.
-
-monitored(#state{dict = M}) -> dict:fetch_keys(M).
-
-is_empty(#state{dict = M}) -> dict:size(M) == 0.
-
-%%----------------------------------------------------------------------------
-
-%% We check here to see if the node is alive in order to avoid trying
-%% to connect to it if it isn't - this can cause substantial
-%% slowdowns. We can't perform this shortcut if passed {Name, Node}
-%% since we would need to convert that into a pid for the fake 'DOWN'
-%% message, so we always return true here - but that's OK, it's just
-%% an optimisation.
-node_alive_shortcut(P) when is_pid(P) ->
- lists:member(node(P), [node() | nodes()]);
-node_alive_shortcut({_Name, _Node}) ->
- true.
diff --git a/src/priority_queue.erl b/src/priority_queue.erl
deleted file mode 100644
index 88c69513d7..0000000000
--- a/src/priority_queue.erl
+++ /dev/null
@@ -1,227 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
-%% Priority queues have essentially the same interface as ordinary
-%% queues, except that a) there is an in/3 that takes a priority, and
-%% b) we have only implemented the core API we need.
-%%
-%% Priorities should be integers - the higher the value the higher the
-%% priority - but we don't actually check that.
-%%
-%% in/2 inserts items with priority 0.
-%%
-%% We optimise the case where a priority queue is being used just like
-%% an ordinary queue. When that is the case we represent the priority
-%% queue as an ordinary queue. We could just call into the 'queue'
-%% module for that, but for efficiency we implement the relevant
-%% functions directly in here, thus saving on inter-module calls and
-%% eliminating a level of boxing.
-%%
-%% When the queue contains items with non-zero priorities, it is
-%% represented as a sorted kv list with the inverted Priority as the
-%% key and an ordinary queue as the value. Here again we use our own
-%% ordinary queue implemention for efficiency, often making recursive
-%% calls into the same function knowing that ordinary queues represent
-%% a base case.
-
-
--module(priority_queue).
-
--export([new/0, is_queue/1, is_empty/1, len/1, to_list/1, from_list/1,
- in/2, in/3, out/1, out_p/1, join/2, filter/2, fold/3, highest/1]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([q/0]).
-
--type(q() :: pqueue()).
--type(priority() :: integer() | 'infinity').
--type(squeue() :: {queue, [any()], [any()], non_neg_integer()}).
--type(pqueue() :: squeue() | {pqueue, [{priority(), squeue()}]}).
-
--spec(new/0 :: () -> pqueue()).
--spec(is_queue/1 :: (any()) -> boolean()).
--spec(is_empty/1 :: (pqueue()) -> boolean()).
--spec(len/1 :: (pqueue()) -> non_neg_integer()).
--spec(to_list/1 :: (pqueue()) -> [{priority(), any()}]).
--spec(from_list/1 :: ([{priority(), any()}]) -> pqueue()).
--spec(in/2 :: (any(), pqueue()) -> pqueue()).
--spec(in/3 :: (any(), priority(), pqueue()) -> pqueue()).
--spec(out/1 :: (pqueue()) -> {empty | {value, any()}, pqueue()}).
--spec(out_p/1 :: (pqueue()) -> {empty | {value, any(), priority()}, pqueue()}).
--spec(join/2 :: (pqueue(), pqueue()) -> pqueue()).
--spec(filter/2 :: (fun ((any()) -> boolean()), pqueue()) -> pqueue()).
--spec(fold/3 ::
- (fun ((any(), priority(), A) -> A), A, pqueue()) -> A).
--spec(highest/1 :: (pqueue()) -> priority() | 'empty').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-new() ->
- {queue, [], [], 0}.
-
-is_queue({queue, R, F, L}) when is_list(R), is_list(F), is_integer(L) ->
- true;
-is_queue({pqueue, Queues}) when is_list(Queues) ->
- lists:all(fun ({infinity, Q}) -> is_queue(Q);
- ({P, Q}) -> is_integer(P) andalso is_queue(Q)
- end, Queues);
-is_queue(_) ->
- false.
-
-is_empty({queue, [], [], 0}) ->
- true;
-is_empty(_) ->
- false.
-
-len({queue, _R, _F, L}) ->
- L;
-len({pqueue, Queues}) ->
- lists:sum([len(Q) || {_, Q} <- Queues]).
-
-to_list({queue, In, Out, _Len}) when is_list(In), is_list(Out) ->
- [{0, V} || V <- Out ++ lists:reverse(In, [])];
-to_list({pqueue, Queues}) ->
- [{maybe_negate_priority(P), V} || {P, Q} <- Queues,
- {0, V} <- to_list(Q)].
-
-from_list(L) ->
- lists:foldl(fun ({P, E}, Q) -> in(E, P, Q) end, new(), L).
-
-in(Item, Q) ->
- in(Item, 0, Q).
-
-in(X, 0, {queue, [_] = In, [], 1}) ->
- {queue, [X], In, 2};
-in(X, 0, {queue, In, Out, Len}) when is_list(In), is_list(Out) ->
- {queue, [X|In], Out, Len + 1};
-in(X, Priority, _Q = {queue, [], [], 0}) ->
- in(X, Priority, {pqueue, []});
-in(X, Priority, Q = {queue, _, _, _}) ->
- in(X, Priority, {pqueue, [{0, Q}]});
-in(X, Priority, {pqueue, Queues}) ->
- P = maybe_negate_priority(Priority),
- {pqueue, case lists:keysearch(P, 1, Queues) of
- {value, {_, Q}} ->
- lists:keyreplace(P, 1, Queues, {P, in(X, Q)});
- false when P == infinity ->
- [{P, {queue, [X], [], 1}} | Queues];
- false ->
- case Queues of
- [{infinity, InfQueue} | Queues1] ->
- [{infinity, InfQueue} |
- lists:keysort(1, [{P, {queue, [X], [], 1}} | Queues1])];
- _ ->
- lists:keysort(1, [{P, {queue, [X], [], 1}} | Queues])
- end
- end}.
-
-out({queue, [], [], 0} = Q) ->
- {empty, Q};
-out({queue, [V], [], 1}) ->
- {{value, V}, {queue, [], [], 0}};
-out({queue, [Y|In], [], Len}) ->
- [V|Out] = lists:reverse(In, []),
- {{value, V}, {queue, [Y], Out, Len - 1}};
-out({queue, In, [V], Len}) when is_list(In) ->
- {{value,V}, r2f(In, Len - 1)};
-out({queue, In,[V|Out], Len}) when is_list(In) ->
- {{value, V}, {queue, In, Out, Len - 1}};
-out({pqueue, [{P, Q} | Queues]}) ->
- {R, Q1} = out(Q),
- NewQ = case is_empty(Q1) of
- true -> case Queues of
- [] -> {queue, [], [], 0};
- [{0, OnlyQ}] -> OnlyQ;
- [_|_] -> {pqueue, Queues}
- end;
- false -> {pqueue, [{P, Q1} | Queues]}
- end,
- {R, NewQ}.
-
-out_p({queue, _, _, _} = Q) -> add_p(out(Q), 0);
-out_p({pqueue, [{P, _} | _]} = Q) -> add_p(out(Q), maybe_negate_priority(P)).
-
-add_p(R, P) -> case R of
- {empty, Q} -> {empty, Q};
- {{value, V}, Q} -> {{value, V, P}, Q}
- end.
-
-join(A, {queue, [], [], 0}) ->
- A;
-join({queue, [], [], 0}, B) ->
- B;
-join({queue, AIn, AOut, ALen}, {queue, BIn, BOut, BLen}) ->
- {queue, BIn, AOut ++ lists:reverse(AIn, BOut), ALen + BLen};
-join(A = {queue, _, _, _}, {pqueue, BPQ}) ->
- {Pre, Post} =
- lists:splitwith(fun ({P, _}) -> P < 0 orelse P == infinity end, BPQ),
- Post1 = case Post of
- [] -> [ {0, A} ];
- [ {0, ZeroQueue} | Rest ] -> [ {0, join(A, ZeroQueue)} | Rest ];
- _ -> [ {0, A} | Post ]
- end,
- {pqueue, Pre ++ Post1};
-join({pqueue, APQ}, B = {queue, _, _, _}) ->
- {Pre, Post} =
- lists:splitwith(fun ({P, _}) -> P < 0 orelse P == infinity end, APQ),
- Post1 = case Post of
- [] -> [ {0, B} ];
- [ {0, ZeroQueue} | Rest ] -> [ {0, join(ZeroQueue, B)} | Rest ];
- _ -> [ {0, B} | Post ]
- end,
- {pqueue, Pre ++ Post1};
-join({pqueue, APQ}, {pqueue, BPQ}) ->
- {pqueue, merge(APQ, BPQ, [])}.
-
-merge([], BPQ, Acc) ->
- lists:reverse(Acc, BPQ);
-merge(APQ, [], Acc) ->
- lists:reverse(Acc, APQ);
-merge([{P, A}|As], [{P, B}|Bs], Acc) ->
- merge(As, Bs, [ {P, join(A, B)} | Acc ]);
-merge([{PA, A}|As], Bs = [{PB, _}|_], Acc) when PA < PB orelse PA == infinity ->
- merge(As, Bs, [ {PA, A} | Acc ]);
-merge(As = [{_, _}|_], [{PB, B}|Bs], Acc) ->
- merge(As, Bs, [ {PB, B} | Acc ]).
-
-filter(Pred, Q) -> fold(fun(V, P, Acc) ->
- case Pred(V) of
- true -> in(V, P, Acc);
- false -> Acc
- end
- end, new(), Q).
-
-fold(Fun, Init, Q) -> case out_p(Q) of
- {empty, _Q} -> Init;
- {{value, V, P}, Q1} -> fold(Fun, Fun(V, P, Init), Q1)
- end.
-
-highest({queue, [], [], 0}) -> empty;
-highest({queue, _, _, _}) -> 0;
-highest({pqueue, [{P, _} | _]}) -> maybe_negate_priority(P).
-
-r2f([], 0) -> {queue, [], [], 0};
-r2f([_] = R, 1) -> {queue, [], R, 1};
-r2f([X,Y], 2) -> {queue, [X], [Y], 2};
-r2f([X,Y|R], L) -> {queue, [X,Y], lists:reverse(R, []), L}.
-
-maybe_negate_priority(infinity) -> infinity;
-maybe_negate_priority(P) -> -P.
diff --git a/ebin/rabbit_app.in b/src/rabbit.app.src
index ac60ed6825..18726c71e8 100644
--- a/ebin/rabbit_app.in
+++ b/src/rabbit.app.src
@@ -1,16 +1,15 @@
{application, rabbit, %% -*- erlang -*-
[{description, "RabbitMQ"},
{id, "RabbitMQ"},
- {vsn, "%%VSN%%"},
+ {vsn, "0.0.0"},
{modules, []},
{registered, [rabbit_amqqueue_sup,
rabbit_log,
rabbit_node_monitor,
rabbit_router,
rabbit_sup,
- rabbit_tcp_client_sup,
rabbit_direct_client_sup]},
- {applications, [kernel, stdlib, sasl, mnesia, os_mon, xmerl]},
+ {applications, [kernel, stdlib, sasl, mnesia, rabbit_common, ranch, os_mon, xmerl]},
%% we also depend on crypto, public_key and ssl but they shouldn't be
%% in here as we don't actually want to start it
{mod, {rabbit, []}},
@@ -19,6 +18,7 @@
{ssl_options, []},
{vm_memory_high_watermark, 0.4},
{vm_memory_high_watermark_paging_ratio, 0.5},
+ {memory_monitor_interval, 2500},
{disk_free_limit, 50000000}, %% 50MB
{msg_store_index_module, rabbit_msg_store_ets_index},
{backing_queue_module, rabbit_variable_queue},
@@ -29,7 +29,7 @@
{heartbeat, 60},
{msg_store_file_size_limit, 16777216},
{fhc_write_buffering, true},
- {fhc_read_buffering, true},
+ {fhc_read_buffering, false},
{queue_index_max_journal_entries, 32768},
{queue_index_embed_msgs_below, 4096},
{default_user, <<"guest">>},
@@ -38,6 +38,7 @@
{default_vhost, <<"/">>},
{default_permissions, [<<".*">>, <<".*">>, <<".*">>]},
{loopback_users, [<<"guest">>]},
+ {password_hashing_module, rabbit_password_hashing_sha256},
{cluster_nodes, {[], disc}},
{server_properties, []},
{collect_statistics, none},
@@ -55,10 +56,7 @@
{reverse_dns_lookups, false},
{cluster_partition_handling, ignore},
{cluster_keepalive_interval, 10000},
- {tcp_listen_options, [binary,
- {packet, raw},
- {reuseaddr, true},
- {backlog, 128},
+ {tcp_listen_options, [{backlog, 128},
{nodelay, true},
{linger, {true, 0}},
{exit_on_close, false}]},
@@ -84,6 +82,7 @@
{ssl_apps, [asn1, crypto, public_key, ssl]},
%% see rabbitmq-server#114
{mirroring_flow_control, true},
+ {mirroring_sync_batch_size, 4096},
%% see rabbitmq-server#227 and related tickets.
%% msg_store_credit_disc_bound only takes effect when
%% messages are persisted to the message store. If messages
diff --git a/src/rabbit.erl b/src/rabbit.erl
index bb906ede4f..2acffca97f 100644
--- a/src/rabbit.erl
+++ b/src/rabbit.erl
@@ -192,13 +192,8 @@
-include("rabbit_framing.hrl").
-include("rabbit.hrl").
--define(APPS, [os_mon, mnesia, rabbit]).
+-define(APPS, [os_mon, mnesia, rabbit_common, rabbit]).
-%% HiPE compilation uses multiple cores anyway, but some bits are
-%% IO-bound so we can go faster if we parallelise a bit more. In
-%% practice 2 processes seems just as fast as any other number > 1,
-%% and keeps the progress bar realistic-ish.
--define(HIPE_PROCESSES, 2).
-define(ASYNC_THREADS_WARNING_THRESHOLD, 8).
%%----------------------------------------------------------------------------
@@ -233,8 +228,8 @@
-spec(start/2 :: ('normal',[]) ->
{'error',
{'erlang_version_too_old',
- {'found',[any()]},
- {'required',[any(),...]}}} |
+ {'found',string(),string()},
+ {'required',string(),string()}}} |
{'ok',pid()}).
-spec(stop/1 :: (_) -> 'ok').
@@ -248,59 +243,6 @@
%%----------------------------------------------------------------------------
-%% HiPE compilation happens before we have log handlers - so we have
-%% to io:format/2, it's all we can do.
-
-maybe_hipe_compile() ->
- {ok, Want} = application:get_env(rabbit, hipe_compile),
- Can = code:which(hipe) =/= non_existing,
- case {Want, Can} of
- {true, true} -> hipe_compile();
- {true, false} -> false;
- {false, _} -> {ok, disabled}
- end.
-
-log_hipe_result({ok, disabled}) ->
- ok;
-log_hipe_result({ok, Count, Duration}) ->
- rabbit_log:info(
- "HiPE in use: compiled ~B modules in ~Bs.~n", [Count, Duration]);
-log_hipe_result(false) ->
- io:format(
- "~nNot HiPE compiling: HiPE not found in this Erlang installation.~n"),
- rabbit_log:warning(
- "Not HiPE compiling: HiPE not found in this Erlang installation.~n").
-
-%% HiPE compilation happens before we have log handlers and can take a
-%% long time, so make an exception to our no-stdout policy and display
-%% progress via stdout.
-hipe_compile() ->
- {ok, HipeModulesAll} = application:get_env(rabbit, hipe_modules),
- HipeModules = [HM || HM <- HipeModulesAll, code:which(HM) =/= non_existing],
- Count = length(HipeModules),
- io:format("~nHiPE compiling: |~s|~n |",
- [string:copies("-", Count)]),
- T1 = erlang:now(),
- PidMRefs = [spawn_monitor(fun () -> [begin
- {ok, M} = hipe:c(M, [o3]),
- io:format("#")
- end || M <- Ms]
- end) ||
- Ms <- split(HipeModules, ?HIPE_PROCESSES)],
- [receive
- {'DOWN', MRef, process, _, normal} -> ok;
- {'DOWN', MRef, process, _, Reason} -> exit(Reason)
- end || {_Pid, MRef} <- PidMRefs],
- T2 = erlang:now(),
- Duration = timer:now_diff(T2, T1) div 1000000,
- io:format("|~n~nCompiled ~B modules in ~Bs~n", [Count, Duration]),
- {ok, Count, Duration}.
-
-split(L, N) -> split0(L, [[] || _ <- lists:seq(1, N)]).
-
-split0([], Ls) -> Ls;
-split0([I | Is], [L | Ls]) -> split0(Is, Ls ++ [[I | L]]).
-
ensure_application_loaded() ->
%% We end up looking at the rabbit app's env for HiPE and log
%% handling, so it needs to be loaded. But during the tests, it
@@ -312,10 +254,12 @@ ensure_application_loaded() ->
start() ->
start_it(fun() ->
- %% We do not want to HiPE compile or upgrade
- %% mnesia after just restarting the app
+ %% We do not want to upgrade mnesia after just
+ %% restarting the app.
ok = ensure_application_loaded(),
+ HipeResult = rabbit_hipe:maybe_hipe_compile(),
ok = ensure_working_log_handlers(),
+ rabbit_hipe:log_hipe_result(HipeResult),
rabbit_node_monitor:prepare_cluster_status_files(),
rabbit_mnesia:check_cluster_consistency(),
broker_start()
@@ -324,9 +268,9 @@ start() ->
boot() ->
start_it(fun() ->
ok = ensure_application_loaded(),
- HipeResult = maybe_hipe_compile(),
+ HipeResult = rabbit_hipe:maybe_hipe_compile(),
ok = ensure_working_log_handlers(),
- log_hipe_result(HipeResult),
+ rabbit_hipe:log_hipe_result(HipeResult),
rabbit_node_monitor:prepare_cluster_status_files(),
ok = rabbit_upgrade:maybe_upgrade_mnesia(),
%% It's important that the consistency check happens after
@@ -393,7 +337,7 @@ start_apps(Apps) ->
app_utils:load_applications(Apps),
OrderedApps = app_utils:app_dependency_order(Apps, false),
case lists:member(rabbit, Apps) of
- false -> run_boot_steps(Apps); %% plugin activation
+ false -> rabbit_boot_steps:run_boot_steps(Apps); %% plugin activation
true -> ok %% will run during start of rabbit app
end,
ok = app_utils:start_applications(OrderedApps,
@@ -403,8 +347,9 @@ stop_apps(Apps) ->
ok = app_utils:stop_applications(
Apps, handle_app_error(error_during_shutdown)),
case lists:member(rabbit, Apps) of
- false -> run_cleanup_steps(Apps); %% plugin deactivation
- true -> ok %% it's all going anyway
+ %% plugin deactivation
+ false -> rabbit_boot_steps:run_cleanup_steps(Apps);
+ true -> ok %% it's all going anyway
end,
ok.
@@ -415,10 +360,6 @@ handle_app_error(Term) ->
throw({Term, App, Reason})
end.
-run_cleanup_steps(Apps) ->
- [run_step(Attrs, cleanup) || Attrs <- find_steps(Apps)],
- ok.
-
await_startup() ->
await_startup(false).
@@ -462,7 +403,8 @@ status() ->
{uptime, begin
{T,_} = erlang:statistics(wall_clock),
T div 1000
- end}],
+ end},
+ {kernel, {net_ticktime, net_kernel:get_net_ticktime()}}],
S1 ++ S2 ++ S3 ++ S4.
alarms() ->
@@ -524,7 +466,7 @@ start(normal, []) ->
log_banner(),
warn_if_kernel_config_dubious(),
warn_if_disc_io_options_dubious(),
- run_boot_steps(),
+ rabbit_boot_steps:run_boot_steps(),
{ok, SupPid};
Error ->
Error
@@ -538,75 +480,6 @@ stop(_State) ->
end,
ok.
-%%---------------------------------------------------------------------------
-%% boot step logic
-
-run_boot_steps() ->
- run_boot_steps([App || {App, _, _} <- application:loaded_applications()]).
-
-run_boot_steps(Apps) ->
- [ok = run_step(Attrs, mfa) || Attrs <- find_steps(Apps)],
- ok.
-
-find_steps(Apps) ->
- All = sort_boot_steps(rabbit_misc:all_module_attributes(rabbit_boot_step)),
- [Attrs || {App, _, Attrs} <- All, lists:member(App, Apps)].
-
-run_step(Attributes, AttributeName) ->
- case [MFA || {Key, MFA} <- Attributes,
- Key =:= AttributeName] of
- [] ->
- ok;
- MFAs ->
- [case apply(M,F,A) of
- ok -> ok;
- {error, Reason} -> exit({error, Reason})
- end || {M,F,A} <- MFAs],
- ok
- end.
-
-vertices({AppName, _Module, Steps}) ->
- [{StepName, {AppName, StepName, Atts}} || {StepName, Atts} <- Steps].
-
-edges({_AppName, _Module, Steps}) ->
- EnsureList = fun (L) when is_list(L) -> L;
- (T) -> [T]
- end,
- [case Key of
- requires -> {StepName, OtherStep};
- enables -> {OtherStep, StepName}
- end || {StepName, Atts} <- Steps,
- {Key, OtherStepOrSteps} <- Atts,
- OtherStep <- EnsureList(OtherStepOrSteps),
- Key =:= requires orelse Key =:= enables].
-
-sort_boot_steps(UnsortedSteps) ->
- case rabbit_misc:build_acyclic_graph(fun vertices/1, fun edges/1,
- UnsortedSteps) of
- {ok, G} ->
- %% Use topological sort to find a consistent ordering (if
- %% there is one, otherwise fail).
- SortedSteps = lists:reverse(
- [begin
- {StepName, Step} = digraph:vertex(G,
- StepName),
- Step
- end || StepName <- digraph_utils:topsort(G)]),
- digraph:delete(G),
- %% Check that all mentioned {M,F,A} triples are exported.
- case [{StepName, {M,F,A}} ||
- {_App, StepName, Attributes} <- SortedSteps,
- {mfa, {M,F,A}} <- Attributes,
- not erlang:function_exported(M, F, length(A))] of
- [] -> SortedSteps;
- MissingFns -> exit({boot_functions_not_exported, MissingFns})
- end;
- {error, {vertex, duplicate, StepName}} ->
- exit({duplicate_boot_step, StepName});
- {error, {edge, Reason, From, To}} ->
- exit({invalid_boot_step_dependency, From, To, Reason})
- end.
-
-ifdef(use_specs).
-spec(boot_error/2 :: (term(), not_available | [tuple()]) -> no_return()).
-endif.
@@ -784,11 +657,23 @@ log_broker_started(Plugins) ->
end).
erts_version_check() ->
- FoundVer = erlang:system_info(version),
- case rabbit_misc:version_compare(?ERTS_MINIMUM, FoundVer, lte) of
- true -> ok;
- false -> {error, {erlang_version_too_old,
- {found, FoundVer}, {required, ?ERTS_MINIMUM}}}
+ ERTSVer = erlang:system_info(version),
+ OTPRel = erlang:system_info(otp_release),
+ case rabbit_misc:version_compare(?ERTS_MINIMUM, ERTSVer, lte) of
+ true when ?ERTS_MINIMUM =/= ERTSVer ->
+ ok;
+ true when ?ERTS_MINIMUM =:= ERTSVer andalso ?OTP_MINIMUM =< OTPRel ->
+ %% When a critical regression or bug is found, a new OTP
+ %% release can be published without changing the ERTS
+ %% version. For instance, this is the case with R16B03 and
+ %% R16B03-1.
+ %%
+ %% In this case, we compare the release versions
+ %% alphabetically.
+ ok;
+ _ -> {error, {erlang_version_too_old,
+ {found, OTPRel, ERTSVer},
+ {required, ?OTP_MINIMUM, ?ERTS_MINIMUM}}}
end.
print_banner() ->
diff --git a/src/rabbit_access_control.erl b/src/rabbit_access_control.erl
index d9dd9cc3dc..f649e4b274 100644
--- a/src/rabbit_access_control.erl
+++ b/src/rabbit_access_control.erl
@@ -41,7 +41,7 @@
rabbit_net:socket() | inet:ip_address())
-> 'ok' | 'not_allowed').
-spec(check_vhost_access/3 ::
- (rabbit_types:user(), rabbit_types:vhost(), rabbit_net:socket())
+ (rabbit_types:user(), rabbit_types:vhost(), rabbit_net:socket() | #authz_socket_info{})
-> 'ok' | rabbit_types:channel_exit()).
-spec(check_resource_access/3 ::
(rabbit_types:user(), rabbit_types:r(atom()), permission_atom())
@@ -142,7 +142,7 @@ check_vhost_access(User = #user{username = Username,
auth_user(User, Impl), VHostPath, Sock)
end,
Mod, "access to vhost '~s' refused for user '~s'",
- [VHostPath, Username]);
+ [VHostPath, Username], not_allowed);
(_, Else) ->
Else
end, ok, Modules).
@@ -164,7 +164,11 @@ check_resource_access(User = #user{username = Username,
(_, Else) -> Else
end, ok, Modules).
+
check_access(Fun, Module, ErrStr, ErrArgs) ->
+ check_access(Fun, Module, ErrStr, ErrArgs, access_refused).
+
+check_access(Fun, Module, ErrStr, ErrArgs, ErrName) ->
Allow = case Fun() of
{error, E} ->
rabbit_log:error(ErrStr ++ " by ~s: ~p~n",
@@ -177,5 +181,5 @@ check_access(Fun, Module, ErrStr, ErrArgs) ->
true ->
ok;
false ->
- rabbit_misc:protocol_error(access_refused, ErrStr, ErrArgs)
+ rabbit_misc:protocol_error(ErrName, ErrStr, ErrArgs)
end.
diff --git a/src/rabbit_alarm.erl b/src/rabbit_alarm.erl
index 557fa31335..fdb673a61b 100644
--- a/src/rabbit_alarm.erl
+++ b/src/rabbit_alarm.erl
@@ -13,6 +13,17 @@
%% The Initial Developer of the Original Code is GoPivotal, Inc.
%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
%%
+%% There are two types of alarms handled by this module:
+%%
+%% * per-node resource (disk, memory) alarms for the whole cluster. If any node
+%%   has an alarm, then all publishing should be disabled throughout the
+%% cluster until all alarms clear. When a node sets such an alarm,
+%% this information is automatically propagated throughout the cluster.
+%%   `#alarms.alarmed_nodes' is used to track this type of alarm.
+%% * limits local to this node (file_descriptor_limit). Used for information
+%%   purposes only: logging and getting node status. This information is not
+%%   propagated throughout the cluster. `#alarms.alarms' is used to track this type of alarm.
+%% @end
-module(rabbit_alarm).
@@ -28,20 +39,34 @@
-define(SERVER, ?MODULE).
--record(alarms, {alertees, alarmed_nodes, alarms}).
+
%%----------------------------------------------------------------------------
-ifdef(use_specs).
+-record(alarms, {alertees :: dict:dict(pid(), rabbit_types:mfargs()),
+ alarmed_nodes :: dict:dict(node(), [resource_alarm_source()]),
+ alarms :: [alarm()]}).
+
+-type(local_alarm() :: 'file_descriptor_limit').
+-type(resource_alarm_source() :: 'disk' | 'node').
+-type(resource_alarm() :: {resource_limit, resource_alarm_source(), node()}).
+-type(alarm() :: local_alarm() | resource_alarm()).
+
-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
-spec(start/0 :: () -> 'ok').
-spec(stop/0 :: () -> 'ok').
-spec(register/2 :: (pid(), rabbit_types:mfargs()) -> [atom()]).
--spec(set_alarm/1 :: (any()) -> 'ok').
--spec(clear_alarm/1 :: (any()) -> 'ok').
+-spec(set_alarm/1 :: ({alarm(), []}) -> 'ok').
+-spec(clear_alarm/1 :: (alarm()) -> 'ok').
-spec(on_node_up/1 :: (node()) -> 'ok').
-spec(on_node_down/1 :: (node()) -> 'ok').
+-spec(get_alarms/0 :: () -> [{alarm(), []}]).
+
+-else.
+
+-record(alarms, {alertees, alarmed_nodes, alarms}).
-endif.
@@ -54,6 +79,7 @@ start() ->
ok = rabbit_sup:start_restartable_child(?MODULE),
ok = gen_event:add_handler(?SERVER, ?MODULE, []),
{ok, MemoryWatermark} = application:get_env(vm_memory_high_watermark),
+
rabbit_sup:start_restartable_child(
vm_memory_monitor, [MemoryWatermark,
fun (Alarm) ->
@@ -68,6 +94,10 @@ start() ->
stop() -> ok.
+%% Registers a handler that should be called on every resource alarm change.
+%% Given a call rabbit_alarm:register(Pid, {M, F, A}), the handler would be
+%% called like this: `apply(M, F, A ++ [Pid, Source, Alert])', where `Source'
+%% has the type of resource_alarm_source() and `Alert' has the type of resource_alert().
register(Pid, AlertMFA) ->
gen_event:call(?SERVER, ?MODULE, {register, Pid, AlertMFA}, infinity).
@@ -79,10 +109,10 @@ get_alarms() -> gen_event:call(?SERVER, ?MODULE, get_alarms, infinity).
on_node_up(Node) -> gen_event:notify(?SERVER, {node_up, Node}).
on_node_down(Node) -> gen_event:notify(?SERVER, {node_down, Node}).
-remote_conserve_resources(Pid, Source, true) ->
+remote_conserve_resources(Pid, Source, {true, _, _}) ->
gen_event:notify({?SERVER, node(Pid)},
{set_alarm, {{resource_limit, Source, node()}, []}});
-remote_conserve_resources(Pid, Source, false) ->
+remote_conserve_resources(Pid, Source, {false, _, _}) ->
gen_event:notify({?SERVER, node(Pid)},
{clear_alarm, {resource_limit, Source, node()}}).
@@ -98,12 +128,17 @@ handle_call({register, Pid, AlertMFA}, State = #alarms{alarmed_nodes = AN}) ->
{ok, lists:usort(lists:append([V || {_, V} <- dict:to_list(AN)])),
internal_register(Pid, AlertMFA, State)};
-handle_call(get_alarms, State = #alarms{alarms = Alarms}) ->
- {ok, Alarms, State};
+handle_call(get_alarms, State) ->
+ {ok, get_alarms(State), State};
handle_call(_Request, State) ->
{ok, not_understood, State}.
+handle_event({set_alarm, {{resource_limit, Source, Node}, []}}, State) ->
+ case is_node_alarmed(Source, Node, State) of
+ true -> {ok, State};
+ false -> handle_set_resource_alarm(Source, Node, State)
+ end;
handle_event({set_alarm, Alarm}, State = #alarms{alarms = Alarms}) ->
case lists:member(Alarm, Alarms) of
true -> {ok, State};
@@ -111,6 +146,13 @@ handle_event({set_alarm, Alarm}, State = #alarms{alarms = Alarms}) ->
handle_set_alarm(Alarm, State#alarms{alarms = UpdatedAlarms})
end;
+handle_event({clear_alarm, {resource_limit, Source, Node}}, State) ->
+ case is_node_alarmed(Source, Node, State) of
+ true ->
+ handle_clear_resource_alarm(Source, Node, State);
+ false ->
+ {ok, State}
+ end;
handle_event({clear_alarm, Alarm}, State = #alarms{alarms = Alarms}) ->
case lists:keymember(Alarm, 1, Alarms) of
true -> handle_clear_alarm(
@@ -127,8 +169,16 @@ handle_event({node_up, Node}, State) ->
{register, self(), {?MODULE, remote_conserve_resources, []}}),
{ok, State};
-handle_event({node_down, Node}, State) ->
- {ok, maybe_alert(fun dict_unappend_all/3, Node, [], false, State)};
+handle_event({node_down, Node}, #alarms{alarmed_nodes = AN} = State) ->
+ AlarmsForDeadNode = case dict:find(Node, AN) of
+ {ok, V} -> V;
+ error -> []
+ end,
+ {ok, lists:foldr(fun(Source, AccState) ->
+ rabbit_log:warning("~s resource limit alarm cleared for dead node ~p~n",
+ [Source, Node]),
+ maybe_alert(fun dict_unappend/3, Node, Source, false, AccState)
+ end, State, AlarmsForDeadNode)};
handle_event({register, Pid, AlertMFA}, State) ->
{ok, internal_register(Pid, AlertMFA, State)};
@@ -158,9 +208,6 @@ dict_append(Key, Val, Dict) ->
end,
dict:store(Key, lists:usort([Val|L]), Dict).
-dict_unappend_all(Key, _Val, Dict) ->
- dict:erase(Key, Dict).
-
dict_unappend(Key, Val, Dict) ->
L = case dict:find(Key, Dict) of
{ok, V} -> V;
@@ -172,10 +219,17 @@ dict_unappend(Key, Val, Dict) ->
X -> dict:store(Key, X, Dict)
end.
-maybe_alert(UpdateFun, Node, Source, Alert,
+maybe_alert(UpdateFun, Node, Source, WasAlertAdded,
State = #alarms{alarmed_nodes = AN,
alertees = Alertees}) ->
AN1 = UpdateFun(Node, Source, AN),
+ %% Is alarm for Source still set on any node?
+ StillHasAlerts = lists:any(fun ({_Node, NodeAlerts}) -> lists:member(Source, NodeAlerts) end, dict:to_list(AN1)),
+ case StillHasAlerts of
+ true -> ok;
+ false -> rabbit_log:warning("~s resource limit alarm cleared across the cluster~n", [Source])
+ end,
+ Alert = {WasAlertAdded, StillHasAlerts, Node},
case node() of
Node -> ok = alert_remote(Alert, Alertees, Source);
_ -> ok
@@ -202,20 +256,21 @@ internal_register(Pid, {M, F, A} = AlertMFA,
State = #alarms{alertees = Alertees}) ->
_MRef = erlang:monitor(process, Pid),
case dict:find(node(), State#alarms.alarmed_nodes) of
- {ok, Sources} -> [apply(M, F, A ++ [Pid, R, true]) || R <- Sources];
+ {ok, Sources} -> [apply(M, F, A ++ [Pid, R, {true, true, node()}]) || R <- Sources];
error -> ok
end,
NewAlertees = dict:store(Pid, AlertMFA, Alertees),
State#alarms{alertees = NewAlertees}.
-handle_set_alarm({{resource_limit, Source, Node}, []}, State) ->
+handle_set_resource_alarm(Source, Node, State) ->
rabbit_log:warning(
"~s resource limit alarm set on node ~p.~n~n"
"**********************************************************~n"
"*** Publishers will be blocked until this alarm clears ***~n"
"**********************************************************~n",
[Source, Node]),
- {ok, maybe_alert(fun dict_append/3, Node, Source, true, State)};
+ {ok, maybe_alert(fun dict_append/3, Node, Source, true, State)}.
+
handle_set_alarm({file_descriptor_limit, []}, State) ->
rabbit_log:warning(
"file descriptor limit alarm set.~n~n"
@@ -227,13 +282,27 @@ handle_set_alarm(Alarm, State) ->
rabbit_log:warning("alarm '~p' set~n", [Alarm]),
{ok, State}.
-handle_clear_alarm({resource_limit, Source, Node}, State) ->
+handle_clear_resource_alarm(Source, Node, State) ->
rabbit_log:warning("~s resource limit alarm cleared on node ~p~n",
[Source, Node]),
- {ok, maybe_alert(fun dict_unappend/3, Node, Source, false, State)};
+ {ok, maybe_alert(fun dict_unappend/3, Node, Source, false, State)}.
+
handle_clear_alarm(file_descriptor_limit, State) ->
rabbit_log:warning("file descriptor limit alarm cleared~n"),
{ok, State};
handle_clear_alarm(Alarm, State) ->
rabbit_log:warning("alarm '~p' cleared~n", [Alarm]),
{ok, State}.
+
+is_node_alarmed(Source, Node, #alarms{alarmed_nodes = AN}) ->
+ case dict:find(Node, AN) of
+ {ok, Sources} ->
+ lists:member(Source, Sources);
+ error ->
+ false
+ end.
+
+get_alarms(#alarms{alarms = Alarms,
+ alarmed_nodes = AN}) ->
+ Alarms ++ [ {{resource_limit, Source, Node}, []}
+ || {Node, Sources} <- dict:to_list(AN), Source <- Sources ].
diff --git a/src/rabbit_amqqueue.erl b/src/rabbit_amqqueue.erl
deleted file mode 100644
index f6cc0fbdda..0000000000
--- a/src/rabbit_amqqueue.erl
+++ /dev/null
@@ -1,902 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_amqqueue).
-
--export([recover/0, stop/0, start/1, declare/5, declare/6,
- delete_immediately/1, delete/3, purge/1, forget_all_durable/1,
- delete_crashed/1, delete_crashed_internal/1]).
--export([pseudo_queue/2, immutable/1]).
--export([lookup/1, not_found_or_absent/1, with/2, with/3, with_or_die/2,
- assert_equivalence/5,
- check_exclusive_access/2, with_exclusive_access_or_die/3,
- stat/1, deliver/2, requeue/3, ack/3, reject/4]).
--export([list/0, list/1, info_keys/0, info/1, info/2, info_all/1, info_all/2]).
--export([list_down/1]).
--export([force_event_refresh/1, notify_policy_changed/1]).
--export([consumers/1, consumers_all/1, consumer_info_keys/0]).
--export([basic_get/4, basic_consume/10, basic_cancel/4, notify_decorators/1]).
--export([notify_sent/2, notify_sent_queue_down/1, resume/2]).
--export([notify_down_all/2, activate_limit_all/2, credit/5]).
--export([on_node_up/1, on_node_down/1]).
--export([update/2, store_queue/1, update_decorators/1, policy_changed/2]).
--export([start_mirroring/1, stop_mirroring/1, sync_mirrors/1,
- cancel_sync_mirrors/1]).
-
-%% internal
--export([internal_declare/2, internal_delete/1, run_backing_queue/3,
- set_ram_duration_target/2, set_maximum_since_use/2]).
-
--include("rabbit.hrl").
--include_lib("stdlib/include/qlc.hrl").
-
--define(INTEGER_ARG_TYPES, [byte, short, signedint, long]).
-
--define(MORE_CONSUMER_CREDIT_AFTER, 50).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([name/0, qmsg/0, absent_reason/0]).
-
--type(name() :: rabbit_types:r('queue')).
--type(qpids() :: [pid()]).
--type(qlen() :: rabbit_types:ok(non_neg_integer())).
--type(qfun(A) :: fun ((rabbit_types:amqqueue()) -> A | no_return())).
--type(qmsg() :: {name(), pid(), msg_id(), boolean(), rabbit_types:message()}).
--type(msg_id() :: non_neg_integer()).
--type(ok_or_errors() ::
- 'ok' | {'error', [{'error' | 'exit' | 'throw', any()}]}).
--type(absent_reason() :: 'nodedown' | 'crashed').
--type(queue_or_absent() :: rabbit_types:amqqueue() |
- {'absent', rabbit_types:amqqueue(),absent_reason()}).
--type(not_found_or_absent() ::
- 'not_found' | {'absent', rabbit_types:amqqueue(), absent_reason()}).
--spec(recover/0 :: () -> [rabbit_types:amqqueue()]).
--spec(stop/0 :: () -> 'ok').
--spec(start/1 :: ([rabbit_types:amqqueue()]) -> 'ok').
--spec(declare/5 ::
- (name(), boolean(), boolean(),
- rabbit_framing:amqp_table(), rabbit_types:maybe(pid()))
- -> {'new' | 'existing' | 'absent' | 'owner_died',
- rabbit_types:amqqueue()} | rabbit_types:channel_exit()).
--spec(declare/6 ::
- (name(), boolean(), boolean(),
- rabbit_framing:amqp_table(), rabbit_types:maybe(pid()), node())
- -> {'new' | 'existing' | 'owner_died', rabbit_types:amqqueue()} |
- {'absent', rabbit_types:amqqueue(), absent_reason()} |
- rabbit_types:channel_exit()).
--spec(internal_declare/2 ::
- (rabbit_types:amqqueue(), boolean())
- -> queue_or_absent() | rabbit_misc:thunk(queue_or_absent())).
--spec(update/2 ::
- (name(),
- fun((rabbit_types:amqqueue()) -> rabbit_types:amqqueue()))
- -> 'not_found' | rabbit_types:amqqueue()).
--spec(lookup/1 ::
- (name()) -> rabbit_types:ok(rabbit_types:amqqueue()) |
- rabbit_types:error('not_found');
- ([name()]) -> [rabbit_types:amqqueue()]).
--spec(not_found_or_absent/1 :: (name()) -> not_found_or_absent()).
--spec(with/2 :: (name(), qfun(A)) ->
- A | rabbit_types:error(not_found_or_absent())).
--spec(with/3 :: (name(), qfun(A), fun((not_found_or_absent()) -> B)) -> A | B).
--spec(with_or_die/2 ::
- (name(), qfun(A)) -> A | rabbit_types:channel_exit()).
--spec(assert_equivalence/5 ::
- (rabbit_types:amqqueue(), boolean(), boolean(),
- rabbit_framing:amqp_table(), rabbit_types:maybe(pid()))
- -> 'ok' | rabbit_types:channel_exit() |
- rabbit_types:connection_exit()).
--spec(check_exclusive_access/2 ::
- (rabbit_types:amqqueue(), pid())
- -> 'ok' | rabbit_types:channel_exit()).
--spec(with_exclusive_access_or_die/3 ::
- (name(), pid(), qfun(A)) -> A | rabbit_types:channel_exit()).
--spec(list/0 :: () -> [rabbit_types:amqqueue()]).
--spec(list/1 :: (rabbit_types:vhost()) -> [rabbit_types:amqqueue()]).
--spec(list_down/1 :: (rabbit_types:vhost()) -> [rabbit_types:amqqueue()]).
--spec(info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(info/1 :: (rabbit_types:amqqueue()) -> rabbit_types:infos()).
--spec(info/2 ::
- (rabbit_types:amqqueue(), rabbit_types:info_keys())
- -> rabbit_types:infos()).
--spec(info_all/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]).
--spec(info_all/2 :: (rabbit_types:vhost(), rabbit_types:info_keys())
- -> [rabbit_types:infos()]).
--spec(force_event_refresh/1 :: (reference()) -> 'ok').
--spec(notify_policy_changed/1 :: (rabbit_types:amqqueue()) -> 'ok').
--spec(consumers/1 :: (rabbit_types:amqqueue())
- -> [{pid(), rabbit_types:ctag(), boolean(),
- non_neg_integer(), rabbit_framing:amqp_table()}]).
--spec(consumer_info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(consumers_all/1 ::
- (rabbit_types:vhost())
- -> [{name(), pid(), rabbit_types:ctag(), boolean(),
- non_neg_integer(), rabbit_framing:amqp_table()}]).
--spec(stat/1 ::
- (rabbit_types:amqqueue())
- -> {'ok', non_neg_integer(), non_neg_integer()}).
--spec(delete_immediately/1 :: (qpids()) -> 'ok').
--spec(delete/3 ::
- (rabbit_types:amqqueue(), 'false', 'false')
- -> qlen();
- (rabbit_types:amqqueue(), 'true' , 'false')
- -> qlen() | rabbit_types:error('in_use');
- (rabbit_types:amqqueue(), 'false', 'true' )
- -> qlen() | rabbit_types:error('not_empty');
- (rabbit_types:amqqueue(), 'true' , 'true' )
- -> qlen() |
- rabbit_types:error('in_use') |
- rabbit_types:error('not_empty')).
--spec(delete_crashed/1 :: (rabbit_types:amqqueue()) -> 'ok').
--spec(delete_crashed_internal/1 :: (rabbit_types:amqqueue()) -> 'ok').
--spec(purge/1 :: (rabbit_types:amqqueue()) -> qlen()).
--spec(forget_all_durable/1 :: (node()) -> 'ok').
--spec(deliver/2 :: ([rabbit_types:amqqueue()], rabbit_types:delivery()) ->
- qpids()).
--spec(requeue/3 :: (pid(), [msg_id()], pid()) -> 'ok').
--spec(ack/3 :: (pid(), [msg_id()], pid()) -> 'ok').
--spec(reject/4 :: (pid(), [msg_id()], boolean(), pid()) -> 'ok').
--spec(notify_down_all/2 :: (qpids(), pid()) -> ok_or_errors()).
--spec(activate_limit_all/2 :: (qpids(), pid()) -> ok_or_errors()).
--spec(basic_get/4 :: (rabbit_types:amqqueue(), pid(), boolean(), pid()) ->
- {'ok', non_neg_integer(), qmsg()} | 'empty').
--spec(credit/5 :: (rabbit_types:amqqueue(), pid(), rabbit_types:ctag(),
- non_neg_integer(), boolean()) -> 'ok').
--spec(basic_consume/10 ::
- (rabbit_types:amqqueue(), boolean(), pid(), pid(), boolean(),
- non_neg_integer(), rabbit_types:ctag(), boolean(),
- rabbit_framing:amqp_table(), any())
- -> rabbit_types:ok_or_error('exclusive_consume_unavailable')).
--spec(basic_cancel/4 ::
- (rabbit_types:amqqueue(), pid(), rabbit_types:ctag(), any()) -> 'ok').
--spec(notify_decorators/1 :: (rabbit_types:amqqueue()) -> 'ok').
--spec(notify_sent/2 :: (pid(), pid()) -> 'ok').
--spec(notify_sent_queue_down/1 :: (pid()) -> 'ok').
--spec(resume/2 :: (pid(), pid()) -> 'ok').
--spec(internal_delete/1 ::
- (name()) -> rabbit_types:ok_or_error('not_found') |
- rabbit_types:connection_exit() |
- fun (() -> rabbit_types:ok_or_error('not_found') |
- rabbit_types:connection_exit())).
--spec(run_backing_queue/3 ::
- (pid(), atom(),
- (fun ((atom(), A) -> {[rabbit_types:msg_id()], A}))) -> 'ok').
--spec(set_ram_duration_target/2 :: (pid(), number() | 'infinity') -> 'ok').
--spec(set_maximum_since_use/2 :: (pid(), non_neg_integer()) -> 'ok').
--spec(on_node_up/1 :: (node()) -> 'ok').
--spec(on_node_down/1 :: (node()) -> 'ok').
--spec(pseudo_queue/2 :: (name(), pid()) -> rabbit_types:amqqueue()).
--spec(immutable/1 :: (rabbit_types:amqqueue()) -> rabbit_types:amqqueue()).
--spec(store_queue/1 :: (rabbit_types:amqqueue()) -> 'ok').
--spec(update_decorators/1 :: (name()) -> 'ok').
--spec(policy_changed/2 ::
- (rabbit_types:amqqueue(), rabbit_types:amqqueue()) -> 'ok').
--spec(start_mirroring/1 :: (pid()) -> 'ok').
--spec(stop_mirroring/1 :: (pid()) -> 'ok').
--spec(sync_mirrors/1 :: (pid()) -> 'ok' | rabbit_types:error('not_mirrored')).
--spec(cancel_sync_mirrors/1 :: (pid()) -> 'ok' | {'ok', 'not_syncing'}).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
--define(CONSUMER_INFO_KEYS,
- [queue_name, channel_pid, consumer_tag, ack_required, prefetch_count,
- arguments]).
-
-recover() ->
- %% Clear out remnants of old incarnation, in case we restarted
- %% faster than other nodes handled DOWN messages from us.
- on_node_down(node()),
- DurableQueues = find_durable_queues(),
- {ok, BQ} = application:get_env(rabbit, backing_queue_module),
-
- %% We rely on BQ:start/1 returning the recovery terms in the same
- %% order as the supplied queue names, so that we can zip them together
- %% for further processing in recover_durable_queues.
- {ok, OrderedRecoveryTerms} =
- BQ:start([QName || #amqqueue{name = QName} <- DurableQueues]),
- {ok,_} = supervisor:start_child(
- rabbit_sup,
- {rabbit_amqqueue_sup_sup,
- {rabbit_amqqueue_sup_sup, start_link, []},
- transient, infinity, supervisor, [rabbit_amqqueue_sup_sup]}),
- recover_durable_queues(lists:zip(DurableQueues, OrderedRecoveryTerms)).
-
-stop() ->
- ok = supervisor:terminate_child(rabbit_sup, rabbit_amqqueue_sup_sup),
- ok = supervisor:delete_child(rabbit_sup, rabbit_amqqueue_sup_sup),
- {ok, BQ} = application:get_env(rabbit, backing_queue_module),
- ok = BQ:stop().
-
-start(Qs) ->
- %% At this point all recovered queues and their bindings are
- %% visible to routing, so now it is safe for them to complete
- %% their initialisation (which may involve interacting with other
- %% queues).
- [Pid ! {self(), go} || #amqqueue{pid = Pid} <- Qs],
- ok.
-
-find_durable_queues() ->
- Node = node(),
- mnesia:async_dirty(
- fun () ->
- qlc:e(qlc:q([Q || Q = #amqqueue{name = Name,
- pid = Pid}
- <- mnesia:table(rabbit_durable_queue),
- node(Pid) == Node,
- mnesia:read(rabbit_queue, Name, read) =:= []]))
- end).
-
-recover_durable_queues(QueuesAndRecoveryTerms) ->
- {Results, Failures} =
- gen_server2:mcall(
- [{rabbit_amqqueue_sup_sup:start_queue_process(node(), Q, recovery),
- {init, {self(), Terms}}} || {Q, Terms} <- QueuesAndRecoveryTerms]),
- [rabbit_log:error("Queue ~p failed to initialise: ~p~n",
- [Pid, Error]) || {Pid, Error} <- Failures],
- [Q || {_, {new, Q}} <- Results].
-
-declare(QueueName, Durable, AutoDelete, Args, Owner) ->
- declare(QueueName, Durable, AutoDelete, Args, Owner, node()).
-
-
-%% The Node argument suggests where the queue (master if mirrored)
-%% should be. Note that in some cases (e.g. with "nodes" policy in
-%% effect) this might not be possible to satisfy.
-declare(QueueName, Durable, AutoDelete, Args, Owner, Node) ->
- ok = check_declare_arguments(QueueName, Args),
- Q = rabbit_queue_decorator:set(
- rabbit_policy:set(#amqqueue{name = QueueName,
- durable = Durable,
- auto_delete = AutoDelete,
- arguments = Args,
- exclusive_owner = Owner,
- pid = none,
- slave_pids = [],
- sync_slave_pids = [],
- recoverable_slaves = [],
- gm_pids = [],
- state = live})),
- Node = rabbit_mirror_queue_misc:initial_queue_node(Q, Node),
- gen_server2:call(
- rabbit_amqqueue_sup_sup:start_queue_process(Node, Q, declare),
- {init, new}, infinity).
-
-internal_declare(Q, true) ->
- rabbit_misc:execute_mnesia_tx_with_tail(
- fun () ->
- ok = store_queue(Q#amqqueue{state = live}),
- rabbit_misc:const(Q)
- end);
-internal_declare(Q = #amqqueue{name = QueueName}, false) ->
- rabbit_misc:execute_mnesia_tx_with_tail(
- fun () ->
- case mnesia:wread({rabbit_queue, QueueName}) of
- [] ->
- case not_found_or_absent(QueueName) of
- not_found -> Q1 = rabbit_policy:set(Q),
- Q2 = Q1#amqqueue{state = live},
- ok = store_queue(Q2),
- B = add_default_binding(Q1),
- fun () -> B(), Q1 end;
- {absent, _Q, _} = R -> rabbit_misc:const(R)
- end;
- [ExistingQ] ->
- rabbit_misc:const(ExistingQ)
- end
- end).
-
-update(Name, Fun) ->
- case mnesia:wread({rabbit_queue, Name}) of
- [Q = #amqqueue{durable = Durable}] ->
- Q1 = Fun(Q),
- ok = mnesia:write(rabbit_queue, Q1, write),
- case Durable of
- true -> ok = mnesia:write(rabbit_durable_queue, Q1, write);
- _ -> ok
- end,
- Q1;
- [] ->
- not_found
- end.
-
-store_queue(Q = #amqqueue{durable = true}) ->
- ok = mnesia:write(rabbit_durable_queue,
- Q#amqqueue{slave_pids = [],
- sync_slave_pids = [],
- gm_pids = [],
- decorators = undefined}, write),
- store_queue_ram(Q);
-store_queue(Q = #amqqueue{durable = false}) ->
- store_queue_ram(Q).
-
-store_queue_ram(Q) ->
- ok = mnesia:write(rabbit_queue, rabbit_queue_decorator:set(Q), write).
-
-update_decorators(Name) ->
- rabbit_misc:execute_mnesia_transaction(
- fun() ->
- case mnesia:wread({rabbit_queue, Name}) of
- [Q] -> store_queue_ram(Q),
- ok;
- [] -> ok
- end
- end).
-
-policy_changed(Q1 = #amqqueue{decorators = Decorators1},
- Q2 = #amqqueue{decorators = Decorators2}) ->
- rabbit_mirror_queue_misc:update_mirrors(Q1, Q2),
- D1 = rabbit_queue_decorator:select(Decorators1),
- D2 = rabbit_queue_decorator:select(Decorators2),
- [ok = M:policy_changed(Q1, Q2) || M <- lists:usort(D1 ++ D2)],
- %% Make sure we emit a stats event even if nothing
- %% mirroring-related has changed - the policy may have changed anyway.
- notify_policy_changed(Q1).
-
-add_default_binding(#amqqueue{name = QueueName}) ->
- ExchangeName = rabbit_misc:r(QueueName, exchange, <<>>),
- RoutingKey = QueueName#resource.name,
- rabbit_binding:add(#binding{source = ExchangeName,
- destination = QueueName,
- key = RoutingKey,
- args = []}).
-
-lookup([]) -> []; %% optimisation
-lookup([Name]) -> ets:lookup(rabbit_queue, Name); %% optimisation
-lookup(Names) when is_list(Names) ->
- %% Normally we'd call mnesia:dirty_read/1 here, but that is quite
- %% expensive for reasons explained in rabbit_misc:dirty_read/1.
- lists:append([ets:lookup(rabbit_queue, Name) || Name <- Names]);
-lookup(Name) ->
- rabbit_misc:dirty_read({rabbit_queue, Name}).
-
-not_found_or_absent(Name) ->
- %% NB: we assume that the caller has already performed a lookup on
- %% rabbit_queue and not found anything
- case mnesia:read({rabbit_durable_queue, Name}) of
- [] -> not_found;
- [Q] -> {absent, Q, nodedown} %% Q exists on stopped node
- end.
-
-not_found_or_absent_dirty(Name) ->
- %% We should read from both tables inside a tx, to get a
- %% consistent view. But the chances of an inconsistency are small,
- %% and only affect the error kind.
- case rabbit_misc:dirty_read({rabbit_durable_queue, Name}) of
- {error, not_found} -> not_found;
- {ok, Q} -> {absent, Q, nodedown}
- end.
-
-with(Name, F, E) ->
- case lookup(Name) of
- {ok, Q = #amqqueue{state = crashed}} ->
- E({absent, Q, crashed});
- {ok, Q = #amqqueue{pid = QPid}} ->
- %% We check is_process_alive(QPid) in case we receive a
- %% nodedown (for example) in F() that has nothing to do
- %% with the QPid. F() should be written s.t. that this
- %% cannot happen, so we bail if it does since that
- %% indicates a code bug and we don't want to get stuck in
- %% the retry loop.
- rabbit_misc:with_exit_handler(
- fun () -> false = rabbit_mnesia:is_process_alive(QPid),
- timer:sleep(25),
- with(Name, F, E)
- end, fun () -> F(Q) end);
- {error, not_found} ->
- E(not_found_or_absent_dirty(Name))
- end.
-
-with(Name, F) -> with(Name, F, fun (E) -> {error, E} end).
-
-with_or_die(Name, F) ->
- with(Name, F, fun (not_found) -> rabbit_misc:not_found(Name);
- ({absent, Q, Reason}) -> rabbit_misc:absent(Q, Reason)
- end).
-
-assert_equivalence(#amqqueue{name = QName,
- durable = Durable,
- auto_delete = AD} = Q,
- Durable1, AD1, Args1, Owner) ->
- rabbit_misc:assert_field_equivalence(Durable, Durable1, QName, durable),
- rabbit_misc:assert_field_equivalence(AD, AD1, QName, auto_delete),
- assert_args_equivalence(Q, Args1),
- check_exclusive_access(Q, Owner, strict).
-
-check_exclusive_access(Q, Owner) -> check_exclusive_access(Q, Owner, lax).
-
-check_exclusive_access(#amqqueue{exclusive_owner = Owner}, Owner, _MatchType) ->
- ok;
-check_exclusive_access(#amqqueue{exclusive_owner = none}, _ReaderPid, lax) ->
- ok;
-check_exclusive_access(#amqqueue{name = QueueName}, _ReaderPid, _MatchType) ->
- rabbit_misc:protocol_error(
- resource_locked,
- "cannot obtain exclusive access to locked ~s",
- [rabbit_misc:rs(QueueName)]).
-
-with_exclusive_access_or_die(Name, ReaderPid, F) ->
- with_or_die(Name,
- fun (Q) -> check_exclusive_access(Q, ReaderPid), F(Q) end).
-
-assert_args_equivalence(#amqqueue{name = QueueName, arguments = Args},
- RequiredArgs) ->
- rabbit_misc:assert_args_equivalence(Args, RequiredArgs, QueueName,
- [Key || {Key, _Fun} <- declare_args()]).
-
-check_declare_arguments(QueueName, Args) ->
- check_arguments(QueueName, Args, declare_args()).
-
-check_consume_arguments(QueueName, Args) ->
- check_arguments(QueueName, Args, consume_args()).
-
-check_arguments(QueueName, Args, Validators) ->
- [case rabbit_misc:table_lookup(Args, Key) of
- undefined -> ok;
- TypeVal -> case Fun(TypeVal, Args) of
- ok -> ok;
- {error, Error} -> rabbit_misc:protocol_error(
- precondition_failed,
- "invalid arg '~s' for ~s: ~255p",
- [Key, rabbit_misc:rs(QueueName),
- Error])
- end
- end || {Key, Fun} <- Validators],
- ok.
-
-declare_args() ->
- [{<<"x-expires">>, fun check_expires_arg/2},
- {<<"x-message-ttl">>, fun check_message_ttl_arg/2},
- {<<"x-dead-letter-exchange">>, fun check_dlxname_arg/2},
- {<<"x-dead-letter-routing-key">>, fun check_dlxrk_arg/2},
- {<<"x-max-length">>, fun check_non_neg_int_arg/2},
- {<<"x-max-length-bytes">>, fun check_non_neg_int_arg/2},
- {<<"x-max-priority">>, fun check_non_neg_int_arg/2}].
-
-consume_args() -> [{<<"x-priority">>, fun check_int_arg/2},
- {<<"x-cancel-on-ha-failover">>, fun check_bool_arg/2}].
-
-check_int_arg({Type, _}, _) ->
- case lists:member(Type, ?INTEGER_ARG_TYPES) of
- true -> ok;
- false -> {error, {unacceptable_type, Type}}
- end.
-
-check_bool_arg({bool, _}, _) -> ok;
-check_bool_arg({Type, _}, _) -> {error, {unacceptable_type, Type}}.
-
-check_non_neg_int_arg({Type, Val}, Args) ->
- case check_int_arg({Type, Val}, Args) of
- ok when Val >= 0 -> ok;
- ok -> {error, {value_negative, Val}};
- Error -> Error
- end.
-
-check_expires_arg({Type, Val}, Args) ->
- case check_int_arg({Type, Val}, Args) of
- ok when Val == 0 -> {error, {value_zero, Val}};
- ok -> rabbit_misc:check_expiry(Val);
- Error -> Error
- end.
-
-check_message_ttl_arg({Type, Val}, Args) ->
- case check_int_arg({Type, Val}, Args) of
- ok -> rabbit_misc:check_expiry(Val);
- Error -> Error
- end.
-
-%% Note that the validity of x-dead-letter-exchange is already verified
-%% by rabbit_channel's queue.declare handler.
-check_dlxname_arg({longstr, _}, _) -> ok;
-check_dlxname_arg({Type, _}, _) -> {error, {unacceptable_type, Type}}.
-
-check_dlxrk_arg({longstr, _}, Args) ->
- case rabbit_misc:table_lookup(Args, <<"x-dead-letter-exchange">>) of
- undefined -> {error, routing_key_but_no_dlx_defined};
- _ -> ok
- end;
-check_dlxrk_arg({Type, _}, _Args) ->
- {error, {unacceptable_type, Type}}.
-
-list() -> mnesia:dirty_match_object(rabbit_queue, #amqqueue{_ = '_'}).
-
-list(VHostPath) -> list(VHostPath, rabbit_queue).
-
-%% Not dirty_match_object since that would not be transactional when used in a
-%% tx context
-list(VHostPath, TableName) ->
- mnesia:async_dirty(
- fun () ->
- mnesia:match_object(
- TableName,
- #amqqueue{name = rabbit_misc:r(VHostPath, queue), _ = '_'},
- read)
- end).
-
-list_down(VHostPath) ->
- Present = list(VHostPath),
- Durable = list(VHostPath, rabbit_durable_queue),
- PresentS = sets:from_list([N || #amqqueue{name = N} <- Present]),
- sets:to_list(sets:filter(fun (#amqqueue{name = N}) ->
- not sets:is_element(N, PresentS)
- end, sets:from_list(Durable))).
-
-info_keys() -> rabbit_amqqueue_process:info_keys().
-
-map(Qs, F) -> rabbit_misc:filter_exit_map(F, Qs).
-
-info(Q = #amqqueue{ state = crashed }) -> info_down(Q, crashed);
-info(#amqqueue{ pid = QPid }) -> delegate:call(QPid, info).
-
-info(Q = #amqqueue{ state = crashed }, Items) ->
- info_down(Q, Items, crashed);
-info(#amqqueue{ pid = QPid }, Items) ->
- case delegate:call(QPid, {info, Items}) of
- {ok, Res} -> Res;
- {error, Error} -> throw(Error)
- end.
-
-info_down(Q, DownReason) ->
- info_down(Q, rabbit_amqqueue_process:info_keys(), DownReason).
-
-info_down(Q, Items, DownReason) ->
- [{Item, i_down(Item, Q, DownReason)} || Item <- Items].
-
-i_down(name, #amqqueue{name = Name}, _) -> Name;
-i_down(durable, #amqqueue{durable = Dur}, _) -> Dur;
-i_down(auto_delete, #amqqueue{auto_delete = AD}, _) -> AD;
-i_down(arguments, #amqqueue{arguments = Args}, _) -> Args;
-i_down(pid, #amqqueue{pid = QPid}, _) -> QPid;
-i_down(recoverable_slaves, #amqqueue{recoverable_slaves = RS}, _) -> RS;
-i_down(state, _Q, DownReason) -> DownReason;
-i_down(K, _Q, _DownReason) ->
- case lists:member(K, rabbit_amqqueue_process:info_keys()) of
- true -> '';
- false -> throw({bad_argument, K})
- end.
-
-info_all(VHostPath) ->
- map(list(VHostPath), fun (Q) -> info(Q) end) ++
- map(list_down(VHostPath), fun (Q) -> info_down(Q, down) end).
-
-info_all(VHostPath, Items) ->
- map(list(VHostPath), fun (Q) -> info(Q, Items) end) ++
- map(list_down(VHostPath), fun (Q) -> info_down(Q, Items, down) end).
-
-force_event_refresh(Ref) ->
- [gen_server2:cast(Q#amqqueue.pid,
- {force_event_refresh, Ref}) || Q <- list()],
- ok.
-
-notify_policy_changed(#amqqueue{pid = QPid}) ->
- gen_server2:cast(QPid, policy_changed).
-
-consumers(#amqqueue{ pid = QPid }) -> delegate:call(QPid, consumers).
-
-consumer_info_keys() -> ?CONSUMER_INFO_KEYS.
-
-consumers_all(VHostPath) ->
- ConsumerInfoKeys=consumer_info_keys(),
- lists:append(
- map(list(VHostPath),
- fun (Q) ->
- [lists:zip(
- ConsumerInfoKeys,
- [Q#amqqueue.name, ChPid, CTag, AckRequired, Prefetch, Args]) ||
- {ChPid, CTag, AckRequired, Prefetch, Args} <- consumers(Q)]
- end)).
-
-stat(#amqqueue{pid = QPid}) -> delegate:call(QPid, stat).
-
-delete_immediately(QPids) ->
- [gen_server2:cast(QPid, delete_immediately) || QPid <- QPids],
- ok.
-
-delete(#amqqueue{ pid = QPid }, IfUnused, IfEmpty) ->
- delegate:call(QPid, {delete, IfUnused, IfEmpty}).
-
-delete_crashed(#amqqueue{ pid = QPid } = Q) ->
- ok = rpc:call(node(QPid), ?MODULE, delete_crashed_internal, [Q]).
-
-delete_crashed_internal(Q = #amqqueue{ name = QName }) ->
- {ok, BQ} = application:get_env(rabbit, backing_queue_module),
- BQ:delete_crashed(Q),
- ok = internal_delete(QName).
-
-purge(#amqqueue{ pid = QPid }) -> delegate:call(QPid, purge).
-
-requeue(QPid, MsgIds, ChPid) -> delegate:call(QPid, {requeue, MsgIds, ChPid}).
-
-ack(QPid, MsgIds, ChPid) -> delegate:cast(QPid, {ack, MsgIds, ChPid}).
-
-reject(QPid, Requeue, MsgIds, ChPid) ->
- delegate:cast(QPid, {reject, Requeue, MsgIds, ChPid}).
-
-notify_down_all(QPids, ChPid) ->
- {_, Bads} = delegate:call(QPids, {notify_down, ChPid}),
- case lists:filter(
- fun ({_Pid, {exit, {R, _}, _}}) -> rabbit_misc:is_abnormal_exit(R);
- ({_Pid, _}) -> false
- end, Bads) of
- [] -> ok;
- Bads1 -> {error, Bads1}
- end.
-
-activate_limit_all(QPids, ChPid) ->
- delegate:cast(QPids, {activate_limit, ChPid}).
-
-credit(#amqqueue{pid = QPid}, ChPid, CTag, Credit, Drain) ->
- delegate:cast(QPid, {credit, ChPid, CTag, Credit, Drain}).
-
-basic_get(#amqqueue{pid = QPid}, ChPid, NoAck, LimiterPid) ->
- delegate:call(QPid, {basic_get, ChPid, NoAck, LimiterPid}).
-
-basic_consume(#amqqueue{pid = QPid, name = QName}, NoAck, ChPid, LimiterPid,
- LimiterActive, ConsumerPrefetchCount, ConsumerTag,
- ExclusiveConsume, Args, OkMsg) ->
- ok = check_consume_arguments(QName, Args),
- delegate:call(QPid, {basic_consume, NoAck, ChPid, LimiterPid, LimiterActive,
- ConsumerPrefetchCount, ConsumerTag, ExclusiveConsume,
- Args, OkMsg}).
-
-basic_cancel(#amqqueue{pid = QPid}, ChPid, ConsumerTag, OkMsg) ->
- delegate:call(QPid, {basic_cancel, ChPid, ConsumerTag, OkMsg}).
-
-notify_decorators(#amqqueue{pid = QPid}) ->
- delegate:cast(QPid, notify_decorators).
-
-notify_sent(QPid, ChPid) ->
- Key = {consumer_credit_to, QPid},
- put(Key, case get(Key) of
- 1 -> gen_server2:cast(
- QPid, {notify_sent, ChPid,
- ?MORE_CONSUMER_CREDIT_AFTER}),
- ?MORE_CONSUMER_CREDIT_AFTER;
- undefined -> erlang:monitor(process, QPid),
- ?MORE_CONSUMER_CREDIT_AFTER - 1;
- C -> C - 1
- end),
- ok.
-
-notify_sent_queue_down(QPid) ->
- erase({consumer_credit_to, QPid}),
- ok.
-
-resume(QPid, ChPid) -> delegate:cast(QPid, {resume, ChPid}).
-
-internal_delete1(QueueName, OnlyDurable) ->
- ok = mnesia:delete({rabbit_queue, QueueName}),
- %% this 'guarded' delete prevents unnecessary writes to the mnesia
- %% disk log
- case mnesia:wread({rabbit_durable_queue, QueueName}) of
- [] -> ok;
- [_] -> ok = mnesia:delete({rabbit_durable_queue, QueueName})
- end,
- %% we want to execute some things, as decided by rabbit_exchange,
- %% after the transaction.
- rabbit_binding:remove_for_destination(QueueName, OnlyDurable).
-
-internal_delete(QueueName) ->
- rabbit_misc:execute_mnesia_tx_with_tail(
- fun () ->
- case {mnesia:wread({rabbit_queue, QueueName}),
- mnesia:wread({rabbit_durable_queue, QueueName})} of
- {[], []} ->
- rabbit_misc:const({error, not_found});
- _ ->
- Deletions = internal_delete1(QueueName, false),
- T = rabbit_binding:process_deletions(Deletions),
- fun() ->
- ok = T(),
- ok = rabbit_event:notify(queue_deleted,
- [{name, QueueName}])
- end
- end
- end).
-
-forget_all_durable(Node) ->
- %% Note rabbit is not running so we avoid e.g. the worker pool. Also why
- %% we don't invoke the return from rabbit_binding:process_deletions/1.
- {atomic, ok} =
- mnesia:sync_transaction(
- fun () ->
- Qs = mnesia:match_object(rabbit_durable_queue,
- #amqqueue{_ = '_'}, write),
- [forget_node_for_queue(Node, Q) ||
- #amqqueue{pid = Pid} = Q <- Qs,
- node(Pid) =:= Node],
- ok
- end),
- ok.
-
-%% Try to promote a slave while down - it should recover as a
-%% master. We try to take the oldest slave here for best chance of
-%% recovery.
-forget_node_for_queue(DeadNode, Q = #amqqueue{recoverable_slaves = RS}) ->
- forget_node_for_queue(DeadNode, RS, Q).
-
-forget_node_for_queue(_DeadNode, [], #amqqueue{name = Name}) ->
- %% No slaves to recover from, queue is gone.
- %% Don't process_deletions since that just calls callbacks and we
- %% are not really up.
- internal_delete1(Name, true);
-
-%% Should not happen, but let's be conservative.
-forget_node_for_queue(DeadNode, [DeadNode | T], Q) ->
- forget_node_for_queue(DeadNode, T, Q);
-
-forget_node_for_queue(DeadNode, [H|T], Q) ->
- case node_permits_offline_promotion(H) of
- false -> forget_node_for_queue(DeadNode, T, Q);
- true -> Q1 = Q#amqqueue{pid = rabbit_misc:node_to_fake_pid(H)},
- ok = mnesia:write(rabbit_durable_queue, Q1, write)
- end.
-
-node_permits_offline_promotion(Node) ->
- case node() of
- Node -> not rabbit:is_running(); %% [1]
- _ -> Running = rabbit_mnesia:cluster_nodes(running),
- not lists:member(Node, Running) %% [2]
- end.
-%% [1] In this case if we are a real running node (i.e. rabbitmqctl
-%% has RPCed into us) then we cannot allow promotion. If on the other
-%% hand we *are* rabbitmqctl impersonating the node for offline
-%% node-forgetting then we can.
-%%
-%% [2] This is simpler; as long as it's down that's OK
-
-run_backing_queue(QPid, Mod, Fun) ->
- gen_server2:cast(QPid, {run_backing_queue, Mod, Fun}).
-
-set_ram_duration_target(QPid, Duration) ->
- gen_server2:cast(QPid, {set_ram_duration_target, Duration}).
-
-set_maximum_since_use(QPid, Age) ->
- gen_server2:cast(QPid, {set_maximum_since_use, Age}).
-
-start_mirroring(QPid) -> ok = delegate:cast(QPid, start_mirroring).
-stop_mirroring(QPid) -> ok = delegate:cast(QPid, stop_mirroring).
-
-sync_mirrors(QPid) -> delegate:call(QPid, sync_mirrors).
-cancel_sync_mirrors(QPid) -> delegate:call(QPid, cancel_sync_mirrors).
-
-on_node_up(Node) ->
- ok = rabbit_misc:execute_mnesia_transaction(
- fun () ->
- Qs = mnesia:match_object(rabbit_queue,
- #amqqueue{_ = '_'}, write),
- [maybe_clear_recoverable_node(Node, Q) || Q <- Qs],
- ok
- end).
-
-maybe_clear_recoverable_node(Node,
- #amqqueue{sync_slave_pids = SPids,
- recoverable_slaves = RSs} = Q) ->
- case lists:member(Node, RSs) of
- true ->
- %% There is a race with
- %% rabbit_mirror_queue_slave:record_synchronised/1 called
- %% by the incoming slave node and this function, called
- %% by the master node. If this function is executed after
- %% record_synchronised/1, the node is erroneously removed
- %% from the recoverable slaves list.
- %%
- %% We check if the slave node's queue PID is alive. If it is
- %% the case, then this function is executed after. In this
- %% situation, we don't touch the queue record, it is already
- %% correct.
- DoClearNode =
- case [SP || SP <- SPids, node(SP) =:= Node] of
- [SPid] -> not rabbit_misc:is_process_alive(SPid);
- _ -> true
- end,
- if
- DoClearNode -> RSs1 = RSs -- [Node],
- store_queue(
- Q#amqqueue{recoverable_slaves = RSs1});
- true -> ok
- end;
- false ->
- ok
- end.
-
-on_node_down(Node) ->
- rabbit_misc:execute_mnesia_tx_with_tail(
- fun () -> QsDels =
- qlc:e(qlc:q([{QName, delete_queue(QName)} ||
- #amqqueue{name = QName, pid = Pid,
- slave_pids = []}
- <- mnesia:table(rabbit_queue),
- node(Pid) == Node andalso
- not rabbit_mnesia:is_process_alive(Pid)])),
- {Qs, Dels} = lists:unzip(QsDels),
- T = rabbit_binding:process_deletions(
- lists:foldl(fun rabbit_binding:combine_deletions/2,
- rabbit_binding:new_deletions(), Dels)),
- fun () ->
- T(),
- lists:foreach(
- fun(QName) ->
- ok = rabbit_event:notify(queue_deleted,
- [{name, QName}])
- end, Qs)
- end
- end).
-
-delete_queue(QueueName) ->
- ok = mnesia:delete({rabbit_queue, QueueName}),
- rabbit_binding:remove_transient_for_destination(QueueName).
-
-pseudo_queue(QueueName, Pid) ->
- #amqqueue{name = QueueName,
- durable = false,
- auto_delete = false,
- arguments = [],
- pid = Pid,
- slave_pids = []}.
-
-immutable(Q) -> Q#amqqueue{pid = none,
- slave_pids = none,
- sync_slave_pids = none,
- recoverable_slaves = none,
- gm_pids = none,
- policy = none,
- decorators = none,
- state = none}.
-
-deliver([], _Delivery) ->
- %% /dev/null optimisation
- [];
-
-deliver(Qs, Delivery = #delivery{flow = Flow}) ->
- {MPids, SPids} = qpids(Qs),
- QPids = MPids ++ SPids,
- %% We use up two credits to send to a slave since the message
- %% arrives at the slave from two directions. We will ack one when
- %% the slave receives the message direct from the channel, and the
- %% other when it receives it via GM.
- case Flow of
- %% Here we are tracking messages sent by the rabbit_channel
- %% process. We are accessing the rabbit_channel process
- %% dictionary.
- flow -> [credit_flow:send(QPid) || QPid <- QPids],
- [credit_flow:send(QPid) || QPid <- SPids];
- noflow -> ok
- end,
-
- %% We let slaves know that they were being addressed as slaves at
- %% the time - if they receive such a message from the channel
- %% after they have become master they should mark the message as
- %% 'delivered' since they do not know what the master may have
- %% done with it.
- MMsg = {deliver, Delivery, false},
- SMsg = {deliver, Delivery, true},
- delegate:cast(MPids, MMsg),
- delegate:cast(SPids, SMsg),
- QPids.
-
-qpids([]) -> {[], []}; %% optimisation
-qpids([#amqqueue{pid = QPid, slave_pids = SPids}]) -> {[QPid], SPids}; %% opt
-qpids(Qs) ->
- {MPids, SPids} = lists:foldl(fun (#amqqueue{pid = QPid, slave_pids = SPids},
- {MPidAcc, SPidAcc}) ->
- {[QPid | MPidAcc], [SPids | SPidAcc]}
- end, {[], []}, Qs),
- {MPids, lists:append(SPids)}.
diff --git a/src/rabbit_amqqueue_process.erl b/src/rabbit_amqqueue_process.erl
index e828af7c49..ee331180ed 100644
--- a/src/rabbit_amqqueue_process.erl
+++ b/src/rabbit_amqqueue_process.erl
@@ -318,7 +318,8 @@ process_args_policy(State = #q{q = Q,
{<<"dead-letter-routing-key">>, fun res_arg/2, fun init_dlx_rkey/2},
{<<"message-ttl">>, fun res_min/2, fun init_ttl/2},
{<<"max-length">>, fun res_min/2, fun init_max_length/2},
- {<<"max-length-bytes">>, fun res_min/2, fun init_max_bytes/2}],
+ {<<"max-length-bytes">>, fun res_min/2, fun init_max_bytes/2},
+ {<<"queue-mode">>, fun res_arg/2, fun init_queue_mode/2}],
drop_expired_msgs(
lists:foldl(fun({Name, Resolve, Fun}, StateN) ->
Fun(args_policy_lookup(Name, Resolve, Q), StateN)
@@ -361,6 +362,13 @@ init_max_bytes(MaxBytes, State) ->
{_Dropped, State1} = maybe_drop_head(State#q{max_bytes = MaxBytes}),
State1.
+init_queue_mode(undefined, State) ->
+ State;
+init_queue_mode(Mode, State = #q {backing_queue = BQ,
+ backing_queue_state = BQS}) ->
+ BQS1 = BQ:set_queue_mode(binary_to_existing_atom(Mode, utf8), BQS),
+ State#q{backing_queue_state = BQS1}.
+
reply(Reply, NewState) ->
{NewState1, Timeout} = next_state(NewState),
{reply, Reply, ensure_stats_timer(ensure_rate_timer(NewState1)), Timeout}.
@@ -422,7 +430,7 @@ ensure_ttl_timer(undefined, State) ->
State;
ensure_ttl_timer(Expiry, State = #q{ttl_timer_ref = undefined,
args_policy_version = Version}) ->
- After = (case Expiry - now_micros() of
+ After = (case Expiry - time_compat:os_system_time(micro_seconds) of
V when V > 0 -> V + 999; %% always fire later
_ -> 0
end) div 1000,
@@ -742,7 +750,7 @@ calculate_msg_expiry(#basic_message{content = Content}, TTL) ->
{ok, MsgTTL} = rabbit_basic:parse_expiration(Props),
case lists:min([TTL, MsgTTL]) of
undefined -> undefined;
- T -> now_micros() + T * 1000
+ T -> time_compat:os_system_time(micro_seconds) + T * 1000
end.
%% Logically this function should invoke maybe_send_drained/2.
@@ -753,7 +761,8 @@ calculate_msg_expiry(#basic_message{content = Content}, TTL) ->
drop_expired_msgs(State) ->
case is_empty(State) of
true -> State;
- false -> drop_expired_msgs(now_micros(), State)
+ false -> drop_expired_msgs(time_compat:os_system_time(micro_seconds),
+ State)
end.
drop_expired_msgs(Now, State = #q{backing_queue_state = BQS,
@@ -816,8 +825,6 @@ stop(State) -> stop(noreply, State).
stop(noreply, State) -> {stop, normal, State};
stop(Reply, State) -> {stop, normal, Reply, State}.
-now_micros() -> timer:now_diff(now(), {0,0,0}).
-
infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
i(name, #q{q = #amqqueue{name = Name}}) -> Name;
@@ -1330,8 +1337,11 @@ handle_pre_hibernate(State = #q{backing_queue = BQ,
BQS3 = BQ:handle_pre_hibernate(BQS2),
rabbit_event:if_enabled(
State, #q.stats_timer,
- fun () -> emit_stats(State, [{idle_since, now()},
- {consumer_utilisation, ''}]) end),
+ fun () -> emit_stats(State,
+ [{idle_since,
+ time_compat:os_system_time(milli_seconds)},
+ {consumer_utilisation, ''}])
+ end),
State1 = rabbit_event:stop_stats_timer(State#q{backing_queue_state = BQS3},
#q.stats_timer),
{hibernate, stop_rate_timer(State1)}.
diff --git a/src/rabbit_auth_backend_internal.erl b/src/rabbit_auth_backend_internal.erl
index 2b2a0ba20e..ce5474dc9e 100644
--- a/src/rabbit_auth_backend_internal.erl
+++ b/src/rabbit_auth_backend_internal.erl
@@ -25,15 +25,19 @@
-export([add_user/2, delete_user/1, lookup_user/1,
change_password/2, clear_password/1,
- hash_password/1, change_password_hash/2,
+ hash_password/2, change_password_hash/2,
set_tags/2, set_permissions/5, clear_permissions/2]).
-export([user_info_keys/0, perms_info_keys/0,
user_perms_info_keys/0, vhost_perms_info_keys/0,
user_vhost_perms_info_keys/0,
- list_users/0, list_permissions/0,
- list_user_permissions/1, list_vhost_permissions/1,
+ list_users/0, list_users/2, list_permissions/0,
+ list_user_permissions/1, list_user_permissions/3,
+ list_vhost_permissions/1, list_vhost_permissions/3,
list_user_vhost_permissions/2]).
+%% for testing
+-export([hashing_module_for_user/1]).
+
%%----------------------------------------------------------------------------
-ifdef(use_specs).
@@ -48,7 +52,7 @@
-spec(change_password/2 :: (rabbit_types:username(), rabbit_types:password())
-> 'ok').
-spec(clear_password/1 :: (rabbit_types:username()) -> 'ok').
--spec(hash_password/1 :: (rabbit_types:password())
+-spec(hash_password/2 :: (module(), rabbit_types:password())
-> rabbit_types:password_hash()).
-spec(change_password_hash/2 :: (rabbit_types:username(),
rabbit_types:password_hash()) -> 'ok').
@@ -63,11 +67,16 @@
-spec(vhost_perms_info_keys/0 :: () -> rabbit_types:info_keys()).
-spec(user_vhost_perms_info_keys/0 :: () -> rabbit_types:info_keys()).
-spec(list_users/0 :: () -> [rabbit_types:infos()]).
+-spec(list_users/2 :: (reference(), pid()) -> 'ok').
-spec(list_permissions/0 :: () -> [rabbit_types:infos()]).
-spec(list_user_permissions/1 ::
(rabbit_types:username()) -> [rabbit_types:infos()]).
+-spec(list_user_permissions/3 ::
+ (rabbit_types:username(), reference(), pid()) -> 'ok').
-spec(list_vhost_permissions/1 ::
(rabbit_types:vhost()) -> [rabbit_types:infos()]).
+-spec(list_vhost_permissions/3 ::
+ (rabbit_types:vhost(), reference(), pid()) -> 'ok').
-spec(list_user_vhost_permissions/2 ::
(rabbit_types:username(), rabbit_types:vhost())
-> [rabbit_types:infos()]).
@@ -77,13 +86,22 @@
%%----------------------------------------------------------------------------
%% Implementation of rabbit_auth_backend
+%% Returns a password hashing module for the user record provided. If
+%% there is no information in the record, we consider it to be legacy
+%% (inserted by a version older than 3.6.0) and fall back to MD5, the
+%% now obsolete hashing function.
+hashing_module_for_user(#internal_user{
+ hashing_algorithm = ModOrUndefined}) ->
+ rabbit_password:hashing_mod(ModOrUndefined).
+
user_login_authentication(Username, []) ->
internal_check_user_login(Username, fun(_) -> true end);
user_login_authentication(Username, [{password, Cleartext}]) ->
internal_check_user_login(
Username,
- fun (#internal_user{password_hash = <<Salt:4/binary, Hash/binary>>}) ->
- Hash =:= salted_md5(Salt, Cleartext);
+ fun (#internal_user{password_hash = <<Salt:4/binary, Hash/binary>>} = U) ->
+ Hash =:= rabbit_password:salted_hash(
+ hashing_module_for_user(U), Salt, Cleartext);
(#internal_user{}) ->
false
end);
@@ -147,17 +165,19 @@ permission_index(read) -> #permission.read.
add_user(Username, Password) ->
rabbit_log:info("Creating user '~s'~n", [Username]),
+ %% hash_password will pick the hashing function configured for us
+ %% but we also need to store a hint as part of the record, so we
+ %% retrieve it here one more time
+ HashingMod = rabbit_password:hashing_mod(),
+ User = #internal_user{username = Username,
+ password_hash = hash_password(HashingMod, Password),
+ tags = [],
+ hashing_algorithm = HashingMod},
R = rabbit_misc:execute_mnesia_transaction(
fun () ->
case mnesia:wread({rabbit_user, Username}) of
[] ->
- ok = mnesia:write(
- rabbit_user,
- #internal_user{username = Username,
- password_hash =
- hash_password(Password),
- tags = []},
- write);
+ ok = mnesia:write(rabbit_user, User, write);
_ ->
mnesia:abort({user_already_exists, Username})
end
@@ -191,7 +211,8 @@ lookup_user(Username) ->
change_password(Username, Password) ->
rabbit_log:info("Changing password for '~s'~n", [Username]),
- R = change_password_hash(Username, hash_password(Password)),
+ R = change_password_hash(Username,
+ hash_password(rabbit_password:hashing_mod(), Password)),
rabbit_event:notify(user_password_changed, [{name, Username}]),
R.
@@ -201,13 +222,8 @@ clear_password(Username) ->
rabbit_event:notify(user_password_cleared, [{name, Username}]),
R.
-hash_password(Cleartext) ->
- {A1,A2,A3} = now(),
- random:seed(A1, A2, A3),
- Salt = random:uniform(16#ffffffff),
- SaltBin = <<Salt:32>>,
- Hash = salted_md5(SaltBin, Cleartext),
- <<SaltBin/binary, Hash/binary>>.
+hash_password(HashingMod, Cleartext) ->
+ rabbit_password:hash(HashingMod, Cleartext).
change_password_hash(Username, PasswordHash) ->
update_user(Username, fun(User) ->
@@ -215,10 +231,6 @@ change_password_hash(Username, PasswordHash) ->
password_hash = PasswordHash }
end).
-salted_md5(Salt, Cleartext) ->
- Salted = <<Salt/binary, Cleartext/binary>>,
- erlang:md5(Salted).
-
set_tags(Username, Tags) ->
rabbit_log:info("Setting user tags for user '~s' to ~p~n",
[Username, Tags]),
@@ -299,26 +311,28 @@ user_perms_info_keys() -> [vhost | ?PERMS_INFO_KEYS].
user_vhost_perms_info_keys() -> ?PERMS_INFO_KEYS.
list_users() ->
- [[{user, Username}, {tags, Tags}] ||
- #internal_user{username = Username, tags = Tags} <-
- mnesia:dirty_match_object(rabbit_user, #internal_user{_ = '_'})].
+ [extract_internal_user_params(U) ||
+ U <- mnesia:dirty_match_object(rabbit_user, #internal_user{_ = '_'})].
+
+list_users(Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map(
+ AggregatorPid, Ref,
+ fun(U) -> extract_internal_user_params(U) end,
+ mnesia:dirty_match_object(rabbit_user, #internal_user{_ = '_'})).
list_permissions() ->
list_permissions(perms_info_keys(), match_user_vhost('_', '_')).
list_permissions(Keys, QueryThunk) ->
- [filter_props(Keys, [{user, Username},
- {vhost, VHostPath},
- {configure, ConfigurePerm},
- {write, WritePerm},
- {read, ReadPerm}]) ||
- #user_permission{user_vhost = #user_vhost{username = Username,
- virtual_host = VHostPath},
- permission = #permission{ configure = ConfigurePerm,
- write = WritePerm,
- read = ReadPerm}} <-
- %% TODO: use dirty ops instead
- rabbit_misc:execute_mnesia_transaction(QueryThunk)].
+ [extract_user_permission_params(Keys, U) ||
+ %% TODO: use dirty ops instead
+ U <- rabbit_misc:execute_mnesia_transaction(QueryThunk)].
+
+list_permissions(Keys, QueryThunk, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map(
+ AggregatorPid, Ref, fun(U) -> extract_user_permission_params(Keys, U) end,
+ %% TODO: use dirty ops instead
+ rabbit_misc:execute_mnesia_transaction(QueryThunk)).
filter_props(Keys, Props) -> [T || T = {K, _} <- Props, lists:member(K, Keys)].
@@ -327,17 +341,46 @@ list_user_permissions(Username) ->
user_perms_info_keys(),
rabbit_misc:with_user(Username, match_user_vhost(Username, '_'))).
+list_user_permissions(Username, Ref, AggregatorPid) ->
+ list_permissions(
+ user_perms_info_keys(),
+ rabbit_misc:with_user(Username, match_user_vhost(Username, '_')),
+ Ref, AggregatorPid).
+
list_vhost_permissions(VHostPath) ->
list_permissions(
vhost_perms_info_keys(),
rabbit_vhost:with(VHostPath, match_user_vhost('_', VHostPath))).
+list_vhost_permissions(VHostPath, Ref, AggregatorPid) ->
+ list_permissions(
+ vhost_perms_info_keys(),
+ rabbit_vhost:with(VHostPath, match_user_vhost('_', VHostPath)),
+ Ref, AggregatorPid).
+
list_user_vhost_permissions(Username, VHostPath) ->
list_permissions(
user_vhost_perms_info_keys(),
rabbit_misc:with_user_and_vhost(
Username, VHostPath, match_user_vhost(Username, VHostPath))).
+extract_user_permission_params(Keys, #user_permission{
+ user_vhost =
+ #user_vhost{username = Username,
+ virtual_host = VHostPath},
+ permission = #permission{
+ configure = ConfigurePerm,
+ write = WritePerm,
+ read = ReadPerm}}) ->
+ filter_props(Keys, [{user, Username},
+ {vhost, VHostPath},
+ {configure, ConfigurePerm},
+ {write, WritePerm},
+ {read, ReadPerm}]).
+
+extract_internal_user_params(#internal_user{username = Username, tags = Tags}) ->
+ [{user, Username}, {tags, Tags}].
+
match_user_vhost(Username, VHostPath) ->
fun () -> mnesia:match_object(
rabbit_user_permission,
diff --git a/src/rabbit_auth_mechanism.erl b/src/rabbit_auth_mechanism.erl
deleted file mode 100644
index 78e3e7dd4b..0000000000
--- a/src/rabbit_auth_mechanism.erl
+++ /dev/null
@@ -1,56 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_auth_mechanism).
-
--ifdef(use_specs).
-
-%% A description.
--callback description() -> [proplists:property()].
-
-%% If this mechanism is enabled, should it be offered for a given socket?
-%% (primarily so EXTERNAL can be SSL-only)
--callback should_offer(rabbit_net:socket()) -> boolean().
-
-%% Called before authentication starts. Should create a state
-%% object to be passed through all the stages of authentication.
--callback init(rabbit_net:socket()) -> any().
-
-%% Handle a stage of authentication. Possible responses:
-%% {ok, User}
-%% Authentication succeeded, and here's the user record.
-%% {challenge, Challenge, NextState}
-%% Another round is needed. Here's the state I want next time.
-%% {protocol_error, Msg, Args}
-%% Client got the protocol wrong. Log and die.
-%% {refused, Username, Msg, Args}
-%% Client failed authentication. Log and die.
--callback handle_response(binary(), any()) ->
- {'ok', rabbit_types:user()} |
- {'challenge', binary(), any()} |
- {'protocol_error', string(), [any()]} |
- {'refused', rabbit_types:username() | none, string(), [any()]}.
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
- [{description, 0}, {should_offer, 1}, {init, 1}, {handle_response, 2}];
-behaviour_info(_Other) ->
- undefined.
-
--endif.
diff --git a/src/rabbit_authn_backend.erl b/src/rabbit_authn_backend.erl
deleted file mode 100644
index b9cb0d3669..0000000000
--- a/src/rabbit_authn_backend.erl
+++ /dev/null
@@ -1,49 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_authn_backend).
-
--include("rabbit.hrl").
-
--ifdef(use_specs).
-
-%% Check a user can log in, given a username and a proplist of
-%% authentication information (e.g. [{password, Password}]). If your
-%% backend is not to be used for authentication, this should always
-%% refuse access.
-%%
-%% Possible responses:
-%% {ok, User}
-%% Authentication succeeded, and here's the user record.
-%% {error, Error}
-%% Something went wrong. Log and die.
-%% {refused, Msg, Args}
-%% Client failed authentication. Log and die.
--callback user_login_authentication(rabbit_types:username(), [term()]) ->
- {'ok', rabbit_types:auth_user()} |
- {'refused', string(), [any()]} |
- {'error', any()}.
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
- [{user_login_authentication, 2}];
-behaviour_info(_Other) ->
- undefined.
-
--endif.
diff --git a/src/rabbit_authz_backend.erl b/src/rabbit_authz_backend.erl
deleted file mode 100644
index 495a79695d..0000000000
--- a/src/rabbit_authz_backend.erl
+++ /dev/null
@@ -1,76 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_authz_backend).
-
--include("rabbit.hrl").
-
--ifdef(use_specs).
-
-%% Check a user can log in, when this backend is being used for
-%% authorisation only. Authentication has already taken place
-%% successfully, but we need to check that the user exists in this
-%% backend, and initialise any impl field we will want to have passed
-%% back in future calls to check_vhost_access/3 and
-%% check_resource_access/3.
-%%
-%% Possible responses:
-%% {ok, Impl}
-%% {ok, Impl, Tags}
-%% User authorisation succeeded, and here's the impl and potential extra tags fields.
-%% {error, Error}
-%% Something went wrong. Log and die.
-%% {refused, Msg, Args}
-%% User authorisation failed. Log and die.
--callback user_login_authorization(rabbit_types:username()) ->
- {'ok', any()} |
- {'ok', any(), any()} |
- {'refused', string(), [any()]} |
- {'error', any()}.
-
-%% Given #auth_user and vhost, can a user log in to a vhost?
-%% Possible responses:
-%% true
-%% false
-%% {error, Error}
-%% Something went wrong. Log and die.
--callback check_vhost_access(rabbit_types:auth_user(),
- rabbit_types:vhost(), rabbit_net:socket()) ->
- boolean() | {'error', any()}.
-
-%% Given #auth_user, resource and permission, can a user access a resource?
-%%
-%% Possible responses:
-%% true
-%% false
-%% {error, Error}
-%% Something went wrong. Log and die.
--callback check_resource_access(rabbit_types:auth_user(),
- rabbit_types:r(atom()),
- rabbit_access_control:permission_atom()) ->
- boolean() | {'error', any()}.
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
- [{user_login_authorization, 1},
- {check_vhost_access, 3}, {check_resource_access, 3}];
-behaviour_info(_Other) ->
- undefined.
-
--endif.
diff --git a/src/rabbit_backing_queue.erl b/src/rabbit_backing_queue.erl
deleted file mode 100644
index d6cd3ca43d..0000000000
--- a/src/rabbit_backing_queue.erl
+++ /dev/null
@@ -1,269 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_backing_queue).
-
--export([info_keys/0]).
-
--define(INFO_KEYS, [messages_ram, messages_ready_ram,
- messages_unacknowledged_ram, messages_persistent,
- message_bytes, message_bytes_ready,
- message_bytes_unacknowledged, message_bytes_ram,
- message_bytes_persistent,
- disk_reads, disk_writes, backing_queue_status]).
-
--ifdef(use_specs).
-
-%% We can't specify a per-queue ack/state with callback signatures
--type(ack() :: any()).
--type(state() :: any()).
-
--type(flow() :: 'flow' | 'noflow').
--type(msg_ids() :: [rabbit_types:msg_id()]).
--type(fetch_result(Ack) ::
- ('empty' | {rabbit_types:basic_message(), boolean(), Ack})).
--type(drop_result(Ack) ::
- ('empty' | {rabbit_types:msg_id(), Ack})).
--type(recovery_terms() :: [term()] | 'non_clean_shutdown').
--type(recovery_info() :: 'new' | recovery_terms()).
--type(purged_msg_count() :: non_neg_integer()).
--type(async_callback() ::
- fun ((atom(), fun ((atom(), state()) -> state())) -> 'ok')).
--type(duration() :: ('undefined' | 'infinity' | number())).
-
--type(msg_fun(A) :: fun ((rabbit_types:basic_message(), ack(), A) -> A)).
--type(msg_pred() :: fun ((rabbit_types:message_properties()) -> boolean())).
-
--spec(info_keys/0 :: () -> rabbit_types:info_keys()).
-
-%% Called on startup with a list of durable queue names. The queues
-%% aren't being started at this point, but this call allows the
-%% backing queue to perform any checking necessary for the consistency
-%% of those queues, or initialise any other shared resources.
-%%
-%% The list of queue recovery terms returned as {ok, Terms} must be given
-%% in the same order as the list of queue names supplied.
--callback start([rabbit_amqqueue:name()]) -> rabbit_types:ok(recovery_terms()).
-
-%% Called to tear down any state/resources. NB: Implementations should
-%% not depend on this function being called on shutdown and instead
-%% should hook into the rabbit supervision hierarchy.
--callback stop() -> 'ok'.
-
-%% Initialise the backing queue and its state.
-%%
-%% Takes
-%% 1. the amqqueue record
-%% 2. a term indicating whether the queue is an existing queue that
-%% should be recovered or not. When 'new' is given, no recovery is
-%% taking place, otherwise a list of recovery terms is given, or
-%% the atom 'non_clean_shutdown' if no recovery terms are available.
-%% 3. an asynchronous callback which accepts a function of type
-%% backing-queue-state to backing-queue-state. This callback
-%% function can be safely invoked from any process, which makes it
-%% useful for passing messages back into the backing queue,
-%% especially as the backing queue does not have control of its own
-%% mailbox.
--callback init(rabbit_types:amqqueue(), recovery_info(),
- async_callback()) -> state().
-
-%% Called on queue shutdown when queue isn't being deleted.
--callback terminate(any(), state()) -> state().
-
-%% Called when the queue is terminating and needs to delete all its
-%% content.
--callback delete_and_terminate(any(), state()) -> state().
-
-%% Called to clean up after a crashed queue. In this case we don't
-%% have a process and thus a state(), we are just removing on-disk data.
--callback delete_crashed(rabbit_types:amqqueue()) -> 'ok'.
-
-%% Remove all 'fetchable' messages from the queue, i.e. all messages
-%% except those that have been fetched already and are pending acks.
--callback purge(state()) -> {purged_msg_count(), state()}.
-
-%% Remove all messages in the queue which have been fetched and are
-%% pending acks.
--callback purge_acks(state()) -> state().
-
-%% Publish a message.
--callback publish(rabbit_types:basic_message(),
- rabbit_types:message_properties(), boolean(), pid(), flow(),
- state()) -> state().
-
-%% Called for messages which have already been passed straight
-%% out to a client. The queue will be empty for these calls
-%% (i.e. saves the round trip through the backing queue).
--callback publish_delivered(rabbit_types:basic_message(),
- rabbit_types:message_properties(), pid(), flow(),
- state())
- -> {ack(), state()}.
-
-%% Called to inform the BQ about messages which have reached the
-%% queue, but are not going to be further passed to BQ.
--callback discard(rabbit_types:msg_id(), pid(), flow(), state()) -> state().
-
-%% Return ids of messages which have been confirmed since the last
-%% invocation of this function (or initialisation).
-%%
-%% Message ids should only appear in the result of drain_confirmed
-%% under the following circumstances:
-%%
-%% 1. The message appears in a call to publish_delivered/4 and the
-%% first argument (ack_required) is false; or
-%% 2. The message is fetched from the queue with fetch/2 and the first
-%% argument (ack_required) is false; or
-%% 3. The message is acked (ack/2 is called for the message); or
-%% 4. The message is fully fsync'd to disk in such a way that the
-%% recovery of the message is guaranteed in the event of a crash of
-%% this rabbit node (excluding hardware failure).
-%%
-%% In addition to the above conditions, a message id may only appear
-%% in the result of drain_confirmed if
-%% #message_properties.needs_confirming = true when the msg was
-%% published (through whichever means) to the backing queue.
-%%
-%% It is legal for the same message id to appear in the results of
-%% multiple calls to drain_confirmed, which means that the backing
-%% queue is not required to keep track of which messages it has
-%% already confirmed. The confirm will be issued to the publisher the
-%% first time the message id appears in the result of
-%% drain_confirmed. All subsequent appearances of that message id will
-%% be ignored.
--callback drain_confirmed(state()) -> {msg_ids(), state()}.
-
-%% Drop messages from the head of the queue while the supplied
-%% predicate on message properties returns true. Returns the first
-%% message properties for which the predictate returned false, or
-%% 'undefined' if the whole backing queue was traversed w/o the
-%% predicate ever returning false.
--callback dropwhile(msg_pred(), state())
- -> {rabbit_types:message_properties() | undefined, state()}.
-
-%% Like dropwhile, except messages are fetched in "require
-%% acknowledgement" mode and are passed, together with their ack tag,
-%% to the supplied function. The function is also fed an
-%% accumulator. The result of fetchwhile is as for dropwhile plus the
-%% accumulator.
--callback fetchwhile(msg_pred(), msg_fun(A), A, state())
- -> {rabbit_types:message_properties() | undefined,
- A, state()}.
-
-%% Produce the next message.
--callback fetch(true, state()) -> {fetch_result(ack()), state()};
- (false, state()) -> {fetch_result(undefined), state()}.
-
-%% Remove the next message.
--callback drop(true, state()) -> {drop_result(ack()), state()};
- (false, state()) -> {drop_result(undefined), state()}.
-
-%% Acktags supplied are for messages which can now be forgotten
-%% about. Must return 1 msg_id per Ack, in the same order as Acks.
--callback ack([ack()], state()) -> {msg_ids(), state()}.
-
-%% Reinsert messages into the queue which have already been delivered
-%% and were pending acknowledgement.
--callback requeue([ack()], state()) -> {msg_ids(), state()}.
-
-%% Fold over messages by ack tag. The supplied function is called with
-%% each message, its ack tag, and an accumulator.
--callback ackfold(msg_fun(A), A, state(), [ack()]) -> {A, state()}.
-
-%% Fold over all the messages in a queue and return the accumulated
-%% results, leaving the queue undisturbed.
--callback fold(fun((rabbit_types:basic_message(),
- rabbit_types:message_properties(),
- boolean(), A) -> {('stop' | 'cont'), A}),
- A, state()) -> {A, state()}.
-
-%% How long is my queue?
--callback len(state()) -> non_neg_integer().
-
-%% Is my queue empty?
--callback is_empty(state()) -> boolean().
-
-%% What's the queue depth, where depth = length + number of pending acks
--callback depth(state()) -> non_neg_integer().
-
-%% For the next three functions, the assumption is that you're
-%% monitoring something like the ingress and egress rates of the
-%% queue. The RAM duration is thus the length of time represented by
-%% the messages held in RAM given the current rates. If you want to
-%% ignore all of this stuff, then do so, and return 0 in
-%% ram_duration/1.
-
-%% The target is to have no more messages in RAM than indicated by the
-%% duration and the current queue rates.
--callback set_ram_duration_target(duration(), state()) -> state().
-
-%% Optionally recalculate the duration internally (likely to be just
-%% update your internal rates), and report how many seconds the
-%% messages in RAM represent given the current rates of the queue.
--callback ram_duration(state()) -> {duration(), state()}.
-
-%% Should 'timeout' be called as soon as the queue process can manage
-%% (either on an empty mailbox, or when a timer fires)?
--callback needs_timeout(state()) -> 'false' | 'timed' | 'idle'.
-
-%% Called (eventually) after needs_timeout returns 'idle' or 'timed'.
-%% Note this may be called more than once for each 'idle' or 'timed'
-%% returned from needs_timeout
--callback timeout(state()) -> state().
-
-%% Called immediately before the queue hibernates.
--callback handle_pre_hibernate(state()) -> state().
-
-%% Called when more credit has become available for credit_flow.
--callback resume(state()) -> state().
-
-%% Used to help prioritisation in rabbit_amqqueue_process. The rate of
-%% inbound messages and outbound messages at the moment.
--callback msg_rates(state()) -> {float(), float()}.
-
--callback info(atom(), state()) -> any().
-
-%% Passed a function to be invoked with the relevant backing queue's
-%% state. Useful for when the backing queue or other components need
-%% to pass functions into the backing queue.
--callback invoke(atom(), fun ((atom(), A) -> A), state()) -> state().
-
-%% Called prior to a publish or publish_delivered call. Allows the BQ
-%% to signal that it's already seen this message, (e.g. it was published
-%% or discarded previously) and thus the message should be dropped.
--callback is_duplicate(rabbit_types:basic_message(), state())
- -> {boolean(), state()}.
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
- [{start, 1}, {stop, 0}, {init, 3}, {terminate, 2},
- {delete_and_terminate, 2}, {delete_crashed, 1}, {purge, 1},
- {purge_acks, 1}, {publish, 6},
- {publish_delivered, 5}, {discard, 4}, {drain_confirmed, 1},
- {dropwhile, 2}, {fetchwhile, 4}, {fetch, 2},
- {drop, 2}, {ack, 2}, {requeue, 2}, {ackfold, 4}, {fold, 3}, {len, 1},
- {is_empty, 1}, {depth, 1}, {set_ram_duration_target, 2},
- {ram_duration, 1}, {needs_timeout, 1}, {timeout, 1},
- {handle_pre_hibernate, 1}, {resume, 1}, {msg_rates, 1},
- {info, 2}, {invoke, 3}, {is_duplicate, 2}] ;
-behaviour_info(_Other) ->
- undefined.
-
--endif.
-
-info_keys() -> ?INFO_KEYS.
diff --git a/src/rabbit_basic.erl b/src/rabbit_basic.erl
deleted file mode 100644
index efc5ce2745..0000000000
--- a/src/rabbit_basic.erl
+++ /dev/null
@@ -1,321 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_basic).
--include("rabbit.hrl").
--include("rabbit_framing.hrl").
-
--export([publish/4, publish/5, publish/1,
- message/3, message/4, properties/1, prepend_table_header/3,
- extract_headers/1, map_headers/2, delivery/4, header_routes/1,
- parse_expiration/1, header/2, header/3]).
--export([build_content/2, from_content/1, msg_size/1, maybe_gc_large_msg/1]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--type(properties_input() ::
- (rabbit_framing:amqp_property_record() | [{atom(), any()}])).
--type(publish_result() ::
- ({ok, [pid()]} | rabbit_types:error('not_found'))).
--type(header() :: any()).
--type(headers() :: rabbit_framing:amqp_table() | 'undefined').
-
--type(exchange_input() :: (rabbit_types:exchange() | rabbit_exchange:name())).
--type(body_input() :: (binary() | [binary()])).
-
--spec(publish/4 ::
- (exchange_input(), rabbit_router:routing_key(), properties_input(),
- body_input()) -> publish_result()).
--spec(publish/5 ::
- (exchange_input(), rabbit_router:routing_key(), boolean(),
- properties_input(), body_input()) -> publish_result()).
--spec(publish/1 ::
- (rabbit_types:delivery()) -> publish_result()).
--spec(delivery/4 ::
- (boolean(), boolean(), rabbit_types:message(), undefined | integer()) ->
- rabbit_types:delivery()).
--spec(message/4 ::
- (rabbit_exchange:name(), rabbit_router:routing_key(),
- properties_input(), binary()) -> rabbit_types:message()).
--spec(message/3 ::
- (rabbit_exchange:name(), rabbit_router:routing_key(),
- rabbit_types:decoded_content()) ->
- rabbit_types:ok_or_error2(rabbit_types:message(), any())).
--spec(properties/1 ::
- (properties_input()) -> rabbit_framing:amqp_property_record()).
-
--spec(prepend_table_header/3 ::
- (binary(), rabbit_framing:amqp_table(), headers()) -> headers()).
-
--spec(header/2 ::
- (header(), headers()) -> 'undefined' | any()).
--spec(header/3 ::
- (header(), headers(), any()) -> 'undefined' | any()).
-
--spec(extract_headers/1 :: (rabbit_types:content()) -> headers()).
-
--spec(map_headers/2 :: (fun((headers()) -> headers()), rabbit_types:content())
- -> rabbit_types:content()).
-
--spec(header_routes/1 ::
- (undefined | rabbit_framing:amqp_table()) -> [string()]).
--spec(build_content/2 :: (rabbit_framing:amqp_property_record(),
- binary() | [binary()]) -> rabbit_types:content()).
--spec(from_content/1 :: (rabbit_types:content()) ->
- {rabbit_framing:amqp_property_record(), binary()}).
--spec(parse_expiration/1 ::
- (rabbit_framing:amqp_property_record())
- -> rabbit_types:ok_or_error2('undefined' | non_neg_integer(), any())).
-
--spec(msg_size/1 :: (rabbit_types:content() | rabbit_types:message()) ->
- non_neg_integer()).
-
--spec(maybe_gc_large_msg/1 ::
- (rabbit_types:content() | rabbit_types:message()) -> non_neg_integer()).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-%% Convenience function, for avoiding round-trips in calls across the
-%% erlang distributed network.
-publish(Exchange, RoutingKeyBin, Properties, Body) ->
- publish(Exchange, RoutingKeyBin, false, Properties, Body).
-
-%% Convenience function, for avoiding round-trips in calls across the
-%% erlang distributed network.
-publish(X = #exchange{name = XName}, RKey, Mandatory, Props, Body) ->
- Message = message(XName, RKey, properties(Props), Body),
- publish(X, delivery(Mandatory, false, Message, undefined));
-publish(XName, RKey, Mandatory, Props, Body) ->
- Message = message(XName, RKey, properties(Props), Body),
- publish(delivery(Mandatory, false, Message, undefined)).
-
-publish(Delivery = #delivery{
- message = #basic_message{exchange_name = XName}}) ->
- case rabbit_exchange:lookup(XName) of
- {ok, X} -> publish(X, Delivery);
- Err -> Err
- end.
-
-publish(X, Delivery) ->
- Qs = rabbit_amqqueue:lookup(rabbit_exchange:route(X, Delivery)),
- DeliveredQPids = rabbit_amqqueue:deliver(Qs, Delivery),
- {ok, DeliveredQPids}.
-
-delivery(Mandatory, Confirm, Message, MsgSeqNo) ->
- #delivery{mandatory = Mandatory, confirm = Confirm, sender = self(),
- message = Message, msg_seq_no = MsgSeqNo, flow = noflow}.
-
-build_content(Properties, BodyBin) when is_binary(BodyBin) ->
- build_content(Properties, [BodyBin]);
-
-build_content(Properties, PFR) ->
- %% basic.publish hasn't changed so we can just hard-code amqp_0_9_1
- {ClassId, _MethodId} =
- rabbit_framing_amqp_0_9_1:method_id('basic.publish'),
- #content{class_id = ClassId,
- properties = Properties,
- properties_bin = none,
- protocol = none,
- payload_fragments_rev = PFR}.
-
-from_content(Content) ->
- #content{class_id = ClassId,
- properties = Props,
- payload_fragments_rev = FragmentsRev} =
- rabbit_binary_parser:ensure_content_decoded(Content),
- %% basic.publish hasn't changed so we can just hard-code amqp_0_9_1
- {ClassId, _MethodId} =
- rabbit_framing_amqp_0_9_1:method_id('basic.publish'),
- {Props, list_to_binary(lists:reverse(FragmentsRev))}.
-
-%% This breaks the spec rule forbidding message modification
-strip_header(#content{properties = #'P_basic'{headers = undefined}}
- = DecodedContent, _Key) ->
- DecodedContent;
-strip_header(#content{properties = Props = #'P_basic'{headers = Headers}}
- = DecodedContent, Key) ->
- case lists:keysearch(Key, 1, Headers) of
- false -> DecodedContent;
- {value, Found} -> Headers0 = lists:delete(Found, Headers),
- rabbit_binary_generator:clear_encoded_content(
- DecodedContent#content{
- properties = Props#'P_basic'{
- headers = Headers0}})
- end.
-
-message(XName, RoutingKey, #content{properties = Props} = DecodedContent) ->
- try
- {ok, #basic_message{
- exchange_name = XName,
- content = strip_header(DecodedContent, ?DELETED_HEADER),
- id = rabbit_guid:gen(),
- is_persistent = is_message_persistent(DecodedContent),
- routing_keys = [RoutingKey |
- header_routes(Props#'P_basic'.headers)]}}
- catch
- {error, _Reason} = Error -> Error
- end.
-
-message(XName, RoutingKey, RawProperties, Body) ->
- Properties = properties(RawProperties),
- Content = build_content(Properties, Body),
- {ok, Msg} = message(XName, RoutingKey, Content),
- Msg.
-
-properties(P = #'P_basic'{}) ->
- P;
-properties(P) when is_list(P) ->
- %% Yes, this is O(length(P) * record_info(size, 'P_basic') / 2),
- %% i.e. slow. Use the definition of 'P_basic' directly if
- %% possible!
- lists:foldl(fun ({Key, Value}, Acc) ->
- case indexof(record_info(fields, 'P_basic'), Key) of
- 0 -> throw({unknown_basic_property, Key});
- N -> setelement(N + 1, Acc, Value)
- end
- end, #'P_basic'{}, P).
-
-prepend_table_header(Name, Info, undefined) ->
- prepend_table_header(Name, Info, []);
-prepend_table_header(Name, Info, Headers) ->
- case rabbit_misc:table_lookup(Headers, Name) of
- {array, Existing} ->
- prepend_table(Name, Info, Existing, Headers);
- undefined ->
- prepend_table(Name, Info, [], Headers);
- Other ->
- Headers2 = prepend_table(Name, Info, [], Headers),
- set_invalid_header(Name, Other, Headers2)
- end.
-
-prepend_table(Name, Info, Prior, Headers) ->
- rabbit_misc:set_table_value(Headers, Name, array, [{table, Info} | Prior]).
-
-set_invalid_header(Name, {_, _}=Value, Headers) when is_list(Headers) ->
- case rabbit_misc:table_lookup(Headers, ?INVALID_HEADERS_KEY) of
- undefined ->
- set_invalid([{Name, array, [Value]}], Headers);
- {table, ExistingHdr} ->
- update_invalid(Name, Value, ExistingHdr, Headers);
- Other ->
- %% somehow the x-invalid-headers header is corrupt
- Invalid = [{?INVALID_HEADERS_KEY, array, [Other]}],
- set_invalid_header(Name, Value, set_invalid(Invalid, Headers))
- end.
-
-set_invalid(NewHdr, Headers) ->
- rabbit_misc:set_table_value(Headers, ?INVALID_HEADERS_KEY, table, NewHdr).
-
-update_invalid(Name, Value, ExistingHdr, Header) ->
- Values = case rabbit_misc:table_lookup(ExistingHdr, Name) of
- undefined -> [Value];
- {array, Prior} -> [Value | Prior]
- end,
- NewHdr = rabbit_misc:set_table_value(ExistingHdr, Name, array, Values),
- set_invalid(NewHdr, Header).
-
-header(_Header, undefined) ->
- undefined;
-header(_Header, []) ->
- undefined;
-header(Header, Headers) ->
- header(Header, Headers, undefined).
-
-header(Header, Headers, Default) ->
- case lists:keysearch(Header, 1, Headers) of
- false -> Default;
- {value, Val} -> Val
- end.
-
-extract_headers(Content) ->
- #content{properties = #'P_basic'{headers = Headers}} =
- rabbit_binary_parser:ensure_content_decoded(Content),
- Headers.
-
-map_headers(F, Content) ->
- Content1 = rabbit_binary_parser:ensure_content_decoded(Content),
- #content{properties = #'P_basic'{headers = Headers} = Props} = Content1,
- Headers1 = F(Headers),
- rabbit_binary_generator:clear_encoded_content(
- Content1#content{properties = Props#'P_basic'{headers = Headers1}}).
-
-indexof(L, Element) -> indexof(L, Element, 1).
-
-indexof([], _Element, _N) -> 0;
-indexof([Element | _Rest], Element, N) -> N;
-indexof([_ | Rest], Element, N) -> indexof(Rest, Element, N + 1).
-
-is_message_persistent(#content{properties = #'P_basic'{
- delivery_mode = Mode}}) ->
- case Mode of
- 1 -> false;
- 2 -> true;
- undefined -> false;
- Other -> throw({error, {delivery_mode_unknown, Other}})
- end.
-
-%% Extract CC routes from headers
-header_routes(undefined) ->
- [];
-header_routes(HeadersTable) ->
- lists:append(
- [case rabbit_misc:table_lookup(HeadersTable, HeaderKey) of
- {array, Routes} -> [Route || {longstr, Route} <- Routes];
- undefined -> [];
- {Type, _Val} -> throw({error, {unacceptable_type_in_header,
- binary_to_list(HeaderKey), Type}})
- end || HeaderKey <- ?ROUTING_HEADERS]).
-
-parse_expiration(#'P_basic'{expiration = undefined}) ->
- {ok, undefined};
-parse_expiration(#'P_basic'{expiration = Expiration}) ->
- case string:to_integer(binary_to_list(Expiration)) of
- {error, no_integer} = E ->
- E;
- {N, ""} ->
- case rabbit_misc:check_expiry(N) of
- ok -> {ok, N};
- E = {error, _} -> E
- end;
- {_, S} ->
- {error, {leftover_string, S}}
- end.
-
-%% Some processes (channel, writer) can get huge amounts of binary
-%% garbage when processing huge messages at high speed (since we only
-%% do enough reductions to GC every few hundred messages, and if each
-%% message is 1MB then that's ugly). So count how many bytes of
-%% message we have processed, and force a GC every so often.
-maybe_gc_large_msg(Content) ->
- Size = msg_size(Content),
- Current = case get(msg_size_for_gc) of
- undefined -> 0;
- C -> C
- end,
- New = Current + Size,
- put(msg_size_for_gc, case New > 1000000 of
- true -> erlang:garbage_collect(),
- 0;
- false -> New
- end),
- Size.
-
-msg_size(#content{payload_fragments_rev = PFR}) -> iolist_size(PFR);
-msg_size(#basic_message{content = Content}) -> msg_size(Content).
diff --git a/src/rabbit_binary_generator.erl b/src/rabbit_binary_generator.erl
deleted file mode 100644
index 34f2d601aa..0000000000
--- a/src/rabbit_binary_generator.erl
+++ /dev/null
@@ -1,241 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_binary_generator).
--include("rabbit_framing.hrl").
--include("rabbit.hrl").
-
--export([build_simple_method_frame/3,
- build_simple_content_frames/4,
- build_heartbeat_frame/0]).
--export([generate_table/1]).
--export([check_empty_frame_size/0]).
--export([ensure_content_encoded/2, clear_encoded_content/1]).
--export([map_exception/3]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--type(frame() :: [binary()]).
-
--spec(build_simple_method_frame/3 ::
- (rabbit_channel:channel_number(), rabbit_framing:amqp_method_record(),
- rabbit_types:protocol())
- -> frame()).
--spec(build_simple_content_frames/4 ::
- (rabbit_channel:channel_number(), rabbit_types:content(),
- non_neg_integer(), rabbit_types:protocol())
- -> [frame()]).
--spec(build_heartbeat_frame/0 :: () -> frame()).
--spec(generate_table/1 :: (rabbit_framing:amqp_table()) -> binary()).
--spec(check_empty_frame_size/0 :: () -> 'ok').
--spec(ensure_content_encoded/2 ::
- (rabbit_types:content(), rabbit_types:protocol()) ->
- rabbit_types:encoded_content()).
--spec(clear_encoded_content/1 ::
- (rabbit_types:content()) -> rabbit_types:unencoded_content()).
--spec(map_exception/3 :: (rabbit_channel:channel_number(),
- rabbit_types:amqp_error() | any(),
- rabbit_types:protocol()) ->
- {rabbit_channel:channel_number(),
- rabbit_framing:amqp_method_record()}).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-build_simple_method_frame(ChannelInt, MethodRecord, Protocol) ->
- MethodFields = Protocol:encode_method_fields(MethodRecord),
- MethodName = rabbit_misc:method_record_type(MethodRecord),
- {ClassId, MethodId} = Protocol:method_id(MethodName),
- create_frame(1, ChannelInt, [<<ClassId:16, MethodId:16>>, MethodFields]).
-
-build_simple_content_frames(ChannelInt, Content, FrameMax, Protocol) ->
- #content{class_id = ClassId,
- properties_bin = ContentPropertiesBin,
- payload_fragments_rev = PayloadFragmentsRev} =
- ensure_content_encoded(Content, Protocol),
- {BodySize, ContentFrames} =
- build_content_frames(PayloadFragmentsRev, FrameMax, ChannelInt),
- HeaderFrame = create_frame(2, ChannelInt,
- [<<ClassId:16, 0:16, BodySize:64>>,
- ContentPropertiesBin]),
- [HeaderFrame | ContentFrames].
-
-build_content_frames(FragsRev, FrameMax, ChannelInt) ->
- BodyPayloadMax = if FrameMax == 0 -> iolist_size(FragsRev);
- true -> FrameMax - ?EMPTY_FRAME_SIZE
- end,
- build_content_frames(0, [], BodyPayloadMax, [],
- lists:reverse(FragsRev), BodyPayloadMax, ChannelInt).
-
-build_content_frames(SizeAcc, FramesAcc, _FragSizeRem, [],
- [], _BodyPayloadMax, _ChannelInt) ->
- {SizeAcc, lists:reverse(FramesAcc)};
-build_content_frames(SizeAcc, FramesAcc, FragSizeRem, FragAcc,
- Frags, BodyPayloadMax, ChannelInt)
- when FragSizeRem == 0 orelse Frags == [] ->
- Frame = create_frame(3, ChannelInt, lists:reverse(FragAcc)),
- FrameSize = BodyPayloadMax - FragSizeRem,
- build_content_frames(SizeAcc + FrameSize, [Frame | FramesAcc],
- BodyPayloadMax, [], Frags, BodyPayloadMax, ChannelInt);
-build_content_frames(SizeAcc, FramesAcc, FragSizeRem, FragAcc,
- [Frag | Frags], BodyPayloadMax, ChannelInt) ->
- Size = size(Frag),
- {NewFragSizeRem, NewFragAcc, NewFrags} =
- if Size == 0 -> {FragSizeRem, FragAcc, Frags};
- Size =< FragSizeRem -> {FragSizeRem - Size, [Frag | FragAcc], Frags};
- true -> <<Head:FragSizeRem/binary, Tail/binary>> =
- Frag,
- {0, [Head | FragAcc], [Tail | Frags]}
- end,
- build_content_frames(SizeAcc, FramesAcc, NewFragSizeRem, NewFragAcc,
- NewFrags, BodyPayloadMax, ChannelInt).
-
-build_heartbeat_frame() ->
- create_frame(?FRAME_HEARTBEAT, 0, <<>>).
-
-create_frame(TypeInt, ChannelInt, Payload) ->
- [<<TypeInt:8, ChannelInt:16, (iolist_size(Payload)):32>>, Payload,
- ?FRAME_END].
-
-%% table_field_to_binary supports the AMQP 0-8/0-9 standard types, S,
-%% I, D, T and F, as well as the QPid extensions b, d, f, l, s, t, x,
-%% and V.
-table_field_to_binary({FName, T, V}) ->
- [short_string_to_binary(FName) | field_value_to_binary(T, V)].
-
-field_value_to_binary(longstr, V) -> [$S | long_string_to_binary(V)];
-field_value_to_binary(signedint, V) -> [$I, <<V:32/signed>>];
-field_value_to_binary(decimal, V) -> {Before, After} = V,
- [$D, Before, <<After:32>>];
-field_value_to_binary(timestamp, V) -> [$T, <<V:64>>];
-field_value_to_binary(table, V) -> [$F | table_to_binary(V)];
-field_value_to_binary(array, V) -> [$A | array_to_binary(V)];
-field_value_to_binary(byte, V) -> [$b, <<V:8/signed>>];
-field_value_to_binary(double, V) -> [$d, <<V:64/float>>];
-field_value_to_binary(float, V) -> [$f, <<V:32/float>>];
-field_value_to_binary(long, V) -> [$l, <<V:64/signed>>];
-field_value_to_binary(short, V) -> [$s, <<V:16/signed>>];
-field_value_to_binary(bool, V) -> [$t, if V -> 1; true -> 0 end];
-field_value_to_binary(binary, V) -> [$x | long_string_to_binary(V)];
-field_value_to_binary(void, _V) -> [$V].
-
-table_to_binary(Table) when is_list(Table) ->
- BinTable = generate_table_iolist(Table),
- [<<(iolist_size(BinTable)):32>> | BinTable].
-
-array_to_binary(Array) when is_list(Array) ->
- BinArray = generate_array_iolist(Array),
- [<<(iolist_size(BinArray)):32>> | BinArray].
-
-generate_table(Table) when is_list(Table) ->
- list_to_binary(generate_table_iolist(Table)).
-
-generate_table_iolist(Table) ->
- lists:map(fun table_field_to_binary/1, Table).
-
-generate_array_iolist(Array) ->
- lists:map(fun ({T, V}) -> field_value_to_binary(T, V) end, Array).
-
-short_string_to_binary(String) ->
- Len = string_length(String),
- if Len < 256 -> [<<Len:8>>, String];
- true -> exit(content_properties_shortstr_overflow)
- end.
-
-long_string_to_binary(String) ->
- Len = string_length(String),
- [<<Len:32>>, String].
-
-string_length(String) when is_binary(String) -> size(String);
-string_length(String) -> length(String).
-
-check_empty_frame_size() ->
- %% Intended to ensure that EMPTY_FRAME_SIZE is defined correctly.
- case iolist_size(create_frame(?FRAME_BODY, 0, <<>>)) of
- ?EMPTY_FRAME_SIZE -> ok;
- ComputedSize -> exit({incorrect_empty_frame_size,
- ComputedSize, ?EMPTY_FRAME_SIZE})
- end.
-
-ensure_content_encoded(Content = #content{properties_bin = PropBin,
- protocol = Protocol}, Protocol)
- when PropBin =/= none ->
- Content;
-ensure_content_encoded(Content = #content{properties = none,
- properties_bin = PropBin,
- protocol = Protocol}, Protocol1)
- when PropBin =/= none ->
- Props = Protocol:decode_properties(Content#content.class_id, PropBin),
- Content#content{properties = Props,
- properties_bin = Protocol1:encode_properties(Props),
- protocol = Protocol1};
-ensure_content_encoded(Content = #content{properties = Props}, Protocol)
- when Props =/= none ->
- Content#content{properties_bin = Protocol:encode_properties(Props),
- protocol = Protocol}.
-
-clear_encoded_content(Content = #content{properties_bin = none,
- protocol = none}) ->
- Content;
-clear_encoded_content(Content = #content{properties = none}) ->
- %% Only clear when we can rebuild the properties_bin later in
- %% accordance to the content record definition comment - maximum
- %% one of properties and properties_bin can be 'none'
- Content;
-clear_encoded_content(Content = #content{}) ->
- Content#content{properties_bin = none, protocol = none}.
-
-%% NB: this function is also used by the Erlang client
-map_exception(Channel, Reason, Protocol) ->
- {SuggestedClose, ReplyCode, ReplyText, FailedMethod} =
- lookup_amqp_exception(Reason, Protocol),
- {ClassId, MethodId} = case FailedMethod of
- {_, _} -> FailedMethod;
- none -> {0, 0};
- _ -> Protocol:method_id(FailedMethod)
- end,
- case SuggestedClose orelse (Channel == 0) of
- true -> {0, #'connection.close'{reply_code = ReplyCode,
- reply_text = ReplyText,
- class_id = ClassId,
- method_id = MethodId}};
- false -> {Channel, #'channel.close'{reply_code = ReplyCode,
- reply_text = ReplyText,
- class_id = ClassId,
- method_id = MethodId}}
- end.
-
-lookup_amqp_exception(#amqp_error{name = Name,
- explanation = Expl,
- method = Method},
- Protocol) ->
- {ShouldClose, Code, Text} = Protocol:lookup_amqp_exception(Name),
- ExplBin = amqp_exception_explanation(Text, Expl),
- {ShouldClose, Code, ExplBin, Method};
-lookup_amqp_exception(Other, Protocol) ->
- rabbit_log:warning("Non-AMQP exit reason '~p'~n", [Other]),
- {ShouldClose, Code, Text} = Protocol:lookup_amqp_exception(internal_error),
- {ShouldClose, Code, Text, none}.
-
-amqp_exception_explanation(Text, Expl) ->
- ExplBin = list_to_binary(Expl),
- CompleteTextBin = <<Text/binary, " - ", ExplBin/binary>>,
- if size(CompleteTextBin) > 255 -> <<CompleteTextBin:252/binary, "...">>;
- true -> CompleteTextBin
- end.
diff --git a/src/rabbit_binary_parser.erl b/src/rabbit_binary_parser.erl
deleted file mode 100644
index 8b3bf3e6f5..0000000000
--- a/src/rabbit_binary_parser.erl
+++ /dev/null
@@ -1,161 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_binary_parser).
-
--include("rabbit.hrl").
-
--export([parse_table/1]).
--export([ensure_content_decoded/1, clear_decoded_content/1]).
--export([validate_utf8/1, assert_utf8/1]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(parse_table/1 :: (binary()) -> rabbit_framing:amqp_table()).
--spec(ensure_content_decoded/1 ::
- (rabbit_types:content()) -> rabbit_types:decoded_content()).
--spec(clear_decoded_content/1 ::
- (rabbit_types:content()) -> rabbit_types:undecoded_content()).
--spec(validate_utf8/1 :: (binary()) -> 'ok' | 'error').
--spec(assert_utf8/1 :: (binary()) -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-%% parse_table supports the AMQP 0-8/0-9 standard types, S, I, D, T
-%% and F, as well as the QPid extensions b, d, f, l, s, t, x, and V.
-
--define(SIMPLE_PARSE_TABLE(BType, Pattern, RType),
- parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
- BType, Pattern, Rest/binary>>) ->
- [{NameString, RType, Value} | parse_table(Rest)]).
-
-%% Note that we try to put these in approximately the order we expect
-%% to hit them, that's why the empty binary is half way through.
-
-parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
- $S, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
- [{NameString, longstr, Value} | parse_table(Rest)];
-
-?SIMPLE_PARSE_TABLE($I, Value:32/signed, signedint);
-?SIMPLE_PARSE_TABLE($T, Value:64/unsigned, timestamp);
-
-parse_table(<<>>) ->
- [];
-
-?SIMPLE_PARSE_TABLE($b, Value:8/signed, byte);
-?SIMPLE_PARSE_TABLE($d, Value:64/float, double);
-?SIMPLE_PARSE_TABLE($f, Value:32/float, float);
-?SIMPLE_PARSE_TABLE($l, Value:64/signed, long);
-?SIMPLE_PARSE_TABLE($s, Value:16/signed, short);
-
-parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
- $t, Value:8/unsigned, Rest/binary>>) ->
- [{NameString, bool, (Value /= 0)} | parse_table(Rest)];
-
-parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
- $D, Before:8/unsigned, After:32/unsigned, Rest/binary>>) ->
- [{NameString, decimal, {Before, After}} | parse_table(Rest)];
-
-parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
- $F, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
- [{NameString, table, parse_table(Value)} | parse_table(Rest)];
-
-parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
- $A, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
- [{NameString, array, parse_array(Value)} | parse_table(Rest)];
-
-parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
- $x, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
- [{NameString, binary, Value} | parse_table(Rest)];
-
-parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
- $V, Rest/binary>>) ->
- [{NameString, void, undefined} | parse_table(Rest)].
-
--define(SIMPLE_PARSE_ARRAY(BType, Pattern, RType),
- parse_array(<<BType, Pattern, Rest/binary>>) ->
- [{RType, Value} | parse_array(Rest)]).
-
-parse_array(<<$S, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
- [{longstr, Value} | parse_array(Rest)];
-
-?SIMPLE_PARSE_ARRAY($I, Value:32/signed, signedint);
-?SIMPLE_PARSE_ARRAY($T, Value:64/unsigned, timestamp);
-
-parse_array(<<>>) ->
- [];
-
-?SIMPLE_PARSE_ARRAY($b, Value:8/signed, byte);
-?SIMPLE_PARSE_ARRAY($d, Value:64/float, double);
-?SIMPLE_PARSE_ARRAY($f, Value:32/float, float);
-?SIMPLE_PARSE_ARRAY($l, Value:64/signed, long);
-?SIMPLE_PARSE_ARRAY($s, Value:16/signed, short);
-
-parse_array(<<$t, Value:8/unsigned, Rest/binary>>) ->
- [{bool, (Value /= 0)} | parse_array(Rest)];
-
-parse_array(<<$D, Before:8/unsigned, After:32/unsigned, Rest/binary>>) ->
- [{decimal, {Before, After}} | parse_array(Rest)];
-
-parse_array(<<$F, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
- [{table, parse_table(Value)} | parse_array(Rest)];
-
-parse_array(<<$A, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
- [{array, parse_array(Value)} | parse_array(Rest)];
-
-parse_array(<<$x, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
- [{binary, Value} | parse_array(Rest)];
-
-parse_array(<<$V, Rest/binary>>) ->
- [{void, undefined} | parse_array(Rest)].
-
-ensure_content_decoded(Content = #content{properties = Props})
- when Props =/= none ->
- Content;
-ensure_content_decoded(Content = #content{properties_bin = PropBin,
- protocol = Protocol})
- when PropBin =/= none ->
- Content#content{properties = Protocol:decode_properties(
- Content#content.class_id, PropBin)}.
-
-clear_decoded_content(Content = #content{properties = none}) ->
- Content;
-clear_decoded_content(Content = #content{properties_bin = none}) ->
- %% Only clear when we can rebuild the properties later in
- %% accordance to the content record definition comment - maximum
- %% one of properties and properties_bin can be 'none'
- Content;
-clear_decoded_content(Content = #content{}) ->
- Content#content{properties = none}.
-
-assert_utf8(B) ->
- case validate_utf8(B) of
- ok -> ok;
- error -> rabbit_misc:protocol_error(
- frame_error, "Malformed UTF-8 in shortstr", [])
- end.
-
-validate_utf8(Bin) ->
- try
- xmerl_ucs:from_utf8(Bin),
- ok
- catch exit:{ucs, _} ->
- error
- end.
diff --git a/src/rabbit_binding.erl b/src/rabbit_binding.erl
index 77a9277c4a..609daf612e 100644
--- a/src/rabbit_binding.erl
+++ b/src/rabbit_binding.erl
@@ -22,7 +22,7 @@
list_for_source_and_destination/2]).
-export([new_deletions/0, combine_deletions/2, add_deletion/3,
process_deletions/1]).
--export([info_keys/0, info/1, info/2, info_all/1, info_all/2]).
+-export([info_keys/0, info/1, info/2, info_all/1, info_all/2, info_all/4]).
%% these must all be run inside a mnesia tx
-export([has_for_source/1, remove_for_source/1,
remove_for_destination/2, remove_transient_for_destination/1]).
@@ -78,6 +78,8 @@
-spec(info_all/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]).
-spec(info_all/2 ::(rabbit_types:vhost(), rabbit_types:info_keys())
-> [rabbit_types:infos()]).
+-spec(info_all/4 ::(rabbit_types:vhost(), rabbit_types:info_keys(),
+ reference(), pid()) -> 'ok').
-spec(has_for_source/1 :: (rabbit_types:binding_source()) -> boolean()).
-spec(remove_for_source/1 :: (rabbit_types:binding_source()) -> bindings()).
-spec(remove_for_destination/2 ::
@@ -284,6 +286,10 @@ info_all(VHostPath) -> map(VHostPath, fun (B) -> info(B) end).
info_all(VHostPath, Items) -> map(VHostPath, fun (B) -> info(B, Items) end).
+info_all(VHostPath, Items, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map(
+ AggregatorPid, Ref, fun(B) -> info(B, Items) end, list(VHostPath)).
+
has_for_source(SrcName) ->
Match = #route{binding = #binding{source = SrcName, _ = '_'}},
%% we need to check for semi-durable routes (which subsumes
diff --git a/src/rabbit_boot_steps.erl b/src/rabbit_boot_steps.erl
new file mode 100644
index 0000000000..21366877db
--- /dev/null
+++ b/src/rabbit_boot_steps.erl
@@ -0,0 +1,97 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2014 GoPivotal, Inc. All rights reserved.
+%%
+
+-module(rabbit_boot_steps).
+
+-export([run_boot_steps/0, run_boot_steps/1, run_cleanup_steps/1]).
+-export([find_steps/0, find_steps/1]).
+
+run_boot_steps() ->
+ run_boot_steps(loaded_applications()).
+
+run_boot_steps(Apps) ->
+ [ok = run_step(Attrs, mfa) || {_, _, Attrs} <- find_steps(Apps)],
+ ok.
+
+run_cleanup_steps(Apps) ->
+ [run_step(Attrs, cleanup) || {_, _, Attrs} <- find_steps(Apps)],
+ ok.
+
+loaded_applications() ->
+ [App || {App, _, _} <- application:loaded_applications()].
+
+find_steps() ->
+ find_steps(loaded_applications()).
+
+find_steps(Apps) ->
+ All = sort_boot_steps(rabbit_misc:all_module_attributes(rabbit_boot_step)),
+ [Step || {App, _, _} = Step <- All, lists:member(App, Apps)].
+
+run_step(Attributes, AttributeName) ->
+ case [MFA || {Key, MFA} <- Attributes,
+ Key =:= AttributeName] of
+ [] ->
+ ok;
+ MFAs ->
+ [case apply(M,F,A) of
+ ok -> ok;
+ {error, Reason} -> exit({error, Reason})
+ end || {M,F,A} <- MFAs],
+ ok
+ end.
+
+vertices({AppName, _Module, Steps}) ->
+ [{StepName, {AppName, StepName, Atts}} || {StepName, Atts} <- Steps].
+
+edges({_AppName, _Module, Steps}) ->
+ EnsureList = fun (L) when is_list(L) -> L;
+ (T) -> [T]
+ end,
+ [case Key of
+ requires -> {StepName, OtherStep};
+ enables -> {OtherStep, StepName}
+ end || {StepName, Atts} <- Steps,
+ {Key, OtherStepOrSteps} <- Atts,
+ OtherStep <- EnsureList(OtherStepOrSteps),
+ Key =:= requires orelse Key =:= enables].
+
+sort_boot_steps(UnsortedSteps) ->
+ case rabbit_misc:build_acyclic_graph(fun vertices/1, fun edges/1,
+ UnsortedSteps) of
+ {ok, G} ->
+ %% Use topological sort to find a consistent ordering (if
+ %% there is one, otherwise fail).
+ SortedSteps = lists:reverse(
+ [begin
+ {StepName, Step} = digraph:vertex(G,
+ StepName),
+ Step
+ end || StepName <- digraph_utils:topsort(G)]),
+ digraph:delete(G),
+ %% Check that all mentioned {M,F,A} triples are exported.
+ case [{StepName, {M,F,A}} ||
+ {_App, StepName, Attributes} <- SortedSteps,
+ {mfa, {M,F,A}} <- Attributes,
+ code:ensure_loaded(M) =/= {module, M} orelse
+ not erlang:function_exported(M, F, length(A))] of
+ [] -> SortedSteps;
+ MissingFns -> exit({boot_functions_not_exported, MissingFns})
+ end;
+ {error, {vertex, duplicate, StepName}} ->
+ exit({duplicate_boot_step, StepName});
+ {error, {edge, Reason, From, To}} ->
+ exit({invalid_boot_step_dependency, From, To, Reason})
+ end.
diff --git a/src/rabbit_channel.erl b/src/rabbit_channel.erl
deleted file mode 100644
index b23a8410c5..0000000000
--- a/src/rabbit_channel.erl
+++ /dev/null
@@ -1,1897 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_channel).
--include("rabbit_framing.hrl").
--include("rabbit.hrl").
-
--behaviour(gen_server2).
-
--export([start_link/11, do/2, do/3, do_flow/3, flush/1, shutdown/1]).
--export([send_command/2, deliver/4, deliver_reply/2,
- send_credit_reply/2, send_drained/2]).
--export([list/0, info_keys/0, info/1, info/2, info_all/0, info_all/1]).
--export([refresh_config_local/0, ready_for_close/1]).
--export([force_event_refresh/1]).
-
--export([init/1, terminate/2, code_change/3, handle_call/3, handle_cast/2,
- handle_info/2, handle_pre_hibernate/1, prioritise_call/4,
- prioritise_cast/3, prioritise_info/3, format_message_queue/2]).
-%% Internal
--export([list_local/0, deliver_reply_local/3]).
-
--record(ch, {state, protocol, channel, reader_pid, writer_pid, conn_pid,
- conn_name, limiter, tx, next_tag, unacked_message_q, user,
- virtual_host, most_recently_declared_queue,
- queue_names, queue_monitors, consumer_mapping,
- queue_consumers, delivering_queues,
- queue_collector_pid, stats_timer, confirm_enabled, publish_seqno,
- unconfirmed, confirmed, mandatory, capabilities, trace_state,
- consumer_prefetch, reply_consumer,
- %% flow | noflow, see rabbitmq-server#114
- delivery_flow}).
-
--define(MAX_PERMISSION_CACHE_SIZE, 12).
-
--define(STATISTICS_KEYS,
- [pid,
- transactional,
- confirm,
- consumer_count,
- messages_unacknowledged,
- messages_unconfirmed,
- messages_uncommitted,
- acks_uncommitted,
- prefetch_count,
- global_prefetch_count,
- state]).
-
--define(CREATION_EVENT_KEYS,
- [pid,
- name,
- connection,
- number,
- user,
- vhost]).
-
--define(INFO_KEYS, ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [pid]).
-
--define(INCR_STATS(Incs, Measure, State),
- case rabbit_event:stats_level(State, #ch.stats_timer) of
- fine -> incr_stats(Incs, Measure);
- _ -> ok
- end).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([channel_number/0]).
-
--type(channel_number() :: non_neg_integer()).
-
--spec(start_link/11 ::
- (channel_number(), pid(), pid(), pid(), string(),
- rabbit_types:protocol(), rabbit_types:user(), rabbit_types:vhost(),
- rabbit_framing:amqp_table(), pid(), pid()) ->
- rabbit_types:ok_pid_or_error()).
--spec(do/2 :: (pid(), rabbit_framing:amqp_method_record()) -> 'ok').
--spec(do/3 :: (pid(), rabbit_framing:amqp_method_record(),
- rabbit_types:maybe(rabbit_types:content())) -> 'ok').
--spec(do_flow/3 :: (pid(), rabbit_framing:amqp_method_record(),
- rabbit_types:maybe(rabbit_types:content())) -> 'ok').
--spec(flush/1 :: (pid()) -> 'ok').
--spec(shutdown/1 :: (pid()) -> 'ok').
--spec(send_command/2 :: (pid(), rabbit_framing:amqp_method_record()) -> 'ok').
--spec(deliver/4 ::
- (pid(), rabbit_types:ctag(), boolean(), rabbit_amqqueue:qmsg())
- -> 'ok').
--spec(deliver_reply/2 :: (binary(), rabbit_types:delivery()) -> 'ok').
--spec(deliver_reply_local/3 ::
- (pid(), binary(), rabbit_types:delivery()) -> 'ok').
--spec(send_credit_reply/2 :: (pid(), non_neg_integer()) -> 'ok').
--spec(send_drained/2 :: (pid(), [{rabbit_types:ctag(), non_neg_integer()}])
- -> 'ok').
--spec(list/0 :: () -> [pid()]).
--spec(list_local/0 :: () -> [pid()]).
--spec(info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(info/1 :: (pid()) -> rabbit_types:infos()).
--spec(info/2 :: (pid(), rabbit_types:info_keys()) -> rabbit_types:infos()).
--spec(info_all/0 :: () -> [rabbit_types:infos()]).
--spec(info_all/1 :: (rabbit_types:info_keys()) -> [rabbit_types:infos()]).
--spec(refresh_config_local/0 :: () -> 'ok').
--spec(ready_for_close/1 :: (pid()) -> 'ok').
--spec(force_event_refresh/1 :: (reference()) -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_link(Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol, User,
- VHost, Capabilities, CollectorPid, Limiter) ->
- gen_server2:start_link(
- ?MODULE, [Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol,
- User, VHost, Capabilities, CollectorPid, Limiter], []).
-
-do(Pid, Method) ->
- do(Pid, Method, none).
-
-do(Pid, Method, Content) ->
- gen_server2:cast(Pid, {method, Method, Content, noflow}).
-
-do_flow(Pid, Method, Content) ->
- %% Here we are tracking messages sent by the rabbit_reader
- %% process. We are accessing the rabbit_reader process dictionary.
- credit_flow:send(Pid),
- gen_server2:cast(Pid, {method, Method, Content, flow}).
-
-flush(Pid) ->
- gen_server2:call(Pid, flush, infinity).
-
-shutdown(Pid) ->
- gen_server2:cast(Pid, terminate).
-
-send_command(Pid, Msg) ->
- gen_server2:cast(Pid, {command, Msg}).
-
-deliver(Pid, ConsumerTag, AckRequired, Msg) ->
- gen_server2:cast(Pid, {deliver, ConsumerTag, AckRequired, Msg}).
-
-deliver_reply(<<"amq.rabbitmq.reply-to.", Rest/binary>>, Delivery) ->
- case decode_fast_reply_to(Rest) of
- {ok, Pid, Key} ->
- delegate:invoke_no_result(
- Pid, {?MODULE, deliver_reply_local, [Key, Delivery]});
- error ->
- ok
- end.
-
-%% We want to ensure people can't use this mechanism to send a message
-%% to an arbitrary process and kill it!
-deliver_reply_local(Pid, Key, Delivery) ->
- case pg_local:in_group(rabbit_channels, Pid) of
- true -> gen_server2:cast(Pid, {deliver_reply, Key, Delivery});
- false -> ok
- end.
-
-declare_fast_reply_to(<<"amq.rabbitmq.reply-to">>) ->
- exists;
-declare_fast_reply_to(<<"amq.rabbitmq.reply-to.", Rest/binary>>) ->
- case decode_fast_reply_to(Rest) of
- {ok, Pid, Key} ->
- Msg = {declare_fast_reply_to, Key},
- rabbit_misc:with_exit_handler(
- rabbit_misc:const(not_found),
- fun() -> gen_server2:call(Pid, Msg, infinity) end);
- error ->
- not_found
- end;
-declare_fast_reply_to(_) ->
- not_found.
-
-decode_fast_reply_to(Rest) ->
- case string:tokens(binary_to_list(Rest), ".") of
- [PidEnc, Key] -> Pid = binary_to_term(base64:decode(PidEnc)),
- {ok, Pid, Key};
- _ -> error
- end.
-
-send_credit_reply(Pid, Len) ->
- gen_server2:cast(Pid, {send_credit_reply, Len}).
-
-send_drained(Pid, CTagCredit) ->
- gen_server2:cast(Pid, {send_drained, CTagCredit}).
-
-list() ->
- rabbit_misc:append_rpc_all_nodes(rabbit_mnesia:cluster_nodes(running),
- rabbit_channel, list_local, []).
-
-list_local() ->
- pg_local:get_members(rabbit_channels).
-
-info_keys() -> ?INFO_KEYS.
-
-info(Pid) ->
- gen_server2:call(Pid, info, infinity).
-
-info(Pid, Items) ->
- case gen_server2:call(Pid, {info, Items}, infinity) of
- {ok, Res} -> Res;
- {error, Error} -> throw(Error)
- end.
-
-info_all() ->
- rabbit_misc:filter_exit_map(fun (C) -> info(C) end, list()).
-
-info_all(Items) ->
- rabbit_misc:filter_exit_map(fun (C) -> info(C, Items) end, list()).
-
-refresh_config_local() ->
- rabbit_misc:upmap(
- fun (C) -> gen_server2:call(C, refresh_config, infinity) end,
- list_local()),
- ok.
-
-ready_for_close(Pid) ->
- gen_server2:cast(Pid, ready_for_close).
-
-force_event_refresh(Ref) ->
- [gen_server2:cast(C, {force_event_refresh, Ref}) || C <- list()],
- ok.
-
-%%---------------------------------------------------------------------------
-
-init([Channel, ReaderPid, WriterPid, ConnPid, ConnName, Protocol, User, VHost,
- Capabilities, CollectorPid, LimiterPid]) ->
- process_flag(trap_exit, true),
- ?store_proc_name({ConnName, Channel}),
- ok = pg_local:join(rabbit_channels, self()),
- Flow = case rabbit_misc:get_env(rabbit, mirroring_flow_control, true) of
- true -> flow;
- false -> noflow
- end,
- State = #ch{state = starting,
- protocol = Protocol,
- channel = Channel,
- reader_pid = ReaderPid,
- writer_pid = WriterPid,
- conn_pid = ConnPid,
- conn_name = ConnName,
- limiter = rabbit_limiter:new(LimiterPid),
- tx = none,
- next_tag = 1,
- unacked_message_q = queue:new(),
- user = User,
- virtual_host = VHost,
- most_recently_declared_queue = <<>>,
- queue_names = dict:new(),
- queue_monitors = pmon:new(),
- consumer_mapping = dict:new(),
- queue_consumers = dict:new(),
- delivering_queues = sets:new(),
- queue_collector_pid = CollectorPid,
- confirm_enabled = false,
- publish_seqno = 1,
- unconfirmed = dtree:empty(),
- confirmed = [],
- mandatory = dtree:empty(),
- capabilities = Capabilities,
- trace_state = rabbit_trace:init(VHost),
- consumer_prefetch = 0,
- reply_consumer = none,
- delivery_flow = Flow},
- State1 = rabbit_event:init_stats_timer(State, #ch.stats_timer),
- rabbit_event:notify(channel_created, infos(?CREATION_EVENT_KEYS, State1)),
- rabbit_event:if_enabled(State1, #ch.stats_timer,
- fun() -> emit_stats(State1) end),
- {ok, State1, hibernate,
- {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
-
-prioritise_call(Msg, _From, _Len, _State) ->
- case Msg of
- info -> 9;
- {info, _Items} -> 9;
- _ -> 0
- end.
-
-prioritise_cast(Msg, _Len, _State) ->
- case Msg of
- {confirm, _MsgSeqNos, _QPid} -> 5;
- {mandatory_received, _MsgSeqNo, _QPid} -> 5;
- _ -> 0
- end.
-
-prioritise_info(Msg, _Len, _State) ->
- case Msg of
- emit_stats -> 7;
- _ -> 0
- end.
-
-handle_call(flush, _From, State) ->
- reply(ok, State);
-
-handle_call(info, _From, State) ->
- reply(infos(?INFO_KEYS, State), State);
-
-handle_call({info, Items}, _From, State) ->
- try
- reply({ok, infos(Items, State)}, State)
- catch Error -> reply({error, Error}, State)
- end;
-
-handle_call(refresh_config, _From, State = #ch{virtual_host = VHost}) ->
- reply(ok, State#ch{trace_state = rabbit_trace:init(VHost)});
-
-handle_call({declare_fast_reply_to, Key}, _From,
- State = #ch{reply_consumer = Consumer}) ->
- reply(case Consumer of
- {_, _, Key} -> exists;
- _ -> not_found
- end, State);
-
-handle_call(_Request, _From, State) ->
- noreply(State).
-
-handle_cast({method, Method, Content, Flow},
- State = #ch{reader_pid = Reader,
- virtual_host = VHost}) ->
- case Flow of
- %% We are going to process a message from the rabbit_reader
- %% process, so here we ack it. In this case we are accessing
- %% the rabbit_channel process dictionary.
- flow -> credit_flow:ack(Reader);
- noflow -> ok
- end,
- try handle_method(rabbit_channel_interceptor:intercept_method(
- expand_shortcuts(Method, State), VHost),
- Content, State) of
- {reply, Reply, NewState} ->
- ok = send(Reply, NewState),
- noreply(NewState);
- {noreply, NewState} ->
- noreply(NewState);
- stop ->
- {stop, normal, State}
- catch
- exit:Reason = #amqp_error{} ->
- MethodName = rabbit_misc:method_record_type(Method),
- handle_exception(Reason#amqp_error{method = MethodName}, State);
- _:Reason ->
- {stop, {Reason, erlang:get_stacktrace()}, State}
- end;
-
-handle_cast(ready_for_close, State = #ch{state = closing,
- writer_pid = WriterPid}) ->
- ok = rabbit_writer:send_command_sync(WriterPid, #'channel.close_ok'{}),
- {stop, normal, State};
-
-handle_cast(terminate, State = #ch{writer_pid = WriterPid}) ->
- ok = rabbit_writer:flush(WriterPid),
- {stop, normal, State};
-
-handle_cast({command, #'basic.consume_ok'{consumer_tag = CTag} = Msg}, State) ->
- ok = send(Msg, State),
- noreply(consumer_monitor(CTag, State));
-
-handle_cast({command, Msg}, State) ->
- ok = send(Msg, State),
- noreply(State);
-
-handle_cast({deliver, _CTag, _AckReq, _Msg}, State = #ch{state = closing}) ->
- noreply(State);
-handle_cast({deliver, ConsumerTag, AckRequired,
- Msg = {_QName, QPid, _MsgId, Redelivered,
- #basic_message{exchange_name = ExchangeName,
- routing_keys = [RoutingKey | _CcRoutes],
- content = Content}}},
- State = #ch{writer_pid = WriterPid,
- next_tag = DeliveryTag}) ->
- ok = rabbit_writer:send_command_and_notify(
- WriterPid, QPid, self(),
- #'basic.deliver'{consumer_tag = ConsumerTag,
- delivery_tag = DeliveryTag,
- redelivered = Redelivered,
- exchange = ExchangeName#resource.name,
- routing_key = RoutingKey},
- Content),
- rabbit_basic:maybe_gc_large_msg(Content),
- noreply(record_sent(ConsumerTag, AckRequired, Msg, State));
-
-handle_cast({deliver_reply, _K, _Del}, State = #ch{state = closing}) ->
- noreply(State);
-handle_cast({deliver_reply, _K, _Del}, State = #ch{reply_consumer = none}) ->
- noreply(State);
-handle_cast({deliver_reply, Key, #delivery{message =
- #basic_message{exchange_name = ExchangeName,
- routing_keys = [RoutingKey | _CcRoutes],
- content = Content}}},
- State = #ch{writer_pid = WriterPid,
- next_tag = DeliveryTag,
- reply_consumer = {ConsumerTag, _Suffix, Key}}) ->
- ok = rabbit_writer:send_command(
- WriterPid,
- #'basic.deliver'{consumer_tag = ConsumerTag,
- delivery_tag = DeliveryTag,
- redelivered = false,
- exchange = ExchangeName#resource.name,
- routing_key = RoutingKey},
- Content),
- noreply(State);
-handle_cast({deliver_reply, _K1, _}, State=#ch{reply_consumer = {_, _, _K2}}) ->
- noreply(State);
-
-handle_cast({send_credit_reply, Len}, State = #ch{writer_pid = WriterPid}) ->
- ok = rabbit_writer:send_command(
- WriterPid, #'basic.credit_ok'{available = Len}),
- noreply(State);
-
-handle_cast({send_drained, CTagCredit}, State = #ch{writer_pid = WriterPid}) ->
- [ok = rabbit_writer:send_command(
- WriterPid, #'basic.credit_drained'{consumer_tag = ConsumerTag,
- credit_drained = CreditDrained})
- || {ConsumerTag, CreditDrained} <- CTagCredit],
- noreply(State);
-
-handle_cast({force_event_refresh, Ref}, State) ->
- rabbit_event:notify(channel_created, infos(?CREATION_EVENT_KEYS, State),
- Ref),
- noreply(rabbit_event:init_stats_timer(State, #ch.stats_timer));
-
-handle_cast({mandatory_received, MsgSeqNo}, State = #ch{mandatory = Mand}) ->
- %% NB: don't call noreply/1 since we don't want to send confirms.
- noreply_coalesce(State#ch{mandatory = dtree:drop(MsgSeqNo, Mand)});
-
-handle_cast({confirm, MsgSeqNos, QPid}, State = #ch{unconfirmed = UC}) ->
- {MXs, UC1} = dtree:take(MsgSeqNos, QPid, UC),
- %% NB: don't call noreply/1 since we don't want to send confirms.
- noreply_coalesce(record_confirms(MXs, State#ch{unconfirmed = UC1})).
-
-handle_info({bump_credit, Msg}, State) ->
- %% A rabbit_amqqueue_process is granting credit to our channel. If
- %% our channel was being blocked by this process, and no other
- %% process is blocking our channel, then this channel will be
- %% unblocked. This means that any credit that was deferred will be
- %% sent to rabbit_reader processs that might be blocked by this
- %% particular channel.
- credit_flow:handle_bump_msg(Msg),
- noreply(State);
-
-handle_info(timeout, State) ->
- noreply(State);
-
-handle_info(emit_stats, State) ->
- emit_stats(State),
- State1 = rabbit_event:reset_stats_timer(State, #ch.stats_timer),
- %% NB: don't call noreply/1 since we don't want to kick off the
- %% stats timer.
- {noreply, send_confirms(State1), hibernate};
-
-handle_info({'DOWN', _MRef, process, QPid, Reason}, State) ->
- State1 = handle_publishing_queue_down(QPid, Reason, State),
- State3 = handle_consuming_queue_down(QPid, State1),
- State4 = handle_delivering_queue_down(QPid, State3),
- %% A rabbit_amqqueue_process has died. If our channel was being
- %% blocked by this process, and no other process is blocking our
- %% channel, then this channel will be unblocked. This means that
- %% any credit that was deferred will be sent to the rabbit_reader
- %% processs that might be blocked by this particular channel.
- credit_flow:peer_down(QPid),
- #ch{queue_names = QNames, queue_monitors = QMons} = State4,
- case dict:find(QPid, QNames) of
- {ok, QName} -> erase_queue_stats(QName);
- error -> ok
- end,
- noreply(State4#ch{queue_names = dict:erase(QPid, QNames),
- queue_monitors = pmon:erase(QPid, QMons)});
-
-handle_info({'EXIT', _Pid, Reason}, State) ->
- {stop, Reason, State}.
-
-handle_pre_hibernate(State) ->
- ok = clear_permission_cache(),
- rabbit_event:if_enabled(
- State, #ch.stats_timer,
- fun () -> emit_stats(State, [{idle_since, now()}]) end),
- {hibernate, rabbit_event:stop_stats_timer(State, #ch.stats_timer)}.
-
-terminate(Reason, State) ->
- {Res, _State1} = notify_queues(State),
- case Reason of
- normal -> ok = Res;
- shutdown -> ok = Res;
- {shutdown, _Term} -> ok = Res;
- _ -> ok
- end,
- pg_local:leave(rabbit_channels, self()),
- rabbit_event:if_enabled(State, #ch.stats_timer,
- fun() -> emit_stats(State) end),
- rabbit_event:notify(channel_closed, [{pid, self()}]).
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-format_message_queue(Opt, MQ) -> rabbit_misc:format_message_queue(Opt, MQ).
-
-%%---------------------------------------------------------------------------
-
-log(Level, Fmt, Args) -> rabbit_log:log(channel, Level, Fmt, Args).
-
-reply(Reply, NewState) -> {reply, Reply, next_state(NewState), hibernate}.
-
-noreply(NewState) -> {noreply, next_state(NewState), hibernate}.
-
-next_state(State) -> ensure_stats_timer(send_confirms(State)).
-
-noreply_coalesce(State = #ch{confirmed = C}) ->
- Timeout = case C of [] -> hibernate; _ -> 0 end,
- {noreply, ensure_stats_timer(State), Timeout}.
-
-ensure_stats_timer(State) ->
- rabbit_event:ensure_stats_timer(State, #ch.stats_timer, emit_stats).
-
-return_ok(State, true, _Msg) -> {noreply, State};
-return_ok(State, false, Msg) -> {reply, Msg, State}.
-
-ok_msg(true, _Msg) -> undefined;
-ok_msg(false, Msg) -> Msg.
-
-send(_Command, #ch{state = closing}) ->
- ok;
-send(Command, #ch{writer_pid = WriterPid}) ->
- ok = rabbit_writer:send_command(WriterPid, Command).
-
-handle_exception(Reason, State = #ch{protocol = Protocol,
- channel = Channel,
- writer_pid = WriterPid,
- reader_pid = ReaderPid,
- conn_pid = ConnPid,
- conn_name = ConnName,
- virtual_host = VHost,
- user = User}) ->
- %% something bad's happened: notify_queues may not be 'ok'
- {_Result, State1} = notify_queues(State),
- case rabbit_binary_generator:map_exception(Channel, Reason, Protocol) of
- {Channel, CloseMethod} ->
- log(error, "Channel error on connection ~p (~s, vhost: '~s',"
- " user: '~s'), channel ~p:~n~p~n",
- [ConnPid, ConnName, VHost, User#user.username,
- Channel, Reason]),
- ok = rabbit_writer:send_command(WriterPid, CloseMethod),
- {noreply, State1};
- {0, _} ->
- ReaderPid ! {channel_exit, Channel, Reason},
- {stop, normal, State1}
- end.
-
--ifdef(use_specs).
--spec(precondition_failed/1 :: (string()) -> no_return()).
--endif.
-precondition_failed(Format) -> precondition_failed(Format, []).
-
--ifdef(use_specs).
--spec(precondition_failed/2 :: (string(), [any()]) -> no_return()).
--endif.
-precondition_failed(Format, Params) ->
- rabbit_misc:protocol_error(precondition_failed, Format, Params).
-
-return_queue_declare_ok(#resource{name = ActualName},
- NoWait, MessageCount, ConsumerCount, State) ->
- return_ok(State#ch{most_recently_declared_queue = ActualName}, NoWait,
- #'queue.declare_ok'{queue = ActualName,
- message_count = MessageCount,
- consumer_count = ConsumerCount}).
-
-check_resource_access(User, Resource, Perm) ->
- V = {Resource, Perm},
- Cache = case get(permission_cache) of
- undefined -> [];
- Other -> Other
- end,
- case lists:member(V, Cache) of
- true -> ok;
- false -> ok = rabbit_access_control:check_resource_access(
- User, Resource, Perm),
- CacheTail = lists:sublist(Cache, ?MAX_PERMISSION_CACHE_SIZE-1),
- put(permission_cache, [V | CacheTail])
- end.
-
-clear_permission_cache() -> erase(permission_cache),
- ok.
-
-check_configure_permitted(Resource, #ch{user = User}) ->
- check_resource_access(User, Resource, configure).
-
-check_write_permitted(Resource, #ch{user = User}) ->
- check_resource_access(User, Resource, write).
-
-check_read_permitted(Resource, #ch{user = User}) ->
- check_resource_access(User, Resource, read).
-
-check_user_id_header(#'P_basic'{user_id = undefined}, _) ->
- ok;
-check_user_id_header(#'P_basic'{user_id = Username},
- #ch{user = #user{username = Username}}) ->
- ok;
-check_user_id_header(
- #'P_basic'{}, #ch{user = #user{authz_backends =
- [{rabbit_auth_backend_dummy, _}]}}) ->
- ok;
-check_user_id_header(#'P_basic'{user_id = Claimed},
- #ch{user = #user{username = Actual,
- tags = Tags}}) ->
- case lists:member(impersonator, Tags) of
- true -> ok;
- false -> precondition_failed(
- "user_id property set to '~s' but authenticated user was "
- "'~s'", [Claimed, Actual])
- end.
-
-check_expiration_header(Props) ->
- case rabbit_basic:parse_expiration(Props) of
- {ok, _} -> ok;
- {error, E} -> precondition_failed("invalid expiration '~s': ~p",
- [Props#'P_basic'.expiration, E])
- end.
-
-check_internal_exchange(#exchange{name = Name, internal = true}) ->
- rabbit_misc:protocol_error(access_refused,
- "cannot publish to internal ~s",
- [rabbit_misc:rs(Name)]);
-check_internal_exchange(_) ->
- ok.
-
-check_msg_size(Content) ->
- Size = rabbit_basic:maybe_gc_large_msg(Content),
- case Size > ?MAX_MSG_SIZE of
- true -> precondition_failed("message size ~B larger than max size ~B",
- [Size, ?MAX_MSG_SIZE]);
- false -> ok
- end.
-
-qbin_to_resource(QueueNameBin, State) ->
- name_to_resource(queue, QueueNameBin, State).
-
-name_to_resource(Type, NameBin, #ch{virtual_host = VHostPath}) ->
- rabbit_misc:r(VHostPath, Type, NameBin).
-
-expand_queue_name_shortcut(<<>>, #ch{most_recently_declared_queue = <<>>}) ->
- rabbit_misc:protocol_error(not_found, "no previously declared queue", []);
-expand_queue_name_shortcut(<<>>, #ch{most_recently_declared_queue = MRDQ}) ->
- MRDQ;
-expand_queue_name_shortcut(QueueNameBin, _) ->
- QueueNameBin.
-
-expand_routing_key_shortcut(<<>>, <<>>,
- #ch{most_recently_declared_queue = <<>>}) ->
- rabbit_misc:protocol_error(not_found, "no previously declared queue", []);
-expand_routing_key_shortcut(<<>>, <<>>,
- #ch{most_recently_declared_queue = MRDQ}) ->
- MRDQ;
-expand_routing_key_shortcut(_QueueNameBin, RoutingKey, _State) ->
- RoutingKey.
-
-expand_shortcuts(#'basic.get' {queue = Q} = M, State) ->
- M#'basic.get' {queue = expand_queue_name_shortcut(Q, State)};
-expand_shortcuts(#'basic.consume'{queue = Q} = M, State) ->
- M#'basic.consume'{queue = expand_queue_name_shortcut(Q, State)};
-expand_shortcuts(#'queue.delete' {queue = Q} = M, State) ->
- M#'queue.delete' {queue = expand_queue_name_shortcut(Q, State)};
-expand_shortcuts(#'queue.purge' {queue = Q} = M, State) ->
- M#'queue.purge' {queue = expand_queue_name_shortcut(Q, State)};
-expand_shortcuts(#'queue.bind' {queue = Q, routing_key = K} = M, State) ->
- M#'queue.bind' {queue = expand_queue_name_shortcut(Q, State),
- routing_key = expand_routing_key_shortcut(Q, K, State)};
-expand_shortcuts(#'queue.unbind' {queue = Q, routing_key = K} = M, State) ->
- M#'queue.unbind' {queue = expand_queue_name_shortcut(Q, State),
- routing_key = expand_routing_key_shortcut(Q, K, State)};
-expand_shortcuts(M, _State) ->
- M.
-
-check_not_default_exchange(#resource{kind = exchange, name = <<"">>}) ->
- rabbit_misc:protocol_error(
- access_refused, "operation not permitted on the default exchange", []);
-check_not_default_exchange(_) ->
- ok.
-
-check_exchange_deletion(XName = #resource{name = <<"amq.", _/binary>>,
- kind = exchange}) ->
- rabbit_misc:protocol_error(
- access_refused, "deletion of system ~s not allowed",
- [rabbit_misc:rs(XName)]);
-check_exchange_deletion(_) ->
- ok.
-
-%% check that an exchange/queue name does not contain the reserved
-%% "amq." prefix.
-%%
-%% As per the AMQP 0-9-1 spec, the exclusion of "amq." prefixed names
-%% only applies on actual creation, and not in the cases where the
-%% entity already exists or passive=true.
-%%
-%% NB: We deliberately do not enforce the other constraints on names
-%% required by the spec.
-check_name(Kind, NameBin = <<"amq.", _/binary>>) ->
- rabbit_misc:protocol_error(
- access_refused,
- "~s name '~s' contains reserved prefix 'amq.*'",[Kind, NameBin]);
-check_name(_Kind, NameBin) ->
- NameBin.
-
-maybe_set_fast_reply_to(
- C = #content{properties = P = #'P_basic'{reply_to =
- <<"amq.rabbitmq.reply-to">>}},
- #ch{reply_consumer = ReplyConsumer}) ->
- case ReplyConsumer of
- none -> rabbit_misc:protocol_error(
- precondition_failed,
- "fast reply consumer does not exist", []);
- {_, Suf, _K} -> Rep = <<"amq.rabbitmq.reply-to.", Suf/binary>>,
- rabbit_binary_generator:clear_encoded_content(
- C#content{properties = P#'P_basic'{reply_to = Rep}})
- end;
-maybe_set_fast_reply_to(C, _State) ->
- C.
-
-record_confirms([], State) ->
- State;
-record_confirms(MXs, State = #ch{confirmed = C}) ->
- State#ch{confirmed = [MXs | C]}.
-
-handle_method(#'channel.open'{}, _, State = #ch{state = starting}) ->
- %% Don't leave "starting" as the state for 5s. TODO is this TRTTD?
- State1 = State#ch{state = running},
- rabbit_event:if_enabled(State1, #ch.stats_timer,
- fun() -> emit_stats(State1) end),
- {reply, #'channel.open_ok'{}, State1};
-
-handle_method(#'channel.open'{}, _, _State) ->
- rabbit_misc:protocol_error(
- command_invalid, "second 'channel.open' seen", []);
-
-handle_method(_Method, _, #ch{state = starting}) ->
- rabbit_misc:protocol_error(channel_error, "expected 'channel.open'", []);
-
-handle_method(#'channel.close_ok'{}, _, #ch{state = closing}) ->
- stop;
-
-handle_method(#'channel.close'{}, _, State = #ch{writer_pid = WriterPid,
- state = closing}) ->
- ok = rabbit_writer:send_command(WriterPid, #'channel.close_ok'{}),
- {noreply, State};
-
-handle_method(_Method, _, State = #ch{state = closing}) ->
- {noreply, State};
-
-handle_method(#'channel.close'{}, _, State = #ch{reader_pid = ReaderPid}) ->
- {ok, State1} = notify_queues(State),
- %% We issue the channel.close_ok response after a handshake with
- %% the reader, the other half of which is ready_for_close. That
- %% way the reader forgets about the channel before we send the
- %% response (and this channel process terminates). If we didn't do
- %% that, a channel.open for the same channel number, which a
- %% client is entitled to send as soon as it has received the
- %% close_ok, might be received by the reader before it has seen
- %% the termination and hence be sent to the old, now dead/dying
- %% channel process, instead of a new process, and thus lost.
- ReaderPid ! {channel_closing, self()},
- {noreply, State1};
-
-%% Even though the spec prohibits the client from sending commands
-%% while waiting for the reply to a synchronous command, we generally
-%% do allow this...except in the case of a pending tx.commit, where
-%% it could wreak havoc.
-handle_method(_Method, _, #ch{tx = Tx})
- when Tx =:= committing orelse Tx =:= failed ->
- rabbit_misc:protocol_error(
- channel_error, "unexpected command while processing 'tx.commit'", []);
-
-handle_method(#'access.request'{},_, State) ->
- {reply, #'access.request_ok'{ticket = 1}, State};
-
-handle_method(#'basic.publish'{immediate = true}, _Content, _State) ->
- rabbit_misc:protocol_error(not_implemented, "immediate=true", []);
-
-handle_method(#'basic.publish'{exchange = ExchangeNameBin,
- routing_key = RoutingKey,
- mandatory = Mandatory},
- Content, State = #ch{virtual_host = VHostPath,
- tx = Tx,
- channel = ChannelNum,
- confirm_enabled = ConfirmEnabled,
- trace_state = TraceState,
- user = #user{username = Username},
- conn_name = ConnName,
- delivery_flow = Flow}) ->
- check_msg_size(Content),
- ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin),
- check_write_permitted(ExchangeName, State),
- Exchange = rabbit_exchange:lookup_or_die(ExchangeName),
- check_internal_exchange(Exchange),
- %% We decode the content's properties here because we're almost
- %% certain to want to look at delivery-mode and priority.
- DecodedContent = #content {properties = Props} =
- maybe_set_fast_reply_to(
- rabbit_binary_parser:ensure_content_decoded(Content), State),
- check_user_id_header(Props, State),
- check_expiration_header(Props),
- DoConfirm = Tx =/= none orelse ConfirmEnabled,
- {MsgSeqNo, State1} =
- case DoConfirm orelse Mandatory of
- false -> {undefined, State};
- true -> SeqNo = State#ch.publish_seqno,
- {SeqNo, State#ch{publish_seqno = SeqNo + 1}}
- end,
- case rabbit_basic:message(ExchangeName, RoutingKey, DecodedContent) of
- {ok, Message} ->
- Delivery = rabbit_basic:delivery(
- Mandatory, DoConfirm, Message, MsgSeqNo),
- QNames = rabbit_exchange:route(Exchange, Delivery),
- rabbit_trace:tap_in(Message, QNames, ConnName, ChannelNum,
- Username, TraceState),
- DQ = {Delivery#delivery{flow = Flow}, QNames},
- {noreply, case Tx of
- none -> deliver_to_queues(DQ, State1);
- {Msgs, Acks} -> Msgs1 = queue:in(DQ, Msgs),
- State1#ch{tx = {Msgs1, Acks}}
- end};
- {error, Reason} ->
- precondition_failed("invalid message: ~p", [Reason])
- end;
-
-handle_method(#'basic.nack'{delivery_tag = DeliveryTag,
- multiple = Multiple,
- requeue = Requeue}, _, State) ->
- reject(DeliveryTag, Requeue, Multiple, State);
-
-handle_method(#'basic.ack'{delivery_tag = DeliveryTag,
- multiple = Multiple},
- _, State = #ch{unacked_message_q = UAMQ, tx = Tx}) ->
- {Acked, Remaining} = collect_acks(UAMQ, DeliveryTag, Multiple),
- State1 = State#ch{unacked_message_q = Remaining},
- {noreply, case Tx of
- none -> ack(Acked, State1),
- State1;
- {Msgs, Acks} -> Acks1 = ack_cons(ack, Acked, Acks),
- State1#ch{tx = {Msgs, Acks1}}
- end};
-
-handle_method(#'basic.get'{queue = QueueNameBin, no_ack = NoAck},
- _, State = #ch{writer_pid = WriterPid,
- conn_pid = ConnPid,
- limiter = Limiter,
- next_tag = DeliveryTag}) ->
- QueueName = qbin_to_resource(QueueNameBin, State),
- check_read_permitted(QueueName, State),
- case rabbit_amqqueue:with_exclusive_access_or_die(
- QueueName, ConnPid,
- fun (Q) -> rabbit_amqqueue:basic_get(
- Q, self(), NoAck, rabbit_limiter:pid(Limiter))
- end) of
- {ok, MessageCount,
- Msg = {QName, QPid, _MsgId, Redelivered,
- #basic_message{exchange_name = ExchangeName,
- routing_keys = [RoutingKey | _CcRoutes],
- content = Content}}} ->
- ok = rabbit_writer:send_command(
- WriterPid,
- #'basic.get_ok'{delivery_tag = DeliveryTag,
- redelivered = Redelivered,
- exchange = ExchangeName#resource.name,
- routing_key = RoutingKey,
- message_count = MessageCount},
- Content),
- State1 = monitor_delivering_queue(NoAck, QPid, QName, State),
- {noreply, record_sent(none, not(NoAck), Msg, State1)};
- empty ->
- {reply, #'basic.get_empty'{}, State}
- end;
-
-handle_method(#'basic.consume'{queue = <<"amq.rabbitmq.reply-to">>,
- consumer_tag = CTag0,
- no_ack = NoAck,
- nowait = NoWait},
- _, State = #ch{reply_consumer = ReplyConsumer,
- consumer_mapping = ConsumerMapping}) ->
- case dict:find(CTag0, ConsumerMapping) of
- error ->
- case {ReplyConsumer, NoAck} of
- {none, true} ->
- CTag = case CTag0 of
- <<>> -> rabbit_guid:binary(
- rabbit_guid:gen_secure(), "amq.ctag");
- Other -> Other
- end,
- %% Precalculate both suffix and key; base64 encoding is
- %% expensive
- Key = base64:encode(rabbit_guid:gen_secure()),
- PidEnc = base64:encode(term_to_binary(self())),
- Suffix = <<PidEnc/binary, ".", Key/binary>>,
- Consumer = {CTag, Suffix, binary_to_list(Key)},
- State1 = State#ch{reply_consumer = Consumer},
- case NoWait of
- true -> {noreply, State1};
- false -> Rep = #'basic.consume_ok'{consumer_tag = CTag},
- {reply, Rep, State1}
- end;
- {_, false} ->
- rabbit_misc:protocol_error(
- precondition_failed,
- "reply consumer cannot acknowledge", []);
- _ ->
- rabbit_misc:protocol_error(
- precondition_failed, "reply consumer already set", [])
- end;
- {ok, _} ->
- %% Attempted reuse of consumer tag.
- rabbit_misc:protocol_error(
- not_allowed, "attempt to reuse consumer tag '~s'", [CTag0])
- end;
-
-handle_method(#'basic.cancel'{consumer_tag = ConsumerTag, nowait = NoWait},
- _, State = #ch{reply_consumer = {ConsumerTag, _, _}}) ->
- State1 = State#ch{reply_consumer = none},
- case NoWait of
- true -> {noreply, State1};
- false -> Rep = #'basic.cancel_ok'{consumer_tag = ConsumerTag},
- {reply, Rep, State1}
- end;
-
-handle_method(#'basic.consume'{queue = QueueNameBin,
- consumer_tag = ConsumerTag,
- no_local = _, % FIXME: implement
- no_ack = NoAck,
- exclusive = ExclusiveConsume,
- nowait = NoWait,
- arguments = Args},
- _, State = #ch{consumer_prefetch = ConsumerPrefetch,
- consumer_mapping = ConsumerMapping}) ->
- case dict:find(ConsumerTag, ConsumerMapping) of
- error ->
- QueueName = qbin_to_resource(QueueNameBin, State),
- check_read_permitted(QueueName, State),
- ActualConsumerTag =
- case ConsumerTag of
- <<>> -> rabbit_guid:binary(rabbit_guid:gen_secure(),
- "amq.ctag");
- Other -> Other
- end,
- case basic_consume(
- QueueName, NoAck, ConsumerPrefetch, ActualConsumerTag,
- ExclusiveConsume, Args, NoWait, State) of
- {ok, State1} ->
- {noreply, State1};
- {error, exclusive_consume_unavailable} ->
- rabbit_misc:protocol_error(
- access_refused, "~s in exclusive use",
- [rabbit_misc:rs(QueueName)])
- end;
- {ok, _} ->
- %% Attempted reuse of consumer tag.
- rabbit_misc:protocol_error(
- not_allowed, "attempt to reuse consumer tag '~s'", [ConsumerTag])
- end;
-
-handle_method(#'basic.cancel'{consumer_tag = ConsumerTag, nowait = NoWait},
- _, State = #ch{consumer_mapping = ConsumerMapping,
- queue_consumers = QCons}) ->
- OkMsg = #'basic.cancel_ok'{consumer_tag = ConsumerTag},
- case dict:find(ConsumerTag, ConsumerMapping) of
- error ->
- %% Spec requires we ignore this situation.
- return_ok(State, NoWait, OkMsg);
- {ok, {Q = #amqqueue{pid = QPid}, _CParams}} ->
- ConsumerMapping1 = dict:erase(ConsumerTag, ConsumerMapping),
- QCons1 =
- case dict:find(QPid, QCons) of
- error -> QCons;
- {ok, CTags} -> CTags1 = gb_sets:delete(ConsumerTag, CTags),
- case gb_sets:is_empty(CTags1) of
- true -> dict:erase(QPid, QCons);
- false -> dict:store(QPid, CTags1, QCons)
- end
- end,
- NewState = State#ch{consumer_mapping = ConsumerMapping1,
- queue_consumers = QCons1},
- %% In order to ensure that no more messages are sent to
- %% the consumer after the cancel_ok has been sent, we get
- %% the queue process to send the cancel_ok on our
- %% behalf. If we were sending the cancel_ok ourselves it
- %% might overtake a message sent previously by the queue.
- case rabbit_misc:with_exit_handler(
- fun () -> {error, not_found} end,
- fun () ->
- rabbit_amqqueue:basic_cancel(
- Q, self(), ConsumerTag, ok_msg(NoWait, OkMsg))
- end) of
- ok ->
- {noreply, NewState};
- {error, not_found} ->
- %% Spec requires we ignore this situation.
- return_ok(NewState, NoWait, OkMsg)
- end
- end;
-
-handle_method(#'basic.qos'{prefetch_size = Size}, _, _State) when Size /= 0 ->
- rabbit_misc:protocol_error(not_implemented,
- "prefetch_size!=0 (~w)", [Size]);
-
-handle_method(#'basic.qos'{global = false,
- prefetch_count = PrefetchCount}, _, State) ->
- {reply, #'basic.qos_ok'{}, State#ch{consumer_prefetch = PrefetchCount}};
-
-handle_method(#'basic.qos'{global = true,
- prefetch_count = 0},
- _, State = #ch{limiter = Limiter}) ->
- Limiter1 = rabbit_limiter:unlimit_prefetch(Limiter),
- {reply, #'basic.qos_ok'{}, State#ch{limiter = Limiter1}};
-
-handle_method(#'basic.qos'{global = true,
- prefetch_count = PrefetchCount},
- _, State = #ch{limiter = Limiter, unacked_message_q = UAMQ}) ->
- %% TODO queue:len(UAMQ) is not strictly right since that counts
- %% unacked messages from basic.get too. Pretty obscure though.
- Limiter1 = rabbit_limiter:limit_prefetch(Limiter,
- PrefetchCount, queue:len(UAMQ)),
- case ((not rabbit_limiter:is_active(Limiter)) andalso
- rabbit_limiter:is_active(Limiter1)) of
- true -> rabbit_amqqueue:activate_limit_all(
- consumer_queues(State#ch.consumer_mapping), self());
- false -> ok
- end,
- {reply, #'basic.qos_ok'{}, State#ch{limiter = Limiter1}};
-
-handle_method(#'basic.recover_async'{requeue = true},
- _, State = #ch{unacked_message_q = UAMQ, limiter = Limiter}) ->
- OkFun = fun () -> ok end,
- UAMQL = queue:to_list(UAMQ),
- foreach_per_queue(
- fun (QPid, MsgIds) ->
- rabbit_misc:with_exit_handler(
- OkFun,
- fun () -> rabbit_amqqueue:requeue(QPid, MsgIds, self()) end)
- end, lists:reverse(UAMQL)),
- ok = notify_limiter(Limiter, UAMQL),
- %% No answer required - basic.recover is the newer, synchronous
- %% variant of this method
- {noreply, State#ch{unacked_message_q = queue:new()}};
-
-handle_method(#'basic.recover_async'{requeue = false}, _, _State) ->
- rabbit_misc:protocol_error(not_implemented, "requeue=false", []);
-
-handle_method(#'basic.recover'{requeue = Requeue}, Content, State) ->
- {noreply, State1} = handle_method(#'basic.recover_async'{requeue = Requeue},
- Content, State),
- {reply, #'basic.recover_ok'{}, State1};
-
-handle_method(#'basic.reject'{delivery_tag = DeliveryTag, requeue = Requeue},
- _, State) ->
- reject(DeliveryTag, Requeue, false, State);
-
-handle_method(#'exchange.declare'{exchange = ExchangeNameBin,
- type = TypeNameBin,
- passive = false,
- durable = Durable,
- auto_delete = AutoDelete,
- internal = Internal,
- nowait = NoWait,
- arguments = Args},
- _, State = #ch{virtual_host = VHostPath}) ->
- CheckedType = rabbit_exchange:check_type(TypeNameBin),
- ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin),
- check_not_default_exchange(ExchangeName),
- check_configure_permitted(ExchangeName, State),
- X = case rabbit_exchange:lookup(ExchangeName) of
- {ok, FoundX} -> FoundX;
- {error, not_found} ->
- check_name('exchange', ExchangeNameBin),
- AeKey = <<"alternate-exchange">>,
- case rabbit_misc:r_arg(VHostPath, exchange, Args, AeKey) of
- undefined -> ok;
- {error, {invalid_type, Type}} ->
- precondition_failed(
- "invalid type '~s' for arg '~s' in ~s",
- [Type, AeKey, rabbit_misc:rs(ExchangeName)]);
- AName -> check_read_permitted(ExchangeName, State),
- check_write_permitted(AName, State),
- ok
- end,
- rabbit_exchange:declare(ExchangeName,
- CheckedType,
- Durable,
- AutoDelete,
- Internal,
- Args)
- end,
- ok = rabbit_exchange:assert_equivalence(X, CheckedType, Durable,
- AutoDelete, Internal, Args),
- return_ok(State, NoWait, #'exchange.declare_ok'{});
-
-handle_method(#'exchange.declare'{exchange = ExchangeNameBin,
- passive = true,
- nowait = NoWait},
- _, State = #ch{virtual_host = VHostPath}) ->
- ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin),
- check_not_default_exchange(ExchangeName),
- _ = rabbit_exchange:lookup_or_die(ExchangeName),
- return_ok(State, NoWait, #'exchange.declare_ok'{});
-
-handle_method(#'exchange.delete'{exchange = ExchangeNameBin,
- if_unused = IfUnused,
- nowait = NoWait},
- _, State = #ch{virtual_host = VHostPath}) ->
- ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin),
- check_not_default_exchange(ExchangeName),
- check_exchange_deletion(ExchangeName),
- check_configure_permitted(ExchangeName, State),
- case rabbit_exchange:delete(ExchangeName, IfUnused) of
- {error, not_found} ->
- return_ok(State, NoWait, #'exchange.delete_ok'{});
- {error, in_use} ->
- precondition_failed("~s in use", [rabbit_misc:rs(ExchangeName)]);
- ok ->
- return_ok(State, NoWait, #'exchange.delete_ok'{})
- end;
-
-handle_method(#'exchange.bind'{destination = DestinationNameBin,
- source = SourceNameBin,
- routing_key = RoutingKey,
- nowait = NoWait,
- arguments = Arguments}, _, State) ->
- binding_action(fun rabbit_binding:add/2,
- SourceNameBin, exchange, DestinationNameBin, RoutingKey,
- Arguments, #'exchange.bind_ok'{}, NoWait, State);
-
-handle_method(#'exchange.unbind'{destination = DestinationNameBin,
- source = SourceNameBin,
- routing_key = RoutingKey,
- nowait = NoWait,
- arguments = Arguments}, _, State) ->
- binding_action(fun rabbit_binding:remove/2,
- SourceNameBin, exchange, DestinationNameBin, RoutingKey,
- Arguments, #'exchange.unbind_ok'{}, NoWait, State);
-
-%% Note that all declares to these are effectively passive. If it
-%% exists it by definition has one consumer.
-handle_method(#'queue.declare'{queue = <<"amq.rabbitmq.reply-to",
- _/binary>> = QueueNameBin,
- nowait = NoWait}, _,
- State = #ch{virtual_host = VHost}) ->
- QueueName = rabbit_misc:r(VHost, queue, QueueNameBin),
- case declare_fast_reply_to(QueueNameBin) of
- exists -> return_queue_declare_ok(QueueName, NoWait, 0, 1, State);
- not_found -> rabbit_misc:not_found(QueueName)
- end;
-
-handle_method(#'queue.declare'{queue = QueueNameBin,
- passive = false,
- durable = DurableDeclare,
- exclusive = ExclusiveDeclare,
- auto_delete = AutoDelete,
- nowait = NoWait,
- arguments = Args} = Declare,
- _, State = #ch{virtual_host = VHostPath,
- conn_pid = ConnPid,
- queue_collector_pid = CollectorPid}) ->
- Owner = case ExclusiveDeclare of
- true -> ConnPid;
- false -> none
- end,
- Durable = DurableDeclare andalso not ExclusiveDeclare,
- ActualNameBin = case QueueNameBin of
- <<>> -> rabbit_guid:binary(rabbit_guid:gen_secure(),
- "amq.gen");
- Other -> check_name('queue', Other)
- end,
- QueueName = rabbit_misc:r(VHostPath, queue, ActualNameBin),
- check_configure_permitted(QueueName, State),
- case rabbit_amqqueue:with(
- QueueName,
- fun (Q) -> ok = rabbit_amqqueue:assert_equivalence(
- Q, Durable, AutoDelete, Args, Owner),
- maybe_stat(NoWait, Q)
- end) of
- {ok, MessageCount, ConsumerCount} ->
- return_queue_declare_ok(QueueName, NoWait, MessageCount,
- ConsumerCount, State);
- {error, not_found} ->
- DlxKey = <<"x-dead-letter-exchange">>,
- case rabbit_misc:r_arg(VHostPath, exchange, Args, DlxKey) of
- undefined ->
- ok;
- {error, {invalid_type, Type}} ->
- precondition_failed(
- "invalid type '~s' for arg '~s' in ~s",
- [Type, DlxKey, rabbit_misc:rs(QueueName)]);
- DLX ->
- check_read_permitted(QueueName, State),
- check_write_permitted(DLX, State),
- ok
- end,
- case rabbit_amqqueue:declare(QueueName, Durable, AutoDelete,
- Args, Owner) of
- {new, #amqqueue{pid = QPid}} ->
- %% We need to notify the reader within the channel
- %% process so that we can be sure there are no
- %% outstanding exclusive queues being declared as
- %% the connection shuts down.
- ok = case Owner of
- none -> ok;
- _ -> rabbit_queue_collector:register(
- CollectorPid, QPid)
- end,
- return_queue_declare_ok(QueueName, NoWait, 0, 0, State);
- {existing, _Q} ->
- %% must have been created between the stat and the
- %% declare. Loop around again.
- handle_method(Declare, none, State);
- {absent, Q, Reason} ->
- rabbit_misc:absent(Q, Reason);
- {owner_died, _Q} ->
- %% Presumably our own days are numbered since the
- %% connection has died. Pretend the queue exists though,
- %% just so nothing fails.
- return_queue_declare_ok(QueueName, NoWait, 0, 0, State)
- end;
- {error, {absent, Q, Reason}} ->
- rabbit_misc:absent(Q, Reason)
- end;
-
-handle_method(#'queue.declare'{queue = QueueNameBin,
- passive = true,
- nowait = NoWait},
- _, State = #ch{virtual_host = VHostPath,
- conn_pid = ConnPid}) ->
- QueueName = rabbit_misc:r(VHostPath, queue, QueueNameBin),
- {{ok, MessageCount, ConsumerCount}, #amqqueue{} = Q} =
- rabbit_amqqueue:with_or_die(
- QueueName, fun (Q) -> {maybe_stat(NoWait, Q), Q} end),
- ok = rabbit_amqqueue:check_exclusive_access(Q, ConnPid),
- return_queue_declare_ok(QueueName, NoWait, MessageCount, ConsumerCount,
- State);
-
-handle_method(#'queue.delete'{queue = QueueNameBin,
- if_unused = IfUnused,
- if_empty = IfEmpty,
- nowait = NoWait},
- _, State = #ch{conn_pid = ConnPid}) ->
- QueueName = qbin_to_resource(QueueNameBin, State),
- check_configure_permitted(QueueName, State),
- case rabbit_amqqueue:with(
- QueueName,
- fun (Q) ->
- rabbit_amqqueue:check_exclusive_access(Q, ConnPid),
- rabbit_amqqueue:delete(Q, IfUnused, IfEmpty)
- end,
- fun (not_found) -> {ok, 0};
- ({absent, Q, crashed}) -> rabbit_amqqueue:delete_crashed(Q),
- {ok, 0};
- ({absent, Q, Reason}) -> rabbit_misc:absent(Q, Reason)
- end) of
- {error, in_use} ->
- precondition_failed("~s in use", [rabbit_misc:rs(QueueName)]);
- {error, not_empty} ->
- precondition_failed("~s not empty", [rabbit_misc:rs(QueueName)]);
- {ok, PurgedMessageCount} ->
- return_ok(State, NoWait,
- #'queue.delete_ok'{message_count = PurgedMessageCount})
- end;
-
-handle_method(#'queue.bind'{queue = QueueNameBin,
- exchange = ExchangeNameBin,
- routing_key = RoutingKey,
- nowait = NoWait,
- arguments = Arguments}, _, State) ->
- binding_action(fun rabbit_binding:add/2,
- ExchangeNameBin, queue, QueueNameBin, RoutingKey, Arguments,
- #'queue.bind_ok'{}, NoWait, State);
-
-handle_method(#'queue.unbind'{queue = QueueNameBin,
- exchange = ExchangeNameBin,
- routing_key = RoutingKey,
- arguments = Arguments}, _, State) ->
- binding_action(fun rabbit_binding:remove/2,
- ExchangeNameBin, queue, QueueNameBin, RoutingKey, Arguments,
- #'queue.unbind_ok'{}, false, State);
-
-handle_method(#'queue.purge'{queue = QueueNameBin, nowait = NoWait},
- _, State = #ch{conn_pid = ConnPid}) ->
- QueueName = qbin_to_resource(QueueNameBin, State),
- check_read_permitted(QueueName, State),
- {ok, PurgedMessageCount} = rabbit_amqqueue:with_exclusive_access_or_die(
- QueueName, ConnPid,
- fun (Q) -> rabbit_amqqueue:purge(Q) end),
- return_ok(State, NoWait,
- #'queue.purge_ok'{message_count = PurgedMessageCount});
-
-handle_method(#'tx.select'{}, _, #ch{confirm_enabled = true}) ->
- precondition_failed("cannot switch from confirm to tx mode");
-
-handle_method(#'tx.select'{}, _, State = #ch{tx = none}) ->
- {reply, #'tx.select_ok'{}, State#ch{tx = new_tx()}};
-
-handle_method(#'tx.select'{}, _, State) ->
- {reply, #'tx.select_ok'{}, State};
-
-handle_method(#'tx.commit'{}, _, #ch{tx = none}) ->
- precondition_failed("channel is not transactional");
-
-handle_method(#'tx.commit'{}, _, State = #ch{tx = {Msgs, Acks},
- limiter = Limiter}) ->
- State1 = rabbit_misc:queue_fold(fun deliver_to_queues/2, State, Msgs),
- Rev = fun (X) -> lists:reverse(lists:sort(X)) end,
- lists:foreach(fun ({ack, A}) -> ack(Rev(A), State1);
- ({Requeue, A}) -> reject(Requeue, Rev(A), Limiter)
- end, lists:reverse(Acks)),
- {noreply, maybe_complete_tx(State1#ch{tx = committing})};
-
-handle_method(#'tx.rollback'{}, _, #ch{tx = none}) ->
- precondition_failed("channel is not transactional");
-
-handle_method(#'tx.rollback'{}, _, State = #ch{unacked_message_q = UAMQ,
- tx = {_Msgs, Acks}}) ->
- AcksL = lists:append(lists:reverse([lists:reverse(L) || {_, L} <- Acks])),
- UAMQ1 = queue:from_list(lists:usort(AcksL ++ queue:to_list(UAMQ))),
- {reply, #'tx.rollback_ok'{}, State#ch{unacked_message_q = UAMQ1,
- tx = new_tx()}};
-
-handle_method(#'confirm.select'{}, _, #ch{tx = {_, _}}) ->
- precondition_failed("cannot switch from tx to confirm mode");
-
-handle_method(#'confirm.select'{nowait = NoWait}, _, State) ->
- return_ok(State#ch{confirm_enabled = true},
- NoWait, #'confirm.select_ok'{});
-
-handle_method(#'channel.flow'{active = true}, _, State) ->
- {reply, #'channel.flow_ok'{active = true}, State};
-
-handle_method(#'channel.flow'{active = false}, _, _State) ->
- rabbit_misc:protocol_error(not_implemented, "active=false", []);
-
-handle_method(#'basic.credit'{consumer_tag = CTag,
- credit = Credit,
- drain = Drain},
- _, State = #ch{consumer_mapping = Consumers}) ->
- case dict:find(CTag, Consumers) of
- {ok, {Q, _CParams}} -> ok = rabbit_amqqueue:credit(
- Q, self(), CTag, Credit, Drain),
- {noreply, State};
- error -> precondition_failed(
- "unknown consumer tag '~s'", [CTag])
- end;
-
-handle_method(_MethodRecord, _Content, _State) ->
- rabbit_misc:protocol_error(
- command_invalid, "unimplemented method", []).
-
-%%----------------------------------------------------------------------------
-
-%% We get the queue process to send the consume_ok on our behalf. This
-%% is for symmetry with basic.cancel - see the comment in that method
-%% for why.
-basic_consume(QueueName, NoAck, ConsumerPrefetch, ActualConsumerTag,
- ExclusiveConsume, Args, NoWait,
- State = #ch{conn_pid = ConnPid,
- limiter = Limiter,
- consumer_mapping = ConsumerMapping}) ->
- case rabbit_amqqueue:with_exclusive_access_or_die(
- QueueName, ConnPid,
- fun (Q) ->
- {rabbit_amqqueue:basic_consume(
- Q, NoAck, self(),
- rabbit_limiter:pid(Limiter),
- rabbit_limiter:is_active(Limiter),
- ConsumerPrefetch, ActualConsumerTag,
- ExclusiveConsume, Args,
- ok_msg(NoWait, #'basic.consume_ok'{
- consumer_tag = ActualConsumerTag})),
- Q}
- end) of
- {ok, Q = #amqqueue{pid = QPid, name = QName}} ->
- CM1 = dict:store(
- ActualConsumerTag,
- {Q, {NoAck, ConsumerPrefetch, ExclusiveConsume, Args}},
- ConsumerMapping),
- State1 = monitor_delivering_queue(
- NoAck, QPid, QName,
- State#ch{consumer_mapping = CM1}),
- {ok, case NoWait of
- true -> consumer_monitor(ActualConsumerTag, State1);
- false -> State1
- end};
- {{error, exclusive_consume_unavailable} = E, _Q} ->
- E
- end.
-
-maybe_stat(false, Q) -> rabbit_amqqueue:stat(Q);
-maybe_stat(true, _Q) -> {ok, 0, 0}.
-
-consumer_monitor(ConsumerTag,
- State = #ch{consumer_mapping = ConsumerMapping,
- queue_monitors = QMons,
- queue_consumers = QCons}) ->
- {#amqqueue{pid = QPid}, _CParams} =
- dict:fetch(ConsumerTag, ConsumerMapping),
- QCons1 = dict:update(QPid, fun (CTags) ->
- gb_sets:insert(ConsumerTag, CTags)
- end,
- gb_sets:singleton(ConsumerTag), QCons),
- State#ch{queue_monitors = pmon:monitor(QPid, QMons),
- queue_consumers = QCons1}.
-
-monitor_delivering_queue(NoAck, QPid, QName,
- State = #ch{queue_names = QNames,
- queue_monitors = QMons,
- delivering_queues = DQ}) ->
- State#ch{queue_names = dict:store(QPid, QName, QNames),
- queue_monitors = pmon:monitor(QPid, QMons),
- delivering_queues = case NoAck of
- true -> DQ;
- false -> sets:add_element(QPid, DQ)
- end}.
-
-handle_publishing_queue_down(QPid, Reason, State = #ch{unconfirmed = UC,
- mandatory = Mand}) ->
- {MMsgs, Mand1} = dtree:take(QPid, Mand),
- [basic_return(Msg, State, no_route) || {_, Msg} <- MMsgs],
- State1 = State#ch{mandatory = Mand1},
- case rabbit_misc:is_abnormal_exit(Reason) of
- true -> {MXs, UC1} = dtree:take_all(QPid, UC),
- send_nacks(MXs, State1#ch{unconfirmed = UC1});
- false -> {MXs, UC1} = dtree:take(QPid, UC),
- record_confirms(MXs, State1#ch{unconfirmed = UC1})
-
- end.
-
-handle_consuming_queue_down(QPid, State = #ch{queue_consumers = QCons,
- queue_names = QNames}) ->
- ConsumerTags = case dict:find(QPid, QCons) of
- error -> gb_sets:new();
- {ok, CTags} -> CTags
- end,
- gb_sets:fold(
- fun (CTag, StateN = #ch{consumer_mapping = CMap}) ->
- QName = dict:fetch(QPid, QNames),
- case queue_down_consumer_action(CTag, CMap) of
- remove ->
- cancel_consumer(CTag, QName, StateN);
- {recover, {NoAck, ConsumerPrefetch, Exclusive, Args}} ->
- case catch basic_consume( %% [0]
- QName, NoAck, ConsumerPrefetch, CTag,
- Exclusive, Args, true, StateN) of
- {ok, StateN1} -> StateN1;
- _ -> cancel_consumer(CTag, QName, StateN)
- end
- end
- end, State#ch{queue_consumers = dict:erase(QPid, QCons)}, ConsumerTags).
-
-%% [0] There is a slight danger here that if a queue is deleted and
-%% then recreated again the reconsume will succeed even though it was
-%% not an HA failover. But the likelihood is not great and most users
-%% are unlikely to care.
-
-cancel_consumer(CTag, QName, State = #ch{capabilities = Capabilities,
- consumer_mapping = CMap}) ->
- case rabbit_misc:table_lookup(
- Capabilities, <<"consumer_cancel_notify">>) of
- {bool, true} -> ok = send(#'basic.cancel'{consumer_tag = CTag,
- nowait = true}, State);
- _ -> ok
- end,
- rabbit_event:notify(consumer_deleted, [{consumer_tag, CTag},
- {channel, self()},
- {queue, QName}]),
- State#ch{consumer_mapping = dict:erase(CTag, CMap)}.
-
-queue_down_consumer_action(CTag, CMap) ->
- {_, {_, _, _, Args} = ConsumeSpec} = dict:fetch(CTag, CMap),
- case rabbit_misc:table_lookup(Args, <<"x-cancel-on-ha-failover">>) of
- {bool, true} -> remove;
- _ -> {recover, ConsumeSpec}
- end.
-
-handle_delivering_queue_down(QPid, State = #ch{delivering_queues = DQ}) ->
- State#ch{delivering_queues = sets:del_element(QPid, DQ)}.
-
-binding_action(Fun, ExchangeNameBin, DestinationType, DestinationNameBin,
- RoutingKey, Arguments, ReturnMethod, NoWait,
- State = #ch{virtual_host = VHostPath,
- conn_pid = ConnPid }) ->
- DestinationName = name_to_resource(DestinationType, DestinationNameBin, State),
- check_write_permitted(DestinationName, State),
- ExchangeName = rabbit_misc:r(VHostPath, exchange, ExchangeNameBin),
- [check_not_default_exchange(N) || N <- [DestinationName, ExchangeName]],
- check_read_permitted(ExchangeName, State),
- case Fun(#binding{source = ExchangeName,
- destination = DestinationName,
- key = RoutingKey,
- args = Arguments},
- fun (_X, Q = #amqqueue{}) ->
- try rabbit_amqqueue:check_exclusive_access(Q, ConnPid)
- catch exit:Reason -> {error, Reason}
- end;
- (_X, #exchange{}) ->
- ok
- end) of
- {error, {resources_missing, [{not_found, Name} | _]}} ->
- rabbit_misc:not_found(Name);
- {error, {resources_missing, [{absent, Q, Reason} | _]}} ->
- rabbit_misc:absent(Q, Reason);
- {error, binding_not_found} ->
- rabbit_misc:protocol_error(
- not_found, "no binding ~s between ~s and ~s",
- [RoutingKey, rabbit_misc:rs(ExchangeName),
- rabbit_misc:rs(DestinationName)]);
- {error, {binding_invalid, Fmt, Args}} ->
- rabbit_misc:protocol_error(precondition_failed, Fmt, Args);
- {error, #amqp_error{} = Error} ->
- rabbit_misc:protocol_error(Error);
- ok -> return_ok(State, NoWait, ReturnMethod)
- end.
-
-basic_return(#basic_message{exchange_name = ExchangeName,
- routing_keys = [RoutingKey | _CcRoutes],
- content = Content},
- State = #ch{protocol = Protocol, writer_pid = WriterPid},
- Reason) ->
- ?INCR_STATS([{exchange_stats, ExchangeName, 1}], return_unroutable, State),
- {_Close, ReplyCode, ReplyText} = Protocol:lookup_amqp_exception(Reason),
- ok = rabbit_writer:send_command(
- WriterPid,
- #'basic.return'{reply_code = ReplyCode,
- reply_text = ReplyText,
- exchange = ExchangeName#resource.name,
- routing_key = RoutingKey},
- Content).
-
-reject(DeliveryTag, Requeue, Multiple,
- State = #ch{unacked_message_q = UAMQ, tx = Tx}) ->
- {Acked, Remaining} = collect_acks(UAMQ, DeliveryTag, Multiple),
- State1 = State#ch{unacked_message_q = Remaining},
- {noreply, case Tx of
- none -> reject(Requeue, Acked, State1#ch.limiter),
- State1;
- {Msgs, Acks} -> Acks1 = ack_cons(Requeue, Acked, Acks),
- State1#ch{tx = {Msgs, Acks1}}
- end}.
-
-%% NB: Acked is in youngest-first order
-reject(Requeue, Acked, Limiter) ->
- foreach_per_queue(
- fun (QPid, MsgIds) ->
- rabbit_amqqueue:reject(QPid, Requeue, MsgIds, self())
- end, Acked),
- ok = notify_limiter(Limiter, Acked).
-
-record_sent(ConsumerTag, AckRequired,
- Msg = {QName, QPid, MsgId, Redelivered, _Message},
- State = #ch{unacked_message_q = UAMQ,
- next_tag = DeliveryTag,
- trace_state = TraceState,
- user = #user{username = Username},
- conn_name = ConnName,
- channel = ChannelNum}) ->
- ?INCR_STATS([{queue_stats, QName, 1}], case {ConsumerTag, AckRequired} of
- {none, true} -> get;
- {none, false} -> get_no_ack;
- {_ , true} -> deliver;
- {_ , false} -> deliver_no_ack
- end, State),
- case Redelivered of
- true -> ?INCR_STATS([{queue_stats, QName, 1}], redeliver, State);
- false -> ok
- end,
- rabbit_trace:tap_out(Msg, ConnName, ChannelNum, Username, TraceState),
- UAMQ1 = case AckRequired of
- true -> queue:in({DeliveryTag, ConsumerTag, {QPid, MsgId}},
- UAMQ);
- false -> UAMQ
- end,
- State#ch{unacked_message_q = UAMQ1, next_tag = DeliveryTag + 1}.
-
-%% NB: returns acks in youngest-first order
-collect_acks(Q, 0, true) ->
- {lists:reverse(queue:to_list(Q)), queue:new()};
-collect_acks(Q, DeliveryTag, Multiple) ->
- collect_acks([], [], Q, DeliveryTag, Multiple).
-
-collect_acks(ToAcc, PrefixAcc, Q, DeliveryTag, Multiple) ->
- case queue:out(Q) of
- {{value, UnackedMsg = {CurrentDeliveryTag, _ConsumerTag, _Msg}},
- QTail} ->
- if CurrentDeliveryTag == DeliveryTag ->
- {[UnackedMsg | ToAcc],
- case PrefixAcc of
- [] -> QTail;
- _ -> queue:join(
- queue:from_list(lists:reverse(PrefixAcc)),
- QTail)
- end};
- Multiple ->
- collect_acks([UnackedMsg | ToAcc], PrefixAcc,
- QTail, DeliveryTag, Multiple);
- true ->
- collect_acks(ToAcc, [UnackedMsg | PrefixAcc],
- QTail, DeliveryTag, Multiple)
- end;
- {empty, _} ->
- precondition_failed("unknown delivery tag ~w", [DeliveryTag])
- end.
-
-%% NB: Acked is in youngest-first order
-ack(Acked, State = #ch{queue_names = QNames}) ->
- foreach_per_queue(
- fun (QPid, MsgIds) ->
- ok = rabbit_amqqueue:ack(QPid, MsgIds, self()),
- ?INCR_STATS(case dict:find(QPid, QNames) of
- {ok, QName} -> Count = length(MsgIds),
- [{queue_stats, QName, Count}];
- error -> []
- end, ack, State)
- end, Acked),
- ok = notify_limiter(State#ch.limiter, Acked).
-
-%% {Msgs, Acks}
-%%
-%% Msgs is a queue.
-%%
-%% Acks looks s.t. like this:
-%% [{false,[5,4]},{true,[3]},{ack,[2,1]}, ...]
-%%
-%% Each element is a pair consisting of a tag and a list of
-%% ack'ed/reject'ed msg ids. The tag is one of 'ack' (to ack), 'true'
-%% (reject w requeue), 'false' (reject w/o requeue). The msg ids, as
-%% well as the list overall, are in "most-recent (generally youngest)
-%% ack first" order.
-new_tx() -> {queue:new(), []}.
-
-notify_queues(State = #ch{state = closing}) ->
- {ok, State};
-notify_queues(State = #ch{consumer_mapping = Consumers,
- delivering_queues = DQ }) ->
- QPids = sets:to_list(
- sets:union(sets:from_list(consumer_queues(Consumers)), DQ)),
- {rabbit_amqqueue:notify_down_all(QPids, self()), State#ch{state = closing}}.
-
-foreach_per_queue(_F, []) ->
- ok;
-foreach_per_queue(F, [{_DTag, _CTag, {QPid, MsgId}}]) -> %% common case
- F(QPid, [MsgId]);
-%% NB: UAL should be in youngest-first order; the tree values will
-%% then be in oldest-first order
-foreach_per_queue(F, UAL) ->
- T = lists:foldl(fun ({_DTag, _CTag, {QPid, MsgId}}, T) ->
- rabbit_misc:gb_trees_cons(QPid, MsgId, T)
- end, gb_trees:empty(), UAL),
- rabbit_misc:gb_trees_foreach(F, T).
-
-consumer_queues(Consumers) ->
- lists:usort([QPid || {_Key, {#amqqueue{pid = QPid}, _CParams}}
- <- dict:to_list(Consumers)]).
-
-%% tell the limiter about the number of acks that have been received
-%% for messages delivered to subscribed consumers, but not acks for
-%% messages sent in a response to a basic.get (identified by their
-%% 'none' consumer tag)
-notify_limiter(Limiter, Acked) ->
- %% optimisation: avoid the potentially expensive 'foldl' in the
- %% common case.
- case rabbit_limiter:is_active(Limiter) of
- false -> ok;
- true -> case lists:foldl(fun ({_, none, _}, Acc) -> Acc;
- ({_, _, _}, Acc) -> Acc + 1
- end, 0, Acked) of
- 0 -> ok;
- Count -> rabbit_limiter:ack(Limiter, Count)
- end
- end.
-
-deliver_to_queues({#delivery{message = #basic_message{exchange_name = XName},
- confirm = false,
- mandatory = false},
- []}, State) -> %% optimisation
- ?INCR_STATS([{exchange_stats, XName, 1}], publish, State),
- State;
-deliver_to_queues({Delivery = #delivery{message = Message = #basic_message{
- exchange_name = XName},
- mandatory = Mandatory,
- confirm = Confirm,
- msg_seq_no = MsgSeqNo},
- DelQNames}, State = #ch{queue_names = QNames,
- queue_monitors = QMons}) ->
- Qs = rabbit_amqqueue:lookup(DelQNames),
- DeliveredQPids = rabbit_amqqueue:deliver(Qs, Delivery),
- %% The pmon:monitor_all/2 monitors all queues to which we
- %% delivered. But we want to monitor even queues we didn't deliver
- %% to, since we need their 'DOWN' messages to clean
- %% queue_names. So we also need to monitor each QPid from
- %% queues. But that only gets the masters (which is fine for
- %% cleaning queue_names), so we need the union of both.
- %%
- %% ...and we need to add even non-delivered queues to queue_names
- %% since alternative algorithms to update queue_names less
- %% frequently would in fact be more expensive in the common case.
- {QNames1, QMons1} =
- lists:foldl(fun (#amqqueue{pid = QPid, name = QName},
- {QNames0, QMons0}) ->
- {case dict:is_key(QPid, QNames0) of
- true -> QNames0;
- false -> dict:store(QPid, QName, QNames0)
- end, pmon:monitor(QPid, QMons0)}
- end, {QNames, pmon:monitor_all(DeliveredQPids, QMons)}, Qs),
- State1 = State#ch{queue_names = QNames1,
- queue_monitors = QMons1},
- %% NB: the order here is important since basic.returns must be
- %% sent before confirms.
- State2 = process_routing_mandatory(Mandatory, DeliveredQPids, MsgSeqNo,
- Message, State1),
- State3 = process_routing_confirm( Confirm, DeliveredQPids, MsgSeqNo,
- XName, State2),
- ?INCR_STATS([{exchange_stats, XName, 1} |
- [{queue_exchange_stats, {QName, XName}, 1} ||
- QPid <- DeliveredQPids,
- {ok, QName} <- [dict:find(QPid, QNames1)]]],
- publish, State3),
- State3.
-
-process_routing_mandatory(false, _, _MsgSeqNo, _Msg, State) ->
- State;
-process_routing_mandatory(true, [], _MsgSeqNo, Msg, State) ->
- ok = basic_return(Msg, State, no_route),
- State;
-process_routing_mandatory(true, QPids, MsgSeqNo, Msg, State) ->
- State#ch{mandatory = dtree:insert(MsgSeqNo, QPids, Msg,
- State#ch.mandatory)}.
-
-process_routing_confirm(false, _, _MsgSeqNo, _XName, State) ->
- State;
-process_routing_confirm(true, [], MsgSeqNo, XName, State) ->
- record_confirms([{MsgSeqNo, XName}], State);
-process_routing_confirm(true, QPids, MsgSeqNo, XName, State) ->
- State#ch{unconfirmed = dtree:insert(MsgSeqNo, QPids, XName,
- State#ch.unconfirmed)}.
-
-send_nacks([], State) ->
- State;
-send_nacks(_MXs, State = #ch{state = closing,
- tx = none}) -> %% optimisation
- State;
-send_nacks(MXs, State = #ch{tx = none}) ->
- coalesce_and_send([MsgSeqNo || {MsgSeqNo, _} <- MXs],
- fun(MsgSeqNo, Multiple) ->
- #'basic.nack'{delivery_tag = MsgSeqNo,
- multiple = Multiple}
- end, State);
-send_nacks(_MXs, State = #ch{state = closing}) -> %% optimisation
- State#ch{tx = failed};
-send_nacks(_, State) ->
- maybe_complete_tx(State#ch{tx = failed}).
-
-send_confirms(State = #ch{tx = none, confirmed = []}) ->
- State;
-send_confirms(State = #ch{tx = none, confirmed = C}) ->
- case rabbit_node_monitor:pause_partition_guard() of
- ok -> MsgSeqNos =
- lists:foldl(
- fun ({MsgSeqNo, XName}, MSNs) ->
- ?INCR_STATS([{exchange_stats, XName, 1}],
- confirm, State),
- [MsgSeqNo | MSNs]
- end, [], lists:append(C)),
- send_confirms(MsgSeqNos, State#ch{confirmed = []});
- pausing -> State
- end;
-send_confirms(State) ->
- case rabbit_node_monitor:pause_partition_guard() of
- ok -> maybe_complete_tx(State);
- pausing -> State
- end.
-
-send_confirms([], State) ->
- State;
-send_confirms(_Cs, State = #ch{state = closing}) -> %% optimisation
- State;
-send_confirms([MsgSeqNo], State) ->
- ok = send(#'basic.ack'{delivery_tag = MsgSeqNo}, State),
- State;
-send_confirms(Cs, State) ->
- coalesce_and_send(Cs, fun(MsgSeqNo, Multiple) ->
- #'basic.ack'{delivery_tag = MsgSeqNo,
- multiple = Multiple}
- end, State).
-
-coalesce_and_send(MsgSeqNos, MkMsgFun, State = #ch{unconfirmed = UC}) ->
- SMsgSeqNos = lists:usort(MsgSeqNos),
- CutOff = case dtree:is_empty(UC) of
- true -> lists:last(SMsgSeqNos) + 1;
- false -> {SeqNo, _XName} = dtree:smallest(UC), SeqNo
- end,
- {Ms, Ss} = lists:splitwith(fun(X) -> X < CutOff end, SMsgSeqNos),
- case Ms of
- [] -> ok;
- _ -> ok = send(MkMsgFun(lists:last(Ms), true), State)
- end,
- [ok = send(MkMsgFun(SeqNo, false), State) || SeqNo <- Ss],
- State.
-
-ack_cons(Tag, Acked, [{Tag, Acks} | L]) -> [{Tag, Acked ++ Acks} | L];
-ack_cons(Tag, Acked, Acks) -> [{Tag, Acked} | Acks].
-
-ack_len(Acks) -> lists:sum([length(L) || {ack, L} <- Acks]).
-
-maybe_complete_tx(State = #ch{tx = {_, _}}) ->
- State;
-maybe_complete_tx(State = #ch{unconfirmed = UC}) ->
- case dtree:is_empty(UC) of
- false -> State;
- true -> complete_tx(State#ch{confirmed = []})
- end.
-
-complete_tx(State = #ch{tx = committing}) ->
- ok = send(#'tx.commit_ok'{}, State),
- State#ch{tx = new_tx()};
-complete_tx(State = #ch{tx = failed}) ->
- {noreply, State1} = handle_exception(
- rabbit_misc:amqp_error(
- precondition_failed, "partial tx completion", [],
- 'tx.commit'),
- State),
- State1#ch{tx = new_tx()}.
-
-infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
-
-i(pid, _) -> self();
-i(connection, #ch{conn_pid = ConnPid}) -> ConnPid;
-i(number, #ch{channel = Channel}) -> Channel;
-i(user, #ch{user = User}) -> User#user.username;
-i(vhost, #ch{virtual_host = VHost}) -> VHost;
-i(transactional, #ch{tx = Tx}) -> Tx =/= none;
-i(confirm, #ch{confirm_enabled = CE}) -> CE;
-i(name, State) -> name(State);
-i(consumer_count, #ch{consumer_mapping = CM}) -> dict:size(CM);
-i(messages_unconfirmed, #ch{unconfirmed = UC}) -> dtree:size(UC);
-i(messages_unacknowledged, #ch{unacked_message_q = UAMQ}) -> queue:len(UAMQ);
-i(messages_uncommitted, #ch{tx = {Msgs, _Acks}}) -> queue:len(Msgs);
-i(messages_uncommitted, #ch{}) -> 0;
-i(acks_uncommitted, #ch{tx = {_Msgs, Acks}}) -> ack_len(Acks);
-i(acks_uncommitted, #ch{}) -> 0;
-i(state, #ch{state = running}) -> credit_flow:state();
-i(state, #ch{state = State}) -> State;
-i(prefetch_count, #ch{consumer_prefetch = C}) -> C;
-i(global_prefetch_count, #ch{limiter = Limiter}) ->
- rabbit_limiter:get_prefetch_limit(Limiter);
-i(Item, _) ->
- throw({bad_argument, Item}).
-
-name(#ch{conn_name = ConnName, channel = Channel}) ->
- list_to_binary(rabbit_misc:format("~s (~p)", [ConnName, Channel])).
-
-incr_stats(Incs, Measure) ->
- [update_measures(Type, Key, Inc, Measure) || {Type, Key, Inc} <- Incs].
-
-update_measures(Type, Key, Inc, Measure) ->
- Measures = case get({Type, Key}) of
- undefined -> [];
- D -> D
- end,
- Cur = case orddict:find(Measure, Measures) of
- error -> 0;
- {ok, C} -> C
- end,
- put({Type, Key}, orddict:store(Measure, Cur + Inc, Measures)).
-
-emit_stats(State) -> emit_stats(State, []).
-
-emit_stats(State, Extra) ->
- Coarse = infos(?STATISTICS_KEYS, State),
- case rabbit_event:stats_level(State, #ch.stats_timer) of
- coarse -> rabbit_event:notify(channel_stats, Extra ++ Coarse);
- fine -> Fine = [{channel_queue_stats,
- [{QName, Stats} ||
- {{queue_stats, QName}, Stats} <- get()]},
- {channel_exchange_stats,
- [{XName, Stats} ||
- {{exchange_stats, XName}, Stats} <- get()]},
- {channel_queue_exchange_stats,
- [{QX, Stats} ||
- {{queue_exchange_stats, QX}, Stats} <- get()]}],
- rabbit_event:notify(channel_stats, Extra ++ Coarse ++ Fine)
- end.
-
-erase_queue_stats(QName) ->
- erase({queue_stats, QName}),
- [erase({queue_exchange_stats, QX}) ||
- {{queue_exchange_stats, QX = {QName0, _}}, _} <- get(),
- QName0 =:= QName].
diff --git a/src/rabbit_channel_interceptor.erl b/src/rabbit_channel_interceptor.erl
deleted file mode 100644
index 25c5df8a7b..0000000000
--- a/src/rabbit_channel_interceptor.erl
+++ /dev/null
@@ -1,91 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
-%% Since the AMQP methods used here are queue related,
-%% maybe we want this to be a queue_interceptor.
-
--module(rabbit_channel_interceptor).
-
--include("rabbit_framing.hrl").
--include("rabbit.hrl").
-
--export([intercept_method/2]).
-
--ifdef(use_specs).
-
--type(intercept_method() :: rabbit_framing:amqp_method_name()).
--type(original_method() :: rabbit_framing:amqp_method_record()).
--type(processed_method() :: rabbit_framing:amqp_method_record()).
-
--callback description() -> [proplists:property()].
-
--callback intercept(original_method(), rabbit_types:vhost()) ->
- processed_method() | rabbit_misc:channel_or_connection_exit().
-
-%% Whether the interceptor wishes to intercept the amqp method
--callback applies_to(intercept_method()) -> boolean().
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
- [{description, 0}, {intercept, 2}, {applies_to, 1}];
-behaviour_info(_Other) ->
- undefined.
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-intercept_method(#'basic.publish'{} = M, _VHost) -> M;
-intercept_method(#'basic.ack'{} = M, _VHost) -> M;
-intercept_method(#'basic.nack'{} = M, _VHost) -> M;
-intercept_method(#'basic.reject'{} = M, _VHost) -> M;
-intercept_method(#'basic.credit'{} = M, _VHost) -> M;
-intercept_method(M, VHost) ->
- intercept_method(M, VHost, select(rabbit_misc:method_record_type(M))).
-
-intercept_method(M, _VHost, []) ->
- M;
-intercept_method(M, VHost, [I]) ->
- M2 = I:intercept(M, VHost),
- case validate_method(M, M2) of
- true ->
- M2;
- _ ->
- internal_error("Interceptor: ~p expected "
- "to return method: ~p but returned: ~p",
- [I, rabbit_misc:method_record_type(M),
- rabbit_misc:method_record_type(M2)])
- end;
-intercept_method(M, _VHost, Is) ->
- internal_error("More than one interceptor for method: ~p -- ~p",
- [rabbit_misc:method_record_type(M), Is]).
-
-%% select the interceptors that apply to intercept_method().
-select(Method) ->
- [M || {_, M} <- rabbit_registry:lookup_all(channel_interceptor),
- code:which(M) =/= non_existing,
- M:applies_to(Method)].
-
-validate_method(M, M2) ->
- rabbit_misc:method_record_type(M) =:= rabbit_misc:method_record_type(M2).
-
-%% keep dialyzer happy
--spec internal_error(string(), [any()]) -> no_return().
-internal_error(Format, Args) ->
- rabbit_misc:protocol_error(internal_error, Format, Args).
diff --git a/src/rabbit_channel_sup.erl b/src/rabbit_channel_sup.erl
index e8f45f7305..66f6a81697 100644
--- a/src/rabbit_channel_sup.erl
+++ b/src/rabbit_channel_sup.erl
@@ -16,6 +16,16 @@
-module(rabbit_channel_sup).
+%% Supervises processes that implement AMQP 0-9-1 channels:
+%%
+%% * Channel process itself
+%% * Network writer (for network connections)
+%% * Limiter (handles channel QoS and flow control)
+%%
+%% Every rabbit_channel_sup is supervised by rabbit_channel_sup_sup.
+%%
+%% See also rabbit_channel, rabbit_writer, rabbit_limiter.
+
-behaviour(supervisor2).
-export([start_link/1]).
diff --git a/src/rabbit_channel_sup_sup.erl b/src/rabbit_channel_sup_sup.erl
index 2be2af91a7..9cfbb78a2b 100644
--- a/src/rabbit_channel_sup_sup.erl
+++ b/src/rabbit_channel_sup_sup.erl
@@ -16,6 +16,11 @@
-module(rabbit_channel_sup_sup).
+%% Supervisor for AMQP 0-9-1 channels. Every AMQP 0-9-1 connection has
+%% one of these.
+%%
+%% See also rabbit_channel_sup, rabbit_connection_helper_sup, rabbit_reader.
+
-behaviour(supervisor2).
-export([start_link/0, start_channel/2]).
diff --git a/src/rabbit_cli.erl b/src/rabbit_cli.erl
index 1dfbb11b2a..c170e9d9f0 100644
--- a/src/rabbit_cli.erl
+++ b/src/rabbit_cli.erl
@@ -18,7 +18,7 @@
-include("rabbit_cli.hrl").
-export([main/3, start_distribution/0, start_distribution/1,
- parse_arguments/4, rpc_call/4, rpc_call/5]).
+ parse_arguments/4, rpc_call/4, rpc_call/5, rpc_call/7]).
%%----------------------------------------------------------------------------
@@ -39,6 +39,9 @@
([{atom(), [{string(), optdef()}]} | atom()],
[{string(), optdef()}], string(), [string()]) -> parse_result()).
-spec(rpc_call/4 :: (node(), atom(), atom(), [any()]) -> any()).
+-spec(rpc_call/5 :: (node(), atom(), atom(), [any()], number()) -> any()).
+-spec(rpc_call/7 :: (node(), atom(), atom(), [any()], reference(), pid(),
+ number()) -> any()).
-endif.
@@ -65,10 +68,10 @@ main(ParseFun, DoFun, UsageMod) ->
%% thrown errors into normal return values
case catch DoFun(Command, Node, Args, Opts) of
ok ->
- rabbit_misc:quit(0);
+ rabbit_misc:quit(?EX_OK);
{ok, Result} ->
- rabbit_ctl_misc:print_cmd_result(Command, Result),
- rabbit_misc:quit(0);
+ rabbit_control_misc:print_cmd_result(Command, Result),
+ rabbit_misc:quit(?EX_OK);
{'EXIT', {function_clause, [{?MODULE, action, _} | _]}} -> %% < R15
PrintInvalidCommandError(),
usage(UsageMod);
@@ -78,43 +81,48 @@ main(ParseFun, DoFun, UsageMod) ->
{error, {missing_dependencies, Missing, Blame}} ->
print_error("dependent plugins ~p not found; used by ~p.",
[Missing, Blame]),
- rabbit_misc:quit(2);
+ rabbit_misc:quit(?EX_CONFIG);
{'EXIT', {badarg, _}} ->
print_error("invalid parameter: ~p", [Args]),
- usage(UsageMod);
+ usage(UsageMod, ?EX_DATAERR);
{error, {Problem, Reason}} when is_atom(Problem), is_binary(Reason) ->
%% We handle this common case specially to avoid ~p since
%% that has i18n issues
print_error("~s: ~s", [Problem, Reason]),
- rabbit_misc:quit(2);
+ rabbit_misc:quit(?EX_SOFTWARE);
{error, Reason} ->
print_error("~p", [Reason]),
- rabbit_misc:quit(2);
+ rabbit_misc:quit(?EX_SOFTWARE);
{error_string, Reason} ->
print_error("~s", [Reason]),
- rabbit_misc:quit(2);
+ rabbit_misc:quit(?EX_SOFTWARE);
{badrpc, {'EXIT', Reason}} ->
print_error("~p", [Reason]),
- rabbit_misc:quit(2);
+ rabbit_misc:quit(?EX_SOFTWARE);
{badrpc, Reason} ->
case Reason of
timeout ->
- print_error("operation ~w on node ~w timed out", [Command, Node]);
+ print_error("operation ~w on node ~w timed out", [Command, Node]),
+ rabbit_misc:quit(?EX_TEMPFAIL);
_ ->
print_error("unable to connect to node ~w: ~w", [Node, Reason]),
- print_badrpc_diagnostics([Node])
- end,
- rabbit_misc:quit(2);
+ print_badrpc_diagnostics([Node]),
+ rabbit_misc:quit(?EX_UNAVAILABLE)
+ end;
{badrpc_multi, Reason, Nodes} ->
print_error("unable to connect to nodes ~p: ~w", [Nodes, Reason]),
print_badrpc_diagnostics(Nodes),
- rabbit_misc:quit(2);
+ rabbit_misc:quit(?EX_UNAVAILABLE);
+ function_clause ->
+ print_error("operation ~w used with invalid parameter: ~p",
+ [Command, Args]),
+ usage(UsageMod);
{refused, Username, _, _} ->
print_error("failed to authenticate user \"~s\"", [Username]),
- rabbit_misc:quit(2);
+ rabbit_misc:quit(?EX_NOUSER);
Other ->
print_error("~p", [Other]),
- rabbit_misc:quit(2)
+ rabbit_misc:quit(?EX_SOFTWARE)
end.
start_distribution() ->
@@ -132,8 +140,11 @@ name_type() ->
end.
usage(Mod) ->
+ usage(Mod, ?EX_USAGE).
+
+usage(Mod, ExitCode) ->
io:format("~s", [Mod:usage()]),
- rabbit_misc:quit(1).
+ rabbit_misc:quit(ExitCode).
%%----------------------------------------------------------------------------
@@ -229,3 +240,6 @@ rpc_call(Node, Mod, Fun, Args, Timeout) ->
Time -> net_kernel:set_net_ticktime(Time, 0),
rpc:call(Node, Mod, Fun, Args, Timeout)
end.
+
+rpc_call(Node, Mod, Fun, Args, Ref, Pid, Timeout) ->
+ rpc_call(Node, Mod, Fun, Args++[Ref, Pid], Timeout).
diff --git a/src/rabbit_command_assembler.erl b/src/rabbit_command_assembler.erl
deleted file mode 100644
index f93b85b122..0000000000
--- a/src/rabbit_command_assembler.erl
+++ /dev/null
@@ -1,137 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_command_assembler).
--include("rabbit_framing.hrl").
--include("rabbit.hrl").
-
--export([analyze_frame/3, init/1, process/2]).
-
-%%----------------------------------------------------------------------------
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([frame/0]).
-
--type(frame_type() :: ?FRAME_METHOD | ?FRAME_HEADER | ?FRAME_BODY |
- ?FRAME_OOB_METHOD | ?FRAME_OOB_HEADER | ?FRAME_OOB_BODY |
- ?FRAME_TRACE | ?FRAME_HEARTBEAT).
--type(protocol() :: rabbit_framing:protocol()).
--type(method() :: rabbit_framing:amqp_method_record()).
--type(class_id() :: rabbit_framing:amqp_class_id()).
--type(weight() :: non_neg_integer()).
--type(body_size() :: non_neg_integer()).
--type(content() :: rabbit_types:undecoded_content()).
-
--type(frame() ::
- {'method', rabbit_framing:amqp_method_name(), binary()} |
- {'content_header', class_id(), weight(), body_size(), binary()} |
- {'content_body', binary()}).
-
--type(state() ::
- {'method', protocol()} |
- {'content_header', method(), class_id(), protocol()} |
- {'content_body', method(), body_size(), class_id(), protocol()}).
-
--spec(analyze_frame/3 :: (frame_type(), binary(), protocol()) ->
- frame() | 'heartbeat' | 'error').
-
--spec(init/1 :: (protocol()) -> {ok, state()}).
--spec(process/2 :: (frame(), state()) ->
- {ok, state()} |
- {ok, method(), state()} |
- {ok, method(), content(), state()} |
- {error, rabbit_types:amqp_error()}).
-
--endif.
-
-%%--------------------------------------------------------------------
-
-analyze_frame(?FRAME_METHOD,
- <<ClassId:16, MethodId:16, MethodFields/binary>>,
- Protocol) ->
- MethodName = Protocol:lookup_method_name({ClassId, MethodId}),
- {method, MethodName, MethodFields};
-analyze_frame(?FRAME_HEADER,
- <<ClassId:16, Weight:16, BodySize:64, Properties/binary>>,
- _Protocol) ->
- {content_header, ClassId, Weight, BodySize, Properties};
-analyze_frame(?FRAME_BODY, Body, _Protocol) ->
- {content_body, Body};
-analyze_frame(?FRAME_HEARTBEAT, <<>>, _Protocol) ->
- heartbeat;
-analyze_frame(_Type, _Body, _Protocol) ->
- error.
-
-init(Protocol) -> {ok, {method, Protocol}}.
-
-process({method, MethodName, FieldsBin}, {method, Protocol}) ->
- try
- Method = Protocol:decode_method_fields(MethodName, FieldsBin),
- case Protocol:method_has_content(MethodName) of
- true -> {ClassId, _MethodId} = Protocol:method_id(MethodName),
- {ok, {content_header, Method, ClassId, Protocol}};
- false -> {ok, Method, {method, Protocol}}
- end
- catch exit:#amqp_error{} = Reason -> {error, Reason}
- end;
-process(_Frame, {method, _Protocol}) ->
- unexpected_frame("expected method frame, "
- "got non method frame instead", [], none);
-process({content_header, ClassId, 0, 0, PropertiesBin},
- {content_header, Method, ClassId, Protocol}) ->
- Content = empty_content(ClassId, PropertiesBin, Protocol),
- {ok, Method, Content, {method, Protocol}};
-process({content_header, ClassId, 0, BodySize, PropertiesBin},
- {content_header, Method, ClassId, Protocol}) ->
- Content = empty_content(ClassId, PropertiesBin, Protocol),
- {ok, {content_body, Method, BodySize, Content, Protocol}};
-process({content_header, HeaderClassId, 0, _BodySize, _PropertiesBin},
- {content_header, Method, ClassId, _Protocol}) ->
- unexpected_frame("expected content header for class ~w, "
- "got one for class ~w instead",
- [ClassId, HeaderClassId], Method);
-process(_Frame, {content_header, Method, ClassId, _Protocol}) ->
- unexpected_frame("expected content header for class ~w, "
- "got non content header frame instead", [ClassId], Method);
-process({content_body, FragmentBin},
- {content_body, Method, RemainingSize,
- Content = #content{payload_fragments_rev = Fragments}, Protocol}) ->
- NewContent = Content#content{
- payload_fragments_rev = [FragmentBin | Fragments]},
- case RemainingSize - size(FragmentBin) of
- 0 -> {ok, Method, NewContent, {method, Protocol}};
- Sz -> {ok, {content_body, Method, Sz, NewContent, Protocol}}
- end;
-process(_Frame, {content_body, Method, _RemainingSize, _Content, _Protocol}) ->
- unexpected_frame("expected content body, "
- "got non content body frame instead", [], Method).
-
-%%--------------------------------------------------------------------
-
-empty_content(ClassId, PropertiesBin, Protocol) ->
- #content{class_id = ClassId,
- properties = none,
- properties_bin = PropertiesBin,
- protocol = Protocol,
- payload_fragments_rev = []}.
-
-unexpected_frame(Format, Params, Method) when is_atom(Method) ->
- {error, rabbit_misc:amqp_error(unexpected_frame, Format, Params, Method)};
-unexpected_frame(Format, Params, Method) ->
- unexpected_frame(Format, Params, rabbit_misc:method_record_type(Method)).
diff --git a/src/rabbit_connection_helper_sup.erl b/src/rabbit_connection_helper_sup.erl
index d3c05ee416..d6492331ef 100644
--- a/src/rabbit_connection_helper_sup.erl
+++ b/src/rabbit_connection_helper_sup.erl
@@ -16,6 +16,15 @@
-module(rabbit_connection_helper_sup).
+%% Supervises auxiliary processes of AMQP 0-9-1 connections:
+%%
+%% * Channel supervisor
+%% * Heartbeat receiver
+%% * Heartbeat sender
+%% * Exclusive queue collector
+%%
+%% See also rabbit_heartbeat, rabbit_channel_sup_sup, rabbit_queue_collector.
+
-behaviour(supervisor2).
-export([start_link/0]).
diff --git a/src/rabbit_connection_sup.erl b/src/rabbit_connection_sup.erl
index 982608556a..09cddb0247 100644
--- a/src/rabbit_connection_sup.erl
+++ b/src/rabbit_connection_sup.erl
@@ -16,9 +16,19 @@
-module(rabbit_connection_sup).
+%% Supervisor for a (network) AMQP 0-9-1 client connection.
+%%
+%% Supervises
+%%
+%% * rabbit_reader
+%% * Auxiliary process supervisor
+%%
+%% See also rabbit_reader, rabbit_connection_helper_sup.
+
-behaviour(supervisor2).
+-behaviour(ranch_protocol).
--export([start_link/0, reader/1]).
+-export([start_link/4, reader/1]).
-export([init/1]).
@@ -28,14 +38,14 @@
-ifdef(use_specs).
--spec(start_link/0 :: () -> {'ok', pid(), pid()}).
+-spec(start_link/4 :: (any(), rabbit_net:socket(), module(), any()) -> {'ok', pid(), pid()}).
-spec(reader/1 :: (pid()) -> pid()).
-endif.
%%--------------------------------------------------------------------------
-start_link() ->
+start_link(Ref, Sock, _Transport, _Opts) ->
{ok, SupPid} = supervisor2:start_link(?MODULE, []),
%% We need to get channels in the hierarchy here so they get shut
%% down after the reader, so the reader gets a chance to terminate
@@ -55,7 +65,7 @@ start_link() ->
{ok, ReaderPid} =
supervisor2:start_child(
SupPid,
- {reader, {rabbit_reader, start_link, [HelperSup]},
+ {reader, {rabbit_reader, start_link, [HelperSup, Ref, Sock]},
intrinsic, ?MAX_WAIT, worker, [rabbit_reader]}),
{ok, SupPid, ReaderPid}.
diff --git a/src/rabbit_control_main.erl b/src/rabbit_control_main.erl
index 057a9f4e44..c064e82ac6 100644
--- a/src/rabbit_control_main.erl
+++ b/src/rabbit_control_main.erl
@@ -18,11 +18,11 @@
-include("rabbit.hrl").
-include("rabbit_cli.hrl").
--export([start/0, stop/0, parse_arguments/2, action/5,
+-export([start/0, stop/0, parse_arguments/2, action/5, action/6,
sync_queue/1, cancel_sync_queue/1, become/1,
purge_queue/1]).
--import(rabbit_cli, [rpc_call/4, rpc_call/5]).
+-import(rabbit_cli, [rpc_call/4, rpc_call/5, rpc_call/7]).
-define(EXTERNAL_CHECK_INTERVAL, 1000).
@@ -88,6 +88,7 @@
{trace_on, [?VHOST_DEF]},
{trace_off, [?VHOST_DEF]},
set_vm_memory_high_watermark,
+ set_disk_free_limit,
help
]).
@@ -417,10 +418,35 @@ action(set_vm_memory_high_watermark, Node, [Arg], _Opts, Inform) ->
rpc_call(Node, vm_memory_monitor, set_vm_memory_high_watermark, [Frac]);
action(set_vm_memory_high_watermark, Node, ["absolute", Arg], _Opts, Inform) ->
- Limit = list_to_integer(Arg),
- Inform("Setting memory threshold on ~p to ~p bytes", [Node, Limit]),
- rpc_call(Node, vm_memory_monitor, set_vm_memory_high_watermark,
- [{absolute, Limit}]);
+ case rabbit_resource_monitor_misc:parse_information_unit(Arg) of
+ {ok, Limit} ->
+ Inform("Setting memory threshold on ~p to ~p bytes", [Node, Limit]),
+ rpc_call(Node, vm_memory_monitor, set_vm_memory_high_watermark,
+ [{absolute, Limit}]);
+ {error, parse_error} ->
+ {error_string, "Unable to parse absolute memory limit value ~p", [Arg]}
+ end;
+
+action(set_disk_free_limit, Node, [Arg], _Opts, Inform) ->
+ case rabbit_resource_monitor_misc:parse_information_unit(Arg) of
+ {ok, Limit} ->
+ Inform("Setting disk free limit on ~p to ~p bytes", [Node, Limit]),
+ rpc_call(Node, rabbit_disk_monitor, set_disk_free_limit, [Limit]);
+ {error, parse_error} ->
+ {error_string, "Unable to parse disk free limit value ~p", [Arg]}
+ end;
+
+action(set_disk_free_limit, Node, ["mem_relative", Arg], _Opts, Inform) ->
+ Frac = list_to_float(case string:chr(Arg, $.) of
+ 0 -> Arg ++ ".0";
+ _ -> Arg
+ end),
+ Inform("Setting disk free limit on ~p to ~p of total RAM", [Node, Frac]),
+ rpc_call(Node,
+ rabbit_disk_monitor,
+ set_disk_free_limit,
+ [{mem_relative, Frac}]);
+
action(set_permissions, Node, [Username, CPerm, WPerm, RPerm], Opts, Inform) ->
VHost = proplists:get_value(?VHOST_OPT, Opts),
@@ -514,62 +540,53 @@ action(purge_queue, Node, [Q], Opts, Inform, Timeout) ->
action(list_users, Node, [], _Opts, Inform, Timeout) ->
Inform("Listing users", []),
- display_info_list(
- call(Node, {rabbit_auth_backend_internal, list_users, []}, Timeout),
- rabbit_auth_backend_internal:user_info_keys());
+ call(Node, {rabbit_auth_backend_internal, list_users, []},
+ rabbit_auth_backend_internal:user_info_keys(), true, Timeout);
action(list_permissions, Node, [], Opts, Inform, Timeout) ->
VHost = proplists:get_value(?VHOST_OPT, Opts),
Inform("Listing permissions in vhost \"~s\"", [VHost]),
- display_info_list(call(Node, {rabbit_auth_backend_internal,
- list_vhost_permissions, [VHost]}, Timeout),
- rabbit_auth_backend_internal:vhost_perms_info_keys());
+ call(Node, {rabbit_auth_backend_internal, list_vhost_permissions, [VHost]},
+ rabbit_auth_backend_internal:vhost_perms_info_keys(), true, Timeout);
action(list_parameters, Node, [], Opts, Inform, Timeout) ->
VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
Inform("Listing runtime parameters", []),
- display_info_list(
- rpc_call(Node, rabbit_runtime_parameters, list_formatted, [VHostArg],
- Timeout),
- rabbit_runtime_parameters:info_keys());
+ call(Node, {rabbit_runtime_parameters, list_formatted, [VHostArg]},
+ rabbit_runtime_parameters:info_keys(), Timeout);
action(list_policies, Node, [], Opts, Inform, Timeout) ->
VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
Inform("Listing policies", []),
- display_info_list(rpc_call(Node, rabbit_policy, list_formatted, [VHostArg],
- Timeout),
- rabbit_policy:info_keys());
+ call(Node, {rabbit_policy, list_formatted, [VHostArg]},
+ rabbit_policy:info_keys(), Timeout);
action(list_vhosts, Node, Args, _Opts, Inform, Timeout) ->
Inform("Listing vhosts", []),
ArgAtoms = default_if_empty(Args, [name]),
- display_info_list(call(Node, {rabbit_vhost, info_all, []}, Timeout),
- ArgAtoms);
+ call(Node, {rabbit_vhost, info_all, []}, ArgAtoms, true, Timeout);
action(list_user_permissions, _Node, _Args = [], _Opts, _Inform, _Timeout) ->
{error_string,
"list_user_permissions expects a username argument, but none provided."};
action(list_user_permissions, Node, Args = [_Username], _Opts, Inform, Timeout) ->
Inform("Listing permissions for user ~p", Args),
- display_info_list(call(Node, {rabbit_auth_backend_internal,
- list_user_permissions, Args}, Timeout),
- rabbit_auth_backend_internal:user_perms_info_keys());
+ call(Node, {rabbit_auth_backend_internal, list_user_permissions, Args},
+ rabbit_auth_backend_internal:user_perms_info_keys(), true, Timeout);
action(list_queues, Node, Args, Opts, Inform, Timeout) ->
Inform("Listing queues", []),
VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
ArgAtoms = default_if_empty(Args, [name, messages]),
- display_info_list(rpc_call(Node, rabbit_amqqueue, info_all,
- [VHostArg, ArgAtoms], Timeout),
- ArgAtoms);
+ call(Node, {rabbit_amqqueue, info_all, [VHostArg, ArgAtoms]},
+ ArgAtoms, Timeout);
action(list_exchanges, Node, Args, Opts, Inform, Timeout) ->
Inform("Listing exchanges", []),
VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
ArgAtoms = default_if_empty(Args, [name, type]),
- display_info_list(rpc_call(Node, rabbit_exchange, info_all,
- [VHostArg, ArgAtoms], Timeout),
- ArgAtoms);
+ call(Node, {rabbit_exchange, info_all, [VHostArg, ArgAtoms]},
+ ArgAtoms, Timeout);
action(list_bindings, Node, Args, Opts, Inform, Timeout) ->
Inform("Listing bindings", []),
@@ -577,32 +594,27 @@ action(list_bindings, Node, Args, Opts, Inform, Timeout) ->
ArgAtoms = default_if_empty(Args, [source_name, source_kind,
destination_name, destination_kind,
routing_key, arguments]),
- display_info_list(rpc_call(Node, rabbit_binding, info_all,
- [VHostArg, ArgAtoms], Timeout),
- ArgAtoms);
+ call(Node, {rabbit_binding, info_all, [VHostArg, ArgAtoms]},
+ ArgAtoms, Timeout);
action(list_connections, Node, Args, _Opts, Inform, Timeout) ->
Inform("Listing connections", []),
ArgAtoms = default_if_empty(Args, [user, peer_host, peer_port, state]),
- display_info_list(rpc_call(Node, rabbit_networking, connection_info_all,
- [ArgAtoms], Timeout),
- ArgAtoms);
+ call(Node, {rabbit_networking, connection_info_all, [ArgAtoms]},
+ ArgAtoms, Timeout);
action(list_channels, Node, Args, _Opts, Inform, Timeout) ->
Inform("Listing channels", []),
ArgAtoms = default_if_empty(Args, [pid, user, consumer_count,
messages_unacknowledged]),
- display_info_list(rpc_call(Node, rabbit_channel, info_all, [ArgAtoms],
- Timeout),
- ArgAtoms);
+ call(Node, {rabbit_channel, info_all, [ArgAtoms]},
+ ArgAtoms, Timeout);
action(list_consumers, Node, _Args, Opts, Inform, Timeout) ->
Inform("Listing consumers", []),
VHostArg = list_to_binary(proplists:get_value(?VHOST_OPT, Opts)),
- display_info_list(rpc_call(Node, rabbit_amqqueue, consumers_all, [VHostArg],
- Timeout),
- rabbit_amqqueue:consumer_info_keys()).
-
+ call(Node, {rabbit_amqqueue, consumers_all, [VHostArg]},
+ rabbit_amqqueue:consumer_info_keys(), Timeout).
format_parse_error({_Line, Mod, Err}) -> lists:flatten(Mod:format_error(Err)).
@@ -694,6 +706,15 @@ default_if_empty(List, Default) when is_list(List) ->
true -> [list_to_atom(X) || X <- List]
end.
+display_info_message(Result, InfoItemKeys) ->
+ display_row([format_info_item(
+ case proplists:lookup(X, Result) of
+ none when is_list(Result), length(Result) > 0 ->
+ exit({error, {bad_info_key, X}});
+ none -> Result;
+ {X, Value} -> Value
+ end) || X <- InfoItemKeys]).
+
display_info_list(Results, InfoItemKeys) when is_list(Results) ->
lists:foreach(
fun (Result) -> display_row(
@@ -766,8 +787,30 @@ ensure_app_running(Node) ->
call(Node, {Mod, Fun, Args}) ->
rpc_call(Node, Mod, Fun, lists:map(fun list_to_binary_utf8/1, Args)).
-call(Node, {Mod, Fun, Args}, Timeout) ->
- rpc_call(Node, Mod, Fun, lists:map(fun list_to_binary_utf8/1, Args), Timeout).
+call(Node, {Mod, Fun, Args}, InfoKeys, Timeout) ->
+ call(Node, {Mod, Fun, Args}, InfoKeys, false, Timeout).
+
+call(Node, {Mod, Fun, Args}, InfoKeys, ToBinUtf8, Timeout) ->
+ Args0 = case ToBinUtf8 of
+ true -> lists:map(fun list_to_binary_utf8/1, Args);
+ false -> Args
+ end,
+ Ref = make_ref(),
+ Pid = self(),
+ spawn_link(
+ fun () ->
+ case rabbit_cli:rpc_call(Node, Mod, Fun, Args0,
+ Ref, Pid, Timeout) of
+ {error, _} = Error ->
+ Pid ! {error, Error};
+ {bad_argument, _} = Error ->
+ Pid ! {error, Error};
+ _ ->
+ ok
+ end
+ end),
+ rabbit_control_misc:wait_for_info_messages(
+ Pid, Ref, InfoKeys, fun display_info_message/2, Timeout).
list_to_binary_utf8(L) ->
B = list_to_binary(L),
diff --git a/src/rabbit_dead_letter.erl b/src/rabbit_dead_letter.erl
index 29032df856..5a4aad10da 100644
--- a/src/rabbit_dead_letter.erl
+++ b/src/rabbit_dead_letter.erl
@@ -53,7 +53,7 @@ make_msg(Msg = #basic_message{content = Content,
_ -> {[RK], fun (H) -> lists:keydelete(<<"CC">>, 1, H) end}
end,
ReasonBin = list_to_binary(atom_to_list(Reason)),
- TimeSec = rabbit_misc:now_ms() div 1000,
+ TimeSec = time_compat:os_system_time(seconds),
PerMsgTTL = per_msg_ttl_header(Content#content.properties),
HeadersFun2 =
fun (Headers) ->
diff --git a/src/rabbit_diagnostics.erl b/src/rabbit_diagnostics.erl
index 531f3f922e..ac1392d68f 100644
--- a/src/rabbit_diagnostics.erl
+++ b/src/rabbit_diagnostics.erl
@@ -17,8 +17,8 @@
-module(rabbit_diagnostics).
-define(PROCESS_INFO,
- [registered_name, current_stacktrace, initial_call, dictionary,
- message_queue_len, links, monitors, monitored_by, heap_size]).
+ [registered_name, current_stacktrace, initial_call, message_queue_len,
+ links, monitors, monitored_by, heap_size]).
-export([maybe_stuck/0, maybe_stuck/1, top_memory_use/0, top_memory_use/1,
top_binary_refs/0, top_binary_refs/1]).
@@ -27,17 +27,17 @@ maybe_stuck() -> maybe_stuck(5000).
maybe_stuck(Timeout) ->
Pids = processes(),
- io:format("There are ~p processes.~n", [length(Pids)]),
+ io:format("~s There are ~p processes.~n", [get_time(), length(Pids)]),
maybe_stuck(Pids, Timeout).
maybe_stuck(Pids, Timeout) when Timeout =< 0 ->
- io:format("Found ~p suspicious processes.~n", [length(Pids)]),
- [io:format("~p~n", [info(Pid)]) || Pid <- Pids],
+ io:format("~s Found ~p suspicious processes.~n", [get_time(), length(Pids)]),
+ [io:format("~s ~p~n", [get_time(), info(Pid)]) || Pid <- Pids],
ok;
maybe_stuck(Pids, Timeout) ->
Pids2 = [P || P <- Pids, looks_stuck(P)],
- io:format("Investigated ~p processes this round, ~pms to go.~n",
- [length(Pids2), Timeout]),
+ io:format("~s Investigated ~p processes this round, ~pms to go.~n",
+ [get_time(), length(Pids2), Timeout]),
timer:sleep(500),
maybe_stuck(Pids2, Timeout - 500).
@@ -80,19 +80,19 @@ top_memory_use() -> top_memory_use(30).
top_memory_use(Count) ->
Pids = processes(),
- io:format("Memory use: top ~p of ~p processes.~n", [Count, length(Pids)]),
+ io:format("~s Memory use: top ~p of ~p processes.~n", [get_time(), Count, length(Pids)]),
Procs = [{info(Pid, memory, 0), info(Pid)} || Pid <- Pids],
Sorted = lists:sublist(lists:reverse(lists:sort(Procs)), Count),
- io:format("~p~n", [Sorted]).
+ io:format("~s ~p~n", [get_time(), Sorted]).
top_binary_refs() -> top_binary_refs(30).
top_binary_refs(Count) ->
Pids = processes(),
- io:format("Binary refs: top ~p of ~p processes.~n", [Count, length(Pids)]),
+ io:format("~s Binary refs: top ~p of ~p processes.~n", [get_time(), Count, length(Pids)]),
Procs = [{{binary_refs, binary_refs(Pid)}, info(Pid)} || Pid <- Pids],
Sorted = lists:sublist(lists:reverse(lists:sort(Procs)), Count),
- io:format("~p~n", [Sorted]).
+ io:format("~s ~p~n", [get_time(), Sorted]).
binary_refs(Pid) ->
{binary, Refs} = info(Pid, binary, []),
@@ -111,3 +111,16 @@ info(Pid, Infos, Default) ->
false -> Default
end
end.
+
+get_time() ->
+ {{Y,M,D}, {H,Min,Sec}} = calendar:local_time(),
+ [ integer_to_list(Y), "-",
+ prefix_zero(integer_to_list(M)), "-",
+ prefix_zero(integer_to_list(D)), " ",
+ prefix_zero(integer_to_list(H)), ":",
+ prefix_zero(integer_to_list(Min)), ":",
+ prefix_zero(integer_to_list(Sec))
+ ].
+
+prefix_zero([C]) -> [$0, C];
+prefix_zero([_,_] = Full) -> Full.
diff --git a/src/rabbit_direct.erl b/src/rabbit_direct.erl
index d79ef4aeb9..623d16215b 100644
--- a/src/rabbit_direct.erl
+++ b/src/rabbit_direct.erl
@@ -102,14 +102,20 @@ notify_auth_result(Username, AuthResult, ExtraProps) ->
ExtraProps,
rabbit_event:notify(AuthResult, [P || {_, V} = P <- EventProps, V =/= '']).
+authz_socket_info_direct(Infos) ->
+ #authz_socket_info{sockname={proplists:get_value(host, Infos),
+ proplists:get_value(port, Infos)},
+ peername={proplists:get_value(peer_host, Infos),
+ proplists:get_value(peer_port, Infos)}}.
+
connect1(User, VHost, Protocol, Pid, Infos) ->
- try rabbit_access_control:check_vhost_access(User, VHost, undefined) of
+ try rabbit_access_control:check_vhost_access(User, VHost, authz_socket_info_direct(Infos)) of
ok -> ok = pg_local:join(rabbit_direct, Pid),
rabbit_event:notify(connection_created, Infos),
{ok, {User, rabbit_reader:server_properties(Protocol)}}
catch
- exit:#amqp_error{name = access_refused} ->
- {error, access_refused}
+ exit:#amqp_error{name = Reason = not_allowed} ->
+ {error, Reason}
end.
start_channel(Number, ClientChannelPid, ConnPid, ConnName, Protocol, User,
diff --git a/src/rabbit_disk_monitor.erl b/src/rabbit_disk_monitor.erl
index c65631a24c..30b11c0c11 100644
--- a/src/rabbit_disk_monitor.erl
+++ b/src/rabbit_disk_monitor.erl
@@ -16,6 +16,19 @@
-module(rabbit_disk_monitor).
+%% Disk monitoring server. Monitors free disk space
+%% periodically and sets alarms when it is below a certain
+%% watermark (configurable either as an absolute value or
+%% relative to the memory limit).
+%%
+%% Disk monitoring is done by shelling out to /usr/bin/df
+%% instead of related built-in OTP functions because currently
+%% this is the most reliable way of determining free disk space
+%% for the partition our internal database is on.
+%%
+%% Update interval is dynamically calculated assuming disk
+%% space is being filled at FAST_RATE.
+
-behaviour(gen_server).
-export([start_link/1]).
@@ -31,24 +44,35 @@
-define(SERVER, ?MODULE).
-define(DEFAULT_MIN_DISK_CHECK_INTERVAL, 100).
-define(DEFAULT_MAX_DISK_CHECK_INTERVAL, 10000).
+-define(DEFAULT_DISK_FREE_LIMIT, 50000000).
%% 250MB/s i.e. 250kB/ms
-define(FAST_RATE, (250 * 1000)).
--record(state, {dir,
- limit,
- actual,
- min_interval,
- max_interval,
- timer,
- alarmed,
- enabled
- }).
+-record(state, {
+ %% monitor partition on which this directory resides
+ dir,
+ %% configured limit in bytes
+ limit,
+ %% last known free disk space amount in bytes
+ actual,
+ %% minimum check interval
+ min_interval,
+ %% maximum check interval
+ max_interval,
+ %% timer that drives periodic checks
+ timer,
+ %% is free disk space alarm currently in effect?
+ alarmed,
+ %% is monitoring enabled? false on unsupported
+ %% platforms
+ enabled
+}).
%%----------------------------------------------------------------------------
-ifdef(use_specs).
--type(disk_free_limit() :: (integer() | {'mem_relative', float()})).
+-type(disk_free_limit() :: (integer() | string() | {'mem_relative', float()})).
-spec(start_link/1 :: (disk_free_limit()) -> rabbit_types:ok_pid_or_error()).
-spec(get_disk_free_limit/0 :: () -> integer()).
-spec(set_disk_free_limit/1 :: (disk_free_limit()) -> 'ok').
@@ -210,10 +234,17 @@ parse_free_win32(CommandResult) ->
[{capture, all_but_first, list}]),
list_to_integer(lists:reverse(Free)).
-interpret_limit({mem_relative, R}) ->
- round(R * vm_memory_monitor:get_total_memory());
-interpret_limit(L) ->
- L.
+interpret_limit({mem_relative, Relative})
+ when is_float(Relative), Relative < 1 ->
+ round(Relative * vm_memory_monitor:get_total_memory());
+interpret_limit(Absolute) ->
+ case rabbit_resource_monitor_misc:parse_information_unit(Absolute) of
+ {ok, ParsedAbsolute} -> ParsedAbsolute;
+ {error, parse_error} ->
+ rabbit_log:error("Unable to parse disk_free_limit value ~p",
+ [Absolute]),
+ ?DEFAULT_DISK_FREE_LIMIT
+ end.
emit_update_info(StateStr, CurrentFree, Limit) ->
rabbit_log:info(
diff --git a/src/rabbit_error_logger.erl b/src/rabbit_error_logger.erl
index eecb2d64d9..425d171bae 100644
--- a/src/rabbit_error_logger.erl
+++ b/src/rabbit_error_logger.erl
@@ -101,7 +101,7 @@ publish(_Other, _Format, _Data, _State) ->
publish1(RoutingKey, Format, Data, LogExch) ->
%% 0-9-1 says the timestamp is a "64 bit POSIX timestamp". That's
%% second resolution, not millisecond.
- Timestamp = rabbit_misc:now_ms() div 1000,
+ Timestamp = time_compat:os_system_time(seconds),
Args = [truncate:term(A, ?LOG_TRUNC) || A <- Data],
{ok, _DeliveredQPids} =
diff --git a/src/rabbit_event.erl b/src/rabbit_event.erl
deleted file mode 100644
index 13bf6bc6f8..0000000000
--- a/src/rabbit_event.erl
+++ /dev/null
@@ -1,164 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_event).
-
--include("rabbit.hrl").
-
--export([start_link/0]).
--export([init_stats_timer/2, init_disabled_stats_timer/2,
- ensure_stats_timer/3, stop_stats_timer/2, reset_stats_timer/2]).
--export([stats_level/2, if_enabled/3]).
--export([notify/2, notify/3, notify_if/3]).
--export([sync_notify/2, sync_notify/3]).
-
-%%----------------------------------------------------------------------------
-
--record(state, {level, interval, timer}).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([event_type/0, event_props/0, event_timestamp/0, event/0]).
-
--type(event_type() :: atom()).
--type(event_props() :: term()).
--type(event_timestamp() ::
- {non_neg_integer(), non_neg_integer(), non_neg_integer()}).
-
--type(event() :: #event { type :: event_type(),
- props :: event_props(),
- reference :: 'none' | reference(),
- timestamp :: event_timestamp() }).
-
--type(level() :: 'none' | 'coarse' | 'fine').
-
--type(timer_fun() :: fun (() -> 'ok')).
--type(container() :: tuple()).
--type(pos() :: non_neg_integer()).
-
--spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
--spec(init_stats_timer/2 :: (container(), pos()) -> container()).
--spec(init_disabled_stats_timer/2 :: (container(), pos()) -> container()).
--spec(ensure_stats_timer/3 :: (container(), pos(), term()) -> container()).
--spec(stop_stats_timer/2 :: (container(), pos()) -> container()).
--spec(reset_stats_timer/2 :: (container(), pos()) -> container()).
--spec(stats_level/2 :: (container(), pos()) -> level()).
--spec(if_enabled/3 :: (container(), pos(), timer_fun()) -> 'ok').
--spec(notify/2 :: (event_type(), event_props()) -> 'ok').
--spec(notify/3 :: (event_type(), event_props(), reference() | 'none') -> 'ok').
--spec(notify_if/3 :: (boolean(), event_type(), event_props()) -> 'ok').
--spec(sync_notify/2 :: (event_type(), event_props()) -> 'ok').
--spec(sync_notify/3 :: (event_type(), event_props(),
- reference() | 'none') -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_link() ->
- gen_event:start_link({local, ?MODULE}).
-
-%% The idea is, for each stat-emitting object:
-%%
-%% On startup:
-%% init_stats_timer(State)
-%% notify(created event)
-%% if_enabled(internal_emit_stats) - so we immediately send something
-%%
-%% On wakeup:
-%% ensure_stats_timer(State, emit_stats)
-%% (Note we can't emit stats immediately, the timer may have fired 1ms ago.)
-%%
-%% emit_stats:
-%% if_enabled(internal_emit_stats)
-%% reset_stats_timer(State) - just bookkeeping
-%%
-%% Pre-hibernation:
-%% if_enabled(internal_emit_stats)
-%% stop_stats_timer(State)
-%%
-%% internal_emit_stats:
-%% notify(stats)
-
-init_stats_timer(C, P) ->
- {ok, StatsLevel} = application:get_env(rabbit, collect_statistics),
- {ok, Interval} = application:get_env(rabbit, collect_statistics_interval),
- setelement(P, C, #state{level = StatsLevel, interval = Interval,
- timer = undefined}).
-
-init_disabled_stats_timer(C, P) ->
- setelement(P, C, #state{level = none, interval = 0, timer = undefined}).
-
-ensure_stats_timer(C, P, Msg) ->
- case element(P, C) of
- #state{level = Level, interval = Interval, timer = undefined} = State
- when Level =/= none ->
- TRef = erlang:send_after(Interval, self(), Msg),
- setelement(P, C, State#state{timer = TRef});
- #state{} ->
- C
- end.
-
-stop_stats_timer(C, P) ->
- case element(P, C) of
- #state{timer = TRef} = State when TRef =/= undefined ->
- case erlang:cancel_timer(TRef) of
- false -> C;
- _ -> setelement(P, C, State#state{timer = undefined})
- end;
- #state{} ->
- C
- end.
-
-reset_stats_timer(C, P) ->
- case element(P, C) of
- #state{timer = TRef} = State when TRef =/= undefined ->
- setelement(P, C, State#state{timer = undefined});
- #state{} ->
- C
- end.
-
-stats_level(C, P) ->
- #state{level = Level} = element(P, C),
- Level.
-
-if_enabled(C, P, Fun) ->
- case element(P, C) of
- #state{level = none} -> ok;
- #state{} -> Fun(), ok
- end.
-
-notify_if(true, Type, Props) -> notify(Type, Props);
-notify_if(false, _Type, _Props) -> ok.
-
-notify(Type, Props) -> notify(Type, Props, none).
-
-notify(Type, Props, Ref) ->
- gen_event:notify(?MODULE, event_cons(Type, Props, Ref)).
-
-sync_notify(Type, Props) -> sync_notify(Type, Props, none).
-
-sync_notify(Type, Props, Ref) ->
- gen_event:sync_notify(?MODULE, event_cons(Type, Props, Ref)).
-
-event_cons(Type, Props, Ref) ->
- #event{type = Type,
- props = Props,
- reference = Ref,
- timestamp = os:timestamp()}.
-
diff --git a/src/rabbit_exchange.erl b/src/rabbit_exchange.erl
index 459334455f..ec5065a1d6 100644
--- a/src/rabbit_exchange.erl
+++ b/src/rabbit_exchange.erl
@@ -22,7 +22,7 @@
assert_equivalence/6, assert_args_equivalence/2, check_type/1,
lookup/1, lookup_or_die/1, list/0, list/1, lookup_scratch/2,
update_scratch/3, update_decorators/1, immutable/1,
- info_keys/0, info/1, info/2, info_all/1, info_all/2,
+ info_keys/0, info/1, info/2, info_all/1, info_all/2, info_all/4,
route/2, delete/2, validate_binding/2]).
%% these must be run inside a mnesia tx
-export([maybe_auto_delete/2, serial/1, peek_serial/1, update/2]).
@@ -82,6 +82,9 @@
-spec(info_all/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]).
-spec(info_all/2 ::(rabbit_types:vhost(), rabbit_types:info_keys())
-> [rabbit_types:infos()]).
+-spec(info_all/4 ::(rabbit_types:vhost(), rabbit_types:info_keys(),
+ reference(), pid())
+ -> 'ok').
-spec(route/2 :: (rabbit_types:exchange(), rabbit_types:delivery())
-> [rabbit_amqqueue:name()]).
-spec(delete/2 ::
@@ -340,6 +343,10 @@ info_all(VHostPath) -> map(VHostPath, fun (X) -> info(X) end).
info_all(VHostPath, Items) -> map(VHostPath, fun (X) -> info(X, Items) end).
+info_all(VHostPath, Items, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map(
+ AggregatorPid, Ref, fun(X) -> info(X, Items) end, list(VHostPath)).
+
route(#exchange{name = #resource{virtual_host = VHost, name = RName} = XName,
decorators = Decorators} = X,
#delivery{message = #basic_message{routing_keys = RKs}} = Delivery) ->
diff --git a/src/rabbit_exchange_decorator.erl b/src/rabbit_exchange_decorator.erl
deleted file mode 100644
index 7c5bfdf913..0000000000
--- a/src/rabbit_exchange_decorator.erl
+++ /dev/null
@@ -1,128 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_exchange_decorator).
-
--include("rabbit.hrl").
-
--export([select/2, set/1, register/2, unregister/1]).
-
-%% This is like an exchange type except that:
-%%
-%% 1) It applies to all exchanges as soon as it is installed, therefore
-%% 2) It is not allowed to affect validation, so no validate/1 or
-%% assert_args_equivalence/2
-%%
-%% It's possible in the future we might make decorators
-%% able to manipulate messages as they are published.
-
--ifdef(use_specs).
-
--type(tx() :: 'transaction' | 'none').
--type(serial() :: pos_integer() | tx()).
-
--callback description() -> [proplists:property()].
-
-%% Should Rabbit ensure that all binding events that are
-%% delivered to an individual exchange can be serialised? (they
-%% might still be delivered out of order, but there'll be a
-%% serial number).
--callback serialise_events(rabbit_types:exchange()) -> boolean().
-
-%% called after declaration and recovery
--callback create(tx(), rabbit_types:exchange()) -> 'ok'.
-
-%% called after exchange (auto)deletion.
--callback delete(tx(), rabbit_types:exchange(), [rabbit_types:binding()]) ->
- 'ok'.
-
-%% called when the policy attached to this exchange changes.
--callback policy_changed(rabbit_types:exchange(), rabbit_types:exchange()) ->
- 'ok'.
-
-%% called after a binding has been added or recovered
--callback add_binding(serial(), rabbit_types:exchange(),
- rabbit_types:binding()) -> 'ok'.
-
-%% called after bindings have been deleted.
--callback remove_bindings(serial(), rabbit_types:exchange(),
- [rabbit_types:binding()]) -> 'ok'.
-
-%% Allows additional destinations to be added to the routing decision.
--callback route(rabbit_types:exchange(), rabbit_types:delivery()) ->
- [rabbit_amqqueue:name() | rabbit_exchange:name()].
-
-%% Whether the decorator wishes to receive callbacks for the exchange
-%% none:no callbacks, noroute:all callbacks except route, all:all callbacks
--callback active_for(rabbit_types:exchange()) -> 'none' | 'noroute' | 'all'.
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
- [{description, 0}, {serialise_events, 1}, {create, 2}, {delete, 3},
- {policy_changed, 2}, {add_binding, 3}, {remove_bindings, 3},
- {route, 2}, {active_for, 1}];
-behaviour_info(_Other) ->
- undefined.
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-%% select a subset of active decorators
-select(all, {Route, NoRoute}) -> filter(Route ++ NoRoute);
-select(route, {Route, _NoRoute}) -> filter(Route);
-select(raw, {Route, NoRoute}) -> Route ++ NoRoute.
-
-filter(Modules) ->
- [M || M <- Modules, code:which(M) =/= non_existing].
-
-set(X) ->
- Decs = lists:foldl(fun (D, {Route, NoRoute}) ->
- ActiveFor = D:active_for(X),
- {cons_if_eq(all, ActiveFor, D, Route),
- cons_if_eq(noroute, ActiveFor, D, NoRoute)}
- end, {[], []}, list()),
- X#exchange{decorators = Decs}.
-
-list() -> [M || {_, M} <- rabbit_registry:lookup_all(exchange_decorator)].
-
-cons_if_eq(Select, Select, Item, List) -> [Item | List];
-cons_if_eq(_Select, _Other, _Item, List) -> List.
-
-register(TypeName, ModuleName) ->
- rabbit_registry:register(exchange_decorator, TypeName, ModuleName),
- [maybe_recover(X) || X <- rabbit_exchange:list()],
- ok.
-
-unregister(TypeName) ->
- rabbit_registry:unregister(exchange_decorator, TypeName),
- [maybe_recover(X) || X <- rabbit_exchange:list()],
- ok.
-
-maybe_recover(X = #exchange{name = Name,
- decorators = Decs}) ->
- #exchange{decorators = Decs1} = set(X),
- Old = lists:sort(select(all, Decs)),
- New = lists:sort(select(all, Decs1)),
- case New of
- Old -> ok;
- _ -> %% TODO create a tx here for non-federation decorators
- [M:create(none, X) || M <- New -- Old],
- rabbit_exchange:update_decorators(Name)
- end.
diff --git a/src/rabbit_exchange_type.erl b/src/rabbit_exchange_type.erl
deleted file mode 100644
index 92c1de6c21..0000000000
--- a/src/rabbit_exchange_type.erl
+++ /dev/null
@@ -1,81 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_exchange_type).
-
--ifdef(use_specs).
-
--type(tx() :: 'transaction' | 'none').
--type(serial() :: pos_integer() | tx()).
-
--callback description() -> [proplists:property()].
-
-%% Should Rabbit ensure that all binding events that are
-%% delivered to an individual exchange can be serialised? (they
-%% might still be delivered out of order, but there'll be a
-%% serial number).
--callback serialise_events() -> boolean().
-
-%% The no_return is there so that we can have an "invalid" exchange
-%% type (see rabbit_exchange_type_invalid).
--callback route(rabbit_types:exchange(), rabbit_types:delivery()) ->
- rabbit_router:match_result().
-
-%% called BEFORE declaration, to check args etc; may exit with #amqp_error{}
--callback validate(rabbit_types:exchange()) -> 'ok'.
-
-%% called BEFORE declaration, to check args etc
--callback validate_binding(rabbit_types:exchange(), rabbit_types:binding()) ->
- rabbit_types:ok_or_error({'binding_invalid', string(), [any()]}).
-
-%% called after declaration and recovery
--callback create(tx(), rabbit_types:exchange()) -> 'ok'.
-
-%% called after exchange (auto)deletion.
--callback delete(tx(), rabbit_types:exchange(), [rabbit_types:binding()]) ->
- 'ok'.
-
-%% called when the policy attached to this exchange changes.
--callback policy_changed(rabbit_types:exchange(), rabbit_types:exchange()) ->
- 'ok'.
-
-%% called after a binding has been added or recovered
--callback add_binding(serial(), rabbit_types:exchange(),
- rabbit_types:binding()) -> 'ok'.
-
-%% called after bindings have been deleted.
--callback remove_bindings(serial(), rabbit_types:exchange(),
- [rabbit_types:binding()]) -> 'ok'.
-
-%% called when comparing exchanges for equivalence - should return ok or
-%% exit with #amqp_error{}
--callback assert_args_equivalence(rabbit_types:exchange(),
- rabbit_framing:amqp_table()) ->
- 'ok' | rabbit_types:connection_exit().
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
- [{description, 0}, {serialise_events, 0}, {route, 2},
- {validate, 1}, {validate_binding, 2}, {policy_changed, 2},
- {create, 2}, {delete, 3}, {add_binding, 3}, {remove_bindings, 3},
- {assert_args_equivalence, 2}];
-behaviour_info(_Other) ->
- undefined.
-
--endif.
diff --git a/src/rabbit_heartbeat.erl b/src/rabbit_heartbeat.erl
deleted file mode 100644
index 993076770f..0000000000
--- a/src/rabbit_heartbeat.erl
+++ /dev/null
@@ -1,166 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_heartbeat).
-
--export([start/6, start/7]).
--export([start_heartbeat_sender/4, start_heartbeat_receiver/4,
- pause_monitor/1, resume_monitor/1]).
-
--export([system_continue/3, system_terminate/4, system_code_change/4]).
-
--include("rabbit.hrl").
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([heartbeaters/0]).
-
--type(heartbeaters() :: {rabbit_types:maybe(pid()), rabbit_types:maybe(pid())}).
-
--type(heartbeat_callback() :: fun (() -> any())).
-
--spec(start/6 ::
- (pid(), rabbit_net:socket(),
- non_neg_integer(), heartbeat_callback(),
- non_neg_integer(), heartbeat_callback()) -> heartbeaters()).
-
--spec(start/7 ::
- (pid(), rabbit_net:socket(), rabbit_types:proc_name(),
- non_neg_integer(), heartbeat_callback(),
- non_neg_integer(), heartbeat_callback()) -> heartbeaters()).
-
--spec(start_heartbeat_sender/4 ::
- (rabbit_net:socket(), non_neg_integer(), heartbeat_callback(),
- rabbit_types:proc_type_and_name()) -> rabbit_types:ok(pid())).
--spec(start_heartbeat_receiver/4 ::
- (rabbit_net:socket(), non_neg_integer(), heartbeat_callback(),
- rabbit_types:proc_type_and_name()) -> rabbit_types:ok(pid())).
-
--spec(pause_monitor/1 :: (heartbeaters()) -> 'ok').
--spec(resume_monitor/1 :: (heartbeaters()) -> 'ok').
-
--spec(system_code_change/4 :: (_,_,_,_) -> {'ok',_}).
--spec(system_continue/3 :: (_,_,{_, _}) -> any()).
--spec(system_terminate/4 :: (_,_,_,_) -> none()).
-
--endif.
-
-%%----------------------------------------------------------------------------
-start(SupPid, Sock, SendTimeoutSec, SendFun, ReceiveTimeoutSec, ReceiveFun) ->
- start(SupPid, Sock, unknown,
- SendTimeoutSec, SendFun, ReceiveTimeoutSec, ReceiveFun).
-
-start(SupPid, Sock, Identity,
- SendTimeoutSec, SendFun, ReceiveTimeoutSec, ReceiveFun) ->
- {ok, Sender} =
- start_heartbeater(SendTimeoutSec, SupPid, Sock,
- SendFun, heartbeat_sender,
- start_heartbeat_sender, Identity),
- {ok, Receiver} =
- start_heartbeater(ReceiveTimeoutSec, SupPid, Sock,
- ReceiveFun, heartbeat_receiver,
- start_heartbeat_receiver, Identity),
- {Sender, Receiver}.
-
-start_heartbeat_sender(Sock, TimeoutSec, SendFun, Identity) ->
- %% the 'div 2' is there so that we don't end up waiting for nearly
- %% 2 * TimeoutSec before sending a heartbeat in the boundary case
- %% where the last message was sent just after a heartbeat.
- heartbeater({Sock, TimeoutSec * 1000 div 2, send_oct, 0,
- fun () -> SendFun(), continue end}, Identity).
-
-start_heartbeat_receiver(Sock, TimeoutSec, ReceiveFun, Identity) ->
- %% we check for incoming data every interval, and time out after
- %% two checks with no change. As a result we will time out between
- %% 2 and 3 intervals after the last data has been received.
- heartbeater({Sock, TimeoutSec * 1000, recv_oct, 1,
- fun () -> ReceiveFun(), stop end}, Identity).
-
-pause_monitor({_Sender, none}) -> ok;
-pause_monitor({_Sender, Receiver}) -> Receiver ! pause, ok.
-
-resume_monitor({_Sender, none}) -> ok;
-resume_monitor({_Sender, Receiver}) -> Receiver ! resume, ok.
-
-system_continue(_Parent, Deb, {Params, State}) ->
- heartbeater(Params, Deb, State).
-
-system_terminate(Reason, _Parent, _Deb, _State) ->
- exit(Reason).
-
-system_code_change(Misc, _Module, _OldVsn, _Extra) ->
- {ok, Misc}.
-
-%%----------------------------------------------------------------------------
-start_heartbeater(0, _SupPid, _Sock, _TimeoutFun, _Name, _Callback,
- _Identity) ->
- {ok, none};
-start_heartbeater(TimeoutSec, SupPid, Sock, TimeoutFun, Name, Callback,
- Identity) ->
- supervisor2:start_child(
- SupPid, {Name,
- {rabbit_heartbeat, Callback,
- [Sock, TimeoutSec, TimeoutFun, {Name, Identity}]},
- transient, ?MAX_WAIT, worker, [rabbit_heartbeat]}).
-
-heartbeater(Params, Identity) ->
- Deb = sys:debug_options([]),
- {ok, proc_lib:spawn_link(fun () ->
- rabbit_misc:store_proc_name(Identity),
- heartbeater(Params, Deb, {0, 0})
- end)}.
-
-heartbeater({Sock, TimeoutMillisec, StatName, Threshold, Handler} = Params,
- Deb, {StatVal, SameCount} = State) ->
- Recurse = fun (State1) -> heartbeater(Params, Deb, State1) end,
- System = fun (From, Req) ->
- sys:handle_system_msg(
- Req, From, self(), ?MODULE, Deb, {Params, State})
- end,
- receive
- pause ->
- receive
- resume -> Recurse({0, 0});
- {system, From, Req} -> System(From, Req);
- Other -> exit({unexpected_message, Other})
- end;
- {system, From, Req} ->
- System(From, Req);
- Other ->
- exit({unexpected_message, Other})
- after TimeoutMillisec ->
- case rabbit_net:getstat(Sock, [StatName]) of
- {ok, [{StatName, NewStatVal}]} ->
- if NewStatVal =/= StatVal ->
- Recurse({NewStatVal, 0});
- SameCount < Threshold ->
- Recurse({NewStatVal, SameCount + 1});
- true ->
- case Handler() of
- stop -> ok;
- continue -> Recurse({NewStatVal, 0})
- end
- end;
- {error, einval} ->
- %% the socket is dead, most likely because the
- %% connection is being shut down -> terminate
- ok;
- {error, Reason} ->
- exit({cannot_get_socket_stats, Reason})
- end
- end.
diff --git a/src/rabbit_hipe.erl b/src/rabbit_hipe.erl
new file mode 100644
index 0000000000..0302d82839
--- /dev/null
+++ b/src/rabbit_hipe.erl
@@ -0,0 +1,98 @@
+-module(rabbit_hipe).
+
+%% HiPE compilation uses multiple cores anyway, but some bits are
+%% IO-bound so we can go faster if we parallelise a bit more. In
+%% practice 2 processes seems just as fast as any other number > 1,
+%% and keeps the progress bar realistic-ish.
+-define(HIPE_PROCESSES, 2).
+-export([maybe_hipe_compile/0, log_hipe_result/1]).
+
+%% HiPE compilation happens before we have log handlers - so we have
+%% to io:format/2, it's all we can do.
+
+maybe_hipe_compile() ->
+ {ok, Want} = application:get_env(rabbit, hipe_compile),
+ Can = code:which(hipe) =/= non_existing,
+ case {Want, Can} of
+ {true, true} -> hipe_compile();
+ {true, false} -> false;
+ {false, _} -> {ok, disabled}
+ end.
+
+log_hipe_result({ok, disabled}) ->
+ ok;
+log_hipe_result({ok, already_compiled}) ->
+ rabbit_log:info(
+ "HiPE in use: modules already natively compiled.~n", []);
+log_hipe_result({ok, Count, Duration}) ->
+ rabbit_log:info(
+ "HiPE in use: compiled ~B modules in ~Bs.~n", [Count, Duration]);
+log_hipe_result(false) ->
+ io:format(
+ "~nNot HiPE compiling: HiPE not found in this Erlang installation.~n"),
+ rabbit_log:warning(
+ "Not HiPE compiling: HiPE not found in this Erlang installation.~n").
+
+%% HiPE compilation happens before we have log handlers and can take a
+%% long time, so make an exception to our no-stdout policy and display
+%% progress via stdout.
+hipe_compile() ->
+ {ok, HipeModulesAll} = application:get_env(rabbit, hipe_modules),
+ HipeModules = [HM || HM <- HipeModulesAll,
+ code:which(HM) =/= non_existing andalso
+ %% We skip modules already natively compiled. This
+ %% happens when RabbitMQ is stopped (just the
+ %% application, not the entire node) and started
+ %% again.
+ already_hipe_compiled(HM)],
+ case HipeModules of
+ [] -> {ok, already_compiled};
+ _ -> do_hipe_compile(HipeModules)
+ end.
+
+already_hipe_compiled(Mod) ->
+ try
+ %% OTP 18.x or later
+ Mod:module_info(native) =:= false
+ %% OTP prior to 18.x
+ catch error:badarg ->
+ code:is_module_native(Mod) =:= false
+ end.
+
+do_hipe_compile(HipeModules) ->
+ Count = length(HipeModules),
+ io:format("~nHiPE compiling: |~s|~n |",
+ [string:copies("-", Count)]),
+ T1 = time_compat:monotonic_time(),
+ %% We use code:get_object_code/1 below to get the beam binary,
+ %% instead of letting hipe get it itself, because hipe:c/{1,2}
+ %% expects the given filename to actually exist on disk: it does not
+ %% work with an EZ archive (rabbit_common is one).
+ %%
+%% Then we use the more advanced hipe:compile/4 API because the
+ %% simpler hipe:c/3 is not exported (as of Erlang 18.1.4). This
+ %% advanced API does not load automatically the code, except if the
+ %% 'load' option is set.
+ PidMRefs = [spawn_monitor(fun () -> [begin
+ {M, Beam, _} =
+ code:get_object_code(M),
+ {ok, _} =
+ hipe:compile(M, [], Beam,
+ [o3, load]),
+ io:format("#")
+ end || M <- Ms]
+ end) ||
+ Ms <- split(HipeModules, ?HIPE_PROCESSES)],
+ [receive
+ {'DOWN', MRef, process, _, normal} -> ok;
+ {'DOWN', MRef, process, _, Reason} -> exit(Reason)
+ end || {_Pid, MRef} <- PidMRefs],
+ T2 = time_compat:monotonic_time(),
+ Duration = time_compat:convert_time_unit(T2 - T1, native, seconds),
+ io:format("|~n~nCompiled ~B modules in ~Bs~n", [Count, Duration]),
+ {ok, Count, Duration}.
+
+split(L, N) -> split0(L, [[] || _ <- lists:seq(1, N)]).
+
+split0([], Ls) -> Ls;
+split0([I | Is], [L | Ls]) -> split0(Is, Ls ++ [[I | L]]).
diff --git a/src/rabbit_memory_monitor.erl b/src/rabbit_memory_monitor.erl
index 7aa29fc423..380d950d05 100644
--- a/src/rabbit_memory_monitor.erl
+++ b/src/rabbit_memory_monitor.erl
@@ -41,7 +41,6 @@
}).
-define(SERVER, ?MODULE).
--define(DEFAULT_UPDATE_INTERVAL, 2500).
-define(TABLE_NAME, ?MODULE).
%% If all queues are pushed to disk (duration 0), then the sum of
@@ -87,7 +86,9 @@ report_ram_duration(Pid, QueueDuration) ->
stop() ->
gen_server2:cast(?SERVER, stop).
-conserve_resources(Pid, disk, Conserve) ->
+%% Paging should be enabled/disabled only in response to disk resource alarms
+%% for the current node.
+conserve_resources(Pid, disk, {_, Conserve, Node}) when node(Pid) =:= Node ->
gen_server2:cast(Pid, {disk_alarm, Conserve});
conserve_resources(_Pid, _Source, _Conserve) ->
ok.
@@ -110,7 +111,8 @@ memory_use(ratio) ->
%%----------------------------------------------------------------------------
init([]) ->
- {ok, TRef} = timer:send_interval(?DEFAULT_UPDATE_INTERVAL, update),
+ {ok, Interval} = application:get_env(rabbit, memory_monitor_interval),
+ {ok, TRef} = timer:send_interval(Interval, update),
Ets = ets:new(?TABLE_NAME, [set, private, {keypos, #process.pid}]),
Alarms = rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}),
diff --git a/src/rabbit_mirror_queue_master.erl b/src/rabbit_mirror_queue_master.erl
index 7890128872..4556f72e78 100644
--- a/src/rabbit_mirror_queue_master.erl
+++ b/src/rabbit_mirror_queue_master.erl
@@ -18,11 +18,13 @@
-export([init/3, terminate/2, delete_and_terminate/2,
purge/1, purge_acks/1, publish/6, publish_delivered/5,
+ batch_publish/4, batch_publish_delivered/4,
discard/4, fetch/2, drop/2, ack/2, requeue/2, ackfold/4, fold/3,
len/1, is_empty/1, depth/1, drain_confirmed/1,
dropwhile/2, fetchwhile/4, set_ram_duration_target/2, ram_duration/1,
needs_timeout/1, timeout/1, handle_pre_hibernate/1, resume/1,
- msg_rates/1, info/2, invoke/3, is_duplicate/2]).
+ msg_rates/1, info/2, invoke/3, is_duplicate/2, set_queue_mode/2,
+ zip_msgs_and_acks/4]).
-export([start/1, stop/0, delete_crashed/1]).
@@ -147,13 +149,15 @@ sync_mirrors(HandleInfo, EmitStats,
QName, "Synchronising: " ++ Fmt ++ "~n", Params)
end,
Log("~p messages to synchronise", [BQ:len(BQS)]),
- {ok, #amqqueue{slave_pids = SPids}} = rabbit_amqqueue:lookup(QName),
+ {ok, #amqqueue{slave_pids = SPids} = Q} = rabbit_amqqueue:lookup(QName),
+ SyncBatchSize = rabbit_mirror_queue_misc:sync_batch_size(Q),
+ Log("batch size: ~p", [SyncBatchSize]),
Ref = make_ref(),
Syncer = rabbit_mirror_queue_sync:master_prepare(Ref, QName, Log, SPids),
gm:broadcast(GM, {sync_start, Ref, Syncer, SPids}),
S = fun(BQSN) -> State#state{backing_queue_state = BQSN} end,
case rabbit_mirror_queue_sync:master_go(
- Syncer, Ref, Log, HandleInfo, EmitStats, BQ, BQS) of
+ Syncer, Ref, Log, HandleInfo, EmitStats, SyncBatchSize, BQ, BQS) of
{shutdown, R, BQS1} -> {stop, R, S(BQS1)};
{sync_died, R, BQS1} -> Log("~p", [R]),
{ok, S(BQS1)};
@@ -241,6 +245,27 @@ publish(Msg = #basic_message { id = MsgId }, MsgProps, IsDelivered, ChPid, Flow,
BQS1 = BQ:publish(Msg, MsgProps, IsDelivered, ChPid, Flow, BQS),
ensure_monitoring(ChPid, State #state { backing_queue_state = BQS1 }).
+batch_publish(Publishes, ChPid, Flow,
+ State = #state { gm = GM,
+ seen_status = SS,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {Publishes1, false, MsgSizes} =
+ lists:foldl(fun ({Msg = #basic_message { id = MsgId },
+ MsgProps, _IsDelivered}, {Pubs, false, Sizes}) ->
+ {[{Msg, MsgProps, true} | Pubs], %% [0]
+ false = dict:is_key(MsgId, SS), %% ASSERTION
+ Sizes + rabbit_basic:msg_size(Msg)}
+ end, {[], false, 0}, Publishes),
+ Publishes2 = lists:reverse(Publishes1),
+ ok = gm:broadcast(GM, {batch_publish, ChPid, Flow, Publishes2},
+ MsgSizes),
+ BQS1 = BQ:batch_publish(Publishes2, ChPid, Flow, BQS),
+ ensure_monitoring(ChPid, State #state { backing_queue_state = BQS1 }).
+%% [0] When the slave process handles the publish command, it sets the
+%% IsDelivered flag to true, so to avoid iterating over the messages
+%% again at the slave, we do it here.
+
publish_delivered(Msg = #basic_message { id = MsgId }, MsgProps,
ChPid, Flow, State = #state { gm = GM,
seen_status = SS,
@@ -253,6 +278,23 @@ publish_delivered(Msg = #basic_message { id = MsgId }, MsgProps,
State1 = State #state { backing_queue_state = BQS1 },
{AckTag, ensure_monitoring(ChPid, State1)}.
+batch_publish_delivered(Publishes, ChPid, Flow,
+ State = #state { gm = GM,
+ seen_status = SS,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ {false, MsgSizes} =
+ lists:foldl(fun ({Msg = #basic_message { id = MsgId }, _MsgProps},
+ {false, Sizes}) ->
+ {false = dict:is_key(MsgId, SS), %% ASSERTION
+ Sizes + rabbit_basic:msg_size(Msg)}
+ end, {false, 0}, Publishes),
+ ok = gm:broadcast(GM, {batch_publish_delivered, ChPid, Flow, Publishes},
+ MsgSizes),
+ {AckTags, BQS1} = BQ:batch_publish_delivered(Publishes, ChPid, Flow, BQS),
+ State1 = State #state { backing_queue_state = BQS1 },
+ {AckTags, ensure_monitoring(ChPid, State1)}.
+
discard(MsgId, ChPid, Flow, State = #state { gm = GM,
backing_queue = BQ,
backing_queue_state = BQS,
@@ -444,6 +486,18 @@ is_duplicate(Message = #basic_message { id = MsgId },
confirmed = [MsgId | Confirmed] }}
end.
+set_queue_mode(Mode, State = #state { gm = GM,
+ backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ ok = gm:broadcast(GM, {set_queue_mode, Mode}),
+ BQS1 = BQ:set_queue_mode(Mode, BQS),
+ State #state { backing_queue_state = BQS1 }.
+
+zip_msgs_and_acks(Msgs, AckTags, Accumulator,
+ #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ BQ:zip_msgs_and_acks(Msgs, AckTags, Accumulator, BQS).
+
%% ---------------------------------------------------------------------------
%% Other exported functions
%% ---------------------------------------------------------------------------
diff --git a/src/rabbit_mirror_queue_misc.erl b/src/rabbit_mirror_queue_misc.erl
index 2ef1c73a75..849efa3611 100644
--- a/src/rabbit_mirror_queue_misc.erl
+++ b/src/rabbit_mirror_queue_misc.erl
@@ -22,7 +22,7 @@
initial_queue_node/2, suggested_queue_nodes/1,
is_mirrored/1, update_mirrors/2, validate_policy/1,
maybe_auto_sync/1, maybe_drop_master_after_sync/1,
- log_info/3, log_warning/3]).
+ sync_batch_size/1, log_info/3, log_warning/3]).
%% for testing only
-export([module/1]).
@@ -39,10 +39,13 @@
{mfa, {rabbit_registry, register,
[policy_validator, <<"ha-sync-mode">>, ?MODULE]}},
{mfa, {rabbit_registry, register,
+ [policy_validator, <<"ha-sync-batch-size">>, ?MODULE]}},
+ {mfa, {rabbit_registry, register,
[policy_validator, <<"ha-promote-on-shutdown">>, ?MODULE]}},
{requires, rabbit_registry},
{enables, recovery}]}).
+
%%----------------------------------------------------------------------------
-ifdef(use_specs).
@@ -330,6 +333,14 @@ module(Mode) when is_binary(Mode) ->
end
end.
+validate_mode(Mode) ->
+ case module(Mode) of
+ {ok, _Module} ->
+ ok;
+ not_mirrored ->
+ {error, "~p is not a valid ha-mode value", [Mode]}
+ end.
+
is_mirrored(Q) ->
case module(Q) of
{ok, _} -> true;
@@ -353,6 +364,22 @@ maybe_auto_sync(Q = #amqqueue{pid = QPid}) ->
ok
end.
+sync_batch_size(#amqqueue{} = Q) ->
+ case policy(<<"ha-sync-batch-size">>, Q) of
+ none -> %% we need this case because none > 1 == true
+ default_batch_size();
+ BatchSize when BatchSize > 1 ->
+ BatchSize;
+ _ ->
+ default_batch_size()
+ end.
+
+-define(DEFAULT_BATCH_SIZE, 4096).
+
+default_batch_size() ->
+ rabbit_misc:get_env(rabbit, mirroring_sync_batch_size,
+ ?DEFAULT_BATCH_SIZE).
+
update_mirrors(OldQ = #amqqueue{pid = QPid},
NewQ = #amqqueue{pid = QPid}) ->
case {is_mirrored(OldQ), is_mirrored(NewQ)} of
@@ -408,25 +435,37 @@ validate_policy(KeyList) ->
Mode = proplists:get_value(<<"ha-mode">>, KeyList, none),
Params = proplists:get_value(<<"ha-params">>, KeyList, none),
SyncMode = proplists:get_value(<<"ha-sync-mode">>, KeyList, none),
+ SyncBatchSize = proplists:get_value(
+ <<"ha-sync-batch-size">>, KeyList, none),
PromoteOnShutdown = proplists:get_value(
<<"ha-promote-on-shutdown">>, KeyList, none),
- case {Mode, Params, SyncMode, PromoteOnShutdown} of
- {none, none, none, none} ->
+ case {Mode, Params, SyncMode, SyncBatchSize, PromoteOnShutdown} of
+ {none, none, none, none, none} ->
ok;
- {none, _, _, _} ->
+ {none, _, _, _, _} ->
{error, "ha-mode must be specified to specify ha-params, "
"ha-sync-mode or ha-promote-on-shutdown", []};
_ ->
- case module(Mode) of
- {ok, M} -> case M:validate_policy(Params) of
- ok -> case validate_sync_mode(SyncMode) of
- ok -> validate_pos(PromoteOnShutdown);
- E -> E
- end;
- E -> E
- end;
- _ -> {error, "~p is not a valid ha-mode value", [Mode]}
- end
+ validate_policies(
+ [{Mode, fun validate_mode/1},
+ {Params, ha_params_validator(Mode)},
+ {SyncMode, fun validate_sync_mode/1},
+ {SyncBatchSize, fun validate_sync_batch_size/1},
+ {PromoteOnShutdown, fun validate_pos/1}])
+ end.
+
+ha_params_validator(Mode) ->
+ fun(Val) ->
+ {ok, M} = module(Mode),
+ M:validate_policy(Val)
+ end.
+
+validate_policies([]) ->
+ ok;
+validate_policies([{Val, Validator} | Rest]) ->
+ case Validator(Val) of
+ ok -> validate_policies(Rest);
+ E -> E
end.
validate_sync_mode(SyncMode) ->
@@ -438,6 +477,14 @@ validate_sync_mode(SyncMode) ->
"or \"automatic\", got ~p", [Mode]}
end.
+validate_sync_batch_size(none) ->
+ ok;
+validate_sync_batch_size(N) when is_integer(N) andalso N > 0 ->
+ ok;
+validate_sync_batch_size(N) ->
+ {error, "ha-sync-batch-size takes an integer greater than 0, "
+ "~p given", [N]}.
+
validate_pos(PromoteOnShutdown) ->
case PromoteOnShutdown of
<<"always">> -> ok;
diff --git a/src/rabbit_mirror_queue_mode_exactly.erl b/src/rabbit_mirror_queue_mode_exactly.erl
index 0c0b7a10e8..4721ad6136 100644
--- a/src/rabbit_mirror_queue_mode_exactly.erl
+++ b/src/rabbit_mirror_queue_mode_exactly.erl
@@ -45,8 +45,9 @@ suggested_queue_nodes(Count, MNode, SNodes, _SSNodes, Poss) ->
end}.
shuffle(L) ->
- {A1,A2,A3} = now(),
- random:seed(A1, A2, A3),
+ random:seed(erlang:phash2([node()]),
+ time_compat:monotonic_time(),
+ time_compat:unique_integer()),
{_, L1} = lists:unzip(lists:keysort(1, [{random:uniform(), N} || N <- L])),
L1.
diff --git a/src/rabbit_mirror_queue_slave.erl b/src/rabbit_mirror_queue_slave.erl
index 7f309ab0b7..225c21dd54 100644
--- a/src/rabbit_mirror_queue_slave.erl
+++ b/src/rabbit_mirror_queue_slave.erl
@@ -256,13 +256,10 @@ handle_cast({gm, Instruction}, State) ->
handle_cast({deliver, Delivery = #delivery{sender = Sender, flow = Flow}, true},
State) ->
%% Asynchronous, non-"mandatory", deliver mode.
- case Flow of
- %% We are acking messages to the channel process that sent us
- %% the message delivery. See
- %% rabbit_amqqueue_process:handle_ch_down for more info.
- flow -> credit_flow:ack(Sender);
- noflow -> ok
- end,
+ %% We are acking messages to the channel process that sent us
+ %% the message delivery. See
+ %% rabbit_amqqueue_process:handle_ch_down for more info.
+ maybe_flow_ack(Sender, Flow),
noreply(maybe_enqueue_message(Delivery, State));
handle_cast({sync_start, Ref, Syncer},
@@ -658,10 +655,7 @@ promote_me(From, #state { q = Q = #amqqueue { name = QName },
%% need to send an ack for these messages since the channel is waiting
%% for one for the via-GM case and we will not now receive one.
promote_delivery(Delivery = #delivery{sender = Sender, flow = Flow}) ->
- case Flow of
- flow -> credit_flow:ack(Sender);
- noflow -> ok
- end,
+ maybe_flow_ack(Sender, Flow),
Delivery#delivery{mandatory = false}.
noreply(State) ->
@@ -851,6 +845,15 @@ process_instruction({publish, ChPid, Flow, MsgProps,
publish_or_discard(published, ChPid, MsgId, State),
BQS1 = BQ:publish(Msg, MsgProps, true, ChPid, Flow, BQS),
{ok, State1 #state { backing_queue_state = BQS1 }};
+process_instruction({batch_publish, ChPid, Flow, Publishes}, State) ->
+ maybe_flow_ack(ChPid, Flow),
+ State1 = #state { backing_queue = BQ, backing_queue_state = BQS } =
+ lists:foldl(fun ({#basic_message { id = MsgId },
+ _MsgProps, _IsDelivered}, St) ->
+ publish_or_discard(published, ChPid, MsgId, St)
+ end, State, Publishes),
+ BQS1 = BQ:batch_publish(Publishes, ChPid, Flow, BQS),
+ {ok, State1 #state { backing_queue_state = BQS1 }};
process_instruction({publish_delivered, ChPid, Flow, MsgProps,
Msg = #basic_message { id = MsgId }}, State) ->
maybe_flow_ack(ChPid, Flow),
@@ -860,6 +863,24 @@ process_instruction({publish_delivered, ChPid, Flow, MsgProps,
{AckTag, BQS1} = BQ:publish_delivered(Msg, MsgProps, ChPid, Flow, BQS),
{ok, maybe_store_ack(true, MsgId, AckTag,
State1 #state { backing_queue_state = BQS1 })};
+process_instruction({batch_publish_delivered, ChPid, Flow, Publishes}, State) ->
+ maybe_flow_ack(ChPid, Flow),
+ {MsgIds,
+ State1 = #state { backing_queue = BQ, backing_queue_state = BQS }} =
+ lists:foldl(fun ({#basic_message { id = MsgId }, _MsgProps},
+ {MsgIds, St}) ->
+ {[MsgId | MsgIds],
+ publish_or_discard(published, ChPid, MsgId, St)}
+ end, {[], State}, Publishes),
+ true = BQ:is_empty(BQS),
+ {AckTags, BQS1} = BQ:batch_publish_delivered(Publishes, ChPid, Flow, BQS),
+ MsgIdsAndAcks = lists:zip(lists:reverse(MsgIds), AckTags),
+ State2 = lists:foldl(
+ fun ({MsgId, AckTag}, St) ->
+ maybe_store_ack(true, MsgId, AckTag, St)
+ end, State1 #state { backing_queue_state = BQS1 },
+ MsgIdsAndAcks),
+ {ok, State2};
process_instruction({discard, ChPid, Flow, MsgId}, State) ->
maybe_flow_ack(ChPid, Flow),
State1 = #state { backing_queue = BQ, backing_queue_state = BQS } =
@@ -921,10 +942,15 @@ process_instruction({delete_and_terminate, Reason},
State = #state { backing_queue = BQ,
backing_queue_state = BQS }) ->
BQ:delete_and_terminate(Reason, BQS),
- {stop, State #state { backing_queue_state = undefined }}.
+ {stop, State #state { backing_queue_state = undefined }};
+process_instruction({set_queue_mode, Mode},
+ State = #state { backing_queue = BQ,
+ backing_queue_state = BQS }) ->
+ BQS1 = BQ:set_queue_mode(Mode, BQS),
+ {ok, State #state { backing_queue_state = BQS1 }}.
-maybe_flow_ack(ChPid, flow) -> credit_flow:ack(ChPid);
-maybe_flow_ack(_ChPid, noflow) -> ok.
+maybe_flow_ack(Sender, flow) -> credit_flow:ack(Sender);
+maybe_flow_ack(_Sender, noflow) -> ok.
msg_ids_to_acktags(MsgIds, MA) ->
{AckTags, MA1} =
diff --git a/src/rabbit_mirror_queue_sync.erl b/src/rabbit_mirror_queue_sync.erl
index 9a8d55f94b..2d8bdfa860 100644
--- a/src/rabbit_mirror_queue_sync.erl
+++ b/src/rabbit_mirror_queue_sync.erl
@@ -18,7 +18,7 @@
-include("rabbit.hrl").
--export([master_prepare/4, master_go/7, slave/7]).
+-export([master_prepare/4, master_go/8, slave/7]).
-define(SYNC_PROGRESS_INTERVAL, 1000000).
@@ -45,7 +45,7 @@
%% || <--- ready ---- || ||
%% || <--- next* ---- || || }
%% || ---- msg* ----> || || } loop
-%% || || ---- sync_msg* ----> || }
+%% || || ---- sync_msgs* ---> || }
%% || || <--- (credit)* ----- || }
%% || <--- next ---- || ||
%% || ---- done ----> || ||
@@ -63,9 +63,10 @@
-spec(master_prepare/4 :: (reference(), rabbit_amqqueue:name(),
log_fun(), [pid()]) -> pid()).
--spec(master_go/7 :: (pid(), reference(), log_fun(),
+-spec(master_go/8 :: (pid(), reference(), log_fun(),
rabbit_mirror_queue_master:stats_fun(),
rabbit_mirror_queue_master:stats_fun(),
+ non_neg_integer(),
bq(), bqs()) ->
{'already_synced', bqs()} | {'ok', bqs()} |
{'shutdown', any(), bqs()} |
@@ -88,46 +89,65 @@ master_prepare(Ref, QName, Log, SPids) ->
syncer(Ref, Log, MPid, SPids)
end).
-master_go(Syncer, Ref, Log, HandleInfo, EmitStats, BQ, BQS) ->
+master_go(Syncer, Ref, Log, HandleInfo, EmitStats, SyncBatchSize, BQ, BQS) ->
Args = {Syncer, Ref, Log, HandleInfo, EmitStats, rabbit_misc:get_parent()},
receive
{'EXIT', Syncer, normal} -> {already_synced, BQS};
{'EXIT', Syncer, Reason} -> {sync_died, Reason, BQS};
{ready, Syncer} -> EmitStats({syncing, 0}),
- master_go0(Args, BQ, BQS)
+ master_batch_go0(Args, SyncBatchSize,
+ BQ, BQS)
end.
-master_go0(Args, BQ, BQS) ->
- case BQ:fold(fun (Msg, MsgProps, Unacked, Acc) ->
- master_send(Msg, MsgProps, Unacked, Args, Acc)
- end, {0, erlang:now()}, BQS) of
+master_batch_go0(Args, BatchSize, BQ, BQS) ->
+ FoldFun =
+ fun (Msg, MsgProps, Unacked, Acc) ->
+ Acc1 = append_to_acc(Msg, MsgProps, Unacked, Acc),
+ case maybe_master_batch_send(Acc1, BatchSize) of
+ true -> master_batch_send(Args, Acc1);
+ false -> {cont, Acc1}
+ end
+ end,
+ FoldAcc = {[], 0, {0, BQ:depth(BQS)}, time_compat:monotonic_time()},
+ bq_fold(FoldFun, FoldAcc, Args, BQ, BQS).
+
+master_batch_send({Syncer, Ref, Log, HandleInfo, EmitStats, Parent},
+ {Batch, I, {Curr, Len}, Last}) ->
+ T = maybe_emit_stats(Last, I, EmitStats, Log),
+ HandleInfo({syncing, I}),
+ handle_set_maximum_since_use(),
+ SyncMsg = {msgs, Ref, lists:reverse(Batch)},
+ NewAcc = {[], I + length(Batch), {Curr, Len}, T},
+ master_send_receive(SyncMsg, NewAcc, Syncer, Ref, Parent).
+
+%% Either send messages when we reach the last one in the queue or
+%% whenever we have accumulated BatchSize messages.
+maybe_master_batch_send({_, _, {Len, Len}, _}, _BatchSize) ->
+ true;
+maybe_master_batch_send({_, _, {Curr, _Len}, _}, BatchSize)
+ when Curr rem BatchSize =:= 0 ->
+ true;
+maybe_master_batch_send(_Acc, _BatchSize) ->
+ false.
+
+bq_fold(FoldFun, FoldAcc, Args, BQ, BQS) ->
+ case BQ:fold(FoldFun, FoldAcc, BQS) of
{{shutdown, Reason}, BQS1} -> {shutdown, Reason, BQS1};
{{sync_died, Reason}, BQS1} -> {sync_died, Reason, BQS1};
{_, BQS1} -> master_done(Args, BQS1)
end.
-master_send(Msg, MsgProps, Unacked,
- {Syncer, Ref, Log, HandleInfo, EmitStats, Parent}, {I, Last}) ->
- T = case timer:now_diff(erlang:now(), Last) > ?SYNC_PROGRESS_INTERVAL of
- true -> EmitStats({syncing, I}),
- Log("~p messages", [I]),
- erlang:now();
- false -> Last
- end,
- HandleInfo({syncing, I}),
- receive
- {'$gen_cast', {set_maximum_since_use, Age}} ->
- ok = file_handle_cache:set_maximum_since_use(Age)
- after 0 ->
- ok
- end,
+append_to_acc(Msg, MsgProps, Unacked, {Batch, I, {Curr, Len}, T}) ->
+ {[{Msg, MsgProps, Unacked} | Batch], I, {Curr + 1, Len}, T}.
+
+master_send_receive(SyncMsg, NewAcc, Syncer, Ref, Parent) ->
receive
{'$gen_call', From,
cancel_sync_mirrors} -> stop_syncer(Syncer, {cancel, Ref}),
gen_server2:reply(From, ok),
{stop, cancelled};
- {next, Ref} -> Syncer ! {msg, Ref, Msg, MsgProps, Unacked},
- {cont, {I + 1, T}};
+ {next, Ref} -> Syncer ! SyncMsg,
+ {cont, NewAcc};
{'EXIT', Parent, Reason} -> {stop, {shutdown, Reason}};
{'EXIT', Syncer, Reason} -> {stop, {sync_died, Reason}}
end.
@@ -147,6 +167,24 @@ stop_syncer(Syncer, Msg) ->
after 0 -> ok
end.
+maybe_emit_stats(Last, I, EmitStats, Log) ->
+ Interval = time_compat:convert_time_unit(
+ time_compat:monotonic_time() - Last, native, micro_seconds),
+ case Interval > ?SYNC_PROGRESS_INTERVAL of
+ true -> EmitStats({syncing, I}),
+ Log("~p messages", [I]),
+ time_compat:monotonic_time();
+ false -> Last
+ end.
+
+handle_set_maximum_since_use() ->
+ receive
+ {'$gen_cast', {set_maximum_since_use, Age}} ->
+ ok = file_handle_cache:set_maximum_since_use(Age)
+ after 0 ->
+ ok
+ end.
+
%% Master
%% ---------------------------------------------------------------------------
%% Syncer
@@ -182,12 +220,9 @@ await_slaves(Ref, SPids) ->
syncer_loop(Ref, MPid, SPids) ->
MPid ! {next, Ref},
receive
- {msg, Ref, Msg, MsgProps, Unacked} ->
+ {msgs, Ref, Msgs} ->
SPids1 = wait_for_credit(SPids),
- [begin
- credit_flow:send(SPid),
- SPid ! {sync_msg, Ref, Msg, MsgProps, Unacked}
- end || SPid <- SPids1],
+ broadcast(SPids1, {sync_msgs, Ref, Msgs}),
syncer_loop(Ref, MPid, SPids1);
{cancel, Ref} ->
%% We don't tell the slaves we will die - so when we do
@@ -198,6 +233,12 @@ syncer_loop(Ref, MPid, SPids) ->
[SPid ! {sync_complete, Ref} || SPid <- SPids]
end.
+broadcast(SPids, Msg) ->
+ [begin
+ credit_flow:send(SPid),
+ SPid ! Msg
+ end || SPid <- SPids].
+
wait_for_credit(SPids) ->
case credit_flow:blocked() of
true -> receive
@@ -258,17 +299,9 @@ slave_sync_loop(Args = {Ref, MRef, Syncer, BQ, UpdateRamDuration, Parent},
update_ram_duration ->
{TRef1, BQS1} = UpdateRamDuration(BQ, BQS),
slave_sync_loop(Args, {MA, TRef1, BQS1});
- {sync_msg, Ref, Msg, Props, Unacked} ->
+ {sync_msgs, Ref, Batch} ->
credit_flow:ack(Syncer),
- Props1 = Props#message_properties{needs_confirming = false},
- {MA1, BQS1} =
- case Unacked of
- false -> {MA,
- BQ:publish(Msg, Props1, true, none, noflow, BQS)};
- true -> {AckTag, BQS2} = BQ:publish_delivered(
- Msg, Props1, none, noflow, BQS),
- {[{Msg#basic_message.id, AckTag} | MA], BQS2}
- end,
+ {MA1, BQS1} = process_batch(Batch, MA, BQ, BQS),
slave_sync_loop(Args, {MA1, TRef, BQS1});
{'EXIT', Parent, Reason} ->
{stop, Reason, State};
@@ -277,3 +310,52 @@ slave_sync_loop(Args = {Ref, MRef, Syncer, BQ, UpdateRamDuration, Parent},
BQ:delete_and_terminate(Reason, BQS),
{stop, Reason, {[], TRef, undefined}}
end.
+
+%% We are partitioning messages by the Unacked element in the tuple.
+%% when unacked = true, then it's a publish_delivered message,
+%% otherwise it's a publish message.
+%%
+%% Note that we can't first partition the batch and then publish each
+%% part, since that would result in re-ordering messages, which we
+%% don't want to do.
+process_batch([], MA, _BQ, BQS) ->
+ {MA, BQS};
+process_batch(Batch, MA, BQ, BQS) ->
+ {_Msg, _MsgProps, Unacked} = hd(Batch),
+ process_batch(Batch, Unacked, [], MA, BQ, BQS).
+
+process_batch([{Msg, Props, true = Unacked} | Rest], true = Unacked,
+ Acc, MA, BQ, BQS) ->
+ %% publish_delivered messages don't need the IsDelivered flag,
+ %% therefore we just add {Msg, Props} to the accumulator.
+ process_batch(Rest, Unacked, [{Msg, props(Props)} | Acc],
+ MA, BQ, BQS);
+process_batch([{Msg, Props, false = Unacked} | Rest], false = Unacked,
+ Acc, MA, BQ, BQS) ->
+ %% publish messages needs the IsDelivered flag which is set to true
+ %% here.
+ process_batch(Rest, Unacked, [{Msg, props(Props), true} | Acc],
+ MA, BQ, BQS);
+process_batch(Batch, Unacked, Acc, MA, BQ, BQS) ->
+ {MA1, BQS1} = publish_batch(Unacked, lists:reverse(Acc), MA, BQ, BQS),
+ process_batch(Batch, MA1, BQ, BQS1).
+
+%% Unacked msgs are published via batch_publish.
+publish_batch(false, Batch, MA, BQ, BQS) ->
+ batch_publish(Batch, MA, BQ, BQS);
+%% Acked msgs are published via batch_publish_delivered.
+publish_batch(true, Batch, MA, BQ, BQS) ->
+ batch_publish_delivered(Batch, MA, BQ, BQS).
+
+
+batch_publish(Batch, MA, BQ, BQS) ->
+ BQS1 = BQ:batch_publish(Batch, none, noflow, BQS),
+ {MA, BQS1}.
+
+batch_publish_delivered(Batch, MA, BQ, BQS) ->
+ {AckTags, BQS1} = BQ:batch_publish_delivered(Batch, none, noflow, BQS),
+ MA1 = BQ:zip_msgs_and_acks(Batch, AckTags, MA, BQS1),
+ {MA1, BQS1}.
+
+props(Props) ->
+ Props#message_properties{needs_confirming = false}.
diff --git a/src/rabbit_misc.erl b/src/rabbit_misc.erl
deleted file mode 100644
index ed5b38e815..0000000000
--- a/src/rabbit_misc.erl
+++ /dev/null
@@ -1,1159 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_misc).
--include("rabbit.hrl").
--include("rabbit_framing.hrl").
-
--export([method_record_type/1, polite_pause/0, polite_pause/1]).
--export([die/1, frame_error/2, amqp_error/4, quit/1,
- protocol_error/3, protocol_error/4, protocol_error/1]).
--export([not_found/1, absent/2]).
--export([type_class/1, assert_args_equivalence/4, assert_field_equivalence/4]).
--export([dirty_read/1]).
--export([table_lookup/2, set_table_value/4]).
--export([r/3, r/2, r_arg/4, rs/1]).
--export([enable_cover/0, report_cover/0]).
--export([enable_cover/1, report_cover/1]).
--export([start_cover/1]).
--export([confirm_to_sender/2]).
--export([throw_on_error/2, with_exit_handler/2, is_abnormal_exit/1,
- filter_exit_map/2]).
--export([with_user/2, with_user_and_vhost/3]).
--export([execute_mnesia_transaction/1]).
--export([execute_mnesia_transaction/2]).
--export([execute_mnesia_tx_with_tail/1]).
--export([ensure_ok/2]).
--export([tcp_name/3, format_inet_error/1]).
--export([upmap/2, map_in_order/2]).
--export([table_filter/3]).
--export([dirty_read_all/1, dirty_foreach_key/2, dirty_dump_log/1]).
--export([format/2, format_many/1, format_stderr/2]).
--export([unfold/2, ceil/1, queue_fold/3]).
--export([sort_field_table/1]).
--export([pid_to_string/1, string_to_pid/1,
- pid_change_node/2, node_to_fake_pid/1]).
--export([version_compare/2, version_compare/3]).
--export([version_minor_equivalent/2]).
--export([dict_cons/3, orddict_cons/3, gb_trees_cons/3]).
--export([gb_trees_fold/3, gb_trees_foreach/2]).
--export([all_module_attributes/1, build_acyclic_graph/3]).
--export([now_ms/0]).
--export([const/1]).
--export([ntoa/1, ntoab/1]).
--export([is_process_alive/1]).
--export([pget/2, pget/3, pget_or_die/2, pset/3]).
--export([format_message_queue/2]).
--export([append_rpc_all_nodes/4]).
--export([os_cmd/1]).
--export([is_os_process_alive/1]).
--export([gb_sets_difference/2]).
--export([version/0, otp_release/0, which_applications/0]).
--export([sequence_error/1]).
--export([json_encode/1, json_decode/1, json_to_term/1, term_to_json/1]).
--export([check_expiry/1]).
--export([base64url/1]).
--export([interval_operation/4]).
--export([ensure_timer/4, stop_timer/2, send_after/3, cancel_timer/1]).
--export([get_parent/0]).
--export([store_proc_name/1, store_proc_name/2]).
--export([moving_average/4]).
--export([now_to_ms/1]).
--export([get_env/3]).
-
-%% Horrible macro to use in guards
--define(IS_BENIGN_EXIT(R),
- R =:= noproc; R =:= noconnection; R =:= nodedown; R =:= normal;
- R =:= shutdown).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([resource_name/0, thunk/1, channel_or_connection_exit/0]).
-
--type(ok_or_error() :: rabbit_types:ok_or_error(any())).
--type(thunk(T) :: fun(() -> T)).
--type(resource_name() :: binary()).
--type(channel_or_connection_exit()
- :: rabbit_types:channel_exit() | rabbit_types:connection_exit()).
--type(digraph_label() :: term()).
--type(graph_vertex_fun() ::
- fun (({atom(), [term()]}) -> [{digraph:vertex(), digraph_label()}])).
--type(graph_edge_fun() ::
- fun (({atom(), [term()]}) -> [{digraph:vertex(), digraph:vertex()}])).
--type(tref() :: {'erlang', reference()} | {timer, timer:tref()}).
-
--spec(method_record_type/1 :: (rabbit_framing:amqp_method_record())
- -> rabbit_framing:amqp_method_name()).
--spec(polite_pause/0 :: () -> 'done').
--spec(polite_pause/1 :: (non_neg_integer()) -> 'done').
--spec(die/1 ::
- (rabbit_framing:amqp_exception()) -> channel_or_connection_exit()).
-
--spec(quit/1 :: (integer()) -> no_return()).
-
--spec(frame_error/2 :: (rabbit_framing:amqp_method_name(), binary())
- -> rabbit_types:connection_exit()).
--spec(amqp_error/4 ::
- (rabbit_framing:amqp_exception(), string(), [any()],
- rabbit_framing:amqp_method_name())
- -> rabbit_types:amqp_error()).
--spec(protocol_error/3 :: (rabbit_framing:amqp_exception(), string(), [any()])
- -> channel_or_connection_exit()).
--spec(protocol_error/4 ::
- (rabbit_framing:amqp_exception(), string(), [any()],
- rabbit_framing:amqp_method_name()) -> channel_or_connection_exit()).
--spec(protocol_error/1 ::
- (rabbit_types:amqp_error()) -> channel_or_connection_exit()).
--spec(not_found/1 :: (rabbit_types:r(atom())) -> rabbit_types:channel_exit()).
--spec(absent/2 :: (rabbit_types:amqqueue(), rabbit_amqqueue:absent_reason())
- -> rabbit_types:channel_exit()).
--spec(type_class/1 :: (rabbit_framing:amqp_field_type()) -> atom()).
--spec(assert_args_equivalence/4 :: (rabbit_framing:amqp_table(),
- rabbit_framing:amqp_table(),
- rabbit_types:r(any()), [binary()]) ->
- 'ok' | rabbit_types:connection_exit()).
--spec(assert_field_equivalence/4 ::
- (any(), any(), rabbit_types:r(any()), atom() | binary()) ->
- 'ok' | rabbit_types:connection_exit()).
--spec(equivalence_fail/4 ::
- (any(), any(), rabbit_types:r(any()), atom() | binary()) ->
- rabbit_types:connection_exit()).
--spec(dirty_read/1 ::
- ({atom(), any()}) -> rabbit_types:ok_or_error2(any(), 'not_found')).
--spec(table_lookup/2 ::
- (rabbit_framing:amqp_table(), binary())
- -> 'undefined' | {rabbit_framing:amqp_field_type(), any()}).
--spec(set_table_value/4 ::
- (rabbit_framing:amqp_table(), binary(),
- rabbit_framing:amqp_field_type(), rabbit_framing:amqp_value())
- -> rabbit_framing:amqp_table()).
--spec(r/2 :: (rabbit_types:vhost(), K)
- -> rabbit_types:r3(rabbit_types:vhost(), K, '_')
- when is_subtype(K, atom())).
--spec(r/3 ::
- (rabbit_types:vhost() | rabbit_types:r(atom()), K, resource_name())
- -> rabbit_types:r3(rabbit_types:vhost(), K, resource_name())
- when is_subtype(K, atom())).
--spec(r_arg/4 ::
- (rabbit_types:vhost() | rabbit_types:r(atom()), K,
- rabbit_framing:amqp_table(), binary()) ->
- undefined |
- rabbit_types:error(
- {invalid_type, rabbit_framing:amqp_field_type()}) |
- rabbit_types:r(K) when is_subtype(K, atom())).
--spec(rs/1 :: (rabbit_types:r(atom())) -> string()).
--spec(enable_cover/0 :: () -> ok_or_error()).
--spec(start_cover/1 :: ([{string(), string()} | string()]) -> 'ok').
--spec(report_cover/0 :: () -> 'ok').
--spec(enable_cover/1 :: ([file:filename() | atom()]) -> ok_or_error()).
--spec(report_cover/1 :: ([file:filename() | atom()]) -> 'ok').
--spec(throw_on_error/2 ::
- (atom(), thunk(rabbit_types:error(any()) | {ok, A} | A)) -> A).
--spec(with_exit_handler/2 :: (thunk(A), thunk(A)) -> A).
--spec(is_abnormal_exit/1 :: (any()) -> boolean()).
--spec(filter_exit_map/2 :: (fun ((A) -> B), [A]) -> [B]).
--spec(with_user/2 :: (rabbit_types:username(), thunk(A)) -> A).
--spec(with_user_and_vhost/3 ::
- (rabbit_types:username(), rabbit_types:vhost(), thunk(A))
- -> A).
--spec(execute_mnesia_transaction/1 :: (thunk(A)) -> A).
--spec(execute_mnesia_transaction/2 ::
- (thunk(A), fun ((A, boolean()) -> B)) -> B).
--spec(execute_mnesia_tx_with_tail/1 ::
- (thunk(fun ((boolean()) -> B))) -> B | (fun ((boolean()) -> B))).
--spec(ensure_ok/2 :: (ok_or_error(), atom()) -> 'ok').
--spec(tcp_name/3 ::
- (atom(), inet:ip_address(), rabbit_networking:ip_port())
- -> atom()).
--spec(format_inet_error/1 :: (atom()) -> string()).
--spec(upmap/2 :: (fun ((A) -> B), [A]) -> [B]).
--spec(map_in_order/2 :: (fun ((A) -> B), [A]) -> [B]).
--spec(table_filter/3:: (fun ((A) -> boolean()), fun ((A, boolean()) -> 'ok'),
- atom()) -> [A]).
--spec(dirty_read_all/1 :: (atom()) -> [any()]).
--spec(dirty_foreach_key/2 :: (fun ((any()) -> any()), atom())
- -> 'ok' | 'aborted').
--spec(dirty_dump_log/1 :: (file:filename()) -> ok_or_error()).
--spec(format/2 :: (string(), [any()]) -> string()).
--spec(format_many/1 :: ([{string(), [any()]}]) -> string()).
--spec(format_stderr/2 :: (string(), [any()]) -> 'ok').
--spec(unfold/2 :: (fun ((A) -> ({'true', B, A} | 'false')), A) -> {[B], A}).
--spec(ceil/1 :: (number()) -> integer()).
--spec(queue_fold/3 :: (fun ((any(), B) -> B), B, queue:queue()) -> B).
--spec(sort_field_table/1 ::
- (rabbit_framing:amqp_table()) -> rabbit_framing:amqp_table()).
--spec(pid_to_string/1 :: (pid()) -> string()).
--spec(string_to_pid/1 :: (string()) -> pid()).
--spec(pid_change_node/2 :: (pid(), node()) -> pid()).
--spec(node_to_fake_pid/1 :: (atom()) -> pid()).
--spec(version_compare/2 :: (string(), string()) -> 'lt' | 'eq' | 'gt').
--spec(version_compare/3 ::
- (string(), string(), ('lt' | 'lte' | 'eq' | 'gte' | 'gt'))
- -> boolean()).
--spec(version_minor_equivalent/2 :: (string(), string()) -> boolean()).
--spec(dict_cons/3 :: (any(), any(), dict:dict()) -> dict:dict()).
--spec(orddict_cons/3 :: (any(), any(), orddict:orddict()) -> orddict:orddict()).
--spec(gb_trees_cons/3 :: (any(), any(), gb_trees:tree()) -> gb_trees:tree()).
--spec(gb_trees_fold/3 :: (fun ((any(), any(), A) -> A), A, gb_trees:tree())
- -> A).
--spec(gb_trees_foreach/2 ::
- (fun ((any(), any()) -> any()), gb_trees:tree()) -> 'ok').
--spec(all_module_attributes/1 ::
- (atom()) -> [{atom(), atom(), [term()]}]).
--spec(build_acyclic_graph/3 ::
- (graph_vertex_fun(), graph_edge_fun(), [{atom(), [term()]}])
- -> rabbit_types:ok_or_error2(digraph:digraph(),
- {'vertex', 'duplicate', digraph:vertex()} |
- {'edge', ({bad_vertex, digraph:vertex()} |
- {bad_edge, [digraph:vertex()]}),
- digraph:vertex(), digraph:vertex()})).
--spec(now_ms/0 :: () -> non_neg_integer()).
--spec(const/1 :: (A) -> thunk(A)).
--spec(ntoa/1 :: (inet:ip_address()) -> string()).
--spec(ntoab/1 :: (inet:ip_address()) -> string()).
--spec(is_process_alive/1 :: (pid()) -> boolean()).
--spec(pget/2 :: (term(), [term()]) -> term()).
--spec(pget/3 :: (term(), [term()], term()) -> term()).
--spec(pget_or_die/2 :: (term(), [term()]) -> term() | no_return()).
--spec(pset/3 :: (term(), term(), [term()]) -> term()).
--spec(format_message_queue/2 :: (any(), priority_queue:q()) -> term()).
--spec(append_rpc_all_nodes/4 :: ([node()], atom(), atom(), [any()]) -> [any()]).
--spec(os_cmd/1 :: (string()) -> string()).
--spec(is_os_process_alive/1 :: (non_neg_integer()) -> boolean()).
--spec(gb_sets_difference/2 :: (gb_sets:set(), gb_sets:set()) -> gb_sets:set()).
--spec(version/0 :: () -> string()).
--spec(otp_release/0 :: () -> string()).
--spec(which_applications/0 :: () -> [{atom(), string(), string()}]).
--spec(sequence_error/1 :: ([({'error', any()} | any())])
- -> {'error', any()} | any()).
--spec(json_encode/1 :: (any()) -> {'ok', string()} | {'error', any()}).
--spec(json_decode/1 :: (string()) -> {'ok', any()} | 'error').
--spec(json_to_term/1 :: (any()) -> any()).
--spec(term_to_json/1 :: (any()) -> any()).
--spec(check_expiry/1 :: (integer()) -> rabbit_types:ok_or_error(any())).
--spec(base64url/1 :: (binary()) -> string()).
--spec(interval_operation/4 ::
- ({atom(), atom(), any()}, float(), non_neg_integer(), non_neg_integer())
- -> {any(), non_neg_integer()}).
--spec(ensure_timer/4 :: (A, non_neg_integer(), non_neg_integer(), any()) -> A).
--spec(stop_timer/2 :: (A, non_neg_integer()) -> A).
--spec(send_after/3 :: (non_neg_integer(), pid(), any()) -> tref()).
--spec(cancel_timer/1 :: (tref()) -> 'ok').
--spec(get_parent/0 :: () -> pid()).
--spec(store_proc_name/2 :: (atom(), rabbit_types:proc_name()) -> ok).
--spec(store_proc_name/1 :: (rabbit_types:proc_type_and_name()) -> ok).
--spec(moving_average/4 :: (float(), float(), float(), float() | 'undefined')
- -> float()).
--spec(now_to_ms/1 :: ({non_neg_integer(),
- non_neg_integer(),
- non_neg_integer()}) -> pos_integer()).
--spec(get_env/3 :: (atom(), atom(), term()) -> term()).
--endif.
-
-%%----------------------------------------------------------------------------
-
-method_record_type(Record) ->
- element(1, Record).
-
-polite_pause() ->
- polite_pause(3000).
-
-polite_pause(N) ->
- receive
- after N -> done
- end.
-
-die(Error) ->
- protocol_error(Error, "~w", [Error]).
-
-frame_error(MethodName, BinaryFields) ->
- protocol_error(frame_error, "cannot decode ~w", [BinaryFields], MethodName).
-
-amqp_error(Name, ExplanationFormat, Params, Method) ->
- Explanation = format(ExplanationFormat, Params),
- #amqp_error{name = Name, explanation = Explanation, method = Method}.
-
-protocol_error(Name, ExplanationFormat, Params) ->
- protocol_error(Name, ExplanationFormat, Params, none).
-
-protocol_error(Name, ExplanationFormat, Params, Method) ->
- protocol_error(amqp_error(Name, ExplanationFormat, Params, Method)).
-
-protocol_error(#amqp_error{} = Error) ->
- exit(Error).
-
-not_found(R) -> protocol_error(not_found, "no ~s", [rs(R)]).
-
-absent(#amqqueue{name = QueueName, pid = QPid, durable = true}, nodedown) ->
- %% The assertion of durability is mainly there because we mention
- %% durability in the error message. That way we will hopefully
- %% notice if at some future point our logic changes s.t. we get
- %% here with non-durable queues.
- protocol_error(not_found,
- "home node '~s' of durable ~s is down or inaccessible",
- [node(QPid), rs(QueueName)]);
-
-absent(#amqqueue{name = QueueName}, crashed) ->
- protocol_error(not_found,
- "~s has crashed and failed to restart", [rs(QueueName)]).
-
-type_class(byte) -> int;
-type_class(short) -> int;
-type_class(signedint) -> int;
-type_class(long) -> int;
-type_class(decimal) -> int;
-type_class(float) -> float;
-type_class(double) -> float;
-type_class(Other) -> Other.
-
-assert_args_equivalence(Orig, New, Name, Keys) ->
- [assert_args_equivalence1(Orig, New, Name, Key) || Key <- Keys],
- ok.
-
-assert_args_equivalence1(Orig, New, Name, Key) ->
- {Orig1, New1} = {table_lookup(Orig, Key), table_lookup(New, Key)},
- case {Orig1, New1} of
- {Same, Same} ->
- ok;
- {{OrigType, OrigVal}, {NewType, NewVal}} ->
- case type_class(OrigType) == type_class(NewType) andalso
- OrigVal == NewVal of
- true -> ok;
- false -> assert_field_equivalence(OrigVal, NewVal, Name, Key)
- end;
- {OrigTypeVal, NewTypeVal} ->
- assert_field_equivalence(OrigTypeVal, NewTypeVal, Name, Key)
- end.
-
-assert_field_equivalence(_Orig, _Orig, _Name, _Key) ->
- ok;
-assert_field_equivalence(Orig, New, Name, Key) ->
- equivalence_fail(Orig, New, Name, Key).
-
-equivalence_fail(Orig, New, Name, Key) ->
- protocol_error(precondition_failed, "inequivalent arg '~s' "
- "for ~s: received ~s but current is ~s",
- [Key, rs(Name), val(New), val(Orig)]).
-
-val(undefined) ->
- "none";
-val({Type, Value}) ->
- ValFmt = case is_binary(Value) of
- true -> "~s";
- false -> "~p"
- end,
- format("the value '" ++ ValFmt ++ "' of type '~s'", [Value, Type]);
-val(Value) ->
- format(case is_binary(Value) of
- true -> "'~s'";
- false -> "'~p'"
- end, [Value]).
-
-%% Normally we'd call mnesia:dirty_read/1 here, but that is quite
-%% expensive due to general mnesia overheads (figuring out table types
-%% and locations, etc). We get away with bypassing these because we
-%% know that the tables we are looking at here
-%% - are not the schema table
-%% - have a local ram copy
-%% - do not have any indices
-dirty_read({Table, Key}) ->
- case ets:lookup(Table, Key) of
- [Result] -> {ok, Result};
- [] -> {error, not_found}
- end.
-
-table_lookup(Table, Key) ->
- case lists:keysearch(Key, 1, Table) of
- {value, {_, TypeBin, ValueBin}} -> {TypeBin, ValueBin};
- false -> undefined
- end.
-
-set_table_value(Table, Key, Type, Value) ->
- sort_field_table(
- lists:keystore(Key, 1, Table, {Key, Type, Value})).
-
-r(#resource{virtual_host = VHostPath}, Kind, Name) ->
- #resource{virtual_host = VHostPath, kind = Kind, name = Name};
-r(VHostPath, Kind, Name) ->
- #resource{virtual_host = VHostPath, kind = Kind, name = Name}.
-
-r(VHostPath, Kind) ->
- #resource{virtual_host = VHostPath, kind = Kind, name = '_'}.
-
-r_arg(#resource{virtual_host = VHostPath}, Kind, Table, Key) ->
- r_arg(VHostPath, Kind, Table, Key);
-r_arg(VHostPath, Kind, Table, Key) ->
- case table_lookup(Table, Key) of
- {longstr, NameBin} -> r(VHostPath, Kind, NameBin);
- undefined -> undefined;
- {Type, _} -> {error, {invalid_type, Type}}
- end.
-
-rs(#resource{virtual_host = VHostPath, kind = Kind, name = Name}) ->
- format("~s '~s' in vhost '~s'", [Kind, Name, VHostPath]).
-
-enable_cover() -> enable_cover(["."]).
-
-enable_cover(Dirs) ->
- lists:foldl(fun (Dir, ok) ->
- case cover:compile_beam_directory(
- filename:join(lists:concat([Dir]),"ebin")) of
- {error, _} = Err -> Err;
- _ -> ok
- end;
- (_Dir, Err) ->
- Err
- end, ok, Dirs).
-
-start_cover(NodesS) ->
- {ok, _} = cover:start([rabbit_nodes:make(N) || N <- NodesS]),
- ok.
-
-report_cover() -> report_cover(["."]).
-
-report_cover(Dirs) -> [report_cover1(lists:concat([Dir])) || Dir <- Dirs], ok.
-
-report_cover1(Root) ->
- Dir = filename:join(Root, "cover"),
- ok = filelib:ensure_dir(filename:join(Dir, "junk")),
- lists:foreach(fun (F) -> file:delete(F) end,
- filelib:wildcard(filename:join(Dir, "*.html"))),
- {ok, SummaryFile} = file:open(filename:join(Dir, "summary.txt"), [write]),
- {CT, NCT} =
- lists:foldl(
- fun (M,{CovTot, NotCovTot}) ->
- {ok, {M, {Cov, NotCov}}} = cover:analyze(M, module),
- ok = report_coverage_percentage(SummaryFile,
- Cov, NotCov, M),
- {ok,_} = cover:analyze_to_file(
- M,
- filename:join(Dir, atom_to_list(M) ++ ".html"),
- [html]),
- {CovTot+Cov, NotCovTot+NotCov}
- end,
- {0, 0},
- lists:sort(cover:modules())),
- ok = report_coverage_percentage(SummaryFile, CT, NCT, 'TOTAL'),
- ok = file:close(SummaryFile),
- ok.
-
-report_coverage_percentage(File, Cov, NotCov, Mod) ->
- io:fwrite(File, "~6.2f ~p~n",
- [if
- Cov+NotCov > 0 -> 100.0*Cov/(Cov+NotCov);
- true -> 100.0
- end,
- Mod]).
-
-confirm_to_sender(Pid, MsgSeqNos) ->
- gen_server2:cast(Pid, {confirm, MsgSeqNos, self()}).
-
-%% @doc Halts the emulator returning the given status code to the os.
-%% On Windows this function will block indefinitely so as to give the io
-%% subsystem time to flush stdout completely.
-quit(Status) ->
- case os:type() of
- {unix, _} -> halt(Status);
- {win32, _} -> init:stop(Status),
- receive
- after infinity -> ok
- end
- end.
-
-throw_on_error(E, Thunk) ->
- case Thunk() of
- {error, Reason} -> throw({E, Reason});
- {ok, Res} -> Res;
- Res -> Res
- end.
-
-with_exit_handler(Handler, Thunk) ->
- try
- Thunk()
- catch
- exit:{R, _} when ?IS_BENIGN_EXIT(R) -> Handler();
- exit:{{R, _}, _} when ?IS_BENIGN_EXIT(R) -> Handler()
- end.
-
-is_abnormal_exit(R) when ?IS_BENIGN_EXIT(R) -> false;
-is_abnormal_exit({R, _}) when ?IS_BENIGN_EXIT(R) -> false;
-is_abnormal_exit(_) -> true.
-
-filter_exit_map(F, L) ->
- Ref = make_ref(),
- lists:filter(fun (R) -> R =/= Ref end,
- [with_exit_handler(
- fun () -> Ref end,
- fun () -> F(I) end) || I <- L]).
-
-
-with_user(Username, Thunk) ->
- fun () ->
- case mnesia:read({rabbit_user, Username}) of
- [] ->
- mnesia:abort({no_such_user, Username});
- [_U] ->
- Thunk()
- end
- end.
-
-with_user_and_vhost(Username, VHostPath, Thunk) ->
- with_user(Username, rabbit_vhost:with(VHostPath, Thunk)).
-
-execute_mnesia_transaction(TxFun) ->
- %% Making this a sync_transaction allows us to use dirty_read
- %% elsewhere and get a consistent result even when that read
- %% executes on a different node.
- case worker_pool:submit(
- fun () ->
- case mnesia:is_transaction() of
- false -> DiskLogBefore = mnesia_dumper:get_log_writes(),
- Res = mnesia:sync_transaction(TxFun),
- DiskLogAfter = mnesia_dumper:get_log_writes(),
- case DiskLogAfter == DiskLogBefore of
- true -> file_handle_cache_stats:update(
- mnesia_ram_tx),
- Res;
- false -> file_handle_cache_stats:update(
- mnesia_disk_tx),
- {sync, Res}
- end;
- true -> mnesia:sync_transaction(TxFun)
- end
- end, single) of
- {sync, {atomic, Result}} -> mnesia_sync:sync(), Result;
- {sync, {aborted, Reason}} -> throw({error, Reason});
- {atomic, Result} -> Result;
- {aborted, Reason} -> throw({error, Reason})
- end.
-
-%% Like execute_mnesia_transaction/1 with additional Pre- and Post-
-%% commit function
-execute_mnesia_transaction(TxFun, PrePostCommitFun) ->
- case mnesia:is_transaction() of
- true -> throw(unexpected_transaction);
- false -> ok
- end,
- PrePostCommitFun(execute_mnesia_transaction(
- fun () ->
- Result = TxFun(),
- PrePostCommitFun(Result, true),
- Result
- end), false).
-
-%% Like execute_mnesia_transaction/2, but TxFun is expected to return a
-%% TailFun which gets called (only) immediately after the tx commit
-execute_mnesia_tx_with_tail(TxFun) ->
- case mnesia:is_transaction() of
- true -> execute_mnesia_transaction(TxFun);
- false -> TailFun = execute_mnesia_transaction(TxFun),
- TailFun()
- end.
-
-ensure_ok(ok, _) -> ok;
-ensure_ok({error, Reason}, ErrorTag) -> throw({error, {ErrorTag, Reason}}).
-
-tcp_name(Prefix, IPAddress, Port)
- when is_atom(Prefix) andalso is_number(Port) ->
- list_to_atom(
- format("~w_~s:~w", [Prefix, inet_parse:ntoa(IPAddress), Port])).
-
-format_inet_error(E) -> format("~w (~s)", [E, format_inet_error0(E)]).
-
-format_inet_error0(address) -> "cannot connect to host/port";
-format_inet_error0(timeout) -> "timed out";
-format_inet_error0(Error) -> inet:format_error(Error).
-
-%% This is a modified version of Luke Gorrie's pmap -
-%% http://lukego.livejournal.com/6753.html - that doesn't care about
-%% the order in which results are received.
-%%
-%% WARNING: This is is deliberately lightweight rather than robust -- if F
-%% throws, upmap will hang forever, so make sure F doesn't throw!
-upmap(F, L) ->
- Parent = self(),
- Ref = make_ref(),
- [receive {Ref, Result} -> Result end
- || _ <- [spawn(fun () -> Parent ! {Ref, F(X)} end) || X <- L]].
-
-map_in_order(F, L) ->
- lists:reverse(
- lists:foldl(fun (E, Acc) -> [F(E) | Acc] end, [], L)).
-
-%% Apply a pre-post-commit function to all entries in a table that
-%% satisfy a predicate, and return those entries.
-%%
-%% We ignore entries that have been modified or removed.
-table_filter(Pred, PrePostCommitFun, TableName) ->
- lists:foldl(
- fun (E, Acc) ->
- case execute_mnesia_transaction(
- fun () -> mnesia:match_object(TableName, E, read) =/= []
- andalso Pred(E) end,
- fun (false, _Tx) -> false;
- (true, Tx) -> PrePostCommitFun(E, Tx), true
- end) of
- false -> Acc;
- true -> [E | Acc]
- end
- end, [], dirty_read_all(TableName)).
-
-dirty_read_all(TableName) ->
- mnesia:dirty_select(TableName, [{'$1',[],['$1']}]).
-
-dirty_foreach_key(F, TableName) ->
- dirty_foreach_key1(F, TableName, mnesia:dirty_first(TableName)).
-
-dirty_foreach_key1(_F, _TableName, '$end_of_table') ->
- ok;
-dirty_foreach_key1(F, TableName, K) ->
- case catch mnesia:dirty_next(TableName, K) of
- {'EXIT', _} ->
- aborted;
- NextKey ->
- F(K),
- dirty_foreach_key1(F, TableName, NextKey)
- end.
-
-dirty_dump_log(FileName) ->
- {ok, LH} = disk_log:open([{name, dirty_dump_log},
- {mode, read_only},
- {file, FileName}]),
- dirty_dump_log1(LH, disk_log:chunk(LH, start)),
- disk_log:close(LH).
-
-dirty_dump_log1(_LH, eof) ->
- io:format("Done.~n");
-dirty_dump_log1(LH, {K, Terms}) ->
- io:format("Chunk: ~p~n", [Terms]),
- dirty_dump_log1(LH, disk_log:chunk(LH, K));
-dirty_dump_log1(LH, {K, Terms, BadBytes}) ->
- io:format("Bad Chunk, ~p: ~p~n", [BadBytes, Terms]),
- dirty_dump_log1(LH, disk_log:chunk(LH, K)).
-
-format(Fmt, Args) -> lists:flatten(io_lib:format(Fmt, Args)).
-
-format_many(List) ->
- lists:flatten([io_lib:format(F ++ "~n", A) || {F, A} <- List]).
-
-format_stderr(Fmt, Args) ->
- case os:type() of
- {unix, _} ->
- Port = open_port({fd, 0, 2}, [out]),
- port_command(Port, io_lib:format(Fmt, Args)),
- port_close(Port);
- {win32, _} ->
- %% stderr on Windows is buffered and I can't figure out a
- %% way to trigger a fflush(stderr) in Erlang. So rather
- %% than risk losing output we write to stdout instead,
- %% which appears to be unbuffered.
- io:format(Fmt, Args)
- end,
- ok.
-
-unfold(Fun, Init) ->
- unfold(Fun, [], Init).
-
-unfold(Fun, Acc, Init) ->
- case Fun(Init) of
- {true, E, I} -> unfold(Fun, [E|Acc], I);
- false -> {Acc, Init}
- end.
-
-ceil(N) ->
- T = trunc(N),
- case N == T of
- true -> T;
- false -> 1 + T
- end.
-
-queue_fold(Fun, Init, Q) ->
- case queue:out(Q) of
- {empty, _Q} -> Init;
- {{value, V}, Q1} -> queue_fold(Fun, Fun(V, Init), Q1)
- end.
-
-%% Sorts a list of AMQP table fields as per the AMQP spec
-sort_field_table(Arguments) ->
- lists:keysort(1, Arguments).
-
-%% This provides a string representation of a pid that is the same
-%% regardless of what node we are running on. The representation also
-%% permits easy identification of the pid's node.
-pid_to_string(Pid) when is_pid(Pid) ->
- {Node, Cre, Id, Ser} = decompose_pid(Pid),
- format("<~s.~B.~B.~B>", [Node, Cre, Id, Ser]).
-
-%% inverse of above
-string_to_pid(Str) ->
- Err = {error, {invalid_pid_syntax, Str}},
- %% The \ before the trailing $ is only there to keep emacs
- %% font-lock from getting confused.
- case re:run(Str, "^<(.*)\\.(\\d+)\\.(\\d+)\\.(\\d+)>\$",
- [{capture,all_but_first,list}]) of
- {match, [NodeStr, CreStr, IdStr, SerStr]} ->
- [Cre, Id, Ser] = lists:map(fun list_to_integer/1,
- [CreStr, IdStr, SerStr]),
- compose_pid(list_to_atom(NodeStr), Cre, Id, Ser);
- nomatch ->
- throw(Err)
- end.
-
-pid_change_node(Pid, NewNode) ->
- {_OldNode, Cre, Id, Ser} = decompose_pid(Pid),
- compose_pid(NewNode, Cre, Id, Ser).
-
-%% node(node_to_fake_pid(Node)) =:= Node.
-node_to_fake_pid(Node) ->
- compose_pid(Node, 0, 0, 0).
-
-decompose_pid(Pid) when is_pid(Pid) ->
- %% see http://erlang.org/doc/apps/erts/erl_ext_dist.html (8.10 and
- %% 8.7)
- <<131,103,100,NodeLen:16,NodeBin:NodeLen/binary,Id:32,Ser:32,Cre:8>>
- = term_to_binary(Pid),
- Node = binary_to_term(<<131,100,NodeLen:16,NodeBin:NodeLen/binary>>),
- {Node, Cre, Id, Ser}.
-
-compose_pid(Node, Cre, Id, Ser) ->
- <<131,NodeEnc/binary>> = term_to_binary(Node),
- binary_to_term(<<131,103,NodeEnc/binary,Id:32,Ser:32,Cre:8>>).
-
-version_compare(A, B, lte) ->
- case version_compare(A, B) of
- eq -> true;
- lt -> true;
- gt -> false
- end;
-version_compare(A, B, gte) ->
- case version_compare(A, B) of
- eq -> true;
- gt -> true;
- lt -> false
- end;
-version_compare(A, B, Result) ->
- Result =:= version_compare(A, B).
-
-version_compare(A, A) ->
- eq;
-version_compare([], [$0 | B]) ->
- version_compare([], dropdot(B));
-version_compare([], _) ->
- lt; %% 2.3 < 2.3.1
-version_compare([$0 | A], []) ->
- version_compare(dropdot(A), []);
-version_compare(_, []) ->
- gt; %% 2.3.1 > 2.3
-version_compare(A, B) ->
- {AStr, ATl} = lists:splitwith(fun (X) -> X =/= $. end, A),
- {BStr, BTl} = lists:splitwith(fun (X) -> X =/= $. end, B),
- ANum = list_to_integer(AStr),
- BNum = list_to_integer(BStr),
- if ANum =:= BNum -> version_compare(dropdot(ATl), dropdot(BTl));
- ANum < BNum -> lt;
- ANum > BNum -> gt
- end.
-
-%% a.b.c and a.b.d match, but a.b.c and a.d.e don't. If
-%% versions do not match that pattern, just compare them.
-version_minor_equivalent(A, B) ->
- {ok, RE} = re:compile("^(\\d+\\.\\d+)(\\.\\d+)\$"),
- Opts = [{capture, all_but_first, list}],
- case {re:run(A, RE, Opts), re:run(B, RE, Opts)} of
- {{match, [A1|_]}, {match, [B1|_]}} -> A1 =:= B1;
- _ -> A =:= B
- end.
-
-dropdot(A) -> lists:dropwhile(fun (X) -> X =:= $. end, A).
-
-dict_cons(Key, Value, Dict) ->
- dict:update(Key, fun (List) -> [Value | List] end, [Value], Dict).
-
-orddict_cons(Key, Value, Dict) ->
- orddict:update(Key, fun (List) -> [Value | List] end, [Value], Dict).
-
-gb_trees_cons(Key, Value, Tree) ->
- case gb_trees:lookup(Key, Tree) of
- {value, Values} -> gb_trees:update(Key, [Value | Values], Tree);
- none -> gb_trees:insert(Key, [Value], Tree)
- end.
-
-gb_trees_fold(Fun, Acc, Tree) ->
- gb_trees_fold1(Fun, Acc, gb_trees:next(gb_trees:iterator(Tree))).
-
-gb_trees_fold1(_Fun, Acc, none) ->
- Acc;
-gb_trees_fold1(Fun, Acc, {Key, Val, It}) ->
- gb_trees_fold1(Fun, Fun(Key, Val, Acc), gb_trees:next(It)).
-
-gb_trees_foreach(Fun, Tree) ->
- gb_trees_fold(fun (Key, Val, Acc) -> Fun(Key, Val), Acc end, ok, Tree).
-
-now_ms() ->
- timer:now_diff(now(), {0,0,0}) div 1000.
-
-module_attributes(Module) ->
- case catch Module:module_info(attributes) of
- {'EXIT', {undef, [{Module, module_info, _} | _]}} ->
- io:format("WARNING: module ~p not found, so not scanned for boot steps.~n",
- [Module]),
- [];
- {'EXIT', Reason} ->
- exit(Reason);
- V ->
- V
- end.
-
-all_module_attributes(Name) ->
- Targets =
- lists:usort(
- lists:append(
- [[{App, Module} || Module <- Modules] ||
- {App, _, _} <- application:loaded_applications(),
- {ok, Modules} <- [application:get_key(App, modules)]])),
- lists:foldl(
- fun ({App, Module}, Acc) ->
- case lists:append([Atts || {N, Atts} <- module_attributes(Module),
- N =:= Name]) of
- [] -> Acc;
- Atts -> [{App, Module, Atts} | Acc]
- end
- end, [], Targets).
-
-build_acyclic_graph(VertexFun, EdgeFun, Graph) ->
- G = digraph:new([acyclic]),
- try
- [case digraph:vertex(G, Vertex) of
- false -> digraph:add_vertex(G, Vertex, Label);
- _ -> ok = throw({graph_error, {vertex, duplicate, Vertex}})
- end || GraphElem <- Graph,
- {Vertex, Label} <- VertexFun(GraphElem)],
- [case digraph:add_edge(G, From, To) of
- {error, E} -> throw({graph_error, {edge, E, From, To}});
- _ -> ok
- end || GraphElem <- Graph,
- {From, To} <- EdgeFun(GraphElem)],
- {ok, G}
- catch {graph_error, Reason} ->
- true = digraph:delete(G),
- {error, Reason}
- end.
-
-const(X) -> fun () -> X end.
-
-%% Format IPv4-mapped IPv6 addresses as IPv4, since they're what we see
-%% when IPv6 is enabled but not used (i.e. 99% of the time).
-ntoa({0,0,0,0,0,16#ffff,AB,CD}) ->
- inet_parse:ntoa({AB bsr 8, AB rem 256, CD bsr 8, CD rem 256});
-ntoa(IP) ->
- inet_parse:ntoa(IP).
-
-ntoab(IP) ->
- Str = ntoa(IP),
- case string:str(Str, ":") of
- 0 -> Str;
- _ -> "[" ++ Str ++ "]"
- end.
-
-%% We try to avoid reconnecting to down nodes here; this is used in a
-%% loop in rabbit_amqqueue:on_node_down/1 and any delays we incur
-%% would be bad news.
-%%
-%% See also rabbit_mnesia:is_process_alive/1 which also requires the
-%% process be in the same running cluster as us (i.e. not partitioned
-%% or some random node).
-is_process_alive(Pid) ->
- Node = node(Pid),
- lists:member(Node, [node() | nodes()]) andalso
- rpc:call(Node, erlang, is_process_alive, [Pid]) =:= true.
-
-pget(K, P) -> proplists:get_value(K, P).
-pget(K, P, D) -> proplists:get_value(K, P, D).
-
-pget_or_die(K, P) ->
- case proplists:get_value(K, P) of
- undefined -> exit({error, key_missing, K});
- V -> V
- end.
-
-pset(Key, Value, List) -> [{Key, Value} | proplists:delete(Key, List)].
-
-format_message_queue(_Opt, MQ) ->
- Len = priority_queue:len(MQ),
- {Len,
- case Len > 100 of
- false -> priority_queue:to_list(MQ);
- true -> {summary,
- orddict:to_list(
- lists:foldl(
- fun ({P, V}, Counts) ->
- orddict:update_counter(
- {P, format_message_queue_entry(V)}, 1, Counts)
- end, orddict:new(), priority_queue:to_list(MQ)))}
- end}.
-
-format_message_queue_entry(V) when is_atom(V) ->
- V;
-format_message_queue_entry(V) when is_tuple(V) ->
- list_to_tuple([format_message_queue_entry(E) || E <- tuple_to_list(V)]);
-format_message_queue_entry(_V) ->
- '_'.
-
-append_rpc_all_nodes(Nodes, M, F, A) ->
- {ResL, _} = rpc:multicall(Nodes, M, F, A),
- lists:append([case Res of
- {badrpc, _} -> [];
- _ -> Res
- end || Res <- ResL]).
-
-os_cmd(Command) ->
- case os:type() of
- {win32, _} ->
- %% Clink workaround; see
- %% http://code.google.com/p/clink/issues/detail?id=141
- os:cmd(" " ++ Command);
- _ ->
- %% Don't just return "/bin/sh: <cmd>: not found" if not found
- Exec = hd(string:tokens(Command, " ")),
- case os:find_executable(Exec) of
- false -> throw({command_not_found, Exec});
- _ -> os:cmd(Command)
- end
- end.
-
-is_os_process_alive(Pid) ->
- with_os([{unix, fun () ->
- run_ps(Pid) =:= 0
- end},
- {win32, fun () ->
- Cmd = "tasklist /nh /fi \"pid eq " ++ Pid ++ "\" ",
- Res = os_cmd(Cmd ++ "2>&1"),
- case re:run(Res, "erl\\.exe", [{capture, none}]) of
- match -> true;
- _ -> false
- end
- end}]).
-
-with_os(Handlers) ->
- {OsFamily, _} = os:type(),
- case proplists:get_value(OsFamily, Handlers) of
- undefined -> throw({unsupported_os, OsFamily});
- Handler -> Handler()
- end.
-
-run_ps(Pid) ->
- Port = erlang:open_port({spawn, "ps -p " ++ Pid},
- [exit_status, {line, 16384},
- use_stdio, stderr_to_stdout]),
- exit_loop(Port).
-
-exit_loop(Port) ->
- receive
- {Port, {exit_status, Rc}} -> Rc;
- {Port, _} -> exit_loop(Port)
- end.
-
-gb_sets_difference(S1, S2) ->
- gb_sets:fold(fun gb_sets:delete_any/2, S1, S2).
-
-version() ->
- {ok, VSN} = application:get_key(rabbit, vsn),
- VSN.
-
-%% See http://www.erlang.org/doc/system_principles/versions.html
-otp_release() ->
- File = filename:join([code:root_dir(), "releases",
- erlang:system_info(otp_release), "OTP_VERSION"]),
- case file:read_file(File) of
- {ok, VerBin} ->
- %% 17.0 or later, we need the file for the minor version
- string:strip(binary_to_list(VerBin), both, $\n);
- {error, _} ->
- %% R16B03 or earlier (no file, otp_release is correct)
- %% or we couldn't read the file (so this is best we can do)
- erlang:system_info(otp_release)
- end.
-
-%% application:which_applications(infinity) is dangerous, since it can
-%% cause deadlocks on shutdown. So we have to use a timeout variant,
-%% but w/o creating spurious timeout errors.
-which_applications() ->
- try
- application:which_applications()
- catch
- exit:{timeout, _} -> []
- end.
-
-sequence_error([T]) -> T;
-sequence_error([{error, _} = Error | _]) -> Error;
-sequence_error([_ | Rest]) -> sequence_error(Rest).
-
-json_encode(Term) ->
- try
- {ok, mochijson2:encode(Term)}
- catch
- exit:{json_encode, E} ->
- {error, E}
- end.
-
-json_decode(Term) ->
- try
- {ok, mochijson2:decode(Term)}
- catch
- %% Sadly `mochijson2:decode/1' does not offer a nice way to catch
- %% decoding errors...
- error:_ -> error
- end.
-
-json_to_term({struct, L}) ->
- [{K, json_to_term(V)} || {K, V} <- L];
-json_to_term(L) when is_list(L) ->
- [json_to_term(I) || I <- L];
-json_to_term(V) when is_binary(V) orelse is_number(V) orelse V =:= null orelse
- V =:= true orelse V =:= false ->
- V.
-
-%% This has the flaw that empty lists will never be JSON objects, so use with
-%% care.
-term_to_json([{_, _}|_] = L) ->
- {struct, [{K, term_to_json(V)} || {K, V} <- L]};
-term_to_json(L) when is_list(L) ->
- [term_to_json(I) || I <- L];
-term_to_json(V) when is_binary(V) orelse is_number(V) orelse V =:= null orelse
- V =:= true orelse V =:= false ->
- V.
-
-now_to_ms({Mega, Sec, Micro}) ->
- (Mega * 1000000 * 1000000 + Sec * 1000000 + Micro) div 1000.
-
-check_expiry(N) when N < 0 -> {error, {value_negative, N}};
-check_expiry(_N) -> ok.
-
-base64url(In) ->
- lists:reverse(lists:foldl(fun ($\+, Acc) -> [$\- | Acc];
- ($\/, Acc) -> [$\_ | Acc];
- ($\=, Acc) -> Acc;
- (Chr, Acc) -> [Chr | Acc]
- end, [], base64:encode_to_string(In))).
-
-%% Ideally, you'd want Fun to run every IdealInterval. but you don't
-%% want it to take more than MaxRatio of IdealInterval. So if it takes
-%% more then you want to run it less often. So we time how long it
-%% takes to run, and then suggest how long you should wait before
-%% running it again. Times are in millis.
-interval_operation({M, F, A}, MaxRatio, IdealInterval, LastInterval) ->
- {Micros, Res} = timer:tc(M, F, A),
- {Res, case {Micros > 1000 * (MaxRatio * IdealInterval),
- Micros > 1000 * (MaxRatio * LastInterval)} of
- {true, true} -> round(LastInterval * 1.5);
- {true, false} -> LastInterval;
- {false, false} -> lists:max([IdealInterval,
- round(LastInterval / 1.5)])
- end}.
-
-ensure_timer(State, Idx, After, Msg) ->
- case element(Idx, State) of
- undefined -> TRef = send_after(After, self(), Msg),
- setelement(Idx, State, TRef);
- _ -> State
- end.
-
-stop_timer(State, Idx) ->
- case element(Idx, State) of
- undefined -> State;
- TRef -> cancel_timer(TRef),
- setelement(Idx, State, undefined)
- end.
-
-%% timer:send_after/3 goes through a single timer process but allows
-%% long delays. erlang:send_after/3 does not have a bottleneck but
-%% only allows max 2^32-1 millis.
--define(MAX_ERLANG_SEND_AFTER, 4294967295).
-send_after(Millis, Pid, Msg) when Millis > ?MAX_ERLANG_SEND_AFTER ->
- {ok, Ref} = timer:send_after(Millis, Pid, Msg),
- {timer, Ref};
-send_after(Millis, Pid, Msg) ->
- {erlang, erlang:send_after(Millis, Pid, Msg)}.
-
-cancel_timer({erlang, Ref}) -> erlang:cancel_timer(Ref),
- ok;
-cancel_timer({timer, Ref}) -> {ok, cancel} = timer:cancel(Ref),
- ok.
-
-store_proc_name(Type, ProcName) -> store_proc_name({Type, ProcName}).
-store_proc_name(TypeProcName) -> put(process_name, TypeProcName).
-
-%% application:get_env/3 is only available in R16B01 or later.
-get_env(Application, Key, Def) ->
- case application:get_env(Application, Key) of
- {ok, Val} -> Val;
- undefined -> Def
- end.
-
-moving_average(_Time, _HalfLife, Next, undefined) ->
- Next;
-%% We want the Weight to decrease as Time goes up (since Weight is the
-%% weight for the current sample, not the new one), so that the moving
-%% average decays at the same speed regardless of how long the time is
-%% between samplings. So we want Weight = math:exp(Something), where
-%% Something turns out to be negative.
-%%
-%% We want to determine Something here in terms of the Time taken
-%% since the last measurement, and a HalfLife. So we want Weight =
-%% math:exp(Time * Constant / HalfLife). What should Constant be? We
-%% want Weight to be 0.5 when Time = HalfLife.
-%%
-%% Plug those numbers in and you get 0.5 = math:exp(Constant). Take
-%% the log of each side and you get math:log(0.5) = Constant.
-moving_average(Time, HalfLife, Next, Current) ->
- Weight = math:exp(Time * math:log(0.5) / HalfLife),
- Next * (1 - Weight) + Current * Weight.
-
-%% -------------------------------------------------------------------------
-%% Begin copypasta from gen_server2.erl
-
-get_parent() ->
- case get('$ancestors') of
- [Parent | _] when is_pid (Parent) -> Parent;
- [Parent | _] when is_atom(Parent) -> name_to_pid(Parent);
- _ -> exit(process_was_not_started_by_proc_lib)
- end.
-
-name_to_pid(Name) ->
- case whereis(Name) of
- undefined -> case whereis_name(Name) of
- undefined -> exit(could_not_find_registerd_name);
- Pid -> Pid
- end;
- Pid -> Pid
- end.
-
-whereis_name(Name) ->
- case ets:lookup(global_names, Name) of
- [{_Name, Pid, _Method, _RPid, _Ref}] ->
- if node(Pid) == node() -> case erlang:is_process_alive(Pid) of
- true -> Pid;
- false -> undefined
- end;
- true -> Pid
- end;
- [] -> undefined
- end.
-
-%% End copypasta from gen_server2.erl
-%% -------------------------------------------------------------------------
diff --git a/src/rabbit_mnesia.erl b/src/rabbit_mnesia.erl
index 8a4354e4bc..408154ebb3 100644
--- a/src/rabbit_mnesia.erl
+++ b/src/rabbit_mnesia.erl
@@ -102,7 +102,7 @@ init() ->
ensure_mnesia_dir(),
case is_virgin_node() of
true ->
- rabbit_log:info("Database directory at ~s is empty. Initialising from scratch... ~n",
+ rabbit_log:info("Database directory at ~s is empty. Initialising from scratch...~n",
[dir()]),
init_from_config();
false ->
diff --git a/src/rabbit_msg_store.erl b/src/rabbit_msg_store.erl
index 8909484984..e463f5ffec 100644
--- a/src/rabbit_msg_store.erl
+++ b/src/rabbit_msg_store.erl
@@ -57,28 +57,51 @@
%%----------------------------------------------------------------------------
-record(msstate,
- { dir, %% store directory
- index_module, %% the module for index ops
- index_state, %% where are messages?
- current_file, %% current file name as number
- current_file_handle, %% current file handle since the last fsync?
- file_handle_cache, %% file handle cache
- sync_timer_ref, %% TRef for our interval timer
- sum_valid_data, %% sum of valid data in all files
- sum_file_size, %% sum of file sizes
- pending_gc_completion, %% things to do once GC completes
- gc_pid, %% pid of our GC
- file_handles_ets, %% tid of the shared file handles table
- file_summary_ets, %% tid of the file summary table
- cur_file_cache_ets, %% tid of current file cache table
- flying_ets, %% tid of writes/removes in flight
- dying_clients, %% set of dying clients
- clients, %% map of references of all registered clients
- %% to callbacks
- successfully_recovered, %% boolean: did we recover state?
- file_size_limit, %% how big are our files allowed to get?
- cref_to_msg_ids, %% client ref to synced messages mapping
- credit_disc_bound %% See rabbit.hrl CREDIT_DISC_BOUND
+ {
+ %% store directory
+ dir,
+ %% the module for index ops,
+ %% rabbit_msg_store_ets_index by default
+ index_module,
+ %% where are messages?
+ index_state,
+ %% current file name as number
+ current_file,
+ %% current file handle since the last fsync?
+ current_file_handle,
+ %% file handle cache
+ file_handle_cache,
+ %% TRef for our interval timer
+ sync_timer_ref,
+ %% sum of valid data in all files
+ sum_valid_data,
+ %% sum of file sizes
+ sum_file_size,
+ %% things to do once GC completes
+ pending_gc_completion,
+ %% pid of our GC
+ gc_pid,
+ %% tid of the shared file handles table
+ file_handles_ets,
+ %% tid of the file summary table
+ file_summary_ets,
+ %% tid of current file cache table
+ cur_file_cache_ets,
+ %% tid of writes/removes in flight
+ flying_ets,
+ %% set of dying clients
+ dying_clients,
+ %% map of references of all registered clients
+ %% to callbacks
+ clients,
+ %% boolean: did we recover state?
+ successfully_recovered,
+ %% how big are our files allowed to get?
+ file_size_limit,
+ %% client ref to synced messages mapping
+ cref_to_msg_ids,
+ %% See CREDIT_DISC_BOUND in rabbit.hrl
+ credit_disc_bound
}).
-record(client_msstate,
@@ -181,13 +204,25 @@
%% It is not recommended to set this to < 0.5
-define(GARBAGE_FRACTION, 0.5).
+%% Message store is responsible for storing messages
+%% on disk and loading them back. The store handles both
+%% persistent messages and transient ones (when a node
+%% is under RAM pressure and needs to page messages out
+%% to disk). The store is responsible for locating messages
+%% on disk and maintaining an index.
+%%
+%% There are two message stores per node: one for transient
+%% and one for persistent messages.
+%%
+%% Queue processes interact with the stores via clients.
+%%
%% The components:
%%
-%% Index: this is a mapping from MsgId to #msg_location{}:
-%% {MsgId, RefCount, File, Offset, TotalSize}
-%% By default, it's in ets, but it's also pluggable.
-%% FileSummary: this is an ets table which maps File to #file_summary{}:
-%% {File, ValidTotalSize, Left, Right, FileSize, Locked, Readers}
+%% Index: this is a mapping from MsgId to #msg_location{}.
+%% By default, it's in ETS, but other implementations can
+%% be used.
+%% FileSummary: this maps File to #file_summary{} and is stored
+%% in ETS.
%%
%% The basic idea is that messages are appended to the current file up
%% until that file becomes too big (> file_size_limit). At that point,
@@ -197,9 +232,9 @@
%% eldest file.
%%
%% We need to keep track of which messages are in which files (this is
-%% the Index); how much useful data is in each file and which files
+%% the index); how much useful data is in each file and which files
%% are on the left and right of each other. This is the purpose of the
-%% FileSummary ets table.
+%% file summary ETS table.
%%
%% As messages are removed from files, holes appear in these
%% files. The field ValidTotalSize contains the total amount of useful
@@ -213,7 +248,7 @@
%% which will compact the two files together. This keeps disk
%% utilisation high and aids performance. We deliberately do this
%% lazily in order to prevent doing GC on files which are soon to be
-%% emptied (and hence deleted) soon.
+%% emptied (and hence deleted).
%%
%% Given the compaction between two files, the left file (i.e. elder
%% file) is considered the ultimate destination for the good data in
@@ -222,14 +257,14 @@
%% file, then read back in to form a contiguous chunk of good data at
%% the start of the left file. Thus the left file is garbage collected
%% and compacted. Then the good data from the right file is copied
-%% onto the end of the left file. Index and FileSummary tables are
+%% onto the end of the left file. Index and file summary tables are
%% updated.
%%
%% On non-clean startup, we scan the files we discover, dealing with
%% the possibilites of a crash having occured during a compaction
%% (this consists of tidyup - the compaction is deliberately designed
%% such that data is duplicated on disk rather than risking it being
-%% lost), and rebuild the FileSummary ets table and Index.
+%% lost), and rebuild the file summary ETS table and the index.
%%
%% So, with this design, messages move to the left. Eventually, they
%% should end up in a contiguous block on the left and are then never
@@ -279,7 +314,7 @@
%% queue, though it's likely that that's pessimistic, given the
%% requirements for compaction/combination of files.
%%
-%% The other property is that we have is the bound on the lowest
+%% The other property that we have is the bound on the lowest
%% utilisation, which should be 50% - worst case is that all files are
%% fractionally over half full and can't be combined (equivalent is
%% alternating full files and files with only one tiny message in
@@ -425,7 +460,7 @@
%% address. See the comments in the code.
%%
%% For notes on Clean Shutdown and startup, see documentation in
-%% variable_queue.
+%% rabbit_variable_queue.
%%----------------------------------------------------------------------------
%% public API
diff --git a/src/rabbit_msg_store_index.erl b/src/rabbit_msg_store_index.erl
deleted file mode 100644
index 0c7a37bcd3..0000000000
--- a/src/rabbit_msg_store_index.erl
+++ /dev/null
@@ -1,59 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_msg_store_index).
-
--include("rabbit_msg_store.hrl").
-
--ifdef(use_specs).
-
--type(dir() :: any()).
--type(index_state() :: any()).
--type(keyvalue() :: any()).
--type(fieldpos() :: non_neg_integer()).
--type(fieldvalue() :: any()).
-
--callback new(dir()) -> index_state().
--callback recover(dir()) -> rabbit_types:ok_or_error2(index_state(), any()).
--callback lookup(rabbit_types:msg_id(), index_state()) -> ('not_found' | keyvalue()).
--callback insert(keyvalue(), index_state()) -> 'ok'.
--callback update(keyvalue(), index_state()) -> 'ok'.
--callback update_fields(rabbit_types:msg_id(), ({fieldpos(), fieldvalue()} |
- [{fieldpos(), fieldvalue()}]),
- index_state()) -> 'ok'.
--callback delete(rabbit_types:msg_id(), index_state()) -> 'ok'.
--callback delete_object(keyvalue(), index_state()) -> 'ok'.
--callback delete_by_file(fieldvalue(), index_state()) -> 'ok'.
--callback terminate(index_state()) -> any().
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
- [{new, 1},
- {recover, 1},
- {lookup, 2},
- {insert, 2},
- {update, 2},
- {update_fields, 3},
- {delete, 2},
- {delete_by_file, 2},
- {terminate, 1}];
-behaviour_info(_Other) ->
- undefined.
-
--endif.
diff --git a/src/rabbit_net.erl b/src/rabbit_net.erl
deleted file mode 100644
index 1731d489fa..0000000000
--- a/src/rabbit_net.erl
+++ /dev/null
@@ -1,246 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_net).
--include("rabbit.hrl").
-
--export([is_ssl/1, ssl_info/1, controlling_process/2, getstat/2,
- recv/1, sync_recv/2, async_recv/3, port_command/2, getopts/2,
- setopts/2, send/2, close/1, fast_close/1, sockname/1, peername/1,
- peercert/1, connection_string/2, socket_ends/2, is_loopback/1]).
-
-%%---------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([socket/0]).
-
--type(stat_option() ::
- 'recv_cnt' | 'recv_max' | 'recv_avg' | 'recv_oct' | 'recv_dvi' |
- 'send_cnt' | 'send_max' | 'send_avg' | 'send_oct' | 'send_pend').
--type(ok_val_or_error(A) :: rabbit_types:ok_or_error2(A, any())).
--type(ok_or_any_error() :: rabbit_types:ok_or_error(any())).
--type(socket() :: port() | #ssl_socket{}).
--type(opts() :: [{atom(), any()} |
- {raw, non_neg_integer(), non_neg_integer(), binary()}]).
--type(host_or_ip() :: binary() | inet:ip_address()).
--spec(is_ssl/1 :: (socket()) -> boolean()).
--spec(ssl_info/1 :: (socket())
- -> 'nossl' | ok_val_or_error(
- {atom(), {atom(), atom(), atom()}})).
--spec(controlling_process/2 :: (socket(), pid()) -> ok_or_any_error()).
--spec(getstat/2 ::
- (socket(), [stat_option()])
- -> ok_val_or_error([{stat_option(), integer()}])).
--spec(recv/1 :: (socket()) ->
- {'data', [char()] | binary()} | 'closed' |
- rabbit_types:error(any()) | {'other', any()}).
--spec(sync_recv/2 :: (socket(), integer()) -> rabbit_types:ok(binary()) |
- rabbit_types:error(any())).
--spec(async_recv/3 ::
- (socket(), integer(), timeout()) -> rabbit_types:ok(any())).
--spec(port_command/2 :: (socket(), iolist()) -> 'true').
--spec(getopts/2 :: (socket(), [atom() | {raw,
- non_neg_integer(),
- non_neg_integer(),
- non_neg_integer() | binary()}])
- -> ok_val_or_error(opts())).
--spec(setopts/2 :: (socket(), opts()) -> ok_or_any_error()).
--spec(send/2 :: (socket(), binary() | iolist()) -> ok_or_any_error()).
--spec(close/1 :: (socket()) -> ok_or_any_error()).
--spec(fast_close/1 :: (socket()) -> ok_or_any_error()).
--spec(sockname/1 ::
- (socket())
- -> ok_val_or_error({inet:ip_address(), rabbit_networking:ip_port()})).
--spec(peername/1 ::
- (socket())
- -> ok_val_or_error({inet:ip_address(), rabbit_networking:ip_port()})).
--spec(peercert/1 ::
- (socket())
- -> 'nossl' | ok_val_or_error(rabbit_ssl:certificate())).
--spec(connection_string/2 ::
- (socket(), 'inbound' | 'outbound') -> ok_val_or_error(string())).
--spec(socket_ends/2 ::
- (socket(), 'inbound' | 'outbound')
- -> ok_val_or_error({host_or_ip(), rabbit_networking:ip_port(),
- host_or_ip(), rabbit_networking:ip_port()})).
--spec(is_loopback/1 :: (socket() | inet:ip_address()) -> boolean()).
-
--endif.
-
-%%---------------------------------------------------------------------------
-
--define(SSL_CLOSE_TIMEOUT, 5000).
-
--define(IS_SSL(Sock), is_record(Sock, ssl_socket)).
-
-is_ssl(Sock) -> ?IS_SSL(Sock).
-
-ssl_info(Sock) when ?IS_SSL(Sock) ->
- ssl:connection_info(Sock#ssl_socket.ssl);
-ssl_info(_Sock) ->
- nossl.
-
-controlling_process(Sock, Pid) when ?IS_SSL(Sock) ->
- ssl:controlling_process(Sock#ssl_socket.ssl, Pid);
-controlling_process(Sock, Pid) when is_port(Sock) ->
- gen_tcp:controlling_process(Sock, Pid).
-
-getstat(Sock, Stats) when ?IS_SSL(Sock) ->
- inet:getstat(Sock#ssl_socket.tcp, Stats);
-getstat(Sock, Stats) when is_port(Sock) ->
- inet:getstat(Sock, Stats).
-
-recv(Sock) when ?IS_SSL(Sock) ->
- recv(Sock#ssl_socket.ssl, {ssl, ssl_closed, ssl_error});
-recv(Sock) when is_port(Sock) ->
- recv(Sock, {tcp, tcp_closed, tcp_error}).
-
-recv(S, {DataTag, ClosedTag, ErrorTag}) ->
- receive
- {DataTag, S, Data} -> {data, Data};
- {ClosedTag, S} -> closed;
- {ErrorTag, S, Reason} -> {error, Reason};
- Other -> {other, Other}
- end.
-
-sync_recv(Sock, Length) when ?IS_SSL(Sock) ->
- ssl:recv(Sock#ssl_socket.ssl, Length);
-sync_recv(Sock, Length) ->
- gen_tcp:recv(Sock, Length).
-
-async_recv(Sock, Length, Timeout) when ?IS_SSL(Sock) ->
- Pid = self(),
- Ref = make_ref(),
-
- spawn(fun () -> Pid ! {inet_async, Sock, Ref,
- ssl:recv(Sock#ssl_socket.ssl, Length, Timeout)}
- end),
-
- {ok, Ref};
-async_recv(Sock, Length, infinity) when is_port(Sock) ->
- prim_inet:async_recv(Sock, Length, -1);
-async_recv(Sock, Length, Timeout) when is_port(Sock) ->
- prim_inet:async_recv(Sock, Length, Timeout).
-
-port_command(Sock, Data) when ?IS_SSL(Sock) ->
- case ssl:send(Sock#ssl_socket.ssl, Data) of
- ok -> self() ! {inet_reply, Sock, ok},
- true;
- {error, Reason} -> erlang:error(Reason)
- end;
-port_command(Sock, Data) when is_port(Sock) ->
- erlang:port_command(Sock, Data).
-
-getopts(Sock, Options) when ?IS_SSL(Sock) ->
- ssl:getopts(Sock#ssl_socket.ssl, Options);
-getopts(Sock, Options) when is_port(Sock) ->
- inet:getopts(Sock, Options).
-
-setopts(Sock, Options) when ?IS_SSL(Sock) ->
- ssl:setopts(Sock#ssl_socket.ssl, Options);
-setopts(Sock, Options) when is_port(Sock) ->
- inet:setopts(Sock, Options).
-
-send(Sock, Data) when ?IS_SSL(Sock) -> ssl:send(Sock#ssl_socket.ssl, Data);
-send(Sock, Data) when is_port(Sock) -> gen_tcp:send(Sock, Data).
-
-close(Sock) when ?IS_SSL(Sock) -> ssl:close(Sock#ssl_socket.ssl);
-close(Sock) when is_port(Sock) -> gen_tcp:close(Sock).
-
-fast_close(Sock) when ?IS_SSL(Sock) ->
- %% We cannot simply port_close the underlying tcp socket since the
- %% TLS protocol is quite insistent that a proper closing handshake
- %% should take place (see RFC 5245 s7.2.1). So we call ssl:close
- %% instead, but that can block for a very long time, e.g. when
- %% there is lots of pending output and there is tcp backpressure,
- %% or the ssl_connection process has entered the the
- %% workaround_transport_delivery_problems function during
- %% termination, which, inexplicably, does a gen_tcp:recv(Socket,
- %% 0), which may never return if the client doesn't send a FIN or
- %% that gets swallowed by the network. Since there is no timeout
- %% variant of ssl:close, we construct our own.
- {Pid, MRef} = spawn_monitor(fun () -> ssl:close(Sock#ssl_socket.ssl) end),
- erlang:send_after(?SSL_CLOSE_TIMEOUT, self(), {Pid, ssl_close_timeout}),
- receive
- {Pid, ssl_close_timeout} ->
- erlang:demonitor(MRef, [flush]),
- exit(Pid, kill);
- {'DOWN', MRef, process, Pid, _Reason} ->
- ok
- end,
- catch port_close(Sock#ssl_socket.tcp),
- ok;
-fast_close(Sock) when is_port(Sock) ->
- catch port_close(Sock), ok.
-
-sockname(Sock) when ?IS_SSL(Sock) -> ssl:sockname(Sock#ssl_socket.ssl);
-sockname(Sock) when is_port(Sock) -> inet:sockname(Sock).
-
-peername(Sock) when ?IS_SSL(Sock) -> ssl:peername(Sock#ssl_socket.ssl);
-peername(Sock) when is_port(Sock) -> inet:peername(Sock).
-
-peercert(Sock) when ?IS_SSL(Sock) -> ssl:peercert(Sock#ssl_socket.ssl);
-peercert(Sock) when is_port(Sock) -> nossl.
-
-connection_string(Sock, Direction) ->
- case socket_ends(Sock, Direction) of
- {ok, {FromAddress, FromPort, ToAddress, ToPort}} ->
- {ok, rabbit_misc:format(
- "~s:~p -> ~s:~p",
- [maybe_ntoab(FromAddress), FromPort,
- maybe_ntoab(ToAddress), ToPort])};
- Error ->
- Error
- end.
-
-socket_ends(Sock, Direction) ->
- {From, To} = sock_funs(Direction),
- case {From(Sock), To(Sock)} of
- {{ok, {FromAddress, FromPort}}, {ok, {ToAddress, ToPort}}} ->
- {ok, {rdns(FromAddress), FromPort,
- rdns(ToAddress), ToPort}};
- {{error, _Reason} = Error, _} ->
- Error;
- {_, {error, _Reason} = Error} ->
- Error
- end.
-
-maybe_ntoab(Addr) when is_tuple(Addr) -> rabbit_misc:ntoab(Addr);
-maybe_ntoab(Host) -> Host.
-
-rdns(Addr) ->
- case application:get_env(rabbit, reverse_dns_lookups) of
- {ok, true} -> list_to_binary(rabbit_networking:tcp_host(Addr));
- _ -> Addr
- end.
-
-sock_funs(inbound) -> {fun peername/1, fun sockname/1};
-sock_funs(outbound) -> {fun sockname/1, fun peername/1}.
-
-is_loopback(Sock) when is_port(Sock) ; ?IS_SSL(Sock) ->
- case sockname(Sock) of
- {ok, {Addr, _Port}} -> is_loopback(Addr);
- {error, _} -> false
- end;
-%% We could parse the results of inet:getifaddrs() instead. But that
-%% would be more complex and less maybe Windows-compatible...
-is_loopback({127,_,_,_}) -> true;
-is_loopback({0,0,0,0,0,0,0,1}) -> true;
-is_loopback({0,0,0,0,0,65535,AB,CD}) -> is_loopback(ipv4(AB, CD));
-is_loopback(_) -> false.
-
-ipv4(AB, CD) -> {AB bsr 8, AB band 255, CD bsr 8, CD band 255}.
diff --git a/src/rabbit_networking.erl b/src/rabbit_networking.erl
deleted file mode 100644
index f95f8c5818..0000000000
--- a/src/rabbit_networking.erl
+++ /dev/null
@@ -1,608 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_networking).
-
--export([boot/0, start/0, start_tcp_listener/1, start_ssl_listener/2,
- stop_tcp_listener/1, on_node_down/1, active_listeners/0,
- node_listeners/1, register_connection/1, unregister_connection/1,
- connections/0, connection_info_keys/0,
- connection_info/1, connection_info/2,
- connection_info_all/0, connection_info_all/1,
- close_connection/2, force_connection_event_refresh/1, tcp_host/1]).
-
-%%used by TCP-based transports, e.g. STOMP adapter
--export([tcp_listener_addresses/1, tcp_listener_spec/6,
- ensure_ssl/0, fix_ssl_options/1, poodle_check/1, ssl_transform_fun/1]).
-
--export([tcp_listener_started/3, tcp_listener_stopped/3,
- start_client/1, start_ssl_client/2]).
-
-%% Internal
--export([connections_local/0]).
-
--import(rabbit_misc, [pget/2, pget/3, pset/3]).
-
--include("rabbit.hrl").
--include_lib("kernel/include/inet.hrl").
-
--define(FIRST_TEST_BIND_PORT, 10000).
-
-%% POODLE
--define(BAD_SSL_PROTOCOL_VERSIONS, [sslv3]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--export_type([ip_port/0, hostname/0]).
-
--type(hostname() :: inet:hostname()).
--type(ip_port() :: inet:port_number()).
-
--type(family() :: atom()).
--type(listener_config() :: ip_port() |
- {hostname(), ip_port()} |
- {hostname(), ip_port(), family()}).
--type(address() :: {inet:ip_address(), ip_port(), family()}).
--type(name_prefix() :: atom()).
--type(protocol() :: atom()).
--type(label() :: string()).
-
--spec(start/0 :: () -> 'ok').
--spec(start_tcp_listener/1 :: (listener_config()) -> 'ok').
--spec(start_ssl_listener/2 ::
- (listener_config(), rabbit_types:infos()) -> 'ok').
--spec(stop_tcp_listener/1 :: (listener_config()) -> 'ok').
--spec(active_listeners/0 :: () -> [rabbit_types:listener()]).
--spec(node_listeners/1 :: (node()) -> [rabbit_types:listener()]).
--spec(register_connection/1 :: (pid()) -> ok).
--spec(unregister_connection/1 :: (pid()) -> ok).
--spec(connections/0 :: () -> [rabbit_types:connection()]).
--spec(connections_local/0 :: () -> [rabbit_types:connection()]).
--spec(connection_info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(connection_info/1 ::
- (rabbit_types:connection()) -> rabbit_types:infos()).
--spec(connection_info/2 ::
- (rabbit_types:connection(), rabbit_types:info_keys())
- -> rabbit_types:infos()).
--spec(connection_info_all/0 :: () -> [rabbit_types:infos()]).
--spec(connection_info_all/1 ::
- (rabbit_types:info_keys()) -> [rabbit_types:infos()]).
--spec(close_connection/2 :: (pid(), string()) -> 'ok').
--spec(force_connection_event_refresh/1 :: (reference()) -> 'ok').
-
--spec(on_node_down/1 :: (node()) -> 'ok').
--spec(tcp_listener_addresses/1 :: (listener_config()) -> [address()]).
--spec(tcp_listener_spec/6 ::
- (name_prefix(), address(), [gen_tcp:listen_option()], protocol(),
- label(), rabbit_types:mfargs()) -> supervisor:child_spec()).
--spec(ensure_ssl/0 :: () -> rabbit_types:infos()).
--spec(fix_ssl_options/1 :: (rabbit_types:infos()) -> rabbit_types:infos()).
--spec(poodle_check/1 :: (atom()) -> 'ok' | 'danger').
--spec(ssl_transform_fun/1 ::
- (rabbit_types:infos())
- -> fun ((rabbit_net:socket())
- -> rabbit_types:ok_or_error(#ssl_socket{}))).
-
--spec(boot/0 :: () -> 'ok').
--spec(start_client/1 ::
- (port() | #ssl_socket{ssl::{'sslsocket',_,_}}) ->
- atom() | pid() | port() | {atom(),atom()}).
--spec(start_ssl_client/2 ::
- (_,port() | #ssl_socket{ssl::{'sslsocket',_,_}}) ->
- atom() | pid() | port() | {atom(),atom()}).
--spec(tcp_listener_started/3 ::
- (_,
- string() |
- {byte(),byte(),byte(),byte()} |
- {char(),char(),char(),char(),char(),char(),char(),char()},
- _) ->
- 'ok').
--spec(tcp_listener_stopped/3 ::
- (_,
- string() |
- {byte(),byte(),byte(),byte()} |
- {char(),char(),char(),char(),char(),char(),char(),char()},
- _) ->
- 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-boot() ->
- ok = record_distribution_listener(),
- ok = start(),
- ok = boot_tcp(),
- ok = boot_ssl().
-
-boot_tcp() ->
- {ok, TcpListeners} = application:get_env(tcp_listeners),
- [ok = start_tcp_listener(Listener) || Listener <- TcpListeners],
- ok.
-
-boot_ssl() ->
- case application:get_env(ssl_listeners) of
- {ok, []} ->
- ok;
- {ok, SslListeners} ->
- SslOpts = ensure_ssl(),
- case poodle_check('AMQP') of
- ok -> [start_ssl_listener(L, SslOpts) || L <- SslListeners];
- danger -> ok
- end,
- ok
- end.
-
-start() -> rabbit_sup:start_supervisor_child(
- rabbit_tcp_client_sup, rabbit_client_sup,
- [{local, rabbit_tcp_client_sup},
- {rabbit_connection_sup,start_link,[]}]).
-
-ensure_ssl() ->
- {ok, SslAppsConfig} = application:get_env(rabbit, ssl_apps),
- ok = app_utils:start_applications(SslAppsConfig),
- {ok, SslOptsConfig} = application:get_env(rabbit, ssl_options),
- fix_ssl_options(SslOptsConfig).
-
-poodle_check(Context) ->
- {ok, Vsn} = application:get_key(ssl, vsn),
- case rabbit_misc:version_compare(Vsn, "5.3", gte) of %% R16B01
- true -> ok;
- false -> case application:get_env(rabbit, ssl_allow_poodle_attack) of
- {ok, true} -> ok;
- _ -> log_poodle_fail(Context),
- danger
- end
- end.
-
-log_poodle_fail(Context) ->
- rabbit_log:error(
- "The installed version of Erlang (~s) contains the bug OTP-10905,~n"
- "which makes it impossible to disable SSLv3. This makes the system~n"
- "vulnerable to the POODLE attack. SSL listeners for ~s have therefore~n"
- "been disabled.~n~n"
- "You are advised to upgrade to a recent Erlang version; R16B01 is the~n"
- "first version in which this bug is fixed, but later is usually~n"
- "better.~n~n"
- "If you cannot upgrade now and want to re-enable SSL listeners, you can~n"
- "set the config item 'ssl_allow_poodle_attack' to 'true' in the~n"
- "'rabbit' section of your configuration file.~n",
- [rabbit_misc:otp_release(), Context]).
-
-fix_ssl_options(Config) ->
- fix_verify_fun(fix_ssl_protocol_versions(Config)).
-
-fix_verify_fun(SslOptsConfig) ->
- %% Starting with ssl 4.0.1 in Erlang R14B, the verify_fun function
- %% takes 3 arguments and returns a tuple.
- {ok, SslAppVer} = application:get_key(ssl, vsn),
- UseNewVerifyFun = rabbit_misc:version_compare(SslAppVer, "4.0.1", gte),
- case rabbit_misc:pget(verify_fun, SslOptsConfig) of
- {Module, Function, InitialUserState} ->
- Fun = make_verify_fun(Module, Function, InitialUserState,
- UseNewVerifyFun),
- rabbit_misc:pset(verify_fun, Fun, SslOptsConfig);
- {Module, Function} ->
- Fun = make_verify_fun(Module, Function, none,
- UseNewVerifyFun),
- rabbit_misc:pset(verify_fun, Fun, SslOptsConfig);
- undefined when UseNewVerifyFun ->
- SslOptsConfig;
- undefined ->
- % unknown_ca errors are silently ignored prior to R14B unless we
- % supply this verify_fun - remove when at least R14B is required
- case proplists:get_value(verify, SslOptsConfig, verify_none) of
- verify_none -> SslOptsConfig;
- verify_peer -> [{verify_fun, fun([]) -> true;
- ([_|_]) -> false
- end}
- | SslOptsConfig]
- end
- end.
-
-make_verify_fun(Module, Function, InitialUserState, UseNewVerifyFun) ->
- try
- %% Preload the module: it is required to use
- %% erlang:function_exported/3.
- Module:module_info()
- catch
- _:Exception ->
- rabbit_log:error("SSL verify_fun: module ~s missing: ~p~n",
- [Module, Exception]),
- throw({error, {invalid_verify_fun, missing_module}})
- end,
- NewForm = erlang:function_exported(Module, Function, 3),
- OldForm = erlang:function_exported(Module, Function, 1),
- case {NewForm, OldForm} of
- {true, _} when UseNewVerifyFun ->
- %% This verify_fun is supported by Erlang R14B+ (ssl
- %% 4.0.1 and later).
- Fun = fun(OtpCert, Event, UserState) ->
- Module:Function(OtpCert, Event, UserState)
- end,
- {Fun, InitialUserState};
- {_, true} ->
- %% This verify_fun is supported by:
- %% o Erlang up-to R13B;
- %% o Erlang R14B+ for undocumented backward
- %% compatibility.
- %%
- %% InitialUserState is ignored in this case.
- fun(ErrorList) ->
- Module:Function(ErrorList)
- end;
- {_, false} when not UseNewVerifyFun ->
- rabbit_log:error("SSL verify_fun: ~s:~s/1 form required "
- "for Erlang R13B~n", [Module, Function]),
- throw({error, {invalid_verify_fun, old_form_required}});
- _ ->
- Arity = case UseNewVerifyFun of true -> 3; _ -> 1 end,
- rabbit_log:error("SSL verify_fun: no ~s:~s/~b exported~n",
- [Module, Function, Arity]),
- throw({error, {invalid_verify_fun, function_not_exported}})
- end.
-
-fix_ssl_protocol_versions(Config) ->
- case application:get_env(rabbit, ssl_allow_poodle_attack) of
- {ok, true} ->
- Config;
- _ ->
- Configured = case pget(versions, Config) of
- undefined -> pget(available, ssl:versions(), []);
- Vs -> Vs
- end,
- pset(versions, Configured -- ?BAD_SSL_PROTOCOL_VERSIONS, Config)
- end.
-
-ssl_timeout() ->
- {ok, Val} = application:get_env(rabbit, ssl_handshake_timeout),
- Val.
-
-ssl_transform_fun(SslOpts) ->
- fun (Sock) ->
- Timeout = ssl_timeout(),
- case catch ssl:ssl_accept(Sock, SslOpts, Timeout) of
- {ok, SslSock} ->
- {ok, #ssl_socket{tcp = Sock, ssl = SslSock}};
- {error, timeout} ->
- {error, {ssl_upgrade_error, timeout}};
- {error, Reason} ->
- %% We have no idea what state the ssl_connection
- %% process is in - it could still be happily
- %% going, it might be stuck, or it could be just
- %% about to fail. There is little that our caller
- %% can do but close the TCP socket, but this could
- %% cause ssl alerts to get dropped (which is bad
- %% form, according to the TLS spec). So we give
- %% the ssl_connection a little bit of time to send
- %% such alerts.
- timer:sleep(Timeout),
- {error, {ssl_upgrade_error, Reason}};
- {'EXIT', Reason} ->
- {error, {ssl_upgrade_failure, Reason}}
- end
- end.
-
-tcp_listener_addresses(Port) when is_integer(Port) ->
- tcp_listener_addresses_auto(Port);
-tcp_listener_addresses({"auto", Port}) ->
- %% Variant to prevent lots of hacking around in bash and batch files
- tcp_listener_addresses_auto(Port);
-tcp_listener_addresses({Host, Port}) ->
- %% auto: determine family IPv4 / IPv6 after converting to IP address
- tcp_listener_addresses({Host, Port, auto});
-tcp_listener_addresses({Host, Port, Family0})
- when is_integer(Port) andalso (Port >= 0) andalso (Port =< 65535) ->
- [{IPAddress, Port, Family} ||
- {IPAddress, Family} <- getaddr(Host, Family0)];
-tcp_listener_addresses({_Host, Port, _Family0}) ->
- rabbit_log:error("invalid port ~p - not 0..65535~n", [Port]),
- throw({error, {invalid_port, Port}}).
-
-tcp_listener_addresses_auto(Port) ->
- lists:append([tcp_listener_addresses(Listener) ||
- Listener <- port_to_listeners(Port)]).
-
-tcp_listener_spec(NamePrefix, {IPAddress, Port, Family}, SocketOpts,
- Protocol, Label, OnConnect) ->
- {rabbit_misc:tcp_name(NamePrefix, IPAddress, Port),
- {tcp_listener_sup, start_link,
- [IPAddress, Port, [Family | SocketOpts],
- {?MODULE, tcp_listener_started, [Protocol]},
- {?MODULE, tcp_listener_stopped, [Protocol]},
- OnConnect, Label]},
- transient, infinity, supervisor, [tcp_listener_sup]}.
-
-start_tcp_listener(Listener) ->
- start_listener(Listener, amqp, "TCP Listener",
- {?MODULE, start_client, []}).
-
-start_ssl_listener(Listener, SslOpts) ->
- start_listener(Listener, 'amqp/ssl', "SSL Listener",
- {?MODULE, start_ssl_client, [SslOpts]}).
-
-start_listener(Listener, Protocol, Label, OnConnect) ->
- [start_listener0(Address, Protocol, Label, OnConnect) ||
- Address <- tcp_listener_addresses(Listener)],
- ok.
-
-start_listener0(Address, Protocol, Label, OnConnect) ->
- Spec = tcp_listener_spec(rabbit_tcp_listener_sup, Address, tcp_opts(),
- Protocol, Label, OnConnect),
- case supervisor:start_child(rabbit_sup, Spec) of
- {ok, _} -> ok;
- {error, {shutdown, _}} -> {IPAddress, Port, _Family} = Address,
- exit({could_not_start_tcp_listener,
- {rabbit_misc:ntoa(IPAddress), Port}})
- end.
-
-stop_tcp_listener(Listener) ->
- [stop_tcp_listener0(Address) ||
- Address <- tcp_listener_addresses(Listener)],
- ok.
-
-stop_tcp_listener0({IPAddress, Port, _Family}) ->
- Name = rabbit_misc:tcp_name(rabbit_tcp_listener_sup, IPAddress, Port),
- ok = supervisor:terminate_child(rabbit_sup, Name),
- ok = supervisor:delete_child(rabbit_sup, Name).
-
-tcp_listener_started(Protocol, IPAddress, Port) ->
- %% We need the ip to distinguish e.g. 0.0.0.0 and 127.0.0.1
- %% We need the host so we can distinguish multiple instances of the above
- %% in a cluster.
- ok = mnesia:dirty_write(
- rabbit_listener,
- #listener{node = node(),
- protocol = Protocol,
- host = tcp_host(IPAddress),
- ip_address = IPAddress,
- port = Port}).
-
-tcp_listener_stopped(Protocol, IPAddress, Port) ->
- ok = mnesia:dirty_delete_object(
- rabbit_listener,
- #listener{node = node(),
- protocol = Protocol,
- host = tcp_host(IPAddress),
- ip_address = IPAddress,
- port = Port}).
-
-record_distribution_listener() ->
- {Name, Host} = rabbit_nodes:parts(node()),
- {port, Port, _Version} = erl_epmd:port_please(Name, Host),
- tcp_listener_started(clustering, {0,0,0,0,0,0,0,0}, Port).
-
-active_listeners() ->
- rabbit_misc:dirty_read_all(rabbit_listener).
-
-node_listeners(Node) ->
- mnesia:dirty_read(rabbit_listener, Node).
-
-on_node_down(Node) ->
- case lists:member(Node, nodes()) of
- false -> ok = mnesia:dirty_delete(rabbit_listener, Node);
- true -> rabbit_log:info(
- "Keep ~s listeners: the node is already back~n", [Node])
- end.
-
-start_client(Sock, SockTransform) ->
- {ok, _Child, Reader} = supervisor:start_child(rabbit_tcp_client_sup, []),
- ok = rabbit_net:controlling_process(Sock, Reader),
- Reader ! {go, Sock, SockTransform},
-
- %% In the event that somebody floods us with connections, the
- %% reader processes can spew log events at error_logger faster
- %% than it can keep up, causing its mailbox to grow unbounded
- %% until we eat all the memory available and crash. So here is a
- %% meaningless synchronous call to the underlying gen_event
- %% mechanism. When it returns the mailbox is drained, and we
- %% return to our caller to accept more connetions.
- gen_event:which_handlers(error_logger),
-
- Reader.
-
-start_client(Sock) ->
- start_client(Sock, fun (S) -> {ok, S} end).
-
-start_ssl_client(SslOpts, Sock) ->
- start_client(Sock, ssl_transform_fun(SslOpts)).
-
-register_connection(Pid) -> pg_local:join(rabbit_connections, Pid).
-
-unregister_connection(Pid) -> pg_local:leave(rabbit_connections, Pid).
-
-connections() ->
- rabbit_misc:append_rpc_all_nodes(rabbit_mnesia:cluster_nodes(running),
- rabbit_networking, connections_local, []).
-
-connections_local() -> pg_local:get_members(rabbit_connections).
-
-connection_info_keys() -> rabbit_reader:info_keys().
-
-connection_info(Pid) -> rabbit_reader:info(Pid).
-connection_info(Pid, Items) -> rabbit_reader:info(Pid, Items).
-
-connection_info_all() -> cmap(fun (Q) -> connection_info(Q) end).
-connection_info_all(Items) -> cmap(fun (Q) -> connection_info(Q, Items) end).
-
-close_connection(Pid, Explanation) ->
- rabbit_log:info("Closing connection ~p because ~p~n", [Pid, Explanation]),
- case lists:member(Pid, connections()) of
- true -> rabbit_reader:shutdown(Pid, Explanation);
- false -> throw({error, {not_a_connection_pid, Pid}})
- end.
-
-force_connection_event_refresh(Ref) ->
- [rabbit_reader:force_event_refresh(C, Ref) || C <- connections()],
- ok.
-
-%%--------------------------------------------------------------------
-
-tcp_host({0,0,0,0}) ->
- hostname();
-
-tcp_host({0,0,0,0,0,0,0,0}) ->
- hostname();
-
-tcp_host(IPAddress) ->
- case inet:gethostbyaddr(IPAddress) of
- {ok, #hostent{h_name = Name}} -> Name;
- {error, _Reason} -> rabbit_misc:ntoa(IPAddress)
- end.
-
-hostname() ->
- {ok, Hostname} = inet:gethostname(),
- case inet:gethostbyname(Hostname) of
- {ok, #hostent{h_name = Name}} -> Name;
- {error, _Reason} -> Hostname
- end.
-
-cmap(F) -> rabbit_misc:filter_exit_map(F, connections()).
-
-tcp_opts() ->
- {ok, ConfigOpts} = application:get_env(rabbit, tcp_listen_options),
- merge_essential_tcp_listen_options(ConfigOpts).
-
--define(ESSENTIAL_LISTEN_OPTIONS,
- [binary,
- {active, false},
- {packet, raw},
- {reuseaddr, true},
- {nodelay, true}]).
-
-merge_essential_tcp_listen_options(Opts) ->
- lists:foldl(fun ({K, _} = Opt, Acc) ->
- lists:keystore(K, 1, Acc, Opt);
- (Opt, Acc) ->
- [Opt | Acc]
- end , Opts, ?ESSENTIAL_LISTEN_OPTIONS).
-
-%% inet_parse:address takes care of ip string, like "0.0.0.0"
-%% inet:getaddr returns immediately for ip tuple {0,0,0,0},
-%% and runs 'inet_gethost' port process for dns lookups.
-%% On Windows inet:getaddr runs dns resolver for ip string, which may fail.
-getaddr(Host, Family) ->
- case inet_parse:address(Host) of
- {ok, IPAddress} -> [{IPAddress, resolve_family(IPAddress, Family)}];
- {error, _} -> gethostaddr(Host, Family)
- end.
-
-gethostaddr(Host, auto) ->
- Lookups = [{Family, inet:getaddr(Host, Family)} || Family <- [inet, inet6]],
- case [{IP, Family} || {Family, {ok, IP}} <- Lookups] of
- [] -> host_lookup_error(Host, Lookups);
- IPs -> IPs
- end;
-
-gethostaddr(Host, Family) ->
- case inet:getaddr(Host, Family) of
- {ok, IPAddress} -> [{IPAddress, Family}];
- {error, Reason} -> host_lookup_error(Host, Reason)
- end.
-
-host_lookup_error(Host, Reason) ->
- rabbit_log:error("invalid host ~p - ~p~n", [Host, Reason]),
- throw({error, {invalid_host, Host, Reason}}).
-
-resolve_family({_,_,_,_}, auto) -> inet;
-resolve_family({_,_,_,_,_,_,_,_}, auto) -> inet6;
-resolve_family(IP, auto) -> throw({error, {strange_family, IP}});
-resolve_family(_, F) -> F.
-
-%%--------------------------------------------------------------------
-
-%% There are three kinds of machine (for our purposes).
-%%
-%% * Those which treat IPv4 addresses as a special kind of IPv6 address
-%% ("Single stack")
-%% - Linux by default, Windows Vista and later
-%% - We also treat any (hypothetical?) IPv6-only machine the same way
-%% * Those which consider IPv6 and IPv4 to be completely separate things
-%% ("Dual stack")
-%% - OpenBSD, Windows XP / 2003, Linux if so configured
-%% * Those which do not support IPv6.
-%% - Ancient/weird OSes, Linux if so configured
-%%
-%% How to reconfigure Linux to test this:
-%% Single stack (default):
-%% echo 0 > /proc/sys/net/ipv6/bindv6only
-%% Dual stack:
-%% echo 1 > /proc/sys/net/ipv6/bindv6only
-%% IPv4 only:
-%% add ipv6.disable=1 to GRUB_CMDLINE_LINUX_DEFAULT in /etc/default/grub then
-%% sudo update-grub && sudo reboot
-%%
-%% This matters in (and only in) the case where the sysadmin (or the
-%% app descriptor) has only supplied a port and we wish to bind to
-%% "all addresses". This means different things depending on whether
-%% we're single or dual stack. On single stack binding to "::"
-%% implicitly includes all IPv4 addresses, and subsequently attempting
-%% to bind to "0.0.0.0" will fail. On dual stack, binding to "::" will
-%% only bind to IPv6 addresses, and we need another listener bound to
-%% "0.0.0.0" for IPv4. Finally, on IPv4-only systems we of course only
-%% want to bind to "0.0.0.0".
-%%
-%% Unfortunately it seems there is no way to detect single vs dual stack
-%% apart from attempting to bind to the port.
-port_to_listeners(Port) ->
- IPv4 = {"0.0.0.0", Port, inet},
- IPv6 = {"::", Port, inet6},
- case ipv6_status(?FIRST_TEST_BIND_PORT) of
- single_stack -> [IPv6];
- ipv6_only -> [IPv6];
- dual_stack -> [IPv6, IPv4];
- ipv4_only -> [IPv4]
- end.
-
-ipv6_status(TestPort) ->
- IPv4 = [inet, {ip, {0,0,0,0}}],
- IPv6 = [inet6, {ip, {0,0,0,0,0,0,0,0}}],
- case gen_tcp:listen(TestPort, IPv6) of
- {ok, LSock6} ->
- case gen_tcp:listen(TestPort, IPv4) of
- {ok, LSock4} ->
- %% Dual stack
- gen_tcp:close(LSock6),
- gen_tcp:close(LSock4),
- dual_stack;
- %% Checking the error here would only let us
- %% distinguish single stack IPv6 / IPv4 vs IPv6 only,
- %% which we figure out below anyway.
- {error, _} ->
- gen_tcp:close(LSock6),
- case gen_tcp:listen(TestPort, IPv4) of
- %% Single stack
- {ok, LSock4} -> gen_tcp:close(LSock4),
- single_stack;
- %% IPv6-only machine. Welcome to the future.
- {error, eafnosupport} -> ipv6_only; %% Linux
- {error, eprotonosupport}-> ipv6_only; %% FreeBSD
- %% Dual stack machine with something already
- %% on IPv4.
- {error, _} -> ipv6_status(TestPort + 1)
- end
- end;
- %% IPv4-only machine. Welcome to the 90s.
- {error, eafnosupport} -> %% Linux
- ipv4_only;
- {error, eprotonosupport} -> %% FreeBSD
- ipv4_only;
- %% Port in use
- {error, _} ->
- ipv6_status(TestPort + 1)
- end.
diff --git a/src/rabbit_node_monitor.erl b/src/rabbit_node_monitor.erl
index e3960c5c8a..43db5431e0 100644
--- a/src/rabbit_node_monitor.erl
+++ b/src/rabbit_node_monitor.erl
@@ -288,24 +288,28 @@ workaround_global_hang() ->
receive
global_sync_done ->
ok
- after 15000 ->
+ after 10000 ->
find_blocked_global_peers()
end.
find_blocked_global_peers() ->
+ Snapshot1 = snapshot_global_dict(),
+ timer:sleep(10000),
+ Snapshot2 = snapshot_global_dict(),
+ find_blocked_global_peers1(Snapshot2, Snapshot1).
+
+snapshot_global_dict() ->
{status, _, _, [Dict | _]} = sys:get_status(global_name_server),
- find_blocked_global_peers1(Dict).
+ [E || {{sync_tag_his, _}, _} = E <- Dict].
-find_blocked_global_peers1([{{sync_tag_his, Peer}, Timestamp} | Rest]) ->
- Diff = timer:now_diff(erlang:now(), Timestamp),
- if
- Diff >= 10000 -> unblock_global_peer(Peer);
- true -> ok
+find_blocked_global_peers1([{{sync_tag_his, Peer}, _} = Item | Rest],
+ OlderSnapshot) ->
+ case lists:member(Item, OlderSnapshot) of
+ true -> unblock_global_peer(Peer);
+ false -> ok
end,
- find_blocked_global_peers1(Rest);
-find_blocked_global_peers1([_ | Rest]) ->
- find_blocked_global_peers1(Rest);
-find_blocked_global_peers1([]) ->
+ find_blocked_global_peers1(Rest, OlderSnapshot);
+find_blocked_global_peers1([], _) ->
ok.
unblock_global_peer(PeerNode) ->
diff --git a/src/rabbit_nodes.erl b/src/rabbit_nodes.erl
deleted file mode 100644
index 57d971715b..0000000000
--- a/src/rabbit_nodes.erl
+++ /dev/null
@@ -1,221 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_nodes).
-
--export([names/1, diagnostics/1, make/1, parts/1, cookie_hash/0,
- is_running/2, is_process_running/2,
- cluster_name/0, set_cluster_name/1, ensure_epmd/0,
- all_running/0]).
-
--include_lib("kernel/include/inet.hrl").
-
--define(EPMD_TIMEOUT, 30000).
--define(TCP_DIAGNOSTIC_TIMEOUT, 5000).
-
-%%----------------------------------------------------------------------------
-%% Specs
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(names/1 :: (string()) -> rabbit_types:ok_or_error2(
- [{string(), integer()}], term())).
--spec(diagnostics/1 :: ([node()]) -> string()).
--spec(make/1 :: ({string(), string()} | string()) -> node()).
--spec(parts/1 :: (node() | string()) -> {string(), string()}).
--spec(cookie_hash/0 :: () -> string()).
--spec(is_running/2 :: (node(), atom()) -> boolean()).
--spec(is_process_running/2 :: (node(), atom()) -> boolean()).
--spec(cluster_name/0 :: () -> binary()).
--spec(set_cluster_name/1 :: (binary()) -> 'ok').
--spec(ensure_epmd/0 :: () -> 'ok').
--spec(all_running/0 :: () -> [node()]).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-names(Hostname) ->
- Self = self(),
- Ref = make_ref(),
- {Pid, MRef} = spawn_monitor(
- fun () -> Self ! {Ref, net_adm:names(Hostname)} end),
- timer:exit_after(?EPMD_TIMEOUT, Pid, timeout),
- receive
- {Ref, Names} -> erlang:demonitor(MRef, [flush]),
- Names;
- {'DOWN', MRef, process, Pid, Reason} -> {error, Reason}
- end.
-
-diagnostics(Nodes) ->
- NodeDiags = [{"~nDIAGNOSTICS~n===========~n~n"
- "attempted to contact: ~p~n", [Nodes]}] ++
- [diagnostics_node(Node) || Node <- Nodes] ++
- current_node_details(),
- rabbit_misc:format_many(lists:flatten(NodeDiags)).
-
-current_node_details() ->
- [{"~ncurrent node details:~n- node name: ~w", [node()]},
- case init:get_argument(home) of
- {ok, [[Home]]} -> {"- home dir: ~s", [Home]};
- Other -> {"- no home dir: ~p", [Other]}
- end,
- {"- cookie hash: ~s", [cookie_hash()]}].
-
-diagnostics_node(Node) ->
- {Name, Host} = parts(Node),
- [{"~s:", [Node]} |
- case names(Host) of
- {error, Reason} ->
- [{" * unable to connect to epmd (port ~s) on ~s: ~s~n",
- [epmd_port(), Host, rabbit_misc:format_inet_error(Reason)]}];
- {ok, NamePorts} ->
- [{" * connected to epmd (port ~s) on ~s",
- [epmd_port(), Host]}] ++
- case net_adm:ping(Node) of
- pong -> dist_working_diagnostics(Node);
- pang -> dist_broken_diagnostics(Name, Host, NamePorts)
- end
- end].
-
-epmd_port() ->
- case init:get_argument(epmd_port) of
- {ok, [[Port | _] | _]} when is_list(Port) -> Port;
- error -> "4369"
- end.
-
-dist_working_diagnostics(Node) ->
- case rabbit:is_running(Node) of
- true -> [{" * node ~s up, 'rabbit' application running", [Node]}];
- false -> [{" * node ~s up, 'rabbit' application not running~n"
- " * running applications on ~s: ~p~n"
- " * suggestion: start_app on ~s",
- [Node, Node, remote_apps(Node), Node]}]
- end.
-
-remote_apps(Node) ->
- %% We want a timeout here because really, we don't trust the node,
- %% the last thing we want to do is hang.
- case rpc:call(Node, application, which_applications, [5000]) of
- {badrpc, _} = E -> E;
- Apps -> [App || {App, _, _} <- Apps]
- end.
-
-dist_broken_diagnostics(Name, Host, NamePorts) ->
- case [{N, P} || {N, P} <- NamePorts, N =:= Name] of
- [] ->
- {SelfName, SelfHost} = parts(node()),
- Others = [list_to_atom(N) || {N, _} <- NamePorts,
- N =/= case SelfHost of
- Host -> SelfName;
- _ -> never_matches
- end],
- OthersDiag = case Others of
- [] -> [{" no other nodes on ~s",
- [Host]}];
- _ -> [{" other nodes on ~s: ~p",
- [Host, Others]}]
- end,
- [{" * epmd reports: node '~s' not running at all", [Name]},
- OthersDiag, {" * suggestion: start the node", []}];
- [{Name, Port}] ->
- [{" * epmd reports node '~s' running on port ~b", [Name, Port]} |
- case diagnose_connect(Host, Port) of
- ok ->
- [{" * TCP connection succeeded but Erlang distribution "
- "failed~n"
- " * suggestion: hostname mismatch?~n"
- " * suggestion: is the cookie set correctly?~n"
- " * suggestion: is the Erlang distribution using TLS?", []}];
- {error, Reason} ->
- [{" * can't establish TCP connection, reason: ~s~n"
- " * suggestion: blocked by firewall?",
- [rabbit_misc:format_inet_error(Reason)]}]
- end]
- end.
-
-diagnose_connect(Host, Port) ->
- case inet:gethostbyname(Host) of
- {ok, #hostent{h_addrtype = Family}} ->
- case gen_tcp:connect(Host, Port, [Family],
- ?TCP_DIAGNOSTIC_TIMEOUT) of
- {ok, Socket} -> gen_tcp:close(Socket),
- ok;
- {error, _} = E -> E
- end;
- {error, _} = E ->
- E
- end.
-
-make({Prefix, Suffix}) -> list_to_atom(lists:append([Prefix, "@", Suffix]));
-make(NodeStr) -> make(parts(NodeStr)).
-
-parts(Node) when is_atom(Node) ->
- parts(atom_to_list(Node));
-parts(NodeStr) ->
- case lists:splitwith(fun (E) -> E =/= $@ end, NodeStr) of
- {Prefix, []} -> {_, Suffix} = parts(node()),
- {Prefix, Suffix};
- {Prefix, Suffix} -> {Prefix, tl(Suffix)}
- end.
-
-cookie_hash() ->
- base64:encode_to_string(erlang:md5(atom_to_list(erlang:get_cookie()))).
-
-is_running(Node, Application) ->
- case rpc:call(Node, rabbit_misc, which_applications, []) of
- {badrpc, _} -> false;
- Apps -> proplists:is_defined(Application, Apps)
- end.
-
-is_process_running(Node, Process) ->
- case rpc:call(Node, erlang, whereis, [Process]) of
- {badrpc, _} -> false;
- undefined -> false;
- P when is_pid(P) -> true
- end.
-
-cluster_name() ->
- rabbit_runtime_parameters:value_global(
- cluster_name, cluster_name_default()).
-
-cluster_name_default() ->
- {ID, _} = rabbit_nodes:parts(node()),
- {ok, Host} = inet:gethostname(),
- {ok, #hostent{h_name = FQDN}} = inet:gethostbyname(Host),
- list_to_binary(atom_to_list(rabbit_nodes:make({ID, FQDN}))).
-
-set_cluster_name(Name) ->
- rabbit_runtime_parameters:set_global(cluster_name, Name).
-
-ensure_epmd() ->
- {ok, Prog} = init:get_argument(progname),
- ID = random:uniform(1000000000),
- Port = open_port(
- {spawn_executable, os:find_executable(Prog)},
- [{args, ["-sname", rabbit_misc:format("epmd-starter-~b", [ID]),
- "-noshell", "-eval", "halt()."]},
- exit_status, stderr_to_stdout, use_stdio]),
- port_shutdown_loop(Port).
-
-port_shutdown_loop(Port) ->
- receive
- {Port, {exit_status, _Rc}} -> ok;
- {Port, _} -> port_shutdown_loop(Port)
- end.
-
-all_running() -> rabbit_mnesia:cluster_nodes(running).
diff --git a/src/rabbit_password.erl b/src/rabbit_password.erl
new file mode 100644
index 0000000000..7bc1b28e21
--- /dev/null
+++ b/src/rabbit_password.erl
@@ -0,0 +1,64 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(rabbit_password).
+-include("rabbit.hrl").
+
+-define(DEFAULT_HASHING_MODULE, rabbit_password_hashing_sha256).
+
+%%
+%% API
+%%
+
+-export([hash/1, hash/2, generate_salt/0, salted_hash/2, salted_hash/3,
+ hashing_mod/0, hashing_mod/1]).
+
+hash(Cleartext) ->
+ hash(hashing_mod(), Cleartext).
+
+hash(HashingMod, Cleartext) ->
+ SaltBin = generate_salt(),
+ Hash = salted_hash(HashingMod, SaltBin, Cleartext),
+ <<SaltBin/binary, Hash/binary>>.
+
+generate_salt() ->
+ random:seed(erlang:phash2([node()]),
+ time_compat:monotonic_time(),
+ time_compat:unique_integer()),
+ Salt = random:uniform(16#ffffffff),
+ <<Salt:32>>.
+
+salted_hash(Salt, Cleartext) ->
+ salted_hash(hashing_mod(), Salt, Cleartext).
+
+salted_hash(Mod, Salt, Cleartext) ->
+ Fun = fun Mod:hash/1,
+ Fun(<<Salt/binary, Cleartext/binary>>).
+
+hashing_mod() ->
+ rabbit_misc:get_env(rabbit, password_hashing_module,
+ ?DEFAULT_HASHING_MODULE).
+
+hashing_mod(rabbit_password_hashing_sha256) ->
+ rabbit_password_hashing_sha256;
+hashing_mod(rabbit_password_hashing_md5) ->
+ rabbit_password_hashing_md5;
+%% fall back to the hashing function that's been used prior to 3.6.0
+hashing_mod(undefined) ->
+ rabbit_password_hashing_md5;
+%% if a custom module is configured, simply use it
+hashing_mod(CustomMod) when is_atom(CustomMod) ->
+ CustomMod.
diff --git a/src/rabbit_ctl_misc.erl b/src/rabbit_password_hashing_md5.erl
index 92ae111028..7d3e0d80f7 100644
--- a/src/rabbit_ctl_misc.erl
+++ b/src/rabbit_password_hashing_md5.erl
@@ -14,18 +14,15 @@
%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
%%
--module(rabbit_ctl_misc).
+%% Legacy hashing implementation, only used as a last resort when
+%% #internal_user.hashing_algorithm is md5 or undefined (the case in
+%% pre-3.6.0 user records).
--export([print_cmd_result/2]).
+-module(rabbit_password_hashing_md5).
-%%----------------------------------------------------------------------------
+-behaviour(rabbit_password_hashing).
--ifdef(use_specs).
+-export([hash/1]).
--spec(print_cmd_result/2 :: (atom(), term()) -> string()).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-print_cmd_result(authenticate_user, _Result) -> io:format("Success~n").
+hash(Binary) ->
+ erlang:md5(Binary).
diff --git a/include/rabbit_msg_store.hrl b/src/rabbit_password_hashing_sha256.erl
index 8bcf2ce629..5d230025b5 100644
--- a/include/rabbit_msg_store.hrl
+++ b/src/rabbit_password_hashing_sha256.erl
@@ -10,16 +10,15 @@
%%
%% The Original Code is RabbitMQ.
%%
-%% The Initial Developer of the Original Code is Pivotal Software, Inc.
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
%%
--include("rabbit.hrl").
+-module(rabbit_password_hashing_sha256).
--ifdef(use_specs).
+-behaviour(rabbit_password_hashing).
--type(msg() :: any()).
+-export([hash/1]).
--endif.
-
--record(msg_location, {msg_id, ref_count, file, offset, total_size}).
+hash(Binary) ->
+ crypto:hash(sha256, Binary).
diff --git a/src/rabbit_policy_validator.erl b/src/rabbit_password_hashing_sha512.erl
index 7ebea83516..50ea22a6d2 100644
--- a/src/rabbit_policy_validator.erl
+++ b/src/rabbit_password_hashing_sha512.erl
@@ -14,26 +14,11 @@
%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
%%
--module(rabbit_policy_validator).
+-module(rabbit_password_hashing_sha512).
--ifdef(use_specs).
+-behaviour(rabbit_password_hashing).
--export_type([validate_results/0]).
+-export([hash/1]).
--type(validate_results() ::
- 'ok' | {error, string(), [term()]} | [validate_results()]).
-
--callback validate_policy([{binary(), term()}]) -> validate_results().
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
- [
- {validate_policy, 1}
- ];
-behaviour_info(_Other) ->
- undefined.
-
--endif.
+hash(Binary) ->
+ crypto:hash(sha512, Binary).
diff --git a/src/rabbit_plugins.erl b/src/rabbit_plugins.erl
index 3297032346..c7f5d501bf 100644
--- a/src/rabbit_plugins.erl
+++ b/src/rabbit_plugins.erl
@@ -17,7 +17,7 @@
-module(rabbit_plugins).
-include("rabbit.hrl").
--export([setup/0, active/0, read_enabled/1, list/1, dependencies/3]).
+-export([setup/0, active/0, read_enabled/1, list/1, list/2, dependencies/3]).
-export([ensure/1]).
%%----------------------------------------------------------------------------
@@ -29,6 +29,7 @@
-spec(setup/0 :: () -> [plugin_name()]).
-spec(active/0 :: () -> [plugin_name()]).
-spec(list/1 :: (string()) -> [#plugin{}]).
+-spec(list/2 :: (string(), boolean()) -> [#plugin{}]).
-spec(read_enabled/1 :: (file:filename()) -> [plugin_name()]).
-spec(dependencies/3 :: (boolean(), [plugin_name()], [#plugin{}]) ->
[plugin_name()]).
@@ -87,14 +88,31 @@ active() ->
%% @doc Get the list of plugins which are ready to be enabled.
list(PluginsDir) ->
+ list(PluginsDir, false).
+
+list(PluginsDir, IncludeRequiredDeps) ->
EZs = [{ez, EZ} || EZ <- filelib:wildcard("*.ez", PluginsDir)],
FreeApps = [{app, App} ||
App <- filelib:wildcard("*/ebin/*.app", PluginsDir)],
+ %% We load the "rabbit" application to be sure we can get the
+ %% "applications" key. This is required for rabbitmq-plugins for
+ %% instance.
+ application:load(rabbit),
+ {ok, RabbitDeps} = application:get_key(rabbit, applications),
{AvailablePlugins, Problems} =
lists:foldl(fun ({error, EZ, Reason}, {Plugins1, Problems1}) ->
{Plugins1, [{EZ, Reason} | Problems1]};
- (Plugin = #plugin{}, {Plugins1, Problems1}) ->
- {[Plugin|Plugins1], Problems1}
+ (Plugin = #plugin{name = Name}, {Plugins1, Problems1}) ->
+ %% Applications RabbitMQ depends on (eg.
+ %% "rabbit_common") can't be considered
+ %% plugins, otherwise rabbitmq-plugins would
+ %% list them and the user may believe he can
+ %% disable them.
+ case IncludeRequiredDeps orelse
+ not lists:member(Name, RabbitDeps) of
+ true -> {[Plugin|Plugins1], Problems1};
+ false -> {Plugins1, Problems1}
+ end
end, {[], []},
[plugin_info(PluginsDir, Plug) || Plug <- EZs ++ FreeApps]),
case Problems of
diff --git a/src/rabbit_policies.erl b/src/rabbit_policies.erl
index 65f3801e3e..a4e1e9be4a 100644
--- a/src/rabbit_policies.erl
+++ b/src/rabbit_policies.erl
@@ -35,7 +35,8 @@ register() ->
{policy_validator, <<"message-ttl">>},
{policy_validator, <<"expires">>},
{policy_validator, <<"max-length">>},
- {policy_validator, <<"max-length-bytes">>}]],
+ {policy_validator, <<"max-length-bytes">>},
+ {policy_validator, <<"queue-mode">>}]],
ok.
validate_policy(Terms) ->
@@ -83,4 +84,11 @@ validate_policy0(<<"max-length-bytes">>, Value)
when is_integer(Value), Value >= 0 ->
ok;
validate_policy0(<<"max-length-bytes">>, Value) ->
- {error, "~p is not a valid maximum length in bytes", [Value]}.
+ {error, "~p is not a valid maximum length in bytes", [Value]};
+
+validate_policy0(<<"queue-mode">>, <<"default">>) ->
+ ok;
+validate_policy0(<<"queue-mode">>, <<"lazy">>) ->
+ ok;
+validate_policy0(<<"queue-mode">>, Value) ->
+ {error, "~p is not a valid queue-mode value", [Value]}.
diff --git a/src/rabbit_policy.erl b/src/rabbit_policy.erl
index 5bf5483272..dd50095517 100644
--- a/src/rabbit_policy.erl
+++ b/src/rabbit_policy.erl
@@ -29,7 +29,7 @@
-export([name/1, get/2, get_arg/3, set/1]).
-export([validate/5, notify/4, notify_clear/3]).
-export([parse_set/6, set/6, delete/2, lookup/2, list/0, list/1,
- list_formatted/1, info_keys/0]).
+ list_formatted/1, list_formatted/3, info_keys/0]).
-rabbit_boot_step({?MODULE,
[{description, "policy parameters"},
@@ -170,6 +170,10 @@ list(VHost) ->
list_formatted(VHost) ->
order_policies(list0(VHost, fun format/1)).
+list_formatted(VHost, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map(AggregatorPid, Ref,
+ fun(P) -> P end, list_formatted(VHost)).
+
list0(VHost, DefnFun) ->
[p(P, DefnFun) || P <- rabbit_runtime_parameters:list(VHost, <<"policy">>)].
diff --git a/src/rabbit_priority_queue.erl b/src/rabbit_priority_queue.erl
index 206d674abc..b58a8c535e 100644
--- a/src/rabbit_priority_queue.erl
+++ b/src/rabbit_priority_queue.erl
@@ -35,11 +35,13 @@
-export([init/3, terminate/2, delete_and_terminate/2, delete_crashed/1,
purge/1, purge_acks/1,
publish/6, publish_delivered/5, discard/4, drain_confirmed/1,
+ batch_publish/4, batch_publish_delivered/4,
dropwhile/2, fetchwhile/4, fetch/2, drop/2, ack/2, requeue/2,
ackfold/4, fold/3, len/1, is_empty/1, depth/1,
set_ram_duration_target/2, ram_duration/1, needs_timeout/1, timeout/1,
handle_pre_hibernate/1, resume/1, msg_rates/1,
- info/2, invoke/3, is_duplicate/2]).
+ info/2, invoke/3, is_duplicate/2, set_queue_mode/2,
+ zip_msgs_and_acks/4]).
-record(state, {bq, bqss}).
-record(passthrough, {bq, bqs}).
@@ -124,7 +126,7 @@ collapse_recovery(QNames, DupNames, Recovery) ->
[dict:fetch(Name, NameToTerms) || Name <- QNames].
priorities(#amqqueue{arguments = Args}) ->
- Ints = [long, short, signedint, byte],
+ Ints = [long, short, signedint, byte, unsignedbyte, unsignedshort, unsignedint],
case rabbit_misc:table_lookup(Args, <<"x-max-priority">>) of
{Type, Max} -> case lists:member(Type, Ints) of
false -> none;
@@ -203,6 +205,18 @@ publish(Msg, MsgProps, IsDelivered, ChPid, Flow,
State = #passthrough{bq = BQ, bqs = BQS}) ->
?passthrough1(publish(Msg, MsgProps, IsDelivered, ChPid, Flow, BQS)).
+batch_publish(Publishes, ChPid, Flow, State = #state{bq = BQ}) ->
+ PubDict = partition_publish_batch(Publishes),
+ lists:foldl(
+ fun ({Priority, Pubs}, St) ->
+ pick1(fun (_P, BQSN) ->
+ BQ:batch_publish(Pubs, ChPid, Flow, BQSN)
+ end, Priority, St)
+ end, State, orddict:to_list(PubDict));
+batch_publish(Publishes, ChPid, Flow,
+ State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(batch_publish(Publishes, ChPid, Flow, BQS)).
+
publish_delivered(Msg, MsgProps, ChPid, Flow, State = #state{bq = BQ}) ->
pick2(fun (P, BQSN) ->
{AckTag, BQSN1} = BQ:publish_delivered(
@@ -213,6 +227,25 @@ publish_delivered(Msg, MsgProps, ChPid, Flow,
State = #passthrough{bq = BQ, bqs = BQS}) ->
?passthrough2(publish_delivered(Msg, MsgProps, ChPid, Flow, BQS)).
+batch_publish_delivered(Publishes, ChPid, Flow, State = #state{bq = BQ}) ->
+ PubDict = partition_publish_delivered_batch(Publishes),
+ {PrioritiesAndAcks, State1} =
+ lists:foldl(
+ fun ({Priority, Pubs}, {PriosAndAcks, St}) ->
+ {PriosAndAcks1, St1} =
+ pick2(fun (P, BQSN) ->
+ {AckTags, BQSN1} =
+ BQ:batch_publish_delivered(
+ Pubs, ChPid, Flow, BQSN),
+ {priority_on_acktags(P, AckTags), BQSN1}
+ end, Priority, St),
+ {[PriosAndAcks1 | PriosAndAcks], St1}
+ end, {[], State}, orddict:to_list(PubDict)),
+ {lists:reverse(PrioritiesAndAcks), State1};
+batch_publish_delivered(Publishes, ChPid, Flow,
+ State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough2(batch_publish_delivered(Publishes, ChPid, Flow, BQS)).
+
%% TODO this is a hack. The BQ api does not give us enough information
%% here - if we had the Msg we could look at its priority and forward
%% to the appropriate sub-BQ. But we don't so we are stuck.
@@ -376,6 +409,8 @@ info(backing_queue_status, #state{bq = BQ, bqss = BQSs}) ->
fold0(fun (P, BQSN, Acc) ->
combine_status(P, BQ:info(backing_queue_status, BQSN), Acc)
end, nothing, BQSs);
+info(head_message_timestamp, #state{bq = BQ, bqss = BQSs}) ->
+ find_head_message_timestamp(BQ, BQSs, '');
info(Item, #state{bq = BQ, bqss = BQSs}) ->
fold0(fun (_P, BQSN, Acc) ->
Acc + BQ:info(Item, BQSN)
@@ -393,6 +428,23 @@ is_duplicate(Msg, State = #state{bq = BQ}) ->
is_duplicate(Msg, State = #passthrough{bq = BQ, bqs = BQS}) ->
?passthrough2(is_duplicate(Msg, BQS)).
+set_queue_mode(Mode, State = #state{bq = BQ}) ->
+ foreach1(fun (_P, BQSN) -> BQ:set_queue_mode(Mode, BQSN) end, State);
+set_queue_mode(Mode, State = #passthrough{bq = BQ, bqs = BQS}) ->
+ ?passthrough1(set_queue_mode(Mode, BQS)).
+
+zip_msgs_and_acks(Msgs, AckTags, Accumulator, #state{}) ->
+ MsgsByPriority = partition_publish_delivered_batch(Msgs),
+ lists:foldl(fun (Acks, MAs) ->
+ {P, _AckTag} = hd(Acks),
+ Pubs = orddict:fetch(P, MsgsByPriority),
+ MAs0 = zip_msgs_and_acks(Pubs, Acks),
+ MAs ++ MAs0
+ end, Accumulator, AckTags);
+zip_msgs_and_acks(Msgs, AckTags, Accumulator,
+ #passthrough{bq = BQ, bqs = BQS}) ->
+ BQ:zip_msgs_and_acks(Msgs, AckTags, Accumulator, BQS).
+
%%----------------------------------------------------------------------------
bq() ->
@@ -530,6 +582,19 @@ a(State = #state{bqss = BQSs}) ->
end.
%%----------------------------------------------------------------------------
+partition_publish_batch(Publishes) ->
+ partition_publishes(
+ Publishes, fun ({Msg, _, _}) -> Msg end).
+
+partition_publish_delivered_batch(Publishes) ->
+ partition_publishes(
+ Publishes, fun ({Msg, _}) -> Msg end).
+
+partition_publishes(Publishes, ExtractMsg) ->
+ lists:foldl(fun (Pub, Dict) ->
+ Msg = ExtractMsg(Pub),
+ rabbit_misc:orddict_cons(priority2(Msg), Pub, Dict)
+ end, orddict:new(), Publishes).
priority(P, BQSs) when is_integer(P) ->
{P, bq_fetch(P, BQSs)};
@@ -538,18 +603,21 @@ priority(#basic_message{content = Content}, BQSs) ->
priority1(_Content, [{P, BQSN}]) ->
{P, BQSN};
-priority1(Content = #content{properties = Props},
- [{P, BQSN} | Rest]) ->
- #'P_basic'{priority = Priority0} = Props,
- Priority = case Priority0 of
- undefined -> 0;
- _ when is_integer(Priority0) -> Priority0
- end,
- case Priority >= P of
+priority1(Content, [{P, BQSN} | Rest]) ->
+ case priority2(Content) >= P of
true -> {P, BQSN};
false -> priority1(Content, Rest)
end.
+priority2(#basic_message{content = Content}) ->
+ priority2(rabbit_binary_parser:ensure_content_decoded(Content));
+priority2(#content{properties = Props}) ->
+ #'P_basic'{priority = Priority0} = Props,
+ case Priority0 of
+ undefined -> 0;
+ _ when is_integer(Priority0) -> Priority0
+ end.
+
add_maybe_infinity(infinity, _) -> infinity;
add_maybe_infinity(_, infinity) -> infinity;
add_maybe_infinity(A, B) -> A + B.
@@ -579,6 +647,32 @@ combine_status(P, New, Old) ->
cse(infinity, _) -> infinity;
cse(_, infinity) -> infinity;
+%% queue modes
+cse(_, default) -> default;
+cse(default, _) -> default;
+cse(_, lazy) -> lazy;
+cse(lazy, _) -> lazy;
+%% numerical stats
cse(A, B) when is_number(A) -> A + B;
cse({delta, _, _, _}, _) -> {delta, todo, todo, todo};
cse(A, B) -> exit({A, B}).
+
+%% When asked about 'head_message_timestamp' for this priority queue, we
+%% walk all the backing queues, starting with the highest priority. Once a
+%% backing queue having messages (ready or unacknowledged) is found, its
+%% 'head_message_timestamp' is returned even if it is null.
+
+find_head_message_timestamp(BQ, [{_, BQSN} | Rest], Timestamp) ->
+ MsgCount = BQ:len(BQSN) + BQ:info(messages_unacknowledged_ram, BQSN),
+ if
+ MsgCount =/= 0 -> BQ:info(head_message_timestamp, BQSN);
+ true -> find_head_message_timestamp(BQ, Rest, Timestamp)
+ end;
+find_head_message_timestamp(_, [], Timestamp) ->
+ Timestamp.
+
+zip_msgs_and_acks(Pubs, AckTags) ->
+ lists:zipwith(
+ fun ({#basic_message{ id = Id }, _Props}, AckTag) ->
+ {Id, AckTag}
+ end, Pubs, AckTags).
diff --git a/src/rabbit_queue_collector.erl b/src/rabbit_queue_collector.erl
deleted file mode 100644
index 734228be34..0000000000
--- a/src/rabbit_queue_collector.erl
+++ /dev/null
@@ -1,92 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_queue_collector).
-
--behaviour(gen_server).
-
--export([start_link/1, register/2, delete_all/1]).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
-
--record(state, {monitors, delete_from}).
-
--include("rabbit.hrl").
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start_link/1 :: (rabbit_types:proc_name()) ->
- rabbit_types:ok_pid_or_error()).
--spec(register/2 :: (pid(), pid()) -> 'ok').
--spec(delete_all/1 :: (pid()) -> 'ok').
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_link(ProcName) ->
- gen_server:start_link(?MODULE, [ProcName], []).
-
-register(CollectorPid, Q) ->
- gen_server:call(CollectorPid, {register, Q}, infinity).
-
-delete_all(CollectorPid) ->
- gen_server:call(CollectorPid, delete_all, infinity).
-
-%%----------------------------------------------------------------------------
-
-init([ProcName]) ->
- ?store_proc_name(ProcName),
- {ok, #state{monitors = pmon:new(), delete_from = undefined}}.
-
-%%--------------------------------------------------------------------------
-
-handle_call({register, QPid}, _From,
- State = #state{monitors = QMons, delete_from = Deleting}) ->
- case Deleting of
- undefined -> ok;
- _ -> ok = rabbit_amqqueue:delete_immediately([QPid])
- end,
- {reply, ok, State#state{monitors = pmon:monitor(QPid, QMons)}};
-
-handle_call(delete_all, From, State = #state{monitors = QMons,
- delete_from = undefined}) ->
- case pmon:monitored(QMons) of
- [] -> {reply, ok, State#state{delete_from = From}};
- QPids -> ok = rabbit_amqqueue:delete_immediately(QPids),
- {noreply, State#state{delete_from = From}}
- end.
-
-handle_cast(Msg, State) ->
- {stop, {unhandled_cast, Msg}, State}.
-
-handle_info({'DOWN', _MRef, process, DownPid, _Reason},
- State = #state{monitors = QMons, delete_from = Deleting}) ->
- QMons1 = pmon:erase(DownPid, QMons),
- case Deleting =/= undefined andalso pmon:is_empty(QMons1) of
- true -> gen_server:reply(Deleting, ok);
- false -> ok
- end,
- {noreply, State#state{monitors = QMons1}}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
diff --git a/src/rabbit_queue_consumers.erl b/src/rabbit_queue_consumers.erl
index cdb2bff4a9..29fc74dc52 100644
--- a/src/rabbit_queue_consumers.erl
+++ b/src/rabbit_queue_consumers.erl
@@ -99,7 +99,9 @@
%%----------------------------------------------------------------------------
new() -> #state{consumers = priority_queue:new(),
- use = {active, now_micros(), 1.0}}.
+ use = {active,
+ time_compat:monotonic_time(micro_seconds),
+ 1.0}}.
max_active_priority(#state{consumers = Consumers}) ->
priority_queue:highest(Consumers).
@@ -280,7 +282,7 @@ subtract_acks([T | TL] = AckTags, Prefix, CTagCounts, AckQ) ->
orddict:update_counter(CTag, 1, CTagCounts), QTail);
{{value, V}, QTail} ->
subtract_acks(AckTags, [V | Prefix], CTagCounts, QTail);
- {empty, _} ->
+ {empty, _} ->
subtract_acks([], Prefix, CTagCounts, AckQ)
end.
@@ -348,9 +350,9 @@ drain_mode(true) -> drain;
drain_mode(false) -> manual.
utilisation(#state{use = {active, Since, Avg}}) ->
- use_avg(now_micros() - Since, 0, Avg);
+ use_avg(time_compat:monotonic_time(micro_seconds) - Since, 0, Avg);
utilisation(#state{use = {inactive, Since, Active, Avg}}) ->
- use_avg(Active, now_micros() - Since, Avg).
+ use_avg(Active, time_compat:monotonic_time(micro_seconds) - Since, Avg).
%%----------------------------------------------------------------------------
@@ -457,14 +459,14 @@ update_use({inactive, _, _, _} = CUInfo, inactive) ->
update_use({active, _, _} = CUInfo, active) ->
CUInfo;
update_use({active, Since, Avg}, inactive) ->
- Now = now_micros(),
+ Now = time_compat:monotonic_time(micro_seconds),
{inactive, Now, Now - Since, Avg};
update_use({inactive, Since, Active, Avg}, active) ->
- Now = now_micros(),
+ Now = time_compat:monotonic_time(micro_seconds),
{active, Now, use_avg(Active, Now - Since, Avg)}.
+use_avg(0, 0, Avg) ->
+ Avg;
use_avg(Active, Inactive, Avg) ->
Time = Inactive + Active,
rabbit_misc:moving_average(Time, ?USE_AVG_HALF_LIFE, Active / Time, Avg).
-
-now_micros() -> timer:now_diff(now(), {0,0,0}).
diff --git a/src/rabbit_queue_decorator.erl b/src/rabbit_queue_decorator.erl
deleted file mode 100644
index 0c6f0820c7..0000000000
--- a/src/rabbit_queue_decorator.erl
+++ /dev/null
@@ -1,80 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_queue_decorator).
-
--include("rabbit.hrl").
-
--export([select/1, set/1, register/2, unregister/1]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--callback startup(rabbit_types:amqqueue()) -> 'ok'.
-
--callback shutdown(rabbit_types:amqqueue()) -> 'ok'.
-
--callback policy_changed(rabbit_types:amqqueue(), rabbit_types:amqqueue()) ->
- 'ok'.
-
--callback active_for(rabbit_types:amqqueue()) -> boolean().
-
-%% called with Queue, MaxActivePriority, IsEmpty
--callback consumer_state_changed(
- rabbit_types:amqqueue(), integer(), boolean()) -> 'ok'.
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
- [{startup, 1}, {shutdown, 1}, {policy_changed, 2},
- {active_for, 1}, {consumer_state_changed, 3}];
-behaviour_info(_Other) ->
- undefined.
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-select(Modules) ->
- [M || M <- Modules, code:which(M) =/= non_existing].
-
-set(Q) -> Q#amqqueue{decorators = [D || D <- list(), D:active_for(Q)]}.
-
-list() -> [M || {_, M} <- rabbit_registry:lookup_all(queue_decorator)].
-
-register(TypeName, ModuleName) ->
- rabbit_registry:register(queue_decorator, TypeName, ModuleName),
- [maybe_recover(Q) || Q <- rabbit_amqqueue:list()],
- ok.
-
-unregister(TypeName) ->
- rabbit_registry:unregister(queue_decorator, TypeName),
- [maybe_recover(Q) || Q <- rabbit_amqqueue:list()],
- ok.
-
-maybe_recover(Q = #amqqueue{name = Name,
- decorators = Decs}) ->
- #amqqueue{decorators = Decs1} = set(Q),
- Old = lists:sort(select(Decs)),
- New = lists:sort(select(Decs1)),
- case New of
- Old -> ok;
- _ -> [M:startup(Q) || M <- New -- Old],
- rabbit_amqqueue:update_decorators(Name)
- end.
diff --git a/src/rabbit_queue_index.erl b/src/rabbit_queue_index.erl
index 176f65b18b..b8b197de49 100644
--- a/src/rabbit_queue_index.erl
+++ b/src/rabbit_queue_index.erl
@@ -102,7 +102,7 @@
%% simplifies and clarifies the code.
%%
%% For notes on Clean Shutdown and startup, see documentation in
-%% variable_queue.
+%% rabbit_variable_queue.
%%
%%----------------------------------------------------------------------------
diff --git a/src/rabbit_queue_location_client_local.erl b/src/rabbit_queue_location_client_local.erl
new file mode 100644
index 0000000000..4cf91abc0a
--- /dev/null
+++ b/src/rabbit_queue_location_client_local.erl
@@ -0,0 +1,40 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(rabbit_queue_location_client_local).
+-behaviour(rabbit_queue_master_locator).
+
+-include("rabbit.hrl").
+
+-export([description/0, queue_master_location/1]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "locate queue master client local"},
+ {mfa, {rabbit_registry, register,
+ [queue_master_locator,
+ <<"client-local">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+
+%%---------------------------------------------------------------------------
+%% Queue Master Location Callbacks
+%%---------------------------------------------------------------------------
+
+description() ->
+ [{description, <<"Locate queue master node as the client local node">>}].
+
+queue_master_location(#amqqueue{}) -> {ok, node()}.
diff --git a/src/rabbit_queue_location_min_masters.erl b/src/rabbit_queue_location_min_masters.erl
new file mode 100644
index 0000000000..21c3bdb045
--- /dev/null
+++ b/src/rabbit_queue_location_min_masters.erl
@@ -0,0 +1,77 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(rabbit_queue_location_min_masters).
+-behaviour(rabbit_queue_master_locator).
+
+-include("rabbit.hrl").
+
+-export([description/0, queue_master_location/1]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "locate queue master min bound queues"},
+ {mfa, {rabbit_registry, register,
+ [queue_master_locator,
+ <<"min-masters">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+%%---------------------------------------------------------------------------
+%% Queue Master Location Callbacks
+%%---------------------------------------------------------------------------
+
+description() ->
+ [{description,
+ <<"Locate queue master node from cluster node with least bound queues">>}].
+
+queue_master_location(#amqqueue{}) ->
+ Cluster = rabbit_queue_master_location_misc:all_nodes(),
+ VHosts = rabbit_vhost:list(),
+ BoundQueueMasters = get_bound_queue_masters_per_vhost(VHosts, []),
+ {_Count, MinMaster}= get_min_master(Cluster, BoundQueueMasters),
+ {ok, MinMaster}.
+
+%%---------------------------------------------------------------------------
+%% Private helper functions
+%%---------------------------------------------------------------------------
+get_min_master(Cluster, BoundQueueMasters) ->
+ lists:min([ {count_masters(Node, BoundQueueMasters), Node} ||
+ Node <- Cluster ]).
+
+count_masters(Node, Masters) ->
+ length([ X || X <- Masters, X == Node ]).
+
+get_bound_queue_masters_per_vhost([], Acc) ->
+ lists:flatten(Acc);
+get_bound_queue_masters_per_vhost([VHost|RemVHosts], Acc) ->
+ Bindings = rabbit_binding:list(VHost),
+ BoundQueueMasters = get_queue_master_per_binding(VHost, Bindings, []),
+ get_bound_queue_masters_per_vhost(RemVHosts, [BoundQueueMasters|Acc]).
+
+
+get_queue_master_per_binding(_VHost, [], BoundQueueNodes) -> BoundQueueNodes;
+get_queue_master_per_binding(VHost, [#binding{destination=
+ #resource{kind=queue,
+ name=QueueName}}|
+ RemBindings],
+ QueueMastersAcc) ->
+ QueueMastersAcc0 = case rabbit_queue_master_location_misc:lookup_master(
+ QueueName, VHost) of
+ {ok, Master} when is_atom(Master) ->
+ [Master|QueueMastersAcc];
+ _ -> QueueMastersAcc
+ end,
+ get_queue_master_per_binding(VHost, RemBindings, QueueMastersAcc0).
diff --git a/src/rabbit_queue_location_random.erl b/src/rabbit_queue_location_random.erl
new file mode 100644
index 0000000000..6a6f1bd913
--- /dev/null
+++ b/src/rabbit_queue_location_random.erl
@@ -0,0 +1,44 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(rabbit_queue_location_random).
+-behaviour(rabbit_queue_master_locator).
+
+-include("rabbit.hrl").
+
+-export([description/0, queue_master_location/1]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "locate queue master random"},
+ {mfa, {rabbit_registry, register,
+ [queue_master_locator,
+ <<"random">>, ?MODULE]}},
+ {requires, rabbit_registry},
+ {enables, kernel_ready}]}).
+
+%%---------------------------------------------------------------------------
+%% Queue Master Location Callbacks
+%%---------------------------------------------------------------------------
+
+description() ->
+ [{description,
+ <<"Locate queue master node from cluster in a random manner">>}].
+
+queue_master_location(#amqqueue{}) ->
+ Cluster = rabbit_queue_master_location_misc:all_nodes(),
+ RandomPos = erlang:phash2(time_compat:monotonic_time(), length(Cluster)),
+ MasterNode = lists:nth(RandomPos + 1, Cluster),
+ {ok, MasterNode}.
diff --git a/src/rabbit_queue_location_validator.erl b/src/rabbit_queue_location_validator.erl
new file mode 100644
index 0000000000..00bea44e29
--- /dev/null
+++ b/src/rabbit_queue_location_validator.erl
@@ -0,0 +1,69 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(rabbit_queue_location_validator).
+-behaviour(rabbit_policy_validator).
+
+-include("rabbit.hrl").
+
+-export([validate_policy/1, validate_strategy/1]).
+
+-rabbit_boot_step({?MODULE,
+ [{description, "Queue location policy validation"},
+ {mfa, {rabbit_registry, register,
+ [policy_validator,
+ <<"queue-master-locator">>,
+ ?MODULE]}}]}).
+
+validate_policy(KeyList) ->
+ case proplists:lookup(<<"queue-master-locator">> , KeyList) of
+ {_, Strategy} -> validate_strategy(Strategy);
+ _ -> {error, "queue-master-locator undefined"}
+ end.
+
+validate_strategy(Strategy) ->
+ case module(Strategy) of
+ R = {ok, _M} -> R;
+ _ ->
+ {error, "~p invalid queue-master-locator value", [Strategy]}
+ end.
+
+policy(Policy, Q) ->
+ case rabbit_policy:get(Policy, Q) of
+ undefined -> none;
+ P -> P
+ end.
+
+module(#amqqueue{} = Q) ->
+ case policy(<<"queue-master-locator">>, Q) of
+ undefined -> no_location_strategy;
+ Mode -> module(Mode)
+ end;
+
+module(Strategy) when is_binary(Strategy) ->
+ case rabbit_registry:binary_to_type(Strategy) of
+ {error, not_found} -> no_location_strategy;
+ T ->
+ case rabbit_registry:lookup_module(queue_master_locator, T) of
+ {ok, Module} ->
+ case code:which(Module) of
+ non_existing -> no_location_strategy;
+ _ -> {ok, Module}
+ end;
+ _ ->
+ no_location_strategy
+ end
+ end.
diff --git a/src/rabbit_queue_master_location_misc.erl b/src/rabbit_queue_master_location_misc.erl
new file mode 100644
index 0000000000..17f5512f95
--- /dev/null
+++ b/src/rabbit_queue_master_location_misc.erl
@@ -0,0 +1,95 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License at
+%% http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+%% License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
+%%
+
+-module(rabbit_queue_master_location_misc).
+
+-include("rabbit.hrl").
+
+-export([lookup_master/2,
+ lookup_queue/2,
+ get_location/1,
+ get_location_mod_by_config/1,
+ get_location_mod_by_args/1,
+ get_location_mod_by_policy/1,
+ all_nodes/0]).
+
+lookup_master(QueueNameBin, VHostPath) when is_binary(QueueNameBin),
+ is_binary(VHostPath) ->
+ Queue = rabbit_misc:r(VHostPath, queue, QueueNameBin),
+ case rabbit_amqqueue:lookup(Queue) of
+ {ok, #amqqueue{pid = Pid}} when is_pid(Pid) ->
+ {ok, node(Pid)};
+ Error -> Error
+ end.
+
+lookup_queue(QueueNameBin, VHostPath) when is_binary(QueueNameBin),
+ is_binary(VHostPath) ->
+ Queue = rabbit_misc:r(VHostPath, queue, QueueNameBin),
+ case rabbit_amqqueue:lookup(Queue) of
+ Reply = {ok, #amqqueue{}} -> Reply;
+ Error -> Error
+ end.
+
+get_location(Queue=#amqqueue{})->
+ Reply1 = case get_location_mod_by_args(Queue) of
+ _Err1 = {error, _} ->
+ case get_location_mod_by_policy(Queue) of
+ _Err2 = {error, _} ->
+ case get_location_mod_by_config(Queue) of
+ Err3 = {error, _} -> Err3;
+ Reply0 = {ok, _Module} -> Reply0
+ end;
+ Reply0 = {ok, _Module} -> Reply0
+ end;
+ Reply0 = {ok, _Module} -> Reply0
+ end,
+
+ case Reply1 of
+ {ok, CB} -> CB:queue_master_location(Queue);
+ Error -> Error
+ end.
+
+get_location_mod_by_args(#amqqueue{arguments=Args}) ->
+ case proplists:lookup(<<"x-queue-master-locator">> , Args) of
+ {<<"x-queue-master-locator">> , Strategy} ->
+ case rabbit_queue_location_validator:validate_strategy(Strategy) of
+ Reply = {ok, _CB} -> Reply;
+ Error -> Error
+ end;
+ _ -> {error, "x-queue-master-locator undefined"}
+ end.
+
+get_location_mod_by_policy(Queue=#amqqueue{}) ->
+ case rabbit_policy:get(<<"queue-master-locator">> , Queue) of
+ undefined -> {error, "queue-master-locator policy undefined"};
+ Strategy ->
+ case rabbit_queue_location_validator:validate_strategy(Strategy) of
+ Reply = {ok, _CB} -> Reply;
+ Error -> Error
+ end
+ end.
+
+get_location_mod_by_config(#amqqueue{}) ->
+ case application:get_env(rabbit, queue_master_locator) of
+ {ok, Strategy} ->
+ case rabbit_queue_location_validator:validate_strategy(Strategy) of
+ Reply = {ok, _CB} -> Reply;
+ Error -> Error
+ end;
+ _ -> {error, "queue_master_locator undefined"}
+ end.
+
+all_nodes() -> rabbit_mnesia:cluster_nodes(running).
diff --git a/src/rabbit_reader.erl b/src/rabbit_reader.erl
deleted file mode 100644
index dca51f7664..0000000000
--- a/src/rabbit_reader.erl
+++ /dev/null
@@ -1,1296 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_reader).
--include("rabbit_framing.hrl").
--include("rabbit.hrl").
-
--export([start_link/1, info_keys/0, info/1, info/2, force_event_refresh/2,
- shutdown/2]).
-
--export([system_continue/3, system_terminate/4, system_code_change/4]).
-
--export([init/2, mainloop/4, recvloop/4]).
-
--export([conserve_resources/3, server_properties/1]).
-
--define(NORMAL_TIMEOUT, 3).
--define(CLOSING_TIMEOUT, 30).
--define(CHANNEL_TERMINATION_TIMEOUT, 3).
--define(SILENT_CLOSE_DELAY, 3).
--define(CHANNEL_MIN, 1).
-
-%%--------------------------------------------------------------------------
-
--record(v1, {parent, sock, connection, callback, recv_len, pending_recv,
- connection_state, helper_sup, queue_collector, heartbeater,
- stats_timer, channel_sup_sup_pid, channel_count, throttle}).
-
--record(connection, {name, host, peer_host, port, peer_port,
- protocol, user, timeout_sec, frame_max, channel_max, vhost,
- client_properties, capabilities,
- auth_mechanism, auth_state, connected_at}).
-
--record(throttle, {alarmed_by, last_blocked_by, last_blocked_at}).
-
--define(STATISTICS_KEYS, [pid, recv_oct, recv_cnt, send_oct, send_cnt,
- send_pend, state, channels]).
-
--define(CREATION_EVENT_KEYS,
- [pid, name, port, peer_port, host,
- peer_host, ssl, peer_cert_subject, peer_cert_issuer,
- peer_cert_validity, auth_mechanism, ssl_protocol,
- ssl_key_exchange, ssl_cipher, ssl_hash, protocol, user, vhost,
- timeout, frame_max, channel_max, client_properties, connected_at]).
-
--define(INFO_KEYS, ?CREATION_EVENT_KEYS ++ ?STATISTICS_KEYS -- [pid]).
-
--define(AUTH_NOTIFICATION_INFO_KEYS,
- [host, vhost, name, peer_host, peer_port, protocol, auth_mechanism,
- ssl, ssl_protocol, ssl_cipher, peer_cert_issuer, peer_cert_subject,
- peer_cert_validity]).
-
--define(IS_RUNNING(State),
- (State#v1.connection_state =:= running orelse
- State#v1.connection_state =:= blocking orelse
- State#v1.connection_state =:= blocked)).
-
--define(IS_STOPPING(State),
- (State#v1.connection_state =:= closing orelse
- State#v1.connection_state =:= closed)).
-
-%%--------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start_link/1 :: (pid()) -> rabbit_types:ok(pid())).
--spec(info_keys/0 :: () -> rabbit_types:info_keys()).
--spec(info/1 :: (pid()) -> rabbit_types:infos()).
--spec(info/2 :: (pid(), rabbit_types:info_keys()) -> rabbit_types:infos()).
--spec(force_event_refresh/2 :: (pid(), reference()) -> 'ok').
--spec(shutdown/2 :: (pid(), string()) -> 'ok').
--spec(conserve_resources/3 :: (pid(), atom(), boolean()) -> 'ok').
--spec(server_properties/1 :: (rabbit_types:protocol()) ->
- rabbit_framing:amqp_table()).
-
-%% These specs only exists to add no_return() to keep dialyzer happy
--spec(init/2 :: (pid(), pid()) -> no_return()).
--spec(start_connection/5 ::
- (pid(), pid(), any(), rabbit_net:socket(),
- fun ((rabbit_net:socket()) ->
- rabbit_types:ok_or_error2(
- rabbit_net:socket(), any()))) -> no_return()).
-
--spec(mainloop/4 :: (_,[binary()], non_neg_integer(), #v1{}) -> any()).
--spec(system_code_change/4 :: (_,_,_,_) -> {'ok',_}).
--spec(system_continue/3 :: (_,_,{[binary()], non_neg_integer(), #v1{}}) ->
- any()).
--spec(system_terminate/4 :: (_,_,_,_) -> none()).
-
--endif.
-
-%%--------------------------------------------------------------------------
-
-start_link(HelperSup) ->
- {ok, proc_lib:spawn_link(?MODULE, init, [self(), HelperSup])}.
-
-shutdown(Pid, Explanation) ->
- gen_server:call(Pid, {shutdown, Explanation}, infinity).
-
-init(Parent, HelperSup) ->
- Deb = sys:debug_options([]),
- receive
- {go, Sock, SockTransform} ->
- start_connection(Parent, HelperSup, Deb, Sock, SockTransform)
- end.
-
-system_continue(Parent, Deb, {Buf, BufLen, State}) ->
- mainloop(Deb, Buf, BufLen, State#v1{parent = Parent}).
-
-system_terminate(Reason, _Parent, _Deb, _State) ->
- exit(Reason).
-
-system_code_change(Misc, _Module, _OldVsn, _Extra) ->
- {ok, Misc}.
-
-info_keys() -> ?INFO_KEYS.
-
-info(Pid) ->
- gen_server:call(Pid, info, infinity).
-
-info(Pid, Items) ->
- case gen_server:call(Pid, {info, Items}, infinity) of
- {ok, Res} -> Res;
- {error, Error} -> throw(Error)
- end.
-
-force_event_refresh(Pid, Ref) ->
- gen_server:cast(Pid, {force_event_refresh, Ref}).
-
-conserve_resources(Pid, Source, Conserve) ->
- Pid ! {conserve_resources, Source, Conserve},
- ok.
-
-server_properties(Protocol) ->
- {ok, Product} = application:get_key(rabbit, id),
- {ok, Version} = application:get_key(rabbit, vsn),
-
- %% Get any configuration-specified server properties
- {ok, RawConfigServerProps} = application:get_env(rabbit,
- server_properties),
-
- %% Normalize the simplifed (2-tuple) and unsimplified (3-tuple) forms
- %% from the config and merge them with the generated built-in properties
- NormalizedConfigServerProps =
- [{<<"capabilities">>, table, server_capabilities(Protocol)} |
- [case X of
- {KeyAtom, Value} -> {list_to_binary(atom_to_list(KeyAtom)),
- longstr,
- maybe_list_to_binary(Value)};
- {BinKey, Type, Value} -> {BinKey, Type, Value}
- end || X <- RawConfigServerProps ++
- [{product, Product},
- {version, Version},
- {cluster_name, rabbit_nodes:cluster_name()},
- {platform, "Erlang/OTP"},
- {copyright, ?COPYRIGHT_MESSAGE},
- {information, ?INFORMATION_MESSAGE}]]],
-
- %% Filter duplicated properties in favour of config file provided values
- lists:usort(fun ({K1,_,_}, {K2,_,_}) -> K1 =< K2 end,
- NormalizedConfigServerProps).
-
-maybe_list_to_binary(V) when is_binary(V) -> V;
-maybe_list_to_binary(V) when is_list(V) -> list_to_binary(V).
-
-server_capabilities(rabbit_framing_amqp_0_9_1) ->
- [{<<"publisher_confirms">>, bool, true},
- {<<"exchange_exchange_bindings">>, bool, true},
- {<<"basic.nack">>, bool, true},
- {<<"consumer_cancel_notify">>, bool, true},
- {<<"connection.blocked">>, bool, true},
- {<<"consumer_priorities">>, bool, true},
- {<<"authentication_failure_close">>, bool, true},
- {<<"per_consumer_qos">>, bool, true}];
-server_capabilities(_) ->
- [].
-
-%%--------------------------------------------------------------------------
-
-log(Level, Fmt, Args) -> rabbit_log:log(connection, Level, Fmt, Args).
-
-socket_error(Reason) when is_atom(Reason) ->
- log(error, "Error on AMQP connection ~p: ~s~n",
- [self(), rabbit_misc:format_inet_error(Reason)]);
-socket_error(Reason) ->
- Level =
- case Reason of
- {ssl_upgrade_error, closed} ->
- %% The socket was closed while upgrading to SSL.
- %% This is presumably a TCP healthcheck, so don't log
- %% it unless specified otherwise.
- debug;
- _ ->
- error
- end,
- log(Level, "Error on AMQP connection ~p:~n~p~n", [self(), Reason]).
-
-inet_op(F) -> rabbit_misc:throw_on_error(inet_error, F).
-
-socket_op(Sock, Fun) ->
- case Fun(Sock) of
- {ok, Res} -> Res;
- {error, Reason} -> socket_error(Reason),
- %% NB: this is tcp socket, even in case of ssl
- rabbit_net:fast_close(Sock),
- exit(normal)
- end.
-
-start_connection(Parent, HelperSup, Deb, Sock, SockTransform) ->
- process_flag(trap_exit, true),
- Name = case rabbit_net:connection_string(Sock, inbound) of
- {ok, Str} -> Str;
- {error, enotconn} -> rabbit_net:fast_close(Sock),
- exit(normal);
- {error, Reason} -> socket_error(Reason),
- rabbit_net:fast_close(Sock),
- exit(normal)
- end,
- {ok, HandshakeTimeout} = application:get_env(rabbit, handshake_timeout),
- ClientSock = socket_op(Sock, SockTransform),
- erlang:send_after(HandshakeTimeout, self(), handshake_timeout),
- {PeerHost, PeerPort, Host, Port} =
- socket_op(Sock, fun (S) -> rabbit_net:socket_ends(S, inbound) end),
- ?store_proc_name(list_to_binary(Name)),
- State = #v1{parent = Parent,
- sock = ClientSock,
- connection = #connection{
- name = list_to_binary(Name),
- host = Host,
- peer_host = PeerHost,
- port = Port,
- peer_port = PeerPort,
- protocol = none,
- user = none,
- timeout_sec = (HandshakeTimeout / 1000),
- frame_max = ?FRAME_MIN_SIZE,
- vhost = none,
- client_properties = none,
- capabilities = [],
- auth_mechanism = none,
- auth_state = none,
- connected_at = rabbit_misc:now_to_ms(os:timestamp())},
- callback = uninitialized_callback,
- recv_len = 0,
- pending_recv = false,
- connection_state = pre_init,
- queue_collector = undefined, %% started on tune-ok
- helper_sup = HelperSup,
- heartbeater = none,
- channel_sup_sup_pid = none,
- channel_count = 0,
- throttle = #throttle{
- alarmed_by = [],
- last_blocked_by = none,
- last_blocked_at = never}},
- try
- run({?MODULE, recvloop,
- [Deb, [], 0, switch_callback(rabbit_event:init_stats_timer(
- State, #v1.stats_timer),
- handshake, 8)]}),
- log(info, "closing AMQP connection ~p (~s)~n", [self(), Name])
- catch
- Ex ->
- log_connection_exception(Name, Ex)
- after
- %% We don't call gen_tcp:close/1 here since it waits for
- %% pending output to be sent, which results in unnecessary
- %% delays. We could just terminate - the reader is the
- %% controlling process and hence its termination will close
- %% the socket. However, to keep the file_handle_cache
- %% accounting as accurate as possible we ought to close the
- %% socket w/o delay before termination.
- rabbit_net:fast_close(ClientSock),
- rabbit_networking:unregister_connection(self()),
- rabbit_event:notify(connection_closed, [{pid, self()}])
- end,
- done.
-
-log_connection_exception(Name, Ex) ->
- Severity = case Ex of
- connection_closed_with_no_data_received -> debug;
- connection_closed_abruptly -> warning;
- _ -> error
- end,
- log_connection_exception(Severity, Name, Ex).
-
-log_connection_exception(Severity, Name, {heartbeat_timeout, TimeoutSec}) ->
- %% Long line to avoid extra spaces and line breaks in log
- log(Severity, "closing AMQP connection ~p (~s):~nMissed heartbeats from client, timeout: ~ps~n",
- [self(), Name, TimeoutSec]);
-log_connection_exception(Severity, Name, Ex) ->
- log(Severity, "closing AMQP connection ~p (~s):~n~p~n",
- [self(), Name, Ex]).
-
-run({M, F, A}) ->
- try apply(M, F, A)
- catch {become, MFA} -> run(MFA)
- end.
-
-recvloop(Deb, Buf, BufLen, State = #v1{pending_recv = true}) ->
- mainloop(Deb, Buf, BufLen, State);
-recvloop(Deb, Buf, BufLen, State = #v1{connection_state = blocked}) ->
- mainloop(Deb, Buf, BufLen, State);
-recvloop(Deb, Buf, BufLen, State = #v1{connection_state = {become, F}}) ->
- throw({become, F(Deb, Buf, BufLen, State)});
-recvloop(Deb, Buf, BufLen, State = #v1{sock = Sock, recv_len = RecvLen})
- when BufLen < RecvLen ->
- case rabbit_net:setopts(Sock, [{active, once}]) of
- ok -> mainloop(Deb, Buf, BufLen,
- State#v1{pending_recv = true});
- {error, Reason} -> stop(Reason, State)
- end;
-recvloop(Deb, [B], _BufLen, State) ->
- {Rest, State1} = handle_input(State#v1.callback, B, State),
- recvloop(Deb, [Rest], size(Rest), State1);
-recvloop(Deb, Buf, BufLen, State = #v1{recv_len = RecvLen}) ->
- {DataLRev, RestLRev} = binlist_split(BufLen - RecvLen, Buf, []),
- Data = list_to_binary(lists:reverse(DataLRev)),
- {<<>>, State1} = handle_input(State#v1.callback, Data, State),
- recvloop(Deb, lists:reverse(RestLRev), BufLen - RecvLen, State1).
-
-binlist_split(0, L, Acc) ->
- {L, Acc};
-binlist_split(Len, L, [Acc0|Acc]) when Len < 0 ->
- {H, T} = split_binary(Acc0, -Len),
- {[H|L], [T|Acc]};
-binlist_split(Len, [H|T], Acc) ->
- binlist_split(Len - size(H), T, [H|Acc]).
-
-mainloop(Deb, Buf, BufLen, State = #v1{sock = Sock,
- connection_state = CS,
- connection = #connection{
- name = ConnName}}) ->
- Recv = rabbit_net:recv(Sock),
- case CS of
- pre_init when Buf =:= [] ->
- %% We only log incoming connections when either the
- %% first byte was received or there was an error (eg. a
- %% timeout).
- %%
- %% The goal is to not log TCP healthchecks (a connection
- %% with no data received) unless specified otherwise.
- log(case Recv of
- closed -> debug;
- _ -> info
- end, "accepting AMQP connection ~p (~s)~n",
- [self(), ConnName]);
- _ ->
- ok
- end,
- case Recv of
- {data, Data} ->
- recvloop(Deb, [Data | Buf], BufLen + size(Data),
- State#v1{pending_recv = false});
- closed when State#v1.connection_state =:= closed ->
- ok;
- closed when CS =:= pre_init andalso Buf =:= [] ->
- stop(tcp_healthcheck, State);
- closed ->
- stop(closed, State);
- {error, Reason} ->
- stop(Reason, State);
- {other, {system, From, Request}} ->
- sys:handle_system_msg(Request, From, State#v1.parent,
- ?MODULE, Deb, {Buf, BufLen, State});
- {other, Other} ->
- case handle_other(Other, State) of
- stop -> ok;
- NewState -> recvloop(Deb, Buf, BufLen, NewState)
- end
- end.
-
-stop(tcp_healthcheck, State) ->
- %% The connection was closed before any packet was received. It's
- %% probably a load-balancer healthcheck: don't consider this a
- %% failure.
- maybe_emit_stats(State),
- throw(connection_closed_with_no_data_received);
-stop(closed, State) ->
- maybe_emit_stats(State),
- throw(connection_closed_abruptly);
-stop(Reason, State) ->
- maybe_emit_stats(State),
- throw({inet_error, Reason}).
-
-handle_other({conserve_resources, Source, Conserve},
- State = #v1{throttle = Throttle = #throttle{alarmed_by = CR}}) ->
- CR1 = case Conserve of
- true -> lists:usort([Source | CR]);
- false -> CR -- [Source]
- end,
- State1 = control_throttle(
- State#v1{throttle = Throttle#throttle{alarmed_by = CR1}}),
- case {blocked_by_alarm(State), blocked_by_alarm(State1)} of
- {false, true} -> ok = send_blocked(State1);
- {true, false} -> ok = send_unblocked(State1);
- {_, _} -> ok
- end,
- State1;
-handle_other({channel_closing, ChPid}, State) ->
- ok = rabbit_channel:ready_for_close(ChPid),
- {_, State1} = channel_cleanup(ChPid, State),
- maybe_close(control_throttle(State1));
-handle_other({'EXIT', Parent, Reason}, State = #v1{parent = Parent}) ->
- terminate(io_lib:format("broker forced connection closure "
- "with reason '~w'", [Reason]), State),
- %% this is what we are expected to do according to
- %% http://www.erlang.org/doc/man/sys.html
- %%
- %% If we wanted to be *really* nice we should wait for a while for
- %% clients to close the socket at their end, just as we do in the
- %% ordinary error case. However, since this termination is
- %% initiated by our parent it is probably more important to exit
- %% quickly.
- maybe_emit_stats(State),
- exit(Reason);
-handle_other({channel_exit, _Channel, E = {writer, send_failed, _E}}, State) ->
- maybe_emit_stats(State),
- throw(E);
-handle_other({channel_exit, Channel, Reason}, State) ->
- handle_exception(State, Channel, Reason);
-handle_other({'DOWN', _MRef, process, ChPid, Reason}, State) ->
- handle_dependent_exit(ChPid, Reason, State);
-handle_other(terminate_connection, State) ->
- maybe_emit_stats(State),
- stop;
-handle_other(handshake_timeout, State)
- when ?IS_RUNNING(State) orelse ?IS_STOPPING(State) ->
- State;
-handle_other(handshake_timeout, State) ->
- maybe_emit_stats(State),
- throw({handshake_timeout, State#v1.callback});
-handle_other(heartbeat_timeout, State = #v1{connection_state = closed}) ->
- State;
-handle_other(heartbeat_timeout,
- State = #v1{connection = #connection{timeout_sec = T}}) ->
- maybe_emit_stats(State),
- throw({heartbeat_timeout, T});
-handle_other({'$gen_call', From, {shutdown, Explanation}}, State) ->
- {ForceTermination, NewState} = terminate(Explanation, State),
- gen_server:reply(From, ok),
- case ForceTermination of
- force -> stop;
- normal -> NewState
- end;
-handle_other({'$gen_call', From, info}, State) ->
- gen_server:reply(From, infos(?INFO_KEYS, State)),
- State;
-handle_other({'$gen_call', From, {info, Items}}, State) ->
- gen_server:reply(From, try {ok, infos(Items, State)}
- catch Error -> {error, Error}
- end),
- State;
-handle_other({'$gen_cast', {force_event_refresh, Ref}}, State)
- when ?IS_RUNNING(State) ->
- rabbit_event:notify(
- connection_created,
- [{type, network} | infos(?CREATION_EVENT_KEYS, State)], Ref),
- rabbit_event:init_stats_timer(State, #v1.stats_timer);
-handle_other({'$gen_cast', {force_event_refresh, _Ref}}, State) ->
- %% Ignore, we will emit a created event once we start running.
- State;
-handle_other(ensure_stats, State) ->
- ensure_stats_timer(State);
-handle_other(emit_stats, State) ->
- emit_stats(State);
-handle_other({bump_credit, Msg}, State) ->
- %% Here we are receiving credit by some channel process.
- credit_flow:handle_bump_msg(Msg),
- control_throttle(State);
-handle_other(Other, State) ->
- %% internal error -> something worth dying for
- maybe_emit_stats(State),
- exit({unexpected_message, Other}).
-
-switch_callback(State, Callback, Length) ->
- State#v1{callback = Callback, recv_len = Length}.
-
-terminate(Explanation, State) when ?IS_RUNNING(State) ->
- {normal, handle_exception(State, 0,
- rabbit_misc:amqp_error(
- connection_forced, Explanation, [], none))};
-terminate(_Explanation, State) ->
- {force, State}.
-
-control_throttle(State = #v1{connection_state = CS, throttle = Throttle}) ->
- IsThrottled = ((Throttle#throttle.alarmed_by =/= []) orelse
- credit_flow:blocked()),
- case {CS, IsThrottled} of
- {running, true} -> State#v1{connection_state = blocking};
- {blocking, false} -> State#v1{connection_state = running};
- {blocked, false} -> ok = rabbit_heartbeat:resume_monitor(
- State#v1.heartbeater),
- State#v1{connection_state = running};
- {blocked, true} -> State#v1{throttle = update_last_blocked_by(
- Throttle)};
- {_, _} -> State
- end.
-
-maybe_block(State = #v1{connection_state = blocking,
- throttle = Throttle}) ->
- ok = rabbit_heartbeat:pause_monitor(State#v1.heartbeater),
- State1 = State#v1{connection_state = blocked,
- throttle = update_last_blocked_by(
- Throttle#throttle{
- last_blocked_at = erlang:now()})},
- case {blocked_by_alarm(State), blocked_by_alarm(State1)} of
- {false, true} -> ok = send_blocked(State1);
- {_, _} -> ok
- end,
- State1;
-maybe_block(State) ->
- State.
-
-
-blocked_by_alarm(#v1{connection_state = blocked,
- throttle = #throttle{alarmed_by = CR}})
- when CR =/= [] ->
- true;
-blocked_by_alarm(#v1{}) ->
- false.
-
-send_blocked(#v1{throttle = #throttle{alarmed_by = CR},
- connection = #connection{protocol = Protocol,
- capabilities = Capabilities},
- sock = Sock}) ->
- case rabbit_misc:table_lookup(Capabilities, <<"connection.blocked">>) of
- {bool, true} ->
- RStr = string:join([atom_to_list(A) || A <- CR], " & "),
- Reason = list_to_binary(rabbit_misc:format("low on ~s", [RStr])),
- ok = send_on_channel0(Sock, #'connection.blocked'{reason = Reason},
- Protocol);
- _ ->
- ok
- end.
-
-send_unblocked(#v1{connection = #connection{protocol = Protocol,
- capabilities = Capabilities},
- sock = Sock}) ->
- case rabbit_misc:table_lookup(Capabilities, <<"connection.blocked">>) of
- {bool, true} ->
- ok = send_on_channel0(Sock, #'connection.unblocked'{}, Protocol);
- _ ->
- ok
- end.
-
-update_last_blocked_by(Throttle = #throttle{alarmed_by = []}) ->
- Throttle#throttle{last_blocked_by = flow};
-update_last_blocked_by(Throttle) ->
- Throttle#throttle{last_blocked_by = resource}.
-
-%%--------------------------------------------------------------------------
-%% error handling / termination
-
-close_connection(State = #v1{queue_collector = Collector,
- connection = #connection{
- timeout_sec = TimeoutSec}}) ->
- %% The spec says "Exclusive queues may only be accessed by the
- %% current connection, and are deleted when that connection
- %% closes." This does not strictly imply synchrony, but in
- %% practice it seems to be what people assume.
- rabbit_queue_collector:delete_all(Collector),
- %% We terminate the connection after the specified interval, but
- %% no later than ?CLOSING_TIMEOUT seconds.
- erlang:send_after((if TimeoutSec > 0 andalso
- TimeoutSec < ?CLOSING_TIMEOUT -> TimeoutSec;
- true -> ?CLOSING_TIMEOUT
- end) * 1000, self(), terminate_connection),
- State#v1{connection_state = closed}.
-
-handle_dependent_exit(ChPid, Reason, State) ->
- {Channel, State1} = channel_cleanup(ChPid, State),
- case {Channel, termination_kind(Reason)} of
- {undefined, controlled} -> State1;
- {undefined, uncontrolled} -> exit({abnormal_dependent_exit,
- ChPid, Reason});
- {_, controlled} -> maybe_close(control_throttle(State1));
- {_, uncontrolled} -> State2 = handle_exception(
- State1, Channel, Reason),
- maybe_close(control_throttle(State2))
- end.
-
-terminate_channels(#v1{channel_count = 0} = State) ->
- State;
-terminate_channels(#v1{channel_count = ChannelCount} = State) ->
- lists:foreach(fun rabbit_channel:shutdown/1, all_channels()),
- Timeout = 1000 * ?CHANNEL_TERMINATION_TIMEOUT * ChannelCount,
- TimerRef = erlang:send_after(Timeout, self(), cancel_wait),
- wait_for_channel_termination(ChannelCount, TimerRef, State).
-
-wait_for_channel_termination(0, TimerRef, State) ->
- case erlang:cancel_timer(TimerRef) of
- false -> receive
- cancel_wait -> State
- end;
- _ -> State
- end;
-wait_for_channel_termination(N, TimerRef,
- State = #v1{connection_state = CS,
- connection = #connection{
- name = ConnName,
- user = User,
- vhost = VHost}}) ->
- receive
- {'DOWN', _MRef, process, ChPid, Reason} ->
- {Channel, State1} = channel_cleanup(ChPid, State),
- case {Channel, termination_kind(Reason)} of
- {undefined, _} ->
- exit({abnormal_dependent_exit, ChPid, Reason});
- {_, controlled} ->
- wait_for_channel_termination(N-1, TimerRef, State1);
- {_, uncontrolled} ->
- log(error, "Error on AMQP connection ~p (~s, vhost: '~s',"
- " user: '~s', state: ~p), channel ~p:"
- "error while terminating:~n~p~n",
- [self(), ConnName, VHost, User#user.username,
- CS, Channel, Reason]),
- wait_for_channel_termination(N-1, TimerRef, State1)
- end;
- cancel_wait ->
- exit(channel_termination_timeout)
- end.
-
-maybe_close(State = #v1{connection_state = closing,
- channel_count = 0,
- connection = #connection{protocol = Protocol},
- sock = Sock}) ->
- NewState = close_connection(State),
- ok = send_on_channel0(Sock, #'connection.close_ok'{}, Protocol),
- NewState;
-maybe_close(State) ->
- State.
-
-termination_kind(normal) -> controlled;
-termination_kind(_) -> uncontrolled.
-
-log_hard_error(#v1{connection_state = CS,
- connection = #connection{
- name = ConnName,
- user = User,
- vhost = VHost}}, Channel, Reason) ->
- log(error,
- "Error on AMQP connection ~p (~s, vhost: '~s',"
- " user: '~s', state: ~p), channel ~p:~n~p~n",
- [self(), ConnName, VHost, User#user.username, CS, Channel, Reason]).
-
-handle_exception(State = #v1{connection_state = closed}, Channel, Reason) ->
- log_hard_error(State, Channel, Reason),
- State;
-handle_exception(State = #v1{connection = #connection{protocol = Protocol},
- connection_state = CS},
- Channel, Reason)
- when ?IS_RUNNING(State) orelse CS =:= closing ->
- log_hard_error(State, Channel, Reason),
- {0, CloseMethod} =
- rabbit_binary_generator:map_exception(Channel, Reason, Protocol),
- State1 = close_connection(terminate_channels(State)),
- ok = send_on_channel0(State1#v1.sock, CloseMethod, Protocol),
- State1;
-handle_exception(State, Channel, Reason) ->
- %% We don't trust the client at this point - force them to wait
- %% for a bit so they can't DOS us with repeated failed logins etc.
- timer:sleep(?SILENT_CLOSE_DELAY * 1000),
- throw({handshake_error, State#v1.connection_state, Channel, Reason}).
-
-%% we've "lost sync" with the client and hence must not accept any
-%% more input
-fatal_frame_error(Error, Type, Channel, Payload, State) ->
- frame_error(Error, Type, Channel, Payload, State),
- %% grace period to allow transmission of error
- timer:sleep(?SILENT_CLOSE_DELAY * 1000),
- throw(fatal_frame_error).
-
-frame_error(Error, Type, Channel, Payload, State) ->
- {Str, Bin} = payload_snippet(Payload),
- handle_exception(State, Channel,
- rabbit_misc:amqp_error(frame_error,
- "type ~p, ~s octets = ~p: ~p",
- [Type, Str, Bin, Error], none)).
-
-unexpected_frame(Type, Channel, Payload, State) ->
- {Str, Bin} = payload_snippet(Payload),
- handle_exception(State, Channel,
- rabbit_misc:amqp_error(unexpected_frame,
- "type ~p, ~s octets = ~p",
- [Type, Str, Bin], none)).
-
-payload_snippet(Payload) when size(Payload) =< 16 ->
- {"all", Payload};
-payload_snippet(<<Snippet:16/binary, _/binary>>) ->
- {"first 16", Snippet}.
-
-%%--------------------------------------------------------------------------
-
-create_channel(_Channel,
- #v1{channel_count = ChannelCount,
- connection = #connection{channel_max = ChannelMax}})
- when ChannelMax /= 0 andalso ChannelCount >= ChannelMax ->
- {error, rabbit_misc:amqp_error(
- not_allowed, "number of channels opened (~w) has reached the "
- "negotiated channel_max (~w)",
- [ChannelCount, ChannelMax], 'none')};
-create_channel(Channel,
- #v1{sock = Sock,
- queue_collector = Collector,
- channel_sup_sup_pid = ChanSupSup,
- channel_count = ChannelCount,
- connection =
- #connection{name = Name,
- protocol = Protocol,
- frame_max = FrameMax,
- user = User,
- vhost = VHost,
- capabilities = Capabilities}} = State) ->
- {ok, _ChSupPid, {ChPid, AState}} =
- rabbit_channel_sup_sup:start_channel(
- ChanSupSup, {tcp, Sock, Channel, FrameMax, self(), Name,
- Protocol, User, VHost, Capabilities, Collector}),
- MRef = erlang:monitor(process, ChPid),
- put({ch_pid, ChPid}, {Channel, MRef}),
- put({channel, Channel}, {ChPid, AState}),
- {ok, {ChPid, AState}, State#v1{channel_count = ChannelCount + 1}}.
-
-channel_cleanup(ChPid, State = #v1{channel_count = ChannelCount}) ->
- case get({ch_pid, ChPid}) of
- undefined -> {undefined, State};
- {Channel, MRef} -> credit_flow:peer_down(ChPid),
- erase({channel, Channel}),
- erase({ch_pid, ChPid}),
- erlang:demonitor(MRef, [flush]),
- {Channel, State#v1{channel_count = ChannelCount - 1}}
- end.
-
-all_channels() -> [ChPid || {{ch_pid, ChPid}, _ChannelMRef} <- get()].
-
-%%--------------------------------------------------------------------------
-
-handle_frame(Type, 0, Payload,
- State = #v1{connection = #connection{protocol = Protocol}})
- when ?IS_STOPPING(State) ->
- case rabbit_command_assembler:analyze_frame(Type, Payload, Protocol) of
- {method, MethodName, FieldsBin} ->
- handle_method0(MethodName, FieldsBin, State);
- _Other -> State
- end;
-handle_frame(Type, 0, Payload,
- State = #v1{connection = #connection{protocol = Protocol}}) ->
- case rabbit_command_assembler:analyze_frame(Type, Payload, Protocol) of
- error -> frame_error(unknown_frame, Type, 0, Payload, State);
- heartbeat -> State;
- {method, MethodName, FieldsBin} ->
- handle_method0(MethodName, FieldsBin, State);
- _Other -> unexpected_frame(Type, 0, Payload, State)
- end;
-handle_frame(Type, Channel, Payload,
- State = #v1{connection = #connection{protocol = Protocol}})
- when ?IS_RUNNING(State) ->
- case rabbit_command_assembler:analyze_frame(Type, Payload, Protocol) of
- error -> frame_error(unknown_frame, Type, Channel, Payload, State);
- heartbeat -> unexpected_frame(Type, Channel, Payload, State);
- Frame -> process_frame(Frame, Channel, State)
- end;
-handle_frame(_Type, _Channel, _Payload, State) when ?IS_STOPPING(State) ->
- State;
-handle_frame(Type, Channel, Payload, State) ->
- unexpected_frame(Type, Channel, Payload, State).
-
-process_frame(Frame, Channel, State) ->
- ChKey = {channel, Channel},
- case (case get(ChKey) of
- undefined -> create_channel(Channel, State);
- Other -> {ok, Other, State}
- end) of
- {error, Error} ->
- handle_exception(State, Channel, Error);
- {ok, {ChPid, AState}, State1} ->
- case rabbit_command_assembler:process(Frame, AState) of
- {ok, NewAState} ->
- put(ChKey, {ChPid, NewAState}),
- post_process_frame(Frame, ChPid, State1);
- {ok, Method, NewAState} ->
- rabbit_channel:do(ChPid, Method),
- put(ChKey, {ChPid, NewAState}),
- post_process_frame(Frame, ChPid, State1);
- {ok, Method, Content, NewAState} ->
- rabbit_channel:do_flow(ChPid, Method, Content),
- put(ChKey, {ChPid, NewAState}),
- post_process_frame(Frame, ChPid, control_throttle(State1));
- {error, Reason} ->
- handle_exception(State1, Channel, Reason)
- end
- end.
-
-post_process_frame({method, 'channel.close_ok', _}, ChPid, State) ->
- {_, State1} = channel_cleanup(ChPid, State),
- %% This is not strictly necessary, but more obviously
- %% correct. Also note that we do not need to call maybe_close/1
- %% since we cannot possibly be in the 'closing' state.
- control_throttle(State1);
-post_process_frame({content_header, _, _, _, _}, _ChPid, State) ->
- maybe_block(State);
-post_process_frame({content_body, _}, _ChPid, State) ->
- maybe_block(State);
-post_process_frame(_Frame, _ChPid, State) ->
- State.
-
-%%--------------------------------------------------------------------------
-
-%% We allow clients to exceed the frame size a little bit since quite
-%% a few get it wrong - off-by 1 or 8 (empty frame size) are typical.
--define(FRAME_SIZE_FUDGE, ?EMPTY_FRAME_SIZE).
-
-handle_input(frame_header, <<Type:8,Channel:16,PayloadSize:32, _/binary>>,
- State = #v1{connection = #connection{frame_max = FrameMax}})
- when FrameMax /= 0 andalso
- PayloadSize > FrameMax - ?EMPTY_FRAME_SIZE + ?FRAME_SIZE_FUDGE ->
- fatal_frame_error(
- {frame_too_large, PayloadSize, FrameMax - ?EMPTY_FRAME_SIZE},
- Type, Channel, <<>>, State);
-handle_input(frame_header, <<Type:8,Channel:16,PayloadSize:32,
- Payload:PayloadSize/binary, ?FRAME_END,
- Rest/binary>>,
- State) ->
- {Rest, ensure_stats_timer(handle_frame(Type, Channel, Payload, State))};
-handle_input(frame_header, <<Type:8,Channel:16,PayloadSize:32, Rest/binary>>,
- State) ->
- {Rest, ensure_stats_timer(
- switch_callback(State,
- {frame_payload, Type, Channel, PayloadSize},
- PayloadSize + 1))};
-handle_input({frame_payload, Type, Channel, PayloadSize}, Data, State) ->
- <<Payload:PayloadSize/binary, EndMarker, Rest/binary>> = Data,
- case EndMarker of
- ?FRAME_END -> State1 = handle_frame(Type, Channel, Payload, State),
- {Rest, switch_callback(State1, frame_header, 7)};
- _ -> fatal_frame_error({invalid_frame_end_marker, EndMarker},
- Type, Channel, Payload, State)
- end;
-handle_input(handshake, <<"AMQP", A, B, C, D, Rest/binary>>, State) ->
- {Rest, handshake({A, B, C, D}, State)};
-handle_input(handshake, <<Other:8/binary, _/binary>>, #v1{sock = Sock}) ->
- refuse_connection(Sock, {bad_header, Other});
-handle_input(Callback, Data, _State) ->
- throw({bad_input, Callback, Data}).
-
-%% The two rules pertaining to version negotiation:
-%%
-%% * If the server cannot support the protocol specified in the
-%% protocol header, it MUST respond with a valid protocol header and
-%% then close the socket connection.
-%%
-%% * The server MUST provide a protocol version that is lower than or
-%% equal to that requested by the client in the protocol header.
-handshake({0, 0, 9, 1}, State) ->
- start_connection({0, 9, 1}, rabbit_framing_amqp_0_9_1, State);
-
-%% This is the protocol header for 0-9, which we can safely treat as
-%% though it were 0-9-1.
-handshake({1, 1, 0, 9}, State) ->
- start_connection({0, 9, 0}, rabbit_framing_amqp_0_9_1, State);
-
-%% This is what most clients send for 0-8. The 0-8 spec, confusingly,
-%% defines the version as 8-0.
-handshake({1, 1, 8, 0}, State) ->
- start_connection({8, 0, 0}, rabbit_framing_amqp_0_8, State);
-
-%% The 0-8 spec as on the AMQP web site actually has this as the
-%% protocol header; some libraries e.g., py-amqplib, send it when they
-%% want 0-8.
-handshake({1, 1, 9, 1}, State) ->
- start_connection({8, 0, 0}, rabbit_framing_amqp_0_8, State);
-
-%% ... and finally, the 1.0 spec is crystal clear!
-handshake({Id, 1, 0, 0}, State) ->
- become_1_0(Id, State);
-
-handshake(Vsn, #v1{sock = Sock}) ->
- refuse_connection(Sock, {bad_version, Vsn}).
-
-%% Offer a protocol version to the client. Connection.start only
-%% includes a major and minor version number, Luckily 0-9 and 0-9-1
-%% are similar enough that clients will be happy with either.
-start_connection({ProtocolMajor, ProtocolMinor, _ProtocolRevision},
- Protocol,
- State = #v1{sock = Sock, connection = Connection}) ->
- rabbit_networking:register_connection(self()),
- Start = #'connection.start'{
- version_major = ProtocolMajor,
- version_minor = ProtocolMinor,
- server_properties = server_properties(Protocol),
- mechanisms = auth_mechanisms_binary(Sock),
- locales = <<"en_US">> },
- ok = send_on_channel0(Sock, Start, Protocol),
- switch_callback(State#v1{connection = Connection#connection{
- timeout_sec = ?NORMAL_TIMEOUT,
- protocol = Protocol},
- connection_state = starting},
- frame_header, 7).
-
-refuse_connection(Sock, Exception, {A, B, C, D}) ->
- ok = inet_op(fun () -> rabbit_net:send(Sock, <<"AMQP",A,B,C,D>>) end),
- throw(Exception).
-
--ifdef(use_specs).
--spec(refuse_connection/2 :: (rabbit_net:socket(), any()) -> no_return()).
--endif.
-refuse_connection(Sock, Exception) ->
- refuse_connection(Sock, Exception, {0, 0, 9, 1}).
-
-ensure_stats_timer(State = #v1{connection_state = running}) ->
- rabbit_event:ensure_stats_timer(State, #v1.stats_timer, emit_stats);
-ensure_stats_timer(State) ->
- State.
-
-%%--------------------------------------------------------------------------
-
-handle_method0(MethodName, FieldsBin,
- State = #v1{connection = #connection{protocol = Protocol}}) ->
- try
- handle_method0(Protocol:decode_method_fields(MethodName, FieldsBin),
- State)
- catch throw:{inet_error, E} when E =:= closed; E =:= enotconn ->
- maybe_emit_stats(State),
- throw(connection_closed_abruptly);
- exit:#amqp_error{method = none} = Reason ->
- handle_exception(State, 0, Reason#amqp_error{method = MethodName});
- Type:Reason ->
- Stack = erlang:get_stacktrace(),
- handle_exception(State, 0, {Type, Reason, MethodName, Stack})
- end.
-
-handle_method0(#'connection.start_ok'{mechanism = Mechanism,
- response = Response,
- client_properties = ClientProperties},
- State0 = #v1{connection_state = starting,
- connection = Connection,
- sock = Sock}) ->
- AuthMechanism = auth_mechanism_to_module(Mechanism, Sock),
- Capabilities =
- case rabbit_misc:table_lookup(ClientProperties, <<"capabilities">>) of
- {table, Capabilities1} -> Capabilities1;
- _ -> []
- end,
- State = State0#v1{connection_state = securing,
- connection =
- Connection#connection{
- client_properties = ClientProperties,
- capabilities = Capabilities,
- auth_mechanism = {Mechanism, AuthMechanism},
- auth_state = AuthMechanism:init(Sock)}},
- auth_phase(Response, State);
-
-handle_method0(#'connection.secure_ok'{response = Response},
- State = #v1{connection_state = securing}) ->
- auth_phase(Response, State);
-
-handle_method0(#'connection.tune_ok'{frame_max = FrameMax,
- channel_max = ChannelMax,
- heartbeat = ClientHeartbeat},
- State = #v1{connection_state = tuning,
- connection = Connection,
- helper_sup = SupPid,
- sock = Sock}) ->
- ok = validate_negotiated_integer_value(
- frame_max, ?FRAME_MIN_SIZE, FrameMax),
- ok = validate_negotiated_integer_value(
- channel_max, ?CHANNEL_MIN, ChannelMax),
- {ok, Collector} = rabbit_connection_helper_sup:start_queue_collector(
- SupPid, Connection#connection.name),
- Frame = rabbit_binary_generator:build_heartbeat_frame(),
- SendFun = fun() -> catch rabbit_net:send(Sock, Frame) end,
- Parent = self(),
- ReceiveFun = fun() -> Parent ! heartbeat_timeout end,
- Heartbeater = rabbit_heartbeat:start(
- SupPid, Sock, Connection#connection.name,
- ClientHeartbeat, SendFun, ClientHeartbeat, ReceiveFun),
- State#v1{connection_state = opening,
- connection = Connection#connection{
- frame_max = FrameMax,
- channel_max = ChannelMax,
- timeout_sec = ClientHeartbeat},
- queue_collector = Collector,
- heartbeater = Heartbeater};
-
-handle_method0(#'connection.open'{virtual_host = VHostPath},
- State = #v1{connection_state = opening,
- connection = Connection = #connection{
- user = User,
- protocol = Protocol},
- helper_sup = SupPid,
- sock = Sock,
- throttle = Throttle}) ->
- ok = rabbit_access_control:check_vhost_access(User, VHostPath, Sock),
- NewConnection = Connection#connection{vhost = VHostPath},
- ok = send_on_channel0(Sock, #'connection.open_ok'{}, Protocol),
- Conserve = rabbit_alarm:register(self(), {?MODULE, conserve_resources, []}),
- Throttle1 = Throttle#throttle{alarmed_by = Conserve},
- {ok, ChannelSupSupPid} =
- rabbit_connection_helper_sup:start_channel_sup_sup(SupPid),
- State1 = control_throttle(
- State#v1{connection_state = running,
- connection = NewConnection,
- channel_sup_sup_pid = ChannelSupSupPid,
- throttle = Throttle1}),
- rabbit_event:notify(connection_created,
- [{type, network} |
- infos(?CREATION_EVENT_KEYS, State1)]),
- maybe_emit_stats(State1),
- State1;
-handle_method0(#'connection.close'{}, State) when ?IS_RUNNING(State) ->
- lists:foreach(fun rabbit_channel:shutdown/1, all_channels()),
- maybe_close(State#v1{connection_state = closing});
-handle_method0(#'connection.close'{},
- State = #v1{connection = #connection{protocol = Protocol},
- sock = Sock})
- when ?IS_STOPPING(State) ->
- %% We're already closed or closing, so we don't need to cleanup
- %% anything.
- ok = send_on_channel0(Sock, #'connection.close_ok'{}, Protocol),
- State;
-handle_method0(#'connection.close_ok'{},
- State = #v1{connection_state = closed}) ->
- self() ! terminate_connection,
- State;
-handle_method0(_Method, State) when ?IS_STOPPING(State) ->
- State;
-handle_method0(_Method, #v1{connection_state = S}) ->
- rabbit_misc:protocol_error(
- channel_error, "unexpected method in connection state ~w", [S]).
-
-validate_negotiated_integer_value(Field, Min, ClientValue) ->
- ServerValue = get_env(Field),
- if ClientValue /= 0 andalso ClientValue < Min ->
- fail_negotiation(Field, min, Min, ClientValue);
- ServerValue /= 0 andalso (ClientValue =:= 0 orelse
- ClientValue > ServerValue) ->
- fail_negotiation(Field, max, ServerValue, ClientValue);
- true ->
- ok
- end.
-
-%% keep dialyzer happy
--spec fail_negotiation(atom(), 'min' | 'max', integer(), integer()) ->
- no_return().
-fail_negotiation(Field, MinOrMax, ServerValue, ClientValue) ->
- {S1, S2} = case MinOrMax of
- min -> {lower, minimum};
- max -> {higher, maximum}
- end,
- rabbit_misc:protocol_error(
- not_allowed, "negotiated ~w = ~w is ~w than the ~w allowed value (~w)",
- [Field, ClientValue, S1, S2, ServerValue], 'connection.tune').
-
-get_env(Key) ->
- {ok, Value} = application:get_env(rabbit, Key),
- Value.
-
-send_on_channel0(Sock, Method, Protocol) ->
- ok = rabbit_writer:internal_send_command(Sock, 0, Method, Protocol).
-
-auth_mechanism_to_module(TypeBin, Sock) ->
- case rabbit_registry:binary_to_type(TypeBin) of
- {error, not_found} ->
- rabbit_misc:protocol_error(
- command_invalid, "unknown authentication mechanism '~s'",
- [TypeBin]);
- T ->
- case {lists:member(T, auth_mechanisms(Sock)),
- rabbit_registry:lookup_module(auth_mechanism, T)} of
- {true, {ok, Module}} ->
- Module;
- _ ->
- rabbit_misc:protocol_error(
- command_invalid,
- "invalid authentication mechanism '~s'", [T])
- end
- end.
-
-auth_mechanisms(Sock) ->
- {ok, Configured} = application:get_env(auth_mechanisms),
- [Name || {Name, Module} <- rabbit_registry:lookup_all(auth_mechanism),
- Module:should_offer(Sock), lists:member(Name, Configured)].
-
-auth_mechanisms_binary(Sock) ->
- list_to_binary(
- string:join([atom_to_list(A) || A <- auth_mechanisms(Sock)], " ")).
-
-auth_phase(Response,
- State = #v1{connection = Connection =
- #connection{protocol = Protocol,
- auth_mechanism = {Name, AuthMechanism},
- auth_state = AuthState},
- sock = Sock}) ->
- case AuthMechanism:handle_response(Response, AuthState) of
- {refused, Username, Msg, Args} ->
- auth_fail(Username, Msg, Args, Name, State);
- {protocol_error, Msg, Args} ->
- notify_auth_result(none, user_authentication_failure,
- [{error, rabbit_misc:format(Msg, Args)}],
- State),
- rabbit_misc:protocol_error(syntax_error, Msg, Args);
- {challenge, Challenge, AuthState1} ->
- Secure = #'connection.secure'{challenge = Challenge},
- ok = send_on_channel0(Sock, Secure, Protocol),
- State#v1{connection = Connection#connection{
- auth_state = AuthState1}};
- {ok, User = #user{username = Username}} ->
- case rabbit_access_control:check_user_loopback(Username, Sock) of
- ok ->
- notify_auth_result(Username, user_authentication_success,
- [], State);
- not_allowed ->
- auth_fail(Username, "user '~s' can only connect via "
- "localhost", [Username], Name, State)
- end,
- Tune = #'connection.tune'{frame_max = get_env(frame_max),
- channel_max = get_env(channel_max),
- heartbeat = get_env(heartbeat)},
- ok = send_on_channel0(Sock, Tune, Protocol),
- State#v1{connection_state = tuning,
- connection = Connection#connection{user = User,
- auth_state = none}}
- end.
-
--ifdef(use_specs).
--spec(auth_fail/5 ::
- (rabbit_types:username() | none, string(), [any()], binary(), #v1{}) ->
- no_return()).
--endif.
-auth_fail(Username, Msg, Args, AuthName,
- State = #v1{connection = #connection{protocol = Protocol,
- capabilities = Capabilities}}) ->
- notify_auth_result(Username, user_authentication_failure,
- [{error, rabbit_misc:format(Msg, Args)}], State),
- AmqpError = rabbit_misc:amqp_error(
- access_refused, "~s login refused: ~s",
- [AuthName, io_lib:format(Msg, Args)], none),
- case rabbit_misc:table_lookup(Capabilities,
- <<"authentication_failure_close">>) of
- {bool, true} ->
- SafeMsg = io_lib:format(
- "Login was refused using authentication "
- "mechanism ~s. For details see the broker "
- "logfile.", [AuthName]),
- AmqpError1 = AmqpError#amqp_error{explanation = SafeMsg},
- {0, CloseMethod} = rabbit_binary_generator:map_exception(
- 0, AmqpError1, Protocol),
- ok = send_on_channel0(State#v1.sock, CloseMethod, Protocol);
- _ -> ok
- end,
- rabbit_misc:protocol_error(AmqpError).
-
-notify_auth_result(Username, AuthResult, ExtraProps, State) ->
- EventProps = [{connection_type, network},
- {name, case Username of none -> ''; _ -> Username end}] ++
- [case Item of
- name -> {connection_name, i(name, State)};
- _ -> {Item, i(Item, State)}
- end || Item <- ?AUTH_NOTIFICATION_INFO_KEYS] ++
- ExtraProps,
- rabbit_event:notify(AuthResult, [P || {_, V} = P <- EventProps, V =/= '']).
-
-%%--------------------------------------------------------------------------
-
-infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
-
-i(pid, #v1{}) -> self();
-i(SockStat, S) when SockStat =:= recv_oct;
- SockStat =:= recv_cnt;
- SockStat =:= send_oct;
- SockStat =:= send_cnt;
- SockStat =:= send_pend ->
- socket_info(fun (Sock) -> rabbit_net:getstat(Sock, [SockStat]) end,
- fun ([{_, I}]) -> I end, S);
-i(ssl, #v1{sock = Sock}) -> rabbit_net:is_ssl(Sock);
-i(ssl_protocol, S) -> ssl_info(fun ({P, _}) -> P end, S);
-i(ssl_key_exchange, S) -> ssl_info(fun ({_, {K, _, _}}) -> K end, S);
-i(ssl_cipher, S) -> ssl_info(fun ({_, {_, C, _}}) -> C end, S);
-i(ssl_hash, S) -> ssl_info(fun ({_, {_, _, H}}) -> H end, S);
-i(peer_cert_issuer, S) -> cert_info(fun rabbit_ssl:peer_cert_issuer/1, S);
-i(peer_cert_subject, S) -> cert_info(fun rabbit_ssl:peer_cert_subject/1, S);
-i(peer_cert_validity, S) -> cert_info(fun rabbit_ssl:peer_cert_validity/1, S);
-i(channels, #v1{channel_count = ChannelCount}) -> ChannelCount;
-i(state, #v1{connection_state = ConnectionState,
- throttle = #throttle{alarmed_by = Alarms,
- last_blocked_by = WasBlockedBy,
- last_blocked_at = T}}) ->
- case Alarms =:= [] andalso %% not throttled by resource alarms
- (credit_flow:blocked() %% throttled by flow now
- orelse %% throttled by flow recently
- (WasBlockedBy =:= flow andalso T =/= never andalso
- timer:now_diff(erlang:now(), T) < 5000000)) of
- true -> flow;
- false -> ConnectionState
- end;
-i(Item, #v1{connection = Conn}) -> ic(Item, Conn).
-
-ic(name, #connection{name = Name}) -> Name;
-ic(host, #connection{host = Host}) -> Host;
-ic(peer_host, #connection{peer_host = PeerHost}) -> PeerHost;
-ic(port, #connection{port = Port}) -> Port;
-ic(peer_port, #connection{peer_port = PeerPort}) -> PeerPort;
-ic(protocol, #connection{protocol = none}) -> none;
-ic(protocol, #connection{protocol = P}) -> P:version();
-ic(user, #connection{user = none}) -> '';
-ic(user, #connection{user = U}) -> U#user.username;
-ic(vhost, #connection{vhost = VHost}) -> VHost;
-ic(timeout, #connection{timeout_sec = Timeout}) -> Timeout;
-ic(frame_max, #connection{frame_max = FrameMax}) -> FrameMax;
-ic(channel_max, #connection{channel_max = ChMax}) -> ChMax;
-ic(client_properties, #connection{client_properties = CP}) -> CP;
-ic(auth_mechanism, #connection{auth_mechanism = none}) -> none;
-ic(auth_mechanism, #connection{auth_mechanism = {Name, _Mod}}) -> Name;
-ic(connected_at, #connection{connected_at = T}) -> T;
-ic(Item, #connection{}) -> throw({bad_argument, Item}).
-
-socket_info(Get, Select, #v1{sock = Sock}) ->
- case Get(Sock) of
- {ok, T} -> Select(T);
- {error, _} -> ''
- end.
-
-ssl_info(F, #v1{sock = Sock}) ->
- %% The first ok form is R14
- %% The second is R13 - the extra term is exportability (by inspection,
- %% the docs are wrong)
- case rabbit_net:ssl_info(Sock) of
- nossl -> '';
- {error, _} -> '';
- {ok, {P, {K, C, H}}} -> F({P, {K, C, H}});
- {ok, {P, {K, C, H, _}}} -> F({P, {K, C, H}})
- end.
-
-cert_info(F, #v1{sock = Sock}) ->
- case rabbit_net:peercert(Sock) of
- nossl -> '';
- {error, _} -> '';
- {ok, Cert} -> list_to_binary(F(Cert))
- end.
-
-maybe_emit_stats(State) ->
- rabbit_event:if_enabled(State, #v1.stats_timer,
- fun() -> emit_stats(State) end).
-
-emit_stats(State) ->
- Infos = infos(?STATISTICS_KEYS, State),
- rabbit_event:notify(connection_stats, Infos),
- State1 = rabbit_event:reset_stats_timer(State, #v1.stats_timer),
- %% If we emit an event which looks like we are in flow control, it's not a
- %% good idea for it to be our last even if we go idle. Keep emitting
- %% events, either we stay busy or we drop out of flow control.
- case proplists:get_value(state, Infos) of
- flow -> ensure_stats_timer(State1);
- _ -> State1
- end.
-
-%% 1.0 stub
--ifdef(use_specs).
--spec(become_1_0/2 :: (non_neg_integer(), #v1{}) -> no_return()).
--endif.
-become_1_0(Id, State = #v1{sock = Sock}) ->
- case code:is_loaded(rabbit_amqp1_0_reader) of
- false -> refuse_connection(Sock, amqp1_0_plugin_not_enabled);
- _ -> Mode = case Id of
- 0 -> amqp;
- 3 -> sasl;
- _ -> refuse_connection(
- Sock, {unsupported_amqp1_0_protocol_id, Id},
- {3, 1, 0, 0})
- end,
- F = fun (_Deb, Buf, BufLen, S) ->
- {rabbit_amqp1_0_reader, init,
- [Mode, pack_for_1_0(Buf, BufLen, S)]}
- end,
- State#v1{connection_state = {become, F}}
- end.
-
-pack_for_1_0(Buf, BufLen, #v1{parent = Parent,
- sock = Sock,
- recv_len = RecvLen,
- pending_recv = PendingRecv,
- helper_sup = SupPid}) ->
- {Parent, Sock, RecvLen, PendingRecv, SupPid, Buf, BufLen}.
diff --git a/src/rabbit_registry.erl b/src/rabbit_registry.erl
index fc016e718e..f75d839bbf 100644
--- a/src/rabbit_registry.erl
+++ b/src/rabbit_registry.erl
@@ -133,7 +133,8 @@ class_module(exchange_decorator) -> rabbit_exchange_decorator;
class_module(queue_decorator) -> rabbit_queue_decorator;
class_module(policy_validator) -> rabbit_policy_validator;
class_module(ha_mode) -> rabbit_mirror_queue_mode;
-class_module(channel_interceptor) -> rabbit_channel_interceptor.
+class_module(channel_interceptor) -> rabbit_channel_interceptor;
+class_module(queue_master_locator)-> rabbit_queue_master_locator.
%%---------------------------------------------------------------------------
diff --git a/src/rabbit_resource_monitor_misc.erl b/src/rabbit_resource_monitor_misc.erl
new file mode 100644
index 0000000000..f90b8ce310
--- /dev/null
+++ b/src/rabbit_resource_monitor_misc.erl
@@ -0,0 +1,51 @@
+%% The contents of this file are subject to the Mozilla Public License
+%% Version 1.1 (the "License"); you may not use this file except in
+%% compliance with the License. You may obtain a copy of the License
+%% at http://www.mozilla.org/MPL/
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and
+%% limitations under the License.
+%%
+%% The Original Code is RabbitMQ.
+%%
+%% The Initial Developer of the Original Code is GoPivotal, Inc.
+%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
+%%
+
+
+-module(rabbit_resource_monitor_misc).
+
+-export([parse_information_unit/1]).
+
+-ifdef(use_spec).
+
+-spec(parse_information_unit/1 :: (integer() | string()) ->
+ {ok, integer()} | {error, parse_error}).
+
+-endif.
+
+parse_information_unit(Value) when is_integer(Value) -> {ok, Value};
+parse_information_unit(Value) when is_list(Value) ->
+ case re:run(Value,
+ "^(?<VAL>[0-9]+)(?<UNIT>kB|KB|MB|GB|kb|mb|gb|Kb|Mb|Gb|kiB|KiB|MiB|GiB|kib|mib|gib|KIB|MIB|GIB|k|K|m|M|g|G)?$",
+ [{capture, all_but_first, list}]) of
+ {match, [[], _]} ->
+ {ok, list_to_integer(Value)};
+ {match, [Num]} ->
+ {ok, list_to_integer(Num)};
+ {match, [Num, Unit]} ->
+ Multiplier = case Unit of
+ KiB when KiB =:= "k"; KiB =:= "kiB"; KiB =:= "K"; KiB =:= "KIB"; KiB =:= "kib" -> 1024;
+ MiB when MiB =:= "m"; MiB =:= "MiB"; MiB =:= "M"; MiB =:= "MIB"; MiB =:= "mib" -> 1024*1024;
+ GiB when GiB =:= "g"; GiB =:= "GiB"; GiB =:= "G"; GiB =:= "GIB"; GiB =:= "gib" -> 1024*1024*1024;
+ KB when KB =:= "KB"; KB =:= "kB"; KB =:= "kb"; KB =:= "Kb" -> 1000;
+ MB when MB =:= "MB"; MB =:= "mB"; MB =:= "mb"; MB =:= "Mb" -> 1000000;
+ GB when GB =:= "GB"; GB =:= "gB"; GB =:= "gb"; GB =:= "Gb" -> 1000000000
+ end,
+ {ok, list_to_integer(Num) * Multiplier};
+ nomatch ->
+ % log error
+ {error, parse_error}
+ end.
diff --git a/src/rabbit_runtime_parameter.erl b/src/rabbit_runtime_parameter.erl
deleted file mode 100644
index 1d4bc0b575..0000000000
--- a/src/rabbit_runtime_parameter.erl
+++ /dev/null
@@ -1,42 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_runtime_parameter).
-
--ifdef(use_specs).
-
--type(validate_results() ::
- 'ok' | {error, string(), [term()]} | [validate_results()]).
-
--callback validate(rabbit_types:vhost(), binary(), binary(),
- term(), rabbit_types:user()) -> validate_results().
--callback notify(rabbit_types:vhost(), binary(), binary(), term()) -> 'ok'.
--callback notify_clear(rabbit_types:vhost(), binary(), binary()) -> 'ok'.
-
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
- [
- {validate, 5},
- {notify, 4},
- {notify_clear, 3}
- ];
-behaviour_info(_Other) ->
- undefined.
-
--endif.
diff --git a/src/rabbit_runtime_parameters.erl b/src/rabbit_runtime_parameters.erl
index fafd598bb7..5e6f12904d 100644
--- a/src/rabbit_runtime_parameters.erl
+++ b/src/rabbit_runtime_parameters.erl
@@ -19,8 +19,8 @@
-include("rabbit.hrl").
-export([parse_set/5, set/5, set_any/5, clear/3, clear_any/3, list/0, list/1,
- list_component/1, list/2, list_formatted/1, lookup/3,
- value/3, value/4, info_keys/0]).
+ list_component/1, list/2, list_formatted/1, list_formatted/3,
+ lookup/3, value/3, value/4, info_keys/0]).
-export([set_global/2, value_global/1, value_global/2]).
@@ -48,6 +48,7 @@
-spec(list/2 :: (rabbit_types:vhost() | '_', binary() | '_')
-> [rabbit_types:infos()]).
-spec(list_formatted/1 :: (rabbit_types:vhost()) -> [rabbit_types:infos()]).
+-spec(list_formatted/3 :: (rabbit_types:vhost(), reference(), pid()) -> 'ok').
-spec(lookup/3 :: (rabbit_types:vhost(), binary(), binary())
-> rabbit_types:infos() | 'not_found').
-spec(value/3 :: (rabbit_types:vhost(), binary(), binary()) -> term()).
@@ -198,6 +199,11 @@ list(VHost, Component) ->
list_formatted(VHost) ->
[pset(value, format(pget(value, P)), P) || P <- list(VHost)].
+list_formatted(VHost, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map(
+ AggregatorPid, Ref,
+ fun(P) -> pset(value, format(pget(value, P)), P) end, list(VHost)).
+
lookup(VHost, Component, Name) ->
case lookup0({VHost, Component, Name}, rabbit_misc:const(not_found)) of
not_found -> not_found;
diff --git a/src/rabbit_types.erl b/src/rabbit_types.erl
index 3e2b5ba0c1..c56628a7f0 100644
--- a/src/rabbit_types.erl
+++ b/src/rabbit_types.erl
@@ -31,9 +31,10 @@
username/0, password/0, password_hash/0,
ok/1, error/1, ok_or_error/1, ok_or_error2/2, ok_pid_or_error/0,
channel_exit/0, connection_exit/0, mfargs/0, proc_name/0,
- proc_type_and_name/0]).
+ proc_type_and_name/0, timestamp/0]).
-type(maybe(T) :: T | 'none').
+-type(timestamp() :: {non_neg_integer(), non_neg_integer(), non_neg_integer()}).
-type(vhost() :: binary()).
-type(ctag() :: binary()).
diff --git a/src/rabbit_upgrade.erl b/src/rabbit_upgrade.erl
index daf39b8acc..0a85ef3e7d 100644
--- a/src/rabbit_upgrade.erl
+++ b/src/rabbit_upgrade.erl
@@ -100,7 +100,12 @@ ensure_backup_taken() ->
false -> ok = take_backup();
_ -> ok
end;
- true -> throw({error, previous_upgrade_failed})
+ true ->
+ error("Found lock file at ~s.
+ Either previous upgrade is in progress or has failed.
+ Database backup path: ~s",
+ [lock_filename(), backup_dir()]),
+ throw({error, previous_upgrade_failed})
end.
take_backup() ->
@@ -108,7 +113,7 @@ take_backup() ->
case rabbit_mnesia:copy_db(BackupDir) of
ok -> info("upgrades: Mnesia dir backed up to ~p~n",
[BackupDir]);
- {error, E} -> throw({could_not_back_up_mnesia_dir, E})
+ {error, E} -> throw({could_not_back_up_mnesia_dir, E, BackupDir})
end.
ensure_backup_removed() ->
diff --git a/src/rabbit_upgrade_functions.erl b/src/rabbit_upgrade_functions.erl
index 4eced3f32f..485e5cdefc 100644
--- a/src/rabbit_upgrade_functions.erl
+++ b/src/rabbit_upgrade_functions.erl
@@ -51,6 +51,7 @@
-rabbit_upgrade({down_slave_nodes, mnesia, [queue_decorators]}).
-rabbit_upgrade({queue_state, mnesia, [down_slave_nodes]}).
-rabbit_upgrade({recoverable_slaves, mnesia, [queue_state]}).
+-rabbit_upgrade({user_password_hashing, mnesia, [hash_passwords]}).
%% -------------------------------------------------------------------
@@ -84,6 +85,7 @@
-spec(down_slave_nodes/0 :: () -> 'ok').
-spec(queue_state/0 :: () -> 'ok').
-spec(recoverable_slaves/0 :: () -> 'ok').
+-spec(user_password_hashing/0 :: () -> 'ok').
-endif.
@@ -103,11 +105,15 @@ remove_user_scope() ->
end,
[user_vhost, permission]).
+%% this is an early migration that hashes passwords using MD5,
+%% only relevant to those migrating from 2.1.1.
+%% all users created after in 3.6.0 or later will use SHA-256 (unless configured
+%% otherwise)
hash_passwords() ->
transform(
rabbit_user,
fun ({user, Username, Password, IsAdmin}) ->
- Hash = rabbit_auth_backend_internal:hash_password(Password),
+ Hash = rabbit_auth_backend_internal:hash_password(rabbit_password_hashing_md5, Password),
{user, Username, Hash, IsAdmin}
end,
[username, password_hash, is_admin]).
@@ -431,6 +437,17 @@ recoverable_slaves(Table) ->
sync_slave_pids, recoverable_slaves, policy, gm_pids, decorators,
state]).
+%% Prior to 3.6.0, passwords were hashed using MD5, this populates
+%% existing records with said default. Users created with 3.6.0+ will
+%% have internal_user.hashing_algorithm populated by the internal
+%% authn backend.
+user_password_hashing() ->
+ transform(
+ rabbit_user,
+ fun ({internal_user, Username, Hash, Tags}) ->
+ {internal_user, Username, Hash, Tags, rabbit_password_hashing_md5}
+ end,
+ [username, password_hash, tags, hashing_algorithm]).
%%--------------------------------------------------------------------
@@ -452,8 +469,8 @@ create(Tab, TabDef) ->
%% Dumb replacement for rabbit_exchange:declare that does not require
%% the exchange type registry or worker pool to be running by dint of
%% not validating anything and assuming the exchange type does not
-%% require serialisation.
-%% NB: this assumes the pre-exchange-scratch-space format
+%% require serialisation. NB: this assumes the
+%% pre-exchange-scratch-space format
declare_exchange(XName, Type) ->
X = {exchange, XName, Type, true, false, false, []},
ok = mnesia:dirty_write(rabbit_durable_exchange, X).
diff --git a/src/rabbit_variable_queue.erl b/src/rabbit_variable_queue.erl
index a0e71c69de..eb3e9f5095 100644
--- a/src/rabbit_variable_queue.erl
+++ b/src/rabbit_variable_queue.erl
@@ -18,12 +18,15 @@
-export([init/3, terminate/2, delete_and_terminate/2, delete_crashed/1,
purge/1, purge_acks/1,
- publish/6, publish_delivered/5, discard/4, drain_confirmed/1,
+ publish/6, publish_delivered/5,
+ batch_publish/4, batch_publish_delivered/4,
+ discard/4, drain_confirmed/1,
dropwhile/2, fetchwhile/4, fetch/2, drop/2, ack/2, requeue/2,
ackfold/4, fold/3, len/1, is_empty/1, depth/1,
set_ram_duration_target/2, ram_duration/1, needs_timeout/1, timeout/1,
handle_pre_hibernate/1, resume/1, msg_rates/1,
- info/2, invoke/3, is_duplicate/2, multiple_routing_keys/0]).
+ info/2, invoke/3, is_duplicate/2, set_queue_mode/2,
+ zip_msgs_and_acks/4, multiple_routing_keys/0]).
-export([start/1, stop/0]).
@@ -300,7 +303,10 @@
disk_read_count,
disk_write_count,
- io_batch_size
+ io_batch_size,
+
+ %% default queue or lazy queue
+ mode
}).
-record(rates, { in, out, ack_in, ack_out, timestamp }).
@@ -337,14 +343,13 @@
-ifdef(use_specs).
--type(timestamp() :: {non_neg_integer(), non_neg_integer(), non_neg_integer()}).
-type(seq_id() :: non_neg_integer()).
-type(rates() :: #rates { in :: float(),
out :: float(),
ack_in :: float(),
ack_out :: float(),
- timestamp :: timestamp()}).
+ timestamp :: rabbit_types:timestamp()}).
-type(delta() :: #delta { start_seq_id :: non_neg_integer(),
count :: non_neg_integer(),
@@ -398,7 +403,8 @@
disk_read_count :: non_neg_integer(),
disk_write_count :: non_neg_integer(),
- io_batch_size :: pos_integer()}).
+ io_batch_size :: pos_integer(),
+ mode :: 'default' | 'lazy' }).
%% Duplicated from rabbit_backing_queue
-spec(ack/2 :: ([ack()], state()) -> {[rabbit_guid:guid()], state()}).
@@ -559,52 +565,32 @@ purge(State = #vqstate { len = Len }) ->
purge_acks(State) -> a(purge_pending_ack(false, State)).
-publish(Msg = #basic_message { is_persistent = IsPersistent, id = MsgId },
- MsgProps = #message_properties { needs_confirming = NeedsConfirming },
- IsDelivered, _ChPid, _Flow,
- State = #vqstate { q1 = Q1, q3 = Q3, q4 = Q4,
- qi_embed_msgs_below = IndexMaxSize,
- next_seq_id = SeqId,
- in_counter = InCount,
- durable = IsDurable,
- unconfirmed = UC }) ->
- IsPersistent1 = IsDurable andalso IsPersistent,
- MsgStatus = msg_status(IsPersistent1, IsDelivered, SeqId, Msg, MsgProps, IndexMaxSize),
- {MsgStatus1, State1} = maybe_write_to_disk(false, false, MsgStatus, State),
- State2 = case ?QUEUE:is_empty(Q3) of
- false -> State1 #vqstate { q1 = ?QUEUE:in(m(MsgStatus1), Q1) };
- true -> State1 #vqstate { q4 = ?QUEUE:in(m(MsgStatus1), Q4) }
- end,
- InCount1 = InCount + 1,
- UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
- State3 = stats({1, 0}, {none, MsgStatus1},
- State2#vqstate{ next_seq_id = SeqId + 1,
- in_counter = InCount1,
- unconfirmed = UC1 }),
- a(reduce_memory_use(maybe_update_rates(State3))).
-
-publish_delivered(Msg = #basic_message { is_persistent = IsPersistent,
- id = MsgId },
- MsgProps = #message_properties {
- needs_confirming = NeedsConfirming },
- _ChPid, _Flow,
- State = #vqstate { qi_embed_msgs_below = IndexMaxSize,
- next_seq_id = SeqId,
- out_counter = OutCount,
- in_counter = InCount,
- durable = IsDurable,
- unconfirmed = UC }) ->
- IsPersistent1 = IsDurable andalso IsPersistent,
- MsgStatus = msg_status(IsPersistent1, true, SeqId, Msg, MsgProps, IndexMaxSize),
- {MsgStatus1, State1} = maybe_write_to_disk(false, false, MsgStatus, State),
- State2 = record_pending_ack(m(MsgStatus1), State1),
- UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
- State3 = stats({0, 1}, {none, MsgStatus1},
- State2 #vqstate { next_seq_id = SeqId + 1,
- out_counter = OutCount + 1,
- in_counter = InCount + 1,
- unconfirmed = UC1 }),
- {SeqId, a(reduce_memory_use(maybe_update_rates(State3)))}.
+publish(Msg, MsgProps, IsDelivered, ChPid, Flow, State) ->
+ State1 =
+ publish1(Msg, MsgProps, IsDelivered, ChPid, Flow,
+ fun maybe_write_to_disk/4,
+ State),
+ a(reduce_memory_use(maybe_update_rates(State1))).
+
+batch_publish(Publishes, ChPid, Flow, State) ->
+ {ChPid, Flow, State1} =
+ lists:foldl(fun batch_publish1/2, {ChPid, Flow, State}, Publishes),
+ State2 = ui(State1),
+ a(reduce_memory_use(maybe_update_rates(State2))).
+
+publish_delivered(Msg, MsgProps, ChPid, Flow, State) ->
+ {SeqId, State1} =
+ publish_delivered1(Msg, MsgProps, ChPid, Flow,
+ fun maybe_write_to_disk/4,
+ State),
+ {SeqId, a(reduce_memory_use(maybe_update_rates(State1)))}.
+
+batch_publish_delivered(Publishes, ChPid, Flow, State) ->
+ {ChPid, Flow, SeqIds, State1} =
+ lists:foldl(fun batch_publish_delivered1/2,
+ {ChPid, Flow, [], State}, Publishes),
+ State2 = ui(State1),
+ {lists:reverse(SeqIds), a(reduce_memory_use(maybe_update_rates(State2)))}.
discard(_MsgId, _ChPid, _Flow, State) -> State.
@@ -686,7 +672,8 @@ ack(AckTags, State) ->
a(State1 #vqstate { index_state = IndexState1,
ack_out_counter = AckOutCount + length(AckTags) })}.
-requeue(AckTags, #vqstate { delta = Delta,
+requeue(AckTags, #vqstate { mode = default,
+ delta = Delta,
q3 = Q3,
q4 = Q4,
in_counter = InCounter,
@@ -706,6 +693,23 @@ requeue(AckTags, #vqstate { delta = Delta,
q3 = Q3a,
q4 = Q4a,
in_counter = InCounter + MsgCount,
+ len = Len + MsgCount })))};
+requeue(AckTags, #vqstate { mode = lazy,
+ delta = Delta,
+ q3 = Q3,
+ in_counter = InCounter,
+ len = Len } = State) ->
+ {SeqIds, Q3a, MsgIds, State1} = queue_merge(lists:sort(AckTags), Q3, [],
+ delta_limit(Delta),
+ fun publish_beta/2, State),
+ {Delta1, MsgIds1, State2} = delta_merge(SeqIds, Delta, MsgIds,
+ State1),
+ MsgCount = length(MsgIds1),
+ {MsgIds1, a(reduce_memory_use(
+ maybe_update_rates(
+ State2 #vqstate { delta = Delta1,
+ q3 = Q3a,
+ in_counter = InCounter + MsgCount,
len = Len + MsgCount })))}.
ackfold(MsgFun, Acc, State, AckTags) ->
@@ -770,7 +774,7 @@ update_rates(State = #vqstate{ in_counter = InCount,
ack_in = AckInRate,
ack_out = AckOutRate,
timestamp = TS }}) ->
- Now = erlang:now(),
+ Now = time_compat:monotonic_time(),
Rates = #rates { in = update_rate(Now, TS, InCount, InRate),
out = update_rate(Now, TS, OutCount, OutRate),
@@ -785,8 +789,13 @@ update_rates(State = #vqstate{ in_counter = InCount,
rates = Rates }.
update_rate(Now, TS, Count, Rate) ->
- Time = timer:now_diff(Now, TS) / ?MICROS_PER_SECOND,
- rabbit_misc:moving_average(Time, ?RATE_AVG_HALF_LIFE, Count / Time, Rate).
+ Time = time_compat:convert_time_unit(Now - TS, native, micro_seconds) /
+ ?MICROS_PER_SECOND,
+ if
+ Time == 0 -> Rate;
+ true -> rabbit_misc:moving_average(Time, ?RATE_AVG_HALF_LIFE,
+ Count / Time, Rate)
+ end.
ram_duration(State) ->
State1 = #vqstate { rates = #rates { in = AvgIngressRate,
@@ -854,12 +863,19 @@ info(message_bytes_ram, #vqstate{ram_bytes = RamBytes}) ->
RamBytes;
info(message_bytes_persistent, #vqstate{persistent_bytes = PersistentBytes}) ->
PersistentBytes;
+info(head_message_timestamp, #vqstate{
+ q3 = Q3,
+ q4 = Q4,
+ ram_pending_ack = RPA,
+ qi_pending_ack = QPA}) ->
+ head_message_timestamp(Q3, Q4, RPA, QPA);
info(disk_reads, #vqstate{disk_read_count = Count}) ->
Count;
info(disk_writes, #vqstate{disk_write_count = Count}) ->
Count;
info(backing_queue_status, #vqstate {
q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4,
+ mode = Mode,
len = Len,
target_ram_count = TargetRamCount,
next_seq_id = NextSeqId,
@@ -868,7 +884,8 @@ info(backing_queue_status, #vqstate {
ack_in = AvgAckIngressRate,
ack_out = AvgAckEgressRate }}) ->
- [ {q1 , ?QUEUE:len(Q1)},
+ [ {mode , Mode},
+ {q1 , ?QUEUE:len(Q1)},
{q2 , ?QUEUE:len(Q2)},
{delta , Delta},
{q3 , ?QUEUE:len(Q3)},
@@ -888,11 +905,113 @@ invoke( _, _, State) -> State.
is_duplicate(_Msg, State) -> {false, State}.
+set_queue_mode(Mode, State = #vqstate { mode = Mode }) ->
+ State;
+set_queue_mode(lazy, State = #vqstate {
+ target_ram_count = TargetRamCount }) ->
+ %% To become a lazy queue we need to page everything to disk first.
+ State1 = convert_to_lazy(State),
+ %% restore the original target_ram_count
+ a(State1 #vqstate { mode = lazy, target_ram_count = TargetRamCount });
+set_queue_mode(default, State) ->
+ %% becoming a default queue means loading messages from disk like
+ %% when a queue is recovered.
+ a(maybe_deltas_to_betas(State #vqstate { mode = default }));
+set_queue_mode(_, State) ->
+ State.
+
+zip_msgs_and_acks(Msgs, AckTags, Accumulator, _State) ->
+ lists:foldl(fun ({{#basic_message{ id = Id }, _Props}, AckTag}, Acc) ->
+ [{Id, AckTag} | Acc]
+ end, Accumulator, lists:zip(Msgs, AckTags)).
+
+convert_to_lazy(State) ->
+ State1 = #vqstate { delta = Delta, q3 = Q3, len = Len } =
+ set_ram_duration_target(0, State),
+ case Delta#delta.count + ?QUEUE:len(Q3) == Len of
+ true ->
+ State1;
+ false ->
+ %% When pushing messages to disk, we might have been
+ %% blocked by the msg_store, so we need to see if we have
+ %% to wait for more credit, and then keep paging messages.
+ %%
+ %% The amqqueue_process could have taken care of this, but
+ %% between the time it receives the bump_credit msg and
+ %% calls BQ:resume to keep paging messages to disk, some
+ %% other request may arrive to the BQ which at this moment
+ %% is not in a proper state for a lazy BQ (unless all
+ %% messages have been paged to disk already).
+ wait_for_msg_store_credit(),
+ convert_to_lazy(State1)
+ end.
+
+wait_for_msg_store_credit() ->
+ case credit_flow:blocked() of
+ true -> receive
+ {bump_credit, Msg} ->
+ credit_flow:handle_bump_msg(Msg)
+ end;
+ false -> ok
+ end.
+
+%% Get the Timestamp property of the first msg, if present. This is
+%% the one with the oldest timestamp among the heads of the pending
+%% acks and unread queues. We can't check disk_pending_acks as these
+%% are paged out - we assume some will soon be paged in rather than
+%% forcing it to happen. Pending ack msgs are included as they are
+%% regarded as unprocessed until acked, this also prevents the result
+%% apparently oscillating during repeated rejects. Q3 is only checked
+%% when Q4 is empty as any Q4 msg will be earlier.
+head_message_timestamp(Q3, Q4, RPA, QPA) ->
+ HeadMsgs = [ HeadMsgStatus#msg_status.msg ||
+ HeadMsgStatus <-
+ [ get_qs_head([Q4, Q3]),
+ get_pa_head(RPA),
+ get_pa_head(QPA) ],
+ HeadMsgStatus /= undefined,
+ HeadMsgStatus#msg_status.msg /= undefined ],
+
+ Timestamps =
+ [Timestamp || HeadMsg <- HeadMsgs,
+ Timestamp <- [rabbit_basic:extract_timestamp(
+ HeadMsg#basic_message.content)],
+ Timestamp /= undefined
+ ],
+
+ case Timestamps == [] of
+ true -> '';
+ false -> lists:min(Timestamps)
+ end.
+
+get_qs_head(Qs) ->
+ catch lists:foldl(
+ fun (Q, Acc) ->
+ case get_q_head(Q) of
+ undefined -> Acc;
+ Val -> throw(Val)
+ end
+ end, undefined, Qs).
+
+get_q_head(Q) ->
+ get_collection_head(Q, fun ?QUEUE:is_empty/1, fun ?QUEUE:peek/1).
+
+get_pa_head(PA) ->
+ get_collection_head(PA, fun gb_trees:is_empty/1, fun gb_trees:smallest/1).
+
+get_collection_head(Col, IsEmpty, GetVal) ->
+ case IsEmpty(Col) of
+ false ->
+ {_, MsgStatus} = GetVal(Col),
+ MsgStatus;
+ true -> undefined
+ end.
+
%%----------------------------------------------------------------------------
%% Minor helpers
%%----------------------------------------------------------------------------
-
a(State = #vqstate { q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4,
+ mode = default,
len = Len,
bytes = Bytes,
unacked_bytes = UnackedBytes,
@@ -907,9 +1026,16 @@ a(State = #vqstate { q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4,
E4 = ?QUEUE:is_empty(Q4),
LZ = Len == 0,
+ %% if q1 has messages then q3 cannot be empty. See publish/6.
true = E1 or not E3,
+ %% if q2 has messages then we have messages in delta (paged to
+ %% disk). See push_alphas_to_betas/2.
true = E2 or not ED,
+ %% if delta has messages then q3 cannot be empty. This is enforced
+ %% by paging, where min([?SEGMENT_ENTRY_COUNT, len(q3)]) messages
+ %% are always kept on RAM.
true = ED or not E3,
+ %% if the queue length is 0, then q3 and q4 must be empty.
true = LZ == (E3 and E4),
true = Len >= 0,
@@ -922,6 +1048,53 @@ a(State = #vqstate { q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4,
true = RamBytes >= 0,
true = RamBytes =< Bytes + UnackedBytes,
+ State;
+a(State = #vqstate { q1 = Q1, q2 = Q2, delta = Delta, q3 = Q3, q4 = Q4,
+ mode = lazy,
+ len = Len,
+ bytes = Bytes,
+ unacked_bytes = UnackedBytes,
+ persistent_count = PersistentCount,
+ persistent_bytes = PersistentBytes,
+ ram_msg_count = RamMsgCount,
+ ram_bytes = RamBytes}) ->
+ E1 = ?QUEUE:is_empty(Q1),
+ E2 = ?QUEUE:is_empty(Q2),
+ ED = Delta#delta.count == 0,
+ E3 = ?QUEUE:is_empty(Q3),
+ E4 = ?QUEUE:is_empty(Q4),
+ LZ = Len == 0,
+ L3 = ?QUEUE:len(Q3),
+
+ %% q1 must always be empty, since q1 only gets messages during
+ %% publish, but for lazy queues messages go straight to delta.
+ true = E1,
+
+ %% q2 only gets messages from q1 when push_alphas_to_betas is
+ %% called for a non empty delta, which won't be the case for a
+ %% lazy queue. This means q2 must always be empty.
+ true = E2,
+
+ %% q4 must always be empty, since q1 only gets messages during
+ %% publish, but for lazy queues messages go straight to delta.
+ true = E4,
+
+ %% if the queue is empty, then delta is empty and q3 is empty.
+ true = LZ == (ED and E3),
+
+ %% There should be no messages in q1, q2, and q4
+ true = Delta#delta.count + L3 == Len,
+
+ true = Len >= 0,
+ true = Bytes >= 0,
+ true = UnackedBytes >= 0,
+ true = PersistentCount >= 0,
+ true = PersistentBytes >= 0,
+ true = RamMsgCount >= 0,
+ true = RamMsgCount =< Len,
+ true = RamBytes >= 0,
+ true = RamBytes =< Bytes + UnackedBytes,
+
State.
d(Delta = #delta { start_seq_id = Start, count = Count, end_seq_id = End })
@@ -1114,7 +1287,7 @@ init(IsDurable, IndexState, DeltaCount, DeltaBytes, Terms,
count = DeltaCount1,
end_seq_id = NextSeqId })
end,
- Now = now(),
+ Now = time_compat:monotonic_time(),
IoBatchSize = rabbit_misc:get_env(rabbit, msg_store_io_batch_size,
?IO_BATCH_SIZE),
@@ -1159,7 +1332,9 @@ init(IsDurable, IndexState, DeltaCount, DeltaBytes, Terms,
disk_read_count = 0,
disk_write_count = 0,
- io_batch_size = IoBatchSize },
+ io_batch_size = IoBatchSize,
+
+ mode = default },
a(maybe_deltas_to_betas(State)).
blank_rates(Now) ->
@@ -1170,7 +1345,7 @@ blank_rates(Now) ->
timestamp = Now}.
in_r(MsgStatus = #msg_status { msg = undefined },
- State = #vqstate { q3 = Q3, q4 = Q4 }) ->
+ State = #vqstate { mode = default, q3 = Q3, q4 = Q4 }) ->
case ?QUEUE:is_empty(Q4) of
true -> State #vqstate { q3 = ?QUEUE:in_r(MsgStatus, Q3) };
false -> {Msg, State1 = #vqstate { q4 = Q4a }} =
@@ -1179,10 +1354,24 @@ in_r(MsgStatus = #msg_status { msg = undefined },
stats(ready0, {MsgStatus, MsgStatus1},
State1 #vqstate { q4 = ?QUEUE:in_r(MsgStatus1, Q4a) })
end;
-in_r(MsgStatus, State = #vqstate { q4 = Q4 }) ->
- State #vqstate { q4 = ?QUEUE:in_r(MsgStatus, Q4) }.
+in_r(MsgStatus,
+ State = #vqstate { mode = default, q4 = Q4 }) ->
+ State #vqstate { q4 = ?QUEUE:in_r(MsgStatus, Q4) };
+%% lazy queues
+in_r(MsgStatus = #msg_status { seq_id = SeqId },
+ State = #vqstate { mode = lazy, q3 = Q3, delta = Delta}) ->
+ case ?QUEUE:is_empty(Q3) of
+ true ->
+ {_MsgStatus1, State1} =
+ maybe_write_to_disk(true, true, MsgStatus, State),
+ State2 = stats(ready0, {MsgStatus, none}, State1),
+ Delta1 = expand_delta(SeqId, Delta),
+ State2 #vqstate{ delta = Delta1 };
+ false ->
+ State #vqstate { q3 = ?QUEUE:in_r(MsgStatus, Q3) }
+ end.
-queue_out(State = #vqstate { q4 = Q4 }) ->
+queue_out(State = #vqstate { mode = default, q4 = Q4 }) ->
case ?QUEUE:out(Q4) of
{empty, _Q4} ->
case fetch_from_q3(State) of
@@ -1191,6 +1380,12 @@ queue_out(State = #vqstate { q4 = Q4 }) ->
end;
{{value, MsgStatus}, Q4a} ->
{{value, MsgStatus}, State #vqstate { q4 = Q4a }}
+ end;
+%% lazy queues
+queue_out(State = #vqstate { mode = lazy }) ->
+ case fetch_from_q3(State) of
+ {empty, _State1} = Result -> Result;
+ {loaded, {MsgStatus, State1}} -> {{value, MsgStatus}, State1}
end.
read_msg(#msg_status{msg = undefined,
@@ -1210,11 +1405,13 @@ read_msg(MsgId, IsPersistent, State = #vqstate{msg_store_clients = MSCState,
stats(Signs, Statuses, State) ->
stats0(expand_signs(Signs), expand_statuses(Statuses), State).
-expand_signs(ready0) -> {0, 0, true};
-expand_signs({A, B}) -> {A, B, false}.
+expand_signs(ready0) -> {0, 0, true};
+expand_signs(lazy_pub) -> {1, 0, true};
+expand_signs({A, B}) -> {A, B, false}.
expand_statuses({none, A}) -> {false, msg_in_ram(A), A};
expand_statuses({B, none}) -> {msg_in_ram(B), false, B};
+expand_statuses({lazy, A}) -> {false , false, A};
expand_statuses({B, A}) -> {msg_in_ram(B), msg_in_ram(A), B}.
%% In this function at least, we are religious: the variable name
@@ -1456,10 +1653,16 @@ count_pending_acks(#vqstate { ram_pending_ack = RPA,
qi_pending_ack = QPA }) ->
gb_trees:size(RPA) + gb_trees:size(DPA) + gb_trees:size(QPA).
-purge_betas_and_deltas(DelsAndAcksFun, State = #vqstate { q3 = Q3 }) ->
+purge_betas_and_deltas(DelsAndAcksFun, State = #vqstate { mode = Mode }) ->
+ State0 = #vqstate { q3 = Q3 } =
+ case Mode of
+ lazy -> maybe_deltas_to_betas(DelsAndAcksFun, State);
+ _ -> State
+ end,
+
case ?QUEUE:is_empty(Q3) of
- true -> State;
- false -> State1 = remove_queue_entries(Q3, DelsAndAcksFun, State),
+ true -> State0;
+ false -> State1 = remove_queue_entries(Q3, DelsAndAcksFun, State0),
purge_betas_and_deltas(DelsAndAcksFun,
maybe_deltas_to_betas(
DelsAndAcksFun,
@@ -1503,6 +1706,108 @@ process_delivers_and_acks_fun(_) ->
%% Internal gubbins for publishing
%%----------------------------------------------------------------------------
+publish1(Msg = #basic_message { is_persistent = IsPersistent, id = MsgId },
+ MsgProps = #message_properties { needs_confirming = NeedsConfirming },
+ IsDelivered, _ChPid, _Flow, PersistFun,
+ State = #vqstate { q1 = Q1, q3 = Q3, q4 = Q4,
+ mode = default,
+ qi_embed_msgs_below = IndexMaxSize,
+ next_seq_id = SeqId,
+ in_counter = InCount,
+ durable = IsDurable,
+ unconfirmed = UC }) ->
+ IsPersistent1 = IsDurable andalso IsPersistent,
+ MsgStatus = msg_status(IsPersistent1, IsDelivered, SeqId, Msg, MsgProps, IndexMaxSize),
+ {MsgStatus1, State1} = PersistFun(false, false, MsgStatus, State),
+ State2 = case ?QUEUE:is_empty(Q3) of
+ false -> State1 #vqstate { q1 = ?QUEUE:in(m(MsgStatus1), Q1) };
+ true -> State1 #vqstate { q4 = ?QUEUE:in(m(MsgStatus1), Q4) }
+ end,
+ InCount1 = InCount + 1,
+ UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
+ stats({1, 0}, {none, MsgStatus1},
+ State2#vqstate{ next_seq_id = SeqId + 1,
+ in_counter = InCount1,
+ unconfirmed = UC1 });
+publish1(Msg = #basic_message { is_persistent = IsPersistent, id = MsgId },
+ MsgProps = #message_properties { needs_confirming = NeedsConfirming },
+ IsDelivered, _ChPid, _Flow, PersistFun,
+ State = #vqstate { mode = lazy,
+ qi_embed_msgs_below = IndexMaxSize,
+ next_seq_id = SeqId,
+ in_counter = InCount,
+ durable = IsDurable,
+ unconfirmed = UC,
+ delta = Delta }) ->
+ IsPersistent1 = IsDurable andalso IsPersistent,
+ MsgStatus = msg_status(IsPersistent1, IsDelivered, SeqId, Msg, MsgProps, IndexMaxSize),
+ {MsgStatus1, State1} = PersistFun(true, true, MsgStatus, State),
+ Delta1 = expand_delta(SeqId, Delta),
+ UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
+ stats(lazy_pub, {lazy, m(MsgStatus1)},
+ State1#vqstate{ delta = Delta1,
+ next_seq_id = SeqId + 1,
+ in_counter = InCount + 1,
+ unconfirmed = UC1 }).
+
+batch_publish1({Msg, MsgProps, IsDelivered}, {ChPid, Flow, State}) ->
+ {ChPid, Flow, publish1(Msg, MsgProps, IsDelivered, ChPid, Flow,
+ fun maybe_prepare_write_to_disk/4, State)}.
+
+publish_delivered1(Msg = #basic_message { is_persistent = IsPersistent,
+ id = MsgId },
+ MsgProps = #message_properties {
+ needs_confirming = NeedsConfirming },
+ _ChPid, _Flow, PersistFun,
+ State = #vqstate { mode = default,
+ qi_embed_msgs_below = IndexMaxSize,
+ next_seq_id = SeqId,
+ out_counter = OutCount,
+ in_counter = InCount,
+ durable = IsDurable,
+ unconfirmed = UC }) ->
+ IsPersistent1 = IsDurable andalso IsPersistent,
+ MsgStatus = msg_status(IsPersistent1, true, SeqId, Msg, MsgProps, IndexMaxSize),
+ {MsgStatus1, State1} = PersistFun(false, false, MsgStatus, State),
+ State2 = record_pending_ack(m(MsgStatus1), State1),
+ UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
+ State3 = stats({0, 1}, {none, MsgStatus1},
+ State2 #vqstate { next_seq_id = SeqId + 1,
+ out_counter = OutCount + 1,
+ in_counter = InCount + 1,
+ unconfirmed = UC1 }),
+ {SeqId, State3};
+publish_delivered1(Msg = #basic_message { is_persistent = IsPersistent,
+ id = MsgId },
+ MsgProps = #message_properties {
+ needs_confirming = NeedsConfirming },
+ _ChPid, _Flow, PersistFun,
+ State = #vqstate { mode = lazy,
+ qi_embed_msgs_below = IndexMaxSize,
+ next_seq_id = SeqId,
+ out_counter = OutCount,
+ in_counter = InCount,
+ durable = IsDurable,
+ unconfirmed = UC }) ->
+ IsPersistent1 = IsDurable andalso IsPersistent,
+ MsgStatus = msg_status(IsPersistent1, true, SeqId, Msg, MsgProps, IndexMaxSize),
+ {MsgStatus1, State1} = PersistFun(true, true, MsgStatus, State),
+ State2 = record_pending_ack(m(MsgStatus1), State1),
+ UC1 = gb_sets_maybe_insert(NeedsConfirming, MsgId, UC),
+ State3 = stats({0, 1}, {none, MsgStatus1},
+ State2 #vqstate { next_seq_id = SeqId + 1,
+ out_counter = OutCount + 1,
+ in_counter = InCount + 1,
+ unconfirmed = UC1 }),
+ {SeqId, State3}.
+
+batch_publish_delivered1({Msg, MsgProps}, {ChPid, Flow, SeqIds, State}) ->
+ {SeqId, State1} =
+ publish_delivered1(Msg, MsgProps, ChPid, Flow,
+ fun maybe_prepare_write_to_disk/4,
+ State),
+ {ChPid, Flow, [SeqId | SeqIds], State1}.
+
maybe_write_msg_to_disk(_Force, MsgStatus = #msg_status {
msg_in_store = true }, State) ->
{MsgStatus, State};
@@ -1963,6 +2268,7 @@ ifold(Fun, Acc, Its, State) ->
reduce_memory_use(State = #vqstate { target_ram_count = infinity }) ->
State;
reduce_memory_use(State = #vqstate {
+ mode = default,
ram_pending_ack = RPA,
ram_msg_count = RamMsgCount,
target_ram_count = TargetRamCount,
@@ -2008,6 +2314,30 @@ reduce_memory_use(State = #vqstate {
end,
%% See rabbitmq-server-290 for the reasons behind this GC call.
garbage_collect(),
+ State3;
+%% When using lazy queues, there are no alphas, so we don't need to
+%% call push_alphas_to_betas/2.
+reduce_memory_use(State = #vqstate {
+ mode = lazy,
+ ram_pending_ack = RPA,
+ ram_msg_count = RamMsgCount,
+ target_ram_count = TargetRamCount }) ->
+ State1 = #vqstate { q3 = Q3 } =
+ case chunk_size(RamMsgCount + gb_trees:size(RPA), TargetRamCount) of
+ 0 -> State;
+ S1 -> {_, State2} = limit_ram_acks(S1, State),
+ State2
+ end,
+
+ State3 =
+ case chunk_size(?QUEUE:len(Q3),
+ permitted_beta_count(State1)) of
+ 0 ->
+ State1;
+ S2 ->
+ push_betas_to_deltas(S2, State1)
+ end,
+ garbage_collect(),
State3.
limit_ram_acks(0, State) ->
@@ -2031,6 +2361,9 @@ limit_ram_acks(Quota, State = #vqstate { ram_pending_ack = RPA,
permitted_beta_count(#vqstate { len = 0 }) ->
infinity;
+permitted_beta_count(#vqstate { mode = lazy,
+ target_ram_count = TargetRamCount}) ->
+ TargetRamCount;
permitted_beta_count(#vqstate { target_ram_count = 0, q3 = Q3 }) ->
lists:min([?QUEUE:len(Q3), rabbit_queue_index:next_segment_boundary(0)]);
permitted_beta_count(#vqstate { q1 = Q1,
@@ -2048,7 +2381,8 @@ chunk_size(Current, Permitted)
chunk_size(Current, Permitted) ->
Current - Permitted.
-fetch_from_q3(State = #vqstate { q1 = Q1,
+fetch_from_q3(State = #vqstate { mode = default,
+ q1 = Q1,
q2 = Q2,
delta = #delta { count = DeltaCount },
q3 = Q3,
@@ -2078,6 +2412,19 @@ fetch_from_q3(State = #vqstate { q1 = Q1,
State1
end,
{loaded, {MsgStatus, State2}}
+ end;
+%% lazy queues
+fetch_from_q3(State = #vqstate { mode = lazy,
+ delta = #delta { count = DeltaCount },
+ q3 = Q3 }) ->
+ case ?QUEUE:out(Q3) of
+ {empty, _Q3} when DeltaCount =:= 0 ->
+ {empty, State};
+ {empty, _Q3} ->
+ fetch_from_q3(maybe_deltas_to_betas(State));
+ {{value, MsgStatus}, Q3a} ->
+ State1 = State #vqstate { q3 = Q3a },
+ {loaded, {MsgStatus, State1}}
end.
maybe_deltas_to_betas(State) ->
@@ -2186,7 +2533,8 @@ push_alphas_to_betas(Generator, Consumer, Quota, Q, State) ->
end
end.
-push_betas_to_deltas(Quota, State = #vqstate { q2 = Q2,
+push_betas_to_deltas(Quota, State = #vqstate { mode = default,
+ q2 = Q2,
delta = Delta,
q3 = Q3}) ->
PushState = {Quota, Delta, State},
@@ -2201,8 +2549,22 @@ push_betas_to_deltas(Quota, State = #vqstate { q2 = Q2,
{_, Delta1, State1} = PushState2,
State1 #vqstate { q2 = Q2a,
delta = Delta1,
+ q3 = Q3a };
+%% In the case of lazy queues we want to page as many messages as
+%% possible from q3.
+push_betas_to_deltas(Quota, State = #vqstate { mode = lazy,
+ delta = Delta,
+ q3 = Q3}) ->
+ PushState = {Quota, Delta, State},
+ {Q3a, PushState1} = push_betas_to_deltas(
+ fun ?QUEUE:out_r/1,
+ fun (Q2MinSeqId) -> Q2MinSeqId end,
+ Q3, PushState),
+ {_, Delta1, State1} = PushState1,
+ State1 #vqstate { delta = Delta1,
q3 = Q3a }.
+
push_betas_to_deltas(Generator, LimitFun, Q, PushState) ->
case ?QUEUE:is_empty(Q) of
true ->
diff --git a/src/rabbit_vhost.erl b/src/rabbit_vhost.erl
index 9b627adf5d..c1394c321c 100644
--- a/src/rabbit_vhost.erl
+++ b/src/rabbit_vhost.erl
@@ -21,7 +21,7 @@
%%----------------------------------------------------------------------------
-export([add/1, delete/1, exists/1, list/0, with/2, assert/1]).
--export([info/1, info/2, info_all/0, info_all/1]).
+-export([info/1, info/2, info_all/0, info_all/1, info_all/2, info_all/3]).
-ifdef(use_specs).
@@ -37,6 +37,8 @@
-> rabbit_types:infos()).
-spec(info_all/0 :: () -> [rabbit_types:infos()]).
-spec(info_all/1 :: (rabbit_types:info_keys()) -> [rabbit_types:infos()]).
+-spec(info_all/3 :: (rabbit_types:info_keys(), reference(), pid()) ->
+ 'ok').
-endif.
@@ -153,3 +155,8 @@ info(VHost, Items) -> infos(Items, VHost).
info_all() -> info_all(?INFO_KEYS).
info_all(Items) -> [info(VHost, Items) || VHost <- list()].
+
+info_all(Ref, AggregatorPid) -> info_all(?INFO_KEYS, Ref, AggregatorPid).
+info_all(Items, Ref, AggregatorPid) ->
+ rabbit_control_misc:emitting_map(
+ AggregatorPid, Ref, fun(VHost) -> info(VHost, Items) end, list()).
diff --git a/src/rabbit_vm.erl b/src/rabbit_vm.erl
index 534a8883e1..50b60509e2 100644
--- a/src/rabbit_vm.erl
+++ b/src/rabbit_vm.erl
@@ -134,7 +134,18 @@ interesting_sups0() ->
PluginProcs = plugin_sups(),
[MsgIndexProcs, MgmtDbProcs, PluginProcs].
-conn_sups() -> [rabbit_tcp_client_sup, ssl_connection_sup, amqp_sup].
+conn_sups() ->
+ Ranches = lists:flatten(ranch_server_sups()),
+ [amqp_sup|Ranches].
+
+ranch_server_sups() ->
+ try
+ ets:match(ranch_server, {{conns_sup, '_'}, '$1'})
+ catch
+ %% Ranch ETS table doesn't exist yet
+ error:badarg -> []
+ end.
+
conn_sups(With) -> [{Sup, With} || Sup <- conn_sups()].
distinguishers() -> [{rabbit_amqqueue_sup_sup, fun queue_type/1} |
diff --git a/src/rabbit_writer.erl b/src/rabbit_writer.erl
deleted file mode 100644
index 7cba7170a4..0000000000
--- a/src/rabbit_writer.erl
+++ /dev/null
@@ -1,354 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_writer).
--include("rabbit.hrl").
--include("rabbit_framing.hrl").
-
--export([start/6, start_link/6, start/7, start_link/7]).
-
--export([system_continue/3, system_terminate/4, system_code_change/4]).
-
--export([send_command/2, send_command/3,
- send_command_sync/2, send_command_sync/3,
- send_command_and_notify/4, send_command_and_notify/5,
- send_command_flow/2, send_command_flow/3,
- flush/1]).
--export([internal_send_command/4, internal_send_command/6]).
-
-%% internal
--export([enter_mainloop/2, mainloop/2, mainloop1/2]).
-
--record(wstate, {sock, channel, frame_max, protocol, reader,
- stats_timer, pending}).
-
--define(HIBERNATE_AFTER, 5000).
-
-%%---------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(start/6 ::
- (rabbit_net:socket(), rabbit_channel:channel_number(),
- non_neg_integer(), rabbit_types:protocol(), pid(),
- rabbit_types:proc_name())
- -> rabbit_types:ok(pid())).
--spec(start_link/6 ::
- (rabbit_net:socket(), rabbit_channel:channel_number(),
- non_neg_integer(), rabbit_types:protocol(), pid(),
- rabbit_types:proc_name())
- -> rabbit_types:ok(pid())).
--spec(start/7 ::
- (rabbit_net:socket(), rabbit_channel:channel_number(),
- non_neg_integer(), rabbit_types:protocol(), pid(),
- rabbit_types:proc_name(), boolean())
- -> rabbit_types:ok(pid())).
--spec(start_link/7 ::
- (rabbit_net:socket(), rabbit_channel:channel_number(),
- non_neg_integer(), rabbit_types:protocol(), pid(),
- rabbit_types:proc_name(), boolean())
- -> rabbit_types:ok(pid())).
-
--spec(system_code_change/4 :: (_,_,_,_) -> {'ok',_}).
--spec(system_continue/3 :: (_,_,#wstate{}) -> any()).
--spec(system_terminate/4 :: (_,_,_,_) -> none()).
-
--spec(send_command/2 ::
- (pid(), rabbit_framing:amqp_method_record()) -> 'ok').
--spec(send_command/3 ::
- (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content())
- -> 'ok').
--spec(send_command_sync/2 ::
- (pid(), rabbit_framing:amqp_method_record()) -> 'ok').
--spec(send_command_sync/3 ::
- (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content())
- -> 'ok').
--spec(send_command_and_notify/4 ::
- (pid(), pid(), pid(), rabbit_framing:amqp_method_record())
- -> 'ok').
--spec(send_command_and_notify/5 ::
- (pid(), pid(), pid(), rabbit_framing:amqp_method_record(),
- rabbit_types:content())
- -> 'ok').
--spec(send_command_flow/2 ::
- (pid(), rabbit_framing:amqp_method_record()) -> 'ok').
--spec(send_command_flow/3 ::
- (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content())
- -> 'ok').
--spec(flush/1 :: (pid()) -> 'ok').
--spec(internal_send_command/4 ::
- (rabbit_net:socket(), rabbit_channel:channel_number(),
- rabbit_framing:amqp_method_record(), rabbit_types:protocol())
- -> 'ok').
--spec(internal_send_command/6 ::
- (rabbit_net:socket(), rabbit_channel:channel_number(),
- rabbit_framing:amqp_method_record(), rabbit_types:content(),
- non_neg_integer(), rabbit_types:protocol())
- -> 'ok').
-
--endif.
-
-%%---------------------------------------------------------------------------
-
-start(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity) ->
- start(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity, false).
-
-start_link(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity) ->
- start_link(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity, false).
-
-start(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity,
- ReaderWantsStats) ->
- State = initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid,
- ReaderWantsStats),
- {ok, proc_lib:spawn(?MODULE, enter_mainloop, [Identity, State])}.
-
-start_link(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity,
- ReaderWantsStats) ->
- State = initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid,
- ReaderWantsStats),
- {ok, proc_lib:spawn_link(?MODULE, enter_mainloop, [Identity, State])}.
-
-initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid, ReaderWantsStats) ->
- (case ReaderWantsStats of
- true -> fun rabbit_event:init_stats_timer/2;
- false -> fun rabbit_event:init_disabled_stats_timer/2
- end)(#wstate{sock = Sock,
- channel = Channel,
- frame_max = FrameMax,
- protocol = Protocol,
- reader = ReaderPid,
- pending = []},
- #wstate.stats_timer).
-
-system_continue(Parent, Deb, State) ->
- mainloop(Deb, State#wstate{reader = Parent}).
-
-system_terminate(Reason, _Parent, _Deb, _State) ->
- exit(Reason).
-
-system_code_change(Misc, _Module, _OldVsn, _Extra) ->
- {ok, Misc}.
-
-enter_mainloop(Identity, State) ->
- Deb = sys:debug_options([]),
- ?store_proc_name(Identity),
- mainloop(Deb, State).
-
-mainloop(Deb, State) ->
- try
- mainloop1(Deb, State)
- catch
- exit:Error -> #wstate{reader = ReaderPid, channel = Channel} = State,
- ReaderPid ! {channel_exit, Channel, Error}
- end,
- done.
-
-mainloop1(Deb, State = #wstate{pending = []}) ->
- receive
- Message -> {Deb1, State1} = handle_message(Deb, Message, State),
- ?MODULE:mainloop1(Deb1, State1)
- after ?HIBERNATE_AFTER ->
- erlang:hibernate(?MODULE, mainloop, [Deb, State])
- end;
-mainloop1(Deb, State) ->
- receive
- Message -> {Deb1, State1} = handle_message(Deb, Message, State),
- ?MODULE:mainloop1(Deb1, State1)
- after 0 ->
- ?MODULE:mainloop1(Deb, internal_flush(State))
- end.
-
-handle_message(Deb, {system, From, Req}, State = #wstate{reader = Parent}) ->
- sys:handle_system_msg(Req, From, Parent, ?MODULE, Deb, State);
-handle_message(Deb, Message, State) ->
- {Deb, handle_message(Message, State)}.
-
-handle_message({send_command, MethodRecord}, State) ->
- internal_send_command_async(MethodRecord, State);
-handle_message({send_command, MethodRecord, Content}, State) ->
- internal_send_command_async(MethodRecord, Content, State);
-handle_message({send_command_flow, MethodRecord, Sender}, State) ->
- credit_flow:ack(Sender),
- internal_send_command_async(MethodRecord, State);
-handle_message({send_command_flow, MethodRecord, Content, Sender}, State) ->
- credit_flow:ack(Sender),
- internal_send_command_async(MethodRecord, Content, State);
-handle_message({'$gen_call', From, {send_command_sync, MethodRecord}}, State) ->
- State1 = internal_flush(
- internal_send_command_async(MethodRecord, State)),
- gen_server:reply(From, ok),
- State1;
-handle_message({'$gen_call', From, {send_command_sync, MethodRecord, Content}},
- State) ->
- State1 = internal_flush(
- internal_send_command_async(MethodRecord, Content, State)),
- gen_server:reply(From, ok),
- State1;
-handle_message({'$gen_call', From, flush}, State) ->
- State1 = internal_flush(State),
- gen_server:reply(From, ok),
- State1;
-handle_message({send_command_and_notify, QPid, ChPid, MethodRecord}, State) ->
- State1 = internal_send_command_async(MethodRecord, State),
- rabbit_amqqueue:notify_sent(QPid, ChPid),
- State1;
-handle_message({send_command_and_notify, QPid, ChPid, MethodRecord, Content},
- State) ->
- State1 = internal_send_command_async(MethodRecord, Content, State),
- rabbit_amqqueue:notify_sent(QPid, ChPid),
- State1;
-handle_message({'DOWN', _MRef, process, QPid, _Reason}, State) ->
- rabbit_amqqueue:notify_sent_queue_down(QPid),
- State;
-handle_message({inet_reply, _, ok}, State) ->
- rabbit_event:ensure_stats_timer(State, #wstate.stats_timer, emit_stats);
-handle_message({inet_reply, _, Status}, _State) ->
- exit({writer, send_failed, Status});
-handle_message(emit_stats, State = #wstate{reader = ReaderPid}) ->
- ReaderPid ! ensure_stats,
- rabbit_event:reset_stats_timer(State, #wstate.stats_timer);
-handle_message(Message, _State) ->
- exit({writer, message_not_understood, Message}).
-
-%%---------------------------------------------------------------------------
-
-send_command(W, MethodRecord) ->
- W ! {send_command, MethodRecord},
- ok.
-
-send_command(W, MethodRecord, Content) ->
- W ! {send_command, MethodRecord, Content},
- ok.
-
-send_command_flow(W, MethodRecord) ->
- credit_flow:send(W),
- W ! {send_command_flow, MethodRecord, self()},
- ok.
-
-send_command_flow(W, MethodRecord, Content) ->
- credit_flow:send(W),
- W ! {send_command_flow, MethodRecord, Content, self()},
- ok.
-
-send_command_sync(W, MethodRecord) ->
- call(W, {send_command_sync, MethodRecord}).
-
-send_command_sync(W, MethodRecord, Content) ->
- call(W, {send_command_sync, MethodRecord, Content}).
-
-send_command_and_notify(W, Q, ChPid, MethodRecord) ->
- W ! {send_command_and_notify, Q, ChPid, MethodRecord},
- ok.
-
-send_command_and_notify(W, Q, ChPid, MethodRecord, Content) ->
- W ! {send_command_and_notify, Q, ChPid, MethodRecord, Content},
- ok.
-
-flush(W) -> call(W, flush).
-
-%%---------------------------------------------------------------------------
-
-call(Pid, Msg) ->
- {ok, Res} = gen:call(Pid, '$gen_call', Msg, infinity),
- Res.
-
-%%---------------------------------------------------------------------------
-
-assemble_frame(Channel, MethodRecord, Protocol) ->
- rabbit_binary_generator:build_simple_method_frame(
- Channel, MethodRecord, Protocol).
-
-assemble_frames(Channel, MethodRecord, Content, FrameMax, Protocol) ->
- MethodName = rabbit_misc:method_record_type(MethodRecord),
- true = Protocol:method_has_content(MethodName), % assertion
- MethodFrame = rabbit_binary_generator:build_simple_method_frame(
- Channel, MethodRecord, Protocol),
- ContentFrames = rabbit_binary_generator:build_simple_content_frames(
- Channel, Content, FrameMax, Protocol),
- [MethodFrame | ContentFrames].
-
-tcp_send(Sock, Data) ->
- rabbit_misc:throw_on_error(inet_error,
- fun () -> rabbit_net:send(Sock, Data) end).
-
-internal_send_command(Sock, Channel, MethodRecord, Protocol) ->
- ok = tcp_send(Sock, assemble_frame(Channel, MethodRecord, Protocol)).
-
-internal_send_command(Sock, Channel, MethodRecord, Content, FrameMax,
- Protocol) ->
- ok = lists:foldl(fun (Frame, ok) -> tcp_send(Sock, Frame);
- (_Frame, Other) -> Other
- end, ok, assemble_frames(Channel, MethodRecord,
- Content, FrameMax, Protocol)).
-
-internal_send_command_async(MethodRecord,
- State = #wstate{channel = Channel,
- protocol = Protocol,
- pending = Pending}) ->
- Frame = assemble_frame(Channel, MethodRecord, Protocol),
- maybe_flush(State#wstate{pending = [Frame | Pending]}).
-
-internal_send_command_async(MethodRecord, Content,
- State = #wstate{channel = Channel,
- frame_max = FrameMax,
- protocol = Protocol,
- pending = Pending}) ->
- Frames = assemble_frames(Channel, MethodRecord, Content, FrameMax,
- Protocol),
- rabbit_basic:maybe_gc_large_msg(Content),
- maybe_flush(State#wstate{pending = [Frames | Pending]}).
-
-%% This magic number is the tcp-over-ethernet MSS (1460) minus the
-%% minimum size of a AMQP basic.deliver method frame (24) plus basic
-%% content header (22). The idea is that we want to flush just before
-%% exceeding the MSS.
--define(FLUSH_THRESHOLD, 1414).
-
-maybe_flush(State = #wstate{pending = Pending}) ->
- case iolist_size(Pending) >= ?FLUSH_THRESHOLD of
- true -> internal_flush(State);
- false -> State
- end.
-
-internal_flush(State = #wstate{pending = []}) ->
- State;
-internal_flush(State = #wstate{sock = Sock, pending = Pending}) ->
- ok = port_cmd(Sock, lists:reverse(Pending)),
- State#wstate{pending = []}.
-
-%% gen_tcp:send/2 does a selective receive of {inet_reply, Sock,
-%% Status} to obtain the result. That is bad when it is called from
-%% the writer since it requires scanning of the writers possibly quite
-%% large message queue.
-%%
-%% So instead we lift the code from prim_inet:send/2, which is what
-%% gen_tcp:send/2 calls, do the first half here and then just process
-%% the result code in handle_message/2 as and when it arrives.
-%%
-%% This means we may end up happily sending data down a closed/broken
-%% socket, but that's ok since a) data in the buffers will be lost in
-%% any case (so qualitatively we are no worse off than if we used
-%% gen_tcp:send/2), and b) we do detect the changed socket status
-%% eventually, i.e. when we get round to handling the result code.
-%%
-%% Also note that the port has bounded buffers and port_command blocks
-%% when these are full. So the fact that we process the result
-%% asynchronously does not impact flow control.
-port_cmd(Sock, Data) ->
- true = try rabbit_net:port_command(Sock, Data)
- catch error:Error -> exit({writer, send_failed, Error})
- end,
- ok.
diff --git a/src/ssl_compat.erl b/src/ssl_compat.erl
deleted file mode 100644
index fc83fbcfa6..0000000000
--- a/src/ssl_compat.erl
+++ /dev/null
@@ -1,75 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(ssl_compat).
-
-%% We don't want warnings about the use of erlang:now/0 in
-%% this module.
--compile(nowarn_deprecated_function).
-
--export([connection_information/1,
- connection_information/2]).
-
-connection_information(SslSocket) ->
- try
- ssl:connection_information(SslSocket)
- catch
- error:undef ->
- case ssl:connection_info(SslSocket) of
- {ok, {ProtocolVersion, CipherSuite}} ->
- {ok, [{protocol, ProtocolVersion},
- {cipher_suite, CipherSuite}]};
- {error, Reason} ->
- {error, Reason}
- end
- end.
-
-connection_information(SslSocket, Items) ->
- try
- ssl:connection_information(SslSocket, Items)
- catch
- error:undef ->
- WantProtocolVersion = lists:member(protocol, Items),
- WantCipherSuite = lists:member(cipher_suite, Items),
- if
- WantProtocolVersion orelse WantCipherSuite ->
- case ssl:connection_info(SslSocket) of
- {ok, {ProtocolVersion, CipherSuite}} ->
- filter_information_items(ProtocolVersion,
- CipherSuite,
- Items,
- []);
- {error, Reason} ->
- {error, Reason}
- end;
- true ->
- {ok, []}
- end
- end.
-
-filter_information_items(ProtocolVersion, CipherSuite, [protocol | Rest],
- Result) ->
- filter_information_items(ProtocolVersion, CipherSuite, Rest,
- [{protocol, ProtocolVersion} | Result]);
-filter_information_items(ProtocolVersion, CipherSuite, [cipher_suite | Rest],
- Result) ->
- filter_information_items(ProtocolVersion, CipherSuite, Rest,
- [{cipher_suite, CipherSuite} | Result]);
-filter_information_items(ProtocolVersion, CipherSuite, [_ | Rest],
- Result) ->
- filter_information_items(ProtocolVersion, CipherSuite, Rest, Result);
-filter_information_items(_ProtocolVersion, _CipherSuite, [], Result) ->
- {ok, lists:reverse(Result)}.
diff --git a/src/supervisor2.erl b/src/supervisor2.erl
deleted file mode 100644
index 7b9421eb3e..0000000000
--- a/src/supervisor2.erl
+++ /dev/null
@@ -1,1566 +0,0 @@
-%% This file is a copy of supervisor.erl from the R16B Erlang/OTP
-%% distribution, with the following modifications:
-%%
-%% 1) the module name is supervisor2
-%%
-%% 2) a find_child/2 utility function has been added
-%%
-%% 3) Added an 'intrinsic' restart type. Like the transient type, this
-%% type means the child should only be restarted if the child exits
-%% abnormally. Unlike the transient type, if the child exits
-%% normally, the supervisor itself also exits normally. If the
-%% child is a supervisor and it exits normally (i.e. with reason of
-%% 'shutdown') then the child's parent also exits normally.
-%%
-%% 4) child specifications can contain, as the restart type, a tuple
-%% {permanent, Delay} | {transient, Delay} | {intrinsic, Delay}
-%% where Delay >= 0 (see point (4) below for intrinsic). The delay,
-%% in seconds, indicates what should happen if a child, upon being
-%% restarted, exceeds the MaxT and MaxR parameters. Thus, if a
-%% child exits, it is restarted as normal. If it exits sufficiently
-%% quickly and often to exceed the boundaries set by the MaxT and
-%% MaxR parameters, and a Delay is specified, then rather than
-%% stopping the supervisor, the supervisor instead continues and
-%% tries to start up the child again, Delay seconds later.
-%%
-%% Note that if a child is delay-restarted this will reset the
-%% count of restarts towrds MaxR and MaxT. This matters if MaxT >
-%% Delay, since otherwise we would fail to restart after the delay.
-%%
-%% Sometimes, you may wish for a transient or intrinsic child to
-%% exit abnormally so that it gets restarted, but still log
-%% nothing. gen_server will log any exit reason other than
-%% 'normal', 'shutdown' or {'shutdown', _}. Thus the exit reason of
-%% {'shutdown', 'restart'} is interpreted to mean you wish the
-%% child to be restarted according to the delay parameters, but
-%% gen_server will not log the error. Thus from gen_server's
-%% perspective it's a normal exit, whilst from supervisor's
-%% perspective, it's an abnormal exit.
-%%
-%% 5) normal, and {shutdown, _} exit reasons are all treated the same
-%% (i.e. are regarded as normal exits)
-%%
-%% All modifications are (C) 2010-2013 GoPivotal, Inc.
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 1996-2012. All Rights Reserved.
-%%
-%% The contents of this file are subject to the Erlang Public License,
-%% Version 1.1, (the "License"); you may not use this file except in
-%% compliance with the License. You should have received a copy of the
-%% Erlang Public License along with this software. If not, it can be
-%% retrieved online at http://www.erlang.org/.
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% %CopyrightEnd%
-%%
--module(supervisor2).
-
--behaviour(gen_server).
-
-%% External exports
--export([start_link/2, start_link/3,
- start_child/2, restart_child/2,
- delete_child/2, terminate_child/2,
- which_children/1, count_children/1,
- find_child/2, check_childspecs/1]).
-
-%% Internal exports
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
--export([try_again_restart/3]).
-
-%%--------------------------------------------------------------------------
--ifdef(use_specs).
--export_type([child_spec/0, startchild_ret/0, strategy/0, sup_name/0]).
--endif.
-%%--------------------------------------------------------------------------
-
--ifdef(use_specs).
--type child() :: 'undefined' | pid().
--type child_id() :: term().
--type mfargs() :: {M :: module(), F :: atom(), A :: [term()] | undefined}.
--type modules() :: [module()] | 'dynamic'.
--type delay() :: non_neg_integer().
--type restart() :: 'permanent' | 'transient' | 'temporary' | 'intrinsic' | {'permanent', delay()} | {'transient', delay()} | {'intrinsic', delay()}.
--type shutdown() :: 'brutal_kill' | timeout().
--type worker() :: 'worker' | 'supervisor'.
--type sup_name() :: {'local', Name :: atom()} | {'global', Name :: atom()}.
--type sup_ref() :: (Name :: atom())
- | {Name :: atom(), Node :: node()}
- | {'global', Name :: atom()}
- | pid().
--type child_spec() :: {Id :: child_id(),
- StartFunc :: mfargs(),
- Restart :: restart(),
- Shutdown :: shutdown(),
- Type :: worker(),
- Modules :: modules()}.
-
--type strategy() :: 'one_for_all' | 'one_for_one'
- | 'rest_for_one' | 'simple_one_for_one'.
--endif.
-
-%%--------------------------------------------------------------------------
-
--ifdef(use_specs).
--record(child, {% pid is undefined when child is not running
- pid = undefined :: child() | {restarting,pid()} | [pid()],
- name :: child_id(),
- mfargs :: mfargs(),
- restart_type :: restart(),
- shutdown :: shutdown(),
- child_type :: worker(),
- modules = [] :: modules()}).
--type child_rec() :: #child{}.
--else.
--record(child, {
- pid = undefined,
- name,
- mfargs,
- restart_type,
- shutdown,
- child_type,
- modules = []}).
--endif.
-
--define(DICT, dict).
--define(SETS, sets).
--define(SET, set).
-
--ifdef(use_specs).
--record(state, {name,
- strategy :: strategy(),
- children = [] :: [child_rec()],
- dynamics :: ?DICT:?DICT() | ?SETS:?SET(),
- intensity :: non_neg_integer(),
- period :: pos_integer(),
- restarts = [],
- module,
- args}).
--type state() :: #state{}.
--else.
--record(state, {name,
- strategy,
- children = [],
- dynamics,
- intensity,
- period,
- restarts = [],
- module,
- args}).
--endif.
-
--define(is_simple(State), State#state.strategy =:= simple_one_for_one).
--define(is_permanent(R), ((R =:= permanent) orelse
- (is_tuple(R) andalso
- tuple_size(R) == 2 andalso
- element(1, R) =:= permanent))).
--define(is_explicit_restart(R),
- R == {shutdown, restart}).
-
--ifdef(use_specs).
--callback init(Args :: term()) ->
- {ok, {{RestartStrategy :: strategy(),
- MaxR :: non_neg_integer(),
- MaxT :: non_neg_integer()},
- [ChildSpec :: child_spec()]}}
- | ignore.
--else.
-
--export([behaviour_info/1]).
-
-behaviour_info(callbacks) ->
- [{init,1}];
-behaviour_info(_Other) ->
- undefined.
-
--endif.
--define(restarting(_Pid_), {restarting,_Pid_}).
-
-%%% ---------------------------------------------------
-%%% This is a general process supervisor built upon gen_server.erl.
-%%% Servers/processes should/could also be built using gen_server.erl.
-%%% SupName = {local, atom()} | {global, atom()}.
-%%% ---------------------------------------------------
--ifdef(use_specs).
--type startlink_err() :: {'already_started', pid()}
- | {'shutdown', term()}
- | term().
--type startlink_ret() :: {'ok', pid()} | 'ignore' | {'error', startlink_err()}.
-
--spec start_link(Module, Args) -> startlink_ret() when
- Module :: module(),
- Args :: term().
-
--endif.
-start_link(Mod, Args) ->
- gen_server:start_link(?MODULE, {self, Mod, Args}, []).
-
--ifdef(use_specs).
--spec start_link(SupName, Module, Args) -> startlink_ret() when
- SupName :: sup_name(),
- Module :: module(),
- Args :: term().
--endif.
-start_link(SupName, Mod, Args) ->
- gen_server:start_link(SupName, ?MODULE, {SupName, Mod, Args}, []).
-
-%%% ---------------------------------------------------
-%%% Interface functions.
-%%% ---------------------------------------------------
--ifdef(use_specs).
--type startchild_err() :: 'already_present'
- | {'already_started', Child :: child()} | term().
--type startchild_ret() :: {'ok', Child :: child()}
- | {'ok', Child :: child(), Info :: term()}
- | {'error', startchild_err()}.
-
--spec start_child(SupRef, ChildSpec) -> startchild_ret() when
- SupRef :: sup_ref(),
- ChildSpec :: child_spec() | (List :: [term()]).
--endif.
-start_child(Supervisor, ChildSpec) ->
- call(Supervisor, {start_child, ChildSpec}).
-
--ifdef(use_specs).
--spec restart_child(SupRef, Id) -> Result when
- SupRef :: sup_ref(),
- Id :: child_id(),
- Result :: {'ok', Child :: child()}
- | {'ok', Child :: child(), Info :: term()}
- | {'error', Error},
- Error :: 'running' | 'restarting' | 'not_found' | 'simple_one_for_one' |
- term().
--endif.
-restart_child(Supervisor, Name) ->
- call(Supervisor, {restart_child, Name}).
-
--ifdef(use_specs).
--spec delete_child(SupRef, Id) -> Result when
- SupRef :: sup_ref(),
- Id :: child_id(),
- Result :: 'ok' | {'error', Error},
- Error :: 'running' | 'restarting' | 'not_found' | 'simple_one_for_one'.
--endif.
-delete_child(Supervisor, Name) ->
- call(Supervisor, {delete_child, Name}).
-
-%%-----------------------------------------------------------------
-%% Func: terminate_child/2
-%% Returns: ok | {error, Reason}
-%% Note that the child is *always* terminated in some
-%% way (maybe killed).
-%%-----------------------------------------------------------------
--ifdef(use_specs).
--spec terminate_child(SupRef, Id) -> Result when
- SupRef :: sup_ref(),
- Id :: pid() | child_id(),
- Result :: 'ok' | {'error', Error},
- Error :: 'not_found' | 'simple_one_for_one'.
--endif.
-terminate_child(Supervisor, Name) ->
- call(Supervisor, {terminate_child, Name}).
-
--ifdef(use_specs).
--spec which_children(SupRef) -> [{Id,Child,Type,Modules}] when
- SupRef :: sup_ref(),
- Id :: child_id() | undefined,
- Child :: child() | 'restarting',
- Type :: worker(),
- Modules :: modules().
--endif.
-which_children(Supervisor) ->
- call(Supervisor, which_children).
-
--ifdef(use_specs).
--spec count_children(SupRef) -> PropListOfCounts when
- SupRef :: sup_ref(),
- PropListOfCounts :: [Count],
- Count :: {specs, ChildSpecCount :: non_neg_integer()}
- | {active, ActiveProcessCount :: non_neg_integer()}
- | {supervisors, ChildSupervisorCount :: non_neg_integer()}
- |{workers, ChildWorkerCount :: non_neg_integer()}.
--endif.
-count_children(Supervisor) ->
- call(Supervisor, count_children).
-
--ifdef(use_specs).
--spec find_child(Supervisor, Name) -> [pid()] when
- Supervisor :: sup_ref(),
- Name :: child_id().
--endif.
-find_child(Supervisor, Name) ->
- [Pid || {Name1, Pid, _Type, _Modules} <- which_children(Supervisor),
- Name1 =:= Name].
-
-call(Supervisor, Req) ->
- gen_server:call(Supervisor, Req, infinity).
-
--ifdef(use_specs).
--spec check_childspecs(ChildSpecs) -> Result when
- ChildSpecs :: [child_spec()],
- Result :: 'ok' | {'error', Error :: term()}.
--endif.
-check_childspecs(ChildSpecs) when is_list(ChildSpecs) ->
- case check_startspec(ChildSpecs) of
- {ok, _} -> ok;
- Error -> {error, Error}
- end;
-check_childspecs(X) -> {error, {badarg, X}}.
-
-%%%-----------------------------------------------------------------
-%%% Called by timer:apply_after from restart/2
--ifdef(use_specs).
--spec try_again_restart(SupRef, Child, Reason) -> ok when
- SupRef :: sup_ref(),
- Child :: child_id() | pid(),
- Reason :: term().
--endif.
-try_again_restart(Supervisor, Child, Reason) ->
- cast(Supervisor, {try_again_restart, Child, Reason}).
-
-cast(Supervisor, Req) ->
- gen_server:cast(Supervisor, Req).
-
-%%% ---------------------------------------------------
-%%%
-%%% Initialize the supervisor.
-%%%
-%%% ---------------------------------------------------
--ifdef(use_specs).
--type init_sup_name() :: sup_name() | 'self'.
-
--type stop_rsn() :: {'shutdown', term()}
- | {'bad_return', {module(),'init', term()}}
- | {'bad_start_spec', term()}
- | {'start_spec', term()}
- | {'supervisor_data', term()}.
-
--spec init({init_sup_name(), module(), [term()]}) ->
- {'ok', state()} | 'ignore' | {'stop', stop_rsn()}.
--endif.
-init({SupName, Mod, Args}) ->
- process_flag(trap_exit, true),
- case Mod:init(Args) of
- {ok, {SupFlags, StartSpec}} ->
- case init_state(SupName, SupFlags, Mod, Args) of
- {ok, State} when ?is_simple(State) ->
- init_dynamic(State, StartSpec);
- {ok, State} ->
- init_children(State, StartSpec);
- Error ->
- {stop, {supervisor_data, Error}}
- end;
- ignore ->
- ignore;
- Error ->
- {stop, {bad_return, {Mod, init, Error}}}
- end.
-
-init_children(State, StartSpec) ->
- SupName = State#state.name,
- case check_startspec(StartSpec) of
- {ok, Children} ->
- case start_children(Children, SupName) of
- {ok, NChildren} ->
- {ok, State#state{children = NChildren}};
- {error, NChildren, Reason} ->
- terminate_children(NChildren, SupName),
- {stop, {shutdown, Reason}}
- end;
- Error ->
- {stop, {start_spec, Error}}
- end.
-
-init_dynamic(State, [StartSpec]) ->
- case check_startspec([StartSpec]) of
- {ok, Children} ->
- {ok, State#state{children = Children}};
- Error ->
- {stop, {start_spec, Error}}
- end;
-init_dynamic(_State, StartSpec) ->
- {stop, {bad_start_spec, StartSpec}}.
-
-%%-----------------------------------------------------------------
-%% Func: start_children/2
-%% Args: Children = [child_rec()] in start order
-%% SupName = {local, atom()} | {global, atom()} | {pid(), Mod}
-%% Purpose: Start all children. The new list contains #child's
-%% with pids.
-%% Returns: {ok, NChildren} | {error, NChildren, Reason}
-%% NChildren = [child_rec()] in termination order (reversed
-%% start order)
-%%-----------------------------------------------------------------
-start_children(Children, SupName) -> start_children(Children, [], SupName).
-
-start_children([Child|Chs], NChildren, SupName) ->
- case do_start_child(SupName, Child) of
- {ok, undefined} when Child#child.restart_type =:= temporary ->
- start_children(Chs, NChildren, SupName);
- {ok, Pid} ->
- start_children(Chs, [Child#child{pid = Pid}|NChildren], SupName);
- {ok, Pid, _Extra} ->
- start_children(Chs, [Child#child{pid = Pid}|NChildren], SupName);
- {error, Reason} ->
- report_error(start_error, Reason, Child, SupName),
- {error, lists:reverse(Chs) ++ [Child | NChildren],
- {failed_to_start_child,Child#child.name,Reason}}
- end;
-start_children([], NChildren, _SupName) ->
- {ok, NChildren}.
-
-do_start_child(SupName, Child) ->
- #child{mfargs = {M, F, Args}} = Child,
- case catch apply(M, F, Args) of
- {ok, Pid} when is_pid(Pid) ->
- NChild = Child#child{pid = Pid},
- report_progress(NChild, SupName),
- {ok, Pid};
- {ok, Pid, Extra} when is_pid(Pid) ->
- NChild = Child#child{pid = Pid},
- report_progress(NChild, SupName),
- {ok, Pid, Extra};
- ignore ->
- {ok, undefined};
- {error, What} -> {error, What};
- What -> {error, What}
- end.
-
-do_start_child_i(M, F, A) ->
- case catch apply(M, F, A) of
- {ok, Pid} when is_pid(Pid) ->
- {ok, Pid};
- {ok, Pid, Extra} when is_pid(Pid) ->
- {ok, Pid, Extra};
- ignore ->
- {ok, undefined};
- {error, Error} ->
- {error, Error};
- What ->
- {error, What}
- end.
-
-%%% ---------------------------------------------------
-%%%
-%%% Callback functions.
-%%%
-%%% ---------------------------------------------------
--ifdef(use_specs).
--type call() :: 'which_children' | 'count_children' | {_, _}. % XXX: refine
--spec handle_call(call(), term(), state()) -> {'reply', term(), state()}.
--endif.
-handle_call({start_child, EArgs}, _From, State) when ?is_simple(State) ->
- Child = hd(State#state.children),
- #child{mfargs = {M, F, A}} = Child,
- Args = A ++ EArgs,
- case do_start_child_i(M, F, Args) of
- {ok, undefined} when Child#child.restart_type =:= temporary ->
- {reply, {ok, undefined}, State};
- {ok, Pid} ->
- NState = save_dynamic_child(Child#child.restart_type, Pid, Args, State),
- {reply, {ok, Pid}, NState};
- {ok, Pid, Extra} ->
- NState = save_dynamic_child(Child#child.restart_type, Pid, Args, State),
- {reply, {ok, Pid, Extra}, NState};
- What ->
- {reply, What, State}
- end;
-
-%% terminate_child for simple_one_for_one can only be done with pid
-handle_call({terminate_child, Name}, _From, State) when not is_pid(Name),
- ?is_simple(State) ->
- {reply, {error, simple_one_for_one}, State};
-
-handle_call({terminate_child, Name}, _From, State) ->
- case get_child(Name, State, ?is_simple(State)) of
- {value, Child} ->
- case do_terminate(Child, State#state.name) of
- #child{restart_type=RT} when RT=:=temporary; ?is_simple(State) ->
- {reply, ok, state_del_child(Child, State)};
- NChild ->
- {reply, ok, replace_child(NChild, State)}
- end;
- false ->
- {reply, {error, not_found}, State}
- end;
-
-%%% The requests delete_child and restart_child are invalid for
-%%% simple_one_for_one supervisors.
-handle_call({_Req, _Data}, _From, State) when ?is_simple(State) ->
- {reply, {error, simple_one_for_one}, State};
-
-handle_call({start_child, ChildSpec}, _From, State) ->
- case check_childspec(ChildSpec) of
- {ok, Child} ->
- {Resp, NState} = handle_start_child(Child, State),
- {reply, Resp, NState};
- What ->
- {reply, {error, What}, State}
- end;
-
-handle_call({restart_child, Name}, _From, State) ->
- case get_child(Name, State) of
- {value, Child} when Child#child.pid =:= undefined ->
- case do_start_child(State#state.name, Child) of
- {ok, Pid} ->
- NState = replace_child(Child#child{pid = Pid}, State),
- {reply, {ok, Pid}, NState};
- {ok, Pid, Extra} ->
- NState = replace_child(Child#child{pid = Pid}, State),
- {reply, {ok, Pid, Extra}, NState};
- Error ->
- {reply, Error, State}
- end;
- {value, #child{pid=?restarting(_)}} ->
- {reply, {error, restarting}, State};
- {value, _} ->
- {reply, {error, running}, State};
- _ ->
- {reply, {error, not_found}, State}
- end;
-
-handle_call({delete_child, Name}, _From, State) ->
- case get_child(Name, State) of
- {value, Child} when Child#child.pid =:= undefined ->
- NState = remove_child(Child, State),
- {reply, ok, NState};
- {value, #child{pid=?restarting(_)}} ->
- {reply, {error, restarting}, State};
- {value, _} ->
- {reply, {error, running}, State};
- _ ->
- {reply, {error, not_found}, State}
- end;
-
-handle_call(which_children, _From, #state{children = [#child{restart_type = temporary,
- child_type = CT,
- modules = Mods}]} =
- State) when ?is_simple(State) ->
- Reply = lists:map(fun(Pid) -> {undefined, Pid, CT, Mods} end,
- ?SETS:to_list(dynamics_db(temporary, State#state.dynamics))),
- {reply, Reply, State};
-
-handle_call(which_children, _From, #state{children = [#child{restart_type = RType,
- child_type = CT,
- modules = Mods}]} =
- State) when ?is_simple(State) ->
- Reply = lists:map(fun({?restarting(_),_}) -> {undefined,restarting,CT,Mods};
- ({Pid, _}) -> {undefined, Pid, CT, Mods} end,
- ?DICT:to_list(dynamics_db(RType, State#state.dynamics))),
- {reply, Reply, State};
-
-handle_call(which_children, _From, State) ->
- Resp =
- lists:map(fun(#child{pid = ?restarting(_), name = Name,
- child_type = ChildType, modules = Mods}) ->
- {Name, restarting, ChildType, Mods};
- (#child{pid = Pid, name = Name,
- child_type = ChildType, modules = Mods}) ->
- {Name, Pid, ChildType, Mods}
- end,
- State#state.children),
- {reply, Resp, State};
-
-
-handle_call(count_children, _From, #state{children = [#child{restart_type = temporary,
- child_type = CT}]} = State)
- when ?is_simple(State) ->
- {Active, Count} =
- ?SETS:fold(fun(Pid, {Alive, Tot}) ->
- case is_pid(Pid) andalso is_process_alive(Pid) of
- true ->{Alive+1, Tot +1};
- false ->
- {Alive, Tot + 1}
- end
- end, {0, 0}, dynamics_db(temporary, State#state.dynamics)),
- Reply = case CT of
- supervisor -> [{specs, 1}, {active, Active},
- {supervisors, Count}, {workers, 0}];
- worker -> [{specs, 1}, {active, Active},
- {supervisors, 0}, {workers, Count}]
- end,
- {reply, Reply, State};
-
-handle_call(count_children, _From, #state{children = [#child{restart_type = RType,
- child_type = CT}]} = State)
- when ?is_simple(State) ->
- {Active, Count} =
- ?DICT:fold(fun(Pid, _Val, {Alive, Tot}) ->
- case is_pid(Pid) andalso is_process_alive(Pid) of
- true ->
- {Alive+1, Tot +1};
- false ->
- {Alive, Tot + 1}
- end
- end, {0, 0}, dynamics_db(RType, State#state.dynamics)),
- Reply = case CT of
- supervisor -> [{specs, 1}, {active, Active},
- {supervisors, Count}, {workers, 0}];
- worker -> [{specs, 1}, {active, Active},
- {supervisors, 0}, {workers, Count}]
- end,
- {reply, Reply, State};
-
-handle_call(count_children, _From, State) ->
- %% Specs and children are together on the children list...
- {Specs, Active, Supers, Workers} =
- lists:foldl(fun(Child, Counts) ->
- count_child(Child, Counts)
- end, {0,0,0,0}, State#state.children),
-
- %% Reformat counts to a property list.
- Reply = [{specs, Specs}, {active, Active},
- {supervisors, Supers}, {workers, Workers}],
- {reply, Reply, State}.
-
-
-count_child(#child{pid = Pid, child_type = worker},
- {Specs, Active, Supers, Workers}) ->
- case is_pid(Pid) andalso is_process_alive(Pid) of
- true -> {Specs+1, Active+1, Supers, Workers+1};
- false -> {Specs+1, Active, Supers, Workers+1}
- end;
-count_child(#child{pid = Pid, child_type = supervisor},
- {Specs, Active, Supers, Workers}) ->
- case is_pid(Pid) andalso is_process_alive(Pid) of
- true -> {Specs+1, Active+1, Supers+1, Workers};
- false -> {Specs+1, Active, Supers+1, Workers}
- end.
-
-
-%%% If a restart attempt failed, this message is sent via
-%%% timer:apply_after(0,...) in order to give gen_server the chance to
-%%% check it's inbox before trying again.
--ifdef(use_specs).
--spec handle_cast({try_again_restart, child_id() | pid(), term()}, state()) ->
- {'noreply', state()} | {stop, shutdown, state()}.
--endif.
-handle_cast({try_again_restart,Pid,Reason}, #state{children=[Child]}=State)
- when ?is_simple(State) ->
- RT = Child#child.restart_type,
- RPid = restarting(Pid),
- case dynamic_child_args(RPid, dynamics_db(RT, State#state.dynamics)) of
- {ok, Args} ->
- {M, F, _} = Child#child.mfargs,
- NChild = Child#child{pid = RPid, mfargs = {M, F, Args}},
- try_restart(Child#child.restart_type, Reason, NChild, State);
- error ->
- {noreply, State}
- end;
-
-handle_cast({try_again_restart,Name,Reason}, State) ->
- %% we still support >= R12-B3 in which lists:keyfind/3 doesn't exist
- case lists:keysearch(Name,#child.name,State#state.children) of
- {value, Child = #child{pid=?restarting(_), restart_type=RestartType}} ->
- try_restart(RestartType, Reason, Child, State);
- _ ->
- {noreply,State}
- end.
-
-%%
-%% Take care of terminated children.
-%%
--ifdef(use_specs).
--spec handle_info(term(), state()) ->
- {'noreply', state()} | {'stop', 'shutdown', state()}.
--endif.
-handle_info({'EXIT', Pid, Reason}, State) ->
- case restart_child(Pid, Reason, State) of
- {ok, State1} ->
- {noreply, State1};
- {shutdown, State1} ->
- {stop, shutdown, State1}
- end;
-
-handle_info({delayed_restart, {RestartType, Reason, Child}}, State)
- when ?is_simple(State) ->
- try_restart(RestartType, Reason, Child, State#state{restarts = []}); %% [1]
-handle_info({delayed_restart, {RestartType, Reason, Child}}, State) ->
- case get_child(Child#child.name, State) of
- {value, Child1} ->
- try_restart(RestartType, Reason, Child1,
- State#state{restarts = []}); %% [1]
- _What ->
- {noreply, State}
- end;
-%% [1] When we receive a delayed_restart message we want to reset the
-%% restarts field since otherwise the MaxT might not have elapsed and
-%% we would just delay again and again. Since a common use of the
-%% delayed restart feature is for MaxR = 1, MaxT = some huge number
-%% (so that we don't end up bouncing around in non-delayed restarts)
-%% this is important.
-
-handle_info(Msg, State) ->
- error_logger:error_msg("Supervisor received unexpected message: ~p~n",
- [Msg]),
- {noreply, State}.
-
-%%
-%% Terminate this server.
-%%
--ifdef(use_specs).
--spec terminate(term(), state()) -> 'ok'.
--endif.
-terminate(_Reason, #state{children=[Child]} = State) when ?is_simple(State) ->
- terminate_dynamic_children(Child, dynamics_db(Child#child.restart_type,
- State#state.dynamics),
- State#state.name);
-terminate(_Reason, State) ->
- terminate_children(State#state.children, State#state.name).
-
-%%
-%% Change code for the supervisor.
-%% Call the new call-back module and fetch the new start specification.
-%% Combine the new spec. with the old. If the new start spec. is
-%% not valid the code change will not succeed.
-%% Use the old Args as argument to Module:init/1.
-%% NOTE: This requires that the init function of the call-back module
-%% does not have any side effects.
-%%
--ifdef(use_specs).
--spec code_change(term(), state(), term()) ->
- {'ok', state()} | {'error', term()}.
--endif.
-code_change(_, State, _) ->
- case (State#state.module):init(State#state.args) of
- {ok, {SupFlags, StartSpec}} ->
- case catch check_flags(SupFlags) of
- ok ->
- {Strategy, MaxIntensity, Period} = SupFlags,
- update_childspec(State#state{strategy = Strategy,
- intensity = MaxIntensity,
- period = Period},
- StartSpec);
- Error ->
- {error, Error}
- end;
- ignore ->
- {ok, State};
- Error ->
- Error
- end.
-
-check_flags({Strategy, MaxIntensity, Period}) ->
- validStrategy(Strategy),
- validIntensity(MaxIntensity),
- validPeriod(Period),
- ok;
-check_flags(What) ->
- {bad_flags, What}.
-
-update_childspec(State, StartSpec) when ?is_simple(State) ->
- case check_startspec(StartSpec) of
- {ok, [Child]} ->
- {ok, State#state{children = [Child]}};
- Error ->
- {error, Error}
- end;
-update_childspec(State, StartSpec) ->
- case check_startspec(StartSpec) of
- {ok, Children} ->
- OldC = State#state.children, % In reverse start order !
- NewC = update_childspec1(OldC, Children, []),
- {ok, State#state{children = NewC}};
- Error ->
- {error, Error}
- end.
-
-update_childspec1([Child|OldC], Children, KeepOld) ->
- case update_chsp(Child, Children) of
- {ok,NewChildren} ->
- update_childspec1(OldC, NewChildren, KeepOld);
- false ->
- update_childspec1(OldC, Children, [Child|KeepOld])
- end;
-update_childspec1([], Children, KeepOld) ->
- %% Return them in (kept) reverse start order.
- lists:reverse(Children ++ KeepOld).
-
-update_chsp(OldCh, Children) ->
- case lists:map(fun(Ch) when OldCh#child.name =:= Ch#child.name ->
- Ch#child{pid = OldCh#child.pid};
- (Ch) ->
- Ch
- end,
- Children) of
- Children ->
- false; % OldCh not found in new spec.
- NewC ->
- {ok, NewC}
- end.
-
-%%% ---------------------------------------------------
-%%% Start a new child.
-%%% ---------------------------------------------------
-
-handle_start_child(Child, State) ->
- case get_child(Child#child.name, State) of
- false ->
- case do_start_child(State#state.name, Child) of
- {ok, undefined} when Child#child.restart_type =:= temporary ->
- {{ok, undefined}, State};
- {ok, Pid} ->
- {{ok, Pid}, save_child(Child#child{pid = Pid}, State)};
- {ok, Pid, Extra} ->
- {{ok, Pid, Extra}, save_child(Child#child{pid = Pid}, State)};
- {error, What} ->
- {{error, {What, Child}}, State}
- end;
- {value, OldChild} when is_pid(OldChild#child.pid) ->
- {{error, {already_started, OldChild#child.pid}}, State};
- {value, _OldChild} ->
- {{error, already_present}, State}
- end.
-
-%%% ---------------------------------------------------
-%%% Restart. A process has terminated.
-%%% Returns: {ok, state()} | {shutdown, state()}
-%%% ---------------------------------------------------
-
-restart_child(Pid, Reason, #state{children = [Child]} = State) when ?is_simple(State) ->
- RestartType = Child#child.restart_type,
- case dynamic_child_args(Pid, dynamics_db(RestartType, State#state.dynamics)) of
- {ok, Args} ->
- {M, F, _} = Child#child.mfargs,
- NChild = Child#child{pid = Pid, mfargs = {M, F, Args}},
- do_restart(RestartType, Reason, NChild, State);
- error ->
- {ok, State}
- end;
-
-restart_child(Pid, Reason, State) ->
- Children = State#state.children,
- %% we still support >= R12-B3 in which lists:keyfind/3 doesn't exist
- case lists:keysearch(Pid, #child.pid, Children) of
- {value, #child{restart_type = RestartType} = Child} ->
- do_restart(RestartType, Reason, Child, State);
- false ->
- {ok, State}
- end.
-
-try_restart(RestartType, Reason, Child, State) ->
- case handle_restart(RestartType, Reason, Child, State) of
- {ok, NState} -> {noreply, NState};
- {shutdown, State2} -> {stop, shutdown, State2}
- end.
-
-do_restart(RestartType, Reason, Child, State) ->
- maybe_report_error(RestartType, Reason, Child, State),
- handle_restart(RestartType, Reason, Child, State).
-
-maybe_report_error(permanent, Reason, Child, State) ->
- report_child_termination(Reason, Child, State);
-maybe_report_error({permanent, _}, Reason, Child, State) ->
- report_child_termination(Reason, Child, State);
-maybe_report_error(_Type, Reason, Child, State) ->
- case is_abnormal_termination(Reason) of
- true -> report_child_termination(Reason, Child, State);
- false -> ok
- end.
-
-report_child_termination(Reason, Child, State) ->
- report_error(child_terminated, Reason, Child, State#state.name).
-
-handle_restart(permanent, _Reason, Child, State) ->
- restart(Child, State);
-handle_restart(transient, Reason, Child, State) ->
- restart_if_explicit_or_abnormal(fun restart/2,
- fun delete_child_and_continue/2,
- Reason, Child, State);
-handle_restart(intrinsic, Reason, Child, State) ->
- restart_if_explicit_or_abnormal(fun restart/2,
- fun delete_child_and_stop/2,
- Reason, Child, State);
-handle_restart(temporary, _Reason, Child, State) ->
- delete_child_and_continue(Child, State);
-handle_restart({permanent, _Delay}=Restart, Reason, Child, State) ->
- do_restart_delay(Restart, Reason, Child, State);
-handle_restart({transient, _Delay}=Restart, Reason, Child, State) ->
- restart_if_explicit_or_abnormal(defer_to_restart_delay(Restart, Reason),
- fun delete_child_and_continue/2,
- Reason, Child, State);
-handle_restart({intrinsic, _Delay}=Restart, Reason, Child, State) ->
- restart_if_explicit_or_abnormal(defer_to_restart_delay(Restart, Reason),
- fun delete_child_and_stop/2,
- Reason, Child, State).
-
-restart_if_explicit_or_abnormal(RestartHow, Otherwise, Reason, Child, State) ->
- case ?is_explicit_restart(Reason) orelse is_abnormal_termination(Reason) of
- true -> RestartHow(Child, State);
- false -> Otherwise(Child, State)
- end.
-
-defer_to_restart_delay(Restart, Reason) ->
- fun(Child, State) -> do_restart_delay(Restart, Reason, Child, State) end.
-
-delete_child_and_continue(Child, State) ->
- {ok, state_del_child(Child, State)}.
-
-delete_child_and_stop(Child, State) ->
- {shutdown, state_del_child(Child, State)}.
-
-is_abnormal_termination(normal) -> false;
-is_abnormal_termination(shutdown) -> false;
-is_abnormal_termination({shutdown, _}) -> false;
-is_abnormal_termination(_Other) -> true.
-
-do_restart_delay({RestartType, Delay}, Reason, Child, State) ->
- case add_restart(State) of
- {ok, NState} ->
- maybe_restart(NState#state.strategy, Child, NState);
- {terminate, _NState} ->
- %% we've reached the max restart intensity, but the
- %% add_restart will have added to the restarts
- %% field. Given we don't want to die here, we need to go
- %% back to the old restarts field otherwise we'll never
- %% attempt to restart later, which is why we ignore
- %% NState for this clause.
- _TRef = erlang:send_after(trunc(Delay*1000), self(),
- {delayed_restart,
- {{RestartType, Delay}, Reason, Child}}),
- {ok, state_del_child(Child, State)}
- end.
-
-restart(Child, State) ->
- case add_restart(State) of
- {ok, NState} ->
- maybe_restart(NState#state.strategy, Child, NState);
- {terminate, NState} ->
- report_error(shutdown, reached_max_restart_intensity,
- Child, State#state.name),
- {shutdown, remove_child(Child, NState)}
- end.
-
-maybe_restart(Strategy, Child, State) ->
- case restart(Strategy, Child, State) of
- {try_again, Reason, NState2} ->
- %% Leaving control back to gen_server before
- %% trying again. This way other incoming requsts
- %% for the supervisor can be handled - e.g. a
- %% shutdown request for the supervisor or the
- %% child.
- Id = if ?is_simple(State) -> Child#child.pid;
- true -> Child#child.name
- end,
- timer:apply_after(0,?MODULE,try_again_restart,[self(),Id,Reason]),
- {ok,NState2};
- Other ->
- Other
- end.
-
-restart(simple_one_for_one, Child, State) ->
- #child{pid = OldPid, mfargs = {M, F, A}} = Child,
- Dynamics = ?DICT:erase(OldPid, dynamics_db(Child#child.restart_type,
- State#state.dynamics)),
- case do_start_child_i(M, F, A) of
- {ok, Pid} ->
- NState = State#state{dynamics = ?DICT:store(Pid, A, Dynamics)},
- {ok, NState};
- {ok, Pid, _Extra} ->
- NState = State#state{dynamics = ?DICT:store(Pid, A, Dynamics)},
- {ok, NState};
- {error, Error} ->
- NState = State#state{dynamics = ?DICT:store(restarting(OldPid), A,
- Dynamics)},
- report_error(start_error, Error, Child, State#state.name),
- {try_again, Error, NState}
- end;
-restart(one_for_one, Child, State) ->
- OldPid = Child#child.pid,
- case do_start_child(State#state.name, Child) of
- {ok, Pid} ->
- NState = replace_child(Child#child{pid = Pid}, State),
- {ok, NState};
- {ok, Pid, _Extra} ->
- NState = replace_child(Child#child{pid = Pid}, State),
- {ok, NState};
- {error, Reason} ->
- NState = replace_child(Child#child{pid = restarting(OldPid)}, State),
- report_error(start_error, Reason, Child, State#state.name),
- {try_again, Reason, NState}
- end;
-restart(rest_for_one, Child, State) ->
- {ChAfter, ChBefore} = split_child(Child#child.pid, State#state.children),
- ChAfter2 = terminate_children(ChAfter, State#state.name),
- case start_children(ChAfter2, State#state.name) of
- {ok, ChAfter3} ->
- {ok, State#state{children = ChAfter3 ++ ChBefore}};
- {error, ChAfter3, Reason} ->
- NChild = Child#child{pid=restarting(Child#child.pid)},
- NState = State#state{children = ChAfter3 ++ ChBefore},
- {try_again, Reason, replace_child(NChild,NState)}
- end;
-restart(one_for_all, Child, State) ->
- Children1 = del_child(Child#child.pid, State#state.children),
- Children2 = terminate_children(Children1, State#state.name),
- case start_children(Children2, State#state.name) of
- {ok, NChs} ->
- {ok, State#state{children = NChs}};
- {error, NChs, Reason} ->
- NChild = Child#child{pid=restarting(Child#child.pid)},
- NState = State#state{children = NChs},
- {try_again, Reason, replace_child(NChild,NState)}
- end.
-
-restarting(Pid) when is_pid(Pid) -> ?restarting(Pid);
-restarting(RPid) -> RPid.
-
-%%-----------------------------------------------------------------
-%% Func: terminate_children/2
-%% Args: Children = [child_rec()] in termination order
-%% SupName = {local, atom()} | {global, atom()} | {pid(),Mod}
-%% Returns: NChildren = [child_rec()] in
-%% startup order (reversed termination order)
-%%-----------------------------------------------------------------
-terminate_children(Children, SupName) ->
- terminate_children(Children, SupName, []).
-
-%% Temporary children should not be restarted and thus should
-%% be skipped when building the list of terminated children, although
-%% we do want them to be shut down as many functions from this module
-%% use this function to just clear everything.
-terminate_children([Child = #child{restart_type=temporary} | Children], SupName, Res) ->
- do_terminate(Child, SupName),
- terminate_children(Children, SupName, Res);
-terminate_children([Child | Children], SupName, Res) ->
- NChild = do_terminate(Child, SupName),
- terminate_children(Children, SupName, [NChild | Res]);
-terminate_children([], _SupName, Res) ->
- Res.
-
-do_terminate(Child, SupName) when is_pid(Child#child.pid) ->
- case shutdown(Child#child.pid, Child#child.shutdown) of
- ok ->
- ok;
- {error, normal} when not ?is_permanent(Child#child.restart_type) ->
- ok;
- {error, OtherReason} ->
- report_error(shutdown_error, OtherReason, Child, SupName)
- end,
- Child#child{pid = undefined};
-do_terminate(Child, _SupName) ->
- Child#child{pid = undefined}.
-
-%%-----------------------------------------------------------------
-%% Shutdowns a child. We must check the EXIT value
-%% of the child, because it might have died with another reason than
-%% the wanted. In that case we want to report the error. We put a
-%% monitor on the child an check for the 'DOWN' message instead of
-%% checking for the 'EXIT' message, because if we check the 'EXIT'
-%% message a "naughty" child, who does unlink(Sup), could hang the
-%% supervisor.
-%% Returns: ok | {error, OtherReason} (this should be reported)
-%%-----------------------------------------------------------------
-shutdown(Pid, brutal_kill) ->
- case monitor_child(Pid) of
- ok ->
- exit(Pid, kill),
- receive
- {'DOWN', _MRef, process, Pid, killed} ->
- ok;
- {'DOWN', _MRef, process, Pid, OtherReason} ->
- {error, OtherReason}
- end;
- {error, Reason} ->
- {error, Reason}
- end;
-shutdown(Pid, Time) ->
- case monitor_child(Pid) of
- ok ->
- exit(Pid, shutdown), %% Try to shutdown gracefully
- receive
- {'DOWN', _MRef, process, Pid, shutdown} ->
- ok;
- {'DOWN', _MRef, process, Pid, OtherReason} ->
- {error, OtherReason}
- after Time ->
- exit(Pid, kill), %% Force termination.
- receive
- {'DOWN', _MRef, process, Pid, OtherReason} ->
- {error, OtherReason}
- end
- end;
- {error, Reason} ->
- {error, Reason}
- end.
-
-%% Help function to shutdown/2 switches from link to monitor approach
-monitor_child(Pid) ->
-
- %% Do the monitor operation first so that if the child dies
- %% before the monitoring is done causing a 'DOWN'-message with
- %% reason noproc, we will get the real reason in the 'EXIT'-message
- %% unless a naughty child has already done unlink...
- erlang:monitor(process, Pid),
- unlink(Pid),
-
- receive
- %% If the child dies before the unlik we must empty
- %% the mail-box of the 'EXIT'-message and the 'DOWN'-message.
- {'EXIT', Pid, Reason} ->
- receive
- {'DOWN', _, process, Pid, _} ->
- {error, Reason}
- end
- after 0 ->
- %% If a naughty child did unlink and the child dies before
- %% monitor the result will be that shutdown/2 receives a
- %% 'DOWN'-message with reason noproc.
- %% If the child should die after the unlink there
- %% will be a 'DOWN'-message with a correct reason
- %% that will be handled in shutdown/2.
- ok
- end.
-
-
-%%-----------------------------------------------------------------
-%% Func: terminate_dynamic_children/3
-%% Args: Child = child_rec()
-%% Dynamics = ?DICT() | ?SET()
-%% SupName = {local, atom()} | {global, atom()} | {pid(),Mod}
-%% Returns: ok
-%%
-%%
-%% Shutdown all dynamic children. This happens when the supervisor is
-%% stopped. Because the supervisor can have millions of dynamic children, we
-%% can have an significative overhead here.
-%%-----------------------------------------------------------------
-terminate_dynamic_children(Child, Dynamics, SupName) ->
- {Pids, EStack0} = monitor_dynamic_children(Child, Dynamics),
- Sz = ?SETS:size(Pids),
- EStack = case Child#child.shutdown of
- brutal_kill ->
- ?SETS:fold(fun(P, _) -> exit(P, kill) end, ok, Pids),
- wait_dynamic_children(Child, Pids, Sz, undefined, EStack0);
- infinity ->
- ?SETS:fold(fun(P, _) -> exit(P, shutdown) end, ok, Pids),
- wait_dynamic_children(Child, Pids, Sz, undefined, EStack0);
- Time ->
- ?SETS:fold(fun(P, _) -> exit(P, shutdown) end, ok, Pids),
- TRef = erlang:start_timer(Time, self(), kill),
- wait_dynamic_children(Child, Pids, Sz, TRef, EStack0)
- end,
- %% Unroll stacked errors and report them
- ?DICT:fold(fun(Reason, Ls, _) ->
- report_error(shutdown_error, Reason,
- Child#child{pid=Ls}, SupName)
- end, ok, EStack).
-
-
-monitor_dynamic_children(#child{restart_type=temporary}, Dynamics) ->
- ?SETS:fold(fun(P, {Pids, EStack}) ->
- case monitor_child(P) of
- ok ->
- {?SETS:add_element(P, Pids), EStack};
- {error, normal} ->
- {Pids, EStack};
- {error, Reason} ->
- {Pids, ?DICT:append(Reason, P, EStack)}
- end
- end, {?SETS:new(), ?DICT:new()}, Dynamics);
-monitor_dynamic_children(#child{restart_type=RType}, Dynamics) ->
- ?DICT:fold(fun(P, _, {Pids, EStack}) when is_pid(P) ->
- case monitor_child(P) of
- ok ->
- {?SETS:add_element(P, Pids), EStack};
- {error, normal} when not ?is_permanent(RType) ->
- {Pids, EStack};
- {error, Reason} ->
- {Pids, ?DICT:append(Reason, P, EStack)}
- end;
- (?restarting(_), _, {Pids, EStack}) ->
- {Pids, EStack}
- end, {?SETS:new(), ?DICT:new()}, Dynamics).
-
-wait_dynamic_children(_Child, _Pids, 0, undefined, EStack) ->
- EStack;
-wait_dynamic_children(_Child, _Pids, 0, TRef, EStack) ->
- %% If the timer has expired before its cancellation, we must empty the
- %% mail-box of the 'timeout'-message.
- erlang:cancel_timer(TRef),
- receive
- {timeout, TRef, kill} ->
- EStack
- after 0 ->
- EStack
- end;
-wait_dynamic_children(#child{shutdown=brutal_kill} = Child, Pids, Sz,
- TRef, EStack) ->
- receive
- {'DOWN', _MRef, process, Pid, killed} ->
- wait_dynamic_children(Child, ?SETS:del_element(Pid, Pids), Sz-1,
- TRef, EStack);
-
- {'DOWN', _MRef, process, Pid, Reason} ->
- wait_dynamic_children(Child, ?SETS:del_element(Pid, Pids), Sz-1,
- TRef, ?DICT:append(Reason, Pid, EStack))
- end;
-wait_dynamic_children(#child{restart_type=RType} = Child, Pids, Sz,
- TRef, EStack) ->
- receive
- {'DOWN', _MRef, process, Pid, shutdown} ->
- wait_dynamic_children(Child, ?SETS:del_element(Pid, Pids), Sz-1,
- TRef, EStack);
-
- {'DOWN', _MRef, process, Pid, normal} when not ?is_permanent(RType) ->
- wait_dynamic_children(Child, ?SETS:del_element(Pid, Pids), Sz-1,
- TRef, EStack);
-
- {'DOWN', _MRef, process, Pid, Reason} ->
- wait_dynamic_children(Child, ?SETS:del_element(Pid, Pids), Sz-1,
- TRef, ?DICT:append(Reason, Pid, EStack));
-
- {timeout, TRef, kill} ->
- ?SETS:fold(fun(P, _) -> exit(P, kill) end, ok, Pids),
- wait_dynamic_children(Child, Pids, Sz-1, undefined, EStack)
- end.
-
-%%-----------------------------------------------------------------
-%% Child/State manipulating functions.
-%%-----------------------------------------------------------------
-
-%% Note we do not want to save the parameter list for temporary processes as
-%% they will not be restarted, and hence we do not need this information.
-%% Especially for dynamic children to simple_one_for_one supervisors
-%% it could become very costly as it is not uncommon to spawn
-%% very many such processes.
-save_child(#child{restart_type = temporary,
- mfargs = {M, F, _}} = Child, #state{children = Children} = State) ->
- State#state{children = [Child#child{mfargs = {M, F, undefined}} |Children]};
-save_child(Child, #state{children = Children} = State) ->
- State#state{children = [Child |Children]}.
-
-save_dynamic_child(temporary, Pid, _, #state{dynamics = Dynamics} = State) ->
- State#state{dynamics = ?SETS:add_element(Pid, dynamics_db(temporary, Dynamics))};
-save_dynamic_child(RestartType, Pid, Args, #state{dynamics = Dynamics} = State) ->
- State#state{dynamics = ?DICT:store(Pid, Args, dynamics_db(RestartType, Dynamics))}.
-
-dynamics_db(temporary, undefined) ->
- ?SETS:new();
-dynamics_db(_, undefined) ->
- ?DICT:new();
-dynamics_db(_,Dynamics) ->
- Dynamics.
-
-dynamic_child_args(Pid, Dynamics) ->
- case ?SETS:is_set(Dynamics) of
- true ->
- {ok, undefined};
- false ->
- ?DICT:find(Pid, Dynamics)
- end.
-
-state_del_child(#child{pid = Pid, restart_type = temporary}, State) when ?is_simple(State) ->
- NDynamics = ?SETS:del_element(Pid, dynamics_db(temporary, State#state.dynamics)),
- State#state{dynamics = NDynamics};
-state_del_child(#child{pid = Pid, restart_type = RType}, State) when ?is_simple(State) ->
- NDynamics = ?DICT:erase(Pid, dynamics_db(RType, State#state.dynamics)),
- State#state{dynamics = NDynamics};
-state_del_child(Child, State) ->
- NChildren = del_child(Child#child.name, State#state.children),
- State#state{children = NChildren}.
-
-del_child(Name, [Ch=#child{pid = ?restarting(_)}|_]=Chs) when Ch#child.name =:= Name ->
- Chs;
-del_child(Name, [Ch|Chs]) when Ch#child.name =:= Name, Ch#child.restart_type =:= temporary ->
- Chs;
-del_child(Name, [Ch|Chs]) when Ch#child.name =:= Name ->
- [Ch#child{pid = undefined} | Chs];
-del_child(Pid, [Ch|Chs]) when Ch#child.pid =:= Pid, Ch#child.restart_type =:= temporary ->
- Chs;
-del_child(Pid, [Ch|Chs]) when Ch#child.pid =:= Pid ->
- [Ch#child{pid = undefined} | Chs];
-del_child(Name, [Ch|Chs]) ->
- [Ch|del_child(Name, Chs)];
-del_child(_, []) ->
- [].
-
-%% Chs = [S4, S3, Ch, S1, S0]
-%% Ret: {[S4, S3, Ch], [S1, S0]}
-split_child(Name, Chs) ->
- split_child(Name, Chs, []).
-
-split_child(Name, [Ch|Chs], After) when Ch#child.name =:= Name ->
- {lists:reverse([Ch#child{pid = undefined} | After]), Chs};
-split_child(Pid, [Ch|Chs], After) when Ch#child.pid =:= Pid ->
- {lists:reverse([Ch#child{pid = undefined} | After]), Chs};
-split_child(Name, [Ch|Chs], After) ->
- split_child(Name, Chs, [Ch | After]);
-split_child(_, [], After) ->
- {lists:reverse(After), []}.
-
-get_child(Name, State) ->
- get_child(Name, State, false).
-get_child(Pid, State, AllowPid) when AllowPid, is_pid(Pid) ->
- get_dynamic_child(Pid, State);
-get_child(Name, State, _) ->
- lists:keysearch(Name, #child.name, State#state.children).
-
-get_dynamic_child(Pid, #state{children=[Child], dynamics=Dynamics}) ->
- DynamicsDb = dynamics_db(Child#child.restart_type, Dynamics),
- case is_dynamic_pid(Pid, DynamicsDb) of
- true ->
- {value, Child#child{pid=Pid}};
- false ->
- RPid = restarting(Pid),
- case is_dynamic_pid(RPid, DynamicsDb) of
- true ->
- {value, Child#child{pid=RPid}};
- false ->
- case erlang:is_process_alive(Pid) of
- true -> false;
- false -> {value, Child}
- end
- end
- end.
-
-is_dynamic_pid(Pid, Dynamics) ->
- case ?SETS:is_set(Dynamics) of
- true ->
- ?SETS:is_element(Pid, Dynamics);
- false ->
- ?DICT:is_key(Pid, Dynamics)
- end.
-
-replace_child(Child, State) ->
- Chs = do_replace_child(Child, State#state.children),
- State#state{children = Chs}.
-
-do_replace_child(Child, [Ch|Chs]) when Ch#child.name =:= Child#child.name ->
- [Child | Chs];
-do_replace_child(Child, [Ch|Chs]) ->
- [Ch|do_replace_child(Child, Chs)].
-
-remove_child(Child, State) ->
- Chs = lists:keydelete(Child#child.name, #child.name, State#state.children),
- State#state{children = Chs}.
-
-%%-----------------------------------------------------------------
-%% Func: init_state/4
-%% Args: SupName = {local, atom()} | {global, atom()} | self
-%% Type = {Strategy, MaxIntensity, Period}
-%% Strategy = one_for_one | one_for_all | simple_one_for_one |
-%% rest_for_one
-%% MaxIntensity = integer() >= 0
-%% Period = integer() > 0
-%% Mod :== atom()
-%% Args :== term()
-%% Purpose: Check that Type is of correct type (!)
-%% Returns: {ok, state()} | Error
-%%-----------------------------------------------------------------
-init_state(SupName, Type, Mod, Args) ->
- case catch init_state1(SupName, Type, Mod, Args) of
- {ok, State} ->
- {ok, State};
- Error ->
- Error
- end.
-
-init_state1(SupName, {Strategy, MaxIntensity, Period}, Mod, Args) ->
- validStrategy(Strategy),
- validIntensity(MaxIntensity),
- validPeriod(Period),
- {ok, #state{name = supname(SupName,Mod),
- strategy = Strategy,
- intensity = MaxIntensity,
- period = Period,
- module = Mod,
- args = Args}};
-init_state1(_SupName, Type, _, _) ->
- {invalid_type, Type}.
-
-validStrategy(simple_one_for_one) -> true;
-validStrategy(one_for_one) -> true;
-validStrategy(one_for_all) -> true;
-validStrategy(rest_for_one) -> true;
-validStrategy(What) -> throw({invalid_strategy, What}).
-
-validIntensity(Max) when is_integer(Max),
- Max >= 0 -> true;
-validIntensity(What) -> throw({invalid_intensity, What}).
-
-validPeriod(Period) when is_integer(Period),
- Period > 0 -> true;
-validPeriod(What) -> throw({invalid_period, What}).
-
-supname(self, Mod) -> {self(), Mod};
-supname(N, _) -> N.
-
-%%% ------------------------------------------------------
-%%% Check that the children start specification is valid.
-%%% Shall be a six (6) tuple
-%%% {Name, Func, RestartType, Shutdown, ChildType, Modules}
-%%% where Name is an atom
-%%% Func is {Mod, Fun, Args} == {atom(), atom(), list()}
-%%% RestartType is permanent | temporary | transient |
-%%% intrinsic | {permanent, Delay} |
-%%% {transient, Delay} | {intrinsic, Delay}
-%% where Delay >= 0
-%%% Shutdown = integer() > 0 | infinity | brutal_kill
-%%% ChildType = supervisor | worker
-%%% Modules = [atom()] | dynamic
-%%% Returns: {ok, [child_rec()]} | Error
-%%% ------------------------------------------------------
-
-check_startspec(Children) -> check_startspec(Children, []).
-
-check_startspec([ChildSpec|T], Res) ->
- case check_childspec(ChildSpec) of
- {ok, Child} ->
- case lists:keymember(Child#child.name, #child.name, Res) of
- true -> {duplicate_child_name, Child#child.name};
- false -> check_startspec(T, [Child | Res])
- end;
- Error -> Error
- end;
-check_startspec([], Res) ->
- {ok, lists:reverse(Res)}.
-
-check_childspec({Name, Func, RestartType, Shutdown, ChildType, Mods}) ->
- catch check_childspec(Name, Func, RestartType, Shutdown, ChildType, Mods);
-check_childspec(X) -> {invalid_child_spec, X}.
-
-check_childspec(Name, Func, RestartType, Shutdown, ChildType, Mods) ->
- validName(Name),
- validFunc(Func),
- validRestartType(RestartType),
- validChildType(ChildType),
- validShutdown(Shutdown, ChildType),
- validMods(Mods),
- {ok, #child{name = Name, mfargs = Func, restart_type = RestartType,
- shutdown = Shutdown, child_type = ChildType, modules = Mods}}.
-
-validChildType(supervisor) -> true;
-validChildType(worker) -> true;
-validChildType(What) -> throw({invalid_child_type, What}).
-
-validName(_Name) -> true.
-
-validFunc({M, F, A}) when is_atom(M),
- is_atom(F),
- is_list(A) -> true;
-validFunc(Func) -> throw({invalid_mfa, Func}).
-
-validRestartType(permanent) -> true;
-validRestartType(temporary) -> true;
-validRestartType(transient) -> true;
-validRestartType(intrinsic) -> true;
-validRestartType({permanent, Delay}) -> validDelay(Delay);
-validRestartType({intrinsic, Delay}) -> validDelay(Delay);
-validRestartType({transient, Delay}) -> validDelay(Delay);
-validRestartType(RestartType) -> throw({invalid_restart_type,
- RestartType}).
-
-validDelay(Delay) when is_number(Delay),
- Delay >= 0 -> true;
-validDelay(What) -> throw({invalid_delay, What}).
-
-validShutdown(Shutdown, _)
- when is_integer(Shutdown), Shutdown > 0 -> true;
-validShutdown(infinity, _) -> true;
-validShutdown(brutal_kill, _) -> true;
-validShutdown(Shutdown, _) -> throw({invalid_shutdown, Shutdown}).
-
-validMods(dynamic) -> true;
-validMods(Mods) when is_list(Mods) ->
- lists:foreach(fun(Mod) ->
- if
- is_atom(Mod) -> ok;
- true -> throw({invalid_module, Mod})
- end
- end,
- Mods);
-validMods(Mods) -> throw({invalid_modules, Mods}).
-
-%%% ------------------------------------------------------
-%%% Add a new restart and calculate if the max restart
-%%% intensity has been reached (in that case the supervisor
-%%% shall terminate).
-%%% All restarts accured inside the period amount of seconds
-%%% are kept in the #state.restarts list.
-%%% Returns: {ok, State'} | {terminate, State'}
-%%% ------------------------------------------------------
-
-add_restart(State) ->
- I = State#state.intensity,
- P = State#state.period,
- R = State#state.restarts,
- Now = erlang:now(),
- R1 = add_restart([Now|R], Now, P),
- State1 = State#state{restarts = R1},
- case length(R1) of
- CurI when CurI =< I ->
- {ok, State1};
- _ ->
- {terminate, State1}
- end.
-
-add_restart([R|Restarts], Now, Period) ->
- case inPeriod(R, Now, Period) of
- true ->
- [R|add_restart(Restarts, Now, Period)];
- _ ->
- []
- end;
-add_restart([], _, _) ->
- [].
-
-inPeriod(Time, Now, Period) ->
- case difference(Time, Now) of
- T when T > Period ->
- false;
- _ ->
- true
- end.
-
-%%
-%% Time = {MegaSecs, Secs, MicroSecs} (NOTE: MicroSecs is ignored)
-%% Calculate the time elapsed in seconds between two timestamps.
-%% If MegaSecs is equal just subtract Secs.
-%% Else calculate the Mega difference and add the Secs difference,
-%% note that Secs difference can be negative, e.g.
-%% {827, 999999, 676} diff {828, 1, 653753} == > 2 secs.
-%%
-difference({TimeM, TimeS, _}, {CurM, CurS, _}) when CurM > TimeM ->
- ((CurM - TimeM) * 1000000) + (CurS - TimeS);
-difference({_, TimeS, _}, {_, CurS, _}) ->
- CurS - TimeS.
-
-%%% ------------------------------------------------------
-%%% Error and progress reporting.
-%%% ------------------------------------------------------
-
-report_error(Error, Reason, Child, SupName) ->
- ErrorMsg = [{supervisor, SupName},
- {errorContext, Error},
- {reason, Reason},
- {offender, extract_child(Child)}],
- error_logger:error_report(supervisor_report, ErrorMsg).
-
-
-extract_child(Child) when is_list(Child#child.pid) ->
- [{nb_children, length(Child#child.pid)},
- {name, Child#child.name},
- {mfargs, Child#child.mfargs},
- {restart_type, Child#child.restart_type},
- {shutdown, Child#child.shutdown},
- {child_type, Child#child.child_type}];
-extract_child(Child) ->
- [{pid, Child#child.pid},
- {name, Child#child.name},
- {mfargs, Child#child.mfargs},
- {restart_type, Child#child.restart_type},
- {shutdown, Child#child.shutdown},
- {child_type, Child#child.child_type}].
-
-report_progress(Child, SupName) ->
- Progress = [{supervisor, SupName},
- {started, extract_child(Child)}],
- error_logger:info_report(progress, Progress).
diff --git a/src/tcp_acceptor.erl b/src/tcp_acceptor.erl
deleted file mode 100644
index 75f216c3dd..0000000000
--- a/src/tcp_acceptor.erl
+++ /dev/null
@@ -1,105 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(tcp_acceptor).
-
--behaviour(gen_server).
-
--export([start_link/2]).
-
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
-
--record(state, {callback, sock, ref}).
-
-%%--------------------------------------------------------------------
-
-start_link(Callback, LSock) ->
- gen_server:start_link(?MODULE, {Callback, LSock}, []).
-
-%%--------------------------------------------------------------------
-
-init({Callback, LSock}) ->
- gen_server:cast(self(), accept),
- {ok, #state{callback=Callback, sock=LSock}}.
-
-handle_call(_Request, _From, State) ->
- {noreply, State}.
-
-handle_cast(accept, State) ->
- ok = file_handle_cache:obtain(),
- accept(State);
-
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info({inet_async, LSock, Ref, {ok, Sock}},
- State = #state{callback={M,F,A}, sock=LSock, ref=Ref}) ->
-
- %% patch up the socket so it looks like one we got from
- %% gen_tcp:accept/1
- {ok, Mod} = inet_db:lookup_socket(LSock),
- inet_db:register_socket(Sock, Mod),
-
- %% handle
- case tune_buffer_size(Sock) of
- ok -> file_handle_cache:transfer(
- apply(M, F, A ++ [Sock])),
- ok = file_handle_cache:obtain();
- {error, enotconn} -> catch port_close(Sock);
- {error, Err} -> {ok, {IPAddress, Port}} = inet:sockname(LSock),
- error_logger:error_msg(
- "failed to tune buffer size of "
- "connection accepted on ~s:~p - ~s~n",
- [rabbit_misc:ntoab(IPAddress), Port,
- rabbit_misc:format_inet_error(Err)]),
- catch port_close(Sock)
- end,
-
- %% accept more
- accept(State);
-
-handle_info({inet_async, LSock, Ref, {error, Reason}},
- State=#state{sock=LSock, ref=Ref}) ->
- case Reason of
- closed -> {stop, normal, State}; %% listening socket closed
- econnaborted -> accept(State); %% client sent RST before we accepted
- _ -> {stop, {accept_failed, Reason}, State}
- end;
-
-handle_info(_Info, State) ->
- {noreply, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-%%--------------------------------------------------------------------
-
-accept(State = #state{sock=LSock}) ->
- case prim_inet:async_accept(LSock, -1) of
- {ok, Ref} -> {noreply, State#state{ref=Ref}};
- Error -> {stop, {cannot_accept, Error}, State}
- end.
-
-tune_buffer_size(Sock) ->
- case inet:getopts(Sock, [sndbuf, recbuf, buffer]) of
- {ok, BufSizes} -> BufSz = lists:max([Sz || {_Opt, Sz} <- BufSizes]),
- inet:setopts(Sock, [{buffer, BufSz}]);
- Error -> Error
- end.
diff --git a/src/tcp_acceptor_sup.erl b/src/tcp_acceptor_sup.erl
deleted file mode 100644
index 22c886e0ab..0000000000
--- a/src/tcp_acceptor_sup.erl
+++ /dev/null
@@ -1,43 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(tcp_acceptor_sup).
-
--behaviour(supervisor).
-
--export([start_link/2]).
-
--export([init/1]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--type(mfargs() :: {atom(), atom(), [any()]}).
-
--spec(start_link/2 :: (atom(), mfargs()) -> rabbit_types:ok_pid_or_error()).
-
--endif.
-
-%%----------------------------------------------------------------------------
-
-start_link(Name, Callback) ->
- supervisor:start_link({local,Name}, ?MODULE, Callback).
-
-init(Callback) ->
- {ok, {{simple_one_for_one, 10, 10},
- [{tcp_acceptor, {tcp_acceptor, start_link, [Callback]},
- transient, brutal_kill, worker, [tcp_acceptor]}]}}.
diff --git a/src/tcp_listener.erl b/src/tcp_listener.erl
index 307249af09..dcb607ccdd 100644
--- a/src/tcp_listener.erl
+++ b/src/tcp_listener.erl
@@ -16,14 +16,44 @@
-module(tcp_listener).
+%% Represents a running TCP listener (a process that listens for inbound
+%% TCP or TLS connections). Every protocol supported typically has one
+%% or two listeners, plain TCP and (optionally) TLS, but there can
+%% be more, e.g. when multiple network interfaces are involved.
+%%
+%% A listener has 6 properties (is a tuple of 6):
+%%
+%% * IP address
+%% * Port
+%% * Node
+%% * Label (human-friendly name, e.g. AMQP 0-9-1)
+%% * Startup callback
+%% * Shutdown callback
+%%
+%% Listeners use Ranch in embedded mode to accept and "bridge" client
+%% connections with protocol entry points such as rabbit_reader.
+%%
+%% Listeners are tracked in a Mnesia table so that they can be
+%%
+%% * Shut down
+%% * Listed (e.g. in the management UI)
+%%
+%% Every tcp_listener process has callbacks that are executed on start
+%% and termination. Those must take care of listener registration
+%% among other things.
+%%
+%% Listeners are supervised by tcp_listener_sup (one supervisor per protocol).
+%%
+%% See also rabbit_networking and tcp_listener_sup.
+
-behaviour(gen_server).
--export([start_link/8]).
+-export([start_link/5]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
terminate/2, code_change/3]).
--record(state, {sock, on_startup, on_shutdown, label}).
+-record(state, {on_startup, on_shutdown, label, ip, port}).
%%----------------------------------------------------------------------------
@@ -31,52 +61,31 @@
-type(mfargs() :: {atom(), atom(), [any()]}).
--spec(start_link/8 ::
- (inet:ip_address(), inet:port_number(), [gen_tcp:listen_option()],
- integer(), atom(), mfargs(), mfargs(), string()) ->
+-spec(start_link/5 ::
+ (inet:ip_address(), inet:port_number(),
+ mfargs(), mfargs(), string()) ->
rabbit_types:ok_pid_or_error()).
-endif.
%%--------------------------------------------------------------------
-start_link(IPAddress, Port, SocketOpts,
- ConcurrentAcceptorCount, AcceptorSup,
+start_link(IPAddress, Port,
OnStartup, OnShutdown, Label) ->
gen_server:start_link(
- ?MODULE, {IPAddress, Port, SocketOpts,
- ConcurrentAcceptorCount, AcceptorSup,
+ ?MODULE, {IPAddress, Port,
OnStartup, OnShutdown, Label}, []).
%%--------------------------------------------------------------------
-init({IPAddress, Port, SocketOpts,
- ConcurrentAcceptorCount, AcceptorSup,
- {M,F,A} = OnStartup, OnShutdown, Label}) ->
+init({IPAddress, Port, {M,F,A} = OnStartup, OnShutdown, Label}) ->
process_flag(trap_exit, true),
- case gen_tcp:listen(Port, SocketOpts ++ [{ip, IPAddress},
- {active, false}]) of
- {ok, LSock} ->
- lists:foreach(fun (_) ->
- {ok, _APid} = supervisor:start_child(
- AcceptorSup, [LSock])
- end,
- lists:duplicate(ConcurrentAcceptorCount, dummy)),
- {ok, {LIPAddress, LPort}} = inet:sockname(LSock),
- error_logger:info_msg(
- "started ~s on ~s:~p~n",
- [Label, rabbit_misc:ntoab(LIPAddress), LPort]),
- apply(M, F, A ++ [IPAddress, Port]),
- {ok, #state{sock = LSock,
- on_startup = OnStartup, on_shutdown = OnShutdown,
- label = Label}};
- {error, Reason} ->
- error_logger:error_msg(
- "failed to start ~s on ~s:~p - ~p (~s)~n",
- [Label, rabbit_misc:ntoab(IPAddress), Port,
- Reason, inet:format_error(Reason)]),
- {stop, {cannot_listen, IPAddress, Port, Reason}}
- end.
+ error_logger:info_msg(
+ "started ~s on ~s:~p~n",
+ [Label, rabbit_misc:ntoab(IPAddress), Port]),
+ apply(M, F, A ++ [IPAddress, Port]),
+ {ok, #state{on_startup = OnStartup, on_shutdown = OnShutdown,
+ label = Label, ip=IPAddress, port=Port}}.
handle_call(_Request, _From, State) ->
{noreply, State}.
@@ -87,9 +96,7 @@ handle_cast(_Msg, State) ->
handle_info(_Info, State) ->
{noreply, State}.
-terminate(_Reason, #state{sock=LSock, on_shutdown = {M,F,A}, label=Label}) ->
- {ok, {IPAddress, Port}} = inet:sockname(LSock),
- gen_tcp:close(LSock),
+terminate(_Reason, #state{on_shutdown = {M,F,A}, label=Label, ip=IPAddress, port=Port}) ->
error_logger:info_msg("stopped ~s on ~s:~p~n",
[Label, rabbit_misc:ntoab(IPAddress), Port]),
apply(M, F, A ++ [IPAddress, Port]).
diff --git a/src/tcp_listener_sup.erl b/src/tcp_listener_sup.erl
index 94bdecc28c..4df8a45ac1 100644
--- a/src/tcp_listener_sup.erl
+++ b/src/tcp_listener_sup.erl
@@ -16,9 +16,16 @@
-module(tcp_listener_sup).
+%% Supervises TCP listeners. There is a separate supervisor for every
+%% protocol. In case of AMQP 0-9-1, it resides under rabbit_sup. Plugins
+%% that provide protocol support (e.g. STOMP) have an instance of this supervisor in their
+%% app supervision tree.
+%%
+%% See also rabbit_networking and tcp_listener.
+
-behaviour(supervisor).
--export([start_link/7, start_link/8]).
+-export([start_link/9, start_link/10]).
-export([init/1]).
@@ -28,43 +35,41 @@
-type(mfargs() :: {atom(), atom(), [any()]}).
--spec(start_link/7 ::
- (inet:ip_address(), inet:port_number(), [gen_tcp:listen_option()],
- mfargs(), mfargs(), mfargs(), string()) ->
+-spec(start_link/9 ::
+ (inet:ip_address(), inet:port_number(), module(), [gen_tcp:listen_option()],
+ module(), any(), mfargs(), mfargs(), string()) ->
rabbit_types:ok_pid_or_error()).
--spec(start_link/8 ::
- (inet:ip_address(), inet:port_number(), [gen_tcp:listen_option()],
- mfargs(), mfargs(), mfargs(), integer(), string()) ->
+-spec(start_link/10 ::
+ (inet:ip_address(), inet:port_number(), module(), [gen_tcp:listen_option()],
+ module(), any(), mfargs(), mfargs(), integer(), string()) ->
rabbit_types:ok_pid_or_error()).
-endif.
%%----------------------------------------------------------------------------
-start_link(IPAddress, Port, SocketOpts, OnStartup, OnShutdown,
- AcceptCallback, Label) ->
- start_link(IPAddress, Port, SocketOpts, OnStartup, OnShutdown,
- AcceptCallback, 1, Label).
+start_link(IPAddress, Port, Transport, SocketOpts, ProtoSup, ProtoOpts, OnStartup, OnShutdown,
+ Label) ->
+ start_link(IPAddress, Port, Transport, SocketOpts, ProtoSup, ProtoOpts, OnStartup, OnShutdown,
+ 1, Label).
-start_link(IPAddress, Port, SocketOpts, OnStartup, OnShutdown,
- AcceptCallback, ConcurrentAcceptorCount, Label) ->
+start_link(IPAddress, Port, Transport, SocketOpts, ProtoSup, ProtoOpts, OnStartup, OnShutdown,
+ ConcurrentAcceptorCount, Label) ->
supervisor:start_link(
- ?MODULE, {IPAddress, Port, SocketOpts, OnStartup, OnShutdown,
- AcceptCallback, ConcurrentAcceptorCount, Label}).
+ ?MODULE, {IPAddress, Port, Transport, SocketOpts, ProtoSup, ProtoOpts, OnStartup, OnShutdown,
+ ConcurrentAcceptorCount, Label}).
-init({IPAddress, Port, SocketOpts, OnStartup, OnShutdown,
- AcceptCallback, ConcurrentAcceptorCount, Label}) ->
- %% This is gross. The tcp_listener needs to know about the
- %% tcp_acceptor_sup, and the only way I can think of accomplishing
- %% that without jumping through hoops is to register the
- %% tcp_acceptor_sup.
- Name = rabbit_misc:tcp_name(tcp_acceptor_sup, IPAddress, Port),
- {ok, {{one_for_all, 10, 10},
- [{tcp_acceptor_sup, {tcp_acceptor_sup, start_link,
- [Name, AcceptCallback]},
- transient, infinity, supervisor, [tcp_acceptor_sup]},
- {tcp_listener, {tcp_listener, start_link,
- [IPAddress, Port, SocketOpts,
- ConcurrentAcceptorCount, Name,
- OnStartup, OnShutdown, Label]},
- transient, 16#ffffffff, worker, [tcp_listener]}]}}.
+init({IPAddress, Port, Transport, SocketOpts, ProtoSup, ProtoOpts, OnStartup, OnShutdown,
+ ConcurrentAcceptorCount, Label}) ->
+ {ok, AckTimeout} = application:get_env(rabbit, ssl_handshake_timeout),
+ {ok, {{one_for_all, 10, 10}, [
+ ranch:child_spec({acceptor, IPAddress, Port}, ConcurrentAcceptorCount,
+ Transport, [{port, Port}, {ip, IPAddress},
+ {max_connections, infinity},
+ {ack_timeout, AckTimeout},
+ {connection_type, supervisor}|SocketOpts],
+ ProtoSup, ProtoOpts),
+ {tcp_listener, {tcp_listener, start_link,
+ [IPAddress, Port,
+ OnStartup, OnShutdown, Label]},
+ transient, 16#ffffffff, worker, [tcp_listener]}]}}.
diff --git a/src/time_compat.erl b/src/time_compat.erl
deleted file mode 100644
index b87c6cc550..0000000000
--- a/src/time_compat.erl
+++ /dev/null
@@ -1,305 +0,0 @@
-%%
-%% %CopyrightBegin%
-%%
-%% Copyright Ericsson AB 2014-2015. All Rights Reserved.
-%%
-%% Licensed under the Apache License, Version 2.0 (the "License");
-%% you may not use this file except in compliance with the License.
-%% You may obtain a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS,
-%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-%% See the License for the specific language governing permissions and
-%% limitations under the License.
-%%
-%% %CopyrightEnd%
-%%
-
-%%
-%% If your code need to be able to execute on ERTS versions both
-%% earlier and later than 7.0, the best approach is to use the new
-%% time API introduced in ERTS 7.0 and implement a fallback
-%% solution using the old primitives to be used on old ERTS
-%% versions. This way your code can automatically take advantage
-%% of the improvements in the API when available. This is an
-%% example of how to implement such an API, but it can be used
-%% as is if you want to. Just add (a preferrably renamed version of)
-%% this module to your project, and call the API via this module
-%% instead of calling the BIFs directly.
-%%
-
--module(time_compat).
-
-%% We don't want warnings about the use of erlang:now/0 in
-%% this module.
--compile(nowarn_deprecated_function).
-%%
-%% We don't use
-%% -compile({nowarn_deprecated_function, [{erlang, now, 0}]}).
-%% since this will produce warnings when compiled on systems
-%% where it has not yet been deprecated.
-%%
-
--export([monotonic_time/0,
- monotonic_time/1,
- erlang_system_time/0,
- erlang_system_time/1,
- os_system_time/0,
- os_system_time/1,
- time_offset/0,
- time_offset/1,
- convert_time_unit/3,
- timestamp/0,
- unique_integer/0,
- unique_integer/1,
- monitor/2,
- system_info/1,
- system_flag/2]).
-
-monotonic_time() ->
- try
- erlang:monotonic_time()
- catch
- error:undef ->
- %% Use Erlang system time as monotonic time
- erlang_system_time_fallback()
- end.
-
-monotonic_time(Unit) ->
- try
- erlang:monotonic_time(Unit)
- catch
- error:badarg ->
- erlang:error(badarg, [Unit]);
- error:undef ->
- %% Use Erlang system time as monotonic time
- STime = erlang_system_time_fallback(),
- try
- convert_time_unit_fallback(STime, native, Unit)
- catch
- error:bad_time_unit -> erlang:error(badarg, [Unit])
- end
- end.
-
-erlang_system_time() ->
- try
- erlang:system_time()
- catch
- error:undef ->
- erlang_system_time_fallback()
- end.
-
-erlang_system_time(Unit) ->
- try
- erlang:system_time(Unit)
- catch
- error:badarg ->
- erlang:error(badarg, [Unit]);
- error:undef ->
- STime = erlang_system_time_fallback(),
- try
- convert_time_unit_fallback(STime, native, Unit)
- catch
- error:bad_time_unit -> erlang:error(badarg, [Unit])
- end
- end.
-
-os_system_time() ->
- try
- os:system_time()
- catch
- error:undef ->
- os_system_time_fallback()
- end.
-
-os_system_time(Unit) ->
- try
- os:system_time(Unit)
- catch
- error:badarg ->
- erlang:error(badarg, [Unit]);
- error:undef ->
- STime = os_system_time_fallback(),
- try
- convert_time_unit_fallback(STime, native, Unit)
- catch
- error:bad_time_unit -> erlang:error(badarg, [Unit])
- end
- end.
-
-time_offset() ->
- try
- erlang:time_offset()
- catch
- error:undef ->
- %% Erlang system time and Erlang monotonic
- %% time are always aligned
- 0
- end.
-
-time_offset(Unit) ->
- try
- erlang:time_offset(Unit)
- catch
- error:badarg ->
- erlang:error(badarg, [Unit]);
- error:undef ->
- try
- _ = integer_time_unit(Unit)
- catch
- error:bad_time_unit -> erlang:error(badarg, [Unit])
- end,
- %% Erlang system time and Erlang monotonic
- %% time are always aligned
- 0
- end.
-
-convert_time_unit(Time, FromUnit, ToUnit) ->
- try
- erlang:convert_time_unit(Time, FromUnit, ToUnit)
- catch
- error:undef ->
- try
- convert_time_unit_fallback(Time, FromUnit, ToUnit)
- catch
- _:_ ->
- erlang:error(badarg, [Time, FromUnit, ToUnit])
- end;
- error:Error ->
- erlang:error(Error, [Time, FromUnit, ToUnit])
- end.
-
-timestamp() ->
- try
- erlang:timestamp()
- catch
- error:undef ->
- erlang:now()
- end.
-
-unique_integer() ->
- try
- erlang:unique_integer()
- catch
- error:undef ->
- {MS, S, US} = erlang:now(),
- (MS*1000000+S)*1000000+US
- end.
-
-unique_integer(Modifiers) ->
- try
- erlang:unique_integer(Modifiers)
- catch
- error:badarg ->
- erlang:error(badarg, [Modifiers]);
- error:undef ->
- case is_valid_modifier_list(Modifiers) of
- true ->
- %% now() converted to an integer
- %% fullfill the requirements of
- %% all modifiers: unique, positive,
- %% and monotonic...
- {MS, S, US} = erlang:now(),
- (MS*1000000+S)*1000000+US;
- false ->
- erlang:error(badarg, [Modifiers])
- end
- end.
-
-monitor(Type, Item) ->
- try
- erlang:monitor(Type, Item)
- catch
- error:Error ->
- case {Error, Type, Item} of
- {badarg, time_offset, clock_service} ->
- %% Time offset is final and will never change.
- %% Return a dummy reference, there will never
- %% be any need for 'CHANGE' messages...
- make_ref();
- _ ->
- erlang:error(Error, [Type, Item])
- end
- end.
-
-system_info(Item) ->
- try
- erlang:system_info(Item)
- catch
- error:badarg ->
- case Item of
- time_correction ->
- case erlang:system_info(tolerant_timeofday) of
- enabled -> true;
- disabled -> false
- end;
- time_warp_mode ->
- no_time_warp;
- time_offset ->
- final;
- NotSupArg when NotSupArg == os_monotonic_time_source;
- NotSupArg == os_system_time_source;
- NotSupArg == start_time;
- NotSupArg == end_time ->
- %% Cannot emulate this...
- erlang:error(notsup, [NotSupArg]);
- _ ->
- erlang:error(badarg, [Item])
- end;
- error:Error ->
- erlang:error(Error, [Item])
- end.
-
-system_flag(Flag, Value) ->
- try
- erlang:system_flag(Flag, Value)
- catch
- error:Error ->
- case {Error, Flag, Value} of
- {badarg, time_offset, finalize} ->
- %% Time offset is final
- final;
- _ ->
- erlang:error(Error, [Flag, Value])
- end
- end.
-
-%%
-%% Internal functions
-%%
-
-integer_time_unit(native) -> 1000*1000;
-integer_time_unit(nano_seconds) -> 1000*1000*1000;
-integer_time_unit(micro_seconds) -> 1000*1000;
-integer_time_unit(milli_seconds) -> 1000;
-integer_time_unit(seconds) -> 1;
-integer_time_unit(I) when is_integer(I), I > 0 -> I;
-integer_time_unit(BadRes) -> erlang:error(bad_time_unit, [BadRes]).
-
-erlang_system_time_fallback() ->
- {MS, S, US} = erlang:now(),
- (MS*1000000+S)*1000000+US.
-
-os_system_time_fallback() ->
- {MS, S, US} = os:timestamp(),
- (MS*1000000+S)*1000000+US.
-
-convert_time_unit_fallback(Time, FromUnit, ToUnit) ->
- FU = integer_time_unit(FromUnit),
- TU = integer_time_unit(ToUnit),
- case Time < 0 of
- true -> TU*Time - (FU - 1);
- false -> TU*Time
- end div FU.
-
-is_valid_modifier_list([positive|Ms]) ->
- is_valid_modifier_list(Ms);
-is_valid_modifier_list([monotonic|Ms]) ->
- is_valid_modifier_list(Ms);
-is_valid_modifier_list([]) ->
- true;
-is_valid_modifier_list(_) ->
- false.
diff --git a/src/vm_memory_monitor.erl b/src/vm_memory_monitor.erl
index 1d98843365..e599f50103 100644
--- a/src/vm_memory_monitor.erl
+++ b/src/vm_memory_monitor.erl
@@ -49,6 +49,7 @@
%% wrong. Scale by vm_memory_high_watermark in configuration to get a
%% sensible value.
-define(MEMORY_SIZE_FOR_UNKNOWN_OS, 1073741824).
+-define(DEFAULT_VM_MEMORY_HIGH_WATERMARK, 0.4).
-record(state, {total_memory,
memory_limit,
@@ -63,7 +64,7 @@
-ifdef(use_specs).
--type(vm_memory_high_watermark() :: (float() | {'absolute', integer()})).
+-type(vm_memory_high_watermark() :: (float() | {'absolute', integer() | string()})).
-spec(start_link/1 :: (float()) -> rabbit_types:ok_pid_or_error()).
-spec(start_link/3 :: (float(), fun ((any()) -> 'ok'),
fun ((any()) -> 'ok')) -> rabbit_types:ok_pid_or_error()).
@@ -208,7 +209,7 @@ set_mem_limits(State, MemLimit) ->
_ ->
TotalMemory
end,
- MemLim = interpret_limit(MemLimit, UsableMemory),
+ MemLim = interpret_limit(parse_mem_limit(MemLimit), UsableMemory),
error_logger:info_msg("Memory limit set to ~pMB of ~pMB total.~n",
[trunc(MemLim/?ONE_MB), trunc(TotalMemory/?ONE_MB)]),
internal_update(State #state { total_memory = TotalMemory,
@@ -220,6 +221,20 @@ interpret_limit({'absolute', MemLim}, UsableMemory) ->
interpret_limit(MemFraction, UsableMemory) ->
trunc(MemFraction * UsableMemory).
+
+parse_mem_limit({absolute, Limit}) ->
+ case rabbit_resource_monitor_misc:parse_information_unit(Limit) of
+ {ok, ParsedLimit} -> {absolute, ParsedLimit};
+ {error, parse_error} ->
+ rabbit_log:error("Unable to parse vm_memory_high_watermark value ~p", [Limit]),
+ ?DEFAULT_VM_MEMORY_HIGH_WATERMARK
+ end;
+parse_mem_limit(Relative) when is_float(Relative), Relative < 1 ->
+ Relative;
+parse_mem_limit(_) ->
+ ?DEFAULT_VM_MEMORY_HIGH_WATERMARK.
+
+
internal_update(State = #state { memory_limit = MemLimit,
alarmed = Alarmed,
alarm_funs = {AlarmSet, AlarmClear} }) ->
diff --git a/src/worker_pool.erl b/src/worker_pool.erl
index 99b227e392..ffe74ae634 100644
--- a/src/worker_pool.erl
+++ b/src/worker_pool.erl
@@ -49,8 +49,12 @@
-behaviour(gen_server2).
--export([start_link/0, submit/1, submit/2, submit_async/1, ready/1,
- idle/1]).
+-export([start_link/1,
+ submit/1, submit/2, submit/3,
+ submit_async/1, submit_async/2,
+ ready/2,
+ idle/2,
+ default_pool/0]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
terminate/2, code_change/3]).
@@ -61,18 +65,20 @@
-type(mfargs() :: {atom(), atom(), [any()]}).
--spec(start_link/0 :: () -> {'ok', pid()} | {'error', any()}).
+-spec(start_link/1 :: (atom()) -> {'ok', pid()} | {'error', any()}).
-spec(submit/1 :: (fun (() -> A) | mfargs()) -> A).
-spec(submit/2 :: (fun (() -> A) | mfargs(), 'reuse' | 'single') -> A).
+-spec(submit/3 :: (atom(), fun (() -> A) | mfargs(), 'reuse' | 'single') -> A).
-spec(submit_async/1 :: (fun (() -> any()) | mfargs()) -> 'ok').
--spec(ready/1 :: (pid()) -> 'ok').
--spec(idle/1 :: (pid()) -> 'ok').
+-spec(ready/2 :: (atom(), pid()) -> 'ok').
+-spec(idle/2 :: (atom(), pid()) -> 'ok').
+-spec(default_pool/0 :: () -> atom()).
-endif.
%%----------------------------------------------------------------------------
--define(SERVER, ?MODULE).
+-define(DEFAULT_POOL, ?MODULE).
-define(HIBERNATE_AFTER_MIN, 1000).
-define(DESIRED_HIBERNATE, 10000).
@@ -80,25 +86,32 @@
%%----------------------------------------------------------------------------
-start_link() -> gen_server2:start_link({local, ?SERVER}, ?MODULE, [],
- [{timeout, infinity}]).
+start_link(Name) -> gen_server2:start_link({local, Name}, ?MODULE, [],
+ [{timeout, infinity}]).
submit(Fun) ->
- submit(Fun, reuse).
+ submit(?DEFAULT_POOL, Fun, reuse).
%% ProcessModel =:= single is for working around the mnesia_locker bug.
submit(Fun, ProcessModel) ->
+ submit(?DEFAULT_POOL, Fun, ProcessModel).
+
+submit(Server, Fun, ProcessModel) ->
case get(worker_pool_worker) of
true -> worker_pool_worker:run(Fun);
- _ -> Pid = gen_server2:call(?SERVER, {next_free, self()}, infinity),
+ _ -> Pid = gen_server2:call(Server, {next_free, self()}, infinity),
worker_pool_worker:submit(Pid, Fun, ProcessModel)
end.
-submit_async(Fun) -> gen_server2:cast(?SERVER, {run_async, Fun}).
+submit_async(Fun) -> submit_async(?DEFAULT_POOL, Fun).
+
+submit_async(Server, Fun) -> gen_server2:cast(Server, {run_async, Fun}).
+
+ready(Server, WPid) -> gen_server2:cast(Server, {ready, WPid}).
-ready(WPid) -> gen_server2:cast(?SERVER, {ready, WPid}).
+idle(Server, WPid) -> gen_server2:cast(Server, {idle, WPid}).
-idle(WPid) -> gen_server2:cast(?SERVER, {idle, WPid}).
+default_pool() -> ?DEFAULT_POOL.
%%----------------------------------------------------------------------------
diff --git a/src/worker_pool_sup.erl b/src/worker_pool_sup.erl
index 99afd91ea5..3043764e05 100644
--- a/src/worker_pool_sup.erl
+++ b/src/worker_pool_sup.erl
@@ -18,7 +18,7 @@
-behaviour(supervisor).
--export([start_link/0, start_link/1]).
+-export([start_link/0, start_link/1, start_link/2]).
-export([init/1]).
@@ -28,26 +28,29 @@
-spec(start_link/0 :: () -> rabbit_types:ok_pid_or_error()).
-spec(start_link/1 :: (non_neg_integer()) -> rabbit_types:ok_pid_or_error()).
+-spec(start_link/2 :: (non_neg_integer(), atom())
+ -> rabbit_types:ok_pid_or_error()).
-endif.
%%----------------------------------------------------------------------------
--define(SERVER, ?MODULE).
-
-%%----------------------------------------------------------------------------
-
start_link() ->
start_link(erlang:system_info(schedulers)).
start_link(WCount) ->
- supervisor:start_link({local, ?SERVER}, ?MODULE, [WCount]).
+ start_link(WCount, worker_pool:default_pool()).
+
+start_link(WCount, PoolName) ->
+ SupName = list_to_atom(atom_to_list(PoolName) ++ "_sup"),
+ supervisor:start_link({local, SupName}, ?MODULE, [WCount, PoolName]).
%%----------------------------------------------------------------------------
-init([WCount]) ->
+init([WCount, PoolName]) ->
{ok, {{one_for_one, 10, 10},
- [{worker_pool, {worker_pool, start_link, []}, transient,
+ [{worker_pool, {worker_pool, start_link, [PoolName]}, transient,
16#ffffffff, worker, [worker_pool]} |
- [{N, {worker_pool_worker, start_link, []}, transient, 16#ffffffff,
- worker, [worker_pool_worker]} || N <- lists:seq(1, WCount)]]}}.
+ [{N, {worker_pool_worker, start_link, [PoolName]}, transient,
+ 16#ffffffff, worker, [worker_pool_worker]}
+ || N <- lists:seq(1, WCount)]]}}.
diff --git a/src/worker_pool_worker.erl b/src/worker_pool_worker.erl
index 6e66d8518e..8bfcb5e6c7 100644
--- a/src/worker_pool_worker.erl
+++ b/src/worker_pool_worker.erl
@@ -23,7 +23,8 @@
-behaviour(gen_server2).
--export([start_link/0, next_job_from/2, submit/3, submit_async/2, run/1]).
+-export([start_link/1, next_job_from/2, submit/3, submit_async/2,
+ run/1]).
-export([set_maximum_since_use/2]).
@@ -36,7 +37,7 @@
-type(mfargs() :: {atom(), atom(), [any()]}).
--spec(start_link/0 :: () -> {'ok', pid()} | {'error', any()}).
+-spec(start_link/1 :: (atom) -> {'ok', pid()} | {'error', any()}).
-spec(next_job_from/2 :: (pid(), pid()) -> 'ok').
-spec(submit/3 :: (pid(), fun (() -> A) | mfargs(), 'reuse' | 'single') -> A).
-spec(submit_async/2 :: (pid(), fun (() -> any()) | mfargs()) -> 'ok').
@@ -52,8 +53,8 @@
%%----------------------------------------------------------------------------
-start_link() ->
- gen_server2:start_link(?MODULE, [], [{timeout, infinity}]).
+start_link(PoolName) ->
+ gen_server2:start_link(?MODULE, [PoolName], [{timeout, infinity}]).
next_job_from(Pid, CPid) ->
gen_server2:cast(Pid, {next_job_from, CPid}).
@@ -86,11 +87,12 @@ run(Fun, single) ->
%%----------------------------------------------------------------------------
-init([]) ->
+init([PoolName]) ->
ok = file_handle_cache:register_callback(?MODULE, set_maximum_since_use,
[self()]),
- ok = worker_pool:ready(self()),
+ ok = worker_pool:ready(PoolName, self()),
put(worker_pool_worker, true),
+ put(worker_pool_name, PoolName),
{ok, undefined, hibernate,
{backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
@@ -104,7 +106,7 @@ handle_call({submit, Fun, CPid, ProcessModel}, From, undefined) ->
handle_call({submit, Fun, CPid, ProcessModel}, From, {from, CPid, MRef}) ->
erlang:demonitor(MRef),
gen_server2:reply(From, run(Fun, ProcessModel)),
- ok = worker_pool:idle(self()),
+ ok = worker_pool:idle(get(worker_pool_name), self()),
{noreply, undefined, hibernate};
handle_call(Msg, _From, State) ->
@@ -116,12 +118,12 @@ handle_cast({next_job_from, CPid}, undefined) ->
handle_cast({next_job_from, CPid}, {job, CPid, From, Fun, ProcessModel}) ->
gen_server2:reply(From, run(Fun, ProcessModel)),
- ok = worker_pool:idle(self()),
+ ok = worker_pool:idle(get(worker_pool_name), self()),
{noreply, undefined, hibernate};
handle_cast({submit_async, Fun}, undefined) ->
run(Fun),
- ok = worker_pool:idle(self()),
+ ok = worker_pool:idle(get(worker_pool_name), self()),
{noreply, undefined, hibernate};
handle_cast({set_maximum_since_use, Age}, State) ->
@@ -132,7 +134,7 @@ handle_cast(Msg, State) ->
{stop, {unexpected_cast, Msg}, State}.
handle_info({'DOWN', MRef, process, CPid, _Reason}, {from, CPid, MRef}) ->
- ok = worker_pool:idle(self()),
+ ok = worker_pool:idle(get(worker_pool_name), self()),
{noreply, undefined, hibernate};
handle_info({'DOWN', _MRef, process, _Pid, _Reason}, State) ->
diff --git a/test/src/credit_flow_test.erl b/test/src/credit_flow_test.erl
deleted file mode 100644
index 148c9024f1..0000000000
--- a/test/src/credit_flow_test.erl
+++ /dev/null
@@ -1,49 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2014 GoPivotal, Inc. All rights reserved.
-%%
-
--module(credit_flow_test).
-
--export([test_credit_flow_settings/0]).
-
-test_credit_flow_settings() ->
- %% default values
- passed = test_proc(200, 50),
-
- application:set_env(rabbit, credit_flow_default_credit, {100, 20}),
- passed = test_proc(100, 20),
-
- application:unset_env(rabbit, credit_flow_default_credit),
- % back to defaults
- passed = test_proc(200, 50),
- passed.
-
-test_proc(InitialCredit, MoreCreditAfter) ->
- Pid = spawn(fun dummy/0),
- Pid ! {credit, self()},
- {InitialCredit, MoreCreditAfter} =
- receive
- {credit, Val} -> Val
- end,
- passed.
-
-dummy() ->
- credit_flow:send(self()),
- receive
- {credit, From} ->
- From ! {credit, get(credit_flow_default_credit)};
- _ ->
- dummy()
- end.
diff --git a/test/src/gm_qc.erl b/test/src/gm_qc.erl
deleted file mode 100644
index c6b33a2b9f..0000000000
--- a/test/src/gm_qc.erl
+++ /dev/null
@@ -1,384 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(gm_qc).
--ifdef(use_proper_qc).
-
--include_lib("proper/include/proper.hrl").
-
--define(GROUP, test_group).
--define(MAX_SIZE, 5).
--define(MSG_TIMEOUT, 1000000). %% micros
-
--export([prop_gm_test/0]).
-
--behaviour(proper_statem).
--export([initial_state/0, command/1, precondition/2, postcondition/3,
- next_state/3]).
-
--behaviour(gm).
--export([joined/2, members_changed/3, handle_msg/3, terminate/2]).
-
-%% Helpers
--export([do_join/0, do_leave/1, do_send/1, do_proceed1/1, do_proceed2/2]).
-
-%% For insertion into gm
--export([call/3, cast/2, monitor/1, demonitor/1, execute_mnesia_transaction/1]).
-
--record(state, {seq, %% symbolic and dynamic
- instrumented, %% dynamic only
- outstanding, %% dynamic only
- monitors, %% dynamic only
- all_join, %% for symbolic
- to_join, %% dynamic only
- to_leave %% for symbolic
- }).
-
-prop_gm_test() ->
- case ?INSTR_MOD of
- ?MODULE -> ok;
- _ -> exit(compile_with_INSTRUMENT_FOR_QC)
- end,
- process_flag(trap_exit, true),
- erlang:register(?MODULE, self()),
- ?FORALL(Cmds, commands(?MODULE), gm_test(Cmds)).
-
-gm_test(Cmds) ->
- {_H, State, Res} = run_commands(?MODULE, Cmds),
- cleanup(State),
- ?WHENFAIL(
- io:format("Result: ~p~n", [Res]),
- aggregate(command_names(Cmds), Res =:= ok)).
-
-cleanup(S) ->
- S2 = ensure_joiners_joined_and_msgs_received(S),
- All = gms_joined(S2),
- All = gms(S2), %% assertion - none to join
- check_stale_members(All),
- [gm:leave(GM) || GM <- All],
- drain_and_proceed_gms(S2),
- [await_death(GM) || GM <- All],
- gm:forget_group(?GROUP),
- ok.
-
-check_stale_members(All) ->
- GMs = [P || P <- processes(), is_gm_process(?GROUP, P)],
- case GMs -- All of
- [] -> ok;
- Rest -> exit({forgot, Rest})
- end.
-
-is_gm_process(Group, P) ->
- case process_info(P, dictionary) of
- undefined -> false;
- {dictionary, D} -> {gm, Group} =:= proplists:get_value(process_name, D)
- end.
-
-await_death(P) ->
- MRef = erlang:monitor(process, P),
- await_death(MRef, P).
-
-await_death(MRef, P) ->
- receive
- {'DOWN', MRef, process, P, _} -> ok;
- {'DOWN', _, _, _, _} -> await_death(MRef, P);
- {'EXIT', _, normal} -> await_death(MRef, P);
- {'EXIT', _, Reason} -> exit(Reason);
- {joined, _GM} -> await_death(MRef, P);
- {left, _GM} -> await_death(MRef, P);
- Anything -> exit({stray_msg, Anything})
- end.
-
-%% ---------------------------------------------------------------------------
-%% proper_statem
-%% ---------------------------------------------------------------------------
-
-initial_state() -> #state{seq = 1,
- outstanding = dict:new(),
- instrumented = dict:new(),
- monitors = dict:new(),
- all_join = sets:new(),
- to_join = sets:new(),
- to_leave = sets:new()}.
-
-command(S) ->
- case {length(gms_symb_not_left(S)), length(gms_symb(S))} of
- {0, 0} -> qc_join(S);
- {0, _} -> frequency([{1, qc_join(S)},
- {3, qc_proceed1(S)},
- {5, qc_proceed2(S)}]);
- _ -> frequency([{1, qc_join(S)},
- {1, qc_leave(S)},
- {10, qc_send(S)},
- {5, qc_proceed1(S)},
- {15, qc_proceed2(S)}])
- end.
-
-qc_join(_S) -> {call,?MODULE,do_join, []}.
-qc_leave(S) -> {call,?MODULE,do_leave,[oneof(gms_symb_not_left(S))]}.
-qc_send(S) -> {call,?MODULE,do_send, [oneof(gms_symb_not_left(S))]}.
-qc_proceed1(S) -> {call,?MODULE,do_proceed1, [oneof(gms_symb(S))]}.
-qc_proceed2(S) -> {call,?MODULE,do_proceed2, [oneof(gms_symb(S)),
- oneof(gms_symb(S))]}.
-
-precondition(S, {call, ?MODULE, do_join, []}) ->
- length(gms_symb(S)) < ?MAX_SIZE;
-
-precondition(_S, {call, ?MODULE, do_leave, [_GM]}) ->
- true;
-
-precondition(_S, {call, ?MODULE, do_send, [_GM]}) ->
- true;
-
-precondition(_S, {call, ?MODULE, do_proceed1, [_GM]}) ->
- true;
-
-precondition(_S, {call, ?MODULE, do_proceed2, [GM1, GM2]}) ->
- GM1 =/= GM2.
-
-postcondition(_S, {call, _M, _F, _A}, _Res) ->
- true.
-
-next_state(S = #state{to_join = ToSet,
- all_join = AllSet}, GM, {call, ?MODULE, do_join, []}) ->
- S#state{to_join = sets:add_element(GM, ToSet),
- all_join = sets:add_element(GM, AllSet)};
-
-next_state(S = #state{to_leave = Set}, _Res, {call, ?MODULE, do_leave, [GM]}) ->
- S#state{to_leave = sets:add_element(GM, Set)};
-
-next_state(S = #state{seq = Seq,
- outstanding = Outstanding}, _Res,
- {call, ?MODULE, do_send, [GM]}) ->
- case is_pid(GM) andalso lists:member(GM, gms_joined(S)) of
- true ->
- %% Dynamic state, i.e. runtime
- Msg = [{sequence, Seq},
- {sent_to, GM},
- {dests, gms_joined(S)}],
- gm:broadcast(GM, Msg),
- Outstanding1 = dict:map(
- fun (_GM, Set) ->
- gb_sets:add_element(Msg, Set)
- end, Outstanding),
- drain(S#state{seq = Seq + 1,
- outstanding = Outstanding1});
- false ->
- S
- end;
-
-next_state(S, _Res, {call, ?MODULE, do_proceed1, [Pid]}) ->
- proceed(Pid, S);
-
-next_state(S, _Res, {call, ?MODULE, do_proceed2, [From, To]}) ->
- proceed({From, To}, S).
-
-proceed(K, S = #state{instrumented = Msgs}) ->
- case dict:find(K, Msgs) of
- {ok, Q} -> case queue:out(Q) of
- {{value, Thing}, Q2} ->
- S2 = proceed(K, Thing, S),
- S2#state{instrumented = dict:store(K, Q2, Msgs)};
- {empty, _} ->
- S
- end;
- error -> S
- end.
-
-%% ---------------------------------------------------------------------------
-%% GM
-%% ---------------------------------------------------------------------------
-
-joined(Pid, _Members) -> Pid ! {joined, self()},
- ok.
-members_changed(_Pid, _Bs, _Ds) -> ok.
-handle_msg(Pid, _From, Msg) -> Pid ! {gm, self(), Msg}, ok.
-terminate(Pid, _Reason) -> Pid ! {left, self()}.
-
-%% ---------------------------------------------------------------------------
-%% Helpers
-%% ---------------------------------------------------------------------------
-
-do_join() ->
- {ok, GM} = gm:start_link(?GROUP, ?MODULE, self(),
- fun execute_mnesia_transaction/1),
- GM.
-
-do_leave(GM) ->
- gm:leave(GM),
- GM.
-
-%% We need to update the state, so do the work in next_state
-do_send( _GM) -> ok.
-do_proceed1(_Pid) -> ok.
-do_proceed2(_From, _To) -> ok.
-
-%% All GMs, joined and to join
-gms(#state{outstanding = Outstanding,
- to_join = ToJoin}) ->
- dict:fetch_keys(Outstanding) ++ sets:to_list(ToJoin).
-
-%% All GMs, joined and to join
-gms_joined(#state{outstanding = Outstanding}) ->
- dict:fetch_keys(Outstanding).
-
-%% All GMs including those that have left (symbolic)
-gms_symb(#state{all_join = AllJoin}) ->
- sets:to_list(AllJoin).
-
-%% All GMs not including those that have left (symbolic)
-gms_symb_not_left(#state{all_join = AllJoin,
- to_leave = ToLeave}) ->
- sets:to_list(sets:subtract(AllJoin, ToLeave)).
-
-drain(S) ->
- receive
- Msg -> drain(handle_msg(Msg, S))
- after 10 -> S
- end.
-
-drain_and_proceed_gms(S0) ->
- S = #state{instrumented = Msgs} = drain(S0),
- case dict:size(Msgs) of
- 0 -> S;
- _ -> S1 = dict:fold(
- fun (Key, Q, Si) ->
- lists:foldl(
- fun (Msg, Sij) ->
- proceed(Key, Msg, Sij)
- end, Si, queue:to_list(Q))
- end, S, Msgs),
- drain_and_proceed_gms(S1#state{instrumented = dict:new()})
- end.
-
-handle_msg({gm, GM, Msg}, S = #state{outstanding = Outstanding}) ->
- case dict:find(GM, Outstanding) of
- {ok, Set} ->
- Set2 = gb_sets:del_element(Msg, Set),
- S#state{outstanding = dict:store(GM, Set2, Outstanding)};
- error ->
- %% Message from GM that has already died. OK.
- S
- end;
-handle_msg({instrumented, Key, Thing}, S = #state{instrumented = Msgs}) ->
- Q1 = case dict:find(Key, Msgs) of
- {ok, Q} -> queue:in(Thing, Q);
- error -> queue:from_list([Thing])
- end,
- S#state{instrumented = dict:store(Key, Q1, Msgs)};
-handle_msg({joined, GM}, S = #state{outstanding = Outstanding,
- to_join = ToJoin}) ->
- S#state{outstanding = dict:store(GM, gb_sets:empty(), Outstanding),
- to_join = sets:del_element(GM, ToJoin)};
-handle_msg({left, GM}, S = #state{outstanding = Outstanding,
- to_join = ToJoin}) ->
- true = dict:is_key(GM, Outstanding) orelse sets:is_element(GM, ToJoin),
- S#state{outstanding = dict:erase(GM, Outstanding),
- to_join = sets:del_element(GM, ToJoin)};
-handle_msg({'DOWN', MRef, _, From, _} = Msg, S = #state{monitors = Mons}) ->
- To = dict:fetch(MRef, Mons),
- handle_msg({instrumented, {From, To}, {info, Msg}},
- S#state{monitors = dict:erase(MRef, Mons)});
-handle_msg({'EXIT', _From, normal}, S) ->
- S;
-handle_msg({'EXIT', _From, Reason}, _S) ->
- %% We just trapped exits to get nicer SASL logging.
- exit(Reason).
-
-proceed({_From, To}, {cast, Msg}, S) -> gen_server2:cast(To, Msg), S;
-proceed({_From, To}, {info, Msg}, S) -> To ! Msg, S;
-proceed({From, _To}, {wait, Ref}, S) -> From ! {proceed, Ref}, S;
-proceed({From, To}, {mon, Ref}, S) -> add_monitor(From, To, Ref, S);
-proceed(_Pid, {demon, MRef}, S) -> erlang:demonitor(MRef), S;
-proceed(Pid, {wait, Ref}, S) -> Pid ! {proceed, Ref}, S.
-
-%% NB From here is To in handle_msg/DOWN above, since the msg is going
-%% the other way
-add_monitor(From, To, Ref, S = #state{monitors = Mons}) ->
- MRef = erlang:monitor(process, To),
- From ! {mref, Ref, MRef},
- S#state{monitors = dict:store(MRef, From, Mons)}.
-
-%% ----------------------------------------------------------------------------
-%% Assertions
-%% ----------------------------------------------------------------------------
-
-ensure_joiners_joined_and_msgs_received(S0) ->
- S = drain_and_proceed_gms(S0),
- case outstanding_joiners(S) of
- true -> ensure_joiners_joined_and_msgs_received(S);
- false -> case outstanding_msgs(S) of
- [] -> S;
- Out -> exit({outstanding_msgs, Out})
- end
- end.
-
-outstanding_joiners(#state{to_join = ToJoin}) ->
- sets:size(ToJoin) > 0.
-
-outstanding_msgs(#state{outstanding = Outstanding}) ->
- dict:fold(fun (GM, Set, OS) ->
- case gb_sets:is_empty(Set) of
- true -> OS;
- false -> [{GM, gb_sets:to_list(Set)} | OS]
- end
- end, [], Outstanding).
-
-%% ---------------------------------------------------------------------------
-%% For insertion into GM
-%% ---------------------------------------------------------------------------
-
-call(Pid, Msg, infinity) ->
- Ref = make_ref(),
- whereis(?MODULE) ! {instrumented, {self(), Pid}, {wait, Ref}},
- receive
- {proceed, Ref} -> ok
- end,
- gen_server2:call(Pid, Msg, infinity).
-
-cast(Pid, Msg) ->
- whereis(?MODULE) ! {instrumented, {self(), Pid}, {cast, Msg}},
- ok.
-
-monitor(Pid) ->
- Ref = make_ref(),
- whereis(?MODULE) ! {instrumented, {self(), Pid}, {mon, Ref}},
- receive
- {mref, Ref, MRef} -> MRef
- end.
-
-demonitor(MRef) ->
- whereis(?MODULE) ! {instrumented, self(), {demon, MRef}},
- true.
-
-execute_mnesia_transaction(Fun) ->
- Ref = make_ref(),
- whereis(?MODULE) ! {instrumented, self(), {wait, Ref}},
- receive
- {proceed, Ref} -> ok
- end,
- rabbit_misc:execute_mnesia_transaction(Fun).
-
--else.
-
--export([prop_disabled/0]).
-
-prop_disabled() ->
- exit({compiled_without_proper,
- "PropEr was not present during compilation of the test module. "
- "Hence all tests are disabled."}).
-
--endif.
diff --git a/test/src/gm_soak_test.erl b/test/src/gm_soak_test.erl
deleted file mode 100644
index 32476b56bc..0000000000
--- a/test/src/gm_soak_test.erl
+++ /dev/null
@@ -1,133 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(gm_soak_test).
-
--export([test/0]).
--export([joined/2, members_changed/3, handle_msg/3, handle_terminate/2]).
-
--behaviour(gm).
-
--include("gm_specs.hrl").
-
-%% ---------------------------------------------------------------------------
-%% Soak test
-%% ---------------------------------------------------------------------------
-
-get_state() ->
- get(state).
-
-with_state(Fun) ->
- put(state, Fun(get_state())).
-
-inc() ->
- case 1 + get(count) of
- 100000 -> Now = now(),
- Start = put(ts, Now),
- Diff = timer:now_diff(Now, Start),
- Rate = 100000 / (Diff / 1000000),
- io:format("~p seeing ~p msgs/sec~n", [self(), Rate]),
- put(count, 0);
- N -> put(count, N)
- end.
-
-joined([], Members) ->
- io:format("Joined ~p (~p members)~n", [self(), length(Members)]),
- put(state, dict:from_list([{Member, empty} || Member <- Members])),
- put(count, 0),
- put(ts, now()),
- ok.
-
-members_changed([], Births, Deaths) ->
- with_state(
- fun (State) ->
- State1 =
- lists:foldl(
- fun (Born, StateN) ->
- false = dict:is_key(Born, StateN),
- dict:store(Born, empty, StateN)
- end, State, Births),
- lists:foldl(
- fun (Died, StateN) ->
- true = dict:is_key(Died, StateN),
- dict:store(Died, died, StateN)
- end, State1, Deaths)
- end),
- ok.
-
-handle_msg([], From, {test_msg, Num}) ->
- inc(),
- with_state(
- fun (State) ->
- ok = case dict:find(From, State) of
- {ok, died} ->
- exit({{from, From},
- {received_posthumous_delivery, Num}});
- {ok, empty} -> ok;
- {ok, Num} -> ok;
- {ok, Num1} when Num < Num1 ->
- exit({{from, From},
- {duplicate_delivery_of, Num},
- {expecting, Num1}});
- {ok, Num1} ->
- exit({{from, From},
- {received_early, Num},
- {expecting, Num1}});
- error ->
- exit({{from, From},
- {received_premature_delivery, Num}})
- end,
- dict:store(From, Num + 1, State)
- end),
- ok.
-
-handle_terminate([], Reason) ->
- io:format("Left ~p (~p)~n", [self(), Reason]),
- ok.
-
-spawn_member() ->
- spawn_link(
- fun () ->
- {MegaSecs, Secs, MicroSecs} = now(),
- random:seed(MegaSecs, Secs, MicroSecs),
- %% start up delay of no more than 10 seconds
- timer:sleep(random:uniform(10000)),
- {ok, Pid} = gm:start_link(
- ?MODULE, ?MODULE, [],
- fun rabbit_misc:execute_mnesia_transaction/1),
- Start = random:uniform(10000),
- send_loop(Pid, Start, Start + random:uniform(10000)),
- gm:leave(Pid),
- spawn_more()
- end).
-
-spawn_more() ->
- [spawn_member() || _ <- lists:seq(1, 4 - random:uniform(4))].
-
-send_loop(_Pid, Target, Target) ->
- ok;
-send_loop(Pid, Count, Target) when Target > Count ->
- case random:uniform(3) of
- 3 -> gm:confirmed_broadcast(Pid, {test_msg, Count});
- _ -> gm:broadcast(Pid, {test_msg, Count})
- end,
- timer:sleep(random:uniform(5) - 1), %% sleep up to 4 ms
- send_loop(Pid, Count + 1, Target).
-
-test() ->
- ok = gm:create_tables(),
- spawn_member(),
- spawn_member().
diff --git a/test/src/gm_speed_test.erl b/test/src/gm_speed_test.erl
deleted file mode 100644
index f11e7d487b..0000000000
--- a/test/src/gm_speed_test.erl
+++ /dev/null
@@ -1,83 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(gm_speed_test).
-
--export([test/3]).
--export([joined/2, members_changed/3, handle_msg/3, handle_terminate/2]).
--export([wile_e_coyote/2]).
-
--behaviour(gm).
-
--include("gm_specs.hrl").
-
-%% callbacks
-
-joined(Owner, _Members) ->
- Owner ! joined,
- ok.
-
-members_changed(_Owner, _Births, _Deaths) ->
- ok.
-
-handle_msg(Owner, _From, ping) ->
- Owner ! ping,
- ok.
-
-handle_terminate(Owner, _Reason) ->
- Owner ! terminated,
- ok.
-
-%% other
-
-wile_e_coyote(Time, WriteUnit) ->
- {ok, Pid} = gm:start_link(?MODULE, ?MODULE, self(),
- fun rabbit_misc:execute_mnesia_transaction/1),
- receive joined -> ok end,
- timer:sleep(1000), %% wait for all to join
- timer:send_after(Time, stop),
- Start = now(),
- {Sent, Received} = loop(Pid, WriteUnit, 0, 0),
- End = now(),
- ok = gm:leave(Pid),
- receive terminated -> ok end,
- Elapsed = timer:now_diff(End, Start) / 1000000,
- io:format("Sending rate: ~p msgs/sec~nReceiving rate: ~p msgs/sec~n~n",
- [Sent/Elapsed, Received/Elapsed]),
- ok.
-
-loop(Pid, WriteUnit, Sent, Received) ->
- case read(Received) of
- {stop, Received1} -> {Sent, Received1};
- {ok, Received1} -> ok = write(Pid, WriteUnit),
- loop(Pid, WriteUnit, Sent + WriteUnit, Received1)
- end.
-
-read(Count) ->
- receive
- ping -> read(Count + 1);
- stop -> {stop, Count}
- after 5 ->
- {ok, Count}
- end.
-
-write(_Pid, 0) -> ok;
-write(Pid, N) -> ok = gm:broadcast(Pid, ping),
- write(Pid, N - 1).
-
-test(Time, WriteUnit, Nodes) ->
- ok = gm:create_tables(),
- [spawn(Node, ?MODULE, wile_e_coyote, [Time, WriteUnit]) || Node <- Nodes].
diff --git a/test/src/gm_tests.erl b/test/src/gm_tests.erl
deleted file mode 100644
index 8daac11125..0000000000
--- a/test/src/gm_tests.erl
+++ /dev/null
@@ -1,186 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(gm_tests).
-
--export([test_join_leave/0,
- test_broadcast/0,
- test_confirmed_broadcast/0,
- test_member_death/0,
- test_receive_in_order/0,
- all_tests/0]).
--export([joined/2, members_changed/3, handle_msg/3, handle_terminate/2]).
-
--behaviour(gm).
-
--include("gm_specs.hrl").
-
--define(RECEIVE_OR_THROW(Body, Bool, Error),
- receive Body ->
- true = Bool,
- passed
- after 1000 ->
- throw(Error)
- end).
-
-joined(Pid, Members) ->
- Pid ! {joined, self(), Members},
- ok.
-
-members_changed(Pid, Births, Deaths) ->
- Pid ! {members_changed, self(), Births, Deaths},
- ok.
-
-handle_msg(Pid, From, Msg) ->
- Pid ! {msg, self(), From, Msg},
- ok.
-
-handle_terminate(Pid, Reason) ->
- Pid ! {termination, self(), Reason},
- ok.
-
-%% ---------------------------------------------------------------------------
-%% Functional tests
-%% ---------------------------------------------------------------------------
-
-all_tests() ->
- passed = test_join_leave(),
- passed = test_broadcast(),
- passed = test_confirmed_broadcast(),
- passed = test_member_death(),
- passed = test_receive_in_order(),
- passed.
-
-test_join_leave() ->
- with_two_members(fun (_Pid, _Pid2) -> passed end).
-
-test_broadcast() ->
- test_broadcast(fun gm:broadcast/2).
-
-test_confirmed_broadcast() ->
- test_broadcast(fun gm:confirmed_broadcast/2).
-
-test_member_death() ->
- with_two_members(
- fun (Pid, Pid2) ->
- {ok, Pid3} = gm:start_link(
- ?MODULE, ?MODULE, self(),
- fun rabbit_misc:execute_mnesia_transaction/1),
- passed = receive_joined(Pid3, [Pid, Pid2, Pid3],
- timeout_joining_gm_group_3),
- passed = receive_birth(Pid, Pid3, timeout_waiting_for_birth_3_1),
- passed = receive_birth(Pid2, Pid3, timeout_waiting_for_birth_3_2),
-
- unlink(Pid3),
- exit(Pid3, kill),
-
- %% Have to do some broadcasts to ensure that all members
- %% find out about the death.
- passed = (test_broadcast_fun(fun gm:confirmed_broadcast/2))(
- Pid, Pid2),
-
- passed = receive_death(Pid, Pid3, timeout_waiting_for_death_3_1),
- passed = receive_death(Pid2, Pid3, timeout_waiting_for_death_3_2),
-
- passed
- end).
-
-test_receive_in_order() ->
- with_two_members(
- fun (Pid, Pid2) ->
- Numbers = lists:seq(1,1000),
- [begin ok = gm:broadcast(Pid, N), ok = gm:broadcast(Pid2, N) end
- || N <- Numbers],
- passed = receive_numbers(
- Pid, Pid, {timeout_for_msgs, Pid, Pid}, Numbers),
- passed = receive_numbers(
- Pid, Pid2, {timeout_for_msgs, Pid, Pid2}, Numbers),
- passed = receive_numbers(
- Pid2, Pid, {timeout_for_msgs, Pid2, Pid}, Numbers),
- passed = receive_numbers(
- Pid2, Pid2, {timeout_for_msgs, Pid2, Pid2}, Numbers),
- passed
- end).
-
-test_broadcast(Fun) ->
- with_two_members(test_broadcast_fun(Fun)).
-
-test_broadcast_fun(Fun) ->
- fun (Pid, Pid2) ->
- ok = Fun(Pid, magic_message),
- passed = receive_or_throw({msg, Pid, Pid, magic_message},
- timeout_waiting_for_msg),
- passed = receive_or_throw({msg, Pid2, Pid, magic_message},
- timeout_waiting_for_msg)
- end.
-
-with_two_members(Fun) ->
- ok = gm:create_tables(),
-
- {ok, Pid} = gm:start_link(?MODULE, ?MODULE, self(),
- fun rabbit_misc:execute_mnesia_transaction/1),
- passed = receive_joined(Pid, [Pid], timeout_joining_gm_group_1),
-
- {ok, Pid2} = gm:start_link(?MODULE, ?MODULE, self(),
- fun rabbit_misc:execute_mnesia_transaction/1),
- passed = receive_joined(Pid2, [Pid, Pid2], timeout_joining_gm_group_2),
- passed = receive_birth(Pid, Pid2, timeout_waiting_for_birth_2),
-
- passed = Fun(Pid, Pid2),
-
- ok = gm:leave(Pid),
- passed = receive_death(Pid2, Pid, timeout_waiting_for_death_1),
- passed =
- receive_termination(Pid, normal, timeout_waiting_for_termination_1),
-
- ok = gm:leave(Pid2),
- passed =
- receive_termination(Pid2, normal, timeout_waiting_for_termination_2),
-
- receive X -> throw({unexpected_message, X})
- after 0 -> passed
- end.
-
-receive_or_throw(Pattern, Error) ->
- ?RECEIVE_OR_THROW(Pattern, true, Error).
-
-receive_birth(From, Born, Error) ->
- ?RECEIVE_OR_THROW({members_changed, From, Birth, Death},
- ([Born] == Birth) andalso ([] == Death),
- Error).
-
-receive_death(From, Died, Error) ->
- ?RECEIVE_OR_THROW({members_changed, From, Birth, Death},
- ([] == Birth) andalso ([Died] == Death),
- Error).
-
-receive_joined(From, Members, Error) ->
- ?RECEIVE_OR_THROW({joined, From, Members1},
- lists:usort(Members) == lists:usort(Members1),
- Error).
-
-receive_termination(From, Reason, Error) ->
- ?RECEIVE_OR_THROW({termination, From, Reason1},
- Reason == Reason1,
- Error).
-
-receive_numbers(_Pid, _Sender, _Error, []) ->
- passed;
-receive_numbers(Pid, Sender, Error, [N | Numbers]) ->
- ?RECEIVE_OR_THROW({msg, Pid, Sender, M},
- M == N,
- Error),
- receive_numbers(Pid, Sender, Error, Numbers).
diff --git a/test/src/mirrored_supervisor_tests.erl b/test/src/mirrored_supervisor_tests.erl
deleted file mode 100644
index 34411c2c62..0000000000
--- a/test/src/mirrored_supervisor_tests.erl
+++ /dev/null
@@ -1,307 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(mirrored_supervisor_tests).
-
--export([all_tests/0]).
-
--export([init/1]).
-
--behaviour(mirrored_supervisor).
-
--define(MS, mirrored_supervisor).
--define(SERVER, mirrored_supervisor_tests_gs).
-
-%% ---------------------------------------------------------------------------
-%% Functional tests
-%% ---------------------------------------------------------------------------
-
-all_tests() ->
- passed = test_migrate(),
- passed = test_migrate_twice(),
- passed = test_already_there(),
- passed = test_delete_restart(),
- passed = test_which_children(),
- passed = test_large_group(),
- passed = test_childspecs_at_init(),
- passed = test_anonymous_supervisors(),
- passed = test_no_migration_on_shutdown(),
- passed = test_start_idempotence(),
- passed = test_unsupported(),
- passed = test_ignore(),
- passed = test_startup_failure(),
- passed.
-
-%% Simplest test
-test_migrate() ->
- with_sups(fun([A, _]) ->
- ?MS:start_child(a, childspec(worker)),
- Pid1 = pid_of(worker),
- kill_registered(A, Pid1),
- Pid2 = pid_of(worker),
- false = (Pid1 =:= Pid2)
- end, [a, b]).
-
-%% Is migration transitive?
-test_migrate_twice() ->
- with_sups(fun([A, B]) ->
- ?MS:start_child(a, childspec(worker)),
- Pid1 = pid_of(worker),
- kill_registered(A, Pid1),
- {ok, C} = start_sup(c),
- Pid2 = pid_of(worker),
- kill_registered(B, Pid2),
- Pid3 = pid_of(worker),
- false = (Pid1 =:= Pid3),
- kill(C)
- end, [a, b]).
-
-%% Can't start the same child twice
-test_already_there() ->
- with_sups(fun([_, _]) ->
- S = childspec(worker),
- {ok, Pid} = ?MS:start_child(a, S),
- {error, {already_started, Pid}} = ?MS:start_child(b, S)
- end, [a, b]).
-
-%% Deleting and restarting should work as per a normal supervisor
-test_delete_restart() ->
- with_sups(fun([_, _]) ->
- S = childspec(worker),
- {ok, Pid1} = ?MS:start_child(a, S),
- {error, running} = ?MS:delete_child(a, worker),
- ok = ?MS:terminate_child(a, worker),
- ok = ?MS:delete_child(a, worker),
- {ok, Pid2} = ?MS:start_child(b, S),
- false = (Pid1 =:= Pid2),
- ok = ?MS:terminate_child(b, worker),
- {ok, Pid3} = ?MS:restart_child(b, worker),
- Pid3 = pid_of(worker),
- false = (Pid2 =:= Pid3),
- %% Not the same supervisor as the worker is on
- ok = ?MS:terminate_child(a, worker),
- ok = ?MS:delete_child(a, worker),
- {ok, Pid4} = ?MS:start_child(a, S),
- false = (Pid3 =:= Pid4)
- end, [a, b]).
-
-test_which_children() ->
- with_sups(
- fun([A, B] = Both) ->
- ?MS:start_child(A, childspec(worker)),
- assert_wc(Both, fun ([C]) -> true = is_pid(wc_pid(C)) end),
- ok = ?MS:terminate_child(a, worker),
- assert_wc(Both, fun ([C]) -> undefined = wc_pid(C) end),
- {ok, _} = ?MS:restart_child(a, worker),
- assert_wc(Both, fun ([C]) -> true = is_pid(wc_pid(C)) end),
- ?MS:start_child(B, childspec(worker2)),
- assert_wc(Both, fun (C) -> 2 = length(C) end)
- end, [a, b]).
-
-assert_wc(Sups, Fun) ->
- [Fun(?MS:which_children(Sup)) || Sup <- Sups].
-
-wc_pid(Child) ->
- {worker, Pid, worker, [mirrored_supervisor_tests]} = Child,
- Pid.
-
-%% Not all the members of the group should actually do the failover
-test_large_group() ->
- with_sups(fun([A, _, _, _]) ->
- ?MS:start_child(a, childspec(worker)),
- Pid1 = pid_of(worker),
- kill_registered(A, Pid1),
- Pid2 = pid_of(worker),
- false = (Pid1 =:= Pid2)
- end, [a, b, c, d]).
-
-%% Do childspecs work when returned from init?
-test_childspecs_at_init() ->
- S = childspec(worker),
- with_sups(fun([A, _]) ->
- Pid1 = pid_of(worker),
- kill_registered(A, Pid1),
- Pid2 = pid_of(worker),
- false = (Pid1 =:= Pid2)
- end, [{a, [S]}, {b, [S]}]).
-
-test_anonymous_supervisors() ->
- with_sups(fun([A, _B]) ->
- ?MS:start_child(A, childspec(worker)),
- Pid1 = pid_of(worker),
- kill_registered(A, Pid1),
- Pid2 = pid_of(worker),
- false = (Pid1 =:= Pid2)
- end, [anon, anon]).
-
-%% When a mirrored_supervisor terminates, we should not migrate, but
-%% the whole supervisor group should shut down. To test this we set up
-%% a situation where the gen_server will only fail if it's running
-%% under the supervisor called 'evil'. It should not migrate to
-%% 'good' and survive, rather the whole group should go away.
-test_no_migration_on_shutdown() ->
- with_sups(fun([Evil, _]) ->
- ?MS:start_child(Evil, childspec(worker)),
- try
- call(worker, ping, 1000, 100),
- exit(worker_should_not_have_migrated)
- catch exit:{timeout_waiting_for_server, _, _} ->
- ok
- end
- end, [evil, good]).
-
-test_start_idempotence() ->
- with_sups(fun([_]) ->
- CS = childspec(worker),
- {ok, Pid} = ?MS:start_child(a, CS),
- {error, {already_started, Pid}} = ?MS:start_child(a, CS),
- ?MS:terminate_child(a, worker),
- {error, already_present} = ?MS:start_child(a, CS)
- end, [a]).
-
-test_unsupported() ->
- try
- ?MS:start_link({global, foo}, get_group(group), fun tx_fun/1, ?MODULE,
- {one_for_one, []}),
- exit(no_global)
- catch error:badarg ->
- ok
- end,
- try
- ?MS:start_link({local, foo}, get_group(group), fun tx_fun/1, ?MODULE,
- {simple_one_for_one, []}),
- exit(no_sofo)
- catch error:badarg ->
- ok
- end,
- passed.
-
-%% Just test we don't blow up
-test_ignore() ->
- ?MS:start_link({local, foo}, get_group(group), fun tx_fun/1, ?MODULE,
- {fake_strategy_for_ignore, []}),
- passed.
-
-test_startup_failure() ->
- [test_startup_failure(F) || F <- [want_error, want_exit]],
- passed.
-
-test_startup_failure(Fail) ->
- process_flag(trap_exit, true),
- ?MS:start_link(get_group(group), fun tx_fun/1, ?MODULE,
- {one_for_one, [childspec(Fail)]}),
- receive
- {'EXIT', _, shutdown} ->
- ok
- after 1000 ->
- exit({did_not_exit, Fail})
- end,
- process_flag(trap_exit, false).
-
-%% ---------------------------------------------------------------------------
-
-with_sups(Fun, Sups) ->
- inc_group(),
- Pids = [begin {ok, Pid} = start_sup(Sup), Pid end || Sup <- Sups],
- Fun(Pids),
- [kill(Pid) || Pid <- Pids, is_process_alive(Pid)],
- timer:sleep(500),
- passed.
-
-start_sup(Spec) ->
- start_sup(Spec, group).
-
-start_sup({Name, ChildSpecs}, Group) ->
- {ok, Pid} = start_sup0(Name, get_group(Group), ChildSpecs),
- %% We are not a supervisor, when we kill the supervisor we do not
- %% want to die!
- unlink(Pid),
- {ok, Pid};
-
-start_sup(Name, Group) ->
- start_sup({Name, []}, Group).
-
-start_sup0(anon, Group, ChildSpecs) ->
- ?MS:start_link(Group, fun tx_fun/1, ?MODULE,
- {one_for_one, ChildSpecs});
-
-start_sup0(Name, Group, ChildSpecs) ->
- ?MS:start_link({local, Name}, Group, fun tx_fun/1, ?MODULE,
- {one_for_one, ChildSpecs}).
-
-childspec(Id) ->
- {Id,{?SERVER, start_link, [Id]}, transient, 16#ffffffff, worker, [?MODULE]}.
-
-pid_of(Id) ->
- {received, Pid, ping} = call(Id, ping),
- Pid.
-
-tx_fun(Fun) ->
- case mnesia:sync_transaction(Fun) of
- {atomic, Result} -> Result;
- {aborted, Reason} -> throw({error, Reason})
- end.
-
-inc_group() ->
- Count = case get(counter) of
- undefined -> 0;
- C -> C
- end + 1,
- put(counter, Count).
-
-get_group(Group) ->
- {Group, get(counter)}.
-
-call(Id, Msg) -> call(Id, Msg, 10*1000, 100).
-
-call(Id, Msg, 0, _Decr) ->
- exit({timeout_waiting_for_server, {Id, Msg}, erlang:get_stacktrace()});
-
-call(Id, Msg, MaxDelay, Decr) ->
- try
- gen_server:call(Id, Msg, infinity)
- catch exit:_ -> timer:sleep(Decr),
- call(Id, Msg, MaxDelay - Decr, Decr)
- end.
-
-kill(Pid) -> kill(Pid, []).
-kill(Pid, Wait) when is_pid(Wait) -> kill(Pid, [Wait]);
-kill(Pid, Waits) ->
- erlang:monitor(process, Pid),
- [erlang:monitor(process, P) || P <- Waits],
- exit(Pid, bang),
- kill_wait(Pid),
- [kill_wait(P) || P <- Waits].
-
-kill_registered(Pid, Child) ->
- {registered_name, Name} = erlang:process_info(Child, registered_name),
- kill(Pid, Child),
- false = (Child =:= whereis(Name)),
- ok.
-
-kill_wait(Pid) ->
- receive
- {'DOWN', _Ref, process, Pid, _Reason} ->
- ok
- end.
-
-%% ---------------------------------------------------------------------------
-
-init({fake_strategy_for_ignore, _ChildSpecs}) ->
- ignore;
-
-init({Strategy, ChildSpecs}) ->
- {ok, {{Strategy, 0, 1}, ChildSpecs}}.
diff --git a/test/src/mirrored_supervisor_tests_gs.erl b/test/src/mirrored_supervisor_tests_gs.erl
deleted file mode 100644
index beaf49a44d..0000000000
--- a/test/src/mirrored_supervisor_tests_gs.erl
+++ /dev/null
@@ -1,66 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(mirrored_supervisor_tests_gs).
-
-%% Dumb gen_server we can supervise
-
--export([start_link/1]).
-
--export([init/1, handle_call/3, handle_info/2, terminate/2, code_change/3,
- handle_cast/2]).
-
--behaviour(gen_server).
-
--define(MS, mirrored_supervisor).
-
-start_link(want_error) ->
- {error, foo};
-
-start_link(want_exit) ->
- exit(foo);
-
-start_link(Id) ->
- gen_server:start_link({local, Id}, ?MODULE, [], []).
-
-%% ---------------------------------------------------------------------------
-
-init([]) ->
- {ok, state}.
-
-handle_call(Msg, _From, State) ->
- die_if_my_supervisor_is_evil(),
- {reply, {received, self(), Msg}, State}.
-
-handle_cast(_Msg, State) ->
- {noreply, State}.
-
-handle_info(_Info, State) ->
- {noreply, State}.
-
-terminate(_Reason, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-die_if_my_supervisor_is_evil() ->
- try lists:keysearch(self(), 2, ?MS:which_children(evil)) of
- false -> ok;
- _ -> exit(doooom)
- catch
- exit:{noproc, _} -> ok
- end.
diff --git a/test/src/on_disk_store_tunable_parameter_validation_test.erl b/test/src/on_disk_store_tunable_parameter_validation_test.erl
deleted file mode 100644
index 9db5425e6d..0000000000
--- a/test/src/on_disk_store_tunable_parameter_validation_test.erl
+++ /dev/null
@@ -1,47 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(on_disk_store_tunable_parameter_validation_test).
-
--include("rabbit.hrl").
-
--export([test_msg_store_parameter_validation/0]).
-
--define(T(Fun, Args), (catch apply(rabbit, Fun, Args))).
-
-test_msg_store_parameter_validation() ->
- %% make sure it works with default values
- ok = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound, [?CREDIT_DISC_BOUND, ?IO_BATCH_SIZE]),
-
- %% IO_BATCH_SIZE must be greater than CREDIT_DISC_BOUND initial credit
- ok = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound, [{2000, 500}, 3000]),
- {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound, [{2000, 500}, 1500]),
-
- %% All values must be integers
- {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound, [{2000, 500}, "1500"]),
- {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound, [{"2000", 500}, abc]),
- {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound, [{2000, "500"}, 2048]),
-
- %% CREDIT_DISC_BOUND must be a tuple
- {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound, [[2000, 500], 1500]),
- {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound, [2000, 1500]),
-
- %% config values can't be smaller than default values
- {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound, [{1999, 500}, 2048]),
- {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound, [{2000, 499}, 2048]),
- {error, _} = ?T(validate_msg_store_io_batch_size_and_credit_disc_bound, [{2000, 500}, 2047]),
-
- passed.
diff --git a/test/src/rabbit_backing_queue_qc.erl b/test/src/rabbit_backing_queue_qc.erl
deleted file mode 100644
index a025823910..0000000000
--- a/test/src/rabbit_backing_queue_qc.erl
+++ /dev/null
@@ -1,473 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_backing_queue_qc).
--ifdef(use_proper_qc).
--include("rabbit.hrl").
--include("rabbit_framing.hrl").
--include_lib("proper/include/proper.hrl").
-
--behaviour(proper_statem).
-
--define(BQMOD, rabbit_variable_queue).
--define(QUEUE_MAXLEN, 10000).
--define(TIMEOUT_LIMIT, 100).
-
--define(RECORD_INDEX(Key, Record),
- proplists:get_value(
- Key, lists:zip(record_info(fields, Record),
- lists:seq(2, record_info(size, Record))))).
-
--export([initial_state/0, command/1, precondition/2, postcondition/3,
- next_state/3]).
-
--export([prop_backing_queue_test/0, publish_multiple/1,
- timeout/2, bump_credit/1]).
-
--record(state, {bqstate,
- len, %% int
- next_seq_id, %% int
- messages, %% gb_trees of seqid => {msg_props, basic_msg}
- acks, %% [{acktag, {seqid, {msg_props, basic_msg}}}]
- confirms, %% set of msgid
- publishing}).%% int
-
-%% Initialise model
-
-initial_state() ->
- #state{bqstate = qc_variable_queue_init(qc_test_queue()),
- len = 0,
- next_seq_id = 0,
- messages = gb_trees:empty(),
- acks = [],
- confirms = gb_sets:new(),
- publishing = 0}.
-
-%% Property
-
-prop_backing_queue_test() ->
- ?FORALL(Cmds, commands(?MODULE, initial_state()),
- backing_queue_test(Cmds)).
-
-backing_queue_test(Cmds) ->
- {ok, FileSizeLimit} =
- application:get_env(rabbit, msg_store_file_size_limit),
- application:set_env(rabbit, msg_store_file_size_limit, 512,
- infinity),
- {ok, MaxJournal} =
- application:get_env(rabbit, queue_index_max_journal_entries),
- application:set_env(rabbit, queue_index_max_journal_entries, 128,
- infinity),
-
- {_H, #state{bqstate = BQ}, Res} = run_commands(?MODULE, Cmds),
-
- application:set_env(rabbit, msg_store_file_size_limit,
- FileSizeLimit, infinity),
- application:set_env(rabbit, queue_index_max_journal_entries,
- MaxJournal, infinity),
-
- ?BQMOD:delete_and_terminate(shutdown, BQ),
- ?WHENFAIL(
- io:format("Result: ~p~n", [Res]),
- aggregate(command_names(Cmds), Res =:= ok)).
-
-%% Commands
-
-%% Command frequencies are tuned so that queues are normally
-%% reasonably short, but they may sometimes exceed
-%% ?QUEUE_MAXLEN. Publish-multiple and purging cause extreme queue
-%% lengths, so these have lower probabilities. Fetches/drops are
-%% sufficiently frequent so that commands that need acktags get decent
-%% coverage.
-
-command(S) ->
- frequency([{10, qc_publish(S)},
- {1, qc_publish_delivered(S)},
- {1, qc_publish_multiple(S)}, %% very slow
- {9, qc_fetch(S)}, %% needed for ack and requeue
- {6, qc_drop(S)}, %%
- {15, qc_ack(S)},
- {15, qc_requeue(S)},
- {3, qc_set_ram_duration_target(S)},
- {1, qc_ram_duration(S)},
- {1, qc_drain_confirmed(S)},
- {1, qc_dropwhile(S)},
- {1, qc_is_empty(S)},
- {1, qc_timeout(S)},
- {1, qc_bump_credit(S)},
- {1, qc_purge(S)},
- {1, qc_fold(S)}]).
-
-qc_publish(#state{bqstate = BQ}) ->
- {call, ?BQMOD, publish,
- [qc_message(),
- #message_properties{needs_confirming = frequency([{1, true},
- {20, false}]),
- expiry = oneof([undefined | lists:seq(1, 10)]),
- size = 10},
- false, self(), BQ]}.
-
-qc_publish_multiple(#state{}) ->
- {call, ?MODULE, publish_multiple, [resize(?QUEUE_MAXLEN, pos_integer())]}.
-
-qc_publish_delivered(#state{bqstate = BQ}) ->
- {call, ?BQMOD, publish_delivered,
- [qc_message(), #message_properties{size = 10}, self(), BQ]}.
-
-qc_fetch(#state{bqstate = BQ}) ->
- {call, ?BQMOD, fetch, [boolean(), BQ]}.
-
-qc_drop(#state{bqstate = BQ}) ->
- {call, ?BQMOD, drop, [boolean(), BQ]}.
-
-qc_ack(#state{bqstate = BQ, acks = Acks}) ->
- {call, ?BQMOD, ack, [rand_choice(proplists:get_keys(Acks)), BQ]}.
-
-qc_requeue(#state{bqstate = BQ, acks = Acks}) ->
- {call, ?BQMOD, requeue, [rand_choice(proplists:get_keys(Acks)), BQ]}.
-
-qc_set_ram_duration_target(#state{bqstate = BQ}) ->
- {call, ?BQMOD, set_ram_duration_target,
- [oneof([0, 1, 2, resize(1000, pos_integer()), infinity]), BQ]}.
-
-qc_ram_duration(#state{bqstate = BQ}) ->
- {call, ?BQMOD, ram_duration, [BQ]}.
-
-qc_drain_confirmed(#state{bqstate = BQ}) ->
- {call, ?BQMOD, drain_confirmed, [BQ]}.
-
-qc_dropwhile(#state{bqstate = BQ}) ->
- {call, ?BQMOD, dropwhile, [fun dropfun/1, BQ]}.
-
-qc_is_empty(#state{bqstate = BQ}) ->
- {call, ?BQMOD, is_empty, [BQ]}.
-
-qc_timeout(#state{bqstate = BQ}) ->
- {call, ?MODULE, timeout, [BQ, ?TIMEOUT_LIMIT]}.
-
-qc_bump_credit(#state{bqstate = BQ}) ->
- {call, ?MODULE, bump_credit, [BQ]}.
-
-qc_purge(#state{bqstate = BQ}) ->
- {call, ?BQMOD, purge, [BQ]}.
-
-qc_fold(#state{bqstate = BQ}) ->
- {call, ?BQMOD, fold, [makefoldfun(pos_integer()), foldacc(), BQ]}.
-
-%% Preconditions
-
-%% Create long queues by only allowing publishing
-precondition(#state{publishing = Count}, {call, _Mod, Fun, _Arg})
- when Count > 0, Fun /= publish ->
- false;
-precondition(#state{acks = Acks}, {call, ?BQMOD, Fun, _Arg})
- when Fun =:= ack; Fun =:= requeue ->
- length(Acks) > 0;
-precondition(#state{messages = Messages},
- {call, ?BQMOD, publish_delivered, _Arg}) ->
- gb_trees:is_empty(Messages);
-precondition(_S, {call, ?BQMOD, _Fun, _Arg}) ->
- true;
-precondition(_S, {call, ?MODULE, timeout, _Arg}) ->
- true;
-precondition(_S, {call, ?MODULE, bump_credit, _Arg}) ->
- true;
-precondition(#state{len = Len}, {call, ?MODULE, publish_multiple, _Arg}) ->
- Len < ?QUEUE_MAXLEN.
-
-%% Model updates
-
-next_state(S, BQ, {call, ?BQMOD, publish, [Msg, MsgProps, _Del, _Pid, _BQ]}) ->
- #state{len = Len,
- messages = Messages,
- confirms = Confirms,
- publishing = PublishCount,
- next_seq_id = NextSeq} = S,
- MsgId = {call, erlang, element, [?RECORD_INDEX(id, basic_message), Msg]},
- NeedsConfirm =
- {call, erlang, element,
- [?RECORD_INDEX(needs_confirming, message_properties), MsgProps]},
- S#state{bqstate = BQ,
- len = Len + 1,
- next_seq_id = NextSeq + 1,
- messages = gb_trees:insert(NextSeq, {MsgProps, Msg}, Messages),
- publishing = {call, erlang, max, [0, {call, erlang, '-',
- [PublishCount, 1]}]},
- confirms = case eval(NeedsConfirm) of
- true -> gb_sets:add(MsgId, Confirms);
- _ -> Confirms
- end};
-
-next_state(S, _BQ, {call, ?MODULE, publish_multiple, [PublishCount]}) ->
- S#state{publishing = PublishCount};
-
-next_state(S, Res,
- {call, ?BQMOD, publish_delivered,
- [Msg, MsgProps, _Pid, _BQ]}) ->
- #state{confirms = Confirms, acks = Acks, next_seq_id = NextSeq} = S,
- AckTag = {call, erlang, element, [1, Res]},
- BQ1 = {call, erlang, element, [2, Res]},
- MsgId = {call, erlang, element, [?RECORD_INDEX(id, basic_message), Msg]},
- NeedsConfirm =
- {call, erlang, element,
- [?RECORD_INDEX(needs_confirming, message_properties), MsgProps]},
- S#state{bqstate = BQ1,
- next_seq_id = NextSeq + 1,
- confirms = case eval(NeedsConfirm) of
- true -> gb_sets:add(MsgId, Confirms);
- _ -> Confirms
- end,
- acks = [{AckTag, {NextSeq, {MsgProps, Msg}}}|Acks]
- };
-
-next_state(S, Res, {call, ?BQMOD, fetch, [AckReq, _BQ]}) ->
- next_state_fetch_and_drop(S, Res, AckReq, 3);
-
-next_state(S, Res, {call, ?BQMOD, drop, [AckReq, _BQ]}) ->
- next_state_fetch_and_drop(S, Res, AckReq, 2);
-
-next_state(S, Res, {call, ?BQMOD, ack, [AcksArg, _BQ]}) ->
- #state{acks = AcksState} = S,
- BQ1 = {call, erlang, element, [2, Res]},
- S#state{bqstate = BQ1,
- acks = lists:foldl(fun proplists:delete/2, AcksState, AcksArg)};
-
-next_state(S, Res, {call, ?BQMOD, requeue, [AcksArg, _V]}) ->
- #state{messages = Messages, acks = AcksState} = S,
- BQ1 = {call, erlang, element, [2, Res]},
- Messages1 = lists:foldl(fun (AckTag, Msgs) ->
- {SeqId, MsgPropsMsg} =
- proplists:get_value(AckTag, AcksState),
- gb_trees:insert(SeqId, MsgPropsMsg, Msgs)
- end, Messages, AcksArg),
- S#state{bqstate = BQ1,
- len = gb_trees:size(Messages1),
- messages = Messages1,
- acks = lists:foldl(fun proplists:delete/2, AcksState, AcksArg)};
-
-next_state(S, BQ, {call, ?BQMOD, set_ram_duration_target, _Args}) ->
- S#state{bqstate = BQ};
-
-next_state(S, Res, {call, ?BQMOD, ram_duration, _Args}) ->
- BQ1 = {call, erlang, element, [2, Res]},
- S#state{bqstate = BQ1};
-
-next_state(S, Res, {call, ?BQMOD, drain_confirmed, _Args}) ->
- BQ1 = {call, erlang, element, [2, Res]},
- S#state{bqstate = BQ1};
-
-next_state(S, Res, {call, ?BQMOD, dropwhile, _Args}) ->
- BQ = {call, erlang, element, [2, Res]},
- #state{messages = Messages} = S,
- Msgs1 = drop_messages(Messages),
- S#state{bqstate = BQ, len = gb_trees:size(Msgs1), messages = Msgs1};
-
-next_state(S, _Res, {call, ?BQMOD, is_empty, _Args}) ->
- S;
-
-next_state(S, BQ, {call, ?MODULE, timeout, _Args}) ->
- S#state{bqstate = BQ};
-next_state(S, BQ, {call, ?MODULE, bump_credit, _Args}) ->
- S#state{bqstate = BQ};
-
-next_state(S, Res, {call, ?BQMOD, purge, _Args}) ->
- BQ1 = {call, erlang, element, [2, Res]},
- S#state{bqstate = BQ1, len = 0, messages = gb_trees:empty()};
-
-next_state(S, Res, {call, ?BQMOD, fold, _Args}) ->
- BQ1 = {call, erlang, element, [2, Res]},
- S#state{bqstate = BQ1}.
-
-%% Postconditions
-
-postcondition(S, {call, ?BQMOD, fetch, _Args}, Res) ->
- #state{messages = Messages, len = Len, acks = Acks, confirms = Confrms} = S,
- case Res of
- {{MsgFetched, _IsDelivered, AckTag}, _BQ} ->
- {_SeqId, {_MsgProps, Msg}} = gb_trees:smallest(Messages),
- MsgFetched =:= Msg andalso
- not proplists:is_defined(AckTag, Acks) andalso
- not gb_sets:is_element(AckTag, Confrms) andalso
- Len =/= 0;
- {empty, _BQ} ->
- Len =:= 0
- end;
-
-postcondition(S, {call, ?BQMOD, drop, _Args}, Res) ->
- #state{messages = Messages, len = Len, acks = Acks, confirms = Confrms} = S,
- case Res of
- {{MsgIdFetched, AckTag}, _BQ} ->
- {_SeqId, {_MsgProps, Msg}} = gb_trees:smallest(Messages),
- MsgId = eval({call, erlang, element,
- [?RECORD_INDEX(id, basic_message), Msg]}),
- MsgIdFetched =:= MsgId andalso
- not proplists:is_defined(AckTag, Acks) andalso
- not gb_sets:is_element(AckTag, Confrms) andalso
- Len =/= 0;
- {empty, _BQ} ->
- Len =:= 0
- end;
-
-postcondition(S, {call, ?BQMOD, publish_delivered, _Args}, {AckTag, _BQ}) ->
- #state{acks = Acks, confirms = Confrms} = S,
- not proplists:is_defined(AckTag, Acks) andalso
- not gb_sets:is_element(AckTag, Confrms);
-
-postcondition(#state{len = Len}, {call, ?BQMOD, purge, _Args}, Res) ->
- {PurgeCount, _BQ} = Res,
- Len =:= PurgeCount;
-
-postcondition(#state{len = Len}, {call, ?BQMOD, is_empty, _Args}, Res) ->
- (Len =:= 0) =:= Res;
-
-postcondition(S, {call, ?BQMOD, drain_confirmed, _Args}, Res) ->
- #state{confirms = Confirms} = S,
- {ReportedConfirmed, _BQ} = Res,
- lists:all(fun (M) -> gb_sets:is_element(M, Confirms) end,
- ReportedConfirmed);
-
-postcondition(S, {call, ?BQMOD, fold, [FoldFun, Acc0, _BQ0]}, {Res, _BQ1}) ->
- #state{messages = Messages} = S,
- {_, Model} = lists:foldl(fun ({_SeqId, {_MsgProps, _Msg}}, {stop, Acc}) ->
- {stop, Acc};
- ({_SeqId, {MsgProps, Msg}}, {cont, Acc}) ->
- FoldFun(Msg, MsgProps, false, Acc)
- end, {cont, Acc0}, gb_trees:to_list(Messages)),
- true = Model =:= Res;
-
-postcondition(#state{bqstate = BQ, len = Len}, {call, _M, _F, _A}, _Res) ->
- ?BQMOD:len(BQ) =:= Len.
-
-%% Helpers
-
-publish_multiple(_C) ->
- ok.
-
-timeout(BQ, 0) ->
- BQ;
-timeout(BQ, AtMost) ->
- case ?BQMOD:needs_timeout(BQ) of
- false -> BQ;
- _ -> timeout(?BQMOD:timeout(BQ), AtMost - 1)
- end.
-
-bump_credit(BQ) ->
- case credit_flow:blocked() of
- false -> BQ;
- true -> receive
- {bump_credit, Msg} ->
- credit_flow:handle_bump_msg(Msg),
- ?BQMOD:resume(BQ)
- end
- end.
-
-qc_message_payload() -> ?SIZED(Size, resize(Size * Size, binary())).
-
-qc_routing_key() -> noshrink(binary(10)).
-
-qc_delivery_mode() -> oneof([1, 2]).
-
-qc_message() -> qc_message(qc_delivery_mode()).
-
-qc_message(DeliveryMode) ->
- {call, rabbit_basic, message, [qc_default_exchange(),
- qc_routing_key(),
- #'P_basic'{delivery_mode = DeliveryMode},
- qc_message_payload()]}.
-
-qc_default_exchange() ->
- {call, rabbit_misc, r, [<<>>, exchange, <<>>]}.
-
-qc_variable_queue_init(Q) ->
- {call, ?BQMOD, init,
- [Q, new, function(2, {ok, []})]}.
-
-qc_test_q() -> {call, rabbit_misc, r, [<<"/">>, queue, noshrink(binary(16))]}.
-
-qc_test_queue() -> qc_test_queue(boolean()).
-
-qc_test_queue(Durable) ->
- #amqqueue{name = qc_test_q(),
- durable = Durable,
- auto_delete = false,
- arguments = [],
- pid = self()}.
-
-rand_choice([]) -> [];
-rand_choice(List) -> rand_choice(List, [], random:uniform(length(List))).
-
-rand_choice(_List, Selection, 0) ->
- Selection;
-rand_choice(List, Selection, N) ->
- Picked = lists:nth(random:uniform(length(List)), List),
- rand_choice(List -- [Picked], [Picked | Selection],
- N - 1).
-
-makefoldfun(Size) ->
- fun (Msg, _MsgProps, Unacked, Acc) ->
- case {length(Acc) > Size, Unacked} of
- {false, false} -> {cont, [Msg | Acc]};
- {false, true} -> {cont, Acc};
- {true, _} -> {stop, Acc}
- end
- end.
-foldacc() -> [].
-
-dropfun(Props) ->
- Expiry = eval({call, erlang, element,
- [?RECORD_INDEX(expiry, message_properties), Props]}),
- Expiry =/= 1.
-
-drop_messages(Messages) ->
- case gb_trees:is_empty(Messages) of
- true ->
- Messages;
- false -> {_Seq, MsgProps_Msg, M2} = gb_trees:take_smallest(Messages),
- MsgProps = {call, erlang, element, [1, MsgProps_Msg]},
- case dropfun(MsgProps) of
- true -> drop_messages(M2);
- false -> Messages
- end
- end.
-
-next_state_fetch_and_drop(S, Res, AckReq, AckTagIdx) ->
- #state{len = Len, messages = Messages, acks = Acks} = S,
- ResultInfo = {call, erlang, element, [1, Res]},
- BQ1 = {call, erlang, element, [2, Res]},
- AckTag = {call, erlang, element, [AckTagIdx, ResultInfo]},
- S1 = S#state{bqstate = BQ1},
- case gb_trees:is_empty(Messages) of
- true -> S1;
- false -> {SeqId, MsgProp_Msg, M2} = gb_trees:take_smallest(Messages),
- S2 = S1#state{len = Len - 1, messages = M2},
- case AckReq of
- true ->
- S2#state{acks = [{AckTag, {SeqId, MsgProp_Msg}}|Acks]};
- false ->
- S2
- end
- end.
-
--else.
-
--export([prop_disabled/0]).
-
-prop_disabled() ->
- exit({compiled_without_proper,
- "PropEr was not present during compilation of the test module. "
- "Hence all tests are disabled."}).
-
--endif.
diff --git a/test/src/rabbit_runtime_parameters_test.erl b/test/src/rabbit_runtime_parameters_test.erl
deleted file mode 100644
index d88975b61e..0000000000
--- a/test/src/rabbit_runtime_parameters_test.erl
+++ /dev/null
@@ -1,72 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_runtime_parameters_test).
--behaviour(rabbit_runtime_parameter).
--behaviour(rabbit_policy_validator).
-
--include("rabbit.hrl").
-
--export([validate/5, notify/4, notify_clear/3]).
--export([register/0, unregister/0]).
--export([validate_policy/1]).
--export([register_policy_validator/0, unregister_policy_validator/0]).
-
-%----------------------------------------------------------------------------
-
-register() ->
- rabbit_registry:register(runtime_parameter, <<"test">>, ?MODULE).
-
-unregister() ->
- rabbit_registry:unregister(runtime_parameter, <<"test">>).
-
-validate(_, <<"test">>, <<"good">>, _Term, _User) -> ok;
-validate(_, <<"test">>, <<"maybe">>, <<"good">>, _User) -> ok;
-validate(_, <<"test">>, <<"admin">>, _Term, none) -> ok;
-validate(_, <<"test">>, <<"admin">>, _Term, User) ->
- case lists:member(administrator, User#user.tags) of
- true -> ok;
- false -> {error, "meh", []}
- end;
-validate(_, <<"test">>, _, _, _) -> {error, "meh", []}.
-
-notify(_, _, _, _) -> ok.
-notify_clear(_, _, _) -> ok.
-
-%----------------------------------------------------------------------------
-
-register_policy_validator() ->
- rabbit_registry:register(policy_validator, <<"testeven">>, ?MODULE),
- rabbit_registry:register(policy_validator, <<"testpos">>, ?MODULE).
-
-unregister_policy_validator() ->
- rabbit_registry:unregister(policy_validator, <<"testeven">>),
- rabbit_registry:unregister(policy_validator, <<"testpos">>).
-
-validate_policy([{<<"testeven">>, Terms}]) when is_list(Terms) ->
- case length(Terms) rem 2 =:= 0 of
- true -> ok;
- false -> {error, "meh", []}
- end;
-
-validate_policy([{<<"testpos">>, Terms}]) when is_list(Terms) ->
- case lists:all(fun (N) -> is_integer(N) andalso N > 0 end, Terms) of
- true -> ok;
- false -> {error, "meh", []}
- end;
-
-validate_policy(_) ->
- {error, "meh", []}.
diff --git a/test/src/rabbit_tests.erl b/test/src/rabbit_tests.erl
deleted file mode 100644
index b0aaa53b3e..0000000000
--- a/test/src/rabbit_tests.erl
+++ /dev/null
@@ -1,3094 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_tests).
-
--compile([export_all]).
-
--export([all_tests/0]).
-
--import(rabbit_misc, [pget/2]).
-
--include("rabbit.hrl").
--include("rabbit_framing.hrl").
--include_lib("kernel/include/file.hrl").
-
--define(PERSISTENT_MSG_STORE, msg_store_persistent).
--define(TRANSIENT_MSG_STORE, msg_store_transient).
--define(CLEANUP_QUEUE_NAME, <<"cleanup-queue">>).
--define(TIMEOUT, 30000).
-
-all_tests() ->
- try
- all_tests0()
- catch
- Type:Error ->
- rabbit_misc:format(
- "Tests failed~nError: {~p, ~p}~nStack trace:~n~p~n",
- [Type, Error, erlang:get_stacktrace()])
- end.
-
-all_tests0() ->
- ok = setup_cluster(),
- ok = truncate:test(),
- ok = supervisor2_tests:test_all(),
- passed = gm_tests:all_tests(),
- passed = mirrored_supervisor_tests:all_tests(),
- application:set_env(rabbit, file_handles_high_watermark, 10),
- ok = file_handle_cache:set_limit(10),
- passed = test_version_equivalance(),
- passed = test_file_handle_cache(),
- passed = test_backing_queue(),
- passed = test_rabbit_basic_header_handling(),
- passed = test_priority_queue(),
- passed = test_pg_local(),
- passed = test_unfold(),
- passed = test_supervisor_delayed_restart(),
- passed = test_table_codec(),
- passed = test_content_framing(),
- passed = test_content_transcoding(),
- passed = test_topic_matching(),
- passed = test_log_management(),
- passed = test_app_management(),
- passed = test_log_management_during_startup(),
- passed = test_statistics(),
- passed = test_arguments_parser(),
- passed = test_dynamic_mirroring(),
- passed = test_user_management(),
- passed = test_runtime_parameters(),
- passed = test_policy_validation(),
- passed = test_policy_opts_validation(),
- passed = test_ha_policy_validation(),
- passed = test_server_status(),
- passed = test_amqp_connection_refusal(),
- passed = test_confirms(),
- passed = test_with_state(),
- passed = test_mcall(),
- passed =
- do_if_secondary_node(
- fun run_cluster_dependent_tests/1,
- fun (SecondaryNode) ->
- io:format("Skipping cluster dependent tests with node ~p~n",
- [SecondaryNode]),
- passed
- end),
- passed = test_configurable_server_properties(),
- passed = vm_memory_monitor_tests:all_tests(),
- passed = test_memory_high_watermark(),
- passed = on_disk_store_tunable_parameter_validation_test:test_msg_store_parameter_validation(),
- passed = credit_flow_test:test_credit_flow_settings(),
- passed =
- do_if_meck_enabled(
- fun disk_monitor_test/0,
- fun () ->
- io:format("Skipping meck dependent tests ~n"),
- passed
- end),
- passed.
-
-
-do_if_secondary_node(Up, Down) ->
- SecondaryNode = rabbit_nodes:make("hare"),
-
- case net_adm:ping(SecondaryNode) of
- pong -> Up(SecondaryNode);
- pang -> Down(SecondaryNode)
- end.
-
-do_if_meck_enabled(Enabled, Disabled) ->
- case code:which(meck) of
- non_existing -> Disabled();
- _ -> Enabled()
- end.
-
-setup_cluster() ->
- do_if_secondary_node(
- fun (SecondaryNode) ->
- ok = control_action(stop_app, []),
- ok = control_action(join_cluster,
- [atom_to_list(SecondaryNode)]),
- ok = control_action(start_app, []),
- ok = control_action(start_app, SecondaryNode, [], [])
- end,
- fun (_) -> ok end).
-
-maybe_run_cluster_dependent_tests() ->
- do_if_secondary_node(
- fun (SecondaryNode) ->
- passed = run_cluster_dependent_tests(SecondaryNode)
- end,
- fun (SecondaryNode) ->
- io:format("Skipping cluster dependent tests with node ~p~n",
- [SecondaryNode])
- end).
-
-run_cluster_dependent_tests(SecondaryNode) ->
- io:format("Running cluster dependent tests with node ~p~n", [SecondaryNode]),
- passed = test_delegates_async(SecondaryNode),
- passed = test_delegates_sync(SecondaryNode),
- passed = test_queue_cleanup(SecondaryNode),
- passed = test_declare_on_dead_queue(SecondaryNode),
- passed = test_refresh_events(SecondaryNode),
-
- %% we now run the tests remotely, so that code coverage on the
- %% local node picks up more of the delegate
- Node = node(),
- Self = self(),
- Remote = spawn(SecondaryNode,
- fun () -> Rs = [ test_delegates_async(Node),
- test_delegates_sync(Node),
- test_queue_cleanup(Node),
- test_declare_on_dead_queue(Node),
- test_refresh_events(Node) ],
- Self ! {self(), Rs}
- end),
- receive
- {Remote, Result} ->
- Result = lists:duplicate(length(Result), passed)
- after 30000 ->
- throw(timeout)
- end,
-
- passed.
-
-test_version_equivalance() ->
- true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.0"),
- true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.1"),
- true = rabbit_misc:version_minor_equivalent("%%VSN%%", "%%VSN%%"),
- false = rabbit_misc:version_minor_equivalent("3.0.0", "3.1.0"),
- false = rabbit_misc:version_minor_equivalent("3.0.0", "3.0"),
- false = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.0.1"),
- false = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.foo"),
- passed.
-
-test_rabbit_basic_header_handling() ->
- passed = write_table_with_invalid_existing_type_test(),
- passed = invalid_existing_headers_test(),
- passed = disparate_invalid_header_entries_accumulate_separately_test(),
- passed = corrupt_or_invalid_headers_are_overwritten_test(),
- passed = invalid_same_header_entry_accumulation_test(),
- passed.
-
--define(XDEATH_TABLE,
- [{<<"reason">>, longstr, <<"blah">>},
- {<<"queue">>, longstr, <<"foo.bar.baz">>},
- {<<"exchange">>, longstr, <<"my-exchange">>},
- {<<"routing-keys">>, array, []}]).
-
--define(ROUTE_TABLE, [{<<"redelivered">>, bool, <<"true">>}]).
-
--define(BAD_HEADER(K), {<<K>>, longstr, <<"bad ", K>>}).
--define(BAD_HEADER2(K, Suf), {<<K>>, longstr, <<"bad ", K, Suf>>}).
--define(FOUND_BAD_HEADER(K), {<<K>>, array, [{longstr, <<"bad ", K>>}]}).
-
-write_table_with_invalid_existing_type_test() ->
- prepend_check(<<"header1">>, ?XDEATH_TABLE, [?BAD_HEADER("header1")]),
- passed.
-
-invalid_existing_headers_test() ->
- Headers =
- prepend_check(<<"header2">>, ?ROUTE_TABLE, [?BAD_HEADER("header2")]),
- {array, [{table, ?ROUTE_TABLE}]} =
- rabbit_misc:table_lookup(Headers, <<"header2">>),
- passed.
-
-disparate_invalid_header_entries_accumulate_separately_test() ->
- BadHeaders = [?BAD_HEADER("header2")],
- Headers = prepend_check(<<"header2">>, ?ROUTE_TABLE, BadHeaders),
- Headers2 = prepend_check(<<"header1">>, ?XDEATH_TABLE,
- [?BAD_HEADER("header1") | Headers]),
- {table, [?FOUND_BAD_HEADER("header1"),
- ?FOUND_BAD_HEADER("header2")]} =
- rabbit_misc:table_lookup(Headers2, ?INVALID_HEADERS_KEY),
- passed.
-
-corrupt_or_invalid_headers_are_overwritten_test() ->
- Headers0 = [?BAD_HEADER("header1"),
- ?BAD_HEADER("x-invalid-headers")],
- Headers1 = prepend_check(<<"header1">>, ?XDEATH_TABLE, Headers0),
- {table,[?FOUND_BAD_HEADER("header1"),
- ?FOUND_BAD_HEADER("x-invalid-headers")]} =
- rabbit_misc:table_lookup(Headers1, ?INVALID_HEADERS_KEY),
- passed.
-
-invalid_same_header_entry_accumulation_test() ->
- BadHeader1 = ?BAD_HEADER2("header1", "a"),
- Headers = prepend_check(<<"header1">>, ?ROUTE_TABLE, [BadHeader1]),
- Headers2 = prepend_check(<<"header1">>, ?ROUTE_TABLE,
- [?BAD_HEADER2("header1", "b") | Headers]),
- {table, InvalidHeaders} =
- rabbit_misc:table_lookup(Headers2, ?INVALID_HEADERS_KEY),
- {array, [{longstr,<<"bad header1b">>},
- {longstr,<<"bad header1a">>}]} =
- rabbit_misc:table_lookup(InvalidHeaders, <<"header1">>),
- passed.
-
-prepend_check(HeaderKey, HeaderTable, Headers) ->
- Headers1 = rabbit_basic:prepend_table_header(
- HeaderKey, HeaderTable, Headers),
- {table, Invalid} =
- rabbit_misc:table_lookup(Headers1, ?INVALID_HEADERS_KEY),
- {Type, Value} = rabbit_misc:table_lookup(Headers, HeaderKey),
- {array, [{Type, Value} | _]} =
- rabbit_misc:table_lookup(Invalid, HeaderKey),
- Headers1.
-
-test_priority_queue() ->
-
- false = priority_queue:is_queue(not_a_queue),
-
- %% empty Q
- Q = priority_queue:new(),
- {true, true, 0, [], []} = test_priority_queue(Q),
-
- %% 1-4 element no-priority Q
- true = lists:all(fun (X) -> X =:= passed end,
- lists:map(fun test_simple_n_element_queue/1,
- lists:seq(1, 4))),
-
- %% 1-element priority Q
- Q1 = priority_queue:in(foo, 1, priority_queue:new()),
- {true, false, 1, [{1, foo}], [foo]} =
- test_priority_queue(Q1),
-
- %% 2-element same-priority Q
- Q2 = priority_queue:in(bar, 1, Q1),
- {true, false, 2, [{1, foo}, {1, bar}], [foo, bar]} =
- test_priority_queue(Q2),
-
- %% 2-element different-priority Q
- Q3 = priority_queue:in(bar, 2, Q1),
- {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} =
- test_priority_queue(Q3),
-
- %% 1-element negative priority Q
- Q4 = priority_queue:in(foo, -1, priority_queue:new()),
- {true, false, 1, [{-1, foo}], [foo]} = test_priority_queue(Q4),
-
- %% merge 2 * 1-element no-priority Qs
- Q5 = priority_queue:join(priority_queue:in(foo, Q),
- priority_queue:in(bar, Q)),
- {true, false, 2, [{0, foo}, {0, bar}], [foo, bar]} =
- test_priority_queue(Q5),
-
- %% merge 1-element no-priority Q with 1-element priority Q
- Q6 = priority_queue:join(priority_queue:in(foo, Q),
- priority_queue:in(bar, 1, Q)),
- {true, false, 2, [{1, bar}, {0, foo}], [bar, foo]} =
- test_priority_queue(Q6),
-
- %% merge 1-element priority Q with 1-element no-priority Q
- Q7 = priority_queue:join(priority_queue:in(foo, 1, Q),
- priority_queue:in(bar, Q)),
- {true, false, 2, [{1, foo}, {0, bar}], [foo, bar]} =
- test_priority_queue(Q7),
-
- %% merge 2 * 1-element same-priority Qs
- Q8 = priority_queue:join(priority_queue:in(foo, 1, Q),
- priority_queue:in(bar, 1, Q)),
- {true, false, 2, [{1, foo}, {1, bar}], [foo, bar]} =
- test_priority_queue(Q8),
-
- %% merge 2 * 1-element different-priority Qs
- Q9 = priority_queue:join(priority_queue:in(foo, 1, Q),
- priority_queue:in(bar, 2, Q)),
- {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} =
- test_priority_queue(Q9),
-
- %% merge 2 * 1-element different-priority Qs (other way around)
- Q10 = priority_queue:join(priority_queue:in(bar, 2, Q),
- priority_queue:in(foo, 1, Q)),
- {true, false, 2, [{2, bar}, {1, foo}], [bar, foo]} =
- test_priority_queue(Q10),
-
- %% merge 2 * 2-element multi-different-priority Qs
- Q11 = priority_queue:join(Q6, Q5),
- {true, false, 4, [{1, bar}, {0, foo}, {0, foo}, {0, bar}],
- [bar, foo, foo, bar]} = test_priority_queue(Q11),
-
- %% and the other way around
- Q12 = priority_queue:join(Q5, Q6),
- {true, false, 4, [{1, bar}, {0, foo}, {0, bar}, {0, foo}],
- [bar, foo, bar, foo]} = test_priority_queue(Q12),
-
- %% merge with negative priorities
- Q13 = priority_queue:join(Q4, Q5),
- {true, false, 3, [{0, foo}, {0, bar}, {-1, foo}], [foo, bar, foo]} =
- test_priority_queue(Q13),
-
- %% and the other way around
- Q14 = priority_queue:join(Q5, Q4),
- {true, false, 3, [{0, foo}, {0, bar}, {-1, foo}], [foo, bar, foo]} =
- test_priority_queue(Q14),
-
- %% joins with empty queues:
- Q1 = priority_queue:join(Q, Q1),
- Q1 = priority_queue:join(Q1, Q),
-
- %% insert with priority into non-empty zero-priority queue
- Q15 = priority_queue:in(baz, 1, Q5),
- {true, false, 3, [{1, baz}, {0, foo}, {0, bar}], [baz, foo, bar]} =
- test_priority_queue(Q15),
-
- %% 1-element infinity priority Q
- Q16 = priority_queue:in(foo, infinity, Q),
- {true, false, 1, [{infinity, foo}], [foo]} = test_priority_queue(Q16),
-
- %% add infinity to 0-priority Q
- Q17 = priority_queue:in(foo, infinity, priority_queue:in(bar, Q)),
- {true, false, 2, [{infinity, foo}, {0, bar}], [foo, bar]} =
- test_priority_queue(Q17),
-
- %% and the other way around
- Q18 = priority_queue:in(bar, priority_queue:in(foo, infinity, Q)),
- {true, false, 2, [{infinity, foo}, {0, bar}], [foo, bar]} =
- test_priority_queue(Q18),
-
- %% add infinity to mixed-priority Q
- Q19 = priority_queue:in(qux, infinity, Q3),
- {true, false, 3, [{infinity, qux}, {2, bar}, {1, foo}], [qux, bar, foo]} =
- test_priority_queue(Q19),
-
- %% merge the above with a negative priority Q
- Q20 = priority_queue:join(Q19, Q4),
- {true, false, 4, [{infinity, qux}, {2, bar}, {1, foo}, {-1, foo}],
- [qux, bar, foo, foo]} = test_priority_queue(Q20),
-
- %% merge two infinity priority queues
- Q21 = priority_queue:join(priority_queue:in(foo, infinity, Q),
- priority_queue:in(bar, infinity, Q)),
- {true, false, 2, [{infinity, foo}, {infinity, bar}], [foo, bar]} =
- test_priority_queue(Q21),
-
- %% merge two mixed priority with infinity queues
- Q22 = priority_queue:join(Q18, Q20),
- {true, false, 6, [{infinity, foo}, {infinity, qux}, {2, bar}, {1, foo},
- {0, bar}, {-1, foo}], [foo, qux, bar, foo, bar, foo]} =
- test_priority_queue(Q22),
-
- passed.
-
-priority_queue_in_all(Q, L) ->
- lists:foldl(fun (X, Acc) -> priority_queue:in(X, Acc) end, Q, L).
-
-priority_queue_out_all(Q) ->
- case priority_queue:out(Q) of
- {empty, _} -> [];
- {{value, V}, Q1} -> [V | priority_queue_out_all(Q1)]
- end.
-
-test_priority_queue(Q) ->
- {priority_queue:is_queue(Q),
- priority_queue:is_empty(Q),
- priority_queue:len(Q),
- priority_queue:to_list(Q),
- priority_queue_out_all(Q)}.
-
-test_simple_n_element_queue(N) ->
- Items = lists:seq(1, N),
- Q = priority_queue_in_all(priority_queue:new(), Items),
- ToListRes = [{0, X} || X <- Items],
- {true, false, N, ToListRes, Items} = test_priority_queue(Q),
- passed.
-
-test_pg_local() ->
- [P, Q] = [spawn(fun () -> receive X -> X end end) || _ <- [x, x]],
- check_pg_local(ok, [], []),
- check_pg_local(pg_local:join(a, P), [P], []),
- check_pg_local(pg_local:join(b, P), [P], [P]),
- check_pg_local(pg_local:join(a, P), [P, P], [P]),
- check_pg_local(pg_local:join(a, Q), [P, P, Q], [P]),
- check_pg_local(pg_local:join(b, Q), [P, P, Q], [P, Q]),
- check_pg_local(pg_local:join(b, Q), [P, P, Q], [P, Q, Q]),
- check_pg_local(pg_local:leave(a, P), [P, Q], [P, Q, Q]),
- check_pg_local(pg_local:leave(b, P), [P, Q], [Q, Q]),
- check_pg_local(pg_local:leave(a, P), [Q], [Q, Q]),
- check_pg_local(pg_local:leave(a, P), [Q], [Q, Q]),
- [begin X ! done,
- Ref = erlang:monitor(process, X),
- receive {'DOWN', Ref, process, X, _Info} -> ok end
- end || X <- [P, Q]],
- check_pg_local(ok, [], []),
- passed.
-
-check_pg_local(ok, APids, BPids) ->
- ok = pg_local:sync(),
- [true, true] = [lists:sort(Pids) == lists:sort(pg_local:get_members(Key)) ||
- {Key, Pids} <- [{a, APids}, {b, BPids}]].
-
-test_unfold() ->
- {[], test} = rabbit_misc:unfold(fun (_V) -> false end, test),
- List = lists:seq(2,20,2),
- {List, 0} = rabbit_misc:unfold(fun (0) -> false;
- (N) -> {true, N*2, N-1}
- end, 10),
- passed.
-
-test_table_codec() ->
- %% FIXME this does not test inexact numbers (double and float) yet,
- %% because they won't pass the equality assertions
- Table = [{<<"longstr">>, longstr, <<"Here is a long string">>},
- {<<"signedint">>, signedint, 12345},
- {<<"decimal">>, decimal, {3, 123456}},
- {<<"timestamp">>, timestamp, 109876543209876},
- {<<"table">>, table, [{<<"one">>, signedint, 54321},
- {<<"two">>, longstr,
- <<"A long string">>}]},
- {<<"byte">>, byte, -128},
- {<<"long">>, long, 1234567890},
- {<<"short">>, short, 655},
- {<<"bool">>, bool, true},
- {<<"binary">>, binary, <<"a binary string">>},
- {<<"void">>, void, undefined},
- {<<"array">>, array, [{signedint, 54321},
- {longstr, <<"A long string">>}]}
- ],
- Binary = <<
- 7,"longstr", "S", 21:32, "Here is a long string",
- 9,"signedint", "I", 12345:32/signed,
- 7,"decimal", "D", 3, 123456:32,
- 9,"timestamp", "T", 109876543209876:64,
- 5,"table", "F", 31:32, % length of table
- 3,"one", "I", 54321:32,
- 3,"two", "S", 13:32, "A long string",
- 4,"byte", "b", -128:8/signed,
- 4,"long", "l", 1234567890:64,
- 5,"short", "s", 655:16,
- 4,"bool", "t", 1,
- 6,"binary", "x", 15:32, "a binary string",
- 4,"void", "V",
- 5,"array", "A", 23:32,
- "I", 54321:32,
- "S", 13:32, "A long string"
- >>,
- Binary = rabbit_binary_generator:generate_table(Table),
- Table = rabbit_binary_parser:parse_table(Binary),
- passed.
-
-%% Test that content frames don't exceed frame-max
-test_content_framing(FrameMax, BodyBin) ->
- [Header | Frames] =
- rabbit_binary_generator:build_simple_content_frames(
- 1,
- rabbit_binary_generator:ensure_content_encoded(
- rabbit_basic:build_content(#'P_basic'{}, BodyBin),
- rabbit_framing_amqp_0_9_1),
- FrameMax,
- rabbit_framing_amqp_0_9_1),
- %% header is formatted correctly and the size is the total of the
- %% fragments
- <<_FrameHeader:7/binary, _ClassAndWeight:4/binary,
- BodySize:64/unsigned, _Rest/binary>> = list_to_binary(Header),
- BodySize = size(BodyBin),
- true = lists:all(
- fun (ContentFrame) ->
- FrameBinary = list_to_binary(ContentFrame),
- %% assert
- <<_TypeAndChannel:3/binary,
- Size:32/unsigned, _Payload:Size/binary, 16#CE>> =
- FrameBinary,
- size(FrameBinary) =< FrameMax
- end, Frames),
- passed.
-
-test_content_framing() ->
- %% no content
- passed = test_content_framing(4096, <<>>),
- %% easily fit in one frame
- passed = test_content_framing(4096, <<"Easy">>),
- %% exactly one frame (empty frame = 8 bytes)
- passed = test_content_framing(11, <<"One">>),
- %% more than one frame
- passed = test_content_framing(11, <<"More than one frame">>),
- passed.
-
-test_content_transcoding() ->
- %% there are no guarantees provided by 'clear' - it's just a hint
- ClearDecoded = fun rabbit_binary_parser:clear_decoded_content/1,
- ClearEncoded = fun rabbit_binary_generator:clear_encoded_content/1,
- EnsureDecoded =
- fun (C0) ->
- C1 = rabbit_binary_parser:ensure_content_decoded(C0),
- true = C1#content.properties =/= none,
- C1
- end,
- EnsureEncoded =
- fun (Protocol) ->
- fun (C0) ->
- C1 = rabbit_binary_generator:ensure_content_encoded(
- C0, Protocol),
- true = C1#content.properties_bin =/= none,
- C1
- end
- end,
- %% Beyond the assertions in Ensure*, the only testable guarantee
- %% is that the operations should never fail.
- %%
- %% If we were using quickcheck we'd simply stuff all the above
- %% into a generator for sequences of operations. In the absence of
- %% quickcheck we pick particularly interesting sequences that:
- %%
- %% - execute every op twice since they are idempotent
- %% - invoke clear_decoded, clear_encoded, decode and transcode
- %% with one or both of decoded and encoded content present
- [begin
- sequence_with_content([Op]),
- sequence_with_content([ClearEncoded, Op]),
- sequence_with_content([ClearDecoded, Op])
- end || Op <- [ClearDecoded, ClearEncoded, EnsureDecoded,
- EnsureEncoded(rabbit_framing_amqp_0_9_1),
- EnsureEncoded(rabbit_framing_amqp_0_8)]],
- passed.
-
-sequence_with_content(Sequence) ->
- lists:foldl(fun (F, V) -> F(F(V)) end,
- rabbit_binary_generator:ensure_content_encoded(
- rabbit_basic:build_content(#'P_basic'{}, <<>>),
- rabbit_framing_amqp_0_9_1),
- Sequence).
-
-test_topic_matching() ->
- XName = #resource{virtual_host = <<"/">>,
- kind = exchange,
- name = <<"test_exchange">>},
- X0 = #exchange{name = XName, type = topic, durable = false,
- auto_delete = false, arguments = []},
- X = rabbit_exchange_decorator:set(X0),
- %% create
- rabbit_exchange_type_topic:validate(X),
- exchange_op_callback(X, create, []),
-
- %% add some bindings
- Bindings = [#binding{source = XName,
- key = list_to_binary(Key),
- destination = #resource{virtual_host = <<"/">>,
- kind = queue,
- name = list_to_binary(Q)},
- args = Args} ||
- {Key, Q, Args} <- [{"a.b.c", "t1", []},
- {"a.*.c", "t2", []},
- {"a.#.b", "t3", []},
- {"a.b.b.c", "t4", []},
- {"#", "t5", []},
- {"#.#", "t6", []},
- {"#.b", "t7", []},
- {"*.*", "t8", []},
- {"a.*", "t9", []},
- {"*.b.c", "t10", []},
- {"a.#", "t11", []},
- {"a.#.#", "t12", []},
- {"b.b.c", "t13", []},
- {"a.b.b", "t14", []},
- {"a.b", "t15", []},
- {"b.c", "t16", []},
- {"", "t17", []},
- {"*.*.*", "t18", []},
- {"vodka.martini", "t19", []},
- {"a.b.c", "t20", []},
- {"*.#", "t21", []},
- {"#.*.#", "t22", []},
- {"*.#.#", "t23", []},
- {"#.#.#", "t24", []},
- {"*", "t25", []},
- {"#.b.#", "t26", []},
- {"args-test", "t27",
- [{<<"foo">>, longstr, <<"bar">>}]},
- {"args-test", "t27", %% Note aliasing
- [{<<"foo">>, longstr, <<"baz">>}]}]],
- lists:foreach(fun (B) -> exchange_op_callback(X, add_binding, [B]) end,
- Bindings),
-
- %% test some matches
- test_topic_expect_match(
- X, [{"a.b.c", ["t1", "t2", "t5", "t6", "t10", "t11", "t12",
- "t18", "t20", "t21", "t22", "t23", "t24",
- "t26"]},
- {"a.b", ["t3", "t5", "t6", "t7", "t8", "t9", "t11",
- "t12", "t15", "t21", "t22", "t23", "t24",
- "t26"]},
- {"a.b.b", ["t3", "t5", "t6", "t7", "t11", "t12", "t14",
- "t18", "t21", "t22", "t23", "t24", "t26"]},
- {"", ["t5", "t6", "t17", "t24"]},
- {"b.c.c", ["t5", "t6", "t18", "t21", "t22", "t23",
- "t24", "t26"]},
- {"a.a.a.a.a", ["t5", "t6", "t11", "t12", "t21", "t22",
- "t23", "t24"]},
- {"vodka.gin", ["t5", "t6", "t8", "t21", "t22", "t23",
- "t24"]},
- {"vodka.martini", ["t5", "t6", "t8", "t19", "t21", "t22", "t23",
- "t24"]},
- {"b.b.c", ["t5", "t6", "t10", "t13", "t18", "t21",
- "t22", "t23", "t24", "t26"]},
- {"nothing.here.at.all", ["t5", "t6", "t21", "t22", "t23", "t24"]},
- {"oneword", ["t5", "t6", "t21", "t22", "t23", "t24",
- "t25"]},
- {"args-test", ["t5", "t6", "t21", "t22", "t23", "t24",
- "t25", "t27"]}]),
- %% remove some bindings
- RemovedBindings = [lists:nth(1, Bindings), lists:nth(5, Bindings),
- lists:nth(11, Bindings), lists:nth(19, Bindings),
- lists:nth(21, Bindings), lists:nth(28, Bindings)],
- exchange_op_callback(X, remove_bindings, [RemovedBindings]),
- RemainingBindings = ordsets:to_list(
- ordsets:subtract(ordsets:from_list(Bindings),
- ordsets:from_list(RemovedBindings))),
-
- %% test some matches
- test_topic_expect_match(
- X,
- [{"a.b.c", ["t2", "t6", "t10", "t12", "t18", "t20", "t22",
- "t23", "t24", "t26"]},
- {"a.b", ["t3", "t6", "t7", "t8", "t9", "t12", "t15",
- "t22", "t23", "t24", "t26"]},
- {"a.b.b", ["t3", "t6", "t7", "t12", "t14", "t18", "t22",
- "t23", "t24", "t26"]},
- {"", ["t6", "t17", "t24"]},
- {"b.c.c", ["t6", "t18", "t22", "t23", "t24", "t26"]},
- {"a.a.a.a.a", ["t6", "t12", "t22", "t23", "t24"]},
- {"vodka.gin", ["t6", "t8", "t22", "t23", "t24"]},
- {"vodka.martini", ["t6", "t8", "t22", "t23", "t24"]},
- {"b.b.c", ["t6", "t10", "t13", "t18", "t22", "t23",
- "t24", "t26"]},
- {"nothing.here.at.all", ["t6", "t22", "t23", "t24"]},
- {"oneword", ["t6", "t22", "t23", "t24", "t25"]},
- {"args-test", ["t6", "t22", "t23", "t24", "t25", "t27"]}]),
-
- %% remove the entire exchange
- exchange_op_callback(X, delete, [RemainingBindings]),
- %% none should match now
- test_topic_expect_match(X, [{"a.b.c", []}, {"b.b.c", []}, {"", []}]),
- passed.
-
-%% Drive an exchange-type callback the way rabbit_exchange does in
-%% production: first in the 'transaction' phase inside an mnesia
-%% transaction, then again in the 'none' (post-commit) phase.
-%% X is the #exchange{}; Fun is the callback name (create, delete,
-%% add_binding, remove_bindings, ...); Args are the extra arguments
-%% appended after the exchange itself.
-exchange_op_callback(X, Fun, Args) ->
- rabbit_misc:execute_mnesia_transaction(
- fun () -> rabbit_exchange:callback(X, Fun, transaction, [X] ++ Args) end),
- rabbit_exchange:callback(X, Fun, none, [X] ++ Args).
-
-%% For each {RoutingKey, ExpectedQueueNames} pair, route a message with
-%% that key through topic exchange X and assert that the returned
-%% destinations are exactly the expected queues in vhost "/". Order is
-%% irrelevant: both sides are usort-ed before comparison.
-test_topic_expect_match(X, List) ->
- lists:foreach(
- fun ({Key, Expected}) ->
- BinKey = list_to_binary(Key),
- Message = rabbit_basic:message(X#exchange.name, BinKey,
- #'P_basic'{}, <<>>),
- Res = rabbit_exchange_type_topic:route(
- X, #delivery{mandatory = false,
- sender = self(),
- message = Message}),
- ExpectedRes = lists:map(
- fun (Q) -> #resource{virtual_host = <<"/">>,
- kind = queue,
- name = list_to_binary(Q)}
- end, Expected),
- %% usort also removes duplicates, so aliased bindings
- %% (same queue bound twice) count once.
- true = (lists:usort(ExpectedRes) =:= lists:usort(Res))
- end, List).
-
-%% Exercise the rabbitmqctl-style app-management commands via
-%% control_action/2 (defined elsewhere in this module): wait, tracing,
-%% stop/start, and the various status/diagnostics commands.
-test_app_management() ->
- control_action(wait, [rabbit_mnesia:dir() ++ ".pid"]),
- %% Starting, stopping and diagnostics. Note that we don't try
- %% 'report' when the rabbit app is stopped and that we enable
- %% tracing for the duration of this function.
- ok = control_action(trace_on, []),
- %% stop_app/start_app are each issued twice on purpose: the second
- %% invocation checks the commands are idempotent.
- ok = control_action(stop_app, []),
- ok = control_action(stop_app, []),
- ok = control_action(status, []),
- ok = control_action(cluster_status, []),
- ok = control_action(environment, []),
- ok = control_action(start_app, []),
- ok = control_action(start_app, []),
- ok = control_action(status, []),
- ok = control_action(report, []),
- ok = control_action(cluster_status, []),
- ok = control_action(environment, []),
- ok = control_action(trace_off, []),
- passed.
-
-%% Exercise log rotation via 'rotate_logs' under a range of conditions:
-%% plain reopen, rotation with a suffix, pre-rotated files, empty files,
-%% non-writable files, tty logging and disabled logging. Restores the
-%% file-based handlers before returning. Note: terms like
-%% [MainLog, Suffix] are iolists naming the rotated file "<log>.1".
-test_log_management() ->
- MainLog = rabbit:log_location(kernel),
- SaslLog = rabbit:log_location(sasl),
- Suffix = ".1",
-
- %% prepare basic logs
- file:delete([MainLog, Suffix]),
- file:delete([SaslLog, Suffix]),
-
- %% simple logs reopening
- ok = control_action(rotate_logs, []),
- [true, true] = empty_files([MainLog, SaslLog]),
- ok = test_logs_working(MainLog, SaslLog),
-
- %% simple log rotation
- ok = control_action(rotate_logs, [Suffix]),
- [true, true] = non_empty_files([[MainLog, Suffix], [SaslLog, Suffix]]),
- [true, true] = empty_files([MainLog, SaslLog]),
- ok = test_logs_working(MainLog, SaslLog),
-
- %% reopening logs with log rotation performed first
- ok = clean_logs([MainLog, SaslLog], Suffix),
- ok = control_action(rotate_logs, []),
- ok = file:rename(MainLog, [MainLog, Suffix]),
- ok = file:rename(SaslLog, [SaslLog, Suffix]),
- ok = test_logs_working([MainLog, Suffix], [SaslLog, Suffix]),
- ok = control_action(rotate_logs, []),
- ok = test_logs_working(MainLog, SaslLog),
-
- %% log rotation on empty files (the main log will have a ctl action logged)
- ok = clean_logs([MainLog, SaslLog], Suffix),
- ok = control_action(rotate_logs, []),
- ok = control_action(rotate_logs, [Suffix]),
- [false, true] = empty_files([[MainLog, Suffix], [SaslLog, Suffix]]),
-
- %% logs with suffix are not writable
- ok = control_action(rotate_logs, [Suffix]),
- ok = make_files_non_writable([[MainLog, Suffix], [SaslLog, Suffix]]),
- ok = control_action(rotate_logs, [Suffix]),
- ok = test_logs_working(MainLog, SaslLog),
-
- %% rotate when original log files are not writable
- ok = make_files_non_writable([MainLog, SaslLog]),
- ok = control_action(rotate_logs, []),
-
- %% logging directed to tty (first, remove handlers)
- ok = delete_log_handlers([rabbit_sasl_report_file_h,
- rabbit_error_logger_file_h]),
- ok = clean_logs([MainLog, SaslLog], Suffix),
- ok = application:set_env(rabbit, sasl_error_logger, tty),
- ok = application:set_env(rabbit, error_logger, tty),
- ok = control_action(rotate_logs, []),
- %% with tty logging the files must not have been recreated
- [{error, enoent}, {error, enoent}] = empty_files([MainLog, SaslLog]),
-
- %% rotate logs when logging is turned off
- ok = application:set_env(rabbit, sasl_error_logger, false),
- ok = application:set_env(rabbit, error_logger, silent),
- ok = control_action(rotate_logs, []),
- [{error, enoent}, {error, enoent}] = empty_files([MainLog, SaslLog]),
-
- %% cleanup
- ok = application:set_env(rabbit, sasl_error_logger, {file, SaslLog}),
- ok = application:set_env(rabbit, error_logger, {file, MainLog}),
- ok = add_log_handlers([{rabbit_error_logger_file_h, MainLog},
- {rabbit_sasl_report_file_h, SaslLog}]),
- passed.
-
-%% Check how application startup behaves when log handlers/targets are
-%% misconfigured: tty logging without the proper handler installed,
-%% logging to non-existent or unwritable directories, and missing
-%% standard handlers. Failure cases are observed as {badrpc, {'EXIT', ...}}
-%% terms because the ctl commands run on the node via rpc.
-%% NOTE(review): uses fixed paths under /tmp/rabbit-tests — assumes a
-%% scratch-friendly, single-user test host.
-test_log_management_during_startup() ->
- MainLog = rabbit:log_location(kernel),
- SaslLog = rabbit:log_location(sasl),
-
- %% start application with simple tty logging
- ok = control_action(stop_app, []),
- ok = application:set_env(rabbit, error_logger, tty),
- ok = application:set_env(rabbit, sasl_error_logger, tty),
- ok = add_log_handlers([{error_logger_tty_h, []},
- {sasl_report_tty_h, []}]),
- ok = control_action(start_app, []),
-
- %% start application with tty logging and
- %% proper handlers not installed
- ok = control_action(stop_app, []),
- ok = error_logger:tty(false),
- ok = delete_log_handlers([sasl_report_tty_h]),
- ok = case catch control_action(start_app, []) of
- ok -> exit({got_success_but_expected_failure,
- log_rotation_tty_no_handlers_test});
- {badrpc, {'EXIT', {error,
- {cannot_log_to_tty, _, not_installed}}}} -> ok
- end,
-
- %% fix sasl logging
- ok = application:set_env(rabbit, sasl_error_logger, {file, SaslLog}),
-
- %% start application with logging to non-existing directory
- TmpLog = "/tmp/rabbit-tests/test.log",
- delete_file(TmpLog),
- ok = control_action(stop_app, []),
- ok = application:set_env(rabbit, error_logger, {file, TmpLog}),
-
- ok = delete_log_handlers([rabbit_error_logger_file_h]),
- ok = add_log_handlers([{error_logger_file_h, MainLog}]),
- ok = control_action(start_app, []),
-
- %% start application with logging to directory with no
- %% write permissions
- ok = control_action(stop_app, []),
- TmpDir = "/tmp/rabbit-tests",
- ok = set_permissions(TmpDir, 8#00400),
- ok = delete_log_handlers([rabbit_error_logger_file_h]),
- ok = add_log_handlers([{error_logger_file_h, MainLog}]),
- ok = case control_action(start_app, []) of
- ok -> exit({got_success_but_expected_failure,
- log_rotation_no_write_permission_dir_test});
- {badrpc, {'EXIT',
- {error, {cannot_log_to_file, _, _}}}} -> ok
- end,
-
- %% start application with logging to a subdirectory which
- %% parent directory has no write permissions
- ok = control_action(stop_app, []),
- TmpTestDir = "/tmp/rabbit-tests/no-permission/test/log",
- ok = application:set_env(rabbit, error_logger, {file, TmpTestDir}),
- ok = add_log_handlers([{error_logger_file_h, MainLog}]),
- ok = case control_action(start_app, []) of
- ok -> exit({got_success_but_expected_failure,
- log_rotatation_parent_dirs_test});
- {badrpc,
- {'EXIT',
- {error, {cannot_log_to_file, _,
- {error,
- {cannot_create_parent_dirs, _, eacces}}}}}} -> ok
- end,
- %% restore permissions so the scratch dir/file can be removed
- ok = set_permissions(TmpDir, 8#00700),
- ok = set_permissions(TmpLog, 8#00600),
- ok = delete_file(TmpLog),
- ok = file:del_dir(TmpDir),
-
- %% start application with standard error_logger_file_h
- %% handler not installed
- ok = control_action(stop_app, []),
- ok = application:set_env(rabbit, error_logger, {file, MainLog}),
- ok = control_action(start_app, []),
-
- %% start application with standard sasl handler not installed
- %% and rabbit main log handler installed correctly
- ok = control_action(stop_app, []),
- ok = delete_log_handlers([rabbit_sasl_report_file_h]),
- ok = control_action(start_app, []),
- passed.
-
-%% Unit-test rabbit_cli:parse_arguments/4 against a small grammar with
-%% one global flag/option pair and one command-specific pair. Each
-%% check_parse_arguments call asserts the exact parse result (or
-%% no_command) for a given argv list.
-test_arguments_parser() ->
- GlobalOpts1 = [{"-f1", flag}, {"-o1", {option, "foo"}}],
- Commands1 = [command1, {command2, [{"-f2", flag}, {"-o2", {option, "bar"}}]}],
-
- GetOptions =
- fun (Args) ->
- rabbit_cli:parse_arguments(Commands1, GlobalOpts1, "-n", Args)
- end,
-
- check_parse_arguments(no_command, GetOptions, []),
- check_parse_arguments(no_command, GetOptions, ["foo", "bar"]),
- %% defaults: flags are false, options take their declared default
- check_parse_arguments(
- {ok, {command1, [{"-f1", false}, {"-o1", "foo"}], []}},
- GetOptions, ["command1"]),
- check_parse_arguments(
- {ok, {command1, [{"-f1", false}, {"-o1", "blah"}], []}},
- GetOptions, ["command1", "-o1", "blah"]),
- check_parse_arguments(
- {ok, {command1, [{"-f1", true}, {"-o1", "foo"}], []}},
- GetOptions, ["command1", "-f1"]),
- %% options may appear before the command name
- check_parse_arguments(
- {ok, {command1, [{"-f1", false}, {"-o1", "blah"}], []}},
- GetOptions, ["-o1", "blah", "command1"]),
- check_parse_arguments(
- {ok, {command1, [{"-f1", false}, {"-o1", "blah"}], ["quux"]}},
- GetOptions, ["-o1", "blah", "command1", "quux"]),
- check_parse_arguments(
- {ok, {command1, [{"-f1", true}, {"-o1", "blah"}], ["quux", "baz"]}},
- GetOptions, ["command1", "quux", "-f1", "-o1", "blah", "baz"]),
- %% For duplicate flags, the last one counts
- check_parse_arguments(
- {ok, {command1, [{"-f1", false}, {"-o1", "second"}], []}},
- GetOptions, ["-o1", "first", "command1", "-o1", "second"]),
- %% If the flag "eats" the command, the command won't be recognised
- check_parse_arguments(no_command, GetOptions,
- ["-o1", "command1", "quux"]),
- %% If a flag eats another flag, the eaten flag won't be recognised
- check_parse_arguments(
- {ok, {command1, [{"-f1", false}, {"-o1", "-f1"}], []}},
- GetOptions, ["command1", "-o1", "-f1"]),
-
- %% Now for some command-specific flags...
- check_parse_arguments(
- {ok, {command2, [{"-f1", false}, {"-f2", false},
- {"-o1", "foo"}, {"-o2", "bar"}], []}},
- GetOptions, ["command2"]),
-
- check_parse_arguments(
- {ok, {command2, [{"-f1", false}, {"-f2", true},
- {"-o1", "baz"}, {"-o2", "bar"}], ["quux", "foo"]}},
- GetOptions, ["-f2", "command2", "quux", "-o1", "baz", "foo"]),
-
- passed.
-
-%% Unit-test the mirror-node selection logic of the HA policies
-%% ("all", "nodes", "exactly") via M:suggested_queue_nodes/5.
-%% Test/5 asserts that for a given policy/params and current state
-%% {MasterNode, SlaveNodes, SyncedSlaveNodes} over cluster All, the
-%% suggestion is {NewMaster, NewSlaves} where NewSlaves must equal
-%% NewSs plus exactly ExtraSs arbitrary additional nodes
-%% (see dm_list_match/3).
-test_dynamic_mirroring() ->
- %% Just unit tests of the node selection logic, see multi node
- %% tests for the rest...
- Test = fun ({NewM, NewSs, ExtraSs}, Policy, Params,
- {MNode, SNodes, SSNodes}, All) ->
- {ok, M} = rabbit_mirror_queue_misc:module(Policy),
- {NewM, NewSs0} = M:suggested_queue_nodes(
- Params, MNode, SNodes, SSNodes, All),
- NewSs1 = lists:sort(NewSs0),
- case dm_list_match(NewSs, NewSs1, ExtraSs) of
- ok -> ok;
- error -> exit({no_match, NewSs, NewSs1, ExtraSs})
- end
- end,
-
- Test({a,[b,c],0},<<"all">>,'_',{a,[], []}, [a,b,c]),
- Test({a,[b,c],0},<<"all">>,'_',{a,[b,c],[b,c]},[a,b,c]),
- Test({a,[b,c],0},<<"all">>,'_',{a,[d], [d]}, [a,b,c]),
-
- %% helper: atoms -> binaries, as "nodes" params arrive from JSON
- N = fun (Atoms) -> [list_to_binary(atom_to_list(A)) || A <- Atoms] end,
-
- %% Add a node
- Test({a,[b,c],0},<<"nodes">>,N([a,b,c]),{a,[b],[b]},[a,b,c,d]),
- Test({b,[a,c],0},<<"nodes">>,N([a,b,c]),{b,[a],[a]},[a,b,c,d]),
- %% Add two nodes and drop one
- Test({a,[b,c],0},<<"nodes">>,N([a,b,c]),{a,[d],[d]},[a,b,c,d]),
- %% Don't try to include nodes that are not running
- Test({a,[b], 0},<<"nodes">>,N([a,b,f]),{a,[b],[b]},[a,b,c,d]),
- %% If we can't find any of the nodes listed then just keep the master
- Test({a,[], 0},<<"nodes">>,N([f,g,h]),{a,[b],[b]},[a,b,c,d]),
- %% And once that's happened, still keep the master even when not listed,
- %% if nothing is synced
- Test({a,[b,c],0},<<"nodes">>,N([b,c]), {a,[], []}, [a,b,c,d]),
- Test({a,[b,c],0},<<"nodes">>,N([b,c]), {a,[b],[]}, [a,b,c,d]),
- %% But if something is synced we can lose the master - but make
- %% sure we pick the new master from the nodes which are synced!
- Test({b,[c], 0},<<"nodes">>,N([b,c]), {a,[b],[b]},[a,b,c,d]),
- Test({b,[c], 0},<<"nodes">>,N([c,b]), {a,[b],[b]},[a,b,c,d]),
-
- Test({a,[], 1},<<"exactly">>,2,{a,[], []}, [a,b,c,d]),
- Test({a,[], 2},<<"exactly">>,3,{a,[], []}, [a,b,c,d]),
- Test({a,[c], 0},<<"exactly">>,2,{a,[c], [c]}, [a,b,c,d]),
- Test({a,[c], 1},<<"exactly">>,3,{a,[c], [c]}, [a,b,c,d]),
- Test({a,[c], 0},<<"exactly">>,2,{a,[c,d],[c,d]},[a,b,c,d]),
- Test({a,[c,d],0},<<"exactly">>,3,{a,[c,d],[c,d]},[a,b,c,d]),
-
- passed.
-
-%% Does Actual equal Expected once exactly Extra surplus elements
-%% (which may appear anywhere in Actual) are discarded? Returns
-%% ok on an exact match, error otherwise.
-dm_list_match(Expected, Actual, Extra) ->
-    case {Expected, Actual, Extra} of
-        {[], [], 0}                         -> ok;
-        {_, [], _}                          -> error;
-        {[Same | RestE], [Same | RestA], N} -> dm_list_match(RestE, RestA, N);
-        {RemE, [_Skip | RestA], N}          -> dm_list_match(RemE, RestA, N - 1)
-    end.
-
-%% End-to-end test of user/vhost/permission management through the ctl
-%% commands: error cases first, then creation, tags, authentication,
-%% vhost mapping/unmapping, and deletion (including a populated vhost).
-test_user_management() ->
-
- %% lots of stuff that should fail
- {error, {no_such_user, _}} =
- control_action(delete_user, ["foo"]),
- {error, {no_such_user, _}} =
- control_action(change_password, ["foo", "baz"]),
- {error, {no_such_vhost, _}} =
- control_action(delete_vhost, ["/testhost"]),
- {error, {no_such_user, _}} =
- control_action(set_permissions, ["foo", ".*", ".*", ".*"]),
- {error, {no_such_user, _}} =
- control_action(clear_permissions, ["foo"]),
- {error, {no_such_user, _}} =
- control_action(list_user_permissions, ["foo"]),
- {error, {no_such_vhost, _}} =
- control_action(list_permissions, [], [{"-p", "/testhost"}]),
- {error, {invalid_regexp, _, _}} =
- control_action(set_permissions, ["guest", "+foo", ".*", ".*"]),
- {error, {no_such_user, _}} =
- control_action(set_user_tags, ["foo", "bar"]),
-
- %% user creation
- ok = control_action(add_user, ["foo", "bar"]),
- {error, {user_already_exists, _}} =
- control_action(add_user, ["foo", "bar"]),
- ok = control_action(clear_password, ["foo"]),
- ok = control_action(change_password, ["foo", "baz"]),
-
- %% set tags via ctl, then read them back through the auth backend
- TestTags = fun (Tags) ->
- Args = ["foo" | [atom_to_list(T) || T <- Tags]],
- ok = control_action(set_user_tags, Args),
- {ok, #internal_user{tags = Tags}} =
- rabbit_auth_backend_internal:lookup_user(<<"foo">>),
- ok = control_action(list_users, [])
- end,
- TestTags([foo, bar, baz]),
- TestTags([administrator]),
- TestTags([]),
-
- %% user authentication
- ok = control_action(authenticate_user, ["foo", "baz"]),
- {refused, _User, _Format, _Params} =
- control_action(authenticate_user, ["foo", "bar"]),
-
- %% vhost creation
- ok = control_action(add_vhost, ["/testhost"]),
- {error, {vhost_already_exists, _}} =
- control_action(add_vhost, ["/testhost"]),
- ok = control_action(list_vhosts, []),
-
- %% user/vhost mapping (setting permissions is idempotent, hence
- %% the repeated identical calls)
- ok = control_action(set_permissions, ["foo", ".*", ".*", ".*"],
- [{"-p", "/testhost"}]),
- ok = control_action(set_permissions, ["foo", ".*", ".*", ".*"],
- [{"-p", "/testhost"}]),
- ok = control_action(set_permissions, ["foo", ".*", ".*", ".*"],
- [{"-p", "/testhost"}]),
- ok = control_action(list_permissions, [], [{"-p", "/testhost"}]),
- ok = control_action(list_permissions, [], [{"-p", "/testhost"}]),
- ok = control_action(list_user_permissions, ["foo"]),
-
- %% user/vhost unmapping (also idempotent)
- ok = control_action(clear_permissions, ["foo"], [{"-p", "/testhost"}]),
- ok = control_action(clear_permissions, ["foo"], [{"-p", "/testhost"}]),
-
- %% vhost deletion
- ok = control_action(delete_vhost, ["/testhost"]),
- {error, {no_such_vhost, _}} =
- control_action(delete_vhost, ["/testhost"]),
-
- %% deleting a populated vhost
- ok = control_action(add_vhost, ["/testhost"]),
- ok = control_action(set_permissions, ["foo", ".*", ".*", ".*"],
- [{"-p", "/testhost"}]),
- {new, _} = rabbit_amqqueue:declare(
- rabbit_misc:r(<<"/testhost">>, queue, <<"test">>),
- true, false, [], none),
- ok = control_action(delete_vhost, ["/testhost"]),
-
- %% user deletion
- ok = control_action(delete_user, ["foo"]),
- {error, {no_such_user, _}} =
- control_action(delete_user, ["foo"]),
-
- passed.
-
-%% Test set/list/clear of runtime parameters for the "test" component
-%% registered by rabbit_runtime_parameters_test: JSON round-tripping,
-%% invalid JSON rejection, the component's validation hook, and
-%% clearing parameters for an unregistered component.
-test_runtime_parameters() ->
- rabbit_runtime_parameters_test:register(),
- Good = fun(L) -> ok = control_action(set_parameter, L) end,
- Bad = fun(L) -> {error_string, _} = control_action(set_parameter, L) end,
-
- %% Acceptable for bijection
- Good(["test", "good", "\"ignore\""]),
- Good(["test", "good", "123"]),
- Good(["test", "good", "true"]),
- Good(["test", "good", "false"]),
- Good(["test", "good", "null"]),
- Good(["test", "good", "{\"key\": \"value\"}"]),
-
- %% Invalid json
- Bad(["test", "good", "atom"]),
- Bad(["test", "good", "{\"foo\": \"bar\""]),
- Bad(["test", "good", "{foo: \"bar\"}"]),
-
- %% Test actual validation hook
- Good(["test", "maybe", "\"good\""]),
- Bad(["test", "maybe", "\"bad\""]),
- Good(["test", "admin", "\"ignore\""]), %% ctl means 'user' -> none
-
- ok = control_action(list_parameters, []),
-
- ok = control_action(clear_parameter, ["test", "good"]),
- ok = control_action(clear_parameter, ["test", "maybe"]),
- ok = control_action(clear_parameter, ["test", "admin"]),
- {error_string, _} =
- control_action(clear_parameter, ["test", "neverexisted"]),
-
- %% We can delete for a component that no longer exists
- Good(["test", "good", "\"ignore\""]),
- rabbit_runtime_parameters_test:unregister(),
- ok = control_action(clear_parameter, ["test", "good"]),
- passed.
-
-%% Test the policy-validator plugin hook: "testeven" accepts lists of
-%% even length, "testpos" accepts lists of positive numbers (validators
-%% registered by rabbit_runtime_parameters_test).
-test_policy_validation() ->
- rabbit_runtime_parameters_test:register_policy_validator(),
- SetPol = fun (Key, Val) ->
- control_action_opts(
- ["set_policy", "name", ".*",
- rabbit_misc:format("{\"~s\":~p}", [Key, Val])])
- end,
-
- ok = SetPol("testeven", []),
- ok = SetPol("testeven", [1, 2]),
- ok = SetPol("testeven", [1, 2, 3, 4]),
- ok = SetPol("testpos", [2, 5, 5678]),
-
- error = SetPol("testpos", [-1, 0, 1]),
- error = SetPol("testeven", [ 1, 2, 3]),
-
- ok = control_action(clear_policy, ["name"]),
- rabbit_runtime_parameters_test:unregister_policy_validator(),
- passed.
-
-%% Test validation of the set_policy command-line options (--priority,
-%% --apply-to) independently of the policy definition itself.
-test_policy_opts_validation() ->
- Set = fun (Extra) -> control_action_opts(
- ["set_policy", "name", ".*", "{\"ha-mode\":\"all\"}"
- | Extra]) end,
- OK = fun (Extra) -> ok = Set(Extra) end,
- Fail = fun (Extra) -> error = Set(Extra) end,
-
- OK ([]),
-
- OK (["--priority", "0"]),
- OK (["--priority", "3"]),
- Fail(["--priority", "banana"]),
- Fail(["--priority"]),
-
- OK (["--apply-to", "all"]),
- OK (["--apply-to", "queues"]),
- Fail(["--apply-to", "bananas"]),
- Fail(["--apply-to"]),
-
- OK (["--priority", "3", "--apply-to", "queues"]),
- Fail(["--priority", "banana", "--apply-to", "queues"]),
- Fail(["--priority", "3", "--apply-to", "bananas"]),
-
- %% --offline makes no sense for set_policy
- Fail(["--offline"]),
-
- ok = control_action(clear_policy, ["name"]),
- passed.
-
-%% Test validation of HA policy definitions: ha-mode values, the
-%% ha-params each mode requires (node list for "nodes", count for
-%% "exactly"), and ha-sync-mode, including params without a mode.
-test_ha_policy_validation() ->
- Set = fun (JSON) -> control_action_opts(
- ["set_policy", "name", ".*", JSON]) end,
- OK = fun (JSON) -> ok = Set(JSON) end,
- Fail = fun (JSON) -> error = Set(JSON) end,
-
- OK ("{\"ha-mode\":\"all\"}"),
- Fail("{\"ha-mode\":\"made_up\"}"),
-
- Fail("{\"ha-mode\":\"nodes\"}"),
- Fail("{\"ha-mode\":\"nodes\",\"ha-params\":2}"),
- Fail("{\"ha-mode\":\"nodes\",\"ha-params\":[\"a\",2]}"),
- OK ("{\"ha-mode\":\"nodes\",\"ha-params\":[\"a\",\"b\"]}"),
- Fail("{\"ha-params\":[\"a\",\"b\"]}"),
-
- Fail("{\"ha-mode\":\"exactly\"}"),
- Fail("{\"ha-mode\":\"exactly\",\"ha-params\":[\"a\",\"b\"]}"),
- OK ("{\"ha-mode\":\"exactly\",\"ha-params\":2}"),
- Fail("{\"ha-params\":2}"),
-
- OK ("{\"ha-mode\":\"all\",\"ha-sync-mode\":\"manual\"}"),
- OK ("{\"ha-mode\":\"all\",\"ha-sync-mode\":\"automatic\"}"),
- Fail("{\"ha-mode\":\"all\",\"ha-sync-mode\":\"made_up\"}"),
- Fail("{\"ha-sync-mode\":\"manual\"}"),
- Fail("{\"ha-sync-mode\":\"automatic\"}"),
-
- ok = control_action(clear_policy, ["name"]),
- passed.
-
-%% Smoke-test the "list_*" / status ctl commands against live objects:
-%% creates a channel, two queues (one exclusive) and a raw AMQP
-%% connection, lists everything, pokes the memory high watermark and
-%% eval, then cleans up.
-test_server_status() ->
- %% create a few things so there is some useful information to list
- {_Writer, Limiter, Ch} = test_channel(),
- [Q, Q2] = [Queue || {Name, Owner} <- [{<<"foo">>, none}, {<<"bar">>, self()}],
- {new, Queue = #amqqueue{}} <-
- [rabbit_amqqueue:declare(
- rabbit_misc:r(<<"/">>, queue, Name),
- false, false, [], Owner)]],
- ok = rabbit_amqqueue:basic_consume(
- Q, true, Ch, Limiter, false, 0, <<"ctag">>, true, [], undefined),
-
- %% list queues
- ok = info_action(list_queues, rabbit_amqqueue:info_keys(), true),
-
- %% as we have no way to collect output of info_action/3 call, the only way
- %% we can test individual queueinfoitems is by directly calling
- %% rabbit_amqqueue:info/2
- [{exclusive, false}] = rabbit_amqqueue:info(Q, [exclusive]),
- [{exclusive, true}] = rabbit_amqqueue:info(Q2, [exclusive]),
-
- %% list exchanges
- ok = info_action(list_exchanges, rabbit_exchange:info_keys(), true),
-
- %% list bindings
- ok = info_action(list_bindings, rabbit_binding:info_keys(), true),
- %% misc binding listing APIs
- [_|_] = rabbit_binding:list_for_source(
- rabbit_misc:r(<<"/">>, exchange, <<"">>)),
- [_] = rabbit_binding:list_for_destination(
- rabbit_misc:r(<<"/">>, queue, <<"foo">>)),
- [_] = rabbit_binding:list_for_source_and_destination(
- rabbit_misc:r(<<"/">>, exchange, <<"">>),
- rabbit_misc:r(<<"/">>, queue, <<"foo">>)),
-
- %% list connections
- {H, P} = find_listener(),
- {ok, C} = gen_tcp:connect(H, P, []),
- gen_tcp:send(C, <<"AMQP", 0, 0, 9, 1>>),
- %% give the reader a moment to register the connection
- timer:sleep(100),
- ok = info_action(list_connections,
- rabbit_networking:connection_info_keys(), false),
- %% close_connection
- [ConnPid] = rabbit_networking:connections(),
- ok = control_action(close_connection, [rabbit_misc:pid_to_string(ConnPid),
- "go away"]),
-
- %% list channels
- ok = info_action(list_channels, rabbit_channel:info_keys(), false),
-
- %% list consumers
- ok = control_action(list_consumers, []),
-
- %% set vm memory high watermark
- HWM = vm_memory_monitor:get_vm_memory_high_watermark(),
- ok = control_action(set_vm_memory_high_watermark, ["1"]),
- ok = control_action(set_vm_memory_high_watermark, ["1.0"]),
- %% this will trigger an alarm
- ok = control_action(set_vm_memory_high_watermark, ["0.0"]),
- %% reset
- ok = control_action(set_vm_memory_high_watermark, [float_to_list(HWM)]),
-
- %% eval
- {error_string, _} = control_action(eval, ["\""]),
- {error_string, _} = control_action(eval, ["a("]),
- ok = control_action(eval, ["a."]),
-
- %% cleanup
- [{ok, _} = rabbit_amqqueue:delete(QR, false, false) || QR <- [Q, Q2]],
-
- unlink(Ch),
- ok = rabbit_channel:shutdown(Ch),
-
- passed.
-
-%% Check that bad protocol headers (wrong version or wrong magic) are
-%% refused: the server must reply with its own supported header,
-%% AMQP 0-9-1, before closing.
-test_amqp_connection_refusal() ->
- [passed = test_amqp_connection_refusal(V) ||
- V <- [<<"AMQP",9,9,9,9>>, <<"AMQP",0,1,0,0>>, <<"XXXX",0,0,9,1>>]],
- passed.
-
-%% Send Header to a local AMQP listener and expect the server's
-%% 8-byte AMQP 0-9-1 header back within 100ms.
-test_amqp_connection_refusal(Header) ->
- {H, P} = find_listener(),
- {ok, C} = gen_tcp:connect(H, P, [binary, {active, false}]),
- ok = gen_tcp:send(C, Header),
- {ok, <<"AMQP",0,0,9,1>>} = gen_tcp:recv(C, 8, 100),
- ok = gen_tcp:close(C),
- passed.
-
-%% Return {Host, Port} of an active AMQP listener on this node.
-find_listener() ->
- [#listener{host = H, port = P} | _] =
- [L || L = #listener{node = N, protocol = amqp}
- <- rabbit_networking:active_listeners(),
- N =:= node()],
- {H, P}.
-
-%% Minimal stand-in for a writer process: acknowledges 'flush' calls
-%% and forwards any method sent via send_command to Pid; terminates
-%% on 'shutdown'.
-test_writer(Pid) ->
- receive
- {'$gen_call', From, flush} -> gen_server:reply(From, ok),
- test_writer(Pid);
- {send_command, Method} -> Pid ! Method,
- test_writer(Pid);
- shutdown -> ok
- end.
-
-%% Start a rabbit_channel wired to a test_writer and a fresh limiter;
-%% returns {WriterPid, LimiterPid, ChannelPid}. The writer relays
-%% AMQP methods back to the calling test process.
-test_channel() ->
- Me = self(),
- Writer = spawn(fun () -> test_writer(Me) end),
- {ok, Limiter} = rabbit_limiter:start_link(no_id),
- {ok, Ch} = rabbit_channel:start_link(
- 1, Me, Writer, Me, "", rabbit_framing_amqp_0_9_1,
- user(<<"guest">>), <<"/">>, [], Me, Limiter),
- {Writer, Limiter, Ch}.
-
-%% Open a channel (channel.open) and wait for channel.open_ok,
-%% returning {WriterPid, ChannelPid}.
-test_spawn() ->
- {Writer, _Limiter, Ch} = test_channel(),
- ok = rabbit_channel:do(Ch, #'channel.open'{}),
- receive #'channel.open_ok'{} -> ok
- after ?TIMEOUT -> throw(failed_to_receive_channel_open_ok)
- end,
- {Writer, Ch}.
-
-%% As test_spawn/0 but sets the channel up on Node via rpc.
-test_spawn(Node) ->
- rpc:call(Node, ?MODULE, test_spawn_remote, []).
-
-%% Spawn an arbitrary long lived process, so we don't end up linking
-%% the channel to the short-lived process (RPC, here) spun up by the
-%% RPC server.
-test_spawn_remote() ->
- RPC = self(),
- spawn(fun () ->
- {Writer, Ch} = test_spawn(),
- RPC ! {Writer, Ch},
- link(Ch),
- receive
- _ -> ok
- end
- end),
- receive Res -> Res
- after ?TIMEOUT -> throw(failed_to_receive_result)
- end.
-
-%% Build a #user{} record for Username with administrator tag,
-%% authorised via the internal auth backend.
-user(Username) ->
- #user{username = Username,
- tags = [administrator],
- authz_backends = [{rabbit_auth_backend_internal, none}]}.
-
-%% Test publisher confirms: publish to two durable queues bound to
-%% amq.direct, crash one queue and expect a basic.nack (not an ack)
-%% for the unroutable copy, with no spurious ack afterwards.
-test_confirms() ->
- {_Writer, Ch} = test_spawn(),
- DeclareBindDurableQueue =
- fun() ->
- rabbit_channel:do(Ch, #'queue.declare'{durable = true}),
- receive #'queue.declare_ok'{queue = Q0} ->
- rabbit_channel:do(Ch, #'queue.bind'{
- queue = Q0,
- exchange = <<"amq.direct">>,
- routing_key = "magic" }),
- receive #'queue.bind_ok'{} -> Q0
- after ?TIMEOUT -> throw(failed_to_bind_queue)
- end
- after ?TIMEOUT -> throw(failed_to_declare_queue)
- end
- end,
- %% Declare and bind two queues
- QName1 = DeclareBindDurableQueue(),
- QName2 = DeclareBindDurableQueue(),
- %% Get the first one's pid (we'll crash it later)
- {ok, Q1} = rabbit_amqqueue:lookup(rabbit_misc:r(<<"/">>, queue, QName1)),
- QPid1 = Q1#amqqueue.pid,
- %% Enable confirms
- rabbit_channel:do(Ch, #'confirm.select'{}),
- receive
- #'confirm.select_ok'{} -> ok
- after ?TIMEOUT -> throw(failed_to_enable_confirms)
- end,
- %% Publish a message
- rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"amq.direct">>,
- routing_key = "magic"
- },
- rabbit_basic:build_content(
- #'P_basic'{delivery_mode = 2}, <<"">>)),
- %% We must not kill the queue before the channel has processed the
- %% 'publish'.
- ok = rabbit_channel:flush(Ch),
- %% Crash the queue
- QPid1 ! boom,
- %% Wait for a nack
- receive
- #'basic.nack'{} -> ok;
- #'basic.ack'{} -> throw(received_ack_instead_of_nack)
- after ?TIMEOUT-> throw(did_not_receive_nack)
- end,
- %% also ensure no late ack arrives within a second
- receive
- #'basic.ack'{} -> throw(received_ack_when_none_expected)
- after 1000 -> ok
- end,
- %% Cleanup
- rabbit_channel:do(Ch, #'queue.delete'{queue = QName2}),
- receive
- #'queue.delete_ok'{} -> ok
- after ?TIMEOUT -> throw(failed_to_cleanup_queue)
- end,
- unlink(Ch),
- ok = rabbit_channel:shutdown(Ch),
-
- passed.
-
-%% Check gen_server2:with_state/2 by peeking at the file_handle_cache
-%% server's state record tag (its first element is the atom fhc_state).
-test_with_state() ->
- fhc_state = gen_server2:with_state(file_handle_cache,
- fun (S) -> element(1, S) end),
- passed.
-
-%% Test gen_server2:mcall/1 against a mix of healthy, dead, crashing,
-%% unregistered and unreachable targets addressed as pids, local names,
-%% {Name, Node} pairs and {global, Name} — verifying that replies and
-%% errors are partitioned exactly as expected.
-test_mcall() ->
- P1 = spawn(fun gs2_test_listener/0),
- register(foo, P1),
- global:register_name(gfoo, P1),
-
- P2 = spawn(fun() -> exit(bang) end),
- %% ensure P2 is dead (ignore the race setting up the monitor)
- await_exit(P2),
-
- P3 = spawn(fun gs2_test_crasher/0),
-
- %% since P2 crashes almost immediately and P3 after receiving its first
- %% message, we have to spawn a few more processes to handle the additional
- %% cases we're interested in here
- register(baz, spawn(fun gs2_test_crasher/0)),
- register(bog, spawn(fun gs2_test_crasher/0)),
- global:register_name(gbaz, spawn(fun gs2_test_crasher/0)),
-
- NoNode = rabbit_nodes:make("nonode"),
-
- Targets =
- %% pids
- [P1, P2, P3]
- ++
- %% registered names
- [foo, bar, baz]
- ++
- %% {Name, Node} pairs
- [{foo, node()}, {bar, node()}, {bog, node()}, {foo, NoNode}]
- ++
- %% {global, Name}
- [{global, gfoo}, {global, gbar}, {global, gbaz}],
-
- GoodResults = [{D, goodbye} || D <- [P1, foo,
- {foo, node()},
- {global, gfoo}]],
-
- BadResults = [{P2, noproc}, % died before use
- {P3, boom}, % died on first use
- {bar, noproc}, % never registered
- {baz, boom}, % died on first use
- {{bar, node()}, noproc}, % never registered
- {{bog, node()}, boom}, % died on first use
- {{foo, NoNode}, nodedown}, % invalid node
- {{global, gbar}, noproc}, % never registered globally
- {{global, gbaz}, boom}], % died on first use
-
- {Replies, Errors} = gen_server2:mcall([{T, hello} || T <- Targets]),
- true = lists:sort(Replies) == lists:sort(GoodResults),
- true = lists:sort(Errors) == lists:sort(BadResults),
-
- %% cleanup (ignore the race setting up the monitor)
- P1 ! stop,
- await_exit(P1),
- passed.
-
-%% Block until Pid has terminated (monitor-based, race-free).
-await_exit(Pid) ->
- MRef = erlang:monitor(process, Pid),
- receive
- {'DOWN', MRef, _, _, _} -> ok
- end.
-
-%% Test server that dies with reason 'boom' on its first gen call.
-gs2_test_crasher() ->
- receive
- {'$gen_call', _From, hello} -> exit(boom)
- end.
-
-%% Test server that replies 'goodbye' to each 'hello' gen call and
-%% terminates normally on 'stop'.
-gs2_test_listener() ->
- receive
- {'$gen_call', From, hello} ->
- gen_server2:reply(From, goodbye),
- gs2_test_listener();
- stop ->
- ok
- end.
-
-%% Relay every received message to Pid, forever.
-test_statistics_event_receiver(Pid) ->
- receive
- Foo -> Pid ! Foo, test_statistics_event_receiver(Pid)
- end.
-
-%% Ask channel Ch to emit stats and return the props of the first
-%% channel_stats event satisfying Matcher; throws after ?TIMEOUT.
-test_statistics_receive_event(Ch, Matcher) ->
- rabbit_channel:flush(Ch),
- Ch ! emit_stats,
- test_statistics_receive_event1(Ch, Matcher).
-
-%% Receive loop: skip channel_stats events until Matcher(Props)
-%% returns true, then hand back those Props.
-test_statistics_receive_event1(Ch, Matcher) ->
- receive #event{type = channel_stats, props = Props} ->
- case Matcher(Props) of
- true -> Props;
- _ -> test_statistics_receive_event1(Ch, Matcher)
- end
- after ?TIMEOUT -> throw(failed_to_receive_event)
- end.
-
-%% Test fine-grained channel statistics: initially empty, updated after
-%% a publish/get round-trip, and pruned again after queue deletion.
-test_statistics() ->
- application:set_env(rabbit, collect_statistics, fine),
-
- %% ATM this just tests the queue / exchange stats in channels. That's
- %% by far the most complex code though.
-
- %% Set up a channel and queue
- {_Writer, Ch} = test_spawn(),
- rabbit_channel:do(Ch, #'queue.declare'{}),
- QName = receive #'queue.declare_ok'{queue = Q0} -> Q0
- after ?TIMEOUT -> throw(failed_to_receive_queue_declare_ok)
- end,
- QRes = rabbit_misc:r(<<"/">>, queue, QName),
- X = rabbit_misc:r(<<"/">>, exchange, <<"">>),
-
- rabbit_tests_event_receiver:start(self(), [node()], [channel_stats]),
-
- %% Check stats empty
- Event = test_statistics_receive_event(Ch, fun (_) -> true end),
- [] = proplists:get_value(channel_queue_stats, Event),
- [] = proplists:get_value(channel_exchange_stats, Event),
- [] = proplists:get_value(channel_queue_exchange_stats, Event),
-
- %% Publish and get a message (via the default exchange, whose
- %% routing key is the queue name)
- rabbit_channel:do(Ch, #'basic.publish'{exchange = <<"">>,
- routing_key = QName},
- rabbit_basic:build_content(#'P_basic'{}, <<"">>)),
- rabbit_channel:do(Ch, #'basic.get'{queue = QName}),
-
- %% Check the stats reflect that
- Event2 = test_statistics_receive_event(
- Ch,
- fun (E) ->
- length(proplists:get_value(
- channel_queue_exchange_stats, E)) > 0
- end),
- [{QRes, [{get,1}]}] = proplists:get_value(channel_queue_stats, Event2),
- [{X,[{publish,1}]}] = proplists:get_value(channel_exchange_stats, Event2),
- [{{QRes,X},[{publish,1}]}] =
- proplists:get_value(channel_queue_exchange_stats, Event2),
-
- %% Check the stats remove stuff on queue deletion
- rabbit_channel:do(Ch, #'queue.delete'{queue = QName}),
- Event3 = test_statistics_receive_event(
- Ch,
- fun (E) ->
- length(proplists:get_value(
- channel_queue_exchange_stats, E)) == 0
- end),
-
- [] = proplists:get_value(channel_queue_stats, Event3),
- [{X,[{publish,1}]}] = proplists:get_value(channel_exchange_stats, Event3),
- [] = proplists:get_value(channel_queue_exchange_stats, Event3),
-
- rabbit_channel:shutdown(Ch),
- rabbit_tests_event_receiver:stop(),
- passed.
-
-%% Test rabbit:force_event_refresh/1: creation events for channels
-%% (local and on SecondaryNode) and queues must be re-emitted on
-%% refresh, not just at creation time.
-test_refresh_events(SecondaryNode) ->
- rabbit_tests_event_receiver:start(self(), [node(), SecondaryNode],
- [channel_created, queue_created]),
-
- {_Writer, Ch} = test_spawn(),
- expect_events(pid, Ch, channel_created),
- rabbit_channel:shutdown(Ch),
-
- {_Writer2, Ch2} = test_spawn(SecondaryNode),
- expect_events(pid, Ch2, channel_created),
- rabbit_channel:shutdown(Ch2),
-
- {new, #amqqueue{name = QName} = Q} =
- rabbit_amqqueue:declare(test_queue(), false, false, [], none),
- expect_events(name, QName, queue_created),
- rabbit_amqqueue:delete(Q, false, false),
-
- rabbit_tests_event_receiver:stop(),
- passed.
-
-%% Expect the event once from the original emission, then force a
-%% refresh and expect it to be emitted again.
-expect_events(Tag, Key, Type) ->
- expect_event(Tag, Key, Type),
- rabbit:force_event_refresh(make_ref()),
- expect_event(Tag, Key, Type).
-
-%% Wait for an event of Type whose Tag property equals Key, skipping
-%% non-matching events of the same type; throws after ?TIMEOUT.
-expect_event(Tag, Key, Type) ->
- receive #event{type = Type, props = Props} ->
- case pget(Tag, Props) of
- Key -> ok;
- _ -> expect_event(Tag, Key, Type)
- end
- after ?TIMEOUT -> throw({failed_to_receive_event, Type})
- end.
-
-test_delegates_async(SecondaryNode) ->
- Self = self(),
- Sender = fun (Pid) -> Pid ! {invoked, Self} end,
-
- Responder = make_responder(fun ({invoked, Pid}) -> Pid ! response end),
-
- ok = delegate:invoke_no_result(spawn(Responder), Sender),
- ok = delegate:invoke_no_result(spawn(SecondaryNode, Responder), Sender),
- await_response(2),
-
- LocalPids = spawn_responders(node(), Responder, 10),
- RemotePids = spawn_responders(SecondaryNode, Responder, 10),
- ok = delegate:invoke_no_result(LocalPids ++ RemotePids, Sender),
- await_response(20),
-
- passed.
-
-make_responder(FMsg) -> make_responder(FMsg, timeout).
-make_responder(FMsg, Throw) ->
- fun () ->
- receive Msg -> FMsg(Msg)
- after ?TIMEOUT -> throw(Throw)
- end
- end.
-
-spawn_responders(Node, Responder, Count) ->
- [spawn(Node, Responder) || _ <- lists:seq(1, Count)].
-
-await_response(0) ->
- ok;
-await_response(Count) ->
- receive
- response -> ok,
- await_response(Count - 1)
- after ?TIMEOUT -> throw(timeout)
- end.
-
-must_exit(Fun) ->
- try
- Fun(),
- throw(exit_not_thrown)
- catch
- exit:_ -> ok
- end.
-
-test_delegates_sync(SecondaryNode) ->
- Sender = fun (Pid) -> gen_server:call(Pid, invoked, infinity) end,
- BadSender = fun (_Pid) -> exit(exception) end,
-
- Responder = make_responder(fun ({'$gen_call', From, invoked}) ->
- gen_server:reply(From, response)
- end),
-
- BadResponder = make_responder(fun ({'$gen_call', From, invoked}) ->
- gen_server:reply(From, response)
- end, bad_responder_died),
-
- response = delegate:invoke(spawn(Responder), Sender),
- response = delegate:invoke(spawn(SecondaryNode, Responder), Sender),
-
- must_exit(fun () -> delegate:invoke(spawn(BadResponder), BadSender) end),
- must_exit(fun () ->
- delegate:invoke(spawn(SecondaryNode, BadResponder), BadSender) end),
-
- LocalGoodPids = spawn_responders(node(), Responder, 2),
- RemoteGoodPids = spawn_responders(SecondaryNode, Responder, 2),
- LocalBadPids = spawn_responders(node(), BadResponder, 2),
- RemoteBadPids = spawn_responders(SecondaryNode, BadResponder, 2),
-
- {GoodRes, []} = delegate:invoke(LocalGoodPids ++ RemoteGoodPids, Sender),
- true = lists:all(fun ({_, response}) -> true end, GoodRes),
- GoodResPids = [Pid || {Pid, _} <- GoodRes],
-
- Good = lists:usort(LocalGoodPids ++ RemoteGoodPids),
- Good = lists:usort(GoodResPids),
-
- {[], BadRes} = delegate:invoke(LocalBadPids ++ RemoteBadPids, BadSender),
- true = lists:all(fun ({_, {exit, exception, _}}) -> true end, BadRes),
- BadResPids = [Pid || {Pid, _} <- BadRes],
-
- Bad = lists:usort(LocalBadPids ++ RemoteBadPids),
- Bad = lists:usort(BadResPids),
-
- MagicalPids = [rabbit_misc:string_to_pid(Str) ||
- Str <- ["<nonode@nohost.0.1.0>", "<nonode@nohost.0.2.0>"]],
- {[], BadNodes} = delegate:invoke(MagicalPids, Sender),
- true = lists:all(
- fun ({_, {exit, {nodedown, nonode@nohost}, _Stack}}) -> true end,
- BadNodes),
- BadNodesPids = [Pid || {Pid, _} <- BadNodes],
-
- Magical = lists:usort(MagicalPids),
- Magical = lists:usort(BadNodesPids),
-
- passed.
-
-test_queue_cleanup(_SecondaryNode) ->
- {_Writer, Ch} = test_spawn(),
- rabbit_channel:do(Ch, #'queue.declare'{ queue = ?CLEANUP_QUEUE_NAME }),
- receive #'queue.declare_ok'{queue = ?CLEANUP_QUEUE_NAME} ->
- ok
- after ?TIMEOUT -> throw(failed_to_receive_queue_declare_ok)
- end,
- rabbit_channel:shutdown(Ch),
- rabbit:stop(),
- rabbit:start(),
- {_Writer2, Ch2} = test_spawn(),
- rabbit_channel:do(Ch2, #'queue.declare'{ passive = true,
- queue = ?CLEANUP_QUEUE_NAME }),
- receive
- #'channel.close'{reply_code = ?NOT_FOUND} ->
- ok
- after ?TIMEOUT -> throw(failed_to_receive_channel_exit)
- end,
- rabbit_channel:shutdown(Ch2),
- passed.
-
-test_declare_on_dead_queue(SecondaryNode) ->
- QueueName = rabbit_misc:r(<<"/">>, queue, ?CLEANUP_QUEUE_NAME),
- Self = self(),
- Pid = spawn(SecondaryNode,
- fun () ->
- {new, #amqqueue{name = QueueName, pid = QPid}} =
- rabbit_amqqueue:declare(QueueName, false, false, [],
- none),
- exit(QPid, kill),
- Self ! {self(), killed, QPid}
- end),
- receive
- {Pid, killed, OldPid} ->
- Q = dead_queue_loop(QueueName, OldPid),
- {ok, 0} = rabbit_amqqueue:delete(Q, false, false),
- passed
- after ?TIMEOUT -> throw(failed_to_create_and_kill_queue)
- end.
-
-dead_queue_loop(QueueName, OldPid) ->
- {existing, Q} = rabbit_amqqueue:declare(QueueName, false, false, [], none),
- case Q#amqqueue.pid of
- OldPid -> timer:sleep(25),
- dead_queue_loop(QueueName, OldPid);
- _ -> true = rabbit_misc:is_process_alive(Q#amqqueue.pid),
- Q
- end.
-
-%%---------------------------------------------------------------------
-
-control_action(Command, Args) ->
- control_action(Command, node(), Args, default_options()).
-
-control_action(Command, Args, NewOpts) ->
- control_action(Command, node(), Args,
- expand_options(default_options(), NewOpts)).
-
-control_action(Command, Node, Args, Opts) ->
- case catch rabbit_control_main:action(
- Command, Node, Args, Opts,
- fun (Format, Args1) ->
- io:format(Format ++ " ...~n", Args1)
- end) of
- ok ->
- io:format("done.~n"),
- ok;
- {ok, Result} ->
- rabbit_ctl_misc:print_cmd_result(Command, Result),
- ok;
- Other ->
- io:format("failed.~n"),
- Other
- end.
-
-control_action_opts(Raw) ->
- NodeStr = atom_to_list(node()),
- case rabbit_control_main:parse_arguments(Raw, NodeStr) of
- {ok, {Cmd, Opts, Args}} ->
- case control_action(Cmd, node(), Args, Opts) of
- ok -> ok;
- _ -> error
- end;
- _ ->
- error
- end.
-
-info_action(Command, Args, CheckVHost) ->
- ok = control_action(Command, []),
- if CheckVHost -> ok = control_action(Command, [], ["-p", "/"]);
- true -> ok
- end,
- ok = control_action(Command, lists:map(fun atom_to_list/1, Args)),
- {bad_argument, dummy} = control_action(Command, ["dummy"]),
- ok.
-
-default_options() -> [{"-p", "/"}, {"-q", "false"}].
-
-expand_options(As, Bs) ->
- lists:foldl(fun({K, _}=A, R) ->
- case proplists:is_defined(K, R) of
- true -> R;
- false -> [A | R]
- end
- end, Bs, As).
-
-check_parse_arguments(ExpRes, Fun, As) ->
- SortRes =
- fun (no_command) -> no_command;
- ({ok, {C, KVs, As1}}) -> {ok, {C, lists:sort(KVs), As1}}
- end,
-
- true = SortRes(ExpRes) =:= SortRes(Fun(As)).
-
-empty_files(Files) ->
- [case file:read_file_info(File) of
- {ok, FInfo} -> FInfo#file_info.size == 0;
- Error -> Error
- end || File <- Files].
-
-non_empty_files(Files) ->
- [case EmptyFile of
- {error, Reason} -> {error, Reason};
- _ -> not(EmptyFile)
- end || EmptyFile <- empty_files(Files)].
-
-test_logs_working(MainLogFile, SaslLogFile) ->
- ok = rabbit_log:error("foo bar"),
- ok = error_logger:error_report(crash_report, [foo, bar]),
- %% give the error loggers some time to catch up
- timer:sleep(100),
- [true, true] = non_empty_files([MainLogFile, SaslLogFile]),
- ok.
-
-set_permissions(Path, Mode) ->
- case file:read_file_info(Path) of
- {ok, FInfo} -> file:write_file_info(
- Path,
- FInfo#file_info{mode=Mode});
- Error -> Error
- end.
-
-clean_logs(Files, Suffix) ->
- [begin
- ok = delete_file(File),
- ok = delete_file([File, Suffix])
- end || File <- Files],
- ok.
-
-assert_ram_node() ->
- case rabbit_mnesia:node_type() of
- disc -> exit('not_ram_node');
- ram -> ok
- end.
-
-assert_disc_node() ->
- case rabbit_mnesia:node_type() of
- disc -> ok;
- ram -> exit('not_disc_node')
- end.
-
-delete_file(File) ->
- case file:delete(File) of
- ok -> ok;
- {error, enoent} -> ok;
- Error -> Error
- end.
-
-make_files_non_writable(Files) ->
- [ok = file:write_file_info(File, #file_info{mode=0}) ||
- File <- Files],
- ok.
-
-add_log_handlers(Handlers) ->
- [ok = error_logger:add_report_handler(Handler, Args) ||
- {Handler, Args} <- Handlers],
- ok.
-
-%% sasl_report_file_h returns [] during terminate
-%% see: https://github.com/erlang/otp/blob/maint/lib/stdlib/src/error_logger_file_h.erl#L98
-%%
-%% error_logger_file_h returns ok since OTP 18.1
-%% see: https://github.com/erlang/otp/blob/maint/lib/stdlib/src/error_logger_file_h.erl#L98
-delete_log_handlers(Handlers) ->
- [ok_or_empty_list(error_logger:delete_report_handler(Handler))
- || Handler <- Handlers],
- ok.
-
-ok_or_empty_list([]) ->
- [];
-ok_or_empty_list(ok) ->
- ok.
-
-test_supervisor_delayed_restart() ->
- test_sup:test_supervisor_delayed_restart().
-
-test_file_handle_cache() ->
- %% test copying when there is just one spare handle
- Limit = file_handle_cache:get_limit(),
- ok = file_handle_cache:set_limit(5), %% 1 or 2 sockets, 2 msg_stores
- TmpDir = filename:join(rabbit_mnesia:dir(), "tmp"),
- ok = filelib:ensure_dir(filename:join(TmpDir, "nothing")),
- [Src1, Dst1, Src2, Dst2] = Files =
- [filename:join(TmpDir, Str) || Str <- ["file1", "file2", "file3", "file4"]],
- Content = <<"foo">>,
- CopyFun = fun (Src, Dst) ->
- {ok, Hdl} = prim_file:open(Src, [binary, write]),
- ok = prim_file:write(Hdl, Content),
- ok = prim_file:sync(Hdl),
- prim_file:close(Hdl),
-
- {ok, SrcHdl} = file_handle_cache:open(Src, [read], []),
- {ok, DstHdl} = file_handle_cache:open(Dst, [write], []),
- Size = size(Content),
- {ok, Size} = file_handle_cache:copy(SrcHdl, DstHdl, Size),
- ok = file_handle_cache:delete(SrcHdl),
- ok = file_handle_cache:delete(DstHdl)
- end,
- Pid = spawn(fun () -> {ok, Hdl} = file_handle_cache:open(
- filename:join(TmpDir, "file5"),
- [write], []),
- receive {next, Pid1} -> Pid1 ! {next, self()} end,
- file_handle_cache:delete(Hdl),
- %% This will block and never return, so we
- %% exercise the fhc tidying up the pending
- %% queue on the death of a process.
- ok = CopyFun(Src1, Dst1)
- end),
- ok = CopyFun(Src1, Dst1),
- ok = file_handle_cache:set_limit(2),
- Pid ! {next, self()},
- receive {next, Pid} -> ok end,
- timer:sleep(100),
- Pid1 = spawn(fun () -> CopyFun(Src2, Dst2) end),
- timer:sleep(100),
- erlang:monitor(process, Pid),
- erlang:monitor(process, Pid1),
- exit(Pid, kill),
- exit(Pid1, kill),
- receive {'DOWN', _MRef, process, Pid, _Reason} -> ok end,
- receive {'DOWN', _MRef1, process, Pid1, _Reason1} -> ok end,
- [file:delete(File) || File <- Files],
- ok = file_handle_cache:set_limit(Limit),
- passed.
-
-test_backing_queue() ->
- case application:get_env(rabbit, backing_queue_module) of
- {ok, rabbit_priority_queue} ->
- {ok, FileSizeLimit} =
- application:get_env(rabbit, msg_store_file_size_limit),
- application:set_env(rabbit, msg_store_file_size_limit, 512),
- {ok, MaxJournal} =
- application:get_env(rabbit, queue_index_max_journal_entries),
- application:set_env(rabbit, queue_index_max_journal_entries, 128),
- passed = test_msg_store(),
- application:set_env(rabbit, msg_store_file_size_limit,
- FileSizeLimit),
- [begin
- application:set_env(
- rabbit, queue_index_embed_msgs_below, Bytes),
- passed = test_queue_index(),
- passed = test_queue_index_props(),
- passed = test_variable_queue(),
- passed = test_variable_queue_delete_msg_store_files_callback(),
- passed = test_queue_recover()
- end || Bytes <- [0, 1024]],
- application:set_env(rabbit, queue_index_max_journal_entries,
- MaxJournal),
- %% We will have restarted the message store, and thus changed
- %% the order of the children of rabbit_sup. This will cause
- %% problems if there are subsequent failures - see bug 24262.
- ok = restart_app(),
- passed;
- _ ->
- passed
- end.
-
-restart_msg_store_empty() ->
- ok = rabbit_variable_queue:stop_msg_store(),
- ok = rabbit_variable_queue:start_msg_store(
- undefined, {fun (ok) -> finished end, ok}).
-
-msg_id_bin(X) ->
- erlang:md5(term_to_binary(X)).
-
-msg_store_client_init(MsgStore, Ref) ->
- rabbit_msg_store:client_init(MsgStore, Ref, undefined, undefined).
-
-on_disk_capture() ->
- receive
- {await, MsgIds, Pid} -> on_disk_capture([], MsgIds, Pid);
- stop -> done
- end.
-
-on_disk_capture([_|_], _Awaiting, Pid) ->
- Pid ! {self(), surplus};
-on_disk_capture(OnDisk, Awaiting, Pid) ->
- receive
- {on_disk, MsgIdsS} ->
- MsgIds = gb_sets:to_list(MsgIdsS),
- on_disk_capture(OnDisk ++ (MsgIds -- Awaiting), Awaiting -- MsgIds,
- Pid);
- stop ->
- done
- after (case Awaiting of [] -> 200; _ -> ?TIMEOUT end) ->
- case Awaiting of
- [] -> Pid ! {self(), arrived}, on_disk_capture();
- _ -> Pid ! {self(), timeout}
- end
- end.
-
-on_disk_await(Pid, MsgIds) when is_list(MsgIds) ->
- Pid ! {await, MsgIds, self()},
- receive
- {Pid, arrived} -> ok;
- {Pid, Error} -> Error
- end.
-
-on_disk_stop(Pid) ->
- MRef = erlang:monitor(process, Pid),
- Pid ! stop,
- receive {'DOWN', MRef, process, Pid, _Reason} ->
- ok
- end.
-
-msg_store_client_init_capture(MsgStore, Ref) ->
- Pid = spawn(fun on_disk_capture/0),
- {Pid, rabbit_msg_store:client_init(
- MsgStore, Ref, fun (MsgIds, _ActionTaken) ->
- Pid ! {on_disk, MsgIds}
- end, undefined)}.
-
-msg_store_contains(Atom, MsgIds, MSCState) ->
- Atom = lists:foldl(
- fun (MsgId, Atom1) when Atom1 =:= Atom ->
- rabbit_msg_store:contains(MsgId, MSCState) end,
- Atom, MsgIds).
-
-msg_store_read(MsgIds, MSCState) ->
- lists:foldl(fun (MsgId, MSCStateM) ->
- {{ok, MsgId}, MSCStateN} = rabbit_msg_store:read(
- MsgId, MSCStateM),
- MSCStateN
- end, MSCState, MsgIds).
-
-msg_store_write(MsgIds, MSCState) ->
- ok = lists:foldl(fun (MsgId, ok) ->
- rabbit_msg_store:write(MsgId, MsgId, MSCState)
- end, ok, MsgIds).
-
-msg_store_write_flow(MsgIds, MSCState) ->
- ok = lists:foldl(fun (MsgId, ok) ->
- rabbit_msg_store:write_flow(MsgId, MsgId, MSCState)
- end, ok, MsgIds).
-
-msg_store_remove(MsgIds, MSCState) ->
- rabbit_msg_store:remove(MsgIds, MSCState).
-
-msg_store_remove(MsgStore, Ref, MsgIds) ->
- with_msg_store_client(MsgStore, Ref,
- fun (MSCStateM) ->
- ok = msg_store_remove(MsgIds, MSCStateM),
- MSCStateM
- end).
-
-with_msg_store_client(MsgStore, Ref, Fun) ->
- rabbit_msg_store:client_terminate(
- Fun(msg_store_client_init(MsgStore, Ref))).
-
-foreach_with_msg_store_client(MsgStore, Ref, Fun, L) ->
- rabbit_msg_store:client_terminate(
- lists:foldl(fun (MsgId, MSCState) -> Fun(MsgId, MSCState) end,
- msg_store_client_init(MsgStore, Ref), L)).
-
-test_msg_store() ->
- restart_msg_store_empty(),
- MsgIds = [msg_id_bin(M) || M <- lists:seq(1,100)],
- {MsgIds1stHalf, MsgIds2ndHalf} = lists:split(length(MsgIds) div 2, MsgIds),
- Ref = rabbit_guid:gen(),
- {Cap, MSCState} = msg_store_client_init_capture(
- ?PERSISTENT_MSG_STORE, Ref),
- Ref2 = rabbit_guid:gen(),
- {Cap2, MSC2State} = msg_store_client_init_capture(
- ?PERSISTENT_MSG_STORE, Ref2),
- %% check we don't contain any of the msgs we're about to publish
- false = msg_store_contains(false, MsgIds, MSCState),
- %% test confirm logic
- passed = test_msg_store_confirms([hd(MsgIds)], Cap, MSCState),
- %% check we don't contain any of the msgs we're about to publish
- false = msg_store_contains(false, MsgIds, MSCState),
- %% publish the first half
- ok = msg_store_write(MsgIds1stHalf, MSCState),
- %% sync on the first half
- ok = on_disk_await(Cap, MsgIds1stHalf),
- %% publish the second half
- ok = msg_store_write(MsgIds2ndHalf, MSCState),
- %% check they're all in there
- true = msg_store_contains(true, MsgIds, MSCState),
- %% publish the latter half twice so we hit the caching and ref
- %% count code. We need to do this through a 2nd client since a
- %% single client is not supposed to write the same message more
- %% than once without first removing it.
- ok = msg_store_write(MsgIds2ndHalf, MSC2State),
- %% check they're still all in there
- true = msg_store_contains(true, MsgIds, MSCState),
- %% sync on the 2nd half
- ok = on_disk_await(Cap2, MsgIds2ndHalf),
- %% cleanup
- ok = on_disk_stop(Cap2),
- ok = rabbit_msg_store:client_delete_and_terminate(MSC2State),
- ok = on_disk_stop(Cap),
- %% read them all
- MSCState1 = msg_store_read(MsgIds, MSCState),
- %% read them all again - this will hit the cache, not disk
- MSCState2 = msg_store_read(MsgIds, MSCState1),
- %% remove them all
- ok = msg_store_remove(MsgIds, MSCState2),
- %% check first half doesn't exist
- false = msg_store_contains(false, MsgIds1stHalf, MSCState2),
- %% check second half does exist
- true = msg_store_contains(true, MsgIds2ndHalf, MSCState2),
- %% read the second half again
- MSCState3 = msg_store_read(MsgIds2ndHalf, MSCState2),
- %% read the second half again, just for fun (aka code coverage)
- MSCState4 = msg_store_read(MsgIds2ndHalf, MSCState3),
- ok = rabbit_msg_store:client_terminate(MSCState4),
- %% stop and restart, preserving every other msg in 2nd half
- ok = rabbit_variable_queue:stop_msg_store(),
- ok = rabbit_variable_queue:start_msg_store(
- [], {fun ([]) -> finished;
- ([MsgId|MsgIdsTail])
- when length(MsgIdsTail) rem 2 == 0 ->
- {MsgId, 1, MsgIdsTail};
- ([MsgId|MsgIdsTail]) ->
- {MsgId, 0, MsgIdsTail}
- end, MsgIds2ndHalf}),
- MSCState5 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
- %% check we have the right msgs left
- lists:foldl(
- fun (MsgId, Bool) ->
- not(Bool = rabbit_msg_store:contains(MsgId, MSCState5))
- end, false, MsgIds2ndHalf),
- ok = rabbit_msg_store:client_terminate(MSCState5),
- %% restart empty
- restart_msg_store_empty(),
- MSCState6 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
- %% check we don't contain any of the msgs
- false = msg_store_contains(false, MsgIds, MSCState6),
- %% publish the first half again
- ok = msg_store_write(MsgIds1stHalf, MSCState6),
- %% this should force some sort of sync internally otherwise misread
- ok = rabbit_msg_store:client_terminate(
- msg_store_read(MsgIds1stHalf, MSCState6)),
- MSCState7 = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
- ok = msg_store_remove(MsgIds1stHalf, MSCState7),
- ok = rabbit_msg_store:client_terminate(MSCState7),
- %% restart empty
- restart_msg_store_empty(), %% now safe to reuse msg_ids
- %% push a lot of msgs in... at least 100 files worth
- {ok, FileSize} = application:get_env(rabbit, msg_store_file_size_limit),
- PayloadSizeBits = 65536,
- BigCount = trunc(100 * FileSize / (PayloadSizeBits div 8)),
- MsgIdsBig = [msg_id_bin(X) || X <- lists:seq(1, BigCount)],
- Payload = << 0:PayloadSizeBits >>,
- ok = with_msg_store_client(
- ?PERSISTENT_MSG_STORE, Ref,
- fun (MSCStateM) ->
- [ok = rabbit_msg_store:write(MsgId, Payload, MSCStateM) ||
- MsgId <- MsgIdsBig],
- MSCStateM
- end),
- %% now read them to ensure we hit the fast client-side reading
- ok = foreach_with_msg_store_client(
- ?PERSISTENT_MSG_STORE, Ref,
- fun (MsgId, MSCStateM) ->
- {{ok, Payload}, MSCStateN} = rabbit_msg_store:read(
- MsgId, MSCStateM),
- MSCStateN
- end, MsgIdsBig),
- %% .., then 3s by 1...
- ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref,
- [msg_id_bin(X) || X <- lists:seq(BigCount, 1, -3)]),
- %% .., then remove 3s by 2, from the young end first. This hits
- %% GC (under 50% good data left, but no empty files. Must GC).
- ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref,
- [msg_id_bin(X) || X <- lists:seq(BigCount-1, 1, -3)]),
- %% .., then remove 3s by 3, from the young end first. This hits
- %% GC...
- ok = msg_store_remove(?PERSISTENT_MSG_STORE, Ref,
- [msg_id_bin(X) || X <- lists:seq(BigCount-2, 1, -3)]),
- %% ensure empty
- ok = with_msg_store_client(
- ?PERSISTENT_MSG_STORE, Ref,
- fun (MSCStateM) ->
- false = msg_store_contains(false, MsgIdsBig, MSCStateM),
- MSCStateM
- end),
- %%
- passed = test_msg_store_client_delete_and_terminate(),
- %% restart empty
- restart_msg_store_empty(),
- passed.
-
-test_msg_store_confirms(MsgIds, Cap, MSCState) ->
- %% write -> confirmed
- ok = msg_store_write(MsgIds, MSCState),
- ok = on_disk_await(Cap, MsgIds),
- %% remove -> _
- ok = msg_store_remove(MsgIds, MSCState),
- ok = on_disk_await(Cap, []),
- %% write, remove -> confirmed
- ok = msg_store_write(MsgIds, MSCState),
- ok = msg_store_remove(MsgIds, MSCState),
- ok = on_disk_await(Cap, MsgIds),
- %% write, remove, write -> confirmed, confirmed
- ok = msg_store_write(MsgIds, MSCState),
- ok = msg_store_remove(MsgIds, MSCState),
- ok = msg_store_write(MsgIds, MSCState),
- ok = on_disk_await(Cap, MsgIds ++ MsgIds),
- %% remove, write -> confirmed
- ok = msg_store_remove(MsgIds, MSCState),
- ok = msg_store_write(MsgIds, MSCState),
- ok = on_disk_await(Cap, MsgIds),
- %% remove, write, remove -> confirmed
- ok = msg_store_remove(MsgIds, MSCState),
- ok = msg_store_write(MsgIds, MSCState),
- ok = msg_store_remove(MsgIds, MSCState),
- ok = on_disk_await(Cap, MsgIds),
- %% confirmation on timer-based sync
- passed = test_msg_store_confirm_timer(),
- passed.
-
-test_msg_store_confirm_timer() ->
- Ref = rabbit_guid:gen(),
- MsgId = msg_id_bin(1),
- Self = self(),
- MSCState = rabbit_msg_store:client_init(
- ?PERSISTENT_MSG_STORE, Ref,
- fun (MsgIds, _ActionTaken) ->
- case gb_sets:is_member(MsgId, MsgIds) of
- true -> Self ! on_disk;
- false -> ok
- end
- end, undefined),
- ok = msg_store_write([MsgId], MSCState),
- ok = msg_store_keep_busy_until_confirm([msg_id_bin(2)], MSCState, false),
- ok = msg_store_remove([MsgId], MSCState),
- ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
- passed.
-
-msg_store_keep_busy_until_confirm(MsgIds, MSCState, Blocked) ->
- After = case Blocked of
- false -> 0;
- true -> ?MAX_WAIT
- end,
- Recurse = fun () -> msg_store_keep_busy_until_confirm(
- MsgIds, MSCState, credit_flow:blocked()) end,
- receive
- on_disk -> ok;
- {bump_credit, Msg} -> credit_flow:handle_bump_msg(Msg),
- Recurse()
- after After ->
- ok = msg_store_write_flow(MsgIds, MSCState),
- ok = msg_store_remove(MsgIds, MSCState),
- Recurse()
- end.
-
-test_msg_store_client_delete_and_terminate() ->
- restart_msg_store_empty(),
- MsgIds = [msg_id_bin(M) || M <- lists:seq(1, 10)],
- Ref = rabbit_guid:gen(),
- MSCState = msg_store_client_init(?PERSISTENT_MSG_STORE, Ref),
- ok = msg_store_write(MsgIds, MSCState),
- %% test the 'dying client' fast path for writes
- ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
- passed.
-
-queue_name(Name) ->
- rabbit_misc:r(<<"/">>, queue, Name).
-
-test_queue() ->
- queue_name(<<"test">>).
-
-init_test_queue() ->
- TestQueue = test_queue(),
- PRef = rabbit_guid:gen(),
- PersistentClient = msg_store_client_init(?PERSISTENT_MSG_STORE, PRef),
- Res = rabbit_queue_index:recover(
- TestQueue, [], false,
- fun (MsgId) ->
- rabbit_msg_store:contains(MsgId, PersistentClient)
- end,
- fun nop/1, fun nop/1),
- ok = rabbit_msg_store:client_delete_and_terminate(PersistentClient),
- Res.
-
-restart_test_queue(Qi) ->
- _ = rabbit_queue_index:terminate([], Qi),
- ok = rabbit_variable_queue:stop(),
- {ok, _} = rabbit_variable_queue:start([test_queue()]),
- init_test_queue().
-
-empty_test_queue() ->
- ok = rabbit_variable_queue:stop(),
- {ok, _} = rabbit_variable_queue:start([]),
- {0, 0, Qi} = init_test_queue(),
- _ = rabbit_queue_index:delete_and_terminate(Qi),
- ok.
-
-with_empty_test_queue(Fun) ->
- ok = empty_test_queue(),
- {0, 0, Qi} = init_test_queue(),
- rabbit_queue_index:delete_and_terminate(Fun(Qi)).
-
-restart_app() ->
- rabbit:stop(),
- rabbit:start().
-
-queue_index_publish(SeqIds, Persistent, Qi) ->
- Ref = rabbit_guid:gen(),
- MsgStore = case Persistent of
- true -> ?PERSISTENT_MSG_STORE;
- false -> ?TRANSIENT_MSG_STORE
- end,
- MSCState = msg_store_client_init(MsgStore, Ref),
- {A, B = [{_SeqId, LastMsgIdWritten} | _]} =
- lists:foldl(
- fun (SeqId, {QiN, SeqIdsMsgIdsAcc}) ->
- MsgId = rabbit_guid:gen(),
- QiM = rabbit_queue_index:publish(
- MsgId, SeqId, #message_properties{size = 10},
- Persistent, infinity, QiN),
- ok = rabbit_msg_store:write(MsgId, MsgId, MSCState),
- {QiM, [{SeqId, MsgId} | SeqIdsMsgIdsAcc]}
- end, {Qi, []}, SeqIds),
- %% do this just to force all of the publishes through to the msg_store:
- true = rabbit_msg_store:contains(LastMsgIdWritten, MSCState),
- ok = rabbit_msg_store:client_delete_and_terminate(MSCState),
- {A, B}.
-
-verify_read_with_published(_Delivered, _Persistent, [], _) ->
- ok;
-verify_read_with_published(Delivered, Persistent,
- [{MsgId, SeqId, _Props, Persistent, Delivered}|Read],
- [{SeqId, MsgId}|Published]) ->
- verify_read_with_published(Delivered, Persistent, Read, Published);
-verify_read_with_published(_Delivered, _Persistent, _Read, _Published) ->
- ko.
-
-test_queue_index_props() ->
- with_empty_test_queue(
- fun(Qi0) ->
- MsgId = rabbit_guid:gen(),
- Props = #message_properties{expiry=12345, size = 10},
- Qi1 = rabbit_queue_index:publish(
- MsgId, 1, Props, true, infinity, Qi0),
- {[{MsgId, 1, Props, _, _}], Qi2} =
- rabbit_queue_index:read(1, 2, Qi1),
- Qi2
- end),
-
- ok = rabbit_variable_queue:stop(),
- {ok, _} = rabbit_variable_queue:start([]),
-
- passed.
-
-test_queue_index() ->
- SegmentSize = rabbit_queue_index:next_segment_boundary(0),
- TwoSegs = SegmentSize + SegmentSize,
- MostOfASegment = trunc(SegmentSize*0.75),
- SeqIdsA = lists:seq(0, MostOfASegment-1),
- SeqIdsB = lists:seq(MostOfASegment, 2*MostOfASegment),
- SeqIdsC = lists:seq(0, trunc(SegmentSize/2)),
- SeqIdsD = lists:seq(0, SegmentSize*4),
-
- with_empty_test_queue(
- fun (Qi0) ->
- {0, 0, Qi1} = rabbit_queue_index:bounds(Qi0),
- {Qi2, SeqIdsMsgIdsA} = queue_index_publish(SeqIdsA, false, Qi1),
- {0, SegmentSize, Qi3} = rabbit_queue_index:bounds(Qi2),
- {ReadA, Qi4} = rabbit_queue_index:read(0, SegmentSize, Qi3),
- ok = verify_read_with_published(false, false, ReadA,
- lists:reverse(SeqIdsMsgIdsA)),
- %% should get length back as 0, as all the msgs were transient
- {0, 0, Qi6} = restart_test_queue(Qi4),
- {0, 0, Qi7} = rabbit_queue_index:bounds(Qi6),
- {Qi8, SeqIdsMsgIdsB} = queue_index_publish(SeqIdsB, true, Qi7),
- {0, TwoSegs, Qi9} = rabbit_queue_index:bounds(Qi8),
- {ReadB, Qi10} = rabbit_queue_index:read(0, SegmentSize, Qi9),
- ok = verify_read_with_published(false, true, ReadB,
- lists:reverse(SeqIdsMsgIdsB)),
- %% should get length back as MostOfASegment
- LenB = length(SeqIdsB),
- BytesB = LenB * 10,
- {LenB, BytesB, Qi12} = restart_test_queue(Qi10),
- {0, TwoSegs, Qi13} = rabbit_queue_index:bounds(Qi12),
- Qi14 = rabbit_queue_index:deliver(SeqIdsB, Qi13),
- {ReadC, Qi15} = rabbit_queue_index:read(0, SegmentSize, Qi14),
- ok = verify_read_with_published(true, true, ReadC,
- lists:reverse(SeqIdsMsgIdsB)),
- Qi16 = rabbit_queue_index:ack(SeqIdsB, Qi15),
- Qi17 = rabbit_queue_index:flush(Qi16),
- %% Everything will have gone now because #pubs == #acks
- {0, 0, Qi18} = rabbit_queue_index:bounds(Qi17),
- %% should get length back as 0 because all persistent
- %% msgs have been acked
- {0, 0, Qi19} = restart_test_queue(Qi18),
- Qi19
- end),
-
- %% These next bits are just to hit the auto deletion of segment files.
- %% First, partials:
- %% a) partial pub+del+ack, then move to new segment
- with_empty_test_queue(
- fun (Qi0) ->
- {Qi1, _SeqIdsMsgIdsC} = queue_index_publish(SeqIdsC,
- false, Qi0),
- Qi2 = rabbit_queue_index:deliver(SeqIdsC, Qi1),
- Qi3 = rabbit_queue_index:ack(SeqIdsC, Qi2),
- Qi4 = rabbit_queue_index:flush(Qi3),
- {Qi5, _SeqIdsMsgIdsC1} = queue_index_publish([SegmentSize],
- false, Qi4),
- Qi5
- end),
-
- %% b) partial pub+del, then move to new segment, then ack all in old segment
- with_empty_test_queue(
- fun (Qi0) ->
- {Qi1, _SeqIdsMsgIdsC2} = queue_index_publish(SeqIdsC,
- false, Qi0),
- Qi2 = rabbit_queue_index:deliver(SeqIdsC, Qi1),
- {Qi3, _SeqIdsMsgIdsC3} = queue_index_publish([SegmentSize],
- false, Qi2),
- Qi4 = rabbit_queue_index:ack(SeqIdsC, Qi3),
- rabbit_queue_index:flush(Qi4)
- end),
-
- %% c) just fill up several segments of all pubs, then +dels, then +acks
- with_empty_test_queue(
- fun (Qi0) ->
- {Qi1, _SeqIdsMsgIdsD} = queue_index_publish(SeqIdsD,
- false, Qi0),
- Qi2 = rabbit_queue_index:deliver(SeqIdsD, Qi1),
- Qi3 = rabbit_queue_index:ack(SeqIdsD, Qi2),
- rabbit_queue_index:flush(Qi3)
- end),
-
- %% d) get messages in all states to a segment, then flush, then do
- %% the same again, don't flush and read. This will hit all
- %% possibilities in combining the segment with the journal.
- with_empty_test_queue(
- fun (Qi0) ->
- {Qi1, [Seven,Five,Four|_]} = queue_index_publish([0,1,2,4,5,7],
- false, Qi0),
- Qi2 = rabbit_queue_index:deliver([0,1,4], Qi1),
- Qi3 = rabbit_queue_index:ack([0], Qi2),
- Qi4 = rabbit_queue_index:flush(Qi3),
- {Qi5, [Eight,Six|_]} = queue_index_publish([3,6,8], false, Qi4),
- Qi6 = rabbit_queue_index:deliver([2,3,5,6], Qi5),
- Qi7 = rabbit_queue_index:ack([1,2,3], Qi6),
- {[], Qi8} = rabbit_queue_index:read(0, 4, Qi7),
- {ReadD, Qi9} = rabbit_queue_index:read(4, 7, Qi8),
- ok = verify_read_with_published(true, false, ReadD,
- [Four, Five, Six]),
- {ReadE, Qi10} = rabbit_queue_index:read(7, 9, Qi9),
- ok = verify_read_with_published(false, false, ReadE,
- [Seven, Eight]),
- Qi10
- end),
-
- %% e) as for (d), but use terminate instead of read, which will
- %% exercise journal_minus_segment, not segment_plus_journal.
- with_empty_test_queue(
- fun (Qi0) ->
- {Qi1, _SeqIdsMsgIdsE} = queue_index_publish([0,1,2,4,5,7],
- true, Qi0),
- Qi2 = rabbit_queue_index:deliver([0,1,4], Qi1),
- Qi3 = rabbit_queue_index:ack([0], Qi2),
- {5, 50, Qi4} = restart_test_queue(Qi3),
- {Qi5, _SeqIdsMsgIdsF} = queue_index_publish([3,6,8], true, Qi4),
- Qi6 = rabbit_queue_index:deliver([2,3,5,6], Qi5),
- Qi7 = rabbit_queue_index:ack([1,2,3], Qi6),
- {5, 50, Qi8} = restart_test_queue(Qi7),
- Qi8
- end),
-
- ok = rabbit_variable_queue:stop(),
- {ok, _} = rabbit_variable_queue:start([]),
-
- passed.
-
-variable_queue_init(Q, Recover) ->
- rabbit_variable_queue:init(
- Q, case Recover of
- true -> non_clean_shutdown;
- false -> new
- end, fun nop/2, fun nop/2, fun nop/1, fun nop/1).
-
-variable_queue_publish(IsPersistent, Count, VQ) ->
- variable_queue_publish(IsPersistent, Count, fun (_N, P) -> P end, VQ).
-
-variable_queue_publish(IsPersistent, Count, PropFun, VQ) ->
- variable_queue_publish(IsPersistent, 1, Count, PropFun,
- fun (_N) -> <<>> end, VQ).
-
-variable_queue_publish(IsPersistent, Start, Count, PropFun, PayloadFun, VQ) ->
- variable_queue_wait_for_shuffling_end(
- lists:foldl(
- fun (N, VQN) ->
- rabbit_variable_queue:publish(
- rabbit_basic:message(
- rabbit_misc:r(<<>>, exchange, <<>>),
- <<>>, #'P_basic'{delivery_mode = case IsPersistent of
- true -> 2;
- false -> 1
- end},
- PayloadFun(N)),
- PropFun(N, #message_properties{size = 10}),
- false, self(), noflow, VQN)
- end, VQ, lists:seq(Start, Start + Count - 1))).
-
-variable_queue_fetch(Count, IsPersistent, IsDelivered, Len, VQ) ->
- lists:foldl(fun (N, {VQN, AckTagsAcc}) ->
- Rem = Len - N,
- {{#basic_message { is_persistent = IsPersistent },
- IsDelivered, AckTagN}, VQM} =
- rabbit_variable_queue:fetch(true, VQN),
- Rem = rabbit_variable_queue:len(VQM),
- {VQM, [AckTagN | AckTagsAcc]}
- end, {VQ, []}, lists:seq(1, Count)).
-
-variable_queue_set_ram_duration_target(Duration, VQ) ->
- variable_queue_wait_for_shuffling_end(
- rabbit_variable_queue:set_ram_duration_target(Duration, VQ)).
-
-assert_prop(List, Prop, Value) ->
- case proplists:get_value(Prop, List)of
- Value -> ok;
- _ -> {exit, Prop, exp, Value, List}
- end.
-
-assert_props(List, PropVals) ->
- [assert_prop(List, Prop, Value) || {Prop, Value} <- PropVals].
-
-test_amqqueue(Durable) ->
- (rabbit_amqqueue:pseudo_queue(test_queue(), self()))
- #amqqueue { durable = Durable }.
-
-with_fresh_variable_queue(Fun) ->
- Ref = make_ref(),
- Me = self(),
- %% Run in a separate process since rabbit_msg_store will send
- %% bump_credit messages and we want to ignore them
- spawn_link(fun() ->
- ok = empty_test_queue(),
- VQ = variable_queue_init(test_amqqueue(true), false),
- S0 = variable_queue_status(VQ),
- assert_props(S0, [{q1, 0}, {q2, 0},
- {delta,
- {delta, undefined, 0, undefined}},
- {q3, 0}, {q4, 0},
- {len, 0}]),
- try
- _ = rabbit_variable_queue:delete_and_terminate(
- shutdown, Fun(VQ)),
- Me ! Ref
- catch
- Type:Error ->
- Me ! {Ref, Type, Error, erlang:get_stacktrace()}
- end
- end),
- receive
- Ref -> ok;
- {Ref, Type, Error, ST} -> exit({Type, Error, ST})
- end,
- passed.
-
-publish_and_confirm(Q, Payload, Count) ->
- Seqs = lists:seq(1, Count),
- [begin
- Msg = rabbit_basic:message(rabbit_misc:r(<<>>, exchange, <<>>),
- <<>>, #'P_basic'{delivery_mode = 2},
- Payload),
- Delivery = #delivery{mandatory = false, sender = self(),
- confirm = true, message = Msg, msg_seq_no = Seq,
- flow = noflow},
- _QPids = rabbit_amqqueue:deliver([Q], Delivery)
- end || Seq <- Seqs],
- wait_for_confirms(gb_sets:from_list(Seqs)).
-
-wait_for_confirms(Unconfirmed) ->
- case gb_sets:is_empty(Unconfirmed) of
- true -> ok;
- false -> receive {'$gen_cast', {confirm, Confirmed, _}} ->
- wait_for_confirms(
- rabbit_misc:gb_sets_difference(
- Unconfirmed, gb_sets:from_list(Confirmed)))
- after ?TIMEOUT -> exit(timeout_waiting_for_confirm)
- end
- end.
-
-test_variable_queue() ->
- [passed = with_fresh_variable_queue(F) ||
- F <- [fun test_variable_queue_dynamic_duration_change/1,
- fun test_variable_queue_partial_segments_delta_thing/1,
- fun test_variable_queue_all_the_bits_not_covered_elsewhere1/1,
- fun test_variable_queue_all_the_bits_not_covered_elsewhere2/1,
- fun test_drop/1,
- fun test_variable_queue_fold_msg_on_disk/1,
- fun test_dropfetchwhile/1,
- fun test_dropwhile_varying_ram_duration/1,
- fun test_fetchwhile_varying_ram_duration/1,
- fun test_variable_queue_ack_limiting/1,
- fun test_variable_queue_purge/1,
- fun test_variable_queue_requeue/1,
- fun test_variable_queue_requeue_ram_beta/1,
- fun test_variable_queue_fold/1]],
- passed.
-
-test_variable_queue_fold(VQ0) ->
- {PendingMsgs, RequeuedMsgs, FreshMsgs, VQ1} =
- variable_queue_with_holes(VQ0),
- Count = rabbit_variable_queue:depth(VQ1),
- Msgs = lists:sort(PendingMsgs ++ RequeuedMsgs ++ FreshMsgs),
- lists:foldl(fun (Cut, VQ2) ->
- test_variable_queue_fold(Cut, Msgs, PendingMsgs, VQ2)
- end, VQ1, [0, 1, 2, Count div 2,
- Count - 1, Count, Count + 1, Count * 2]).
-
-test_variable_queue_fold(Cut, Msgs, PendingMsgs, VQ0) ->
- {Acc, VQ1} = rabbit_variable_queue:fold(
- fun (M, _, Pending, A) ->
- MInt = msg2int(M),
- Pending = lists:member(MInt, PendingMsgs), %% assert
- case MInt =< Cut of
- true -> {cont, [MInt | A]};
- false -> {stop, A}
- end
- end, [], VQ0),
- Expected = lists:takewhile(fun (I) -> I =< Cut end, Msgs),
- Expected = lists:reverse(Acc), %% assertion
- VQ1.
-
-msg2int(#basic_message{content = #content{ payload_fragments_rev = P}}) ->
- binary_to_term(list_to_binary(lists:reverse(P))).
-
-ack_subset(AckSeqs, Interval, Rem) ->
- lists:filter(fun ({_Ack, N}) -> (N + Rem) rem Interval == 0 end, AckSeqs).
-
-requeue_one_by_one(Acks, VQ) ->
- lists:foldl(fun (AckTag, VQN) ->
- {_MsgId, VQM} = rabbit_variable_queue:requeue(
- [AckTag], VQN),
- VQM
- end, VQ, Acks).
-
-%% Create a vq with messages in q1, delta, and q3, and holes (in the
-%% form of pending acks) in the latter two.
-variable_queue_with_holes(VQ0) ->
- Interval = 2048, %% should match vq:IO_BATCH_SIZE
- Count = rabbit_queue_index:next_segment_boundary(0)*2 + 2 * Interval,
- Seq = lists:seq(1, Count),
- VQ1 = variable_queue_set_ram_duration_target(0, VQ0),
- VQ2 = variable_queue_publish(
- false, 1, Count,
- fun (_, P) -> P end, fun erlang:term_to_binary/1, VQ1),
- {VQ3, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ2),
- Acks = lists:reverse(AcksR),
- AckSeqs = lists:zip(Acks, Seq),
- [{Subset1, _Seq1}, {Subset2, _Seq2}, {Subset3, Seq3}] =
- [lists:unzip(ack_subset(AckSeqs, Interval, I)) || I <- [0, 1, 2]],
- %% we requeue in three phases in order to exercise requeuing logic
- %% in various vq states
- {_MsgIds, VQ4} = rabbit_variable_queue:requeue(
- Acks -- (Subset1 ++ Subset2 ++ Subset3), VQ3),
- VQ5 = requeue_one_by_one(Subset1, VQ4),
- %% by now we have some messages (and holes) in delta
- VQ6 = requeue_one_by_one(Subset2, VQ5),
- VQ7 = variable_queue_set_ram_duration_target(infinity, VQ6),
- %% add the q1 tail
- VQ8 = variable_queue_publish(
- true, Count + 1, Interval,
- fun (_, P) -> P end, fun erlang:term_to_binary/1, VQ7),
- %% assertions
- [false = case V of
- {delta, _, 0, _} -> true;
- 0 -> true;
- _ -> false
- end || {K, V} <- variable_queue_status(VQ8),
- lists:member(K, [q1, delta, q3])],
- Depth = Count + Interval,
- Depth = rabbit_variable_queue:depth(VQ8),
- Len = Depth - length(Subset3),
- Len = rabbit_variable_queue:len(VQ8),
- {Seq3, Seq -- Seq3, lists:seq(Count + 1, Count + Interval), VQ8}.
-
-test_variable_queue_requeue(VQ0) ->
- {_PendingMsgs, RequeuedMsgs, FreshMsgs, VQ1} =
- variable_queue_with_holes(VQ0),
- Msgs =
- lists:zip(RequeuedMsgs,
- lists:duplicate(length(RequeuedMsgs), true)) ++
- lists:zip(FreshMsgs,
- lists:duplicate(length(FreshMsgs), false)),
- VQ2 = lists:foldl(fun ({I, Requeued}, VQa) ->
- {{M, MRequeued, _}, VQb} =
- rabbit_variable_queue:fetch(true, VQa),
- Requeued = MRequeued, %% assertion
- I = msg2int(M), %% assertion
- VQb
- end, VQ1, Msgs),
- {empty, VQ3} = rabbit_variable_queue:fetch(true, VQ2),
- VQ3.
-
-%% requeue from ram_pending_ack into q3, move to delta and then empty queue
-test_variable_queue_requeue_ram_beta(VQ0) ->
- Count = rabbit_queue_index:next_segment_boundary(0)*2 + 2,
- VQ1 = variable_queue_publish(false, Count, VQ0),
- {VQ2, AcksR} = variable_queue_fetch(Count, false, false, Count, VQ1),
- {Back, Front} = lists:split(Count div 2, AcksR),
- {_, VQ3} = rabbit_variable_queue:requeue(erlang:tl(Back), VQ2),
- VQ4 = variable_queue_set_ram_duration_target(0, VQ3),
- {_, VQ5} = rabbit_variable_queue:requeue([erlang:hd(Back)], VQ4),
- VQ6 = requeue_one_by_one(Front, VQ5),
- {VQ7, AcksAll} = variable_queue_fetch(Count, false, true, Count, VQ6),
- {_, VQ8} = rabbit_variable_queue:ack(AcksAll, VQ7),
- VQ8.
-
-test_variable_queue_purge(VQ0) ->
- LenDepth = fun (VQ) ->
- {rabbit_variable_queue:len(VQ),
- rabbit_variable_queue:depth(VQ)}
- end,
- VQ1 = variable_queue_publish(false, 10, VQ0),
- {VQ2, Acks} = variable_queue_fetch(6, false, false, 10, VQ1),
- {4, VQ3} = rabbit_variable_queue:purge(VQ2),
- {0, 6} = LenDepth(VQ3),
- {_, VQ4} = rabbit_variable_queue:requeue(lists:sublist(Acks, 2), VQ3),
- {2, 6} = LenDepth(VQ4),
- VQ5 = rabbit_variable_queue:purge_acks(VQ4),
- {2, 2} = LenDepth(VQ5),
- VQ5.
-
-test_variable_queue_ack_limiting(VQ0) ->
- %% start by sending in a bunch of messages
- Len = 1024,
- VQ1 = variable_queue_publish(false, Len, VQ0),
-
- %% squeeze and relax queue
- Churn = Len div 32,
- VQ2 = publish_fetch_and_ack(Churn, Len, VQ1),
-
- %% update stats for duration
- {_Duration, VQ3} = rabbit_variable_queue:ram_duration(VQ2),
-
- %% fetch half the messages
- {VQ4, _AckTags} = variable_queue_fetch(Len div 2, false, false, Len, VQ3),
-
- VQ5 = check_variable_queue_status(
- VQ4, [{len, Len div 2},
- {messages_unacknowledged_ram, Len div 2},
- {messages_ready_ram, Len div 2},
- {messages_ram, Len}]),
-
- %% ensure all acks go to disk on 0 duration target
- VQ6 = check_variable_queue_status(
- variable_queue_set_ram_duration_target(0, VQ5),
- [{len, Len div 2},
- {target_ram_count, 0},
- {messages_unacknowledged_ram, 0},
- {messages_ready_ram, 0},
- {messages_ram, 0}]),
-
- VQ6.
-
-test_drop(VQ0) ->
- %% start by sending a messages
- VQ1 = variable_queue_publish(false, 1, VQ0),
- %% drop message with AckRequired = true
- {{MsgId, AckTag}, VQ2} = rabbit_variable_queue:drop(true, VQ1),
- true = rabbit_variable_queue:is_empty(VQ2),
- true = AckTag =/= undefinded,
- %% drop again -> empty
- {empty, VQ3} = rabbit_variable_queue:drop(false, VQ2),
- %% requeue
- {[MsgId], VQ4} = rabbit_variable_queue:requeue([AckTag], VQ3),
- %% drop message with AckRequired = false
- {{MsgId, undefined}, VQ5} = rabbit_variable_queue:drop(false, VQ4),
- true = rabbit_variable_queue:is_empty(VQ5),
- VQ5.
-
-test_dropfetchwhile(VQ0) ->
- Count = 10,
-
- %% add messages with sequential expiry
- VQ1 = variable_queue_publish(
- false, 1, Count,
- fun (N, Props) -> Props#message_properties{expiry = N} end,
- fun erlang:term_to_binary/1, VQ0),
-
- %% fetch the first 5 messages
- {#message_properties{expiry = 6}, {Msgs, AckTags}, VQ2} =
- rabbit_variable_queue:fetchwhile(
- fun (#message_properties{expiry = Expiry}) -> Expiry =< 5 end,
- fun (Msg, AckTag, {MsgAcc, AckAcc}) ->
- {[Msg | MsgAcc], [AckTag | AckAcc]}
- end, {[], []}, VQ1),
- true = lists:seq(1, 5) == [msg2int(M) || M <- lists:reverse(Msgs)],
-
- %% requeue them
- {_MsgIds, VQ3} = rabbit_variable_queue:requeue(AckTags, VQ2),
-
- %% drop the first 5 messages
- {#message_properties{expiry = 6}, VQ4} =
- rabbit_variable_queue:dropwhile(
- fun (#message_properties {expiry = Expiry}) -> Expiry =< 5 end, VQ3),
-
- %% fetch 5
- VQ5 = lists:foldl(fun (N, VQN) ->
- {{Msg, _, _}, VQM} =
- rabbit_variable_queue:fetch(false, VQN),
- true = msg2int(Msg) == N,
- VQM
- end, VQ4, lists:seq(6, Count)),
-
- %% should be empty now
- true = rabbit_variable_queue:is_empty(VQ5),
-
- VQ5.
-
-test_dropwhile_varying_ram_duration(VQ0) ->
- test_dropfetchwhile_varying_ram_duration(
- fun (VQ1) ->
- {_, VQ2} = rabbit_variable_queue:dropwhile(
- fun (_) -> false end, VQ1),
- VQ2
- end, VQ0).
-
-test_fetchwhile_varying_ram_duration(VQ0) ->
- test_dropfetchwhile_varying_ram_duration(
- fun (VQ1) ->
- {_, ok, VQ2} = rabbit_variable_queue:fetchwhile(
- fun (_) -> false end,
- fun (_, _, A) -> A end,
- ok, VQ1),
- VQ2
- end, VQ0).
-
-test_dropfetchwhile_varying_ram_duration(Fun, VQ0) ->
- VQ1 = variable_queue_publish(false, 1, VQ0),
- VQ2 = variable_queue_set_ram_duration_target(0, VQ1),
- VQ3 = Fun(VQ2),
- VQ4 = variable_queue_set_ram_duration_target(infinity, VQ3),
- VQ5 = variable_queue_publish(false, 1, VQ4),
- VQ6 = Fun(VQ5),
- VQ6.
-
-test_variable_queue_dynamic_duration_change(VQ0) ->
- SegmentSize = rabbit_queue_index:next_segment_boundary(0),
-
- %% start by sending in a couple of segments worth
- Len = 2*SegmentSize,
- VQ1 = variable_queue_publish(false, Len, VQ0),
- %% squeeze and relax queue
- Churn = Len div 32,
- VQ2 = publish_fetch_and_ack(Churn, Len, VQ1),
-
- {Duration, VQ3} = rabbit_variable_queue:ram_duration(VQ2),
- VQ7 = lists:foldl(
- fun (Duration1, VQ4) ->
- {_Duration, VQ5} = rabbit_variable_queue:ram_duration(VQ4),
- VQ6 = variable_queue_set_ram_duration_target(
- Duration1, VQ5),
- publish_fetch_and_ack(Churn, Len, VQ6)
- end, VQ3, [Duration / 4, 0, Duration / 4, infinity]),
-
- %% drain
- {VQ8, AckTags} = variable_queue_fetch(Len, false, false, Len, VQ7),
- {_Guids, VQ9} = rabbit_variable_queue:ack(AckTags, VQ8),
- {empty, VQ10} = rabbit_variable_queue:fetch(true, VQ9),
-
- VQ10.
-
-publish_fetch_and_ack(0, _Len, VQ0) ->
- VQ0;
-publish_fetch_and_ack(N, Len, VQ0) ->
- VQ1 = variable_queue_publish(false, 1, VQ0),
- {{_Msg, false, AckTag}, VQ2} = rabbit_variable_queue:fetch(true, VQ1),
- Len = rabbit_variable_queue:len(VQ2),
- {_Guids, VQ3} = rabbit_variable_queue:ack([AckTag], VQ2),
- publish_fetch_and_ack(N-1, Len, VQ3).
-
-test_variable_queue_partial_segments_delta_thing(VQ0) ->
- SegmentSize = rabbit_queue_index:next_segment_boundary(0),
- HalfSegment = SegmentSize div 2,
- OneAndAHalfSegment = SegmentSize + HalfSegment,
- VQ1 = variable_queue_publish(true, OneAndAHalfSegment, VQ0),
- {_Duration, VQ2} = rabbit_variable_queue:ram_duration(VQ1),
- VQ3 = check_variable_queue_status(
- variable_queue_set_ram_duration_target(0, VQ2),
- %% one segment in q3, and half a segment in delta
- [{delta, {delta, SegmentSize, HalfSegment, OneAndAHalfSegment}},
- {q3, SegmentSize},
- {len, SegmentSize + HalfSegment}]),
- VQ4 = variable_queue_set_ram_duration_target(infinity, VQ3),
- VQ5 = check_variable_queue_status(
- variable_queue_publish(true, 1, VQ4),
- %% one alpha, but it's in the same segment as the deltas
- [{q1, 1},
- {delta, {delta, SegmentSize, HalfSegment, OneAndAHalfSegment}},
- {q3, SegmentSize},
- {len, SegmentSize + HalfSegment + 1}]),
- {VQ6, AckTags} = variable_queue_fetch(SegmentSize, true, false,
- SegmentSize + HalfSegment + 1, VQ5),
- VQ7 = check_variable_queue_status(
- VQ6,
- %% the half segment should now be in q3
- [{q1, 1},
- {delta, {delta, undefined, 0, undefined}},
- {q3, HalfSegment},
- {len, HalfSegment + 1}]),
- {VQ8, AckTags1} = variable_queue_fetch(HalfSegment + 1, true, false,
- HalfSegment + 1, VQ7),
- {_Guids, VQ9} = rabbit_variable_queue:ack(AckTags ++ AckTags1, VQ8),
- %% should be empty now
- {empty, VQ10} = rabbit_variable_queue:fetch(true, VQ9),
- VQ10.
-
-check_variable_queue_status(VQ0, Props) ->
- VQ1 = variable_queue_wait_for_shuffling_end(VQ0),
- S = variable_queue_status(VQ1),
- assert_props(S, Props),
- VQ1.
-
-variable_queue_status(VQ) ->
- Keys = rabbit_backing_queue:info_keys() -- [backing_queue_status],
- [{K, rabbit_variable_queue:info(K, VQ)} || K <- Keys] ++
- rabbit_variable_queue:info(backing_queue_status, VQ).
-
-variable_queue_wait_for_shuffling_end(VQ) ->
- case credit_flow:blocked() of
- false -> VQ;
- true -> receive
- {bump_credit, Msg} ->
- credit_flow:handle_bump_msg(Msg),
- variable_queue_wait_for_shuffling_end(
- rabbit_variable_queue:resume(VQ))
- end
- end.
-
-test_variable_queue_all_the_bits_not_covered_elsewhere1(VQ0) ->
- Count = 2 * rabbit_queue_index:next_segment_boundary(0),
- VQ1 = variable_queue_publish(true, Count, VQ0),
- VQ2 = variable_queue_publish(false, Count, VQ1),
- VQ3 = variable_queue_set_ram_duration_target(0, VQ2),
- {VQ4, _AckTags} = variable_queue_fetch(Count, true, false,
- Count + Count, VQ3),
- {VQ5, _AckTags1} = variable_queue_fetch(Count, false, false,
- Count, VQ4),
- _VQ6 = rabbit_variable_queue:terminate(shutdown, VQ5),
- VQ7 = variable_queue_init(test_amqqueue(true), true),
- {{_Msg1, true, _AckTag1}, VQ8} = rabbit_variable_queue:fetch(true, VQ7),
- Count1 = rabbit_variable_queue:len(VQ8),
- VQ9 = variable_queue_publish(false, 1, VQ8),
- VQ10 = variable_queue_set_ram_duration_target(0, VQ9),
- {VQ11, _AckTags2} = variable_queue_fetch(Count1, true, true, Count, VQ10),
- {VQ12, _AckTags3} = variable_queue_fetch(1, false, false, 1, VQ11),
- VQ12.
-
-test_variable_queue_all_the_bits_not_covered_elsewhere2(VQ0) ->
- VQ1 = variable_queue_set_ram_duration_target(0, VQ0),
- VQ2 = variable_queue_publish(false, 4, VQ1),
- {VQ3, AckTags} = variable_queue_fetch(2, false, false, 4, VQ2),
- {_Guids, VQ4} =
- rabbit_variable_queue:requeue(AckTags, VQ3),
- VQ5 = rabbit_variable_queue:timeout(VQ4),
- _VQ6 = rabbit_variable_queue:terminate(shutdown, VQ5),
- VQ7 = variable_queue_init(test_amqqueue(true), true),
- {empty, VQ8} = rabbit_variable_queue:fetch(false, VQ7),
- VQ8.
-
-test_variable_queue_fold_msg_on_disk(VQ0) ->
- VQ1 = variable_queue_publish(true, 1, VQ0),
- {VQ2, AckTags} = variable_queue_fetch(1, true, false, 1, VQ1),
- {ok, VQ3} = rabbit_variable_queue:ackfold(fun (_M, _A, ok) -> ok end,
- ok, VQ2, AckTags),
- VQ3.
-
-test_queue_recover() ->
- Count = 2 * rabbit_queue_index:next_segment_boundary(0),
- {new, #amqqueue { pid = QPid, name = QName } = Q} =
- rabbit_amqqueue:declare(test_queue(), true, false, [], none),
- publish_and_confirm(Q, <<>>, Count),
-
- [{_, SupPid, _, _}] = supervisor:which_children(rabbit_amqqueue_sup_sup),
- exit(SupPid, kill),
- exit(QPid, kill),
- MRef = erlang:monitor(process, QPid),
- receive {'DOWN', MRef, process, QPid, _Info} -> ok
- after 10000 -> exit(timeout_waiting_for_queue_death)
- end,
- rabbit_amqqueue:stop(),
- rabbit_amqqueue:start(rabbit_amqqueue:recover()),
- {ok, Limiter} = rabbit_limiter:start_link(no_id),
- rabbit_amqqueue:with_or_die(
- QName,
- fun (Q1 = #amqqueue { pid = QPid1 }) ->
- CountMinusOne = Count - 1,
- {ok, CountMinusOne, {QName, QPid1, _AckTag, true, _Msg}} =
- rabbit_amqqueue:basic_get(Q1, self(), false, Limiter),
- exit(QPid1, shutdown),
- VQ1 = variable_queue_init(Q, true),
- {{_Msg1, true, _AckTag1}, VQ2} =
- rabbit_variable_queue:fetch(true, VQ1),
- CountMinusOne = rabbit_variable_queue:len(VQ2),
- _VQ3 = rabbit_variable_queue:delete_and_terminate(shutdown, VQ2),
- rabbit_amqqueue:internal_delete(QName)
- end),
- passed.
-
-test_variable_queue_delete_msg_store_files_callback() ->
- ok = restart_msg_store_empty(),
- {new, #amqqueue { pid = QPid, name = QName } = Q} =
- rabbit_amqqueue:declare(test_queue(), true, false, [], none),
- Payload = <<0:8388608>>, %% 1MB
- Count = 30,
- publish_and_confirm(Q, Payload, Count),
-
- rabbit_amqqueue:set_ram_duration_target(QPid, 0),
-
- {ok, Limiter} = rabbit_limiter:start_link(no_id),
-
- CountMinusOne = Count - 1,
- {ok, CountMinusOne, {QName, QPid, _AckTag, false, _Msg}} =
- rabbit_amqqueue:basic_get(Q, self(), true, Limiter),
- {ok, CountMinusOne} = rabbit_amqqueue:purge(Q),
-
- %% give the queue a second to receive the close_fds callback msg
- timer:sleep(1000),
-
- rabbit_amqqueue:delete(Q, false, false),
- passed.
-
-test_configurable_server_properties() ->
- %% List of the names of the built-in properties do we expect to find
- BuiltInPropNames = [<<"product">>, <<"version">>, <<"platform">>,
- <<"copyright">>, <<"information">>],
-
- Protocol = rabbit_framing_amqp_0_9_1,
-
- %% Verify that the built-in properties are initially present
- ActualPropNames = [Key || {Key, longstr, _} <-
- rabbit_reader:server_properties(Protocol)],
- true = lists:all(fun (X) -> lists:member(X, ActualPropNames) end,
- BuiltInPropNames),
-
- %% Get the initial server properties configured in the environment
- {ok, ServerProperties} = application:get_env(rabbit, server_properties),
-
- %% Helper functions
- ConsProp = fun (X) -> application:set_env(rabbit,
- server_properties,
- [X | ServerProperties]) end,
- IsPropPresent =
- fun (X) ->
- lists:member(X, rabbit_reader:server_properties(Protocol))
- end,
-
- %% Add a wholly new property of the simplified {KeyAtom, StringValue} form
- NewSimplifiedProperty = {NewHareKey, NewHareVal} = {hare, "soup"},
- ConsProp(NewSimplifiedProperty),
- %% Do we find hare soup, appropriately formatted in the generated properties?
- ExpectedHareImage = {list_to_binary(atom_to_list(NewHareKey)),
- longstr,
- list_to_binary(NewHareVal)},
- true = IsPropPresent(ExpectedHareImage),
-
- %% Add a wholly new property of the {BinaryKey, Type, Value} form
- %% and check for it
- NewProperty = {<<"new-bin-key">>, signedint, -1},
- ConsProp(NewProperty),
- %% Do we find the new property?
- true = IsPropPresent(NewProperty),
-
- %% Add a property that clobbers a built-in, and verify correct clobbering
- {NewVerKey, NewVerVal} = NewVersion = {version, "X.Y.Z."},
- {BinNewVerKey, BinNewVerVal} = {list_to_binary(atom_to_list(NewVerKey)),
- list_to_binary(NewVerVal)},
- ConsProp(NewVersion),
- ClobberedServerProps = rabbit_reader:server_properties(Protocol),
- %% Is the clobbering insert present?
- true = IsPropPresent({BinNewVerKey, longstr, BinNewVerVal}),
- %% Is the clobbering insert the only thing with the clobbering key?
- [{BinNewVerKey, longstr, BinNewVerVal}] =
- [E || {K, longstr, _V} = E <- ClobberedServerProps, K =:= BinNewVerKey],
-
- application:set_env(rabbit, server_properties, ServerProperties),
- passed.
-
-nop(_) -> ok.
-nop(_, _) -> ok.
-
-test_memory_high_watermark() ->
- %% set vm memory high watermark
- HWM = vm_memory_monitor:get_vm_memory_high_watermark(),
- %% this will trigger an alarm
- ok = control_action(set_vm_memory_high_watermark, ["absolute", "200000"]),
- [{{resource_limit,memory,_},[]}] = rabbit_alarm:get_alarms(),
- %% reset
- ok = control_action(set_vm_memory_high_watermark, [float_to_list(HWM)]),
-
- passed.
-
-disk_monitor_test() ->
- %% Issue: rabbitmq-server #91
- %% os module could be mocked using 'unstick', however it may have undesired
- %% side effects in following tests. Thus, we mock at rabbit_misc level
- ok = meck:new(rabbit_misc, [passthrough]),
- ok = meck:expect(rabbit_misc, os_cmd, fun(_) -> "\n" end),
- ok = rabbit_sup:stop_child(rabbit_disk_monitor_sup),
- ok = rabbit_sup:start_delayed_restartable_child(rabbit_disk_monitor, [1000]),
- meck:unload(rabbit_misc),
- passed.
diff --git a/test/src/rabbit_tests_event_receiver.erl b/test/src/rabbit_tests_event_receiver.erl
deleted file mode 100644
index 610496b60c..0000000000
--- a/test/src/rabbit_tests_event_receiver.erl
+++ /dev/null
@@ -1,58 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(rabbit_tests_event_receiver).
-
--export([start/3, stop/0]).
-
--export([init/1, handle_call/2, handle_event/2, handle_info/2,
- terminate/2, code_change/3]).
-
--include("rabbit.hrl").
-
-start(Pid, Nodes, Types) ->
- Oks = [ok || _ <- Nodes],
- {Oks, _} = rpc:multicall(Nodes, gen_event, add_handler,
- [rabbit_event, ?MODULE, [Pid, Types]]).
-
-stop() ->
- gen_event:delete_handler(rabbit_event, ?MODULE, []).
-
-%%----------------------------------------------------------------------------
-
-init([Pid, Types]) ->
- {ok, {Pid, Types}}.
-
-handle_call(_Request, State) ->
- {ok, not_understood, State}.
-
-handle_event(Event = #event{type = Type}, State = {Pid, Types}) ->
- case lists:member(Type, Types) of
- true -> Pid ! Event;
- false -> ok
- end,
- {ok, State}.
-
-handle_info(_Info, State) ->
- {ok, State}.
-
-terminate(_Arg, _State) ->
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
-%%----------------------------------------------------------------------------
diff --git a/test/src/supervisor2_tests.erl b/test/src/supervisor2_tests.erl
deleted file mode 100644
index 199c66eca0..0000000000
--- a/test/src/supervisor2_tests.erl
+++ /dev/null
@@ -1,75 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2011-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(supervisor2_tests).
--behaviour(supervisor2).
-
--export([test_all/0, start_link/0]).
--export([init/1]).
-
-test_all() ->
- ok = check_shutdown(stop, 200, 200, 2000),
- ok = check_shutdown(ignored, 1, 2, 2000).
-
-check_shutdown(SigStop, Iterations, ChildCount, SupTimeout) ->
- {ok, Sup} = supervisor2:start_link(?MODULE, [SupTimeout]),
- Res = lists:foldl(
- fun (I, ok) ->
- TestSupPid = erlang:whereis(?MODULE),
- ChildPids =
- [begin
- {ok, ChildPid} =
- supervisor2:start_child(TestSupPid, []),
- ChildPid
- end || _ <- lists:seq(1, ChildCount)],
- MRef = erlang:monitor(process, TestSupPid),
- [P ! SigStop || P <- ChildPids],
- ok = supervisor2:terminate_child(Sup, test_sup),
- {ok, _} = supervisor2:restart_child(Sup, test_sup),
- receive
- {'DOWN', MRef, process, TestSupPid, shutdown} ->
- ok;
- {'DOWN', MRef, process, TestSupPid, Reason} ->
- {error, {I, Reason}}
- end;
- (_, R) ->
- R
- end, ok, lists:seq(1, Iterations)),
- unlink(Sup),
- MSupRef = erlang:monitor(process, Sup),
- exit(Sup, shutdown),
- receive
- {'DOWN', MSupRef, process, Sup, _Reason} ->
- ok
- end,
- Res.
-
-start_link() ->
- Pid = spawn_link(fun () ->
- process_flag(trap_exit, true),
- receive stop -> ok end
- end),
- {ok, Pid}.
-
-init([Timeout]) ->
- {ok, {{one_for_one, 0, 1},
- [{test_sup, {supervisor2, start_link,
- [{local, ?MODULE}, ?MODULE, []]},
- transient, Timeout, supervisor, [?MODULE]}]}};
-init([]) ->
- {ok, {{simple_one_for_one, 0, 1},
- [{test_worker, {?MODULE, start_link, []},
- temporary, 1000, worker, [?MODULE]}]}}.
diff --git a/test/src/test_sup.erl b/test/src/test_sup.erl
deleted file mode 100644
index 84d14f725d..0000000000
--- a/test/src/test_sup.erl
+++ /dev/null
@@ -1,93 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License
-%% at http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-%% the License for the specific language governing rights and
-%% limitations under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(test_sup).
-
--behaviour(supervisor2).
-
--export([test_supervisor_delayed_restart/0,
- init/1, start_child/0]).
-
-%%----------------------------------------------------------------------------
-
--ifdef(use_specs).
-
--spec(test_supervisor_delayed_restart/0 :: () -> 'passed').
-
--endif.
-
-%%----------------------------------------------------------------------------
-%% Public API
-%%----------------------------------------------------------------------------
-
-test_supervisor_delayed_restart() ->
- passed = with_sup(simple_one_for_one,
- fun (SupPid) ->
- {ok, _ChildPid} =
- supervisor2:start_child(SupPid, []),
- test_supervisor_delayed_restart(SupPid)
- end),
- passed = with_sup(one_for_one, fun test_supervisor_delayed_restart/1).
-
-test_supervisor_delayed_restart(SupPid) ->
- ok = ping_child(SupPid),
- ok = exit_child(SupPid),
- timer:sleep(100),
- ok = ping_child(SupPid),
- ok = exit_child(SupPid),
- timer:sleep(100),
- timeout = ping_child(SupPid),
- timer:sleep(1010),
- ok = ping_child(SupPid),
- passed.
-
-with_sup(RestartStrategy, Fun) ->
- {ok, SupPid} = supervisor2:start_link(?MODULE, [RestartStrategy]),
- Res = Fun(SupPid),
- unlink(SupPid),
- exit(SupPid, shutdown),
- Res.
-
-init([RestartStrategy]) ->
- {ok, {{RestartStrategy, 1, 1},
- [{test, {test_sup, start_child, []}, {permanent, 1},
- 16#ffffffff, worker, [test_sup]}]}}.
-
-start_child() ->
- {ok, proc_lib:spawn_link(fun run_child/0)}.
-
-ping_child(SupPid) ->
- Ref = make_ref(),
- with_child_pid(SupPid, fun(ChildPid) -> ChildPid ! {ping, Ref, self()} end),
- receive {pong, Ref} -> ok
- after 1000 -> timeout
- end.
-
-exit_child(SupPid) ->
- with_child_pid(SupPid, fun(ChildPid) -> exit(ChildPid, abnormal) end),
- ok.
-
-with_child_pid(SupPid, Fun) ->
- case supervisor2:which_children(SupPid) of
- [{_Id, undefined, worker, [test_sup]}] -> ok;
- [{_Id, ChildPid, worker, [test_sup]}] -> Fun(ChildPid);
- [] -> ok
- end.
-
-run_child() ->
- receive {ping, Ref, Pid} -> Pid ! {pong, Ref},
- run_child()
- end.
diff --git a/test/src/vm_memory_monitor_tests.erl b/test/src/vm_memory_monitor_tests.erl
deleted file mode 100644
index 61d62f862d..0000000000
--- a/test/src/vm_memory_monitor_tests.erl
+++ /dev/null
@@ -1,35 +0,0 @@
-%% The contents of this file are subject to the Mozilla Public License
-%% Version 1.1 (the "License"); you may not use this file except in
-%% compliance with the License. You may obtain a copy of the License at
-%% http://www.mozilla.org/MPL/
-%%
-%% Software distributed under the License is distributed on an "AS IS"
-%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-%% License for the specific language governing rights and limitations
-%% under the License.
-%%
-%% The Original Code is RabbitMQ.
-%%
-%% The Initial Developer of the Original Code is GoPivotal, Inc.
-%% Copyright (c) 2007-2015 Pivotal Software, Inc. All rights reserved.
-%%
-
--module(vm_memory_monitor_tests).
-
--export([all_tests/0]).
-
-%% ---------------------------------------------------------------------------
-%% Tests
-%% ---------------------------------------------------------------------------
-
-all_tests() ->
- lists:foreach(fun ({S, {K, V}}) ->
- {K, V} = vm_memory_monitor:parse_line_linux(S)
- end,
- [{"MemTotal: 0 kB", {'MemTotal', 0}},
- {"MemTotal: 502968 kB ", {'MemTotal', 515039232}},
- {"MemFree: 178232 kB", {'MemFree', 182509568}},
- {"MemTotal: 50296888", {'MemTotal', 50296888}},
- {"MemTotal 502968 kB", {'MemTotal', 515039232}},
- {"MemTotal 50296866 ", {'MemTotal', 50296866}}]),
- passed.
diff --git a/test/temp/head_message_timestamp_tests.py b/test/temp/head_message_timestamp_tests.py
new file mode 100755
index 0000000000..6698b88b7b
--- /dev/null
+++ b/test/temp/head_message_timestamp_tests.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+#
+# Tests for the SLA patch which adds the head_message_timestamp queue stat.
+# Uses both the management interface via rabbitmqadmin and the AMQP interface via Pika.
+# There's no particular reason to have used rabbitmqadmin other than saving some bulk.
+# Similarly, the separate declaration of exchanges and queues is just a preference
+# following a typical enterprise policy where admin users create these resources.
+
+from datetime import datetime
+import json
+import pika
+import os
+import sys
+from time import clock, mktime, sleep
+import unittest
+
+# Uses the rabbitmqadmin script.
+# To be imported this must be given a .py suffix and placed on the Python path
+from rabbitmqadmin import *
+
+TEXCH = 'head-message-timestamp-test'
+TQUEUE = 'head-message-timestamp-test-queue'
+
+TIMEOUT_SECS = 10
+
+TIMESTAMP1 = mktime(datetime(2010,1,1,12,00,01).timetuple())
+TIMESTAMP2 = mktime(datetime(2010,1,1,12,00,02).timetuple())
+
+AMQP_PORT = 99
+
+DELIVERY_MODE = 2
+DURABLE = False
+
+def log(msg):
+ print("\nINFO: " + msg)
+
+class RabbitTestCase(unittest.TestCase):
+ def setUp(self):
+ parser.set_conflict_handler('resolve')
+ (options, args) = make_configuration()
+ AMQP_PORT = int(options.port) - 10000
+
+ self.mgmt = Management(options, args)
+ self.mgmt.put('/exchanges/%2f/' + TEXCH, '{"type" : "fanout", "durable":' + str(DURABLE).lower() + '}')
+ self.mgmt.put('/queues/%2f/' + TQUEUE, '{"auto_delete":false,"durable":' + str(DURABLE).lower() + ',"arguments":[]}')
+ self.mgmt.post('/bindings/%2f/e/' + TEXCH + '/q/' + TQUEUE, '{"routing_key": ".*", "arguments":[]}')
+ self.credentials = pika.PlainCredentials(options.username, options.password)
+ parameters = pika.ConnectionParameters(options.hostname, port=AMQP_PORT, credentials=self.credentials)
+ self.connection = pika.BlockingConnection(parameters)
+ self.channel = self.connection.channel()
+
+ def tearDown(self):
+ parser.set_conflict_handler('resolve')
+ (options, args) = make_configuration()
+ self.mgmt = Management(options, args)
+ self.mgmt.delete('/queues/%2f/' + TQUEUE)
+ self.mgmt.delete('/exchanges/%2f/' + TEXCH)
+
+class RabbitSlaTestCase(RabbitTestCase):
+ def get_queue_stats(self, queue_name):
+ stats_str = self.mgmt.get('/queues/%2f/' + queue_name)
+ return json.loads(stats_str)
+
+ def get_head_message_timestamp(self, queue_name):
+ return self.get_queue_stats(queue_name)["head_message_timestamp"]
+
+ def send(self, message, timestamp=None):
+ self.channel.basic_publish(TEXCH, '', message,
+ pika.BasicProperties(content_type='text/plain',
+ delivery_mode=DELIVERY_MODE,
+ timestamp=timestamp))
+ log("Sent message with body: " + str(message))
+
+ def receive(self, queue):
+ method_frame, header_frame, body = self.channel.basic_get(queue = queue)
+ log("Received message with body: " + str(body))
+ return method_frame.delivery_tag, body
+
+ def ack(self, delivery_tag):
+ self.channel.basic_ack(delivery_tag)
+
+ def nack(self, delivery_tag):
+ self.channel.basic_nack(delivery_tag)
+
+ def wait_for_new_timestamp(self, queue, old_timestamp):
+ stats_wait_start = clock()
+ while ((clock() - stats_wait_start) < TIMEOUT_SECS and
+ self.get_head_message_timestamp(queue) == old_timestamp):
+ sleep(0.1)
+ log('Queue stats updated in ' + str(clock() - stats_wait_start) + ' secs.')
+ return self.get_head_message_timestamp(queue)
+
+ # TESTS
+
+ def test_no_timestamp_when_queue_is_empty(self):
+ assert self.get_head_message_timestamp(TQUEUE) == ''
+
+ def test_has_timestamp_when_first_msg_is_added(self):
+ self.send('Msg1', TIMESTAMP1)
+ stats_timestamp = self.wait_for_new_timestamp(TQUEUE, '')
+ assert stats_timestamp == TIMESTAMP1
+
+ def test_no_timestamp_when_last_msg_is_removed(self):
+ self.send('Msg1', TIMESTAMP1)
+ stats_timestamp = self.wait_for_new_timestamp(TQUEUE, '')
+ tag, body = self.receive(TQUEUE)
+ self.ack(tag)
+ stats_timestamp = self.wait_for_new_timestamp(TQUEUE, TIMESTAMP1)
+ assert stats_timestamp == ''
+
+ def test_timestamp_updated_when_msg_is_removed(self):
+ self.send('Msg1', TIMESTAMP1)
+ stats_timestamp = self.wait_for_new_timestamp(TQUEUE, '')
+ self.send('Msg2', TIMESTAMP2)
+ tag, body = self.receive(TQUEUE)
+ self.ack(tag)
+ stats_timestamp = self.wait_for_new_timestamp(TQUEUE, TIMESTAMP1)
+ assert stats_timestamp == TIMESTAMP2
+
+ def test_timestamp_not_updated_before_msg_is_acked(self):
+ self.send('Msg1', TIMESTAMP1)
+ stats_timestamp = self.wait_for_new_timestamp(TQUEUE, '')
+ tag, body = self.receive(TQUEUE)
+ sleep(1) # Allow time for update to appear if it was going to (it shouldn't)
+ assert self.get_head_message_timestamp(TQUEUE) == TIMESTAMP1
+ self.ack(tag)
+
+if __name__ == '__main__':
+ unittest.main(verbosity = 2)
+
+
diff --git a/test/temp/rabbitmqadmin.py b/test/temp/rabbitmqadmin.py
new file mode 100755
index 0000000000..1e7552b92c
--- /dev/null
+++ b/test/temp/rabbitmqadmin.py
@@ -0,0 +1,944 @@
+#!/usr/bin/env python
+
+# The contents of this file are subject to the Mozilla Public License
+# Version 1.1 (the "License"); you may not use this file except in
+# compliance with the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
+# License for the specific language governing rights and limitations
+# under the License.
+#
+# The Original Code is RabbitMQ Management Plugin.
+#
+# The Initial Developer of the Original Code is GoPivotal, Inc.
+# Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved.
+
+import sys
+if sys.version_info[0] < 2 or sys.version_info[1] < 6:
+ print "Sorry, rabbitmqadmin requires at least Python 2.6."
+ sys.exit(1)
+
+from ConfigParser import ConfigParser, NoSectionError
+from optparse import OptionParser, TitledHelpFormatter
+import httplib
+import urllib
+import urlparse
+import base64
+import json
+import os
+import socket
+
+VERSION = '0.0.0'
+
# ---------------------------------------------------------------------------
# Declarative tables driving the generic subcommand machinery.  Entries may
# carry: 'vhost' (whether listing is scoped per-vhost), 'json' (argument
# names whose values are parsed as JSON), 'mandatory'/'optional' (argument
# specs), and 'uri' (management API path template, wired up below).
# ---------------------------------------------------------------------------

# Kinds accepted by 'list <kind> [<column>...]'.
LISTABLE = {'connections': {'vhost': False},
            'channels': {'vhost': False},
            'consumers': {'vhost': True},
            'exchanges': {'vhost': True},
            'queues': {'vhost': True},
            'bindings': {'vhost': True},
            'users': {'vhost': False},
            'vhosts': {'vhost': False},
            'permissions': {'vhost': False},
            'nodes': {'vhost': False},
            'parameters': {'vhost': False,
                           'json': ['value']},
            'policies': {'vhost': False,
                         'json': ['definition']}}

# Kinds accepted by 'show <kind> [<column>...]'.
SHOWABLE = {'overview': {'vhost': False}}

# Columns displayed first (in this order) when columns are auto-discovered.
PROMOTE_COLUMNS = ['vhost', 'name', 'type',
                   'source', 'destination', 'destination_type', 'routing_key']

# Management API path templates; {placeholders} are filled from the
# URL-quoted subcommand arguments.
URIS = {
    'exchange': '/exchanges/{vhost}/{name}',
    'queue': '/queues/{vhost}/{name}',
    'binding': '/bindings/{vhost}/e/{source}/{destination_char}/{destination}',
    'binding_del':'/bindings/{vhost}/e/{source}/{destination_char}/{destination}/{properties_key}',
    'vhost': '/vhosts/{name}',
    'user': '/users/{name}',
    'permission': '/permissions/{vhost}/{user}',
    'parameter': '/parameters/{component}/{vhost}/{name}',
    'policy': '/policies/{vhost}/{name}'
    }

# Kinds accepted by 'declare <kind> arg=value ...'.
DECLARABLE = {
    'exchange': {'mandatory': ['name', 'type'],
                 'json': ['arguments'],
                 'optional': {'auto_delete': 'false', 'durable': 'true',
                              'internal': 'false', 'arguments': {}}},
    'queue': {'mandatory': ['name'],
              'json': ['arguments'],
              'optional': {'auto_delete': 'false', 'durable': 'true',
                           'arguments': {}, 'node': None}},
    'binding': {'mandatory': ['source', 'destination'],
                'json': ['arguments'],
                'optional': {'destination_type': 'queue',
                             'routing_key': '', 'arguments': {}}},
    'vhost': {'mandatory': ['name'],
              'optional': {'tracing': None}},
    'user': {'mandatory': ['name', 'password', 'tags'],
             'optional': {}},
    'permission': {'mandatory': ['vhost', 'user', 'configure', 'write', 'read'],
                   'optional': {}},
    'parameter': {'mandatory': ['component', 'name', 'value'],
                  'json': ['value'],
                  'optional': {}},
    # Priority is 'json' to convert to int
    'policy': {'mandatory': ['name', 'pattern', 'definition'],
               'json': ['definition', 'priority'],
               'optional': {'priority' : 0, 'apply-to': None}}
    }

# Kinds accepted by 'delete <kind> ...'.
DELETABLE = {
    'exchange': {'mandatory': ['name']},
    'queue': {'mandatory': ['name']},
    'binding': {'mandatory': ['source', 'destination_type', 'destination',
                              'properties_key']},
    'vhost': {'mandatory': ['name']},
    'user': {'mandatory': ['name']},
    'permission': {'mandatory': ['vhost', 'user']},
    'parameter': {'mandatory': ['component', 'name']},
    'policy': {'mandatory': ['name']}
    }

# Kinds accepted by 'close <kind> ...'.
CLOSABLE = {
    'connection': {'mandatory': ['name'],
                   'optional': {},
                   'uri': '/connections/{name}'}
    }

# Kinds accepted by 'purge <kind> ...'.
PURGABLE = {
    'queue': {'mandatory': ['name'],
              'optional': {},
              'uri': '/queues/{vhost}/{name}/contents'}
    }

# Stand-alone verbs: 'publish' and 'get'.
EXTRA_VERBS = {
    'publish': {'mandatory': ['routing_key'],
                'optional': {'payload': None,
                             'exchange': 'amq.default',
                             'payload_encoding': 'string'},
                'uri': '/exchanges/{vhost}/{exchange}/publish'},
    'get': {'mandatory': ['queue'],
            'optional': {'count': '1', 'requeue': 'true',
                         'payload_file': None, 'encoding': 'auto'},
            'uri': '/queues/{vhost}/{queue}/get'}
}

# Wire the shared URI templates into the verb tables.  Deletes take no
# optional arguments; binding deletion uses the properties-key URI variant.
for k in DECLARABLE:
    DECLARABLE[k]['uri'] = URIS[k]

for k in DELETABLE:
    DELETABLE[k]['uri'] = URIS[k]
    DELETABLE[k]['optional'] = {}
DELETABLE['binding']['uri'] = URIS['binding_del']
+
def short_usage():
    """One-line synopsis used in error messages and as the parser usage."""
    return " ".join(["rabbitmqadmin", "[options]", "subcommand"])
+
def title(name):
    """Return `name` underlined with '=' as a usage-section heading."""
    underline = '=' * len(name)
    return "\n{0}\n{1}\n\n".format(name, underline)
+
def subcommands_usage():
    """Build the text shown by 'rabbitmqadmin help subcommands' from the
    verb tables (LISTABLE, SHOWABLE, DECLARABLE, ...)."""
    usage = """Usage
=====
  """ + short_usage() + """

  where subcommand is one of:
""" + title("Display")

    # One line per listable/showable kind, then one stanza per verb.
    for l in LISTABLE:
        usage += "    list {0} [<column>...]\n".format(l)
    for s in SHOWABLE:
        usage += "    show {0} [<column>...]\n".format(s)
    usage += title("Object Manipulation")
    usage += fmt_usage_stanza(DECLARABLE, 'declare')
    usage += fmt_usage_stanza(DELETABLE, 'delete')
    usage += fmt_usage_stanza(CLOSABLE, 'close')
    usage += fmt_usage_stanza(PURGABLE, 'purge')
    usage += title("Broker Definitions")
    usage += """    export <file>
    import <file>
"""
    usage += title("Publishing and Consuming")
    usage += fmt_usage_stanza(EXTRA_VERBS, '')
    usage += """
  * If payload is not specified on publish, standard input is used

  * If payload_file is not specified on get, the payload will be shown on
    standard output along with the message metadata

  * If payload_file is specified on get, count must not be set
"""
    return usage
+
def config_usage():
    """Build the text shown by 'rabbitmqadmin help config': how to use a
    configuration file, with a worked example."""
    usage = "Usage\n=====\n" + short_usage()
    usage += "\n" + title("Configuration File")
    usage += """  It is possible to specify a configuration file from the command line.
  Hosts can be configured easily in a configuration file and called
  from the command line.
"""
    usage += title("Example")
    usage += """  # rabbitmqadmin.conf.example START

  [host_normal]
  hostname = localhost
  port = 15672
  username = guest
  password = guest
  declare_vhost = / # Used as default for declare / delete only
  vhost = / # Used as default for declare / delete / list

  [host_ssl]
  hostname = otherhost
  port = 15672
  username = guest
  password = guest
  ssl = True
  ssl_key_file = /path/to/key.pem
  ssl_cert_file = /path/to/cert.pem

  # rabbitmqadmin.conf.example END
"""
    usage += title("Use")
    usage += """  rabbitmqadmin -c rabbitmqadmin.conf.example -N host_normal ..."""
    return usage
+
def more_help():
    """Footer (used as the OptionParser epilog) pointing users at the
    built-in help subcommands."""
    return """
More Help
=========

For more help use the help subcommand:

  rabbitmqadmin help subcommands # For a list of available subcommands
  rabbitmqadmin help config # For help with the configuration file
"""
+
def fmt_usage_stanza(root, verb):
    """Render one usage section: a line per object kind in `root`, listing
    mandatory name=... arguments with the optional ones in brackets."""
    def fmt_args(spec):
        rendered = " ".join(["{0}=...".format(name) for name in spec['mandatory']])
        optional = " ".join("{0}=...".format(name) for name in spec['optional'].keys())
        if optional != "":
            rendered = "{0} [{1}]".format(rendered, optional)
        return rendered

    prefix = (" " + verb) if verb != "" else verb
    lines = ["  {0} {1} {2}\n".format(prefix, kind, fmt_args(root[kind]))
             for kind in root.keys()]
    return "".join(lines)
+
# Fallback values for any option the user did not set on the command line.
# Config-file values are applied after these in make_configuration().
default_options = { "hostname"        : "localhost",
                    "port"            : "15672",
                    "declare_vhost"   : "/",
                    "username"        : "guest",
                    "password"        : "guest",
                    "ssl"             : False,
                    "verbose"         : True,
                    "format"          : "table",
                    "depth"           : 1,
                    "bash_completion" : False }
+
+
class MyFormatter(TitledHelpFormatter):
    """Help formatter that emits the epilog verbatim instead of re-wrapping
    it (the default formatter would mangle the more_help() layout)."""
    def format_epilog(self, epilog):
        return epilog

# Module-level parser shared by make_parser() and make_configuration().
parser = OptionParser(usage=short_usage(),
                      formatter=MyFormatter(),
                      epilog=more_help())
+
def make_parser():
    """Populate the module-level OptionParser with every CLI flag, appending
    '[default: ...]' (from default_options) to each help string that has a
    default."""
    def add(*args, **kwargs):
        key = kwargs['dest']
        if key in default_options:
            default = " [default: %s]" % default_options[key]
            kwargs['help'] = kwargs['help'] + default
        parser.add_option(*args, **kwargs)

    add("-c", "--config", dest="config",
        help="configuration file [default: ~/.rabbitmqadmin.conf]",
        metavar="CONFIG")
    add("-N", "--node", dest="node",
        help="node described in the configuration file [default: 'default'" + \
             " only if configuration file is specified]",
        metavar="NODE")
    add("-H", "--host", dest="hostname",
        help="connect to host HOST" ,
        metavar="HOST")
    add("-P", "--port", dest="port",
        help="connect to port PORT",
        metavar="PORT")
    add("-V", "--vhost", dest="vhost",
        help="connect to vhost VHOST [default: all vhosts for list, '/' for declare]",
        metavar="VHOST")
    add("-u", "--username", dest="username",
        help="connect using username USERNAME",
        metavar="USERNAME")
    add("-p", "--password", dest="password",
        help="connect using password PASSWORD",
        metavar="PASSWORD")
    add("-q", "--quiet", action="store_false", dest="verbose",
        help="suppress status messages")
    add("-s", "--ssl", action="store_true", dest="ssl",
        help="connect with ssl")
    add("--ssl-key-file", dest="ssl_key_file",
        help="PEM format key file for SSL")
    add("--ssl-cert-file", dest="ssl_cert_file",
        help="PEM format certificate file for SSL")
    add("-f", "--format", dest="format",
        help="format for listing commands - one of [" + ", ".join(FORMATS.keys()) + "]")
    add("-S", "--sort", dest="sort", help="sort key for listing queries")
    add("-R", "--sort-reverse", action="store_true", dest="sort_reverse",
        help="reverse the sort order")
    add("-d", "--depth", dest="depth",
        help="maximum depth to recurse for listing tables")
    add("--bash-completion", action="store_true",
        dest="bash_completion",
        help="Print bash completion script")
    add("--version", action="store_true",
        dest="version",
        help="Display version and exit")
+
def default_config():
    """Locate ~/.rabbitmqadmin.conf (USERPROFILE taking precedence over
    HOME).  Returns the path if the file exists, otherwise None."""
    for env_var in ('USERPROFILE', 'HOME'):
        home = os.getenv(env_var)
        if home:
            candidate = home + os.sep + ".rabbitmqadmin.conf"
            if os.path.isfile(candidate):
                return candidate
            # A home dir was found but no config file: do not fall through
            # to the other variable (matches `or` short-circuit semantics).
            return None
    return None
+
def make_configuration():
    """Parse CLI options and merge in defaults plus the optional config file.

    Precedence (lowest to highest): default_options, config-file section
    [node], command-line flags already set.  Returns (options, args).
    """
    make_parser()
    (options, args) = parser.parse_args()
    setattr(options, "declare_vhost", None)
    if options.version:
        print_version()
    if options.config is None:
        config_file = default_config()
        if config_file is not None:
            setattr(options, "config", config_file)
    else:
        if not os.path.isfile(options.config):
            assert_usage(False,
                         "Could not read config file '%s'" % options.config)

    # Without an explicit -N, fall back to the 'default' section whenever a
    # config file is in play.
    if options.node is None and options.config:
        options.node = "default"
    for (key, val) in default_options.items():
        if getattr(options, key) is None:
            setattr(options, key, val)

    if options.config is not None:
        config = ConfigParser()
        try:
            config.read(options.config)
            new_conf = dict(config.items(options.node))
        # 'as' form works on the >= 2.6 floor this script enforces and is
        # forward-compatible with Python 3.
        except NoSectionError as error:
            # A missing implicit 'default' section is fine; a missing
            # explicitly-named section is a user error.
            if options.node != "default":
                assert_usage(False, ("Could not read section '%s' in config file" +
                                     " '%s':\n %s") %
                             (options.node, options.config, error))
        else:
            for key, val in new_conf.items():
                setattr(options, key, val)

    return (options, args)
+
def assert_usage(expr, error):
    """Exit with status 1 and a usage error message unless `expr` is truthy."""
    if expr:
        return
    output("\nERROR: {0}\n".format(error))
    output("{0} --help for help\n".format(os.path.basename(sys.argv[0])))
    sys.exit(1)
+
def print_version():
    # Print the version banner and exit successfully (--version handler).
    output("rabbitmqadmin {0}".format(VERSION))
    sys.exit(0)
+
def column_sort_key(col):
    """Sort key that orders PROMOTE_COLUMNS first (in their listed order),
    then everything else alphabetically."""
    return (1, PROMOTE_COLUMNS.index(col)) if col in PROMOTE_COLUMNS else (2, col)
+
def main():
    """Entry point: parse configuration, then dispatch args[0] to the
    matching Management.invoke_* method."""
    (options, args) = make_configuration()
    if options.bash_completion:
        print_bash_completion()
        exit(0)
    assert_usage(len(args) > 0, 'Action not specified')
    mgmt = Management(options, args[1:])
    mode = "invoke_" + args[0]
    assert_usage(hasattr(mgmt, mode),
                 'Action {0} not understood'.format(args[0]))
    # Reuse the already-computed attribute name instead of re-formatting it.
    method = getattr(mgmt, mode)
    method()
+
def output(s):
    """Print `s`, utf-8 encoding it when stdout is not a terminal."""
    print(maybe_utf8(s, sys.stdout))
+
def die(s):
    """Write a '*** message' banner to stderr and exit with status 1."""
    banner = "*** {0}\n".format(s)
    sys.stderr.write(maybe_utf8(banner, sys.stderr))
    exit(1)
+
def maybe_utf8(s, stream):
    """Encode `s` as utf-8 unless `stream` is a tty.

    A tty has an encoding Python will respect; a piped stream does not and
    Python would otherwise fall back to ASCII.
    """
    if not stream.isatty():
        return s.encode('utf-8')
    return s
+
class Management:
    """Thin client for the RabbitMQ management HTTP API.

    One invoke_* method per CLI subcommand; each builds a URI from the
    declarative verb tables plus the parsed arguments and issues the
    matching HTTP verb via http().
    """
    def __init__(self, options, args):
        # options: parsed OptionParser values; args: subcommand arguments.
        self.options = options
        self.args = args

    def get(self, path):
        return self.http("GET", "/api%s" % path, "")

    def put(self, path, body):
        return self.http("PUT", "/api%s" % path, body)

    def post(self, path, body):
        return self.http("POST", "/api%s" % path, body)

    def delete(self, path):
        return self.http("DELETE", "/api%s" % path, "")

    def http(self, method, path, body):
        """Perform one HTTP(S) request with basic auth; follows a 301
        redirect (remembering the new host/port), dies (exit 1) on
        connection errors and 400/401/404, raises on other non-2xx/3xx
        statuses, and returns the response body string otherwise."""
        if self.options.ssl:
            conn = httplib.HTTPSConnection(self.options.hostname,
                                           self.options.port,
                                           self.options.ssl_key_file,
                                           self.options.ssl_cert_file)
        else:
            conn = httplib.HTTPConnection(self.options.hostname,
                                          self.options.port)
        headers = {"Authorization":
                       "Basic " + base64.b64encode(self.options.username + ":" +
                                                   self.options.password)}
        if body != "":
            headers["Content-Type"] = "application/json"
        try:
            conn.request(method, path, body, headers)
        except socket.error, e:
            die("Could not connect: {0}".format(e))
        resp = conn.getresponse()
        if resp.status == 400:
            die(json.loads(resp.read())['reason'])
        if resp.status == 401:
            die("Access refused: {0}".format(path))
        if resp.status == 404:
            die("Not found: {0}".format(path))
        if resp.status == 301:
            # Permanent redirect: retry against the new location and keep
            # using it for subsequent calls.
            url = urlparse.urlparse(resp.getheader('location'))
            [host, port] = url.netloc.split(':')
            self.options.hostname = host
            self.options.port = int(port)
            return self.http(method, url.path + '?' + url.query, body)
        if resp.status < 200 or resp.status > 400:
            raise Exception("Received %d %s for path %s\n%s"
                            % (resp.status, resp.reason, path, resp.read()))
        return resp.read()

    def verbose(self, string):
        # Status output, suppressed by -q/--quiet.
        if self.options.verbose:
            output(string)

    def get_arg(self):
        # For subcommands that take exactly one positional argument.
        assert_usage(len(self.args) == 1, 'Exactly one argument required')
        return self.args[0]

    def invoke_help(self):
        """'help' subcommand: general, 'subcommands' or 'config' help."""
        if len(self.args) == 0:
            parser.print_help()
        else:
            help_cmd = self.get_arg()
            if help_cmd == 'subcommands':
                usage = subcommands_usage()
            elif help_cmd == 'config':
                usage = config_usage()
            else:
                assert_usage(False, """help topic must be one of:
  subcommands
  config""")
            print usage
        exit(0)

    def invoke_publish(self):
        """'publish' subcommand: post a message; payload falls back to
        stdin (base64-encoded) when not given as an argument."""
        (uri, upload) = self.parse_args(self.args, EXTRA_VERBS['publish'])
        upload['properties'] = {} # TODO do we care here?
        if not 'payload' in upload:
            data = sys.stdin.read()
            upload['payload'] = base64.b64encode(data)
            upload['payload_encoding'] = 'base64'
        resp = json.loads(self.post(uri, json.dumps(upload)))
        if resp['routed']:
            self.verbose("Message published")
        else:
            self.verbose("Message published but NOT routed")

    def invoke_get(self):
        """'get' subcommand: fetch message(s); optionally write the first
        payload to payload_file instead of listing it."""
        (uri, upload) = self.parse_args(self.args, EXTRA_VERBS['get'])
        payload_file = 'payload_file' in upload and upload['payload_file'] or None
        assert_usage(not payload_file or upload['count'] == '1',
                     'Cannot get multiple messages using payload_file')
        result = self.post(uri, json.dumps(upload))
        if payload_file:
            write_payload_file(payload_file, result)
            columns = ['routing_key', 'exchange', 'message_count',
                       'payload_bytes', 'redelivered']
            format_list(result, columns, {}, self.options)
        else:
            format_list(result, [], {}, self.options)

    def invoke_export(self):
        # Dump the broker definitions JSON to a file.
        path = self.get_arg()
        definitions = self.get("/definitions")
        f = open(path, 'w')
        f.write(definitions)
        f.close()
        self.verbose("Exported definitions for %s to \"%s\""
                     % (self.options.hostname, path))

    def invoke_import(self):
        # Upload a definitions JSON file to the broker.
        path = self.get_arg()
        f = open(path, 'r')
        definitions = f.read()
        f.close()
        self.post("/definitions", definitions)
        self.verbose("Imported definitions for %s from \"%s\""
                     % (self.options.hostname, path))

    def invoke_list(self):
        cols = self.args[1:]
        (uri, obj_info) = self.list_show_uri(LISTABLE, 'list', cols)
        format_list(self.get(uri), cols, obj_info, self.options)

    def invoke_show(self):
        # 'show' endpoints return a single object; wrap it in a list so the
        # same formatting path as 'list' can be used.
        cols = self.args[1:]
        (uri, obj_info) = self.list_show_uri(SHOWABLE, 'show', cols)
        format_list('[{0}]'.format(self.get(uri)), cols, obj_info, self.options)

    def list_show_uri(self, obj_types, verb, cols):
        """Build the query URI for list/show: vhost scoping plus the
        columns / sort / sort_reverse query parameters."""
        obj_type = self.args[0]
        assert_usage(obj_type in obj_types,
                     "Don't know how to {0} {1}".format(verb, obj_type))
        obj_info = obj_types[obj_type]
        uri = "/%s" % obj_type
        query = []
        if obj_info['vhost'] and self.options.vhost:
            uri += "/%s" % urllib.quote_plus(self.options.vhost)
        if cols != []:
            query.append("columns=" + ",".join(cols))
        sort = self.options.sort
        if sort:
            query.append("sort=" + sort)
        if self.options.sort_reverse:
            query.append("sort_reverse=true")
        query = "&".join(query)
        if query != "":
            uri += "?" + query
        return (uri, obj_info)

    def invoke_declare(self):
        (obj_type, uri, upload) = self.declare_delete_parse(DECLARABLE)
        # Bindings are created with POST (server generates the properties
        # key); everything else is an idempotent PUT.
        if obj_type == 'binding':
            self.post(uri, json.dumps(upload))
        else:
            self.put(uri, json.dumps(upload))
        self.verbose("{0} declared".format(obj_type))

    def invoke_delete(self):
        (obj_type, uri, upload) = self.declare_delete_parse(DELETABLE)
        self.delete(uri)
        self.verbose("{0} deleted".format(obj_type))

    def invoke_close(self):
        (obj_type, uri, upload) = self.declare_delete_parse(CLOSABLE)
        self.delete(uri)
        self.verbose("{0} closed".format(obj_type))

    def invoke_purge(self):
        (obj_type, uri, upload) = self.declare_delete_parse(PURGABLE)
        self.delete(uri)
        self.verbose("{0} purged".format(obj_type))

    def declare_delete_parse(self, root):
        # Common front half of declare/delete/close/purge: validate the
        # object kind, then parse the name=value arguments against its spec.
        assert_usage(len(self.args) > 0, 'Type not specified')
        obj_type = self.args[0]
        assert_usage(obj_type in root,
                     'Type {0} not recognised'.format(obj_type))
        obj = root[obj_type]
        (uri, upload) = self.parse_args(self.args[1:], obj)
        return (obj_type, uri, upload)

    def parse_args(self, args, obj):
        """Turn name=value arguments into (uri, upload): validates against
        the mandatory/optional spec, JSON-decodes 'json' fields, defaults
        the vhost, and fills the URI template with quoted values."""
        mandatory = obj['mandatory']
        optional = obj['optional']
        uri_template = obj['uri']
        upload = {}
        for k in optional.keys():
            if optional[k]:
                upload[k] = optional[k]
        for arg in args:
            assert_usage("=" in arg,
                         'Argument "{0}" not in format name=value'.format(arg))
            (name, value) = arg.split("=", 1)
            assert_usage(name in mandatory or name in optional.keys(),
                         'Argument "{0}" not recognised'.format(name))
            if 'json' in obj and name in obj['json']:
                upload[name] = self.parse_json(value)
            else:
                upload[name] = value
        for m in mandatory:
            assert_usage(m in upload.keys(),
                         'mandatory argument "{0}" required'.format(m))
        if 'vhost' not in mandatory:
            upload['vhost'] = self.options.vhost or self.options.declare_vhost
        uri_args = {}
        for k in upload:
            v = upload[k]
            if v and isinstance(v, basestring):
                uri_args[k] = urllib.quote_plus(v)
                if k == 'destination_type':
                    # 'q'/'e' path segment derived from queue/exchange.
                    uri_args['destination_char'] = v[0]
        uri = uri_template.format(**uri_args)
        return (uri, upload)

    def parse_json(self, text):
        # Exit with a message (rather than a traceback) on malformed JSON.
        try:
            return json.loads(text)
        except ValueError:
            print "Could not parse JSON:\n  {0}".format(text)
            sys.exit(1)
+
def format_list(json_list, columns, args, options):
    """Render a JSON list in the output format selected by options.format."""
    chosen = options.format
    if chosen == "raw_json":
        output(json_list)
        return
    if chosen == "pretty_json":
        enc = json.JSONEncoder(False, False, True, True, True, 2)
        output(enc.encode(json.loads(json_list)))
        return
    formatter = FORMATS[chosen]
    assert_usage(formatter != None,
                 "Format {0} not recognised".format(chosen))
    formatter(columns, args, options).display(json_list)
+
class Lister:
    """Base class for the tabular output formats.

    Subclasses set self.columns / self.obj_info / self.options and provide
    display_list(columns, table).
    """
    def verbose(self, string):
        if self.options.verbose:
            output(string)

    def display(self, json_list):
        # With explicit columns we flatten nested objects fully; otherwise
        # recursion is capped by the --depth option.
        depth = sys.maxint
        if len(self.columns) == 0:
            depth = int(self.options.depth)
        (columns, table) = self.list_to_table(json.loads(json_list), depth)
        if len(table) > 0:
            self.display_list(columns, table)
        else:
            self.verbose("No items")

    def list_to_table(self, items, max_depth):
        """Flatten a list of JSON objects into (column_names, rows),
        joining nested keys with '.' down to max_depth levels."""
        columns = {}
        column_ix = {}
        row = None
        table = []

        # Walk one item, calling fun(column_name, scalar_value) per leaf.
        def add(prefix, depth, item, fun):
            for key in item:
                column = prefix == '' and key or (prefix + '.' + key)
                subitem = item[key]
                if type(subitem) == dict:
                    if self.obj_info.has_key('json') and key in self.obj_info['json']:
                        fun(column, json.dumps(subitem))
                    else:
                        if depth < max_depth:
                            add(column, depth + 1, subitem, fun)
                elif type(subitem) == list:
                    # The first branch has slave nodes in queues in
                    # mind (which come out looking decent); the second
                    # one has applications in nodes (which look less
                    # so, but what would look good?).
                    if [x for x in subitem if type(x) != unicode] == []:
                        serialised = " ".join(subitem)
                    else:
                        serialised = json.dumps(subitem)
                    fun(column, serialised)
                else:
                    fun(column, subitem)

        def add_to_columns(col, val):
            columns[col] = True

        def add_to_row(col, val):
            if col in column_ix:
                row[column_ix[col]] = unicode(val)

        if len(self.columns) == 0:
            # Discovery pass: union of leaf columns over all items, with
            # PROMOTE_COLUMNS sorted first.
            for item in items:
                add('', 1, item, add_to_columns)
            columns = columns.keys()
            columns.sort(key=column_sort_key)
        else:
            columns = self.columns

        for i in xrange(0, len(columns)):
            column_ix[columns[i]] = i
        for item in items:
            row = len(columns) * ['']
            add('', 1, item, add_to_row)
            table.append(row)

        return (columns, table)
+
class TSVList(Lister):
    """Tab-separated output; the header row appears only in verbose mode."""

    def __init__(self, columns, obj_info, options):
        self.columns = columns
        self.obj_info = obj_info
        self.options = options

    def display_list(self, columns, table):
        self.verbose("\t".join(columns))
        for record in table:
            output("\t".join(record))
+
class LongList(Lister):
    """One 'column: value' line per field, records separated by a ruler."""

    def __init__(self, columns, obj_info, options):
        self.columns = columns
        self.obj_info = obj_info
        self.options = options

    def display_list(self, columns, table):
        sep = "\n" + "-" * 80 + "\n"
        # Right-align the field names to the longest column name.
        max_width = 0
        for col in columns:
            max_width = max(max_width, len(col))
        fmt = "{0:>" + unicode(max_width) + "}: {1}"
        output(sep)
        for i in xrange(0, len(table)):
            for j in xrange(0, len(columns)):
                output(fmt.format(columns[j], table[i][j]))
            output(sep)
+
class TableList(Lister):
    """ASCII-art table output (the default format)."""

    def __init__(self, columns, obj_info, options):
        self.columns = columns
        self.obj_info = obj_info
        self.options = options

    def display_list(self, columns, table):
        # Prepend the header as row 0 so column widths cover everything.
        total = [columns]
        total.extend(table)
        self.ascii_table(total)

    def ascii_table(self, rows):
        # rows[0] is the (centred) header; data rows are left-aligned;
        # '+---+' bars are sized to the widest cell in each column.
        table = ""
        col_widths = [0] * len(rows[0])
        for i in xrange(0, len(rows[0])):
            for j in xrange(0, len(rows)):
                col_widths[i] = max(col_widths[i], len(rows[j][i]))
        self.ascii_bar(col_widths)
        self.ascii_row(col_widths, rows[0], "^")
        self.ascii_bar(col_widths)
        for row in rows[1:]:
            self.ascii_row(col_widths, row, "<")
        self.ascii_bar(col_widths)

    def ascii_row(self, col_widths, row, align):
        # align is a str.format alignment char: '^' centre, '<' left.
        txt = "|"
        for i in xrange(0, len(col_widths)):
            fmt = " {0:" + align + unicode(col_widths[i]) + "} "
            txt += fmt.format(row[i]) + "|"
        output(txt)

    def ascii_bar(self, col_widths):
        txt = "+"
        for w in col_widths:
            txt += ("-" * (w + 2)) + "+"
        output(txt)
+
class KeyValueList(Lister):
    """key="value" pairs, one space-separated line per record."""

    def __init__(self, columns, obj_info, options):
        self.columns = columns
        self.obj_info = obj_info
        self.options = options

    def display_list(self, columns, table):
        for i in xrange(0, len(table)):
            pairs = ["{0}=\"{1}\"".format(columns[j], table[i][j])
                     for j in xrange(0, len(columns))]
            output(" ".join(pairs))
+
# TODO handle spaces etc in completable names
class BashList(Lister):
    """Space-separated 'name' values only; consumed by the bash
    completion script to complete object names."""

    def __init__(self, columns, obj_info, options):
        self.columns = columns
        self.obj_info = obj_info
        self.options = options

    def display_list(self, columns, table):
        # Locate the 'name' column; emit nothing if there isn't one.
        ix = None
        for i in xrange(0, len(columns)):
            if columns[i] == 'name':
                ix = i
        if ix is not None:
            res = []
            for row in table:
                res.append(row[ix])
            output(" ".join(res))
+
# Maps --format values to Lister subclasses; the two JSON formats are
# handled inline by format_list().
FORMATS = {
    'raw_json'    : None, # Special cased
    'pretty_json' : None, # Ditto
    'tsv'         : TSVList,
    'long'        : LongList,
    'table'       : TableList,
    'kvp'         : KeyValueList,
    'bash'        : BashList
}
+
def write_payload_file(payload_file, json_list):
    """Write the payload of the first message in a basic-get JSON response
    to `payload_file`, base64-decoding it when payload_encoding says so.

    json_list: JSON text for a non-empty list of message dicts, each with
    'payload' and 'payload_encoding' keys.
    """
    result = json.loads(json_list)[0]
    payload = result['payload']
    payload_encoding = result['payload_encoding']
    if payload_encoding == 'base64':
        data = base64.b64decode(payload)
    else:
        data = payload
    # Context manager guarantees the file is closed even if write() fails.
    with open(payload_file, 'w') as f:
        f.write(data)
+
def print_bash_completion():
    """Print a bash completion script for rabbitmqadmin; the completion
    word lists are generated from the same tables that drive the
    subcommands (LISTABLE, SHOWABLE, DECLARABLE, ...)."""
    script = """# This is a bash completion script for rabbitmqadmin.
# Redirect it to a file, then source it or copy it to /etc/bash_completion.d
# to get tab completion. rabbitmqadmin must be on your PATH for this to work.
_rabbitmqadmin()
{
    local cur prev opts base
    COMPREPLY=()
    cur="${COMP_WORDS[COMP_CWORD]}"
    prev="${COMP_WORDS[COMP_CWORD-1]}"

    opts="list show declare delete close purge import export get publish help"
    fargs="--help --host --port --vhost --username --password --format --depth --sort --sort-reverse"

    case "${prev}" in
        list)
            COMPREPLY=( $(compgen -W '""" + " ".join(LISTABLE) + """' -- ${cur}) )
            return 0
            ;;
        show)
            COMPREPLY=( $(compgen -W '""" + " ".join(SHOWABLE) + """' -- ${cur}) )
            return 0
            ;;
        declare)
            COMPREPLY=( $(compgen -W '""" + " ".join(DECLARABLE.keys()) + """' -- ${cur}) )
            return 0
            ;;
        delete)
            COMPREPLY=( $(compgen -W '""" + " ".join(DELETABLE.keys()) + """' -- ${cur}) )
            return 0
            ;;
        close)
            COMPREPLY=( $(compgen -W '""" + " ".join(CLOSABLE.keys()) + """' -- ${cur}) )
            return 0
            ;;
        purge)
            COMPREPLY=( $(compgen -W '""" + " ".join(PURGABLE.keys()) + """' -- ${cur}) )
            return 0
            ;;
        export)
            COMPREPLY=( $(compgen -f ${cur}) )
            return 0
            ;;
        import)
            COMPREPLY=( $(compgen -f ${cur}) )
            return 0
            ;;
        help)
            opts="subcommands config"
            COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
            return 0
            ;;
        -H)
            COMPREPLY=( $(compgen -A hostname ${cur}) )
            return 0
            ;;
        --host)
            COMPREPLY=( $(compgen -A hostname ${cur}) )
            return 0
            ;;
        -V)
            opts="$(rabbitmqadmin -q -f bash list vhosts)"
            COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
            return 0
            ;;
        --vhost)
            opts="$(rabbitmqadmin -q -f bash list vhosts)"
            COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
            return 0
            ;;
        -u)
            opts="$(rabbitmqadmin -q -f bash list users)"
            COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
            return 0
            ;;
        --username)
            opts="$(rabbitmqadmin -q -f bash list users)"
            COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
            return 0
            ;;
        -f)
            COMPREPLY=( $(compgen -W \"""" + " ".join(FORMATS.keys()) + """\" -- ${cur}) )
            return 0
            ;;
        --format)
            COMPREPLY=( $(compgen -W \"""" + " ".join(FORMATS.keys()) + """\" -- ${cur}) )
            return 0
            ;;

"""
    # Per-kind cases: strip the trailing 's' so e.g. 'queue' completes by
    # listing 'queues'.
    for l in LISTABLE:
        key = l[0:len(l) - 1]
        script += "        " + key + """)
            opts="$(rabbitmqadmin -q -f bash list """ + l + """)"
            COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
            return 0
            ;;
"""
    script += """        *)
        ;;
    esac

   COMPREPLY=($(compgen -W "${opts} ${fargs}" -- ${cur}))
   return 0
}
complete -F _rabbitmqadmin rabbitmqadmin
"""
    output(script)
+
# Script entry point.
if __name__ == "__main__":
    main()
diff --git a/version.mk b/version.mk
deleted file mode 100644
index 5683af4a2f..0000000000
--- a/version.mk
+++ /dev/null
@@ -1 +0,0 @@
-VERSION?=0.0.0