summaryrefslogtreecommitdiff
path: root/deps/rabbit_common
diff options
context:
space:
mode:
Diffstat (limited to 'deps/rabbit_common')
-rw-r--r--deps/rabbit_common/.gitignore31
-rw-r--r--deps/rabbit_common/.travis.yml61
-rw-r--r--deps/rabbit_common/CODE_OF_CONDUCT.md44
-rw-r--r--deps/rabbit_common/CONTRIBUTING.md38
-rw-r--r--deps/rabbit_common/LICENSE11
-rw-r--r--deps/rabbit_common/LICENSE-BSD-recon27
-rw-r--r--deps/rabbit_common/LICENSE-MIT-Erlware-Commons21
-rw-r--r--deps/rabbit_common/LICENSE-MIT-Mochi9
-rw-r--r--deps/rabbit_common/LICENSE-MPL-RabbitMQ373
-rw-r--r--deps/rabbit_common/Makefile53
-rw-r--r--deps/rabbit_common/README.md4
-rwxr-xr-xdeps/rabbit_common/codegen.py582
-rw-r--r--deps/rabbit_common/development.post.mk33
-rw-r--r--deps/rabbit_common/development.pre.mk14
-rw-r--r--deps/rabbit_common/erlang.mk7746
-rw-r--r--deps/rabbit_common/include/rabbit.hrl267
-rw-r--r--deps/rabbit_common/include/rabbit_core_metrics.hrl52
-rw-r--r--deps/rabbit_common/include/rabbit_log.hrl8
-rw-r--r--deps/rabbit_common/include/rabbit_memory.hrl16
-rw-r--r--deps/rabbit_common/include/rabbit_misc.hrl9
-rw-r--r--deps/rabbit_common/include/rabbit_msg_store.hrl12
-rw-r--r--deps/rabbit_common/include/resource.hrl14
-rw-r--r--deps/rabbit_common/mk/rabbitmq-build.mk42
-rw-r--r--deps/rabbit_common/mk/rabbitmq-components.hexpm.mk36
-rw-r--r--deps/rabbit_common/mk/rabbitmq-components.mk359
-rw-r--r--deps/rabbit_common/mk/rabbitmq-dist.mk365
-rw-r--r--deps/rabbit_common/mk/rabbitmq-early-plugin.mk3
-rw-r--r--deps/rabbit_common/mk/rabbitmq-early-test.mk130
-rw-r--r--deps/rabbit_common/mk/rabbitmq-hexpm.mk67
-rw-r--r--deps/rabbit_common/mk/rabbitmq-macros.mk22
-rw-r--r--deps/rabbit_common/mk/rabbitmq-plugin.mk23
-rw-r--r--deps/rabbit_common/mk/rabbitmq-run.mk428
-rw-r--r--deps/rabbit_common/mk/rabbitmq-test.mk80
-rw-r--r--deps/rabbit_common/mk/rabbitmq-tools.mk429
-rwxr-xr-xdeps/rabbit_common/mk/xrefr338
-rw-r--r--deps/rabbit_common/src/app_utils.erl167
-rw-r--r--deps/rabbit_common/src/code_version.erl348
-rw-r--r--deps/rabbit_common/src/credit_flow.erl210
-rw-r--r--deps/rabbit_common/src/delegate.erl277
-rw-r--r--deps/rabbit_common/src/delegate_sup.erl55
-rw-r--r--deps/rabbit_common/src/file_handle_cache.erl1564
-rw-r--r--deps/rabbit_common/src/file_handle_cache_stats.erl57
-rw-r--r--deps/rabbit_common/src/gen_server2.erl1419
-rw-r--r--deps/rabbit_common/src/lager_forwarder_backend.erl120
-rw-r--r--deps/rabbit_common/src/mirrored_supervisor.erl513
-rw-r--r--deps/rabbit_common/src/mnesia_sync.erl64
-rw-r--r--deps/rabbit_common/src/pmon.erl96
-rw-r--r--deps/rabbit_common/src/priority_queue.erl234
-rw-r--r--deps/rabbit_common/src/rabbit_amqp_connection.erl34
-rw-r--r--deps/rabbit_common/src/rabbit_amqqueue_common.erl39
-rw-r--r--deps/rabbit_common/src/rabbit_auth_backend_dummy.erl39
-rw-r--r--deps/rabbit_common/src/rabbit_auth_mechanism.erl41
-rw-r--r--deps/rabbit_common/src/rabbit_authn_backend.erl27
-rw-r--r--deps/rabbit_common/src/rabbit_authz_backend.erl88
-rw-r--r--deps/rabbit_common/src/rabbit_basic_common.erl41
-rw-r--r--deps/rabbit_common/src/rabbit_binary_generator.erl235
-rw-r--r--deps/rabbit_common/src/rabbit_binary_parser.erl172
-rw-r--r--deps/rabbit_common/src/rabbit_cert_info.erl270
-rw-r--r--deps/rabbit_common/src/rabbit_channel_common.erl25
-rw-r--r--deps/rabbit_common/src/rabbit_command_assembler.erl124
-rw-r--r--deps/rabbit_common/src/rabbit_control_misc.erl179
-rw-r--r--deps/rabbit_common/src/rabbit_core_metrics.erl437
-rw-r--r--deps/rabbit_common/src/rabbit_data_coercion.erl47
-rw-r--r--deps/rabbit_common/src/rabbit_env.erl1850
-rw-r--r--deps/rabbit_common/src/rabbit_error_logger_handler.erl169
-rw-r--r--deps/rabbit_common/src/rabbit_event.erl164
-rw-r--r--deps/rabbit_common/src/rabbit_exchange_type.erl68
-rw-r--r--deps/rabbit_common/src/rabbit_heartbeat.erl184
-rw-r--r--deps/rabbit_common/src/rabbit_http_util.erl967
-rw-r--r--deps/rabbit_common/src/rabbit_json.erl63
-rw-r--r--deps/rabbit_common/src/rabbit_log.erl164
-rw-r--r--deps/rabbit_common/src/rabbit_log_osiris_shim.erl26
-rw-r--r--deps/rabbit_common/src/rabbit_log_ra_shim.erl16
-rw-r--r--deps/rabbit_common/src/rabbit_misc.erl1434
-rw-r--r--deps/rabbit_common/src/rabbit_msg_store_index.erl89
-rw-r--r--deps/rabbit_common/src/rabbit_net.erl321
-rw-r--r--deps/rabbit_common/src/rabbit_nodes_common.erl227
-rw-r--r--deps/rabbit_common/src/rabbit_numerical.erl358
-rw-r--r--deps/rabbit_common/src/rabbit_password_hashing.erl11
-rw-r--r--deps/rabbit_common/src/rabbit_pbe.erl54
-rw-r--r--deps/rabbit_common/src/rabbit_peer_discovery_backend.erl59
-rw-r--r--deps/rabbit_common/src/rabbit_policy_validator.erl22
-rw-r--r--deps/rabbit_common/src/rabbit_queue_collector.erl80
-rw-r--r--deps/rabbit_common/src/rabbit_registry.erl165
-rw-r--r--deps/rabbit_common/src/rabbit_registry_class.erl12
-rw-r--r--deps/rabbit_common/src/rabbit_resource_monitor_misc.erl39
-rw-r--r--deps/rabbit_common/src/rabbit_runtime.erl66
-rw-r--r--deps/rabbit_common/src/rabbit_runtime_parameter.erl25
-rw-r--r--deps/rabbit_common/src/rabbit_semver.erl730
-rw-r--r--deps/rabbit_common/src/rabbit_semver_parser.erl306
-rw-r--r--deps/rabbit_common/src/rabbit_ssl_options.erl86
-rw-r--r--deps/rabbit_common/src/rabbit_types.erl196
-rw-r--r--deps/rabbit_common/src/rabbit_writer.erl437
-rw-r--r--deps/rabbit_common/src/supervisor2.erl1651
-rw-r--r--deps/rabbit_common/src/vm_memory_monitor.erl576
-rw-r--r--deps/rabbit_common/src/worker_pool.erl172
-rw-r--r--deps/rabbit_common/src/worker_pool_sup.erl69
-rw-r--r--deps/rabbit_common/src/worker_pool_worker.erl192
-rw-r--r--deps/rabbit_common/test/gen_server2_test_server.erl72
-rw-r--r--deps/rabbit_common/test/rabbit_env_SUITE.erl1098
-rw-r--r--deps/rabbit_common/test/supervisor2_SUITE.erl128
-rw-r--r--deps/rabbit_common/test/unit_SUITE.erl446
-rw-r--r--deps/rabbit_common/test/unit_priority_queue_SUITE.erl35
-rw-r--r--deps/rabbit_common/test/worker_pool_SUITE.erl220
104 files changed, 31716 insertions, 0 deletions
diff --git a/deps/rabbit_common/.gitignore b/deps/rabbit_common/.gitignore
new file mode 100644
index 0000000000..f609631433
--- /dev/null
+++ b/deps/rabbit_common/.gitignore
@@ -0,0 +1,31 @@
+*~
+.sw?
+.*.sw?
+.*.plt
+*.beam
+*.coverdata
+/.*.plt
+/.erlang.mk/
+/cover/
+/deps/
+/doc/
+/ebin/
+/escript/
+/escript.lock
+/git-revisions.txt
+/logs/
+/plugins/
+/plugins.lock
+/rebar.config
+/rebar.lock
+/sbin/
+/sbin.lock
+/test/ct.cover.spec
+/xrefr
+
+/rabbit_common.d
+
+# Generated source files.
+/include/rabbit_framing.hrl
+/src/rabbit_framing_amqp_0_8.erl
+/src/rabbit_framing_amqp_0_9_1.erl
diff --git a/deps/rabbit_common/.travis.yml b/deps/rabbit_common/.travis.yml
new file mode 100644
index 0000000000..a9b75d084d
--- /dev/null
+++ b/deps/rabbit_common/.travis.yml
@@ -0,0 +1,61 @@
+# vim:sw=2:et:
+
+os: linux
+dist: xenial
+language: elixir
+notifications:
+ email:
+ recipients:
+ - alerts@rabbitmq.com
+ on_success: never
+ on_failure: always
+addons:
+ apt:
+ packages:
+ - awscli
+cache:
+ apt: true
+env:
+ global:
+ - secure: Tu26VJ9BsXxL20xxwWk4cbCkZyqyxYmNpSSqco5r3FLeU5hk5Vkk+s2BareRvqKhKHFlvyxu8GwsKtajMvsieP6y5J99gSeub6fDOIskPz61bo0aKA9nbDuBFSG1Z5wgXx1XRo0yDatLxXCXe3FbThRsylG7XNjtRaru1/lwuVxfxPtBGQ1opvQX71sST3GYSPoBYR+JlcVpU+uDHMAzsP8J0m5rEpxcl821aTMk3iz90hBQMsoLTBmSQePPcNqOA/1OH75VfjuXR8JBXHvA9njrUBrsyxgHf2uOh3jAXdIrHZwZg/17+y7gNVqByfx/UpGb8XEpVkncg/cRyVIHMk7/gFCZkeVC1QkIN5+EPiGLF7u32x9QaT7Zqz57iLh3IJzED2dj12qWaeX8QypF1K1r5qq4pRrN6iEZx76stpZbyFT4XnExHRdzPuouy7yz1gDHF0HOxbNLowzc/jk7tuTp+qmDSR5tRvegAIH3TONegxXyB7smdbvdI6MCN5/GP2bGK7HiqYWCmTGHtJwgxBKc5XoV8ZjpXfKxG98WbK5RsSP1miRnmxSbxaV0Gai1hfFlanJFFxTA9584O+NVRXNNFMfnnt20Ts6OwoXTcJ/boIPjF5Mcm0eJ4nz4R18TArXE4B5S4pTk3eQkG1ACDigkYZ3fc6ws4cWrt8BZASI=
+ - secure: fNEx9OXi2UisiYu0FiHJpV9+vWLB9DIUAIKG24GfUHVgZqFQOInBf5fEYrjlVgm5zNezSBS3hFNHXd/EXJF8KNgbf6mI0z4h4RyyQY98N+78tWvINoIawEeYpgC6NTI52MdaCfV+fTVWhiL0uP7mqWhLmll2bKXIy6HA6I9PnmiQSloNe64vUPF+UsVZHzzeabK4DR2VdI3h+BGXzOY9FG8Kt2voiXOLd2RFpVeN86FDTp+uVZY/K9e/MsktoK+XaZZ4qMAgm6lB32LVkzl3KA9ki6y6BY7le1m2c90hxAtBJGWZptkMb+VL0Fem39nEBnLjE0a0vIddp32PLJQmv6eopMfLay5BIkwtkRwv3P0uCwYd0bgYQSHF/gdTCcK1nr7fMhkQveBh6vmnbhrca7OeQRHz08+jo6EquUgNQZKmTZPWXQn9lS9mU/0EDLJJhn4KhJezGw6DcAAqB0KqmQedxtHMUT87by7LzhINwKZnm4y5WKA/W/zLI6dNqvIgc5C6UJh0EVgxa13GRmrnGmttV1dtLRQhiMJCbJykaekjPMULUmli0RbFz7bSFqFqEUsF+wwovyD+Y6D8KGOJdvvEYPdPIFpRPnhGUvH86JzsFdVKNJBicGI9LpCtlXlWNRbQIQ8uV5ze2HhxSJhtM6e6dB4d9yzpp6a81uR77bk=
+
+ # $base_rmq_ref is used by rabbitmq-components.mk to select the
+ # appropriate branch for dependencies.
+ - base_rmq_ref=master
+
+elixir:
+ - '1.10'
+otp_release:
+ - '22.3'
+ - '23.0'
+
+install:
+ # This project being an Erlang one (we just set language to Elixir
+ # to ensure it is installed), we don't want Travis to run mix(1)
+ # automatically as it will break.
+ skip
+
+script:
+ # $current_rmq_ref is also used by rabbitmq-components.mk to select
+ # the appropriate branch for dependencies.
+ - make check-rabbitmq-components.mk current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make xref current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+ - make tests current_rmq_ref="${TRAVIS_PULL_REQUEST_BRANCH:-${TRAVIS_BRANCH}}"
+
+after_failure:
+ - |
+ cd "$TRAVIS_BUILD_DIR"
+ if test -d logs && test "$AWS_ACCESS_KEY_ID" && test "$AWS_SECRET_ACCESS_KEY"; then
+ archive_name="$(basename "$TRAVIS_REPO_SLUG")-$TRAVIS_JOB_NUMBER"
+
+ tar -c --transform "s/^logs/${archive_name}/" -f - logs | \
+ xz > "${archive_name}.tar.xz"
+
+ aws s3 cp "${archive_name}.tar.xz" s3://server-release-pipeline/travis-ci-logs/ \
+ --region eu-west-1 \
+ --acl public-read
+ fi
diff --git a/deps/rabbit_common/CODE_OF_CONDUCT.md b/deps/rabbit_common/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..08697906fd
--- /dev/null
+++ b/deps/rabbit_common/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of fostering an open
+and welcoming community, we pledge to respect all people who contribute through reporting
+issues, posting feature requests, updating documentation, submitting pull requests or
+patches, and other activities.
+
+We are committed to making participation in this project a harassment-free experience for
+everyone, regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
+religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery
+ * Personal attacks
+ * Trolling or insulting/derogatory comments
+ * Public or private harassment
+ * Publishing others' private information, such as physical or electronic addresses,
+ without explicit permission
+ * Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned to this
+Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to fairly and
+consistently applying these principles to every aspect of managing this project. Project
+maintainers who do not follow or enforce the Code of Conduct may be permanently removed
+from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces when an
+individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
+contacting a project maintainer at [info@rabbitmq.com](mailto:info@rabbitmq.com). All complaints will
+be reviewed and investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. Maintainers are obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+This Code of Conduct is adapted from the
+[Contributor Covenant](https://contributor-covenant.org), version 1.3.0, available at
+[contributor-covenant.org/version/1/3/0/](https://contributor-covenant.org/version/1/3/0/)
diff --git a/deps/rabbit_common/CONTRIBUTING.md b/deps/rabbit_common/CONTRIBUTING.md
new file mode 100644
index 0000000000..23a92fef9c
--- /dev/null
+++ b/deps/rabbit_common/CONTRIBUTING.md
@@ -0,0 +1,38 @@
+## Overview
+
+RabbitMQ projects use pull requests to discuss, collaborate on and accept code contributions.
+Pull requests are the primary place for discussing code changes.
+
+## How to Contribute
+
+The process is fairly standard:
+
+ * Fork the repository or repositories you plan on contributing to
+ * Clone [RabbitMQ umbrella repository](https://github.com/rabbitmq/rabbitmq-public-umbrella)
+ * `cd umbrella`, `make co`
+ * Create a branch with a descriptive name in the relevant repositories
+ * Make your changes, run tests, commit with a [descriptive message](https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html), push to your fork
+ * Submit pull requests with an explanation what has been changed and **why**
+ * Submit a filled out and signed [Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) if needed (see below)
+ * Be patient. We will get to your pull request eventually
+
+If what you are going to work on is a substantial change, please first ask the core team
+for their opinion on the [RabbitMQ mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
+
+
+## Code of Conduct
+
+See [CODE_OF_CONDUCT.md](./CODE_OF_CONDUCT.md).
+
+
+## Contributor Agreement
+
+If you want to contribute a non-trivial change, please submit a signed copy of our
+[Contributor Agreement](https://github.com/rabbitmq/ca#how-to-submit) around the time
+you submit your pull request. This will make it much easier (in some cases, possible)
+for the RabbitMQ team at Pivotal to merge your contribution.
+
+
+## Where to Ask Questions
+
+If something isn't clear, feel free to ask on our [mailing list](https://groups.google.com/forum/#!forum/rabbitmq-users).
diff --git a/deps/rabbit_common/LICENSE b/deps/rabbit_common/LICENSE
new file mode 100644
index 0000000000..bd1046bc1d
--- /dev/null
+++ b/deps/rabbit_common/LICENSE
@@ -0,0 +1,11 @@
+This package, the RabbitMQ commons library, is licensed under the MPL 2.0. For
+the MPL, please see LICENSE-MPL-RabbitMQ.
+
+The files `rabbit_numerical.erl` and `rabbit_http_util.erl` are (c) 2007
+Mochi Media, Inc and licensed under a MIT license, see LICENSE-MIT-Mochi.
+
+The files 'rabbit_semver.erl' and 'rabbit_semver_parser.erl' are Copyright (c) 2011
+Erlware, LLC and licensed under a MIT license, see LICENSE-MIT-Erlware-Commons.
+
+If you have any questions regarding licensing, please contact us at
+info@rabbitmq.com.
diff --git a/deps/rabbit_common/LICENSE-BSD-recon b/deps/rabbit_common/LICENSE-BSD-recon
new file mode 100644
index 0000000000..be0aebbaf1
--- /dev/null
+++ b/deps/rabbit_common/LICENSE-BSD-recon
@@ -0,0 +1,27 @@
+Copyright (c) 2012-2017, Frédéric Trottier-Hébert
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright notice, this
+ list of conditions and the following disclaimer in the documentation and/or
+ other materials provided with the distribution.
+
+ The names of its contributors may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/deps/rabbit_common/LICENSE-MIT-Erlware-Commons b/deps/rabbit_common/LICENSE-MIT-Erlware-Commons
new file mode 100644
index 0000000000..fc89c0272d
--- /dev/null
+++ b/deps/rabbit_common/LICENSE-MIT-Erlware-Commons
@@ -0,0 +1,21 @@
+Copyright (c) 2011 Erlware, LLC
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/deps/rabbit_common/LICENSE-MIT-Mochi b/deps/rabbit_common/LICENSE-MIT-Mochi
new file mode 100644
index 0000000000..c85b65a4d3
--- /dev/null
+++ b/deps/rabbit_common/LICENSE-MIT-Mochi
@@ -0,0 +1,9 @@
+This is the MIT license.
+
+Copyright (c) 2007 Mochi Media, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/deps/rabbit_common/LICENSE-MPL-RabbitMQ b/deps/rabbit_common/LICENSE-MPL-RabbitMQ
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/deps/rabbit_common/LICENSE-MPL-RabbitMQ
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/deps/rabbit_common/Makefile b/deps/rabbit_common/Makefile
new file mode 100644
index 0000000000..6a31a9ccbf
--- /dev/null
+++ b/deps/rabbit_common/Makefile
@@ -0,0 +1,53 @@
+PROJECT = rabbit_common
+PROJECT_DESCRIPTION = Modules shared by rabbitmq-server and rabbitmq-erlang-client
+
+define PROJECT_APP_EXTRA_KEYS
+%% Hex.pm package information.
+ {licenses, ["MPL-2.0"]},
+ {links, [
+ {"Website", "https://www.rabbitmq.com/"},
+ {"GitHub", "https://github.com/rabbitmq/rabbitmq-common"}
+ ]},
+ {build_tools, ["make", "rebar3"]},
+ {files, [
+ $(RABBITMQ_HEXPM_DEFAULT_FILES),
+ "mk"
+ ]}
+endef
+
+LOCAL_DEPS = compiler crypto public_key sasl ssl syntax_tools tools xmerl
+DEPS = lager jsx ranch recon credentials_obfuscation
+
+dep_credentials_obfuscation = hex 2.2.0
+
+# FIXME: Use erlang.mk patched for RabbitMQ, while waiting for PRs to be
+# reviewed and merged.
+
+ERLANG_MK_REPO = https://github.com/rabbitmq/erlang.mk.git
+ERLANG_MK_COMMIT = rabbitmq-tmp
+
+# Variables and recipes in development.*.mk are meant to be used from
+# any Git clone. They are excluded from the files published to Hex.pm.
+# Generated files are published to Hex.pm however so people using this
+# source won't have to depend on Python and rabbitmq-codegen.
+#
+# That's why those Makefiles are included with `-include`: we ignore any
+# inclusion errors.
+
+-include development.pre.mk
+
+DEP_EARLY_PLUGINS = $(PROJECT)/mk/rabbitmq-early-test.mk
+DEP_PLUGINS = $(PROJECT)/mk/rabbitmq-build.mk \
+ $(PROJECT)/mk/rabbitmq-hexpm.mk \
+ $(PROJECT)/mk/rabbitmq-dist.mk \
+ $(PROJECT)/mk/rabbitmq-test.mk \
+ $(PROJECT)/mk/rabbitmq-tools.mk
+
+WITHOUT = plugins/proper
+
+PLT_APPS += mnesia crypto ssl
+
+include mk/rabbitmq-components.mk
+include erlang.mk
+
+-include development.post.mk
diff --git a/deps/rabbit_common/README.md b/deps/rabbit_common/README.md
new file mode 100644
index 0000000000..139a0ab018
--- /dev/null
+++ b/deps/rabbit_common/README.md
@@ -0,0 +1,4 @@
+# RabbitMQ Common
+
+This library is shared between [RabbitMQ server](https://github.com/rabbitmq/rabbitmq-server), [RabbitMQ Erlang client](https://github.com/rabbitmq/rabbitmq-erlang-client)
+and other RabbitMQ ecosystem projects.
diff --git a/deps/rabbit_common/codegen.py b/deps/rabbit_common/codegen.py
new file mode 100755
index 0000000000..2e7bad69e9
--- /dev/null
+++ b/deps/rabbit_common/codegen.py
@@ -0,0 +1,582 @@
+#!/usr/bin/env python
+
+## This Source Code Form is subject to the terms of the Mozilla Public
+## License, v. 2.0. If a copy of the MPL was not distributed with this
+## file, You can obtain one at https://mozilla.org/MPL/2.0/.
+##
+## Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+##
+
+from __future__ import nested_scopes
+from __future__ import print_function
+
+import sys
+
+from amqp_codegen import *
+import string
+import re
+
+# Coming up with a proper encoding of AMQP tables in JSON is too much
+# hassle at this stage. Given that the only default value we are
+# interested in is for the empty table, we only support that.
+def convertTable(d):
+    # Only the empty AMQP table has a representable Erlang default ("[]").
+    # Any other default would require a full table encoding, which the
+    # comment above explains is deliberately unsupported.
+    if len(d) == 0:
+        return "[]"
+    else:
+        raise Exception('Non-empty table defaults not supported ' + d)
+
+# Maps a Python type of a spec default value to a function rendering that
+# value as Erlang source text (used when emitting record field defaults).
+erlangDefaultValueTypeConvMap = {
+    bool : lambda x: str(x).lower(),
+    int : lambda x: str(x),
+    float : lambda x: str(x),
+    dict: convertTable
+}
+
+# Python 2 vs 3 detection: referencing `unicode` raises NameError on
+# Python 3, so the except branch installs the Python 3 string mappings.
+try:
+    _checkIfPython2 = unicode
+    erlangDefaultValueTypeConvMap[str] = lambda x: "<<\"" + x + "\">>"
+    erlangDefaultValueTypeConvMap[unicode] = lambda x: "<<\"" + x.encode("utf-8") + "\">>"
+except NameError:
+    # NOTE(review): on Python 3 the bytes lambda concatenates str and bytes,
+    # which would raise TypeError if a bytes default ever occurred — confirm
+    # spec defaults are always str.
+    erlangDefaultValueTypeConvMap[bytes] = lambda x: "<<\"" + x + "\">>"
+    erlangDefaultValueTypeConvMap[str] = lambda x: "<<\"" + x + "\">>"
+
+def erlangize(s):
+    # Turn a spec name into a valid Erlang atom fragment: hyphens and
+    # spaces become underscores.
+    s = s.replace('-', '_')
+    s = s.replace(' ', '_')
+    return s
+
+# Quoted-atom names such as 'class.method' for methods and 'class' for
+# classes; attached to the amqp_codegen model classes (defined in the
+# imported amqp_codegen module, not visible here).
+AmqpMethod.erlangName = lambda m: "'" + erlangize(m.klass.name) + '.' + erlangize(m.name) + "'"
+
+AmqpClass.erlangName = lambda c: "'" + erlangize(c.name) + "'"
+
+def erlangConstantName(s):
+    # Upper-case the name and join hyphen/space-separated words with '_',
+    # e.g. "soft-error" -> "SOFT_ERROR".
+    return '_'.join(re.split('[- ]', s.upper()))
+
+class PackedMethodBitField:
+    # Accumulates up to 8 consecutive 'bit' fields that are packed into a
+    # single octet on the wire. `index` is the index of the first packed
+    # field; `contents` holds the fields in packing order.
+    def __init__(self, index):
+        self.index = index
+        self.domain = 'bit'
+        self.contents = []
+
+    def extend(self, f):
+        self.contents.append(f)
+
+    def count(self):
+        return len(self.contents)
+
+    def full(self):
+        # An octet holds at most 8 bits.
+        return self.count() == 8
+
+def multiLineFormat(things, prologue, separator, lineSeparator, epilogue, thingsPerLine = 4):
+    # Join `things` with `separator`, but emit `lineSeparator` instead
+    # every `thingsPerLine` items so long alternations wrap nicely;
+    # the whole result is bracketed by `prologue`/`epilogue`.
+    r = [prologue]
+    i = 0
+    for t in things:
+        if i != 0:
+            if i % thingsPerLine == 0:
+                r += [lineSeparator]
+            else:
+                r += [separator]
+        r += [t]
+        i += 1
+    r += [epilogue]
+    return "".join(r)
+
+def prettyType(typeName, subTypes, typesPerLine = 4):
+    """Pretty print a type signature made up of many alternative subtypes"""
+    sTs = multiLineFormat(subTypes,
+                          "( ", " | ", "\n | ", " )",
+                          thingsPerLine = typesPerLine)
+    return "-type %s ::\n    %s." % (typeName, sTs)
+
+def printFileHeader():
+    # Emit the standard "autogenerated, do not edit" + MPL-2.0 header
+    # comment at the top of every generated Erlang file.
+    print("""%% Autogenerated code. Do not edit.
+%%
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates.  All rights reserved.
+%%""")
+
+def genErl(spec):
+    # Generate the rabbit_framing_amqp_* module body for `spec` (an
+    # AmqpSpec from amqp_codegen) on stdout. The nested helpers below each
+    # emit one fragment of the generated module.
+
+    # Resolve a field domain to its concrete wire type name as an atom.
+    def erlType(domain):
+        return erlangize(spec.resolveDomain(domain))
+
+    # "[type1, type2, ...]" list of the fields' wire types.
+    def fieldTypeList(fields):
+        return '[' + ', '.join([erlType(f.domain) for f in fields]) + ']'
+
+    # "[name1, name2, ...]" list of erlangized field names.
+    def fieldNameList(fields):
+        return '[' + ', '.join([erlangize(f.name) for f in fields]) + ']'
+
+    # "[F0, F1, ...]" list of the temporary variables bound per field.
+    def fieldTempList(fields):
+        return '[' + ', '.join(['F' + str(f.index) for f in fields]) + ']'
+
+    # "name0 = F0, name1 = F1, ..." record-field binding list.
+    def fieldMapList(fields):
+        return ', '.join([erlangize(f.name) + " = F" + str(f.index) for f in fields])
+
+    def genLookupMethodName(m):
+        print("lookup_method_name({%d, %d}) -> %s;" % (m.klass.index, m.index, m.erlangName()))
+
+    def genLookupClassName(c):
+        print("lookup_class_name(%d) -> %s;" % (c.index, c.erlangName()))
+
+    def genMethodId(m):
+        print("method_id(%s) -> {%d, %d};" % (m.erlangName(), m.klass.index, m.index))
+
+    def genMethodHasContent(m):
+        print("method_has_content(%s) -> %s;" % (m.erlangName(), str(m.hasContent).lower()))
+
+    def genMethodIsSynchronous(m):
+        # A synchronous method carrying a "nowait" field is only
+        # effectively synchronous when NoWait is false.
+        hasNoWait = "nowait" in fieldNameList(m.arguments)
+        if m.isSynchronous and hasNoWait:
+            print("is_method_synchronous(#%s{nowait = NoWait}) -> not(NoWait);" % (m.erlangName()))
+        else:
+            print("is_method_synchronous(#%s{}) -> %s;" % (m.erlangName(), str(m.isSynchronous).lower()))
+
+    def genMethodFieldTypes(m):
+        """Not currently used - may be useful in future?"""
+        print("method_fieldtypes(%s) -> %s;" % (m.erlangName(), fieldTypeList(m.arguments)))
+
+    def genMethodFieldNames(m):
+        print("method_fieldnames(%s) -> %s;" % (m.erlangName(), fieldNameList(m.arguments)))
+
+    # Replace each run of up to 8 consecutive 'bit' fields with one
+    # PackedMethodBitField, mirroring AMQP's on-the-wire bit packing;
+    # all other fields pass through unchanged.
+    def packMethodFields(fields):
+        packed = []
+        bitfield = None
+        for f in fields:
+            if erlType(f.domain) == 'bit':
+                if not(bitfield) or bitfield.full():
+                    bitfield = PackedMethodBitField(f.index)
+                    packed.append(bitfield)
+                bitfield.extend(f)
+            else:
+                bitfield = None
+                packed.append(f)
+        return packed
+
+    # Binary-pattern segment(s) for one (possibly bit-packed) field, used
+    # in both encode and decode <<...>> expressions. Returns None (no
+    # segment) for any wire type not listed below.
+    def methodFieldFragment(f):
+        type = erlType(f.domain)
+        p = 'F' + str(f.index)
+        if type == 'shortstr':
+            return p+'Len:8/unsigned, '+p+':'+p+'Len/binary'
+        elif type == 'longstr':
+            return p+'Len:32/unsigned, '+p+':'+p+'Len/binary'
+        elif type == 'octet':
+            return p+':8/unsigned'
+        elif type == 'short':
+            return p+':16/unsigned'
+        elif type == 'long':
+            return p+':32/unsigned'
+        elif type == 'longlong':
+            return p+':64/unsigned'
+        elif type == 'timestamp':
+            return p+':64/unsigned'
+        elif type == 'bit':
+            return p+'Bits:8'
+        elif type == 'table':
+            return p+'Len:32/unsigned, '+p+'Tab:'+p+'Len/binary'
+
+    # After the binary match, unpack bit fields from their octet, parse
+    # tables, and (for content-less methods) validate short strings.
+    def genFieldPostprocessing(packed, hasContent):
+        for f in packed:
+            type = erlType(f.domain)
+            if type == 'bit':
+                for index in range(f.count()):
+                    print("  F%d = ((F%dBits band %d) /= 0)," % \
+                          (f.index + index,
+                           f.index,
+                           1 << index))
+            elif type == 'table':
+                print("  F%d = rabbit_binary_parser:parse_table(F%dTab)," % \
+                      (f.index, f.index))
+            # We skip the check on content-bearing methods for
+            # speed. This is a sanity check, not a security thing.
+            elif type == 'shortstr' and not hasContent:
+                print("  rabbit_binary_parser:assert_utf8(F%d)," % (f.index))
+            else:
+                pass
+
+    def genMethodRecord(m):
+        print("method_record(%s) -> #%s{};" % (m.erlangName(), m.erlangName()))
+
+    # One decode_method_fields/2 clause: match the packed wire layout,
+    # post-process packed/table fields, build the method record.
+    def genDecodeMethodFields(m):
+        packedFields = packMethodFields(m.arguments)
+        binaryPattern = ', '.join([methodFieldFragment(f) for f in packedFields])
+        if binaryPattern:
+            restSeparator = ', '
+        else:
+            restSeparator = ''
+        recordConstructorExpr = '#%s{%s}' % (m.erlangName(), fieldMapList(m.arguments))
+        print("decode_method_fields(%s, <<%s>>) ->" % (m.erlangName(), binaryPattern))
+        genFieldPostprocessing(packedFields, m.hasContent)
+        print("  %s;" % (recordConstructorExpr,))
+
+    # One decode_properties/2 clause: a 16-bit presence bitmap is followed
+    # by the values of the present properties only; R0..Rn thread the
+    # remaining binary through the ?*_VAL macros.
+    def genDecodeProperties(c):
+        def presentBin(fields):
+            ps = ', '.join(['P' + str(f.index) + ':1' for f in fields])
+            return '<<' + ps + ', _:%d, R0/binary>>' % (16 - len(fields),)
+        def writePropFieldLine(field):
+            i = str(field.index)
+            if field.domain == 'bit':
+                print("  {F%s, R%s} = {P%s =/= 0, R%s}," % \
+                      (i, str(field.index + 1), i, i))
+            else:
+                print("  {F%s, R%s} = if P%s =:= 0 -> {undefined, R%s}; true -> ?%s_VAL(R%s, L%s, V%s, X%s) end," % \
+                      (i, str(field.index + 1), i, i, erlType(field.domain).upper(), i, i, i, i))
+
+        if len(c.fields) == 0:
+            print("decode_properties(%d, <<>>) ->" % (c.index,))
+        else:
+            print(("decode_properties(%d, %s) ->" %
+                   (c.index, presentBin(c.fields))))
+            for field in c.fields:
+                writePropFieldLine(field)
+            print("  <<>> = %s," % ('R' + str(len(c.fields))))
+        print("  #'P_%s'{%s};" % (erlangize(c.name), fieldMapList(c.fields)))
+
+    # Before encoding, compute the helper bindings each fragment needs:
+    # pack bits into an octet, generate table binaries, compute lengths.
+    def genFieldPreprocessing(packed):
+        for f in packed:
+            type = erlType(f.domain)
+            if type == 'bit':
+                print("  F%dBits = (%s)," % \
+                      (f.index,
+                       ' bor '.join(['(bitvalue(F%d) bsl %d)' % (x.index, x.index - f.index)
+                                     for x in f.contents])))
+            elif type == 'table':
+                print("  F%dTab = rabbit_binary_generator:generate_table(F%d)," % (f.index, f.index))
+                print("  F%dLen = size(F%dTab)," % (f.index, f.index))
+            elif type == 'shortstr':
+                print("  F%dLen = shortstr_size(F%d)," % (f.index, f.index))
+            elif type == 'longstr':
+                print("  F%dLen = size(F%d)," % (f.index, f.index))
+            else:
+                pass
+
+    # One encode_method_fields/1 clause: destructure the record, compute
+    # helper bindings, then build the wire binary.
+    def genEncodeMethodFields(m):
+        packedFields = packMethodFields(m.arguments)
+        print("encode_method_fields(#%s{%s}) ->" % (m.erlangName(), fieldMapList(m.arguments)))
+        genFieldPreprocessing(packedFields)
+        print("  <<%s>>;" % (', '.join([methodFieldFragment(f) for f in packedFields])))
+
+    # One encode_properties/1 clause: mirror of genDecodeProperties —
+    # build the presence bitmap and accumulate encoded values in reverse.
+    def genEncodeProperties(c):
+        def presentBin(fields):
+            ps = ', '.join(['P' + str(f.index) + ':1' for f in fields])
+            return '<<' + ps + ', 0:%d>>' % (16 - len(fields),)
+        def writePropFieldLine(field):
+            i = str(field.index)
+            if field.domain == 'bit':
+                print("  {P%s, R%s} = {F%s =:= 1, R%s}," % \
+                      (i, str(field.index + 1), i, i))
+            else:
+                print("  {P%s, R%s} = if F%s =:= undefined -> {0, R%s}; true -> {1, [?%s_PROP(F%s, L%s) | R%s]} end," % \
+                      (i, str(field.index + 1), i, i, erlType(field.domain).upper(), i, i, i))
+
+        print("encode_properties(#'P_%s'{%s}) ->" % (erlangize(c.name), fieldMapList(c.fields)))
+        if len(c.fields) == 0:
+            print("  <<>>;")
+        else:
+            print("  R0 = [<<>>],")
+            for field in c.fields:
+                writePropFieldLine(field)
+            print("  list_to_binary([%s | lists:reverse(R%s)]);" % \
+                  (presentBin(c.fields), str(len(c.fields))))
+
+ def messageConstantClass(cls):
+ # We do this because 0.8 uses "soft error" and 8.1 uses "soft-error".
+ return erlangConstantName(cls)
+
+ def genLookupException(c,v,cls):
+ mCls = messageConstantClass(cls)
+ if mCls == 'SOFT_ERROR': genLookupException1(c,'false')
+ elif mCls == 'HARD_ERROR': genLookupException1(c, 'true')
+ elif mCls == '': pass
+ else: raise Exception('Unknown constant class' + cls)
+
+ def genLookupException1(c,hardErrorBoolStr):
+ n = erlangConstantName(c)
+ print('lookup_amqp_exception(%s) -> {%s, ?%s, <<"%s">>};' % \
+ (n.lower(), hardErrorBoolStr, n, n))
+
+ def genAmqpException(c,v,cls):
+ n = erlangConstantName(c)
+ print('amqp_exception(?%s) -> %s;' % \
+ (n, n.lower()))
+
+    methods = spec.allMethods()
+
+    # --- Module preamble: header comment, module name, exports. The 8.0
+    # spec reports itself as 8-0, renamed here to the conventional 0-8.
+    printFileHeader()
+    module = "rabbit_framing_amqp_%d_%d" % (spec.major, spec.minor)
+    if spec.revision != 0:
+        module = "%s_%d" % (module, spec.revision)
+    if module == "rabbit_framing_amqp_8_0":
+        module = "rabbit_framing_amqp_0_8"
+    print("-module(%s)." % module)
+    print("""-include("rabbit_framing.hrl").
+
+-export([version/0]).
+-export([lookup_method_name/1]).
+-export([lookup_class_name/1]).
+
+-export([method_id/1]).
+-export([method_has_content/1]).
+-export([is_method_synchronous/1]).
+-export([method_record/1]).
+-export([method_fieldnames/1]).
+-export([decode_method_fields/2]).
+-export([decode_properties/2]).
+-export([encode_method_fields/1]).
+-export([encode_properties/1]).
+-export([lookup_amqp_exception/1]).
+-export([amqp_exception/1]).
+
+""")
+    print("%% Various types")
+
+    print("""-export_type([amqp_field_type/0, amqp_property_type/0,
+              amqp_table/0, amqp_array/0, amqp_value/0,
+              amqp_method_name/0, amqp_method/0, amqp_method_record/0,
+              amqp_method_field_name/0, amqp_property_record/0,
+              amqp_exception/0, amqp_exception_code/0, amqp_class_id/0]).
+
+-type amqp_field_type() ::
+      'longstr' | 'signedint' | 'decimal' | 'timestamp' |
+      'unsignedbyte' | 'unsignedshort' | 'unsignedint' |
+      'table' | 'byte' | 'double' | 'float' | 'long' |
+      'short' | 'bool' | 'binary' | 'void' | 'array'.
+-type amqp_property_type() ::
+      'shortstr' | 'longstr' | 'octet' | 'short' | 'long' |
+      'longlong' | 'timestamp' | 'bit' | 'table'.
+
+-type amqp_table() :: [{binary(), amqp_field_type(), amqp_value()}].
+-type amqp_array() :: [{amqp_field_type(), amqp_value()}].
+-type amqp_value() :: binary() |    % longstr
+                      integer() |   % signedint
+                      {non_neg_integer(), non_neg_integer()} | % decimal
+                      amqp_table() |
+                      amqp_array() |
+                      byte() |      % byte
+                      float() |     % double
+                      integer() |   % long
+                      integer() |   % short
+                      boolean() |   % bool
+                      binary() |    % binary
+                      'undefined' | % void
+                      non_neg_integer(). % timestamp
+""")
+
+    # --- Union types enumerating every method, class and constant in the
+    # spec, wrapped by prettyType for readable generated code.
+    print(prettyType("amqp_method_name()",
+                     [m.erlangName() for m in methods]))
+    print(prettyType("amqp_method()",
+                     ["{%s, %s}" % (m.klass.index, m.index) for m in methods],
+                     6))
+    print(prettyType("amqp_method_record()",
+                     ["#%s{}" % (m.erlangName()) for m in methods]))
+    fieldNames = set()
+    for m in methods:
+        fieldNames.update([erlangize(f.name) for f in m.arguments])
+    fieldNames = [f for f in fieldNames]
+    fieldNames.sort()
+    print(prettyType("amqp_method_field_name()",
+                     fieldNames))
+    print(prettyType("amqp_property_record()",
+                     ["#'P_%s'{}" % erlangize(c.name) for c in spec.allClasses()]))
+    print(prettyType("amqp_exception()",
+                     ["'%s'" % erlangConstantName(c).lower() for (c, v, cls) in spec.constants]))
+    print(prettyType("amqp_exception_code()",
+                     ["%i" % v for (c, v, cls) in spec.constants]))
+    classIds = set()
+    for m in spec.allMethods():
+        classIds.add(m.klass.index)
+    print(prettyType("amqp_class_id()",
+                     ["%i" % ci for ci in classIds]))
+    print(prettyType("amqp_class_name()",
+                     ["%s" % c.erlangName() for c in spec.allClasses()]))
+
+    # --- Static fragment: -spec declarations for the public API plus the
+    # helper functions and ?*_VAL / ?*_PROP macros the generated encode/
+    # decode clauses rely on. Emitted verbatim.
+    print("""
+%% Method signatures
+-spec version() -> {non_neg_integer(), non_neg_integer(), non_neg_integer()}.
+-spec lookup_method_name(amqp_method()) -> amqp_method_name().
+-spec lookup_class_name(amqp_class_id()) -> amqp_class_name().
+-spec method_id(amqp_method_name()) -> amqp_method().
+-spec method_has_content(amqp_method_name()) -> boolean().
+-spec is_method_synchronous(amqp_method_record()) -> boolean().
+-spec method_record(amqp_method_name()) -> amqp_method_record().
+-spec method_fieldnames(amqp_method_name()) -> [amqp_method_field_name()].
+-spec decode_method_fields(amqp_method_name(), binary()) ->
+          amqp_method_record() | rabbit_types:connection_exit().
+-spec decode_properties(non_neg_integer(), binary()) -> amqp_property_record().
+-spec encode_method_fields(amqp_method_record()) -> binary().
+-spec encode_properties(amqp_property_record()) -> binary().
+-spec lookup_amqp_exception(amqp_exception()) ->
+          {boolean(), amqp_exception_code(), binary()}.
+-spec amqp_exception(amqp_exception_code()) -> amqp_exception().
+
+bitvalue(true) -> 1;
+bitvalue(false) -> 0;
+bitvalue(undefined) -> 0.
+
+shortstr_size(S) ->
+    case size(S) of
+        Len when Len =< 255 -> Len;
+        _                   -> exit(method_field_shortstr_overflow)
+    end.
+
+-define(SHORTSTR_VAL(R, L, V, X),
+        begin
+            <<L:8/unsigned, V:L/binary, X/binary>> = R,
+            {V, X}
+        end).
+
+-define(LONGSTR_VAL(R, L, V, X),
+        begin
+            <<L:32/unsigned, V:L/binary, X/binary>> = R,
+            {V, X}
+        end).
+
+-define(SHORT_VAL(R, L, V, X),
+        begin
+            <<V:8/unsigned, X/binary>> = R,
+            {V, X}
+        end).
+
+-define(LONG_VAL(R, L, V, X),
+        begin
+            <<V:32/unsigned, X/binary>> = R,
+            {V, X}
+        end).
+
+-define(LONGLONG_VAL(R, L, V, X),
+        begin
+            <<V:64/unsigned, X/binary>> = R,
+            {V, X}
+        end).
+
+-define(OCTET_VAL(R, L, V, X),
+        begin
+            <<V:8/unsigned, X/binary>> = R,
+            {V, X}
+        end).
+
+-define(TABLE_VAL(R, L, V, X),
+        begin
+            <<L:32/unsigned, V:L/binary, X/binary>> = R,
+            {rabbit_binary_parser:parse_table(V), X}
+        end).
+
+-define(TIMESTAMP_VAL(R, L, V, X),
+        begin
+            <<V:64/unsigned, X/binary>> = R,
+            {V, X}
+        end).
+
+-define(SHORTSTR_PROP(X, L),
+        begin
+            L = size(X),
+            if L < 256 -> <<L:8, X:L/binary>>;
+               true    -> exit(content_properties_shortstr_overflow)
+            end
+        end).
+
+-define(LONGSTR_PROP(X, L),
+        begin
+            L = size(X),
+            <<L:32, X:L/binary>>
+        end).
+
+-define(OCTET_PROP(X, L),     <<X:8/unsigned>>).
+-define(SHORT_PROP(X, L),     <<X:16/unsigned>>).
+-define(LONG_PROP(X, L),      <<X:32/unsigned>>).
+-define(LONGLONG_PROP(X, L),  <<X:64/unsigned>>).
+-define(TIMESTAMP_PROP(X, L), <<X:64/unsigned>>).
+
+-define(TABLE_PROP(X, T),
+        begin
+            T = rabbit_binary_generator:generate_table(X),
+            <<(size(T)):32, T/binary>>
+        end).
+""")
+    # version/0: same 8.0 -> 0.8 renaming as the module name above.
+    version = "{%d, %d, %d}" % (spec.major, spec.minor, spec.revision)
+    if version == '{8, 0, 0}': version = '{0, 8, 0}'
+    print("version() -> %s." % (version))
+
+    # --- Emit the generated clauses for each function, followed by a
+    # terminal catch-all clause (ending in '.') so every generated
+    # function is complete and fails loudly on unknown input.
+    for m in methods: genLookupMethodName(m)
+    print("lookup_method_name({_ClassId, _MethodId} = Id) -> exit({unknown_method_id, Id}).")
+
+    for c in spec.allClasses(): genLookupClassName(c)
+    print("lookup_class_name(ClassId) -> exit({unknown_class_id, ClassId}).")
+
+    for m in methods: genMethodId(m)
+    print("method_id(Name) -> exit({unknown_method_name, Name}).")
+
+    for m in methods: genMethodHasContent(m)
+    print("method_has_content(Name) -> exit({unknown_method_name, Name}).")
+
+    for m in methods: genMethodIsSynchronous(m)
+    print("is_method_synchronous(Name) -> exit({unknown_method_name, Name}).")
+
+    for m in methods: genMethodRecord(m)
+    print("method_record(Name) -> exit({unknown_method_name, Name}).")
+
+    for m in methods: genMethodFieldNames(m)
+    print("method_fieldnames(Name) -> exit({unknown_method_name, Name}).")
+
+    for m in methods: genDecodeMethodFields(m)
+    print("decode_method_fields(Name, BinaryFields) ->")
+    print("  rabbit_misc:frame_error(Name, BinaryFields).")
+
+    for c in spec.allClasses(): genDecodeProperties(c)
+    print("decode_properties(ClassId, _BinaryFields) -> exit({unknown_class_id, ClassId}).")
+
+    for m in methods: genEncodeMethodFields(m)
+    print("encode_method_fields(Record) -> exit({unknown_method_name, element(1, Record)}).")
+
+    for c in spec.allClasses(): genEncodeProperties(c)
+    print("encode_properties(Record) -> exit({unknown_properties_record, Record}).")
+
+    # Unknown error codes are logged and mapped to INTERNAL_ERROR rather
+    # than crashing the lookup.
+    for (c,v,cls) in spec.constants: genLookupException(c,v,cls)
+    print("lookup_amqp_exception(Code) ->")
+    print("  rabbit_log:warning(\"Unknown AMQP error code '~p'~n\", [Code]),")
+    print("  {true, ?INTERNAL_ERROR, <<\"INTERNAL_ERROR\">>}.")
+
+    for(c,v,cls) in spec.constants: genAmqpException(c,v,cls)
+    print("amqp_exception(_Code) -> undefined.")
+
+def genHrl(spec):
+ def fieldNameList(fields):
+ return ', '.join([erlangize(f.name) for f in fields])
+
+ def fieldNameListDefaults(fields):
+ def fillField(field):
+ result = erlangize(field.name)
+ if field.defaultvalue != None:
+ conv_fn = erlangDefaultValueTypeConvMap[type(field.defaultvalue)]
+ result += ' = ' + conv_fn(field.defaultvalue)
+ return result
+ return ', '.join([fillField(f) for f in fields])
+
+ methods = spec.allMethods()
+
+ printFileHeader()
+ print("-define(PROTOCOL_PORT, %d)." % (spec.port))
+
+ for (c,v,cls) in spec.constants:
+ print("-define(%s, %s)." % (erlangConstantName(c), v))
+
+ print("%% Method field records.")
+ for m in methods:
+ print("-record(%s, {%s})." % (m.erlangName(), fieldNameListDefaults(m.arguments)))
+
+ print("%% Class property records.")
+ for c in spec.allClasses():
+ print("-record('P_%s', {%s})." % (erlangize(c.name), fieldNameList(c.fields)))
+
+
+# Entry points wired into amqp_codegen's do_main_dict: "body" generates
+# the .erl module, "header" the .hrl include, from a JSON spec path.
+def generateErl(specPath):
+    genErl(AmqpSpec(specPath))
+
+def generateHrl(specPath):
+    genHrl(AmqpSpec(specPath))
+
+if __name__ == "__main__":
+    do_main_dict({"header": generateHrl,
+                  "body": generateErl})
+
diff --git a/deps/rabbit_common/development.post.mk b/deps/rabbit_common/development.post.mk
new file mode 100644
index 0000000000..65708dbcd7
--- /dev/null
+++ b/deps/rabbit_common/development.post.mk
@@ -0,0 +1,33 @@
+# --------------------------------------------------------------------
+# Framing sources generation.
+# --------------------------------------------------------------------
+
+PYTHON ?= python
+CODEGEN = $(CURDIR)/codegen.py
+CODEGEN_DIR ?= $(DEPS_DIR)/rabbitmq_codegen
+CODEGEN_AMQP = $(CODEGEN_DIR)/amqp_codegen.py
+
+AMQP_SPEC_JSON_FILES_0_8 = $(CODEGEN_DIR)/amqp-rabbitmq-0.8.json
+AMQP_SPEC_JSON_FILES_0_9_1 = $(CODEGEN_DIR)/amqp-rabbitmq-0.9.1.json \
+ $(CODEGEN_DIR)/credit_extension.json
+
+include/rabbit_framing.hrl:: $(CODEGEN) $(CODEGEN_AMQP) \
+ $(AMQP_SPEC_JSON_FILES_0_9_1) $(AMQP_SPEC_JSON_FILES_0_8)
+ $(gen_verbose) env PYTHONPATH=$(CODEGEN_DIR) \
+ $(PYTHON) $(CODEGEN) --ignore-conflicts header \
+ $(AMQP_SPEC_JSON_FILES_0_9_1) $(AMQP_SPEC_JSON_FILES_0_8) $@
+
+src/rabbit_framing_amqp_0_9_1.erl:: $(CODEGEN) $(CODEGEN_AMQP) \
+ $(AMQP_SPEC_JSON_FILES_0_9_1)
+ $(gen_verbose) env PYTHONPATH=$(CODEGEN_DIR) \
+ $(PYTHON) $(CODEGEN) body $(AMQP_SPEC_JSON_FILES_0_9_1) $@
+
+src/rabbit_framing_amqp_0_8.erl:: $(CODEGEN) $(CODEGEN_AMQP) \
+ $(AMQP_SPEC_JSON_FILES_0_8)
+ $(gen_verbose) env PYTHONPATH=$(CODEGEN_DIR) \
+ $(PYTHON) $(CODEGEN) body $(AMQP_SPEC_JSON_FILES_0_8) $@
+
+clean:: clean-extra-sources
+
+clean-extra-sources:
+ $(gen_verbose) rm -f $(EXTRA_SOURCES)
diff --git a/deps/rabbit_common/development.pre.mk b/deps/rabbit_common/development.pre.mk
new file mode 100644
index 0000000000..0b11877df3
--- /dev/null
+++ b/deps/rabbit_common/development.pre.mk
@@ -0,0 +1,14 @@
+# Variables and recipes in development.*.mk are meant to be used from
+# any Git clone. They are excluded from the files published to Hex.pm.
+# Generated files are published to Hex.pm however so people using this
+# source won't have to depend on Python and rabbitmq-codegen.
+
+BUILD_DEPS = rabbitmq_codegen
+TEST_DEPS = proper
+
+EXTRA_SOURCES += include/rabbit_framing.hrl \
+ src/rabbit_framing_amqp_0_8.erl \
+ src/rabbit_framing_amqp_0_9_1.erl
+
+.DEFAULT_GOAL = all
+$(PROJECT).d:: $(EXTRA_SOURCES)
diff --git a/deps/rabbit_common/erlang.mk b/deps/rabbit_common/erlang.mk
new file mode 100644
index 0000000000..defddc4865
--- /dev/null
+++ b/deps/rabbit_common/erlang.mk
@@ -0,0 +1,7746 @@
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk
+
+ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST)))
+export ERLANG_MK_FILENAME
+
+ERLANG_MK_VERSION = 2019.07.01-40-geb3e4b0
+ERLANG_MK_WITHOUT = plugins/proper
+
+# Make 3.81 and 3.82 are deprecated.
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82)
+$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html)
+endif
+
+# Core configuration.
+
+PROJECT ?= $(notdir $(CURDIR))
+PROJECT := $(strip $(PROJECT))
+
+PROJECT_VERSION ?= rolling
+PROJECT_MOD ?= $(PROJECT)_app
+PROJECT_ENV ?= []
+
+# Verbosity.
+
+V ?= 0
+
+verbose_0 = @
+verbose_2 = set -x;
+verbose = $(verbose_$(V))
+
+ifeq ($(V),3)
+SHELL := $(SHELL) -x
+endif
+
+gen_verbose_0 = @echo " GEN " $@;
+gen_verbose_2 = set -x;
+gen_verbose = $(gen_verbose_$(V))
+
+gen_verbose_esc_0 = @echo " GEN " $$@;
+gen_verbose_esc_2 = set -x;
+gen_verbose_esc = $(gen_verbose_esc_$(V))
+
+# Temporary files directory.
+
+ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk
+export ERLANG_MK_TMP
+
+# "erl" command.
+
+ERL = erl +A1 -noinput -boot no_dot_erlang
+
+# Platform detection.
+
+ifeq ($(PLATFORM),)
+UNAME_S := $(shell uname -s)
+
+ifeq ($(UNAME_S),Linux)
+PLATFORM = linux
+else ifeq ($(UNAME_S),Darwin)
+PLATFORM = darwin
+else ifeq ($(UNAME_S),SunOS)
+PLATFORM = solaris
+else ifeq ($(UNAME_S),GNU)
+PLATFORM = gnu
+else ifeq ($(UNAME_S),FreeBSD)
+PLATFORM = freebsd
+else ifeq ($(UNAME_S),NetBSD)
+PLATFORM = netbsd
+else ifeq ($(UNAME_S),OpenBSD)
+PLATFORM = openbsd
+else ifeq ($(UNAME_S),DragonFly)
+PLATFORM = dragonfly
+else ifeq ($(shell uname -o),Msys)
+PLATFORM = msys2
+else
+$(error Unable to detect platform. Please open a ticket with the output of uname -a.)
+endif
+
+export PLATFORM
+endif
+
+# Core targets.
+
+all:: deps app rel
+
+# Noop to avoid a Make warning when there's nothing to do.
+rel::
+ $(verbose) :
+
+relup:: deps app
+
+check:: tests
+
+clean:: clean-crashdump
+
+clean-crashdump:
+ifneq ($(wildcard erl_crash.dump),)
+ $(gen_verbose) rm -f erl_crash.dump
+endif
+
+distclean:: clean distclean-tmp
+
+$(ERLANG_MK_TMP):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+
+distclean-tmp:
+ $(gen_verbose) rm -rf $(ERLANG_MK_TMP)
+
+help::
+ $(verbose) printf "%s\n" \
+ "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \
+ "Copyright (c) 2013-2016 Loïc Hoguin <essen@ninenines.eu>" \
+ "" \
+ "Usage: [V=1] $(MAKE) [target]..." \
+ "" \
+ "Core targets:" \
+ " all Run deps, app and rel targets in that order" \
+ " app Compile the project" \
+ " deps Fetch dependencies (if needed) and compile them" \
+ " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \
+ " list-deps List dependencies recursively on stdout" \
+ " search q=... Search for a package in the built-in index" \
+ " rel Build a release for this project, if applicable" \
+ " docs Build the documentation for this project" \
+ " install-docs Install the man pages for this project" \
+ " check Compile and run all tests and analysis for this project" \
+ " tests Run the tests for this project" \
+ " clean Delete temporary and output files from most targets" \
+ " distclean Delete all temporary and output files" \
+ " help Display this help and exit" \
+ " erlang-mk Update erlang.mk to the latest version"
+
+# Core functions.
+
+empty :=
+space := $(empty) $(empty)
+tab := $(empty) $(empty)
+comma := ,
+
+define newline
+
+
+endef
+
+define comma_list
+$(subst $(space),$(comma),$(strip $(1)))
+endef
+
+define escape_dquotes
+$(subst ",\",$1)
+endef
+
+# Adding erlang.mk to make Erlang scripts who call init:get_plain_arguments() happy.
+define erlang
+$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk
+endef
+
+ifeq ($(PLATFORM),msys2)
+core_native_path = $(shell cygpath -m $1)
+else
+core_native_path = $1
+endif
+
+core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2
+
+core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1)))
+
+# We skip files that contain spaces because they end up causing issues.
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " "))
+
+core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1)))))))))))))))))))))))))))
+
+core_ls = $(filter-out $(1),$(shell echo $(1)))
+
+# @todo Use a solution that does not require using perl.
+core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2)
+
+define core_render
+ printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2)
+endef
+
+# Automated update.
+
+ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk
+ERLANG_MK_COMMIT ?=
+ERLANG_MK_BUILD_CONFIG ?= build.config
+ERLANG_MK_BUILD_DIR ?= .erlang.mk.build
+
+erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT)
+erlang-mk:
+ifdef ERLANG_MK_COMMIT
+ $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+ $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT)
+else
+ $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR)
+endif
+ $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi
+ $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1
+ $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk
+ $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR)
+ $(verbose) rm -rf $(ERLANG_MK_TMP)
+
+# The erlang.mk package index is bundled in the default erlang.mk build.
+# Search for the string "copyright" to skip to the rest of the code.
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-kerl
+
+KERL_INSTALL_DIR ?= $(HOME)/erlang
+
+ifeq ($(strip $(KERL)),)
+KERL := $(ERLANG_MK_TMP)/kerl/kerl
+endif
+
+KERL_DIR = $(ERLANG_MK_TMP)/kerl
+
+export KERL
+
+KERL_GIT ?= https://github.com/kerl/kerl
+KERL_COMMIT ?= master
+
+KERL_MAKEFLAGS ?=
+
+OTP_GIT ?= https://github.com/erlang/otp
+
+define kerl_otp_target
+$(KERL_INSTALL_DIR)/$(1): $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \
+ $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \
+ fi
+endef
+
+define kerl_hipe_target
+$(KERL_INSTALL_DIR)/$1-native: $(KERL)
+ $(verbose) if [ ! -d $$@ ]; then \
+ KERL_CONFIGURE_OPTIONS=--enable-native-libs \
+ MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \
+ $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \
+ fi
+endef
+
+$(KERL): $(KERL_DIR)
+
+$(KERL_DIR): | $(ERLANG_MK_TMP)
+ $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl
+ $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT)
+ $(verbose) chmod +x $(KERL)
+
+distclean:: distclean-kerl
+
+distclean-kerl:
+ $(gen_verbose) rm -rf $(KERL_DIR)
+
+# Allow users to select which version of Erlang/OTP to use for a project.
+
+ifneq ($(strip $(LATEST_ERLANG_OTP)),)
+# In some environments it is necessary to filter out master.
+ERLANG_OTP := $(notdir $(lastword $(sort\
+ $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\
+ $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native]))))))
+endif
+
+ERLANG_OTP ?=
+ERLANG_HIPE ?=
+
+# Use kerl to enforce a specific Erlang/OTP version for a project.
+ifneq ($(strip $(ERLANG_OTP)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_otp_target,$(ERLANG_OTP)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),)
+$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+else
+# Same for a HiPE enabled VM.
+ifneq ($(strip $(ERLANG_HIPE)),)
+export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH)
+SHELL := env PATH=$(PATH) $(SHELL)
+$(eval $(call kerl_hipe_target,$(ERLANG_HIPE)))
+
+# Build Erlang/OTP only if it doesn't already exist.
+ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),)
+$(info Building HiPE-enabled Erlang/OTP $(ERLANG_OTP)... Please wait...)
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2)
+endif
+
+endif
+endif
+
+PACKAGES += aberth
+pkg_aberth_name = aberth
+pkg_aberth_description = Generic BERT-RPC server in Erlang
+pkg_aberth_homepage = https://github.com/a13x/aberth
+pkg_aberth_fetch = git
+pkg_aberth_repo = https://github.com/a13x/aberth
+pkg_aberth_commit = master
+
+PACKAGES += active
+pkg_active_name = active
+pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running
+pkg_active_homepage = https://github.com/proger/active
+pkg_active_fetch = git
+pkg_active_repo = https://github.com/proger/active
+pkg_active_commit = master
+
+PACKAGES += actordb_core
+pkg_actordb_core_name = actordb_core
+pkg_actordb_core_description = ActorDB main source
+pkg_actordb_core_homepage = http://www.actordb.com/
+pkg_actordb_core_fetch = git
+pkg_actordb_core_repo = https://github.com/biokoda/actordb_core
+pkg_actordb_core_commit = master
+
+PACKAGES += actordb_thrift
+pkg_actordb_thrift_name = actordb_thrift
+pkg_actordb_thrift_description = Thrift API for ActorDB
+pkg_actordb_thrift_homepage = http://www.actordb.com/
+pkg_actordb_thrift_fetch = git
+pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift
+pkg_actordb_thrift_commit = master
+
+PACKAGES += aleppo
+pkg_aleppo_name = aleppo
+pkg_aleppo_description = Alternative Erlang Pre-Processor
+pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo
+pkg_aleppo_fetch = git
+pkg_aleppo_repo = https://github.com/ErlyORM/aleppo
+pkg_aleppo_commit = master
+
+PACKAGES += alog
+pkg_alog_name = alog
+pkg_alog_description = Simply the best logging framework for Erlang
+pkg_alog_homepage = https://github.com/siberian-fast-food/alogger
+pkg_alog_fetch = git
+pkg_alog_repo = https://github.com/siberian-fast-food/alogger
+pkg_alog_commit = master
+
+PACKAGES += amqp_client
+pkg_amqp_client_name = amqp_client
+pkg_amqp_client_description = RabbitMQ Erlang AMQP client
+pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html
+pkg_amqp_client_fetch = git
+pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git
+pkg_amqp_client_commit = master
+
+PACKAGES += annotations
+pkg_annotations_name = annotations
+pkg_annotations_description = Simple code instrumentation utilities
+pkg_annotations_homepage = https://github.com/hyperthunk/annotations
+pkg_annotations_fetch = git
+pkg_annotations_repo = https://github.com/hyperthunk/annotations
+pkg_annotations_commit = master
+
+PACKAGES += antidote
+pkg_antidote_name = antidote
+pkg_antidote_description = Large-scale computation without synchronisation
+pkg_antidote_homepage = https://syncfree.lip6.fr/
+pkg_antidote_fetch = git
+pkg_antidote_repo = https://github.com/SyncFree/antidote
+pkg_antidote_commit = master
+
+PACKAGES += apns
+pkg_apns_name = apns
+pkg_apns_description = Apple Push Notification Server for Erlang
+pkg_apns_homepage = http://inaka.github.com/apns4erl
+pkg_apns_fetch = git
+pkg_apns_repo = https://github.com/inaka/apns4erl
+pkg_apns_commit = master
+
+PACKAGES += asciideck
+pkg_asciideck_name = asciideck
+pkg_asciideck_description = Asciidoc for Erlang.
+pkg_asciideck_homepage = https://ninenines.eu
+pkg_asciideck_fetch = git
+pkg_asciideck_repo = https://github.com/ninenines/asciideck
+pkg_asciideck_commit = master
+
+PACKAGES += azdht
+pkg_azdht_name = azdht
+pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang
+pkg_azdht_homepage = https://github.com/arcusfelis/azdht
+pkg_azdht_fetch = git
+pkg_azdht_repo = https://github.com/arcusfelis/azdht
+pkg_azdht_commit = master
+
+PACKAGES += backoff
+pkg_backoff_name = backoff
+pkg_backoff_description = Simple exponential backoffs in Erlang
+pkg_backoff_homepage = https://github.com/ferd/backoff
+pkg_backoff_fetch = git
+pkg_backoff_repo = https://github.com/ferd/backoff
+pkg_backoff_commit = master
+
+PACKAGES += barrel_tcp
+pkg_barrel_tcp_name = barrel_tcp
+pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang.
+pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_fetch = git
+pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp
+pkg_barrel_tcp_commit = master
+
+PACKAGES += basho_bench
+pkg_basho_bench_name = basho_bench
+pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for.
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench
+pkg_basho_bench_fetch = git
+pkg_basho_bench_repo = https://github.com/basho/basho_bench
+pkg_basho_bench_commit = master
+
+PACKAGES += bcrypt
+pkg_bcrypt_name = bcrypt
+pkg_bcrypt_description = Bcrypt Erlang / C library
+pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt
+pkg_bcrypt_fetch = git
+pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git
+pkg_bcrypt_commit = master
+
+PACKAGES += beam
+pkg_beam_name = beam
+pkg_beam_description = BEAM emulator written in Erlang
+pkg_beam_homepage = https://github.com/tonyrog/beam
+pkg_beam_fetch = git
+pkg_beam_repo = https://github.com/tonyrog/beam
+pkg_beam_commit = master
+
+PACKAGES += beanstalk
+pkg_beanstalk_name = beanstalk
+pkg_beanstalk_description = An Erlang client for beanstalkd
+pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_fetch = git
+pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk
+pkg_beanstalk_commit = master
+
+PACKAGES += bear
+pkg_bear_name = bear
+pkg_bear_description = a set of statistics functions for erlang
+pkg_bear_homepage = https://github.com/boundary/bear
+pkg_bear_fetch = git
+pkg_bear_repo = https://github.com/boundary/bear
+pkg_bear_commit = master
+
+PACKAGES += bertconf
+pkg_bertconf_name = bertconf
+pkg_bertconf_description = Make ETS tables out of static BERT files that are auto-reloaded
+pkg_bertconf_homepage = https://github.com/ferd/bertconf
+pkg_bertconf_fetch = git
+pkg_bertconf_repo = https://github.com/ferd/bertconf
+pkg_bertconf_commit = master
+
+PACKAGES += bifrost
+pkg_bifrost_name = bifrost
+pkg_bifrost_description = Erlang FTP Server Framework
+pkg_bifrost_homepage = https://github.com/thorstadt/bifrost
+pkg_bifrost_fetch = git
+pkg_bifrost_repo = https://github.com/thorstadt/bifrost
+pkg_bifrost_commit = master
+
+PACKAGES += binpp
+pkg_binpp_name = binpp
+pkg_binpp_description = Erlang Binary Pretty Printer
+pkg_binpp_homepage = https://github.com/jtendo/binpp
+pkg_binpp_fetch = git
+pkg_binpp_repo = https://github.com/jtendo/binpp
+pkg_binpp_commit = master
+
+PACKAGES += bisect
+pkg_bisect_name = bisect
+pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang
+pkg_bisect_homepage = https://github.com/knutin/bisect
+pkg_bisect_fetch = git
+pkg_bisect_repo = https://github.com/knutin/bisect
+pkg_bisect_commit = master
+
+PACKAGES += bitcask
+pkg_bitcask_name = bitcask
+pkg_bitcask_description = because you need another a key/value storage engine
+pkg_bitcask_homepage = https://github.com/basho/bitcask
+pkg_bitcask_fetch = git
+pkg_bitcask_repo = https://github.com/basho/bitcask
+pkg_bitcask_commit = develop
+
+PACKAGES += bitstore
+pkg_bitstore_name = bitstore
+pkg_bitstore_description = A document based ontology development environment
+pkg_bitstore_homepage = https://github.com/bdionne/bitstore
+pkg_bitstore_fetch = git
+pkg_bitstore_repo = https://github.com/bdionne/bitstore
+pkg_bitstore_commit = master
+
+PACKAGES += bootstrap
+pkg_bootstrap_name = bootstrap
+pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application.
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap
+pkg_bootstrap_fetch = git
+pkg_bootstrap_repo = https://github.com/schlagert/bootstrap
+pkg_bootstrap_commit = master
+
+PACKAGES += boss
+pkg_boss_name = boss
+pkg_boss_description = Erlang web MVC, now featuring Comet
+pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_fetch = git
+pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss
+pkg_boss_commit = master
+
+PACKAGES += boss_db
+pkg_boss_db_name = boss_db
+pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang
+pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db
+pkg_boss_db_fetch = git
+pkg_boss_db_repo = https://github.com/ErlyORM/boss_db
+pkg_boss_db_commit = master
+
+PACKAGES += brod
+pkg_brod_name = brod
+pkg_brod_description = Kafka client in Erlang
+pkg_brod_homepage = https://github.com/klarna/brod
+pkg_brod_fetch = git
+pkg_brod_repo = https://github.com/klarna/brod.git
+pkg_brod_commit = master
+
+PACKAGES += bson
+pkg_bson_name = bson
+pkg_bson_description = BSON documents in Erlang, see bsonspec.org
+pkg_bson_homepage = https://github.com/comtihon/bson-erlang
+pkg_bson_fetch = git
+pkg_bson_repo = https://github.com/comtihon/bson-erlang
+pkg_bson_commit = master
+
+PACKAGES += bullet
+pkg_bullet_name = bullet
+pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy.
+pkg_bullet_homepage = http://ninenines.eu
+pkg_bullet_fetch = git
+pkg_bullet_repo = https://github.com/ninenines/bullet
+pkg_bullet_commit = master
+
+PACKAGES += cache
+pkg_cache_name = cache
+pkg_cache_description = Erlang in-memory cache
+pkg_cache_homepage = https://github.com/fogfish/cache
+pkg_cache_fetch = git
+pkg_cache_repo = https://github.com/fogfish/cache
+pkg_cache_commit = master
+
+PACKAGES += cake
+pkg_cake_name = cake
+pkg_cake_description = Really simple terminal colorization
+pkg_cake_homepage = https://github.com/darach/cake-erl
+pkg_cake_fetch = git
+pkg_cake_repo = https://github.com/darach/cake-erl
+pkg_cake_commit = master
+
+PACKAGES += carotene
+pkg_carotene_name = carotene
+pkg_carotene_description = Real-time server
+pkg_carotene_homepage = https://github.com/carotene/carotene
+pkg_carotene_fetch = git
+pkg_carotene_repo = https://github.com/carotene/carotene
+pkg_carotene_commit = master
+
+PACKAGES += cberl
+pkg_cberl_name = cberl
+pkg_cberl_description = NIF based Erlang bindings for Couchbase
+pkg_cberl_homepage = https://github.com/chitika/cberl
+pkg_cberl_fetch = git
+pkg_cberl_repo = https://github.com/chitika/cberl
+pkg_cberl_commit = master
+
+PACKAGES += cecho
+pkg_cecho_name = cecho
+pkg_cecho_description = An ncurses library for Erlang
+pkg_cecho_homepage = https://github.com/mazenharake/cecho
+pkg_cecho_fetch = git
+pkg_cecho_repo = https://github.com/mazenharake/cecho
+pkg_cecho_commit = master
+
+PACKAGES += cferl
+pkg_cferl_name = cferl
+pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client
+pkg_cferl_homepage = https://github.com/ddossot/cferl
+pkg_cferl_fetch = git
+pkg_cferl_repo = https://github.com/ddossot/cferl
+pkg_cferl_commit = master
+
+PACKAGES += chaos_monkey
+pkg_chaos_monkey_name = chaos_monkey
+pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes.
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_fetch = git
+pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey
+pkg_chaos_monkey_commit = master
+
+PACKAGES += check_node
+pkg_check_node_name = check_node
+pkg_check_node_description = Nagios Scripts for monitoring Riak
+pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios
+pkg_check_node_fetch = git
+pkg_check_node_repo = https://github.com/basho-labs/riak_nagios
+pkg_check_node_commit = master
+
+PACKAGES += chronos
+pkg_chronos_name = chronos
+pkg_chronos_description = Timer module for Erlang that makes it easy to abstract time out of the tests.
+pkg_chronos_homepage = https://github.com/lehoff/chronos
+pkg_chronos_fetch = git
+pkg_chronos_repo = https://github.com/lehoff/chronos
+pkg_chronos_commit = master
+
+PACKAGES += chumak
+pkg_chumak_name = chumak
+pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol.
+pkg_chumak_homepage = http://choven.ca
+pkg_chumak_fetch = git
+pkg_chumak_repo = https://github.com/chovencorp/chumak
+pkg_chumak_commit = master
+
+PACKAGES += cl
+pkg_cl_name = cl
+pkg_cl_description = OpenCL binding for Erlang
+pkg_cl_homepage = https://github.com/tonyrog/cl
+pkg_cl_fetch = git
+pkg_cl_repo = https://github.com/tonyrog/cl
+pkg_cl_commit = master
+
+PACKAGES += clique
+pkg_clique_name = clique
+pkg_clique_description = CLI Framework for Erlang
+pkg_clique_homepage = https://github.com/basho/clique
+pkg_clique_fetch = git
+pkg_clique_repo = https://github.com/basho/clique
+pkg_clique_commit = develop
+
+PACKAGES += cloudi_core
+pkg_cloudi_core_name = cloudi_core
+pkg_cloudi_core_description = CloudI internal service runtime
+pkg_cloudi_core_homepage = http://cloudi.org/
+pkg_cloudi_core_fetch = git
+pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core
+pkg_cloudi_core_commit = master
+
+PACKAGES += cloudi_service_api_requests
+pkg_cloudi_service_api_requests_name = cloudi_service_api_requests
+pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support)
+pkg_cloudi_service_api_requests_homepage = http://cloudi.org/
+pkg_cloudi_service_api_requests_fetch = git
+pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests
+pkg_cloudi_service_api_requests_commit = master
+
+PACKAGES += cloudi_service_db
+pkg_cloudi_service_db_name = cloudi_service_db
+pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic)
+pkg_cloudi_service_db_homepage = http://cloudi.org/
+pkg_cloudi_service_db_fetch = git
+pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db
+pkg_cloudi_service_db_commit = master
+
+PACKAGES += cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service
+pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_fetch = git
+pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra
+pkg_cloudi_service_db_cassandra_commit = master
+
+PACKAGES += cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service
+pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_cassandra_cql_fetch = git
+pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql
+pkg_cloudi_service_db_cassandra_cql_commit = master
+
+PACKAGES += cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service
+pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/
+pkg_cloudi_service_db_couchdb_fetch = git
+pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb
+pkg_cloudi_service_db_couchdb_commit = master
+
+PACKAGES += cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service
+pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/
+pkg_cloudi_service_db_elasticsearch_fetch = git
+pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch
+pkg_cloudi_service_db_elasticsearch_commit = master
+
+PACKAGES += cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_description = memcached CloudI Service
+pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/
+pkg_cloudi_service_db_memcached_fetch = git
+pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached
+pkg_cloudi_service_db_memcached_commit = master
+
+PACKAGES += cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_description = MySQL CloudI Service
+pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_mysql_fetch = git
+pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql
+pkg_cloudi_service_db_mysql_commit = master
+
+PACKAGES += cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service
+pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/
+pkg_cloudi_service_db_pgsql_fetch = git
+pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql
+pkg_cloudi_service_db_pgsql_commit = master
+
+PACKAGES += cloudi_service_db_riak
+pkg_cloudi_service_db_riak_name = cloudi_service_db_riak
+pkg_cloudi_service_db_riak_description = Riak CloudI Service
+pkg_cloudi_service_db_riak_homepage = http://cloudi.org/
+pkg_cloudi_service_db_riak_fetch = git
+pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak
+pkg_cloudi_service_db_riak_commit = master
+
+PACKAGES += cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service
+pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/
+pkg_cloudi_service_db_tokyotyrant_fetch = git
+pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant
+pkg_cloudi_service_db_tokyotyrant_commit = master
+
+PACKAGES += cloudi_service_filesystem
+pkg_cloudi_service_filesystem_name = cloudi_service_filesystem
+pkg_cloudi_service_filesystem_description = Filesystem CloudI Service
+pkg_cloudi_service_filesystem_homepage = http://cloudi.org/
+pkg_cloudi_service_filesystem_fetch = git
+pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem
+pkg_cloudi_service_filesystem_commit = master
+
+PACKAGES += cloudi_service_http_client
+pkg_cloudi_service_http_client_name = cloudi_service_http_client
+pkg_cloudi_service_http_client_description = HTTP client CloudI Service
+pkg_cloudi_service_http_client_homepage = http://cloudi.org/
+pkg_cloudi_service_http_client_fetch = git
+pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client
+pkg_cloudi_service_http_client_commit = master
+
+PACKAGES += cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service
+pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/
+pkg_cloudi_service_http_cowboy_fetch = git
+pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy
+pkg_cloudi_service_http_cowboy_commit = master
+
+PACKAGES += cloudi_service_http_elli
+pkg_cloudi_service_http_elli_name = cloudi_service_http_elli
+pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service
+pkg_cloudi_service_http_elli_homepage = http://cloudi.org/
+pkg_cloudi_service_http_elli_fetch = git
+pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli
+pkg_cloudi_service_http_elli_commit = master
+
+PACKAGES += cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service
+pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/
+pkg_cloudi_service_map_reduce_fetch = git
+pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce
+pkg_cloudi_service_map_reduce_commit = master
+
+PACKAGES += cloudi_service_oauth1
+pkg_cloudi_service_oauth1_name = cloudi_service_oauth1
+pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service
+pkg_cloudi_service_oauth1_homepage = http://cloudi.org/
+pkg_cloudi_service_oauth1_fetch = git
+pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1
+pkg_cloudi_service_oauth1_commit = master
+
+PACKAGES += cloudi_service_queue
+pkg_cloudi_service_queue_name = cloudi_service_queue
+pkg_cloudi_service_queue_description = Persistent Queue Service
+pkg_cloudi_service_queue_homepage = http://cloudi.org/
+pkg_cloudi_service_queue_fetch = git
+pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue
+pkg_cloudi_service_queue_commit = master
+
+PACKAGES += cloudi_service_quorum
+pkg_cloudi_service_quorum_name = cloudi_service_quorum
+pkg_cloudi_service_quorum_description = CloudI Quorum Service
+pkg_cloudi_service_quorum_homepage = http://cloudi.org/
+pkg_cloudi_service_quorum_fetch = git
+pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum
+pkg_cloudi_service_quorum_commit = master
+
+PACKAGES += cloudi_service_router
+pkg_cloudi_service_router_name = cloudi_service_router
+pkg_cloudi_service_router_description = CloudI Router Service
+pkg_cloudi_service_router_homepage = http://cloudi.org/
+pkg_cloudi_service_router_fetch = git
+pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router
+pkg_cloudi_service_router_commit = master
+
+PACKAGES += cloudi_service_tcp
+pkg_cloudi_service_tcp_name = cloudi_service_tcp
+pkg_cloudi_service_tcp_description = TCP CloudI Service
+pkg_cloudi_service_tcp_homepage = http://cloudi.org/
+pkg_cloudi_service_tcp_fetch = git
+pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp
+pkg_cloudi_service_tcp_commit = master
+
+PACKAGES += cloudi_service_timers
+pkg_cloudi_service_timers_name = cloudi_service_timers
+pkg_cloudi_service_timers_description = Timers CloudI Service
+pkg_cloudi_service_timers_homepage = http://cloudi.org/
+pkg_cloudi_service_timers_fetch = git
+pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers
+pkg_cloudi_service_timers_commit = master
+
+PACKAGES += cloudi_service_udp
+pkg_cloudi_service_udp_name = cloudi_service_udp
+pkg_cloudi_service_udp_description = UDP CloudI Service
+pkg_cloudi_service_udp_homepage = http://cloudi.org/
+pkg_cloudi_service_udp_fetch = git
+pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp
+pkg_cloudi_service_udp_commit = master
+
+PACKAGES += cloudi_service_validate
+pkg_cloudi_service_validate_name = cloudi_service_validate
+pkg_cloudi_service_validate_description = CloudI Validate Service
+pkg_cloudi_service_validate_homepage = http://cloudi.org/
+pkg_cloudi_service_validate_fetch = git
+pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate
+pkg_cloudi_service_validate_commit = master
+
+PACKAGES += cloudi_service_zeromq
+pkg_cloudi_service_zeromq_name = cloudi_service_zeromq
+pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service
+pkg_cloudi_service_zeromq_homepage = http://cloudi.org/
+pkg_cloudi_service_zeromq_fetch = git
+pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq
+pkg_cloudi_service_zeromq_commit = master
+
+PACKAGES += cluster_info
+pkg_cluster_info_name = cluster_info
+pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app
+pkg_cluster_info_homepage = https://github.com/basho/cluster_info
+pkg_cluster_info_fetch = git
+pkg_cluster_info_repo = https://github.com/basho/cluster_info
+pkg_cluster_info_commit = master
+
+PACKAGES += color
+pkg_color_name = color
+pkg_color_description = ANSI colors for your Erlang
+pkg_color_homepage = https://github.com/julianduque/erlang-color
+pkg_color_fetch = git
+pkg_color_repo = https://github.com/julianduque/erlang-color
+pkg_color_commit = master
+
+PACKAGES += confetti
+pkg_confetti_name = confetti
+pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids
+pkg_confetti_homepage = https://github.com/jtendo/confetti
+pkg_confetti_fetch = git
+pkg_confetti_repo = https://github.com/jtendo/confetti
+pkg_confetti_commit = master
+
+PACKAGES += couchbeam
+pkg_couchbeam_name = couchbeam
+pkg_couchbeam_description = Apache CouchDB client in Erlang
+pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam
+pkg_couchbeam_fetch = git
+pkg_couchbeam_repo = https://github.com/benoitc/couchbeam
+pkg_couchbeam_commit = master
+
+PACKAGES += covertool
+pkg_covertool_name = covertool
+pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports
+pkg_covertool_homepage = https://github.com/idubrov/covertool
+pkg_covertool_fetch = git
+pkg_covertool_repo = https://github.com/idubrov/covertool
+pkg_covertool_commit = master
+
+PACKAGES += cowboy
+pkg_cowboy_name = cowboy
+pkg_cowboy_description = Small, fast and modular HTTP server.
+pkg_cowboy_homepage = http://ninenines.eu
+pkg_cowboy_fetch = git
+pkg_cowboy_repo = https://github.com/ninenines/cowboy
+pkg_cowboy_commit = 1.0.4
+
+PACKAGES += cowdb
+pkg_cowdb_name = cowdb
+pkg_cowdb_description = Pure Key/Value database library for Erlang Applications
+pkg_cowdb_homepage = https://github.com/refuge/cowdb
+pkg_cowdb_fetch = git
+pkg_cowdb_repo = https://github.com/refuge/cowdb
+pkg_cowdb_commit = master
+
+PACKAGES += cowlib
+pkg_cowlib_name = cowlib
+pkg_cowlib_description = Support library for manipulating Web protocols.
+pkg_cowlib_homepage = http://ninenines.eu
+pkg_cowlib_fetch = git
+pkg_cowlib_repo = https://github.com/ninenines/cowlib
+pkg_cowlib_commit = 1.0.2
+
+PACKAGES += cpg
+pkg_cpg_name = cpg
+pkg_cpg_description = CloudI Process Groups
+pkg_cpg_homepage = https://github.com/okeuday/cpg
+pkg_cpg_fetch = git
+pkg_cpg_repo = https://github.com/okeuday/cpg
+pkg_cpg_commit = master
+
+PACKAGES += cqerl
+pkg_cqerl_name = cqerl
+pkg_cqerl_description = Native Erlang CQL client for Cassandra
+pkg_cqerl_homepage = https://matehat.github.io/cqerl/
+pkg_cqerl_fetch = git
+pkg_cqerl_repo = https://github.com/matehat/cqerl
+pkg_cqerl_commit = master
+
+PACKAGES += cr
+pkg_cr_name = cr
+pkg_cr_description = Chain Replication
+pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm
+pkg_cr_fetch = git
+pkg_cr_repo = https://github.com/spawnproc/cr
+pkg_cr_commit = master
+
+PACKAGES += cuttlefish
+pkg_cuttlefish_name = cuttlefish
+pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me?
+pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish
+pkg_cuttlefish_fetch = git
+pkg_cuttlefish_repo = https://github.com/basho/cuttlefish
+pkg_cuttlefish_commit = master
+
+PACKAGES += damocles
+pkg_damocles_name = damocles
+pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box.
+pkg_damocles_homepage = https://github.com/lostcolony/damocles
+pkg_damocles_fetch = git
+pkg_damocles_repo = https://github.com/lostcolony/damocles
+pkg_damocles_commit = master
+
+PACKAGES += debbie
+pkg_debbie_name = debbie
+pkg_debbie_description = .DEB Built In Erlang
+pkg_debbie_homepage = https://github.com/crownedgrouse/debbie
+pkg_debbie_fetch = git
+pkg_debbie_repo = https://github.com/crownedgrouse/debbie
+pkg_debbie_commit = master
+
+PACKAGES += decimal
+pkg_decimal_name = decimal
+pkg_decimal_description = An Erlang decimal arithmetic library
+pkg_decimal_homepage = https://github.com/tim/erlang-decimal
+pkg_decimal_fetch = git
+pkg_decimal_repo = https://github.com/tim/erlang-decimal
+pkg_decimal_commit = master
+
+PACKAGES += detergent
+pkg_detergent_name = detergent
+pkg_detergent_description = An emulsifying Erlang SOAP library
+pkg_detergent_homepage = https://github.com/devinus/detergent
+pkg_detergent_fetch = git
+pkg_detergent_repo = https://github.com/devinus/detergent
+pkg_detergent_commit = master
+
+PACKAGES += detest
+pkg_detest_name = detest
+pkg_detest_description = Tool for running tests on a cluster of erlang nodes
+pkg_detest_homepage = https://github.com/biokoda/detest
+pkg_detest_fetch = git
+pkg_detest_repo = https://github.com/biokoda/detest
+pkg_detest_commit = master
+
+PACKAGES += dh_date
+pkg_dh_date_name = dh_date
+pkg_dh_date_description = Date formatting / parsing library for erlang
+pkg_dh_date_homepage = https://github.com/daleharvey/dh_date
+pkg_dh_date_fetch = git
+pkg_dh_date_repo = https://github.com/daleharvey/dh_date
+pkg_dh_date_commit = master
+
+PACKAGES += dirbusterl
+pkg_dirbusterl_name = dirbusterl
+pkg_dirbusterl_description = DirBuster successor in Erlang
+pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_fetch = git
+pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl
+pkg_dirbusterl_commit = master
+
+PACKAGES += dispcount
+pkg_dispcount_name = dispcount
+pkg_dispcount_description = Erlang task dispatcher based on ETS counters.
+pkg_dispcount_homepage = https://github.com/ferd/dispcount
+pkg_dispcount_fetch = git
+pkg_dispcount_repo = https://github.com/ferd/dispcount
+pkg_dispcount_commit = master
+
+PACKAGES += dlhttpc
+pkg_dlhttpc_name = dlhttpc
+pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints
+pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_fetch = git
+pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc
+pkg_dlhttpc_commit = master
+
+PACKAGES += dns
+pkg_dns_name = dns
+pkg_dns_description = Erlang DNS library
+pkg_dns_homepage = https://github.com/aetrion/dns_erlang
+pkg_dns_fetch = git
+pkg_dns_repo = https://github.com/aetrion/dns_erlang
+pkg_dns_commit = master
+
+PACKAGES += dnssd
+pkg_dnssd_name = dnssd
+pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation
+pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_fetch = git
+pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang
+pkg_dnssd_commit = master
+
+PACKAGES += dynamic_compile
+pkg_dynamic_compile_name = dynamic_compile
+pkg_dynamic_compile_description = compile and load erlang modules from string input
+pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_fetch = git
+pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile
+pkg_dynamic_compile_commit = master
+
+PACKAGES += e2
+pkg_e2_name = e2
+pkg_e2_description = Library to simplify writing correct OTP applications.
+pkg_e2_homepage = http://e2project.org
+pkg_e2_fetch = git
+pkg_e2_repo = https://github.com/gar1t/e2
+pkg_e2_commit = master
+
+PACKAGES += eamf
+pkg_eamf_name = eamf
+pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang
+pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_fetch = git
+pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf
+pkg_eamf_commit = master
+
+PACKAGES += eavro
+pkg_eavro_name = eavro
+pkg_eavro_description = Apache Avro encoder/decoder
+pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_fetch = git
+pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro
+pkg_eavro_commit = master
+
+PACKAGES += ecapnp
+pkg_ecapnp_name = ecapnp
+pkg_ecapnp_description = Cap'n Proto library for Erlang
+pkg_ecapnp_homepage = https://github.com/kaos/ecapnp
+pkg_ecapnp_fetch = git
+pkg_ecapnp_repo = https://github.com/kaos/ecapnp
+pkg_ecapnp_commit = master
+
+PACKAGES += econfig
+pkg_econfig_name = econfig
+pkg_econfig_description = simple Erlang config handler using INI files
+pkg_econfig_homepage = https://github.com/benoitc/econfig
+pkg_econfig_fetch = git
+pkg_econfig_repo = https://github.com/benoitc/econfig
+pkg_econfig_commit = master
+
+PACKAGES += edate
+pkg_edate_name = edate
+pkg_edate_description = date manipulation library for erlang
+pkg_edate_homepage = https://github.com/dweldon/edate
+pkg_edate_fetch = git
+pkg_edate_repo = https://github.com/dweldon/edate
+pkg_edate_commit = master
+
+PACKAGES += edgar
+pkg_edgar_name = edgar
+pkg_edgar_description = Erlang Does GNU AR
+pkg_edgar_homepage = https://github.com/crownedgrouse/edgar
+pkg_edgar_fetch = git
+pkg_edgar_repo = https://github.com/crownedgrouse/edgar
+pkg_edgar_commit = master
+
+PACKAGES += edis
+pkg_edis_name = edis
+pkg_edis_description = An Erlang implementation of Redis KV Store
+pkg_edis_homepage = http://inaka.github.com/edis/
+pkg_edis_fetch = git
+pkg_edis_repo = https://github.com/inaka/edis
+pkg_edis_commit = master
+
+PACKAGES += edns
+pkg_edns_name = edns
+pkg_edns_description = Erlang/OTP DNS server
+pkg_edns_homepage = https://github.com/hcvst/erlang-dns
+pkg_edns_fetch = git
+pkg_edns_repo = https://github.com/hcvst/erlang-dns
+pkg_edns_commit = master
+
+PACKAGES += edown
+pkg_edown_name = edown
+pkg_edown_description = EDoc extension for generating Github-flavored Markdown
+pkg_edown_homepage = https://github.com/uwiger/edown
+pkg_edown_fetch = git
+pkg_edown_repo = https://github.com/uwiger/edown
+pkg_edown_commit = master
+
+PACKAGES += eep
+pkg_eep_name = eep
+pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy
+pkg_eep_homepage = https://github.com/virtan/eep
+pkg_eep_fetch = git
+pkg_eep_repo = https://github.com/virtan/eep
+pkg_eep_commit = master
+
+PACKAGES += eep_app
+pkg_eep_app_name = eep_app
+pkg_eep_app_description = Embedded Event Processing
+pkg_eep_app_homepage = https://github.com/darach/eep-erl
+pkg_eep_app_fetch = git
+pkg_eep_app_repo = https://github.com/darach/eep-erl
+pkg_eep_app_commit = master
+
+PACKAGES += efene
+pkg_efene_name = efene
+pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX
+pkg_efene_homepage = https://github.com/efene/efene
+pkg_efene_fetch = git
+pkg_efene_repo = https://github.com/efene/efene
+pkg_efene_commit = master
+
+PACKAGES += egeoip
+pkg_egeoip_name = egeoip
+pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database.
+pkg_egeoip_homepage = https://github.com/mochi/egeoip
+pkg_egeoip_fetch = git
+pkg_egeoip_repo = https://github.com/mochi/egeoip
+pkg_egeoip_commit = master
+
+PACKAGES += ehsa
+pkg_ehsa_name = ehsa
+pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules
+pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_fetch = hg
+pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa
+pkg_ehsa_commit = default
+
+PACKAGES += ej
+pkg_ej_name = ej
+pkg_ej_description = Helper module for working with Erlang terms representing JSON
+pkg_ej_homepage = https://github.com/seth/ej
+pkg_ej_fetch = git
+pkg_ej_repo = https://github.com/seth/ej
+pkg_ej_commit = master
+
+PACKAGES += ejabberd
+pkg_ejabberd_name = ejabberd
+pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform
+pkg_ejabberd_homepage = https://github.com/processone/ejabberd
+pkg_ejabberd_fetch = git
+pkg_ejabberd_repo = https://github.com/processone/ejabberd
+pkg_ejabberd_commit = master
+
+PACKAGES += ejwt
+pkg_ejwt_name = ejwt
+pkg_ejwt_description = erlang library for JSON Web Token
+pkg_ejwt_homepage = https://github.com/artefactop/ejwt
+pkg_ejwt_fetch = git
+pkg_ejwt_repo = https://github.com/artefactop/ejwt
+pkg_ejwt_commit = master
+
+PACKAGES += ekaf
+pkg_ekaf_name = ekaf
+pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang.
+pkg_ekaf_homepage = https://github.com/helpshift/ekaf
+pkg_ekaf_fetch = git
+pkg_ekaf_repo = https://github.com/helpshift/ekaf
+pkg_ekaf_commit = master
+
+PACKAGES += elarm
+pkg_elarm_name = elarm
+pkg_elarm_description = Alarm Manager for Erlang.
+pkg_elarm_homepage = https://github.com/esl/elarm
+pkg_elarm_fetch = git
+pkg_elarm_repo = https://github.com/esl/elarm
+pkg_elarm_commit = master
+
+PACKAGES += eleveldb
+pkg_eleveldb_name = eleveldb
+pkg_eleveldb_description = Erlang LevelDB API
+pkg_eleveldb_homepage = https://github.com/basho/eleveldb
+pkg_eleveldb_fetch = git
+pkg_eleveldb_repo = https://github.com/basho/eleveldb
+pkg_eleveldb_commit = master
+
+PACKAGES += elixir
+pkg_elixir_name = elixir
+pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications
+pkg_elixir_homepage = https://elixir-lang.org/
+pkg_elixir_fetch = git
+pkg_elixir_repo = https://github.com/elixir-lang/elixir
+pkg_elixir_commit = master
+
+PACKAGES += elli
+pkg_elli_name = elli
+pkg_elli_description = Simple, robust and performant Erlang web server
+pkg_elli_homepage = https://github.com/elli-lib/elli
+pkg_elli_fetch = git
+pkg_elli_repo = https://github.com/elli-lib/elli
+pkg_elli_commit = master
+
+PACKAGES += elvis
+pkg_elvis_name = elvis
+pkg_elvis_description = Erlang Style Reviewer
+pkg_elvis_homepage = https://github.com/inaka/elvis
+pkg_elvis_fetch = git
+pkg_elvis_repo = https://github.com/inaka/elvis
+pkg_elvis_commit = master
+
+PACKAGES += emagick
+pkg_emagick_name = emagick
+pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool.
+pkg_emagick_homepage = https://github.com/kivra/emagick
+pkg_emagick_fetch = git
+pkg_emagick_repo = https://github.com/kivra/emagick
+pkg_emagick_commit = master
+
+PACKAGES += emysql
+pkg_emysql_name = emysql
+pkg_emysql_description = Stable, pure Erlang MySQL driver.
+pkg_emysql_homepage = https://github.com/Eonblast/Emysql
+pkg_emysql_fetch = git
+pkg_emysql_repo = https://github.com/Eonblast/Emysql
+pkg_emysql_commit = master
+
+PACKAGES += enm
+pkg_enm_name = enm
+pkg_enm_description = Erlang driver for nanomsg
+pkg_enm_homepage = https://github.com/basho/enm
+pkg_enm_fetch = git
+pkg_enm_repo = https://github.com/basho/enm
+pkg_enm_commit = master
+
+PACKAGES += entop
+pkg_entop_name = entop
+pkg_entop_description = A top-like tool for monitoring an Erlang node
+pkg_entop_homepage = https://github.com/mazenharake/entop
+pkg_entop_fetch = git
+pkg_entop_repo = https://github.com/mazenharake/entop
+pkg_entop_commit = master
+
+PACKAGES += epcap
+pkg_epcap_name = epcap
+pkg_epcap_description = Erlang packet capture interface using pcap
+pkg_epcap_homepage = https://github.com/msantos/epcap
+pkg_epcap_fetch = git
+pkg_epcap_repo = https://github.com/msantos/epcap
+pkg_epcap_commit = master
+
+PACKAGES += eper
+pkg_eper_name = eper
+pkg_eper_description = Erlang performance and debugging tools.
+pkg_eper_homepage = https://github.com/massemanet/eper
+pkg_eper_fetch = git
+pkg_eper_repo = https://github.com/massemanet/eper
+pkg_eper_commit = master
+
+PACKAGES += epgsql
+pkg_epgsql_name = epgsql
+pkg_epgsql_description = Erlang PostgreSQL client library.
+pkg_epgsql_homepage = https://github.com/epgsql/epgsql
+pkg_epgsql_fetch = git
+pkg_epgsql_repo = https://github.com/epgsql/epgsql
+pkg_epgsql_commit = master
+
+PACKAGES += episcina
+pkg_episcina_name = episcina
+pkg_episcina_description = A simple non intrusive resource pool for connections
+pkg_episcina_homepage = https://github.com/erlware/episcina
+pkg_episcina_fetch = git
+pkg_episcina_repo = https://github.com/erlware/episcina
+pkg_episcina_commit = master
+
+PACKAGES += eplot
+pkg_eplot_name = eplot
+pkg_eplot_description = A plot engine written in erlang.
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot
+pkg_eplot_fetch = git
+pkg_eplot_repo = https://github.com/psyeugenic/eplot
+pkg_eplot_commit = master
+
+PACKAGES += epocxy
+pkg_epocxy_name = epocxy
+pkg_epocxy_description = Erlang Patterns of Concurrency
+pkg_epocxy_homepage = https://github.com/duomark/epocxy
+pkg_epocxy_fetch = git
+pkg_epocxy_repo = https://github.com/duomark/epocxy
+pkg_epocxy_commit = master
+
+PACKAGES += epubnub
+pkg_epubnub_name = epubnub
+pkg_epubnub_description = Erlang PubNub API
+pkg_epubnub_homepage = https://github.com/tsloughter/epubnub
+pkg_epubnub_fetch = git
+pkg_epubnub_repo = https://github.com/tsloughter/epubnub
+pkg_epubnub_commit = master
+
+PACKAGES += eqm
+pkg_eqm_name = eqm
+pkg_eqm_description = Erlang pub sub with supply-demand channels
+pkg_eqm_homepage = https://github.com/loucash/eqm
+pkg_eqm_fetch = git
+pkg_eqm_repo = https://github.com/loucash/eqm
+pkg_eqm_commit = master
+
+PACKAGES += eredis
+pkg_eredis_name = eredis
+pkg_eredis_description = Erlang Redis client
+pkg_eredis_homepage = https://github.com/wooga/eredis
+pkg_eredis_fetch = git
+pkg_eredis_repo = https://github.com/wooga/eredis
+pkg_eredis_commit = master
+
+PACKAGES += eredis_pool
+pkg_eredis_pool_name = eredis_pool
+pkg_eredis_pool_description = eredis_pool is a pool of Redis clients, using eredis and poolboy.
+pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_fetch = git
+pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool
+pkg_eredis_pool_commit = master
+
+PACKAGES += erl_streams
+pkg_erl_streams_name = erl_streams
+pkg_erl_streams_description = Streams in Erlang
+pkg_erl_streams_homepage = https://github.com/epappas/erl_streams
+pkg_erl_streams_fetch = git
+pkg_erl_streams_repo = https://github.com/epappas/erl_streams
+pkg_erl_streams_commit = master
+
+PACKAGES += erlang_cep
+pkg_erlang_cep_name = erlang_cep
+pkg_erlang_cep_description = A basic CEP package written in erlang
+pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_fetch = git
+pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep
+pkg_erlang_cep_commit = master
+
+PACKAGES += erlang_js
+pkg_erlang_js_name = erlang_js
+pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime.
+pkg_erlang_js_homepage = https://github.com/basho/erlang_js
+pkg_erlang_js_fetch = git
+pkg_erlang_js_repo = https://github.com/basho/erlang_js
+pkg_erlang_js_commit = master
+
+PACKAGES += erlang_localtime
+pkg_erlang_localtime_name = erlang_localtime
+pkg_erlang_localtime_description = Erlang library for conversion from one local time to another
+pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_fetch = git
+pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime
+pkg_erlang_localtime_commit = master
+
+PACKAGES += erlang_smtp
+pkg_erlang_smtp_name = erlang_smtp
+pkg_erlang_smtp_description = Erlang SMTP and POP3 server code.
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_fetch = git
+pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp
+pkg_erlang_smtp_commit = master
+
+PACKAGES += erlang_term
+pkg_erlang_term_name = erlang_term
+pkg_erlang_term_description = Erlang Term Info
+pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term
+pkg_erlang_term_fetch = git
+pkg_erlang_term_repo = https://github.com/okeuday/erlang_term
+pkg_erlang_term_commit = master
+
+PACKAGES += erlastic_search
+pkg_erlastic_search_name = erlastic_search
+pkg_erlastic_search_description = An Erlang app for communicating with Elastic Search's rest interface.
+pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_fetch = git
+pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search
+pkg_erlastic_search_commit = master
+
+PACKAGES += erlasticsearch
+pkg_erlasticsearch_name = erlasticsearch
+pkg_erlasticsearch_description = Erlang thrift interface to elastic_search
+pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_fetch = git
+pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch
+pkg_erlasticsearch_commit = master
+
+PACKAGES += erlbrake
+pkg_erlbrake_name = erlbrake
+pkg_erlbrake_description = Erlang Airbrake notification client
+pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_fetch = git
+pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake
+pkg_erlbrake_commit = master
+
+PACKAGES += erlcloud
+pkg_erlcloud_name = erlcloud
+pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB)
+pkg_erlcloud_homepage = https://github.com/gleber/erlcloud
+pkg_erlcloud_fetch = git
+pkg_erlcloud_repo = https://github.com/gleber/erlcloud
+pkg_erlcloud_commit = master
+
+PACKAGES += erlcron
+pkg_erlcron_name = erlcron
+pkg_erlcron_description = Erlang cronish system
+pkg_erlcron_homepage = https://github.com/erlware/erlcron
+pkg_erlcron_fetch = git
+pkg_erlcron_repo = https://github.com/erlware/erlcron
+pkg_erlcron_commit = master
+
+PACKAGES += erldb
+pkg_erldb_name = erldb
+pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang
+pkg_erldb_homepage = http://erldb.org
+pkg_erldb_fetch = git
+pkg_erldb_repo = https://github.com/erldb/erldb
+pkg_erldb_commit = master
+
+PACKAGES += erldis
+pkg_erldis_name = erldis
+pkg_erldis_description = redis erlang client library
+pkg_erldis_homepage = https://github.com/cstar/erldis
+pkg_erldis_fetch = git
+pkg_erldis_repo = https://github.com/cstar/erldis
+pkg_erldis_commit = master
+
+PACKAGES += erldns
+pkg_erldns_name = erldns
+pkg_erldns_description = DNS server, in erlang.
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns
+pkg_erldns_fetch = git
+pkg_erldns_repo = https://github.com/aetrion/erl-dns
+pkg_erldns_commit = master
+
+PACKAGES += erldocker
+pkg_erldocker_name = erldocker
+pkg_erldocker_description = Docker Remote API client for Erlang
+pkg_erldocker_homepage = https://github.com/proger/erldocker
+pkg_erldocker_fetch = git
+pkg_erldocker_repo = https://github.com/proger/erldocker
+pkg_erldocker_commit = master
+
+PACKAGES += erlfsmon
+pkg_erlfsmon_name = erlfsmon
+pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX
+pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon
+pkg_erlfsmon_fetch = git
+pkg_erlfsmon_repo = https://github.com/proger/erlfsmon
+pkg_erlfsmon_commit = master
+
+PACKAGES += erlgit
+pkg_erlgit_name = erlgit
+pkg_erlgit_description = Erlang convenience wrapper around git executable
+pkg_erlgit_homepage = https://github.com/gleber/erlgit
+pkg_erlgit_fetch = git
+pkg_erlgit_repo = https://github.com/gleber/erlgit
+pkg_erlgit_commit = master
+
+PACKAGES += erlguten
+pkg_erlguten_name = erlguten
+pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang.
+pkg_erlguten_homepage = https://github.com/richcarl/erlguten
+pkg_erlguten_fetch = git
+pkg_erlguten_repo = https://github.com/richcarl/erlguten
+pkg_erlguten_commit = master
+
+PACKAGES += erlmc
+pkg_erlmc_name = erlmc
+pkg_erlmc_description = Erlang memcached binary protocol client
+pkg_erlmc_homepage = https://github.com/jkvor/erlmc
+pkg_erlmc_fetch = git
+pkg_erlmc_repo = https://github.com/jkvor/erlmc
+pkg_erlmc_commit = master
+
+PACKAGES += erlmongo
+pkg_erlmongo_name = erlmongo
+pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support
+pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_fetch = git
+pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo
+pkg_erlmongo_commit = master
+
+PACKAGES += erlog
+pkg_erlog_name = erlog
+pkg_erlog_description = Prolog interpreter in and for Erlang
+pkg_erlog_homepage = https://github.com/rvirding/erlog
+pkg_erlog_fetch = git
+pkg_erlog_repo = https://github.com/rvirding/erlog
+pkg_erlog_commit = master
+
+PACKAGES += erlpass
+pkg_erlpass_name = erlpass
+pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever.
+pkg_erlpass_homepage = https://github.com/ferd/erlpass
+pkg_erlpass_fetch = git
+pkg_erlpass_repo = https://github.com/ferd/erlpass
+pkg_erlpass_commit = master
+
+PACKAGES += erlport
+pkg_erlport_name = erlport
+pkg_erlport_description = ErlPort - connect Erlang to other languages
+pkg_erlport_homepage = https://github.com/hdima/erlport
+pkg_erlport_fetch = git
+pkg_erlport_repo = https://github.com/hdima/erlport
+pkg_erlport_commit = master
+
+PACKAGES += erlsh
+pkg_erlsh_name = erlsh
+pkg_erlsh_description = Erlang shell tools
+pkg_erlsh_homepage = https://github.com/proger/erlsh
+pkg_erlsh_fetch = git
+pkg_erlsh_repo = https://github.com/proger/erlsh
+pkg_erlsh_commit = master
+
+PACKAGES += erlsha2
+pkg_erlsha2_name = erlsha2
+pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs.
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2
+pkg_erlsha2_fetch = git
+pkg_erlsha2_repo = https://github.com/vinoski/erlsha2
+pkg_erlsha2_commit = master
+
+PACKAGES += erlsom
+pkg_erlsom_name = erlsom
+pkg_erlsom_description = XML parser for Erlang
+pkg_erlsom_homepage = https://github.com/willemdj/erlsom
+pkg_erlsom_fetch = git
+pkg_erlsom_repo = https://github.com/willemdj/erlsom
+pkg_erlsom_commit = master
+
+PACKAGES += erlubi
+pkg_erlubi_name = erlubi
+pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer)
+pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi
+pkg_erlubi_fetch = git
+pkg_erlubi_repo = https://github.com/krestenkrab/erlubi
+pkg_erlubi_commit = master
+
+PACKAGES += erlvolt
+pkg_erlvolt_name = erlvolt
+pkg_erlvolt_description = VoltDB Erlang Client Driver
+pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_fetch = git
+pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang
+pkg_erlvolt_commit = master
+
+PACKAGES += erlware_commons
+pkg_erlware_commons_name = erlware_commons
+pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components.
+pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_fetch = git
+pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons
+pkg_erlware_commons_commit = master
+
+PACKAGES += erlydtl
+pkg_erlydtl_name = erlydtl
+pkg_erlydtl_description = Django Template Language for Erlang.
+pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_fetch = git
+pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl
+pkg_erlydtl_commit = master
+
+PACKAGES += errd
+pkg_errd_name = errd
+pkg_errd_description = Erlang RRDTool library
+pkg_errd_homepage = https://github.com/archaelus/errd
+pkg_errd_fetch = git
+pkg_errd_repo = https://github.com/archaelus/errd
+pkg_errd_commit = master
+
+PACKAGES += erserve
+pkg_erserve_name = erserve
+pkg_erserve_description = Erlang/Rserve communication interface
+pkg_erserve_homepage = https://github.com/del/erserve
+pkg_erserve_fetch = git
+pkg_erserve_repo = https://github.com/del/erserve
+pkg_erserve_commit = master
+
+PACKAGES += erwa
+pkg_erwa_name = erwa
+pkg_erwa_description = A WAMP router and client written in Erlang.
+pkg_erwa_homepage = https://github.com/bwegh/erwa
+pkg_erwa_fetch = git
+pkg_erwa_repo = https://github.com/bwegh/erwa
+pkg_erwa_commit = master
+
+PACKAGES += escalus
+pkg_escalus_name = escalus
+pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers
+pkg_escalus_homepage = https://github.com/esl/escalus
+pkg_escalus_fetch = git
+pkg_escalus_repo = https://github.com/esl/escalus
+pkg_escalus_commit = master
+
+PACKAGES += esh_mk
+pkg_esh_mk_name = esh_mk
+pkg_esh_mk_description = esh template engine plugin for erlang.mk
+pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk
+pkg_esh_mk_fetch = git
+pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git
+pkg_esh_mk_commit = master
+
+PACKAGES += espec
+pkg_espec_name = espec
+pkg_espec_description = ESpec: Behaviour driven development framework for Erlang
+pkg_espec_homepage = https://github.com/lucaspiller/espec
+pkg_espec_fetch = git
+pkg_espec_repo = https://github.com/lucaspiller/espec
+pkg_espec_commit = master
+
+PACKAGES += estatsd
+pkg_estatsd_name = estatsd
+pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite
+pkg_estatsd_homepage = https://github.com/RJ/estatsd
+pkg_estatsd_fetch = git
+pkg_estatsd_repo = https://github.com/RJ/estatsd
+pkg_estatsd_commit = master
+
+PACKAGES += etap
+pkg_etap_name = etap
+pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output.
+pkg_etap_homepage = https://github.com/ngerakines/etap
+pkg_etap_fetch = git
+pkg_etap_repo = https://github.com/ngerakines/etap
+pkg_etap_commit = master
+
+PACKAGES += etest
+pkg_etest_name = etest
+pkg_etest_description = A lightweight, convention over configuration test framework for Erlang
+pkg_etest_homepage = https://github.com/wooga/etest
+pkg_etest_fetch = git
+pkg_etest_repo = https://github.com/wooga/etest
+pkg_etest_commit = master
+
+PACKAGES += etest_http
+pkg_etest_http_name = etest_http
+pkg_etest_http_description = etest Assertions around HTTP (client-side)
+pkg_etest_http_homepage = https://github.com/wooga/etest_http
+pkg_etest_http_fetch = git
+pkg_etest_http_repo = https://github.com/wooga/etest_http
+pkg_etest_http_commit = master
+
+PACKAGES += etoml
+pkg_etoml_name = etoml
+pkg_etoml_description = TOML language erlang parser
+pkg_etoml_homepage = https://github.com/kalta/etoml
+pkg_etoml_fetch = git
+pkg_etoml_repo = https://github.com/kalta/etoml
+pkg_etoml_commit = master
+
+PACKAGES += eunit
+pkg_eunit_name = eunit
+pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository.
+pkg_eunit_homepage = https://github.com/richcarl/eunit
+pkg_eunit_fetch = git
+pkg_eunit_repo = https://github.com/richcarl/eunit
+pkg_eunit_commit = master
+
+PACKAGES += eunit_formatters
+pkg_eunit_formatters_name = eunit_formatters
+pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better.
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_fetch = git
+pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters
+pkg_eunit_formatters_commit = master
+
+PACKAGES += euthanasia
+pkg_euthanasia_name = euthanasia
+pkg_euthanasia_description = Merciful killer for your Erlang processes
+pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_fetch = git
+pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia
+pkg_euthanasia_commit = master
+
+PACKAGES += evum
+pkg_evum_name = evum
+pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM
+pkg_evum_homepage = https://github.com/msantos/evum
+pkg_evum_fetch = git
+pkg_evum_repo = https://github.com/msantos/evum
+pkg_evum_commit = master
+
+PACKAGES += exec
+pkg_exec_name = erlexec
+pkg_exec_description = Execute and control OS processes from Erlang/OTP.
+pkg_exec_homepage = http://saleyn.github.com/erlexec
+pkg_exec_fetch = git
+pkg_exec_repo = https://github.com/saleyn/erlexec
+pkg_exec_commit = master
+
+PACKAGES += exml
+pkg_exml_name = exml
+pkg_exml_description = XML parsing library in Erlang
+pkg_exml_homepage = https://github.com/paulgray/exml
+pkg_exml_fetch = git
+pkg_exml_repo = https://github.com/paulgray/exml
+pkg_exml_commit = master
+
+PACKAGES += exometer
+pkg_exometer_name = exometer
+pkg_exometer_description = Basic measurement objects and probe behavior
+pkg_exometer_homepage = https://github.com/Feuerlabs/exometer
+pkg_exometer_fetch = git
+pkg_exometer_repo = https://github.com/Feuerlabs/exometer
+pkg_exometer_commit = master
+
+PACKAGES += exs1024
+pkg_exs1024_name = exs1024
+pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang.
+pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024
+pkg_exs1024_fetch = git
+pkg_exs1024_repo = https://github.com/jj1bdx/exs1024
+pkg_exs1024_commit = master
+
+PACKAGES += exs64
+pkg_exs64_name = exs64
+pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang.
+pkg_exs64_homepage = https://github.com/jj1bdx/exs64
+pkg_exs64_fetch = git
+pkg_exs64_repo = https://github.com/jj1bdx/exs64
+pkg_exs64_commit = master
+
+PACKAGES += exsplus116
+pkg_exsplus116_name = exsplus116
+pkg_exsplus116_description = Xorshift116plus for Erlang
+pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_fetch = git
+pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116
+pkg_exsplus116_commit = master
+
+PACKAGES += exsplus128
+pkg_exsplus128_name = exsplus128
+pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang.
+pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_fetch = git
+pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128
+pkg_exsplus128_commit = master
+
+PACKAGES += ezmq
+pkg_ezmq_name = ezmq
+pkg_ezmq_description = zMQ implemented in Erlang
+pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_fetch = git
+pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq
+pkg_ezmq_commit = master
+
+PACKAGES += ezmtp
+pkg_ezmtp_name = ezmtp
+pkg_ezmtp_description = ZMTP protocol in pure Erlang.
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp
+pkg_ezmtp_fetch = git
+pkg_ezmtp_repo = https://github.com/a13x/ezmtp
+pkg_ezmtp_commit = master
+
+PACKAGES += fast_disk_log
+pkg_fast_disk_log_name = fast_disk_log
+pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger
+pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_fetch = git
+pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log
+pkg_fast_disk_log_commit = master
+
+PACKAGES += feeder
+pkg_feeder_name = feeder
+pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds.
+pkg_feeder_homepage = https://github.com/michaelnisi/feeder
+pkg_feeder_fetch = git
+pkg_feeder_repo = https://github.com/michaelnisi/feeder
+pkg_feeder_commit = master
+
+PACKAGES += find_crate
+pkg_find_crate_name = find_crate
+pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory
+pkg_find_crate_homepage = https://github.com/goertzenator/find_crate
+pkg_find_crate_fetch = git
+pkg_find_crate_repo = https://github.com/goertzenator/find_crate
+pkg_find_crate_commit = master
+
+PACKAGES += fix
+pkg_fix_name = fix
+pkg_fix_description = http://fixprotocol.org/ implementation.
+pkg_fix_homepage = https://github.com/maxlapshin/fix
+pkg_fix_fetch = git
+pkg_fix_repo = https://github.com/maxlapshin/fix
+pkg_fix_commit = master
+
+PACKAGES += flower
+pkg_flower_name = flower
+pkg_flower_description = FlowER - an Erlang OpenFlow development platform
+pkg_flower_homepage = https://github.com/travelping/flower
+pkg_flower_fetch = git
+pkg_flower_repo = https://github.com/travelping/flower
+pkg_flower_commit = master
+
+PACKAGES += fn
+pkg_fn_name = fn
+pkg_fn_description = Function utilities for Erlang
+pkg_fn_homepage = https://github.com/reiddraper/fn
+pkg_fn_fetch = git
+pkg_fn_repo = https://github.com/reiddraper/fn
+pkg_fn_commit = master
+
+PACKAGES += folsom
+pkg_folsom_name = folsom
+pkg_folsom_description = Expose Erlang Events and Metrics
+pkg_folsom_homepage = https://github.com/boundary/folsom
+pkg_folsom_fetch = git
+pkg_folsom_repo = https://github.com/boundary/folsom
+pkg_folsom_commit = master
+
+PACKAGES += folsom_cowboy
+pkg_folsom_cowboy_name = folsom_cowboy
+pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper.
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_fetch = git
+pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy
+pkg_folsom_cowboy_commit = master
+
+PACKAGES += folsomite
+pkg_folsomite_name = folsomite
+pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics
+pkg_folsomite_homepage = https://github.com/campanja/folsomite
+pkg_folsomite_fetch = git
+pkg_folsomite_repo = https://github.com/campanja/folsomite
+pkg_folsomite_commit = master
+
+PACKAGES += fs
+pkg_fs_name = fs
+pkg_fs_description = Erlang FileSystem Listener
+pkg_fs_homepage = https://github.com/synrc/fs
+pkg_fs_fetch = git
+pkg_fs_repo = https://github.com/synrc/fs
+pkg_fs_commit = master
+
+PACKAGES += fuse
+pkg_fuse_name = fuse
+pkg_fuse_description = A Circuit Breaker for Erlang
+pkg_fuse_homepage = https://github.com/jlouis/fuse
+pkg_fuse_fetch = git
+pkg_fuse_repo = https://github.com/jlouis/fuse
+pkg_fuse_commit = master
+
+PACKAGES += gcm
+pkg_gcm_name = gcm
+pkg_gcm_description = An Erlang application for Google Cloud Messaging
+pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang
+pkg_gcm_fetch = git
+pkg_gcm_repo = https://github.com/pdincau/gcm-erlang
+pkg_gcm_commit = master
+
+PACKAGES += gcprof
+pkg_gcprof_name = gcprof
+pkg_gcprof_description = Garbage Collection profiler for Erlang
+pkg_gcprof_homepage = https://github.com/knutin/gcprof
+pkg_gcprof_fetch = git
+pkg_gcprof_repo = https://github.com/knutin/gcprof
+pkg_gcprof_commit = master
+
+PACKAGES += geas
+pkg_geas_name = geas
+pkg_geas_description = Guess Erlang Application Scattering
+pkg_geas_homepage = https://github.com/crownedgrouse/geas
+pkg_geas_fetch = git
+pkg_geas_repo = https://github.com/crownedgrouse/geas
+pkg_geas_commit = master
+
+PACKAGES += geef
+pkg_geef_name = geef
+pkg_geef_description = Git NEEEEF (Erlang NIF)
+pkg_geef_homepage = https://github.com/carlosmn/geef
+pkg_geef_fetch = git
+pkg_geef_repo = https://github.com/carlosmn/geef
+pkg_geef_commit = master
+
+PACKAGES += gen_coap
+pkg_gen_coap_name = gen_coap
+pkg_gen_coap_description = Generic Erlang CoAP Client/Server
+pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_fetch = git
+pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap
+pkg_gen_coap_commit = master
+
+PACKAGES += gen_cycle
+pkg_gen_cycle_name = gen_cycle
+pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks
+pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_fetch = git
+pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle
+pkg_gen_cycle_commit = develop
+
+PACKAGES += gen_icmp
+pkg_gen_icmp_name = gen_icmp
+pkg_gen_icmp_description = Erlang interface to ICMP sockets
+pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_fetch = git
+pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp
+pkg_gen_icmp_commit = master
+
+PACKAGES += gen_leader
+pkg_gen_leader_name = gen_leader
+pkg_gen_leader_description = leader election behavior
+pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_fetch = git
+pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival
+pkg_gen_leader_commit = master
+
+PACKAGES += gen_nb_server
+pkg_gen_nb_server_name = gen_nb_server
+pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers
+pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_fetch = git
+pkg_gen_nb_server_repo = https://github.com/kevsmith/gen_nb_server
+pkg_gen_nb_server_commit = master
+
+PACKAGES += gen_paxos
+pkg_gen_paxos_name = gen_paxos
+pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol
+pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_fetch = git
+pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos
+pkg_gen_paxos_commit = master
+
+PACKAGES += gen_rpc
+pkg_gen_rpc_name = gen_rpc
+pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages
+pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_fetch = git
+pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git
+pkg_gen_rpc_commit = master
+
+PACKAGES += gen_smtp
+pkg_gen_smtp_name = gen_smtp
+pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules
+pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_fetch = git
+pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp
+pkg_gen_smtp_commit = master
+
+PACKAGES += gen_tracker
+pkg_gen_tracker_name = gen_tracker
+pkg_gen_tracker_description = supervisor with ets handling of children and their metadata
+pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_fetch = git
+pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker
+pkg_gen_tracker_commit = master
+
+PACKAGES += gen_unix
+pkg_gen_unix_name = gen_unix
+pkg_gen_unix_description = Erlang Unix socket interface
+pkg_gen_unix_homepage = https://github.com/msantos/gen_unix
+pkg_gen_unix_fetch = git
+pkg_gen_unix_repo = https://github.com/msantos/gen_unix
+pkg_gen_unix_commit = master
+
+PACKAGES += geode
+pkg_geode_name = geode
+pkg_geode_description = geohash/proximity lookup in pure, uncut erlang.
+pkg_geode_homepage = https://github.com/bradfordw/geode
+pkg_geode_fetch = git
+pkg_geode_repo = https://github.com/bradfordw/geode
+pkg_geode_commit = master
+
+PACKAGES += getopt
+pkg_getopt_name = getopt
+pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax
+pkg_getopt_homepage = https://github.com/jcomellas/getopt
+pkg_getopt_fetch = git
+pkg_getopt_repo = https://github.com/jcomellas/getopt
+pkg_getopt_commit = master
+
+PACKAGES += gettext
+pkg_gettext_name = gettext
+pkg_gettext_description = Erlang internationalization library.
+pkg_gettext_homepage = https://github.com/etnt/gettext
+pkg_gettext_fetch = git
+pkg_gettext_repo = https://github.com/etnt/gettext
+pkg_gettext_commit = master
+
+PACKAGES += giallo
+pkg_giallo_name = giallo
+pkg_giallo_description = Small and flexible web framework on top of Cowboy
+pkg_giallo_homepage = https://github.com/kivra/giallo
+pkg_giallo_fetch = git
+pkg_giallo_repo = https://github.com/kivra/giallo
+pkg_giallo_commit = master
+
+PACKAGES += gin
+pkg_gin_name = gin
+pkg_gin_description = The guards and for Erlang parse_transform
+pkg_gin_homepage = https://github.com/mad-cocktail/gin
+pkg_gin_fetch = git
+pkg_gin_repo = https://github.com/mad-cocktail/gin
+pkg_gin_commit = master
+
+PACKAGES += gitty
+pkg_gitty_name = gitty
+pkg_gitty_description = Git access in erlang
+pkg_gitty_homepage = https://github.com/maxlapshin/gitty
+pkg_gitty_fetch = git
+pkg_gitty_repo = https://github.com/maxlapshin/gitty
+pkg_gitty_commit = master
+
+PACKAGES += gold_fever
+pkg_gold_fever_name = gold_fever
+pkg_gold_fever_description = A Treasure Hunt for Erlangers
+pkg_gold_fever_homepage = https://github.com/inaka/gold_fever
+pkg_gold_fever_fetch = git
+pkg_gold_fever_repo = https://github.com/inaka/gold_fever
+pkg_gold_fever_commit = master
+
+PACKAGES += gpb
+pkg_gpb_name = gpb
+pkg_gpb_description = A Google Protobuf implementation for Erlang
+pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_fetch = git
+pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb
+pkg_gpb_commit = master
+
+PACKAGES += gproc
+pkg_gproc_name = gproc
+pkg_gproc_description = Extended process registry for Erlang
+pkg_gproc_homepage = https://github.com/uwiger/gproc
+pkg_gproc_fetch = git
+pkg_gproc_repo = https://github.com/uwiger/gproc
+pkg_gproc_commit = master
+
+PACKAGES += grapherl
+pkg_grapherl_name = grapherl
+pkg_grapherl_description = Create graphs of Erlang systems and programs
+pkg_grapherl_homepage = https://github.com/eproxus/grapherl
+pkg_grapherl_fetch = git
+pkg_grapherl_repo = https://github.com/eproxus/grapherl
+pkg_grapherl_commit = master
+
+PACKAGES += grpc
+pkg_grpc_name = grpc
+pkg_grpc_description = gRPC server in Erlang
+pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_fetch = git
+pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc
+pkg_grpc_commit = master
+
+PACKAGES += grpc_client
+pkg_grpc_client_name = grpc_client
+pkg_grpc_client_description = gRPC client in Erlang
+pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_fetch = git
+pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client
+pkg_grpc_client_commit = master
+
+PACKAGES += gun
+pkg_gun_name = gun
+pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang.
+pkg_gun_homepage = http://ninenines.eu
+pkg_gun_fetch = git
+pkg_gun_repo = https://github.com/ninenines/gun
+pkg_gun_commit = master
+
+PACKAGES += gut
+pkg_gut_name = gut
+pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. Like rails generate or yeoman
+pkg_gut_homepage = https://github.com/unbalancedparentheses/gut
+pkg_gut_fetch = git
+pkg_gut_repo = https://github.com/unbalancedparentheses/gut
+pkg_gut_commit = master
+
+PACKAGES += hackney
+pkg_hackney_name = hackney
+pkg_hackney_description = simple HTTP client in Erlang
+pkg_hackney_homepage = https://github.com/benoitc/hackney
+pkg_hackney_fetch = git
+pkg_hackney_repo = https://github.com/benoitc/hackney
+pkg_hackney_commit = master
+
+PACKAGES += hamcrest
+pkg_hamcrest_name = hamcrest
+pkg_hamcrest_description = Erlang port of Hamcrest
+pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_fetch = git
+pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang
+pkg_hamcrest_commit = master
+
+PACKAGES += hanoidb
+pkg_hanoidb_name = hanoidb
+pkg_hanoidb_description = Erlang LSM BTree Storage
+pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_fetch = git
+pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb
+pkg_hanoidb_commit = master
+
+PACKAGES += hottub
+pkg_hottub_name = hottub
+pkg_hottub_description = Permanent Erlang Worker Pool
+pkg_hottub_homepage = https://github.com/bfrog/hottub
+pkg_hottub_fetch = git
+pkg_hottub_repo = https://github.com/bfrog/hottub
+pkg_hottub_commit = master
+
+PACKAGES += hpack
+pkg_hpack_name = hpack
+pkg_hpack_description = HPACK Implementation for Erlang
+pkg_hpack_homepage = https://github.com/joedevivo/hpack
+pkg_hpack_fetch = git
+pkg_hpack_repo = https://github.com/joedevivo/hpack
+pkg_hpack_commit = master
+
+PACKAGES += hyper
+pkg_hyper_name = hyper
+pkg_hyper_description = Erlang implementation of HyperLogLog
+pkg_hyper_homepage = https://github.com/GameAnalytics/hyper
+pkg_hyper_fetch = git
+pkg_hyper_repo = https://github.com/GameAnalytics/hyper
+pkg_hyper_commit = master
+
+PACKAGES += i18n
+pkg_i18n_name = i18n
+pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e)
+pkg_i18n_homepage = https://github.com/erlang-unicode/i18n
+pkg_i18n_fetch = git
+pkg_i18n_repo = https://github.com/erlang-unicode/i18n
+pkg_i18n_commit = master
+
+PACKAGES += ibrowse
+pkg_ibrowse_name = ibrowse
+pkg_ibrowse_description = Erlang HTTP client
+pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_fetch = git
+pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse
+pkg_ibrowse_commit = master
+
+PACKAGES += idna
+pkg_idna_name = idna
+pkg_idna_description = Erlang IDNA lib
+pkg_idna_homepage = https://github.com/benoitc/erlang-idna
+pkg_idna_fetch = git
+pkg_idna_repo = https://github.com/benoitc/erlang-idna
+pkg_idna_commit = master
+
+PACKAGES += ierlang
+pkg_ierlang_name = ierlang
+pkg_ierlang_description = An Erlang language kernel for IPython.
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang
+pkg_ierlang_fetch = git
+pkg_ierlang_repo = https://github.com/robbielynch/ierlang
+pkg_ierlang_commit = master
+
+PACKAGES += iota
+pkg_iota_name = iota
+pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code
+pkg_iota_homepage = https://github.com/jpgneves/iota
+pkg_iota_fetch = git
+pkg_iota_repo = https://github.com/jpgneves/iota
+pkg_iota_commit = master
+
+PACKAGES += irc_lib
+pkg_irc_lib_name = irc_lib
+pkg_irc_lib_description = Erlang irc client library
+pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_fetch = git
+pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib
+pkg_irc_lib_commit = master
+
+PACKAGES += ircd
+pkg_ircd_name = ircd
+pkg_ircd_description = A pluggable IRC daemon application/library for Erlang.
+pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd
+pkg_ircd_fetch = git
+pkg_ircd_repo = https://github.com/tonyg/erlang-ircd
+pkg_ircd_commit = master
+
+PACKAGES += iris
+pkg_iris_name = iris
+pkg_iris_description = Iris Erlang binding
+pkg_iris_homepage = https://github.com/project-iris/iris-erl
+pkg_iris_fetch = git
+pkg_iris_repo = https://github.com/project-iris/iris-erl
+pkg_iris_commit = master
+
+PACKAGES += iso8601
+pkg_iso8601_name = iso8601
+pkg_iso8601_description = Erlang ISO 8601 date formatter/parser
+pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_fetch = git
+pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601
+pkg_iso8601_commit = master
+
+PACKAGES += jamdb_sybase
+pkg_jamdb_sybase_name = jamdb_sybase
+pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE
+pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_fetch = git
+pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase
+pkg_jamdb_sybase_commit = master
+
+PACKAGES += jerg
+pkg_jerg_name = jerg
+pkg_jerg_description = JSON Schema to Erlang Records Generator
+pkg_jerg_homepage = https://github.com/ddossot/jerg
+pkg_jerg_fetch = git
+pkg_jerg_repo = https://github.com/ddossot/jerg
+pkg_jerg_commit = master
+
+PACKAGES += jesse
+pkg_jesse_name = jesse
+pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang.
+pkg_jesse_homepage = https://github.com/for-GET/jesse
+pkg_jesse_fetch = git
+pkg_jesse_repo = https://github.com/for-GET/jesse
+pkg_jesse_commit = master
+
+PACKAGES += jiffy
+pkg_jiffy_name = jiffy
+pkg_jiffy_description = JSON NIFs for Erlang.
+pkg_jiffy_homepage = https://github.com/davisp/jiffy
+pkg_jiffy_fetch = git
+pkg_jiffy_repo = https://github.com/davisp/jiffy
+pkg_jiffy_commit = master
+
+PACKAGES += jiffy_v
+pkg_jiffy_v_name = jiffy_v
+pkg_jiffy_v_description = JSON validation utility
+pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_fetch = git
+pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v
+pkg_jiffy_v_commit = master
+
+PACKAGES += jobs
+pkg_jobs_name = jobs
+pkg_jobs_description = a Job scheduler for load regulation
+pkg_jobs_homepage = https://github.com/esl/jobs
+pkg_jobs_fetch = git
+pkg_jobs_repo = https://github.com/esl/jobs
+pkg_jobs_commit = master
+
+PACKAGES += joxa
+pkg_joxa_name = joxa
+pkg_joxa_description = A Modern Lisp for the Erlang VM
+pkg_joxa_homepage = https://github.com/joxa/joxa
+pkg_joxa_fetch = git
+pkg_joxa_repo = https://github.com/joxa/joxa
+pkg_joxa_commit = master
+
+PACKAGES += json
+pkg_json_name = json
+pkg_json_description = a high level json library for erlang (17.0+)
+pkg_json_homepage = https://github.com/talentdeficit/json
+pkg_json_fetch = git
+pkg_json_repo = https://github.com/talentdeficit/json
+pkg_json_commit = master
+
+PACKAGES += json_rec
+pkg_json_rec_name = json_rec
+pkg_json_rec_description = JSON to erlang record
+pkg_json_rec_homepage = https://github.com/justinkirby/json_rec
+pkg_json_rec_fetch = git
+pkg_json_rec_repo = https://github.com/justinkirby/json_rec
+pkg_json_rec_commit = master
+
+PACKAGES += jsone
+pkg_jsone_name = jsone
+pkg_jsone_description = An Erlang library for encoding, decoding JSON data.
+pkg_jsone_homepage = https://github.com/sile/jsone.git
+pkg_jsone_fetch = git
+pkg_jsone_repo = https://github.com/sile/jsone.git
+pkg_jsone_commit = master
+
+PACKAGES += jsonerl
+pkg_jsonerl_name = jsonerl
+pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder
+pkg_jsonerl_homepage = https://github.com/lambder/jsonerl
+pkg_jsonerl_fetch = git
+pkg_jsonerl_repo = https://github.com/lambder/jsonerl
+pkg_jsonerl_commit = master
+
+PACKAGES += jsonpath
+pkg_jsonpath_name = jsonpath
+pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation
+pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_fetch = git
+pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath
+pkg_jsonpath_commit = master
+
+PACKAGES += jsonx
+pkg_jsonx_name = jsonx
+pkg_jsonx_description = JSONX is an Erlang library for efficient decode and encode JSON, written in C.
+pkg_jsonx_homepage = https://github.com/iskra/jsonx
+pkg_jsonx_fetch = git
+pkg_jsonx_repo = https://github.com/iskra/jsonx
+pkg_jsonx_commit = master
+
+PACKAGES += jsx
+pkg_jsx_name = jsx
+pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON.
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx
+pkg_jsx_fetch = git
+pkg_jsx_repo = https://github.com/talentdeficit/jsx
+pkg_jsx_commit = master
+
+PACKAGES += kafka
+pkg_kafka_name = kafka
+pkg_kafka_description = Kafka consumer and producer in Erlang
+pkg_kafka_homepage = https://github.com/wooga/kafka-erlang
+pkg_kafka_fetch = git
+pkg_kafka_repo = https://github.com/wooga/kafka-erlang
+pkg_kafka_commit = master
+
+PACKAGES += kafka_protocol
+pkg_kafka_protocol_name = kafka_protocol
+pkg_kafka_protocol_description = Kafka protocol Erlang library
+pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol
+pkg_kafka_protocol_fetch = git
+pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git
+pkg_kafka_protocol_commit = master
+
+PACKAGES += kai
+pkg_kai_name = kai
+pkg_kai_description = DHT storage by Takeshi Inoue
+pkg_kai_homepage = https://github.com/synrc/kai
+pkg_kai_fetch = git
+pkg_kai_repo = https://github.com/synrc/kai
+pkg_kai_commit = master
+
+PACKAGES += katja
+pkg_katja_name = katja
+pkg_katja_description = A simple Riemann client written in Erlang.
+pkg_katja_homepage = https://github.com/nifoc/katja
+pkg_katja_fetch = git
+pkg_katja_repo = https://github.com/nifoc/katja
+pkg_katja_commit = master
+
+PACKAGES += kdht
+pkg_kdht_name = kdht
+pkg_kdht_description = kdht is an erlang DHT implementation
+pkg_kdht_homepage = https://github.com/kevinlynx/kdht
+pkg_kdht_fetch = git
+pkg_kdht_repo = https://github.com/kevinlynx/kdht
+pkg_kdht_commit = master
+
+PACKAGES += key2value
+pkg_key2value_name = key2value
+pkg_key2value_description = Erlang 2-way map
+pkg_key2value_homepage = https://github.com/okeuday/key2value
+pkg_key2value_fetch = git
+pkg_key2value_repo = https://github.com/okeuday/key2value
+pkg_key2value_commit = master
+
+PACKAGES += keys1value
+pkg_keys1value_name = keys1value
+pkg_keys1value_description = Erlang set associative map for key lists
+pkg_keys1value_homepage = https://github.com/okeuday/keys1value
+pkg_keys1value_fetch = git
+pkg_keys1value_repo = https://github.com/okeuday/keys1value
+pkg_keys1value_commit = master
+
+PACKAGES += kinetic
+pkg_kinetic_name = kinetic
+pkg_kinetic_description = Erlang Kinesis Client
+pkg_kinetic_homepage = https://github.com/AdRoll/kinetic
+pkg_kinetic_fetch = git
+pkg_kinetic_repo = https://github.com/AdRoll/kinetic
+pkg_kinetic_commit = master
+
+PACKAGES += kjell
+pkg_kjell_name = kjell
+pkg_kjell_description = Erlang Shell
+pkg_kjell_homepage = https://github.com/karlll/kjell
+pkg_kjell_fetch = git
+pkg_kjell_repo = https://github.com/karlll/kjell
+pkg_kjell_commit = master
+
+PACKAGES += kraken
+pkg_kraken_name = kraken
+pkg_kraken_description = Distributed Pubsub Server for Realtime Apps
+pkg_kraken_homepage = https://github.com/Asana/kraken
+pkg_kraken_fetch = git
+pkg_kraken_repo = https://github.com/Asana/kraken
+pkg_kraken_commit = master
+
+PACKAGES += kucumberl
+pkg_kucumberl_name = kucumberl
+pkg_kucumberl_description = A pure-erlang, open-source, implementation of Cucumber
+pkg_kucumberl_homepage = https://github.com/openshine/kucumberl
+pkg_kucumberl_fetch = git
+pkg_kucumberl_repo = https://github.com/openshine/kucumberl
+pkg_kucumberl_commit = master
+
+PACKAGES += kvc
+pkg_kvc_name = kvc
+pkg_kvc_description = KVC - Key Value Coding for Erlang data structures
+pkg_kvc_homepage = https://github.com/etrepum/kvc
+pkg_kvc_fetch = git
+pkg_kvc_repo = https://github.com/etrepum/kvc
+pkg_kvc_commit = master
+
+PACKAGES += kvlists
+pkg_kvlists_name = kvlists
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang
+pkg_kvlists_homepage = https://github.com/jcomellas/kvlists
+pkg_kvlists_fetch = git
+pkg_kvlists_repo = https://github.com/jcomellas/kvlists
+pkg_kvlists_commit = master
+
+PACKAGES += kvs
+pkg_kvs_name = kvs
+pkg_kvs_description = Container and Iterator
+pkg_kvs_homepage = https://github.com/synrc/kvs
+pkg_kvs_fetch = git
+pkg_kvs_repo = https://github.com/synrc/kvs
+pkg_kvs_commit = master
+
+PACKAGES += lager
+pkg_lager_name = lager
+pkg_lager_description = A logging framework for Erlang/OTP.
+pkg_lager_homepage = https://github.com/erlang-lager/lager
+pkg_lager_fetch = git
+pkg_lager_repo = https://github.com/erlang-lager/lager
+pkg_lager_commit = master
+
+PACKAGES += lager_amqp_backend
+pkg_lager_amqp_backend_name = lager_amqp_backend
+pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend
+pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_fetch = git
+pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend
+pkg_lager_amqp_backend_commit = master
+
+PACKAGES += lager_syslog
+pkg_lager_syslog_name = lager_syslog
+pkg_lager_syslog_description = Syslog backend for lager
+pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_fetch = git
+pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog
+pkg_lager_syslog_commit = master
+
+PACKAGES += lambdapad
+pkg_lambdapad_name = lambdapad
+pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang.
+pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad
+pkg_lambdapad_fetch = git
+pkg_lambdapad_repo = https://github.com/gar1t/lambdapad
+pkg_lambdapad_commit = master
+
+PACKAGES += lasp
+pkg_lasp_name = lasp
+pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations
+pkg_lasp_homepage = http://lasp-lang.org/
+pkg_lasp_fetch = git
+pkg_lasp_repo = https://github.com/lasp-lang/lasp
+pkg_lasp_commit = master
+
+PACKAGES += lasse
+pkg_lasse_name = lasse
+pkg_lasse_description = SSE handler for Cowboy
+pkg_lasse_homepage = https://github.com/inaka/lasse
+pkg_lasse_fetch = git
+pkg_lasse_repo = https://github.com/inaka/lasse
+pkg_lasse_commit = master
+
+PACKAGES += ldap
+pkg_ldap_name = ldap
+pkg_ldap_description = LDAP server written in Erlang
+pkg_ldap_homepage = https://github.com/spawnproc/ldap
+pkg_ldap_fetch = git
+pkg_ldap_repo = https://github.com/spawnproc/ldap
+pkg_ldap_commit = master
+
+PACKAGES += lethink
+pkg_lethink_name = lethink
+pkg_lethink_description = erlang driver for rethinkdb
+pkg_lethink_homepage = https://github.com/taybin/lethink
+pkg_lethink_fetch = git
+pkg_lethink_repo = https://github.com/taybin/lethink
+pkg_lethink_commit = master
+
+PACKAGES += lfe
+pkg_lfe_name = lfe
+pkg_lfe_description = Lisp Flavoured Erlang (LFE)
+pkg_lfe_homepage = https://github.com/rvirding/lfe
+pkg_lfe_fetch = git
+pkg_lfe_repo = https://github.com/rvirding/lfe
+pkg_lfe_commit = master
+
+PACKAGES += ling
+pkg_ling_name = ling
+pkg_ling_description = Erlang on Xen
+pkg_ling_homepage = https://github.com/cloudozer/ling
+pkg_ling_fetch = git
+pkg_ling_repo = https://github.com/cloudozer/ling
+pkg_ling_commit = master
+
+PACKAGES += live
+pkg_live_name = live
+pkg_live_description = Automated module and configuration reloader.
+pkg_live_homepage = http://ninenines.eu
+pkg_live_fetch = git
+pkg_live_repo = https://github.com/ninenines/live
+pkg_live_commit = master
+
+PACKAGES += lmq
+pkg_lmq_name = lmq
+pkg_lmq_description = Lightweight Message Queue
+pkg_lmq_homepage = https://github.com/iij/lmq
+pkg_lmq_fetch = git
+pkg_lmq_repo = https://github.com/iij/lmq
+pkg_lmq_commit = master
+
+PACKAGES += locker
+pkg_locker_name = locker
+pkg_locker_description = Atomic distributed 'check and set' for short-lived keys
+pkg_locker_homepage = https://github.com/wooga/locker
+pkg_locker_fetch = git
+pkg_locker_repo = https://github.com/wooga/locker
+pkg_locker_commit = master
+
+PACKAGES += locks
+pkg_locks_name = locks
+pkg_locks_description = A scalable, deadlock-resolving resource locker
+pkg_locks_homepage = https://github.com/uwiger/locks
+pkg_locks_fetch = git
+pkg_locks_repo = https://github.com/uwiger/locks
+pkg_locks_commit = master
+
+PACKAGES += log4erl
+pkg_log4erl_name = log4erl
+pkg_log4erl_description = A logger for erlang in the spirit of Log4J.
+pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl
+pkg_log4erl_fetch = git
+pkg_log4erl_repo = https://github.com/ahmednawras/log4erl
+pkg_log4erl_commit = master
+
+PACKAGES += lol
+pkg_lol_name = lol
+pkg_lol_description = Lisp on erLang, and programming is fun again
+pkg_lol_homepage = https://github.com/b0oh/lol
+pkg_lol_fetch = git
+pkg_lol_repo = https://github.com/b0oh/lol
+pkg_lol_commit = master
+
+PACKAGES += lucid
+pkg_lucid_name = lucid
+pkg_lucid_description = HTTP/2 server written in Erlang
+pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_fetch = git
+pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid
+pkg_lucid_commit = master
+
+PACKAGES += luerl
+pkg_luerl_name = luerl
+pkg_luerl_description = Lua in Erlang
+pkg_luerl_homepage = https://github.com/rvirding/luerl
+pkg_luerl_fetch = git
+pkg_luerl_repo = https://github.com/rvirding/luerl
+pkg_luerl_commit = develop
+
+PACKAGES += luwak
+pkg_luwak_name = luwak
+pkg_luwak_description = Large-object storage interface for Riak
+pkg_luwak_homepage = https://github.com/basho/luwak
+pkg_luwak_fetch = git
+pkg_luwak_repo = https://github.com/basho/luwak
+pkg_luwak_commit = master
+
+PACKAGES += lux
+pkg_lux_name = lux
+pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands
+pkg_lux_homepage = https://github.com/hawk/lux
+pkg_lux_fetch = git
+pkg_lux_repo = https://github.com/hawk/lux
+pkg_lux_commit = master
+
+PACKAGES += machi
+pkg_machi_name = machi
+pkg_machi_description = Machi file store
+pkg_machi_homepage = https://github.com/basho/machi
+pkg_machi_fetch = git
+pkg_machi_repo = https://github.com/basho/machi
+pkg_machi_commit = master
+
+PACKAGES += mad
+pkg_mad_name = mad
+pkg_mad_description = Small and Fast Rebar Replacement
+pkg_mad_homepage = https://github.com/synrc/mad
+pkg_mad_fetch = git
+pkg_mad_repo = https://github.com/synrc/mad
+pkg_mad_commit = master
+
+PACKAGES += marina
+pkg_marina_name = marina
+pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client
+pkg_marina_homepage = https://github.com/lpgauth/marina
+pkg_marina_fetch = git
+pkg_marina_repo = https://github.com/lpgauth/marina
+pkg_marina_commit = master
+
+PACKAGES += mavg
+pkg_mavg_name = mavg
+pkg_mavg_description = Erlang :: Exponential moving average library
+pkg_mavg_homepage = https://github.com/EchoTeam/mavg
+pkg_mavg_fetch = git
+pkg_mavg_repo = https://github.com/EchoTeam/mavg
+pkg_mavg_commit = master
+
+PACKAGES += mc_erl
+pkg_mc_erl_name = mc_erl
+pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang.
+pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl
+pkg_mc_erl_fetch = git
+pkg_mc_erl_repo = https://github.com/clonejo/mc-erl
+pkg_mc_erl_commit = master
+
+PACKAGES += mcd
+pkg_mcd_name = mcd
+pkg_mcd_description = Fast memcached protocol client in pure Erlang
+pkg_mcd_homepage = https://github.com/EchoTeam/mcd
+pkg_mcd_fetch = git
+pkg_mcd_repo = https://github.com/EchoTeam/mcd
+pkg_mcd_commit = master
+
+PACKAGES += mcerlang
+pkg_mcerlang_name = mcerlang
+pkg_mcerlang_description = The McErlang model checker for Erlang
+pkg_mcerlang_homepage = https://github.com/fredlund/McErlang
+pkg_mcerlang_fetch = git
+pkg_mcerlang_repo = https://github.com/fredlund/McErlang
+pkg_mcerlang_commit = master
+
+PACKAGES += meck
+pkg_meck_name = meck
+pkg_meck_description = A mocking library for Erlang
+pkg_meck_homepage = https://github.com/eproxus/meck
+pkg_meck_fetch = git
+pkg_meck_repo = https://github.com/eproxus/meck
+pkg_meck_commit = master
+
+PACKAGES += mekao
+pkg_mekao_name = mekao
+pkg_mekao_description = SQL constructor
+pkg_mekao_homepage = https://github.com/ddosia/mekao
+pkg_mekao_fetch = git
+pkg_mekao_repo = https://github.com/ddosia/mekao
+pkg_mekao_commit = master
+
+PACKAGES += memo
+pkg_memo_name = memo
+pkg_memo_description = Erlang memoization server
+pkg_memo_homepage = https://github.com/tuncer/memo
+pkg_memo_fetch = git
+pkg_memo_repo = https://github.com/tuncer/memo
+pkg_memo_commit = master
+
+PACKAGES += merge_index
+pkg_merge_index_name = merge_index
+pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop).
+pkg_merge_index_homepage = https://github.com/basho/merge_index
+pkg_merge_index_fetch = git
+pkg_merge_index_repo = https://github.com/basho/merge_index
+pkg_merge_index_commit = master
+
+PACKAGES += merl
+pkg_merl_name = merl
+pkg_merl_description = Metaprogramming in Erlang
+pkg_merl_homepage = https://github.com/richcarl/merl
+pkg_merl_fetch = git
+pkg_merl_repo = https://github.com/richcarl/merl
+pkg_merl_commit = master
+
+PACKAGES += mimerl
+pkg_mimerl_name = mimerl
+pkg_mimerl_description = library to handle mimetypes
+pkg_mimerl_homepage = https://github.com/benoitc/mimerl
+pkg_mimerl_fetch = git
+pkg_mimerl_repo = https://github.com/benoitc/mimerl
+pkg_mimerl_commit = master
+
+PACKAGES += mimetypes
+pkg_mimetypes_name = mimetypes
+pkg_mimetypes_description = Erlang MIME types library
+pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_fetch = git
+pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes
+pkg_mimetypes_commit = master
+
+PACKAGES += mixer
+pkg_mixer_name = mixer
+pkg_mixer_description = Mix in functions from other modules
+pkg_mixer_homepage = https://github.com/chef/mixer
+pkg_mixer_fetch = git
+pkg_mixer_repo = https://github.com/chef/mixer
+pkg_mixer_commit = master
+
+PACKAGES += mochiweb
+pkg_mochiweb_name = mochiweb
+pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers.
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb
+pkg_mochiweb_fetch = git
+pkg_mochiweb_repo = https://github.com/mochi/mochiweb
+pkg_mochiweb_commit = master
+
+PACKAGES += mochiweb_xpath
+pkg_mochiweb_xpath_name = mochiweb_xpath
+pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser
+pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_fetch = git
+pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath
+pkg_mochiweb_xpath_commit = master
+
+PACKAGES += mockgyver
+pkg_mockgyver_name = mockgyver
+pkg_mockgyver_description = A mocking library for Erlang
+pkg_mockgyver_homepage = https://github.com/klajo/mockgyver
+pkg_mockgyver_fetch = git
+pkg_mockgyver_repo = https://github.com/klajo/mockgyver
+pkg_mockgyver_commit = master
+
+PACKAGES += modlib
+pkg_modlib_name = modlib
+pkg_modlib_description = Web framework based on Erlang's inets httpd
+pkg_modlib_homepage = https://github.com/gar1t/modlib
+pkg_modlib_fetch = git
+pkg_modlib_repo = https://github.com/gar1t/modlib
+pkg_modlib_commit = master
+
+PACKAGES += mongodb
+pkg_mongodb_name = mongodb
+pkg_mongodb_description = MongoDB driver for Erlang
+pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_fetch = git
+pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang
+pkg_mongodb_commit = master
+
+PACKAGES += mongooseim
+pkg_mongooseim_name = mongooseim
+pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions
+pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform
+pkg_mongooseim_fetch = git
+pkg_mongooseim_repo = https://github.com/esl/MongooseIM
+pkg_mongooseim_commit = master
+
+PACKAGES += moyo
+pkg_moyo_name = moyo
+pkg_moyo_description = Erlang utility functions library
+pkg_moyo_homepage = https://github.com/dwango/moyo
+pkg_moyo_fetch = git
+pkg_moyo_repo = https://github.com/dwango/moyo
+pkg_moyo_commit = master
+
+PACKAGES += msgpack
+pkg_msgpack_name = msgpack
+pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang
+pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_fetch = git
+pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang
+pkg_msgpack_commit = master
+
+PACKAGES += mu2
+pkg_mu2_name = mu2
+pkg_mu2_description = Erlang mutation testing tool
+pkg_mu2_homepage = https://github.com/ramsay-t/mu2
+pkg_mu2_fetch = git
+pkg_mu2_repo = https://github.com/ramsay-t/mu2
+pkg_mu2_commit = master
+
+PACKAGES += mustache
+pkg_mustache_name = mustache
+pkg_mustache_description = Mustache template engine for Erlang.
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl
+pkg_mustache_fetch = git
+pkg_mustache_repo = https://github.com/mojombo/mustache.erl
+pkg_mustache_commit = master
+
+PACKAGES += myproto
+pkg_myproto_name = myproto
+pkg_myproto_description = MySQL Server Protocol in Erlang
+pkg_myproto_homepage = https://github.com/altenwald/myproto
+pkg_myproto_fetch = git
+pkg_myproto_repo = https://github.com/altenwald/myproto
+pkg_myproto_commit = master
+
+PACKAGES += mysql
+pkg_mysql_name = mysql
+pkg_mysql_description = MySQL client library for Erlang/OTP
+pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_fetch = git
+pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp
+pkg_mysql_commit = 1.5.1
+
+PACKAGES += n2o
+pkg_n2o_name = n2o
+pkg_n2o_description = WebSocket Application Server
+pkg_n2o_homepage = https://github.com/5HT/n2o
+pkg_n2o_fetch = git
+pkg_n2o_repo = https://github.com/5HT/n2o
+pkg_n2o_commit = master
+
+PACKAGES += nat_upnp
+pkg_nat_upnp_name = nat_upnp
+pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD
+pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_fetch = git
+pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp
+pkg_nat_upnp_commit = master
+
+PACKAGES += neo4j
+pkg_neo4j_name = neo4j
+pkg_neo4j_description = Erlang client library for Neo4J.
+pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_fetch = git
+pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang
+pkg_neo4j_commit = master
+
+PACKAGES += neotoma
+pkg_neotoma_name = neotoma
+pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars.
+pkg_neotoma_homepage = https://github.com/seancribbs/neotoma
+pkg_neotoma_fetch = git
+pkg_neotoma_repo = https://github.com/seancribbs/neotoma
+pkg_neotoma_commit = master
+
+PACKAGES += newrelic
+pkg_newrelic_name = newrelic
+pkg_newrelic_description = Erlang library for sending metrics to New Relic
+pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_fetch = git
+pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang
+pkg_newrelic_commit = master
+
+PACKAGES += nifty
+pkg_nifty_name = nifty
+pkg_nifty_description = Erlang NIF wrapper generator
+pkg_nifty_homepage = https://github.com/parapluu/nifty
+pkg_nifty_fetch = git
+pkg_nifty_repo = https://github.com/parapluu/nifty
+pkg_nifty_commit = master
+
+PACKAGES += nitrogen_core
+pkg_nitrogen_core_name = nitrogen_core
+pkg_nitrogen_core_description = The core Nitrogen library.
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/
+pkg_nitrogen_core_fetch = git
+pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core
+pkg_nitrogen_core_commit = master
+
+PACKAGES += nkbase
+pkg_nkbase_name = nkbase
+pkg_nkbase_description = NkBASE distributed database
+pkg_nkbase_homepage = https://github.com/Nekso/nkbase
+pkg_nkbase_fetch = git
+pkg_nkbase_repo = https://github.com/Nekso/nkbase
+pkg_nkbase_commit = develop
+
+PACKAGES += nkdocker
+pkg_nkdocker_name = nkdocker
+pkg_nkdocker_description = Erlang Docker client
+pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker
+pkg_nkdocker_fetch = git
+pkg_nkdocker_repo = https://github.com/Nekso/nkdocker
+pkg_nkdocker_commit = master
+
+PACKAGES += nkpacket
+pkg_nkpacket_name = nkpacket
+pkg_nkpacket_description = Generic Erlang transport layer
+pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket
+pkg_nkpacket_fetch = git
+pkg_nkpacket_repo = https://github.com/Nekso/nkpacket
+pkg_nkpacket_commit = master
+
+PACKAGES += nksip
+pkg_nksip_name = nksip
+pkg_nksip_description = Erlang SIP application server
+pkg_nksip_homepage = https://github.com/kalta/nksip
+pkg_nksip_fetch = git
+pkg_nksip_repo = https://github.com/kalta/nksip
+pkg_nksip_commit = master
+
+PACKAGES += nodefinder
+pkg_nodefinder_name = nodefinder
+pkg_nodefinder_description = automatic node discovery via UDP multicast
+pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder
+pkg_nodefinder_fetch = git
+pkg_nodefinder_repo = https://github.com/okeuday/nodefinder
+pkg_nodefinder_commit = master
+
+PACKAGES += nprocreg
+pkg_nprocreg_name = nprocreg
+pkg_nprocreg_description = Minimal Distributed Erlang Process Registry
+pkg_nprocreg_homepage = http://nitrogenproject.com/
+pkg_nprocreg_fetch = git
+pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg
+pkg_nprocreg_commit = master
+
+PACKAGES += oauth
+pkg_oauth_name = oauth
+pkg_oauth_description = An Erlang OAuth 1.0 implementation
+pkg_oauth_homepage = https://github.com/tim/erlang-oauth
+pkg_oauth_fetch = git
+pkg_oauth_repo = https://github.com/tim/erlang-oauth
+pkg_oauth_commit = master
+
+PACKAGES += oauth2
+pkg_oauth2_name = oauth2
+pkg_oauth2_description = Erlang OAuth2 implementation
+pkg_oauth2_homepage = https://github.com/kivra/oauth2
+pkg_oauth2_fetch = git
+pkg_oauth2_repo = https://github.com/kivra/oauth2
+pkg_oauth2_commit = master
+
+PACKAGES += observer_cli
+pkg_observer_cli_name = observer_cli
+pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line
+pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli
+pkg_observer_cli_fetch = git
+pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli
+pkg_observer_cli_commit = master
+
+PACKAGES += octopus
+pkg_octopus_name = octopus
+pkg_octopus_description = Small and flexible pool manager written in Erlang
+pkg_octopus_homepage = https://github.com/erlangbureau/octopus
+pkg_octopus_fetch = git
+pkg_octopus_repo = https://github.com/erlangbureau/octopus
+pkg_octopus_commit = master
+
+PACKAGES += of_protocol
+pkg_of_protocol_name = of_protocol
+pkg_of_protocol_description = OpenFlow Protocol Library for Erlang
+pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_fetch = git
+pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol
+pkg_of_protocol_commit = master
+
+PACKAGES += opencouch
+pkg_opencouch_name = couch
+pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch
+pkg_opencouch_fetch = git
+pkg_opencouch_repo = https://github.com/benoitc/opencouch
+pkg_opencouch_commit = master
+
+PACKAGES += openflow
+pkg_openflow_name = openflow
+pkg_openflow_description = An OpenFlow controller written in pure erlang
+pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_fetch = git
+pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow
+pkg_openflow_commit = master
+
+PACKAGES += openid
+pkg_openid_name = openid
+pkg_openid_description = Erlang OpenID
+pkg_openid_homepage = https://github.com/brendonh/erl_openid
+pkg_openid_fetch = git
+pkg_openid_repo = https://github.com/brendonh/erl_openid
+pkg_openid_commit = master
+
+PACKAGES += openpoker
+pkg_openpoker_name = openpoker
+pkg_openpoker_description = Genesis Texas hold'em Game Server
+pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker
+pkg_openpoker_fetch = git
+pkg_openpoker_repo = https://github.com/hpyhacking/openpoker
+pkg_openpoker_commit = master
+
+PACKAGES += otpbp
+pkg_otpbp_name = otpbp
+pkg_otpbp_description = Parse transformer for use new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19)
+pkg_otpbp_homepage = https://github.com/Ledest/otpbp
+pkg_otpbp_fetch = git
+pkg_otpbp_repo = https://github.com/Ledest/otpbp
+pkg_otpbp_commit = master
+
+PACKAGES += pal
+pkg_pal_name = pal
+pkg_pal_description = Pragmatic Authentication Library
+pkg_pal_homepage = https://github.com/manifest/pal
+pkg_pal_fetch = git
+pkg_pal_repo = https://github.com/manifest/pal
+pkg_pal_commit = master
+
+PACKAGES += parse_trans
+pkg_parse_trans_name = parse_trans
+pkg_parse_trans_description = Parse transform utilities for Erlang
+pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans
+pkg_parse_trans_fetch = git
+pkg_parse_trans_repo = https://github.com/uwiger/parse_trans
+pkg_parse_trans_commit = master
+
+PACKAGES += parsexml
+pkg_parsexml_name = parsexml
+pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API
+pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml
+pkg_parsexml_fetch = git
+pkg_parsexml_repo = https://github.com/maxlapshin/parsexml
+pkg_parsexml_commit = master
+
+PACKAGES += partisan
+pkg_partisan_name = partisan
+pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir.
+pkg_partisan_homepage = http://partisan.cloud
+pkg_partisan_fetch = git
+pkg_partisan_repo = https://github.com/lasp-lang/partisan
+pkg_partisan_commit = master
+
+PACKAGES += pegjs
+pkg_pegjs_name = pegjs
+pkg_pegjs_description = An implementation of PEG.js grammar for Erlang.
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs
+pkg_pegjs_fetch = git
+pkg_pegjs_repo = https://github.com/dmitriid/pegjs
+pkg_pegjs_commit = master
+
+PACKAGES += percept2
+pkg_percept2_name = percept2
+pkg_percept2_description = Concurrent profiling tool for Erlang
+pkg_percept2_homepage = https://github.com/huiqing/percept2
+pkg_percept2_fetch = git
+pkg_percept2_repo = https://github.com/huiqing/percept2
+pkg_percept2_commit = master
+
+PACKAGES += pgo
+pkg_pgo_name = pgo
+pkg_pgo_description = Erlang Postgres client and connection pool
+pkg_pgo_homepage = https://github.com/erleans/pgo.git
+pkg_pgo_fetch = git
+pkg_pgo_repo = https://github.com/erleans/pgo.git
+pkg_pgo_commit = master
+
+PACKAGES += pgsql
+pkg_pgsql_name = pgsql
+pkg_pgsql_description = Erlang PostgreSQL driver
+pkg_pgsql_homepage = https://github.com/semiocast/pgsql
+pkg_pgsql_fetch = git
+pkg_pgsql_repo = https://github.com/semiocast/pgsql
+pkg_pgsql_commit = master
+
+PACKAGES += pkgx
+pkg_pkgx_name = pkgx
+pkg_pkgx_description = Build .deb packages from Erlang releases
+pkg_pkgx_homepage = https://github.com/arjan/pkgx
+pkg_pkgx_fetch = git
+pkg_pkgx_repo = https://github.com/arjan/pkgx
+pkg_pkgx_commit = master
+
+PACKAGES += pkt
+pkg_pkt_name = pkt
+pkg_pkt_description = Erlang network protocol library
+pkg_pkt_homepage = https://github.com/msantos/pkt
+pkg_pkt_fetch = git
+pkg_pkt_repo = https://github.com/msantos/pkt
+pkg_pkt_commit = master
+
+PACKAGES += plain_fsm
+pkg_plain_fsm_name = plain_fsm
+pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs.
+pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_fetch = git
+pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm
+pkg_plain_fsm_commit = master
+
+PACKAGES += plumtree
+pkg_plumtree_name = plumtree
+pkg_plumtree_description = Epidemic Broadcast Trees
+pkg_plumtree_homepage = https://github.com/helium/plumtree
+pkg_plumtree_fetch = git
+pkg_plumtree_repo = https://github.com/helium/plumtree
+pkg_plumtree_commit = master
+
+PACKAGES += pmod_transform
+pkg_pmod_transform_name = pmod_transform
+pkg_pmod_transform_description = Parse transform for parameterized modules
+pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_fetch = git
+pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform
+pkg_pmod_transform_commit = master
+
+PACKAGES += pobox
+pkg_pobox_name = pobox
+pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang
+pkg_pobox_homepage = https://github.com/ferd/pobox
+pkg_pobox_fetch = git
+pkg_pobox_repo = https://github.com/ferd/pobox
+pkg_pobox_commit = master
+
+PACKAGES += ponos
+pkg_ponos_name = ponos
+pkg_ponos_description = ponos is a simple yet powerful load generator written in Erlang
+pkg_ponos_homepage = https://github.com/klarna/ponos
+pkg_ponos_fetch = git
+pkg_ponos_repo = https://github.com/klarna/ponos
+pkg_ponos_commit = master
+
+PACKAGES += poolboy
+pkg_poolboy_name = poolboy
+pkg_poolboy_description = A hunky Erlang worker pool factory
+pkg_poolboy_homepage = https://github.com/devinus/poolboy
+pkg_poolboy_fetch = git
+pkg_poolboy_repo = https://github.com/devinus/poolboy
+pkg_poolboy_commit = master
+
+PACKAGES += pooler
+pkg_pooler_name = pooler
+pkg_pooler_description = An OTP Process Pool Application
+pkg_pooler_homepage = https://github.com/seth/pooler
+pkg_pooler_fetch = git
+pkg_pooler_repo = https://github.com/seth/pooler
+pkg_pooler_commit = master
+
+PACKAGES += pqueue
+pkg_pqueue_name = pqueue
+pkg_pqueue_description = Erlang Priority Queues
+pkg_pqueue_homepage = https://github.com/okeuday/pqueue
+pkg_pqueue_fetch = git
+pkg_pqueue_repo = https://github.com/okeuday/pqueue
+pkg_pqueue_commit = master
+
+PACKAGES += procket
+pkg_procket_name = procket
+pkg_procket_description = Erlang interface to low level socket operations
+pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket
+pkg_procket_fetch = git
+pkg_procket_repo = https://github.com/msantos/procket
+pkg_procket_commit = master
+
+PACKAGES += prometheus
+pkg_prometheus_name = prometheus
+pkg_prometheus_description = Prometheus.io client in Erlang
+pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_fetch = git
+pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl
+pkg_prometheus_commit = master
+
+PACKAGES += prop
+pkg_prop_name = prop
+pkg_prop_description = An Erlang code scaffolding and generator system.
+pkg_prop_homepage = https://github.com/nuex/prop
+pkg_prop_fetch = git
+pkg_prop_repo = https://github.com/nuex/prop
+pkg_prop_commit = master
+
+PACKAGES += proper
+pkg_proper_name = proper
+pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang.
+pkg_proper_homepage = http://proper.softlab.ntua.gr
+pkg_proper_fetch = git
+pkg_proper_repo = https://github.com/manopapad/proper
+pkg_proper_commit = master
+
+PACKAGES += props
+pkg_props_name = props
+pkg_props_description = Property structure library
+pkg_props_homepage = https://github.com/greyarea/props
+pkg_props_fetch = git
+pkg_props_repo = https://github.com/greyarea/props
+pkg_props_commit = master
+
+PACKAGES += protobuffs
+pkg_protobuffs_name = protobuffs
+pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs.
+pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_fetch = git
+pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs
+pkg_protobuffs_commit = master
+
+PACKAGES += psycho
+pkg_psycho_name = psycho
+pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware.
+pkg_psycho_homepage = https://github.com/gar1t/psycho
+pkg_psycho_fetch = git
+pkg_psycho_repo = https://github.com/gar1t/psycho
+pkg_psycho_commit = master
+
+PACKAGES += purity
+pkg_purity_name = purity
+pkg_purity_description = A side-effect analyzer for Erlang
+pkg_purity_homepage = https://github.com/mpitid/purity
+pkg_purity_fetch = git
+pkg_purity_repo = https://github.com/mpitid/purity
+pkg_purity_commit = master
+
+PACKAGES += push_service
+pkg_push_service_name = push_service
+pkg_push_service_description = Push service
+pkg_push_service_homepage = https://github.com/hairyhum/push_service
+pkg_push_service_fetch = git
+pkg_push_service_repo = https://github.com/hairyhum/push_service
+pkg_push_service_commit = master
+
+PACKAGES += qdate
+pkg_qdate_name = qdate
+pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang.
+pkg_qdate_homepage = https://github.com/choptastic/qdate
+pkg_qdate_fetch = git
+pkg_qdate_repo = https://github.com/choptastic/qdate
+pkg_qdate_commit = master
+
+PACKAGES += qrcode
+pkg_qrcode_name = qrcode
+pkg_qrcode_description = QR Code encoder in Erlang
+pkg_qrcode_homepage = https://github.com/komone/qrcode
+pkg_qrcode_fetch = git
+pkg_qrcode_repo = https://github.com/komone/qrcode
+pkg_qrcode_commit = master
+
+PACKAGES += quest
+pkg_quest_name = quest
+pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang.
+pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest
+pkg_quest_fetch = git
+pkg_quest_repo = https://github.com/eriksoe/ErlangQuest
+pkg_quest_commit = master
+
+PACKAGES += quickrand
+pkg_quickrand_name = quickrand
+pkg_quickrand_description = Quick Erlang Random Number Generation
+pkg_quickrand_homepage = https://github.com/okeuday/quickrand
+pkg_quickrand_fetch = git
+pkg_quickrand_repo = https://github.com/okeuday/quickrand
+pkg_quickrand_commit = master
+
+PACKAGES += rabbit
+pkg_rabbit_name = rabbit
+pkg_rabbit_description = RabbitMQ Server
+pkg_rabbit_homepage = https://www.rabbitmq.com/
+pkg_rabbit_fetch = git
+pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git
+pkg_rabbit_commit = master
+
+PACKAGES += rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak
+pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak
+pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_fetch = git
+pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange
+pkg_rabbit_exchange_type_riak_commit = master
+
+PACKAGES += rack
+pkg_rack_name = rack
+pkg_rack_description = Rack handler for erlang
+pkg_rack_homepage = https://github.com/erlyvideo/rack
+pkg_rack_fetch = git
+pkg_rack_repo = https://github.com/erlyvideo/rack
+pkg_rack_commit = master
+
+PACKAGES += radierl
+pkg_radierl_name = radierl
+pkg_radierl_description = RADIUS protocol stack implemented in Erlang.
+pkg_radierl_homepage = https://github.com/vances/radierl
+pkg_radierl_fetch = git
+pkg_radierl_repo = https://github.com/vances/radierl
+pkg_radierl_commit = master
+
+PACKAGES += rafter
+pkg_rafter_name = rafter
+pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol
+pkg_rafter_homepage = https://github.com/andrewjstone/rafter
+pkg_rafter_fetch = git
+pkg_rafter_repo = https://github.com/andrewjstone/rafter
+pkg_rafter_commit = master
+
+PACKAGES += ranch
+pkg_ranch_name = ranch
+pkg_ranch_description = Socket acceptor pool for TCP protocols.
+pkg_ranch_homepage = http://ninenines.eu
+pkg_ranch_fetch = git
+pkg_ranch_repo = https://github.com/ninenines/ranch
+pkg_ranch_commit = 1.2.1
+
+PACKAGES += rbeacon
+pkg_rbeacon_name = rbeacon
+pkg_rbeacon_description = LAN discovery and presence in Erlang.
+pkg_rbeacon_homepage = https://github.com/refuge/rbeacon
+pkg_rbeacon_fetch = git
+pkg_rbeacon_repo = https://github.com/refuge/rbeacon
+pkg_rbeacon_commit = master
+
+PACKAGES += rebar
+pkg_rebar_name = rebar
+pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases.
+pkg_rebar_homepage = http://www.rebar3.org
+pkg_rebar_fetch = git
+pkg_rebar_repo = https://github.com/rebar/rebar3
+pkg_rebar_commit = master
+
+PACKAGES += rebus
+pkg_rebus_name = rebus
+pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in- and for Erlang.
+pkg_rebus_homepage = https://github.com/olle/rebus
+pkg_rebus_fetch = git
+pkg_rebus_repo = https://github.com/olle/rebus
+pkg_rebus_commit = master
+
+PACKAGES += rec2json
+pkg_rec2json_name = rec2json
+pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily.
+pkg_rec2json_homepage = https://github.com/lordnull/rec2json
+pkg_rec2json_fetch = git
+pkg_rec2json_repo = https://github.com/lordnull/rec2json
+pkg_rec2json_commit = master
+
+PACKAGES += recon
+pkg_recon_name = recon
+pkg_recon_description = Collection of functions and scripts to debug Erlang in production.
+pkg_recon_homepage = https://github.com/ferd/recon
+pkg_recon_fetch = git
+pkg_recon_repo = https://github.com/ferd/recon
+pkg_recon_commit = master
+
+PACKAGES += record_info
+pkg_record_info_name = record_info
+pkg_record_info_description = Convert between record and proplist
+pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_fetch = git
+pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info
+pkg_record_info_commit = master
+
+PACKAGES += redgrid
+pkg_redgrid_name = redgrid
+pkg_redgrid_description = automatic Erlang node discovery via redis
+pkg_redgrid_homepage = https://github.com/jkvor/redgrid
+pkg_redgrid_fetch = git
+pkg_redgrid_repo = https://github.com/jkvor/redgrid
+pkg_redgrid_commit = master
+
+PACKAGES += redo
+pkg_redo_name = redo
+pkg_redo_description = pipelined erlang redis client
+pkg_redo_homepage = https://github.com/jkvor/redo
+pkg_redo_fetch = git
+pkg_redo_repo = https://github.com/jkvor/redo
+pkg_redo_commit = master
+
+PACKAGES += reload_mk
+pkg_reload_mk_name = reload_mk
+pkg_reload_mk_description = Live reload plugin for erlang.mk.
+pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk
+pkg_reload_mk_fetch = git
+pkg_reload_mk_repo = https://github.com/bullno1/reload.mk
+pkg_reload_mk_commit = master
+
+PACKAGES += reltool_util
+pkg_reltool_util_name = reltool_util
+pkg_reltool_util_description = Erlang reltool utility functionality application
+pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util
+pkg_reltool_util_fetch = git
+pkg_reltool_util_repo = https://github.com/okeuday/reltool_util
+pkg_reltool_util_commit = master
+
+PACKAGES += relx
+pkg_relx_name = relx
+pkg_relx_description = Sane, simple release creation for Erlang
+pkg_relx_homepage = https://github.com/erlware/relx
+pkg_relx_fetch = git
+pkg_relx_repo = https://github.com/erlware/relx
+pkg_relx_commit = master
+
+PACKAGES += resource_discovery
+pkg_resource_discovery_name = resource_discovery
+pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster.
+pkg_resource_discovery_homepage = http://erlware.org/
+pkg_resource_discovery_fetch = git
+pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery
+pkg_resource_discovery_commit = master
+
+PACKAGES += restc
+pkg_restc_name = restc
+pkg_restc_description = Erlang Rest Client
+pkg_restc_homepage = https://github.com/kivra/restclient
+pkg_restc_fetch = git
+pkg_restc_repo = https://github.com/kivra/restclient
+pkg_restc_commit = master
+
+PACKAGES += rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc
+pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation.
+pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_fetch = git
+pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627
+pkg_rfc4627_jsonrpc_commit = master
+
+PACKAGES += riak_control
+pkg_riak_control_name = riak_control
+pkg_riak_control_description = Webmachine-based administration interface for Riak.
+pkg_riak_control_homepage = https://github.com/basho/riak_control
+pkg_riak_control_fetch = git
+pkg_riak_control_repo = https://github.com/basho/riak_control
+pkg_riak_control_commit = master
+
+PACKAGES += riak_core
+pkg_riak_core_name = riak_core
+pkg_riak_core_description = Distributed systems infrastructure used by Riak.
+pkg_riak_core_homepage = https://github.com/basho/riak_core
+pkg_riak_core_fetch = git
+pkg_riak_core_repo = https://github.com/basho/riak_core
+pkg_riak_core_commit = master
+
+PACKAGES += riak_dt
+pkg_riak_dt_name = riak_dt
+pkg_riak_dt_description = Convergent replicated datatypes in Erlang
+pkg_riak_dt_homepage = https://github.com/basho/riak_dt
+pkg_riak_dt_fetch = git
+pkg_riak_dt_repo = https://github.com/basho/riak_dt
+pkg_riak_dt_commit = master
+
+PACKAGES += riak_ensemble
+pkg_riak_ensemble_name = riak_ensemble
+pkg_riak_ensemble_description = Multi-Paxos framework in Erlang
+pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_fetch = git
+pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble
+pkg_riak_ensemble_commit = master
+
+PACKAGES += riak_kv
+pkg_riak_kv_name = riak_kv
+pkg_riak_kv_description = Riak Key/Value Store
+pkg_riak_kv_homepage = https://github.com/basho/riak_kv
+pkg_riak_kv_fetch = git
+pkg_riak_kv_repo = https://github.com/basho/riak_kv
+pkg_riak_kv_commit = master
+
+PACKAGES += riak_pg
+pkg_riak_pg_name = riak_pg
+pkg_riak_pg_description = Distributed process groups with riak_core.
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_fetch = git
+pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg
+pkg_riak_pg_commit = master
+
+PACKAGES += riak_pipe
+pkg_riak_pipe_name = riak_pipe
+pkg_riak_pipe_description = Riak Pipelines
+pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe
+pkg_riak_pipe_fetch = git
+pkg_riak_pipe_repo = https://github.com/basho/riak_pipe
+pkg_riak_pipe_commit = master
+
+PACKAGES += riak_sysmon
+pkg_riak_sysmon_name = riak_sysmon
+pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages
+pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_fetch = git
+pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon
+pkg_riak_sysmon_commit = master
+
+PACKAGES += riak_test
+pkg_riak_test_name = riak_test
+pkg_riak_test_description = I'm in your cluster, testing your riaks
+pkg_riak_test_homepage = https://github.com/basho/riak_test
+pkg_riak_test_fetch = git
+pkg_riak_test_repo = https://github.com/basho/riak_test
+pkg_riak_test_commit = master
+
+PACKAGES += riakc
+pkg_riakc_name = riakc
+pkg_riakc_description = Erlang clients for Riak.
+pkg_riakc_homepage = https://github.com/basho/riak-erlang-client
+pkg_riakc_fetch = git
+pkg_riakc_repo = https://github.com/basho/riak-erlang-client
+pkg_riakc_commit = master
+
+PACKAGES += riakhttpc
+pkg_riakhttpc_name = riakhttpc
+pkg_riakhttpc_description = Riak Erlang client using the HTTP interface
+pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_fetch = git
+pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client
+pkg_riakhttpc_commit = master
+
+PACKAGES += riaknostic
+pkg_riaknostic_name = riaknostic
+pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap
+pkg_riaknostic_homepage = https://github.com/basho/riaknostic
+pkg_riaknostic_fetch = git
+pkg_riaknostic_repo = https://github.com/basho/riaknostic
+pkg_riaknostic_commit = master
+
+PACKAGES += riakpool
+pkg_riakpool_name = riakpool
+pkg_riakpool_description = erlang riak client pool
+pkg_riakpool_homepage = https://github.com/dweldon/riakpool
+pkg_riakpool_fetch = git
+pkg_riakpool_repo = https://github.com/dweldon/riakpool
+pkg_riakpool_commit = master
+
+PACKAGES += rivus_cep
+pkg_rivus_cep_name = rivus_cep
+pkg_rivus_cep_description = Complex event processing in Erlang
+pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_fetch = git
+pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep
+pkg_rivus_cep_commit = master
+
+PACKAGES += rlimit
+pkg_rlimit_name = rlimit
+pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent
+pkg_rlimit_homepage = https://github.com/jlouis/rlimit
+pkg_rlimit_fetch = git
+pkg_rlimit_repo = https://github.com/jlouis/rlimit
+pkg_rlimit_commit = master
+
+PACKAGES += rust_mk
+pkg_rust_mk_name = rust_mk
+pkg_rust_mk_description = Build Rust crates in an Erlang application
+pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_fetch = git
+pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk
+pkg_rust_mk_commit = master
+
+PACKAGES += safetyvalve
+pkg_safetyvalve_name = safetyvalve
+pkg_safetyvalve_description = A safety valve for your erlang node
+pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_fetch = git
+pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve
+pkg_safetyvalve_commit = master
+
+PACKAGES += seestar
+pkg_seestar_name = seestar
+pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol
+pkg_seestar_homepage = https://github.com/iamaleksey/seestar
+pkg_seestar_fetch = git
+pkg_seestar_repo = https://github.com/iamaleksey/seestar
+pkg_seestar_commit = master
+
+PACKAGES += service
+pkg_service_name = service
+pkg_service_description = A minimal Erlang behavior for creating CloudI internal services
+pkg_service_homepage = http://cloudi.org/
+pkg_service_fetch = git
+pkg_service_repo = https://github.com/CloudI/service
+pkg_service_commit = master
+
+PACKAGES += setup
+pkg_setup_name = setup
+pkg_setup_description = Generic setup utility for Erlang-based systems
+pkg_setup_homepage = https://github.com/uwiger/setup
+pkg_setup_fetch = git
+pkg_setup_repo = https://github.com/uwiger/setup
+pkg_setup_commit = master
+
+PACKAGES += sext
+pkg_sext_name = sext
+pkg_sext_description = Sortable Erlang Term Serialization
+pkg_sext_homepage = https://github.com/uwiger/sext
+pkg_sext_fetch = git
+pkg_sext_repo = https://github.com/uwiger/sext
+pkg_sext_commit = master
+
+PACKAGES += sfmt
+pkg_sfmt_name = sfmt
+pkg_sfmt_description = SFMT pseudo random number generator for Erlang.
+pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_fetch = git
+pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang
+pkg_sfmt_commit = master
+
+PACKAGES += sgte
+pkg_sgte_name = sgte
+pkg_sgte_description = A simple Erlang Template Engine
+pkg_sgte_homepage = https://github.com/filippo/sgte
+pkg_sgte_fetch = git
+pkg_sgte_repo = https://github.com/filippo/sgte
+pkg_sgte_commit = master
+
+PACKAGES += sheriff
+pkg_sheriff_name = sheriff
+pkg_sheriff_description = Parse transform for type based validation.
+pkg_sheriff_homepage = http://ninenines.eu
+pkg_sheriff_fetch = git
+pkg_sheriff_repo = https://github.com/extend/sheriff
+pkg_sheriff_commit = master
+
+PACKAGES += shotgun
+pkg_shotgun_name = shotgun
+pkg_shotgun_description = better than just a gun
+pkg_shotgun_homepage = https://github.com/inaka/shotgun
+pkg_shotgun_fetch = git
+pkg_shotgun_repo = https://github.com/inaka/shotgun
+pkg_shotgun_commit = master
+
+PACKAGES += sidejob
+pkg_sidejob_name = sidejob
+pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang
+pkg_sidejob_homepage = https://github.com/basho/sidejob
+pkg_sidejob_fetch = git
+pkg_sidejob_repo = https://github.com/basho/sidejob
+pkg_sidejob_commit = master
+
+PACKAGES += sieve
+pkg_sieve_name = sieve
+pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang
+pkg_sieve_homepage = https://github.com/benoitc/sieve
+pkg_sieve_fetch = git
+pkg_sieve_repo = https://github.com/benoitc/sieve
+pkg_sieve_commit = master
+
+PACKAGES += sighandler
+pkg_sighandler_name = sighandler
+pkg_sighandler_description = Handle UNIX signals in Erlang
+pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler
+pkg_sighandler_fetch = git
+pkg_sighandler_repo = https://github.com/jkingsbery/sighandler
+pkg_sighandler_commit = master
+
+PACKAGES += simhash
+pkg_simhash_name = simhash
+pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data.
+pkg_simhash_homepage = https://github.com/ferd/simhash
+pkg_simhash_fetch = git
+pkg_simhash_repo = https://github.com/ferd/simhash
+pkg_simhash_commit = master
+
+PACKAGES += simple_bridge
+pkg_simple_bridge_name = simple_bridge
+pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers.
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_fetch = git
+pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge
+pkg_simple_bridge_commit = master
+
+PACKAGES += simple_oauth2
+pkg_simple_oauth2_name = simple_oauth2
+pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured)
+pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_fetch = git
+pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2
+pkg_simple_oauth2_commit = master
+
+PACKAGES += skel
+pkg_skel_name = skel
+pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang
+pkg_skel_homepage = https://github.com/ParaPhrase/skel
+pkg_skel_fetch = git
+pkg_skel_repo = https://github.com/ParaPhrase/skel
+pkg_skel_commit = master
+
+PACKAGES += slack
+pkg_slack_name = slack
+pkg_slack_description = Minimal slack notification OTP library.
+pkg_slack_homepage = https://github.com/DonBranson/slack
+pkg_slack_fetch = git
+pkg_slack_repo = https://github.com/DonBranson/slack.git
+pkg_slack_commit = master
+
+PACKAGES += smother
+pkg_smother_name = smother
+pkg_smother_description = Extended code coverage metrics for Erlang.
+pkg_smother_homepage = https://ramsay-t.github.io/Smother/
+pkg_smother_fetch = git
+pkg_smother_repo = https://github.com/ramsay-t/Smother
+pkg_smother_commit = master
+
+PACKAGES += snappyer
+pkg_snappyer_name = snappyer
+pkg_snappyer_description = Snappy as nif for Erlang
+pkg_snappyer_homepage = https://github.com/zmstone/snappyer
+pkg_snappyer_fetch = git
+pkg_snappyer_repo = https://github.com/zmstone/snappyer.git
+pkg_snappyer_commit = master
+
+PACKAGES += social
+pkg_social_name = social
+pkg_social_description = Cowboy handler for social login via OAuth2 providers
+pkg_social_homepage = https://github.com/dvv/social
+pkg_social_fetch = git
+pkg_social_repo = https://github.com/dvv/social
+pkg_social_commit = master
+
+PACKAGES += spapi_router
+pkg_spapi_router_name = spapi_router
+pkg_spapi_router_description = Partially-connected Erlang clustering
+pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router
+pkg_spapi_router_fetch = git
+pkg_spapi_router_repo = https://github.com/spilgames/spapi-router
+pkg_spapi_router_commit = master
+
+PACKAGES += sqerl
+pkg_sqerl_name = sqerl
+pkg_sqerl_description = An Erlang-flavoured SQL DSL
+pkg_sqerl_homepage = https://github.com/hairyhum/sqerl
+pkg_sqerl_fetch = git
+pkg_sqerl_repo = https://github.com/hairyhum/sqerl
+pkg_sqerl_commit = master
+
+PACKAGES += srly
+pkg_srly_name = srly
+pkg_srly_description = Native Erlang Unix serial interface
+pkg_srly_homepage = https://github.com/msantos/srly
+pkg_srly_fetch = git
+pkg_srly_repo = https://github.com/msantos/srly
+pkg_srly_commit = master
+
+PACKAGES += sshrpc
+pkg_sshrpc_name = sshrpc
+pkg_sshrpc_description = Erlang SSH RPC module (experimental)
+pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_fetch = git
+pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc
+pkg_sshrpc_commit = master
+
+PACKAGES += stable
+pkg_stable_name = stable
+pkg_stable_description = Library of assorted helpers for Cowboy web server.
+pkg_stable_homepage = https://github.com/dvv/stable
+pkg_stable_fetch = git
+pkg_stable_repo = https://github.com/dvv/stable
+pkg_stable_commit = master
+
+PACKAGES += statebox
+pkg_statebox_name = statebox
+pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak.
+pkg_statebox_homepage = https://github.com/mochi/statebox
+pkg_statebox_fetch = git
+pkg_statebox_repo = https://github.com/mochi/statebox
+pkg_statebox_commit = master
+
+PACKAGES += statebox_riak
+pkg_statebox_riak_name = statebox_riak
+pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media.
+pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_fetch = git
+pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak
+pkg_statebox_riak_commit = master
+
+PACKAGES += statman
+pkg_statman_name = statman
+pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM
+pkg_statman_homepage = https://github.com/knutin/statman
+pkg_statman_fetch = git
+pkg_statman_repo = https://github.com/knutin/statman
+pkg_statman_commit = master
+
+PACKAGES += statsderl
+pkg_statsderl_name = statsderl
+pkg_statsderl_description = StatsD client (erlang)
+pkg_statsderl_homepage = https://github.com/lpgauth/statsderl
+pkg_statsderl_fetch = git
+pkg_statsderl_repo = https://github.com/lpgauth/statsderl
+pkg_statsderl_commit = master
+
+PACKAGES += stdinout_pool
+pkg_stdinout_pool_name = stdinout_pool
+pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication.
+pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_fetch = git
+pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool
+pkg_stdinout_pool_commit = master
+
+PACKAGES += stockdb
+pkg_stockdb_name = stockdb
+pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang
+pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb
+pkg_stockdb_fetch = git
+pkg_stockdb_repo = https://github.com/maxlapshin/stockdb
+pkg_stockdb_commit = master
+
+PACKAGES += stripe
+pkg_stripe_name = stripe
+pkg_stripe_description = Erlang interface to the stripe.com API
+pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang
+pkg_stripe_fetch = git
+pkg_stripe_repo = https://github.com/mattsta/stripe-erlang
+pkg_stripe_commit = v1
+
+PACKAGES += subproc
+pkg_subproc_name = subproc
+pkg_subproc_description = unix subprocess manager with {active,once|false} modes
+pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc
+pkg_subproc_fetch = git
+pkg_subproc_repo = https://github.com/dozzie/subproc
+pkg_subproc_commit = v0.1.0
+
+PACKAGES += supervisor3
+pkg_supervisor3_name = supervisor3
+pkg_supervisor3_description = OTP supervisor with additional strategies
+pkg_supervisor3_homepage = https://github.com/klarna/supervisor3
+pkg_supervisor3_fetch = git
+pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git
+pkg_supervisor3_commit = master
+
+PACKAGES += surrogate
+pkg_surrogate_name = surrogate
+pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes.
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate
+pkg_surrogate_fetch = git
+pkg_surrogate_repo = https://github.com/skruger/Surrogate
+pkg_surrogate_commit = master
+
+PACKAGES += swab
+pkg_swab_name = swab
+pkg_swab_description = General purpose buffer handling module
+pkg_swab_homepage = https://github.com/crownedgrouse/swab
+pkg_swab_fetch = git
+pkg_swab_repo = https://github.com/crownedgrouse/swab
+pkg_swab_commit = master
+
+PACKAGES += swarm
+pkg_swarm_name = swarm
+pkg_swarm_description = Fast and simple acceptor pool for Erlang
+pkg_swarm_homepage = https://github.com/jeremey/swarm
+pkg_swarm_fetch = git
+pkg_swarm_repo = https://github.com/jeremey/swarm
+pkg_swarm_commit = master
+
+PACKAGES += switchboard
+pkg_switchboard_name = switchboard
+pkg_switchboard_description = A framework for processing email using worker plugins.
+pkg_switchboard_homepage = https://github.com/thusfresh/switchboard
+pkg_switchboard_fetch = git
+pkg_switchboard_repo = https://github.com/thusfresh/switchboard
+pkg_switchboard_commit = master
+
+PACKAGES += syn
+pkg_syn_name = syn
+pkg_syn_description = A global Process Registry and Process Group manager for Erlang.
+pkg_syn_homepage = https://github.com/ostinelli/syn
+pkg_syn_fetch = git
+pkg_syn_repo = https://github.com/ostinelli/syn
+pkg_syn_commit = master
+
+PACKAGES += sync
+pkg_sync_name = sync
+pkg_sync_description = On-the-fly recompiling and reloading in Erlang.
+pkg_sync_homepage = https://github.com/rustyio/sync
+pkg_sync_fetch = git
+pkg_sync_repo = https://github.com/rustyio/sync
+pkg_sync_commit = master
+
+PACKAGES += syntaxerl
+pkg_syntaxerl_name = syntaxerl
+pkg_syntaxerl_description = Syntax checker for Erlang
+pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_fetch = git
+pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl
+pkg_syntaxerl_commit = master
+
+PACKAGES += syslog
+pkg_syslog_name = syslog
+pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3)
+pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_fetch = git
+pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog
+pkg_syslog_commit = master
+
+PACKAGES += taskforce
+pkg_taskforce_name = taskforce
+pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks.
+pkg_taskforce_homepage = https://github.com/g-andrade/taskforce
+pkg_taskforce_fetch = git
+pkg_taskforce_repo = https://github.com/g-andrade/taskforce
+pkg_taskforce_commit = master
+
+PACKAGES += tddreloader
+pkg_tddreloader_name = tddreloader
+pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes
+pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader
+pkg_tddreloader_fetch = git
+pkg_tddreloader_repo = https://github.com/version2beta/tddreloader
+pkg_tddreloader_commit = master
+
+PACKAGES += tempo
+pkg_tempo_name = tempo
+pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang.
+pkg_tempo_homepage = https://github.com/selectel/tempo
+pkg_tempo_fetch = git
+pkg_tempo_repo = https://github.com/selectel/tempo
+pkg_tempo_commit = master
+
+PACKAGES += ticktick
+pkg_ticktick_name = ticktick
+pkg_ticktick_description = Ticktick is an id generator for message service.
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick
+pkg_ticktick_fetch = git
+pkg_ticktick_repo = https://github.com/ericliang/ticktick
+pkg_ticktick_commit = master
+
+PACKAGES += tinymq
+pkg_tinymq_name = tinymq
+pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue
+pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_fetch = git
+pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq
+pkg_tinymq_commit = master
+
+PACKAGES += tinymt
+pkg_tinymt_name = tinymt
+pkg_tinymt_description = TinyMT pseudo random number generator for Erlang.
+pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_fetch = git
+pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang
+pkg_tinymt_commit = master
+
+PACKAGES += tirerl
+pkg_tirerl_name = tirerl
+pkg_tirerl_description = Erlang interface to Elastic Search
+pkg_tirerl_homepage = https://github.com/inaka/tirerl
+pkg_tirerl_fetch = git
+pkg_tirerl_repo = https://github.com/inaka/tirerl
+pkg_tirerl_commit = master
+
+PACKAGES += toml
+pkg_toml_name = toml
+pkg_toml_description = TOML (0.4.0) config parser
+pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML
+pkg_toml_fetch = git
+pkg_toml_repo = https://github.com/dozzie/toml
+pkg_toml_commit = v0.2.0
+
+PACKAGES += traffic_tools
+pkg_traffic_tools_name = traffic_tools
+pkg_traffic_tools_description = Simple traffic limiting library
+pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools
+pkg_traffic_tools_fetch = git
+pkg_traffic_tools_repo = https://github.com/systra/traffic_tools
+pkg_traffic_tools_commit = master
+
+PACKAGES += trails
+pkg_trails_name = trails
+pkg_trails_description = A couple of improvements over Cowboy Routes
+pkg_trails_homepage = http://inaka.github.io/cowboy-trails/
+pkg_trails_fetch = git
+pkg_trails_repo = https://github.com/inaka/cowboy-trails
+pkg_trails_commit = master
+
+PACKAGES += trane
+pkg_trane_name = trane
+pkg_trane_description = SAX style broken HTML parser in Erlang
+pkg_trane_homepage = https://github.com/massemanet/trane
+pkg_trane_fetch = git
+pkg_trane_repo = https://github.com/massemanet/trane
+pkg_trane_commit = master
+
+PACKAGES += transit
+pkg_transit_name = transit
+pkg_transit_description = transit format for erlang
+pkg_transit_homepage = https://github.com/isaiah/transit-erlang
+pkg_transit_fetch = git
+pkg_transit_repo = https://github.com/isaiah/transit-erlang
+pkg_transit_commit = master
+
+PACKAGES += trie
+pkg_trie_name = trie
+pkg_trie_description = Erlang Trie Implementation
+pkg_trie_homepage = https://github.com/okeuday/trie
+pkg_trie_fetch = git
+pkg_trie_repo = https://github.com/okeuday/trie
+pkg_trie_commit = master
+
+PACKAGES += triq
+pkg_triq_name = triq
+pkg_triq_description = Trifork QuickCheck
+pkg_triq_homepage = https://triq.gitlab.io
+pkg_triq_fetch = git
+pkg_triq_repo = https://gitlab.com/triq/triq.git
+pkg_triq_commit = master
+
+PACKAGES += tunctl
+pkg_tunctl_name = tunctl
+pkg_tunctl_description = Erlang TUN/TAP interface
+pkg_tunctl_homepage = https://github.com/msantos/tunctl
+pkg_tunctl_fetch = git
+pkg_tunctl_repo = https://github.com/msantos/tunctl
+pkg_tunctl_commit = master
+
+PACKAGES += twerl
+pkg_twerl_name = twerl
+pkg_twerl_description = Erlang client for the Twitter Streaming API
+pkg_twerl_homepage = https://github.com/lucaspiller/twerl
+pkg_twerl_fetch = git
+pkg_twerl_repo = https://github.com/lucaspiller/twerl
+pkg_twerl_commit = oauth
+
+PACKAGES += twitter_erlang
+pkg_twitter_erlang_name = twitter_erlang
+pkg_twitter_erlang_description = An Erlang twitter client
+pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_fetch = git
+pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter
+pkg_twitter_erlang_commit = master
+
+PACKAGES += ucol_nif
+pkg_ucol_nif_name = ucol_nif
+pkg_ucol_nif_description = ICU based collation Erlang module
+pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_fetch = git
+pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif
+pkg_ucol_nif_commit = master
+
+PACKAGES += unicorn
+pkg_unicorn_name = unicorn
+pkg_unicorn_description = Generic configuration server
+pkg_unicorn_homepage = https://github.com/shizzard/unicorn
+pkg_unicorn_fetch = git
+pkg_unicorn_repo = https://github.com/shizzard/unicorn
+pkg_unicorn_commit = master
+
+PACKAGES += unsplit
+pkg_unsplit_name = unsplit
+pkg_unsplit_description = Resolves conflicts in Mnesia after network splits
+pkg_unsplit_homepage = https://github.com/uwiger/unsplit
+pkg_unsplit_fetch = git
+pkg_unsplit_repo = https://github.com/uwiger/unsplit
+pkg_unsplit_commit = master
+
+PACKAGES += uuid
+pkg_uuid_name = uuid
+pkg_uuid_description = Erlang UUID Implementation
+pkg_uuid_homepage = https://github.com/okeuday/uuid
+pkg_uuid_fetch = git
+pkg_uuid_repo = https://github.com/okeuday/uuid
+pkg_uuid_commit = master
+
+PACKAGES += ux
+pkg_ux_name = ux
+pkg_ux_description = Unicode eXtention for Erlang (Strings, Collation)
+pkg_ux_homepage = https://github.com/erlang-unicode/ux
+pkg_ux_fetch = git
+pkg_ux_repo = https://github.com/erlang-unicode/ux
+pkg_ux_commit = master
+
+PACKAGES += vert
+pkg_vert_name = vert
+pkg_vert_description = Erlang binding to libvirt virtualization API
+pkg_vert_homepage = https://github.com/msantos/erlang-libvirt
+pkg_vert_fetch = git
+pkg_vert_repo = https://github.com/msantos/erlang-libvirt
+pkg_vert_commit = master
+
+PACKAGES += verx
+pkg_verx_name = verx
+pkg_verx_description = Erlang implementation of the libvirtd remote protocol
+pkg_verx_homepage = https://github.com/msantos/verx
+pkg_verx_fetch = git
+pkg_verx_repo = https://github.com/msantos/verx
+pkg_verx_commit = master
+
+PACKAGES += vmq_acl
+pkg_vmq_acl_name = vmq_acl
+pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_acl_homepage = https://verne.mq/
+pkg_vmq_acl_fetch = git
+pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl
+pkg_vmq_acl_commit = master
+
+PACKAGES += vmq_bridge
+pkg_vmq_bridge_name = vmq_bridge
+pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_bridge_homepage = https://verne.mq/
+pkg_vmq_bridge_fetch = git
+pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge
+pkg_vmq_bridge_commit = master
+
+PACKAGES += vmq_graphite
+pkg_vmq_graphite_name = vmq_graphite
+pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_graphite_homepage = https://verne.mq/
+pkg_vmq_graphite_fetch = git
+pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite
+pkg_vmq_graphite_commit = master
+
+PACKAGES += vmq_passwd
+pkg_vmq_passwd_name = vmq_passwd
+pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_passwd_homepage = https://verne.mq/
+pkg_vmq_passwd_fetch = git
+pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd
+pkg_vmq_passwd_commit = master
+
+PACKAGES += vmq_server
+pkg_vmq_server_name = vmq_server
+pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_server_homepage = https://verne.mq/
+pkg_vmq_server_fetch = git
+pkg_vmq_server_repo = https://github.com/erlio/vmq_server
+pkg_vmq_server_commit = master
+
+PACKAGES += vmq_snmp
+pkg_vmq_snmp_name = vmq_snmp
+pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_snmp_homepage = https://verne.mq/
+pkg_vmq_snmp_fetch = git
+pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp
+pkg_vmq_snmp_commit = master
+
+PACKAGES += vmq_systree
+pkg_vmq_systree_name = vmq_systree
+pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker
+pkg_vmq_systree_homepage = https://verne.mq/
+pkg_vmq_systree_fetch = git
+pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree
+pkg_vmq_systree_commit = master
+
+PACKAGES += vmstats
+pkg_vmstats_name = vmstats
+pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs.
+pkg_vmstats_homepage = https://github.com/ferd/vmstats
+pkg_vmstats_fetch = git
+pkg_vmstats_repo = https://github.com/ferd/vmstats
+pkg_vmstats_commit = master
+
+PACKAGES += walrus
+pkg_walrus_name = walrus
+pkg_walrus_description = Walrus - Mustache-like Templating
+pkg_walrus_homepage = https://github.com/devinus/walrus
+pkg_walrus_fetch = git
+pkg_walrus_repo = https://github.com/devinus/walrus
+pkg_walrus_commit = master
+
+PACKAGES += webmachine
+pkg_webmachine_name = webmachine
+pkg_webmachine_description = A REST-based system for building web applications.
+pkg_webmachine_homepage = https://github.com/basho/webmachine
+pkg_webmachine_fetch = git
+pkg_webmachine_repo = https://github.com/basho/webmachine
+pkg_webmachine_commit = master
+
+PACKAGES += websocket_client
+pkg_websocket_client_name = websocket_client
+pkg_websocket_client_description = Erlang websocket client (ws and wss supported)
+pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_fetch = git
+pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client
+pkg_websocket_client_commit = master
+
+PACKAGES += worker_pool
+pkg_worker_pool_name = worker_pool
+pkg_worker_pool_description = a simple erlang worker pool
+pkg_worker_pool_homepage = https://github.com/inaka/worker_pool
+pkg_worker_pool_fetch = git
+pkg_worker_pool_repo = https://github.com/inaka/worker_pool
+pkg_worker_pool_commit = master
+
+PACKAGES += wrangler
+pkg_wrangler_name = wrangler
+pkg_wrangler_description = Import of the Wrangler svn repository.
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html
+pkg_wrangler_fetch = git
+pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler
+pkg_wrangler_commit = master
+
+PACKAGES += wsock
+pkg_wsock_name = wsock
+pkg_wsock_description = Erlang library to build WebSocket clients and servers
+pkg_wsock_homepage = https://github.com/madtrick/wsock
+pkg_wsock_fetch = git
+pkg_wsock_repo = https://github.com/madtrick/wsock
+pkg_wsock_commit = master
+
+PACKAGES += xhttpc
+pkg_xhttpc_name = xhttpc
+pkg_xhttpc_description = Extensible HTTP Client for Erlang
+pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc
+pkg_xhttpc_fetch = git
+pkg_xhttpc_repo = https://github.com/seriyps/xhttpc
+pkg_xhttpc_commit = master
+
+PACKAGES += xref_runner
+pkg_xref_runner_name = xref_runner
+pkg_xref_runner_description = Erlang Xref Runner (inspired in rebar xref)
+pkg_xref_runner_homepage = https://github.com/inaka/xref_runner
+pkg_xref_runner_fetch = git
+pkg_xref_runner_repo = https://github.com/inaka/xref_runner
+pkg_xref_runner_commit = master
+
+PACKAGES += yamerl
+pkg_yamerl_name = yamerl
+pkg_yamerl_description = YAML 1.2 parser in pure Erlang
+pkg_yamerl_homepage = https://github.com/yakaz/yamerl
+pkg_yamerl_fetch = git
+pkg_yamerl_repo = https://github.com/yakaz/yamerl
+pkg_yamerl_commit = master
+
+PACKAGES += yamler
+pkg_yamler_name = yamler
+pkg_yamler_description = libyaml-based yaml loader for Erlang
+pkg_yamler_homepage = https://github.com/goertzenator/yamler
+pkg_yamler_fetch = git
+pkg_yamler_repo = https://github.com/goertzenator/yamler
+pkg_yamler_commit = master
+
+PACKAGES += yaws
+pkg_yaws_name = yaws
+pkg_yaws_description = Yaws webserver
+pkg_yaws_homepage = http://yaws.hyber.org
+pkg_yaws_fetch = git
+pkg_yaws_repo = https://github.com/klacke/yaws
+pkg_yaws_commit = master
+
+PACKAGES += zab_engine
+pkg_zab_engine_name = zab_engine
+pkg_zab_engine_description = zab propotocol implement by erlang
+pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_fetch = git
+pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine
+pkg_zab_engine_commit = master
+
+PACKAGES += zabbix_sender
+pkg_zabbix_sender_name = zabbix_sender
+pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang
+pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender
+pkg_zabbix_sender_fetch = git
+pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git
+pkg_zabbix_sender_commit = master
+
+PACKAGES += zeta
+pkg_zeta_name = zeta
+pkg_zeta_description = HTTP access log parser in Erlang
+pkg_zeta_homepage = https://github.com/s1n4/zeta
+pkg_zeta_fetch = git
+pkg_zeta_repo = https://github.com/s1n4/zeta
+pkg_zeta_commit = master
+
+PACKAGES += zippers
+pkg_zippers_name = zippers
+pkg_zippers_description = A library for functional zipper data structures in Erlang. Read more on zippers
+pkg_zippers_homepage = https://github.com/ferd/zippers
+pkg_zippers_fetch = git
+pkg_zippers_repo = https://github.com/ferd/zippers
+pkg_zippers_commit = master
+
+PACKAGES += zlists
+pkg_zlists_name = zlists
+pkg_zlists_description = Erlang lazy lists library.
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists
+pkg_zlists_fetch = git
+pkg_zlists_repo = https://github.com/vjache/erlang-zlists
+pkg_zlists_commit = master
+
+PACKAGES += zraft_lib
+pkg_zraft_lib_name = zraft_lib
+pkg_zraft_lib_description = Erlang raft consensus protocol implementation
+pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_fetch = git
+pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib
+pkg_zraft_lib_commit = master
+
+PACKAGES += zucchini
+pkg_zucchini_name = zucchini
+pkg_zucchini_description = An Erlang INI parser
+pkg_zucchini_homepage = https://github.com/devinus/zucchini
+pkg_zucchini_fetch = git
+pkg_zucchini_repo = https://github.com/devinus/zucchini
+pkg_zucchini_commit = master
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: search
+
+# pkg_print(1: package name): print one entry of the built-in package index
+# in human-readable form. The "Pkg name" line is emitted only when the
+# queried name differs from the entry's application name. The trailing ""
+# and the blank line kept inside the define separate consecutive entries
+# when the macro is expanded repeatedly in the recipe below.
+define pkg_print
+	$(verbose) printf "%s\n" \
+		$(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \
+		"App name: $(pkg_$(1)_name)" \
+		"Description: $(pkg_$(1)_description)" \
+		"Home page: $(pkg_$(1)_homepage)" \
+		"Fetch with: $(pkg_$(1)_fetch)" \
+		"Repository: $(pkg_$(1)_repo)" \
+		"Commit: $(pkg_$(1)_commit)" \
+		""
+
+endef
+
+# `make search q=<text>` filters the index with a case-insensitive substring
+# match against each package's name and description (core_lc lowercases);
+# plain `make search` prints every entry.
+search:
+ifdef q
+	$(foreach p,$(PACKAGES), \
+		$(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \
+			$(call pkg_print,$(p))))
+else
+	$(foreach p,$(PACKAGES),$(call pkg_print,$(p)))
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-deps clean-tmp-deps.log
+
+# Configuration.
+
+# OTP_DEPS was superseded by LOCAL_DEPS; warn users who still set it.
+ifdef OTP_DEPS
+$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.)
+endif
+
+# Deps listed here are excluded from ALL_DEPS_DIRS below (never fetched or
+# built). Exported so recursive $(MAKE) invocations see the same set.
+IGNORE_DEPS ?=
+export IGNORE_DEPS
+
+# Umbrella-project applications live under APPS_DIR; fetched dependencies
+# under DEPS_DIR. Both are exported for sub-makes.
+APPS_DIR ?= $(CURDIR)/apps
+export APPS_DIR
+
+DEPS_DIR ?= $(CURDIR)/deps
+export DEPS_DIR
+
+# Point rebar-based tooling at the same dependency directory.
+REBAR_DEPS_DIR = $(DEPS_DIR)
+export REBAR_DEPS_DIR
+
+# Pinned rebar checkout used by the dep autopatch machinery further below.
+REBAR_GIT ?= https://github.com/rebar/rebar
+REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01
+
+# External "early" plugins (see core/plugins.mk for regular plugins).
+# They both use the core_dep_plugin macro.
+
+# core_dep_plugin(1: plugin makefile path, 2: dep name): include a plugin
+# makefile shipped by a dependency. When the "dep" is the project itself,
+# include the file relative to the project (the $$ survives one expansion
+# so patsubst runs at eval time); otherwise include it from DEPS_DIR and
+# add a rule making the plugin file depend on the dep having been fetched.
+# `-include` tolerates the file not existing yet.
+define core_dep_plugin
+ifeq ($(2),$(PROJECT))
+-include $$(patsubst $(PROJECT)/%,%,$(1))
+else
+-include $(DEPS_DIR)/$(1)
+
+$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ;
+endif
+endef
+
+# Entries are either a bare dep name (loads <dep>/early-plugins.mk) or an
+# explicit "<dep>/path/to/file.mk" (the dep name is the first path segment).
+DEP_EARLY_PLUGINS ?=
+
+$(foreach p,$(DEP_EARLY_PLUGINS),\
+	$(eval $(if $(findstring /,$p),\
+		$(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+		$(call core_dep_plugin,$p/early-plugins.mk,$p))))
+
+# Query functions.
+
+# Each query_* function takes a dependency/package name and returns one
+# field of its specification, preferring a user-supplied dep_<name>
+# variable over the built-in pkg_<name>_* index. "-" means not applicable.
+
+# Fetch method resolution: word 1 of dep_<name> when a matching
+# dep_fetch_<method> exists; "legacy" inside an old-style dep; "fail"
+# when the method (or the package) is unknown.
+query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1)))
+_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail))
+_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail)
+
+# Application name: the dep name itself when user-defined, else the
+# index's name, else the queried name unchanged.
+query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1)))
+
+# Repository URL, dispatched per fetch method via query_repo_<method>.
+query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1)))
+_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1)))
+
+# For git, legacy git:// GitHub URLs are rewritten to https://.
+query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo))
+query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1)))
+query_repo_git-subfolder = $(call query_repo_git,$(1))
+query_repo_git-submodule = -
+query_repo_hg = $(call query_repo_default,$(1))
+query_repo_svn = $(call query_repo_default,$(1))
+query_repo_cp = $(call query_repo_default,$(1))
+query_repo_ln = $(call query_repo_default,$(1))
+query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1))
+query_repo_fail = -
+query_repo_legacy = -
+
+# Version/commit, dispatched per fetch method via query_version_<method>.
+query_version = $(call _qv,$(1),$(call query_fetch_method,$(1)))
+_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1)))
+
+# dep_<name>_commit overrides; hex deps keep the version in word 2 instead
+# of word 3 (no repo URL in their spec).
+query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_git = $(call query_version_default,$(1))
+query_version_git-subfolder = $(call query_version_git,$(1))
+query_version_git-submodule = -
+query_version_hg = $(call query_version_default,$(1))
+query_version_svn = -
+query_version_cp = -
+query_version_ln = -
+query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit)))
+query_version_fail = -
+query_version_legacy = -
+
+# Method-specific extra information (e.g. subfolder, hex package name).
+query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1)))
+_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-)
+
+query_extra_git = -
+query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-)
+query_extra_git-submodule = -
+query_extra_hg = -
+query_extra_svn = -
+query_extra_cp = -
+query_extra_ln = -
+query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-)
+query_extra_fail = -
+query_extra_legacy = -
+
+# Absolute on-disk location of a dependency.
+query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1)))
+
+# Deprecated legacy query functions.
+dep_fetch = $(call query_fetch_method,$(1))
+dep_name = $(call query_name,$(1))
+dep_repo = $(call query_repo_git,$(1))
+dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit)))
+
+# Directories of in-project applications named in LOCAL_DEPS (only those
+# that actually exist under APPS_DIR) and of all external deps to build
+# (BUILD_DEPS + DEPS minus IGNORE_DEPS, resolved to their app names).
+LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a)))
+ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep))))
+
+# When we are calling an app directly we don't want to include it here
+# otherwise it'll be treated both as an apps and a top-level project.
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d)))
+ifdef ROOT_DIR
+ifndef IS_APP
+ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS))
+endif
+endif
+
+# Add APPS_DIR and DEPS_DIR to ERL_LIBS (guarding against doing it twice)
+# so -include_lib and code loading can find apps and deps.
+ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),)
+ifeq ($(ERL_LIBS),)
+	ERL_LIBS = $(APPS_DIR):$(DEPS_DIR)
+else
+	ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR)
+endif
+endif
+export ERL_LIBS
+
+export NO_AUTOPATCH
+
+# Verbosity.
+
+# V=0 prints a terse one-line summary per dep; V=2 echoes shell commands.
+dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))";
+dep_verbose_2 = set -x;
+dep_verbose = $(dep_verbose_$(V))
+
+# Optimization: don't recompile deps unless truly necessary.
+
+# The ebin/dep_built marker file short-circuits rebuilds (see the deps
+# recipe below). When this project is itself built recursively (MAKELEVEL
+# != 0) drop its own marker so it is always recompiled.
+ifndef IS_DEP
+ifneq ($(MAKELEVEL),0)
+$(shell rm -f ebin/dep_built)
+endif
+endif
+
+# Core targets.
+
+# Inside an app (IS_APP) or when LOCAL_DEPS is set, only build the listed
+# local apps; otherwise build every app found under APPS_DIR.
+ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS))
+
+# Build the in-project applications. $(ERLANG_MK_TMP)/apps.log records
+# which apps were already built during this run so recursive invocations
+# do not build the same app twice.
+apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP)
+# Create ebin directory for all apps to make sure Erlang recognizes them
+# as proper OTP applications when using -include_lib. This is a temporary
+# fix, a proper fix would be to compile apps/* in the right order.
+ifndef IS_APP
+ifneq ($(ALL_APPS_DIRS),)
+	$(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+		mkdir -p $$dep/ebin; \
+	done
+endif
+endif
+# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only
+# compile that list of apps. Otherwise, compile everything.
+# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps.
+ifneq ($(ALL_APPS_DIRS_TO_BUILD),)
+	$(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \
+		if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \
+			:; \
+		else \
+			echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \
+			$(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \
+		fi \
+	done
+endif
+
+# Reset the per-run build logs, but only at the very top level (not when
+# re-entered as an app or a dep, which must keep appending to them).
+clean-tmp-deps.log:
+ifeq ($(IS_APP)$(IS_DEP),)
+	$(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log
+endif
+
+# Erlang.mk does not rebuild dependencies after they were compiled
+# once. If a developer is working on the top-level project and some
+# dependencies at the same time, he may want to change this behavior.
+# There are two solutions:
+# 1. Set `FULL=1` so that all dependencies are visited and
+#    recursively recompiled if necessary.
+# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that
+#    should be recompiled (instead of the whole set).
+
+FORCE_REBUILD ?=
+
+# force_rebuild_dep(1: dep dir): shell expression that succeeds when the
+# dep's basename is listed in FORCE_REBUILD. Only defined when needed and
+# when FULL is unset; a user-predefined force_rebuild_dep is kept as-is.
+ifeq ($(origin FULL),undefined)
+ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),)
+define force_rebuild_dep
+echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")"
+endef
+endif
+endif
+
+# Build every dependency once per run. deps.log deduplicates across
+# recursive makes; a dep with an ebin/dep_built marker (and not a symlink,
+# and not force-rebuilt) is skipped entirely; otherwise its own Makefile
+# is invoked with IS_DEP=1 and the marker is written on success. A dep
+# without any Makefile is a hard error.
+ifneq ($(SKIP_DEPS),)
+deps::
+else
+deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP)
+ifneq ($(ALL_DEPS_DIRS),)
+	$(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \
+		if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \
+			:; \
+		else \
+			echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \
+			if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+				:; \
+			elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \
+				$(MAKE) -C $$dep IS_DEP=1; \
+				if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+			else \
+				echo "Error: No Makefile to build dependency $$dep." >&2; \
+				exit 2; \
+			fi \
+		fi \
+	done
+endif
+endif
+
+# Deps related targets.
+
+# @todo rename GNUmakefile and makefile into Makefile first, if they exist
+# While Makefile file could be GNUmakefile or makefile,
+# in practice only Makefile is needed so far.
+# dep_autopatch(1: dep name): decide how a freshly fetched dep is adapted
+# to build under erlang.mk. Erlang.mk-based deps get their ebin/ wiped,
+# .app.src normalized and the `include erlang.mk` line rewritten. Deps
+# with a Makefile that looks rebar-based (rebar.lock, `include ../*.mk`,
+# or "rebar" mentioned in the Makefile / other *.mk files) are fully
+# autopatched; otherwise the Makefile is trusted as-is. Deps without any
+# Makefile get a generated one (a no-op stub when there is no src/).
+define dep_autopatch
+	if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \
+		rm -rf $(DEPS_DIR)/$1/ebin/; \
+		$(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+		$(call dep_autopatch_erlang_mk,$(1)); \
+	elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+		if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+			$(call dep_autopatch2,$1); \
+		elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+			$(call dep_autopatch2,$(1)); \
+		elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \
+			$(call dep_autopatch2,$(1)); \
+		elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \
+			$(call dep_autopatch2,$(1)); \
+		fi \
+	else \
+		if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \
+			$(call dep_autopatch_noop,$(1)); \
+		else \
+			$(call dep_autopatch2,$(1)); \
+		fi \
+	fi
+endef
+
+# dep_autopatch2(1: dep name): full autopatch. Move a prebuilt
+# ebin/<app>.app back to src/<app>.app.src (without overwriting an
+# existing one), evaluate .app.src.script if present, normalize the
+# .app.src, then either drive the rebar-based conversion (fetching the
+# pinned rebar first) or generate a plain erlang.mk Makefile.
+define dep_autopatch2
+	! test -f $(DEPS_DIR)/$1/ebin/$1.app || \
+	mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \
+	rm -f $(DEPS_DIR)/$1/ebin/$1.app; \
+	if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \
+		$(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \
+	fi; \
+	$(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \
+	if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \
+		$(call dep_autopatch_fetch_rebar); \
+		$(call dep_autopatch_rebar,$(1)); \
+	else \
+		$(call dep_autopatch_gen,$(1)); \
+	fi
+endef
+
+# Write a Makefile whose only (default) target does nothing, for deps
+# that ship no sources to build.
+define dep_autopatch_noop
+	printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# Replace "include erlang.mk" with a line that will load the parent Erlang.mk
+# if given. Do it for all 3 possible Makefile file names.
+# Disabled entirely (macro becomes a no-op `:`) when NO_AUTOPATCH_ERLANG_MK
+# is set. The $$ in the sed replacement survive into the generated Makefile
+# so ERLANG_MK_FILENAME is evaluated when *that* Makefile runs.
+ifeq ($(NO_AUTOPATCH_ERLANG_MK),)
+define dep_autopatch_erlang_mk
+	for f in Makefile makefile GNUmakefile; do \
+		if [ -f $(DEPS_DIR)/$1/$$f ]; then \
+			sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \
+		fi \
+	done
+endef
+else
+define dep_autopatch_erlang_mk
+	:
+endef
+endif
+
+# Generate a minimal erlang.mk Makefile for a plain-sources dep; it
+# delegates to the top-level project's erlang.mk two directories up.
+define dep_autopatch_gen
+	printf "%s\n" \
+		"ERLC_OPTS = +debug_info" \
+		"include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile
+endef
+
+# We use flock/lockf when available to avoid concurrency issues.
+define dep_autopatch_fetch_rebar
+	if command -v flock >/dev/null; then \
+		flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+	elif command -v lockf >/dev/null; then \
+		lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \
+	else \
+		$(call dep_autopatch_fetch_rebar2); \
+	fi
+endef
+
+# Clone and bootstrap the pinned rebar (REBAR_GIT/REBAR_COMMIT) into
+# ERLANG_MK_TMP, once; skipped when the checkout already exists.
+define dep_autopatch_fetch_rebar2
+	if [ ! -d $(ERLANG_MK_TMP)/rebar ]; then \
+		git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \
+		cd $(ERLANG_MK_TMP)/rebar; \
+		git checkout -q $(REBAR_COMMIT); \
+		./bootstrap; \
+		cd -; \
+	fi
+endef
+
+# Convert a rebar-based dep: keep the original Makefile as
+# Makefile.orig.mk, generate a new erlang.mk Makefile from the rebar
+# config via the dep_autopatch_rebar.erl snippet, and drop any prebuilt
+# .app so it is regenerated from .app.src.
+define dep_autopatch_rebar
+	if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \
+		mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \
+	fi; \
+	$(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \
+	rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app
+endef
+
+# Erlang script (run via $(call erlang,...)) that reads a dependency's
+# rebar.config / rebar.config.script and synthesizes an Erlang.mk Makefile:
+# it translates erl_opts into ERLC_OPTS, deps (including hex packages pinned
+# in rebar.lock) into DEPS/dep_* lines, erl_first_files into COMPILE_FIRST,
+# pre_hooks into pre-deps/pre-app targets, port_specs/port_env into a
+# generated c_src/Makefile.erlang.mk, and compiles/runs rebar plugins.
+# NOTE(review): no Erlang %% comments may be added inside this define — the
+# script appears to be flattened when handed to erl, so a comment would
+# swallow the rest of the script. Body left byte-identical below.
+define dep_autopatch_rebar.erl
+	application:load(rebar),
+	application:set_env(rebar, log_level, debug),
+	rmemo:start(),
+	Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of
+		{ok, Conf0} -> Conf0;
+		_ -> []
+	end,
+	{Conf, OsEnv} = fun() ->
+		case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of
+			false -> {Conf1, []};
+			true ->
+				Bindings0 = erl_eval:new_bindings(),
+				Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0),
+				Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1),
+				Before = os:getenv(),
+				{ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings),
+				{Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)}
+		end
+	end(),
+	Write = fun (Text) ->
+		file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append])
+	end,
+	Escape = fun (Text) ->
+		re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}])
+	end,
+	Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package "
+		"rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"),
+	Write("C_SRC_DIR = /path/do/not/exist\n"),
+	Write("C_SRC_TYPE = rebar\n"),
+	Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"),
+	Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]),
+	ToList = fun
+		(V) when is_atom(V) -> atom_to_list(V);
+		(V) when is_list(V) -> "'\\"" ++ V ++ "\\"'"
+	end,
+	fun() ->
+		Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"),
+		case lists:keyfind(erl_opts, 1, Conf) of
+			false -> ok;
+			{_, ErlOpts} ->
+				lists:foreach(fun
+					({d, D}) ->
+						Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+					({d, DKey, DVal}) ->
+						Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n");
+					({i, I}) ->
+						Write(["ERLC_OPTS += -I ", I, "\n"]);
+					({platform_define, Regex, D}) ->
+						case rebar_utils:is_arch(Regex) of
+							true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n");
+							false -> ok
+						end;
+					({parse_transform, PT}) ->
+						Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n");
+					(_) -> ok
+				end, ErlOpts)
+		end,
+		Write("\n")
+	end(),
+	GetHexVsn = fun(N, NP) ->
+		case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of
+			{ok, Lock} ->
+				io:format("~p~n", [Lock]),
+				case lists:keyfind("1.1.0", 1, Lock) of
+					{_, LockPkgs} ->
+						io:format("~p~n", [LockPkgs]),
+						case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of
+							{_, {pkg, _, Vsn}, _} ->
+								io:format("~p~n", [Vsn]),
+								{N, {hex, NP, binary_to_list(Vsn)}};
+							_ ->
+								false
+						end;
+					_ ->
+						false
+				end;
+			_ ->
+				false
+		end
+	end,
+	SemVsn = fun
+		("~>" ++ S0) ->
+			S = case S0 of
+				" " ++ S1 -> S1;
+				_ -> S0
+			end,
+			case length([ok || $$. <- S]) of
+				0 -> S ++ ".0.0";
+				1 -> S ++ ".0";
+				_ -> S
+			end;
+		(S) -> S
+	end,
+	fun() ->
+		File = case lists:keyfind(deps, 1, Conf) of
+			false -> [];
+			{_, Deps} ->
+				[begin case case Dep of
+							N when is_atom(N) -> GetHexVsn(N, N);
+							{N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}};
+							{N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP);
+							{N, S, {pkg, NP}} -> {N, {hex, NP, S}};
+							{N, S} when is_tuple(S) -> {N, S};
+							{N, _, S} -> {N, S};
+							{N, _, S, _} -> {N, S};
+							_ -> false
+						end of
+					false -> ok;
+					{Name, Source} ->
+						{Method, Repo, Commit} = case Source of
+							{hex, NPV, V} -> {hex, V, NPV};
+							{git, R} -> {git, R, master};
+							{M, R, {branch, C}} -> {M, R, C};
+							{M, R, {ref, C}} -> {M, R, C};
+							{M, R, {tag, C}} -> {M, R, C};
+							{M, R, C} -> {M, R, C}
+						end,
+						Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit]))
+				end end || Dep <- Deps]
+		end
+	end(),
+	fun() ->
+		case lists:keyfind(erl_first_files, 1, Conf) of
+			false -> ok;
+			{_, Files} ->
+				Names = [[" ", case lists:reverse(F) of
+							"lre." ++ Elif -> lists:reverse(Elif);
+							"lrx." ++ Elif -> lists:reverse(Elif);
+							"lry." ++ Elif -> lists:reverse(Elif);
+							Elif -> lists:reverse(Elif)
+						end] || "src/" ++ F <- Files],
+				Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names]))
+		end
+	end(),
+	Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"),
+	Write("\npreprocess::\n"),
+	Write("\npre-deps::\n"),
+	Write("\npre-app::\n"),
+	PatchHook = fun(Cmd) ->
+		Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]),
+		case Cmd2 of
+			"make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+			"gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1);
+			"make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+			"gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1);
+			_ -> Escape(Cmd)
+		end
+	end,
+	fun() ->
+		case lists:keyfind(pre_hooks, 1, Conf) of
+			false -> ok;
+			{_, Hooks} ->
+				[case H of
+					{'get-deps', Cmd} ->
+						Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n");
+					{compile, Cmd} ->
+						Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+					{Regex, compile, Cmd} ->
+						case rebar_utils:is_arch(Regex) of
+							true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n");
+							false -> ok
+						end;
+					_ -> ok
+				end || H <- Hooks]
+		end
+	end(),
+	ShellToMk = fun(V0) ->
+		V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]),
+		V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]),
+		re:replace(V, "-Werror\\\\b", "", [{return, list}, global])
+	end,
+	PortSpecs = fun() ->
+		case lists:keyfind(port_specs, 1, Conf) of
+			false ->
+				case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of
+					false -> [];
+					true ->
+						[{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"),
+							proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}]
+				end;
+			{_, Specs} ->
+				lists:flatten([case S of
+					{Output, Input} -> {ShellToMk(Output), Input, []};
+					{Regex, Output, Input} ->
+						case rebar_utils:is_arch(Regex) of
+							true -> {ShellToMk(Output), Input, []};
+							false -> []
+						end;
+					{Regex, Output, Input, [{env, Env}]} ->
+						case rebar_utils:is_arch(Regex) of
+							true -> {ShellToMk(Output), Input, Env};
+							false -> []
+						end
+				end || S <- Specs])
+		end
+	end(),
+	PortSpecWrite = fun (Text) ->
+		file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append])
+	end,
+	case PortSpecs of
+		[] -> ok;
+		_ ->
+			Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"),
+			PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n",
+				[code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])),
+			PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n",
+				[code:lib_dir(erl_interface, lib)])),
+			[PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv],
+			FilterEnv = fun(Env) ->
+				lists:flatten([case E of
+					{_, _} -> E;
+					{Regex, K, V} ->
+						case rebar_utils:is_arch(Regex) of
+							true -> {K, V};
+							false -> []
+						end
+				end || E <- Env])
+			end,
+			MergeEnv = fun(Env) ->
+				lists:foldl(fun ({K, V}, Acc) ->
+					case lists:keyfind(K, 1, Acc) of
+						false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc];
+						{_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc]
+					end
+				end, [], Env)
+			end,
+			PortEnv = case lists:keyfind(port_env, 1, Conf) of
+				false -> [];
+				{_, PortEnv0} -> FilterEnv(PortEnv0)
+			end,
+			PortSpec = fun ({Output, Input0, Env}) ->
+				filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output),
+				Input = [[" ", I] || I <- Input0],
+				PortSpecWrite([
+					[["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))],
+					case $(PLATFORM) of
+						darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress";
+						_ -> ""
+					end,
+					"\n\nall:: ", Output, "\n\t@:\n\n",
+					"%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+					"%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+					"%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+					"%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n",
+					[[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))],
+					Output, ": $$\(foreach ext,.c .C .cc .cpp,",
+						"$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n",
+					"\t$$\(CC) -o $$\@ $$\? $$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)",
+					case {filename:extension(Output), $(PLATFORM)} of
+						{[], _} -> "\n";
+						{_, darwin} -> "\n";
+						_ -> " -shared\n"
+					end])
+			end,
+			[PortSpec(S) || S <- PortSpecs]
+	end,
+	fun() ->
+		case lists:keyfind(plugins, 1, Conf) of
+			false -> ok;
+			{_, Plugins0} ->
+				Plugins = [P || P <- Plugins0, is_tuple(P)],
+				case lists:keyfind('lfe-compile', 1, Plugins) of
+					false -> ok;
+					_ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n")
+				end
+		end
+	end(),
+	Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"),
+	RunPlugin = fun(Plugin, Step) ->
+		case erlang:function_exported(Plugin, Step, 2) of
+			false -> ok;
+			true ->
+				c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"),
+				Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(),
+					dict:store(base_dir, "", dict:new())}, undefined),
+				io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret])
+		end
+	end,
+	fun() ->
+		case lists:keyfind(plugins, 1, Conf) of
+			false -> ok;
+			{_, Plugins0} ->
+				Plugins = [P || P <- Plugins0, is_atom(P)],
+				[begin
+					case lists:keyfind(deps, 1, Conf) of
+						false -> ok;
+						{_, Deps} ->
+							case lists:keyfind(P, 1, Deps) of
+								false -> ok;
+								_ ->
+									Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P),
+									io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]),
+									io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]),
+									code:add_patha(Path ++ "/ebin")
+							end
+					end
+				end || P <- Plugins],
+				[case code:load_file(P) of
+					{module, P} -> ok;
+					_ ->
+						case lists:keyfind(plugin_dir, 1, Conf) of
+							false -> ok;
+							{_, PluginsDir} ->
+								ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl",
+								{ok, P, Bin} = compile:file(ErlFile, [binary]),
+								{module, P} = code:load_binary(P, ErlFile, Bin)
+						end
+				end || P <- Plugins],
+				[RunPlugin(P, preprocess) || P <- Plugins],
+				[RunPlugin(P, pre_compile) || P <- Plugins],
+				[RunPlugin(P, compile) || P <- Plugins]
+		end
+	end(),
+	halt()
+endef
+
+# Evaluate src/$1.app.src.script (with CONFIG bound to the terms already in
+# $1.app.src and SCRIPT to the script path, mirroring rebar's behaviour) and
+# overwrite $1.app.src with the script's result.
+define dep_autopatch_appsrc_script.erl
+	AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+	AppSrcScript = AppSrc ++ ".script",
+	{ok, Conf0} = file:consult(AppSrc),
+	Bindings0 = erl_eval:new_bindings(),
+	Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0),
+	Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1),
+	Conf = case file:script(AppSrcScript, Bindings) of
+		{ok, [C]} -> C;
+		{ok, C} -> C
+	end,
+	ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])),
+	halt()
+endef
+
+# Normalize the dependency's application resource file: read src/$1.app.src
+# (falling back to ebin/$1.app), blank the modules list (Erlang.mk fills it
+# in at build time), resolve {vsn, git} via "git describe", make sure a
+# registered entry exists, then write the result back to src/$1.app.src.
+define dep_autopatch_appsrc.erl
+	AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)",
+	AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end,
+	case filelib:is_regular(AppSrcIn) of
+		false -> ok;
+		true ->
+			{ok, [{application, $(1), L0}]} = file:consult(AppSrcIn),
+			L1 = lists:keystore(modules, 1, L0, {modules, []}),
+			L2 = case lists:keyfind(vsn, 1, L1) of
+				{_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))});
+				{_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"});
+				_ -> L1
+			end,
+			L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end,
+			ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])),
+			case AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end
+	end,
+	halt()
+endef
+
+# Fetch methods. Each dep_fetch_* macro materializes one dependency into
+# $(DEPS_DIR)/<name>; the method name is selected via $(call dep_fetch,...).
+define dep_fetch_git
+	git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+	cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1));
+endef
+
+# Clone into a shared staging area and symlink only the requested subfolder
+# (the 4th word of dep_<name>) into $(DEPS_DIR).
+define dep_fetch_git-subfolder
+	mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \
+	git clone -q -n -- $(call dep_repo,$1) \
+		$(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \
+	cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \
+	&& git checkout -q $(call dep_commit,$1); \
+	ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \
+		$(DEPS_DIR)/$(call dep_name,$1);
+endef
+
+define dep_fetch_git-submodule
+	git submodule update --init -- $(DEPS_DIR)/$1;
+endef
+
+define dep_fetch_hg
+	hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \
+	cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1));
+endef
+
+define dep_fetch_svn
+	svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Local fetch methods: copy or symlink a directory already on disk.
+define dep_fetch_cp
+	cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+define dep_fetch_ln
+	ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1));
+endef
+
+# Hex only has a package version. No need to look in the Erlang.mk packages.
+# Downloads the package tarball from repo.hex.pm (3rd word of dep_<name>
+# overrides the package name) and unpacks its inner contents.tar.gz.
+define dep_fetch_hex
+	mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \
+	$(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\
+		https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \
+	tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -;
+endef
+
+# Fallback when no fetch method matches; 78 is EX_CONFIG from sysexits.
+define dep_fetch_fail
+	echo "Error: Unknown or invalid dependency: $(1)." >&2; \
+	exit 78;
+endef
+
+# Kept for compatibility purposes with older Erlang.mk configuration.
+define dep_fetch_legacy
+	$(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \
+	git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \
+	cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master);
+endef
+
+# Per-dependency rule template, instantiated with $(eval). It generates:
+#   - $(DEPS_DIR)/<name>: fetch the dep, run autoreconf/configure when
+#     autotools inputs exist, then autopatch unless listed in NO_AUTOPATCH;
+#   - autopatch-<name>: RabbitMQ- and Elixir-specific patching (guarded by
+#     RABBITMQ_CLIENT_PATCH / RABBITMQ_SERVER_PATCH / ELIXIR_PATCH), falling
+#     back to the generic dep_autopatch.
+define dep_target
+$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP)
+	$(eval DEP_NAME := $(call dep_name,$1))
+	$(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))"))
+	$(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \
+		echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \
+		exit 17; \
+	fi
+	$(verbose) mkdir -p $(DEPS_DIR)
+	$(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1))
+	$(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \
+			&& [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \
+		echo " AUTO " $(DEP_STR); \
+		cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \
+	fi
+	- $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \
+		echo " CONF " $(DEP_STR); \
+		cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \
+	fi
+ifeq ($(filter $(1),$(NO_AUTOPATCH)),)
+	$(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME)
+endif
+
+.PHONY: autopatch-$(call dep_name,$1)
+
+autopatch-$(call dep_name,$1)::
+	$(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \
+		if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+			echo " PATCH Downloading rabbitmq-codegen"; \
+			git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+		fi; \
+		if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \
+			echo " PATCH Downloading rabbitmq-server"; \
+			git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \
+		fi; \
+		ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \
+	elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \
+		if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \
+			echo " PATCH Downloading rabbitmq-codegen"; \
+			git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \
+		fi \
+	elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \
+		ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \
+	else \
+		$$(call dep_autopatch,$(call dep_name,$1)) \
+	fi
+endef
+
+# Instantiate the fetch/autopatch rules above for every build-time and
+# runtime dependency.
+$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep))))
+
+# When building the top-level project, clean/distclean recurse into every
+# application in $(ALL_APPS_DIRS); skipped when IS_APP=1 to avoid recursing
+# from within an app build.
+ifndef IS_APP
+clean:: clean-apps
+
+clean-apps:
+	$(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+		$(MAKE) -C $$dep clean IS_APP=1; \
+	done
+
+distclean:: distclean-apps
+
+distclean-apps:
+	$(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \
+		$(MAKE) -C $$dep distclean IS_APP=1; \
+	done
+endif
+
+# distclean removes the entire $(DEPS_DIR) unless deps handling is disabled.
+ifndef SKIP_DEPS
+distclean:: distclean-deps
+
+distclean-deps:
+	$(gen_verbose) rm -rf $(DEPS_DIR)
+endif
+
+# Forward-declare variables used in core/deps-tools.mk. This is required
+# in case plugins use them.
+
+ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log
+ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log
+ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log
+ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log
+ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log
+
+ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log
+ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log
+ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log
+ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log
+ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-app
+
+# Configuration.
+
+# Compiler flags for regular builds; -Werror is stripped for dependency
+# builds (see compile_erl below).
+ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \
+	+warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec
+# Modules (without extension) that must be compiled before the others.
+COMPILE_FIRST ?=
+COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST)))
+# Modules to exclude from compilation entirely.
+ERLC_EXCLUDE ?=
+ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE)))
+
+ERLC_ASN1_OPTS ?=
+
+ERLC_MIB_OPTS ?=
+COMPILE_MIB_FIRST ?=
+COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST)))
+
+# Verbosity.
+# For each tool, <tool>_verbose_0 prints a terse label (V=0) and
+# <tool>_verbose_2 echoes the full command (V=2); V=1 prints commands as-is.
+
+app_verbose_0 = @echo " APP " $(PROJECT);
+app_verbose_2 = set -x;
+app_verbose = $(app_verbose_$(V))
+
+appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src;
+appsrc_verbose_2 = set -x;
+appsrc_verbose = $(appsrc_verbose_$(V))
+
+makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d;
+makedep_verbose_2 = set -x;
+makedep_verbose = $(makedep_verbose_$(V))
+
+erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+	$(filter %.erl %.core,$(?F)));
+erlc_verbose_2 = set -x;
+erlc_verbose = $(erlc_verbose_$(V))
+
+xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F));
+xyrl_verbose_2 = set -x;
+xyrl_verbose = $(xyrl_verbose_$(V))
+
+asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F));
+asn1_verbose_2 = set -x;
+asn1_verbose = $(asn1_verbose_$(V))
+
+mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F));
+mib_verbose_2 = set -x;
+mib_verbose = $(mib_verbose_$(V))
+
+# The application build below only applies when there are sources in src/.
+ifneq ($(wildcard src/),)
+
+# Targets.
+
+# Build the app: refresh the dependency file ($(PROJECT).d), then compile.
+# A leftover ebin/test marker (from test-build) forces a clean first.
+app:: $(if $(wildcard ebin/test),clean) deps
+	$(verbose) $(MAKE) --no-print-directory $(PROJECT).d
+	$(verbose) $(MAKE) --no-print-directory app-build
+
+# Template for the generated ebin/$(PROJECT).app. Two variants: without and
+# with a {mod, ...} entry, depending on whether src/$(PROJECT_MOD).erl exists.
+# NOTE: the body is literal output — do not add comments inside the defines.
+ifeq ($(wildcard src/$(PROJECT_MOD).erl),)
+define app_file
+{application, '$(PROJECT)', [
+	{description, "$(PROJECT_DESCRIPTION)"},
+	{vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+		{id$(comma)$(space)"$(1)"}$(comma))
+	{modules, [$(call comma_list,$(2))]},
+	{registered, []},
+	{applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+	{env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+else
+define app_file
+{application, '$(PROJECT)', [
+	{description, "$(PROJECT_DESCRIPTION)"},
+	{vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP),
+		{id$(comma)$(space)"$(1)"}$(comma))
+	{modules, [$(call comma_list,$(2))]},
+	{registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]},
+	{applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]},
+	{mod, {$(PROJECT_MOD), []}},
+	{env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),)
+]}.
+endef
+endif
+
+app-build: ebin/$(PROJECT).app
+	$(verbose) :
+
+# Source files.
+
+ALL_SRC_FILES := $(sort $(call core_find,src/,*))
+
+ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES))
+CORE_FILES := $(filter %.core,$(ALL_SRC_FILES))
+
+# ASN.1 files.
+
+ifneq ($(wildcard asn1/),)
+ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1))
+ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+# Compile .asn1 specs; generated .erl go to src/, .hrl/.asn1db to include/.
+define compile_asn1
+	$(verbose) mkdir -p include/
+	$(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1)
+	$(verbose) mv asn1/*.erl src/
+	-$(verbose) mv asn1/*.hrl include/
+	$(verbose) mv asn1/*.asn1db include/
+endef
+
+$(PROJECT).d:: $(ASN1_FILES)
+	$(if $(strip $?),$(call compile_asn1,$?))
+endif
+
+# SNMP MIB files.
+
+ifneq ($(wildcard mibs/),)
+MIB_FILES = $(sort $(call core_find,mibs/,*.mib))
+
+$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES)
+	$(verbose) mkdir -p include/ priv/mibs/
+	$(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $?
+	$(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?)))
+endif
+
+# Leex and Yecc files.
+
+XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES))
+XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES))))
+ERL_FILES += $(XRL_ERL_FILES)
+
+YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES))
+YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES))))
+ERL_FILES += $(YRL_ERL_FILES)
+
+$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES)
+	$(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?)
+
+# Erlang and Core Erlang files.
+
+# Dependency scanner run by the $(PROJECT).d rule below. It parses every
+# module in ERL_FILES, records inter-file dependencies (behaviours, parse
+# transforms, include/include_lib headers, imports) in an ETS bag plus an
+# acyclic digraph, then writes a makefile fragment of "file:: deps" rules
+# and a topologically sorted COMPILE_FIRST list to the path given as $(1).
+# NOTE(review): no Erlang comments inside the define — the script appears to
+# be flattened when handed to erl. Body left byte-identical below.
+define makedep.erl
+	E = ets:new(makedep, [bag]),
+	G = digraph:new([acyclic]),
+	ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")),
+	DepsDir = "$(call core_native_path,$(DEPS_DIR))",
+	AppsDir = "$(call core_native_path,$(APPS_DIR))",
+	DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))",
+	DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))",
+	AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))",
+	AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))",
+	DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")),
+	AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")),
+	Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles],
+	Add = fun (Mod, Dep) ->
+		case lists:keyfind(Dep, 1, Modules) of
+			false -> ok;
+			{_, DepFile} ->
+				{_, ModFile} = lists:keyfind(Mod, 1, Modules),
+				ets:insert(E, {ModFile, DepFile}),
+				digraph:add_vertex(G, Mod),
+				digraph:add_vertex(G, Dep),
+				digraph:add_edge(G, Mod, Dep)
+		end
+	end,
+	AddHd = fun (F, Mod, DepFile) ->
+		case file:open(DepFile, [read]) of
+			{error, enoent} ->
+				ok;
+			{ok, Fd} ->
+				{_, ModFile} = lists:keyfind(Mod, 1, Modules),
+				case ets:match(E, {ModFile, DepFile}) of
+					[] ->
+						ets:insert(E, {ModFile, DepFile}),
+						F(F, Fd, Mod,0);
+					_ -> ok
+				end
+		end
+	end,
+	SearchHrl = fun
+		F(_Hrl, []) -> {error,enoent};
+		F(Hrl, [Dir|Dirs]) ->
+			HrlF = filename:join([Dir,Hrl]),
+			case filelib:is_file(HrlF) of
+				true ->
+					{ok, HrlF};
+				false -> F(Hrl,Dirs)
+			end
+	end,
+	Attr = fun
+		(_F, Mod, behavior, Dep) ->
+			Add(Mod, Dep);
+		(_F, Mod, behaviour, Dep) ->
+			Add(Mod, Dep);
+		(_F, Mod, compile, {parse_transform, Dep}) ->
+			Add(Mod, Dep);
+		(_F, Mod, compile, Opts) when is_list(Opts) ->
+			case proplists:get_value(parse_transform, Opts) of
+				undefined -> ok;
+				Dep -> Add(Mod, Dep)
+			end;
+		(F, Mod, include, Hrl) ->
+			case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+				{ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+				{error, _} -> false
+			end;
+		(F, Mod, include_lib, Hrl) ->
+			case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of
+				{ok, FoundHrl} -> AddHd(F, Mod, FoundHrl);
+				{error, _} -> false
+			end;
+		(F, Mod, import, {Imp, _}) ->
+			IsFile =
+				case lists:keyfind(Imp, 1, Modules) of
+					false -> false;
+					{_, FilePath} -> filelib:is_file(FilePath)
+				end,
+			case IsFile of
+				false -> ok;
+				true -> Add(Mod, Imp)
+			end;
+		(_, _, _, _) -> ok
+	end,
+	MakeDepend = fun
+		(F, Fd, Mod, StartLocation) ->
+			{ok, Filename} = file:pid2name(Fd),
+			case io:parse_erl_form(Fd, undefined, StartLocation) of
+				{ok, AbsData, EndLocation} ->
+					case AbsData of
+						{attribute, _, Key, Value} ->
+							Attr(F, Mod, Key, Value),
+							F(F, Fd, Mod, EndLocation);
+						_ -> F(F, Fd, Mod, EndLocation)
+					end;
+				{eof, _ } -> file:close(Fd);
+				{error, ErrorDescription } ->
+					file:close(Fd);
+				{error, ErrorInfo, ErrorLocation} ->
+					F(F, Fd, Mod, ErrorLocation)
+			end,
+			ok
+	end,
+	[begin
+		Mod = list_to_atom(filename:basename(F, ".erl")),
+		case file:open(F, [read]) of
+			{ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0);
+			{error, enoent} -> ok
+		end
+	end || F <- ErlFiles],
+	Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))),
+	CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)],
+	TargetPath = fun(Target) ->
+		case lists:keyfind(Target, 1, Modules) of
+			false -> "";
+			{_, DepFile} ->
+				DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")),
+				string:join(DirSubname ++ [atom_to_list(Target)], "/")
+		end
+	end,
+	Output0 = [
+		"# Generated by Erlang.mk. Edit at your own risk!\n\n",
+		[[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend],
+		"\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n"
+	],
+	Output = case "é" of
+		[233] -> unicode:characters_to_binary(Output0);
+		_ -> Output0
+	end,
+	ok = file:write_file("$(1)", Output),
+	halt()
+endef
+
+# Regenerate $(PROJECT).d whenever sources, headers or makefiles change,
+# unless NO_MAKEDEP is set and a .d file already exists.
+ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),)
+$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST)
+	$(makedep_verbose) $(call erlang,$(call makedep.erl,$@))
+endif
+
+ifeq ($(IS_APP)$(IS_DEP),)
+ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0)
+# Rebuild everything when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+	$(verbose) if test -f $@; then \
+		touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \
+		touch -c $(PROJECT).d; \
+	fi
+	$(verbose) touch $@
+
+$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change
+endif
+endif
+
+# Catch-all so the .d target always exists even when no generator applies.
+$(PROJECT).d::
+	$(verbose) :
+
+include $(wildcard $(PROJECT).d)
+
+ebin/$(PROJECT).app:: ebin/
+
+ebin/:
+	$(verbose) mkdir -p ebin/
+
+# Compile Erlang/Core Erlang sources to ebin/. -Werror is dropped when
+# building a dependency so third-party warnings do not break the build.
+define compile_erl
+	$(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \
+		-pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1))
+endef
+
+# Erlang snippet: exit 0 iff ebin/$(PROJECT).app parses as Erlang terms.
+define validate_app_file
+	case file:consult("ebin/$(PROJECT).app") of
+		{ok, _} -> halt();
+		_ -> halt(1)
+	end
+endef
+
+# Build ebin/$(PROJECT).app: compile changed sources, then either generate
+# the .app from the app_file template (no .app.src present) or rewrite
+# src/$(PROJECT).app.src, filling in the module list and git-derived id.
+ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src)
+	$(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?))
+	$(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE)))
+# Older git versions do not have the --first-parent flag. Do without in that case.
+	$(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \
+		|| git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true))
+	$(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+		$(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES)))))))
+ifeq ($(wildcard src/$(PROJECT).app.src),)
+	$(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \
+		> ebin/$(PROJECT).app
+	$(verbose) if ! $(call erlang,$(call validate_app_file)); then \
+		echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." >&2; \
+		exit 1; \
+	fi
+else
+	$(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \
+		echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \
+		exit 1; \
+	fi
+	$(appsrc_verbose) cat src/$(PROJECT).app.src \
+		| sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \
+		| sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \
+		> ebin/$(PROJECT).app
+endif
+ifneq ($(wildcard src/$(PROJECT).appup),)
+	$(verbose) cp src/$(PROJECT).appup ebin/
+endif
+
+clean:: clean-app
+
+# Remove build artifacts, including .erl/.hrl generated from asn1/mibs/leex/yecc.
+clean-app:
+	$(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \
+		$(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \
+		$(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \
+		$(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \
+		$(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES))))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: docs-deps
+
+# Configuration.
+
+ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS))
+
+# Targets.
+
+# Generate fetch rules for documentation-only dependencies.
+$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep))))
+
+# Fetch and build every DOC_DEP (no-op when SKIP_DEPS is set).
+ifneq ($(SKIP_DEPS),)
+doc-deps:
+else
+doc-deps: $(ALL_DOC_DEPS_DIRS)
+	$(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rel-deps
+
+# Configuration.
+
+ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS))
+
+# Targets.
+
+# Generate fetch rules for release-only dependencies.
+$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep))))
+
+# Fetch and build every REL_DEP (no-op when SKIP_DEPS is set).
+ifneq ($(SKIP_DEPS),)
+rel-deps:
+else
+rel-deps: $(ALL_REL_DEPS_DIRS)
+	$(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: test-deps test-dir test-build clean-test-dir
+
+# Configuration.
+
+TEST_DIR ?= $(CURDIR)/test
+
+ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS))
+
+TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard
+TEST_ERLC_OPTS += -DTEST=1
+
+# Targets.
+
+$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep))))
+
+# Build TEST_DEPS. Deps already marked built (ebin/dep_built) are skipped
+# unless FULL is set; symlinked deps are always rebuilt and never marked.
+ifneq ($(SKIP_DEPS),)
+test-deps:
+else
+test-deps: $(ALL_TEST_DEPS_DIRS)
+	$(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \
+		if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+			:; \
+		else \
+			$(MAKE) -C $$dep IS_DEP=1; \
+			if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+		fi \
+	done
+endif
+
+ifneq ($(wildcard $(TEST_DIR)),)
+test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+	@:
+
+test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\
+	$(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE))));
+test_erlc_verbose_2 = set -x;
+test_erlc_verbose = $(test_erlc_verbose_$(V))
+
+# Compile test sources into $(TEST_DIR) with TEST_ERLC_OPTS.
+define compile_test_erl
+	$(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \
+		-pa ebin/ -I include/ $(1)
+endef
+
+# Recompile only changed test files; if a makefile changed, recompile all.
+ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl)
+$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST)
+	$(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?))
+	$(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@)
+endif
+
+test-build:: IS_TEST=1
+test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps)
+# We already compiled everything when IS_APP=1.
+ifndef IS_APP
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+# Roughly the same as test-build, but when IS_APP=1.
+# We only care about compiling the current application.
+ifdef IS_APP
+test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS)
+test-build-app:: deps test-deps
+ifneq ($(wildcard src),)
+ $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+ $(gen_verbose) touch ebin/test
+endif
+ifneq ($(wildcard $(TEST_DIR)),)
+ $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))"
+endif
+endif
+
+clean:: clean-test-dir
+
+clean-test-dir:
+ifneq ($(wildcard $(TEST_DIR)/*.beam),)
+ $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: rebar.config
+
+# We strip out -Werror because we don't want to fail due to
+# warnings when used as a dependency.
+
+# Normalize ", " separators inside erlc options so each option is one word.
+compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g')
+
+# Convert a single +option (erlc syntax) to its bare rebar form; -Werror
+# and non-+ options are dropped.
+define compat_convert_erlc_opts
+$(if $(filter-out -Werror,$1),\
+	$(if $(findstring +,$1),\
+		$(shell echo $1 | cut -b 2-)))
+endef
+
+define compat_erlc_opts_to_list
+[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))]
+endef
+
+# Render a rebar.config equivalent of this Makefile: deps (hex or git)
+# plus the converted erl_opts.
+define compat_rebar_config
+{deps, [
+$(call comma_list,$(foreach d,$(DEPS),\
+	$(if $(filter hex,$(call dep_fetch,$d)),\
+		{$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\
+		{$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}})))
+]}.
+{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}.
+endef
+
+rebar.config:
+ $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# The plugin is only active when asciideck is a (doc) dependency.
+ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck)
+
+.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Core targets.
+
+docs:: asciidoc
+
+distclean:: distclean-asciidoc-guide distclean-asciidoc-manual
+
+# Plugin-specific targets.
+
+asciidoc: asciidoc-guide asciidoc-manual
+
+# User guide.
+
+ifeq ($(wildcard doc/src/guide/book.asciidoc),)
+asciidoc-guide:
+else
+# Build the guide as PDF and chunked HTML via a2x.
+asciidoc-guide: distclean-asciidoc-guide doc-deps
+ a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf
+ a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/
+
+distclean-asciidoc-guide:
+ $(gen_verbose) rm -rf doc/html/ doc/guide.pdf
+endif
+
+# Man pages.
+
+ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc)
+
+ifeq ($(ASCIIDOC_MANUAL_FILES),)
+asciidoc-manual:
+else
+
+# Configuration.
+
+MAN_INSTALL_PATH ?= /usr/local/share/man
+MAN_SECTIONS ?= 3 7
+MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/')
+MAN_VERSION ?= $(PROJECT_VERSION)
+
+# Plugin-specific targets.
+
+# Erlang snippet: convert each .asciidoc file to a gzipped man page next
+# to its source using asciideck.
+define asciidoc2man.erl
+try
+	[begin
+		io:format(" ADOC   ~s~n", [F]),
+		ok = asciideck:to_manpage(asciideck:parse_file(F), #{
+			compress => gzip,
+			outdir => filename:dirname(F),
+			extra2 => "$(MAN_PROJECT) $(MAN_VERSION)",
+			extra3 => "$(MAN_PROJECT) Function Reference"
+		})
+	end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]],
+	halt(0)
+catch C:E ->
+	io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]),
+	halt(1)
+end.
+endef
+
+asciidoc-manual:: doc-deps
+
+# $? limits the conversion to manual pages newer than their output.
+asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES)
+ $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?))
+ $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;)
+
+install-docs:: install-asciidoc
+
+install-asciidoc: asciidoc-manual
+ $(foreach s,$(MAN_SECTIONS),\
+ mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \
+ install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;)
+
+distclean-asciidoc-manual:
+ $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS))
+endif
+endif
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates
+
+# Core targets.
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Bootstrap targets:" \
+ " bootstrap Generate a skeleton of an OTP application" \
+ " bootstrap-lib Generate a skeleton of an OTP library" \
+ " bootstrap-rel Generate the files needed to build a release" \
+ " new-app in=NAME Create a new local OTP application NAME" \
+ " new-lib in=NAME Create a new local OTP library NAME" \
+ " new t=TPL n=NAME Generate a module NAME based on the template TPL" \
+ " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \
+ " list-templates List available templates"
+
+# Bootstrap templates.
+# NOTE: define bodies below are rendered verbatim into generated files;
+# $p is the application name set by the bootstrap/new-* recipes.
+
+# .app.src skeleton for an application (used when LEGACY is set).
+define bs_appsrc
+{application, $p, [
+	{description, ""},
+	{vsn, "0.1.0"},
+	{id, "git"},
+	{modules, []},
+	{registered, []},
+	{applications, [
+		kernel,
+		stdlib
+	]},
+	{mod, {$p_app, []}},
+	{env, []}
+]}.
+endef
+
+# .app.src skeleton for a library (no application callback module).
+define bs_appsrc_lib
+{application, $p, [
+	{description, ""},
+	{vsn, "0.1.0"},
+	{id, "git"},
+	{modules, []},
+	{registered, []},
+	{applications, [
+		kernel,
+		stdlib
+	]}
+]}.
+endef
+
+# To prevent autocompletion issues with ZSH, we add "include erlang.mk"
+# separately during the actual bootstrap.
+define bs_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+endef
+
+# Makefile skeleton for an application living under APPS_DIR.
+define bs_apps_Makefile
+PROJECT = $p
+PROJECT_DESCRIPTION = New project
+PROJECT_VERSION = 0.1.0
+$(if $(SP),
+# Whitespace to be used when creating files from templates.
+SP = $(SP)
+)
+# Make sure we know where the applications are located.
+ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app)
+APPS_DIR ?= ..
+DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app)
+
+include $$(ROOT_DIR)/erlang.mk
+endef
+
+# application behaviour callback module skeleton.
+define bs_app
+-module($p_app).
+-behaviour(application).
+
+-export([start/2]).
+-export([stop/1]).
+
+start(_Type, _Args) ->
+	$p_sup:start_link().
+
+stop(_State) ->
+	ok.
+endef
+
+# relx.config skeleton used by bootstrap-rel.
+define bs_relx_config
+{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}.
+{extended_start_script, true}.
+{sys_config, "config/sys.config"}.
+{vm_args, "config/vm.args"}.
+endef
+
+# Empty sys.config skeleton.
+define bs_sys_config
+[
+].
+endef
+
+# vm.args skeleton (distributed node name, cookie, heartbeat).
+define bs_vm_args
+-name $p@127.0.0.1
+-setcookie $p
+-heart
+endef
+
+# Normal templates.
+
+# Module templates used by the "new" target; $(n) is the module name.
+# NOTE: define bodies are rendered verbatim into generated .erl files,
+# so no make comments may appear inside them.
+
+# supervisor behaviour skeleton (one_for_one, no children).
+define tpl_supervisor
+-module($(n)).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+start_link() ->
+	supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+init([]) ->
+	Procs = [],
+	{ok, {{one_for_one, 1, 5}, Procs}}.
+endef
+
+# gen_server behaviour skeleton with all callbacks stubbed.
+define tpl_gen_server
+-module($(n)).
+-behaviour(gen_server).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_server.
+-export([init/1]).
+-export([handle_call/3]).
+-export([handle_cast/2]).
+-export([handle_info/2]).
+-export([terminate/2]).
+-export([code_change/3]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+	gen_server:start_link(?MODULE, [], []).
+
+%% gen_server.
+
+init([]) ->
+	{ok, #state{}}.
+
+handle_call(_Request, _From, State) ->
+	{reply, ignored, State}.
+
+handle_cast(_Msg, State) ->
+	{noreply, State}.
+
+handle_info(_Info, State) ->
+	{noreply, State}.
+
+terminate(_Reason, _State) ->
+	ok.
+
+code_change(_OldVsn, State, _Extra) ->
+	{ok, State}.
+endef
+
+# Minimal plain module skeleton.
+define tpl_module
+-module($(n)).
+-export([]).
+endef
+
+# Cowboy 1.x HTTP handler skeleton.
+define tpl_cowboy_http
+-module($(n)).
+-behaviour(cowboy_http_handler).
+
+-export([init/3]).
+-export([handle/2]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+	{ok, Req, #state{}}.
+
+handle(Req, State=#state{}) ->
+	{ok, Req2} = cowboy_req:reply(200, Req),
+	{ok, Req2, State}.
+
+terminate(_Reason, _Req, _State) ->
+	ok.
+endef
+
+# gen_fsm behaviour skeleton with all callbacks stubbed.
+define tpl_gen_fsm
+-module($(n)).
+-behaviour(gen_fsm).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_fsm.
+-export([init/1]).
+-export([state_name/2]).
+-export([handle_event/3]).
+-export([state_name/3]).
+-export([handle_sync_event/4]).
+-export([handle_info/3]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+	gen_fsm:start_link(?MODULE, [], []).
+
+%% gen_fsm.
+
+init([]) ->
+	{ok, state_name, #state{}}.
+
+state_name(_Event, StateData) ->
+	{next_state, state_name, StateData}.
+
+handle_event(_Event, StateName, StateData) ->
+	{next_state, StateName, StateData}.
+
+state_name(_Event, _From, StateData) ->
+	{reply, ignored, state_name, StateData}.
+
+handle_sync_event(_Event, _From, StateName, StateData) ->
+	{reply, ignored, StateName, StateData}.
+
+handle_info(_Info, StateName, StateData) ->
+	{next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+	ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+	{ok, StateName, StateData}.
+endef
+
+# gen_statem behaviour skeleton (state_functions callback mode).
+define tpl_gen_statem
+-module($(n)).
+-behaviour(gen_statem).
+
+%% API.
+-export([start_link/0]).
+
+%% gen_statem.
+-export([callback_mode/0]).
+-export([init/1]).
+-export([state_name/3]).
+-export([handle_event/4]).
+-export([terminate/3]).
+-export([code_change/4]).
+
+-record(state, {
+}).
+
+%% API.
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+	gen_statem:start_link(?MODULE, [], []).
+
+%% gen_statem.
+
+callback_mode() ->
+	state_functions.
+
+init([]) ->
+	{ok, state_name, #state{}}.
+
+state_name(_EventType, _EventData, StateData) ->
+	{next_state, state_name, StateData}.
+
+handle_event(_EventType, _EventData, StateName, StateData) ->
+	{next_state, StateName, StateData}.
+
+terminate(_Reason, _StateName, _StateData) ->
+	ok.
+
+code_change(_OldVsn, StateName, StateData, _Extra) ->
+	{ok, StateName, StateData}.
+endef
+
+# Cowboy 1.x loop handler skeleton.
+define tpl_cowboy_loop
+-module($(n)).
+-behaviour(cowboy_loop_handler).
+
+-export([init/3]).
+-export([info/3]).
+-export([terminate/3]).
+
+-record(state, {
+}).
+
+init(_, Req, _Opts) ->
+	{loop, Req, #state{}, 5000, hibernate}.
+
+info(_Info, Req, State) ->
+	{loop, Req, State, hibernate}.
+
+terminate(_Reason, _Req, _State) ->
+	ok.
+endef
+
+# Cowboy 1.x REST handler skeleton.
+define tpl_cowboy_rest
+-module($(n)).
+
+-export([init/3]).
+-export([content_types_provided/2]).
+-export([get_html/2]).
+
+init(_, _Req, _Opts) ->
+	{upgrade, protocol, cowboy_rest}.
+
+content_types_provided(Req, State) ->
+	{[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}.
+
+get_html(Req, State) ->
+	{<<"<html><body>This is REST!</body></html>">>, Req, State}.
+endef
+
+# Cowboy 1.x websocket handler skeleton (echoes frames back).
+define tpl_cowboy_ws
+-module($(n)).
+-behaviour(cowboy_websocket_handler).
+
+-export([init/3]).
+-export([websocket_init/3]).
+-export([websocket_handle/3]).
+-export([websocket_info/3]).
+-export([websocket_terminate/3]).
+
+-record(state, {
+}).
+
+init(_, _, _) ->
+	{upgrade, protocol, cowboy_websocket}.
+
+websocket_init(_, Req, _Opts) ->
+	Req2 = cowboy_req:compact(Req),
+	{ok, Req2, #state{}}.
+
+websocket_handle({text, Data}, Req, State) ->
+	{reply, {text, Data}, Req, State};
+websocket_handle({binary, Data}, Req, State) ->
+	{reply, {binary, Data}, Req, State};
+websocket_handle(_Frame, Req, State) ->
+	{ok, Req, State}.
+
+websocket_info(_Info, Req, State) ->
+	{ok, Req, State}.
+
+websocket_terminate(_Reason, _Req, _State) ->
+	ok.
+endef
+
+# Ranch protocol skeleton.
+define tpl_ranch_protocol
+-module($(n)).
+-behaviour(ranch_protocol).
+
+-export([start_link/4]).
+-export([init/4]).
+
+-type opts() :: [].
+-export_type([opts/0]).
+
+-record(state, {
+	socket :: inet:socket(),
+	transport :: module()
+}).
+
+start_link(Ref, Socket, Transport, Opts) ->
+	Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]),
+	{ok, Pid}.
+
+-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok.
+init(Ref, Socket, Transport, _Opts) ->
+	ok = ranch:accept_ack(Ref),
+	loop(#state{socket=Socket, transport=Transport}).
+
+loop(State) ->
+	loop(State).
+endef
+
+# Plugin-specific targets.
+
+# WS is the whitespace used when rendering templates: SP spaces if SP is
+# set, a tab otherwise.
+ifndef WS
+ifdef SP
+WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a))
+else
+WS = $(tab)
+endif
+endif
+
+# Create a new OTP application skeleton in the current directory.
+bootstrap:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(PROJECT)_sup)
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src)
+endif
+ $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl)
+
+# Create a new OTP library skeleton (no application callback/supervisor).
+bootstrap-lib:
+ifneq ($(wildcard src/),)
+ $(error Error: src/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) $(call core_render,bs_Makefile,Makefile)
+ $(verbose) echo "include erlang.mk" >> Makefile
+ $(verbose) mkdir src/
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src)
+endif
+
+# Create relx.config and the config/ files needed to build a release.
+bootstrap-rel:
+ifneq ($(wildcard relx.config),)
+ $(error Error: relx.config already exists)
+endif
+ifneq ($(wildcard config/),)
+ $(error Error: config/ directory already exists)
+endif
+ $(eval p := $(PROJECT))
+ $(verbose) $(call core_render,bs_relx_config,relx.config)
+ $(verbose) mkdir config/
+ $(verbose) $(call core_render,bs_sys_config,config/sys.config)
+ $(verbose) $(call core_render,bs_vm_args,config/vm.args)
+
+# Create a new application skeleton under $(APPS_DIR)/$(in).
+new-app:
+ifndef in
+ $(error Usage: $(MAKE) new-app in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(eval n := $(in)_sup)
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+ $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl)
+ $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl)
+
+# Create a new library skeleton under $(APPS_DIR)/$(in).
+new-lib:
+ifndef in
+ $(error Usage: $(MAKE) new-lib in=APP)
+endif
+ifneq ($(wildcard $(APPS_DIR)/$in),)
+ $(error Error: Application $in already exists)
+endif
+ $(eval p := $(in))
+ $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\
+ $(error Error: Invalid characters in the application name))
+ $(verbose) mkdir -p $(APPS_DIR)/$p/src/
+ $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile)
+ifdef LEGACY
+ $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src)
+endif
+
+# Render template t=TPL as module n=NAME, optionally inside app in=APP.
+new:
+ifeq ($(wildcard src/)$(in),)
+ $(error Error: src/ directory does not exist)
+endif
+ifndef t
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifndef n
+ $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP])
+endif
+ifdef in
+ $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl)
+else
+ $(verbose) $(call core_render,tpl_$(t),src/$(n).erl)
+endif
+
+# List every tpl_* template defined in this file.
+# Fix: the echo line carried a stray "@" in addition to $(verbose); the
+# extra "@" suppressed command echoing even in verbose (V=1) mode and was
+# redundant at V=0 where $(verbose) already expands to "@".
+list-templates:
+ $(verbose) echo Available templates:
+ $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES))))
+
+# Copyright (c) 2014-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: clean-c_src distclean-c_src-env
+
+# Configuration.
+
+C_SRC_DIR ?= $(CURDIR)/c_src
+C_SRC_ENV ?= $(C_SRC_DIR)/env.mk
+C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT)
+# "shared" builds a NIF/driver library; anything else builds an executable.
+C_SRC_TYPE ?= shared
+
+# System type and C compiler/flags.
+
+ifeq ($(PLATFORM),msys2)
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll
+else
+ C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?=
+ C_SRC_OUTPUT_SHARED_EXTENSION ?= .so
+endif
+
+ifeq ($(C_SRC_TYPE),shared)
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION)
+else
+ C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION)
+endif
+
+ifeq ($(PLATFORM),msys2)
+# We hardcode the compiler used on MSYS2. The default CC=cc does
+# not produce working code. The "gcc" MSYS2 package also doesn't.
+ CC = /mingw64/bin/gcc
+ export CC
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),darwin)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -arch x86_64 -Wall
+ LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress
+else ifeq ($(PLATFORM),freebsd)
+ CC ?= cc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+else ifeq ($(PLATFORM),linux)
+ CC ?= gcc
+ CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes
+ CXXFLAGS ?= -O3 -finline-functions -Wall
+endif
+
+# Position-independent code is required for shared objects everywhere
+# except on Windows (MSYS2).
+ifneq ($(PLATFORM),msys2)
+ CFLAGS += -fPIC
+ CXXFLAGS += -fPIC
+endif
+
+# Erlang headers and erl_interface; the *_DIR values come from $(C_SRC_ENV).
+CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)"
+
+LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei
+
+# Verbosity.
+
+c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+c_verbose = $(c_verbose_$(V))
+
+cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F));
+cpp_verbose = $(cpp_verbose_$(V))
+
+link_verbose_0 = @echo " LD " $(@F);
+link_verbose = $(link_verbose_$(V))
+
+# Targets.
+
+# Three cases: no c_src at all, c_src with its own Makefile (delegate),
+# or c_src compiled directly by the rules below.
+ifeq ($(wildcard $(C_SRC_DIR)),)
+else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),)
+app:: app-c_src
+
+test-build:: app-c_src
+
+app-c_src:
+ $(MAKE) -C $(C_SRC_DIR)
+
+clean::
+ $(MAKE) -C $(C_SRC_DIR) clean
+
+else
+
+ifeq ($(SOURCES),)
+SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat))))
+endif
+OBJECTS = $(addsuffix .o, $(basename $(SOURCES)))
+
+COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c
+COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c
+
+app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE)
+
+# Link all objects; -shared only when C_SRC_TYPE is shared.
+$(C_SRC_OUTPUT_FILE): $(OBJECTS)
+ $(verbose) mkdir -p $(dir $@)
+ $(link_verbose) $(CC) $(OBJECTS) \
+ $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \
+ -o $(C_SRC_OUTPUT_FILE)
+
+# Rebuild objects when the makefiles or the generated env change.
+$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV)
+
+%.o: %.c
+ $(COMPILE_C) $(OUTPUT_OPTION) $<
+
+%.o: %.cc
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.C
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+%.o: %.cpp
+ $(COMPILE_CPP) $(OUTPUT_OPTION) $<
+
+clean:: clean-c_src
+
+clean-c_src:
+ $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS)
+
+endif
+
+ifneq ($(wildcard $(C_SRC_DIR)),)
+ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().')
+
+# Generate env.mk with the include/lib paths of the current Erlang/OTP.
+$(C_SRC_ENV):
+ $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \
+ io_lib:format( \
+ \"# Generated by Erlang.mk. Edit at your own risk!~n~n\" \
+ \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \
+ \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \
+ \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \
+ \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \
+ [code:root_dir(), erlang:system_info(version), \
+ code:lib_dir(erl_interface, include), \
+ code:lib_dir(erl_interface, lib)])), \
+ halt()."
+
+distclean:: distclean-c_src-env
+
+distclean-c_src-env:
+ $(gen_verbose) rm -f $(C_SRC_ENV)
+
+-include $(C_SRC_ENV)
+
+# Discard a stale env.mk generated by a different Erlang/OTP install.
+ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR))
+$(shell rm -f $(C_SRC_ENV))
+endif
+endif
+
+# Templates.
+
+# Templates.
+# NOTE: define bodies are rendered verbatim by new-nif; $n is the NIF name.
+
+# C side of the NIF skeleton: load/upgrade/unload bookkeeping plus a
+# sample hello/1 function.
+define bs_c_nif
+#include "erl_nif.h"
+
+static int loads = 0;
+
+static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info)
+{
+	/* Initialize private data. */
+	*priv_data = NULL;
+
+	loads++;
+
+	return 0;
+}
+
+static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info)
+{
+	/* Convert the private data to the new version. */
+	*priv_data = *old_priv_data;
+
+	loads++;
+
+	return 0;
+}
+
+static void unload(ErlNifEnv* env, void* priv_data)
+{
+	if (loads == 1) {
+		/* Destroy the private data. */
+	}
+
+	loads--;
+}
+
+static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
+{
+	if (enif_is_atom(env, argv[0])) {
+		return enif_make_tuple2(env,
+			enif_make_atom(env, "hello"),
+			argv[0]);
+	}
+
+	return enif_make_tuple2(env,
+		enif_make_atom(env, "error"),
+		enif_make_atom(env, "badarg"));
+}
+
+static ErlNifFunc nif_funcs[] = {
+	{"hello", 1, hello}
+};
+
+ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload)
+endef
+
+# Erlang side of the NIF skeleton: loads the shared object from priv/.
+define bs_erl_nif
+-module($n).
+
+-export([hello/1]).
+
+-on_load(on_load/0).
+on_load() ->
+	PrivDir = case code:priv_dir(?MODULE) of
+		{error, _} ->
+			AppPath = filename:dirname(filename:dirname(code:which(?MODULE))),
+			filename:join(AppPath, "priv");
+		Path ->
+			Path
+	end,
+	erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0).
+
+hello(_) ->
+	erlang:nif_error({not_loaded, ?MODULE}).
+endef
+
+# Generate a C NIF skeleton plus its Erlang wrapper module.
+# Fix: the "n is required" usage check now runs before the wildcard checks,
+# which interpolate $n; previously a missing n probed "$(C_SRC_DIR)/.c" and
+# "src/.erl" first and could report the wrong error.
+new-nif:
+ifndef n
+ $(error Usage: $(MAKE) new-nif n=NAME [in=APP])
+endif
+ifneq ($(wildcard $(C_SRC_DIR)/$n.c),)
+ $(error Error: $(C_SRC_DIR)/$n.c already exists)
+endif
+ifneq ($(wildcard src/$n.erl),)
+ $(error Error: src/$n.erl already exists)
+endif
+ifdef in
+ $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in=
+else
+ $(verbose) mkdir -p $(C_SRC_DIR) src/
+ $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c)
+ $(verbose) $(call core_render,bs_erl_nif,src/$n.erl)
+endif
+
+# Copyright (c) 2015-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ci ci-prepare ci-setup
+
+# Erlang/OTP versions to test, plus HiPE and ErLLVM flavors.
+CI_OTP ?=
+CI_HIPE ?=
+CI_ERLLVM ?=
+
+# Compile natively when running under a native/erllvm CI flavor.
+ifeq ($(CI_VM),native)
+ERLC_OPTS += +native
+TEST_ERLC_OPTS += +native
+else ifeq ($(CI_VM),erllvm)
+ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}'
+endif
+
+ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),)
+ci::
+else
+
+ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM)))
+
+# Install (via kerl) every Erlang version needed without running tests.
+ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)))
+
+# User-overridable hooks run before/after each CI run.
+ci-setup::
+ $(verbose) :
+
+ci-extra::
+ $(verbose) :
+
+ci_verbose_0 = @echo " CI " $(1);
+ci_verbose = $(ci_verbose_$(V))
+
+# $1 = target suffix, $2 = kerl installation, $3 = CI_VM value.
+define ci_target
+ci-$1: $(KERL_INSTALL_DIR)/$2
+	$(verbose) $(MAKE) --no-print-directory clean
+	$(ci_verbose) \
+		PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \
+		CI_OTP_RELEASE="$1" \
+		CT_OPTS="-label $1" \
+		CI_VM="$3" \
+		$(MAKE) ci-setup tests
+	$(verbose) $(MAKE) --no-print-directory ci-extra
+endef
+
+# Expand one ci-* target per requested OTP/HiPE/ErLLVM flavor.
+$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp)))
+$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native)))
+$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm)))
+
+# Kerl installation targets for every Erlang version not already installed.
+$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp))))
+# Fix: the variable is CI_ERLLVM; the previous misspelling CI_ERLLLVM
+# (three Ls) always expanded empty, so ErLLVM runs never got a
+# kerl_hipe_target generated for their native Erlang build.
+$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLVM))),$(eval $(call kerl_hipe_target,$(otp))))
+
+# Append the CI section to "make help".
+help::
+ $(verbose) printf "%s\n" "" \
+ "Continuous Integration targets:" \
+ " ci Run '$(MAKE) tests' on all configured Erlang versions." \
+ "" \
+ "The CI_OTP variable must be defined with the Erlang versions" \
+ "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3"
+
+endif
+
+# Copyright (c) 2020, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Concuerror support; active only when CONCUERROR_TESTS (module:test pairs)
+# is defined.
+ifdef CONCUERROR_TESTS
+
+.PHONY: concuerror distclean-concuerror
+
+# Configuration
+
+CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs
+CONCUERROR_OPTS ?=
+
+# Core targets.
+
+check:: concuerror
+
+ifndef KEEP_LOGS
+distclean:: distclean-concuerror
+endif
+
+# Plugin-specific targets.
+
+# Clone and build Concuerror on first use, inside ERLANG_MK_TMP.
+$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP)
+ $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror
+ $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror
+
+$(CONCUERROR_LOGS_DIR):
+ $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR)
+
+# Index page linking the per-test .txt reports; rendered by core_render.
+define concuerror_html_report
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="utf-8">
+<title>Concuerror HTML report</title>
+</head>
+<body>
+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+</body>
+</html>
+endef
+
+# Run every concuerror-MODULE-TEST target, then emit the HTML index.
+concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS)))
+ $(eval concuerror_date := $(shell date))
+ $(eval concuerror_targets := $^)
+ $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html)
+
+# $1 = module, $2 = test function.
+define concuerror_target
+.PHONY: concuerror-$1-$2
+
+concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR)
+	$(ERLANG_MK_TMP)/Concuerror/bin/concuerror \
+		--pa $(CURDIR)/ebin --pa $(TEST_DIR) \
+		-o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \
+		$$(CONCUERROR_OPTS) -m $1 -t $2
+endef
+
+$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test))))))
+
+distclean-concuerror:
+ $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR)
+
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: ct apps-ct distclean-ct
+
+# Configuration.
+
+CT_OPTS ?=
+
+# Default CT_SUITES to every *_SUITE.erl found under TEST_DIR.
+ifneq ($(wildcard $(TEST_DIR)),)
+ifndef CT_SUITES
+CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl))))
+endif
+endif
+CT_SUITES ?=
+CT_LOGS_DIR ?= $(CURDIR)/logs
+
+# Core targets.
+
+tests:: ct
+
+ifndef KEEP_LOGS
+distclean:: distclean-ct
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Common_test targets:" \
+ " ct Run all the common_test suites for this project" \
+ "" \
+ "All your common_test suites have their associated targets." \
+ "A suite named http_SUITE can be ran using the ct-http target."
+
+# Plugin-specific targets.
+
+CT_RUN = ct_run \
+ -no_auto_compile \
+ -noinput \
+ -pa $(CURDIR)/ebin $(TEST_DIR) \
+ -dir $(TEST_DIR) \
+ -logdir $(CT_LOGS_DIR)
+
+ifeq ($(CT_SUITES),)
+ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+else
+# We do not run tests if we are in an apps/* with no test directory.
+ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1)
+ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct)
+ $(verbose) mkdir -p $(CT_LOGS_DIR)
+ $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS)
+endif
+endif
+
+# Run ct recursively in every application under APPS_DIR.
+ifneq ($(ALL_APPS_DIRS),)
+define ct_app_target
+apps-ct-$1: test-build
+	$$(MAKE) -C $1 ct IS_APP=1
+endef
+
+$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app))))
+
+apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS))
+endif
+
+# t=group[:case] or c=case narrow a ct-SUITE run.
+ifdef t
+ifeq (,$(findstring :,$t))
+CT_EXTRA = -group $t
+else
+t_words = $(subst :, ,$t)
+CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words))
+endif
+else
+ifdef c
+CT_EXTRA = -case $c
+else
+CT_EXTRA =
+endif
+endif
+
+# One ct-SUITE target per suite in CT_SUITES.
+define ct_suite_target
+ct-$(1): test-build
+	$(verbose) mkdir -p $(CT_LOGS_DIR)
+	$(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS)
+endef
+
+$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test))))
+
+distclean-ct:
+ $(gen_verbose) rm -rf $(CT_LOGS_DIR)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: plt distclean-plt dialyze
+
+# Configuration.
+
+DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt
+export DIALYZER_PLT
+
+PLT_APPS ?=
+DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS)
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs
+DIALYZER_PLT_OPTS ?=
+
+# Core targets.
+
+check:: dialyze
+
+distclean:: distclean-plt
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Dialyzer targets:" \
+ " plt Build a PLT file for this project" \
+ " dialyze Analyze the project using Dialyzer"
+
+# Plugin-specific targets.
+
+# Erlang snippet: keep only the -D/-I/-pa options (and their arguments)
+# from the plain arguments, for forwarding ERLC_OPTS to dialyzer.
+define filter_opts.erl
+	Opts = init:get_plain_arguments(),
+	{Filtered, _} = lists:foldl(fun
+		(O, {Os, true}) -> {[O|Os], false};
+		(O = "-D", {Os, _}) -> {[O|Os], true};
+		(O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false};
+		(O = "-I", {Os, _}) -> {[O|Os], true};
+		(O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false};
+		(O = "-pa", {Os, _}) -> {[O|Os], true};
+		(_, Acc) -> Acc
+	end, {[], false}, Opts),
+	io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]),
+	halt().
+endef
+
+# DIALYZER_PLT is a variable understood directly by Dialyzer.
+#
+# We append the path to erts at the end of the PLT. This works
+# because the PLT file is in the external term format and the
+# function binary_to_term/1 ignores any trailing data.
+$(DIALYZER_PLT): deps app
+ $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \
+ while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log))
+ $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \
+ erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2
+ $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@
+
+plt: $(DIALYZER_PLT)
+
+distclean-plt:
+ $(gen_verbose) rm -f $(DIALYZER_PLT)
+
+# If a PLT exists, rebuild it when its trailing erts path no longer matches
+# the running Erlang/OTP; otherwise build it first.
+ifneq ($(wildcard $(DIALYZER_PLT)),)
+dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app)
+ $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \
+ grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \
+ rm $(DIALYZER_PLT); \
+ $(MAKE) plt; \
+ fi
+else
+dialyze: $(DIALYZER_PLT)
+endif
+ $(verbose) dialyzer --no_native `$(ERL) \
+ -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \
+ -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/)
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-edoc edoc
+
+# Configuration.
+
+EDOC_OPTS ?=
+EDOC_SRC_DIRS ?=
+EDOC_OUTPUT ?= doc
+
+# Erlang snippet: run edoc:application/3 over the project, adding every
+# src/c_src directory under EDOC_SRC_DIRS to the source path.
+define edoc.erl
+	SrcPaths = lists:foldl(fun(P, Acc) ->
+		filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc
+	end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]),
+	DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}],
+	edoc:application($(1), ".", [$(2)] ++ DefaultOpts),
+	halt(0).
+endef
+
+# Core targets.
+
+# Only hook into docs when there is something for edoc to process.
+ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),)
+docs:: edoc
+endif
+
+distclean:: distclean-edoc
+
+# Plugin-specific targets.
+
+edoc: distclean-edoc doc-deps
+ $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS)))
+
+distclean-edoc:
+ $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Configuration.
+
+DTL_FULL_PATH ?=
+DTL_PATH ?= templates/
+DTL_PREFIX ?=
+DTL_SUFFIX ?= _dtl
+DTL_OPTS ?=
+
+# Verbosity.
+
+dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F));
+dtl_verbose = $(dtl_verbose_$(V))
+
+# Core targets.
+
+DTL_PATH := $(abspath $(DTL_PATH))
+DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl))
+
+ifneq ($(DTL_FILES),)
+
+DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%)))
+DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES)))
+BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES)))
+
+ifneq ($(words $(DTL_FILES)),0)
+# Rebuild templates when the Makefile changes.
+$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(DTL_FILES); \
+ fi
+ $(verbose) touch $@
+
+ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl
+endif
+
+define erlydtl_compile.erl
+ [begin
+ Module0 = case "$(strip $(DTL_FULL_PATH))" of
+ "" ->
+ filename:basename(F, ".dtl");
+ _ ->
+ "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"),
+ re:replace(F2, "/", "_", [{return, list}, global])
+ end,
+ Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"),
+ case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of
+ ok -> ok;
+ {ok, _} -> ok
+ end
+ end || F <- string:tokens("$(1)", " ")],
+ halt().
+endef
+
+ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/
+ $(if $(strip $?),\
+ $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\
+ -pa ebin/))
+
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Dave Cottlehuber <dch@skunkwerks.at>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-escript escript escript-zip
+
+# Configuration.
+
+ESCRIPT_NAME ?= $(PROJECT)
+ESCRIPT_FILE ?= $(ESCRIPT_NAME)
+
+ESCRIPT_SHEBANG ?= /usr/bin/env escript
+ESCRIPT_COMMENT ?= This is an -*- erlang -*- file
+ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME)
+
+ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null)
+ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip
+
+# Core targets.
+
+distclean:: distclean-escript
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Escript targets:" \
+ " escript Build an executable escript archive" \
+
+# Plugin-specific targets.
+
+escript-zip:: FULL=1
+escript-zip:: deps app
+ $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP))
+ $(verbose) rm -f $(ESCRIPT_ZIP_FILE)
+ $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/*
+ifneq ($(DEPS),)
+ $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \
+ $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \
+ $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log)))))
+endif
+
+escript:: escript-zip
+ $(gen_verbose) printf "%s\n" \
+ "#!$(ESCRIPT_SHEBANG)" \
+ "%% $(ESCRIPT_COMMENT)" \
+ "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE)
+ $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE)
+ $(verbose) chmod +x $(ESCRIPT_FILE)
+
+distclean-escript:
+ $(gen_verbose) rm -f $(ESCRIPT_FILE)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, Enrique Fernandez <enrique.fernandez@erlang-solutions.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: eunit apps-eunit
+
+# Configuration
+
+EUNIT_OPTS ?=
+EUNIT_ERL_OPTS ?=
+
+# Core targets.
+
+tests:: eunit
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "EUnit targets:" \
+ " eunit Run all the EUnit tests for this project"
+
+# Plugin-specific targets.
+
+define eunit.erl
+ $(call cover.erl)
+ CoverSetup(),
+ case eunit:test($1, [$(EUNIT_OPTS)]) of
+ ok -> ok;
+ error -> halt(2)
+ end,
+ CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"),
+ halt()
+endef
+
+EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS))
+else
+eunit: test-build cover-data-dir
+ $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS))
+endif
+else
+EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES)))
+EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl)))
+
+EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \
+ $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)')
+
+eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir
+ifneq ($(wildcard src/ $(TEST_DIR)),)
+ $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS))
+endif
+
+ifneq ($(ALL_APPS_DIRS),)
+apps-eunit: test-build
+ $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \
+ [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \
+ exit $$eunit_retcode
+endif
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Verbosity.
+
+proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F));
+proto_verbose = $(proto_verbose_$(V))
+
+# Core targets.
+
+ifneq ($(wildcard src/),)
+ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),)
+PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES))
+ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES))))
+
+ifeq ($(PROTO_FILES),)
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs:
+ $(verbose) :
+else
+# Rebuild proto files when the Makefile changes.
+# We exclude $(PROJECT).d to avoid a circular dependency.
+$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP)
+ $(verbose) if test -f $@; then \
+ touch $(PROTO_FILES); \
+ fi
+ $(verbose) touch $@
+
+$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs
+endif
+
+ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),)
+define compile_proto.erl
+ [begin
+ protobuffs_compile:generate_source(F, [
+ {output_include_dir, "./include"},
+ {output_src_dir, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+else
+define compile_proto.erl
+ [begin
+ gpb_compile:file(F, [
+ {include_as_lib, true},
+ {module_name_suffix, "_pb"},
+ {o_hrl, "./include"},
+ {o_erl, "./src"}])
+ end || F <- string:tokens("$1", " ")],
+ halt().
+endef
+endif
+
+ifneq ($(PROTO_FILES),)
+$(PROJECT).d:: $(PROTO_FILES)
+ $(verbose) mkdir -p ebin/ include/
+ $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?)))
+endif
+endif
+endif
+
+# Copyright (c) 2013-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: relx-rel relx-relup distclean-relx-rel run
+
+# Configuration.
+
+RELX ?= $(ERLANG_MK_TMP)/relx
+RELX_CONFIG ?= $(CURDIR)/relx.config
+
+RELX_URL ?= https://erlang.mk/res/relx-v3.27.0
+RELX_OPTS ?=
+RELX_OUTPUT_DIR ?= _rel
+RELX_REL_EXT ?=
+RELX_TAR ?= 1
+
+ifdef SFX
+ RELX_TAR = 1
+endif
+
+ifeq ($(firstword $(RELX_OPTS)),-o)
+ RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS))
+else
+ RELX_OPTS += -o $(RELX_OUTPUT_DIR)
+endif
+
+# Core targets.
+
+ifeq ($(IS_DEP),)
+ifneq ($(wildcard $(RELX_CONFIG)),)
+rel:: relx-rel
+
+relup:: relx-relup
+endif
+endif
+
+distclean:: distclean-relx-rel
+
+# Plugin-specific targets.
+
+$(RELX): | $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL))
+ $(verbose) chmod +x $(RELX)
+
+relx-rel: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(verbose) $(MAKE) relx-post-rel
+ifeq ($(RELX_TAR),1)
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar
+endif
+
+relx-relup: $(RELX) rel-deps app
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release
+ $(MAKE) relx-post-rel
+ $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar)
+
+distclean-relx-rel:
+ $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR)
+
+# Default hooks.
+relx-post-rel::
+ $(verbose) :
+
+# Run target.
+
+ifeq ($(wildcard $(RELX_CONFIG)),)
+run::
+else
+
+define get_relx_release.erl
+ {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"),
+ {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config),
+ Vsn = case Vsn0 of
+ {cmd, Cmd} -> os:cmd(Cmd);
+ semver -> "";
+ {semver, _} -> "";
+ VsnStr -> Vsn0
+ end,
+ Extended = case lists:keyfind(extended_start_script, 1, Config) of
+ {_, true} -> "1";
+ _ -> ""
+ end,
+ io:format("~s ~s ~s", [Name, Vsn, Extended]),
+ halt(0).
+endef
+
+RELX_REL := $(shell $(call erlang,$(get_relx_release.erl)))
+RELX_REL_NAME := $(word 1,$(RELX_REL))
+RELX_REL_VSN := $(word 2,$(RELX_REL))
+RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console)
+
+ifeq ($(PLATFORM),msys2)
+RELX_REL_EXT := .cmd
+endif
+
+run:: all
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD)
+
+ifdef RELOAD
+rel::
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping
+ $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \
+ eval "io:format(\"~p~n\", [c:lm()])"
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Relx targets:" \
+ " run Compile the project, build the release and run it"
+
+endif
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2014, M Robert Martin <rob@version2beta.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: shell
+
+# Configuration.
+
+SHELL_ERL ?= erl
+SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR)
+SHELL_OPTS ?=
+
+ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS))
+
+# Core targets
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Shell targets:" \
+ " shell Run an erlang shell with SHELL_OPTS or reasonable default"
+
+# Plugin-specific targets.
+
+$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep))))
+
+ifneq ($(SKIP_DEPS),)
+build-shell-deps:
+else
+build-shell-deps: $(ALL_SHELL_DEPS_DIRS)
+ $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \
+ if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \
+ :; \
+ else \
+ $(MAKE) -C $$dep IS_DEP=1; \
+ if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \
+ fi \
+ done
+endif
+
+shell:: build-shell-deps
+ $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS)
+
+# Copyright 2017, Stanislaw Klekot <dozzie@jarowit.net>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: distclean-sphinx sphinx
+
+# Configuration.
+
+SPHINX_BUILD ?= sphinx-build
+SPHINX_SOURCE ?= doc
+SPHINX_CONFDIR ?=
+SPHINX_FORMATS ?= html
+SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees
+SPHINX_OPTS ?=
+
+#sphinx_html_opts =
+#sphinx_html_output = html
+#sphinx_man_opts =
+#sphinx_man_output = man
+#sphinx_latex_opts =
+#sphinx_latex_output = latex
+
+# Helpers.
+
+sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q
+sphinx_build_1 = $(SPHINX_BUILD) -N
+sphinx_build_2 = set -x; $(SPHINX_BUILD)
+sphinx_build = $(sphinx_build_$(V))
+
+define sphinx.build
+$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1)
+
+endef
+
+define sphinx.output
+$(if $(sphinx_$1_output),$(sphinx_$1_output),$1)
+endef
+
+# Targets.
+
+ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),)
+docs:: sphinx
+distclean:: distclean-sphinx
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Sphinx targets:" \
+ " sphinx Generate Sphinx documentation." \
+ "" \
+ "ReST sources and 'conf.py' file are expected in directory pointed by" \
+ "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists formats to build (only" \
+ "'html' format is generated by default); target directory can be specified by" \
+ 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \
+ "Additional Sphinx options can be set in SPHINX_OPTS."
+
+# Plugin-specific targets.
+
+sphinx:
+ $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F))
+
+distclean-sphinx:
+ $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F)))
+
+# Copyright (c) 2017, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is contributed to erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS
+
+show-ERL_LIBS:
+ @echo $(ERL_LIBS)
+
+show-ERLC_OPTS:
+ @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+show-TEST_ERLC_OPTS:
+ @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";)
+
+# Copyright (c) 2015-2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq)
+.PHONY: triq
+
+# Targets.
+
+tests:: triq
+
+define triq_check.erl
+ $(call cover.erl)
+ code:add_pathsa([
+ "$(call core_native_path,$(CURDIR)/ebin)",
+ "$(call core_native_path,$(DEPS_DIR)/*/ebin)",
+ "$(call core_native_path,$(TEST_DIR))"]),
+ try begin
+ CoverSetup(),
+ Res = case $(1) of
+ all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]);
+ module -> triq:check($(2));
+ function -> triq:check($(2))
+ end,
+ CoverExport("$(COVER_DATA_DIR)/triq.coverdata"),
+ Res
+ end of
+ true -> halt(0);
+ _ -> halt(1)
+ catch error:undef ->
+ io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]),
+ halt(0)
+ end.
+endef
+
+ifdef t
+ifeq (,$(findstring :,$(t)))
+triq: test-build cover-data-dir
+ $(verbose) $(call erlang,$(call triq_check.erl,module,$(t)))
+else
+triq: test-build cover-data-dir
+ $(verbose) echo Testing $(t)/0
+ $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)()))
+endif
+else
+triq: test-build cover-data-dir
+ $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \
+ $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam))))))
+ $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES)))
+endif
+endif
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Erlang Solutions Ltd.
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: xref distclean-xref
+
+# Configuration.
+
+ifeq ($(XREF_CONFIG),)
+ XREFR_ARGS :=
+else
+ XREFR_ARGS := -c $(XREF_CONFIG)
+endif
+
+XREFR ?= $(CURDIR)/xrefr
+export XREFR
+
+XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr
+
+# Core targets.
+
+help::
+ $(verbose) printf '%s\n' '' \
+ 'Xref targets:' \
+ ' xref Run Xrefr using $$XREF_CONFIG as config file if defined'
+
+distclean:: distclean-xref
+
+# Plugin-specific targets.
+
+$(XREFR):
+ $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL))
+ $(verbose) chmod +x $(XREFR)
+
+xref: deps app $(XREFR)
+ $(gen_verbose) $(XREFR) $(XREFR_ARGS)
+
+distclean-xref:
+ $(gen_verbose) rm -rf $(XREFR)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015, Viktor Söderqvist <viktor@zuiderkwast.se>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+COVER_REPORT_DIR ?= cover
+COVER_DATA_DIR ?= $(COVER_REPORT_DIR)
+
+ifdef COVER
+COVER_APPS ?= $(notdir $(ALL_APPS_DIRS))
+COVER_DEPS ?=
+endif
+
+# Code coverage for Common Test.
+
+ifdef COVER
+ifdef CT_RUN
+ifneq ($(wildcard $(TEST_DIR)),)
+test-build:: $(TEST_DIR)/ct.cover.spec
+
+$(TEST_DIR)/ct.cover.spec: cover-data-dir
+ $(gen_verbose) printf "%s\n" \
+ "{incl_app, '$(PROJECT)', details}." \
+ "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \
+ $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \
+ $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \
+ '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@
+
+CT_RUN += -cover $(TEST_DIR)/ct.cover.spec
+endif
+endif
+endif
+
+# Code coverage for other tools.
+
+ifdef COVER
+define cover.erl
+ CoverSetup = fun() ->
+ Dirs = ["$(call core_native_path,$(CURDIR)/ebin)"
+ $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)")
+ $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")],
+ [begin
+ case filelib:is_dir(Dir) of
+ false -> false;
+ true ->
+ case cover:compile_beam_directory(Dir) of
+ {error, _} -> halt(1);
+ _ -> true
+ end
+ end
+ end || Dir <- Dirs]
+ end,
+ CoverExport = fun(Filename) -> cover:export(Filename) end,
+endef
+else
+define cover.erl
+ CoverSetup = fun() -> ok end,
+ CoverExport = fun(_) -> ok end,
+endef
+endif
+
+# Core targets
+
+ifdef COVER
+ifneq ($(COVER_REPORT_DIR),)
+tests::
+ $(verbose) $(MAKE) --no-print-directory cover-report
+endif
+
+cover-data-dir: | $(COVER_DATA_DIR)
+
+$(COVER_DATA_DIR):
+ $(verbose) mkdir -p $(COVER_DATA_DIR)
+else
+cover-data-dir:
+endif
+
+clean:: coverdata-clean
+
+ifneq ($(COVER_REPORT_DIR),)
+distclean:: cover-report-clean
+endif
+
+help::
+ $(verbose) printf "%s\n" "" \
+ "Cover targets:" \
+ " cover-report Generate a HTML coverage report from previously collected" \
+ " cover data." \
+ " all.coverdata Merge all coverdata files into all.coverdata." \
+ "" \
+ "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \
+ "target tests additionally generates a HTML coverage report from the combined" \
+ "coverdata files from each of these testing tools. HTML reports can be disabled" \
+ "by setting COVER_REPORT_DIR to empty."
+
+# Plugin specific targets
+
+COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata))
+
+.PHONY: coverdata-clean
+coverdata-clean:
+ $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec
+
+# Merge all coverdata files into one.
+define cover_export.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ cover:export("$(COVER_DATA_DIR)/$@"), halt(0).
+endef
+
+all.coverdata: $(COVERDATA) cover-data-dir
+ $(gen_verbose) $(call erlang,$(cover_export.erl))
+
+# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to
+# empty if you want the coverdata files but not the HTML report.
+ifneq ($(COVER_REPORT_DIR),)
+
+.PHONY: cover-report-clean cover-report
+
+cover-report-clean:
+ $(gen_verbose) rm -rf $(COVER_REPORT_DIR)
+ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR))
+ $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR))
+endif
+
+ifeq ($(COVERDATA),)
+cover-report:
+else
+
+# Modules which include eunit.hrl always contain one line without coverage
+# because eunit defines test/0 which is never called. We compensate for this.
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+ grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+ | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+ $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+ Ms = cover:imported_modules(),
+ [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+ ++ ".COVER.html", [html]) || M <- Ms],
+ Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+ EunitHrlMods = [$(EUNIT_HRL_MODS)],
+ Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+ true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+ TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+ TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+ Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+ TotalPerc = Perc(TotalY, TotalN),
+ {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+ io:format(F, "<!DOCTYPE html><html>~n"
+ "<head><meta charset=\"UTF-8\">~n"
+ "<title>Coverage report</title></head>~n"
+ "<body>~n", []),
+ io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+ "</body></html>", [How, Date]),
+ halt().
+endef
+
+cover-report:
+ $(verbose) mkdir -p $(COVER_REPORT_DIR)
+ $(gen_verbose) $(call erlang,$(cover_report.erl))
+
+endif
+endif # ifneq ($(COVER_REPORT_DIR),)
+
+# Copyright (c) 2016, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+.PHONY: sfx
+
+ifdef RELX_REL
+ifdef SFX
+
+# Configuration.
+
+SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz
+SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run
+
+# Core targets.
+
+rel:: sfx
+
+# Plugin-specific targets.
+
+define sfx_stub
+#!/bin/sh
+
+TMPDIR=`mktemp -d`
+ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0`
+FILENAME=$$(basename $$0)
+REL=$${FILENAME%.*}
+
+tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR
+
+$$TMPDIR/bin/$$REL console
+RET=$$?
+
+rm -rf $$TMPDIR
+
+exit $$RET
+
+__ARCHIVE_BELOW__
+endef
+
+sfx:
+ $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE))
+ $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE)
+ $(verbose) chmod +x $(SFX_OUTPUT_FILE)
+
+endif
+endif
+
+# Copyright (c) 2013-2017, Loïc Hoguin <essen@ninenines.eu>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# External plugins.
+
+DEP_PLUGINS ?=
+
+$(foreach p,$(DEP_PLUGINS),\
+ $(eval $(if $(findstring /,$p),\
+ $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\
+ $(call core_dep_plugin,$p/plugins.mk,$p))))
+
+help:: help-plugins
+
+help-plugins::
+ $(verbose) :
+
+# Copyright (c) 2013-2015, Loïc Hoguin <essen@ninenines.eu>
+# Copyright (c) 2015-2016, Jean-Sébastien Pédron <jean-sebastien@rabbitmq.com>
+# This file is part of erlang.mk and subject to the terms of the ISC License.
+
+# Fetch dependencies recursively (without building them).
+
+.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \
+ fetch-shell-deps
+
+.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+ifneq ($(SKIP_DEPS),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST):
+ $(verbose) :> $@
+else
+# By default, we fetch "normal" dependencies. They are also included no
+# matter the type of requested dependencies.
+#
+# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS).
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS)
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS)
+
+# Allow to use fetch-deps and $(DEP_TYPES) to fetch multiple types of
+# dependencies with a single target.
+ifneq ($(filter doc,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS)
+endif
+ifneq ($(filter rel,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS)
+endif
+ifneq ($(filter test,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS)
+endif
+ifneq ($(filter shell,$(DEP_TYPES)),)
+$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS)
+endif
+
+ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log)
+
+$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+ $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST)
+ $(verbose) set -e; for dep in $^ ; do \
+ if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \
+ echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \
+ $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \
+ $(MAKE) -C $$dep fetch-deps \
+ IS_DEP=1 \
+ ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \
+ fi \
+ fi \
+ done
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \
+ uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \
+ || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@
+ $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted
+ $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST)
+endif
+endif # ifneq ($(SKIP_DEPS),)
+
+# List dependencies recursively.
+
+.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \
+ list-shell-deps
+
+list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST)
+list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST)
+list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST)
+
+list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps:
+ $(verbose) cat $^
+
+# Query dependencies recursively.
+
+.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \
+ query-shell-deps
+
+QUERY ?= name fetch_method repo version
+
+define query_target
+$(1): $(2) clean-tmp-query.log
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) rm -f $(4)
+endif
+ $(verbose) $(foreach dep,$(3),\
+ echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;)
+ $(if $(filter-out query-deps,$(1)),,\
+ $(verbose) set -e; for dep in $(3) ; do \
+ if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \
+ :; \
+ else \
+ echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \
+ $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \
+ fi \
+ done)
+ifeq ($(IS_APP)$(IS_DEP),)
+ $(verbose) touch $(4)
+ $(verbose) cat $(4)
+endif
+endef
+
+clean-tmp-query.log:
+ifeq ($(IS_DEP),)
+ $(verbose) rm -f $(ERLANG_MK_TMP)/query.log
+endif
+
+$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE)))
+$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE)))
+$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE)))
+$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE)))
+$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE)))
diff --git a/deps/rabbit_common/include/rabbit.hrl b/deps/rabbit_common/include/rabbit.hrl
new file mode 100644
index 0000000000..707f8099e0
--- /dev/null
+++ b/deps/rabbit_common/include/rabbit.hrl
@@ -0,0 +1,267 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-include("resource.hrl").
+
+%% Passed around most places
+-record(user, {username,
+ tags,
+ authz_backends}). %% List of {Module, AuthUserImpl} pairs
+
+%% Passed to auth backends
+-record(auth_user, {username,
+ tags,
+ impl}).
+
+-record(permission, {configure, write, read}).
+-record(user_vhost, {username, virtual_host}).
+-record(user_permission, {user_vhost, permission}).
+-record(topic_permission_key, {user_vhost, exchange}).
+-record(topic_permission, {topic_permission_key, permission}).
+
+%% Client connection, used by rabbit_reader
+%% and related modules.
+-record(connection, {
+ %% e.g. <<"127.0.0.1:55054 -> 127.0.0.1:5672">>
+ name,
+ %% used for logging: same as `name`, but optionally
+ %% augmented with user-supplied name
+ log_name,
+ %% server host
+ host,
+ %% client host
+ peer_host,
+ %% server port
+ port,
+ %% client port
+ peer_port,
+ %% protocol implementation module,
+ %% e.g. rabbit_framing_amqp_0_9_1
+ protocol,
+ user,
+ %% heartbeat timeout value used, 0 means
+ %% heartbeats are disabled
+ timeout_sec,
+ %% maximum allowed frame size,
+ %% see frame_max in the AMQP 0-9-1 spec
+ frame_max,
+ %% greatest channel number allowed,
+ %% see channel_max in the AMQP 0-9-1 spec
+ channel_max,
+ vhost,
+ %% client name, version, platform, etc
+ client_properties,
+                     %% which protocol extensions
+ %% does this client support?
+ capabilities,
+ %% authentication mechanism used
+ %% as a pair of {Name, Module}
+ auth_mechanism,
+ %% authentication mechanism state,
+ %% initialised by rabbit_auth_mechanism:init/1
+ %% implementations
+ auth_state,
+ %% time of connection
+ connected_at}).
+
+-record(content,
+ {class_id,
+ properties, %% either 'none', or a decoded record/tuple
+ properties_bin, %% either 'none', or an encoded properties binary
+ %% Note: at most one of properties and properties_bin can be
+ %% 'none' at once.
+ protocol, %% The protocol under which properties_bin was encoded
+ payload_fragments_rev %% list of binaries, in reverse order (!)
+ }).
+
+%% fields described as 'transient' here are cleared when writing to
+%% rabbit_durable_<thing>
+-record(exchange, {
+ name, type, durable, auto_delete, internal, arguments, %% immutable
+ scratches, %% durable, explicitly updated via update_scratch/3
+ policy, %% durable, implicitly updated when policy changes
+ operator_policy, %% durable, implicitly updated when policy changes
+ decorators,
+ options = #{}}). %% transient, recalculated in store/1 (i.e. recovery)
+
+-record(exchange_serial, {name, next}).
+
+%% mnesia doesn't like unary records, so we add a dummy 'value' field
+-record(route, {binding, value = const}).
+-record(reverse_route, {reverse_binding, value = const}).
+
+-record(binding, {source, key, destination, args = []}).
+-record(reverse_binding, {destination, key, source, args = []}).
+
+-record(topic_trie_node, {trie_node, edge_count, binding_count}).
+-record(topic_trie_edge, {trie_edge, node_id}).
+-record(topic_trie_binding, {trie_binding, value = const}).
+
+-record(trie_node, {exchange_name, node_id}).
+-record(trie_edge, {exchange_name, node_id, word}).
+-record(trie_binding, {exchange_name, node_id, destination, arguments}).
+
+-record(listener, {node, protocol, host, ip_address, port, opts = []}).
+
+-record(runtime_parameters, {key, value}).
+
+-record(basic_message,
+ {exchange_name, %% The exchange where the message was received
+ routing_keys = [], %% Routing keys used during publish
+ content, %% The message content
+ id, %% A `rabbit_guid:gen()` generated id
+ is_persistent}). %% Whether the message was published as persistent
+
+-record(delivery,
+ {mandatory, %% Whether the message was published as mandatory
+ confirm, %% Whether the message needs confirming
+ sender, %% The pid of the process that created the delivery
+ message, %% The #basic_message record
+ msg_seq_no, %% Msg Sequence Number from the channel publish_seqno field
+ flow}). %% Should flow control be used for this delivery
+
+-record(amqp_error, {name, explanation = "", method = none}).
+
+-record(event, {type, props, reference = undefined, timestamp}).
+
+-record(message_properties, {expiry, needs_confirming = false, size}).
+
+-record(plugin, {name, %% atom()
+ version, %% string()
+ description, %% string()
+ type, %% 'ez' or 'dir'
+ dependencies, %% [atom()]
+ location, %% string()
+ %% List of supported broker version ranges,
+ %% e.g. ["3.5.7", "3.6.1"]
+ broker_version_requirements, %% [string()]
+ %% Proplist of supported dependency versions,
+ %% e.g. [{rabbitmq_management, ["3.5.7", "3.6.1"]},
+ %% {rabbitmq_federation, ["3.5.7", "3.6.1"]},
+ %% {rabbitmq_email, ["0.1.0"]}]
+ dependency_version_requirements, %% [{atom(), [string()]}]
+ extra_dependencies %% string()
+ }).
+
+%% used to track connections across virtual hosts
+%% so that limits can be enforced
+-record(tracked_connection_per_vhost, {
+ vhost,
+ connection_count}).
+
+%% Used to track connections per user
+%% so that limits can be enforced
+-record(tracked_connection_per_user, {
+ user,
+ connection_count
+ }).
+
+%% Used to track detailed information
+%% about connections.
+-record(tracked_connection, {
+ %% {Node, ConnectionName}
+ id,
+ node,
+ vhost,
+ name,
+ pid,
+ protocol,
+ %% network or direct
+ type,
+ %% client host
+ peer_host,
+ %% client port
+ peer_port,
+ username,
+ %% time of connection
+ connected_at
+ }).
+
+%% Used to track channels per user
+%% so that limits can be enforced
+-record(tracked_channel_per_user, {
+ user,
+ channel_count
+ }).
+
+%% Used to track detailed information
+%% about channels.
+-record(tracked_channel, {
+ %% {Node, ChannelName}
+ id,
+ node,
+ vhost,
+ name,
+ pid,
+ username,
+ connection}).
+
+%% Indicates maintenance state of a node
+-record(node_maintenance_state, {
+ node,
+ status = regular,
+ context = #{}
+ }).
+%%----------------------------------------------------------------------------
+
+-define(COPYRIGHT_MESSAGE, "Copyright (c) 2007-2020 VMware, Inc. or its affiliates.").
+-define(INFORMATION_MESSAGE, "Licensed under the MPL 2.0. Website: https://rabbitmq.com").
+
+%% EMPTY_FRAME_SIZE, 8 = 1 + 2 + 4 + 1
+%% - 1 byte of frame type
+%% - 2 bytes of channel number
+%% - 4 bytes of frame payload length
+%% - 1 byte of payload trailer FRAME_END byte
+%% See rabbit_binary_generator:check_empty_frame_size/0, an assertion
+%% called at startup.
+-define(EMPTY_FRAME_SIZE, 8).
+
+-define(MAX_WAIT, 16#ffffffff).
+-define(SUPERVISOR_WAIT,
+ rabbit_misc:get_env(rabbit, supervisor_shutdown_timeout, infinity)).
+-define(WORKER_WAIT,
+ rabbit_misc:get_env(rabbit, worker_shutdown_timeout, 30000)).
+-define(MSG_STORE_WORKER_WAIT,
+ rabbit_misc:get_env(rabbit, msg_store_shutdown_timeout, 600000)).
+
+-define(HIBERNATE_AFTER_MIN, 1000).
+-define(DESIRED_HIBERNATE, 10000).
+-define(CREDIT_DISC_BOUND, {4000, 800}).
+%% When we discover that we should write some indices to disk for some
+%% betas, IO_BATCH_SIZE sets the minimum number of betas that must be
+%% pending an index write before we do any work at all.
+-define(IO_BATCH_SIZE, 4096). %% next power-of-2 after ?CREDIT_DISC_BOUND
+
+-define(INVALID_HEADERS_KEY, <<"x-invalid-headers">>).
+-define(ROUTING_HEADERS, [<<"CC">>, <<"BCC">>]).
+-define(DELETED_HEADER, <<"BCC">>).
+
+-define(EXCHANGE_DELETE_IN_PROGRESS_COMPONENT, <<"exchange-delete-in-progress">>).
+
+-define(CHANNEL_OPERATION_TIMEOUT, rabbit_misc:get_channel_operation_timeout()).
+
+%% Max supported number of priorities for a priority queue.
+-define(MAX_SUPPORTED_PRIORITY, 255).
+
+%% Max message size is hard limited to 512 MiB.
+%% If user configures a greater rabbit.max_message_size,
+%% this value is used instead.
+-define(MAX_MSG_SIZE, 536870912).
+
+-define(store_proc_name(N), rabbit_misc:store_proc_name(?MODULE, N)).
+
+%% For event audit purposes
+-define(INTERNAL_USER, <<"rmq-internal">>).
+-define(UNKNOWN_USER, <<"unknown">>).
+
+%% Store metadata in the trace files when message tracing is enabled.
+-define(LG_INFO(Info), is_pid(whereis(lg)) andalso (lg ! Info)).
+-define(LG_PROCESS_TYPE(Type), ?LG_INFO(#{process_type => Type})).
+
+%% Execution timeout of connection and channel tracking operations
+-define(TRACKING_EXECUTION_TIMEOUT,
+ rabbit_misc:get_env(rabbit, tracking_execution_timeout, 5000)).
diff --git a/deps/rabbit_common/include/rabbit_core_metrics.hrl b/deps/rabbit_common/include/rabbit_core_metrics.hrl
new file mode 100644
index 0000000000..17ffa2535b
--- /dev/null
+++ b/deps/rabbit_common/include/rabbit_core_metrics.hrl
@@ -0,0 +1,52 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% These tables contain the raw metrics as stored by RabbitMQ core
+-define(CORE_TABLES, [{connection_created, set},
+ {connection_metrics, set},
+ {connection_coarse_metrics, set},
+ {channel_created, set},
+ {channel_metrics, set},
+ {channel_queue_exchange_metrics, set},
+ {channel_queue_metrics, set},
+ {channel_exchange_metrics, set},
+ {channel_process_metrics, set},
+ {consumer_created, set},
+ {queue_metrics, set},
+ {queue_coarse_metrics, set},
+ {node_persister_metrics, set},
+ {node_coarse_metrics, set},
+ {node_metrics, set},
+ {node_node_metrics, set},
+ {connection_churn_metrics, set}]).
+
+-define(CORE_EXTRA_TABLES, [{gen_server2_metrics, set},
+ {auth_attempt_metrics, set},
+ {auth_attempt_detailed_metrics, set}]).
+
+-define(CONNECTION_CHURN_METRICS, {node(), 0, 0, 0, 0, 0, 0, 0}).
+
+%% connection_created :: {connection_id, proplist}
+%% connection_metrics :: {connection_id, proplist}
+%% connection_coarse_metrics :: {connection_id, recv_oct, send_oct, reductions}
+%% channel_created :: {channel_id, proplist}
+%% channel_metrics :: {channel_id, proplist}
+%% channel_queue_exchange_metrics :: {{channel_id, {queue_id, exchange_id}}, publish}
+%% channel_queue_metrics :: {{channel_id, queue_id}, proplist}
+%% channel_exchange_metrics :: {{channel_id, exchange_id}, proplist}
+%% channel_process_metrics :: {channel_id, reductions}
+%% consumer_created :: {{queue_id, channel_id, consumer_tag}, exclusive_consume,
+%% ack_required, prefetch_count, args}
+%% queue_metrics :: {queue_id, proplist}
+%% queue_coarse_metrics :: {queue_id, messages_ready, messages_unacknowledged,
+%% messages, reductions}
+%% node_persister_metrics :: {node_id, proplist}
+%% node_coarse_metrics :: {node_id, proplist}
+%% node_metrics :: {node_id, proplist}
+%% node_node_metrics :: {{node_id, node_id}, proplist}
+%% gen_server2_metrics :: {pid, buffer_length}
+%% connection_churn_metrics :: {node(), connection_created, connection_closed, channel_created, channel_closed, queue_declared, queue_created, queue_deleted}
diff --git a/deps/rabbit_common/include/rabbit_log.hrl b/deps/rabbit_common/include/rabbit_log.hrl
new file mode 100644
index 0000000000..9ce908e997
--- /dev/null
+++ b/deps/rabbit_common/include/rabbit_log.hrl
@@ -0,0 +1,8 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-define(LAGER_SINK, rabbit_log_lager_event).
diff --git a/deps/rabbit_common/include/rabbit_memory.hrl b/deps/rabbit_common/include/rabbit_memory.hrl
new file mode 100644
index 0000000000..c9991550fb
--- /dev/null
+++ b/deps/rabbit_common/include/rabbit_memory.hrl
@@ -0,0 +1,16 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-define(DEFAULT_MEMORY_CHECK_INTERVAL, 1000).
+-define(ONE_MiB, 1048576).
+
+%% For an unknown OS, we assume that we have 1GB of memory. It'll be
+%% wrong. Scale by vm_memory_high_watermark in configuration to get a
+%% sensible value.
+-define(MEMORY_SIZE_FOR_UNKNOWN_OS, 1073741824).
+-define(DEFAULT_VM_MEMORY_HIGH_WATERMARK, 0.4).
+-define(MAX_VM_MEMORY_HIGH_WATERMARK, 1.0).
diff --git a/deps/rabbit_common/include/rabbit_misc.hrl b/deps/rabbit_common/include/rabbit_misc.hrl
new file mode 100644
index 0000000000..98d4051a27
--- /dev/null
+++ b/deps/rabbit_common/include/rabbit_misc.hrl
@@ -0,0 +1,9 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-define(RPC_TIMEOUT, 15000).
+-define(RPC_INFINITE_TIMEOUT, infinity).
diff --git a/deps/rabbit_common/include/rabbit_msg_store.hrl b/deps/rabbit_common/include/rabbit_msg_store.hrl
new file mode 100644
index 0000000000..9d184ae153
--- /dev/null
+++ b/deps/rabbit_common/include/rabbit_msg_store.hrl
@@ -0,0 +1,12 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-include("rabbit.hrl").
+
+-type(msg() :: any()).
+
+-record(msg_location, {msg_id, ref_count, file, offset, total_size}).
diff --git a/deps/rabbit_common/include/resource.hrl b/deps/rabbit_common/include/resource.hrl
new file mode 100644
index 0000000000..5b2697f4d0
--- /dev/null
+++ b/deps/rabbit_common/include/resource.hrl
@@ -0,0 +1,14 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-record(resource, {
+ virtual_host,
+ %% exchange, queue, ...
+ kind,
+ %% name as a binary
+ name
+}).
diff --git a/deps/rabbit_common/mk/rabbitmq-build.mk b/deps/rabbit_common/mk/rabbitmq-build.mk
new file mode 100644
index 0000000000..2fedcf629b
--- /dev/null
+++ b/deps/rabbit_common/mk/rabbitmq-build.mk
@@ -0,0 +1,42 @@
+# --------------------------------------------------------------------
+# Compiler flags.
+# --------------------------------------------------------------------
+
+ifeq ($(filter rabbitmq-macros.mk,$(notdir $(MAKEFILE_LIST))),)
+include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-macros.mk
+endif
+
+# NOTE: This plugin is loaded twice because Erlang.mk recurses. That's
+# why ERL_LIBS may contain the path to Elixir libraries twice, or
+# ERLC_OPTS may contain duplicated flags.
+
+TEST_ERLC_OPTS += +nowarn_export_all
+
+ifneq ($(filter-out rabbit_common amqp_client,$(PROJECT)),)
+# Add the CLI ebin directory to the code path for the compiler: plugin
+# CLI extensions may access behaviour modules defined in this directory.
+RMQ_ERLC_OPTS += -pa $(DEPS_DIR)/rabbitmq_cli/_build/dev/lib/rabbitmqctl/ebin
+endif
+
+# Add Lager parse_transform module and our default Lager extra sinks.
+LAGER_EXTRA_SINKS += rabbit_log \
+ rabbit_log_channel \
+ rabbit_log_connection \
+ rabbit_log_feature_flags \
+ rabbit_log_federation \
+ rabbit_log_ldap \
+ rabbit_log_mirroring \
+ rabbit_log_osiris \
+ rabbit_log_prelaunch \
+ rabbit_log_queue \
+ rabbit_log_ra \
+ rabbit_log_shovel \
+ rabbit_log_upgrade
+lager_extra_sinks = $(subst $(space),$(comma),$(LAGER_EXTRA_SINKS))
+
+RMQ_ERLC_OPTS += +'{parse_transform,lager_transform}' \
+ +'{lager_extra_sinks,[$(lager_extra_sinks)]}'
+
+# Push our compilation options to both the normal and test ERLC_OPTS.
+ERLC_OPTS += $(RMQ_ERLC_OPTS)
+TEST_ERLC_OPTS += $(RMQ_ERLC_OPTS)
diff --git a/deps/rabbit_common/mk/rabbitmq-components.hexpm.mk b/deps/rabbit_common/mk/rabbitmq-components.hexpm.mk
new file mode 100644
index 0000000000..4b110176a7
--- /dev/null
+++ b/deps/rabbit_common/mk/rabbitmq-components.hexpm.mk
@@ -0,0 +1,36 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define default goal to `all` because this file defines some targets
+# before the inclusion of erlang.mk leading to the wrong target becoming
+# the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+dep_amqp_client = hex $(PROJECT_VERSION)
+dep_rabbit_common = hex $(PROJECT_VERSION)
+
+# Third-party dependencies version pinning.
diff --git a/deps/rabbit_common/mk/rabbitmq-components.mk b/deps/rabbit_common/mk/rabbitmq-components.mk
new file mode 100644
index 0000000000..b2a3be8b35
--- /dev/null
+++ b/deps/rabbit_common/mk/rabbitmq-components.mk
@@ -0,0 +1,359 @@
+ifeq ($(.DEFAULT_GOAL),)
+# Define default goal to `all` because this file defines some targets
+# before the inclusion of erlang.mk leading to the wrong target becoming
+# the default.
+.DEFAULT_GOAL = all
+endif
+
+# PROJECT_VERSION defaults to:
+# 1. the version exported by rabbitmq-server-release;
+# 2. the version stored in `git-revisions.txt`, if it exists;
+# 3. a version based on git-describe(1), if it is a Git clone;
+# 4. 0.0.0
+
+PROJECT_VERSION := $(RABBITMQ_VERSION)
+
+ifeq ($(PROJECT_VERSION),)
+PROJECT_VERSION := $(shell \
+if test -f git-revisions.txt; then \
+ head -n1 git-revisions.txt | \
+ awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \
+else \
+ (git describe --dirty --abbrev=7 --tags --always --first-parent \
+ 2>/dev/null || echo rabbitmq_v0_0_0) | \
+ sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \
+ -e 's/-/./g'; \
+fi)
+endif
+
+# --------------------------------------------------------------------
+# RabbitMQ components.
+# --------------------------------------------------------------------
+
+# For RabbitMQ repositories, we want to checkout branches which match
+# the parent project. For instance, if the parent project is on a
+# release tag, dependencies must be on the same release tag. If the
+# parent project is on a topic branch, dependencies must be on the same
+# topic branch or fallback to `stable` or `master` whichever was the
+# base of the topic branch.
+
+dep_amqp_client = git_rmq rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_client = git_rmq rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_amqp10_common = git_rmq rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit = git_rmq rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbit_common = git_rmq rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_amqp1_0 = git_rmq rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_cache = git_rmq rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_http = git_rmq rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_ldap = git_rmq rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_backend_oauth2 = git_rmq rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_auth_mechanism_ssl = git_rmq rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_aws = git_rmq rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_cli = git_rmq rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_codegen = git_rmq rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_consistent_hash_exchange = git_rmq rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_client_helpers = git_rmq rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_ct_helpers = git_rmq rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_event_exchange = git_rmq rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation = git_rmq rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_federation_management = git_rmq rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_jms_topic_exchange = git_rmq rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management = git_rmq rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_agent = git_rmq rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_mqtt = git_rmq rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_aws = git_rmq rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_common = git_rmq rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_consul = git_rmq rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_etcd = git_rmq rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_peer_discovery_k8s = git_rmq rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_prometheus = git_rmq rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_random_exchange = git_rmq rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_recent_history_exchange = git_rmq rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_server_release = git_rmq rabbitmq-server-release $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_sharding = git_rmq rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel = git_rmq rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_shovel_management = git_rmq rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stomp = git_rmq rabbitmq-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_stream = git_rmq rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_top = git_rmq rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_tracing = git_rmq rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_trust_store = git_rmq rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_dispatch = git_rmq rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp = git_rmq rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_stomp_examples = git_rmq rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt = git_rmq rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_web_mqtt_examples = git_rmq rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) master
+dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live master
+dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master
+
+dep_rabbitmq_public_umbrella = git_rmq rabbitmq-public-umbrella $(current_rmq_ref) $(base_rmq_ref) master
+
+# Third-party dependencies version pinning.
+#
+# We do that in this file, which is copied in all projects, to ensure
+# all projects use the same versions. It avoids conflicts and makes it
+# possible to work with rabbitmq-public-umbrella.
+
+dep_accept = hex 0.3.5
+dep_cowboy = hex 2.8.0
+dep_cowlib = hex 2.9.1
+dep_jsx = hex 2.11.0
+dep_lager = hex 3.8.0
+dep_prometheus = git https://github.com/deadtrickster/prometheus.erl.git master
+dep_ra = git https://github.com/rabbitmq/ra.git master
+dep_ranch = hex 1.7.1
+dep_recon = hex 2.5.1
+dep_observer_cli = hex 1.5.4
+dep_stdout_formatter = hex 0.2.4
+dep_sysmon_handler = hex 1.3.0
+
+RABBITMQ_COMPONENTS = amqp_client \
+ amqp10_common \
+ amqp10_client \
+ rabbit \
+ rabbit_common \
+ rabbitmq_amqp1_0 \
+ rabbitmq_auth_backend_amqp \
+ rabbitmq_auth_backend_cache \
+ rabbitmq_auth_backend_http \
+ rabbitmq_auth_backend_ldap \
+ rabbitmq_auth_backend_oauth2 \
+ rabbitmq_auth_mechanism_ssl \
+ rabbitmq_aws \
+ rabbitmq_boot_steps_visualiser \
+ rabbitmq_cli \
+ rabbitmq_codegen \
+ rabbitmq_consistent_hash_exchange \
+ rabbitmq_ct_client_helpers \
+ rabbitmq_ct_helpers \
+ rabbitmq_delayed_message_exchange \
+ rabbitmq_dotnet_client \
+ rabbitmq_event_exchange \
+ rabbitmq_federation \
+ rabbitmq_federation_management \
+ rabbitmq_java_client \
+ rabbitmq_jms_client \
+ rabbitmq_jms_cts \
+ rabbitmq_jms_topic_exchange \
+ rabbitmq_lvc_exchange \
+ rabbitmq_management \
+ rabbitmq_management_agent \
+ rabbitmq_management_exchange \
+ rabbitmq_management_themes \
+ rabbitmq_message_timestamp \
+ rabbitmq_metronome \
+ rabbitmq_mqtt \
+ rabbitmq_objc_client \
+ rabbitmq_peer_discovery_aws \
+ rabbitmq_peer_discovery_common \
+ rabbitmq_peer_discovery_consul \
+ rabbitmq_peer_discovery_etcd \
+ rabbitmq_peer_discovery_k8s \
+ rabbitmq_prometheus \
+ rabbitmq_random_exchange \
+ rabbitmq_recent_history_exchange \
+ rabbitmq_routing_node_stamp \
+ rabbitmq_rtopic_exchange \
+ rabbitmq_server_release \
+ rabbitmq_sharding \
+ rabbitmq_shovel \
+ rabbitmq_shovel_management \
+ rabbitmq_stomp \
+ rabbitmq_stream \
+ rabbitmq_toke \
+ rabbitmq_top \
+ rabbitmq_tracing \
+ rabbitmq_trust_store \
+ rabbitmq_web_dispatch \
+ rabbitmq_web_mqtt \
+ rabbitmq_web_mqtt_examples \
+ rabbitmq_web_stomp \
+ rabbitmq_web_stomp_examples \
+ rabbitmq_website
+
+# Erlang.mk does not rebuild dependencies by default, once they were
+# compiled once, except for those listed in the `$(FORCE_REBUILD)`
+# variable.
+#
+# We want all RabbitMQ components to always be rebuilt: this eases
+# the work on several components at the same time.
+
+FORCE_REBUILD = $(RABBITMQ_COMPONENTS)
+
+# Several components have a custom erlang.mk/build.config, mainly
+# to disable eunit. Therefore, we can't use the top-level project's
+# erlang.mk copy.
+NO_AUTOPATCH += $(RABBITMQ_COMPONENTS)
+
+ifeq ($(origin current_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+current_rmq_ref := $(shell (\
+ ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\
+ if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi))
+else
+current_rmq_ref := master
+endif
+endif
+export current_rmq_ref
+
+ifeq ($(origin base_rmq_ref),undefined)
+ifneq ($(wildcard .git),)
+possible_base_rmq_ref := master
+ifeq ($(possible_base_rmq_ref),$(current_rmq_ref))
+base_rmq_ref := $(current_rmq_ref)
+else
+base_rmq_ref := $(shell \
+ (git rev-parse --verify -q master >/dev/null && \
+ git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \
+ git merge-base --is-ancestor $$(git merge-base master HEAD) $(possible_base_rmq_ref) && \
+ echo $(possible_base_rmq_ref)) || \
+ echo master)
+endif
+else
+base_rmq_ref := master
+endif
+endif
+export base_rmq_ref
+
+# Repository URL selection.
+#
+# First, we infer other components' location from the current project
+# repository URL, if it's a Git repository:
+# - We take the "origin" remote URL as the base
+# - The current project name and repository name is replaced by the
+# target's properties:
+# eg. rabbitmq-common is replaced by rabbitmq-codegen
+# eg. rabbit_common is replaced by rabbitmq_codegen
+#
+# If cloning from this computed location fails, we fallback to RabbitMQ
+# upstream which is GitHub.
+
+# Macro to transform eg. "rabbit_common" to "rabbitmq-common".
+rmq_cmp_repo_name = $(word 2,$(dep_$(1)))
+
+# Upstream URL for the current project.
+RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT))
+RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git
+
+# Current URL for the current project. If this is not a Git clone,
+# default to the upstream Git repository.
+ifneq ($(wildcard .git),)
+git_origin_fetch_url := $(shell git config remote.origin.url)
+git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url)
+RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url)
+RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url)
+else
+RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL)
+RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL)
+endif
+
+# Macro to replace the following pattern:
+# 1. /foo.git -> /bar.git
+# 2. /foo -> /bar
+# 3. /foo/ -> /bar/
+subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3))))
+
+# Macro to replace both the project's name (eg. "rabbit_common") and
+# repository name (eg. "rabbitmq-common") by the target's equivalent.
+#
+# This macro is kept on one line because we don't want whitespaces in
+# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell
+# single-quoted string.
+dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo))
+
+dep_rmq_commits = $(if $(dep_$(1)), \
+ $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \
+ $(pkg_$(1)_commit))
+
+define dep_fetch_git_rmq
+ fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \
+ fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \
+ if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \
+ git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url1"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \
+ elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \
+ fetch_url="$$$$fetch_url2"; \
+ push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \
+ fi; \
+ cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \
+ $(foreach ref,$(call dep_rmq_commits,$(1)), \
+ git checkout -q $(ref) >/dev/null 2>&1 || \
+ ) \
+ (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \
+ 1>&2 && false) ) && \
+ (test "$$$$fetch_url" = "$$$$push_url" || \
+ git remote set-url --push origin "$$$$push_url")
+endef
+
+# --------------------------------------------------------------------
+# Component distribution.
+# --------------------------------------------------------------------
+
+list-dist-deps::
+ @:
+
+prepare-dist::
+ @:
+
+# --------------------------------------------------------------------
+# Umbrella-specific settings.
+# --------------------------------------------------------------------
+
+# If the top-level project is a RabbitMQ component, we override
+# $(DEPS_DIR) for this project to point to the top-level's one.
+#
+# We also verify that the guessed DEPS_DIR is actually named `deps`,
+# to rule out any situation where it is a coincidence that we found a
+# `rabbitmq-components.mk` in ancestor directories.
+
+possible_deps_dir_1 = $(abspath ..)
+possible_deps_dir_2 = $(abspath ../../..)
+
+ifeq ($(notdir $(possible_deps_dir_1)),deps)
+ifneq ($(wildcard $(possible_deps_dir_1)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_1)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+
+ifeq ($(deps_dir_overriden),)
+ifeq ($(notdir $(possible_deps_dir_2)),deps)
+ifneq ($(wildcard $(possible_deps_dir_2)/../rabbitmq-components.mk),)
+deps_dir_overriden = 1
+DEPS_DIR ?= $(possible_deps_dir_2)
+DISABLE_DISTCLEAN = 1
+endif
+endif
+endif
+
+ifneq ($(wildcard UMBRELLA.md),)
+DISABLE_DISTCLEAN = 1
+endif
+
+# We disable `make distclean` so $(DEPS_DIR) is not accidentally removed.
+
+ifeq ($(DISABLE_DISTCLEAN),1)
+ifneq ($(filter distclean distclean-deps,$(MAKECMDGOALS)),)
+SKIP_DEPS = 1
+endif
+endif
diff --git a/deps/rabbit_common/mk/rabbitmq-dist.mk b/deps/rabbit_common/mk/rabbitmq-dist.mk
new file mode 100644
index 0000000000..3e17a27939
--- /dev/null
+++ b/deps/rabbit_common/mk/rabbitmq-dist.mk
@@ -0,0 +1,365 @@
+.PHONY: dist test-dist do-dist cli-scripts cli-escripts clean-dist
+
+DIST_DIR = plugins
+CLI_SCRIPTS_DIR = sbin
+CLI_ESCRIPTS_DIR = escript
+MIX = echo y | mix
+
+# Set $(DIST_AS_EZS) to a non-empty value to enable the packaging of
+# plugins as .ez archives.
+ifeq ($(USE_RABBIT_BOOT_SCRIPT),)
+DIST_AS_EZS ?=
+else
+DIST_AS_EZS =
+endif
+
+dist_verbose_0 = @echo " DIST " $@;
+dist_verbose_2 = set -x;
+dist_verbose = $(dist_verbose_$(V))
+
+MIX_ARCHIVES ?= $(HOME)/.mix/archives
+
+MIX_TASK_ARCHIVE_DEPS_VERSION = 0.5.0
+mix_task_archive_deps = $(MIX_ARCHIVES)/mix_task_archive_deps-$(MIX_TASK_ARCHIVE_DEPS_VERSION)
+
+# We take the version of an Erlang application from the .app file. This
+# macro is called like this:
+#
+# $(call get_app_version,/path/to/name.app.src)
+
+ifeq ($(PLATFORM),msys2)
+core_unix_path = $(shell cygpath $1)
+else
+core_unix_path = $1
+endif
+
+define get_app_version
+$(shell awk '
+/{ *vsn *, *"/ {
+ vsn=$$0;
+ sub(/.*{ *vsn, *"/, "", vsn);
+ sub(/".*/, "", vsn);
+ print vsn;
+ exit;
+}' $(1))
+endef
+
+define get_mix_project_version
+$(shell cd $(1) && \
+ $(MIX) do deps.get, deps.compile, compile >/dev/null && \
+ $(MIX) run --no-start -e "IO.puts(Mix.Project.config[:version])")
+endef
+
+# Define the target to create an .ez plugin archive for an
+# Erlang.mk-based project. This macro is called like this:
+#
+# $(call do_ez_target_erlangmk,app_name,app_version,app_dir)
+
+define do_ez_target_erlangmk
+dist_$(1)_ez_dir = $$(if $(2),$(DIST_DIR)/$(1)-$(2), \
+ $$(if $$(VERSION),$(DIST_DIR)/$(1)-$$(VERSION),$(DIST_DIR)/$(1)))
+ifeq ($(DIST_AS_EZS),)
+dist_$(1)_ez = $$(dist_$(1)_ez_dir)
+else
+dist_$(1)_ez = $$(dist_$(1)_ez_dir).ez
+endif
+
+$$(dist_$(1)_ez): APP = $(1)
+$$(dist_$(1)_ez): VSN = $(2)
+$$(dist_$(1)_ez): SRC_DIR = $(3)
+$$(dist_$(1)_ez): EZ_DIR = $$(abspath $$(dist_$(1)_ez_dir))
+$$(dist_$(1)_ez): EZ = $$(dist_$(1)_ez)
+$$(dist_$(1)_ez): $$(if $$(wildcard $(3)/ebin $(3)/include $(3)/priv),\
+ $$(filter-out %/dep_built %/ebin/test,$$(call core_find,$$(wildcard $(3)/ebin $(3)/include $(3)/priv),*)),)
+
+# If the application's Makefile defines a `list-dist-deps` target, we
+# use it to populate the dependencies list. This is useful when the
+# application also has a `prepare-dist` target to modify the created
+# tree before we make an archive out of it.
+
+ifeq ($$(shell test -f $(3)/rabbitmq-components.mk \
+ && grep -q '^list-dist-deps::' $(3)/Makefile && echo yes),yes)
+$$(dist_$(1)_ez): $$(patsubst %,$(3)/%, \
+ $$(shell $(MAKE) --no-print-directory -C $(3) list-dist-deps \
+ APP=$(1) VSN=$(2) EZ_DIR=$$(abspath $$(dist_$(1)_ez_dir))))
+endif
+
+ERLANGMK_DIST_APPS += $(1)
+
+ERLANGMK_DIST_EZS += $$(dist_$(1)_ez)
+
+endef
+
+# Define the target to create an .ez plugin archive for a Mix-based
+# project. This macro is called like this:
+#
+# $(call do_ez_target_mix,app_name,app_version,app_dir)
+
+define get_mix_project_dep_ezs
+$(shell cd $(1) && \
+ $(MIX) do deps.get, deps.compile, compile >/dev/null && \
+ $(MIX) archive.build.all.list -e -o $(DIST_DIR) --skip "rabbit $(ERLANGMK_DIST_APPS)")
+endef
+
+define do_ez_target_mix
+dist_$(1)_ez_dir = $$(if $(2),$(DIST_DIR)/$(1)-$(2), \
+ $$(if $$(VERSION),$(DIST_DIR)/$(1)-$$(VERSION),$(DIST_DIR)/$(1)))
+dist_$(1)_ez = $$(dist_$(1)_ez_dir).ez
+
+$$(dist_$(1)_ez): APP = $(1)
+$$(dist_$(1)_ez): VSN = $(2)
+$$(dist_$(1)_ez): SRC_DIR = $(3)
+$$(dist_$(1)_ez): EZ_DIR = $$(abspath $$(dist_$(1)_ez_dir))
+$$(dist_$(1)_ez): EZ = $$(dist_$(1)_ez)
+$$(dist_$(1)_ez): $$(if $$(wildcard _build/dev/lib/$(1)/ebin $(3)/priv),\
+ $$(filter-out %/dep_built,$$(call core_find,$$(wildcard _build/dev/lib/$(1)/ebin $(3)/priv),*)),)
+
+MIX_DIST_EZS += $$(dist_$(1)_ez)
+EXTRA_DIST_EZS += $$(call get_mix_project_dep_ezs,$(3))
+
+endef
+
+# Real entry point: it tests the existence of an .app file to determine
+# if it is an Erlang application (and therefore if it should be provided
+# as an .ez plugin archive) and calls do_ez_target_erlangmk. If instead
+# it finds a Mix configuration file, it calls do_ez_target_mix. It
+# should be called as:
+#
+# $(call ez_target,path_to_app)
+
+define ez_target
+dist_$(1)_appdir = $(2)
+dist_$(1)_appfile = $$(dist_$(1)_appdir)/ebin/$(1).app
+dist_$(1)_mixfile = $$(dist_$(1)_appdir)/mix.exs
+
+$$(if $$(shell test -f $$(dist_$(1)_appfile) && echo OK), \
+ $$(eval $$(call do_ez_target_erlangmk,$(1),$$(call get_app_version,$$(dist_$(1)_appfile)),$$(dist_$(1)_appdir))), \
+ $$(if $$(shell test -f $$(dist_$(1)_mixfile) && [ "x$(1)" != "xrabbitmqctl" ] && [ "x$(1)" != "xrabbitmq_cli" ] && echo OK), \
+ $$(eval $$(call do_ez_target_mix,$(1),$$(call get_mix_project_version,$$(dist_$(1)_appdir)),$$(dist_$(1)_appdir)))))
+
+endef
+
+ifneq ($(filter do-dist,$(MAKECMDGOALS)),)
+# The following code is evaluated only when running "make do-dist",
+# otherwise it would trigger an infinite loop, as this code calls "make
+# list-dist-deps" (see do_ez_target_erlangmk).
+ifdef DIST_PLUGINS_LIST
+# Now, try to create an .ez target for the top-level project and all
+# dependencies.
+
+ifeq ($(wildcard $(DIST_PLUGINS_LIST)),)
+$(error DIST_PLUGINS_LIST ($(DIST_PLUGINS_LIST)) is missing)
+endif
+
+$(eval $(foreach path, \
+ $(filter-out %/looking_glass %/lz4, \
+ $(sort $(shell cat $(DIST_PLUGINS_LIST))) $(CURDIR)), \
+ $(call ez_target,$(if $(filter $(path),$(CURDIR)),$(PROJECT),$(notdir $(path))),$(path))))
+endif
+endif
+
+# The actual recipe to create the .ez plugin archive. Some variables
+# are defined in the do_ez_target_erlangmk and do_ez_target_mix macros
+# above. All .ez archives are also listed in this do_ez_target_erlangmk
+# and do_ez_target_mix macros.
+
+RSYNC ?= rsync
+RSYNC_V_0 =
+RSYNC_V_1 = -v
+RSYNC_V = $(RSYNC_V_$(V))
+
+ZIP ?= zip
+ZIP_V_0 = -q
+ZIP_V_1 =
+ZIP_V = $(ZIP_V_$(V))
+
+$(ERLANGMK_DIST_EZS):
+ $(verbose) rm -rf $(EZ_DIR) $(EZ)
+ $(verbose) mkdir -p $(EZ_DIR)
+ $(dist_verbose) $(RSYNC) -a $(RSYNC_V) \
+ --exclude '/ebin/dep_built' \
+ --exclude '/ebin/test' \
+ --include '/ebin/***' \
+ --include '/include/***' \
+ --include '/priv/***' \
+ --exclude '*' \
+ $(call core_unix_path,$(SRC_DIR))/ $(call core_unix_path,$(EZ_DIR))/
+ @# Give a chance to the application to make any modification it
+ @# wants to the tree before we make an archive.
+ $(verbose) ! (test -f $(SRC_DIR)/rabbitmq-components.mk \
+ && grep -q '^prepare-dist::' $(SRC_DIR)/Makefile) || \
+ $(MAKE) --no-print-directory -C $(SRC_DIR) prepare-dist \
+ APP=$(APP) VSN=$(VSN) EZ_DIR=$(EZ_DIR)
+ifneq ($(DIST_AS_EZS),)
+ $(verbose) (cd $(DIST_DIR) && \
+ find "$(basename $(notdir $@))" | LC_COLLATE=C sort \
+ > "$(basename $(notdir $@)).manifest" && \
+ $(ZIP) $(ZIP_V) --names-stdin "$(notdir $@)" \
+ < "$(basename $(notdir $@)).manifest")
+ $(verbose) rm -rf $(EZ_DIR) $(EZ_DIR).manifest
+endif
+
+$(MIX_DIST_EZS): $(mix_task_archive_deps)
+ $(verbose) cd $(SRC_DIR) && \
+ $(MIX) do deps.get, deps.compile, compile, archive.build.all \
+ -e -o $(abspath $(DIST_DIR)) --skip "rabbit $(ERLANGMK_DIST_APPS)"
+
+MIX_TASK_ARCHIVE_DEPS_URL = https://github.com/rabbitmq/mix_task_archive_deps/releases/download/$(MIX_TASK_ARCHIVE_DEPS_VERSION)/mix_task_archive_deps-$(MIX_TASK_ARCHIVE_DEPS_VERSION).ez
+
+$(mix_task_archive_deps):
+ $(gen_verbose) mix archive.install --force $(MIX_TASK_ARCHIVE_DEPS_URL)
+
+# We need to recurse because the top-level make instance is evaluated
+# before dependencies are downloaded.
+
+MAYBE_APPS_LIST = $(if $(shell test -f $(ERLANG_MK_TMP)/apps.log && echo OK), \
+ $(ERLANG_MK_TMP)/apps.log)
+DIST_LOCK = $(DIST_DIR).lock
+
+dist:: $(ERLANG_MK_RECURSIVE_DEPS_LIST) all
+ $(gen_verbose) \
+ if command -v flock >/dev/null; then \
+ flock $(DIST_LOCK) \
+ sh -c '$(MAKE) do-dist \
+ DIST_PLUGINS_LIST="$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(MAYBE_APPS_LIST)"'; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(DIST_LOCK) \
+ sh -c '$(MAKE) do-dist \
+ DIST_PLUGINS_LIST="$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(MAYBE_APPS_LIST)"'; \
+ else \
+ $(MAKE) do-dist \
+ DIST_PLUGINS_LIST="$(ERLANG_MK_RECURSIVE_DEPS_LIST) \
+ $(MAYBE_APPS_LIST)"; \
+ fi
+
+test-dist:: export TEST_DIR=NON-EXISTENT
+test-dist:: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) test-build
+ $(gen_verbose) \
+ if command -v flock >/dev/null; then \
+ flock $(DIST_LOCK) \
+ sh -c '$(MAKE) do-dist \
+ DIST_PLUGINS_LIST="$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(MAYBE_APPS_LIST)"'; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(DIST_LOCK) \
+ sh -c '$(MAKE) do-dist \
+ DIST_PLUGINS_LIST="$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(MAYBE_APPS_LIST)"'; \
+ else \
+ $(MAKE) do-dist \
+ DIST_PLUGINS_LIST="$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \
+ $(MAYBE_APPS_LIST)"; \
+ fi
+
+DIST_EZS = $(ERLANGMK_DIST_EZS) $(MIX_DIST_EZS)
+
+do-dist:: $(DIST_EZS)
+ $(verbose) unwanted='$(filter-out $(DIST_EZS) $(EXTRA_DIST_EZS), \
+ $(wildcard $(DIST_DIR)/*))'; \
+ test -z "$$unwanted" || (echo " RM $$unwanted" && rm -rf $$unwanted)
+
+CLI_SCRIPTS_LOCK = $(CLI_SCRIPTS_DIR).lock
+CLI_ESCRIPTS_LOCK = $(CLI_ESCRIPTS_DIR).lock
+
+ifneq ($(filter-out rabbit_common amqp10_common,$(PROJECT)),)
+dist:: install-cli
+test-build:: install-cli
+endif
+
+install-cli: install-cli-scripts install-cli-escripts
+ @:
+
+ifeq ($(PROJECT),rabbit)
+install-cli-scripts:
+ $(gen_verbose) \
+ if command -v flock >/dev/null; then \
+ flock $(CLI_SCRIPTS_LOCK) \
+ sh -c 'mkdir -p "$(CLI_SCRIPTS_DIR)" && \
+ for file in scripts/*; do \
+ cmp -s "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")" || \
+ cp -a "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")"; \
+ done'; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(CLI_SCRIPTS_LOCK) \
+ sh -c 'mkdir -p "$(CLI_SCRIPTS_DIR)" && \
+ for file in scripts/*; do \
+ cmp -s "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")" || \
+ cp -a "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")"; \
+ done'; \
+ else \
+ mkdir -p "$(CLI_SCRIPTS_DIR)" && \
+ for file in scripts/*; do \
+ cmp -s "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")" || \
+ cp -a "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")"; \
+ done; \
+ fi
+else
+
+install-cli-scripts:
+ $(gen_verbose) \
+ set -e; \
+ if test -d "$(DEPS_DIR)/rabbit/scripts"; then \
+ rabbit_scripts_dir='$(DEPS_DIR)/rabbit/scripts'; \
+ elif test -d "$(DEPS_DIR)/../scripts"; then \
+ rabbit_scripts_dir='$(DEPS_DIR)/../scripts'; \
+ else \
+ echo 'rabbit/scripts directory not found' 1>&2; \
+ exit 1; \
+ fi; \
+ test -d "$$rabbit_scripts_dir"; \
+ if command -v flock >/dev/null; then \
+ flock $(CLI_SCRIPTS_LOCK) \
+ sh -e -c 'mkdir -p "$(CLI_SCRIPTS_DIR)" && \
+ for file in "'$$rabbit_scripts_dir'"/*; do \
+ test -f "$$file"; \
+ cmp -s "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")" || \
+ cp -a "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")"; \
+ done'; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(CLI_SCRIPTS_LOCK) \
+ sh -e -c 'mkdir -p "$(CLI_SCRIPTS_DIR)" && \
+ for file in "'$$rabbit_scripts_dir'"/*; do \
+ test -f "$$file"; \
+ cmp -s "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")" || \
+ cp -a "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")"; \
+ done'; \
+ else \
+ mkdir -p "$(CLI_SCRIPTS_DIR)" && \
+ for file in "$$rabbit_scripts_dir"/*; do \
+ test -f "$$file"; \
+ cmp -s "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")" || \
+ cp -a "$$file" "$(CLI_SCRIPTS_DIR)/$$(basename "$$file")"; \
+ done; \
+ fi
+endif
+
+install-cli-escripts:
+ $(gen_verbose) \
+ if command -v flock >/dev/null; then \
+ flock $(CLI_ESCRIPTS_LOCK) \
+ sh -c 'mkdir -p "$(CLI_ESCRIPTS_DIR)" && \
+ $(MAKE) -C "$(DEPS_DIR)/rabbitmq_cli" install \
+ PREFIX="$(abspath $(CLI_ESCRIPTS_DIR))" \
+ DESTDIR='; \
+ elif command -v lockf >/dev/null; then \
+ lockf $(CLI_ESCRIPTS_LOCK) \
+ sh -c 'mkdir -p "$(CLI_ESCRIPTS_DIR)" && \
+ $(MAKE) -C "$(DEPS_DIR)/rabbitmq_cli" install \
+ PREFIX="$(abspath $(CLI_ESCRIPTS_DIR))" \
+ DESTDIR='; \
+ else \
+ mkdir -p "$(CLI_ESCRIPTS_DIR)" && \
+ $(MAKE) -C "$(DEPS_DIR)/rabbitmq_cli" install \
+ PREFIX="$(abspath $(CLI_ESCRIPTS_DIR))" \
+ DESTDIR= ; \
+ fi
+
+clean-dist::
+ $(gen_verbose) rm -rf \
+ "$(DIST_DIR)" \
+ "$(CLI_SCRIPTS_DIR)" \
+ "$(CLI_ESCRIPTS_DIR)"
+
+clean:: clean-dist
diff --git a/deps/rabbit_common/mk/rabbitmq-early-plugin.mk b/deps/rabbit_common/mk/rabbitmq-early-plugin.mk
new file mode 100644
index 0000000000..7b5f14b8f9
--- /dev/null
+++ b/deps/rabbit_common/mk/rabbitmq-early-plugin.mk
@@ -0,0 +1,3 @@
+ifeq ($(filter rabbitmq-early-test.mk,$(notdir $(MAKEFILE_LIST))),)
+include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-early-test.mk
+endif
diff --git a/deps/rabbit_common/mk/rabbitmq-early-test.mk b/deps/rabbit_common/mk/rabbitmq-early-test.mk
new file mode 100644
index 0000000000..f4f00173b3
--- /dev/null
+++ b/deps/rabbit_common/mk/rabbitmq-early-test.mk
@@ -0,0 +1,130 @@
+# --------------------------------------------------------------------
+# xref
+# --------------------------------------------------------------------
+
+ifeq ($(filter distclean distclean-xref,$(MAKECMDGOALS)),)
+ifneq ($(PROJECT),rabbit_common)
+XREFR := $(DEPS_DIR)/rabbit_common/mk/xrefr
+else
+XREFR := mk/xrefr
+endif
+endif
+
+# --------------------------------------------------------------------
+# dialyzer
+# --------------------------------------------------------------------
+
+DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions
+
+# --------------------------------------------------------------------
+# %-on-concourse dependencies.
+# --------------------------------------------------------------------
+
+ifneq ($(words $(filter %-on-concourse,$(MAKECMDGOALS))),0)
+TEST_DEPS += ci $(RMQ_CI_CT_HOOKS)
+NO_AUTOPATCH += ci $(RMQ_CI_CT_HOOKS)
+dep_ci = git git@github.com:rabbitmq/rabbitmq-ci master
+endif
+
+# --------------------------------------------------------------------
+# Common Test flags.
+# --------------------------------------------------------------------
+
+# We start the common_test node as a hidden Erlang node. The benefit
+# is that other Erlang nodes won't try to connect to each other after
+# discovering the common_test node if they are not meant to.
+#
+# This helps when several unrelated RabbitMQ clusters are started in
+# parallel.
+
+CT_OPTS += -hidden
+
+# Enable the following common_test hooks on Travis and Concourse:
+#
+# cth_fail_fast
+# This hook will make sure the first failure puts an end to the
+# testsuites; i.e. all remaining tests are skipped.
+#
+# cth_styledout
+# This hook will change the output of common_test to something more
+# concise and colored.
+#
+# On Jenkins, in addition to those common_test hooks, enable JUnit-like
+# report. Jenkins parses those reports so the results can be browsed
+# from its UI. Furthermore, it displays a graph showing evolution of the
+# results over time.
+
+ifndef TRAVIS
+CT_HOOKS ?= cth_styledout
+TEST_DEPS += cth_styledout
+endif
+
+ifdef TRAVIS
+FAIL_FAST = 1
+SKIP_AS_ERROR = 1
+endif
+
+ifdef CONCOURSE
+FAIL_FAST = 1
+SKIP_AS_ERROR = 1
+endif
+
+RMQ_CI_CT_HOOKS = cth_fail_fast
+ifeq ($(FAIL_FAST),1)
+CT_HOOKS += $(RMQ_CI_CT_HOOKS)
+TEST_DEPS += $(RMQ_CI_CT_HOOKS)
+endif
+
+dep_cth_fail_fast = git https://github.com/rabbitmq/cth_fail_fast.git master
+dep_cth_styledout = git https://github.com/rabbitmq/cth_styledout.git master
+
+CT_HOOKS_PARAM_VALUE = $(patsubst %,and %,$(CT_HOOKS))
+CT_OPTS += -ct_hooks $(wordlist 2,$(words $(CT_HOOKS_PARAM_VALUE)),$(CT_HOOKS_PARAM_VALUE))
+
+# Disable most messages on Travis because it might exceed the limit
+# set by Travis.
+#
+# CAUTION: All arguments after -erl_args are passed to the emulator and
+# common_test doesn't interpret them! Therefore, all common_test flags
+# *MUST* appear before.
+
+CT_QUIET_FLAGS = -verbosity 50 \
+ -erl_args \
+ -kernel error_logger silent
+
+ifdef TRAVIS
+CT_OPTS += $(CT_QUIET_FLAGS)
+endif
+
+# On CI, set $RABBITMQ_CT_SKIP_AS_ERROR so that any skipped
+# testsuite/testgroup/testcase is considered an error.
+
+ifeq ($(SKIP_AS_ERROR),1)
+export RABBITMQ_CT_SKIP_AS_ERROR = true
+endif
+
+# --------------------------------------------------------------------
+# Looking Glass rules.
+# --------------------------------------------------------------------
+
+ifneq ("$(RABBITMQ_TRACER)","")
+BUILD_DEPS += looking_glass
+dep_looking_glass = git https://github.com/rabbitmq/looking-glass master
+ERL_LIBS := "$(ERL_LIBS):../looking_glass:../lz4"
+export RABBITMQ_TRACER
+endif
+
+define lg_callgrind.erl
+lg_callgrind:profile_many("traces.lz4.*", "callgrind.out", #{running => true}),
+halt().
+endef
+
+.PHONY: profile clean-profile
+
+profile:
+ $(gen_verbose) $(call erlang,$(call lg_callgrind.erl))
+
+clean:: clean-profile
+
+clean-profile:
+ $(gen_verbose) rm -f traces.lz4.* callgrind.out.*
diff --git a/deps/rabbit_common/mk/rabbitmq-hexpm.mk b/deps/rabbit_common/mk/rabbitmq-hexpm.mk
new file mode 100644
index 0000000000..24281b1321
--- /dev/null
+++ b/deps/rabbit_common/mk/rabbitmq-hexpm.mk
@@ -0,0 +1,67 @@
+# --------------------------------------------------------------------
+# Hex.pm.
+# --------------------------------------------------------------------
+
+.PHONY: hex-publish hex-publish-docs
+
+HEXPM_URL = https://github.com/rabbitmq/hexpm-cli/releases/latest/download/hexpm
+HEXPM_CLI = $(ERLANG_MK_TMP)/hexpm
+
+$(HEXPM_CLI):
+ $(verbose) mkdir -p $(ERLANG_MK_TMP)
+ $(gen_verbose) $(call core_http_get,$@,$(HEXPM_URL))
+ $(verbose) chmod +x $@
+
+RABBIT_COMMON_HEXPM_VERSION = $(PROJECT_VERSION)
+AMQP_CLIENT_HEXPM_VERSION = $(PROJECT_VERSION)
+
+rebar.config: dep_rabbit_common = hex $(RABBIT_COMMON_HEXPM_VERSION)
+rebar.config: dep_amqp_client = hex $(AMQP_CLIENT_HEXPM_VERSION)
+
+define RABBITMQ_HEXPM_DEFAULT_FILES
+ "erlang.mk",
+ "git-revisions.txt",
+ "include",
+ "LICENSE*",
+ "Makefile",
+ "rabbitmq-components.mk",
+ "README",
+ "README.md",
+ "src"
+endef
+
+ifeq ($(PROJECT),rabbit_common)
+RMQ_COMPONENTS_PREFIX = mk
+RMQ_COMPONENTS_HEXPM = mk/rabbitmq-components.hexpm.mk
+else
+RMQ_COMPONENTS_PREFIX = .
+RMQ_COMPONENTS_HEXPM = $(DEPS_DIR)/rabbit_common/mk/rabbitmq-components.hexpm.mk
+endif
+
+hex-publish: $(HEXPM_CLI) app rebar.config
+ $(gen_verbose) echo "$(PROJECT_DESCRIPTION) $(PROJECT_VERSION)" \
+ > git-revisions.txt
+ $(verbose) mv \
+ $(RMQ_COMPONENTS_PREFIX)/rabbitmq-components.mk \
+ rabbitmq-components.mk.not-hexpm
+ $(verbose) cp \
+ $(RMQ_COMPONENTS_HEXPM) \
+ $(RMQ_COMPONENTS_PREFIX)/rabbitmq-components.mk
+ $(verbose) grep -E '^dep.* = hex' \
+ rabbitmq-components.mk.not-hexpm \
+ >> $(RMQ_COMPONENTS_PREFIX)/rabbitmq-components.mk
+ $(verbose) touch -r \
+ rabbitmq-components.mk.not-hexpm \
+ $(RMQ_COMPONENTS_PREFIX)/rabbitmq-components.mk
+ $(verbose) trap '\
+ rm -f git-revisions.txt rebar.lock; \
+ if test -f rabbitmq-components.mk.not-hexpm; then \
+ mv \
+ rabbitmq-components.mk.not-hexpm \
+ $(RMQ_COMPONENTS_PREFIX)/rabbitmq-components.mk; \
+ fi' EXIT INT; \
+ $(HEXPM_CLI) publish
+
+hex-publish-docs: $(HEXPM_CLI) app docs
+ $(gen_verbose) trap 'rm -f rebar.lock' EXIT INT; \
+ $(HEXPM_CLI) docs
diff --git a/deps/rabbit_common/mk/rabbitmq-macros.mk b/deps/rabbit_common/mk/rabbitmq-macros.mk
new file mode 100644
index 0000000000..048745a7f0
--- /dev/null
+++ b/deps/rabbit_common/mk/rabbitmq-macros.mk
@@ -0,0 +1,22 @@
+# Macro to compare two x.y.z versions.
+#
+# Usage:
+# ifeq ($(call compare_version,$(ERTS_VER),$(MAX_ERTS_VER),<),true)
+# # Only evaluated if $(ERTS_VER) < $(MAX_ERTS_VER)
+# endif
+
+define compare_version
+$(shell awk 'BEGIN {
+ split("$(1)", v1, ".");
+ version1 = v1[1] * 1000000 + v1[2] * 10000 + v1[3] * 100 + v1[4];
+
+ split("$(2)", v2, ".");
+ version2 = v2[1] * 1000000 + v2[2] * 10000 + v2[3] * 100 + v2[4];
+
+ if (version1 $(3) version2) {
+ print "true";
+ } else {
+ print "false";
+ }
+}')
+endef
diff --git a/deps/rabbit_common/mk/rabbitmq-plugin.mk b/deps/rabbit_common/mk/rabbitmq-plugin.mk
new file mode 100644
index 0000000000..29064a9a4f
--- /dev/null
+++ b/deps/rabbit_common/mk/rabbitmq-plugin.mk
@@ -0,0 +1,23 @@
+ifeq ($(filter rabbitmq-build.mk,$(notdir $(MAKEFILE_LIST))),)
+include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-build.mk
+endif
+
+ifeq ($(filter rabbitmq-hexpm.mk,$(notdir $(MAKEFILE_LIST))),)
+include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-hexpm.mk
+endif
+
+ifeq ($(filter rabbitmq-dist.mk,$(notdir $(MAKEFILE_LIST))),)
+include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-dist.mk
+endif
+
+ifeq ($(filter rabbitmq-run.mk,$(notdir $(MAKEFILE_LIST))),)
+include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-run.mk
+endif
+
+ifeq ($(filter rabbitmq-test.mk,$(notdir $(MAKEFILE_LIST))),)
+include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-test.mk
+endif
+
+ifeq ($(filter rabbitmq-tools.mk,$(notdir $(MAKEFILE_LIST))),)
+include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-tools.mk
+endif
diff --git a/deps/rabbit_common/mk/rabbitmq-run.mk b/deps/rabbit_common/mk/rabbitmq-run.mk
new file mode 100644
index 0000000000..bef62c03f7
--- /dev/null
+++ b/deps/rabbit_common/mk/rabbitmq-run.mk
@@ -0,0 +1,428 @@
+.PHONY: run-broker run-background-broker run-node run-background-node \
+ start-background-node start-rabbit-on-node \
+ stop-rabbit-on-node set-resource-alarm clear-resource-alarm \
+ stop-node
+
+ifeq ($(filter rabbitmq-dist.mk,$(notdir $(MAKEFILE_LIST))),)
+include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-dist.mk
+endif
+
+exec_verbose_0 = @echo " EXEC " $@;
+exec_verbose_2 = set -x;
+exec_verbose = $(exec_verbose_$(V))
+
+ifeq ($(PLATFORM),msys2)
+TEST_TMPDIR ?= $(TEMP)/rabbitmq-test-instances
+else
+TMPDIR ?= /tmp
+TEST_TMPDIR ?= $(TMPDIR)/rabbitmq-test-instances
+endif
+
+# Location of the scripts controlling the broker.
+RABBITMQ_SCRIPTS_DIR ?= $(CURDIR)/sbin
+
+ifeq ($(PLATFORM),msys2)
+RABBITMQ_PLUGINS ?= $(RABBITMQ_SCRIPTS_DIR)/rabbitmq-plugins.bat
+RABBITMQ_SERVER ?= $(RABBITMQ_SCRIPTS_DIR)/rabbitmq-server.bat
+RABBITMQCTL ?= $(RABBITMQ_SCRIPTS_DIR)/rabbitmqctl.bat
+else
+RABBITMQ_PLUGINS ?= $(RABBITMQ_SCRIPTS_DIR)/rabbitmq-plugins
+RABBITMQ_SERVER ?= $(RABBITMQ_SCRIPTS_DIR)/rabbitmq-server
+RABBITMQCTL ?= $(RABBITMQ_SCRIPTS_DIR)/rabbitmqctl
+endif
+
+export RABBITMQ_SCRIPTS_DIR RABBITMQCTL RABBITMQ_PLUGINS RABBITMQ_SERVER
+
+# We export MAKE to be sure scripts and tests use the proper command.
+export MAKE
+
+# We need to pass the location of codegen to the Java client ant
+# process.
+CODEGEN_DIR = $(DEPS_DIR)/rabbitmq_codegen
+PYTHONPATH = $(CODEGEN_DIR)
+export PYTHONPATH
+
+ANT ?= ant
+ANT_FLAGS += -Dmake.bin=$(MAKE) \
+ -DUMBRELLA_AVAILABLE=true \
+ -Drabbitmqctl.bin=$(RABBITMQCTL) \
+ -Dsibling.codegen.dir=$(CODEGEN_DIR)
+ifeq ($(PROJECT),rabbitmq_test)
+ANT_FLAGS += -Dsibling.rabbitmq_test.dir=$(CURDIR)
+else
+ANT_FLAGS += -Dsibling.rabbitmq_test.dir=$(DEPS_DIR)/rabbitmq_test
+endif
+export ANT ANT_FLAGS
+
+node_tmpdir = $(TEST_TMPDIR)/$(1)
+node_pid_file = $(call node_tmpdir,$(1))/$(1).pid
+node_log_base = $(call node_tmpdir,$(1))/log
+node_mnesia_base = $(call node_tmpdir,$(1))/mnesia
+node_mnesia_dir = $(call node_mnesia_base,$(1))/$(1)
+node_quorum_dir = $(call node_mnesia_dir,$(1))/quorum
+node_stream_dir = $(call node_mnesia_dir,$(1))/stream
+node_plugins_expand_dir = $(call node_tmpdir,$(1))/plugins
+node_feature_flags_file = $(call node_tmpdir,$(1))/feature_flags
+node_enabled_plugins_file = $(call node_tmpdir,$(1))/enabled_plugins
+
+# Broker startup variables for the test environment.
+ifeq ($(PLATFORM),msys2)
+HOSTNAME := $(COMPUTERNAME)
+else
+HOSTNAME := $(shell hostname -s)
+endif
+
+RABBITMQ_NODENAME ?= rabbit@$(HOSTNAME)
+RABBITMQ_NODENAME_FOR_PATHS ?= $(RABBITMQ_NODENAME)
+NODE_TMPDIR ?= $(call node_tmpdir,$(RABBITMQ_NODENAME_FOR_PATHS))
+
+RABBITMQ_BASE ?= $(NODE_TMPDIR)
+RABBITMQ_PID_FILE ?= $(call node_pid_file,$(RABBITMQ_NODENAME_FOR_PATHS))
+RABBITMQ_LOG_BASE ?= $(call node_log_base,$(RABBITMQ_NODENAME_FOR_PATHS))
+RABBITMQ_MNESIA_BASE ?= $(call node_mnesia_base,$(RABBITMQ_NODENAME_FOR_PATHS))
+RABBITMQ_MNESIA_DIR ?= $(call node_mnesia_dir,$(RABBITMQ_NODENAME_FOR_PATHS))
+RABBITMQ_QUORUM_DIR ?= $(call node_quorum_dir,$(RABBITMQ_NODENAME_FOR_PATHS))
+RABBITMQ_STREAM_DIR ?= $(call node_stream_dir,$(RABBITMQ_NODENAME_FOR_PATHS))
+RABBITMQ_PLUGINS_EXPAND_DIR ?= $(call node_plugins_expand_dir,$(RABBITMQ_NODENAME_FOR_PATHS))
+RABBITMQ_FEATURE_FLAGS_FILE ?= $(call node_feature_flags_file,$(RABBITMQ_NODENAME_FOR_PATHS))
+RABBITMQ_ENABLED_PLUGINS_FILE ?= $(call node_enabled_plugins_file,$(RABBITMQ_NODENAME_FOR_PATHS))
+
+# Enable colourful debug logging by default
+# To change this, set RABBITMQ_LOG to info, notice, warning etc.
+RABBITMQ_LOG ?= debug,+color
+export RABBITMQ_LOG
+
+# erlang.mk adds dependencies' ebin directory to ERL_LIBS. This is
+# a sane default, but we prefer to rely on the .ez archives in the
+# `plugins` directory so the plugin code is executed. The `plugins`
+# directory is added to ERL_LIBS by rabbitmq-env.
+DIST_ERL_LIBS = $(patsubst :%,%,$(patsubst %:,%,$(subst :$(APPS_DIR):,:,$(subst :$(DEPS_DIR):,:,:$(ERL_LIBS):))))
+
+ifdef PLUGINS_FROM_DEPS_DIR
+RMQ_PLUGINS_DIR=$(DEPS_DIR)
+else
+RMQ_PLUGINS_DIR=$(CURDIR)/$(DIST_DIR)
+endif
+
+define basic_script_env_settings
+MAKE="$(MAKE)" \
+ERL_LIBS="$(DIST_ERL_LIBS)" \
+RABBITMQ_NODENAME="$(1)" \
+RABBITMQ_NODE_IP_ADDRESS="$(RABBITMQ_NODE_IP_ADDRESS)" \
+RABBITMQ_NODE_PORT="$(3)" \
+RABBITMQ_BASE="$(call node_tmpdir,$(2))" \
+RABBITMQ_PID_FILE="$(call node_pid_file,$(2))" \
+RABBITMQ_LOG_BASE="$(call node_log_base,$(2))" \
+RABBITMQ_MNESIA_BASE="$(call node_mnesia_base,$(2))" \
+RABBITMQ_MNESIA_DIR="$(call node_mnesia_dir,$(2))" \
+RABBITMQ_QUORUM_DIR="$(call node_quorum_dir,$(2))" \
+RABBITMQ_STREAM_DIR="$(call node_stream_dir,$(2))" \
+RABBITMQ_FEATURE_FLAGS_FILE="$(call node_feature_flags_file,$(2))" \
+RABBITMQ_PLUGINS_DIR="$(if $(RABBITMQ_PLUGINS_DIR),$(RABBITMQ_PLUGINS_DIR),$(RMQ_PLUGINS_DIR))" \
+RABBITMQ_PLUGINS_EXPAND_DIR="$(call node_plugins_expand_dir,$(2))" \
+RABBITMQ_SERVER_START_ARGS="-ra wal_sync_method sync $(RABBITMQ_SERVER_START_ARGS)" \
+RABBITMQ_ENABLED_PLUGINS="$(RABBITMQ_ENABLED_PLUGINS)"
+endef
+
+BASIC_SCRIPT_ENV_SETTINGS = \
+ $(call basic_script_env_settings,$(RABBITMQ_NODENAME),$(RABBITMQ_NODENAME_FOR_PATHS),$(RABBITMQ_NODE_PORT)) \
+ RABBITMQ_ENABLED_PLUGINS_FILE="$(RABBITMQ_ENABLED_PLUGINS_FILE)"
+
+test-tmpdir:
+ $(verbose) mkdir -p $(TEST_TMPDIR)
+
+virgin-test-tmpdir:
+ $(gen_verbose) rm -rf $(TEST_TMPDIR)
+ $(verbose) mkdir -p $(TEST_TMPDIR)
+
+node-tmpdir:
+ $(verbose) mkdir -p $(RABBITMQ_LOG_BASE) \
+ $(RABBITMQ_MNESIA_BASE) \
+ $(RABBITMQ_PLUGINS_EXPAND_DIR)
+
+virgin-node-tmpdir:
+ $(gen_verbose) rm -rf $(NODE_TMPDIR)
+ $(verbose) mkdir -p $(RABBITMQ_LOG_BASE) \
+ $(RABBITMQ_MNESIA_BASE) \
+ $(RABBITMQ_PLUGINS_EXPAND_DIR)
+
+.PHONY: test-tmpdir virgin-test-tmpdir node-tmpdir virgin-node-tmpdir
+
+ifdef LEAVE_PLUGINS_DISABLED
+RABBITMQ_ENABLED_PLUGINS ?=
+else
+RABBITMQ_ENABLED_PLUGINS ?= ALL
+endif
+
+# --------------------------------------------------------------------
+# Run a full RabbitMQ.
+# --------------------------------------------------------------------
+
+define test_rabbitmq_config
+%% vim:ft=erlang:
+
+[
+ {rabbit, [
+$(if $(RABBITMQ_NODE_PORT), {tcp_listeners$(comma) [$(RABBITMQ_NODE_PORT)]}$(comma),)
+ {loopback_users, []},
+ {log, [{file, [{level, debug}]},
+ {console, [{level, debug}]}]}
+ ]},
+ {rabbitmq_management, [
+$(if $(RABBITMQ_NODE_PORT), {listener$(comma) [{port$(comma) $(shell echo "$$(($(RABBITMQ_NODE_PORT) + 10000))")}]},)
+ ]},
+ {rabbitmq_mqtt, [
+$(if $(RABBITMQ_NODE_PORT), {tcp_listeners$(comma) [$(shell echo "$$((1883 + $(RABBITMQ_NODE_PORT) - 5672))")]},)
+ ]},
+ {rabbitmq_stomp, [
+$(if $(RABBITMQ_NODE_PORT), {tcp_listeners$(comma) [$(shell echo "$$((61613 + $(RABBITMQ_NODE_PORT) - 5672))")]},)
+ ]},
+ {ra, [
+ {data_dir, "$(RABBITMQ_QUORUM_DIR)"},
+ {wal_sync_method, sync}
+ ]},
+ {lager, [
+ {colors, [
+ %% https://misc.flogisoft.com/bash/tip_colors_and_formatting
+ {debug, "\\\e[0;34m" },
+ {info, "\\\e[1;37m" },
+ {notice, "\\\e[1;36m" },
+ {warning, "\\\e[1;33m" },
+ {error, "\\\e[1;31m" },
+ {critical, "\\\e[1;35m" },
+ {alert, "\\\e[1;44m" },
+ {emergency, "\\\e[1;41m" }
+ ]}
+ ]},
+ {osiris, [
+ {data_dir, "$(RABBITMQ_STREAM_DIR)"}
+ ]}
+].
+endef
+
+define test_rabbitmq_config_with_tls
+%% vim:ft=erlang:
+
+[
+ {rabbit, [
+ {loopback_users, []},
+ {log, [{file, [{level, debug}]},
+ {console, [{level, debug}]}]},
+ {ssl_listeners, [5671]},
+ {ssl_options, [
+ {cacertfile, "$(TEST_TLS_CERTS_DIR_in_config)/testca/cacert.pem"},
+ {certfile, "$(TEST_TLS_CERTS_DIR_in_config)/server/cert.pem"},
+ {keyfile, "$(TEST_TLS_CERTS_DIR_in_config)/server/key.pem"},
+ {verify, verify_peer},
+ {fail_if_no_peer_cert, false},
+ {honor_cipher_order, true}]}
+ ]},
+ {rabbitmq_management, [
+ {listener, [
+ {port, 15671},
+ {ssl, true},
+ {ssl_opts, [
+ {cacertfile, "$(TEST_TLS_CERTS_DIR_in_config)/testca/cacert.pem"},
+ {certfile, "$(TEST_TLS_CERTS_DIR_in_config)/server/cert.pem"},
+ {keyfile, "$(TEST_TLS_CERTS_DIR_in_config)/server/key.pem"},
+ {verify, verify_peer},
+ {fail_if_no_peer_cert, false},
+ {honor_cipher_order, true}]}
+ ]}
+ ]},
+ {ra, [
+ {data_dir, "$(RABBITMQ_QUORUM_DIR)"},
+ {wal_sync_method, sync}
+ ]},
+ {lager, [
+ {colors, [
+ %% https://misc.flogisoft.com/bash/tip_colors_and_formatting
+ {debug, "\\\e[0;34m" },
+ {info, "\\\e[1;37m" },
+ {notice, "\\\e[1;36m" },
+ {warning, "\\\e[1;33m" },
+ {error, "\\\e[1;31m" },
+ {critical, "\\\e[1;35m" },
+ {alert, "\\\e[1;44m" },
+ {emergency, "\\\e[1;41m" }
+ ]}
+ ]},
+ {osiris, [
+ {data_dir, "$(RABBITMQ_STREAM_DIR)"}
+ ]}
+].
+endef
+
+TEST_CONFIG_FILE ?= $(TEST_TMPDIR)/test.config
+TEST_TLS_CERTS_DIR := $(TEST_TMPDIR)/tls-certs
+ifeq ($(origin TEST_TLS_CERTS_DIR_in_config),undefined)
+ifeq ($(PLATFORM),msys2)
+TEST_TLS_CERTS_DIR_in_config := $(shell echo $(TEST_TLS_CERTS_DIR) | sed -E "s,^/([^/]+),\1:,")
+else
+TEST_TLS_CERTS_DIR_in_config := $(TEST_TLS_CERTS_DIR)
+endif
+export TEST_TLS_CERTS_DIR_in_config
+endif
+
+.PHONY: $(TEST_CONFIG_FILE)
+$(TEST_CONFIG_FILE): node-tmpdir
+ $(gen_verbose) printf "$(subst $(newline),\n,$(subst ",\",$(config)))" > $@
+
+$(TEST_TLS_CERTS_DIR): node-tmpdir
+ $(gen_verbose) $(MAKE) -C $(DEPS_DIR)/rabbitmq_ct_helpers/tools/tls-certs \
+ DIR=$(TEST_TLS_CERTS_DIR) all
+
+show-test-tls-certs-dir: $(TEST_TLS_CERTS_DIR)
+ @echo $(TEST_TLS_CERTS_DIR)
+
+ifdef NOBUILD
+DIST_TARGET ?=
+else
+ifeq ($(wildcard ebin/test),)
+DIST_TARGET ?= dist
+else
+DIST_TARGET ?= test-dist
+endif
+endif
+
+run-broker run-tls-broker: RABBITMQ_CONFIG_FILE := $(basename $(TEST_CONFIG_FILE))
+run-broker: config := $(test_rabbitmq_config)
+run-tls-broker: config := $(test_rabbitmq_config_with_tls)
+run-tls-broker: $(TEST_TLS_CERTS_DIR)
+
+run-broker run-tls-broker: node-tmpdir $(DIST_TARGET) $(TEST_CONFIG_FILE)
+ $(BASIC_SCRIPT_ENV_SETTINGS) \
+ RABBITMQ_ALLOW_INPUT=true \
+ RABBITMQ_CONFIG_FILE=$(RABBITMQ_CONFIG_FILE) \
+ $(RABBITMQ_SERVER)
+
+run-background-broker: node-tmpdir $(DIST_TARGET)
+ $(BASIC_SCRIPT_ENV_SETTINGS) \
+ $(RABBITMQ_SERVER) -detached
+
+# --------------------------------------------------------------------
+# Run a bare Erlang node.
+# --------------------------------------------------------------------
+
+run-node: node-tmpdir $(DIST_TARGET)
+ $(BASIC_SCRIPT_ENV_SETTINGS) \
+ RABBITMQ_NODE_ONLY=true \
+ RABBITMQ_ALLOW_INPUT=true \
+ $(RABBITMQ_SERVER)
+
+run-background-node: virgin-node-tmpdir $(DIST_TARGET)
+ $(BASIC_SCRIPT_ENV_SETTINGS) \
+ RABBITMQ_NODE_ONLY=true \
+ $(RABBITMQ_SERVER) -detached
+
+# --------------------------------------------------------------------
+# Start RabbitMQ in the background.
+# --------------------------------------------------------------------
+
+ifneq ($(LOG_TO_STDIO),yes)
+REDIRECT_STDIO = > $(RABBITMQ_LOG_BASE)/startup_log \
+ 2> $(RABBITMQ_LOG_BASE)/startup_err
+endif
+
+RMQCTL_WAIT_TIMEOUT ?= 60
+
+define rmq_started
+true = rpc:call('$(1)', rabbit, is_running, []),
+halt().
+endef
+
+start-background-node: node-tmpdir $(DIST_TARGET)
+ $(BASIC_SCRIPT_ENV_SETTINGS) \
+ RABBITMQ_NODE_ONLY=true \
+ $(RABBITMQ_SERVER) \
+ $(REDIRECT_STDIO) &
+ ERL_LIBS="$(DIST_ERL_LIBS)" \
+ $(RABBITMQCTL) -n $(RABBITMQ_NODENAME) wait --timeout $(RMQCTL_WAIT_TIMEOUT) $(RABBITMQ_PID_FILE) kernel
+
+start-background-broker: node-tmpdir $(DIST_TARGET)
+ $(BASIC_SCRIPT_ENV_SETTINGS) \
+ $(RABBITMQ_SERVER) \
+ $(REDIRECT_STDIO) &
+ ERL_LIBS="$(DIST_ERL_LIBS)" \
+ $(RABBITMQCTL) -n $(RABBITMQ_NODENAME) wait --timeout $(RMQCTL_WAIT_TIMEOUT) $(RABBITMQ_PID_FILE) && \
+ ERL_LIBS="$(DIST_ERL_LIBS)" \
+ $(call erlang,$(call rmq_started,$(RABBITMQ_NODENAME)),-sname sbb-$$$$ -hidden)
+
+start-rabbit-on-node:
+ $(exec_verbose) ERL_LIBS="$(DIST_ERL_LIBS)" \
+ $(RABBITMQCTL) -n $(RABBITMQ_NODENAME) \
+ eval 'rabbit:start().' | \
+ sed -E -e '/^ completed with .* plugins\.$$/d' -e '/^ok$$/d'
+ $(verbose) ERL_LIBS="$(DIST_ERL_LIBS)" \
+ $(RABBITMQCTL) -n $(RABBITMQ_NODENAME) wait --timeout $(RMQCTL_WAIT_TIMEOUT) $(RABBITMQ_PID_FILE)
+
+stop-rabbit-on-node:
+ $(exec_verbose) ERL_LIBS="$(DIST_ERL_LIBS)" \
+ $(RABBITMQCTL) -n $(RABBITMQ_NODENAME) \
+ eval 'rabbit:stop().' | \
+ sed -E -e '/^ok$$/d'
+
+stop-node:
+ $(exec_verbose) ( \
+ pid=$$(test -f $(RABBITMQ_PID_FILE) && cat $(RABBITMQ_PID_FILE)); \
+ test "$$pid" && \
+ kill -TERM "$$pid" && \
+ echo waiting for process to exit && \
+ while ps -p "$$pid" >/dev/null 2>&1; do sleep 1; done \
+ ) || :
+
+# " <-- To please Vim syntax highlighting.
+
+# --------------------------------------------------------------------
+# Start a RabbitMQ cluster in the background.
+# --------------------------------------------------------------------
+
+NODES ?= 2
+
+start-brokers start-cluster: $(DIST_TARGET)
+ @for n in $$(seq $(NODES)); do \
+ nodename="rabbit-$$n@$(HOSTNAME)"; \
+ $(MAKE) start-background-broker \
+ NOBUILD=1 \
+ RABBITMQ_NODENAME="$$nodename" \
+ RABBITMQ_NODE_PORT="$$((5672 + $$n - 1))" \
+ RABBITMQ_SERVER_START_ARGS=" \
+ -rabbit loopback_users [] \
+ -rabbitmq_management listener [{port,$$((15672 + $$n - 1))}] \
+ -rabbitmq_mqtt tcp_listeners [$$((1883 + $$n - 1))] \
+ -rabbitmq_stomp tcp_listeners [$$((61613 + $$n - 1))] \
+ -rabbitmq_prometheus tcp_config [{port,$$((15692 + $$n - 1))}] \
+ -rabbitmq_stream tcp_listeners [$$((5555 + $$n - 1))] \
+ "; \
+ if test '$@' = 'start-cluster' && test "$$nodename1"; then \
+ ERL_LIBS="$(DIST_ERL_LIBS)" \
+ $(RABBITMQCTL) -n "$$nodename" stop_app; \
+ ERL_LIBS="$(DIST_ERL_LIBS)" \
+ $(RABBITMQCTL) -n "$$nodename" join_cluster "$$nodename1"; \
+ ERL_LIBS="$(DIST_ERL_LIBS)" \
+ $(RABBITMQCTL) -n "$$nodename" start_app; \
+ else \
+ nodename1=$$nodename; \
+ fi; \
+ done
+
+stop-brokers stop-cluster:
+ @for n in $$(seq $(NODES) -1 1); do \
+ nodename="rabbit-$$n@$(HOSTNAME)"; \
+ $(MAKE) stop-node \
+ RABBITMQ_NODENAME="$$nodename"; \
+ done
+
+# --------------------------------------------------------------------
+# Used by testsuites.
+# --------------------------------------------------------------------
+
+set-resource-alarm:
+ $(exec_verbose) ERL_LIBS="$(DIST_ERL_LIBS)" \
+ $(RABBITMQCTL) -n $(RABBITMQ_NODENAME) \
+ eval 'rabbit_alarm:set_alarm({{resource_limit, $(SOURCE), node()}, []}).'
+
+clear-resource-alarm:
+ $(exec_verbose) ERL_LIBS="$(DIST_ERL_LIBS)" \
+ $(RABBITMQCTL) -n $(RABBITMQ_NODENAME) \
+ eval 'rabbit_alarm:clear_alarm({resource_limit, $(SOURCE), node()}).'
diff --git a/deps/rabbit_common/mk/rabbitmq-test.mk b/deps/rabbit_common/mk/rabbitmq-test.mk
new file mode 100644
index 0000000000..931f072125
--- /dev/null
+++ b/deps/rabbit_common/mk/rabbitmq-test.mk
@@ -0,0 +1,80 @@
+.PHONY: ct-slow ct-fast
+
+# Run the common_test suites selected by CT_SUITES via the `ct` target.
+# NOTE(review): both targets pass CT_SUITES through unchanged, so
+# ct-slow and ct-fast behave identically unless the including Makefile
+# sets CT_SUITES per target — confirm against the project Makefiles.
+ct-slow ct-fast:
+ $(MAKE) ct CT_SUITES='$(CT_SUITES)'
+
+# --------------------------------------------------------------------
+# xref
+# --------------------------------------------------------------------
+
+# We need the list of dependencies of the current project. We use it in
+# xrefr(1) to scan for Elixir-based projects. For those, we need to add
+# the path inside `_build` to the xref code path.
+
+ifneq ($(filter xref,$(MAKECMDGOALS)),)
+export ERLANG_MK_RECURSIVE_DEPS_LIST
+endif
+
+xref: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+
+# --------------------------------------------------------------------
+# Helpers to run Make targets on Concourse.
+# --------------------------------------------------------------------
+
+FLY ?= fly
+FLY_TARGET ?= $(shell $(FLY) targets | awk '/ci\.rabbitmq\.com/ { print $$1; }')
+
+CONCOURSE_TASK = $(ERLANG_MK_TMP)/concourse-task.yaml
+
+CI_DIR ?= $(DEPS_DIR)/ci
+PIPELINE_DIR = $(CI_DIR)/server-release
+BRANCH_RELEASE = $(shell "$(PIPELINE_DIR)/scripts/map-branch-to-release.sh" "$(base_rmq_ref)")
+PIPELINE_DATA = $(PIPELINE_DIR)/release-data-$(BRANCH_RELEASE).yaml
+REPOSITORY_NAME = $(shell "$(PIPELINE_DIR)/scripts/map-erlang-app-and-repository-name.sh" "$(PIPELINE_DATA)" "$(PROJECT)")
+
+CONCOURSE_PLATFORM ?= linux
+ERLANG_VERSION ?= $(shell "$(PIPELINE_DIR)/scripts/list-erlang-versions.sh" "$(PIPELINE_DATA)" | head -n 1)
+TASK_INPUTS = $(shell "$(PIPELINE_DIR)/scripts/list-task-inputs.sh" "$(CONCOURSE_TASK)")
+
+.PHONY: $(CONCOURSE_TASK)
+$(CONCOURSE_TASK): $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST)
+ $(gen_verbose) echo 'platform: $(CONCOURSE_PLATFORM)' > "$@"
+ $(verbose) echo 'inputs:' >> "$@"
+ $(verbose) echo ' - name: $(PROJECT)' >> "$@"
+ $(verbose) cat $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) | while read -r file; do \
+ echo " - name: $$(basename "$$file")" >> "$@"; \
+ done
+ $(verbose) echo 'outputs:' >> "$@"
+ $(verbose) echo ' - name: test-output' >> "$@"
+ifeq ($(CONCOURSE_PLATFORM),linux)
+ $(verbose) echo 'image_resource:' >> "$@"
+ $(verbose) echo ' type: docker-image' >> "$@"
+ $(verbose) echo ' source:' >> "$@"
+ $(verbose) echo ' repository: pivotalrabbitmq/rabbitmq-server-buildenv' >> "$@"
+ $(verbose) echo ' tag: linux-erlang-$(ERLANG_VERSION)' >> "$@"
+endif
+ $(verbose) echo 'run:' >> "$@"
+ $(verbose) echo ' path: ci/server-release/scripts/test-erlang-app.sh' >> "$@"
+ $(verbose) echo ' args:' >> "$@"
+ $(verbose) echo " - $(PROJECT)" >> "$@"
+# This section must be the last because the `%-on-concourse` target
+# appends other variables.
+ $(verbose) echo 'params:' >> "$@"
+ifdef V
+ $(verbose) echo ' V: "$(V)"' >> "$@"
+endif
+ifdef t
+ $(verbose) echo ' t: "$(t)"' >> "$@"
+endif
+
+%-on-concourse: $(CONCOURSE_TASK)
+ $(verbose) test -d "$(PIPELINE_DIR)"
+ $(verbose) echo ' MAKE_TARGET: "$*"' >> "$(CONCOURSE_TASK)"
+ $(FLY) -t $(FLY_TARGET) execute \
+ --config="$(CONCOURSE_TASK)" \
+ $(foreach input,$(TASK_INPUTS), \
+ $(if $(filter $(PROJECT),$(input)), \
+ --input="$(input)=.", \
+ --input="$(input)=$(DEPS_DIR)/$(input)")) \
+ --output="test-output=$(CT_LOGS_DIR)/on-concourse"
+ $(verbose) rm -f "$(CT_LOGS_DIR)/on-concourse/filename"
diff --git a/deps/rabbit_common/mk/rabbitmq-tools.mk b/deps/rabbit_common/mk/rabbitmq-tools.mk
new file mode 100644
index 0000000000..6672153cb0
--- /dev/null
+++ b/deps/rabbit_common/mk/rabbitmq-tools.mk
@@ -0,0 +1,429 @@
+ifeq ($(PLATFORM),msys2)
+HOSTNAME := $(COMPUTERNAME)
+else
+HOSTNAME := $(shell hostname -s)
+endif
+
+# RabbitMQ components among this project's deps that are already
+# checked out under $(DEPS_DIR), i.e. the ones batch targets below
+# (update-erlang-mk, sync-gituser, ...) can actually operate on.
+READY_DEPS = $(foreach DEP,\
+ $(filter $(RABBITMQ_COMPONENTS),$(DEPS) $(BUILD_DEPS) $(TEST_DEPS)), \
+ $(if $(wildcard $(DEPS_DIR)/$(DEP)),$(DEP),))
+
+# Components shipped in a release: runtime/build deps only, test deps excluded.
+RELEASED_RMQ_DEPS = $(filter $(RABBITMQ_COMPONENTS),$(DEPS) $(BUILD_DEPS))
+
+.PHONY: show-upstream-git-fetch-url show-upstream-git-push-url \
+ show-current-git-fetch-url show-current-git-push-url
+
+show-upstream-git-fetch-url:
+ @echo $(RABBITMQ_UPSTREAM_FETCH_URL)
+
+show-upstream-git-push-url:
+ @echo $(RABBITMQ_UPSTREAM_PUSH_URL)
+
+show-current-git-fetch-url:
+ @echo $(RABBITMQ_CURRENT_FETCH_URL)
+
+show-current-git-push-url:
+ @echo $(RABBITMQ_CURRENT_PUSH_URL)
+
+.PHONY: update-erlang-mk update-rabbitmq-components.mk
+
+update-erlang-mk: erlang-mk
+ $(verbose) if test "$(DO_COMMIT)" = 'yes'; then \
+ git diff --quiet -- erlang.mk \
+ || git commit -m 'Update erlang.mk' -- erlang.mk; \
+ fi
+ $(verbose) for repo in $(READY_DEPS:%=$(DEPS_DIR)/%); do \
+ ! test -f $$repo/erlang.mk \
+ || $(MAKE) -C $$repo erlang-mk; \
+ if test "$(DO_COMMIT)" = 'yes'; then \
+ (cd $$repo; \
+ git diff --quiet -- erlang.mk \
+ || git commit -m 'Update erlang.mk' -- erlang.mk); \
+ fi; \
+ done
+
+# --------------------------------------------------------------------
+# rabbitmq-components.mk checks.
+# --------------------------------------------------------------------
+
+UPSTREAM_RMQ_COMPONENTS_MK = $(DEPS_DIR)/rabbit_common/mk/rabbitmq-components.mk
+
+ifeq ($(PROJECT),rabbit_common)
+check-rabbitmq-components.mk:
+ @:
+else
+check-rabbitmq-components.mk:
+ $(verbose) cmp -s rabbitmq-components.mk \
+ $(UPSTREAM_RMQ_COMPONENTS_MK) || \
+ (echo "error: rabbitmq-components.mk must be updated!" 1>&2; \
+ false)
+endif
+
+ifeq ($(PROJECT),rabbit_common)
+rabbitmq-components-mk:
+ @:
+else
+rabbitmq-components-mk:
+ifeq ($(FORCE),yes)
+ $(gen_verbose) cp -a $(UPSTREAM_RMQ_COMPONENTS_MK) .
+else
+ $(gen_verbose) if test -d .git && test -d $(DEPS_DIR)/rabbit_common/.git; then \
+ upstream_branch=$$(LANG=C git -C $(DEPS_DIR)/rabbit_common branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}'); \
+ local_branch=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}'); \
+ test "$$local_branch" = "$$upstream_branch" || exit 0; \
+ fi; \
+ cp -a $(UPSTREAM_RMQ_COMPONENTS_MK) .
+endif
+ifeq ($(DO_COMMIT),yes)
+ $(verbose) git diff --quiet rabbitmq-components.mk \
+ || git commit -m 'Update rabbitmq-components.mk' rabbitmq-components.mk
+endif
+endif
+
+update-rabbitmq-components-mk: rabbitmq-components-mk
+ $(verbose) for repo in $(READY_DEPS:%=$(DEPS_DIR)/%); do \
+ ! test -f $$repo/rabbitmq-components.mk \
+ || $(MAKE) -C $$repo rabbitmq-components-mk; \
+ done
+
+update-contributor-code-of-conduct:
+ $(verbose) for repo in $(READY_DEPS:%=$(DEPS_DIR)/%); do \
+ cp $(DEPS_DIR)/rabbit_common/CODE_OF_CONDUCT.md $$repo/CODE_OF_CONDUCT.md; \
+ cp $(DEPS_DIR)/rabbit_common/CONTRIBUTING.md $$repo/CONTRIBUTING.md; \
+ done
+
+ifdef CREDS
+define replace_aws_creds
+ set -e; \
+ if test -f "$(CREDS)"; then \
+ key_id=$(shell travis encrypt --no-interactive \
+ "AWS_ACCESS_KEY_ID=$$(awk '/^rabbitmq-s3-access-key-id/ { print $$2; }' < "$(CREDS)")"); \
+ access_key=$(shell travis encrypt --no-interactive \
+ "AWS_SECRET_ACCESS_KEY=$$(awk '/^rabbitmq-s3-secret-access-key/ { print $$2; }' < "$(CREDS)")"); \
+ mv .travis.yml .travis.yml.orig; \
+ awk "\
+ /^ global:/ { \
+ print; \
+ print \" - secure: $$key_id\"; \
+ print \" - secure: $$access_key\"; \
+ next; \
+ } \
+ /- secure:/ { next; } \
+ { print; }" < .travis.yml.orig > .travis.yml; \
+ rm -f .travis.yml.orig; \
+ else \
+ echo " INFO: CREDS file missing; not setting/updating AWS credentials"; \
+ fi
+endef
+else
+define replace_aws_creds
+ echo " INFO: CREDS not set; not setting/updating AWS credentials"
+endef
+endif
+
+ifeq ($(PROJECT),rabbit_common)
+travis-yml:
+ $(gen_verbose) $(replace_aws_creds)
+else
+travis-yml:
+ $(gen_verbose) \
+ set -e; \
+ if test -d .git && test -d $(DEPS_DIR)/rabbit_common/.git; then \
+ upstream_branch=$$(LANG=C git -C $(DEPS_DIR)/rabbit_common branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}'); \
+ local_branch=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}'); \
+ test "$$local_branch" = "$$upstream_branch" || exit 0; \
+ fi; \
+ test -f .travis.yml || exit 0; \
+ (grep -E -- '- secure:' .travis.yml || :) > .travis.yml.creds; \
+ cp -a $(DEPS_DIR)/rabbit_common/.travis.yml .travis.yml.orig; \
+ awk ' \
+ /^ global:/ { \
+ print; \
+ system("test -f .travis.yml.creds && cat .travis.yml.creds"); \
+ next; \
+ } \
+ /- secure:/ { next; } \
+ { print; } \
+ ' < .travis.yml.orig > .travis.yml; \
+ rm -f .travis.yml.orig .travis.yml.creds; \
+ if test -f .travis.yml.patch; then \
+ patch -p0 < .travis.yml.patch; \
+ rm -f .travis.yml.orig; \
+ fi; \
+ $(replace_aws_creds)
+ifeq ($(DO_COMMIT),yes)
+ $(verbose) ! test -f .travis.yml || \
+ git diff --quiet .travis.yml \
+ || git commit -m 'Travis CI: Update config from rabbitmq-common' .travis.yml
+endif
+endif
+
+update-travis-yml: travis-yml
+ $(verbose) for repo in $(READY_DEPS:%=$(DEPS_DIR)/%); do \
+ ! test -f $$repo/rabbitmq-components.mk \
+ || $(MAKE) -C $$repo travis-yml; \
+ done
+
+ifneq ($(wildcard .git),)
+
+.PHONY: sync-gitremote sync-gituser
+
+sync-gitremote: $(READY_DEPS:%=$(DEPS_DIR)/%+sync-gitremote)
+ @:
+
+%+sync-gitremote:
+ $(exec_verbose) cd $* && \
+ git remote set-url origin \
+ '$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(notdir $*))'
+ $(verbose) cd $* && \
+ git remote set-url --push origin \
+ '$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(notdir $*))'
+
+# Capture the user's global and per-repository Git identity (exported
+# so the sub-makes spawned by sync-gituser see the same values).
+# Fixed: the first condition read "$(origin, RMQ_GIT_GLOBAL_USER_NAME)"
+# with a stray comma; GNU make parses that as a plain (empty) variable
+# reference, not the origin function, so the ifeq never matched and
+# RMQ_GIT_GLOBAL_USER_NAME was never initialized from git config.
+ifeq ($(origin RMQ_GIT_GLOBAL_USER_NAME),undefined)
+RMQ_GIT_GLOBAL_USER_NAME := $(shell git config --global user.name)
+export RMQ_GIT_GLOBAL_USER_NAME
+endif
+ifeq ($(origin RMQ_GIT_GLOBAL_USER_EMAIL),undefined)
+RMQ_GIT_GLOBAL_USER_EMAIL := $(shell git config --global user.email)
+export RMQ_GIT_GLOBAL_USER_EMAIL
+endif
+ifeq ($(origin RMQ_GIT_USER_NAME),undefined)
+RMQ_GIT_USER_NAME := $(shell git config user.name)
+export RMQ_GIT_USER_NAME
+endif
+ifeq ($(origin RMQ_GIT_USER_EMAIL),undefined)
+RMQ_GIT_USER_EMAIL := $(shell git config user.email)
+export RMQ_GIT_USER_EMAIL
+endif
+
+sync-gituser: $(READY_DEPS:%=$(DEPS_DIR)/%+sync-gituser)
+ @:
+
+%+sync-gituser:
+ifeq ($(RMQ_GIT_USER_NAME),$(RMQ_GIT_GLOBAL_USER_NAME))
+ $(exec_verbose) cd $* && git config --unset user.name || :
+else
+ $(exec_verbose) cd $* && git config user.name "$(RMQ_GIT_USER_NAME)"
+endif
+ifeq ($(RMQ_GIT_USER_EMAIL),$(RMQ_GIT_GLOBAL_USER_EMAIL))
+ $(verbose) cd $* && git config --unset user.email || :
+else
+ $(verbose) cd $* && git config user.email "$(RMQ_GIT_USER_EMAIL)"
+endif
+
+.PHONY: sync-gitignore-from-master
+sync-gitignore-from-master: $(READY_DEPS:%=$(DEPS_DIR)/%+sync-gitignore-from-master)
+
+%+sync-gitignore-from-master:
+ $(gen_verbose) cd $* && \
+ if test -d .git; then \
+ branch=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}'); \
+ ! test "$$branch" = 'master' || exit 0; \
+ git show origin/master:.gitignore > .gitignore; \
+ fi
+ifeq ($(DO_COMMIT),yes)
+ $(verbose) cd $* && \
+ if test -d .git; then \
+ git diff --quiet .gitignore \
+ || git commit -m 'Git: Sync .gitignore from master' .gitignore; \
+ fi
+endif
+
+.PHONY: show-branch
+
+show-branch: $(READY_DEPS:%=$(DEPS_DIR)/%+show-branch)
+ $(verbose) printf '%-34s %s\n' $(PROJECT): "$$(git symbolic-ref -q --short HEAD || git describe --tags --exact-match)"
+
+%+show-branch:
+ $(verbose) printf '%-34s %s\n' $(notdir $*): "$$(cd $* && (git symbolic-ref -q --short HEAD || git describe --tags --exact-match))"
+
+SINCE_TAG ?= last-release
+COMMITS_LOG_OPTS ?= --oneline --decorate --no-merges
+MARKDOWN ?= no
+
+define show_commits_since_tag
+set -e; \
+if test "$1"; then \
+ erlang_app=$(notdir $1); \
+ repository=$(call rmq_cmp_repo_name,$(notdir $1)); \
+ git_dir=-C\ "$1"; \
+else \
+ erlang_app=$(PROJECT); \
+ repository=$(call rmq_cmp_repo_name,$(PROJECT)); \
+fi; \
+case "$(SINCE_TAG)" in \
+last-release) \
+ tags_count=$$(git $$git_dir tag -l 2>/dev/null | grep -E -v '(-beta|_milestone|[-_]rc)' | wc -l); \
+ ;; \
+*) \
+ tags_count=$$(git $$git_dir tag -l 2>/dev/null | wc -l); \
+ ;; \
+esac; \
+if test "$$tags_count" -gt 0; then \
+ case "$(SINCE_TAG)" in \
+ last-release) \
+ ref=$$(git $$git_dir describe --abbrev=0 --tags \
+ --exclude "*-beta*" \
+ --exclude "*_milestone*" \
+ --exclude "*[-_]rc*"); \
+ ;; \
+ last-prerelease) \
+ ref=$$(git $$git_dir describe --abbrev=0 --tags); \
+ ;; \
+ *) \
+ git $$git_dir rev-parse "$(SINCE_TAG)" -- >/dev/null; \
+ ref=$(SINCE_TAG); \
+ ;; \
+ esac; \
+ commits_count=$$(git $$git_dir log --oneline "$$ref.." | wc -l); \
+ if test "$$commits_count" -gt 0; then \
+ if test "$(MARKDOWN)" = yes; then \
+ printf "\n## [\`$$repository\`](https://github.com/rabbitmq/$$repository)\n\nCommits since \`$$ref\`:\n\n"; \
+ git $$git_dir --no-pager log $(COMMITS_LOG_OPTS) \
+ --format="format:* %s ([\`%h\`](https://github.com/rabbitmq/$$repository/commit/%H))" \
+ "$$ref.."; \
+ echo; \
+ else \
+ echo; \
+ echo "# $$repository - Commits since $$ref"; \
+ git $$git_dir log $(COMMITS_LOG_OPTS) "$$ref.."; \
+ fi; \
+ fi; \
+else \
+ if test "$(MARKDOWN)" = yes; then \
+ printf "\n## [\`$$repository\`](https://github.com/rabbitmq/$$repository)\n\n**New** since the last release!\n"; \
+ else \
+ echo; \
+ echo "# $$repository - New since the last release!"; \
+ fi; \
+fi
+endef
+
+.PHONY: commits-since-release
+
+commits-since-release: commits-since-release-title \
+ $(RELEASED_RMQ_DEPS:%=$(DEPS_DIR)/%+commits-since-release)
+ $(verbose) $(call show_commits_since_tag)
+
+commits-since-release-title:
+ $(verbose) set -e; \
+ case "$(SINCE_TAG)" in \
+ last-release) \
+ tags_count=$$(git $$git_dir tag -l 2>/dev/null | grep -E -v '(-beta|_milestone|[-_]rc)' | wc -l); \
+ ;; \
+ *) \
+ tags_count=$$(git $$git_dir tag -l 2>/dev/null | wc -l); \
+ ;; \
+ esac; \
+ if test "$$tags_count" -gt 0; then \
+ case "$(SINCE_TAG)" in \
+ last-release) \
+ ref=$$(git $$git_dir describe --abbrev=0 --tags \
+ --exclude "*-beta*" \
+ --exclude "*_milestone*" \
+ --exclude "*[-_]rc*"); \
+ ;; \
+ last-prerelease) \
+ ref=$$(git $$git_dir describe --abbrev=0 --tags); \
+ ;; \
+ *) \
+ ref=$(SINCE_TAG); \
+ ;; \
+ esac; \
+ version=$$(echo "$$ref" | sed -E \
+ -e 's/rabbitmq_v([0-9]+)_([0-9]+)_([0-9]+)/v\1.\2.\3/' \
+ -e 's/_milestone/-beta./' \
+ -e 's/_rc/-rc./' \
+ -e 's/^v//'); \
+ echo "# Changes since RabbitMQ $$version"; \
+ else \
+ echo "# Changes since the beginning of time"; \
+ fi
+
+%+commits-since-release:
+ $(verbose) $(call show_commits_since_tag,$*)
+
+endif # ($(wildcard .git),)
+
+# --------------------------------------------------------------------
+# erlang.mk query-deps* formatting.
+# --------------------------------------------------------------------
+
+# We need to provide a repo mapping for deps resolved via git_rmq fetch method
+query_repo_git_rmq = https://github.com/rabbitmq/$(call rmq_cmp_repo_name,$(1))
+
+# --------------------------------------------------------------------
+# Common test logs compression.
+# --------------------------------------------------------------------
+
+.PHONY: ct-logs-archive clean-ct-logs-archive
+
+ifneq ($(wildcard logs/*),)
+TAR := tar
+ifeq ($(PLATFORM),freebsd)
+TAR := gtar
+endif
+ifeq ($(PLATFORM),darwin)
+TAR := gtar
+endif
+
+CT_LOGS_ARCHIVE ?= $(PROJECT)-ct-logs-$(subst _,-,$(subst -,,$(subst .,,$(patsubst ct_run.ct_$(PROJECT)@$(HOSTNAME).%,%,$(notdir $(lastword $(wildcard logs/ct_run.*))))))).tar.xz
+
+ifeq ($(patsubst %.tar.xz,%,$(CT_LOGS_ARCHIVE)),$(CT_LOGS_ARCHIVE))
+$(error CT_LOGS_ARCHIVE file must use '.tar.xz' as its filename extension)
+endif
+
+ct-logs-archive: $(CT_LOGS_ARCHIVE)
+ @:
+
+$(CT_LOGS_ARCHIVE):
+ $(gen_verbose) \
+ for file in logs/*; do \
+ ! test -L "$$file" || rm "$$file"; \
+ done
+ $(verbose) \
+ $(TAR) -c \
+ --exclude "*/mnesia" \
+ --transform "s/^logs/$(patsubst %.tar.xz,%,$(notdir $(CT_LOGS_ARCHIVE)))/" \
+ -f - logs | \
+ xz > "$@"
+else
+ct-logs-archive:
+ @:
+endif
+
+clean-ct-logs-archive::
+ $(gen_verbose) rm -f $(PROJECT)-ct-logs-*.tar.xz
+
+clean:: clean-ct-logs-archive
+
+# --------------------------------------------------------------------
+# Generate a file listing RabbitMQ component dependencies and their
+# Git commit hash.
+# --------------------------------------------------------------------
+
+.PHONY: rabbitmq-deps.mk clean-rabbitmq-deps.mk
+
+rabbitmq-deps.mk: $(PROJECT)-rabbitmq-deps.mk
+ @:
+
+closing_paren := )
+
+define rmq_deps_mk_line
+dep_$(1) := git $(dir $(RABBITMQ_UPSTREAM_FETCH_URL))$(call rmq_cmp_repo_name,$(1)).git $$(git -C "$(2)" rev-parse HEAD)
+endef
+
+$(PROJECT)-rabbitmq-deps.mk: $(ERLANG_MK_RECURSIVE_DEPS_LIST)
+ $(gen_verbose) echo "# In $(PROJECT) - commit $$(git rev-parse HEAD)" > $@
+ $(verbose) cat $(ERLANG_MK_RECURSIVE_DEPS_LIST) | \
+ while read -r dir; do \
+ component=$$(basename "$$dir"); \
+ case "$$component" in \
+ $(foreach component,$(RABBITMQ_COMPONENTS),$(component)$(closing_paren) echo "$(call rmq_deps_mk_line,$(component),$$dir)" ;;) \
+ esac; \
+ done >> $@
+
+clean:: clean-rabbitmq-deps.mk
+
+clean-rabbitmq-deps.mk:
+ $(gen_verbose) rm -f $(PROJECT)-rabbitmq-deps.mk
diff --git a/deps/rabbit_common/mk/xrefr b/deps/rabbit_common/mk/xrefr
new file mode 100755
index 0000000000..03c408fcb4
--- /dev/null
+++ b/deps/rabbit_common/mk/xrefr
@@ -0,0 +1,338 @@
+#!/usr/bin/env escript
+%% vim:ft=erlang:
+
+%% The code is copied from xref_runner.
+%% https://github.com/inaka/xref_runner
+%%
+%% The only change is the support of our erlang_version_support
+%% attribute: we don't want any warnings about functions which will be
+%% dropped at load time.
+%%
+%% It's also a plain text escript instead of a compiled one because we
+%% want to support Erlang R16B03 and the version of xref_runner uses
+%% maps and is built with something like Erlang 18.
+
+%% Compiled mode allows us to reference local functions, for instance:
+%% lists:map(fun generate_comment/1, Comments)
+-mode(compile).
+
+-define(DIRS, ["ebin", "test"]).
+
+-define(CHECKS, [undefined_function_calls,
+ undefined_functions,
+ locals_not_used]).
+
+%% Entry point of the escript.  Runs every xref check listed in
+%% ?CHECKS, prints the resulting warnings, and exits with status 1 if
+%% any warning was reported, so the build fails on xref issues.
+main(_) ->
+ Checks = ?CHECKS,
+ %% Elixir-based dependencies keep their .beam files under _build;
+ %% those ebin directories must be on the code path for xref.
+ ElixirDeps = get_elixir_deps_paths(),
+ [true = code:add_path(P) || P <- ElixirDeps],
+ XrefWarnings = lists:append([check(Check) || Check <- Checks]),
+ warnings_prn(XrefWarnings),
+ case XrefWarnings of
+ [] -> ok;
+ _ -> halt(1)
+ end.
+
+%% Returns the ebin directories contributed by Elixir-based deps.
+%% The list of dependency directories is read from the file named by
+%% $ERLANG_MK_RECURSIVE_DEPS_LIST (exported by rabbitmq-test.mk when
+%% the xref goal is requested); unset means nothing to add.
+get_elixir_deps_paths() ->
+ case os:getenv("ERLANG_MK_RECURSIVE_DEPS_LIST") of
+ false ->
+ [];
+ Filename ->
+ {ok, Fd} = file:open(Filename, [read]),
+ get_elixir_deps_paths1(Fd, [])
+ end.
+
+%% Reads the deps-list file line by line; each line is one dependency's
+%% root directory.  For every root, any build output matching
+%% "_build/dev/lib/*/ebin" is collected; at eof the Elixir standard
+%% library path is appended as well.
+get_elixir_deps_paths1(Fd, Paths) ->
+ case file:read_line(Fd) of
+ {ok, Line0} ->
+ %% Strip the trailing newline (and CR on Windows).
+ Line = Line0 -- [$\r, $\n],
+ RootPath = case os:type() of
+ {unix, _} ->
+ Line;
+ {win32, _} ->
+ %% Convert a Cygwin/MSYS path to a native
+ %% Windows path when cygpath is available.
+ case os:find_executable("cygpath.exe") of
+ false ->
+ Line;
+ Cygpath ->
+ os:cmd(
+ io_lib:format("~s --windows \"~s\"",
+ [Cygpath, Line]))
+ -- [$\r, $\n]
+ end
+ end,
+ Glob = filename:join([RootPath, "_build", "dev", "lib", "*", "ebin"]),
+ NewPaths = filelib:wildcard(Glob),
+ get_elixir_deps_paths1(Fd, Paths ++ NewPaths);
+ eof ->
+ add_elixir_stdlib_path(Paths)
+ end.
+
+%% Prepends the Elixir standard library's ebin directory to the
+%% accumulated paths, when an Elixir installation can be located.
+add_elixir_stdlib_path(Paths) ->
+ case find_elixir_home() of
+ false -> Paths;
+ ElixirLibDir -> [ElixirLibDir | Paths]
+ end.
+
+%% Locates the elixir executable on the PATH (elixir.bat on Windows)
+%% and resolves it to the installation's lib dir; false when Elixir is
+%% not installed.
+find_elixir_home() ->
+ ElixirExe = case os:type() of
+ {unix, _} -> "elixir";
+ {win32, _} -> "elixir.bat"
+ end,
+ case os:find_executable(ElixirExe) of
+ false -> false;
+ ExePath -> resolve_symlink(ExePath)
+ end.
+
+%% Follows symlinks until a non-link is reached.  read_link_all/1
+%% returns {error, einval} when the path is not a symlink — that is the
+%% fully-resolved case; any other read_link error yields false.
+resolve_symlink(ExePath) ->
+ case file:read_link_all(ExePath) of
+ {error, einval} ->
+ determine_elixir_home(ExePath);
+ {ok, ResolvedLink} ->
+ %% Link targets may be relative to the link's own directory.
+ ExePath1 = filename:absname(ResolvedLink,
+ filename:dirname(ExePath)),
+ resolve_symlink(ExePath1);
+ {error, _} ->
+ false
+ end.
+
+%% Maps the resolved elixir executable path to the Elixir standard
+%% library's ebin directory: <prefix>/bin/elixir -> <prefix>/lib/elixir/ebin.
+%% Returns false when that directory does not exist, matching the
+%% false-or-path contract of find_elixir_home/0 that
+%% add_elixir_stdlib_path/1 pattern-matches on.  (Previously this
+%% returned {skip, Reason}, which the caller would have prepended to
+%% the code-path list, crashing `true = code:add_path(P)` in main/1.)
+determine_elixir_home(ExePath) ->
+ LibPath = filename:join([filename:dirname(filename:dirname(ExePath)),
+ "lib",
+ "elixir",
+ "ebin"]),
+ case filelib:is_dir(LibPath) of
+ true -> LibPath;
+ false -> false
+ end.
+
+%% Runs one xref analysis (e.g. undefined_function_calls) over the
+%% directories in ?DIRS that exist, then filters the raw results and
+%% converts them into warning proplists.  The xref server is always
+%% stopped, even if analysis throws.
+check(Check) ->
+ Dirs = ?DIRS,
+ lists:foreach(fun code:add_path/1, Dirs),
+
+ {ok, Xref} = xref:start([]),
+ try
+ ok = xref:set_library_path(Xref, code:get_path()),
+
+ lists:foreach(
+ fun(Dir) ->
+ case filelib:is_dir(Dir) of
+ true -> {ok, _} = xref:add_directory(Xref, Dir);
+ false -> ok
+ end
+ end, Dirs),
+
+ {ok, Results} = xref:analyze(Xref, Check),
+
+ FilteredResults = filter_xref_results(Check, Results),
+
+ [result_to_warning(Check, Result) || Result <- FilteredResults]
+ after
+ stopped = xref:stop(Xref)
+ end.
+
+%% -------------------------------------------------------------------
+%% Filtering results.
+%% -------------------------------------------------------------------
+
+%% Filters the raw results of one xref check.  A result is dropped when
+%% its source or target matches:
+%%   - an -ignore_xref([...]) attribute in the source module,
+%%   - behaviour callbacks (only for the exports_not_used check),
+%%   - pre/post compatibility functions (erlang_version_support) that
+%%     are dead code on the running OTP release.
+%% The process dictionary key results_to_ignore carries the targets of
+%% suppressed calls across the successive checks run by main/1, so a
+%% function only called by a dead compat function is not reported as
+%% unused by a later check.
+filter_xref_results(Check, Results) ->
+ SourceModules =
+ lists:usort([source_module(Result) || Result <- Results]),
+
+ Ignores = lists:flatmap(
+ fun(Module) -> get_ignorelist(Module, Check) end, SourceModules),
+
+ UnusedFunctions = lists:flatmap(
+ fun(Mod) -> get_unused_compat_functions(Mod) end,
+ SourceModules),
+
+ ToIgnore = case get(results_to_ignore) of
+ undefined -> [];
+ RTI -> RTI
+ end,
+ %% Targets of calls made *by* dead compat functions: ignore them too.
+ NewToIgnore = [parse_xref_target(Result)
+ || Result <- Results,
+ lists:member(parse_xref_source(Result), UnusedFunctions)],
+ AllToIgnore = ToIgnore ++ NewToIgnore ++ [mfa(M, {F, A})
+ || {_, {M, F, A}} <- Ignores],
+ put(results_to_ignore, AllToIgnore),
+
+ [Result || Result <- Results,
+ not lists:member(parse_xref_result(Result), Ignores) andalso
+ not lists:member(parse_xref_result(Result), AllToIgnore) andalso
+ not lists:member(parse_xref_source(Result), UnusedFunctions)].
+
+source_module({Mt, _Ft, _At}) -> Mt;
+source_module({{Ms, _Fs, _As}, _Target}) -> Ms.
+
+%%
+%% Ignore behaviour functions, and explicitly marked functions
+%%
+%% Functions can be ignored by using
+%% -ignore_xref([{F, A}, {M, F, A}...]).
+get_ignorelist(Mod, Check) ->
+ %% Get ignore_xref attribute and combine them in one list
+ Attributes =
+ try
+ Mod:module_info(attributes)
+ catch
+ _Class:_Error -> []
+ end,
+
+ IgnoreXref =
+ [mfa(Mod, Value) || {ignore_xref, Values} <- Attributes, Value <- Values],
+
+ BehaviourCallbacks = get_behaviour_callbacks(Check, Mod, Attributes),
+
+ %% And create a flat {M, F, A} list
+ IgnoreXref ++ BehaviourCallbacks.
+
+get_behaviour_callbacks(exports_not_used, Mod, Attributes) ->
+ Behaviours = [Value || {behaviour, Values} <- Attributes, Value <- Values],
+ [{Mod, {Mod, F, A}}
+ || B <- Behaviours, {F, A} <- B:behaviour_info(callbacks)];
+get_behaviour_callbacks(_Check, _Mod, _Attributes) ->
+ [].
+
+%% Collects the compatibility functions declared in the module's
+%% erlang_version_support attribute that are dead code on the running
+%% OTP release (see code_version:update/1), so warnings about them can
+%% be suppressed.
+get_unused_compat_functions(Module) ->
+ OTPVersion = code_version:get_otp_version(),
+ Attributes = try
+ Module:module_info(attributes)
+ catch
+ _Class:_Error -> []
+ end,
+ CompatTuples = [Tuple
+ || {erlang_version_support, Tuples} <- Attributes,
+ Tuple <- Tuples],
+ get_unused_compat_functions(Module, OTPVersion, CompatTuples, []).
+
+%% For each {MinOTPVersion, Choices} tuple, only one of the pre/post
+%% variants survives the load-time patching; the other is returned
+%% here as unused.
+get_unused_compat_functions(_, _, [], Result) ->
+ Result;
+get_unused_compat_functions(Module,
+ OTPVersion,
+ [{MinOTPVersion, Choices} | Rest],
+ Result) ->
+ Functions = lists:map(
+ fun({_, Arity, Pre, Post}) ->
+ if
+ OTPVersion >= MinOTPVersion ->
+ %% We ignore the "pre" function.
+ mfa(Module, {Pre, Arity});
+ true ->
+ %% We ignore the "post" function.
+ mfa(Module, {Post, Arity})
+ end
+ end, Choices),
+ get_unused_compat_functions(Module, OTPVersion, Rest,
+ Result ++ Functions).
+
+%% Normalizes an ignore_xref entry ({F, A} or {M, F, A}) into the
+%% internal {Module, {M, F, A}} form used for comparisons.
+mfa(M, {F, A}) -> {M, {M, F, A}};
+mfa(M, MFA) -> {M, MFA}.
+
+%% An xref result is either a {CallerMFA, TargetMFA} pair or a bare
+%% MFA.  The helpers below project it onto the same {Module, MFA}
+%% shape: parse_xref_result keys on the source module but returns the
+%% target MFA; parse_xref_source/parse_xref_target pick the source or
+%% target MFA respectively.
+parse_xref_result({{SM, _, _}, MFAt}) -> {SM, MFAt};
+parse_xref_result({TM, _, _} = MFAt) -> {TM, MFAt}.
+
+parse_xref_source({{SM, _, _} = MFAt, _}) -> {SM, MFAt};
+parse_xref_source({TM, _, _} = MFAt) -> {TM, MFAt}.
+
+parse_xref_target({_, {TM, _, _} = MFAt}) -> {TM, MFAt};
+parse_xref_target({TM, _, _} = MFAt) -> {TM, MFAt}.
+
+%% -------------------------------------------------------------------
+%% Preparing results.
+%% -------------------------------------------------------------------
+
+result_to_warning(Check, {MFASource, MFATarget}) ->
+ {Filename, Line} = get_source(MFASource),
+ [{filename, Filename},
+ {line, Line},
+ {source, MFASource},
+ {target, MFATarget},
+ {check, Check}];
+result_to_warning(Check, MFA) ->
+ {Filename, Line} = get_source(MFA),
+ [{filename, Filename},
+ {line, Line},
+ {source, MFA},
+ {check, Check}].
+
+%%
+%% Given a MFA, find the file and LOC where it's defined. Note that
+%% xref doesn't work if there is no abstract_code, so we can avoid
+%% being too paranoid here.
+%%
+%% Returns {SourceFile, Line} for M:F/A, or {"", 0} when the module's
+%% object code cannot be found.
+get_source({M, F, A}) ->
+ case code:get_object_code(M) of
+ error -> {"", 0};
+ {M, Bin, _} -> find_function_source(M, F, A, Bin)
+ end.
+
+%% Looks up F/A in the module's abstract_code chunk.  The {ok, ...}
+%% assertion crashes if the beam lacks debug_info (see the note above:
+%% xref itself already requires abstract code, so this is acceptable).
+find_function_source(M, F, A, Bin) ->
+ AbstractCode = beam_lib:chunks(Bin, [abstract_code]),
+ {ok, {M, [{abstract_code, {raw_abstract_v1, Code}}]}} = AbstractCode,
+
+ %% Extract the original source filename from the abstract code
+ [Source|_] = [S || {attribute, _, file, {S, _}} <- Code],
+
+ %% Extract the line number for a given function def
+ Fn = [E || E <- Code,
+ element(1, E) == function,
+ element(3, E) == F,
+ element(4, E) == A],
+
+ case Fn of
+ [{function, Line, F, _, _}] when is_integer(Line) ->
+ {Source, Line};
+ %% Newer compilers emit erl_anno annotations instead of bare
+ %% integer line numbers; convert them.
+ [{function, Line, F, _, _}] ->
+ {Source, erl_anno:line(Line)};
+ %% do not crash if functions are exported, even though they
+ %% are not in the source.
+ %% parameterized modules add new/1 and instance/1 for example.
+ [] -> {Source, 0}
+ end.
+
+%% -------------------------------------------------------------------
+%% Reporting results.
+%% -------------------------------------------------------------------
+
+warnings_prn([]) ->
+ ok;
+warnings_prn(Comments) ->
+ Messages = lists:map(fun generate_comment/1, Comments),
+ lists:foreach(fun warning_prn/1, Messages).
+
+warning_prn(Message) ->
+ FullMessage = Message ++ "~n",
+ io:format(FullMessage, []).
+
+generate_comment(XrefWarning) ->
+ Filename = proplists:get_value(filename, XrefWarning),
+ Line = proplists:get_value(line, XrefWarning),
+ Source = proplists:get_value(source, XrefWarning),
+ Check = proplists:get_value(check, XrefWarning),
+ Target = proplists:get_value(target, XrefWarning),
+ Position = case {Filename, Line} of
+ {"", _} -> "";
+ {Filename, 0} -> [Filename, " "];
+ {Filename, Line} -> [Filename, ":",
+ integer_to_list(Line), " "]
+ end,
+ [Position, generate_comment_text(Check, Source, Target)].
+
+generate_comment_text(Check, {SM, SF, SA}, TMFA) ->
+ SMFA = io_lib:format("`~p:~p/~p`", [SM, SF, SA]),
+ generate_comment_text(Check, SMFA, TMFA);
+generate_comment_text(Check, SMFA, {TM, TF, TA}) ->
+ TMFA = io_lib:format("`~p:~p/~p`", [TM, TF, TA]),
+ generate_comment_text(Check, SMFA, TMFA);
+
+generate_comment_text(undefined_function_calls, SMFA, TMFA) ->
+ io_lib:format("~s calls undefined function ~s", [SMFA, TMFA]);
+generate_comment_text(undefined_functions, SMFA, _TMFA) ->
+ io_lib:format("~s is not defined as a function", [SMFA]);
+generate_comment_text(locals_not_used, SMFA, _TMFA) ->
+ io_lib:format("~s is an unused local function", [SMFA]);
+generate_comment_text(exports_not_used, SMFA, _TMFA) ->
+ io_lib:format("~s is an unused export", [SMFA]);
+generate_comment_text(deprecated_function_calls, SMFA, TMFA) ->
+ io_lib:format("~s calls deprecated function ~s", [SMFA, TMFA]);
+generate_comment_text(deprecated_functions, SMFA, _TMFA) ->
+ io_lib:format("~s is deprecated", [SMFA]).
diff --git a/deps/rabbit_common/src/app_utils.erl b/deps/rabbit_common/src/app_utils.erl
new file mode 100644
index 0000000000..df965575be
--- /dev/null
+++ b/deps/rabbit_common/src/app_utils.erl
@@ -0,0 +1,167 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(app_utils).
+
+-export([load_applications/1,
+ start_applications/1, start_applications/2, start_applications/3,
+ stop_applications/1, stop_applications/2, app_dependency_order/2,
+ app_dependencies/1]).
+
+-type error_handler() :: fun((atom(), any()) -> 'ok' | no_return()).
+-type restart_type() :: 'permanent' | 'transient' | 'temporary'.
+
+-spec load_applications([atom()]) -> 'ok'.
+-spec start_applications([atom()]) -> 'ok'.
+-spec stop_applications([atom()]) -> 'ok'.
+-spec start_applications([atom()], error_handler()) -> 'ok'.
+-spec start_applications([atom()], error_handler(), #{atom() => restart_type()}) -> 'ok'.
+-spec stop_applications([atom()], error_handler()) -> 'ok'.
+-spec app_dependency_order([atom()], boolean()) -> [digraph:vertex()].
+-spec app_dependencies(atom()) -> [atom()].
+-spec failed_to_start_app(atom(), any()) -> no_return().
+-spec failed_to_stop_app(atom(), any()) -> no_return().
+
+%%---------------------------------------------------------------------------
+%% Public API
+
+load_applications(Apps) ->
+ load_applications(queue:from_list(Apps), sets:new()),
+ ok.
+
+start_applications(Apps) ->
+ start_applications(
+ Apps, fun failed_to_start_app/2).
+
+stop_applications(Apps) ->
+ stop_applications(
+ Apps, fun failed_to_stop_app/2).
+
+failed_to_start_app(App, Reason) ->
+ throw({error, {cannot_start_application, App, Reason}}).
+
+failed_to_stop_app(App, Reason) ->
+ throw({error, {cannot_stop_application, App, Reason}}).
+
+start_applications(Apps, ErrorHandler) ->
+ start_applications(Apps, ErrorHandler, #{}).
+
+start_applications(Apps, ErrorHandler, RestartTypes) ->
+ manage_applications(fun lists:foldl/3,
+ fun(App) -> ensure_all_started(App, RestartTypes) end,
+ fun application:stop/1,
+ already_started,
+ ErrorHandler,
+ Apps).
+
+stop_applications(Apps, ErrorHandler) ->
+ manage_applications(fun lists:foldr/3,
+ fun(App) ->
+ rabbit_log:info("Stopping application '~s'", [App]),
+ application:stop(App)
+ end,
+ fun(App) -> ensure_all_started(App, #{}) end,
+ not_started,
+ ErrorHandler,
+ Apps).
+
+%% Returns all loaded applications sorted so that every application
+%% appears after its dependencies (edges run from each dependency to
+%% the app that requires it, so a topological sort yields start order).
+%% When StripUnreachable is true, vertices not reachable from RootApps
+%% are removed first.  The digraph is deleted even if sorting throws;
+%% cycle detection is delegated to rabbit_misc:build_acyclic_graph/3.
+app_dependency_order(RootApps, StripUnreachable) ->
+ {ok, G} = rabbit_misc:build_acyclic_graph(
+ fun ({App, _Deps}) -> [{App, App}] end,
+ fun ({App, Deps}) -> [{Dep, App} || Dep <- Deps] end,
+ [{App, app_dependencies(App)} ||
+ {App, _Desc, _Vsn} <- application:loaded_applications()]),
+ try
+ case StripUnreachable of
+ true -> digraph:del_vertices(G, digraph:vertices(G) --
+ digraph_utils:reachable(RootApps, G));
+ false -> ok
+ end,
+ digraph_utils:topsort(G)
+ after
+ true = digraph:delete(G)
+ end.
+
+%%---------------------------------------------------------------------------
+%% Private API
+
+%% Breadth-first load: Worklist is a queue of application names still
+%% to process, Loaded the set already handled.  Loading tolerates
+%% already_loaded (any other load error is thrown); each loaded app's
+%% declared dependencies are appended to the worklist so transitive
+%% dependencies get loaded too.
+load_applications(Worklist, Loaded) ->
+ case queue:out(Worklist) of
+ {empty, _WorkList} ->
+ ok;
+ {{value, App}, Worklist1} ->
+ case sets:is_element(App, Loaded) of
+ true -> load_applications(Worklist1, Loaded);
+ false -> case application:load(App) of
+ ok -> ok;
+ {error, {already_loaded, App}} -> ok;
+ Error -> throw(Error)
+ end,
+ load_applications(
+ queue:join(Worklist1,
+ queue:from_list(app_dependencies(App))),
+ sets:add_element(App, Loaded))
+ end
+ end.
+
+app_dependencies(App) ->
+ case application:get_key(App, applications) of
+ undefined -> [];
+ {ok, Lst} -> Lst
+ end.
+
+%% Shared driver for starting and stopping application lists.
+%% Iterate is lists:foldl/3 (start order) or lists:foldr/3 (stop in
+%% reverse order); Do is applied to each app, and the apps handled so
+%% far are accumulated.  {error, {SkipError, _}} (already_started /
+%% not_started) is silently skipped; any other error first rolls back
+%% the accumulated apps with Undo, then calls ErrorHandler (which is
+%% expected to throw — see failed_to_start_app/failed_to_stop_app).
+manage_applications(Iterate, Do, Undo, SkipError, ErrorHandler, Apps) ->
+ Iterate(fun (App, Acc) ->
+ case Do(App) of
+ ok -> [App | Acc];
+ %% ensure_all_started/2 returns {ok, StartedApps}:
+ %% record whatever was actually started.
+ {ok, []} -> Acc;
+ {ok, [App]} -> [App | Acc];
+ {ok, StartedApps} -> StartedApps ++ Acc;
+ {error, {SkipError, _}} -> Acc;
+ {error, Reason} ->
+ lists:foreach(Undo, Acc),
+ ErrorHandler(App, Reason)
+ end
+ end, [], Apps),
+ ok.
+
+%% Stops the Erlang VM when the rabbit application stops abnormally
+%% i.e. message store reaches its restart limit
+default_restart_type(rabbit) -> transient;
+default_restart_type(_) -> temporary.
+
+%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
+%%
+%% Code originally from Erlang/OTP source lib/kernel/src/application.erl
+%% and modified to use RestartTypes map
+%%
+%% Like application:ensure_all_started/2, but each application's
+%% restart type is looked up in the RestartTypes map (falling back to
+%% default_restart_type/1).  Returns {ok, StartedApps} in start order;
+%% on failure, every app started along the way is stopped again and
+%% {error, Reason} is returned.
+ensure_all_started(Application, RestartTypes) ->
+ case ensure_all_started(Application, RestartTypes, []) of
+ {ok, Started} ->
+ {ok, lists:reverse(Started)};
+ {error, Reason, Started} ->
+ _ = [application:stop(App) || App <- Started],
+ {error, Reason}
+ end.
+
+%% Recursive worker: Started accumulates newly started apps in reverse
+%% order.  A {not_started, Dependency} error triggers starting the
+%% dependency first, then retrying Application.
+ensure_all_started(Application, RestartTypes, Started) ->
+ RestartType = maps:get(Application, RestartTypes, default_restart_type(Application)),
+ case application:start(Application, RestartType) of
+ ok ->
+ {ok, [Application | Started]};
+ {error, {already_started, Application}} ->
+ {ok, Started};
+ {error, {not_started, Dependency}} ->
+ case ensure_all_started(Dependency, RestartTypes, Started) of
+ {ok, NewStarted} ->
+ ensure_all_started(Application, RestartTypes, NewStarted);
+ Error ->
+ Error
+ end;
+ {error, Reason} ->
+ {error, {Application, Reason}, Started}
+ end.
diff --git a/deps/rabbit_common/src/code_version.erl b/deps/rabbit_common/src/code_version.erl
new file mode 100644
index 0000000000..76e9c75c7f
--- /dev/null
+++ b/deps/rabbit_common/src/code_version.erl
@@ -0,0 +1,348 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+-module(code_version).
+
+-export([update/1, get_otp_version/0]).
+
+%%----------------------------------------------------------------------------
+%% API
+%%----------------------------------------------------------------------------
+
+%%----------------------------------------------------------------------------
+%% @doc Reads the abstract code of the given `Module`, modifies it to adapt to
+%% the current Erlang version, compiles and loads the result.
+%% This function finds the current Erlang version and then selects the function
+%% call for that version, removing all other versions declared in the original
+%% beam file. `code_version:update/1` is triggered by the module itself the
+%% first time an affected function is called.
+%%
+%% The purpose of this functionality is to support the new time API introduced
+%% in ERTS 7.0, while providing compatibility with previous versions.
+%%
+%% `Module` must contain an attribute `erlang_version_support` containing a list of
+%% tuples:
+%%
+%% {ErlangVersion, [{OriginalFunction, Arity, PreErlangVersionFunction,
+%% PostErlangVersionFunction}]}
+%%
+%% All these new functions may be exported, and implemented as follows:
+%%
+%% OriginalFunction() ->
+%% code_version:update(?MODULE),
+%% ?MODULE:OriginalFunction().
+%%
+%% PostErlangVersionFunction() ->
+%% %% implementation using new time API
+%% ..
+%%
+%% PreErlangVersionFunction() ->
+%% %% implementation using fallback solution
+%% ..
+%%
+%% CAUTION: Make sure that all functions in the module are patched this
+%% way! If you have "regular" functions, you might hit a race condition
+%% between the unload of the old module and the load of the patched
+%% module. If all functions are patched, loading will be serialized,
+%% thanks to a lock acquired by `code_version`. However, if you have
+%% regular functions, any call to them will bypass that lock and the old
+%% code will be reloaded from disk. This will kill the process trying to
+%% patch the module.
+%%
+%% end
+%%----------------------------------------------------------------------------
-spec update(atom()) -> ok | no_return().
update(Module) ->
    %% Read the module's abstract code from its beam file...
    AbsCode = get_abs_code(Module),
    %% ...keep only the implementations matching the running OTP release...
    Forms = replace_forms(Module, get_otp_version(), AbsCode),
    %% ...then compile and hot-load the result. All failures are thrown.
    Code = compile_forms(Forms),
    load_code(Module, Code).
+
+%%----------------------------------------------------------------------------
+%% Internal functions
+%%----------------------------------------------------------------------------
%% Replace `Module' in the code server with the freshly compiled
%% `Code', serialising concurrent patch attempts with a global lock.
%% The fake filename doubles as a marker: if code:which/1 already
%% reports it, another process patched the module first and we are done.
%%
%% Fix: the original released the lock only on the success path, so a
%% throw from code:load_binary/3 (or from unload/1) leaked the global
%% lock and permanently blocked later updates of the same module. The
%% release now lives in an `after' clause.
load_code(Module, Code) ->
    LockId = {{?MODULE, Module}, self()},
    FakeFilename = "Loaded by rabbit_common",
    global:set_lock(LockId, [node()]),
    try
        case code:which(Module) of
            FakeFilename ->
                ok;
            _ ->
                unload(Module),
                case code:load_binary(Module, FakeFilename, Code) of
                    {module, _}     -> ok;
                    {error, Reason} -> throw({cannot_load, Module, Reason})
                end
        end
    after
        %% Always release, even when the load throws.
        global:del_lock(LockId, [node()])
    end,
    ok.
+
%% Make room for the new version: purge old code (soft_purge, so
%% processes still running it are not killed) and mark the current
%% version old. Return values are deliberately ignored — a failure
%% just means there was nothing to purge or delete.
unload(Module) ->
    code:soft_purge(Module),
    code:delete(Module).
+
%% Compile abstract forms to a loadable binary. debug_info is kept so
%% the patched module can itself be re-read by get_abs_code/1 later.
%% The two ok clauses cover compile:forms/2 returning with or without
%% a warnings list; anything else is a compilation failure.
compile_forms(Forms) ->
    case compile:forms(Forms, [debug_info, return_errors]) of
        {ok, _ModName, Code} ->
            Code;
        {ok, _ModName, Code, _Warnings} ->
            Code;
        Error ->
            throw({cannot_compile_forms, Error})
    end.
+
%% Abstract code (list of forms) of `Module', read from its beam file.
get_abs_code(Module) ->
    get_forms(get_object_code(Module)).
+
%% Raw beam binary for `Module'; throws {not_found, Module} when the
%% object code is unavailable (code:get_object_code/1 returns `error',
%% e.g. for preloaded or cover-compiled modules).
get_object_code(Module) ->
    case code:get_object_code(Module) of
        {_Mod, Code, _File} ->
            Code;
        error ->
            throw({not_found, Module})
    end.
+
%% Extract the abstract_code chunk from a beam binary. Throws
%% no_abstract_code when the module was compiled without debug_info
%% or when beam_lib cannot read the chunk at all.
get_forms(Code) ->
    case beam_lib:chunks(Code, [abstract_code]) of
        {ok, {_, [{abstract_code, {raw_abstract_v1, Forms}}]}} ->
            Forms;
        {ok, {Module, [{abstract_code, no_abstract_code}]}} ->
            throw({no_abstract_code, Module});
        {error, beam_lib, Reason} ->
            throw({no_abstract_code, Reason})
    end.
+
%% Major OTP release as an integer (e.g. 24). Anything that does not
%% start with two digits (all releases below R17, e.g. "R16B03") maps
%% to 0, which is sufficient since nothing version-specific predates 17.
get_otp_version() ->
    Release = erlang:system_info(otp_release),
    case re:run(Release, "^[0-9][0-9]", [{capture, first, list}]) of
        {match, [Major]} -> list_to_integer(Major);
        nomatch          -> 0
    end.
+
%% {Function, Arity} pairs for the original (triggering) functions,
%% which are also the final names after patching.
get_original_pairs(VersionSupport) ->
    lists:map(fun({Orig, Arity, _Pre, _Post}) -> {Orig, Arity} end,
              VersionSupport).
+
%% {Function, Arity} pairs of the version-specific variants that do
%% NOT apply to the running release: the pre-version implementations
%% when IsPost is true, the post-version ones otherwise.
get_delete_pairs(IsPost, VersionSupport) ->
    [case IsPost of
         true  -> {Pre, Arity};
         false -> {Post, Arity}
     end || {_Orig, Arity, Pre, Post} <- VersionSupport].
+
%% {Function, Arity} pairs of the variants selected for this release
%% (post-version when IsPost, pre-version otherwise); these are the
%% ones subsequently renamed to the original function name.
get_rename_pairs(IsPost, VersionSupport) ->
    [case IsPost of
         true  -> {Post, Arity};
         false -> {Pre, Arity}
     end || {_Orig, Arity, Pre, Post} <- VersionSupport].
+
%% Pairs of {{RenamedFunction, Arity}, OriginalName}, used to look up
%% the final name while renaming the selected variants.
get_name_pairs(IsPost, VersionSupport) ->
    [case IsPost of
         true  -> {{Post, Arity}, Orig};
         false -> {{Pre, Arity}, Orig}
     end || {Orig, Arity, Pre, Post} <- VersionSupport].
+
%% Build the per-function transformation used by
%% replace_function_forms/2: any function whose {Name, Arity} is in
%% ToDelete is replaced by a comment node (presumably discarded when
%% the tree is reverted and compiled — see fold_syntax_tree/2);
%% everything else passes through untouched.
delete_abstract_functions(ToDelete) ->
    fun(Tree, Function) ->
            case lists:member(Function, ToDelete) of
                true ->
                    erl_syntax:comment(["Deleted unused function"]);
                false ->
                    Tree
            end
    end.
+
%% Build the transformation that renames each function in ToRename to
%% its original name (looked up in the ToName proplist of
%% {{Name, Arity}, OriginalName}) while keeping its clauses intact;
%% functions not listed pass through unchanged.
rename_abstract_functions(ToRename, ToName) ->
    fun(Tree, Function) ->
            case lists:member(Function, ToRename) of
                true ->
                    FunctionName = proplists:get_value(Function, ToName),
                    erl_syntax:function(
                      erl_syntax:atom(FunctionName),
                      erl_syntax:function_clauses(Tree));
                false ->
                    Tree
            end
    end.
+
%% Rewrite `AbsCode' so that only the implementations applicable to
%% `ErlangVersion' remain, driven by the module's
%% `erlang_version_support' attribute.
%% NOTE(review): lists:splitwith/2 only yields the intended Pre/Post
%% partition if the attribute entries are sorted by descending
%% version — confirm in the modules declaring the attribute.
replace_forms(Module, ErlangVersion, AbsCode) ->
    %% Obtain attribute containing the list of functions that must be updated
    Attr = Module:module_info(attributes),
    VersionSupport = proplists:get_value(erlang_version_support, Attr),
    {Pre, Post} = lists:splitwith(fun({Version, _Pairs}) ->
                                          Version > ErlangVersion
                                  end, VersionSupport),
    %% Replace functions in two passes: replace for Erlang versions > current
    %% first, Erlang versions =< current afterwards.
    replace_version_forms(
      true, replace_version_forms(false, AbsCode, get_version_functions(Pre)),
      get_version_functions(Post)).
+
%% Flatten per-version entries into a single list of
%% {Orig, Arity, Pre, Post} tuples.
get_version_functions(List) ->
    lists:flatmap(fun({_Version, Pairs}) -> Pairs end, List).
+
%% One rewriting pass over the abstract code for a given direction
%% (IsPost = true handles the entries applicable to the current
%% release, false the ones that are too new): delete the variants that
%% do not apply, rename the surviving variants to the original names,
%% fix the -dialyzer attribute and drop the now-dangling exports.
replace_version_forms(IsPost, AbsCode, VersionSupport) ->
    %% Get pairs of {Function, Arity} for the triggering functions, which
    %% are also the final function names.
    Original = get_original_pairs(VersionSupport),
    %% Get pairs of {Function, Arity} for the unused version
    ToDelete = get_delete_pairs(IsPost, VersionSupport),
    %% Delete original functions (those that trigger the code update) and
    %% the unused version ones
    DeleteFun = delete_abstract_functions(ToDelete ++ Original),
    AbsCode0 = replace_function_forms(AbsCode, DeleteFun),
    %% Get pairs of {Function, Arity} for the current version which must be
    %% renamed
    ToRename = get_rename_pairs(IsPost, VersionSupport),
    %% Get pairs of {Renamed, OriginalName} functions
    ToName = get_name_pairs(IsPost, VersionSupport),
    %% Rename versioned functions with their final name
    RenameFun = rename_abstract_functions(ToRename, ToName),
    AbsCode1 = replace_function_forms(AbsCode0, RenameFun),
    %% Adjust `-dialyzer` attribute.
    AbsCode2 = fix_dialyzer_attribute(AbsCode1, ToDelete, ToName),
    %% Remove exports of all versioned functions
    remove_exports(AbsCode2, ToDelete ++ ToRename).
+
%% Apply `Fun(Tree, {Name, Arity})' to every function form in the
%% abstract code, leaving all other forms untouched.
replace_function_forms(AbsCode, Fun) ->
    ReplaceFunction =
        fun(Tree) ->
                Function = erl_syntax_lib:analyze_function(Tree),
                Fun(Tree, Function)
        end,
    Filter = fun(Tree) ->
                     case erl_syntax:type(Tree) of
                         function -> ReplaceFunction(Tree);
                         _Other -> Tree
                     end
             end,
    fold_syntax_tree(Filter, AbsCode).
+
%% Rewrite every -dialyzer attribute so that it no longer references
%% deleted functions and follows renames; other attributes pass
%% through unchanged.
fix_dialyzer_attribute(AbsCode, ToDelete, ToName) ->
    FixDialyzer =
        fun(Tree) ->
                case erl_syntax_lib:analyze_attribute(Tree) of
                    {dialyzer, {_, Value}} ->
                        FixedValue = fix_dialyzer_attribute_value(Value,
                                                                  ToDelete,
                                                                  ToName),
                        %% Rebuild the attribute as a fresh syntax tree.
                        rebuild_dialyzer({dialyzer, FixedValue});
                    _ ->
                        Tree
                end
        end,
    Filter = fun(Tree) ->
                     case erl_syntax:type(Tree) of
                         attribute -> FixDialyzer(Tree);
                         _ -> Tree
                     end
             end,
    fold_syntax_tree(Filter, AbsCode).
+
%% Recursively fix one -dialyzer value: lists are fixed element-wise,
%% {Warning, FunList} tuples get their function list filtered/renamed,
%% and a bare warning atom passes through unchanged.
fix_dialyzer_attribute_value(Info, ToDelete, ToName)
  when is_list(Info) ->
    lists:map(
      fun(I) ->
              fix_dialyzer_attribute_value(I, ToDelete, ToName)
      end,
      Info);
fix_dialyzer_attribute_value({Warn, FunList}, ToDelete, ToName) ->
    FixedFunList = fix_dialyzer_attribute_funlist(FunList, ToDelete, ToName),
    {Warn, FixedFunList};
fix_dialyzer_attribute_value(Info, _, _)
  when is_atom(Info) ->
    Info.
+
%% Fix a -dialyzer function list: entries whose fix yields [] (i.e.
%% deleted functions) are dropped, {Name, Arity} pairs are renamed via
%% remove_or_rename/3, and anything else passes through unchanged.
fix_dialyzer_attribute_funlist(FunList, ToDelete, ToName)
  when is_list(FunList) ->
    lists:filtermap(
      fun(I) ->
              case fix_dialyzer_attribute_funlist(I, ToDelete, ToName) of
                  [] -> false;
                  R -> {true, R}
              end
      end,
      FunList);
fix_dialyzer_attribute_funlist({FunName, Arity} = Fun,
                               ToDelete, ToName)
  when is_atom(FunName) andalso is_integer(Arity) andalso Arity >= 0 ->
    remove_or_rename(Fun, ToDelete, ToName);
fix_dialyzer_attribute_funlist(FunList, _, _) ->
    FunList.
+
%% Map one {Name, Arity} pair: [] if the function was deleted, the
%% renamed pair if it appears in ToName, otherwise the pair unchanged.
remove_or_rename(Fun, ToDelete, ToName) ->
    Deleted = lists:member(Fun, ToDelete),
    NewName = proplists:get_value(Fun, ToName),
    if Deleted               -> [];
       NewName =:= undefined -> Fun;
       true                  -> setelement(1, Fun, NewName)
    end.
+
%% Rebuild a -dialyzer attribute form from the fixed value; the value
%% is wrapped in a one-element list as erl_syntax:attribute/2 expects.
rebuild_dialyzer({dialyzer, Value}) ->
    erl_syntax:attribute(
      erl_syntax:atom(dialyzer),
      [rebuild_dialyzer_value(Value)]).
+
%% Convert a fixed -dialyzer value back into syntax trees, mirroring
%% the shapes handled by fix_dialyzer_attribute_value/3.
rebuild_dialyzer_value(Value) when is_list(Value) ->
    erl_syntax:list(
      [rebuild_dialyzer_value(V) || V <- Value]);
rebuild_dialyzer_value({Warn, FunList}) ->
    erl_syntax:tuple(
      [rebuild_dialyzer_warn(Warn),
       rebuild_dialyzer_funlist(FunList)]);
rebuild_dialyzer_value(Warn) when is_atom(Warn) ->
    rebuild_dialyzer_warn(Warn).
+
%% A warning spec is either a list of warning atoms or a single atom.
rebuild_dialyzer_warn(Warn) when is_list(Warn) ->
    erl_syntax:list(
      [rebuild_dialyzer_warn(W) || W <- Warn]);
rebuild_dialyzer_warn(Warn) when is_atom(Warn) ->
    erl_syntax:atom(Warn).
+
%% Rebuild a list of {Name, Arity} pairs as syntax trees; the list
%% clause re-dispatches each pair to the tuple clause below.
rebuild_dialyzer_funlist(FunList) when is_list(FunList) ->
    erl_syntax:list(
      [rebuild_dialyzer_funlist({N, A}) || {N, A} <- FunList]);
rebuild_dialyzer_funlist({FunName, Arity}) ->
    erl_syntax:tuple([erl_syntax:atom(FunName), erl_syntax:integer(Arity)]).
+
%% Keep only the {Name, Arity} export entries not scheduled for removal.
filter_export_pairs(Info, ToDelete) ->
    [Pair || Pair <- Info, not lists:member(Pair, ToDelete)].
+
%% Strip the given {Name, Arity} pairs from every -export attribute so
%% the compiled module no longer exports deleted/renamed variants.
remove_exports(AbsCode, ToDelete) ->
    RemoveExports =
        fun(Tree) ->
                case erl_syntax_lib:analyze_attribute(Tree) of
                    {export, Info} ->
                        Remaining = filter_export_pairs(Info, ToDelete),
                        rebuild_export(Remaining);
                    _Other -> Tree
                end
        end,
    Filter = fun(Tree) ->
                     case erl_syntax:type(Tree) of
                         attribute -> RemoveExports(Tree);
                         _Other -> Tree
                     end
             end,
    fold_syntax_tree(Filter, AbsCode).
+
%% Rebuild an -export attribute from the remaining {Name, Arity} pairs.
rebuild_export(Args) ->
    erl_syntax:attribute(
      erl_syntax:atom(export),
      [erl_syntax:list(
         [erl_syntax:arity_qualifier(erl_syntax:atom(N),
                                     erl_syntax:integer(A))
          || {N, A} <- Args])]).
+
%% Apply `Filter' to every node of the forms (despite the name this is
%% a map — erl_syntax_lib:map/2 — not a fold) and revert the result
%% back into plain abstract forms ready for compile:forms/2.
fold_syntax_tree(Filter, Forms) ->
    Tree = erl_syntax:form_list(Forms),
    NewTree = erl_syntax_lib:map(Filter, Tree),
    erl_syntax:revert_forms(NewTree).
diff --git a/deps/rabbit_common/src/credit_flow.erl b/deps/rabbit_common/src/credit_flow.erl
new file mode 100644
index 0000000000..da1d9606c1
--- /dev/null
+++ b/deps/rabbit_common/src/credit_flow.erl
@@ -0,0 +1,210 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(credit_flow).
+
+%% Credit flow is controlled by a credit specification - a
+%% {InitialCredit, MoreCreditAfter} tuple. For the message sender,
+%% credit starts at InitialCredit and is decremented with every
+%% message sent. The message receiver grants more credit to the sender
+%% by sending it a {bump_credit, ...} control message after receiving
+%% MoreCreditAfter messages. The sender should pass this message in to
+%% handle_bump_msg/1. The sender should block when it goes below 0
+%% (check by invoking blocked/0). If a process is both a sender and a
+%% receiver it will not grant any more credit to its senders when it
+%% is itself blocked - thus the only processes that need to check
+%% blocked/0 are ones that read from network sockets.
+%%
+%% Credit flows left to right when process send messages down the
+%% chain, starting at the rabbit_reader, ending at the msg_store:
+%% reader -> channel -> queue_process -> msg_store.
+%%
+%% If the message store has a back log, then it will block the
+%% queue_process, which will block the channel, and finally the reader
+%% will be blocked, throttling down publishers.
+%%
+%% Once a process is unblocked, it will grant credits up the chain,
+%% possibly unblocking other processes:
+%% reader <--grant channel <--grant queue_process <--grant msg_store.
+%%
+%% Grepping the project files for `credit_flow` will reveal the places
+%% where this module is currently used, with extra comments on what's
+%% going on at each instance. Note that credit flow between mirrors
+%% synchronization has not been documented, since this doesn't affect
+%% client publishes.
+
+-define(DEFAULT_INITIAL_CREDIT, 200).
+-define(DEFAULT_MORE_CREDIT_AFTER, 100).
+
+-define(DEFAULT_CREDIT,
+ case get(credit_flow_default_credit) of
+ undefined ->
+ Val = rabbit_misc:get_env(rabbit, credit_flow_default_credit,
+ {?DEFAULT_INITIAL_CREDIT,
+ ?DEFAULT_MORE_CREDIT_AFTER}),
+ put(credit_flow_default_credit, Val),
+ Val;
+ Val -> Val
+ end).
+
+-export([send/1, send/2, ack/1, ack/2, handle_bump_msg/1, blocked/0, state/0]).
+-export([peer_down/1]).
+-export([block/1, unblock/1]).
+
+%%----------------------------------------------------------------------------
+
+-export_type([bump_msg/0]).
+
+-opaque(bump_msg() :: {pid(), non_neg_integer()}).
+-type(credit_spec() :: {non_neg_integer(), non_neg_integer()}).
+
+-spec send
+ (pid()) -> 'ok';
+ (credit_spec()) -> 'ok'.
+-spec ack(pid()) -> 'ok'.
+-spec ack(pid(), credit_spec()) -> 'ok'.
+-spec handle_bump_msg(bump_msg()) -> 'ok'.
+-spec blocked() -> boolean().
+-spec peer_down(pid()) -> 'ok'.
+
+%%----------------------------------------------------------------------------
+
+%% process dict update macro - eliminates the performance-hurting
+%% closure creation a HOF would introduce
+-define(UPDATE(Key, Default, Var, Expr),
+ begin
+ %% We deliberately allow Var to escape from the case here
+ %% to be used in Expr. Any temporary var we introduced
+ %% would also escape, and might conflict.
+ Var = case get(Key) of
+ undefined -> Default;
+ V -> V
+ end,
+ put(Key, Expr)
+ end).
+
+%% If the current process was blocked by credit flow within the last
+%% STATE_CHANGE_INTERVAL microseconds, state/0 will report it as "in
+%% flow" (state/0 converts the native-time difference to microseconds
+%% before comparing it against this constant).
+-define(STATE_CHANGE_INTERVAL, 1000000).
+
+-ifdef(CREDIT_FLOW_TRACING).
+-define(TRACE_BLOCKED(SELF, FROM), rabbit_event:notify(credit_flow_blocked,
+ [{process, SELF},
+ {process_info, erlang:process_info(SELF)},
+ {from, FROM},
+ {from_info, erlang:process_info(FROM)},
+ {timestamp,
+ os:system_time(
+ milliseconds)}])).
+-define(TRACE_UNBLOCKED(SELF, FROM), rabbit_event:notify(credit_flow_unblocked,
+ [{process, SELF},
+ {from, FROM},
+ {timestamp,
+ os:system_time(
+ milliseconds)}])).
+-else.
+-define(TRACE_BLOCKED(SELF, FROM), ok).
+-define(TRACE_UNBLOCKED(SELF, FROM), ok).
+-endif.
+
+%%----------------------------------------------------------------------------
+
+%% There are two "flows" here; of messages and of credit, going in
+%% opposite directions. The variable names "From" and "To" refer to
+%% the flow of credit, but the function names refer to the flow of
+%% messages. This is the clearest I can make it (since the function
+%% names form the API and want to make sense externally, while the
+%% variable names are used in credit bookkeeping and want to make
+%% sense internally).
+
+%% For any given pair of processes, ack/2 and send/2 must always be
+%% called with the same credit_spec().
+
%% Record that a message from peer `From' is being processed, spending
%% one unit of credit; when the counter would hit zero, mark ourselves
%% blocked on `From' (?UPDATE is a process-dictionary read-modify-write).
send(From) -> send(From, ?DEFAULT_CREDIT).

send(From, {InitialCredit, _MoreCreditAfter}) ->
    ?UPDATE({credit_from, From}, InitialCredit, C,
            if C == 1 -> block(From),
                         0;
               true   -> C - 1
            end).
+
%% Record receipt of a message whose sender is credited via `To';
%% every MoreCreditAfter receipts, grant that much credit in one bump
%% and reset the countdown.
ack(To) -> ack(To, ?DEFAULT_CREDIT).

ack(To, {_InitialCredit, MoreCreditAfter}) ->
    ?UPDATE({credit_to, To}, MoreCreditAfter, C,
            if C == 1 -> grant(To, MoreCreditAfter),
                         MoreCreditAfter;
               true   -> C - 1
            end).
+
%% Process a {bump_credit, {From, MoreCredit}} control message: add
%% the granted credit and, if this lifts the counter from =< 0 to > 0,
%% unblock ourselves from `From'.
handle_bump_msg({From, MoreCredit}) ->
    ?UPDATE({credit_from, From}, 0, C,
            if C =< 0 andalso C + MoreCredit > 0 -> unblock(From),
                                                    C + MoreCredit;
               true                              -> C + MoreCredit
            end).
+
%% True when this process has exhausted its credit towards at least
%% one peer (the credit_blocked entry is a non-empty list of pids).
blocked() -> case get(credit_blocked) of
                 undefined -> false;
                 []        -> false;
                 _         -> true
             end.
+
%% Flow state for monitoring/stats: `flow' if currently blocked, or if
%% we were last blocked less than ?STATE_CHANGE_INTERVAL microseconds
%% ago (credit_blocked_at is a native-unit monotonic timestamp set by
%% block/1); otherwise `running'.
state() -> case blocked() of
               true  -> flow;
               false -> case get(credit_blocked_at) of
                            undefined -> running;
                            B         -> Now = erlang:monotonic_time(),
                                         Diff = erlang:convert_time_unit(Now - B,
                                                                         native,
                                                                         micro_seconds),
                                         case Diff < ?STATE_CHANGE_INTERVAL of
                                             true  -> flow;
                                             false -> running
                                         end
                        end
           end.
+
%% Forget all credit bookkeeping for a peer that went down, first
%% unblocking ourselves from it so deferred grants can be flushed.
peer_down(Peer) ->
    %% In theory we could also remove it from credit_deferred here, but it
    %% doesn't really matter; at some point later we will drain
    %% credit_deferred and thus send messages into the void...
    unblock(Peer),
    erase({credit_from, Peer}),
    erase({credit_to, Peer}),
    ok.
+
+%% --------------------------------------------------------------------------
+
%% Send a credit bump to `To' — or, while we are ourselves blocked,
%% defer it (deferred bumps are flushed by unblock/1) so we do not
%% invite more traffic we cannot absorb.
grant(To, Quantity) ->
    Msg = {bump_credit, {self(), Quantity}},
    case blocked() of
        false -> To ! Msg;
        true  -> ?UPDATE(credit_deferred, [], Deferred, [{To, Msg} | Deferred])
    end.
+
%% Add `From' to the set of peers we are blocked on, timestamping the
%% unblocked->blocked transition for state/0.
block(From) ->
    ?TRACE_BLOCKED(self(), From),
    case blocked() of
        false -> put(credit_blocked_at, erlang:monotonic_time());
        true  -> ok
    end,
    ?UPDATE(credit_blocked, [], Blocks, [From | Blocks]).
+
%% Remove `From' from our blocked set; if that leaves us fully
%% unblocked, flush any credit grants deferred while we were blocked.
unblock(From) ->
    ?TRACE_UNBLOCKED(self(), From),
    ?UPDATE(credit_blocked, [], Blocks, Blocks -- [From]),
    case blocked() of
        false -> case erase(credit_deferred) of
                     undefined -> ok;
                     Credits   -> _ = [To ! Msg || {To, Msg} <- Credits],
                                  ok
                 end;
        true  -> ok
    end.
diff --git a/deps/rabbit_common/src/delegate.erl b/deps/rabbit_common/src/delegate.erl
new file mode 100644
index 0000000000..a73d5e64b1
--- /dev/null
+++ b/deps/rabbit_common/src/delegate.erl
@@ -0,0 +1,277 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(delegate).
+
+%% delegate is an alternative way of doing remote calls. Compared to
+%% the rpc module, it reduces inter-node communication. For example,
+%% if a message is routed to 1,000 queues on node A and needs to be
+%% propagated to nodes B and C, it would be nice to avoid doing 2,000
+%% remote casts to queue processes.
+%%
+%% An important issue here is preserving order - we need to make sure
+%% that messages from a certain channel to a certain queue take a
+%% consistent route, to prevent them being reordered. In fact all
+%% AMQP-ish things (such as queue declaration results and basic.get)
+%% must take the same route as well, to ensure that clients see causal
+%% ordering correctly. Therefore we have a rather generic mechanism
+%% here rather than just a message-reflector. That's also why we pick
+%% the delegate process to use based on a hash of the source pid.
+%%
+%% When a function is invoked using delegate:invoke/2,
+%% or delegate:invoke_no_result/2 on a group of pids, the pids are first split
+%% into local and remote ones. Remote processes are then grouped by
+%% node. The function is then invoked locally and on every node (using
+%% gen_server2:multi/4) as many times as there are processes on that
+%% node, sequentially.
+%%
+%% Errors returned when executing functions on remote nodes are re-raised
+%% in the caller.
+%%
+%% RabbitMQ starts a pool of delegate processes on boot. The size of
+%% the pool is configurable, the aim is to make sure we don't have too
+%% few delegates and thus limit performance on many-CPU machines.
+
+-behaviour(gen_server2).
+
+-export([start_link/1, start_link/2, invoke_no_result/2,
+ invoke/2, invoke/3, monitor/2, monitor/3, demonitor/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-record(state, {node, monitors, name}).
+
+%%----------------------------------------------------------------------------
+
+-export_type([monitor_ref/0]).
+
+-type monitor_ref() :: reference() | {atom(), pid()}.
+-type fun_or_mfa(A) :: fun ((pid()) -> A) | {atom(), atom(), [any()]}.
+
+-spec start_link
+ (non_neg_integer()) -> {'ok', pid()} | ignore | {'error', any()}.
+-spec invoke
+ ( pid(), fun_or_mfa(A)) -> A;
+ ([pid()], fun_or_mfa(A)) -> {[{pid(), A}], [{pid(), term()}]}.
+-spec invoke_no_result(pid() | [pid()], fun_or_mfa(any())) -> 'ok'.
+-spec monitor('process', pid()) -> monitor_ref().
+-spec demonitor(monitor_ref()) -> 'true'.
+
+%%----------------------------------------------------------------------------
+
+-define(HIBERNATE_AFTER_MIN, 1000).
+-define(DESIRED_HIBERNATE, 10000).
+-define(DEFAULT_NAME, "delegate_").
+
+%%----------------------------------------------------------------------------
+
%% Start delegate number `Num' of a pool, registered locally as
%% Prefix ++ Num (default prefix "delegate_"); started by delegate_sup.
start_link(Num) ->
    start_link(?DEFAULT_NAME, Num).

start_link(Name, Num) ->
    Name1 = delegate_name(Name, Num),
    gen_server2:start_link({local, Name1}, ?MODULE, [Name1], []).
+
%% Invoke FunOrMFA on the pid(s) via the default delegate pool.
invoke(Pid, FunOrMFA) ->
    invoke(Pid, ?DEFAULT_NAME, FunOrMFA).
+
%% Invoke FunOrMFA on a single pid — returning the plain result, or
%% re-raising a remote exception in the caller — or on a list of pids,
%% returning {Good, Bad} where Good is [{Pid, Result}] and Bad is
%% [{Pid, {Class, Reason, StackTrace}}].
invoke(Pid, _Name, FunOrMFA) when is_pid(Pid) andalso node(Pid) =:= node() ->
    %% Local pid: bypass the delegate entirely and call in-place.
    apply1(FunOrMFA, Pid);
invoke(Pid, Name, FunOrMFA) when is_pid(Pid) ->
    case invoke([Pid], Name, FunOrMFA) of
        {[{Pid, Result}], []} ->
            Result;
        {[], [{Pid, {Class, Reason, StackTrace}}]} ->
            %% Surface the remote failure as if it happened locally.
            erlang:raise(Class, Reason, StackTrace)
    end;

invoke([], _Name, _FunOrMFA) -> %% optimisation
    {[], []};
invoke([Pid], _Name, FunOrMFA) when node(Pid) =:= node() -> %% optimisation
    case safe_invoke(Pid, FunOrMFA) of
        {ok, _, Result} -> {[{Pid, Result}], []};
        {error, _, Error} -> {[], [{Pid, Error}]}
    end;
invoke(Pids, Name, FunOrMFA) when is_list(Pids) ->
    {LocalPids, Grouped} = group_pids_by_node(Pids),
    %% The use of multi_call is only safe because the timeout is
    %% infinity, and thus there is no process spawned in order to do
    %% the sending. Thus calls can't overtake preceding calls/casts.
    {Replies, BadNodes} =
        case maps:keys(Grouped) of
            [] -> {[], []};
            RemoteNodes -> gen_server2:multi_call(
                             RemoteNodes, delegate(self(), Name, RemoteNodes),
                             {invoke, FunOrMFA, Grouped}, infinity)
        end,
    %% Pids on nodes that did not answer are reported as nodedown exits.
    BadPids = [{Pid, {exit, {nodedown, BadNode}, []}} ||
                  BadNode <- BadNodes,
                  Pid <- maps:get(BadNode, Grouped)],
    %% Local results plus the per-node remote result lists, flattened.
    ResultsNoNode = lists:append([safe_invoke(LocalPids, FunOrMFA) |
                                  [Results || {_Node, Results} <- Replies]]),
    lists:foldl(
      fun ({ok, Pid, Result}, {Good, Bad}) -> {[{Pid, Result} | Good], Bad};
          ({error, Pid, Error}, {Good, Bad}) -> {Good, [{Pid, Error} | Bad]}
      end, {[], BadPids}, ResultsNoNode).
+
%% Delegated monitor using the default pool prefix.
monitor(process, Pid) ->
    ?MODULE:monitor(process, Pid, ?DEFAULT_NAME).
+
%% Monitor a local pid directly; for a remote pid, ask the delegate on
%% its node to monitor on our behalf and return {DelegateName, Pid} as
%% the monitor reference (see demonitor/1 and the 'DOWN' forwarding in
%% handle_info/2).
monitor(process, Pid, _Prefix) when node(Pid) =:= node() ->
    erlang:monitor(process, Pid);
monitor(process, Pid, Prefix) ->
    Name = delegate(Pid, Prefix, [node(Pid)]),
    gen_server2:cast(Name, {monitor, self(), Pid}),
    {Name, Pid}.
+
%% Undo monitor/2,3 for either a plain reference or a delegated
%% {Name, Pid} monitor.
%% NOTE(review): the delegated clause returns the result of
%% gen_server2:cast/2, not 'true' as the -spec above claims — confirm
%% no caller depends on the return value.
demonitor(Ref) when is_reference(Ref) ->
    erlang:demonitor(Ref);
demonitor({Name, Pid}) ->
    gen_server2:cast(Name, {demonitor, self(), Pid}).
+
%% Fire-and-forget variant of invoke/2: errors are swallowed, no
%% results are collected, and remote work is shipped with abcast.
invoke_no_result(Pid, FunOrMFA) when is_pid(Pid) andalso node(Pid) =:= node() ->
    %% Optimization, avoids calling invoke_no_result/3.
    %%
    %% This may seem like a cosmetic change at first but it actually massively reduces the memory usage in mirrored
    %% queues when ack/nack are sent to the node that hosts a mirror.
    %% This way binary references are not kept around unnecessarily.
    %%
    %% See https://github.com/rabbitmq/rabbitmq-common/issues/208#issuecomment-311308583 for a before/after
    %% comparison.
    _ = safe_invoke(Pid, FunOrMFA), %% we don't care about any error
    ok;
invoke_no_result(Pid, FunOrMFA) when is_pid(Pid) ->
    %% Optimization, avoids calling invoke_no_result/3
    RemoteNode = node(Pid),
    gen_server2:abcast([RemoteNode], delegate(self(), ?DEFAULT_NAME, [RemoteNode]),
                       {invoke, FunOrMFA,
                        maps:from_list([{RemoteNode, [Pid]}])}),
    ok;
invoke_no_result([], _FunOrMFA) -> %% optimisation
    ok;
invoke_no_result([Pid], FunOrMFA) when node(Pid) =:= node() -> %% optimisation
    _ = safe_invoke(Pid, FunOrMFA), %% must not die
    ok;
invoke_no_result([Pid], FunOrMFA) ->
    RemoteNode = node(Pid),
    gen_server2:abcast([RemoteNode], delegate(self(), ?DEFAULT_NAME, [RemoteNode]),
                       {invoke, FunOrMFA,
                        maps:from_list([{RemoteNode, [Pid]}])}),
    ok;
invoke_no_result(Pids, FunOrMFA) when is_list(Pids) ->
    %% General case: split local/remote, abcast per remote node, then
    %% run the local pids in-place.
    {LocalPids, Grouped} = group_pids_by_node(Pids),
    case maps:keys(Grouped) of
        [] -> ok;
        RemoteNodes -> gen_server2:abcast(
                         RemoteNodes, delegate(self(), ?DEFAULT_NAME, RemoteNodes),
                         {invoke, FunOrMFA, Grouped})
    end,
    _ = safe_invoke(LocalPids, FunOrMFA), %% must not die
    ok.
+
+%%----------------------------------------------------------------------------
+
%% Split a pid list into {LocalPids, RemoteMap} where RemoteMap maps
%% each remote node to the pids living on it (both accumulated in
%% reverse traversal order).
group_pids_by_node(Pids) ->
    Local = node(),
    Split = fun(Pid, {Locals, ByNode}) ->
                    case node(Pid) of
                        Local ->
                            {[Pid | Locals], ByNode};
                        Remote ->
                            {Locals,
                             maps:update_with(Remote,
                                              fun(L) -> [Pid | L] end,
                                              [Pid], ByNode)}
                    end
            end,
    lists:foldl(Split, {[], #{}}, Pids).
+
%% Registered name of pool member `Hash' under `Name', e.g.
%% ("delegate_", 3) -> 'delegate_3'. Dynamic atom creation is safe
%% here: only one atom per pool member, created at boot / first use.
delegate_name(Name, Hash) ->
    list_to_atom(lists:concat([Name, Hash])).
+
%% Pick — and cache in the process dictionary — the delegate this
%% process routes through. Hashing the source pid keeps a given sender
%% on the same delegate, preserving message ordering (see module doc).
%% NOTE(review): the cached name is reused regardless of Prefix on
%% subsequent calls — assumes a process only ever uses one prefix;
%% confirm at call sites.
delegate(Pid, Prefix, RemoteNodes) ->
    case get(delegate) of
        undefined -> Name = delegate_name(Prefix,
                                          erlang:phash2(Pid,
                                                        delegate_sup:count(RemoteNodes, Prefix))),
                     put(delegate, Name),
                     Name;
        Name -> Name
    end.
+
%% Apply FunOrMFA to each pid, catching everything so one failure
%% cannot abort the batch; each outcome is a tagged triple
%% {ok, Pid, Result} | {error, Pid, {Class, Reason, Stacktrace}}.
safe_invoke(Pids, FunOrMFA) when is_list(Pids) ->
    [safe_invoke(Pid, FunOrMFA) || Pid <- Pids];
safe_invoke(Pid, FunOrMFA) when is_pid(Pid) ->
    try
        {ok, Pid, apply1(FunOrMFA, Pid)}
    catch Class:Reason:Stacktrace ->
            {error, Pid, {Class, Reason, Stacktrace}}
    end.
+
%% Apply a fun or an {M, F, A} triple to `Arg', prepending `Arg' to
%% the argument list in the MFA case.
apply1({Mod, Fun, Args}, Arg) -> erlang:apply(Mod, Fun, [Arg | Args]);
apply1(Fun, Arg)              -> Fun(Arg).
+
+%%----------------------------------------------------------------------------
+
%% gen_server2 callback. State: our node (cached for Grouped-map
%% lookups), the monitor table and our registered name; hibernation
%% with backoff keeps idle delegates cheap.
init([Name]) ->
    {ok, #state{node = node(), monitors = dict:new(), name = Name}, hibernate,
     {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
%% Synchronous invoke (multi_call from delegate:invoke/3): run the
%% function on the pids grouped under this node and reply with the
%% list of tagged per-pid results.
handle_call({invoke, FunOrMFA, Grouped}, _From, State = #state{node = Node}) ->
    {reply, safe_invoke(maps:get(Node, Grouped), FunOrMFA), State,
     hibernate}.
+
%% Register MonitoringPid's interest in Pid: one real monitor per
%% monitored pid, shared by a gb_set of interested processes.
handle_cast({monitor, MonitoringPid, Pid},
            State = #state{monitors = Monitors}) ->
    Monitors1 = case dict:find(Pid, Monitors) of
                    {ok, {Ref, Pids}} ->
                        Pids1 = gb_sets:add_element(MonitoringPid, Pids),
                        dict:store(Pid, {Ref, Pids1}, Monitors);
                    error ->
                        Ref = erlang:monitor(process, Pid),
                        Pids = gb_sets:singleton(MonitoringPid),
                        dict:store(Pid, {Ref, Pids}, Monitors)
                end,
    {noreply, State#state{monitors = Monitors1}, hibernate};

%% Drop MonitoringPid's interest; the real monitor is released only
%% when the last interested process demonitors.
handle_cast({demonitor, MonitoringPid, Pid},
            State = #state{monitors = Monitors}) ->
    Monitors1 = case dict:find(Pid, Monitors) of
                    {ok, {Ref, Pids}} ->
                        Pids1 = gb_sets:del_element(MonitoringPid, Pids),
                        case gb_sets:is_empty(Pids1) of
                            true  -> erlang:demonitor(Ref),
                                     dict:erase(Pid, Monitors);
                            false -> dict:store(Pid, {Ref, Pids1}, Monitors)
                        end;
                    error ->
                        Monitors
                end,
    {noreply, State#state{monitors = Monitors1}, hibernate};

%% Asynchronous invoke (abcast from invoke_no_result): run and discard
%% the results.
handle_cast({invoke, FunOrMFA, Grouped}, State = #state{node = Node}) ->
    _ = safe_invoke(maps:get(Node, Grouped), FunOrMFA),
    {noreply, State, hibernate}.
+
%% Fan a 'DOWN' for a pid we monitor on behalf of others out to every
%% interested process, rewriting the monitor ref into the {Name, Pid}
%% form that monitor/3 handed out; then forget the entry.
handle_info({'DOWN', Ref, process, Pid, Info},
            State = #state{monitors = Monitors, name = Name}) ->
    {noreply,
     case dict:find(Pid, Monitors) of
         {ok, {Ref, Pids}} ->
             Msg = {'DOWN', {Name, Pid}, process, Pid, Info},
             gb_sets:fold(fun (MonitoringPid, _) -> MonitoringPid ! Msg end,
                          none, Pids),
             State#state{monitors = dict:erase(Pid, Monitors)};
         error ->
             State
     end, hibernate};

%% Drain any other message so the mailbox cannot accumulate.
handle_info(_Info, State) ->
    {noreply, State, hibernate}.
+
%% Nothing to clean up: monitors die with the process.
terminate(_Reason, _State) ->
    ok.
+
%% State record is carried over unchanged across code upgrades.
code_change(_OldVsn, State, _Extra) ->
    {ok, State}.
diff --git a/deps/rabbit_common/src/delegate_sup.erl b/deps/rabbit_common/src/delegate_sup.erl
new file mode 100644
index 0000000000..b92e1eaa46
--- /dev/null
+++ b/deps/rabbit_common/src/delegate_sup.erl
@@ -0,0 +1,55 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(delegate_sup).
+
+-behaviour(supervisor).
+
+-export([start_link/1, start_link/2, count/1, count/2, sup_name/1]).
+
+-export([init/1]).
+
+-define(SERVER, "delegate_").
+
+%%----------------------------------------------------------------------------
+
+-spec start_link(integer()) -> rabbit_types:ok_pid_or_error().
+-spec start_link(integer(), string()) -> rabbit_types:ok_pid_or_error().
+-spec count([node()]) -> integer().
+
+%%----------------------------------------------------------------------------
+
%% Registered name of the pool supervisor for a delegate prefix,
%% e.g. "delegate_" -> 'delegate_sup'. One atom per pool; safe.
sup_name(Prefix) ->
    list_to_atom(lists:append(Prefix, "sup")).
+
%% Start the delegate pool supervisor: `Count' workers named under
%% `Prefix' (default ?SERVER, "delegate_"); the supervisor itself
%% registers locally as sup_name(Prefix).
start_link(Count, Prefix) ->
    supervisor:start_link({local, sup_name(Prefix)}, ?MODULE, [Count, Prefix]).
start_link(Count) ->
    start_link(Count, ?SERVER).
+
%% Pool size using the default prefix; see count/2.
count(Nodes) ->
    count(Nodes, ?SERVER).
+
%% Size of the delegate pool, read from the first node in the list
%% whose supervisor answers; nodes that are down or whose supervisor
%% is stopping/absent are skipped. Falls back to 1 so callers can
%% always compute phash2(Pid, Count) safely.
count([], _) ->
    1;
count([Node | Nodes], Prefix) ->
    try
        length(supervisor:which_children({sup_name(Prefix), Node}))
    catch exit:{{R, _}, _} when R =:= nodedown; R =:= shutdown ->
            count(Nodes, Prefix);
          exit:{R, _} when R =:= noproc; R =:= normal; R =:= shutdown;
                           R =:= nodedown ->
            count(Nodes, Prefix)
    end.
+
+%%----------------------------------------------------------------------------
+
%% one_for_one supervisor with `Count' numbered, transient delegate
%% children; the huge shutdown value (16#ffffffff ms) effectively
%% waits for delegates to finish in-flight work before giving up.
init([Count, Name]) ->
    {ok, {{one_for_one, 10, 10},
          [{Num, {delegate, start_link, [Name, Num]},
            transient, 16#ffffffff, worker, [delegate]} ||
              Num <- lists:seq(0, Count - 1)]}}.
diff --git a/deps/rabbit_common/src/file_handle_cache.erl b/deps/rabbit_common/src/file_handle_cache.erl
new file mode 100644
index 0000000000..9220f40ce4
--- /dev/null
+++ b/deps/rabbit_common/src/file_handle_cache.erl
@@ -0,0 +1,1564 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(file_handle_cache).
+
+%% A File Handle Cache
+%%
+%% This extends a subset of the functionality of the Erlang file
+%% module. In the below, we use "file handle" to specifically refer to
+%% file handles, and "file descriptor" to refer to descriptors which
+%% are not file handles, e.g. sockets.
+%%
+%% Some constraints
+%% 1) This supports one writer, multiple readers per file. Nothing
+%% else.
+%% 2) Do not open the same file from different processes. Bad things
+%% may happen, especially for writes.
+%% 3) Writes are all appends. You cannot write to the middle of a
+%% file, although you can truncate and then append if you want.
+%% 4) There are read and write buffers. Feel free to use the read_ahead
+%% mode, but beware of the interaction between that buffer and the write
+%% buffer.
+%%
+%% Some benefits
+%% 1) You do not have to remember to call sync before close
+%% 2) Buffering is much more flexible than with the plain file module,
+%% and you can control when the buffer gets flushed out. This means
+%% that you can rely on reads-after-writes working, without having to
+%% call the expensive sync.
+%% 3) Unnecessary calls to position and sync get optimised out.
+%% 4) You can find out what your 'real' offset is, and what your
+%% 'virtual' offset is (i.e. where the hdl really is, and where it
+%% would be after the write buffer is written out).
+%%
+%% There is also a server component which serves to limit the number
+%% of open file descriptors. This is a hard limit: the server
+%% component will ensure that clients do not have more file
+%% descriptors open than it's configured to allow.
+%%
+%% On open, the client requests permission from the server to open the
+%% required number of file handles. The server may ask the client to
+%% close other file handles that it has open, or it may queue the
+%% request and ask other clients to close file handles they have open
+%% in order to satisfy the request. Requests are always satisfied in
+%% the order they arrive, even if a latter request (for a small number
+%% of file handles) can be satisfied before an earlier request (for a
+%% larger number of file handles). On close, the client sends a
+%% message to the server. These messages allow the server to keep
+%% track of the number of open handles. The client also keeps a
+%% gb_tree which is updated on every use of a file handle, mapping the
+%% time at which the file handle was last used (timestamp) to the
+%% handle. Thus the smallest key in this tree maps to the file handle
+%% that has not been used for the longest amount of time. This
+%% smallest key is included in the messages to the server. As such,
+%% the server keeps track of when the least recently used file handle
+%% was used *at the point of the most recent open or close* by each
+%% client.
+%%
+%% Note that this data can go very out of date, by the client using
+%% the least recently used handle.
+%%
+%% When the limit is exceeded (i.e. the number of open file handles is
+%% at the limit and there are pending 'open' requests), the server
+%% calculates the average age of the last reported least recently used
+%% file handle of all the clients. It then tells all the clients to
+%% close any handles not used for longer than this average, by
+%% invoking the callback the client registered. The client should
+%% receive this message and pass it into
+%% set_maximum_since_use/1. However, it is highly possible this age
+%% will be greater than the ages of all the handles the client knows
+%% of because the client has used its file handles in the mean
+%% time. Thus at this point the client reports to the server the
+%% current timestamp at which its least recently used file handle was
+%% last used. The server will check two seconds later that either it
+%% is back under the limit, in which case all is well again, or if
+%% not, it will calculate a new average age. Its data will be much
+%% more recent now, and so it is very likely that when this is
+%% communicated to the clients, the clients will close file handles.
+%% (In extreme cases, where it's very likely that all clients have
+%% used their open handles since they last sent in an update, which
+%% would mean that the average will never cause any file handles to
+%% be closed, the server can send out an average age of 0, resulting
+%% in all available clients closing all their file handles.)
+%%
+%% Care is taken to ensure that (a) processes which are blocked
+%% waiting for file descriptors to become available are not sent
+%% requests to close file handles; and (b) given it is known how many
+%% file handles a process has open, when the average age is forced to
+%% 0, close messages are only sent to enough processes to release the
+%% correct number of file handles and the list of processes is
+%% randomly shuffled. This ensures we don't cause processes to
+%% needlessly close file handles, and ensures that we don't always
+%% make such requests of the same processes.
+%%
+%% The advantage of this scheme is that there is only communication
+%% from the client to the server on open, close, and when in the
+%% process of trying to reduce file handle usage. There is no
+%% communication from the client to the server on normal file handle
+%% operations. This scheme forms a feed-back loop - the server does
+%% not care which file handles are closed, just that some are, and it
+%% checks this repeatedly when over the limit.
+%%
+%% Handles which are closed as a result of the server are put into a
+%% "soft-closed" state in which the handle is closed (data flushed out
+%% and sync'd first) but the state is maintained. The handle will be
+%% fully reopened again as soon as needed, thus users of this library
+%% do not need to worry about their handles being closed by the server
+%% - reopening them when necessary is handled transparently.
+%%
+%% The server also supports obtain, release and transfer. obtain/{0,1}
+%% blocks until a file descriptor is available, at which point the
+%% requesting process is considered to 'own' more descriptor(s).
+%% release/{0,1} is the inverse operation and releases previously obtained
+%% descriptor(s). transfer/{1,2} transfers ownership of file descriptor(s)
+%% between processes. It is non-blocking. Obtain has a
+%% lower limit, set by the ?OBTAIN_LIMIT/1 macro. File handles can use
+%% the entire limit, but will be evicted by obtain calls up to the
+%% point at which no more obtain calls can be satisfied by the obtains
+%% limit. Thus there will always be some capacity available for file
+%% handles. Processes that use obtain are never asked to return them,
+%% and they are not managed in any way by the server. It is simply a
+%% mechanism to ensure that processes that need file descriptors such
+%% as sockets can do so in such a way that the overall number of open
+%% file descriptors is managed.
+%%
+%% The callers of register_callback/3, obtain, and the argument of
+%% transfer are monitored, reducing the count of handles in use
+%% appropriately when the processes terminate.
+
+-behaviour(gen_server2).
+
+-export([register_callback/3]).
+-export([open/3, close/1, read/2, append/2, needs_sync/1, sync/1, position/2,
+ truncate/1, current_virtual_offset/1, current_raw_offset/1, flush/1,
+ copy/3, set_maximum_since_use/1, delete/1, clear/1,
+ open_with_absolute_path/3]).
+-export([obtain/0, obtain/1, release/0, release/1, transfer/1, transfer/2,
+ set_limit/1, get_limit/0, info_keys/0, with_handle/1, with_handle/2,
+ info/0, info/1, clear_read_cache/0, clear_process_read_cache/0]).
+-export([set_reservation/0, set_reservation/1, release_reservation/0]).
+-export([ulimit/0]).
+
+-export([start_link/0, start_link/2, init/1, handle_call/3, handle_cast/2,
+ handle_info/2, terminate/2, code_change/3, prioritise_cast/3]).
+
+-define(SERVER, ?MODULE).
+%% Reserve 3 handles for ra usage: wal, segment writer and a dets table
+-define(RESERVED_FOR_OTHERS, 100 + 3).
+
+-define(FILE_HANDLES_LIMIT_OTHER, 1024).
+-define(FILE_HANDLES_CHECK_INTERVAL, 2000).
+
+-define(OBTAIN_LIMIT(LIMIT), trunc((LIMIT * 0.9) - 2)).
+-define(CLIENT_ETS_TABLE, file_handle_cache_client).
+-define(ELDERS_ETS_TABLE, file_handle_cache_elders).
+
+%%----------------------------------------------------------------------------
+
+%% Per-path bookkeeping kept in the client's process dictionary under
+%% {Path, fhc_file}: current reader count and whether a writer exists
+%% (used to enforce the single-writer constraint).
+-record(file,
+        { reader_count,
+          has_writer
+        }).
+
+%% Per-open-file client-side state, stored in the process dictionary
+%% under {Ref, fhc_handle}. 'hdl' is either a prim_file handle or the
+%% atom 'closed' (soft-closed; reopened transparently on next use).
+-record(handle,
+        { hdl,
+          ref,
+          offset,
+          is_dirty,
+          write_buffer_size,
+          write_buffer_size_limit,
+          write_buffer,
+          read_buffer,
+          read_buffer_pos,
+          read_buffer_rem,        %% Num of bytes from pos to end
+          read_buffer_size,       %% Next size of read buffer to use
+          read_buffer_size_limit, %% Max size of read buffer to use
+          read_buffer_usage,      %% Bytes we have read from it, for tuning
+          at_eof,
+          path,
+          mode,
+          options,
+          is_write,
+          is_read,
+          last_used_at
+        }).
+
+%% Server (gen_server2) state: descriptor limits, open/obtained/
+%% reserved counts split by kind (file vs socket), queues of pending
+%% requests, and per-client state (see #cstate{}).
+-record(fhc_state,
+        { elders,
+          limit,
+          open_count,
+          open_pending,
+          obtain_limit, %%socket
+          obtain_count_socket,
+          obtain_count_file,
+          obtain_pending_socket,
+          obtain_pending_file,
+          clients,
+          timer_ref,
+          alarm_set,
+          alarm_clear,
+          reserve_count_socket,
+          reserve_count_file
+        }).
+
+%% Per-client record held by the server: the client pid, its
+%% registered {M,F,A} close callback, counts of handles it has
+%% opened/obtained/reserved, and whether it is currently blocked
+%% waiting for descriptors.
+-record(cstate,
+        { pid,
+          callback,
+          opened,
+          obtained_socket,
+          obtained_file,
+          blocked,
+          pending_closes,
+          reserved_socket,
+          reserved_file
+        }).
+
+%% A queued open/obtain request: what kind, who asked, how many
+%% descriptors, and the gen_server 'from' to reply to when granted.
+-record(pending,
+        { kind,
+          pid,
+          requested,
+          from
+        }).
+
+%%----------------------------------------------------------------------------
+%% Specs
+%%----------------------------------------------------------------------------
+
+-type ref() :: any().
+-type ok_or_error() :: 'ok' | {'error', any()}.
+-type val_or_error(T) :: {'ok', T} | {'error', any()}.
+-type position() :: ('bof' | 'eof' | non_neg_integer() |
+ {('bof' |'eof'), non_neg_integer()} |
+ {'cur', integer()}).
+-type offset() :: non_neg_integer().
+
+-spec register_callback(atom(), atom(), [any()]) -> 'ok'.
+-spec open
+ (file:filename(), [any()],
+ [{'write_buffer', (non_neg_integer() | 'infinity' | 'unbuffered')} |
+ {'read_buffer', (non_neg_integer() | 'unbuffered')}]) ->
+ val_or_error(ref()).
+-spec open_with_absolute_path
+ (file:filename(), [any()],
+ [{'write_buffer', (non_neg_integer() | 'infinity' | 'unbuffered')} |
+ {'read_buffer', (non_neg_integer() | 'unbuffered')}]) ->
+ val_or_error(ref()).
+-spec close(ref()) -> ok_or_error().
+-spec read
+ (ref(), non_neg_integer()) -> val_or_error([char()] | binary()) | 'eof'.
+-spec append(ref(), iodata()) -> ok_or_error().
+-spec sync(ref()) -> ok_or_error().
+-spec position(ref(), position()) -> val_or_error(offset()).
+-spec truncate(ref()) -> ok_or_error().
+-spec current_virtual_offset(ref()) -> val_or_error(offset()).
+-spec current_raw_offset(ref()) -> val_or_error(offset()).
+-spec flush(ref()) -> ok_or_error().
+-spec copy(ref(), ref(), non_neg_integer()) -> val_or_error(non_neg_integer()).
+-spec delete(ref()) -> ok_or_error().
+-spec clear(ref()) -> ok_or_error().
+-spec set_maximum_since_use(non_neg_integer()) -> 'ok'.
+-spec obtain() -> 'ok'.
+-spec obtain(non_neg_integer()) -> 'ok'.
+-spec release() -> 'ok'.
+-spec release(non_neg_integer()) -> 'ok'.
+-spec transfer(pid()) -> 'ok'.
+-spec transfer(pid(), non_neg_integer()) -> 'ok'.
+-spec with_handle(fun(() -> A)) -> A.
+-spec with_handle(non_neg_integer(), fun(() -> A)) -> A.
+-spec set_limit(non_neg_integer()) -> 'ok'.
+-spec get_limit() -> non_neg_integer().
+-spec info_keys() -> rabbit_types:info_keys().
+-spec info() -> rabbit_types:infos().
+-spec info([atom()]) -> rabbit_types:infos().
+-spec ulimit() -> 'unknown' | non_neg_integer().
+
+%%----------------------------------------------------------------------------
+-define(INFO_KEYS, [total_limit, total_used, sockets_limit, sockets_used]).
+
+%%----------------------------------------------------------------------------
+%% Public API
+%%----------------------------------------------------------------------------
+
+%% Starts the FHC server with the default alarm_handler callbacks for
+%% raising/clearing the file-descriptor alarm.
+start_link() ->
+    start_link(fun alarm_handler:set_alarm/1, fun alarm_handler:clear_alarm/1).
+
+start_link(AlarmSet, AlarmClear) ->
+    gen_server2:start_link({local, ?SERVER}, ?MODULE, [AlarmSet, AlarmClear],
+                           [{timeout, infinity}]).
+
+%% Registers {M,F,A} as the callback the server invokes (with the
+%% maximum age appended) when it wants this process to close handles;
+%% see set_maximum_since_use/1. The caller is monitored by the server.
+register_callback(M, F, A)
+  when is_atom(M) andalso is_atom(F) andalso is_list(A) ->
+    gen_server2:cast(?SERVER, {register_callback, self(), {M, F, A}}).
+
+%% Opens a file through the cache. The path is normalised to an
+%% absolute one first so that all bookkeeping is keyed consistently.
+open(Path, Mode, Options) ->
+    AbsPath = filename:absname(Path),
+    open_with_absolute_path(AbsPath, Mode, Options).
+
+%% Opens Path (which must already be absolute), enforcing the
+%% one-writer-many-readers rule via the per-path #file{} record in the
+%% process dictionary. On success returns {ok, Ref}; the Ref indexes
+%% the per-handle state. 'append' mode is rewritten to 'write' because
+%% all writes through this cache are appends anyway (see module doc).
+open_with_absolute_path(Path, Mode, Options) ->
+    File1 = #file { reader_count = RCount, has_writer = HasWriter } =
+        case get({Path, fhc_file}) of
+            File = #file {} -> File;
+            undefined       -> #file { reader_count = 0,
+                                       has_writer = false }
+        end,
+    Mode1 = append_to_write(Mode),
+    IsWriter = is_writer(Mode1),
+    case IsWriter andalso HasWriter of
+        true  -> {error, writer_exists};
+        false -> {ok, Ref} = new_closed_handle(Path, Mode1, Options),
+                 case get_or_reopen_timed([{Ref, new}]) of
+                     {ok, [_Handle1]} ->
+                         RCount1 = case is_reader(Mode1) of
+                                       true  -> RCount + 1;
+                                       false -> RCount
+                                   end,
+                         HasWriter1 = HasWriter orelse IsWriter,
+                         put({Path, fhc_file},
+                             File1 #file { reader_count = RCount1,
+                                           has_writer = HasWriter1 }),
+                         {ok, Ref};
+                     Error ->
+                         %% Opening failed: discard the closed handle we
+                         %% provisionally created.
+                         erase({Ref, fhc_handle}),
+                         Error
+                 end
+    end.
+
+%% Closes the handle for Ref (flush + sync + close, see hard_close/1).
+%% Closing an unknown/already-closed Ref is a no-op. If the close
+%% fails the (possibly updated) handle is put back so the caller can
+%% retry, and the error is returned.
+close(Ref) ->
+    case erase({Ref, fhc_handle}) of
+        undefined -> ok;
+        Handle    -> case hard_close(Handle) of
+                         ok               -> ok;
+                         {Error, Handle1} -> put_handle(Ref, Handle1),
+                                             Error
+                     end
+    end.
+
+%% Reads Count bytes from Ref, going through the client-side read
+%% buffer. Returns {ok, Data}, eof, or {error, Reason}. The write
+%% buffer is flushed first (with_flushed_handles) so reads observe
+%% prior appends; 'keep' preserves the read buffer across the call.
+read(Ref, Count) ->
+    with_flushed_handles(
+      [Ref], keep,
+      fun ([#handle { is_read = false }]) ->
+              {error, not_open_for_reading};
+          ([#handle{read_buffer_size_limit = 0,
+                    hdl = Hdl, offset = Offset} = Handle]) ->
+              %% The read buffer is disabled. This is just an
+              %% optimization: the clauses below can handle this case.
+              case prim_file_read(Hdl, Count) of
+                  {ok, Data} -> {{ok, Data},
+                                 [Handle#handle{offset = Offset+size(Data)}]};
+                  eof        -> {eof, [Handle #handle { at_eof = true }]};
+                  %% The handle must be returned in a list, like in the
+                  %% other clauses, so with_handles/3 unwraps the result
+                  %% and re-stores the handle correctly.
+                  Error      -> {Error, [Handle]}
+              end;
+          %% Fast path: the request is satisfied entirely from the buffer.
+          ([Handle = #handle{read_buffer       = Buf,
+                             read_buffer_pos   = BufPos,
+                             read_buffer_rem   = BufRem,
+                             read_buffer_usage = BufUsg,
+                             offset            = Offset}])
+            when BufRem >= Count ->
+              <<_:BufPos/binary, Res:Count/binary, _/binary>> = Buf,
+              {{ok, Res}, [Handle#handle{offset             = Offset + Count,
+                                         read_buffer_pos    = BufPos + Count,
+                                         read_buffer_rem    = BufRem - Count,
+                                         read_buffer_usage  = BufUsg + Count }]};
+          %% Slow path: buffer miss. Retune the buffer size, then read
+          %% the shortfall (or a whole new buffer, whichever is larger).
+          ([Handle0]) ->
+              maybe_reduce_read_cache([Ref]),
+              Handle = #handle{read_buffer      = Buf,
+                               read_buffer_pos  = BufPos,
+                               read_buffer_rem  = BufRem,
+                               read_buffer_size = BufSz,
+                               hdl              = Hdl,
+                               offset           = Offset}
+                  = tune_read_buffer_limit(Handle0, Count),
+              WantedCount = Count - BufRem,
+              case prim_file_read(Hdl, max(BufSz, WantedCount)) of
+                  {ok, Data} ->
+                      <<_:BufPos/binary, BufTl/binary>> = Buf,
+                      ReadCount = size(Data),
+                      case ReadCount < WantedCount of
+                          true ->
+                              %% Short read: return what we have and
+                              %% drop the (now invalid) buffer.
+                              OffSet1 = Offset + BufRem + ReadCount,
+                              {{ok, <<BufTl/binary, Data/binary>>},
+                               [reset_read_buffer(
+                                  Handle#handle{offset = OffSet1})]};
+                          false ->
+                              %% Full read: hand back Count bytes and
+                              %% keep the surplus as the new buffer.
+                              <<Hd:WantedCount/binary, _/binary>> = Data,
+                              OffSet1 = Offset + BufRem + WantedCount,
+                              BufRem1 = ReadCount - WantedCount,
+                              {{ok, <<BufTl/binary, Hd/binary>>},
+                               [Handle#handle{offset            = OffSet1,
+                                              read_buffer       = Data,
+                                              read_buffer_pos   = WantedCount,
+                                              read_buffer_rem   = BufRem1,
+                                              read_buffer_usage = WantedCount}]}
+                      end;
+                  eof ->
+                      {eof, [Handle #handle { at_eof = true }]};
+                  Error ->
+                      {Error, [reset_read_buffer(Handle)]}
+              end
+      end).
+
+%% Appends Data to the file behind Ref. With buffering disabled
+%% (limit 0) the data goes straight to disk; otherwise it is
+%% accumulated (in reverse order) in the write buffer and flushed
+%% once the buffer exceeds its size limit. Always seeks to eof first,
+%% since this cache only supports appends.
+append(Ref, Data) ->
+    with_handles(
+      [Ref],
+      fun ([#handle { is_write = false }]) ->
+              {error, not_open_for_writing};
+          ([Handle]) ->
+              case maybe_seek(eof, Handle) of
+                  {{ok, _Offset}, #handle { hdl = Hdl, offset = Offset,
+                                            write_buffer_size_limit = 0,
+                                            at_eof = true } = Handle1} ->
+                      Offset1 = Offset + iolist_size(Data),
+                      {prim_file_write(Hdl, Data),
+                       [Handle1 #handle { is_dirty = true, offset = Offset1 }]};
+                  {{ok, _Offset}, #handle { write_buffer = WriteBuffer,
+                                            write_buffer_size = Size,
+                                            write_buffer_size_limit = Limit,
+                                            at_eof = true } = Handle1} ->
+                      WriteBuffer1 = [Data | WriteBuffer],
+                      Size1 = Size + iolist_size(Data),
+                      Handle2 = Handle1 #handle { write_buffer = WriteBuffer1,
+                                                  write_buffer_size = Size1 },
+                      case Limit =/= infinity andalso Size1 > Limit of
+                          true  -> {Result, Handle3} = write_buffer(Handle2),
+                                   {Result, [Handle3]};
+                          false -> {ok, [Handle2]}
+                      end;
+                  {{error, _} = Error, Handle1} ->
+                      {Error, [Handle1]}
+              end
+      end).
+
+%% Flushes the write buffer (via with_flushed_handles) and, if any
+%% unsynced data was written, fsyncs the underlying file. A clean
+%% handle is a no-op.
+sync(Ref) ->
+    with_flushed_handles(
+      [Ref], keep,
+      fun ([#handle { is_dirty = false, write_buffer = [] }]) ->
+              ok;
+          ([Handle = #handle { hdl = Hdl,
+                               is_dirty = true, write_buffer = [] }]) ->
+              case prim_file_sync(Hdl) of
+                  ok    -> {ok, [Handle #handle { is_dirty = false }]};
+                  Error -> {Error, [Handle]}
+              end
+      end).
+
+%% True if a sync/1 would actually do work: dirty handle or pending
+%% write-buffer contents.
+needs_sync(Ref) ->
+    %% This must *not* use with_handles/2; see bug 25052
+    case get({Ref, fhc_handle}) of
+        #handle { is_dirty = false, write_buffer = [] } -> false;
+        #handle {}                                      -> true
+    end.
+
+%% Seeks to NewOffset (flushing the write buffer first). The read
+%% buffer is kept; maybe_seek/2 adjusts or resets it as needed.
+position(Ref, NewOffset) ->
+    with_flushed_handles(
+      [Ref], keep,
+      fun ([Handle]) -> {Result, Handle1} = maybe_seek(NewOffset, Handle),
+                        {Result, [Handle1]}
+      end).
+
+%% Truncates the file at the current (flushed) offset; resets the read
+%% buffer via the default with_flushed_handles mode.
+truncate(Ref) ->
+    with_flushed_handles(
+      [Ref],
+      fun ([Handle1 = #handle { hdl = Hdl }]) ->
+              case prim_file:truncate(Hdl) of
+                  ok    -> {ok, [Handle1 #handle { at_eof = true }]};
+                  Error -> {Error, [Handle1]}
+              end
+      end).
+
+%% Offset as it will be once the write buffer has been flushed:
+%% real offset plus buffered bytes when positioned at eof for writing.
+current_virtual_offset(Ref) ->
+    with_handles([Ref], fun ([#handle { at_eof = true, is_write = true,
+                                        offset = Offset,
+                                        write_buffer_size = Size }]) ->
+                                {ok, Offset + Size};
+                            ([#handle { offset = Offset }]) ->
+                                {ok, Offset}
+                        end).
+
+%% Offset the OS file descriptor is really at (ignores write buffer).
+current_raw_offset(Ref) ->
+    with_handles([Ref], fun ([Handle]) -> {ok, Handle #handle.offset} end).
+
+%% Forces the write buffer out to the OS (no fsync).
+flush(Ref) ->
+    with_flushed_handles([Ref], fun ([Handle]) -> {ok, [Handle]} end).
+
+%% Copies Count bytes from Src (must be readable) to Dest (must be
+%% writable), advancing both offsets by the number actually copied and
+%% marking the destination dirty.
+copy(Src, Dest, Count) ->
+    with_flushed_handles(
+      [Src, Dest],
+      fun ([SHandle = #handle { is_read = true, hdl = SHdl, offset = SOffset },
+            DHandle = #handle { is_write = true, hdl = DHdl, offset = DOffset }]
+          ) ->
+              case prim_file:copy(SHdl, DHdl, Count) of
+                  {ok, Count1} = Result1 ->
+                      {Result1,
+                       [SHandle #handle { offset = SOffset + Count1 },
+                        DHandle #handle { offset = DOffset + Count1,
+                                          is_dirty = true }]};
+                  Error ->
+                      {Error, [SHandle, DHandle]}
+              end;
+          (_Handles) ->
+              {error, incorrect_handle_modes}
+      end).
+
+%% Closes the handle (discarding any buffered writes: is_dirty and the
+%% write buffer are cleared first) and deletes the file from disk.
+%% Unknown Ref is a no-op.
+delete(Ref) ->
+    case erase({Ref, fhc_handle}) of
+        undefined ->
+            ok;
+        Handle = #handle { path = Path } ->
+            case hard_close(Handle #handle { is_dirty = false,
+                                             write_buffer = [] }) of
+                ok               -> prim_file:delete(Path);
+                {Error, Handle1} -> put_handle(Ref, Handle1),
+                                    Error
+            end
+    end.
+
+%% Empties the file: discards the write buffer, seeks to the
+%% beginning and truncates. A file already empty and at bof is a
+%% no-op.
+clear(Ref) ->
+    with_handles(
+      [Ref],
+      fun ([#handle { at_eof = true, write_buffer_size = 0, offset = 0 }]) ->
+              ok;
+          ([Handle]) ->
+              case maybe_seek(bof, Handle#handle{write_buffer = [],
+                                                 write_buffer_size = 0}) of
+                  {{ok, 0}, Handle1 = #handle { hdl = Hdl }} ->
+                      case prim_file:truncate(Hdl) of
+                          ok    -> {ok, [Handle1 #handle { at_eof = true }]};
+                          Error -> {Error, [Handle1]}
+                      end;
+                  {{error, _} = Error, Handle1} ->
+                      {Error, [Handle1]}
+              end
+      end).
+
+%% Server-driven eviction entry point (invoked via the registered
+%% callback): soft-closes every open handle in this process that has
+%% not been used for at least MaximumAge microseconds. If nothing was
+%% closed, reports the current oldest timestamp back to the server via
+%% age_tree_change/0 so its view of this client stays fresh.
+set_maximum_since_use(MaximumAge) ->
+    Now = erlang:monotonic_time(),
+    case lists:foldl(
+           fun ({{Ref, fhc_handle},
+                 Handle = #handle { hdl = Hdl, last_used_at = Then }}, Rep) ->
+                   case Hdl =/= closed andalso
+                            erlang:convert_time_unit(Now - Then,
+                                                     native,
+                                                     micro_seconds)
+                            >= MaximumAge of
+                       true  -> soft_close(Ref, Handle) orelse Rep;
+                       false -> Rep
+                   end;
+               (_KeyValuePair, Rep) ->
+                   Rep
+           end, false, get()) of
+        false -> age_tree_change(), ok;
+        true  -> ok
+    end.
+
+%% Single-descriptor conveniences. Note the defaults: obtain/release
+%% default to the 'socket' kind, reservations to 'file'.
+obtain()             -> obtain(1).
+set_reservation()    -> set_reservation(1).
+release()            -> release(1).
+release_reservation() -> release_reservation(file).
+transfer(Pid)        -> transfer(Pid, 1).
+
+obtain(Count)          -> obtain(Count, socket).
+set_reservation(Count) -> set_reservation(Count, file).
+release(Count)         -> release(Count, socket).
+
+with_handle(Fun) ->
+    with_handle(1, Fun).
+
+%% Runs Fun() while holding N 'file'-kind descriptors, releasing them
+%% afterwards even if Fun throws.
+with_handle(N, Fun) ->
+    ok = obtain(N, file),
+    try Fun()
+    after ok = release(N, file)
+    end.
+
+%% Blocks until the server grants Count descriptors of the given Type
+%% (socket | file) to the calling process.
+obtain(Count, Type) when Count > 0 ->
+    %% If the FHC isn't running, obtains succeed immediately.
+    case whereis(?SERVER) of
+        undefined -> ok;
+        _ -> gen_server2:call(
+                 ?SERVER, {obtain, Count, Type, self()}, infinity)
+    end.
+
+%% Non-blocking: records a descriptor reservation for this process.
+set_reservation(Count, Type) when Count > 0 ->
+    %% If the FHC isn't running, reserve succeed immediately.
+    case whereis(?SERVER) of
+        undefined -> ok;
+        _         -> gen_server2:cast(?SERVER, {set_reservation, Count, Type, self()})
+    end.
+
+release(Count, Type) when Count > 0 ->
+    gen_server2:cast(?SERVER, {release, Count, Type, self()}).
+
+release_reservation(Type) ->
+    gen_server2:cast(?SERVER, {release_reservation, Type, self()}).
+
+%% Transfers ownership of Count obtained descriptors from self() to
+%% Pid; non-blocking.
+transfer(Pid, Count) when Count > 0 ->
+    gen_server2:cast(?SERVER, {transfer, Count, self(), Pid}).
+
+set_limit(Limit) ->
+    gen_server2:call(?SERVER, {set_limit, Limit}, infinity).
+
+get_limit() ->
+    gen_server2:call(?SERVER, get_limit, infinity).
+
+info_keys() -> ?INFO_KEYS.
+
+info() -> info(?INFO_KEYS).
+info(Items) -> gen_server2:call(?SERVER, {info, Items}, infinity).
+
+%% Asks the server to tell every client to drop its read buffers.
+clear_read_cache() ->
+    gen_server2:cast(?SERVER, clear_read_cache).
+
+%% Drops the read buffer of every handle held by the calling process
+%% (only those with a non-empty buffer are touched).
+clear_process_read_cache() ->
+    [
+     begin
+         Handle1 = reset_read_buffer(Handle),
+         put({Ref, fhc_handle}, Handle1)
+     end ||
+        {{Ref, fhc_handle}, Handle} <- get(),
+        size(Handle#handle.read_buffer) > 0
+    ].
+
+%%----------------------------------------------------------------------------
+%% Internal functions
+%%----------------------------------------------------------------------------
+
+%% Thin wrappers around prim_file operations that record timing/size
+%% statistics via file_handle_cache_stats.
+prim_file_read(Hdl, Size) ->
+    file_handle_cache_stats:update(
+      io_read, Size, fun() -> prim_file:read(Hdl, Size) end).
+
+prim_file_write(Hdl, Bytes) ->
+    file_handle_cache_stats:update(
+      io_write, iolist_size(Bytes), fun() -> prim_file:write(Hdl, Bytes) end).
+
+prim_file_sync(Hdl) ->
+    file_handle_cache_stats:update(io_sync,  fun() -> prim_file:sync(Hdl) end).
+
+prim_file_position(Hdl, NewOffset) ->
+    file_handle_cache_stats:update(
+      io_seek, fun() -> prim_file:position(Hdl, NewOffset) end).
+
+is_reader(Mode) -> lists:member(read, Mode).
+
+is_writer(Mode) -> lists:member(write, Mode).
+
+%% Rewrites 'append' mode to 'write': the cache only ever appends, and
+%% it manages the eof positioning itself (see append/2).
+append_to_write(Mode) ->
+    case lists:member(append, Mode) of
+        true  -> [write | Mode -- [append, write]];
+        false -> Mode
+    end.
+
+with_handles(Refs, Fun) ->
+    with_handles(Refs, reset, Fun).
+
+%% Core access wrapper: fetches (reopening if soft-closed) the handles
+%% for Refs, optionally resets their read buffers, and applies Fun.
+%% Fun's contract: return {Result, Handles1} with Handles1 a *list* of
+%% updated handles (re-stored via put_handle/2, in Refs order), or a
+%% bare Result which is passed through with no handle update.
+with_handles(Refs, ReadBuffer, Fun) ->
+    case get_or_reopen_timed([{Ref, reopen} || Ref <- Refs]) of
+        {ok, Handles0} ->
+            Handles = case ReadBuffer of
+                          reset -> [reset_read_buffer(H) || H <- Handles0];
+                          keep  -> Handles0
+                      end,
+            case Fun(Handles) of
+                {Result, Handles1} when is_list(Handles1) ->
+                    _ = lists:zipwith(fun put_handle/2, Refs, Handles1),
+                    Result;
+                Result ->
+                    Result
+            end;
+        Error ->
+            Error
+    end.
+
+with_flushed_handles(Refs, Fun) ->
+    with_flushed_handles(Refs, reset, Fun).
+
+%% Like with_handles/3, but flushes each handle's write buffer first;
+%% if any flush fails, Fun is not called and the error is returned
+%% (handles are still re-stored).
+with_flushed_handles(Refs, ReadBuffer, Fun) ->
+    with_handles(
+      Refs, ReadBuffer,
+      fun (Handles) ->
+              case lists:foldl(
+                     fun (Handle, {ok, HandlesAcc}) ->
+                             {Res, Handle1} = write_buffer(Handle),
+                             {Res, [Handle1 | HandlesAcc]};
+                         (Handle, {Error, HandlesAcc}) ->
+                             {Error, [Handle | HandlesAcc]}
+                     end, {ok, []}, Handles) of
+                  {ok, Handles1} ->
+                      Fun(lists:reverse(Handles1));
+                  {Error, Handles1} ->
+                      {Error, lists:reverse(Handles1)}
+              end
+      end).
+
+%% Timed wrapper so open-attempt latency shows up in the stats.
+get_or_reopen_timed(RefNewOrReopens) ->
+    file_handle_cache_stats:update(
+      io_file_handle_open_attempt, fun() -> get_or_reopen(RefNewOrReopens) end).
+
+%% Fetches the handles for the given {Ref, new|reopen} pairs. If any
+%% are soft-closed, asks the server for permission to (re)open them;
+%% the server may answer 'close', in which case we soft-close all our
+%% open handles and retry, yielding descriptors back to the pool.
+get_or_reopen(RefNewOrReopens) ->
+    case partition_handles(RefNewOrReopens) of
+        {OpenHdls, []} ->
+            {ok, [Handle || {_Ref, Handle} <- OpenHdls]};
+        {OpenHdls, ClosedHdls} ->
+            Oldest = oldest(get_age_tree(),
+                            fun () -> erlang:monotonic_time() end),
+            case gen_server2:call(?SERVER, {open, self(), length(ClosedHdls),
+                                            Oldest}, infinity) of
+                ok ->
+                    case reopen(ClosedHdls) of
+                        {ok, RefHdls}  -> sort_handles(RefNewOrReopens,
+                                                       OpenHdls, RefHdls, []);
+                        Error          -> Error
+                    end;
+                close ->
+                    [soft_close(Ref, Handle) ||
+                        {{Ref, fhc_handle}, Handle = #handle { hdl = Hdl }} <-
+                            get(),
+                        Hdl =/= closed],
+                    get_or_reopen(RefNewOrReopens)
+            end
+    end.
+
+reopen(ClosedHdls) -> reopen(ClosedHdls, get_age_tree(), []).
+
+%% Re-opens each soft-closed handle in turn, restoring its previous
+%% offset and inserting it into the age tree. On a reopen (as opposed
+%% to a first open) 'read' is added to the mode so the stored offset
+%% can be honoured. On failure, tells the server to release the
+%% descriptor slots we were granted for the not-yet-opened handles.
+reopen([], Tree, RefHdls) ->
+    put_age_tree(Tree),
+    {ok, lists:reverse(RefHdls)};
+reopen([{Ref, NewOrReopen, Handle = #handle { hdl          = closed,
+                                              path         = Path,
+                                              mode         = Mode0,
+                                              offset       = Offset,
+                                              last_used_at = undefined }} |
+        RefNewOrReopenHdls] = ToOpen, Tree, RefHdls) ->
+    Mode = case NewOrReopen of
+               new    -> Mode0;
+               reopen -> file_handle_cache_stats:update(io_reopen),
+                         [read | Mode0]
+           end,
+    case prim_file:open(Path, Mode) of
+        {ok, Hdl} ->
+            Now = erlang:monotonic_time(),
+            {{ok, _Offset}, Handle1} =
+                maybe_seek(Offset, reset_read_buffer(
+                                     Handle#handle{hdl              = Hdl,
+                                                   offset           = 0,
+                                                   last_used_at     = Now})),
+            put({Ref, fhc_handle}, Handle1),
+            reopen(RefNewOrReopenHdls, gb_trees:insert({Now, Ref}, true, Tree),
+                   [{Ref, Handle1} | RefHdls]);
+        Error ->
+            %% NB: none of the handles in ToOpen are in the age tree
+            Oldest = oldest(Tree, fun () -> undefined end),
+            [gen_server2:cast(?SERVER, {close, self(), Oldest}) || _ <- ToOpen],
+            put_age_tree(Tree),
+            Error
+    end.
+
+%% Splits the requested refs into {Open, Closed} according to the
+%% current state of each handle in the process dictionary, preserving
+%% request order (foldr).
+partition_handles(RefNewOrReopens) ->
+    lists:foldr(
+      fun ({Ref, NewOrReopen}, {Open, Closed}) ->
+              case get({Ref, fhc_handle}) of
+                  #handle { hdl = closed } = Handle ->
+                      {Open, [{Ref, NewOrReopen, Handle} | Closed]};
+                  #handle {} = Handle ->
+                      {[{Ref, Handle} | Open], Closed}
+              end
+      end, {[], []}, RefNewOrReopens).
+
+%% Merges the two (already request-ordered) sub-lists of handles back
+%% into the order of the original request list.
+sort_handles([], [], [], Acc) ->
+    {ok, lists:reverse(Acc)};
+sort_handles([{Ref, _} | RefHdls], [{Ref, Handle} | RefHdlsA], RefHdlsB, Acc) ->
+    sort_handles(RefHdls, RefHdlsA, RefHdlsB, [Handle | Acc]);
+sort_handles([{Ref, _} | RefHdls], RefHdlsA, [{Ref, Handle} | RefHdlsB], Acc) ->
+    sort_handles(RefHdls, RefHdlsA, RefHdlsB, [Handle | Acc]).
+
+%% Stores an updated handle and moves it to "now" in the age tree
+%% (the gb_tree mapping {LastUsedAt, Ref} -> true kept per process).
+put_handle(Ref, Handle = #handle { last_used_at = Then }) ->
+    Now = erlang:monotonic_time(),
+    age_tree_update(Then, Now, Ref),
+    put({Ref, fhc_handle}, Handle #handle { last_used_at = Now }).
+
+with_age_tree(Fun) -> put_age_tree(Fun(get_age_tree())).
+
+get_age_tree() ->
+    case get(fhc_age_tree) of
+        undefined -> gb_trees:empty();
+        AgeTree   -> AgeTree
+    end.
+
+put_age_tree(Tree) -> put(fhc_age_tree, Tree).
+
+age_tree_update(Then, Now, Ref) ->
+    with_age_tree(
+      fun (Tree) ->
+              gb_trees:insert({Now, Ref}, true,
+                              gb_trees:delete_any({Then, Ref}, Tree))
+      end).
+
+%% Removes a closed handle from the age tree and notifies the server,
+%% passing the new oldest timestamp (or 'undefined' if none remain) so
+%% the server's per-client view stays current.
+age_tree_delete(Then, Ref) ->
+    with_age_tree(
+      fun (Tree) ->
+              Tree1 = gb_trees:delete_any({Then, Ref}, Tree),
+              Oldest = oldest(Tree1, fun () -> undefined end),
+              gen_server2:cast(?SERVER, {close, self(), Oldest}),
+              Tree1
+      end).
+
+%% Reports the current oldest last-used timestamp to the server
+%% without modifying the tree; no-op when we hold no handles.
+age_tree_change() ->
+    with_age_tree(
+      fun (Tree) ->
+              case gb_trees:is_empty(Tree) of
+                  true  -> Tree;
+                  false -> {{Oldest, _Ref}, _} = gb_trees:smallest(Tree),
+                           gen_server2:cast(?SERVER, {update, self(), Oldest}),
+                           Tree
+              end
+      end).
+
+%% Smallest (oldest) timestamp in the tree, or DefaultFun() if empty.
+oldest(Tree, DefaultFun) ->
+    case gb_trees:is_empty(Tree) of
+        true  -> DefaultFun();
+        false -> {{Oldest, _Ref}, _} = gb_trees:smallest(Tree),
+                 Oldest
+    end.
+
+%% Creates a fresh handle record in the 'closed' state (no descriptor
+%% consumed yet) and stores it in the process dictionary. Buffer
+%% limits come from Options, gated by the rabbit application's
+%% fhc_write_buffering / fhc_read_buffering settings.
+new_closed_handle(Path, Mode, Options) ->
+    WriteBufferSize =
+        case application:get_env(rabbit, fhc_write_buffering) of
+            {ok, false} -> 0;
+            {ok, true} ->
+                case proplists:get_value(write_buffer, Options, unbuffered) of
+                    unbuffered           -> 0;
+                    infinity             -> infinity;
+                    N when is_integer(N) -> N
+                end
+        end,
+    ReadBufferSize =
+        case application:get_env(rabbit, fhc_read_buffering) of
+            {ok, false} -> 0;
+            {ok, true} ->
+                case proplists:get_value(read_buffer, Options, unbuffered) of
+                    unbuffered             -> 0;
+                    N2 when is_integer(N2) -> N2
+                end
+        end,
+    Ref = make_ref(),
+    put({Ref, fhc_handle}, #handle { hdl                     = closed,
+                                     ref                     = Ref,
+                                     offset                  = 0,
+                                     is_dirty                = false,
+                                     write_buffer_size       = 0,
+                                     write_buffer_size_limit = WriteBufferSize,
+                                     write_buffer            = [],
+                                     read_buffer             = <<>>,
+                                     read_buffer_pos         = 0,
+                                     read_buffer_rem         = 0,
+                                     read_buffer_size        = ReadBufferSize,
+                                     read_buffer_size_limit  = ReadBufferSize,
+                                     read_buffer_usage       = 0,
+                                     at_eof                  = false,
+                                     path                    = Path,
+                                     mode                    = Mode,
+                                     options                 = Options,
+                                     is_write                = is_writer(Mode),
+                                     is_read                 = is_reader(Mode),
+                                     last_used_at            = undefined }),
+    {ok, Ref}.
+
+%% Soft-closes and re-stores the handle for Ref; returns true on
+%% success, false (after re-storing via put_handle/2) on failure.
+soft_close(Ref, Handle) ->
+    {Res, Handle1} = soft_close(Handle),
+    case Res of
+        ok -> put({Ref, fhc_handle}, Handle1),
+              true;
+        _  -> put_handle(Ref, Handle1),
+              false
+    end.
+
+%% Flush + (fsync if dirty) + close of the OS descriptor, keeping the
+%% handle record so it can be transparently reopened later. Removes
+%% the handle from the age tree (which notifies the server).
+soft_close(Handle = #handle { hdl = closed }) ->
+    {ok, Handle};
+soft_close(Handle) ->
+    case write_buffer(Handle) of
+        {ok, #handle { hdl         = Hdl,
+                       ref         = Ref,
+                       is_dirty    = IsDirty,
+                       last_used_at = Then } = Handle1 } ->
+            ok = case IsDirty of
+                     true  -> prim_file_sync(Hdl);
+                     false -> ok
+                 end,
+            ok = prim_file:close(Hdl),
+            age_tree_delete(Then, Ref),
+            {ok, Handle1 #handle { hdl            = closed,
+                                   is_dirty       = false,
+                                   last_used_at   = undefined }};
+        {_Error, _Handle} = Result ->
+            Result
+    end.
+
+%% Full close: soft-close plus updating/erasing the per-path #file{}
+%% record (reader count, writer flag) in the process dictionary.
+hard_close(Handle) ->
+    case soft_close(Handle) of
+        {ok, #handle { path = Path,
+                       is_read = IsReader, is_write = IsWriter }} ->
+            #file { reader_count = RCount, has_writer = HasWriter } = File =
+                get({Path, fhc_file}),
+            RCount1 = case IsReader of
+                          true  -> RCount - 1;
+                          false -> RCount
+                      end,
+            HasWriter1 = HasWriter andalso not IsWriter,
+            case RCount1 =:= 0 andalso not HasWriter1 of
+                true  -> erase({Path, fhc_file});
+                false -> put({Path, fhc_file},
+                             File #file { reader_count = RCount1,
+                                          has_writer = HasWriter1 })
+            end,
+            ok;
+        {_Error, _Handle} = Result ->
+            Result
+    end.
+
+%% Seeks only when needed (see needs_seek/3). If the target offset
+%% still lies within the current read buffer, the seek is done purely
+%% by shifting the buffer position/remainder — no syscall; otherwise a
+%% real seek is issued and the read buffer reset.
+maybe_seek(New, Handle = #handle{hdl              = Hdl,
+                                 offset           = Old,
+                                 read_buffer_pos  = BufPos,
+                                 read_buffer_rem  = BufRem,
+                                 at_eof           = AtEoF}) ->
+    {AtEoF1, NeedsSeek} = needs_seek(AtEoF, Old, New),
+    case NeedsSeek of
+        true when is_number(New) andalso
+                  ((New >= Old andalso New =< BufRem + Old)
+                   orelse (New < Old andalso Old - New =< BufPos)) ->
+            Diff = New - Old,
+            {{ok, New}, Handle#handle{offset          = New,
+                                      at_eof          = AtEoF1,
+                                      read_buffer_pos = BufPos + Diff,
+                                      read_buffer_rem = BufRem - Diff}};
+        true ->
+            case prim_file_position(Hdl, New) of
+                {ok, Offset1} = Result ->
+                    {Result, reset_read_buffer(Handle#handle{offset = Offset1,
+                                                             at_eof = AtEoF1})};
+                {error, _} = Error ->
+                    {Error, Handle}
+            end;
+        false ->
+            {{ok, Old}, Handle}
+    end.
+
+%% Decides whether a seek request requires an actual repositioning and
+%% what the resulting at_eof flag is: {NewAtEoF, NeedsSeek}.
+needs_seek( AtEoF, _CurOffset,     cur     ) -> {AtEoF, false};
+needs_seek( AtEoF, _CurOffset,     {cur, 0}) -> {AtEoF, false};
+needs_seek(  true, _CurOffset,     eof     ) -> {true , false};
+needs_seek(  true, _CurOffset,     {eof, 0}) -> {true , false};
+needs_seek( false, _CurOffset,     eof     ) -> {true , true };
+needs_seek( false, _CurOffset,     {eof, 0}) -> {true , true };
+needs_seek( AtEoF,          0,     bof     ) -> {AtEoF, false};
+needs_seek( AtEoF,          0,     {bof, 0}) -> {AtEoF, false};
+needs_seek( AtEoF,  CurOffset, CurOffset   ) -> {AtEoF, false};
+needs_seek(  true,  CurOffset, {bof, DesiredOffset})
+  when DesiredOffset >= CurOffset ->
+    {true, true};
+needs_seek(  true, _CurOffset, {cur, DesiredOffset})
+  when DesiredOffset > 0 ->
+    {true, true};
+needs_seek(  true,  CurOffset, DesiredOffset) %% same as {bof, DO}
+  when is_integer(DesiredOffset) andalso DesiredOffset >= CurOffset ->
+    {true, true};
+%% because we can't really track size, we could well end up at EoF and not know
+needs_seek(_AtEoF, _CurOffset, _DesiredOffset) ->
+    {false, true}.
+
+%% Flushes the accumulated write buffer (stored reversed; reversed
+%% back here before the single prim_file write) and advances the
+%% offset. Empty buffer is a no-op. Note the only non-empty clause
+%% requires at_eof = true: all buffered writes are appends.
+write_buffer(Handle = #handle { write_buffer = [] }) ->
+    {ok, Handle};
+write_buffer(Handle = #handle { hdl = Hdl, offset = Offset,
+                                write_buffer = WriteBuffer,
+                                write_buffer_size = DataSize,
+                                at_eof = true }) ->
+    case prim_file_write(Hdl, lists:reverse(WriteBuffer)) of
+        ok ->
+            Offset1 = Offset + DataSize,
+            {ok, Handle #handle { offset = Offset1, is_dirty = true,
+                                  write_buffer = [], write_buffer_size = 0 }};
+        {error, _} = Error ->
+            {Error, Handle}
+    end.
+
+%% Discards any buffered read data.
+reset_read_buffer(Handle) ->
+    Handle#handle{read_buffer     = <<>>,
+                  read_buffer_pos = 0,
+                  read_buffer_rem = 0}.
+
+%% We come into this function whenever there's been a miss while
+%% reading from the buffer - but note that when we first start with a
+%% new handle the usage will be 0. Therefore in that case don't take
+%% it as meaning the buffer was useless, we just haven't done anything
+%% yet!
+tune_read_buffer_limit(Handle = #handle{read_buffer_usage = 0}, _Count) ->
+    Handle;
+%% In this head we have been using the buffer but now tried to read
+%% outside it. So how did we do? If we used less than the size of the
+%% buffer, make the new buffer the size of what we used before, but
+%% add one byte (so that next time we can distinguish between getting
+%% the buffer size exactly right and actually wanting more). If we
+%% read 100% of what we had, then double it for next time, up to the
+%% limit that was set when we were created.
+tune_read_buffer_limit(Handle = #handle{read_buffer = Buf,
+                                        read_buffer_usage = Usg,
+                                        read_buffer_size = Sz,
+                                        read_buffer_size_limit = Lim}, Count) ->
+    %% If the buffer is <<>> then we are in the first read after a
+    %% reset, the read_buffer_usage is the total usage from before the
+    %% reset. But otherwise we are in a read which read off the end of
+    %% the buffer, so really the size of this read should be included
+    %% in the usage.
+    TotalUsg = case Buf of
+                   <<>> -> Usg;
+                   _    -> Usg + Count
+               end,
+    %% NOTE(review): the comparison below uses TotalUsg but the new
+    %% size is derived from Usg, so the off-buffer read's Count is not
+    %% included in the resized buffer — confirm this is intentional.
+    Handle#handle{read_buffer_usage = 0,
+                  read_buffer_size = erlang:min(case TotalUsg < Sz of
+                                                    true -> Usg + 1;
+                                                    false -> Usg * 2
+                                                end, Lim)}.
+
+%% If the VM is at or over its memory limit, free read-cache memory in
+%% this process, aiming for twice the overage (headroom so we do not
+%% immediately re-trigger). Handles in SparedRefs are spared first.
+maybe_reduce_read_cache(SparedRefs) ->
+    case vm_memory_monitor:get_memory_use(bytes) of
+        {_, infinity} -> ok;
+        {MemUse, MemLimit} when MemUse < MemLimit -> ok;
+        {MemUse, MemLimit} -> reduce_read_cache(
+                                (MemUse - MemLimit) * 2,
+                                SparedRefs)
+    end.
+
+%% Free at least MemToFree bytes of read-buffer memory held by the
+%% handles cached in this process's dictionary (keys {Ref, fhc_handle}),
+%% skipping handles listed in SparedRefs. If sparing them left too
+%% little freed, retry once with nothing spared.
+reduce_read_cache(MemToFree, SparedRefs) ->
+    Handles = lists:sort(
+        %% lists:sort/2 requires a "less than or equal" ordering
+        %% function; the strict < violated that contract.
+        fun({_, H1}, {_, H2}) -> H1 =< H2 end,
+        [{R, H} || {{R, fhc_handle}, H} <- get(),
+                   not lists:member(R, SparedRefs)
+                   andalso size(H#handle.read_buffer) > 0]),
+    FreedMem = lists:foldl(
+                 fun
+                     (_, Freed) when Freed >= MemToFree ->
+                         Freed;
+                     ({Ref, #handle{read_buffer = Buf} = Handle}, Freed) ->
+                         %% Dropping the buffer releases size(Buf) bytes.
+                         Handle1 = reset_read_buffer(Handle),
+                         put({Ref, fhc_handle}, Handle1),
+                         Freed + size(Buf)
+                 end, 0, Handles),
+    if
+        FreedMem < MemToFree andalso SparedRefs =/= [] ->
+            reduce_read_cache(MemToFree - FreedMem, []);
+        true ->
+            ok
+    end.
+
+%% Render the requested info Items as an {Item, Value} proplist via i/2.
+infos(Items, State) -> [{Item, i(Item, State)} || Item <- Items].
+
+%% Compute one info item from the server state; throws
+%% {bad_argument, Item} for unknown keys.
+i(total_limit, #fhc_state{limit = Limit}) -> Limit;
+i(total_used, State) -> used(State);
+i(sockets_limit, #fhc_state{obtain_limit = Limit}) -> Limit;
+i(sockets_used, #fhc_state{obtain_count_socket = Count,
+ reserve_count_socket = RCount}) -> Count + RCount;
+i(files_reserved, #fhc_state{reserve_count_file = RCount}) -> RCount;
+i(Item, _) -> throw({bad_argument, Item}).
+
+%% Total descriptors accounted for: open + obtained + reserved, over
+%% both files and sockets.
+used(#fhc_state{open_count = C1,
+ obtain_count_socket = C2,
+ obtain_count_file = C3,
+ reserve_count_socket = C4,
+ reserve_count_file = C5}) -> C1 + C2 + C3 + C4 + C5.
+
+%%----------------------------------------------------------------------------
+%% gen_server2 callbacks
+%%----------------------------------------------------------------------------
+
+%% gen_server2 init. The handle limit comes from the
+%% file_handles_high_watermark app env when set to a positive integer,
+%% otherwise from the OS ulimit (minus a reservation for non-file use),
+%% with a fixed fallback when the ulimit is unknown. Two private ETS
+%% tables track client processes and their eldest-unused timestamps.
+init([AlarmSet, AlarmClear]) ->
+    _ = file_handle_cache_stats:init(),
+    Limit = case application:get_env(file_handles_high_watermark) of
+                {ok, Watermark} when (is_integer(Watermark) andalso
+                                      Watermark > 0) ->
+                    Watermark;
+                _ ->
+                    case ulimit() of
+                        unknown -> ?FILE_HANDLES_LIMIT_OTHER;
+                        Lim -> lists:max([2, Lim - ?RESERVED_FOR_OTHERS])
+                    end
+            end,
+    ObtainLimit = obtain_limit(Limit),
+    error_logger:info_msg("Limiting to approx ~p file handles (~p sockets)~n",
+                          [Limit, ObtainLimit]),
+    Clients = ets:new(?CLIENT_ETS_TABLE, [set, private, {keypos, #cstate.pid}]),
+    Elders = ets:new(?ELDERS_ETS_TABLE, [set, private]),
+    {ok, #fhc_state { elders = Elders,
+                      limit = Limit,
+                      open_count = 0,
+                      open_pending = pending_new(),
+                      obtain_limit = ObtainLimit,
+                      obtain_count_file = 0,
+                      obtain_pending_file = pending_new(),
+                      obtain_count_socket = 0,
+                      obtain_pending_socket = pending_new(),
+                      clients = Clients,
+                      timer_ref = undefined,
+                      alarm_set = AlarmSet,
+                      alarm_clear = AlarmClear,
+                      reserve_count_file = 0,
+                      reserve_count_socket = 0 }}.
+
+%% gen_server2 priority hook: releases are processed ahead of other
+%% casts (priority 5 vs default 0) so freed capacity is reflected
+%% before new demand is handled.
+prioritise_cast(Msg, _Len, _State) ->
+    case Msg of
+        {release, _, _, _} -> 5;
+        {release_reservation, _, _, _} -> 5;
+        _ -> 0
+    end.
+
+%% open: record the caller's eldest-unused timestamp, then either grant
+%% immediately or, if granting would require reduction, block a client
+%% with nothing open (queueing the request) or ask one with open
+%% handles to close ({reply, close, _}).
+handle_call({open, Pid, Requested, EldestUnusedSince}, From,
+            State = #fhc_state { open_count = Count,
+                                 open_pending = Pending,
+                                 elders = Elders,
+                                 clients = Clients })
+  when EldestUnusedSince =/= undefined ->
+    true = ets:insert(Elders, {Pid, EldestUnusedSince}),
+    Item = #pending { kind = open,
+                      pid = Pid,
+                      requested = Requested,
+                      from = From },
+    ok = track_client(Pid, Clients),
+    case needs_reduce(State #fhc_state { open_count = Count + Requested }) of
+        true -> case ets:lookup(Clients, Pid) of
+                    [#cstate { opened = 0 }] ->
+                        true = ets:update_element(
+                                 Clients, Pid, {#cstate.blocked, true}),
+                        {noreply,
+                         reduce(State #fhc_state {
+                                  open_pending = pending_in(Item, Pending) })};
+                    [#cstate { opened = Opened }] ->
+                        true = ets:update_element(
+                                 Clients, Pid,
+                                 {#cstate.pending_closes, Opened}),
+                        {reply, close, State}
+                end;
+        false -> {noreply, run_pending_item(Item, State)}
+    end;
+
+%% obtain: grant N descriptors of Type (file|socket), queueing (and
+%% blocking the client) when the type's limit is reached or reduction
+%% is needed. Replies are issued later via run_pending_item/2.
+handle_call({obtain, N, Type, Pid}, From,
+            State = #fhc_state { clients = Clients }) ->
+    Count = obtain_state(Type, count, State),
+    Pending = obtain_state(Type, pending, State),
+    ok = track_client(Pid, Clients),
+    Item = #pending { kind = {obtain, Type}, pid = Pid,
+                      requested = N, from = From },
+    Enqueue = fun () ->
+                      true = ets:update_element(Clients, Pid,
+                                                {#cstate.blocked, true}),
+                      set_obtain_state(Type, pending,
+                                       pending_in(Item, Pending), State)
+              end,
+    {noreply,
+     case obtain_limit_reached(Type, State) of
+         true -> Enqueue();
+         false -> case needs_reduce(
+                         set_obtain_state(Type, count, Count + 1, State)) of
+                      true -> reduce(Enqueue());
+                      false -> adjust_alarm(
+                                 State, run_pending_item(Item, State))
+                  end
+     end};
+
+%% set_limit: install the new limit, run any now-satisfiable pending
+%% requests, reduce if over the new limit, and re-evaluate the alarm.
+handle_call({set_limit, Limit}, _From, State) ->
+    {reply, ok, adjust_alarm(
+                  State, maybe_reduce(
+                           process_pending(
+                             State #fhc_state {
+                               limit = Limit,
+                               obtain_limit = obtain_limit(Limit) })))};
+
+handle_call(get_limit, _From, State = #fhc_state { limit = Limit }) ->
+    {reply, Limit, State};
+
+handle_call({info, Items}, _From, State) ->
+    {reply, infos(Items, State), State}.
+
+%% register_callback: remember the {M,F,A} a client wants invoked when
+%% it should close handles.
+handle_cast({register_callback, Pid, MFA},
+            State = #fhc_state { clients = Clients }) ->
+    ok = track_client(Pid, Clients),
+    true = ets:update_element(Clients, Pid, {#cstate.callback, MFA}),
+    {noreply, State};
+
+handle_cast({update, Pid, EldestUnusedSince},
+            State = #fhc_state { elders = Elders })
+  when EldestUnusedSince =/= undefined ->
+    true = ets:insert(Elders, {Pid, EldestUnusedSince}),
+    %% don't call maybe_reduce from here otherwise we can create a
+    %% storm of messages
+    {noreply, State};
+
+%% release: return N obtained descriptors of Type, then let pending
+%% requests run and re-evaluate the alarm.
+handle_cast({release, N, Type, Pid}, State) ->
+    State1 = process_pending(update_counts({obtain, Type}, Pid, -N, State)),
+    {noreply, adjust_alarm(State, State1)};
+
+%% close: update/remove the eldest-unused record, decrement the
+%% client's pending_closes (floored at 0) and its open count.
+handle_cast({close, Pid, EldestUnusedSince},
+            State = #fhc_state { elders = Elders, clients = Clients }) ->
+    true = case EldestUnusedSince of
+               undefined -> ets:delete(Elders, Pid);
+               _ -> ets:insert(Elders, {Pid, EldestUnusedSince})
+           end,
+    ets:update_counter(Clients, Pid, {#cstate.pending_closes, -1, 0, 0}),
+    {noreply, adjust_alarm(State, process_pending(
+                                    update_counts(open, Pid, -1, State)))};
+
+%% transfer: move N obtained sockets from one client to another.
+handle_cast({transfer, N, FromPid, ToPid}, State) ->
+    ok = track_client(ToPid, State#fhc_state.clients),
+    {noreply, process_pending(
+                update_counts({obtain, socket}, ToPid, +N,
+                              update_counts({obtain, socket}, FromPid, -N,
+                                            State)))};
+
+handle_cast(clear_read_cache, State) ->
+    _ = clear_process_read_cache(),
+    {noreply, State};
+
+%% release_reservation: setting the reservation to 0 releases it.
+handle_cast({release_reservation, Type, Pid}, State) ->
+    State1 = process_pending(update_counts({reserve, Type}, Pid, 0, State)),
+    {noreply, adjust_alarm(State, State1)};
+
+%% set_reservation: absolute reservation for the client; reduce if the
+%% new total demands it.
+handle_cast({set_reservation, N, Type, Pid},
+            State = #fhc_state { clients = Clients }) ->
+    ok = track_client(Pid, Clients),
+    NewState = process_pending(update_counts({reserve, Type}, Pid, N, State)),
+    {noreply, case needs_reduce(NewState) of
+                  true -> reduce(NewState);
+                  false -> adjust_alarm(State, NewState)
+              end}.
+
+%% Periodic timer (armed in reduce/1): clear the timer ref and
+%% re-check whether reduction is needed.
+handle_info(check_counts, State) ->
+    {noreply, maybe_reduce(State #fhc_state { timer_ref = undefined })};
+
+%% A monitored client died: drop its ETS rows, subtract everything it
+%% held from the aggregate counters, purge its queued requests, and
+%% re-run pending work / alarm state.
+handle_info({'DOWN', _MRef, process, Pid, _Reason},
+            State = #fhc_state { elders = Elders,
+                                 open_count = OpenCount,
+                                 open_pending = OpenPending,
+                                 obtain_count_file = ObtainCountF,
+                                 obtain_count_socket = ObtainCountS,
+                                 obtain_pending_file = ObtainPendingF,
+                                 obtain_pending_socket = ObtainPendingS,
+                                 reserve_count_file = ReserveCountF,
+                                 reserve_count_socket = ReserveCountS,
+                                 clients = Clients }) ->
+    [#cstate { opened = Opened,
+               obtained_file = ObtainedFile,
+               obtained_socket = ObtainedSocket,
+               reserved_file = ReservedFile,
+               reserved_socket = ReservedSocket }] =
+        ets:lookup(Clients, Pid),
+    true = ets:delete(Clients, Pid),
+    true = ets:delete(Elders, Pid),
+    Fun = fun (#pending { pid = Pid1 }) -> Pid1 =/= Pid end,
+    State1 = process_pending(
+               State #fhc_state {
+                 open_count = OpenCount - Opened,
+                 open_pending = filter_pending(Fun, OpenPending),
+                 obtain_count_file = ObtainCountF - ObtainedFile,
+                 obtain_count_socket = ObtainCountS - ObtainedSocket,
+                 obtain_pending_file = filter_pending(Fun, ObtainPendingF),
+                 obtain_pending_socket = filter_pending(Fun, ObtainPendingS),
+                 reserve_count_file = ReserveCountF - ReservedFile,
+                 reserve_count_socket = ReserveCountS - ReservedSocket}),
+    {noreply, adjust_alarm(State, State1)}.
+
+%% Drop both private ETS tables on shutdown. Returns State
+%% (NOTE(review): terminate return values are conventionally ignored,
+%% so 'ok' would be equally valid — confirm nothing inspects this).
+terminate(_Reason, State = #fhc_state { clients = Clients,
+                                        elders = Elders }) ->
+    ets:delete(Clients),
+    ets:delete(Elders),
+    State.
+
+%% No state migration needed across code upgrades.
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+%%----------------------------------------------------------------------------
+%% pending queue abstraction helpers
+%%----------------------------------------------------------------------------
+
+%% Left fold over a queue, front to back: equivalent to folding over
+%% queue:to_list/1 with lists:foldl/3.
+queue_fold(Fun, Init, Q) ->
+    lists:foldl(Fun, Init, queue:to_list(Q)).
+
+%% Keep only the pending items for which Fun returns true, subtracting
+%% the 'requested' amounts of the dropped items from the running total
+%% (a pending queue is {TotalRequested, queue()}).
+filter_pending(Fun, {Count, Queue}) ->
+    {Delta, Queue1} =
+        queue_fold(
+          fun (Item = #pending { requested = Requested }, {DeltaN, QueueN}) ->
+                  case Fun(Item) of
+                      true -> {DeltaN, queue:in(Item, QueueN)};
+                      false -> {DeltaN - Requested, QueueN}
+                  end
+          end, {0, queue:new()}, Queue),
+    {Count + Delta, Queue1}.
+
+%% A pending queue is {TotalRequested, queue()} — the first element is
+%% the sum of the 'requested' fields of all queued #pending items, so
+%% the outstanding demand is available in O(1).
+pending_new() ->
+    {0, queue:new()}.
+
+pending_in(Item = #pending { requested = Requested }, {Count, Queue}) ->
+    {Count + Requested, queue:in(Item, Queue)}.
+
+%% Dequeue the front item, or 'empty' when the total is zero.
+pending_out({0, _Queue} = Pending) ->
+    {empty, Pending};
+pending_out({N, Queue}) ->
+    {{value, #pending { requested = Requested }} = Result, Queue1} =
+        queue:out(Queue),
+    {Result, {N - Requested, Queue1}}.
+
+pending_count({Count, _Queue}) ->
+    Count.
+
+%%----------------------------------------------------------------------------
+%% server helpers
+%%----------------------------------------------------------------------------
+
+%% Derive the socket obtain limit from the overall limit via the
+%% ?OBTAIN_LIMIT macro, clamped below at 0; infinity passes through.
+obtain_limit(infinity) -> infinity;
+obtain_limit(Limit) -> case ?OBTAIN_LIMIT(Limit) of
+ OLimit when OLimit < 0 -> 0;
+ OLimit -> OLimit
+ end.
+
+%% Sockets have a dedicated obtain limit; for files the general
+%% reduction-pressure check stands in for a per-type limit.
+obtain_limit_reached(socket, State) -> obtain_limit_reached(State);
+obtain_limit_reached(file, State) -> needs_reduce(State).
+
+%% Obtained + reserved sockets count against the socket limit.
+obtain_limit_reached(#fhc_state{obtain_limit = Limit,
+                                obtain_count_socket = Count,
+                                reserve_count_socket = RCount}) ->
+    Limit =/= infinity andalso (RCount + Count) >= Limit.
+
+%% Typed accessors/setters over the per-type (file|socket) obtain
+%% count and pending-queue fields of #fhc_state.
+obtain_state(file, count, #fhc_state{obtain_count_file = N}) -> N;
+obtain_state(socket, count, #fhc_state{obtain_count_socket = N}) -> N;
+obtain_state(file, pending, #fhc_state{obtain_pending_file = N}) -> N;
+obtain_state(socket, pending, #fhc_state{obtain_pending_socket = N}) -> N.
+
+set_obtain_state(file, count, N, S) -> S#fhc_state{obtain_count_file = N};
+set_obtain_state(socket, count, N, S) -> S#fhc_state{obtain_count_socket = N};
+set_obtain_state(file, pending, N, S) -> S#fhc_state{obtain_pending_file = N};
+set_obtain_state(socket, pending, N, S) -> S#fhc_state{obtain_pending_socket = N}.
+
+%% Fire the alarm callbacks only on a transition of the socket-limit
+%% condition between OldState and NewState; returns NewState.
+adjust_alarm(OldState = #fhc_state { alarm_set = AlarmSet,
+                                     alarm_clear = AlarmClear }, NewState) ->
+    case {obtain_limit_reached(OldState), obtain_limit_reached(NewState)} of
+        {false, true} -> AlarmSet({file_descriptor_limit, []});
+        {true, false} -> AlarmClear(file_descriptor_limit);
+        _ -> ok
+    end,
+    NewState.
+
+%% Run as many queued requests as current capacity allows; with an
+%% infinite limit nothing ever queues, so this is a no-op. Files are
+%% drained before sockets, opens last.
+process_pending(State = #fhc_state { limit = infinity }) ->
+    State;
+process_pending(State) ->
+    process_open(process_obtain(socket, process_obtain(file, State))).
+
+%% Opens may consume whatever headroom remains below the global limit.
+process_open(State = #fhc_state { limit = Limit,
+                                  open_pending = Pending}) ->
+    {Pending1, State1} = process_pending(Pending, Limit - used(State), State),
+    State1 #fhc_state { open_pending = Pending1 }.
+
+%% Drain the per-type obtain queue within its quota. For sockets the
+%% quota is the tighter of the socket-specific headroom and the global
+%% headroom; for files only the global headroom applies.
+process_obtain(socket, State = #fhc_state { limit = Limit,
+                                            obtain_limit = ObtainLimit,
+                                            open_count = OpenCount,
+                                            obtain_count_socket = ObtainCount,
+                                            obtain_pending_socket = Pending,
+                                            obtain_count_file = ObtainCountF,
+                                            reserve_count_file = ReserveCountF,
+                                            reserve_count_socket = ReserveCount}) ->
+    Quota = min(ObtainLimit - ObtainCount,
+                Limit - (OpenCount + ObtainCount + ObtainCountF + ReserveCount + ReserveCountF)),
+    {Pending1, State1} = process_pending(Pending, Quota, State),
+    State1#fhc_state{obtain_pending_socket = Pending1};
+process_obtain(file, State = #fhc_state { limit = Limit,
+                                          open_count = OpenCount,
+                                          obtain_count_socket = ObtainCountS,
+                                          obtain_count_file = ObtainCountF,
+                                          obtain_pending_file = Pending,
+                                          reserve_count_file = ReserveCountF,
+                                          reserve_count_socket = ReserveCountS}) ->
+    Quota = Limit - (OpenCount + ObtainCountS + ObtainCountF + ReserveCountF + ReserveCountS),
+    {Pending1, State1} = process_pending(Pending, Quota, State),
+    State1#fhc_state{obtain_pending_file = Pending1}.
+
+%% Dequeue and run pending items while the quota lasts. Processing is
+%% strictly FIFO: if the head item alone exceeds the remaining quota
+%% we stop rather than skip past it.
+process_pending(Pending, Quota, State) when Quota =< 0 ->
+    {Pending, State};
+process_pending(Pending, Quota, State) ->
+    case pending_out(Pending) of
+        {empty, _Pending} ->
+            {Pending, State};
+        {{value, #pending { requested = Requested }}, _Pending1}
+          when Requested > Quota ->
+            {Pending, State};
+        {{value, #pending { requested = Requested } = Item}, Pending1} ->
+            process_pending(Pending1, Quota - Requested,
+                            run_pending_item(Item, State))
+    end.
+
+%% Grant a queued request: reply 'ok' to the blocked caller, mark the
+%% client unblocked, and account the granted amount.
+run_pending_item(#pending { kind = Kind,
+                            pid = Pid,
+                            requested = Requested,
+                            from = From },
+                 State = #fhc_state { clients = Clients }) ->
+    gen_server2:reply(From, ok),
+    true = ets:update_element(Clients, Pid, {#cstate.blocked, false}),
+    update_counts(Kind, Pid, Requested, State).
+
+%% Adjust the per-client counter (Clients ETS table) and the matching
+%% aggregate counter in #fhc_state. For 'open' and {obtain, _} the
+%% third argument is a relative Delta; for {reserve, _} it is the new
+%% absolute reservation, from which the delta is derived.
+update_counts(open, Pid, Delta,
+              State = #fhc_state { open_count = OpenCount,
+                                   clients = Clients }) ->
+    ets:update_counter(Clients, Pid, {#cstate.opened, Delta}),
+    State #fhc_state { open_count = OpenCount + Delta};
+update_counts({obtain, file}, Pid, Delta,
+              State = #fhc_state {obtain_count_file = ObtainCountF,
+                                  clients = Clients }) ->
+    ets:update_counter(Clients, Pid, {#cstate.obtained_file, Delta}),
+    State #fhc_state { obtain_count_file = ObtainCountF + Delta};
+update_counts({obtain, socket}, Pid, Delta,
+              State = #fhc_state {obtain_count_socket = ObtainCountS,
+                                  clients = Clients }) ->
+    ets:update_counter(Clients, Pid, {#cstate.obtained_socket, Delta}),
+    State #fhc_state { obtain_count_socket = ObtainCountS + Delta};
+update_counts({reserve, file}, Pid, NewReservation,
+              State = #fhc_state {reserve_count_file = ReserveCountF,
+                                  clients = Clients }) ->
+    [#cstate{reserved_file = R}] = ets:lookup(Clients, Pid),
+    Delta = NewReservation - R,
+    ets:update_counter(Clients, Pid, {#cstate.reserved_file, Delta}),
+    State #fhc_state { reserve_count_file = ReserveCountF + Delta};
+update_counts({reserve, socket}, Pid, NewReservation,
+              State = #fhc_state {reserve_count_socket = ReserveCountS,
+                                  clients = Clients }) ->
+    %% BUG FIX: this clause previously matched reserved_file, so the
+    %% socket-reservation delta was computed against the client's
+    %% *file* reservation, corrupting both counters over time.
+    [#cstate{reserved_socket = R}] = ets:lookup(Clients, Pid),
+    Delta = NewReservation - R,
+    ets:update_counter(Clients, Pid, {#cstate.reserved_socket, Delta}),
+    State #fhc_state { reserve_count_socket = ReserveCountS + Delta}.
+
+%% Run a reduction pass only when needs_reduce/1 says so.
+maybe_reduce(State) ->
+    case needs_reduce(State) of
+        true -> reduce(State);
+        false -> State
+    end.
+
+%% True when handle usage must shrink: total usage exceeds the limit,
+%% or requests are queued that capacity (or, for sockets, the socket
+%% limit) is currently blocking. Always false with an infinite limit.
+needs_reduce(#fhc_state { limit = Limit,
+                          open_count = OpenCount,
+                          open_pending = {OpenPending, _},
+                          obtain_limit = ObtainLimit,
+                          obtain_count_socket = ObtainCountS,
+                          obtain_count_file = ObtainCountF,
+                          obtain_pending_file = {ObtainPendingF, _},
+                          obtain_pending_socket = {ObtainPendingS, _},
+                          reserve_count_socket = ReserveCountS,
+                          reserve_count_file = ReserveCountF}) ->
+    Limit =/= infinity
+        andalso (((OpenCount + ObtainCountS + ObtainCountF + ReserveCountS + ReserveCountF) > Limit)
+                 orelse (OpenPending =/= 0)
+                 orelse (ObtainPendingF =/= 0)
+                 orelse (ObtainCountS < ObtainLimit
+                         andalso (ObtainPendingS =/= 0))).
+
+%% Ask clients to close handles. Average the age of each unblocked
+%% client's eldest-unused handle (skipping clients already asked to
+%% close everything); if that average exceeds the check interval,
+%% notify everyone of the average age, otherwise notify just enough
+%% clients to cover the queued demand. Finally arm the check_counts
+%% timer if it is not already running.
+reduce(State = #fhc_state { open_pending = OpenPending,
+                            obtain_pending_file = ObtainPendingFile,
+                            obtain_pending_socket = ObtainPendingSocket,
+                            elders = Elders,
+                            clients = Clients,
+                            timer_ref = TRef }) ->
+    Now = erlang:monotonic_time(),
+    {CStates, Sum, ClientCount} =
+        ets:foldl(fun ({Pid, Eldest}, {CStatesAcc, SumAcc, CountAcc} = Accs) ->
+                          [#cstate { pending_closes = PendingCloses,
+                                     opened = Opened,
+                                     blocked = Blocked } = CState] =
+                              ets:lookup(Clients, Pid),
+                          %% Age in microseconds of the eldest unused handle.
+                          TimeDiff = erlang:convert_time_unit(
+                                       Now - Eldest, native, micro_seconds),
+                          case Blocked orelse PendingCloses =:= Opened of
+                              true -> Accs;
+                              false -> {[CState | CStatesAcc],
+                                        SumAcc + TimeDiff,
+                                        CountAcc + 1}
+                          end
+                  end, {[], 0, 0}, Elders),
+    case CStates of
+        [] -> ok;
+        _ -> case (Sum / ClientCount) -
+                 (1000 * ?FILE_HANDLES_CHECK_INTERVAL) of
+                 AverageAge when AverageAge > 0 ->
+                     notify_age(CStates, AverageAge);
+                 _ ->
+                     notify_age0(Clients, CStates,
+                                 pending_count(OpenPending) +
+                                     pending_count(ObtainPendingFile) +
+                                     pending_count(ObtainPendingSocket))
+             end
+    end,
+    case TRef of
+        undefined -> TRef1 = erlang:send_after(
+                               ?FILE_HANDLES_CHECK_INTERVAL, ?SERVER,
+                               check_counts),
+                     State #fhc_state { timer_ref = TRef1 };
+        _ -> State
+    end.
+
+%% Invoke each client's registered callback with AverageAge appended;
+%% clients without a callback are skipped.
+notify_age(CStates, AverageAge) ->
+    lists:foreach(
+      fun (#cstate { callback = undefined }) -> ok;
+          (#cstate { callback = {M, F, A} }) -> apply(M, F, A ++ [AverageAge])
+      end, CStates).
+
+%% Notify enough clients to free 'Required' handles, starting from a
+%% random point in the callback-bearing client list so the same
+%% clients are not always picked first.
+notify_age0(Clients, CStates, Required) ->
+    case [CState || CState <- CStates, CState#cstate.callback =/= undefined] of
+        [] -> ok;
+        Notifications -> S = rand:uniform(length(Notifications)),
+                         {L1, L2} = lists:split(S, Notifications),
+                         notify(Clients, Required, L2 ++ L1)
+    end.
+
+%% Ask clients in turn to close all their handles (callback invoked
+%% with age 0), counting each client's open handles against Required,
+%% until the demand is covered or the list is exhausted.
+notify(_Clients, _Required, []) ->
+    ok;
+notify(_Clients, Required, _Notifications) when Required =< 0 ->
+    ok;
+notify(Clients, Required, [#cstate{ pid = Pid,
+                                    callback = {M, F, A},
+                                    opened = Opened } | Notifications]) ->
+    apply(M, F, A ++ [0]),
+    ets:update_element(Clients, Pid, {#cstate.pending_closes, Opened}),
+    notify(Clients, Required - Opened, Notifications).
+
+%% Ensure a client row exists and the client process is monitored.
+%% insert_new/2 makes this idempotent: an existing row means we are
+%% already monitoring, so do nothing.
+track_client(Pid, Clients) ->
+    case ets:insert_new(Clients, #cstate { pid = Pid,
+                                           callback = undefined,
+                                           opened = 0,
+                                           obtained_file = 0,
+                                           obtained_socket = 0,
+                                           blocked = false,
+                                           pending_closes = 0,
+                                           reserved_file = 0,
+                                           reserved_socket = 0 }) of
+        true -> _MRef = erlang:monitor(process, Pid),
+                ok;
+        false -> ok
+    end.
+
+
+%% To increase the number of file descriptors: on Windows set ERL_MAX_PORTS
+%% environment variable, on Linux set `ulimit -n`.
+%% Read the emulator's max_fds from check_io stats, returning 'unknown'
+%% when unavailable. check_io may return a list of stat-lists or a
+%% single proplist depending on the OTP version, hence the two heads.
+ulimit() ->
+    IOStats = case erlang:system_info(check_io) of
+                  [Val | _] when is_list(Val) -> Val;
+                  Val when is_list(Val) -> Val;
+                  _Other -> []
+              end,
+    case proplists:get_value(max_fds, IOStats) of
+        MaxFds when is_integer(MaxFds) andalso MaxFds > 1 ->
+            case os:type() of
+                {win32, _OsName} ->
+                    %% On Windows max_fds is twice the number of open files:
+                    %% https://github.com/yrashk/erlang/blob/e1282325ed75e52a98d5/erts/emulator/sys/win32/sys.c#L2459-2466
+                    MaxFds div 2;
+                _Any ->
+                    %% For other operating systems trust Erlang.
+                    MaxFds
+            end;
+        _ ->
+            unknown
+    end.
diff --git a/deps/rabbit_common/src/file_handle_cache_stats.erl b/deps/rabbit_common/src/file_handle_cache_stats.erl
new file mode 100644
index 0000000000..e36a4b38dc
--- /dev/null
+++ b/deps/rabbit_common/src/file_handle_cache_stats.erl
@@ -0,0 +1,57 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(file_handle_cache_stats).
+
+%% stats about read / write operations that go through the fhc.
+
+-export([init/0, update/3, update/2, update/1, get/0]).
+
+-define(TABLE, ?MODULE).
+
+-define(COUNT,
+ [io_reopen, mnesia_ram_tx, mnesia_disk_tx,
+ msg_store_read, msg_store_write,
+ queue_index_journal_write, queue_index_write, queue_index_read]).
+-define(COUNT_TIME, [io_sync, io_seek, io_file_handle_open_attempt]).
+-define(COUNT_TIME_BYTES, [io_read, io_write]).
+
+%% Create the public named stats table and zero every {Op, Counter}
+%% cell; each op class gets the counter set its ?COUNT* macro implies
+%% (count / count+time / count+time+bytes).
+init() ->
+    _ = ets:new(?TABLE, [public, named_table]),
+    [ets:insert(?TABLE, {{Op, Counter}, 0}) || Op <- ?COUNT_TIME_BYTES,
+                                               Counter <- [count, bytes, time]],
+    [ets:insert(?TABLE, {{Op, Counter}, 0}) || Op <- ?COUNT_TIME,
+                                               Counter <- [count, time]],
+    [ets:insert(?TABLE, {{Op, Counter}, 0}) || Op <- ?COUNT,
+                                               Counter <- [count]].
+
+%% Run Thunk, timing it, and bump the op's count, bytes and elapsed
+%% microseconds; returns Thunk's result.
+update(Op, Bytes, Thunk) ->
+    {Time, Res} = timer_tc(Thunk),
+    _ = ets:update_counter(?TABLE, {Op, count}, 1),
+    _ = ets:update_counter(?TABLE, {Op, bytes}, Bytes),
+    _ = ets:update_counter(?TABLE, {Op, time}, Time),
+    Res.
+
+%% Run Thunk, timing it, and bump the op's count and elapsed
+%% microseconds; returns Thunk's result.
+update(Op, Thunk) ->
+    {Time, Res} = timer_tc(Thunk),
+    _ = ets:update_counter(?TABLE, {Op, count}, 1),
+    _ = ets:update_counter(?TABLE, {Op, time}, Time),
+    Res.
+
+%% Bump the op's count only.
+update(Op) ->
+    ets:update_counter(?TABLE, {Op, count}, 1),
+    ok.
+
+%% Dump all counters as a sorted [{{Op, Counter}, Value}] list.
+get() ->
+    lists:sort(ets:tab2list(?TABLE)).
+
+%% Time the evaluation of Thunk on the monotonic clock (immune to
+%% wall-clock adjustments), returning {ElapsedMicroseconds, Result}.
+timer_tc(Thunk) ->
+    Start = erlang:monotonic_time(),
+    Result = Thunk(),
+    Elapsed = erlang:convert_time_unit(
+                erlang:monotonic_time() - Start, native, micro_seconds),
+    {Elapsed, Result}.
diff --git a/deps/rabbit_common/src/gen_server2.erl b/deps/rabbit_common/src/gen_server2.erl
new file mode 100644
index 0000000000..b80e921a89
--- /dev/null
+++ b/deps/rabbit_common/src/gen_server2.erl
@@ -0,0 +1,1419 @@
+%% This file is a copy of gen_server.erl from the R13B-1 Erlang/OTP
+%% distribution, with the following modifications:
+%%
+%% 1) the module name is gen_server2
+%%
+%% 2) more efficient handling of selective receives in callbacks
+%% gen_server2 processes drain their message queue into an internal
+%% buffer before invoking any callback module functions. Messages are
+%% dequeued from the buffer for processing. Thus the effective message
+%% queue of a gen_server2 process is the concatenation of the internal
+%% buffer and the real message queue.
+%% As a result of the draining, any selective receive invoked inside a
+%% callback is less likely to have to scan a large message queue.
+%%
+%% 3) gen_server2:cast is guaranteed to be order-preserving
+%% The original code could reorder messages when communicating with a
+%% process on a remote node that was not currently connected.
+%%
+%% 4) The callback module can optionally implement prioritise_call/4,
+%% prioritise_cast/3 and prioritise_info/3. These functions take
+%% Message, From, Length and State or just Message, Length and State
+%% (where Length is the current number of messages waiting to be
+%% processed) and return a single integer representing the priority
+%% attached to the message, or 'drop' to ignore it (for
+%% prioritise_cast/3 and prioritise_info/3 only). Messages with
+%% higher priorities are processed before requests with lower
+%% priorities. The default priority is 0.
+%%
+%% 5) The callback module can optionally implement
+%% handle_pre_hibernate/1 and handle_post_hibernate/1. These will be
+%% called immediately prior to and post hibernation, respectively. If
+%% handle_pre_hibernate returns {hibernate, NewState} then the process
+%% will hibernate. If the module does not implement
+%% handle_pre_hibernate/1 then the default action is to hibernate.
+%%
+%% 6) init can return a 4th arg, {backoff, InitialTimeout,
+%% MinimumTimeout, DesiredHibernatePeriod} (all in milliseconds,
+%% 'infinity' does not make sense here). Then, on all callbacks which
+%% can return a timeout (including init), timeout can be
+%% 'hibernate'. When this is the case, the current timeout value will
+%% be used (initially, the InitialTimeout supplied from init). After
+%% this timeout has occurred, hibernation will occur as normal. Upon
+%% awaking, a new current timeout value will be calculated.
+%%
+%% The purpose is that the gen_server2 takes care of adjusting the
+%% current timeout value such that the process will increase the
+%% timeout value repeatedly if it is unable to sleep for the
+%% DesiredHibernatePeriod. If it is able to sleep for the
+%% DesiredHibernatePeriod it will decrease the current timeout down to
+%% the MinimumTimeout, so that the process is put to sleep sooner (and
+%% hopefully stays asleep for longer). In short, should a process
+%% using this receive a burst of messages, it should not hibernate
+%% between those messages, but as the messages become less frequent,
+%% the process will not only hibernate, it will do so sooner after
+%% each message.
+%%
+%% When using this backoff mechanism, normal timeout values (i.e. not
+%% 'hibernate') can still be used, and if they are used then the
+%% handle_info(timeout, State) will be called as normal. In this case,
+%% returning 'hibernate' from handle_info(timeout, State) will not
+%% hibernate the process immediately, as it would if backoff wasn't
+%% being used. Instead it'll wait for the current timeout as described
+%% above.
+%%
+%% 7) The callback module can return from any of the handle_*
+%% functions, a {become, Module, State} triple, or a {become, Module,
+%% State, Timeout} quadruple. This allows the gen_server to
+%% dynamically change the callback module. The State is the new state
+%% which will be passed into any of the callback functions in the new
+%% module. Note there is no form also encompassing a reply, thus if
+%% you wish to reply in handle_call/3 and change the callback module,
+%% you need to use gen_server2:reply/2 to issue the reply
+%% manually. The init function can similarly return a 5th argument,
+%% Module, in order to dynamically decide the callback module on init.
+%%
+%% 8) The callback module can optionally implement
+%% format_message_queue/2 which is the equivalent of format_status/2
+%% but where the second argument is specifically the priority_queue
+%% which contains the prioritised message_queue.
+%%
+%% 9) The function with_state/2 can be used to debug a process with
+%% heavyweight state (without needing to copy the entire state out of
+%% process as sys:get_status/1 would). Pass through a function which
+%% can be invoked on the state, get back the result. The state is not
+%% modified.
+%%
+%% 10) an mcall/1 function has been added for performing multiple
+%% call/3 in parallel. Unlike multi_call, which sends the same request
+%% to same-named processes residing on a supplied list of nodes, it
+%% operates on name/request pairs, where name is anything accepted by
+%% call/3, i.e. a pid, global name, local name, or local name on a
+%% particular node.
+%%
+%% 11) Internal buffer length is emitted as a core [RabbitMQ] metric.
+
+%% All modifications are (C) 2009-2020 VMware, Inc. or its affiliates.
+
+%% ``The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved via the world wide web at https://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% The Initial Developer of the Original Code is Ericsson Utvecklings AB.
+%% Portions created by Ericsson are Copyright 1999, Ericsson Utvecklings
+%% AB. All Rights Reserved.''
+%%
+%% $Id$
+%%
+-module(gen_server2).
+
+-ifdef(OTP_RELEASE).
+-if(?OTP_RELEASE >= 22).
+-compile(nowarn_deprecated_function).
+-endif.
+-endif.
+
+%%% ---------------------------------------------------
+%%%
+%%% The idea behind THIS server is that the user module
+%%% provides (different) functions to handle different
+%%% kind of inputs.
+%%% If the Parent process terminates the Module:terminate/2
+%%% function is called.
+%%%
+%%% The user module should export:
+%%%
+%%% init(Args)
+%%% ==> {ok, State}
+%%% {ok, State, Timeout}
+%%% {ok, State, Timeout, Backoff}
+%%% {ok, State, Timeout, Backoff, Module}
+%%% ignore
+%%% {stop, Reason}
+%%%
+%%% handle_call(Msg, {From, Tag}, State)
+%%%
+%%% ==> {reply, Reply, State}
+%%% {reply, Reply, State, Timeout}
+%%% {noreply, State}
+%%% {noreply, State, Timeout}
+%%% {stop, Reason, Reply, State}
+%%% Reason = normal | shutdown | Term, terminate(State) is called
+%%%
+%%% handle_cast(Msg, State)
+%%%
+%%% ==> {noreply, State}
+%%% {noreply, State, Timeout}
+%%% {stop, Reason, State}
+%%% Reason = normal | shutdown | Term terminate(State) is called
+%%%
+%%% handle_info(Info, State) Info is e.g. {'EXIT', P, R}, {nodedown, N}, ...
+%%%
+%%% ==> {noreply, State}
+%%% {noreply, State, Timeout}
+%%% {stop, Reason, State}
+%%% Reason = normal | shutdown | Term, terminate(State) is called
+%%%
+%%% terminate(Reason, State) Let the user module clean up
+%%% Reason = normal | shutdown | {shutdown, Term} | Term
+%%% always called when server terminates
+%%%
+%%% ==> ok | Term
+%%%
+%%% handle_pre_hibernate(State)
+%%%
+%%% ==> {hibernate, State}
+%%% {stop, Reason, State}
+%%% Reason = normal | shutdown | Term, terminate(State) is called
+%%%
+%%% handle_post_hibernate(State)
+%%%
+%%% ==> {noreply, State}
+%%% {stop, Reason, State}
+%%% Reason = normal | shutdown | Term, terminate(State) is called
+%%%
+%%% The work flow (of the server) can be described as follows:
+%%%
+%%% User module Generic
+%%% ----------- -------
+%%% start -----> start
+%%% init <----- .
+%%%
+%%% loop
+%%% handle_call <----- .
+%%% -----> reply
+%%%
+%%% handle_cast <----- .
+%%%
+%%% handle_info <----- .
+%%%
+%%% terminate <----- .
+%%%
+%%% -----> reply
+%%%
+%%%
+%%% ---------------------------------------------------
+
+%% API
+-export([start/3, start/4,
+ start_link/3, start_link/4,
+ stop/1, stop/3,
+ call/2, call/3,
+ cast/2, reply/2,
+ abcast/2, abcast/3,
+ multi_call/2, multi_call/3, multi_call/4,
+ mcall/1,
+ with_state/2,
+ enter_loop/3, enter_loop/4, enter_loop/5, enter_loop/6, wake_hib/1]).
+
+%% System exports
+-export([system_continue/3,
+ system_terminate/4,
+ system_code_change/4,
+ format_status/2]).
+
+%% Internal exports
+-export([init_it/6]).
+
+-import(error_logger, [format/2]).
+
+%% State record
+-record(gs2_state, {parent, name, state, mod, time,
+ timeout_state, queue, debug, prioritisers,
+ timer, emit_stats_fun, stop_stats_fun}).
+
+%%%=========================================================================
+%%% Specs. These exist only to shut up dialyzer's warnings
+%%%=========================================================================
+
+-type gs2_state() :: #gs2_state{}.
+
+-spec handle_common_termination(any(), atom(), gs2_state()) -> no_return().
+-spec hibernate(gs2_state()) -> no_return().
+-spec pre_hibernate(gs2_state()) -> no_return().
+-spec system_terminate(_, _, _, gs2_state()) -> no_return().
+
+-type millis() :: non_neg_integer().
+
+-dialyzer({nowarn_function, do_multi_call/4}).
+
+%%%=========================================================================
+%%% API
+%%%=========================================================================
+
+-callback init(Args :: term()) ->
+ {ok, State :: term()} |
+ {ok, State :: term(), timeout() | hibernate} |
+ {ok, State :: term(), timeout() | hibernate,
+ {backoff, millis(), millis(), millis()}} |
+ {ok, State :: term(), timeout() | hibernate,
+ {backoff, millis(), millis(), millis()}, atom()} |
+ ignore |
+ {stop, Reason :: term()}.
+-callback handle_call(Request :: term(), From :: {pid(), Tag :: term()},
+ State :: term()) ->
+ {reply, Reply :: term(), NewState :: term()} |
+ {reply, Reply :: term(), NewState :: term(), timeout() | hibernate} |
+ {noreply, NewState :: term()} |
+ {noreply, NewState :: term(), timeout() | hibernate} |
+ {stop, Reason :: term(),
+ Reply :: term(), NewState :: term()}.
+-callback handle_cast(Request :: term(), State :: term()) ->
+ {noreply, NewState :: term()} |
+ {noreply, NewState :: term(), timeout() | hibernate} |
+ {stop, Reason :: term(), NewState :: term()}.
+-callback handle_info(Info :: term(), State :: term()) ->
+ {noreply, NewState :: term()} |
+ {noreply, NewState :: term(), timeout() | hibernate} |
+ {stop, Reason :: term(), NewState :: term()}.
+-callback terminate(Reason :: (normal | shutdown | {shutdown, term()} | term()),
+ State :: term()) ->
+ ok | term().
+-callback code_change(OldVsn :: (term() | {down, term()}), State :: term(),
+ Extra :: term()) ->
+ {ok, NewState :: term()} | {error, Reason :: term()}.
+
+%% It's not possible to define "optional" -callbacks, so putting specs
+%% for handle_pre_hibernate/1 and handle_post_hibernate/1 will result
+%% in warnings (the same applied for the behaviour_info before).
+
+%%% -----------------------------------------------------------------
+%%% Starts a generic server.
+%%% start(Mod, Args, Options)
+%%% start(Name, Mod, Args, Options)
+%%% start_link(Mod, Args, Options)
+%%% start_link(Name, Mod, Args, Options) where:
+%%% Name ::= {local, atom()} | {global, atom()}
+%%% Mod ::= atom(), callback module implementing the 'real' server
+%%% Args ::= term(), init arguments (to Mod:init/1)
+%%% Options ::= [{timeout, Timeout} | {debug, [Flag]}]
+%%% Flag ::= trace | log | {logfile, File} | statistics | debug
+%%% (debug == log && statistics)
+%%% Returns: {ok, Pid} |
+%%% {error, {already_started, Pid}} |
+%%% {error, Reason}
+%%% -----------------------------------------------------------------
+start(Mod, Args, Options) ->
+ gen:start(?MODULE, nolink, Mod, Args, Options).
+
+start(Name, Mod, Args, Options) ->
+ gen:start(?MODULE, nolink, Name, Mod, Args, Options).
+
+start_link(Mod, Args, Options) ->
+ gen:start(?MODULE, link, Mod, Args, Options).
+
+start_link(Name, Mod, Args, Options) ->
+ gen:start(?MODULE, link, Name, Mod, Args, Options).
+
+%% -----------------------------------------------------------------
+%% Stop a generic server and wait for it to terminate.
+%% If the server is located at another node, that node will
+%% be monitored.
+%% -----------------------------------------------------------------
+stop(Name) ->
+ gen:stop(Name).
+
+stop(Name, Reason, Timeout) ->
+ gen:stop(Name, Reason, Timeout).
+
+%% -----------------------------------------------------------------
+%% Make a call to a generic server.
+%% If the server is located at another node, that node will
+%% be monitored.
+%% If the client is trapping exits and is linked server termination
+%% is handled here (? Shall we do that here (or rely on timeouts) ?).
+%%
+%% gen:call/3,4 returns {ok, Reply} on success; failures (noproc,
+%% timeout, nodedown, ...) are signalled as exits, which the
+%% old-style catch converts into {'EXIT', Reason}. We then re-exit
+%% with the failing MFA attached, mirroring OTP's gen_server:call.
+%% -----------------------------------------------------------------
+call(Name, Request) ->
+    case catch gen:call(Name, '$gen_call', Request) of
+        {ok,Res} ->
+            Res;
+        {'EXIT',Reason} ->
+            exit({Reason, {?MODULE, call, [Name, Request]}})
+    end.
+
+%% Same as call/2 but with an explicit Timeout (millis or 'infinity').
+call(Name, Request, Timeout) ->
+    case catch gen:call(Name, '$gen_call', Request, Timeout) of
+        {ok,Res} ->
+            Res;
+        {'EXIT',Reason} ->
+            exit({Reason, {?MODULE, call, [Name, Request, Timeout]}})
+    end.
+
+%% -----------------------------------------------------------------
+%% Make a cast to a generic server.
+%% -----------------------------------------------------------------
+cast({global,Name}, Request) ->
+ catch global:send(Name, {'$gen_cast', Request}),
+ ok;
+cast({Name,Node}=Dest, Request) when is_atom(Name), is_atom(Node) ->
+ catch (Dest ! {'$gen_cast', Request}),
+ ok;
+cast(Dest, Request) when is_atom(Dest); is_pid(Dest) ->
+ catch (Dest ! {'$gen_cast', Request}),
+ ok.
+
+%% -----------------------------------------------------------------
+%% Send a reply to the client.
+%% -----------------------------------------------------------------
+reply({To, Tag}, Reply) ->
+ catch To ! {Tag, Reply}.
+
+%% -----------------------------------------------------------------
+%% Asynchronous broadcast, returns nothing, it's just send'n pray
+%% -----------------------------------------------------------------
+abcast(Name, Request) when is_atom(Name) ->
+ do_abcast([node() | nodes()], Name, {'$gen_cast', Request}).
+
+abcast(Nodes, Name, Request) when is_list(Nodes), is_atom(Name) ->
+ do_abcast(Nodes, Name, {'$gen_cast', Request}).
+
+do_abcast([Node|Nodes], Name, Msg) when is_atom(Node) ->
+ catch ({Name, Node} ! Msg),
+ do_abcast(Nodes, Name, Msg);
+do_abcast([], _,_) -> abcast.
+
+%%% -----------------------------------------------------------------
+%%% Make a call to servers at several nodes.
+%%% Returns: {[Replies],[BadNodes]}
+%%% A Timeout can be given
+%%%
+%%% A middleman process is used in case late answers arrive after
+%%% the timeout. If they were allowed to clog the caller's message
+%%% queue, it would probably become confused. Late answers will
+%%% now arrive to the terminated middleman and so be discarded.
+%%% -----------------------------------------------------------------
+multi_call(Name, Req)
+  when is_atom(Name) ->
+    do_multi_call([node() | nodes()], Name, Req, infinity).
+
+multi_call(Nodes, Name, Req)
+  when is_list(Nodes), is_atom(Name) ->
+    do_multi_call(Nodes, Name, Req, infinity).
+
+%% With 'infinity' no middleman is needed, so do_multi_call/4 takes
+%% the direct path; finite timeouts go through the middleman (see
+%% do_multi_call/4 below).
+multi_call(Nodes, Name, Req, infinity) ->
+    do_multi_call(Nodes, Name, Req, infinity);
+multi_call(Nodes, Name, Req, Timeout)
+  when is_list(Nodes), is_atom(Name), is_integer(Timeout), Timeout >= 0 ->
+    do_multi_call(Nodes, Name, Req, Timeout).
+
+%%% -----------------------------------------------------------------
+%%% Make multiple calls to multiple servers, given pairs of servers
+%%% and messages.
+%%% Returns: {[{Dest, Reply}], [{Dest, Error}]}
+%%%
+%%% Dest can be pid() | RegName :: atom() |
+%%% {Name :: atom(), Node :: atom()} | {global, Name :: atom()}
+%%%
+%%% A middleman process is used to avoid clogging up the callers
+%%% message queue.
+%%% -----------------------------------------------------------------
+mcall(CallSpecs) ->
+ Tag = make_ref(),
+ {_, MRef} = spawn_monitor(
+ fun() ->
+ Refs = lists:foldl(
+ fun ({Dest, _Request}=S, Dict) ->
+ dict:store(do_mcall(S), Dest, Dict)
+ end, dict:new(), CallSpecs),
+ collect_replies(Tag, Refs, [], [])
+ end),
+ receive
+ {'DOWN', MRef, _, _, {Tag, Result}} -> Result;
+ {'DOWN', MRef, _, _, Reason} -> exit(Reason)
+ end.
+
+do_mcall({{global,Name}=Dest, Request}) ->
+ %% whereis_name is simply an ets lookup, and is precisely what
+ %% global:send/2 does, yet we need a Ref to put in the call to the
+ %% server, so invoking whereis_name makes a lot more sense here.
+ case global:whereis_name(Name) of
+ Pid when is_pid(Pid) ->
+ MRef = erlang:monitor(process, Pid),
+ catch msend(Pid, MRef, Request),
+ MRef;
+ undefined ->
+ Ref = make_ref(),
+ self() ! {'DOWN', Ref, process, Dest, noproc},
+ Ref
+ end;
+do_mcall({{Name,Node}=Dest, Request}) when is_atom(Name), is_atom(Node) ->
+ {_Node, MRef} = start_monitor(Node, Name), %% NB: we don't handle R6
+ catch msend(Dest, MRef, Request),
+ MRef;
+do_mcall({Dest, Request}) when is_atom(Dest); is_pid(Dest) ->
+ MRef = erlang:monitor(process, Dest),
+ catch msend(Dest, MRef, Request),
+ MRef.
+
+msend(Dest, MRef, Request) ->
+ erlang:send(Dest, {'$gen_call', {self(), MRef}, Request}, [noconnect]).
+
+collect_replies(Tag, Refs, Replies, Errors) ->
+ case dict:size(Refs) of
+ 0 -> exit({Tag, {Replies, Errors}});
+ _ -> receive
+ {MRef, Reply} ->
+ {Refs1, Replies1} = handle_call_result(MRef, Reply,
+ Refs, Replies),
+ collect_replies(Tag, Refs1, Replies1, Errors);
+ {'DOWN', MRef, _, _, Reason} ->
+ Reason1 = case Reason of
+ noconnection -> nodedown;
+ _ -> Reason
+ end,
+ {Refs1, Errors1} = handle_call_result(MRef, Reason1,
+ Refs, Errors),
+ collect_replies(Tag, Refs1, Replies, Errors1)
+ end
+ end.
+
+%% Record the outcome for MRef in AccList and drop it from Refs.
+%% Returns {Refs1, AccList1}; an unknown (already-erased) MRef leaves
+%% both unchanged.
+handle_call_result(MRef, Result, Refs, AccList) ->
+    %% we avoid the mailbox scanning cost of a call to erlang:demonitor/{1,2}
+    %% here, so we must cope with MRefs that we've already seen and erased
+    case dict:find(MRef, Refs) of
+        {ok, Pid} -> {dict:erase(MRef, Refs), [{Pid, Result}|AccList]};
+        _         -> {Refs, AccList}
+    end.
+
+%% -----------------------------------------------------------------
+%% Apply a function to a generic server's state.
+%% The fun runs inside the server process (see the '$with_state'
+%% clause of process_msg/2); its result -- or the {'EXIT', _} of a
+%% crash in the fun, caught server-side -- is returned to the caller.
+%% Failures of the call itself re-exit with context, as in call/2,3.
+%% -----------------------------------------------------------------
+with_state(Name, Fun) ->
+    case catch gen:call(Name, '$with_state', Fun, infinity) of
+        {ok,Res} ->
+            Res;
+        {'EXIT',Reason} ->
+            exit({Reason, {?MODULE, with_state, [Name, Fun]}})
+    end.
+
+%%-----------------------------------------------------------------
+%% enter_loop(Mod, Options, State, <ServerName>, <TimeOut>, <Backoff>) ->_
+%%
+%% Description: Makes an existing process into a gen_server.
+%% The calling process will enter the gen_server receive
+%% loop and become a gen_server process.
+%% The process *must* have been started using one of the
+%% start functions in proc_lib, see proc_lib(3).
+%% The user is responsible for any initialization of the
+%% process, including registering a name for it.
+%%-----------------------------------------------------------------
+enter_loop(Mod, Options, State) ->
+ enter_loop(Mod, Options, State, self(), infinity, undefined).
+
+enter_loop(Mod, Options, State, Backoff = {backoff, _, _ , _}) ->
+ enter_loop(Mod, Options, State, self(), infinity, Backoff);
+
+enter_loop(Mod, Options, State, ServerName = {_, _}) ->
+ enter_loop(Mod, Options, State, ServerName, infinity, undefined);
+
+enter_loop(Mod, Options, State, Timeout) ->
+ enter_loop(Mod, Options, State, self(), Timeout, undefined).
+
+enter_loop(Mod, Options, State, ServerName, Backoff = {backoff, _, _, _}) ->
+ enter_loop(Mod, Options, State, ServerName, infinity, Backoff);
+
+enter_loop(Mod, Options, State, ServerName, Timeout) ->
+ enter_loop(Mod, Options, State, ServerName, Timeout, undefined).
+
+enter_loop(Mod, Options, State, ServerName, Timeout, Backoff) ->
+ Name = get_proc_name(ServerName),
+ Parent = get_parent(),
+ Debug = debug_options(Name, Options),
+ Queue = priority_queue:new(),
+ Backoff1 = extend_backoff(Backoff),
+ {EmitStatsFun, StopStatsFun} = stats_funs(),
+ loop(init_stats(find_prioritisers(
+ #gs2_state { parent = Parent, name = Name, state = State,
+ mod = Mod, time = Timeout, timeout_state = Backoff1,
+ queue = Queue, debug = Debug,
+ emit_stats_fun = EmitStatsFun,
+ stop_stats_fun = StopStatsFun }))).
+
+%%%========================================================================
+%%% Gen-callback functions
+%%%========================================================================
+
+%%% ---------------------------------------------------
+%%% Initiate the new process.
+%%% Register the name using the Rfunc function
+%%% Calls the Mod:init/Args function.
+%%% Finally an acknowledge is sent to Parent and the main
+%%% loop is entered.
+%%% ---------------------------------------------------
+init_it(Starter, self, Name, Mod, Args, Options) ->
+ init_it(Starter, self(), Name, Mod, Args, Options);
+init_it(Starter, Parent, Name0, Mod, Args, Options) ->
+ Name = name(Name0),
+ Debug = debug_options(Name, Options),
+ Queue = priority_queue:new(),
+ {EmitStatsFun, StopStatsFun} = stats_funs(),
+ GS2State = find_prioritisers(
+ #gs2_state { parent = Parent,
+ name = Name,
+ mod = Mod,
+ queue = Queue,
+ debug = Debug,
+ emit_stats_fun = EmitStatsFun,
+ stop_stats_fun = StopStatsFun }),
+ case catch Mod:init(Args) of
+ {ok, State} ->
+ proc_lib:init_ack(Starter, {ok, self()}),
+ loop(init_stats(GS2State#gs2_state { state = State,
+ time = infinity,
+ timeout_state = undefined }));
+ {ok, State, Timeout} ->
+ proc_lib:init_ack(Starter, {ok, self()}),
+ loop(init_stats(
+ GS2State#gs2_state { state = State,
+ time = Timeout,
+ timeout_state = undefined }));
+ {ok, State, Timeout, Backoff = {backoff, _, _, _}} ->
+ Backoff1 = extend_backoff(Backoff),
+ proc_lib:init_ack(Starter, {ok, self()}),
+ loop(init_stats(GS2State#gs2_state { state = State,
+ time = Timeout,
+ timeout_state = Backoff1 }));
+ {ok, State, Timeout, Backoff = {backoff, _, _, _}, Mod1} ->
+ Backoff1 = extend_backoff(Backoff),
+ proc_lib:init_ack(Starter, {ok, self()}),
+ loop(init_stats(find_prioritisers(
+ GS2State#gs2_state { mod = Mod1,
+ state = State,
+ time = Timeout,
+ timeout_state = Backoff1 })));
+ {stop, Reason} ->
+ %% For consistency, we must make sure that the
+ %% registered name (if any) is unregistered before
+ %% the parent process is notified about the failure.
+ %% (Otherwise, the parent process could get
+ %% an 'already_started' error if it immediately
+ %% tried starting the process again.)
+ unregister_name(Name0),
+ proc_lib:init_ack(Starter, {error, Reason}),
+ exit(Reason);
+ ignore ->
+ unregister_name(Name0),
+ proc_lib:init_ack(Starter, ignore),
+ exit(normal);
+ {'EXIT', Reason} ->
+ unregister_name(Name0),
+ proc_lib:init_ack(Starter, {error, Reason}),
+ exit(Reason);
+ Else ->
+ Error = {bad_return_value, Else},
+ proc_lib:init_ack(Starter, {error, Error}),
+ exit(Error)
+ end.
+
+%% Extract the bare name from a {local,_}/{global,_} server-name
+%% spec; anything else (pid or plain term) passes through unchanged.
+name({local,Name}) -> Name;
+name({global,Name}) -> Name;
+%% name(Pid) when is_pid(Pid) -> Pid;
+%% when R12 goes away, drop the line beneath and uncomment the line above
+name(Name) -> Name.
+
+%% Undo the registration performed at start, so a failed init does
+%% not leave a stale name behind (called from init_it/6).
+unregister_name({local,Name}) ->
+    _ = (catch unregister(Name));
+unregister_name({global,Name}) ->
+    _ = global:unregister_name(Name);
+unregister_name(Pid) when is_pid(Pid) ->
+    Pid;
+%% Under R12 let's just ignore it, as we have a single term as Name.
+%% On R13 it will never get here, as we get tuple with 'local/global' atom.
+unregister_name(_Name) -> ok.
+
+%% Attach a fresh random-number state to a {backoff, ...} spec; the
+%% randomness is used by adjust_timeout_state/3 to jitter the
+%% hibernation backoff. 'undefined' means backoff is not in use.
+extend_backoff(undefined) ->
+    undefined;
+extend_backoff({backoff, InitialTimeout, MinimumTimeout, DesiredHibPeriod}) ->
+    {backoff, InitialTimeout, MinimumTimeout, DesiredHibPeriod,
+     rand:seed(exsplus)}.
+
+%%%========================================================================
+%%% Internal functions
+%%%========================================================================
+%%% ---------------------------------------------------
+%%% The MAIN loop.
+%%% ---------------------------------------------------
+%% When the callback asked to hibernate and no backoff is configured,
+%% we only actually hibernate once the priority queue is empty;
+%% otherwise there is queued work to process first.
+loop(GS2State = #gs2_state { time = hibernate,
+                             timeout_state = undefined,
+                             queue = Queue }) ->
+    case priority_queue:is_empty(Queue) of
+        true ->
+            pre_hibernate(GS2State);
+        false ->
+            process_next_msg(GS2State)
+    end;
+
+loop(GS2State) ->
+    process_next_msg(drain(GS2State)).
+
+%% Move every message already sitting in the mailbox into the
+%% priority queue (via in/2) without blocking.
+drain(GS2State) ->
+    receive
+        Input -> drain(in(Input, GS2State))
+    after 0 -> GS2State
+    end.
+
+%% Take the next message from the priority queue and handle it; when
+%% the queue is empty, wait for fresh input. The {Time1, HibOnTimeout,
+%% GS2State} triple decides how long to wait and whether expiry means
+%% "go hibernate" (backoff mode) or an ordinary 'timeout' message for
+%% the callback module.
+process_next_msg(GS2State0 = #gs2_state { time = Time,
+                                          timeout_state = TimeoutState,
+                                          queue = Queue }) ->
+    case priority_queue:out(Queue) of
+        {{value, Msg}, Queue1} ->
+            GS2State = ensure_stats_timer(GS2State0),
+            process_msg(Msg, GS2State#gs2_state { queue = Queue1 });
+        {empty, Queue1} ->
+            {Time1, HibOnTimeout, GS2State}
+                = case {Time, TimeoutState} of
+                      {hibernate, {backoff, Current, _Min, _Desired, _RSt}} ->
+                          %% backoff mode: stay awake for the current
+                          %% backoff interval, then hibernate.
+                          {Current, true, stop_stats_timer(GS2State0)};
+                      {hibernate, _} ->
+                          %% wake_hib/7 will set Time to hibernate. If
+                          %% we were woken and didn't receive a msg
+                          %% then we will get here and need a sensible
+                          %% value for Time1, otherwise we crash.
+                          %% R13B1 always waits infinitely when waking
+                          %% from hibernation, so that's what we do
+                          %% here too.
+                          {infinity, false, GS2State0};
+                      _ -> {Time, false, GS2State0}
+                  end,
+            receive
+                Input ->
+                    %% Time could be 'hibernate' here, so *don't* call loop
+                    process_next_msg(
+                      drain(in(Input, GS2State #gs2_state { queue = Queue1 })))
+            after Time1 ->
+                    case HibOnTimeout of
+                        true ->
+                            pre_hibernate(
+                              GS2State #gs2_state { queue = Queue1 });
+                        false ->
+                            process_msg(timeout,
+                                        GS2State #gs2_state { queue = Queue1 })
+                    end
+            end
+    end.
+
+wake_hib(GS2State = #gs2_state { timeout_state = TS }) ->
+ TimeoutState1 = case TS of
+ undefined ->
+ undefined;
+ {SleptAt, TimeoutState} ->
+ adjust_timeout_state(SleptAt,
+ erlang:monotonic_time(),
+ TimeoutState)
+ end,
+ post_hibernate(
+ drain(GS2State #gs2_state { timeout_state = TimeoutState1 })).
+
+hibernate(GS2State = #gs2_state { timeout_state = TimeoutState }) ->
+ TS = case TimeoutState of
+ undefined -> undefined;
+ {backoff, _, _, _, _} -> {erlang:monotonic_time(),
+ TimeoutState}
+ end,
+ proc_lib:hibernate(?MODULE, wake_hib,
+ [GS2State #gs2_state { timeout_state = TS }]).
+
+pre_hibernate(GS2State0 = #gs2_state { state = State,
+ mod = Mod,
+ emit_stats_fun = EmitStatsFun }) ->
+ GS2State = EmitStatsFun(stop_stats_timer(GS2State0)),
+ case erlang:function_exported(Mod, handle_pre_hibernate, 1) of
+ true ->
+ case catch Mod:handle_pre_hibernate(State) of
+ {hibernate, NState} ->
+ hibernate(GS2State #gs2_state { state = NState } );
+ Reply ->
+ handle_common_termination(Reply, pre_hibernate, GS2State)
+ end;
+ false ->
+ hibernate(GS2State)
+ end.
+
+post_hibernate(GS2State0 = #gs2_state { state = State,
+ mod = Mod }) ->
+ GS2State = ensure_stats_timer(GS2State0),
+ case erlang:function_exported(Mod, handle_post_hibernate, 1) of
+ true ->
+ case catch Mod:handle_post_hibernate(State) of
+ {noreply, NState} ->
+ process_next_msg(GS2State #gs2_state { state = NState,
+ time = infinity });
+ {noreply, NState, Time} ->
+ process_next_msg(GS2State #gs2_state { state = NState,
+ time = Time });
+ Reply ->
+ handle_common_termination(Reply, post_hibernate, GS2State)
+ end;
+ false ->
+ %% use hibernate here, not infinity. This matches
+ %% R13B. The key is that we should be able to get through
+ %% to process_msg calling sys:handle_system_msg with Time
+ %% still set to hibernate, iff that msg is the very msg
+ %% that woke us up (or the first msg we receive after
+ %% waking up).
+ process_next_msg(GS2State #gs2_state { time = hibernate })
+ end.
+
+%% Recompute the backoff timeout after waking from hibernation.
+%% The observed gap between the last two messages is the nap length
+%% plus the timeout we waited before sleeping. If that gap comfortably
+%% exceeded the desired hibernation period, halve the timeout (not
+%% below the minimum) so we hibernate sooner next time; otherwise keep
+%% the current timeout. A random amount in [1, Base] is then added as
+%% jitter, so the new timeout lies in (Base, 2*Base].
+adjust_timeout_state(SleptAt, AwokeAt, {backoff, CurrentTO, MinimumTO,
+                                        DesiredHibPeriod, RandomState}) ->
+    NapLengthMicros = erlang:convert_time_unit(AwokeAt - SleptAt,
+                                               native, micro_seconds),
+    CurrentMicros = CurrentTO * 1000,
+    MinimumMicros = MinimumTO * 1000,
+    DesiredHibMicros = DesiredHibPeriod * 1000,
+    GapBetweenMessagesMicros = NapLengthMicros + CurrentMicros,
+    Base =
+        %% If enough time has passed between the last two messages then we
+        %% should consider sleeping sooner. Otherwise stay awake longer.
+        case GapBetweenMessagesMicros > (MinimumMicros + DesiredHibMicros) of
+            true -> lists:max([MinimumTO, CurrentTO div 2]);
+            false -> CurrentTO
+        end,
+    {Extra, RandomState1} = rand:uniform_s(Base, RandomState),
+    CurrentTO1 = Base + Extra,
+    {backoff, CurrentTO1, MinimumTO, DesiredHibPeriod, RandomState1}.
+
+in({'$gen_cast', Msg} = Input,
+ GS2State = #gs2_state { prioritisers = {_, F, _} }) ->
+ in(Input, F(Msg, GS2State), GS2State);
+in({'$gen_call', From, Msg} = Input,
+ GS2State = #gs2_state { prioritisers = {F, _, _} }) ->
+ in(Input, F(Msg, From, GS2State), GS2State);
+in({'$with_state', _From, _Fun} = Input, GS2State) ->
+ in(Input, 0, GS2State);
+in({'EXIT', Parent, _R} = Input, GS2State = #gs2_state { parent = Parent }) ->
+ in(Input, infinity, GS2State);
+in({system, _From, _Req} = Input, GS2State) ->
+ in(Input, infinity, GS2State);
+in(emit_gen_server2_stats, GS2State = #gs2_state{ emit_stats_fun = EmitStatsFun}) ->
+ next_stats_timer(EmitStatsFun(GS2State));
+in(Input, GS2State = #gs2_state { prioritisers = {_, _, F} }) ->
+ in(Input, F(Input, GS2State), GS2State).
+
+in(_Input, drop, GS2State) ->
+ GS2State;
+
+in(Input, Priority, GS2State = #gs2_state { queue = Queue }) ->
+ GS2State # gs2_state { queue = priority_queue:in(Input, Priority, Queue) }.
+
+process_msg({system, From, Req},
+ GS2State = #gs2_state { parent = Parent, debug = Debug }) ->
+ case Req of
+ %% This clause will match only in R16B03.
+ %% Since 17.0 replace_state is not a system message.
+ {replace_state, StateFun} ->
+ GS2State1 = StateFun(GS2State),
+ _ = gen:reply(From, GS2State1),
+ system_continue(Parent, Debug, GS2State1);
+ _ ->
+ %% gen_server puts Hib on the end as the 7th arg, but that version
+ %% of the fun seems not to be documented so leaving out for now.
+ sys:handle_system_msg(Req, From, Parent, ?MODULE, Debug, GS2State)
+ end;
+process_msg({'$with_state', From, Fun},
+ GS2State = #gs2_state{state = State}) ->
+ reply(From, catch Fun(State)),
+ loop(GS2State);
+process_msg({'EXIT', Parent, Reason} = Msg,
+ GS2State = #gs2_state { parent = Parent }) ->
+ terminate(Reason, Msg, GS2State);
+process_msg(Msg, GS2State = #gs2_state { debug = [] }) ->
+ handle_msg(Msg, GS2State);
+process_msg(Msg, GS2State = #gs2_state { name = Name, debug = Debug }) ->
+ Debug1 = sys:handle_debug(Debug, fun print_event/3, Name, {in, Msg}),
+ handle_msg(Msg, GS2State #gs2_state { debug = Debug1 }).
+
+%%% ---------------------------------------------------
+%%% Send/receive functions
+%%% ---------------------------------------------------
+
+do_multi_call(Nodes, Name, Req, infinity) ->
+ Tag = make_ref(),
+ Monitors = send_nodes(Nodes, Name, Tag, Req),
+ rec_nodes(Tag, Monitors, Name, undefined);
+do_multi_call(Nodes, Name, Req, Timeout) ->
+ Tag = make_ref(),
+ Caller = self(),
+ Receiver =
+ spawn(
+ fun () ->
+ %% Middleman process. Should be unsensitive to regular
+ %% exit signals. The synchronization is needed in case
+ %% the receiver would exit before the caller started
+ %% the monitor.
+ process_flag(trap_exit, true),
+ Mref = erlang:monitor(process, Caller),
+ receive
+ {Caller,Tag} ->
+ Monitors = send_nodes(Nodes, Name, Tag, Req),
+ TimerId = erlang:start_timer(Timeout, self(), ok),
+ Result = rec_nodes(Tag, Monitors, Name, TimerId),
+ exit({self(),Tag,Result});
+ {'DOWN',Mref,_,_,_} ->
+ %% Caller died before sending us the go-ahead.
+ %% Give up silently.
+ exit(normal)
+ end
+ end),
+ Mref = erlang:monitor(process, Receiver),
+ Receiver ! {self(),Tag},
+ receive
+ {'DOWN',Mref,_,_,{Receiver,Tag,Result}} ->
+ Result;
+ {'DOWN',Mref,_,_,Reason} ->
+ %% The middleman code failed. Or someone did
+ %% exit(_, kill) on the middleman process => Reason==killed
+ exit(Reason)
+ end.
+
+send_nodes(Nodes, Name, Tag, Req) ->
+ send_nodes(Nodes, Name, Tag, Req, []).
+
+send_nodes([Node|Tail], Name, Tag, Req, Monitors)
+ when is_atom(Node) ->
+ Monitor = start_monitor(Node, Name),
+ %% Handle non-existing names in rec_nodes.
+ catch {Name, Node} ! {'$gen_call', {self(), {Tag, Node}}, Req},
+ send_nodes(Tail, Name, Tag, Req, [Monitor | Monitors]);
+send_nodes([_Node|Tail], Name, Tag, Req, Monitors) ->
+ %% Skip non-atom Node
+ send_nodes(Tail, Name, Tag, Req, Monitors);
+send_nodes([], _Name, _Tag, _Req, Monitors) ->
+ Monitors.
+
+%% Against old nodes:
+%% If no reply has been delivered within 2 secs. (per node) check that
+%% the server really exists and wait for ever for the answer.
+%%
+%% Against contemporary nodes:
+%% Wait for reply, server 'DOWN', or timeout from TimerId.
+
+rec_nodes(Tag, Nodes, Name, TimerId) ->
+ rec_nodes(Tag, Nodes, Name, [], [], 2000, TimerId).
+
+rec_nodes(Tag, [{N,R}|Tail], Name, Badnodes, Replies, Time, TimerId ) ->
+ receive
+ {'DOWN', R, _, _, _} ->
+ rec_nodes(Tag, Tail, Name, [N|Badnodes], Replies, Time, TimerId);
+ {{Tag, N}, Reply} -> %% Tag is bound !!!
+ unmonitor(R),
+ rec_nodes(Tag, Tail, Name, Badnodes,
+ [{N,Reply}|Replies], Time, TimerId);
+ {timeout, TimerId, _} ->
+ unmonitor(R),
+ %% Collect all replies that already have arrived
+ rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies)
+ end;
+rec_nodes(Tag, [N|Tail], Name, Badnodes, Replies, Time, TimerId) ->
+ %% R6 node
+ receive
+ {nodedown, N} ->
+ monitor_node(N, false),
+ rec_nodes(Tag, Tail, Name, [N|Badnodes], Replies, 2000, TimerId);
+ {{Tag, N}, Reply} -> %% Tag is bound !!!
+ receive {nodedown, N} -> ok after 0 -> ok end,
+ monitor_node(N, false),
+ rec_nodes(Tag, Tail, Name, Badnodes,
+ [{N,Reply}|Replies], 2000, TimerId);
+ {timeout, TimerId, _} ->
+ receive {nodedown, N} -> ok after 0 -> ok end,
+ monitor_node(N, false),
+ %% Collect all replies that already have arrived
+ rec_nodes_rest(Tag, Tail, Name, [N | Badnodes], Replies)
+ after Time ->
+ case rpc:call(N, erlang, whereis, [Name]) of
+ Pid when is_pid(Pid) -> % It exists try again.
+ rec_nodes(Tag, [N|Tail], Name, Badnodes,
+ Replies, infinity, TimerId);
+ _ -> % badnode
+ receive {nodedown, N} -> ok after 0 -> ok end,
+ monitor_node(N, false),
+ rec_nodes(Tag, Tail, Name, [N|Badnodes],
+ Replies, 2000, TimerId)
+ end
+ end;
+rec_nodes(_, [], _, Badnodes, Replies, _, TimerId) ->
+ case catch erlang:cancel_timer(TimerId) of
+ false -> % It has already sent it's message
+ receive
+ {timeout, TimerId, _} -> ok
+ after 0 ->
+ ok
+ end;
+ _ -> % Timer was cancelled, or TimerId was 'undefined'
+ ok
+ end,
+ {Replies, Badnodes}.
+
+%% Collect all replies that already have arrived
+rec_nodes_rest(Tag, [{N,R}|Tail], Name, Badnodes, Replies) ->
+ receive
+ {'DOWN', R, _, _, _} ->
+ rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies);
+ {{Tag, N}, Reply} -> %% Tag is bound !!!
+ unmonitor(R),
+ rec_nodes_rest(Tag, Tail, Name, Badnodes, [{N,Reply}|Replies])
+ after 0 ->
+ unmonitor(R),
+ rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies)
+ end;
+rec_nodes_rest(Tag, [N|Tail], Name, Badnodes, Replies) ->
+ %% R6 node
+ receive
+ {nodedown, N} ->
+ monitor_node(N, false),
+ rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies);
+ {{Tag, N}, Reply} -> %% Tag is bound !!!
+ receive {nodedown, N} -> ok after 0 -> ok end,
+ monitor_node(N, false),
+ rec_nodes_rest(Tag, Tail, Name, Badnodes, [{N,Reply}|Replies])
+ after 0 ->
+ receive {nodedown, N} -> ok after 0 -> ok end,
+ monitor_node(N, false),
+ rec_nodes_rest(Tag, Tail, Name, [N|Badnodes], Replies)
+ end;
+rec_nodes_rest(_Tag, [], _Name, Badnodes, Replies) ->
+ {Replies, Badnodes}.
+
+
+%%% ---------------------------------------------------
+%%% Monitor functions
+%%% ---------------------------------------------------
+
+start_monitor(Node, Name) when is_atom(Node), is_atom(Name) ->
+ if node() =:= nonode@nohost, Node =/= nonode@nohost ->
+ Ref = make_ref(),
+ self() ! {'DOWN', Ref, process, {Name, Node}, noconnection},
+ {Node, Ref};
+ true ->
+ case catch erlang:monitor(process, {Name, Node}) of
+ {'EXIT', _} ->
+ %% Remote node is R6
+ monitor_node(Node, true),
+ Node;
+ Ref when is_reference(Ref) ->
+ {Node, Ref}
+ end
+ end.
+
+%% Cancels a monitor started with Ref=erlang:monitor(_, _).
+%% Also flushes (without blocking) any 'DOWN' message for Ref that was
+%% delivered before the demonitor took effect, so the caller's mailbox
+%% is left clean. Always returns true.
+unmonitor(Ref) when is_reference(Ref) ->
+    erlang:demonitor(Ref),
+    receive
+        {'DOWN', Ref, _, _, _} ->
+            true
+    after 0 ->
+            true
+    end.
+
+%%% ---------------------------------------------------
+%%% Message handling functions
+%%% ---------------------------------------------------
+
+%% Route a non-call message to the appropriate callback: casts to
+%% handle_cast/2, everything else to handle_info/2.
+dispatch({'$gen_cast', Msg}, Mod, State) ->
+    Mod:handle_cast(Msg, State);
+dispatch(Info, Mod, State) ->
+    Mod:handle_info(Info, State).
+
+%% Send the reply and, when sys debugging is on, record the 'out'
+%% event; returns the new debug state ([] when debugging is off).
+common_reply(_Name, From, Reply, _NState, [] = _Debug) ->
+    reply(From, Reply),
+    [];
+common_reply(Name, {To, _Tag} = From, Reply, NState, Debug) ->
+    reply(From, Reply),
+    sys:handle_debug(Debug, fun print_event/3, Name, {out, Reply, To, NState}).
+
+%% Record a 'noreply' debug event when sys debugging is on.
+common_noreply(_Name, _NState, [] = _Debug) ->
+    [];
+common_noreply(Name, NState, Debug) ->
+    sys:handle_debug(Debug, fun print_event/3, Name, {noreply, NState}).
+
+%% Record a 'become' (callback-module change) debug event when sys
+%% debugging is on.
+common_become(_Name, _Mod, _NState, [] = _Debug) ->
+    [];
+common_become(Name, Mod, NState, Debug) ->
+    sys:handle_debug(Debug, fun print_event/3, Name, {become, Mod, NState}).
+
+handle_msg({'$gen_call', From, Msg}, GS2State = #gs2_state { mod = Mod,
+ state = State,
+ name = Name,
+ debug = Debug }) ->
+ case catch Mod:handle_call(Msg, From, State) of
+ {reply, Reply, NState} ->
+ Debug1 = common_reply(Name, From, Reply, NState, Debug),
+ loop(GS2State #gs2_state { state = NState,
+ time = infinity,
+ debug = Debug1 });
+ {reply, Reply, NState, Time1} ->
+ Debug1 = common_reply(Name, From, Reply, NState, Debug),
+ loop(GS2State #gs2_state { state = NState,
+ time = Time1,
+ debug = Debug1});
+ {stop, Reason, Reply, NState} ->
+ {'EXIT', R} =
+ (catch terminate(Reason, Msg,
+ GS2State #gs2_state { state = NState })),
+ _ = common_reply(Name, From, Reply, NState, Debug),
+ exit(R);
+ Other ->
+ handle_common_reply(Other, Msg, GS2State)
+ end;
+handle_msg(Msg, GS2State = #gs2_state { mod = Mod, state = State }) ->
+ Reply = (catch dispatch(Msg, Mod, State)),
+ handle_common_reply(Reply, Msg, GS2State).
+
+handle_common_reply(Reply, Msg, GS2State = #gs2_state { name = Name,
+ debug = Debug}) ->
+ case Reply of
+ {noreply, NState} ->
+ Debug1 = common_noreply(Name, NState, Debug),
+ loop(GS2State #gs2_state {state = NState,
+ time = infinity,
+ debug = Debug1});
+ {noreply, NState, Time1} ->
+ Debug1 = common_noreply(Name, NState, Debug),
+ loop(GS2State #gs2_state {state = NState,
+ time = Time1,
+ debug = Debug1});
+ {become, Mod, NState} ->
+ Debug1 = common_become(Name, Mod, NState, Debug),
+ loop(find_prioritisers(
+ GS2State #gs2_state { mod = Mod,
+ state = NState,
+ time = infinity,
+ debug = Debug1 }));
+ {become, Mod, NState, Time1} ->
+ Debug1 = common_become(Name, Mod, NState, Debug),
+ loop(find_prioritisers(
+ GS2State #gs2_state { mod = Mod,
+ state = NState,
+ time = Time1,
+ debug = Debug1 }));
+ _ ->
+ handle_common_termination(Reply, Msg, GS2State)
+ end.
+
+%% Shared tail for every callback result that is not a normal
+%% reply/noreply/become: an explicit {stop, ...}, a crash caught as
+%% {'EXIT', _}, or an unrecognised return value (reported as
+%% bad_return_value). Never returns -- terminate/3 always exits.
+handle_common_termination(Reply, Msg, GS2State) ->
+    case Reply of
+        {stop, Reason, NState} ->
+            terminate(Reason, Msg, GS2State #gs2_state { state = NState });
+        {'EXIT', What} ->
+            terminate(What, Msg, GS2State);
+        _ ->
+            terminate({bad_return_value, Reply}, Msg, GS2State)
+    end.
+
+%%-----------------------------------------------------------------
+%% Callback functions for system messages handling.
+%%-----------------------------------------------------------------
+system_continue(Parent, Debug, GS2State) ->
+ loop(GS2State #gs2_state { parent = Parent, debug = Debug }).
+
+system_terminate(Reason, _Parent, Debug, GS2State) ->
+ terminate(Reason, [], GS2State #gs2_state { debug = Debug }).
+
+system_code_change(GS2State = #gs2_state { mod = Mod,
+ state = State },
+ _Module, OldVsn, Extra) ->
+ case catch Mod:code_change(OldVsn, State, Extra) of
+ {ok, NewState} ->
+ NewGS2State = find_prioritisers(
+ GS2State #gs2_state { state = NewState }),
+ {ok, [NewGS2State]};
+ Else ->
+ Else
+ end.
+
+%%-----------------------------------------------------------------
+%% Format debug messages. Print them as the call-back module sees
+%% them, not as the real erlang messages. Use trace for that.
+%%-----------------------------------------------------------------
+print_event(Dev, {in, Msg}, Name) ->
+ case Msg of
+ {'$gen_call', {From, _Tag}, Call} ->
+ io:format(Dev, "*DBG* ~p got call ~p from ~w~n",
+ [Name, Call, From]);
+ {'$gen_cast', Cast} ->
+ io:format(Dev, "*DBG* ~p got cast ~p~n",
+ [Name, Cast]);
+ _ ->
+ io:format(Dev, "*DBG* ~p got ~p~n", [Name, Msg])
+ end;
+print_event(Dev, {out, Msg, To, State}, Name) ->
+ io:format(Dev, "*DBG* ~p sent ~p to ~w, new state ~w~n",
+ [Name, Msg, To, State]);
+print_event(Dev, {noreply, State}, Name) ->
+ io:format(Dev, "*DBG* ~p new state ~w~n", [Name, State]);
+print_event(Dev, Event, Name) ->
+ io:format(Dev, "*DBG* ~p dbg ~p~n", [Name, Event]).
+
+
+%%% ---------------------------------------------------
+%%% Terminate the server.
+%%% ---------------------------------------------------
+
+-spec terminate(_, _, _) -> no_return().
+
+%% Stop the stats machinery, run Mod:terminate/2, then exit. A crash
+%% inside the callback is reported (with the original Reason as root
+%% cause) and re-raised. 'normal', 'shutdown' and {shutdown, _} are
+%% quiet exits; any other reason is reported via error_info/6 first.
+terminate(Reason, Msg, #gs2_state { name = Name,
+                                    mod = Mod,
+                                    state = State,
+                                    debug = Debug,
+                                    stop_stats_fun = StopStatsFun
+                                  } = GS2State) ->
+    StopStatsFun(stop_stats_timer(GS2State)),
+    case catch Mod:terminate(Reason, State) of
+        {'EXIT', R} ->
+            error_info(R, Reason, Name, Msg, State, Debug),
+            exit(R);
+        _ ->
+            case Reason of
+                normal ->
+                    exit(normal);
+                shutdown ->
+                    exit(shutdown);
+                {shutdown,_}=Shutdown ->
+                    exit(Shutdown);
+                _ ->
+                    error_info(Reason, undefined, Name, Msg, State, Debug),
+                    exit(Reason)
+            end
+    end.
+
+%% Emit the classic "Generic server terminating" report, plus the sys
+%% debug log. RootCause is 'undefined' unless the terminate/2 callback
+%% itself crashed, in which case it is the original termination reason.
+error_info(_Reason, _RootCause, application_controller, _Msg, _State, _Debug) ->
+    %% OTP-5811 Don't send an error report if it's the system process
+    %% application_controller which is terminating - let init take care
+    %% of it instead
+    ok;
+error_info(Reason, RootCause, Name, Msg, State, Debug) ->
+    Reason1 = error_reason(Reason),
+    Fmt =
+        "** Generic server ~p terminating~n"
+        "** Last message in was ~p~n"
+        "** When Server state == ~p~n"
+        "** Reason for termination == ~n** ~p~n",
+    case RootCause of
+        undefined -> format(Fmt, [Name, Msg, State, Reason1]);
+        _         -> format(Fmt ++ "** In 'terminate' callback "
+                            "with reason ==~n** ~p~n",
+                            [Name, Msg, State, Reason1,
+                             error_reason(RootCause)])
+    end,
+    sys:print_log(Debug),
+    ok.
+
+%% Enrich an {undef, ...} exit reason with a hint as to why the call
+%% failed: the module could not be loaded, or the function is not
+%% exported. Any other reason is passed through untouched. Note the
+%% exported check is only attempted once the module is known to be
+%% loaded (the Args element may be an arity rather than a list).
+error_reason({undef, [{Mod, Fun, Args} | _] = MFAs} = Reason) ->
+    case code:is_loaded(Mod) of
+        false ->
+            {'module could not be loaded', MFAs};
+        _Loaded ->
+            case erlang:function_exported(Mod, Fun, length(Args)) of
+                true  -> Reason;
+                false -> {'function not exported', MFAs}
+            end
+    end;
+error_reason(Reason) ->
+    Reason.
+
+%%% ---------------------------------------------------
+%%% Misc. functions.
+%%% ---------------------------------------------------
+
+%% Linear scan of a 2-tuple option list: return {ok, Value} for the
+%% first {Key, Value} pair whose key matches, or false if none does.
+%% Elements that are not matching 2-tuples are skipped.
+opt(Key, [{Key, Value} | _Rest]) -> {ok, Value};
+opt(Key, [_Other | Rest])        -> opt(Key, Rest);
+opt(_Key, [])                    -> false.
+
+%% Extract the sys debug options from the start options; absent or
+%% malformed options fall back to dbg_options/2 defaults.
+debug_options(Name, Opts) ->
+    case opt(debug, Opts) of
+        {ok, Options} -> dbg_options(Name, Options);
+        _             -> dbg_options(Name, [])
+    end.
+
+%% With no explicit debug options, honour the -generic_debug emulator
+%% flag (enables log + statistics); otherwise use what was given.
+dbg_options(Name, []) ->
+    Opts =
+        case init:get_argument(generic_debug) of
+            error ->
+                [];
+            _ ->
+                [log, statistics]
+        end,
+    dbg_opts(Name, Opts);
+dbg_options(Name, Opts) ->
+    dbg_opts(Name, Opts).
+
+%% Convert option atoms into sys debug structures; erroneous options
+%% are reported and ignored rather than crashing startup.
+dbg_opts(Name, Opts) ->
+    case catch sys:debug_options(Opts) of
+        {'EXIT',_} ->
+            format("~p: ignoring erroneous debug options - ~p~n",
+                   [Name, Opts]),
+            [];
+        Dbg ->
+            Dbg
+    end.
+
+%% Resolve the server's own identity from the registration argument,
+%% verifying that this process really is registered under that name.
+get_proc_name(Pid) when is_pid(Pid) ->
+    Pid;
+get_proc_name({local, Name}) ->
+    case process_info(self(), registered_name) of
+        {registered_name, Name} ->
+            Name;
+        {registered_name, _Name} ->
+            %% Registered, but under a different name.
+            exit(process_not_registered);
+        [] ->
+            exit(process_not_registered)
+    end;
+get_proc_name({global, Name}) ->
+    case whereis_name(Name) of
+        undefined ->
+            exit(process_not_registered_globally);
+        Pid when Pid =:= self() ->
+            Name;
+        _Pid ->
+            exit(process_not_registered_globally)
+    end.
+
+%% Find the parent (the process that proc_lib-spawned us) from the
+%% '$ancestors' process-dictionary entry set by proc_lib. Exits if the
+%% process was not started via proc_lib.
+get_parent() ->
+    case get('$ancestors') of
+        [Parent | _] when is_pid(Parent)->
+            Parent;
+        [Parent | _] when is_atom(Parent)->
+            name_to_pid(Parent);
+        _ ->
+            exit(process_was_not_started_by_proc_lib)
+    end.
+
+%% Resolve a registered name to a pid, trying the local registry first
+%% and then the global one; exits if neither knows the name.
+name_to_pid(Name) ->
+    case whereis(Name) of
+        undefined ->
+            case whereis_name(Name) of
+                undefined ->
+                    exit(could_not_find_registered_name);
+                Pid ->
+                    Pid
+            end;
+        Pid ->
+            Pid
+    end.
+
+%% Resolve a globally registered name by reading the 'global_names'
+%% ETS table directly (presumably global's internal table, read here
+%% to avoid a round trip through the global server - NOTE(review):
+%% this depends on global's internal record layout; confirm against
+%% the targeted OTP versions). Local pids are additionally checked for
+%% liveness; remote pids are returned as-is.
+whereis_name(Name) ->
+    case ets:lookup(global_names, Name) of
+        [{_Name, Pid, _Method, _RPid, _Ref}] ->
+            if node(Pid) == node() ->
+                    case is_process_alive(Pid) of
+                        true  -> Pid;
+                        false -> undefined
+                    end;
+               true ->
+                    Pid
+            end;
+        [] -> undefined
+    end.
+
+%% Cache the callback module's optional prioritise_call/4,
+%% prioritise_cast/3 and prioritise_info/3 functions (wrapped by
+%% function_exported_or_default/4), falling back to priority 0 when a
+%% function is not exported.
+find_prioritisers(GS2State = #gs2_state { mod = Mod }) ->
+    PCall = function_exported_or_default(Mod, 'prioritise_call', 4,
+                                         fun (_Msg, _From, _State) -> 0 end),
+    PCast = function_exported_or_default(Mod, 'prioritise_cast', 3,
+                                         fun (_Msg, _State) -> 0 end),
+    PInfo = function_exported_or_default(Mod, 'prioritise_info', 3,
+                                         fun (_Msg, _State) -> 0 end),
+    GS2State #gs2_state { prioritisers = {PCall, PCast, PInfo} }.
+
+%% If Mod:Fun/Arity is exported, wrap it in a fun that supplies the
+%% current queue length as the extra argument and validates the result
+%% (an integer priority, or 'drop' for the arity-3 cast/info case);
+%% any other result terminates the server via
+%% handle_common_termination/3. Otherwise return Default unchanged.
+function_exported_or_default(Mod, Fun, Arity, Default) ->
+    case erlang:function_exported(Mod, Fun, Arity) of
+        true -> case Arity of
+                    3 -> fun (Msg, GS2State = #gs2_state { queue = Queue,
+                                                           state = State }) ->
+                                 Length = priority_queue:len(Queue),
+                                 case catch Mod:Fun(Msg, Length, State) of
+                                     drop ->
+                                         drop;
+                                     Res when is_integer(Res) ->
+                                         Res;
+                                     Err ->
+                                         handle_common_termination(Err, Msg, GS2State)
+                                 end
+                         end;
+                    4 -> fun (Msg, From, GS2State = #gs2_state { queue = Queue,
+                                                                 state = State }) ->
+                                 Length = priority_queue:len(Queue),
+                                 case catch Mod:Fun(Msg, From, Length, State) of
+                                     Res when is_integer(Res) ->
+                                         Res;
+                                     Err ->
+                                         handle_common_termination(Err, Msg, GS2State)
+                                 end
+                         end
+                end;
+        false -> Default
+    end.
+
+%%-----------------------------------------------------------------
+%% Status information
+%%-----------------------------------------------------------------
+%% sys:get_status/1 support: build the standard status report, letting
+%% the callback module override the state section (format_status/2)
+%% and the rendering of queued messages (format_message_queue/2).
+format_status(Opt, StatusData) ->
+    [PDict, SysState, Parent, Debug,
+     #gs2_state{name = Name, state = State, mod = Mod, queue = Queue}] =
+        StatusData,
+    NameTag = if is_pid(Name) ->
+                      pid_to_list(Name);
+                 is_atom(Name) ->
+                      Name
+              end,
+    Header = lists:concat(["Status for generic server ", NameTag]),
+    Log = sys:get_debug(log, Debug, []),
+    Specfic = callback(Mod, format_status, [Opt, [PDict, State]],
+                       fun () -> [{data, [{"State", State}]}] end),
+    Messages = callback(Mod, format_message_queue, [Opt, Queue],
+                        fun () -> priority_queue:to_list(Queue) end),
+    [{header, Header},
+     {data, [{"Status",          SysState},
+             {"Parent",          Parent},
+             {"Logged events",   Log},
+             {"Queued messages", Messages}]} |
+     Specfic].
+
+%% Call an optional callback, falling back to DefaultThunk when the
+%% function is not exported or crashes.
+callback(Mod, FunName, Args, DefaultThunk) ->
+    case erlang:function_exported(Mod, FunName, length(Args)) of
+        true  -> case catch apply(Mod, FunName, Args) of
+                     {'EXIT', _} -> DefaultThunk();
+                     Success     -> Success
+                 end;
+        false -> DefaultThunk()
+    end.
+
+%% Pick the {emit, stop} stats functions: no-ops when the
+%% gen_server2_metrics ETS table does not exist (metrics disabled),
+%% the real emit_stats/stop_stats otherwise.
+stats_funs() ->
+    case ets:info(gen_server2_metrics) of
+        undefined ->
+            {fun(GS2State) -> GS2State end,
+             fun(GS2State) -> GS2State end};
+        _ ->
+            {fun emit_stats/1, fun stop_stats/1}
+    end.
+
+%% Stats-timer lifecycle helpers, all built on rabbit_event's stats
+%% timer stored in the #gs2_state.timer field.
+
+%% Initialise the timer, emit a first stats sample, then arm the timer.
+init_stats(State = #gs2_state{ emit_stats_fun = EmitStatsFun }) ->
+    StateWithInitTimer = rabbit_event:init_stats_timer(State, #gs2_state.timer),
+    next_stats_timer(EmitStatsFun(StateWithInitTimer)).
+
+%% Reset and re-arm the timer after a stats emission.
+next_stats_timer(State) ->
+    ensure_stats_timer(rabbit_event:reset_stats_timer(State, #gs2_state.timer)).
+
+%% Arrange for an emit_gen_server2_stats message when the timer fires.
+ensure_stats_timer(State) ->
+    rabbit_event:ensure_stats_timer(State,
+                                    #gs2_state.timer,
+                                    emit_gen_server2_stats).
+
+%% Cancel the timer (used on termination).
+stop_stats_timer(State) ->
+    rabbit_event:stop_stats_timer(State, #gs2_state.timer).
+
+%% Report this process's queue length to the core metrics store.
+emit_stats(State = #gs2_state{queue = Queue}) ->
+    rabbit_core_metrics:gen_server2_stats(self(), priority_queue:len(Queue)),
+    State.
+
+%% Remove this process's entry from the core metrics store.
+stop_stats(State) ->
+    rabbit_core_metrics:gen_server2_deleted(self()),
+    State.
diff --git a/deps/rabbit_common/src/lager_forwarder_backend.erl b/deps/rabbit_common/src/lager_forwarder_backend.erl
new file mode 100644
index 0000000000..936a1259ce
--- /dev/null
+++ b/deps/rabbit_common/src/lager_forwarder_backend.erl
@@ -0,0 +1,120 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% A lager gen_event backend that forwards every log event it receives
+%% to another lager sink, optionally filtering by level. With level
+%% 'inherit' the effective level is computed from the handlers
+%% installed on the next sink.
+-module(lager_forwarder_backend).
+
+-behaviour(gen_event).
+
+-export([init/1, handle_call/2, handle_event/2, handle_info/2, terminate/2,
+         code_change/3]).
+
+-record(state, {
+          next_sink :: atom(),                      % registered name of the sink we forward to
+          level :: {'mask', integer()} | inherit    % local level mask, or inherit from next sink
+         }).
+
+%% @private
+%% Accepts Sink | [Sink] | [Sink, inherit] | [Sink, Level]; a bad
+%% level or any other argument shape is a fatal configuration error.
+init(Sink) when is_atom(Sink) ->
+    init([Sink]);
+init([Sink]) when is_atom(Sink) ->
+    init([Sink, inherit]);
+init([Sink, inherit]) when is_atom(Sink) ->
+    {ok, #state{
+            next_sink = Sink,
+            level = inherit
+           }};
+init([Sink, Level]) when is_atom(Sink) ->
+    try
+        Mask = lager_util:config_to_mask(Level),
+        {ok, #state{
+                next_sink = Sink,
+                level = Mask
+               }}
+    catch
+        _:_ ->
+            {error, {fatal, bad_log_level}}
+    end;
+init(_) ->
+    {error, {fatal, bad_config}}.
+
+%% @private
+%% With level 'inherit', compute the effective mask as the union (bor)
+%% of the levels of all handlers on the next sink; defaults to 127
+%% when we would be asking ourselves (loop) or the sink is down.
+handle_call(get_loglevel, #state{next_sink = Sink, level = inherit} = State) ->
+    SinkPid = whereis(Sink),
+    Mask = case self() of
+               SinkPid ->
+                   %% Avoid direct loops, defaults to 'info'.
+                   127;
+               _ ->
+                   try
+                       Levels = [gen_event:call(SinkPid, Handler, get_loglevel,
+                                                infinity)
+                                 || Handler <- gen_event:which_handlers(SinkPid)],
+                       %% Note: the 'Mask' bindings inside these fun
+                       %% heads shadow the outer 'Mask' being computed.
+                       lists:foldl(fun
+                                       ({mask, Mask}, Acc) ->
+                                           Mask bor Acc;
+                                       (Level, Acc) when is_integer(Level) ->
+                                           {mask, Mask} = lager_util:config_to_mask(
+                                                            lager_util:num_to_level(Level)),
+                                           Mask bor Acc;
+                                       (_, Acc) ->
+                                           Acc
+                                   end, 0, Levels)
+                   catch
+                       exit:noproc ->
+                           127
+                   end
+           end,
+    {ok, {mask, Mask}, State};
+handle_call(get_loglevel, #state{level = Mask} = State) ->
+    {ok, Mask, State};
+handle_call({set_loglevel, inherit}, State) ->
+    {ok, ok, State#state{level = inherit}};
+handle_call({set_loglevel, Level}, State) ->
+    try lager_util:config_to_mask(Level) of
+        Mask ->
+            {ok, ok, State#state{level = Mask}}
+    catch
+        _:_ ->
+            {ok, {error, bad_log_level}, State}
+    end;
+handle_call(_Request, State) ->
+    {ok, ok, State}.
+
+%% @private
+%% Forward a log event to the next sink if it passes the level check,
+%% using async or sync notify according to the sink's 'async' config.
+handle_event({log, LagerMsg}, #state{next_sink = Sink, level = Mask} = State) ->
+    SinkPid = whereis(Sink),
+    case self() of
+        SinkPid ->
+            %% Avoid direct loops.
+            ok;
+        _ ->
+            case Mask =:= inherit orelse
+                 lager_util:is_loggable(LagerMsg, Mask, ?MODULE) of
+                true ->
+                    case lager_config:get({Sink, async}, false) of
+                        true  -> gen_event:notify(SinkPid, {log, LagerMsg});
+                        false -> gen_event:sync_notify(SinkPid, {log, LagerMsg})
+                    end;
+                false ->
+                    ok
+            end
+    end,
+    {ok, State};
+handle_event(_Event, State) ->
+    {ok, State}.
+
+%% @private
+handle_info(_Info, State) ->
+    {ok, State}.
+
+%% @private
+terminate(_Reason, _State) ->
+    ok.
+
+%% @private
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
diff --git a/deps/rabbit_common/src/mirrored_supervisor.erl b/deps/rabbit_common/src/mirrored_supervisor.erl
new file mode 100644
index 0000000000..61ddc068b6
--- /dev/null
+++ b/deps/rabbit_common/src/mirrored_supervisor.erl
@@ -0,0 +1,513 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(mirrored_supervisor).
+
+%% pg2 is deprecated in OTP 23.
+-compile(nowarn_deprecated_function).
+
+%% Mirrored Supervisor
+%% ===================
+%%
+%% This module implements a new type of supervisor. It acts like a
+%% normal supervisor, but at creation time you also provide the name
+%% of a process group to join. All the supervisors within the
+%% process group act like a single large distributed supervisor:
+%%
+%% * A process with a given child_id will only exist on one
+%% supervisor within the group.
+%%
+%% * If one supervisor fails, children may migrate to surviving
+%% supervisors within the group.
+%%
+%% In almost all cases you will want to use the module name for the
+%% process group. Using multiple process groups with the same module
+%% name is supported. Having multiple module names for the same
+%% process group will lead to undefined behaviour.
+%%
+%% Motivation
+%% ----------
+%%
+%% Sometimes you have processes which:
+%%
+%% * Only need to exist once per cluster.
+%%
+%% * Does not contain much state (or can reconstruct its state easily).
+%%
+%% * Needs to be restarted elsewhere should it be running on a node
+%% which fails.
+%%
+%% By creating a mirrored supervisor group with one supervisor on
+%% each node, that's what you get.
+%%
+%%
+%% API use
+%% -------
+%%
+%% This is basically the same as for supervisor, except that:
+%%
+%% 1) start_link(Module, Args) becomes
+%% start_link(Group, TxFun, Module, Args).
+%%
+%% 2) start_link({local, Name}, Module, Args) becomes
+%% start_link({local, Name}, Group, TxFun, Module, Args).
+%%
+%% 3) start_link({global, Name}, Module, Args) is not available.
+%%
+%% 4) The restart strategy simple_one_for_one is not available.
+%%
+%% 5) Mnesia is used to hold global state. At some point your
+%% application should invoke create_tables() (or table_definitions()
+%% if it wants to manage table creation itself).
+%%
+%% The TxFun parameter to start_link/{4,5} is a function which the
+%% mirrored supervisor can use to execute Mnesia transactions. In the
+%% RabbitMQ server this goes via a worker pool; in other cases a
+%% function like:
+%%
+%% tx_fun(Fun) ->
+%% case mnesia:sync_transaction(Fun) of
+%% {atomic, Result} -> Result;
+%% {aborted, Reason} -> throw({error, Reason})
+%% end.
+%%
+%% could be used.
+%%
+%% Internals
+%% ---------
+%%
+%% Each mirrored_supervisor consists of three processes - the overall
+%% supervisor, the delegate supervisor and the mirroring server. The
+%% overall supervisor supervises the other two processes. Its pid is
+%% the one returned from start_link; the pids of the other two
+%% processes are effectively hidden in the API.
+%%
+%% The delegate supervisor is in charge of supervising all the child
+%% processes that are added to the supervisor as usual.
+%%
+%% The mirroring server intercepts calls to the supervisor API
+%% (directed at the overall supervisor), does any special handling,
+%% and forwards everything to the delegate supervisor.
+%%
+%% This module implements all three, hence init/1 is somewhat overloaded.
+%%
+%% The mirroring server creates and joins a process group on
+%% startup. It monitors all the existing members of this group, and
+%% broadcasts a "hello" message to them so that they can monitor it in
+%% turn. When it receives a 'DOWN' message, it checks to see if it's
+%% the "first" server in the group and restarts all the child
+%% processes from the dead supervisor if so.
+%%
+%% In the future we might load balance this.
+%%
+%% Startup is slightly fiddly. The mirroring server needs to know the
+%% Pid of the overall supervisor, but we don't have that until it has
+%% started. Therefore we set this after the fact. We also start any
+%% children we found in Module:init() at this point, since starting
+%% children requires knowing the overall supervisor pid.
+
+-define(SUPERVISOR, supervisor2).
+-define(GEN_SERVER, gen_server2).
+-define(SUP_MODULE, mirrored_supervisor_sups).
+
+-define(TABLE, mirrored_sup_childspec).
+-define(TABLE_DEF,
+ {?TABLE,
+ [{record_name, mirrored_sup_childspec},
+ {type, ordered_set},
+ {attributes, record_info(fields, mirrored_sup_childspec)}]}).
+-define(TABLE_MATCH, {match, #mirrored_sup_childspec{ _ = '_' }}).
+
+-export([start_link/4, start_link/5,
+ start_child/2, restart_child/2,
+ delete_child/2, terminate_child/2,
+ which_children/1, count_children/1, check_childspecs/1]).
+
+-behaviour(?GEN_SERVER).
+
+-export([init/1, handle_call/3, handle_info/2, terminate/2, code_change/3,
+ handle_cast/2]).
+
+-export([start_internal/3]).
+-export([create_tables/0, table_definitions/0]).
+
+-record(mirrored_sup_childspec, {key, mirroring_pid, childspec}).
+
+-record(state, {overall,
+ delegate,
+ group,
+ tx_fun,
+ initial_childspecs,
+ child_order}).
+
+%%--------------------------------------------------------------------------
+%% Callback behaviour
+%%--------------------------------------------------------------------------
+
+-callback init(Args :: term()) ->
+ {ok, {{RestartStrategy :: ?SUPERVISOR:strategy(),
+ MaxR :: non_neg_integer(),
+ MaxT :: non_neg_integer()},
+ [ChildSpec :: ?SUPERVISOR:child_spec()]}}
+ | ignore.
+
+%%--------------------------------------------------------------------------
+%% Specs
+%%--------------------------------------------------------------------------
+
+-type startlink_err() :: {'already_started', pid()} | 'shutdown' | term().
+-type startlink_ret() :: {'ok', pid()} | 'ignore' | {'error', startlink_err()}.
+
+-type group_name() :: any().
+
+-type(tx_fun() :: fun((fun(() -> A)) -> A)).
+
+-spec start_link(GroupName, TxFun, Module, Args) -> startlink_ret() when
+ GroupName :: group_name(),
+ TxFun :: tx_fun(),
+ Module :: module(),
+ Args :: term().
+
+-spec start_link(SupName, GroupName, TxFun, Module, Args) ->
+ startlink_ret() when
+ SupName :: ?SUPERVISOR:sup_name(),
+ GroupName :: group_name(),
+ TxFun :: tx_fun(),
+ Module :: module(),
+ Args :: term().
+
+-spec start_internal(Group, TxFun, ChildSpecs) -> Result when
+ Group :: group_name(),
+ TxFun :: tx_fun(),
+ ChildSpecs :: [?SUPERVISOR:child_spec()],
+ Result :: {'ok', pid()} | {'error', term()}.
+
+-spec create_tables() -> Result when
+ Result :: 'ok'.
+
+%%----------------------------------------------------------------------------
+
+%% Start an anonymous mirrored supervisor in Group, using TxFun to run
+%% Mnesia transactions and Mod:init(Args) for the child specs.
+start_link(Group, TxFun, Mod, Args) ->
+    start_link0([], Group, TxFun, init(Mod, Args)).
+
+%% As start_link/4 but locally registered; global registration is not
+%% supported.
+start_link({local, SupName}, Group, TxFun, Mod, Args) ->
+    start_link0([{local, SupName}], Group, TxFun, init(Mod, Args));
+
+start_link({global, _SupName}, _Group, _TxFun, _Mod, _Args) ->
+    erlang:error(badarg).
+
+%% Start the overall supervisor, then tell its mirroring server to
+%% complete initialisation (join the group, start children) via a
+%% synchronous {init, Pid} call.
+start_link0(Prefix, Group, TxFun, Init) ->
+    case apply(?SUPERVISOR, start_link,
+               Prefix ++ [?SUP_MODULE, {overall, Group, TxFun, Init}]) of
+        {ok, Pid} -> case catch call(Pid, {init, Pid}) of
+                         ok -> {ok, Pid};
+                         E  -> E
+                     end;
+        Other     -> Other
+    end.
+
+%% Run the user's init callback, rejecting the simple_one_for_one
+%% restart strategy, which mirrored supervisors do not support.
+init(Mod, Args) ->
+    case Mod:init(Args) of
+        {ok, {{Bad, _, _}, _ChildSpecs}} when
+              Bad =:= simple_one_for_one -> erlang:error(badarg);
+        Init                             -> Init
+    end.
+
+%% Public supervisor-like API. Operations on an existing child are
+%% routed (via find_call/3) to whichever group member currently owns
+%% it; which_children/count_children aggregate over all members.
+start_child(Sup, ChildSpec) -> call(Sup, {start_child, ChildSpec}).
+delete_child(Sup, Id)       -> find_call(Sup, Id, {delete_child, Id}).
+restart_child(Sup, Id)      -> find_call(Sup, Id, {msg, restart_child, [Id]}).
+terminate_child(Sup, Id)    -> find_call(Sup, Id, {msg, terminate_child, [Id]}).
+which_children(Sup)         -> fold(which_children, Sup, fun lists:append/2).
+count_children(Sup)         -> fold(count_children, Sup, fun add_proplists/2).
+check_childspecs(Specs)     -> ?SUPERVISOR:check_childspecs(Specs).
+
+%% call/cast address the mirroring server child of the overall
+%% supervisor; cast ignores the peer being already dead.
+call(Sup, Msg) -> ?GEN_SERVER:call(mirroring(Sup), Msg, infinity).
+cast(Sup, Msg) -> with_exit_handler(
+                    fun() -> ok end,
+                    fun() -> ?GEN_SERVER:cast(mirroring(Sup), Msg) end).
+
+%% Locate the group member that owns child Id (via the Mnesia table)
+%% and forward Msg to it; {error, not_found} if no member owns it.
+find_call(Sup, Id, Msg) ->
+    Group = call(Sup, group),
+    MatchHead = #mirrored_sup_childspec{mirroring_pid = '$1',
+                                        key           = {Group, Id},
+                                        _             = '_'},
+    %% If we did this inside a tx we could still have failover
+    %% immediately after the tx - we can't be 100% here. So we may as
+    %% well dirty_select.
+    case mnesia:dirty_select(?TABLE, [{MatchHead, [], ['$1']}]) of
+        [Mirror] -> call(Mirror, Msg);
+        []       -> {error, not_found}
+    end.
+
+%% Apply supervisor2's FunAtom to every member's delegate supervisor
+%% and aggregate the results with AggFun.
+fold(FunAtom, Sup, AggFun) ->
+    Group = call(Sup, group),
+    lists:foldl(AggFun, [],
+                [apply(?SUPERVISOR, FunAtom, [D]) ||
+                    M <- pg2:get_members(Group),
+                    D <- [delegate(M)]]).
+
+%% Look up the pid of the child with the given Id under supervisor
+%% Sup; crashes (badmatch) if absent.
+child(Sup, Id) ->
+    [Pid] = [Pid || {Id1, Pid, _, _} <- ?SUPERVISOR:which_children(Sup),
+                    Id1 =:= Id],
+    Pid.
+
+%% The two fixed children of the overall supervisor.
+delegate(Sup)  -> child(Sup, delegate).
+mirroring(Sup) -> child(Sup, mirroring).
+
+%%----------------------------------------------------------------------------
+
+%% Entry point used by mirrored_supervisor_sups to start the mirroring
+%% gen_server2 child.
+start_internal(Group, TxFun, ChildSpecs) ->
+    ?GEN_SERVER:start_link(?MODULE, {Group, TxFun, ChildSpecs},
+                           [{timeout, infinity}]).
+
+%%----------------------------------------------------------------------------
+
+%% gen_server2 init: just record the arguments; the real setup happens
+%% in the {init, Overall} call, once the overall supervisor pid is known.
+init({Group, TxFun, ChildSpecs}) ->
+    {ok, #state{group              = Group,
+                tx_fun             = TxFun,
+                initial_childspecs = ChildSpecs,
+                child_order        = child_order_from(ChildSpecs)}}.
+
+%% Deferred initialisation: join the pg2 group, purge stale table rows
+%% if we are the first member, monitor all existing members (and ask
+%% them to monitor us), then try to start our initial children.
+handle_call({init, Overall}, _From,
+            State = #state{overall            = undefined,
+                           delegate           = undefined,
+                           group              = Group,
+                           tx_fun             = TxFun,
+                           initial_childspecs = ChildSpecs}) ->
+    process_flag(trap_exit, true),
+    pg2:create(Group),
+    ok = pg2:join(Group, Overall),
+    Rest = pg2:get_members(Group) -- [Overall],
+    case Rest of
+        %% First member: any rows left in Mnesia are from a dead group.
+        [] -> TxFun(fun() -> delete_all(Group) end);
+        _  -> ok
+    end,
+    [begin
+         ?GEN_SERVER:cast(mirroring(Pid), {ensure_monitoring, Overall}),
+         erlang:monitor(process, Pid)
+     end || Pid <- Rest],
+    Delegate = delegate(Overall),
+    erlang:monitor(process, Delegate),
+    State1 = State#state{overall = Overall, delegate = Delegate},
+    case errors([maybe_start(Group, TxFun, Overall, Delegate, S)
+                 || S <- ChildSpecs]) of
+        []     -> {reply, ok, State1};
+        Errors -> {stop, {shutdown, Errors}, State1}
+    end;
+
+%% Start a child somewhere in the group; translate "already present in
+%% Mnesia" results into the standard supervisor error tuples.
+handle_call({start_child, ChildSpec}, _From,
+            State = #state{overall  = Overall,
+                           delegate = Delegate,
+                           group    = Group,
+                           tx_fun   = TxFun}) ->
+    {reply, case maybe_start(Group, TxFun, Overall, Delegate, ChildSpec) of
+                already_in_mnesia        -> {error, already_present};
+                {already_in_mnesia, Pid} -> {error, {already_started, Pid}};
+                Else                     -> Else
+            end, State};
+
+handle_call({delete_child, Id}, _From, State = #state{delegate = Delegate,
+                                                      group    = Group,
+                                                      tx_fun   = TxFun}) ->
+    {reply, stop(Group, TxFun, Delegate, Id), State};
+
+%% Generic passthrough to supervisor2 on our delegate (restart_child,
+%% terminate_child).
+handle_call({msg, F, A}, _From, State = #state{delegate = Delegate}) ->
+    {reply, apply(?SUPERVISOR, F, [Delegate | A]), State};
+
+handle_call(group, _From, State = #state{group = Group}) ->
+    {reply, Group, State};
+
+handle_call(Msg, _From, State) ->
+    {stop, {unexpected_call, Msg}, State}.
+
+%% A later-joining member asked us to monitor its overall supervisor.
+handle_cast({ensure_monitoring, Pid}, State) ->
+    erlang:monitor(process, Pid),
+    {noreply, State};
+
+%% A peer is propagating a fatal failure: relay it to the remaining
+%% peers and stop with the same reason.
+handle_cast({die, Reason}, State = #state{group = Group}) ->
+    _ = tell_all_peers_to_die(Group, Reason),
+    {stop, Reason, State};
+
+handle_cast(Msg, State) ->
+    {stop, {unexpected_cast, Msg}, State}.
+
+%% Our own delegate died.
+handle_info({'DOWN', _Ref, process, Pid, Reason},
+            State = #state{delegate = Pid, group = Group}) ->
+    %% Since the delegate is temporary, its death won't cause us to
+    %% die. Since the overall supervisor kills processes in reverse
+    %% order when shutting down "from above" and we started after the
+    %% delegate, if we see the delegate die then that means it died
+    %% "from below" i.e. due to the behaviour of its children, not
+    %% because the whole app was being torn down.
+    %%
+    %% Therefore if we get here we know we need to cause the entire
+    %% mirrored sup to shut down, not just fail over.
+    _ = tell_all_peers_to_die(Group, Reason),
+    {stop, Reason, State};
+
+%% A peer's overall supervisor died: if we are now the first member of
+%% the group (by pid order), adopt its children - claim their rows in
+%% Mnesia and start them on our delegate in their original spec order.
+handle_info({'DOWN', _Ref, process, Pid, _Reason},
+            State = #state{delegate    = Delegate,
+                           group       = Group,
+                           tx_fun      = TxFun,
+                           overall     = O,
+                           child_order = ChildOrder}) ->
+    %% TODO load balance this
+    %% No guarantee pg2 will have received the DOWN before us.
+    R = case lists:sort(pg2:get_members(Group)) -- [Pid] of
+            [O | _] -> ChildSpecs =
+                           TxFun(fun() -> update_all(O, Pid) end),
+                       [start(Delegate, ChildSpec)
+                        || ChildSpec <- restore_child_order(ChildSpecs,
+                                                            ChildOrder)];
+            _       -> []
+        end,
+    case errors(R) of
+        []     -> {noreply, State};
+        Errors -> {stop, {shutdown, Errors}, State}
+    end;
+
+handle_info(Info, State) ->
+    {stop, {unexpected_info, Info}, State}.
+
+%% Nothing to clean up: children belong to the delegate supervisor.
+terminate(_Reason, _State) ->
+    ok.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+%%----------------------------------------------------------------------------
+
+%% Propagate a fatal reason to every other member of the group.
+tell_all_peers_to_die(Group, Reason) ->
+    [cast(P, {die, Reason}) || P <- pg2:get_members(Group) -- [self()]].
+
+%% Atomically claim the child in Mnesia, then start it if we won;
+%% returns already_in_mnesia / {already_in_mnesia, Pid} when another
+%% live member owns it, or {error, E} if the transaction was aborted
+%% (e.g. we were torn down mid-transaction).
+maybe_start(Group, TxFun, Overall, Delegate, ChildSpec) ->
+    try TxFun(fun() -> check_start(Group, Overall, Delegate, ChildSpec) end) of
+        start     -> start(Delegate, ChildSpec);
+        undefined -> already_in_mnesia;
+        Pid       -> {already_in_mnesia, Pid}
+    catch
+        %% If we are torn down while in the transaction...
+        {error, E} -> {error, E}
+    end.
+
+%% Inside the transaction: decide whether to start the child. Claims
+%% the row (returns 'start') when no row exists or the recorded owner
+%% is dead; otherwise returns the owner's view of the child pid.
+check_start(Group, Overall, Delegate, ChildSpec) ->
+    case mnesia:wread({?TABLE, {Group, id(ChildSpec)}}) of
+        []  -> _ = write(Group, Overall, ChildSpec),
+               start;
+        [S] -> #mirrored_sup_childspec{key           = {Group, Id},
+                                       mirroring_pid = Pid} = S,
+               case Overall of
+                   Pid -> child(Delegate, Id);
+                   _   -> case supervisor(Pid) of
+                              dead      -> _ = write(Group, Overall, ChildSpec),
+                                           start;
+                              Delegate0 -> child(Delegate0, Id)
+                          end
+               end
+    end.
+
+%% Resolve a peer's delegate supervisor, mapping exit (peer gone) to
+%% 'dead'.
+supervisor(Pid) -> with_exit_handler(fun() -> dead end,
+                                     fun() -> delegate(Pid) end).
+
+%% Record ownership of a childspec in Mnesia; returns the spec so
+%% callers can chain it.
+write(Group, Overall, ChildSpec) ->
+    S = #mirrored_sup_childspec{key           = {Group, id(ChildSpec)},
+                                mirroring_pid = Overall,
+                                childspec     = ChildSpec},
+    ok = mnesia:write(?TABLE, S, write),
+    ChildSpec.
+
+%% Remove a child's ownership row.
+delete(Group, Id) ->
+    ok = mnesia:delete({?TABLE, {Group, Id}}).
+
+%% Start a child on the (local) delegate supervisor.
+start(Delegate, ChildSpec) ->
+    apply(?SUPERVISOR, start_child, [Delegate, ChildSpec]).
+
+%% Delete a child: remove the Mnesia row inside a transaction (only if
+%% the child is not running here), then delete it from the delegate.
+stop(Group, TxFun, Delegate, Id) ->
+    try TxFun(fun() -> check_stop(Group, Delegate, Id) end) of
+        deleted -> apply(?SUPERVISOR, delete_child, [Delegate, Id]);
+        running -> {error, running}
+    catch
+        {error, E} -> {error, E}
+    end.
+
+%% Inside the transaction: delete the row only when the delegate has
+%% no such (running) child.
+check_stop(Group, Delegate, Id) ->
+    case child(Delegate, Id) of
+        undefined -> delete(Group, Id),
+                     deleted;
+        _         -> running
+    end.
+
+%% First element of a 6-tuple childspec is its Id.
+id({Id, _, _, _, _, _}) -> Id.
+
+%% Re-own every childspec previously owned by OldOverall, returning
+%% the list of claimed childspecs (to be started by the caller).
+update_all(Overall, OldOverall) ->
+    MatchHead = #mirrored_sup_childspec{mirroring_pid = OldOverall,
+                                        key           = '$1',
+                                        childspec     = '$2',
+                                        _             = '_'},
+    [write(Group, Overall, C) ||
+        [{Group, _Id}, C] <- mnesia:select(?TABLE, [{MatchHead, [], ['$$']}])].
+
+%% Remove every row belonging to Group (used when the first member of
+%% a fresh group starts).
+delete_all(Group) ->
+    MatchHead = #mirrored_sup_childspec{key       = {Group, '_'},
+                                        childspec = '$1',
+                                        _         = '_'},
+    [delete(Group, id(C)) ||
+        C <- mnesia:select(?TABLE, [{MatchHead, [], ['$1']}])].
+
+%% Collect the error payloads from a list of start results.
+errors(Results) -> [E || {error, E} <- Results].
+
+%%----------------------------------------------------------------------------
+
+%% Create the Mnesia table used to track mirrored childspec ownership.
+%% Returns 'ok' when every table in the list exists (created now or
+%% previously); otherwise the first error from Mnesia.
+create_tables() -> create_tables([?TABLE_DEF]).
+
+create_tables([]) ->
+    ok;
+create_tables([{Table, Attributes} | Ts]) ->
+    case mnesia:create_table(Table, Attributes) of
+        {atomic, ok}                       -> create_tables(Ts);
+        %% Match the table we just tried to create rather than the
+        %% hard-coded ?TABLE macro, so idempotency also holds should
+        %% the definition list ever contain more than one table.
+        {aborted, {already_exists, Table}} -> create_tables(Ts);
+        Err                                -> Err
+    end.
+
+%% Expose the table definition (plus the match spec used by RabbitMQ's
+%% table tooling) for callers that manage table creation themselves.
+table_definitions() ->
+    {Name, Attributes} = ?TABLE_DEF,
+    [{Name, [?TABLE_MATCH | Attributes]}].
+
+%%----------------------------------------------------------------------------
+
+%% Run Thunk, converting "the other process is gone" style exits
+%% (noproc / nodedown / normal / shutdown, possibly nested) into a
+%% call to Handler; all other exceptions propagate.
+with_exit_handler(Handler, Thunk) ->
+    try
+        Thunk()
+    catch
+        exit:{R, _} when R =:= noproc; R =:= nodedown;
+                         R =:= normal; R =:= shutdown ->
+            Handler();
+        exit:{{R, _}, _} when R =:= nodedown; R =:= shutdown ->
+            Handler()
+    end.
+
+%% Merge two proplists of counters, summing values with equal keys.
+%% Both inputs are key-sorted first; the merge accumulates in reverse,
+%% so the output order is unspecified.
+add_proplists(P1, P2) ->
+    add_proplists(lists:keysort(1, P1), lists:keysort(1, P2), []).
+add_proplists([], P2, Acc) -> P2 ++ Acc;
+add_proplists(P1, [], Acc) -> P1 ++ Acc;
+add_proplists([{K, V1} | P1], [{K, V2} | P2], Acc) ->
+    add_proplists(P1, P2, [{K, V1 + V2} | Acc]);
+add_proplists([{K1, _} = KV | P1], [{K2, _} | _] = P2, Acc) when K1 < K2 ->
+    add_proplists(P1, P2, [KV | Acc]);
+add_proplists(P1, [KV | P2], Acc) ->
+    add_proplists(P1, P2, [KV | Acc]).
+
+%% Remember each childspec's original position as {Id, N} pairs so
+%% adopted children can be restarted in their declared order.
+child_order_from(ChildSpecs) ->
+    lists:zipwith(fun(C, N) ->
+                          {id(C), N}
+                  end, ChildSpecs, lists:seq(1, length(ChildSpecs))).
+
+%% Sort recovered childspecs back into the order recorded by
+%% child_order_from/1.
+restore_child_order(ChildSpecs, ChildOrder) ->
+    lists:sort(fun(A, B) ->
+                       proplists:get_value(id(A), ChildOrder)
+                           < proplists:get_value(id(B), ChildOrder)
+               end, ChildSpecs).
diff --git a/deps/rabbit_common/src/mnesia_sync.erl b/deps/rabbit_common/src/mnesia_sync.erl
new file mode 100644
index 0000000000..2287436849
--- /dev/null
+++ b/deps/rabbit_common/src/mnesia_sync.erl
@@ -0,0 +1,64 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(mnesia_sync).
+
+%% mnesia:sync_transaction/3 fails to guarantee that the log is flushed to disk
+%% at commit. This module is an attempt to minimise the risk of data loss by
+%% performing a coalesced log fsync. Unfortunately this is performed regardless
+%% of whether or not the log was appended to.
+
+-behaviour(gen_server).
+
+-export([sync/0]).
+
+-export([start_link/0, init/1, handle_call/3, handle_cast/2, handle_info/2,
+         terminate/2, code_change/3]).
+
+-define(SERVER, ?MODULE).
+
+%% waiting: callers parked until the next fsync; disc_node: whether
+%% this node uses a disc copy of the Mnesia directory at all.
+-record(state, {waiting, disc_node}).
+
+%%----------------------------------------------------------------------------
+
+-spec sync() -> 'ok'.
+
+%%----------------------------------------------------------------------------
+
+start_link() ->
+    gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+%% Block until the Mnesia log has been fsynced (coalesced with any
+%% other concurrent callers).
+sync() ->
+    gen_server:call(?SERVER, sync, infinity).
+
+%%----------------------------------------------------------------------------
+
+init([]) ->
+    {ok, #state{disc_node = mnesia:system_info(use_dir), waiting = []}}.
+
+%% RAM-only node: there is no log to sync; reply immediately.
+handle_call(sync, _From, #state{disc_node = false} = State) ->
+    {reply, ok, State};
+%% Park the caller and return with a 0 timeout: the resulting 'timeout'
+%% message performs one fsync for every caller that arrived meanwhile.
+handle_call(sync, From, #state{waiting = Waiting} = State) ->
+    {noreply, State#state{waiting = [From | Waiting]}, 0};
+handle_call(Request, _From, State) ->
+    {stop, {unhandled_call, Request}, State}.
+
+handle_cast(Request, State) ->
+    {stop, {unhandled_cast, Request}, State}.
+
+%% Coalesced fsync of Mnesia's 'latest_log' disk_log, then release all
+%% parked callers.
+handle_info(timeout, #state{waiting = Waiting} = State) ->
+    ok = disk_log:sync(latest_log),
+    _ = [gen_server:reply(From, ok) || From <- Waiting],
+    {noreply, State#state{waiting = []}};
+handle_info(Message, State) ->
+    {stop, {unhandled_info, Message}, State}.
+
+terminate(_Reason, _State) ->
+    ok.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
diff --git a/deps/rabbit_common/src/pmon.erl b/deps/rabbit_common/src/pmon.erl
new file mode 100644
index 0000000000..f44168dfcf
--- /dev/null
+++ b/deps/rabbit_common/src/pmon.erl
@@ -0,0 +1,96 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2011-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(pmon).
+
+%% Process Monitor
+%% ================
+%%
+%% This module monitors processes so that every process has at most
+%% 1 monitor.
+%% Processes monitored can be dynamically added and removed.
+%%
+%% Unlike erlang:[de]monitor* functions, this module
+%% provides basic querying capability and avoids contacting down nodes.
+%%
+%% It is used to monitor nodes, queue mirrors, and by
+%% the queue collector, among other things.
+
+-export([new/0, new/1, monitor/2, monitor_all/2, demonitor/2,
+         is_monitored/2, erase/2, monitored/1, is_empty/1]).
+
+-compile({no_auto_import, [monitor/2]}).
+
+%% monitors: item -> monitor ref; module: 'erlang' or a drop-in
+%% replacement (e.g. 'delegate') providing monitor/2 and demonitor/1.
+-record(state, {monitors = #{} :: #{item() => reference()},
+                module   = erlang :: module()}).
+
+%%----------------------------------------------------------------------------
+
+-export_type([?MODULE/0]).
+
+-opaque(?MODULE() :: #state{}).
+
+-type(item() :: pid() | {atom(), node()}).
+
+
+-spec new() -> ?MODULE().
+new() -> new(erlang).
+
+-spec new('erlang' | 'delegate') -> ?MODULE().
+new(Module) -> #state{module = Module}.
+
+%% Monitor Item, unless it is already monitored (at-most-one monitor
+%% invariant). If the item's node is known to be down, skip the
+%% monitor and synthesise a 'DOWN' message (with a fake ref) instead.
+-spec monitor(item(), ?MODULE()) -> ?MODULE().
+monitor(Item, S = #state{monitors = M, module = Module}) ->
+    case maps:is_key(Item, M) of
+        true  -> S;
+        false -> case node_alive_shortcut(Item) of
+                     true  -> Ref = Module:monitor(process, Item),
+                              S#state{monitors = maps:put(Item, Ref, M)};
+                     false -> self() ! {'DOWN', fake_ref, process, Item,
+                                        nodedown},
+                              S
+                 end
+    end.
+
+-spec monitor_all([item()], ?MODULE()) -> ?MODULE().
+monitor_all([],     S) -> S;                %% optimisation
+monitor_all([Item], S) -> monitor(Item, S); %% optimisation
+monitor_all(Items,  S) -> lists:foldl(fun monitor/2, S, Items).
+
+%% Drop the monitor for Item (no-op when not monitored).
+-spec demonitor(item(), ?MODULE()) -> ?MODULE().
+demonitor(Item, S = #state{monitors = M0, module = Module}) ->
+    case maps:take(Item, M0) of
+        {MRef, M} -> Module:demonitor(MRef),
+                     S#state{monitors = M};
+        error     -> S
+    end.
+
+-spec is_monitored(item(), ?MODULE()) -> boolean().
+is_monitored(Item, #state{monitors = M}) -> maps:is_key(Item, M).
+
+%% Forget Item without demonitoring (used after a 'DOWN' is received).
+-spec erase(item(), ?MODULE()) -> ?MODULE().
+erase(Item, S = #state{monitors = M}) ->
+    S#state{monitors = maps:remove(Item, M)}.
+
+-spec monitored(?MODULE()) -> [item()].
+monitored(#state{monitors = M}) -> maps:keys(M).
+
+-spec is_empty(?MODULE()) -> boolean().
+is_empty(#state{monitors = M}) -> maps:size(M) == 0.
+
+%%----------------------------------------------------------------------------
+
+%% We check here to see if the node is alive in order to avoid trying
+%% to connect to it if it isn't - this can cause substantial
+%% slowdowns. We can't perform this shortcut if passed {Name, Node}
+%% since we would need to convert that into a pid for the fake 'DOWN'
+%% message, so we always return true here - but that's OK, it's just
+%% an optimisation.
+node_alive_shortcut(P) when is_pid(P) ->
+    lists:member(node(P), [node() | nodes()]);
+node_alive_shortcut({_Name, _Node}) ->
+    true.
diff --git a/deps/rabbit_common/src/priority_queue.erl b/deps/rabbit_common/src/priority_queue.erl
new file mode 100644
index 0000000000..4a7867129d
--- /dev/null
+++ b/deps/rabbit_common/src/priority_queue.erl
@@ -0,0 +1,234 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% Priority queues have essentially the same interface as ordinary
+%% queues, except that a) there is an in/3 that takes a priority, and
+%% b) we have only implemented the core API we need.
+%%
+%% Priorities should be integers - the higher the value the higher the
+%% priority - but we don't actually check that.
+%%
+%% in/2 inserts items with priority 0.
+%%
+%% We optimise the case where a priority queue is being used just like
+%% an ordinary queue. When that is the case we represent the priority
+%% queue as an ordinary queue. We could just call into the 'queue'
+%% module for that, but for efficiency we implement the relevant
+%% functions directly in here, thus saving on inter-module calls and
+%% eliminating a level of boxing.
+%%
+%% When the queue contains items with non-zero priorities, it is
+%% represented as a sorted kv list with the inverted Priority as the
+%% key and an ordinary queue as the value. Here again we use our own
+%% ordinary queue implementation for efficiency, often making recursive
+%% calls into the same function knowing that ordinary queues represent
+%% a base case.
+
+
+-module(priority_queue).
+
+-export([new/0, is_queue/1, is_empty/1, len/1, to_list/1, from_list/1,
+ in/2, in/3, out/1, out_p/1, join/2, filter/2, fold/3, highest/1,
+ member/2]).
+
+%%----------------------------------------------------------------------------
+
+-export_type([q/0]).
+
+-type(q() :: pqueue()).
+-type(priority() :: integer() | 'infinity').
+-type(squeue() :: {queue, [any()], [any()], non_neg_integer()}).
+-type(pqueue() :: squeue() | {pqueue, [{priority(), squeue()}]}).
+
+-spec new() -> pqueue().
+-spec is_queue(any()) -> boolean().
+-spec is_empty(pqueue()) -> boolean().
+-spec len(pqueue()) -> non_neg_integer().
+-spec to_list(pqueue()) -> [{priority(), any()}].
+-spec from_list([{priority(), any()}]) -> pqueue().
+-spec in(any(), pqueue()) -> pqueue().
+-spec in(any(), priority(), pqueue()) -> pqueue().
+-spec out(pqueue()) -> {empty | {value, any()}, pqueue()}.
+-spec out_p(pqueue()) -> {empty | {value, any(), priority()}, pqueue()}.
+-spec join(pqueue(), pqueue()) -> pqueue().
+-spec filter(fun ((any()) -> boolean()), pqueue()) -> pqueue().
+-spec fold
+ (fun ((any(), priority(), A) -> A), A, pqueue()) -> A.
+-spec highest(pqueue()) -> priority() | 'empty'.
+-spec member(any(), pqueue()) -> boolean().
+
+%%----------------------------------------------------------------------------
+
+%% An empty priority queue, represented as a plain (simple) queue:
+%% {queue, RearList, FrontList, Length}.
+new() ->
+ {queue, [], [], 0}.
+
+%% Structural validity check for both representations. For the pqueue
+%% form every entry must be {Priority, SimpleQueue} with an integer or
+%% 'infinity' key.
+is_queue({queue, R, F, L}) when is_list(R), is_list(F), is_integer(L) ->
+ true;
+is_queue({pqueue, Queues}) when is_list(Queues) ->
+ lists:all(fun ({infinity, Q}) -> is_queue(Q);
+ ({P, Q}) -> is_integer(P) andalso is_queue(Q)
+ end, Queues);
+is_queue(_) ->
+ false.
+
+%% Only the simple form can be empty; out/1 collapses emptied
+%% sub-queues, so a {pqueue, _} term is treated as non-empty here.
+is_empty({queue, [], [], 0}) ->
+ true;
+is_empty(_) ->
+ false.
+
+%% Element count: O(1) for the simple form, O(#priorities) otherwise.
+len({queue, _R, _F, L}) ->
+ L;
+len({pqueue, Queues}) ->
+ lists:sum([len(Q) || {_, Q} <- Queues]).
+
+%% Convert to a [{Priority, Value}] list in dequeue order. Stored keys
+%% are negated (see maybe_negate_priority/1), so they are un-negated
+%% here; inner queues are simple, hence every inner pair matches {0, V}.
+to_list({queue, In, Out, _Len}) when is_list(In), is_list(Out) ->
+ [{0, V} || V <- Out ++ lists:reverse(In, [])];
+to_list({pqueue, Queues}) ->
+ [{maybe_negate_priority(P), V} || {P, Q} <- Queues,
+ {0, V} <- to_list(Q)].
+
+%% Inverse of to_list/1: rebuild a queue from {Priority, Value} pairs.
+from_list(L) ->
+ lists:foldl(fun ({P, E}, Q) -> in(E, P, Q) end, new(), L).
+
+%% Insert at the default priority 0.
+in(Item, Q) ->
+ in(Item, 0, Q).
+
+%% Insert X with the given Priority. Priority-0 inserts into a simple
+%% queue keep the simple representation; any other priority promotes
+%% the queue to the {pqueue, ...} form. Keys are stored negated so an
+%% ascending lists:keysort/2 puts the highest priority first, with
+%% 'infinity' special-cased to stay at the head of the list.
+in(X, 0, {queue, [_] = In, [], 1}) ->
+ {queue, [X], In, 2};
+in(X, 0, {queue, In, Out, Len}) when is_list(In), is_list(Out) ->
+ {queue, [X|In], Out, Len + 1};
+in(X, Priority, _Q = {queue, [], [], 0}) ->
+ in(X, Priority, {pqueue, []});
+in(X, Priority, Q = {queue, _, _, _}) ->
+ in(X, Priority, {pqueue, [{0, Q}]});
+in(X, Priority, {pqueue, Queues}) ->
+ P = maybe_negate_priority(Priority),
+ {pqueue, case lists:keysearch(P, 1, Queues) of
+ {value, {_, Q}} ->
+ %% Existing priority bucket: append to its sub-queue.
+ lists:keyreplace(P, 1, Queues, {P, in(X, Q)});
+ false when P == infinity ->
+ [{P, {queue, [X], [], 1}} | Queues];
+ false ->
+ %% New finite priority: keep any infinity bucket at
+ %% the head and keysort the rest into place.
+ case Queues of
+ [{infinity, InfQueue} | Queues1] ->
+ [{infinity, InfQueue} |
+ lists:keysort(1, [{P, {queue, [X], [], 1}} | Queues1])];
+ _ ->
+ lists:keysort(1, [{P, {queue, [X], [], 1}} | Queues])
+ end
+ end}.
+
+%% Remove the highest-priority (head) element. Returns {empty, Q} or
+%% {{value, V}, Q1}. The first five clauses are the inlined simple
+%% queue; the last clause pops from the head bucket of a pqueue and
+%% collapses the representation back towards the simple form when a
+%% sub-queue empties.
+out({queue, [], [], 0} = Q) ->
+ {empty, Q};
+out({queue, [V], [], 1}) ->
+ {{value, V}, {queue, [], [], 0}};
+out({queue, [Y|In], [], Len}) ->
+ [V|Out] = lists:reverse(In, []),
+ {{value, V}, {queue, [Y], Out, Len - 1}};
+out({queue, In, [V], Len}) when is_list(In) ->
+ {{value,V}, r2f(In, Len - 1)};
+out({queue, In,[V|Out], Len}) when is_list(In) ->
+ {{value, V}, {queue, In, Out, Len - 1}};
+out({pqueue, [{P, Q} | Queues]}) ->
+ {R, Q1} = out(Q),
+ NewQ = case is_empty(Q1) of
+ true -> case Queues of
+ [] -> {queue, [], [], 0};
+ %% Single remaining priority-0 bucket: demote
+ %% to the plain simple-queue representation.
+ [{0, OnlyQ}] -> OnlyQ;
+ [_|_] -> {pqueue, Queues}
+ end;
+ false -> {pqueue, [{P, Q1} | Queues]}
+ end,
+ {R, NewQ}.
+
+%% As out/1, but a successful result also carries the element's
+%% (un-negated) priority: {{value, V, Priority}, Q1}.
+out_p({queue, _, _, _} = Q) -> add_p(out(Q), 0);
+out_p({pqueue, [{P, _} | _]} = Q) -> add_p(out(Q), maybe_negate_priority(P)).
+
+%% Attach priority P to an out/1 result tuple.
+add_p(R, P) -> case R of
+ {empty, Q} -> {empty, Q};
+ {{value, V}, Q} -> {{value, V, P}, Q}
+ end.
+
+%% Concatenate two priority queues; at equal priority, A's elements
+%% come out before B's. Simple/simple joins stay simple. For mixed
+%% joins the simple queue is spliced in at priority 0: stored keys are
+%% negated, so 'P < 0 orelse P == infinity' selects the buckets with
+%% real priority above 0 that must precede it.
+join(A, {queue, [], [], 0}) ->
+ A;
+join({queue, [], [], 0}, B) ->
+ B;
+join({queue, AIn, AOut, ALen}, {queue, BIn, BOut, BLen}) ->
+ {queue, BIn, AOut ++ lists:reverse(AIn, BOut), ALen + BLen};
+join(A = {queue, _, _, _}, {pqueue, BPQ}) ->
+ {Pre, Post} =
+ lists:splitwith(fun ({P, _}) -> P < 0 orelse P == infinity end, BPQ),
+ Post1 = case Post of
+ [] -> [ {0, A} ];
+ [ {0, ZeroQueue} | Rest ] -> [ {0, join(A, ZeroQueue)} | Rest ];
+ _ -> [ {0, A} | Post ]
+ end,
+ {pqueue, Pre ++ Post1};
+join({pqueue, APQ}, B = {queue, _, _, _}) ->
+ {Pre, Post} =
+ lists:splitwith(fun ({P, _}) -> P < 0 orelse P == infinity end, APQ),
+ Post1 = case Post of
+ [] -> [ {0, B} ];
+ [ {0, ZeroQueue} | Rest ] -> [ {0, join(ZeroQueue, B)} | Rest ];
+ _ -> [ {0, B} | Post ]
+ end,
+ {pqueue, Pre ++ Post1};
+join({pqueue, APQ}, {pqueue, BPQ}) ->
+ {pqueue, merge(APQ, BPQ, [])}.
+
+%% Merge two sorted (negated-key) pqueue bucket lists, joining
+%% equal-priority sub-queues pairwise; Acc is built reversed.
+merge([], BPQ, Acc) ->
+ lists:reverse(Acc, BPQ);
+merge(APQ, [], Acc) ->
+ lists:reverse(Acc, APQ);
+merge([{P, A}|As], [{P, B}|Bs], Acc) ->
+ merge(As, Bs, [ {P, join(A, B)} | Acc ]);
+merge([{PA, A}|As], Bs = [{PB, _}|_], Acc) when PA < PB orelse PA == infinity ->
+ merge(As, Bs, [ {PA, A} | Acc ]);
+merge(As = [{_, _}|_], [{PB, B}|Bs], Acc) ->
+ merge(As, Bs, [ {PB, B} | Acc ]).
+
+%% Keep only the elements for which Pred(V) is true, preserving each
+%% element's priority (implemented as a fold rebuilding a fresh queue).
+filter(Pred, Q) -> fold(fun(V, P, Acc) ->
+ case Pred(V) of
+ true -> in(V, P, Acc);
+ false -> Acc
+ end
+ end, new(), Q).
+
+%% Fold over all elements in dequeue (priority) order;
+%% Fun(Value, Priority, Acc) -> Acc1.
+fold(Fun, Init, Q) -> case out_p(Q) of
+ {empty, _Q} -> Init;
+ {{value, V, P}, Q1} -> fold(Fun, Fun(V, P, Init), Q1)
+ end.
+
+%% Priority of the head element, or 'empty' for an empty queue.
+highest({queue, [], [], 0}) -> empty;
+highest({queue, _, _, _}) -> 0;
+highest({pqueue, [{P, _} | _]}) -> maybe_negate_priority(P).
+
+%% Membership test: scans both lists of a simple queue, or every
+%% sub-queue of a pqueue in turn. Non-queue input raises badarg.
+member(_X, {queue, [], [], 0}) ->
+ false;
+member(X, {queue, R, F, _Size}) ->
+ lists:member(X, R) orelse lists:member(X, F);
+member(_X, {pqueue, []}) ->
+ false;
+member(X, {pqueue, [{_P, Q}]}) ->
+ member(X, Q);
+member(X, {pqueue, [{_P, Q} | T]}) ->
+ case member(X, Q) of
+ true ->
+ true;
+ false ->
+ member(X, {pqueue, T})
+ end;
+member(X, Q) ->
+ erlang:error(badarg, [X,Q]).
+
+%% Rebuild a simple queue from a rear (reversed-order) list of known
+%% length L, balancing the elements across rear and front lists.
+r2f([], 0) -> {queue, [], [], 0};
+r2f([_] = R, 1) -> {queue, [], R, 1};
+r2f([X,Y], 2) -> {queue, [X], [Y], 2};
+r2f([X,Y|R], L) -> {queue, [X,Y], lists:reverse(R, []), L}.
+
+%% Priorities are stored negated so that an ascending keysort puts the
+%% highest priority first; 'infinity' passes through unchanged and is
+%% special-cased at the head of bucket lists.
+maybe_negate_priority(infinity) -> infinity;
+maybe_negate_priority(P) -> -P.
diff --git a/deps/rabbit_common/src/rabbit_amqp_connection.erl b/deps/rabbit_common/src/rabbit_amqp_connection.erl
new file mode 100644
index 0000000000..58486bd239
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_amqp_connection.erl
@@ -0,0 +1,34 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqp_connection).
+
+-export([amqp_params/2]).
+
+%% Ask the connection process ConnPid for its AMQP client parameters
+%% via a gen_server info call. Best-effort: a dead process, a timeout
+%% or any other failure is logged at debug level and yields [] rather
+%% than propagating the error to the caller.
+-spec amqp_params(pid(), timeout()) -> [{atom(), term()}].
+amqp_params(ConnPid, Timeout) ->
+ P = try
+ gen_server:call(ConnPid, {info, [amqp_params]}, Timeout)
+ catch exit:{noproc, Error} ->
+ rabbit_log:debug("file ~p, line ~p - connection process ~p not alive: ~p~n",
+ [?FILE, ?LINE, ConnPid, Error]),
+ [];
+ _:Error ->
+ rabbit_log:debug("file ~p, line ~p - failed to get amqp_params from connection process ~p: ~p~n",
+ [?FILE, ?LINE, ConnPid, Error]),
+ []
+ end,
+ process_amqp_params_result(P).
+
+%% Normalise the various reply shapes of {info, [amqp_params]} down to
+%% a bare parameter list.
+process_amqp_params_result({error, {bad_argument, amqp_params}}) ->
+ %% Some connection process modules do not handle the {info, [amqp_params]}
+ %% message (like rabbit_reader) and throw a bad_argument error
+ [];
+process_amqp_params_result({ok, AmqpParams}) ->
+ AmqpParams;
+process_amqp_params_result(AmqpParams) ->
+ AmqpParams.
diff --git a/deps/rabbit_common/src/rabbit_amqqueue_common.erl b/deps/rabbit_common/src/rabbit_amqqueue_common.erl
new file mode 100644
index 0000000000..a45356de78
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_amqqueue_common.erl
@@ -0,0 +1,39 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_amqqueue_common).
+
+-export([notify_sent/2, notify_sent_queue_down/1, delete_exclusive/2]).
+
+%% How many deliveries a channel may consume before it must grant the
+%% queue more credit via a notify_sent cast.
+-define(MORE_CONSUMER_CREDIT_AFTER, 50).
+
+%% Count down a per-queue credit counter kept in the calling process's
+%% dictionary under {consumer_credit_to, QPid}. When the counter
+%% reaches 1 we cast {notify_sent, ...} to the queue and reset it; on
+%% first use we monitor QPid (presumably so the resulting 'DOWN'
+%% triggers notify_sent_queue_down/1 in the caller - confirm there)
+%% and start at the full credit minus the delivery just counted.
+-spec notify_sent(pid(), pid()) -> 'ok'.
+
+notify_sent(QPid, ChPid) ->
+ Key = {consumer_credit_to, QPid},
+ put(Key, case get(Key) of
+ 1 -> gen_server2:cast(
+ QPid, {notify_sent, ChPid,
+ ?MORE_CONSUMER_CREDIT_AFTER}),
+ ?MORE_CONSUMER_CREDIT_AFTER;
+ undefined -> erlang:monitor(process, QPid),
+ ?MORE_CONSUMER_CREDIT_AFTER - 1;
+ C -> C - 1
+ end),
+ ok.
+
+%% Drop the credit counter for a queue that has gone down.
+-spec notify_sent_queue_down(pid()) -> 'ok'.
+
+notify_sent_queue_down(QPid) ->
+ erase({consumer_credit_to, QPid}),
+ ok.
+
+%% Fire-and-forget delete of exclusive queues owned by connection
+%% ConnId; one cast per queue pid.
+-spec delete_exclusive([pid()], pid()) -> 'ok'.
+
+delete_exclusive(QPids, ConnId) ->
+ [gen_server2:cast(QPid, {delete_exclusive, ConnId}) || QPid <- QPids],
+ ok.
diff --git a/deps/rabbit_common/src/rabbit_auth_backend_dummy.erl b/deps/rabbit_common/src/rabbit_auth_backend_dummy.erl
new file mode 100644
index 0000000000..8d30fdca1b
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_auth_backend_dummy.erl
@@ -0,0 +1,39 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_auth_backend_dummy).
+-include("rabbit.hrl").
+
+-behaviour(rabbit_authn_backend).
+-behaviour(rabbit_authz_backend).
+
+-export([user/0]).
+-export([user_login_authentication/2, user_login_authorization/2,
+ check_vhost_access/3, check_resource_access/4, check_topic_access/4]).
+-export([state_can_expire/0]).
+
+-spec user() -> rabbit_types:user().
+
+%% A user to be used by the direct client when permission checks are
+%% not needed. This user can do anything AMQPish.
+user() -> #user{username = <<"none">>,
+ tags = [],
+ authz_backends = [{?MODULE, none}]}.
+
+%% Implementation of rabbit_auth_backend
+
+%% Interactive logins as the dummy user are always refused; the only
+%% way to obtain it is via user/0 above.
+user_login_authentication(_, _) ->
+ {refused, "cannot log in conventionally as dummy user", []}.
+
+user_login_authorization(_, _) ->
+ {refused, "cannot log in conventionally as dummy user", []}.
+
+%% Authorisation checks unconditionally succeed for the dummy user.
+check_vhost_access(#auth_user{}, _VHostPath, _AuthzData) -> true.
+check_resource_access(#auth_user{}, #resource{}, _Permission, _Context) -> true.
+check_topic_access(#auth_user{}, #resource{}, _Permission, _Context) -> true.
+
+%% The dummy user's credentials never expire.
+state_can_expire() -> false.
diff --git a/deps/rabbit_common/src/rabbit_auth_mechanism.erl b/deps/rabbit_common/src/rabbit_auth_mechanism.erl
new file mode 100644
index 0000000000..38d21f3a5a
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_auth_mechanism.erl
@@ -0,0 +1,41 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% Behaviour definition for SASL authentication mechanisms (e.g.
+%% PLAIN, EXTERNAL); implementations register themselves with
+%% rabbit_registry.
+-module(rabbit_auth_mechanism).
+
+-behaviour(rabbit_registry_class).
+
+-export([added_to_rabbit_registry/2, removed_from_rabbit_registry/1]).
+
+%% A description.
+-callback description() -> [proplists:property()].
+
+%% If this mechanism is enabled, should it be offered for a given socket?
+%% (primarily so EXTERNAL can be TLS-only)
+-callback should_offer(rabbit_net:socket()) -> boolean().
+
+%% Called before authentication starts. Should create a state
+%% object to be passed through all the stages of authentication.
+-callback init(rabbit_net:socket()) -> any().
+
+%% Handle a stage of authentication. Possible responses:
+%% {ok, User}
+%% Authentication succeeded, and here's the user record.
+%% {challenge, Challenge, NextState}
+%% Another round is needed. Here's the state I want next time.
+%% {protocol_error, Msg, Args}
+%% Client got the protocol wrong. Log and die.
+%% {refused, Username, Msg, Args}
+%% Client failed authentication. Log and die.
+-callback handle_response(binary(), any()) ->
+ {'ok', rabbit_types:user()} |
+ {'challenge', binary(), any()} |
+ {'protocol_error', string(), [any()]} |
+ {'refused', rabbit_types:username() | none, string(), [any()]}.
+
+%% No registry bookkeeping is needed when mechanisms are (un)registered.
+added_to_rabbit_registry(_Type, _ModuleName) -> ok.
+removed_from_rabbit_registry(_Type) -> ok.
diff --git a/deps/rabbit_common/src/rabbit_authn_backend.erl b/deps/rabbit_common/src/rabbit_authn_backend.erl
new file mode 100644
index 0000000000..e600ec884f
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_authn_backend.erl
@@ -0,0 +1,27 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_authn_backend).
+
+-include("rabbit.hrl").
+
+%% Check a user can log in, given a username and a proplist of
+%% authentication information (e.g. [{password, Password}]). If your
+%% backend is not to be used for authentication, this should always
+%% refuse access.
+%%
+%% Possible responses:
+%% {ok, User}
+%% Authentication succeeded, and here's the user record.
+%% {error, Error}
+%% Something went wrong. Log and die.
+%% {refused, Msg, Args}
+%% Client failed authentication. Log and die.
+-callback user_login_authentication(rabbit_types:username(), [term()] | map()) ->
+ {'ok', rabbit_types:auth_user()} |
+ {'refused', string(), [any()]} |
+ {'error', any()}.
diff --git a/deps/rabbit_common/src/rabbit_authz_backend.erl b/deps/rabbit_common/src/rabbit_authz_backend.erl
new file mode 100644
index 0000000000..367aa8d1ef
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_authz_backend.erl
@@ -0,0 +1,88 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_authz_backend).
+
+-include("rabbit.hrl").
+
+%% Check that a user can log in, when this backend is being used for
+%% authorisation only. Authentication has already taken place
+%% successfully, but we need to check that the user exists in this
+%% backend, and initialise any impl field we will want to have passed
+%% back in future calls to check_vhost_access/3 and
+%% check_resource_access/3.
+%%
+%% Possible responses:
+%% {ok, Impl}
+%% {ok, Impl, Tags}
+%% User authorisation succeeded, and here's the impl and potential extra tags fields.
+%% {error, Error}
+%% Something went wrong. Log and die.
+%% {refused, Msg, Args}
+%% User authorisation failed. Log and die.
+-callback user_login_authorization(rabbit_types:username(), [term()] | map()) ->
+ {'ok', any()} |
+ {'ok', any(), any()} |
+ {'refused', string(), [any()]} |
+ {'error', any()}.
+
+%% Given #auth_user, vhost and data (client IP for now), can a user log in to a vhost?
+%% Possible responses:
+%% true
+%% false
+%% {error, Error}
+%% Something went wrong. Log and die.
+-callback check_vhost_access(AuthUser :: rabbit_types:auth_user(),
+ VHost :: rabbit_types:vhost(),
+ AuthzData :: rabbit_types:authz_data()) ->
+ boolean() | {'error', any()}.
+
+%% Given #auth_user, resource and permission, can a user access a resource?
+%%
+%% Possible responses:
+%% true
+%% false
+%% {error, Error}
+%% Something went wrong. Log and die.
+-callback check_resource_access(rabbit_types:auth_user(),
+ rabbit_types:r(atom()),
+ rabbit_access_control:permission_atom(),
+ rabbit_types:authz_context()) ->
+ boolean() | {'error', any()}.
+
+%% Given #auth_user, topic as resource, permission, and context, can a user access the topic?
+%%
+%% Possible responses:
+%% true
+%% false
+%% {error, Error}
+%% Something went wrong. Log and die.
+-callback check_topic_access(rabbit_types:auth_user(),
+ rabbit_types:r(atom()),
+ rabbit_access_control:permission_atom(),
+ rabbit_types:topic_access_context()) ->
+ boolean() | {'error', any()}.
+
+%% Returns true for backends that support state or credential expiration (e.g. use JWTs).
+-callback state_can_expire() -> boolean().
+
+%% Updates backend state that has expired.
+%%
+%% Possible responses:
+%% {ok, User}
+%% Secret updated successfully, and here's the user record.
+%% {error, Error}
+%% Something went wrong.
+%% {refused, Msg, Args}
+%% New secret is not valid or the user cannot authenticate with it.
+-callback update_state(AuthUser :: rabbit_types:auth_user(),
+ NewState :: term()) ->
+ {'ok', rabbit_types:auth_user()} |
+ {'refused', string(), [any()]} |
+ {'error', any()}.
+
+-optional_callbacks([update_state/2]).
diff --git a/deps/rabbit_common/src/rabbit_basic_common.erl b/deps/rabbit_common/src/rabbit_basic_common.erl
new file mode 100644
index 0000000000..e88f1172af
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_basic_common.erl
@@ -0,0 +1,41 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_basic_common).
+-include("rabbit.hrl").
+
+-export([build_content/2, from_content/1]).
+
+-spec build_content
+ (rabbit_framing:amqp_property_record(), binary() | [binary()]) ->
+ rabbit_types:content().
+-spec from_content
+ (rabbit_types:content()) ->
+ {rabbit_framing:amqp_property_record(), binary()}.
+
+%% Build an unencoded #content{} record from a property record and a
+%% body given either as one binary or as a reversed fragment list.
+build_content(Properties, BodyBin) when is_binary(BodyBin) ->
+ build_content(Properties, [BodyBin]);
+
+build_content(Properties, PFR) ->
+ %% basic.publish hasn't changed so we can just hard-code amqp_0_9_1
+ {ClassId, _MethodId} =
+ rabbit_framing_amqp_0_9_1:method_id('basic.publish'),
+ #content{class_id = ClassId,
+ properties = Properties,
+ properties_bin = none,
+ protocol = none,
+ payload_fragments_rev = PFR}.
+
+%% Inverse of build_content/2: decode a #content{} into its property
+%% record and a single flat body binary. The class-id match doubles as
+%% an assertion that the content really is a basic-class message.
+from_content(Content) ->
+ #content{class_id = ClassId,
+ properties = Props,
+ payload_fragments_rev = FragmentsRev} =
+ rabbit_binary_parser:ensure_content_decoded(Content),
+ %% basic.publish hasn't changed so we can just hard-code amqp_0_9_1
+ {ClassId, _MethodId} =
+ rabbit_framing_amqp_0_9_1:method_id('basic.publish'),
+ {Props, list_to_binary(lists:reverse(FragmentsRev))}.
diff --git a/deps/rabbit_common/src/rabbit_binary_generator.erl b/deps/rabbit_common/src/rabbit_binary_generator.erl
new file mode 100644
index 0000000000..7a56cb92b6
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_binary_generator.erl
@@ -0,0 +1,235 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_binary_generator).
+-include("rabbit_framing.hrl").
+-include("rabbit.hrl").
+
+-export([build_simple_method_frame/3,
+ build_simple_content_frames/4,
+ build_heartbeat_frame/0]).
+-export([generate_table/1]).
+-export([check_empty_frame_size/0]).
+-export([ensure_content_encoded/2, clear_encoded_content/1]).
+-export([map_exception/3]).
+
+%%----------------------------------------------------------------------------
+
+-type frame() :: [binary()].
+
+-spec build_simple_method_frame
+ (rabbit_channel:channel_number(), rabbit_framing:amqp_method_record(),
+ rabbit_types:protocol()) ->
+ frame().
+-spec build_simple_content_frames
+ (rabbit_channel:channel_number(), rabbit_types:content(),
+ non_neg_integer(), rabbit_types:protocol()) ->
+ [frame()].
+-spec build_heartbeat_frame() -> frame().
+-spec generate_table(rabbit_framing:amqp_table()) -> binary().
+-spec check_empty_frame_size() -> 'ok'.
+-spec ensure_content_encoded
+ (rabbit_types:content(), rabbit_types:protocol()) ->
+ rabbit_types:encoded_content().
+-spec clear_encoded_content
+ (rabbit_types:content()) ->
+ rabbit_types:unencoded_content().
+-spec map_exception
+ (rabbit_channel:channel_number(), rabbit_types:amqp_error() | any(),
+ rabbit_types:protocol()) ->
+ {rabbit_channel:channel_number(),
+ rabbit_framing:amqp_method_record()}.
+
+%%----------------------------------------------------------------------------
+
+%% Encode a single AMQP method as one wire frame (frame type 1).
+build_simple_method_frame(ChannelInt, MethodRecord, Protocol) ->
+ MethodFields = Protocol:encode_method_fields(MethodRecord),
+ MethodName = rabbit_misc:method_record_type(MethodRecord),
+ {ClassId, MethodId} = Protocol:method_id(MethodName),
+ create_frame(1, ChannelInt, [<<ClassId:16, MethodId:16>>, MethodFields]).
+
+%% Encode message content as a content-header frame (type 2) followed
+%% by zero or more body frames (type 3), each at most FrameMax bytes.
+build_simple_content_frames(ChannelInt, Content, FrameMax, Protocol) ->
+ #content{class_id = ClassId,
+ properties_bin = ContentPropertiesBin,
+ payload_fragments_rev = PayloadFragmentsRev} =
+ ensure_content_encoded(Content, Protocol),
+ {BodySize, ContentFrames} =
+ build_content_frames(PayloadFragmentsRev, FrameMax, ChannelInt),
+ HeaderFrame = create_frame(2, ChannelInt,
+ [<<ClassId:16, 0:16, BodySize:64>>,
+ ContentPropertiesBin]),
+ [HeaderFrame | ContentFrames].
+
+%% Split the (reversed) payload fragments into body frames. FrameMax
+%% of 0 means "no limit": the whole payload fits one frame.
+build_content_frames(FragsRev, FrameMax, ChannelInt) ->
+ BodyPayloadMax = if FrameMax == 0 -> iolist_size(FragsRev);
+ true -> FrameMax - ?EMPTY_FRAME_SIZE
+ end,
+ build_content_frames(0, [], BodyPayloadMax, [],
+ lists:reverse(FragsRev), BodyPayloadMax, ChannelInt).
+
+%% Accumulator loop: SizeAcc/FramesAcc collect output, FragSizeRem is
+%% the room left in the frame being filled, FragAcc its (reversed)
+%% content. Oversized fragments are split with a binary match; the
+%% current frame is flushed when full or when input runs out.
+build_content_frames(SizeAcc, FramesAcc, _FragSizeRem, [],
+ [], _BodyPayloadMax, _ChannelInt) ->
+ {SizeAcc, lists:reverse(FramesAcc)};
+build_content_frames(SizeAcc, FramesAcc, _FragSizeRem, [],
+ [<<>>], _BodyPayloadMax, _ChannelInt) ->
+ {SizeAcc, lists:reverse(FramesAcc)};
+build_content_frames(SizeAcc, FramesAcc, FragSizeRem, FragAcc,
+ Frags, BodyPayloadMax, ChannelInt)
+ when FragSizeRem == 0 orelse Frags == [] ->
+ Frame = create_frame(3, ChannelInt, lists:reverse(FragAcc)),
+ FrameSize = BodyPayloadMax - FragSizeRem,
+ build_content_frames(SizeAcc + FrameSize, [Frame | FramesAcc],
+ BodyPayloadMax, [], Frags, BodyPayloadMax, ChannelInt);
+build_content_frames(SizeAcc, FramesAcc, FragSizeRem, FragAcc,
+ [Frag | Frags], BodyPayloadMax, ChannelInt) ->
+ Size = size(Frag),
+ {NewFragSizeRem, NewFragAcc, NewFrags} =
+ if Size == 0 -> {FragSizeRem, FragAcc, Frags};
+ Size =< FragSizeRem -> {FragSizeRem - Size, [Frag | FragAcc], Frags};
+ true -> <<Head:FragSizeRem/binary, Tail/binary>> =
+ Frag,
+ {0, [Head | FragAcc], [Tail | Frags]}
+ end,
+ build_content_frames(SizeAcc, FramesAcc, NewFragSizeRem, NewFragAcc,
+ NewFrags, BodyPayloadMax, ChannelInt).
+
+%% A heartbeat frame: empty payload on channel 0.
+build_heartbeat_frame() ->
+ create_frame(?FRAME_HEARTBEAT, 0, <<>>).
+
+%% Wire format of one frame: type octet, channel short, payload size
+%% long, payload, frame-end octet. Returned as an iolist.
+create_frame(TypeInt, ChannelInt, Payload) ->
+ [<<TypeInt:8, ChannelInt:16, (iolist_size(Payload)):32>>, Payload,
+ ?FRAME_END].
+
+%% table_field_to_binary supports the AMQP 0-8/0-9 standard types, S,
+%% I, D, T and F, as well as the QPid extensions b, d, f, l, s, t, x,
+%% and V.
+%% Encode one {Name, Type, Value} table field: short-string name
+%% followed by the tagged value.
+table_field_to_binary({FName, T, V}) ->
+ [short_string_to_binary(FName) | field_value_to_binary(T, V)].
+
+%% One clause per supported field type; each emits the single-octet
+%% type tag followed by the type-specific encoding.
+field_value_to_binary(longstr, V) -> [$S | long_string_to_binary(V)];
+field_value_to_binary(signedint, V) -> [$I, <<V:32/signed>>];
+field_value_to_binary(decimal, V) -> {Before, After} = V,
+ [$D, Before, <<After:32>>];
+field_value_to_binary(timestamp, V) -> [$T, <<V:64>>];
+field_value_to_binary(table, V) -> [$F | table_to_binary(V)];
+field_value_to_binary(array, V) -> [$A | array_to_binary(V)];
+field_value_to_binary(byte, V) -> [$b, <<V:8/signed>>];
+field_value_to_binary(double, V) -> [$d, <<V:64/float>>];
+field_value_to_binary(float, V) -> [$f, <<V:32/float>>];
+field_value_to_binary(long, V) -> [$l, <<V:64/signed>>];
+field_value_to_binary(short, V) -> [$s, <<V:16/signed>>];
+field_value_to_binary(bool, V) -> [$t, if V -> 1; true -> 0 end];
+field_value_to_binary(binary, V) -> [$x | long_string_to_binary(V)];
+field_value_to_binary(unsignedbyte, V) -> [$B, <<V:8/unsigned>>];
+field_value_to_binary(unsignedshort, V) -> [$u, <<V:16/unsigned>>];
+field_value_to_binary(unsignedint, V) -> [$i, <<V:32/unsigned>>];
+field_value_to_binary(void, _V) -> [$V].
+
+%% Length-prefixed (32-bit) encoding of a nested field table.
+table_to_binary(Table) when is_list(Table) ->
+ BinTable = generate_table_iolist(Table),
+ [<<(iolist_size(BinTable)):32>> | BinTable].
+
+%% Length-prefixed (32-bit) encoding of a field array.
+array_to_binary(Array) when is_list(Array) ->
+ BinArray = generate_array_iolist(Array),
+ [<<(iolist_size(BinArray)):32>> | BinArray].
+
+%% As generate_table_iolist/1 but flattened to a single binary.
+generate_table(Table) when is_list(Table) ->
+ list_to_binary(generate_table_iolist(Table)).
+
+generate_table_iolist(Table) ->
+ lists:map(fun table_field_to_binary/1, Table).
+
+%% Array entries are untagged-name {Type, Value} pairs.
+generate_array_iolist(Array) ->
+ lists:map(fun ({T, V}) -> field_value_to_binary(T, V) end, Array).
+
+%% 8-bit length prefix; names longer than 255 bytes cannot be encoded.
+short_string_to_binary(String) ->
+ Len = string_length(String),
+ if Len < 256 -> [<<Len:8>>, String];
+ true -> exit(content_properties_shortstr_overflow)
+ end.
+
+%% 32-bit length prefix.
+long_string_to_binary(String) ->
+ Len = string_length(String),
+ [<<Len:32>>, String].
+
+%% Strings may be binaries or character lists.
+string_length(String) when is_binary(String) -> size(String);
+string_length(String) -> length(String).
+
+%% Sanity check run at startup: an empty body frame really must be
+%% ?EMPTY_FRAME_SIZE bytes, or frame splitting would be wrong.
+check_empty_frame_size() ->
+ %% Intended to ensure that EMPTY_FRAME_SIZE is defined correctly.
+ case iolist_size(create_frame(?FRAME_BODY, 0, <<>>)) of
+ ?EMPTY_FRAME_SIZE -> ok;
+ ComputedSize -> exit({incorrect_empty_frame_size,
+ ComputedSize, ?EMPTY_FRAME_SIZE})
+ end.
+
+%% Make sure the content's properties_bin is populated for the target
+%% protocol. Clause 1: already encoded for this protocol - no work.
+%% Clause 2: encoded for a different protocol - decode with the old
+%% protocol then re-encode with the new one. Clause 3: only the
+%% decoded properties exist - encode them.
+ensure_content_encoded(Content = #content{properties_bin = PropBin,
+ protocol = Protocol}, Protocol)
+ when PropBin =/= none ->
+ Content;
+ensure_content_encoded(Content = #content{properties = none,
+ properties_bin = PropBin,
+ protocol = Protocol}, Protocol1)
+ when PropBin =/= none ->
+ Props = Protocol:decode_properties(Content#content.class_id, PropBin),
+ Content#content{properties = Props,
+ properties_bin = Protocol1:encode_properties(Props),
+ protocol = Protocol1};
+ensure_content_encoded(Content = #content{properties = Props}, Protocol)
+ when Props =/= none ->
+ Content#content{properties_bin = Protocol:encode_properties(Props),
+ protocol = Protocol}.
+
+%% Drop the cached encoded form of the properties, keeping the decoded
+%% form so it can be re-encoded later (possibly for another protocol).
+clear_encoded_content(Content = #content{properties_bin = none,
+ protocol = none}) ->
+ Content;
+clear_encoded_content(Content = #content{properties = none}) ->
+ %% Only clear when we can rebuild the properties_bin later in
+ %% accordance to the content record definition comment - maximum
+ %% one of properties and properties_bin can be 'none'
+ Content;
+clear_encoded_content(Content = #content{}) ->
+ Content#content{properties_bin = none, protocol = none}.
+
+%% NB: this function is also used by the Erlang client
+%% Translate an internal error Reason into the close method to send:
+%% a connection.close on channel 0 when the error is connection-level
+%% (or happened on channel 0), otherwise a channel.close on Channel.
+map_exception(Channel, Reason, Protocol) ->
+ {SuggestedClose, ReplyCode, ReplyText, FailedMethod} =
+ lookup_amqp_exception(Reason, Protocol),
+ {ClassId, MethodId} = case FailedMethod of
+ {_, _} -> FailedMethod;
+ none -> {0, 0};
+ _ -> Protocol:method_id(FailedMethod)
+ end,
+ case SuggestedClose orelse (Channel == 0) of
+ true -> {0, #'connection.close'{reply_code = ReplyCode,
+ reply_text = ReplyText,
+ class_id = ClassId,
+ method_id = MethodId}};
+ false -> {Channel, #'channel.close'{reply_code = ReplyCode,
+ reply_text = ReplyText,
+ class_id = ClassId,
+ method_id = MethodId}}
+ end.
+
+%% Resolve an #amqp_error{} via the protocol's exception table; any
+%% other term is logged and reported as internal_error.
+lookup_amqp_exception(#amqp_error{name = Name,
+ explanation = Expl,
+ method = Method},
+ Protocol) ->
+ {ShouldClose, Code, Text} = Protocol:lookup_amqp_exception(Name),
+ ExplBin = amqp_exception_explanation(Text, Expl),
+ {ShouldClose, Code, ExplBin, Method};
+lookup_amqp_exception(Other, Protocol) ->
+ rabbit_log:warning("Non-AMQP exit reason '~p'~n", [Other]),
+ {ShouldClose, Code, Text} = Protocol:lookup_amqp_exception(internal_error),
+ {ShouldClose, Code, Text, none}.
+
+%% Append the explanation to the reply text, truncating to 255 bytes
+%% (the shortstr limit) with a "..." suffix when necessary.
+amqp_exception_explanation(Text, Expl) ->
+ ExplBin = list_to_binary(Expl),
+ CompleteTextBin = <<Text/binary, " - ", ExplBin/binary>>,
+ if size(CompleteTextBin) > 255 -> <<CompleteTextBin:252/binary, "...">>;
+ true -> CompleteTextBin
+ end.
diff --git a/deps/rabbit_common/src/rabbit_binary_parser.erl b/deps/rabbit_common/src/rabbit_binary_parser.erl
new file mode 100644
index 0000000000..478b0f0cd2
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_binary_parser.erl
@@ -0,0 +1,172 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_binary_parser).
+
+-include("rabbit.hrl").
+
+-export([parse_table/1]).
+-export([ensure_content_decoded/1, clear_decoded_content/1]).
+-export([validate_utf8/1, assert_utf8/1]).
+
+%%----------------------------------------------------------------------------
+
+-spec parse_table(binary()) -> rabbit_framing:amqp_table().
+-spec ensure_content_decoded
+ (rabbit_types:content()) ->
+ rabbit_types:decoded_content().
+-spec clear_decoded_content
+ (rabbit_types:content()) ->
+ rabbit_types:undecoded_content().
+-spec validate_utf8(binary()) -> 'ok' | 'error'.
+-spec assert_utf8(binary()) -> 'ok'.
+
+%%----------------------------------------------------------------------------
+
+%% parse_table supports the AMQP 0-8/0-9 standard types, S, I, D, T
+%% and F, as well as the QPid extensions b, d, f, l, s, t, x, and V.
+
+%% Expands to one parse_table/1 clause for a fixed-width field type:
+%% BType is the wire-type byte, Pattern the bit-syntax match for the
+%% value (it must bind the variable 'Value' - the macro relies on
+%% this non-hygienic binding), RType the atom tagged in the result.
+-define(SIMPLE_PARSE_TABLE(BType, Pattern, RType),
+ parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
+ BType, Pattern, Rest/binary>>) ->
+ [{NameString, RType, Value} | parse_table(Rest)]).
+
+%% Note that we try to put these in approximately the order we expect
+%% to hit them, that's why the empty binary is half way through.
+
+%% Decode an AMQP field table binary into a list of
+%% {Name, Type, Value} triples; raises function_clause on any
+%% unrecognised type byte or truncated field.
+parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
+ $S, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
+ [{NameString, longstr, Value} | parse_table(Rest)];
+
+?SIMPLE_PARSE_TABLE($T, Value:64/unsigned, timestamp);
+
+parse_table(<<>>) ->
+ [];
+
+?SIMPLE_PARSE_TABLE($b, Value:8/signed, byte);
+?SIMPLE_PARSE_TABLE($B, Value:8/unsigned, unsignedbyte);
+
+?SIMPLE_PARSE_TABLE($s, Value:16/signed, short);
+?SIMPLE_PARSE_TABLE($u, Value:16/unsigned, unsignedshort);
+
+?SIMPLE_PARSE_TABLE($I, Value:32/signed, signedint);
+?SIMPLE_PARSE_TABLE($i, Value:32/unsigned, unsignedint);
+
+?SIMPLE_PARSE_TABLE($d, Value:64/float, double);
+?SIMPLE_PARSE_TABLE($f, Value:32/float, float);
+
+%% yes, both 'l' and 'L' fields are decoded to 64-bit signed values;
+%% see https://github.com/rabbitmq/rabbitmq-server/issues/1093#issuecomment-276351183,
+%% https://www.rabbitmq.com/amqp-0-9-1-errata.html, and section
+%% 4.2.1 of the spec for details.
+?SIMPLE_PARSE_TABLE($l, Value:64/signed, long);
+?SIMPLE_PARSE_TABLE($L, Value:64/signed, long);
+
+
+%% booleans arrive as a single byte; anything non-zero is true
+parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
+ $t, Value:8/unsigned, Rest/binary>>) ->
+ [{NameString, bool, (Value /= 0)} | parse_table(Rest)];
+
+parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
+ $D, Before:8/unsigned, After:32/unsigned, Rest/binary>>) ->
+ [{NameString, decimal, {Before, After}} | parse_table(Rest)];
+
+%% nested table and array values are decoded recursively
+parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
+ $F, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
+ [{NameString, table, parse_table(Value)} | parse_table(Rest)];
+
+parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
+ $A, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
+ [{NameString, array, parse_array(Value)} | parse_table(Rest)];
+
+parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
+ $x, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
+ [{NameString, binary, Value} | parse_table(Rest)];
+
+%% 'V' (void) carries no payload bytes at all
+parse_table(<<NLen:8/unsigned, NameString:NLen/binary,
+ $V, Rest/binary>>) ->
+ [{NameString, void, undefined} | parse_table(Rest)].
+
+%% Array analogue of ?SIMPLE_PARSE_TABLE: one parse_array/1 clause
+%% per fixed-width type; Pattern must bind 'Value' (non-hygienic).
+-define(SIMPLE_PARSE_ARRAY(BType, Pattern, RType),
+ parse_array(<<BType, Pattern, Rest/binary>>) ->
+ [{RType, Value} | parse_array(Rest)]).
+
+%% Decode an AMQP field array binary into a list of {Type, Value}
+%% pairs; same type bytes as parse_table/1 but with no field names.
+parse_array(<<$S, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
+ [{longstr, Value} | parse_array(Rest)];
+
+?SIMPLE_PARSE_ARRAY($T, Value:64/unsigned, timestamp);
+
+parse_array(<<>>) ->
+ [];
+
+?SIMPLE_PARSE_ARRAY($b, Value:8/signed, byte);
+?SIMPLE_PARSE_ARRAY($B, Value:8/unsigned, unsignedbyte);
+
+?SIMPLE_PARSE_ARRAY($s, Value:16/signed, short);
+?SIMPLE_PARSE_ARRAY($u, Value:16/unsigned, unsignedshort);
+
+?SIMPLE_PARSE_ARRAY($I, Value:32/signed, signedint);
+?SIMPLE_PARSE_ARRAY($i, Value:32/unsigned, unsignedint);
+
+?SIMPLE_PARSE_ARRAY($d, Value:64/float, double);
+?SIMPLE_PARSE_ARRAY($f, Value:32/float, float);
+
+%% both 'l' and 'L' decode as 64-bit signed, mirroring parse_table/1
+?SIMPLE_PARSE_ARRAY($l, Value:64/signed, long);
+?SIMPLE_PARSE_ARRAY($L, Value:64/signed, long);
+
+
+parse_array(<<$t, Value:8/unsigned, Rest/binary>>) ->
+ [{bool, (Value /= 0)} | parse_array(Rest)];
+
+parse_array(<<$D, Before:8/unsigned, After:32/unsigned, Rest/binary>>) ->
+ [{decimal, {Before, After}} | parse_array(Rest)];
+
+parse_array(<<$F, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
+ [{table, parse_table(Value)} | parse_array(Rest)];
+
+parse_array(<<$A, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
+ [{array, parse_array(Value)} | parse_array(Rest)];
+
+parse_array(<<$x, VLen:32/unsigned, Value:VLen/binary, Rest/binary>>) ->
+ [{binary, Value} | parse_array(Rest)];
+
+parse_array(<<$V, Rest/binary>>) ->
+ [{void, undefined} | parse_array(Rest)].
+
+%% Return the content with #content.properties populated, decoding
+%% properties_bin via the content's protocol module when needed. Per
+%% the record's invariant (at most one of properties/properties_bin
+%% is 'none') one of these two clauses always applies.
+ensure_content_decoded(Content = #content{properties = Props})
+ when Props =/= none ->
+ Content;
+ensure_content_decoded(Content = #content{properties_bin = PropBin,
+ protocol = Protocol})
+ when PropBin =/= none ->
+ Content#content{properties = Protocol:decode_properties(
+ Content#content.class_id, PropBin)}.
+
+%% Drop the decoded properties (keeping properties_bin so they can be
+%% re-decoded later); a no-op when already cleared or when clearing
+%% would lose the only representation.
+clear_decoded_content(Content = #content{properties = none}) ->
+ Content;
+clear_decoded_content(Content = #content{properties_bin = none}) ->
+ %% Only clear when we can rebuild the properties later in
+ %% accordance to the content record definition comment - maximum
+ %% one of properties and properties_bin can be 'none'
+ Content;
+clear_decoded_content(Content = #content{}) ->
+ Content#content{properties = none}.
+
+%% Assert that B is valid UTF-8; raises a frame_error protocol error
+%% (via rabbit_misc:protocol_error/3) on malformed input.
+assert_utf8(B) ->
+ case validate_utf8(B) of
+ ok -> ok;
+ error -> rabbit_misc:protocol_error(
+ frame_error, "Malformed UTF-8 in shortstr", [])
+ end.
+
+%% Validate UTF-8 by attempting a full decode with xmerl_ucs; that
+%% decoder exits with {ucs, _} on malformed input, which we map to
+%% 'error'. NOTE(review): this decodes the whole binary (building a
+%% list) just to validate it - potentially costly for large values.
+validate_utf8(Bin) ->
+ try
+ _ = xmerl_ucs:from_utf8(Bin),
+ ok
+ catch exit:{ucs, _} ->
+ error
+ end.
diff --git a/deps/rabbit_common/src/rabbit_cert_info.erl b/deps/rabbit_common/src/rabbit_cert_info.erl
new file mode 100644
index 0000000000..08e6f03c6c
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_cert_info.erl
@@ -0,0 +1,270 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_cert_info).
+
+-include_lib("public_key/include/public_key.hrl").
+
+-export([issuer/1,
+ subject/1,
+ subject_alternative_names/1,
+ validity/1,
+ subject_items/2,
+ extensions/1
+]).
+
+%%--------------------------------------------------------------------------
+
+-export_type([certificate/0]).
+
+-type certificate() :: binary().
+
+%%--------------------------------------------------------------------------
+%% High-level functions used by reader
+%%--------------------------------------------------------------------------
+
+%% Return a string describing the certificate's issuer.
+%% Note: the DER certificate is re-decoded on every call (cert_info/2).
+-spec issuer(certificate()) -> string().
+
+issuer(Cert) ->
+ cert_info(fun(#'OTPCertificate' {
+ tbsCertificate = #'OTPTBSCertificate' {
+ issuer = Issuer }}) ->
+ format_rdn_sequence(Issuer)
+ end, Cert).
+
+%% Return a string describing the certificate's subject, as per RFC4514.
+-spec subject(certificate()) -> string().
+
+subject(Cert) ->
+ cert_info(fun(#'OTPCertificate' {
+ tbsCertificate = #'OTPTBSCertificate' {
+ subject = Subject }}) ->
+ format_rdn_sequence(Subject)
+ end, Cert).
+
+%% Return the parts of the certificate's subject.
+%% Type is an OID tuple (e.g. ?'id-at-commonName'); returns all
+%% formatted values of that attribute or 'not_found'.
+-spec subject_items
+ (certificate(), tuple()) -> [string()] | 'not_found'.
+
+subject_items(Cert, Type) ->
+ cert_info(fun(#'OTPCertificate' {
+ tbsCertificate = #'OTPTBSCertificate' {
+ subject = Subject }}) ->
+ find_by_type(Type, Subject)
+ end, Cert).
+
+%% Return the raw extensions field of the certificate's TBS part.
+-spec extensions(certificate()) -> [#'Extension'{}].
+extensions(Cert) ->
+ cert_info(fun(#'OTPCertificate' {
+ tbsCertificate = #'OTPTBSCertificate' {
+ extensions = Extensions }}) ->
+ Extensions
+ end, Cert).
+
+%% Return the decoded subjectAltName extension value, or [] when the
+%% extension is absent. The keyfind runs inside 'try' so that a
+%% non-list extensions field (presumably when the certificate carries
+%% no extensions - TODO confirm the exact value) yields [] instead of
+%% a crash.
+-spec subject_alternative_names(certificate()) -> [{atom(), string()}].
+subject_alternative_names(Cert) ->
+ Extensions = extensions(Cert),
+ try lists:keyfind(?'id-ce-subjectAltName', #'Extension'.extnID, Extensions) of
+ false -> [];
+ #'Extension'{extnValue = Val} -> Val
+ catch _:_ -> []
+ end.
+
+%% Return a string describing the certificate's validity.
+%% Formats the notBefore/notAfter pair as "Start - End".
+-spec validity(certificate()) -> string().
+
+validity(Cert) ->
+ cert_info(fun(#'OTPCertificate' {
+ tbsCertificate = #'OTPTBSCertificate' {
+ validity = {'Validity', Start, End} }}) ->
+ rabbit_misc:format("~s - ~s", [format_asn1_value(Start),
+ format_asn1_value(End)])
+ end, Cert).
+
+%%--------------------------------------------------------------------------
+
+%% Decode the DER-encoded certificate into an #'OTPCertificate'{} and
+%% apply F to the decoded record.
+cert_info(F, Cert) ->
+ F(public_key:pkix_decode_cert(Cert, otp)).
+
+%% Collect every value of attribute Type from an rdnSequence, in
+%% order; 'not_found' when the attribute does not occur at all.
+find_by_type(Type, {rdnSequence, RDNs}) ->
+ case [V || #'AttributeTypeAndValue'{type = T, value = V}
+ <- lists:flatten(RDNs),
+ T == Type] of
+ [] -> not_found;
+ L -> [format_asn1_value(V) || V <- L]
+ end.
+
+%%--------------------------------------------------------------------------
+%% Formatting functions
+%%--------------------------------------------------------------------------
+
+%% Format an rdnSequence as a RFC4514 subject string. RDNs are
+%% reversed because RFC4514 lists them in the opposite order to the
+%% ASN.1 encoding.
+format_rdn_sequence({rdnSequence, Seq}) ->
+ string:join(lists:reverse([format_complex_rdn(RDN) || RDN <- Seq]), ",").
+
+%% Format an RDN set (multi-valued RDNs are joined with "+").
+format_complex_rdn(RDNs) ->
+ string:join([format_rdn(RDN) || RDN <- RDNs], "+").
+
+%% Format an RDN. If the type name is unknown, use the dotted decimal
+%% representation. See RFC4514, section 2.3.
+format_rdn(#'AttributeTypeAndValue'{type = T, value = V}) ->
+ FV = escape_rdn_value(format_asn1_value(V)),
+ Fmts = [{?'id-at-surname' , "SN"},
+ {?'id-at-givenName' , "GIVENNAME"},
+ {?'id-at-initials' , "INITIALS"},
+ {?'id-at-generationQualifier' , "GENERATIONQUALIFIER"},
+ {?'id-at-commonName' , "CN"},
+ {?'id-at-localityName' , "L"},
+ {?'id-at-stateOrProvinceName' , "ST"},
+ {?'id-at-organizationName' , "O"},
+ {?'id-at-organizationalUnitName' , "OU"},
+ {?'id-at-title' , "TITLE"},
+ {?'id-at-countryName' , "C"},
+ {?'id-at-serialNumber' , "SERIALNUMBER"},
+ {?'id-at-pseudonym' , "PSEUDONYM"},
+ {?'id-domainComponent' , "DC"},
+ {?'id-emailAddress' , "EMAILADDRESS"},
+ {?'street-address' , "STREET"},
+ {{0,9,2342,19200300,100,1,1} , "UID"}], %% Not in public_key.hrl
+ case proplists:lookup(T, Fmts) of
+ {_, Fmt} ->
+ rabbit_misc:format(Fmt ++ "=~s", [FV]);
+ %% unknown OID tuple: render as dotted-decimal, per RFC4514 2.3
+ none when is_tuple(T) ->
+ TypeL = [rabbit_misc:format("~w", [X]) || X <- tuple_to_list(T)],
+ rabbit_misc:format("~s=~s", [string:join(TypeL, "."), FV]);
+ none ->
+ rabbit_misc:format("~p=~s", [T, FV])
+ end.
+
+%% Escape a string as per RFC4514. The 'start'/'middle' state tracks
+%% whether we are at the first character, where leading space and '#'
+%% must also be escaped; a lone trailing space is escaped too.
+escape_rdn_value(V) ->
+ escape_rdn_value(V, start).
+
+escape_rdn_value([], _) ->
+ [];
+escape_rdn_value([C | S], start) when C =:= $ ; C =:= $# ->
+ [$\\, C | escape_rdn_value(S, middle)];
+escape_rdn_value(S, start) ->
+ escape_rdn_value(S, middle);
+escape_rdn_value([$ ], middle) ->
+ [$\\, $ ];
+escape_rdn_value([C | S], middle) when C =:= $"; C =:= $+; C =:= $,; C =:= $;;
+ C =:= $<; C =:= $>; C =:= $\\ ->
+ [$\\, C | escape_rdn_value(S, middle)];
+%% NOTE(review): 'C >= 126' also escapes $~ (code 126), which is
+%% printable ASCII; presumably 'C > 126' was intended - confirm.
+escape_rdn_value([C | S], middle) when C < 32 ; C >= 126 ->
+ %% Of ASCII characters only U+0000 needs escaping, but for display
+ %% purposes it's handy to escape all non-printable chars. All non-ASCII
+ %% characters get converted to UTF-8 sequences and then escaped. We've
+ %% already got a UTF-8 sequence here, so just escape it.
+ rabbit_misc:format("\\~2.16.0B", [C]) ++ escape_rdn_value(S, middle);
+escape_rdn_value([C | S], middle) ->
+ [C | escape_rdn_value(S, middle)].
+
+%% Get the string representation of an OTPCertificate field.
+%% Directory string types are normalised via format_directory_string/2.
+format_asn1_value({ST, S}) when ST =:= teletexString; ST =:= printableString;
+ ST =:= universalString; ST =:= utf8String;
+ ST =:= bmpString ->
+ format_directory_string(ST, S);
+%% utcTime: reformat "YYMMDDHHMMSSZ" as an ISO-8601-style timestamp,
+%% assuming years 2000-2099 (hard-coded "20" century prefix).
+format_asn1_value({utcTime, [Y1, Y2, M1, M2, D1, D2, H1, H2,
+ Min1, Min2, S1, S2, $Z]}) ->
+ rabbit_misc:format("20~c~c-~c~c-~c~cT~c~c:~c~c:~c~cZ",
+ [Y1, Y2, M1, M2, D1, D2, H1, H2, Min1, Min2, S1, S2]);
+%% We appear to get an untagged value back for an ia5string
+%% (e.g. domainComponent).
+format_asn1_value(V) when is_list(V) ->
+ V;
+format_asn1_value(V) when is_binary(V) ->
+ %% OTP does not decode some values when combined with an unknown
+ %% type. That's probably wrong, so as a last ditch effort let's
+ %% try manually decoding. 'DirectoryString' is semi-arbitrary -
+ %% but it is the type which covers the various string types we
+ %% handle below.
+ try
+ {ST, S} = public_key:der_decode('DirectoryString', V),
+ format_directory_string(ST, S)
+ catch _:_ ->
+ rabbit_misc:format("~p", [V])
+ end;
+%% anything else: fall back to Erlang term formatting
+format_asn1_value(V) ->
+ rabbit_misc:format("~p", [V]).
+
+%% DirectoryString { INTEGER : maxSize } ::= CHOICE {
+%% teletexString TeletexString (SIZE (1..maxSize)),
+%% printableString PrintableString (SIZE (1..maxSize)),
+%% bmpString BMPString (SIZE (1..maxSize)),
+%% universalString UniversalString (SIZE (1..maxSize)),
+%% uTF8String UTF8String (SIZE (1..maxSize)) }
+%%
+%% Precise definitions of printable / teletexString are hard to come
+%% by. This is what I reconstructed:
+%%
+%% printableString:
+%% "intended to represent the limited character sets available to
+%% mainframe input terminals"
+%% A-Z a-z 0-9 ' ( ) + , - . / : = ? [space]
+%% https://msdn.microsoft.com/en-us/library/bb540814(v=vs.85).aspx
+%%
+%% teletexString:
+%% "a sizable volume of software in the world treats TeletexString
+%% (T61String) as a simple 8-bit string with mostly Windows Latin 1
+%% (superset of iso-8859-1) encoding"
+%% https://www.mail-archive.com/asn1@asn1.org/msg00460.html
+%%
+%% (However according to that link X.680 actually defines
+%% TeletexString in some much more involved and crazy way. I suggest
+%% we treat it as ISO-8859-1 since Erlang does not support Windows
+%% Latin 1).
+%%
+%% bmpString:
+%% UCS-2 according to RFC 3641. Hence cannot represent Unicode
+%% characters above 65535 (outside the "Basic Multilingual Plane").
+%%
+%% universalString:
+%% UCS-4 according to RFC 3641.
+%%
+%% utf8String:
+%% UTF-8 according to RFC 3641.
+%%
+%% Within Rabbit we assume UTF-8 encoding. Since printableString is a
+%% subset of ASCII it is also a subset of UTF-8. The others need
+%% converting. Fortunately since the Erlang SSL library does the
+%% decoding for us (albeit into a weird format, see below), we just
+%% need to handle encoding into UTF-8. Note also that utf8Strings come
+%% back as binary.
+%%
+%% Note for testing: the default Ubuntu configuration for openssl will
+%% only create printableString or teletexString types no matter what
+%% you do. Edit string_mask in the [req] section of
+%% /etc/ssl/openssl.cnf to change this (see comments there). You
+%% probably also need to set utf8 = yes to get it to accept UTF-8 on
+%% the command line. Also note I could not get openssl to generate a
+%% universalString.
+
+%% Convert a decoded directory string of the given ASN.1 type to a
+%% UTF-8 byte list (see the long commentary above for the encoding
+%% rationale of each type).
+format_directory_string(printableString, S) -> S;
+format_directory_string(teletexString, S) -> utf8_list_from(S);
+format_directory_string(bmpString, S) -> utf8_list_from(S);
+format_directory_string(universalString, S) -> utf8_list_from(S);
+format_directory_string(utf8String, S) -> binary_to_list(S).
+
+%% Re-encode the SSL library's code-point list as a UTF-8 byte list.
+utf8_list_from(S) ->
+ binary_to_list(
+ unicode:characters_to_binary(flatten_ssl_list(S), utf32, utf8)).
+
+%% The Erlang SSL implementation invents its own representation for
+%% non-ascii strings - looking like [97,{0,0,3,187}] (that's LATIN
+%% SMALL LETTER A followed by GREEK SMALL LETTER LAMDA). We convert
+%% this into a list of unicode characters, which we can tell
+%% unicode:characters_to_binary is utf32.
+
+flatten_ssl_list(L) -> [flatten_ssl_list_item(I) || I <- L].
+
+%% 4-byte tuples are big-endian UCS-4 code points; plain numbers pass
+%% through unchanged.
+flatten_ssl_list_item({A, B, C, D}) ->
+ A * (1 bsl 24) + B * (1 bsl 16) + C * (1 bsl 8) + D;
+flatten_ssl_list_item(N) when is_number (N) ->
+ N.
diff --git a/deps/rabbit_common/src/rabbit_channel_common.erl b/deps/rabbit_common/src/rabbit_channel_common.erl
new file mode 100644
index 0000000000..a21e17b2e7
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_channel_common.erl
@@ -0,0 +1,25 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_channel_common).
+
+-export([do/2, do/3, do_flow/3, ready_for_close/1]).
+
+%% Dispatch Method with no content to the channel process Pid.
+do(Pid, Method) ->
+ do(Pid, Method, none).
+
+%% Dispatch Method and Content to the channel process without
+%% credit-flow accounting ('noflow').
+do(Pid, Method, Content) ->
+ gen_server2:cast(Pid, {method, Method, Content, noflow}).
+
+%% As do/3 but with credit-flow accounting: records the send against
+%% the calling process before casting, tagging the message 'flow'.
+do_flow(Pid, Method, Content) ->
+ %% Here we are tracking messages sent by the rabbit_reader
+ %% process. We are accessing the rabbit_reader process dictionary.
+ credit_flow:send(Pid),
+ gen_server2:cast(Pid, {method, Method, Content, flow}).
+
+%% Cast the 'ready_for_close' signal to the channel process.
+ready_for_close(Pid) ->
+ gen_server2:cast(Pid, ready_for_close).
diff --git a/deps/rabbit_common/src/rabbit_command_assembler.erl b/deps/rabbit_common/src/rabbit_command_assembler.erl
new file mode 100644
index 0000000000..ea6b19d083
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_command_assembler.erl
@@ -0,0 +1,124 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_command_assembler).
+-include("rabbit_framing.hrl").
+-include("rabbit.hrl").
+
+-export([analyze_frame/3, init/1, process/2]).
+
+%%----------------------------------------------------------------------------
+
+%%----------------------------------------------------------------------------
+
+-export_type([frame/0]).
+
+-type frame_type() :: ?FRAME_METHOD | ?FRAME_HEADER | ?FRAME_BODY |
+ ?FRAME_OOB_METHOD | ?FRAME_OOB_HEADER | ?FRAME_OOB_BODY |
+ ?FRAME_TRACE | ?FRAME_HEARTBEAT.
+-type protocol() :: rabbit_framing:protocol().
+-type method() :: rabbit_framing:amqp_method_record().
+-type class_id() :: rabbit_framing:amqp_class_id().
+-type weight() :: non_neg_integer().
+-type body_size() :: non_neg_integer().
+-type content() :: rabbit_types:undecoded_content().
+
+-type frame() ::
+ {'method', rabbit_framing:amqp_method_name(), binary()} |
+ {'content_header', class_id(), weight(), body_size(), binary()} |
+ {'content_body', binary()}.
+
+-type state() ::
+ {'method', protocol()} |
+ {'content_header', method(), class_id(), protocol()} |
+ {'content_body', method(), body_size(), class_id(), protocol()}.
+
+-spec analyze_frame(frame_type(), binary(), protocol()) ->
+ frame() | 'heartbeat' | 'error'.
+
+-spec init(protocol()) -> {ok, state()}.
+-spec process(frame(), state()) ->
+ {ok, state()} |
+ {ok, method(), state()} |
+ {ok, method(), content(), state()} |
+ {error, rabbit_types:amqp_error()}.
+
+%%--------------------------------------------------------------------
+
+%% Classify a raw frame payload by its wire type: method frames are
+%% resolved to a method name via the protocol module; header/body
+%% frames are split into their components; an empty heartbeat frame
+%% yields 'heartbeat'; anything else (unknown type or malformed
+%% payload) yields 'error'.
+analyze_frame(?FRAME_METHOD,
+ <<ClassId:16, MethodId:16, MethodFields/binary>>,
+ Protocol) ->
+ MethodName = Protocol:lookup_method_name({ClassId, MethodId}),
+ {method, MethodName, MethodFields};
+analyze_frame(?FRAME_HEADER,
+ <<ClassId:16, Weight:16, BodySize:64, Properties/binary>>,
+ _Protocol) ->
+ {content_header, ClassId, Weight, BodySize, Properties};
+analyze_frame(?FRAME_BODY, Body, _Protocol) ->
+ {content_body, Body};
+analyze_frame(?FRAME_HEARTBEAT, <<>>, _Protocol) ->
+ heartbeat;
+analyze_frame(_Type, _Body, _Protocol) ->
+ error.
+
+%% Initial assembler state: expecting a method frame.
+init(Protocol) -> {ok, {method, Protocol}}.
+
+%% Frame-assembly state machine: method -> (optional) content_header
+%% -> (optional) content_body* -> back to method. Each clause pairs
+%% an incoming frame with the state that expects it; mismatches
+%% produce an unexpected_frame error.
+process({method, MethodName, FieldsBin}, {method, Protocol}) ->
+ try
+ Method = Protocol:decode_method_fields(MethodName, FieldsBin),
+ case Protocol:method_has_content(MethodName) of
+ true -> {ClassId, _MethodId} = Protocol:method_id(MethodName),
+ {ok, {content_header, Method, ClassId, Protocol}};
+ false -> {ok, Method, {method, Protocol}}
+ end
+ catch exit:#amqp_error{} = Reason -> {error, Reason}
+ end;
+process(_Frame, {method, _Protocol}) ->
+ unexpected_frame("expected method frame, "
+ "got non method frame instead", [], none);
+%% header with zero body size: the content is complete immediately.
+%% Note both header clauses require Weight = 0.
+process({content_header, ClassId, 0, 0, PropertiesBin},
+ {content_header, Method, ClassId, Protocol}) ->
+ Content = empty_content(ClassId, PropertiesBin, Protocol),
+ {ok, Method, Content, {method, Protocol}};
+process({content_header, ClassId, 0, BodySize, PropertiesBin},
+ {content_header, Method, ClassId, Protocol}) ->
+ Content = empty_content(ClassId, PropertiesBin, Protocol),
+ {ok, {content_body, Method, BodySize, Content, Protocol}};
+process({content_header, HeaderClassId, 0, _BodySize, _PropertiesBin},
+ {content_header, Method, ClassId, _Protocol}) ->
+ unexpected_frame("expected content header for class ~w, "
+ "got one for class ~w instead",
+ [ClassId, HeaderClassId], Method);
+process(_Frame, {content_header, Method, ClassId, _Protocol}) ->
+ unexpected_frame("expected content header for class ~w, "
+ "got non content header frame instead", [ClassId], Method);
+%% accumulate body fragments (reversed) until RemainingSize hits 0
+process({content_body, FragmentBin},
+ {content_body, Method, RemainingSize,
+ Content = #content{payload_fragments_rev = Fragments}, Protocol}) ->
+ NewContent = Content#content{
+ payload_fragments_rev = [FragmentBin | Fragments]},
+ case RemainingSize - size(FragmentBin) of
+ 0 -> {ok, Method, NewContent, {method, Protocol}};
+ Sz -> {ok, {content_body, Method, Sz, NewContent, Protocol}}
+ end;
+process(_Frame, {content_body, Method, _RemainingSize, _Content, _Protocol}) ->
+ unexpected_frame("expected content body, "
+ "got non content body frame instead", [], Method).
+
+%%--------------------------------------------------------------------
+
+%% Build a #content{} with the given encoded properties and no body
+%% fragments yet; properties stay undecoded (properties = none).
+empty_content(ClassId, PropertiesBin, Protocol) ->
+ #content{class_id = ClassId,
+ properties = none,
+ properties_bin = PropertiesBin,
+ protocol = Protocol,
+ payload_fragments_rev = []}.
+
+%% Build the unexpected_frame error tuple; a non-atom Method (a method
+%% record) is first reduced to its record type atom.
+unexpected_frame(Format, Params, Method) when is_atom(Method) ->
+ {error, rabbit_misc:amqp_error(unexpected_frame, Format, Params, Method)};
+unexpected_frame(Format, Params, Method) ->
+ unexpected_frame(Format, Params, rabbit_misc:method_record_type(Method)).
diff --git a/deps/rabbit_common/src/rabbit_control_misc.erl b/deps/rabbit_common/src/rabbit_control_misc.erl
new file mode 100644
index 0000000000..0fff88a2cd
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_control_misc.erl
@@ -0,0 +1,179 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_control_misc).
+
+-export([emitting_map/4, emitting_map/5, emitting_map_with_exit_handler/4,
+ emitting_map_with_exit_handler/5, wait_for_info_messages/6,
+ spawn_emitter_caller/7, await_emitters_termination/1,
+ print_cmd_result/2]).
+
+-spec emitting_map(pid(), reference(), fun(), list()) -> 'ok'.
+-spec emitting_map(pid(), reference(), fun(), list(), atom()) -> 'ok'.
+-spec emitting_map_with_exit_handler
+ (pid(), reference(), fun(), list()) -> 'ok'.
+-spec emitting_map_with_exit_handler
+ (pid(), reference(), fun(), list(), 'continue') -> 'ok'.
+
+-type fold_fun() :: fun((Item :: term(), AccIn :: term()) -> AccOut :: term()).
+
+-spec wait_for_info_messages(pid(), reference(), fold_fun(), InitialAcc, timeout(), non_neg_integer()) -> OK | Err when
+ InitialAcc :: term(), Acc :: term(), OK :: {ok, Acc}, Err :: {error, term()}.
+-spec spawn_emitter_caller(node(), module(), atom(), [term()], reference(), pid(), timeout()) -> 'ok'.
+-spec await_emitters_termination([pid()]) -> 'ok'.
+
+-spec print_cmd_result(atom(), term()) -> 'ok'.
+
+%% Apply Fun to each Item, sending each result to AggregatorPid as
+%% {Ref, Result, continue}, then signal completion with {Ref, finished}.
+emitting_map(AggregatorPid, Ref, Fun, List) ->
+ emitting_map(AggregatorPid, Ref, Fun, List, continue),
+ AggregatorPid ! {Ref, finished},
+ ok.
+
+%% As emitting_map/4 but without the trailing 'finished' message;
+%% the last argument must be the literal atom 'continue'.
+emitting_map(AggregatorPid, Ref, Fun, List, continue) ->
+ _ = emitting_map0(AggregatorPid, Ref, Fun, List, fun step/4),
+ ok.
+
+%% As emitting_map/4 but items whose Fun exits are silently skipped
+%% (see step_with_exit_handler/4).
+emitting_map_with_exit_handler(AggregatorPid, Ref, Fun, List) ->
+ emitting_map_with_exit_handler(AggregatorPid, Ref, Fun, List, continue),
+ AggregatorPid ! {Ref, finished},
+ ok.
+
+emitting_map_with_exit_handler(AggregatorPid, Ref, Fun, List, continue) ->
+ _ = emitting_map0(AggregatorPid, Ref, Fun, List, fun step_with_exit_handler/4),
+ ok.
+
+%% Common driver: run StepFun over every item.
+emitting_map0(AggregatorPid, Ref, Fun, List, StepFun) ->
+ [StepFun(AggregatorPid, Ref, Fun, Item) || Item <- List].
+
+%% Emit Fun(Item) to the aggregator, tagged with Ref.
+step(AggregatorPid, Ref, Fun, Item) ->
+ AggregatorPid ! {Ref, Fun(Item), continue},
+ ok.
+
+%% As step/4 but swallow exits from Fun: the unique Noop reference
+%% distinguishes "the exit handler fired" from any genuine result,
+%% so failed items are simply not emitted.
+step_with_exit_handler(AggregatorPid, Ref, Fun, Item) ->
+ Noop = make_ref(),
+ case rabbit_misc:with_exit_handler(
+ fun () -> Noop end,
+ fun () -> Fun(Item) end) of
+ Noop ->
+ ok;
+ Res ->
+ AggregatorPid ! {Ref, Res, continue},
+ ok
+ end.
+
+%% Invokes RPC for async info collection in separate (but linked to
+%% the caller) process. Separate process waits for RPC to finish and
+%% in case of errors sends them in wait_for_info_messages/5-compatible
+%% form to aggregator process. Calling process is then expected to
+%% do blocking call of wait_for_info_messages/5.
+%%
+%% Remote function MUST use calls to emitting_map/4 (and other
+%% emitting_map's) to properly deliver requested information to an
+%% aggregator process.
+%%
+%% If for performance reasons several parallel emitting_map's need to
+%% be run, remote function MUST NOT return until all this
+%% emitting_map's are done. And during all this time remote RPC
+%% process MUST be linked to emitting
+%% processes. await_emitters_termination/1 helper can be used as a
+%% last statement of remote function to ensure this behaviour.
+%% Spawn a monitored process that performs the RPC and forwards any
+%% error result to Pid as {Ref, error, Error}; successful emission
+%% reaches Pid directly via emitting_map in the remote function. The
+%% spawn_monitor result is discarded here - the corresponding 'DOWN'
+%% message is consumed by wait_for_info_messages/4's DOWN clauses.
+spawn_emitter_caller(Node, Mod, Fun, Args, Ref, Pid, Timeout) ->
+ _ = spawn_monitor(
+ fun () ->
+ case rpc_call_emitter(Node, Mod, Fun, Args, Ref, Pid, Timeout) of
+ {error, _} = Error ->
+ Pid ! {Ref, error, Error};
+ {bad_argument, _} = Error ->
+ Pid ! {Ref, error, Error};
+ {badrpc, _} = Error ->
+ Pid ! {Ref, error, Error};
+ _ ->
+ ok
+ end
+ end),
+ ok.
+
+%% Perform the RPC, appending Ref and Pid so the remote emitting_map
+%% knows where to send its results.
+rpc_call_emitter(Node, Mod, Fun, Args, Ref, Pid, Timeout) ->
+ rabbit_misc:rpc_call(Node, Mod, Fun, Args++[Ref, Pid], Timeout).
+
+%% Aggregator process expects correct numbers of explicits ACKs about
+%% finished emission process. While everything is linked, we still
+%% need somehow to wait for termination of all emitters before
+%% returning from RPC call - otherwise links will be just broken with
+%% reason 'normal' and we can miss some errors, and subsequently
+%% hang.
+%% Block until every emitter Pid has terminated; exits with
+%% {emitter_exit, Pid, Reason} on any abnormal termination.
+await_emitters_termination(Pids) ->
+ Monitors = [erlang:monitor(process, Pid) || Pid <- Pids],
+ collect_monitors(Monitors).
+
+%% Wait for each monitor in turn; 'normal' and 'noproc' both count as
+%% graceful termination.
+collect_monitors([]) ->
+ ok;
+collect_monitors([Monitor|Rest]) ->
+ receive
+ {'DOWN', Monitor, process, _Pid, normal} ->
+ collect_monitors(Rest);
+ {'DOWN', Monitor, process, _Pid, noproc} ->
+ %% There is a link and a monitor to a process. Matching
+ %% this clause means that process has gracefully
+ %% terminated even before we've started monitoring.
+ collect_monitors(Rest);
+ %% any monitored emitter dying abnormally aborts the whole wait
+ {'DOWN', _, process, Pid, Reason} when Reason =/= normal,
+ Reason =/= noproc ->
+ exit({emitter_exit, Pid, Reason})
+ end.
+
+%% Wait for result of one or more calls to emitting_map-family
+%% functions.
+%%
+%% Number of expected acknowledgments is specified by ChunkCount
+%% argument. Most common usage will be with ChunkCount equals to
+%% number of live nodes, but it's not mandatory - thus more generic
+%% name of 'ChunkCount' was chosen.
+%% Arm the optional timeout notification, then fold incoming results
+%% into Acc0 until ChunkCount {Ref, finished} acknowledgments arrive.
+wait_for_info_messages(Pid, Ref, Fun, Acc0, Timeout, ChunkCount) ->
+ _ = notify_if_timeout(Pid, Ref, Timeout),
+ wait_for_info_messages(Ref, Fun, Acc0, ChunkCount).
+
+%% Receive loop: results tagged 'continue' are folded with Fun; a
+%% self-sent {Ref, {timeout, T}} aborts via exit; unrecognised
+%% messages (including normal 'DOWN's from spawn_emitter_caller) are
+%% drained without affecting the accumulator.
+wait_for_info_messages(Ref, Fun, Acc0, ChunksLeft) ->
+ receive
+ {Ref, finished} when ChunksLeft =:= 1 ->
+ {ok, Acc0};
+ {Ref, finished} ->
+ wait_for_info_messages(Ref, Fun, Acc0, ChunksLeft - 1);
+ {Ref, {timeout, T}} ->
+ exit({error, {timeout, (T / 1000)}});
+ {Ref, []} ->
+ wait_for_info_messages(Ref, Fun, Acc0, ChunksLeft);
+ {Ref, Result, continue} ->
+ wait_for_info_messages(Ref, Fun, Fun(Result, Acc0), ChunksLeft);
+ {Ref, error, Error} ->
+ {error, simplify_emission_error(Error)};
+ {'DOWN', _MRef, process, _Pid, normal} ->
+ wait_for_info_messages(Ref, Fun, Acc0, ChunksLeft);
+ {'DOWN', _MRef, process, _Pid, Reason} ->
+ {error, simplify_emission_error(Reason)};
+ _Msg ->
+ wait_for_info_messages(Ref, Fun, Acc0, ChunksLeft)
+ end.
+
+%% Strip RPC / nocatch wrapping from emission failures so callers see
+%% the original thrown error; anything unrecognised is wrapped as
+%% {error, Reason}.
+simplify_emission_error({badrpc, {'EXIT', {{nocatch, EmissionError}, _Stacktrace}}}) ->
+ EmissionError;
+simplify_emission_error({{nocatch, EmissionError}, _Stacktrace}) ->
+ EmissionError;
+simplify_emission_error({error, _} = Error) ->
+ Error;
+simplify_emission_error({bad_argument, _} = Error) ->
+ Error;
+simplify_emission_error(Anything) ->
+ {error, Anything}.
+
+%% Schedule a {Ref, {timeout, Timeout}} message to Pid after Timeout
+%% ms; no-op for 'infinity'. The timer reference is discarded by the
+%% caller ('_ =') - the timer is never cancelled.
+notify_if_timeout(_, _, infinity) ->
+ ok;
+notify_if_timeout(Pid, Ref, Timeout) ->
+ erlang:send_after(Timeout, Pid, {Ref, {timeout, Timeout}}).
+
+%% Print a human-readable result for specific CLI commands. Only the
+%% two cases below are defined; any other command/result pair raises
+%% function_clause - presumably callers only invoke it for these
+%% commands (TODO confirm at call sites).
+print_cmd_result(authenticate_user, _Result) -> io:format("Success~n");
+print_cmd_result(join_cluster, already_member) -> io:format("The node is already a member of this cluster~n").
diff --git a/deps/rabbit_common/src/rabbit_core_metrics.erl b/deps/rabbit_common/src/rabbit_core_metrics.erl
new file mode 100644
index 0000000000..3a6732c0d2
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_core_metrics.erl
@@ -0,0 +1,437 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_core_metrics).
+
+-include("rabbit_core_metrics.hrl").
+
+-export([init/0]).
+-export([terminate/0]).
+
+-export([connection_created/2,
+ connection_closed/1,
+ connection_stats/2,
+ connection_stats/4]).
+
+-export([channel_created/2,
+ channel_closed/1,
+ channel_stats/2,
+ channel_stats/3,
+ channel_stats/4,
+ channel_queue_down/1,
+ channel_queue_exchange_down/1,
+ channel_exchange_down/1]).
+
+-export([consumer_created/9,
+ consumer_updated/9,
+ consumer_deleted/3]).
+
+-export([queue_stats/2,
+ queue_stats/5,
+ queue_declared/1,
+ queue_created/1,
+ queue_deleted/1,
+ queues_deleted/1]).
+
+-export([node_stats/2]).
+
+-export([node_node_stats/2]).
+
+-export([gen_server2_stats/2,
+ gen_server2_deleted/1,
+ get_gen_server2_stats/1]).
+
+-export([delete/2]).
+
+-export([auth_attempt_failed/3,
+ auth_attempt_succeeded/3,
+ reset_auth_attempt_metrics/0,
+ get_auth_attempts/0,
+ get_auth_attempts_by_source/0]).
+
+%%----------------------------------------------------------------------------
+%% Types
+%%----------------------------------------------------------------------------
+-type(channel_stats_id() :: pid() |
+ {pid(),
+ {rabbit_amqqueue:name(), rabbit_exchange:name()}} |
+ {pid(), rabbit_amqqueue:name()} |
+ {pid(), rabbit_exchange:name()}).
+
+-type(channel_stats_type() :: queue_exchange_stats | queue_stats |
+ exchange_stats | reductions).
+
+-type(activity_status() :: up | single_active | waiting | suspected_down).
+%%----------------------------------------------------------------------------
+%% Specs
+%%----------------------------------------------------------------------------
+-spec init() -> ok.
+-spec connection_created(pid(), rabbit_types:infos()) -> ok.
+-spec connection_closed(pid()) -> ok.
+-spec connection_stats(pid(), rabbit_types:infos()) -> ok.
+-spec connection_stats(pid(), integer(), integer(), integer()) -> ok.
+-spec channel_created(pid(), rabbit_types:infos()) -> ok.
+-spec channel_closed(pid()) -> ok.
+-spec channel_stats(pid(), rabbit_types:infos()) -> ok.
+-spec channel_stats(channel_stats_type(), channel_stats_id(),
+ rabbit_types:infos() | integer()) -> ok.
+-spec channel_queue_down({pid(), rabbit_amqqueue:name()}) -> ok.
+-spec channel_queue_exchange_down({pid(), {rabbit_amqqueue:name(),
+ rabbit_exchange:name()}}) -> ok.
+-spec channel_exchange_down({pid(), rabbit_exchange:name()}) -> ok.
+-spec consumer_created(pid(), binary(), boolean(), boolean(),
+ rabbit_amqqueue:name(), integer(), boolean(), activity_status(), list()) -> ok.
+-spec consumer_updated(pid(), binary(), boolean(), boolean(),
+ rabbit_amqqueue:name(), integer(), boolean(), activity_status(), list()) -> ok.
+-spec consumer_deleted(pid(), binary(), rabbit_amqqueue:name()) -> ok.
+-spec queue_stats(rabbit_amqqueue:name(), rabbit_types:infos()) -> ok.
+-spec queue_stats(rabbit_amqqueue:name(), integer(), integer(), integer(),
+ integer()) -> ok.
+-spec node_stats(atom(), rabbit_types:infos()) -> ok.
+-spec node_node_stats({node(), node()}, rabbit_types:infos()) -> ok.
+-spec gen_server2_stats(pid(), integer()) -> ok.
+-spec gen_server2_deleted(pid()) -> ok.
+-spec get_gen_server2_stats(pid()) -> integer() | 'not_found'.
+-spec delete(atom(), any()) -> ok.
+%%----------------------------------------------------------------------------
+%% Storage of the raw metrics in RabbitMQ core. All the processing of stats
+%% is done by the management plugin.
+%%----------------------------------------------------------------------------
+%%----------------------------------------------------------------------------
+%% API
+%%----------------------------------------------------------------------------
+%% Create every core-metrics ETS table; names and table types come from
+%% ?CORE_TABLES / ?CORE_EXTRA_TABLES in rabbit_core_metrics.hrl. Tables
+%% are public and named so any process can update counters directly.
+init() ->
+ _ = [ets:new(Table, [Type, public, named_table, {write_concurrency, true},
+ {read_concurrency, true}])
+ || {Table, Type} <- ?CORE_TABLES ++ ?CORE_EXTRA_TABLES],
+ ok.
+
+%% Drop every table created by init/0.
+terminate() ->
+ [ets:delete(Table)
+ || {Table, _Type} <- ?CORE_TABLES ++ ?CORE_EXTRA_TABLES],
+ ok.
+
+%% Register a new connection row and bump column 2 of this node's
+%% connection-churn counters.
+connection_created(Pid, Infos) ->
+ ets:insert(connection_created, {Pid, Infos}),
+ ets:update_counter(connection_churn_metrics, node(), {2, 1},
+ ?CONNECTION_CHURN_METRICS),
+ ok.
+
+%% Remove the connection's rows and bump churn column 3. The coarse
+%% row is only flagged (delete marker in column 5) so a final sample
+%% can still be emitted by the stats-processing side.
+connection_closed(Pid) ->
+ ets:delete(connection_created, Pid),
+ ets:delete(connection_metrics, Pid),
+ %% Delete marker
+ ets:update_element(connection_coarse_metrics, Pid, {5, 1}),
+ ets:update_counter(connection_churn_metrics, node(), {3, 1},
+ ?CONNECTION_CHURN_METRICS),
+ ok.
+
+%% Overwrite the detailed per-connection stats row.
+connection_stats(Pid, Infos) ->
+ ets:insert(connection_metrics, {Pid, Infos}),
+ ok.
+
+%% Overwrite the coarse per-connection counters; the trailing 0 clears
+%% the delete marker.
+connection_stats(Pid, Recv_oct, Send_oct, Reductions) ->
+ %% Includes delete marker
+ ets:insert(connection_coarse_metrics, {Pid, Recv_oct, Send_oct, Reductions, 0}),
+ ok.
+
+%% Register a new channel row; bumps churn column 4.
+channel_created(Pid, Infos) ->
+ ets:insert(channel_created, {Pid, Infos}),
+ ets:update_counter(connection_churn_metrics, node(), {4, 1},
+ ?CONNECTION_CHURN_METRICS),
+ ok.
+
+%% Remove all per-channel rows; bumps churn column 5.
+channel_closed(Pid) ->
+ ets:delete(channel_created, Pid),
+ ets:delete(channel_metrics, Pid),
+ ets:delete(channel_process_metrics, Pid),
+ ets:update_counter(connection_churn_metrics, node(), {5, 1},
+ ?CONNECTION_CHURN_METRICS),
+ ok.
+
+%% Overwrite the detailed per-channel stats row.
+channel_stats(Pid, Infos) ->
+ ets:insert(channel_metrics, {Pid, Infos}),
+ ok.
+
+%% Record a channel process's reduction count.
+channel_stats(reductions, Id, Value) ->
+ ets:insert(channel_process_metrics, {Id, Value}),
+ ok.
+
%% Bump one counter cell in the fine-grained per-channel stats tables.
%% Each default row ends with an extra 0 cell: the "delete marker" used
%% when the peer entity goes away. The statistic name is mapped to its
%% column position in the row.
channel_stats(exchange_stats, Stat, Id, Incr) ->
    Pos = case Stat of
              publish           -> 2;
              confirm           -> 3;
              return_unroutable -> 4;
              drop_unroutable   -> 5
          end,
    %% Default row includes the delete marker (last cell).
    _ = ets:update_counter(channel_exchange_metrics, Id, {Pos, Incr},
                           {Id, 0, 0, 0, 0, 0}),
    ok;
channel_stats(queue_exchange_stats, publish, Id, Incr) ->
    %% A bare integer increment updates position 2, the only counter in
    %% this row; the default row includes the delete marker.
    _ = ets:update_counter(channel_queue_exchange_metrics, Id, Incr, {Id, 0, 0}),
    ok;
channel_stats(queue_stats, Stat, Id, Incr) ->
    Pos = case Stat of
              get            -> 2;
              get_no_ack     -> 3;
              deliver        -> 4;
              deliver_no_ack -> 5;
              redeliver      -> 6;
              ack            -> 7;
              get_empty      -> 8
          end,
    %% Default row includes the delete marker (last cell).
    _ = ets:update_counter(channel_queue_metrics, Id, {Pos, Incr},
                           {Id, 0, 0, 0, 0, 0, 0, 0, 0}),
    ok.
+
+%% Remove an arbitrary row from an arbitrary metrics table; deleting a
+%% missing key is a no-op.
+delete(Table, Key) ->
+ ets:delete(Table, Key),
+ ok.
+
+%% The *_down functions only set the delete marker (last column) on
+%% the row; actual removal happens on the stats-processing side (the
+%% module header notes processing is done by the management plugin).
+channel_queue_down(Id) ->
+ %% Delete marker
+ ets:update_element(channel_queue_metrics, Id, {9, 1}),
+ ok.
+
+channel_queue_exchange_down(Id) ->
+ %% Delete marker
+ ets:update_element(channel_queue_exchange_metrics, Id, {3, 1}),
+ ok.
+
+channel_exchange_down(Id) ->
+ %% Delete marker
+ ets:update_element(channel_exchange_metrics, Id, {6, 1}),
+ ok.
+
%% Record a consumer row, keyed on {QueueName, ChannelPid, ConsumerTag}.
consumer_created(ChPid, ConsumerTag, ExclusiveConsume, AckRequired, QName,
                 PrefetchCount, Active, ActivityStatus, Args) ->
    ets:insert(consumer_created, {{QName, ChPid, ConsumerTag}, ExclusiveConsume,
                                  AckRequired, PrefetchCount, Active,
                                  ActivityStatus, Args}),
    ok.

%% An update simply overwrites the existing row with the new values, so
%% delegate to consumer_created/9 instead of duplicating its body.
consumer_updated(ChPid, ConsumerTag, ExclusiveConsume, AckRequired, QName,
                 PrefetchCount, Active, ActivityStatus, Args) ->
    consumer_created(ChPid, ConsumerTag, ExclusiveConsume, AckRequired, QName,
                     PrefetchCount, Active, ActivityStatus, Args).

%% Drop the consumer row; deleting an absent key is a no-op.
consumer_deleted(ChPid, ConsumerTag, QName) ->
    ets:delete(consumer_created, {QName, ChPid, ConsumerTag}),
    ok.
+
+%% Overwrite the detailed per-queue stats row; the trailing 0 clears
+%% the delete marker.
+queue_stats(Name, Infos) ->
+ %% Includes delete marker
+ ets:insert(queue_metrics, {Name, Infos, 0}),
+ ok.
+
+%% Overwrite the coarse per-queue message/reduction counters.
+queue_stats(Name, MessagesReady, MessagesUnacknowledge, Messages, Reductions) ->
+ ets:insert(queue_coarse_metrics, {Name, MessagesReady, MessagesUnacknowledge,
+ Messages, Reductions}),
+ ok.
+
+%% Bump churn column 6 (queue declared).
+queue_declared(_Name) ->
+ %% Name is not needed, but might be useful in the future.
+ ets:update_counter(connection_churn_metrics, node(), {6, 1},
+ ?CONNECTION_CHURN_METRICS),
+ ok.
+
+%% Bump churn column 7 (queue created).
+queue_created(_Name) ->
+ %% Name is not needed, but might be useful in the future.
+ ets:update_counter(connection_churn_metrics, node(), {7, 1},
+ ?CONNECTION_CHURN_METRICS),
+ ok.
+
+%% Remove a single queue's coarse row, bump churn column 8, and set
+%% delete markers on the queue row plus every channel-level row that
+%% references this queue (found via the match-spec helpers below).
+queue_deleted(Name) ->
+ ets:delete(queue_coarse_metrics, Name),
+ ets:update_counter(connection_churn_metrics, node(), {8, 1},
+ ?CONNECTION_CHURN_METRICS),
+ %% Delete markers
+ ets:update_element(queue_metrics, Name, {3, 1}),
+ CQX = ets:select(channel_queue_exchange_metrics, match_spec_cqx(Name)),
+ lists:foreach(fun(Key) ->
+ ets:update_element(channel_queue_exchange_metrics, Key, {3, 1})
+ end, CQX),
+ CQ = ets:select(channel_queue_metrics, match_spec_cq(Name)),
+ lists:foreach(fun(Key) ->
+ ets:update_element(channel_queue_metrics, Key, {9, 1})
+ end, CQ).
+
%% Batch variant of queue_deleted/1: bump churn column 8 by the batch
%% size, delete each queue's own rows, then flag every channel-level
%% row referencing any of the queues, using one match-spec per chunk of
%% up to 1000 queue names.
queues_deleted([]) ->
    %% Guard against the empty batch: without this clause the generated
    %% match-spec condition degenerates to 'true', which would flag
    %% EVERY row of the channel/queue tables as deleted.
    ok;
queues_deleted(Queues) ->
    ets:update_counter(connection_churn_metrics, node(), {8, length(Queues)},
                       ?CONNECTION_CHURN_METRICS),
    _ = [delete_queue_metrics(Queue) || Queue <- Queues],
    _ = [begin
             MatchSpecCondition =
                 build_match_spec_conditions_to_delete_all_queues(QueuesPartition),
             delete_channel_queue_exchange_metrics(MatchSpecCondition),
             delete_channel_queue_metrics(MatchSpecCondition)
         end || QueuesPartition <- partition_queues(Queues)],
    ok.
+
%% Split a queue-name list into chunks of at most 1000 entries, so the
%% match-spec conditions built from each chunk stay bounded.
%% Fix: never emit an empty chunk. The previous version returned [[]]
%% for an empty list and appended a trailing [] chunk when the length
%% was an exact multiple of 1000; an empty chunk makes
%% build_match_spec_conditions_to_delete_all_queues/1 return 'true',
%% i.e. a condition matching every row.
partition_queues([]) ->
    [];
partition_queues(Queues) when length(Queues) >= 1000 ->
    {Chunk, Rest} = lists:split(1000, Queues),
    [Chunk | partition_queues(Rest)];
partition_queues(Queues) ->
    [Queues].
+
+%% Remove one queue's coarse row and set the delete marker on its
+%% detailed row.
+delete_queue_metrics(Queue) ->
+ ets:delete(queue_coarse_metrics, Queue),
+ ets:update_element(queue_metrics, Queue, {3, 1}),
+ ok.
+
+%% Select the {ChannelPid, {QueueName, ExchangeName}} keys whose queue
+%% ('$1') satisfies MatchSpecCondition, then set each row's delete
+%% marker (column 3).
+delete_channel_queue_exchange_metrics(MatchSpecCondition) ->
+ ChannelQueueExchangeMetricsToUpdate = ets:select(
+ channel_queue_exchange_metrics,
+ [
+ {
+ {{'$2', {'$1', '$3'}}, '_', '_'},
+ [MatchSpecCondition],
+ [{{'$2', {{'$1', '$3'}}}}]
+ }
+ ]
+ ),
+ lists:foreach(fun(Key) ->
+ ets:update_element(channel_queue_exchange_metrics, Key, {3, 1})
+ end, ChannelQueueExchangeMetricsToUpdate).
+
+%% Same idea for channel_queue_metrics: key is {ChannelPid, QueueName},
+%% delete marker is column 9.
+delete_channel_queue_metrics(MatchSpecCondition) ->
+ ChannelQueueMetricsToUpdate = ets:select(
+ channel_queue_metrics,
+ [
+ {
+ {{'$2', '$1'}, '_', '_', '_', '_', '_', '_', '_', '_'},
+ [MatchSpecCondition],
+ [{{'$2', '$1'}}]
+ }
+ ]
+ ),
+ lists:foreach(fun(Key) ->
+ ets:update_element(channel_queue_metrics, Key, {9, 1})
+ end, ChannelQueueMetricsToUpdate).
+
%% Build a nested 'orelse' match-spec condition that holds when the
%% row variable '$1' equals any of the given queue names:
%%
%%   {'orelse', {'==', {Q1}, '$1'},
%%       {'orelse', {'==', {Q2}, '$1'},
%%           ...
%%           true}}
%%
%% NOTE: for an empty list this reduces to the terminator 'true', a
%% condition matching every row - callers must not pass an empty batch.
build_match_spec_conditions_to_delete_all_queues(Queues) ->
    lists:foldr(
      fun(Queue, Rest) -> {'orelse', {'==', {Queue}, '$1'}, Rest} end,
      true,
      Queues).
+
+%% Store this node's stats row in the table for the given stats family.
+node_stats(persister_metrics, Infos) ->
+ ets:insert(node_persister_metrics, {node(), Infos}),
+ ok;
+node_stats(coarse_metrics, Infos) ->
+ ets:insert(node_coarse_metrics, {node(), Infos}),
+ ok;
+node_stats(node_metrics, Infos) ->
+ ets:insert(node_metrics, {node(), Infos}),
+ ok.
+
+%% Store stats for a node pair (Id is {node(), OtherNode}).
+node_node_stats(Id, Infos) ->
+ ets:insert(node_node_metrics, {Id, Infos}),
+ ok.
+
+%% Match spec over channel_queue_exchange_metrics: returns the
+%% {ChannelPid, {QueueName, ExchangeName}} keys of rows whose queue
+%% ('$1') equals Id.
+match_spec_cqx(Id) ->
+ [{{{'$2', {'$1', '$3'}}, '_', '_'}, [{'==', {Id}, '$1'}], [{{'$2', {{'$1', '$3'}}}}]}].
+
+%% Match spec over channel_queue_metrics: returns the
+%% {ChannelPid, QueueName} keys of rows whose queue ('$1') equals Id.
+match_spec_cq(Id) ->
+ [{{{'$2', '$1'}, '_', '_', '_', '_', '_', '_', '_', '_'}, [{'==', {Id}, '$1'}], [{{'$2', '$1'}}]}].
+
%% Record the buffer (message queue) length observed for a gen_server2
%% process.
gen_server2_stats(Pid, BufferLength) ->
    _ = ets:insert(gen_server2_metrics, {Pid, BufferLength}),
    ok.

%% Forget a gen_server2 process's row (no-op if absent).
gen_server2_deleted(Pid) ->
    _ = ets:delete(gen_server2_metrics, Pid),
    ok.

%% Fetch the recorded buffer length, or 'not_found' when the process
%% has no row.
get_gen_server2_stats(Pid) ->
    case ets:lookup(gen_server2_metrics, Pid) of
        [{_, BufferLength}] -> BufferLength;
        []                  -> not_found
    end.
+
+%% Bump total (col 2) and succeeded (col 3) counters for Protocol.
+auth_attempt_succeeded(RemoteAddress, Username, Protocol) ->
+ %% ETS entry is {Key = {RemoteAddress, Username}, Total, Succeeded, Failed}
+ update_auth_attempt(RemoteAddress, Username, Protocol, [{2, 1}, {3, 1}]).
+
+%% Bump total (col 2) and failed (col 4) counters for Protocol.
+auth_attempt_failed(RemoteAddress, Username, Protocol) ->
+ %% ETS entry is {Key = {RemoteAddress, Username}, Total, Succeeded, Failed}
+ update_auth_attempt(RemoteAddress, Username, Protocol, [{2, 1}, {4, 1}]).
+
%% Bump auth-attempt counters for Protocol and, when per-source
%% tracking is enabled, the detailed {RemoteAddress, Username, Protocol}
%% counters as well. Incr is a list of {Position, Increment} cells;
%% rows are {Key, Total, Succeeded, Failed}.
update_auth_attempt(RemoteAddress, Username, Protocol, Incr) ->
    %% Per-source tracking defaults to 'false', as the per-ip/user table
    %% could otherwise grow indefinitely; it is up to the operator to
    %% enable it and to reset the metrics when required. get_env/3 with
    %% an explicit default also avoids a case_clause crash when the
    %% 'rabbit' application environment is not loaded (previously only
    %% {ok, true} and {ok, false} were matched, so 'undefined' crashed).
    case application:get_env(rabbit, track_auth_attempt_source, false) of
        true ->
            case {RemoteAddress, Username} of
                {<<>>, <<>>} ->
                    %% No usable source information: skip detailed row.
                    ok;
                _ ->
                    Key = {RemoteAddress, Username, Protocol},
                    _ = ets:update_counter(auth_attempt_detailed_metrics, Key,
                                           Incr, {Key, 0, 0, 0})
            end;
        false ->
            ok
    end,
    _ = ets:update_counter(auth_attempt_metrics, Protocol, Incr,
                           {Protocol, 0, 0, 0}),
    ok.
+
+%% Clear both the per-protocol and the per-source auth-attempt tables.
+reset_auth_attempt_metrics() ->
+ ets:delete_all_objects(auth_attempt_metrics),
+ ets:delete_all_objects(auth_attempt_detailed_metrics),
+ ok.
+
+%% Dump the per-protocol counters as proplists (see format_auth_attempt/1).
+get_auth_attempts() ->
+ [format_auth_attempt(A) || A <- ets:tab2list(auth_attempt_metrics)].
+
+%% Dump the per-source detailed counters as proplists.
+get_auth_attempts_by_source() ->
+ [format_auth_attempt(A) || A <- ets:tab2list(auth_attempt_detailed_metrics)].
+
%% Render one auth-attempt table row as a proplist. Detailed rows
%% (keyed by {RemoteAddress, Username, Protocol}) additionally carry
%% the source fields in front of the shared counter fields.
format_auth_attempt({{RemoteAddress, Username, Protocol}, Total, Succeeded,
                     Failed}) ->
    [{remote_address, RemoteAddress},
     {username, Username}
     | auth_attempt_counters(Protocol, Total, Succeeded, Failed)];
format_auth_attempt({Protocol, Total, Succeeded, Failed}) ->
    auth_attempt_counters(Protocol, Total, Succeeded, Failed).

%% Shared tail of the proplist (order is part of the API output).
auth_attempt_counters(Protocol, Total, Succeeded, Failed) ->
    [{protocol, atom_to_binary(Protocol, utf8)},
     {auth_attempts, Total},
     {auth_attempts_failed, Failed},
     {auth_attempts_succeeded, Succeeded}].
diff --git a/deps/rabbit_common/src/rabbit_data_coercion.erl b/deps/rabbit_common/src/rabbit_data_coercion.erl
new file mode 100644
index 0000000000..9d2b39da94
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_data_coercion.erl
@@ -0,0 +1,47 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_data_coercion).
+
+-export([to_binary/1, to_list/1, to_atom/1, to_integer/1, to_proplist/1, to_map/1]).
+-export([to_atom/2]).
+
-spec to_binary(Val :: binary() | list() | atom() | integer()) -> binary().
%% Coerce atoms, integers and lists to a binary; binaries pass through.
to_binary(Val) when is_atom(Val)    -> atom_to_binary(Val, utf8);
to_binary(Val) when is_integer(Val) -> integer_to_binary(Val);
to_binary(Val) when is_list(Val)    -> list_to_binary(Val);
to_binary(Val)                      -> Val.

-spec to_list(Val :: integer() | list() | binary() | atom() | map()) -> list().
%% Coerce binaries, atoms, integers and maps to a list; lists pass through.
to_list(Val) when is_list(Val)    -> Val;
to_list(Val) when is_binary(Val)  -> binary_to_list(Val);
to_list(Val) when is_atom(Val)    -> atom_to_list(Val);
to_list(Val) when is_integer(Val) -> integer_to_list(Val);
to_list(Val) when is_map(Val)     -> maps:to_list(Val).

-spec to_atom(Val :: atom() | list() | binary()) -> atom().
%% NOTE(review): creates atoms dynamically and atoms are never garbage
%% collected - avoid calling this on unbounded/untrusted input.
to_atom(Val) when is_atom(Val)   -> Val;
to_atom(Val) when is_list(Val)   -> list_to_atom(Val);
to_atom(Val) when is_binary(Val) -> binary_to_atom(Val, utf8).

-spec to_atom(Val :: atom() | list() | binary(), Encoding :: atom()) -> atom().
%% Like to_atom/1, with an explicit encoding used for binaries.
to_atom(Val, _Encoding) when is_atom(Val)  -> Val;
to_atom(Val, _Encoding) when is_list(Val)  -> list_to_atom(Val);
to_atom(Val, Encoding)  when is_binary(Val) -> binary_to_atom(Val, Encoding).

-spec to_integer(Val :: integer() | list() | binary()) -> integer().
to_integer(Val) when is_integer(Val) -> Val;
to_integer(Val) when is_binary(Val)  -> binary_to_integer(Val);
to_integer(Val) when is_list(Val)    -> list_to_integer(Val).

-spec to_proplist(Val :: map() | list()) -> list().
to_proplist(Val) when is_map(Val)  -> maps:to_list(Val);
to_proplist(Val) when is_list(Val) -> Val.

-spec to_map(Val :: map() | list()) -> map().
to_map(Val) when is_list(Val) -> maps:from_list(Val);
to_map(Val) when is_map(Val)  -> Val.
diff --git a/deps/rabbit_common/src/rabbit_env.erl b/deps/rabbit_common/src/rabbit_env.erl
new file mode 100644
index 0000000000..8817103e81
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_env.erl
@@ -0,0 +1,1850 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2019-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_env).
+
+-include_lib("kernel/include/file.hrl").
+
+-export([get_context/0,
+ get_context/1,
+ get_context_before_logging_init/0,
+ get_context_before_logging_init/1,
+ get_context_after_logging_init/1,
+ get_context_after_reloading_env/1,
+ dbg_config/0,
+ get_used_env_vars/0,
+ log_process_env/0,
+ log_context/1,
+ context_to_app_env_vars/1,
+ context_to_app_env_vars_no_logging/1,
+ context_to_code_path/1]).
+
+-ifdef(TEST).
+-export([parse_conf_env_file_output2/2,
+ value_is_yes/1]).
+-endif.
+
+-define(USED_ENV_VARS,
+ [
+ "RABBITMQ_ALLOW_INPUT",
+ "RABBITMQ_ADVANCED_CONFIG_FILE",
+ "RABBITMQ_BASE",
+ "RABBITMQ_CONF_ENV_FILE",
+ "RABBITMQ_CONFIG_FILE",
+ "RABBITMQ_CONFIG_FILES",
+ "RABBITMQ_DBG",
+ "RABBITMQ_DIST_PORT",
+ "RABBITMQ_ENABLED_PLUGINS",
+ "RABBITMQ_ENABLED_PLUGINS_FILE",
+ "RABBITMQ_FEATURE_FLAGS",
+ "RABBITMQ_FEATURE_FLAGS_FILE",
+ "RABBITMQ_HOME",
+ "RABBITMQ_KEEP_PID_FILE_ON_EXIT",
+ "RABBITMQ_LOG",
+ "RABBITMQ_LOG_BASE",
+ "RABBITMQ_LOG_FF_REGISTRY",
+ "RABBITMQ_LOGS",
+ "RABBITMQ_MNESIA_BASE",
+ "RABBITMQ_MNESIA_DIR",
+ "RABBITMQ_MOTD_FILE",
+ "RABBITMQ_NODE_IP_ADDRESS",
+ "RABBITMQ_NODE_PORT",
+ "RABBITMQ_NODENAME",
+ "RABBITMQ_PID_FILE",
+ "RABBITMQ_PLUGINS_DIR",
+ "RABBITMQ_PLUGINS_EXPAND_DIR",
+ "RABBITMQ_PRODUCT_NAME",
+ "RABBITMQ_PRODUCT_VERSION",
+ "RABBITMQ_QUORUM_DIR",
+ "RABBITMQ_STREAM_DIR",
+ "RABBITMQ_UPGRADE_LOG",
+ "RABBITMQ_USE_LONGNAME",
+ "SYS_PREFIX"
+ ]).
+
%% Assemble the full runtime context by chaining the three phases in
%% order: pre-logging, post-logging, then after the env file reload.
get_context() ->
    get_context_after_reloading_env(
      get_context_after_logging_init(
        get_context_before_logging_init())).

%% Same, but the first phase may pull values from a remote node (see
%% context_base/1 for the accepted TakeFromRemoteNode shapes).
get_context(TakeFromRemoteNode) ->
    get_context_after_reloading_env(
      get_context_after_logging_init(
        get_context_before_logging_init(TakeFromRemoteNode))).
+
+%% Phase 1 with no remote node involved.
+get_context_before_logging_init() ->
+ get_context_before_logging_init(false).
+
+%% Phase 1: the minimal context needed before logging is configured.
+get_context_before_logging_init(TakeFromRemoteNode) ->
+ %% The order of steps below is important because some of them
+ %% depends on previous steps.
+ Steps = [
+ fun os_type/1,
+ fun log_levels/1,
+ fun interactive_shell/1,
+ fun output_supports_colors/1
+ ],
+
+ run_context_steps(context_base(TakeFromRemoteNode), Steps).
+
+%% Phase 2: directories and the conf env file; note log_levels runs
+%% again here (NOTE(review): presumably because the conf env file just
+%% loaded can change them - confirm).
+get_context_after_logging_init(Context) ->
+ %% The order of steps below is important because some of them
+ %% depends on previous steps.
+ Steps = [
+ fun sys_prefix/1,
+ fun rabbitmq_base/1,
+ fun data_dir/1,
+ fun rabbitmq_home/1,
+ fun config_base_dir/1,
+ fun load_conf_env_file/1,
+ fun log_levels/1
+ ],
+
+ run_context_steps(Context, Steps).
+
+%% Phase 3: everything that depends on the (re)loaded environment -
+%% node name, file locations, ports, plugins, product info.
+get_context_after_reloading_env(Context) ->
+ %% The order of steps below is important because some of them
+ %% depends on previous steps.
+ Steps = [
+ fun nodename_type/1,
+ fun nodename/1,
+ fun split_nodename/1,
+ fun maybe_setup_dist_for_remote_query/1,
+ fun dbg_config/1,
+ fun main_config_file/1,
+ fun additional_config_files/1,
+ fun advanced_config_file/1,
+ fun log_base_dir/1,
+ fun main_log_file/1,
+ fun upgrade_log_file/1,
+ fun mnesia_base_dir/1,
+ fun mnesia_dir/1,
+ fun quorum_queue_dir/1,
+ fun stream_queue_dir/1,
+ fun pid_file/1,
+ fun keep_pid_file_on_exit/1,
+ fun feature_flags_file/1,
+ fun forced_feature_flags_on_init/1,
+ fun log_feature_flags_registry/1,
+ fun plugins_path/1,
+ fun plugins_expand_dir/1,
+ fun enabled_plugins_file/1,
+ fun enabled_plugins/1,
+ fun maybe_stop_dist_for_remote_query/1,
+ fun amqp_ipaddr/1,
+ fun amqp_tcp_port/1,
+ fun erlang_dist_tcp_port/1,
+ fun product_name/1,
+ fun product_version/1,
+ fun motd_file/1
+ ],
+
+ run_context_steps(Context, Steps).
+
+%% Seed context. TakeFromRemoteNode is false (purely local), 'offline',
+%% a node name (a default 10s RPC timeout is applied), or an explicit
+%% {Node, Timeout | infinity} pair.
+context_base(TakeFromRemoteNode) ->
+ Context = #{},
+ case TakeFromRemoteNode of
+ false ->
+ Context;
+ offline ->
+ update_context(Context,
+ from_remote_node,
+ offline);
+ %% Note: this atom clause is safe before the tuple clauses
+ %% because tuples fail the is_atom/1 guard.
+ _ when is_atom(TakeFromRemoteNode) ->
+ update_context(Context,
+ from_remote_node,
+ {TakeFromRemoteNode, 10000});
+ {RemoteNode, infinity}
+ when is_atom(RemoteNode) ->
+ update_context(Context,
+ from_remote_node,
+ TakeFromRemoteNode);
+ {RemoteNode, Timeout}
+ when is_atom(RemoteNode) andalso
+ is_integer(Timeout) andalso
+ Timeout >= 0 ->
+ update_context(Context,
+ from_remote_node,
+ {TakeFromRemoteNode, Timeout})
+ end.
+
+-ifdef(TEST).
+%% In test builds the OS type can be faked via the persistent_term key
+%% {?MODULE, os_type}; falling back to the real os:type() otherwise.
+os_type(Context) ->
+ {OSType, Origin} =
+ try
+ {persistent_term:get({?MODULE, os_type}), environment}
+ catch
+ _:badarg ->
+ {os:type(), default}
+ end,
+ update_context(Context, os_type, OSType, Origin).
+-else.
+%% Production build: always the real OS type.
+os_type(Context) ->
+ update_context(Context, os_type, os:type(), default).
+-endif.
+
%% Thread the context map through each step function, in order.
run_context_steps(Context, Steps) ->
    lists:foldl(fun(Step, Acc) -> Step(Acc) end, Context, Steps).
+
%% Store Key => Value in the context map, without tracking an origin.
update_context(Context, Key, Value) ->
    maps:put(Key, Value, Context).
+
+%% Valid provenance tags for a context value.
+-define(origin_is_valid(O),
+ O =:= default orelse
+ O =:= environment orelse
+ O =:= remote_node).
+
+%% Store Key => Value and record where the value came from in the
+%% var_origins sub-map (created on first use).
+update_context(#{var_origins := Origins} = Context, Key, Value, Origin)
+ when ?origin_is_valid(Origin) ->
+ Context#{Key => Value,
+ var_origins => Origins#{Key => Origin}};
+update_context(Context, Key, Value, Origin)
+ when ?origin_is_valid(Origin) ->
+ Context#{Key => Value,
+ var_origins => #{Key => Origin}}.
+
+%% Sorted list of the OS environment variables this module recognises
+%% (membership decided by var_is_used/1, defined elsewhere).
+get_used_env_vars() ->
+ lists:filter(
+ fun({Var, _}) -> var_is_used(Var) end,
+ lists:sort(os:list_env_vars())).
+
+%% Debug-log the whole process environment, sorted by variable name.
+log_process_env() ->
+ rabbit_log_prelaunch:debug("Process environment:"),
+ lists:foreach(
+ fun({Var, Value}) ->
+ rabbit_log_prelaunch:debug(" - ~s = ~ts", [Var, Value])
+ end, lists:sort(os:list_env_vars())).
+
+%% Debug-log every key/value of the computed context, sorted by key.
+log_context(Context) ->
+ rabbit_log_prelaunch:debug("Context (based on environment variables):"),
+ lists:foreach(
+ fun(Key) ->
+ Value = maps:get(Key, Context),
+ rabbit_log_prelaunch:debug(" - ~s: ~p", [Key, Value])
+ end,
+ lists:sort(maps:keys(Context))).
+
+%% Apply the context to application environments, logging each setting.
+context_to_app_env_vars(Context) ->
+ rabbit_log_prelaunch:debug(
+ "Setting default application environment variables:"),
+ Fun = fun({App, Param, Value}) ->
+ rabbit_log_prelaunch:debug(
+ " - ~s:~s = ~p", [App, Param, Value]),
+ ok = application:set_env(
+ App, Param, Value, [{persistent, true}])
+ end,
+ context_to_app_env_vars1(Context, Fun).
+
+%% Same, silently (used before the logging subsystem is available).
+context_to_app_env_vars_no_logging(Context) ->
+ Fun = fun({App, Param, Value}) ->
+ ok = application:set_env(
+ App, Param, Value, [{persistent, true}])
+ end,
+ context_to_app_env_vars1(Context, Fun).
+
+%% Shared worker: feeds each {App, Param, Value} triple to Fun. The
+%% dist port and AMQP listener settings are only applied when present
+%% in the context.
+context_to_app_env_vars1(
+ #{mnesia_dir := MnesiaDir,
+ feature_flags_file := FFFile,
+ quorum_queue_dir := QuorumQueueDir,
+ stream_queue_dir := StreamQueueDir,
+ plugins_path := PluginsPath,
+ plugins_expand_dir := PluginsExpandDir,
+ enabled_plugins_file := EnabledPluginsFile} = Context,
+ Fun) ->
+ lists:foreach(
+ Fun,
+ %% Those are all the application environment variables which
+ %% were historically set on the erl(1) command line in
+ %% rabbitmq-server(8).
+ [{kernel, inet_default_connect_options, [{nodelay, true}]},
+ {sasl, errlog_type, error},
+ {os_mon, start_cpu_sup, false},
+ {os_mon, start_disksup, false},
+ {os_mon, start_memsup, false},
+ {mnesia, dir, MnesiaDir},
+ {ra, data_dir, QuorumQueueDir},
+ {osiris, data_dir, StreamQueueDir},
+ {rabbit, feature_flags_file, FFFile},
+ {rabbit, plugins_dir, PluginsPath},
+ {rabbit, plugins_expand_dir, PluginsExpandDir},
+ {rabbit, enabled_plugins_file, EnabledPluginsFile}]),
+
+ case Context of
+ #{erlang_dist_tcp_port := DistTcpPort} ->
+ %% Pin the distribution listener to a single port.
+ lists:foreach(
+ Fun,
+ [{kernel, inet_dist_listen_min, DistTcpPort},
+ {kernel, inet_dist_listen_max, DistTcpPort}]);
+ _ ->
+ ok
+ end,
+ case Context of
+ #{amqp_ipaddr := IpAddr,
+ amqp_tcp_port := TcpPort}
+ when IpAddr /= undefined andalso TcpPort /= undefined ->
+ Fun({rabbit, tcp_listeners, [{IpAddr, TcpPort}]});
+ _ ->
+ ok
+ end,
+ ok.
+
+%% Prepend every plugin ebin directory to the code path, preserving
+%% the order of PluginsPath.
+context_to_code_path(#{os_type := OSType, plugins_path := PluginsPath}) ->
+ Dirs = get_user_lib_dirs(OSType, PluginsPath),
+ code:add_pathsa(lists:reverse(Dirs)).
+
+%% -------------------------------------------------------------------
+%% Code copied from `kernel/src/code_server.erl`.
+%%
+%% The goal is to mimic the behavior of the `$ERL_LIBS` environment
+%% variable.
+
+%% Split the search path on the platform-specific separator and expand
+%% each directory into its applications' ebin directories.
+get_user_lib_dirs(OSType, Path) ->
+ Sep = case OSType of
+ {win32, _} -> ";";
+ _ -> ":"
+ end,
+ SplitPath = string:lexemes(Path, Sep),
+ get_user_lib_dirs_1(SplitPath).
+
+%% Unreadable directories are silently skipped.
+get_user_lib_dirs_1([Dir|DirList]) ->
+ case erl_prim_loader:list_dir(Dir) of
+ {ok, Dirs} ->
+ Paths = make_path(Dir, Dirs),
+ %% Only add paths trailing with ./ebin.
+ [P || P <- Paths, filename:basename(P) =:= "ebin"] ++
+ get_user_lib_dirs_1(DirList);
+ error ->
+ get_user_lib_dirs_1(DirList)
+ end;
+get_user_lib_dirs_1([]) -> [].
+
+%%
+%% Create the initial path.
+%%
+%% Keep only the highest version of each application found in
+%% BundleDir, then resolve each one to an ebin path.
+make_path(BundleDir, Bundles0) ->
+ Bundles = choose_bundles(Bundles0),
+ make_path(BundleDir, Bundles, []).
+
+%% Sort bundle names, parse name/version, and keep one entry per
+%% application (highest version wins, see choose/3).
+choose_bundles(Bundles) ->
+ ArchiveExt = archive_extension(),
+ Bs = lists:sort([create_bundle(B, ArchiveExt) || B <- Bundles]),
+ [FullName || {_Name,_NumVsn,FullName} <-
+ choose(lists:reverse(Bs), [], ArchiveExt)].
+
+%% Parse "name-X.Y.Z[.ez]" into {Name, VersionList, FullName}; entries
+%% without a parsable version sort with version [0].
+create_bundle(FullName, ArchiveExt) ->
+ BaseName = filename:basename(FullName, ArchiveExt),
+ case split_base(BaseName) of
+ {Name, VsnStr} ->
+ case vsn_to_num(VsnStr) of
+ {ok, VsnNum} ->
+ {Name,VsnNum,FullName};
+ false ->
+ {FullName,[0],FullName}
+ end;
+ _ ->
+ {FullName,[0],FullName}
+ end.
+
+%% Convert "X.Y.Z. ..." to [K, L, M| ...]
%% Convert "X.Y.Z..." into {ok, [X, Y, Z | ...]}, or false when any
%% dot-separated component is not purely numeric.
vsn_to_num(Vsn) ->
    case is_vsn(Vsn) of
        true ->
            {ok, [list_to_integer(Part) || Part <- string:lexemes(Vsn, ".")]};
        false ->
            false
    end.

%% A version string is a sequence of dot-separated runs of ASCII digits.
is_vsn(Str) when is_list(Str) ->
    lists:all(fun is_numstr/1, string:lexemes(Str, ".")).

is_numstr(Chars) ->
    lists:all(fun(C) -> C >= $0 andalso C =< $9 end, Chars).
+
+%% Keep one {Name, Vsn, FullName} per application. The input arrives
+%% sorted descending, so the first entry seen for a name is the highest
+%% version; an equal-version directory replaces an archive entry.
+choose([{Name,NumVsn,NewFullName}=New|Bs], Acc, ArchiveExt) ->
+ case lists:keyfind(Name, 1, Acc) of
+ {_, NV, OldFullName} when NV =:= NumVsn ->
+ case filename:extension(OldFullName) =:= ArchiveExt of
+ false ->
+ choose(Bs,Acc, ArchiveExt);
+ true ->
+ %% Prefer the plain directory over the archive.
+ Acc2 = lists:keystore(Name, 1, Acc, New),
+ choose(Bs,Acc2, ArchiveExt)
+ end;
+ {_, _, _} ->
+ %% Lower version of an already-kept application: drop it.
+ choose(Bs,Acc, ArchiveExt);
+ false ->
+ choose(Bs,[{Name,NumVsn,NewFullName}|Acc], ArchiveExt)
+ end;
+choose([],Acc, _ArchiveExt) ->
+ Acc.
+
+%% Resolve each bundle to an existing ebin directory: plain
+%% <bundle>/ebin first, then the equivalent paths inside a code
+%% archive. Bundles with no ebin are dropped. Result is reversed
+%% relative to the input (callers compensate).
+make_path(_, [], Res) ->
+ Res;
+make_path(BundleDir, [Bundle|Tail], Res) ->
+ Dir = filename:append(BundleDir, Bundle),
+ Ebin = filename:append(Dir, "ebin"),
+ %% First try with /ebin
+ case is_dir(Ebin) of
+ true ->
+ make_path(BundleDir, Tail, [Ebin|Res]);
+ false ->
+ %% Second try with archive
+ Ext = archive_extension(),
+ Base = filename:basename(Bundle, Ext),
+ Ebin2 = filename:join([BundleDir, Base ++ Ext, Base, "ebin"]),
+ Ebins =
+ case split_base(Base) of
+ {AppName,_} ->
+ Ebin3 = filename:join([BundleDir, Base ++ Ext,
+ AppName, "ebin"]),
+ [Ebin3, Ebin2, Dir];
+ _ ->
+ [Ebin2, Dir]
+ end,
+ case try_ebin_dirs(Ebins) of
+ {ok,FoundEbin} ->
+ make_path(BundleDir, Tail, [FoundEbin|Res]);
+ error ->
+ make_path(BundleDir, Tail, Res)
+ end
+ end.
+
+%% Return the first candidate that is an existing directory.
+try_ebin_dirs([Ebin|Ebins]) ->
+ case is_dir(Ebin) of
+ true -> {ok,Ebin};
+ false -> try_ebin_dirs(Ebins)
+ end;
+try_ebin_dirs([]) ->
+ error.
+
%% Split "name-vsn" into {"name", "vsn"} on the LAST dash, keeping any
%% earlier dashes as part of the name (e.g. "my-app-1.0" ->
%% {"my-app", "1.0"}). A dash-free name is returned unchanged.
split_base(BaseName) ->
    case string:lexemes(BaseName, "-") of
        [_, _ | _] = Parts ->
            {string:join(lists:droplast(Parts), "-"), lists:last(Parts)};
        [_ | _] ->
            BaseName
    end.
+
+%% True if Path exists and is a directory (via erl_prim_loader, so it
+%% also works for paths served by the boot loader).
+is_dir(Path) ->
+ case erl_prim_loader:read_file_info(Path) of
+ {ok,#file_info{type=directory}} -> true;
+ _ -> false
+ end.
+
+%% Code-archive extension (normally ".ez"), as configured in init.
+archive_extension() ->
+ init:archive_extension().
+
+%% -------------------------------------------------------------------
+%%
+%% RABBITMQ_NODENAME
+%% Erlang node name.
+%% Default: rabbit@<hostname>
+%%
+%% RABBITMQ_USE_LONGNAME
+%% Flag indicating if long Erlang node names should be used instead
+%% of short ones.
+%% Default: unset (use short names)
+
+%% Decide between short and long Erlang node names based on
+%% RABBITMQ_USE_LONGNAME (any "yes"-like value enables long names).
+nodename_type(Context) ->
+ case get_prefixed_env_var("RABBITMQ_USE_LONGNAME") of
+ false ->
+ update_context(Context, nodename_type, shortnames, default);
+ Value ->
+ NameType = case value_is_yes(Value) of
+ true -> longnames;
+ false -> shortnames
+ end,
+ update_context(Context, nodename_type, NameType, environment)
+ end.
+
+%% Compute the node name: default "rabbit@<hostname>" (short or long
+%% hostname per the name type), or RABBITMQ_NODENAME, completing the
+%% host part when the variable contains no "@".
+nodename(#{nodename_type := NameType} = Context) ->
+ LongHostname = net_adm:localhost(),
+ ShortHostname = re:replace(LongHostname, "\\..*$", "", [{return, list}]),
+ case get_prefixed_env_var("RABBITMQ_NODENAME") of
+ false when NameType =:= shortnames ->
+ Nodename = rabbit_nodes_common:make({"rabbit", ShortHostname}),
+ update_context(Context, nodename, Nodename, default);
+ false when NameType =:= longnames ->
+ Nodename = rabbit_nodes_common:make({"rabbit", LongHostname}),
+ update_context(Context, nodename, Nodename, default);
+ Value ->
+ Nodename = case string:find(Value, "@") of
+ nomatch when NameType =:= shortnames ->
+ rabbit_nodes_common:make({Value, ShortHostname});
+ nomatch when NameType =:= longnames ->
+ rabbit_nodes_common:make({Value, LongHostname});
+ _ ->
+ rabbit_nodes_common:make(Value)
+ end,
+ update_context(Context, nodename, Nodename, environment)
+ end.
+
+%% Cache the {Name, Host} decomposition of the node name.
+split_nodename(#{nodename := Nodename} = Context) ->
+ update_context(Context,
+ split_nodename, rabbit_nodes_common:parts(Nodename)).
+
+%% -------------------------------------------------------------------
+%%
+%% RABBITMQ_CONFIG_FILE
+%% Main configuration file.
+%% Extension is optional. `.config` for the old erlang-term-based
+%% format, `.conf` for the new Cuttlefish-based format.
+%% Default: (Unix) ${SYS_PREFIX}/etc/rabbitmq/rabbitmq
+%% (Windows) ${RABBITMQ_BASE}\rabbitmq
+%%
+%% RABBITMQ_CONFIG_FILES
+%% Additional configuration files.
+%% If a directory, all files directly inside it are loaded.
+%% If a glob pattern, all matching file are loaded.
+%% Only considered if the main configuration file is Cuttlefish-based.
+%% Default: (Unix) ${SYS_PREFIX}/etc/rabbitmq/conf.d/*.conf
+%% (Windows) ${RABBITMQ_BASE}\conf.d\*.conf
+%%
+%% RABBITMQ_ADVANCED_CONFIG_FILE
+%% Advanced configuration file.
+%% Erlang-term-based format with a `.config` extension.
+%% Default: (Unix) ${SYS_PREFIX}/etc/rabbitmq/advanced.config
+%% (Windows) ${RABBITMQ_BASE}\advanced.config
+
+%% Resolves the base directory for configuration files:
+%% ${SYS_PREFIX}/etc/rabbitmq on Unix, ${RABBITMQ_BASE} on Windows.
+config_base_dir(#{os_type := {unix, _},
+                  sys_prefix := SysPrefix} = Context) ->
+    Dir = filename:join([SysPrefix, "etc", "rabbitmq"]),
+    update_context(Context, config_base_dir, Dir);
+config_base_dir(#{os_type := {win32, _},
+                  rabbitmq_base := Dir} = Context) ->
+    update_context(Context, config_base_dir, Dir).
+
+%% Records the main configuration file, from $RABBITMQ_CONFIG_FILE or
+%% the default location. The default has no extension: `.config`
+%% (Erlang terms) vs. `.conf` (Cuttlefish) is resolved later.
+main_config_file(Context) ->
+    case get_prefixed_env_var("RABBITMQ_CONFIG_FILE") of
+        false ->
+            File = get_default_main_config_file(Context),
+            update_context(Context, main_config_file, File, default);
+        Value ->
+            File = normalize_path(Value),
+            update_context(Context, main_config_file, File, environment)
+    end.
+
+%% Default main config file path; extension intentionally omitted.
+get_default_main_config_file(#{config_base_dir := ConfigBaseDir}) ->
+    filename:join(ConfigBaseDir, "rabbitmq").
+
+%% Records the glob pattern for additional Cuttlefish config files,
+%% from $RABBITMQ_CONFIG_FILES or the default `conf.d/*.conf`.
+additional_config_files(Context) ->
+    case get_prefixed_env_var("RABBITMQ_CONFIG_FILES") of
+        false ->
+            Pattern = get_default_additional_config_files(Context),
+            update_context(
+              Context, additional_config_files, Pattern, default);
+        Value ->
+            Pattern = normalize_path(Value),
+            update_context(
+              Context, additional_config_files, Pattern, environment)
+    end.
+
+%% Default glob for additional configuration files.
+get_default_additional_config_files(#{config_base_dir := ConfigBaseDir}) ->
+    filename:join([ConfigBaseDir, "conf.d", "*.conf"]).
+
+%% Records the advanced (Erlang-term-based) configuration file, from
+%% $RABBITMQ_ADVANCED_CONFIG_FILE or the default location.
+advanced_config_file(Context) ->
+    case get_prefixed_env_var("RABBITMQ_ADVANCED_CONFIG_FILE") of
+        false ->
+            File = get_default_advanced_config_file(Context),
+            update_context(Context, advanced_config_file, File, default);
+        Value ->
+            File = normalize_path(Value),
+            update_context(Context, advanced_config_file, File, environment)
+    end.
+
+%% Default advanced configuration file path.
+get_default_advanced_config_file(#{config_base_dir := ConfigBaseDir}) ->
+    filename:join(ConfigBaseDir, "advanced.config").
+
+%% -------------------------------------------------------------------
+%%
+%% RABBITMQ_LOG_BASE
+%% Directory to write log files
+%% Default: (Unix) ${SYS_PREFIX}/var/log/rabbitmq
+%% (Windows) ${RABBITMQ_BASE}\log
+%%
+%% RABBITMQ_LOGS
+%% Main log file
+%% Default: ${RABBITMQ_LOG_BASE}/${RABBITMQ_NODENAME}.log
+%%
+%% RABBITMQ_UPGRADE_LOG
+%%   Upgrade-procedure-specific log file
+%% Default: ${RABBITMQ_LOG_BASE}/${RABBITMQ_NODENAME}_upgrade.log
+%%
+%% RABBITMQ_LOG
+%% Log level; overrides the configuration file value
+%% Default: (undefined)
+%%
+%% RABBITMQ_DBG
+%% List of `module`, `module:function` or `module:function/arity`
+%% to watch with dbg.
+%% Default: (undefined)
+
+%% Records log level overrides from $RABBITMQ_LOG, or `undefined`
+%% (meaning "use the configuration file") when the variable is unset.
+log_levels(Context) ->
+    case get_prefixed_env_var("RABBITMQ_LOG") of
+        false ->
+            update_context(Context, log_levels, undefined, default);
+        Value ->
+            LogLevels = parse_log_levels(string:lexemes(Value, ","), #{}),
+            update_context(Context, log_levels, LogLevels, environment)
+    end.
+
+%% Parses the comma-separated elements of $RABBITMQ_LOG into a map.
+%% Accepted elements:
+%%   "+color" / "-color"    -> color => true | false
+%%   "<level>"              -> global => Level (e.g. "debug")
+%%   "<category>"           -> Category => info
+%%   "<category>=<level>"   -> Category => Level
+%% An element whose level part is unknown is silently dropped.
+parse_log_levels([CategoryValue | Rest], Result) ->
+    case string:lexemes(CategoryValue, "=") of
+        ["+color"] ->
+            Result1 = Result#{color => true},
+            parse_log_levels(Rest, Result1);
+        ["-color"] ->
+            Result1 = Result#{color => false},
+            parse_log_levels(Rest, Result1);
+        [CategoryOrLevel] ->
+            %% A bare word is ambiguous: if it is a known level it sets
+            %% the global level, otherwise it is a category at `info`.
+            case parse_level(CategoryOrLevel) of
+                undefined ->
+                    Result1 = Result#{CategoryOrLevel => info},
+                    parse_log_levels(Rest, Result1);
+                Level ->
+                    Result1 = Result#{global => Level},
+                    parse_log_levels(Rest, Result1)
+            end;
+        [Category, Level0] ->
+            case parse_level(Level0) of
+                undefined ->
+                    parse_log_levels(Rest, Result);
+                Level ->
+                    Result1 = Result#{Category => Level},
+                    parse_log_levels(Rest, Result1)
+            end
+    end;
+parse_log_levels([], Result) ->
+    Result.
+
+%% Maps a level name (string) to its atom; `undefined` if unknown.
+parse_level("debug")     -> debug;
+parse_level("info")      -> info;
+parse_level("notice")    -> notice;
+parse_level("warning")   -> warning;
+parse_level("error")     -> error;
+parse_level("critical")  -> critical;
+parse_level("alert")     -> alert;
+parse_level("emergency") -> emergency;
+parse_level("none")      -> none;
+parse_level(_)           -> undefined.
+
+%% Resolves the directory where log files are written, from
+%% $RABBITMQ_LOG_BASE or an OS-specific default.
+log_base_dir(#{os_type := OSType} = Context) ->
+    case {get_prefixed_env_var("RABBITMQ_LOG_BASE"), OSType} of
+        {false, {unix, _}} ->
+            #{sys_prefix := SysPrefix} = Context,
+            Dir = filename:join([SysPrefix, "var", "log", "rabbitmq"]),
+            update_context(Context, log_base_dir, Dir, default);
+        {false, {win32, _}} ->
+            #{rabbitmq_base := RabbitmqBase} = Context,
+            Dir = filename:join([RabbitmqBase, "log"]),
+            update_context(Context, log_base_dir, Dir, default);
+        {Value, _} ->
+            Dir = normalize_path(Value),
+            update_context(Context, log_base_dir, Dir, environment)
+    end.
+
+%% Resolves the main log file from $RABBITMQ_LOGS. The literal value
+%% "-" is kept as-is (presumably "log to standard output" — confirm
+%% against the logging setup) instead of being treated as a path.
+main_log_file(#{nodename := Nodename,
+                log_base_dir := LogBaseDir} = Context) ->
+    case get_prefixed_env_var("RABBITMQ_LOGS") of
+        false ->
+            File= filename:join(LogBaseDir,
+                                atom_to_list(Nodename) ++ ".log"),
+            update_context(Context, main_log_file, File, default);
+        "-" ->
+            update_context(Context, main_log_file, "-", environment);
+        Value ->
+            File = normalize_path(Value),
+            update_context(Context, main_log_file, File, environment)
+    end.
+
+%% Resolves the upgrade-specific log file from $RABBITMQ_UPGRADE_LOG
+%% or ${log_base_dir}/${nodename}_upgrade.log by default.
+upgrade_log_file(#{nodename := Nodename,
+                   log_base_dir := LogBaseDir} = Context) ->
+    case get_prefixed_env_var("RABBITMQ_UPGRADE_LOG") of
+        false ->
+            File = filename:join(LogBaseDir,
+                                 atom_to_list(Nodename) ++ "_upgrade.log"),
+            update_context(Context, upgrade_log_file, File, default);
+        Value ->
+            File = normalize_path(Value),
+            update_context(Context, upgrade_log_file, File, environment)
+    end.
+
+%% Returns the `dbg` tracing settings derived from $RABBITMQ_DBG as a
+%% map with `dbg_output` (stdout or a filename) and `dbg_mods` (a list
+%% of {Module, Function, Arity} patterns; '_' acts as a wildcard).
+dbg_config() ->
+    {Mods, Output} = get_dbg_config(),
+    #{dbg_output => Output,
+      dbg_mods => Mods}.
+
+%% Merges the dbg settings into an existing context map.
+dbg_config(Context) ->
+    DbgContext = dbg_config(),
+    maps:merge(Context, DbgContext).
+
+%% Parses $RABBITMQ_DBG, a comma-separated list of trace specs.
+get_dbg_config() ->
+    Output = stdout,
+    DbgValue = get_prefixed_env_var("RABBITMQ_DBG"),
+    case DbgValue of
+        false -> {[], Output};
+        _     -> get_dbg_config1(string:lexemes(DbgValue, ","), [], Output)
+    end.
+
+%% Each element is either "=<filename>" (redirect trace output to that
+%% file) or a spec of the form "mod", "mod:fun" or "mod:fun/arity".
+%% Unparsable elements are skipped. Note: uses list_to_atom/1 on the
+%% operator-supplied value; acceptable here since $RABBITMQ_DBG is a
+%% local debugging knob, not untrusted input.
+get_dbg_config1(["=" ++ Filename | Rest], Mods, _) ->
+    get_dbg_config1(Rest, Mods, Filename);
+get_dbg_config1([SpecValue | Rest], Mods, Output) ->
+    %% Captures: 1 = module, 2 = optional function, 3 = optional arity.
+    Pattern = "([^:]+)(?::([^/]+)(?:/([0-9]+))?)?",
+    Options = [{capture, all_but_first, list}],
+    Mods1 = case re:run(SpecValue, Pattern, Options) of
+                {match, [M, F, A]} ->
+                    Entry = {list_to_atom(M),
+                             list_to_atom(F),
+                             list_to_integer(A)},
+                    [Entry | Mods];
+                {match, [M, F]} ->
+                    Entry = {list_to_atom(M),
+                             list_to_atom(F),
+                             '_'},
+                    [Entry | Mods];
+                {match, [M]} ->
+                    Entry = {list_to_atom(M),
+                             '_',
+                             '_'},
+                    [Entry | Mods];
+                nomatch ->
+                    Mods
+            end,
+    get_dbg_config1(Rest, Mods1, Output);
+get_dbg_config1([], Mods, Output) ->
+    {lists:reverse(Mods), Output}.
+
+%% -------------------------------------------------------------------
+%%
+%% RABBITMQ_MNESIA_BASE
+%% Directory where to create Mnesia directory.
+%% Default: (Unix) ${SYS_PREFIX}/var/lib/rabbitmq/mnesia
+%% (Windows) ${RABBITMQ_BASE}/db
+%%
+%% RABBITMQ_MNESIA_DIR
+%% Directory where to put Mnesia data.
+%% Default: (Unix) ${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME}
+%% (Windows) ${RABBITMQ_MNESIA_BASE}\${RABBITMQ_NODENAME}-mnesia
+
+%% Resolves $RABBITMQ_MNESIA_BASE. When operating on behalf of a
+%% remote node (`from_remote_node` is set) the local default is
+%% meaningless, so the value stays `undefined` unless the variable is
+%% set explicitly.
+mnesia_base_dir(#{from_remote_node := Remote} = Context) ->
+    case get_prefixed_env_var("RABBITMQ_MNESIA_BASE") of
+        false when Remote =:= offline ->
+            update_context(Context, mnesia_base_dir, undefined, default);
+        false ->
+            mnesia_base_dir_from_node(Context);
+        Value ->
+            Dir = normalize_path(Value),
+            update_context(Context, mnesia_base_dir, Dir, environment)
+    end;
+mnesia_base_dir(Context) ->
+    mnesia_base_dir_from_env(Context).
+
+%% Local-node resolution: environment variable or computed default.
+mnesia_base_dir_from_env(Context) ->
+    case get_prefixed_env_var("RABBITMQ_MNESIA_BASE") of
+        false ->
+            Dir = get_default_mnesia_base_dir(Context),
+            update_context(Context, mnesia_base_dir, Dir, default);
+        Value ->
+            Dir = normalize_path(Value),
+            update_context(Context, mnesia_base_dir, Dir, environment)
+    end.
+
+mnesia_base_dir_from_node(Context) ->
+    %% This variable is used to compute other variables only, we
+    %% don't need to know what a remote node used initially. Only the
+    %% variables based on it are relevant.
+    update_context(Context, mnesia_base_dir, undefined, default).
+
+%% Default: ${data_dir}/mnesia on Unix, ${data_dir}/db on Windows.
+get_default_mnesia_base_dir(#{data_dir := DataDir} = Context) ->
+    Basename = case Context of
+                   #{os_type := {unix, _}}  -> "mnesia";
+                   #{os_type := {win32, _}} -> "db"
+               end,
+    filename:join(DataDir, Basename).
+
+%% Resolves $RABBITMQ_MNESIA_DIR. When operating on behalf of a
+%% remote node, the actual directory is queried from that node.
+mnesia_dir(#{from_remote_node := Remote} = Context) ->
+    case get_prefixed_env_var("RABBITMQ_MNESIA_DIR") of
+        false when Remote =:= offline ->
+            update_context(Context, mnesia_dir, undefined, default);
+        false ->
+            mnesia_dir_from_node(Context);
+        Value ->
+            Dir = normalize_path(Value),
+            update_context(Context, mnesia_dir, Dir, environment)
+    end;
+mnesia_dir(Context) ->
+    mnesia_dir_from_env(Context).
+
+%% Local-node resolution: environment variable or computed default.
+mnesia_dir_from_env(Context) ->
+    case get_prefixed_env_var("RABBITMQ_MNESIA_DIR") of
+        false ->
+            Dir = get_default_mnesia_dir(Context),
+            update_context(Context, mnesia_dir, Dir, default);
+        Value ->
+            Dir = normalize_path(Value),
+            update_context(Context, mnesia_dir, Dir, environment)
+    end.
+
+%% Asks the remote node for its `mnesia` application `dir` setting.
+%% Throws if the remote node has no Mnesia directory configured;
+%% an unreachable node falls back to `undefined`.
+mnesia_dir_from_node(#{from_remote_node := Remote} = Context) ->
+    Ret = query_remote(Remote, application, get_env, [mnesia, dir]),
+    case Ret of
+        {ok, undefined} ->
+            throw({query, Remote, {mnesia, dir, undefined}});
+        {ok, {ok, Value}} ->
+            Dir = normalize_path(Value),
+            update_context(Context, mnesia_dir, Dir, remote_node);
+        {badrpc, nodedown} ->
+            update_context(Context, mnesia_dir, undefined, default)
+    end.
+
+%% Default: ${mnesia_base_dir}/${nodename} on Unix, with a "-mnesia"
+%% suffix on Windows. Callers guarantee mnesia_base_dir is set here.
+get_default_mnesia_dir(#{os_type := {unix, _},
+                         nodename := Nodename,
+                         mnesia_base_dir := MnesiaBaseDir})
+  when MnesiaBaseDir =/= undefined ->
+    filename:join(MnesiaBaseDir, atom_to_list(Nodename));
+get_default_mnesia_dir(#{os_type := {win32, _},
+                         nodename := Nodename,
+                         mnesia_base_dir := MnesiaBaseDir})
+  when MnesiaBaseDir =/= undefined ->
+    filename:join(MnesiaBaseDir, atom_to_list(Nodename) ++ "-mnesia").
+
+%% -------------------------------------------------------------------
+%%
+%% RABBITMQ_QUORUM_DIR
+%% Directory where to store Ra state for quorum queues.
+%% Default: ${RABBITMQ_MNESIA_DIR}/quorum
+
+%% Resolves the Ra data directory for quorum queues, from
+%% $RABBITMQ_QUORUM_DIR or ${mnesia_dir}/quorum. Stays `undefined`
+%% when the Mnesia directory itself is unknown (e.g. offline mode).
+quorum_queue_dir(#{mnesia_dir := MnesiaDir} = Context) ->
+    case get_prefixed_env_var("RABBITMQ_QUORUM_DIR") of
+        false when MnesiaDir =/= undefined ->
+            Dir = filename:join(MnesiaDir, "quorum"),
+            update_context(Context, quorum_queue_dir, Dir, default);
+        false when MnesiaDir =:= undefined ->
+            update_context(Context, quorum_queue_dir, undefined, default);
+        Value ->
+            Dir = normalize_path(Value),
+            update_context(Context, quorum_queue_dir, Dir, environment)
+    end.
+
+%% -------------------------------------------------------------------
+%%
+%% RABBITMQ_STREAM_DIR
+%% Directory where to store Ra state for stream queues.
+%% Default: ${RABBITMQ_MNESIA_DIR}/stream
+
+%% Resolves the Ra data directory for stream queues, from
+%% $RABBITMQ_STREAM_DIR or ${mnesia_dir}/stream. Mirrors
+%% quorum_queue_dir/1, including the offline `undefined` case.
+stream_queue_dir(#{mnesia_dir := MnesiaDir} = Context) ->
+    case get_prefixed_env_var("RABBITMQ_STREAM_DIR") of
+        false when MnesiaDir =/= undefined ->
+            Dir = filename:join(MnesiaDir, "stream"),
+            update_context(Context, stream_queue_dir, Dir, default);
+        false when MnesiaDir =:= undefined ->
+            update_context(Context, stream_queue_dir, undefined, default);
+        Value ->
+            Dir = normalize_path(Value),
+            update_context(Context, stream_queue_dir, Dir, environment)
+    end.
+
+%% -------------------------------------------------------------------
+%%
+%% RABBITMQ_PID_FILE
+%% File used to write the Erlang VM OS PID.
+%% Default: ${RABBITMQ_MNESIA_DIR}.pid
+%%
+%% RABBITMQ_KEEP_PID_FILE_ON_EXIT
+%% Whether to keep or remove the PID file on Erlang VM exit.
+%%   Default: false
+
+%% Resolves the PID file location, from $RABBITMQ_PID_FILE or
+%% ${mnesia_base_dir}/${nodename}.pid. Stays `undefined` when the
+%% Mnesia base directory is unknown (remote/offline contexts).
+pid_file(#{mnesia_base_dir := MnesiaBaseDir,
+           nodename := Nodename} = Context) ->
+    case get_prefixed_env_var("RABBITMQ_PID_FILE") of
+        false when MnesiaBaseDir =/= undefined ->
+            File = filename:join(MnesiaBaseDir,
+                                 atom_to_list(Nodename) ++ ".pid"),
+            update_context(Context, pid_file, File, default);
+        false when MnesiaBaseDir =:= undefined ->
+            update_context(Context, pid_file, undefined, default);
+        Value ->
+            File = normalize_path(Value),
+            update_context(Context, pid_file, File, environment)
+    end.
+
+%% Whether to keep the PID file when the VM exits; driven by
+%% $RABBITMQ_KEEP_PID_FILE_ON_EXIT (a yes/no-style value), default false.
+keep_pid_file_on_exit(Context) ->
+    case get_prefixed_env_var("RABBITMQ_KEEP_PID_FILE_ON_EXIT") of
+        false ->
+            update_context(Context, keep_pid_file_on_exit, false, default);
+        Value ->
+            Keep = value_is_yes(Value),
+            update_context(Context, keep_pid_file_on_exit, Keep, environment)
+    end.
+
+%% -------------------------------------------------------------------
+%%
+%% RABBITMQ_FEATURE_FLAGS_FILE
+%% File used to store enabled feature flags.
+%% Default: ${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME}-feature_flags
+
+%% Resolves the enabled-feature-flags file, from
+%% $RABBITMQ_FEATURE_FLAGS_FILE, the remote node, or the default
+%% ${mnesia_base_dir}/${nodename}-feature_flags.
+feature_flags_file(#{from_remote_node := Remote} = Context) ->
+    case get_prefixed_env_var("RABBITMQ_FEATURE_FLAGS_FILE") of
+        false when Remote =:= offline ->
+            update_context(Context, feature_flags_file, undefined, default);
+        false ->
+            feature_flags_file_from_node(Context);
+        Value ->
+            File = normalize_path(Value),
+            update_context(Context, feature_flags_file, File, environment)
+    end;
+feature_flags_file(Context) ->
+    feature_flags_file_from_env(Context).
+
+%% NOTE(review): this clause uses get_env_var/1 while the remote-node
+%% clause above and every sibling *_from_env function use
+%% get_prefixed_env_var/1 — so a prefixed variant of the variable is
+%% ignored on this path. Looks unintentional; confirm.
+feature_flags_file_from_env(#{mnesia_base_dir := MnesiaBaseDir,
+                              nodename := Nodename} = Context) ->
+    case get_env_var("RABBITMQ_FEATURE_FLAGS_FILE") of
+        false ->
+            File = filename:join(MnesiaBaseDir,
+                                 atom_to_list(Nodename) ++ "-feature_flags"),
+            update_context(Context, feature_flags_file, File, default);
+        Value ->
+            File = normalize_path(Value),
+            update_context(Context, feature_flags_file, File, environment)
+    end.
+
+%% Asks the remote node for its `rabbit` `feature_flags_file` setting.
+%% Throws if the remote node has none; an unreachable node falls back
+%% to `undefined`.
+feature_flags_file_from_node(#{from_remote_node := Remote} = Context) ->
+    Ret = query_remote(Remote,
+                       application, get_env, [rabbit, feature_flags_file]),
+    case Ret of
+        {ok, undefined} ->
+            throw({query, Remote, {rabbit, feature_flags_file, undefined}});
+        {ok, {ok, Value}} ->
+            File = normalize_path(Value),
+            update_context(Context, feature_flags_file, File, remote_node);
+        {badrpc, nodedown} ->
+            update_context(Context, feature_flags_file, undefined, default)
+    end.
+
+%% Parses $RABBITMQ_FEATURE_FLAGS into the list of feature flags to
+%% force on first boot. An empty string is meaningful here ("force
+%% none"), hence the `keep_empty_string_as_is` option.
+%% NOTE(review): list_to_atom/1 on environment input grows the atom
+%% table; acceptable for an operator-controlled variable.
+forced_feature_flags_on_init(Context) ->
+    Value = get_prefixed_env_var("RABBITMQ_FEATURE_FLAGS",
+                                 [keep_empty_string_as_is]),
+    case Value of
+        false ->
+            %% get_prefixed_env_var() considers an empty string
+            %% is the same as an undefined environment variable.
+            update_context(Context,
+                           forced_feature_flags_on_init, undefined, default);
+        _ ->
+            Flags = [list_to_atom(V) || V <- string:lexemes(Value, ",")],
+            update_context(Context,
+                           forced_feature_flags_on_init, Flags, environment)
+    end.
+
+%% Whether to log the feature flags registry; driven by
+%% $RABBITMQ_LOG_FF_REGISTRY (yes/no-style value), default false.
+log_feature_flags_registry(Context) ->
+    case get_prefixed_env_var("RABBITMQ_LOG_FF_REGISTRY") of
+        false ->
+            update_context(Context,
+                           log_feature_flags_registry, false, default);
+        Value ->
+            Log = value_is_yes(Value),
+            update_context(Context,
+                           log_feature_flags_registry, Log, environment)
+    end.
+
+%% -------------------------------------------------------------------
+%%
+%% RABBITMQ_PLUGINS_DIR
+%% List of directories where to look for plugins.
+%% Directories are separated by:
+%% ':' on Unix
+%% ';' on Windows
+%% Default: ${RABBITMQ_HOME}/plugins
+%%
+%% RABBITMQ_PLUGINS_EXPAND_DIR
+%% Directory where to expand plugin archives.
+%% Default: ${RABBITMQ_MNESIA_BASE}/${RABBITMQ_NODENAME}-plugins-expand
+%%
+%% RABBITMQ_ENABLED_PLUGINS_FILE
+%% File where the list of enabled plugins is stored.
+%% Default: (Unix) ${SYS_PREFIX}/etc/rabbitmq/enabled_plugins
+%% (Windows) ${RABBITMQ_BASE}\enabled_plugins
+%%
+%% RABBITMQ_ENABLED_PLUGINS
+%% List of plugins to enable on startup.
+%% Values are:
+%% "ALL" to enable all plugins
+%% "" to enable no plugin
+%%     a list of plugin names, separated by a comma (',')
+%% Default: Empty (i.e. use ${RABBITMQ_ENABLED_PLUGINS_FILE})
+
+%% Resolves the plugins search path (':'-separated on Unix,
+%% ';'-separated on Windows), from $RABBITMQ_PLUGINS_DIR, the remote
+%% node, or a default derived from this module's own location.
+plugins_path(#{from_remote_node := Remote} = Context) ->
+    case get_prefixed_env_var("RABBITMQ_PLUGINS_DIR") of
+        false when Remote =:= offline ->
+            update_context(Context, plugins_path, undefined, default);
+        false ->
+            plugins_path_from_node(Context);
+        Path ->
+            update_context(Context, plugins_path, Path, environment)
+    end;
+plugins_path(Context) ->
+    plugins_path_from_env(Context).
+
+%% Local-node resolution: environment variable or computed default.
+plugins_path_from_env(Context) ->
+    case get_prefixed_env_var("RABBITMQ_PLUGINS_DIR") of
+        false ->
+            Path = get_default_plugins_path_from_env(Context),
+            update_context(Context, plugins_path, Path, default);
+        Path ->
+            update_context(Context, plugins_path, Path, environment)
+    end.
+
+%% Asks the remote node for its `rabbit` `plugins_dir` setting.
+plugins_path_from_node(#{from_remote_node := Remote} = Context) ->
+    Ret = query_remote(Remote, application, get_env, [rabbit, plugins_dir]),
+    case Ret of
+        {ok, undefined} ->
+            throw({query, Remote, {rabbit, plugins_dir, undefined}});
+        {ok, {ok, Path}} ->
+            update_context(Context, plugins_path, Path, remote_node);
+        {badrpc, nodedown} ->
+            update_context(Context, plugins_path, undefined, default)
+    end.
+
+%% Default plugins path, dispatching on where the answer comes from.
+get_default_plugins_path(#{from_remote_node := offline}) ->
+    undefined;
+get_default_plugins_path(#{from_remote_node := Remote}) ->
+    get_default_plugins_path_from_node(Remote);
+get_default_plugins_path(Context) ->
+    get_default_plugins_path_from_env(Context).
+
+%% Derives the default plugins dir from this module's location. For a
+%% package-manager install under /usr/lib/rabbitmq/, a user-writable
+%% plugins directory is prepended to the search path.
+get_default_plugins_path_from_env(#{os_type := OSType}) ->
+    ThisModDir = this_module_dir(),
+    PluginsDir = rabbit_common_mod_location_to_plugins_dir(ThisModDir),
+    case {OSType, PluginsDir} of
+        {{unix, _}, "/usr/lib/rabbitmq/" ++ _} ->
+            UserPluginsDir = filename:join(
+                               ["/", "usr", "lib", "rabbitmq", "plugins"]),
+            UserPluginsDir ++ ":" ++ PluginsDir;
+        _ ->
+            PluginsDir
+    end.
+
+%% Locates rabbit_common.app on the remote node and derives the
+%% plugins dir from it; `undefined` if the node is unreachable.
+get_default_plugins_path_from_node(Remote) ->
+    Ret = query_remote(Remote, code, where_is_file, ["rabbit_common.app"]),
+    case Ret of
+        {ok, non_existing = Error} ->
+            throw({query, Remote, {code, where_is_file, Error}});
+        {ok, Path} ->
+            rabbit_common_mod_location_to_plugins_dir(filename:dirname(Path));
+        {badrpc, nodedown} ->
+            undefined
+    end.
+
+%% Maps the directory holding the rabbit_common beam files to the
+%% plugins directory, covering three layouts: inside a plugin's .ez
+%% archive, inside an unpacked plugin directory, or inside the CLI
+%% escript. The filelib:is_dir/1 check distinguishes a real `ebin`
+%% directory from a path inside an archive.
+rabbit_common_mod_location_to_plugins_dir(ModDir) ->
+    case filename:basename(ModDir) of
+        "ebin" ->
+            case filelib:is_dir(ModDir) of
+                false ->
+                    %% rabbit_common in the plugin's .ez archive.
+                    filename:dirname(
+                      filename:dirname(
+                        filename:dirname(ModDir)));
+                true ->
+                    %% rabbit_common in the plugin's directory.
+                    filename:dirname(
+                      filename:dirname(ModDir))
+            end;
+        _ ->
+            %% rabbit_common in the CLI escript.
+            filename:join(
+              filename:dirname(
+                filename:dirname(ModDir)),
+              "plugins")
+    end.
+
+%% Resolves the directory where plugin archives are expanded, from
+%% $RABBITMQ_PLUGINS_EXPAND_DIR or
+%% ${mnesia_base_dir}/${nodename}-plugins-expand. Stays `undefined`
+%% when the Mnesia base directory is unknown.
+plugins_expand_dir(#{mnesia_base_dir := MnesiaBaseDir,
+                     nodename := Nodename} = Context) ->
+    case get_prefixed_env_var("RABBITMQ_PLUGINS_EXPAND_DIR") of
+        false when MnesiaBaseDir =/= undefined ->
+            Dir = filename:join(
+                    MnesiaBaseDir,
+                    atom_to_list(Nodename) ++ "-plugins-expand"),
+            update_context(Context, plugins_expand_dir, Dir, default);
+        false when MnesiaBaseDir =:= undefined ->
+            update_context(Context, plugins_expand_dir, undefined, default);
+        Value ->
+            Dir = normalize_path(Value),
+            update_context(Context, plugins_expand_dir, Dir, environment)
+    end.
+
+%% Resolves the enabled-plugins file, from
+%% $RABBITMQ_ENABLED_PLUGINS_FILE, the remote node, or the default
+%% location under the config base directory.
+enabled_plugins_file(#{from_remote_node := Remote} = Context) ->
+    case get_prefixed_env_var("RABBITMQ_ENABLED_PLUGINS_FILE") of
+        false when Remote =:= offline ->
+            update_context(Context, enabled_plugins_file, undefined, default);
+        false ->
+            enabled_plugins_file_from_node(Context);
+        Value ->
+            File = normalize_path(Value),
+            update_context(Context, enabled_plugins_file, File, environment)
+    end;
+enabled_plugins_file(Context) ->
+    enabled_plugins_file_from_env(Context).
+
+%% Local-node resolution: environment variable or computed default.
+enabled_plugins_file_from_env(Context) ->
+    case get_prefixed_env_var("RABBITMQ_ENABLED_PLUGINS_FILE") of
+        false ->
+            File = get_default_enabled_plugins_file(Context),
+            update_context(Context, enabled_plugins_file, File, default);
+        Value ->
+            File = normalize_path(Value),
+            update_context(Context, enabled_plugins_file, File, environment)
+    end.
+
+%% Default enabled-plugins file location.
+get_default_enabled_plugins_file(#{config_base_dir := ConfigBaseDir}) ->
+    filename:join(ConfigBaseDir, "enabled_plugins").
+
+%% Asks the remote node for its `rabbit` `enabled_plugins_file`
+%% setting; throws if unset, `undefined` if the node is unreachable.
+enabled_plugins_file_from_node(#{from_remote_node := Remote} = Context) ->
+    Ret = query_remote(Remote,
+                       application, get_env, [rabbit, enabled_plugins_file]),
+    case Ret of
+        {ok, undefined} ->
+            throw({query, Remote, {rabbit, enabled_plugins_file, undefined}});
+        {ok, {ok, Value}} ->
+            File = normalize_path(Value),
+            update_context(Context, enabled_plugins_file, File, remote_node);
+        {badrpc, nodedown} ->
+            update_context(Context, enabled_plugins_file, undefined, default)
+    end.
+
+%% Parses $RABBITMQ_ENABLED_PLUGINS: "ALL" enables everything, an
+%% empty string (kept distinct from "unset") enables nothing, and any
+%% other value is a comma-separated list of plugin names.
+%% NOTE(review): list_to_atom/1 on an operator-controlled variable.
+enabled_plugins(Context) ->
+    Value = get_prefixed_env_var(
+              "RABBITMQ_ENABLED_PLUGINS",
+              [keep_empty_string_as_is]),
+    case Value of
+        false ->
+            update_context(Context, enabled_plugins, undefined, default);
+        "ALL" ->
+            update_context(Context, enabled_plugins, all, environment);
+        "" ->
+            update_context(Context, enabled_plugins, [], environment);
+        _ ->
+            Plugins = [list_to_atom(P) || P <- string:lexemes(Value, ",")],
+            update_context(Context, enabled_plugins, Plugins, environment)
+    end.
+
+%% -------------------------------------------------------------------
+%%
+%% RABBITMQ_NODE_IP_ADDRESS
+%% AMQP TCP IP address to listen on
+%% Default: unset (i.e. listen on all interfaces)
+%%
+%% RABBITMQ_NODE_PORT
+%% AMQP TCP port.
+%% Default: 5672
+%%
+%% RABBITMQ_DIST_PORT
+%% Erlang distribution TCP port.
+%% Default: ${RABBITMQ_NODE_PORT} + 20000
+
+%% Resolves the AMQP listener IP address from
+%% $RABBITMQ_NODE_IP_ADDRESS; "auto" means listen on all interfaces.
+amqp_ipaddr(Context) ->
+    case get_prefixed_env_var("RABBITMQ_NODE_IP_ADDRESS") of
+        false ->
+            update_context(Context, amqp_ipaddr, "auto", default);
+        Value ->
+            update_context(Context, amqp_ipaddr, Value, environment)
+    end.
+
+%% Resolves the AMQP TCP port from $RABBITMQ_NODE_PORT (default 5672).
+%% A non-integer value is a fatal configuration error.
+amqp_tcp_port(Context) ->
+    case get_prefixed_env_var("RABBITMQ_NODE_PORT") of
+        false ->
+            update_context(Context, amqp_tcp_port, 5672, default);
+        TcpPortStr ->
+            try
+                TcpPort = erlang:list_to_integer(TcpPortStr),
+                update_context(Context, amqp_tcp_port, TcpPort, environment)
+            catch
+                %% list_to_integer/1 raises badarg on non-numeric input.
+                _:badarg ->
+                    rabbit_log_prelaunch:error(
+                      "Invalid value for $RABBITMQ_NODE_PORT: ~p",
+                      [TcpPortStr]),
+                    throw({exit, ex_config})
+            end
+    end.
+
+%% Resolves the Erlang distribution port from $RABBITMQ_DIST_PORT,
+%% defaulting to the AMQP port + 20000 (hence the dependency on
+%% `amqp_tcp_port` being computed first).
+erlang_dist_tcp_port(#{amqp_tcp_port := AmqpTcpPort} = Context) ->
+    case get_prefixed_env_var("RABBITMQ_DIST_PORT") of
+        false ->
+            TcpPort = AmqpTcpPort + 20000,
+            update_context(Context, erlang_dist_tcp_port, TcpPort, default);
+        TcpPortStr ->
+            try
+                TcpPort = erlang:list_to_integer(TcpPortStr),
+                update_context(Context,
+                               erlang_dist_tcp_port, TcpPort, environment)
+            catch
+                _:badarg ->
+                    rabbit_log_prelaunch:error(
+                      "Invalid value for $RABBITMQ_DIST_PORT: ~p",
+                      [TcpPortStr]),
+                    throw({exit, ex_config})
+            end
+    end.
+
+%% -------------------------------------------------------------------
+%%
+%% SYS_PREFIX [Unix only]
+%% Default: ""
+%%
+%% RABBITMQ_BASE [Windows only]
+%% Directory where to put RabbitMQ data.
+%% Default: !APPDATA!\RabbitMQ
+
+%% Resolves $SYS_PREFIX (Unix only); default is the empty string so
+%% that joined paths like ${SYS_PREFIX}/etc/rabbitmq become absolute.
+%% A no-op on other platforms.
+sys_prefix(#{os_type := {unix, _}} = Context) ->
+    case get_env_var("SYS_PREFIX") of
+        false ->
+            update_context(Context, sys_prefix, "", default);
+        Value ->
+            Dir = normalize_path(Value),
+            update_context(Context, sys_prefix, Dir, environment)
+    end;
+sys_prefix(Context) ->
+    Context.
+
+%% Resolves $RABBITMQ_BASE (Windows only); defaults to
+%% %APPDATA%\RabbitMQ. A no-op on other platforms.
+%% NOTE(review): assumes %APPDATA% is always set — if it were not,
+%% get_env_var/1 would return `false` and normalize_path would receive
+%% a non-path argument; confirm normalize_path's behavior.
+rabbitmq_base(#{os_type := {win32, _}} = Context) ->
+    case get_env_var("RABBITMQ_BASE") of
+        false ->
+            AppData = normalize_path(get_env_var("APPDATA")),
+            Dir = filename:join(AppData, "RabbitMQ"),
+            update_context(Context, rabbitmq_base, Dir, default);
+        Value ->
+            Dir = normalize_path(Value),
+            update_context(Context, rabbitmq_base, Dir, environment)
+    end;
+rabbitmq_base(Context) ->
+    Context.
+
+%% Platform data directory: ${SYS_PREFIX}/var/lib/rabbitmq on Unix,
+%% ${RABBITMQ_BASE} on Windows.
+data_dir(#{os_type := {unix, _},
+           sys_prefix := SysPrefix} = Context) ->
+    Dir = filename:join([SysPrefix, "var", "lib", "rabbitmq"]),
+    update_context(Context, data_dir, Dir);
+data_dir(#{os_type := {win32, _},
+           rabbitmq_base := RabbitmqBase} = Context) ->
+    update_context(Context, data_dir, RabbitmqBase).
+
+%% Resolves $RABBITMQ_HOME; by default, the parent directory of the
+%% default plugins path.
+rabbitmq_home(Context) ->
+    case get_env_var("RABBITMQ_HOME") of
+        false ->
+            Dir = filename:dirname(get_default_plugins_path(Context)),
+            update_context(Context, rabbitmq_home, Dir, default);
+        Value ->
+            Dir = normalize_path(Value),
+            update_context(Context, rabbitmq_home, Dir, environment)
+    end.
+
+%% -------------------------------------------------------------------
+%%
+%% RABBITMQ_ALLOW_INPUT
+%% Indicate if an Erlang shell is started or not.
+%% Default: false
+
+%% Whether an interactive Erlang shell should be available; driven by
+%% $RABBITMQ_ALLOW_INPUT (yes/no-style value), default false.
+interactive_shell(Context) ->
+    case get_env_var("RABBITMQ_ALLOW_INPUT") of
+        false ->
+            update_context(Context,
+                           interactive_shell, false, default);
+        Value ->
+            update_context(Context,
+                           interactive_shell, value_is_yes(Value), environment)
+    end.
+
+%% FIXME: We would need a way to call isatty(3) to make sure the output
+%% is a terminal.
+%% Coarse per-OS guess: colored output on Unix, none on Windows.
+output_supports_colors(#{os_type := {unix, _}} = Context) ->
+    update_context(Context, output_supports_colors, true, default);
+output_supports_colors(#{os_type := {win32, _}} = Context) ->
+    update_context(Context, output_supports_colors, false, default).
+
+%% -------------------------------------------------------------------
+%%
+%% RABBITMQ_PRODUCT_NAME
+%% Override the product name
+%% Default: unset (i.e. "RabbitMQ")
+%%
+%% RABBITMQ_PRODUCT_VERSION
+%% Override the product version
+%% Default: unset (i.e. `rabbit` application version).
+%%
+%% RABBITMQ_MOTD_FILE
+%% Indicate a filename containing a "message of the day" to add to
+%% the banners, both the logged and the printed ones.
+%% Default: (Unix) ${SYS_PREFIX}/etc/rabbitmq/motd
+%% (Windows) ${RABBITMQ_BASE}\motd.txt
+
+%% Resolves the product name override, from $RABBITMQ_PRODUCT_NAME,
+%% the remote node, or `undefined` (use the built-in name).
+product_name(#{from_remote_node := Remote} = Context) ->
+    case get_prefixed_env_var("RABBITMQ_PRODUCT_NAME") of
+        false when Remote =:= offline ->
+            update_context(Context, product_name, undefined, default);
+        false ->
+            product_name_from_node(Context);
+        Value ->
+            update_context(Context, product_name, Value, environment)
+    end;
+product_name(Context) ->
+    product_name_from_env(Context).
+
+product_name_from_env(Context) ->
+    case get_prefixed_env_var("RABBITMQ_PRODUCT_NAME") of
+        false ->
+            update_context(Context, product_name, undefined, default);
+        Value ->
+            update_context(Context, product_name, Value, environment)
+    end.
+
+%% Queries rabbit:product_name/0 on the remote node. The old-style
+%% `catch` swallows a thrown {query, _, _} from query_remote/4 as well
+%% as a nodedown, both of which fall back to `undefined`.
+product_name_from_node(#{from_remote_node := Remote} = Context) ->
+    Ret = (catch query_remote(Remote, rabbit, product_name, [])),
+    case Ret of
+        {badrpc, nodedown} ->
+            update_context(Context, product_name, undefined, default);
+        {query, _, _} ->
+            update_context(Context, product_name, undefined, default);
+        Value ->
+            update_context(Context, product_name, Value, remote_node)
+    end.
+
+%% Resolves the product version override, from
+%% $RABBITMQ_PRODUCT_VERSION, the remote node, or `undefined`.
+product_version(#{from_remote_node := Remote} = Context) ->
+    case get_prefixed_env_var("RABBITMQ_PRODUCT_VERSION") of
+        false when Remote =:= offline ->
+            update_context(Context, product_version, undefined, default);
+        false ->
+            product_version_from_node(Context);
+        Value ->
+            update_context(Context, product_version, Value, environment)
+    end;
+product_version(Context) ->
+    product_version_from_env(Context).
+
+product_version_from_env(Context) ->
+    case get_prefixed_env_var("RABBITMQ_PRODUCT_VERSION") of
+        false ->
+            update_context(Context, product_version, undefined, default);
+        Value ->
+            update_context(Context, product_version, Value, environment)
+    end.
+
+%% Same query/fallback pattern as product_name_from_node/1, but note
+%% the badrpc clause matches any reason, not just `nodedown`.
+product_version_from_node(#{from_remote_node := Remote} = Context) ->
+    Ret = (catch query_remote(Remote, rabbit, product_version, [])),
+    case Ret of
+        {badrpc, _} ->
+            update_context(Context, product_version, undefined, default);
+        {query, _, _} ->
+            update_context(Context, product_version, undefined, default);
+        Value ->
+            update_context(Context, product_version, Value, remote_node)
+    end.
+
+%% Resolves the "message of the day" file, from $RABBITMQ_MOTD_FILE,
+%% the remote node, or an OS-specific default under config_base_dir.
+motd_file(#{from_remote_node := Remote} = Context) ->
+    case get_prefixed_env_var("RABBITMQ_MOTD_FILE") of
+        false when Remote =:= offline ->
+            update_context(Context, motd_file, undefined, default);
+        false ->
+            motd_file_from_node(Context);
+        Value ->
+            File = normalize_path(Value),
+            update_context(Context, motd_file, File, environment)
+    end;
+motd_file(Context) ->
+    motd_file_from_env(Context).
+
+motd_file_from_env(Context) ->
+    case get_prefixed_env_var("RABBITMQ_MOTD_FILE") of
+        false ->
+            File = get_default_motd_file(Context),
+            update_context(Context, motd_file, File, default);
+        Value ->
+            File = normalize_path(Value),
+            update_context(Context, motd_file, File, environment)
+    end.
+
+%% Default: "motd" on Unix, "motd.txt" on Windows.
+get_default_motd_file(#{os_type := {unix, _},
+                        config_base_dir := ConfigBaseDir}) ->
+    filename:join(ConfigBaseDir, "motd");
+get_default_motd_file(#{os_type := {win32, _},
+                        config_base_dir := ConfigBaseDir}) ->
+    filename:join(ConfigBaseDir, "motd.txt").
+
+%% Queries rabbit:motd_file/0 on the remote node; both an unreachable
+%% node and a thrown {query, _, _} fall back to `undefined`.
+motd_file_from_node(#{from_remote_node := Remote} = Context) ->
+    Ret = (catch query_remote(Remote, rabbit, motd_file, [])),
+    case Ret of
+        {badrpc, _} ->
+            update_context(Context, motd_file, undefined, default);
+        {query, _, _} ->
+            update_context(Context, motd_file, undefined, default);
+        File ->
+            update_context(Context, motd_file, File, remote_node)
+    end.
+
+%% -------------------------------------------------------------------
+%% Loading of rabbitmq-env.conf.
+%% -------------------------------------------------------------------
+
+%% Locates and, when enabled and present, executes the
+%% rabbitmq-env.conf(.bat) shell snippet to pick up legacy environment
+%% variable settings. The Unix clause sources it with sh(1); the
+%% Windows clause runs it with cmd.exe, falling back to %ComSpec% when
+%% cmd.exe is not on the PATH. Any other OS is a no-op.
+load_conf_env_file(#{os_type := {unix, _},
+                     sys_prefix := SysPrefix} = Context) ->
+    {ConfEnvFile, Origin} =
+    case get_prefixed_env_var("RABBITMQ_CONF_ENV_FILE") of
+        false ->
+            File = filename:join(
+                     [SysPrefix, "etc", "rabbitmq", "rabbitmq-env.conf"]),
+            {File, default};
+        Value ->
+            {normalize_path(Value), environment}
+    end,
+    Context1 = update_context(Context, conf_env_file, ConfEnvFile, Origin),
+    case loading_conf_env_file_enabled(Context1) of
+        true ->
+            case filelib:is_regular(ConfEnvFile) of
+                false ->
+                    rabbit_log_prelaunch:debug(
+                      "No $RABBITMQ_CONF_ENV_FILE (~ts)", [ConfEnvFile]),
+                    Context1;
+                true ->
+                    %% Silently skipped if no `sh` is available.
+                    case os:find_executable("sh") of
+                        false -> Context1;
+                        Sh    -> do_load_conf_env_file(Context1,
+                                                       Sh,
+                                                       ConfEnvFile)
+                    end
+            end;
+        false ->
+            rabbit_log_prelaunch:debug(
+              "Loading of $RABBITMQ_CONF_ENV_FILE (~ts) is disabled",
+              [ConfEnvFile]),
+            Context1
+    end;
+load_conf_env_file(#{os_type := {win32, _},
+                     rabbitmq_base := RabbitmqBase} = Context) ->
+    {ConfEnvFile, Origin} =
+    case get_prefixed_env_var("RABBITMQ_CONF_ENV_FILE") of
+        false ->
+            File = filename:join([RabbitmqBase, "rabbitmq-env-conf.bat"]),
+            {File, default};
+        Value ->
+            {normalize_path(Value), environment}
+    end,
+    Context1 = update_context(Context, conf_env_file, ConfEnvFile, Origin),
+    case loading_conf_env_file_enabled(Context1) of
+        true ->
+            case filelib:is_regular(ConfEnvFile) of
+                false ->
+                    rabbit_log_prelaunch:debug(
+                      "No $RABBITMQ_CONF_ENV_FILE (~ts)", [ConfEnvFile]),
+                    Context1;
+                true ->
+                    case os:find_executable("cmd.exe") of
+                        false ->
+                            %% cmd.exe not on PATH: fall back to the
+                            %% interpreter named by %ComSpec%, if any.
+                            Cmd = os:getenv("ComSpec"),
+                            CmdExists =
+                            Cmd =/= false andalso
+                            filelib:is_regular(Cmd),
+                            case CmdExists of
+                                false -> Context1;
+                                true  -> do_load_conf_env_file(Context1,
+                                                               Cmd,
+                                                               ConfEnvFile)
+                            end;
+                        Cmd ->
+                            do_load_conf_env_file(Context1, Cmd, ConfEnvFile)
+                    end
+            end;
+        false ->
+            rabbit_log_prelaunch:debug(
+              "Loading of $RABBITMQ_CONF_ENV_FILE (~ts) is disabled",
+              [ConfEnvFile]),
+            Context1
+    end;
+load_conf_env_file(Context) ->
+    Context.
+
+-spec loading_conf_env_file_enabled(map()) -> boolean().
+
+-ifdef(TEST).
+%% In test builds, loading can be toggled off via a persistent_term
+%% flag keyed on this module; it defaults to enabled.
+loading_conf_env_file_enabled(_) ->
+    persistent_term:get({?MODULE, load_conf_env_file}, true).
+-else.
+loading_conf_env_file_enabled(_) ->
+    %% When this module is built without `TEST` defined, we want this
+    %% function to always return true. However, this makes Dialyzer
+    %% think it can only return true: this is not the case when the
+    %% module is compiled with `TEST` defined. The following line is
+    %% here to trick Dialyzer.
+    erlang:get({?MODULE, always_undefined}) =:= undefined.
+-endif.
+
+%% Sources (on Unix) or executes (on Windows) the $RABBITMQ_CONF_ENV_FILE
+%% script in a child shell, then dumps that shell's variables with `set'
+%% so they can be parsed and re-exported into this VM's environment.
+do_load_conf_env_file(#{os_type := {unix, _}} = Context, Sh, ConfEnvFile) ->
+    rabbit_log_prelaunch:debug(
+      "Sourcing $RABBITMQ_CONF_ENV_FILE: ~ts", [ConfEnvFile]),
+
+    %% The script below sources the `CONF_ENV_FILE` file, then it shows a
+    %% marker line and all environment variables.
+    %%
+    %% The marker line is useful to distinguish any output from the sourced
+    %% script from the variables we are interested in.
+    Marker = vars_list_marker(),
+    Script = rabbit_misc:format(
+               ". \"~ts\" && "
+               "echo \"~s\" && "
+               "set", [ConfEnvFile, Marker]),
+
+    #{sys_prefix := SysPrefix,
+      rabbitmq_home := RabbitmqHome} = Context,
+    %% The `.conf'/`.config' extension is stripped from the default main
+    %% config file; presumably conf env files expect $CONFIG_FILE
+    %% without it — confirm against the shell-based startup scripts.
+    MainConfigFile = re:replace(
+                       get_default_main_config_file(Context),
+                       "\\.(conf|config)$", "", [{return, list}]),
+
+    %% The variables below are those the `CONF_ENV_FILE` file can expect.
+    Env = [
+           {"SYS_PREFIX", SysPrefix},
+           {"RABBITMQ_HOME", RabbitmqHome},
+           {"CONFIG_FILE", MainConfigFile},
+           {"ADVANCED_CONFIG_FILE", get_default_advanced_config_file(Context)},
+           {"MNESIA_BASE", get_default_mnesia_base_dir(Context)},
+           {"ENABLED_PLUGINS_FILE", get_default_enabled_plugins_file(Context)},
+           {"PLUGINS_DIR", get_default_plugins_path_from_env(Context)},
+           %% NOTE(review): "rabbtimq-prelaunch" looks like a typo of
+           %% "rabbitmq-prelaunch"; existing conf env files may match on
+           %% the misspelled value, so confirm before changing it.
+           {"CONF_ENV_FILE_PHASE", "rabbtimq-prelaunch"}
+          ],
+
+    %% `-e' makes the shell abort on the first failing command; `-x'
+    %% traces executed commands. The trace lines end up in the captured
+    %% output and are filtered out later by is_sh_set_x_output/1.
+    Args = ["-ex", "-c", Script],
+    Opts = [{args, Args},
+            {env, Env},
+            binary,
+            use_stdio,
+            stderr_to_stdout,
+            exit_status],
+    Port = erlang:open_port({spawn_executable, Sh}, Opts),
+    collect_conf_env_file_output(Context, Port, Marker, <<>>);
+do_load_conf_env_file(#{os_type := {win32, _}} = Context, Cmd, ConfEnvFile) ->
+    %% rabbitmq/rabbitmq-common#392
+    rabbit_log_prelaunch:debug(
+      "Executing $RABBITMQ_CONF_ENV_FILE: ~ts", [ConfEnvFile]),
+
+    %% The script below executes the `CONF_ENV_FILE` file, then it shows a
+    %% marker line and all environment variables.
+    %%
+    %% The marker line is useful to distinguish any output from the sourced
+    %% script from the variables we are interested in.
+    %%
+    %% Arguments are split into a list of strings to support a filename with
+    %% whitespaces in the path.
+    Marker = vars_list_marker(),
+    Script = [ConfEnvFile, "&&",
+              "echo", Marker, "&&",
+              "set"],
+
+    #{rabbitmq_base := RabbitmqBase,
+      rabbitmq_home := RabbitmqHome} = Context,
+    MainConfigFile = re:replace(
+                       get_default_main_config_file(Context),
+                       "\\.(conf|config)$", "", [{return, list}]),
+
+    %% The variables below are those the `CONF_ENV_FILE` file can expect.
+    Env = [
+           {"RABBITMQ_BASE", RabbitmqBase},
+           {"RABBITMQ_HOME", RabbitmqHome},
+           {"CONFIG_FILE", MainConfigFile},
+           {"ADVANCED_CONFIG_FILE", get_default_advanced_config_file(Context)},
+           {"MNESIA_BASE", get_default_mnesia_base_dir(Context)},
+           {"ENABLED_PLUGINS_FILE", get_default_enabled_plugins_file(Context)},
+           {"PLUGINS_DIR", get_default_plugins_path_from_env(Context)},
+           %% NOTE(review): same "rabbtimq-prelaunch" spelling as in the
+           %% Unix clause above; keep both in sync if it is ever fixed.
+           {"CONF_ENV_FILE_PHASE", "rabbtimq-prelaunch"}
+          ],
+
+    %% cmd.exe options: /Q turns command echoing off, /C runs the given
+    %% command string and terminates.
+    Args = ["/Q", "/C" | Script],
+    Opts = [{args, Args},
+            {env, Env},
+            hide,
+            binary,
+            stderr_to_stdout,
+            exit_status],
+    Port = erlang:open_port({spawn_executable, Cmd}, Opts),
+    %% cmd.exe's `echo' apparently emits the marker argument with its
+    %% surrounding double quotes and a trailing space, hence the
+    %% adjusted marker here — TODO confirm on all supported Windows
+    %% versions.
+    collect_conf_env_file_output(Context, Port, "\"" ++ Marker ++ "\" ", <<>>).
+
+%% Builds the marker line echoed between the conf env script's own
+%% output and the `set' dump. Embedding the OS PID makes the marker
+%% unlikely to collide with the script's normal output.
+vars_list_marker() ->
+    rabbit_misc:format(
+      "-----BEGIN VARS LIST FOR PID ~s-----", [os:getpid()]).
+
+%% Accumulates the port's output (as an iolist) until the child process
+%% exits. On exit status 0 the captured output is parsed for variables;
+%% on any other status the context is returned unmodified. Note: there
+%% is no `after' clause, so this blocks until the port delivers an
+%% exit status.
+collect_conf_env_file_output(Context, Port, Marker, Output) ->
+    receive
+        {Port, {exit_status, ExitStatus}} ->
+            Lines = post_port_cmd_output(Context, Output, ExitStatus),
+            case ExitStatus of
+                0 -> parse_conf_env_file_output(Context, Marker, Lines);
+                _ -> Context
+            end;
+        {Port, {data, Chunk}} ->
+            collect_conf_env_file_output(
+              Context, Port, Marker, [Output, Chunk])
+    end.
+
+%% Decodes the raw port output to a string, logs it for debugging, and
+%% splits it into lines using the platform's line separator.
+post_port_cmd_output(#{os_type := {OSType, _}}, Output, ExitStatus) ->
+    rabbit_log_prelaunch:debug(
+      "$RABBITMQ_CONF_ENV_FILE exit status: ~b",
+      [ExitStatus]),
+    DecodedOutput = unicode:characters_to_list(Output),
+    LineSep = case OSType of
+                  win32 -> "\r\n";
+                  _ -> "\n"
+              end,
+    Lines = string:split(string:trim(DecodedOutput), LineSep, all),
+    rabbit_log_prelaunch:debug("$RABBITMQ_CONF_ENV_FILE output:"),
+    [rabbit_log_prelaunch:debug(" ~ts", [Line]) || Line <- Lines],
+    Lines.
+
+%% Discards every line up to (and including) the marker line, then hands
+%% the remaining lines to the variable parser. If the marker is never
+%% found, the context is returned unchanged.
+parse_conf_env_file_output(Context, _, []) ->
+    Context;
+parse_conf_env_file_output(Context, Marker, [Marker | Lines]) ->
+    %% Found our marker, let's parse variables.
+    parse_conf_env_file_output1(Context, Lines);
+parse_conf_env_file_output(Context, Marker, [_ | Lines]) ->
+    parse_conf_env_file_output(Context, Marker, Lines).
+
+%% Parses the `set' dump into a map of variables, then re-exports (via
+%% os:putenv/2) the ones RabbitMQ actually uses and which are not
+%% already set in the current environment — i.e. variables set directly
+%% by the user take precedence over the conf env file.
+parse_conf_env_file_output1(Context, Lines) ->
+    Vars = parse_conf_env_file_output2(Lines, #{}),
+    %% Re-export variables.
+    lists:foreach(
+      fun(Var) ->
+              IsUsed = var_is_used(Var),
+              IsSet = var_is_set(Var),
+              case IsUsed andalso not IsSet of
+                  true ->
+                      rabbit_log_prelaunch:debug(
+                        "$RABBITMQ_CONF_ENV_FILE: re-exporting variable $~s",
+                        [Var]),
+                      os:putenv(Var, maps:get(Var, Vars));
+                  false ->
+                      ok
+              end
+      end, lists:sort(maps:keys(Vars))),
+    Context.
+
+%% Walks the `set' dump line by line: drops `set -x' trace lines, skips
+%% shell function definitions, and parses `Name=Value' pairs (quoted
+%% values may span several lines). Returns a map of variable names to
+%% values.
+parse_conf_env_file_output2([], Vars) ->
+    Vars;
+parse_conf_env_file_output2([Line | Lines], Vars) ->
+    SetXOutput = is_sh_set_x_output(Line),
+    ShFunction = is_sh_function(Line, Lines),
+    if
+        SetXOutput ->
+            parse_conf_env_file_output2(Lines, Vars);
+        ShFunction ->
+            skip_sh_function(Lines, Vars);
+        true ->
+            %% string:split/2 splits on the first "=" only, so values
+            %% containing "=" keep the remainder intact.
+            case string:split(Line, "=") of
+                [Var, IncompleteValue] ->
+                    {Value, Lines1} = parse_sh_literal(IncompleteValue, Lines, ""),
+                    Vars1 = Vars#{Var => Value},
+                    parse_conf_env_file_output2(Lines1, Vars1);
+                _ ->
+                    %% Parsing failed somehow.
+                    rabbit_log_prelaunch:warning(
+                      "Failed to parse $RABBITMQ_CONF_ENV_FILE output: ~p",
+                      [Line]),
+                    %% NOTE(review): this discards every variable parsed
+                    %% so far, not just the offending line — confirm the
+                    %% all-or-nothing behavior is intentional.
+                    #{}
+            end
+    end.
+
+%% Returns true for `set -x' trace lines, which the shell prefixes with
+%% one or more "+" characters followed by a space.
+is_sh_set_x_output(Line) ->
+    re:run(Line, "^\\++ ", [{capture, none}]) =:= match.
+
+%% Returns true when the current line starts a shell function definition
+%% in the `set' dump: a line ending in "()" followed by a line
+%% containing only "{".
+is_sh_function(_, []) ->
+    false;
+is_sh_function(Line, Lines) ->
+    re:run(Line, "\\s\\(\\)\\s*$", [{capture, none}]) =:= match
+    andalso
+    re:run(hd(Lines), "^\\s*\\{\\s*$", [{capture, none}]) =:= match.
+
+%% Parses the value part of a `Name=Value' line from the `set' dump.
+%% The value may be single-quoted, double-quoted, $'…'-quoted or
+%% unquoted; quoted values may continue over the following lines.
+%% `Literal' is the accumulator, built in reverse order. Returns the
+%% parsed value plus the lines remaining after it.
+parse_sh_literal("'" ++ SingleQuoted, Lines, Literal) ->
+    parse_single_quoted_literal(SingleQuoted, Lines, Literal);
+parse_sh_literal("\"" ++ DoubleQuoted, Lines, Literal) ->
+    parse_double_quoted_literal(DoubleQuoted, Lines, Literal);
+parse_sh_literal("$'" ++ DollarSingleQuoted, Lines, Literal) ->
+    parse_dollar_single_quoted_literal(DollarSingleQuoted, Lines, Literal);
+parse_sh_literal(Unquoted, Lines, Literal) ->
+    %% Unquoted tail: also the common exit path after a closing quote,
+    %% where the rest of the line is appended verbatim.
+    {lists:reverse(Literal) ++ Unquoted, Lines}.
+
+%% Consumes characters until the closing single quote, then returns to
+%% parse_sh_literal/3 for whatever follows on the same line.
+parse_single_quoted_literal([$' | Rest], Lines, Literal) ->
+    %% We reached the closing single quote.
+    parse_sh_literal(Rest, Lines, Literal);
+parse_single_quoted_literal([], [Line | Lines], Literal) ->
+    %% We reached the end of line before finding the closing single
+    %% quote. The literal continues on the next line and includes that
+    %% newline character.
+    parse_single_quoted_literal(Line, Lines, [$\n | Literal]);
+parse_single_quoted_literal([C | Rest], Lines, Literal) ->
+    parse_single_quoted_literal(Rest, Lines, [C | Literal]).
+
+parse_double_quoted_literal([$" | Rest], Lines, Literal) ->
+ %% We reached the closing double quote.
+ parse_sh_literal(Rest, Lines, Literal);
+parse_double_quoted_literal([], [Line | Lines], Literal) ->
+ %% We reached the end of line before finding the closing double
+ %% quote. The literal continues on the next line and includes that
+ %% newline character.
+ parse_double_quoted_literal(Line, Lines, [$\n | Literal]);
+parse_double_quoted_literal([C | Rest], Lines, Literal) ->
+ parse_double_quoted_literal(Rest, Lines, [C | Literal]).
+
+%% Consumes a $'…' (ANSI-C style) quoted value. Unlike the other two
+%% parsers, the closing quote must be the last character of the line and
+%% the value ends there; ANSI-C escape sequences are kept verbatim, not
+%% decoded.
+parse_dollar_single_quoted_literal([$'], Lines, Literal) ->
+    %% We reached the closing single quote.
+    {lists:reverse(Literal), Lines};
+parse_dollar_single_quoted_literal([], [Line | Lines], Literal) ->
+    %% We reached the end of line before finding the closing single
+    %% quote. The literal continues on the next line and includes that
+    %% newline character.
+    parse_dollar_single_quoted_literal(Line, Lines, [$\n | Literal]);
+parse_dollar_single_quoted_literal([C | Rest], Lines, Literal) ->
+    parse_dollar_single_quoted_literal(Rest, Lines, [C | Literal]).
+
+%% Skips a multi-line shell function body in the `set' dump; the body is
+%% assumed to end with a line containing exactly "}".
+skip_sh_function(["}" | Lines], Vars) ->
+    parse_conf_env_file_output2(Lines, Vars);
+skip_sh_function([_ | Lines], Vars) ->
+    skip_sh_function(Lines, Vars).
+
+%% -------------------------------------------------------------------
+%% Helpers.
+%% -------------------------------------------------------------------
+
+%% Returns the value of environment variable `VarName', or false if it
+%% is unset. An empty value is treated as unset unless the
+%% `keep_empty_string_as_is' option is given.
+get_env_var(VarName) ->
+    get_env_var(VarName, []).
+
+get_env_var(VarName, Options) ->
+    KeepEmptyString = lists:member(keep_empty_string_as_is, Options),
+    case os:getenv(VarName) of
+        false -> false;
+        "" when not KeepEmptyString -> false;
+        Value -> Value
+    end.
+
+%% Looks up `RABBITMQ_<Suffix>' first and falls back to the un-prefixed
+%% `<Suffix>' variable: the prefixed form takes precedence. The function
+%% head enforces that callers pass the prefixed name.
+get_prefixed_env_var(VarName) ->
+    get_prefixed_env_var(VarName, []).
+
+get_prefixed_env_var("RABBITMQ_" ++ Suffix = VarName,
+                     Options) ->
+    case get_env_var(VarName, Options) of
+        false -> get_env_var(Suffix, Options);
+        Value -> Value
+    end.
+
+%% Tells whether a variable (prefixed or not) is one RabbitMQ reads,
+%% i.e. it appears in ?USED_ENV_VARS. "HOME" is explicitly excluded —
+%% presumably so the user's $HOME is never treated as the un-prefixed
+%% form of $RABBITMQ_HOME; confirm against ?USED_ENV_VARS.
+var_is_used("RABBITMQ_" ++ _ = PrefixedVar) ->
+    lists:member(PrefixedVar, ?USED_ENV_VARS);
+var_is_used("HOME") ->
+    false;
+var_is_used(Var) ->
+    lists:member("RABBITMQ_" ++ Var, ?USED_ENV_VARS).
+
+%% Tells whether the variable is already set in the current environment.
+%%
+%% The $RABBITMQ_* variables have precedence over their un-prefixed equivalent.
+%% Therefore, when we check if $RABBITMQ_* is set, we only look at this
+%% variable. However, when we check if an un-prefixed variable is set, we first
+%% look at its $RABBITMQ_* variant.
+var_is_set("RABBITMQ_" ++ _ = PrefixedVar) ->
+    os:getenv(PrefixedVar) /= false;
+var_is_set(Var) ->
+    os:getenv("RABBITMQ_" ++ Var) /= false orelse
+    os:getenv(Var) /= false.
+
+%% Interprets common truthy strings ("1", "yes", "true";
+%% case-insensitive, surrounding whitespace ignored) as true. Any other
+%% value — or any non-string/non-binary term — is false.
+value_is_yes(Value) when is_list(Value) orelse is_binary(Value) ->
+    Options = [{capture, none}, caseless],
+    re:run(string:trim(Value), "^(1|yes|true)$", Options) =:= match;
+value_is_yes(_) ->
+    false.
+
+%% Normalizes directory separators in a path by splitting it into
+%% components and re-joining them. The empty path is special-cased
+%% because filename:join/1 would fail on an empty component list.
+normalize_path("" = Path) ->
+    Path;
+normalize_path(Path) ->
+    filename:join(filename:split(Path)).
+
+%% Returns the directory containing this module's beam file; used to
+%% locate RabbitMQ directories relative to the running code.
+this_module_dir() ->
+    File = code:which(?MODULE),
+    %% Possible locations:
+    %%   - the rabbit_common plugin (as an .ez archive):
+    %%     .../plugins/rabbit_common-$version.ez/rabbit_common-$version/ebin
+    %%   - the rabbit_common plugin (as a directory):
+    %%     .../plugins/rabbit_common-$version/ebin
+    %%   - the CLI:
+    %%     .../escript/$cli
+    filename:dirname(File).
+
+%% Starts Erlang distribution when the context asks for configuration to
+%% be queried from a remote node. If RabbitMQ turns out not to be loaded
+%% on that node, distribution is stopped again and the context is
+%% downgraded to `offline'. A context without `from_remote_node' is
+%% returned untouched.
+maybe_setup_dist_for_remote_query(
+  #{from_remote_node := offline} = Context) ->
+    Context;
+maybe_setup_dist_for_remote_query(
+  #{from_remote_node := {RemoteNode, _}} = Context) ->
+    {NamePart, HostPart} = rabbit_nodes_common:parts(RemoteNode),
+    NameType = rabbit_nodes_common:name_type(RemoteNode),
+    ok = rabbit_nodes_common:ensure_epmd(),
+    Context1 = setup_dist_for_remote_query(
+                 Context, NamePart, HostPart, NameType, 50),
+    case is_rabbitmq_loaded_on_remote_node(Context1) of
+        true  -> Context1;
+        false -> maybe_stop_dist_for_remote_query(
+                   update_context(Context, from_remote_node, offline))
+    end;
+maybe_setup_dist_for_remote_query(Context) ->
+    Context.
+
+%% Starts a throw-away distributed node name so we can RPC the remote
+%% node, retrying up to `Attempts' times (50 from the caller).
+setup_dist_for_remote_query(
+  #{dist_started_for_remote_query := true} = Context,
+  _, _, _, _) ->
+    %% Already started by us for a previous query: nothing to do.
+    Context;
+setup_dist_for_remote_query(Context, _, _, _, 0) ->
+    %% All attempts exhausted; give up silently — the caller will detect
+    %% the failure when the remote query itself fails.
+    Context;
+setup_dist_for_remote_query(#{from_remote_node := {Remote, _}} = Context,
+                            NamePart, HostPart, NameType,
+                            Attempts) ->
+    %% A random suffix is appended to the node name to avoid clashes
+    %% with concurrent CLI invocations.
+    RndNamePart = NamePart ++ "_ctl_" ++ integer_to_list(rand:uniform(100)),
+    Nodename = rabbit_nodes_common:make({RndNamePart, HostPart}),
+    case net_kernel:start([Nodename, NameType]) of
+        {ok, _} ->
+            update_context(Context, dist_started_for_remote_query, true);
+        {error, {already_started, _}} ->
+            %% Distribution was started before us (not by us): leave
+            %% `dist_started_for_remote_query' unset so we do not stop
+            %% it later in maybe_stop_dist_for_remote_query/1.
+            Context;
+        {error, {{already_started, _}, _}} ->
+            Context;
+        Error ->
+            logger:error(
+              "rabbit_env: Failed to setup distribution (as ~s) to "
+              "query node ~s: ~p",
+              [Nodename, Remote, Error]),
+            setup_dist_for_remote_query(Context,
+                                        NamePart, HostPart, NameType,
+                                        Attempts - 1)
+    end.
+
+%% Returns true when both the `mnesia' and `rabbit' applications are
+%% loaded on the remote node; any query failure counts as false.
+is_rabbitmq_loaded_on_remote_node(
+  #{from_remote_node := Remote}) ->
+    case query_remote(Remote, application, loaded_applications, []) of
+        {ok, Apps} ->
+            lists:keymember(mnesia, 1, Apps) andalso
+            lists:keymember(rabbit, 1, Apps);
+        _ ->
+            false
+    end.
+
+%% Stops distribution, but only if it was started by
+%% setup_dist_for_remote_query/5 (tracked by the
+%% `dist_started_for_remote_query' flag, which is removed here).
+maybe_stop_dist_for_remote_query(
+  #{dist_started_for_remote_query := true} = Context) ->
+    net_kernel:stop(),
+    maps:remove(dist_started_for_remote_query, Context);
+maybe_stop_dist_for_remote_query(Context) ->
+    Context.
+
+%% Performs an RPC on the remote node. `{badrpc, nodedown}' is returned
+%% to the caller as-is; any other RPC-level failure is thrown. A
+%% successful call is wrapped in `{ok, Result}'. Note: a remote function
+%% legitimately returning a `{badrpc, _}' tuple would be
+%% indistinguishable from an RPC failure here.
+query_remote({RemoteNode, Timeout}, Mod, Func, Args) ->
+    Ret = rpc:call(RemoteNode, Mod, Func, Args, Timeout),
+    case Ret of
+        {badrpc, nodedown} = Error -> Error;
+        {badrpc, _} = Error        -> throw({query, RemoteNode, Error});
+        _                          -> {ok, Ret}
+    end.
diff --git a/deps/rabbit_common/src/rabbit_error_logger_handler.erl b/deps/rabbit_common/src/rabbit_error_logger_handler.erl
new file mode 100644
index 0000000000..714790a449
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_error_logger_handler.erl
@@ -0,0 +1,169 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_error_logger_handler).
+
+-behaviour(gen_event).
+
+%% A gen_event handler that accumulates human-readable diagnostics about
+%% Erlang distribution (net_kernel) failures, so that connection errors
+%% can later be reported with actionable hints (fetched via the
+%% `get_connection_report' call below).
+
+%% API
+-export([start_link/0, add_handler/0]).
+
+%% gen_event callbacks
+-export([init/1, handle_event/2, handle_call/2,
+         handle_info/2, terminate/2, code_change/3]).
+
+-define(SERVER, ?MODULE).
+
+%% `report' is a list of {Format, Args} pairs, most recent first.
+-record(state, {report = []}).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Creates an event manager
+%%
+%% @spec start_link() -> {ok, Pid} | {error, Error}
+%% @end
+%%--------------------------------------------------------------------
+start_link() ->
+    gen_event:start_link({local, ?SERVER}).
+
+%%--------------------------------------------------------------------
+%% @doc
+%% Adds an event handler
+%%
+%% @spec add_handler() -> ok | {'EXIT', Reason} | term()
+%% @end
+%%--------------------------------------------------------------------
+add_handler() ->
+    gen_event:add_handler(?SERVER, ?MODULE, []).
+
+%%%===================================================================
+%%% gen_event callbacks
+%%%===================================================================
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Whenever a new event handler is added to an event manager,
+%% this function is called to initialize the event handler.
+%%
+%% @spec init(Args) -> {ok, State}
+%% @end
+%%--------------------------------------------------------------------
+init([]) ->
+    {ok, #state{}}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Whenever an event manager receives an event sent using
+%% gen_event:notify/2 or gen_event:sync_notify/2, this function is
+%% called for each installed event handler to handle the event.
+%%
+%% Only info reports coming from net_kernel about a failed connection
+%% attempt ({'EXIT', _, Reason}) are retained; reasons format/1 does not
+%% recognize, and all other events, are ignored.
+%%
+%% @spec handle_event(Event, State) ->
+%%                          {ok, State} |
+%%                          {swap_handler, Args1, State1, Mod2, Args2} |
+%%                          remove_handler
+%% @end
+%%--------------------------------------------------------------------
+
+handle_event({info_report, _Gleader, {_Pid, _Type,
+                                      {net_kernel, {'EXIT', _, Reason}}}},
+             #state{report = Report} = State) ->
+    NewReport = case format(Reason) of
+                    [] -> Report;
+                    Formatted -> [Formatted | Report]
+                end,
+    {ok, State#state{report = NewReport}};
+handle_event(_Event, State) ->
+    {ok, State}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Whenever an event manager receives a request sent using
+%% gen_event:call/3,4, this function is called for the specified
+%% event handler to handle the request.
+%%
+%% `get_connection_report' returns the accumulated report entries in
+%% chronological order and resets the accumulator.
+%%
+%% @spec handle_call(Request, State) ->
+%%                   {ok, Reply, State} |
+%%                   {swap_handler, Reply, Args1, State1, Mod2, Args2} |
+%%                   {remove_handler, Reply}
+%% @end
+%%--------------------------------------------------------------------
+handle_call(get_connection_report, State) ->
+    {ok, lists:reverse(State#state.report), State#state{report = []}};
+handle_call(_Request, State) ->
+    Reply = ok,
+    {ok, Reply, State}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% This function is called for each installed event handler when
+%% an event manager receives any other message than an event or a
+%% synchronous request (or a system message).
+%%
+%% @spec handle_info(Info, State) ->
+%%                         {ok, State} |
+%%                         {swap_handler, Args1, State1, Mod2, Args2} |
+%%                         remove_handler
+%% @end
+%%--------------------------------------------------------------------
+handle_info(_Info, State) ->
+    {ok, State}.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Whenever an event handler is deleted from an event manager, this
+%% function is called. It should be the opposite of Module:init/1 and
+%% do any necessary cleaning up.
+%%
+%% @spec terminate(Reason, State) -> void()
+%% @end
+%%--------------------------------------------------------------------
+terminate(_Reason, _State) ->
+    ok.
+
+%%--------------------------------------------------------------------
+%% @private
+%% @doc
+%% Convert process state when code is changed
+%%
+%% @spec code_change(OldVsn, State, Extra) -> {ok, NewState}
+%% @end
+%%--------------------------------------------------------------------
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+
+%% Maps a net_kernel failure reason to a {Format, Args} pair carrying a
+%% human-readable hint, or [] for unrecognized reasons.
+format({check_dflag_xnc_failed, _What}) ->
+    {"  * Remote node uses an incompatible Erlang version ~n", []};
+format({recv_challenge_failed, no_node, Node}) ->
+    {"  * Node name (or hostname) mismatch: node ~p believes its node name is not ~p but something else.~n"
+     "    All nodes and CLI tools must refer to node ~p using the same name the node itself uses (see its logs to find out what it is)~n",
+     [Node, Node, Node]};
+format({recv_challenge_failed, Error}) ->
+    {"  * Distribution failed unexpectedly while waiting for challenge: ~p~n", [Error]};
+format({recv_challenge_ack_failed, bad_cookie}) ->
+    {"  * Authentication failed (rejected by the local node), please check the Erlang cookie~n", []};
+format({recv_challenge_ack_failed, {error, closed}}) ->
+    {"  * Authentication failed (rejected by the remote node), please check the Erlang cookie~n", []};
+format({recv_status_failed, not_allowed}) ->
+    {"  * This node is not on the list of nodes authorised by remote node (see net_kernel:allow/1)~n", []};
+format({recv_status_failed, {error, closed}}) ->
+    {"  * Remote host closed TCP connection before completing authentication. Is the Erlang distribution using TLS?~n", []};
+format(setup_timer_timeout) ->
+    {"  * TCP connection to remote host has timed out. Is the Erlang distribution using TLS?~n", []};
+format(_) ->
+    [].
diff --git a/deps/rabbit_common/src/rabbit_event.erl b/deps/rabbit_common/src/rabbit_event.erl
new file mode 100644
index 0000000000..152335958a
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_event.erl
@@ -0,0 +1,164 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_event).
+
+%% Emits management events (created/deleted/stats/...) through a
+%% gen_event manager, and provides a small "stats timer" state machine
+%% that stat-emitting processes embed into their own state tuples (see
+%% the usage walk-through above init_stats_timer/2).
+
+-include("rabbit.hrl").
+
+-export([start_link/0]).
+-export([init_stats_timer/2, init_disabled_stats_timer/2,
+         ensure_stats_timer/3, stop_stats_timer/2, reset_stats_timer/2]).
+-export([stats_level/2, if_enabled/3]).
+-export([notify/2, notify/3, notify_if/3]).
+-export([sync_notify/2, sync_notify/3]).
+
+-ignore_xref([{gen_event, start_link, 2}]).
+-dialyzer([{no_missing_calls, start_link/0}]).
+
+%%----------------------------------------------------------------------------
+
+%% Stats timer state, stored at a caller-chosen position of the caller's
+%% own state tuple.
+-record(state, {level, interval, timer}).
+
+%%----------------------------------------------------------------------------
+
+-export_type([event_type/0, event_props/0, event_timestamp/0, event/0]).
+
+-type event_type() :: atom().
+-type event_props() :: term().
+-type event_timestamp() :: non_neg_integer().
+
+-type event() :: #event { type      :: event_type(),
+                          props     :: event_props(),
+                          reference :: 'none' | reference(),
+                          timestamp :: event_timestamp() }.
+
+-type level() :: 'none' | 'coarse' | 'fine'.
+
+-type timer_fun() :: fun (() -> 'ok').
+-type container() :: tuple().
+-type pos() :: non_neg_integer().
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+-spec init_stats_timer(container(), pos()) -> container().
+-spec init_disabled_stats_timer(container(), pos()) -> container().
+-spec ensure_stats_timer(container(), pos(), term()) -> container().
+-spec stop_stats_timer(container(), pos()) -> container().
+-spec reset_stats_timer(container(), pos()) -> container().
+-spec stats_level(container(), pos()) -> level().
+-spec if_enabled(container(), pos(), timer_fun()) -> 'ok'.
+-spec notify(event_type(), event_props()) -> 'ok'.
+-spec notify(event_type(), event_props(), reference() | 'none') -> 'ok'.
+-spec notify_if(boolean(), event_type(), event_props()) -> 'ok'.
+-spec sync_notify(event_type(), event_props()) -> 'ok'.
+-spec sync_notify(event_type(), event_props(), reference() | 'none') -> 'ok'.
+
+%%----------------------------------------------------------------------------
+
+start_link() ->
+    %% gen_event:start_link/2 is not available before OTP 20
+    %% RabbitMQ 3.7 supports OTP >= 19.3
+    case erlang:function_exported(gen_event, start_link, 2) of
+        true ->
+            %% fullsweep_after=0 forces a full GC on every collection —
+            %% presumably to keep this long-lived event manager's heap
+            %% small; confirm before changing.
+            gen_event:start_link(
+              {local, ?MODULE},
+              [{spawn_opt, [{fullsweep_after, 0}]}]
+            );
+        false ->
+            gen_event:start_link({local, ?MODULE})
+    end.
+
+%% The idea is, for each stat-emitting object:
+%%
+%% On startup:
+%%   init_stats_timer(State)
+%%   notify(created event)
+%%   if_enabled(internal_emit_stats) - so we immediately send something
+%%
+%% On wakeup:
+%%   ensure_stats_timer(State, emit_stats)
+%%   (Note we can't emit stats immediately, the timer may have fired 1ms ago.)
+%%
+%% emit_stats:
+%%   if_enabled(internal_emit_stats)
+%%   reset_stats_timer(State) - just bookkeeping
+%%
+%% Pre-hibernation:
+%%   if_enabled(internal_emit_stats)
+%%   stop_stats_timer(State)
+%%
+%% internal_emit_stats:
+%%   notify(stats)
+
+init_stats_timer(C, P) ->
+    %% If the rabbit app is not loaded - use default none:5000
+    StatsLevel = application:get_env(rabbit, collect_statistics, none),
+    Interval   = application:get_env(rabbit, collect_statistics_interval, 5000),
+    setelement(P, C, #state{level = StatsLevel, interval = Interval,
+                            timer = undefined}).
+
+init_disabled_stats_timer(C, P) ->
+    setelement(P, C, #state{level = none, interval = 0, timer = undefined}).
+
+%% Starts a one-shot timer delivering `Msg' to the calling process, but
+%% only when stats are enabled and no timer is already pending.
+ensure_stats_timer(C, P, Msg) ->
+    case element(P, C) of
+        #state{level = Level, interval = Interval, timer = undefined} = State
+          when Level =/= none ->
+            TRef = erlang:send_after(Interval, self(), Msg),
+            setelement(P, C, State#state{timer = TRef});
+        #state{} ->
+            C
+    end.
+
+%% Cancels a pending timer. If cancel_timer/1 returns false the timer
+%% already fired (the timeout message is, or soon will be, in the
+%% mailbox), so the state is deliberately left unchanged.
+stop_stats_timer(C, P) ->
+    case element(P, C) of
+        #state{timer = TRef} = State when TRef =/= undefined ->
+            case erlang:cancel_timer(TRef) of
+                false -> C;
+                _     -> setelement(P, C, State#state{timer = undefined})
+            end;
+        #state{} ->
+            C
+    end.
+
+%% Bookkeeping after the timeout message has been handled: forget the
+%% (already fired) timer reference.
+reset_stats_timer(C, P) ->
+    case element(P, C) of
+        #state{timer = TRef} = State when TRef =/= undefined ->
+            setelement(P, C, State#state{timer = undefined});
+        #state{} ->
+            C
+    end.
+
+stats_level(C, P) ->
+    #state{level = Level} = element(P, C),
+    Level.
+
+%% Runs Fun (for its side effects) unless stats collection is disabled.
+if_enabled(C, P, Fun) ->
+    case element(P, C) of
+        #state{level = none} -> ok;
+        #state{}             -> Fun(), ok
+    end.
+
+notify_if(true,   Type, Props) -> notify(Type, Props);
+notify_if(false, _Type, _Props) -> ok.
+
+notify(Type, Props) -> notify(Type, rabbit_data_coercion:to_proplist(Props), none).
+
+notify(Type, Props, Ref) ->
+    %% Using {Name, node()} here to not fail if the event handler is not started
+    gen_event:notify({?MODULE, node()}, event_cons(Type, rabbit_data_coercion:to_proplist(Props), Ref)).
+
+sync_notify(Type, Props) -> sync_notify(Type, Props, none).
+
+%% NOTE(review): unlike notify/3, sync_notify/3 neither coerces Props to
+%% a proplist nor addresses the manager as {?MODULE, node()} — confirm
+%% this asymmetry is intentional.
+sync_notify(Type, Props, Ref) ->
+    gen_event:sync_notify(?MODULE, event_cons(Type, Props, Ref)).
+
+event_cons(Type, Props, Ref) ->
+    %% `milli_seconds' is the deprecated alias of `millisecond'; kept
+    %% for compatibility with older OTP releases.
+    #event{type      = Type,
+           props     = Props,
+           reference = Ref,
+           timestamp = os:system_time(milli_seconds)}.
+
diff --git a/deps/rabbit_common/src/rabbit_exchange_type.erl b/deps/rabbit_common/src/rabbit_exchange_type.erl
new file mode 100644
index 0000000000..ebd5cadbdb
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_exchange_type.erl
@@ -0,0 +1,68 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_exchange_type).
+
+%% Behaviour that every exchange type implementation (direct, fanout,
+%% topic, plugin-provided types, ...) must implement. The module is also
+%% a rabbit_registry_class; its registry hooks at the bottom are
+%% deliberate no-ops for exchange types.
+
+-behaviour(rabbit_registry_class).
+
+-export([added_to_rabbit_registry/2, removed_from_rabbit_registry/1]).
+
+-type(tx() :: 'transaction' | 'none').
+-type(serial() :: pos_integer() | tx()).
+
+-callback description() -> [proplists:property()].
+
+%% Should Rabbit ensure that all binding events that are
+%% delivered to an individual exchange can be serialised? (they
+%% might still be delivered out of order, but there'll be a
+%% serial number).
+-callback serialise_events() -> boolean().
+
+%% The no_return is there so that we can have an "invalid" exchange
+%% type (see rabbit_exchange_type_invalid).
+-callback route(rabbit_types:exchange(), rabbit_types:delivery()) ->
+    rabbit_router:match_result().
+
+%% called BEFORE declaration, to check args etc; may exit with #amqp_error{}
+-callback validate(rabbit_types:exchange()) -> 'ok'.
+
+%% called BEFORE declaration, to check args etc
+-callback validate_binding(rabbit_types:exchange(), rabbit_types:binding()) ->
+    rabbit_types:ok_or_error({'binding_invalid', string(), [any()]}).
+
+%% called after declaration and recovery
+-callback create(tx(), rabbit_types:exchange()) -> 'ok'.
+
+%% called after exchange (auto)deletion.
+-callback delete(tx(), rabbit_types:exchange(), [rabbit_types:binding()]) ->
+    'ok'.
+
+%% called when the policy attached to this exchange changes.
+-callback policy_changed(rabbit_types:exchange(), rabbit_types:exchange()) ->
+    'ok'.
+
+%% called after a binding has been added or recovered
+-callback add_binding(serial(), rabbit_types:exchange(),
+                      rabbit_types:binding()) -> 'ok'.
+
+%% called after bindings have been deleted.
+-callback remove_bindings(serial(), rabbit_types:exchange(),
+                          [rabbit_types:binding()]) -> 'ok'.
+
+%% called when comparing exchanges for equivalence - should return ok or
+%% exit with #amqp_error{}
+-callback assert_args_equivalence(rabbit_types:exchange(),
+                                  rabbit_framing:amqp_table()) ->
+    'ok' | rabbit_types:connection_exit().
+
+%% Exchange type specific info keys
+-callback info(rabbit_types:exchange()) -> [{atom(), term()}].
+
+-callback info(rabbit_types:exchange(), [atom()]) -> [{atom(), term()}].
+
+%% rabbit_registry_class hooks: exchange types need no extra work when
+%% (un)registered.
+added_to_rabbit_registry(_Type, _ModuleName) -> ok.
+removed_from_rabbit_registry(_Type) -> ok.
diff --git a/deps/rabbit_common/src/rabbit_heartbeat.erl b/deps/rabbit_common/src/rabbit_heartbeat.erl
new file mode 100644
index 0000000000..8dbc7f3887
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_heartbeat.erl
@@ -0,0 +1,184 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_heartbeat).
+
+%% Implements AMQP heartbeating: a pair of processes per connection, one
+%% sending heartbeat frames when the socket has been idle on the send
+%% side, the other running a timeout callback when nothing has been
+%% received for long enough.
+
+-export([start/6, start/7]).
+-export([start_heartbeat_sender/4, start_heartbeat_receiver/4,
+         pause_monitor/1, resume_monitor/1]).
+
+%% sys-protocol callbacks: the heartbeater loop is a special process
+%% (spawned via proc_lib) and must answer system messages itself.
+-export([system_continue/3, system_terminate/4, system_code_change/4]).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-export_type([heartbeaters/0]).
+
+-type heartbeaters() :: {rabbit_types:maybe(pid()), rabbit_types:maybe(pid())}.
+
+-type heartbeat_callback() :: fun (() -> any()).
+
+-export_type([heartbeat_timeout/0]).
+-type heartbeat_timeout() :: non_neg_integer().
+
+-spec start
+        (pid(), rabbit_net:socket(), heartbeat_timeout(), heartbeat_callback(),
+         heartbeat_timeout(), heartbeat_callback()) ->
+            heartbeaters().
+
+-spec start
+        (pid(), rabbit_net:socket(), rabbit_types:proc_name(),
+         heartbeat_timeout(), heartbeat_callback(), heartbeat_timeout(),
+         heartbeat_callback()) ->
+            heartbeaters().
+
+-spec start_heartbeat_sender
+        (rabbit_net:socket(), heartbeat_timeout(), heartbeat_callback(),
+         rabbit_types:proc_type_and_name()) ->
+            rabbit_types:ok(pid()).
+-spec start_heartbeat_receiver
+        (rabbit_net:socket(), heartbeat_timeout(), heartbeat_callback(),
+         rabbit_types:proc_type_and_name()) ->
+            rabbit_types:ok(pid()).
+
+-spec pause_monitor(heartbeaters()) -> 'ok'.
+-spec resume_monitor(heartbeaters()) -> 'ok'.
+
+-spec system_code_change(_,_,_,_) -> {'ok',_}.
+-spec system_continue(_,_,{_, _}) -> any().
+-spec system_terminate(_,_,_,_) -> no_return().
+
+%%----------------------------------------------------------------------------
+start(SupPid, Sock, SendTimeoutSec, SendFun, ReceiveTimeoutSec, ReceiveFun) ->
+    start(SupPid, Sock, unknown,
+          SendTimeoutSec, SendFun, ReceiveTimeoutSec, ReceiveFun).
+
+%% Starts the sender and receiver under the given supervisor; either may
+%% be `none' when its timeout is 0 (heartbeats disabled in that
+%% direction).
+start(SupPid, Sock, Identity,
+      SendTimeoutSec, SendFun, ReceiveTimeoutSec, ReceiveFun) ->
+    {ok, Sender} =
+        start_heartbeater(SendTimeoutSec, SupPid, Sock,
+                          SendFun, heartbeat_sender,
+                          start_heartbeat_sender, Identity),
+    {ok, Receiver} =
+        start_heartbeater(ReceiveTimeoutSec, SupPid, Sock,
+                          ReceiveFun, heartbeat_receiver,
+                          start_heartbeat_receiver, Identity),
+    {Sender, Receiver}.
+
+start_heartbeat_sender(Sock, TimeoutSec, SendFun, Identity) ->
+    %% the 'div 2' is there so that we don't end up waiting for nearly
+    %% 2 * TimeoutSec before sending a heartbeat in the boundary case
+    %% where the last message was sent just after a heartbeat.
+    heartbeater({Sock, TimeoutSec * 1000 div 2, send_oct, 0,
+                 fun () -> SendFun(), continue end}, Identity).
+
+start_heartbeat_receiver(Sock, TimeoutSec, ReceiveFun, Identity) ->
+    %% we check for incoming data every interval, and time out after
+    %% two checks with no change. As a result we will time out between
+    %% 2 and 3 intervals after the last data has been received.
+    heartbeater({Sock, TimeoutSec * 1000, recv_oct, 1,
+                 fun () -> ReceiveFun(), stop end}, Identity).
+
+%% Only the receiver can be paused/resumed (e.g. while the connection is
+%% blocked); `none' means heartbeats are disabled in that direction.
+pause_monitor({_Sender,     none}) -> ok;
+pause_monitor({_Sender, Receiver}) -> Receiver ! pause, ok.
+
+resume_monitor({_Sender,     none}) -> ok;
+resume_monitor({_Sender, Receiver}) -> Receiver ! resume, ok.
+
+system_continue(_Parent, Deb, {Params, State}) ->
+    heartbeater(Params, Deb, State).
+
+system_terminate(Reason, _Parent, _Deb, _State) ->
+    exit(Reason).
+
+system_code_change(Misc, _Module, _OldVsn, _Extra) ->
+    {ok, Misc}.
+
+%%----------------------------------------------------------------------------
+%% A timeout of 0 disables heartbeating in that direction; the `none'
+%% placeholder is understood by pause_monitor/1 and resume_monitor/1.
+start_heartbeater(0, _SupPid, _Sock, _TimeoutFun, _Name, _Callback,
+                  _Identity) ->
+    {ok, none};
+start_heartbeater(TimeoutSec, SupPid, Sock, TimeoutFun, Name, Callback,
+                  Identity) ->
+    supervisor2:start_child(
+      SupPid, {Name,
+               {rabbit_heartbeat, Callback,
+                [Sock, TimeoutSec, TimeoutFun, {Name, Identity}]},
+               transient, ?WORKER_WAIT, worker, [rabbit_heartbeat]}).
+
+heartbeater(Params, Identity) ->
+    Deb = sys:debug_options([]),
+    {ok, proc_lib:spawn_link(fun () ->
+                                     rabbit_misc:store_proc_name(Identity),
+                                     heartbeater(Params, Deb, {0, 0})
+                             end)}.
+
+%% Main loop. The state is {StatVal, SameCount}: the last socket
+%% statistic observed and how many consecutive checks it stayed
+%% unchanged. Every TimeoutMillisec the statistic is re-read; once it
+%% has been unchanged for more than Threshold checks, Handler is run
+%% (0 for the sender — fire on any idle interval; 1 for the receiver).
+heartbeater({Sock, TimeoutMillisec, StatName, Threshold, Handler} = Params,
+            Deb, {StatVal0, SameCount} = State) ->
+    Recurse = fun (State1) -> heartbeater(Params, Deb, State1) end,
+    System  = fun (From, Req) ->
+                      sys:handle_system_msg(
+                        Req, From, self(), ?MODULE, Deb, {Params, State})
+              end,
+    receive
+        pause ->
+            receive
+                resume              -> Recurse({0, 0});
+                {system, From, Req} -> System(From, Req);
+                Other               -> exit({unexpected_message, Other})
+            end;
+        {system, From, Req} ->
+            System(From, Req);
+        Other ->
+            exit({unexpected_message, Other})
+    after TimeoutMillisec ->
+              OkFun = fun(StatVal1) ->
+                              if StatVal0 =:= 0 andalso StatName =:= send_oct ->
+                                     % Note: this clause is necessary to ensure the
+                                     % first RMQ -> client heartbeat is sent at the
+                                     % first interval, instead of waiting the first
+                                     % two intervals
+                                     {run_handler, {StatVal1, 0}};
+                                 StatVal1 =/= StatVal0 ->
+                                     {recurse, {StatVal1, 0}};
+                                 SameCount < Threshold ->
+                                     {recurse, {StatVal1, SameCount +1}};
+                                 true ->
+                                     {run_handler, {StatVal1, 0}}
+                              end
+                      end,
+              SSResult = get_sock_stats(Sock, StatName, OkFun),
+              handle_get_sock_stats(SSResult, Sock, StatName, Recurse, Handler)
+    end.
+
+%% Dispatches on the result of get_sock_stats/3: stop, keep looping, or
+%% run the heartbeat handler. A handler returning `continue' (the
+%% sender) re-reads the statistic immediately so the bytes written by
+%% the heartbeat itself are accounted for.
+handle_get_sock_stats(stop, _Sock, _StatName, _Recurse, _Handler) ->
+    ok;
+handle_get_sock_stats({recurse, RecurseArg}, _Sock, _StatName, Recurse, _Handler) ->
+    Recurse(RecurseArg);
+handle_get_sock_stats({run_handler, {_, SameCount}}, Sock, StatName, Recurse, Handler) ->
+    case Handler() of
+        stop     -> ok;
+        continue ->
+            OkFun = fun(StatVal) ->
+                            {recurse, {StatVal, SameCount}}
+                    end,
+            SSResult = get_sock_stats(Sock, StatName, OkFun),
+            handle_get_sock_stats(SSResult, Sock, StatName, Recurse, Handler)
+    end.
+
+%% Reads the requested socket statistic and applies OkFun to it; einval
+%% means the socket is gone and the heartbeater should stop quietly.
+get_sock_stats(Sock, StatName, OkFun) ->
+    case rabbit_net:getstat(Sock, [StatName]) of
+        {ok, [{StatName, StatVal}]} ->
+            OkFun(StatVal);
+        {error, einval} ->
+            %% the socket is dead, most likely because the
+            %% connection is being shut down -> terminate
+            stop;
+        {error, Reason} ->
+            exit({cannot_get_socket_stats, Reason})
+    end.
diff --git a/deps/rabbit_common/src/rabbit_http_util.erl b/deps/rabbit_common/src/rabbit_http_util.erl
new file mode 100644
index 0000000000..d0ff498110
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_http_util.erl
@@ -0,0 +1,967 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2007 Mochi Media, Inc.
+
+%% @doc Utilities for parsing and quoting.
+
+-module(rabbit_http_util).
+-author('bob@mochimedia.com').
+-export([join/2, quote_plus/1, urlencode/1, parse_qs/1, unquote/1]).
+-export([path_split/1]).
+-export([urlsplit/1, urlsplit_path/1, urlunsplit/1, urlunsplit_path/1]).
+-export([parse_header/1]).
+-export([shell_quote/1, cmd/1, cmd_string/1, cmd_port/2, cmd_status/1, cmd_status/2]).
+-export([record_to_proplist/2, record_to_proplist/3]).
+-export([safe_relative_path/1, partition/2]).
+-export([parse_qvalues/1, pick_accepted_encodings/3]).
+-export([make_io/1]).
+
+-define(PERCENT, 37). % $\%
+-define(FULLSTOP, 46). % $\.
+-define(IS_HEX(C), ((C >= $0 andalso C =< $9) orelse
+ (C >= $a andalso C =< $f) orelse
+ (C >= $A andalso C =< $F))).
+-define(QS_SAFE(C), ((C >= $a andalso C =< $z) orelse
+ (C >= $A andalso C =< $Z) orelse
+ (C >= $0 andalso C =< $9) orelse
+ (C =:= ?FULLSTOP orelse C =:= $- orelse C =:= $~ orelse
+ C =:= $_))).
+
+%% Map a 4-bit value (0..15) to its uppercase hex character.
+hexdigit(C) when C < 10 -> $0 + C;
+hexdigit(C) when C < 16 -> $A + (C - 10).
+
+%% Inverse of hexdigit/1; accepts both upper- and lower-case a-f.
+unhexdigit(C) when C >= $0, C =< $9 -> C - $0;
+unhexdigit(C) when C >= $a, C =< $f -> C - $a + 10;
+unhexdigit(C) when C >= $A, C =< $F -> C - $A + 10.
+
+%% @spec partition(String, Sep) -> {String, [], []} | {Prefix, Sep, Postfix}
+%% @doc Inspired by Python 2.5's str.partition:
+%%      partition("foo/bar", "/") = {"foo", "/", "bar"},
+%%      partition("foo", "/") = {"foo", "", ""}.
+partition(String, Sep) ->
+    case partition(String, Sep, []) of
+        undefined ->
+            %% Sep never occurred: whole input is the prefix.
+            {String, "", ""};
+        Result ->
+            Result
+    end.
+
+%% Scan left-to-right, accumulating the prefix in reverse, until Sep
+%% matches at the current position (or the input is exhausted).
+partition("", _Sep, _Acc) ->
+    undefined;
+partition(S, Sep, Acc) ->
+    case partition2(S, Sep) of
+        undefined ->
+            [C | Rest] = S,
+            partition(Rest, Sep, [C | Acc]);
+        Rest ->
+            {lists:reverse(Acc), Sep, Rest}
+    end.
+
+%% Prefix match: returns what follows Sep if S starts with Sep,
+%% otherwise undefined.
+partition2(Rest, "") ->
+    Rest;
+partition2([C | R1], [C | R2]) ->
+    partition2(R1, R2);
+partition2(_S, _Sep) ->
+    undefined.
+
+
+
+%% @spec safe_relative_path(string()) -> string() | undefined
+%% @doc Return the reduced version of a relative path or undefined if it
+%%      is not safe. safe relative paths can be joined with an absolute path
+%%      and will result in a subdirectory of the absolute path. Safe paths
+%%      never contain a backslash character.
+safe_relative_path("/" ++ _) ->
+    %% Absolute paths are never safe relative paths.
+    undefined;
+safe_relative_path(P) ->
+    %% Note: lists:member/2 replaces the long-deprecated string:chr/2;
+    %% any backslash anywhere in the path makes it unsafe.
+    case lists:member($\\, P) of
+        false ->
+            safe_relative_path(P, []);
+        true ->
+            undefined
+    end.
+
+%% Reduce the path segment by segment. Acc holds already-accepted
+%% segments in reverse; ".." pops the most recent one and is unsafe if
+%% there is nothing left to pop. A trailing "/" is preserved by pushing
+%% an empty segment.
+safe_relative_path("", Acc) ->
+    case Acc of
+        [] ->
+            "";
+        _ ->
+            string:join(lists:reverse(Acc), "/")
+    end;
+safe_relative_path(P, Acc) ->
+    case partition(P, "/") of
+        {"", "/", _} ->
+            %% /foo or foo//bar
+            undefined;
+        {"..", _, _} when Acc =:= [] ->
+            undefined;
+        {"..", _, Rest} ->
+            safe_relative_path(Rest, tl(Acc));
+        {Part, "/", ""} ->
+            safe_relative_path("", ["", Part | Acc]);
+        {Part, _, Rest} ->
+            safe_relative_path(Rest, [Part | Acc])
+    end.
+
+%% @spec shell_quote(string()) -> string()
+%% @doc Quote a string according to UNIX shell quoting rules, returns a string
+%%      surrounded by double quotes.
+shell_quote(L) ->
+    shell_quote(L, [$\"]).
+
+%% @spec cmd_port([string()], Options) -> port()
+%% @doc open_port({spawn, mochiweb_util:cmd_string(Argv)}, Options).
+cmd_port(Argv, Options) ->
+    open_port({spawn, cmd_string(Argv)}, Options).
+
+%% @spec cmd([string()]) -> string()
+%% @doc os:cmd(cmd_string(Argv)). Arguments are shell-quoted by
+%%      cmd_string/1 before being handed to the shell.
+cmd(Argv) ->
+    os:cmd(cmd_string(Argv)).
+
+%% @spec cmd_string([string()]) -> string()
+%% @doc Create a shell quoted command string from a list of arguments.
+cmd_string(Argv) ->
+    string:join([shell_quote(X) || X <- Argv], " ").
+
+%% @spec cmd_status([string()]) -> {ExitStatus::integer(), Stdout::binary()}
+%% @doc Accumulate the output and exit status from the given application,
+%%      will be spawned with cmd_port/2.
+cmd_status(Argv) ->
+    cmd_status(Argv, []).
+
+%% @spec cmd_status([string()], [atom()]) -> {ExitStatus::integer(), Stdout::binary()}
+%% @doc Accumulate the output and exit status from the given application,
+%%      will be spawned with cmd_port/2.
+cmd_status(Argv, Options) ->
+    Port = cmd_port(Argv, [exit_status, stderr_to_stdout,
+                           use_stdio, binary | Options]),
+    %% Always close the port, even if cmd_loop exits abnormally.
+    try cmd_loop(Port, [])
+    after catch port_close(Port)
+    end.
+
+%% @spec cmd_loop(port(), list()) -> {ExitStatus::integer(), Stdout::binary()}
+%% @doc Accumulate the output and exit status from a port.
+%%      Note: no `after' timeout -- relies on the port eventually
+%%      delivering exit_status.
+cmd_loop(Port, Acc) ->
+    receive
+        {Port, {exit_status, Status}} ->
+            {Status, iolist_to_binary(lists:reverse(Acc))};
+        {Port, {data, Data}} ->
+            cmd_loop(Port, [Data | Acc])
+    end.
+
+%% @spec join([iolist()], iolist()) -> iolist()
+%% @doc Join a list of strings or binaries together with the given separator
+%%      string or char or binary. The output is flattened, but may be an
+%%      iolist() instead of a string() if any of the inputs are binary().
+join([], _Separator) ->
+    [];
+join([S], _Separator) ->
+    lists:flatten(S);
+join(Strings, Separator) ->
+    lists:flatten(revjoin(lists:reverse(Strings), Separator, [])).
+
+%% Interleave Separator between elements; input is pre-reversed so the
+%% accumulator comes out in original order.
+revjoin([], _Separator, Acc) ->
+    Acc;
+revjoin([S | Rest], Separator, []) ->
+    revjoin(Rest, Separator, [S]);
+revjoin([S | Rest], Separator, Acc) ->
+    revjoin(Rest, Separator, [S, Separator | Acc]).
+
+%% @spec quote_plus(atom() | integer() | float() | string() | binary()) -> string()
+%% @doc URL safe encoding of the given term. Non-list inputs are first
+%%      converted to a string (floats via rabbit_numerical:digits/1).
+quote_plus(Atom) when is_atom(Atom) ->
+    quote_plus(atom_to_list(Atom));
+quote_plus(Int) when is_integer(Int) ->
+    quote_plus(integer_to_list(Int));
+quote_plus(Binary) when is_binary(Binary) ->
+    quote_plus(binary_to_list(Binary));
+quote_plus(Float) when is_float(Float) ->
+    quote_plus(rabbit_numerical:digits(Float));
+quote_plus(String) ->
+    quote_plus(String, []).
+
+%% Safe characters pass through, space becomes '+', everything else is
+%% percent-encoded.
+quote_plus([], Acc) ->
+    lists:reverse(Acc);
+quote_plus([C | Rest], Acc) when ?QS_SAFE(C) ->
+    quote_plus(Rest, [C | Acc]);
+quote_plus([$\s | Rest], Acc) ->
+    quote_plus(Rest, [$+ | Acc]);
+quote_plus([C | Rest], Acc) ->
+    %% Split the byte into high/low nibbles for %XX encoding.
+    <<Hi:4, Lo:4>> = <<C>>,
+    quote_plus(Rest, [hexdigit(Lo), hexdigit(Hi), ?PERCENT | Acc]).
+
+%% @spec urlencode([{Key, Value}]) -> string()
+%% @doc URL encode the property list into "k1=v1&k2=v2" form; keys and
+%%      values go through quote_plus/1.
+urlencode(Props) ->
+    Encoded = [quote_plus(Key) ++ "=" ++ quote_plus(Value)
+               || {Key, Value} <- Props],
+    string:join(Encoded, "&").
+
+%% @spec parse_qs(string() | binary()) -> [{Key, Value}]
+%% @doc Parse a query string or application/x-www-form-urlencoded.
+%%      Both '&' and ';' are accepted as pair separators.
+parse_qs(Binary) when is_binary(Binary) ->
+    parse_qs(binary_to_list(Binary));
+parse_qs(String) ->
+    parse_qs(String, []).
+
+parse_qs([], Acc) ->
+    lists:reverse(Acc);
+parse_qs(String, Acc) ->
+    {Key, Rest} = parse_qs_key(String),
+    {Value, Rest1} = parse_qs_value(Rest),
+    parse_qs(Rest1, [{Key, Value} | Acc]).
+
+%% Consume up to '=', ';' or '&' and percent-decode the key.
+parse_qs_key(String) ->
+    parse_qs_key(String, []).
+
+parse_qs_key([], Acc) ->
+    {qs_revdecode(Acc), ""};
+parse_qs_key([$= | Rest], Acc) ->
+    {qs_revdecode(Acc), Rest};
+parse_qs_key(Rest=[$; | _], Acc) ->
+    %% Separator belongs to the (empty) value, so leave it in Rest.
+    {qs_revdecode(Acc), Rest};
+parse_qs_key(Rest=[$& | _], Acc) ->
+    {qs_revdecode(Acc), Rest};
+parse_qs_key([C | Rest], Acc) ->
+    parse_qs_key(Rest, [C | Acc]).
+
+%% Consume up to ';' or '&' and percent-decode the value.
+parse_qs_value(String) ->
+    parse_qs_value(String, []).
+
+parse_qs_value([], Acc) ->
+    {qs_revdecode(Acc), ""};
+parse_qs_value([$; | Rest], Acc) ->
+    {qs_revdecode(Acc), Rest};
+parse_qs_value([$& | Rest], Acc) ->
+    {qs_revdecode(Acc), Rest};
+parse_qs_value([C | Rest], Acc) ->
+    parse_qs_value(Rest, [C | Acc]).
+
+%% @spec unquote(string() | binary()) -> string()
+%% @doc Unquote a URL encoded string ('+' -> space, %XX -> byte).
+unquote(Binary) when is_binary(Binary) ->
+    unquote(binary_to_list(Binary));
+unquote(String) ->
+    qs_revdecode(lists:reverse(String)).
+
+%% Decode a REVERSED percent-encoded string. Because the input is
+%% reversed, an escape appears as [Lo, Hi, $%] and the result comes out
+%% in the original order without a final reverse.
+qs_revdecode(S) ->
+    qs_revdecode(S, []).
+
+qs_revdecode([], Acc) ->
+    Acc;
+qs_revdecode([$+ | Rest], Acc) ->
+    qs_revdecode(Rest, [$\s | Acc]);
+qs_revdecode([Lo, Hi, ?PERCENT | Rest], Acc) when ?IS_HEX(Lo), ?IS_HEX(Hi) ->
+    qs_revdecode(Rest, [(unhexdigit(Lo) bor (unhexdigit(Hi) bsl 4)) | Acc]);
+qs_revdecode([C | Rest], Acc) ->
+    qs_revdecode(Rest, [C | Acc]).
+
+%% @spec urlsplit(Url) -> {Scheme, Netloc, Path, Query, Fragment}
+%% @doc Return a 5-tuple, does not expand % escapes. Only supports HTTP style
+%%      URLs.
+urlsplit(Url) ->
+    {Scheme, Url1} = urlsplit_scheme(Url),
+    {Netloc, Url2} = urlsplit_netloc(Url1),
+    {Path, Query, Fragment} = urlsplit_path(Url2),
+    {Scheme, Netloc, Path, Query, Fragment}.
+
+%% Strip a leading "scheme:" if present; otherwise return "" and the
+%% unchanged URL.
+urlsplit_scheme(Url) ->
+    case urlsplit_scheme(Url, []) of
+        no_scheme ->
+            {"", Url};
+        Res ->
+            Res
+    end.
+
+%% Scheme characters per RFC 3986: ALPHA / DIGIT / "+" / "-" / ".".
+urlsplit_scheme([C | Rest], Acc) when ((C >= $a andalso C =< $z) orelse
+                                       (C >= $A andalso C =< $Z) orelse
+                                       (C >= $0 andalso C =< $9) orelse
+                                       C =:= $+ orelse C =:= $- orelse
+                                       C =:= $.) ->
+    urlsplit_scheme(Rest, [C | Acc]);
+urlsplit_scheme([$: | Rest], Acc=[_ | _]) ->
+    %% Non-empty scheme followed by ':'; schemes are case-insensitive.
+    {string:to_lower(lists:reverse(Acc)), Rest};
+urlsplit_scheme(_Rest, _Acc) ->
+    no_scheme.
+
+%% Strip a leading "//authority" if present.
+urlsplit_netloc("//" ++ Rest) ->
+    urlsplit_netloc(Rest, []);
+urlsplit_netloc(Path) ->
+    {"", Path}.
+
+%% The authority ends at '/', '?' or '#' (which stays with the rest).
+urlsplit_netloc("", Acc) ->
+    {lists:reverse(Acc), ""};
+urlsplit_netloc(Rest=[C | _], Acc) when C =:= $/; C =:= $?; C =:= $# ->
+    {lists:reverse(Acc), Rest};
+urlsplit_netloc([C | Rest], Acc) ->
+    urlsplit_netloc(Rest, [C | Acc]).
+
+
+%% @spec path_split(string()) -> {Part, Rest}
+%% @doc Split off the leftmost path segment, as in URL traversal.
+%%      path_split("foo/bar") = {"foo", "bar"},
+%%      path_split("/foo/bar") = {"", "foo/bar"}.
+path_split(Path) ->
+    path_split(Path, []).
+
+path_split([$/ | Remaining], Seen) ->
+    {lists:reverse(Seen), Remaining};
+path_split([Ch | Remaining], Seen) ->
+    path_split(Remaining, [Ch | Seen]);
+path_split([], Seen) ->
+    {lists:reverse(Seen), ""}.
+
+
+%% @spec urlunsplit({Scheme, Netloc, Path, Query, Fragment}) -> string()
+%% @doc Assemble a URL from the 5-tuple. Path must be absolute.
+%%      Inverse of urlsplit/1 (empty components are omitted).
+urlunsplit({Scheme, Netloc, Path, Query, Fragment}) ->
+    lists:flatten([case Scheme of "" -> ""; _ -> [Scheme, "://"] end,
+                   Netloc,
+                   urlunsplit_path({Path, Query, Fragment})]).
+
+%% @spec urlunsplit_path({Path, Query, Fragment}) -> string()
+%% @doc Assemble a URL path from the 3-tuple.
+urlunsplit_path({Path, Query, Fragment}) ->
+    lists:flatten([Path,
+                   case Query of "" -> ""; _ -> [$? | Query] end,
+                   case Fragment of "" -> ""; _ -> [$# | Fragment] end]).
+
+%% @spec urlsplit_path(Url) -> {Path, Query, Fragment}
+%% @doc Return a 3-tuple, does not expand % escapes. Only supports HTTP style
+%%      paths.
+urlsplit_path(Path) ->
+    urlsplit_path(Path, []).
+
+%% Path ends at the first '?' (query follows) or '#' (fragment follows).
+urlsplit_path("", Acc) ->
+    {lists:reverse(Acc), "", ""};
+urlsplit_path("?" ++ Rest, Acc) ->
+    {Query, Fragment} = urlsplit_query(Rest),
+    {lists:reverse(Acc), Query, Fragment};
+urlsplit_path("#" ++ Rest, Acc) ->
+    {lists:reverse(Acc), "", Rest};
+urlsplit_path([C | Rest], Acc) ->
+    urlsplit_path(Rest, [C | Acc]).
+
+%% Split "query#fragment" at the first '#'.
+urlsplit_query(Query) ->
+    urlsplit_query(Query, []).
+
+urlsplit_query("", Acc) ->
+    {lists:reverse(Acc), ""};
+urlsplit_query("#" ++ Rest, Acc) ->
+    {lists:reverse(Acc), Rest};
+urlsplit_query([C | Rest], Acc) ->
+    urlsplit_query(Rest, [C | Acc]).
+
+%% @spec parse_header(string()) -> {Type, [{K, V}]}
+%% @doc Parse a Content-Type like header, return the main Content-Type
+%%      and a property list of options. Type and option names are
+%%      lowercased; option values may be double-quoted.
+parse_header(String) ->
+    %% TODO: This is exactly as broken as Python's cgi module.
+    %%       Should parse properly like mochiweb_cookies.
+    [Type | Parts] = [string:strip(S) || S <- string:tokens(String, ";")],
+    F = fun (S, Acc) ->
+                case lists:splitwith(fun (C) -> C =/= $= end, S) of
+                    {"", _} ->
+                        %% Skip anything with no name
+                        Acc;
+                    {_, ""} ->
+                        %% Skip anything with no value
+                        Acc;
+                    {Name, [$\= | Value]} ->
+                        [{string:to_lower(string:strip(Name)),
+                          unquote_header(string:strip(Value))} | Acc]
+                end
+        end,
+    {string:to_lower(Type),
+     lists:foldr(F, [], Parts)}.
+
+%% Strip surrounding double quotes and backslash escapes from a header
+%% parameter value; unquoted values pass through unchanged.
+unquote_header("\"" ++ Rest) ->
+    unquote_header(Rest, []);
+unquote_header(S) ->
+    S.
+
+unquote_header("", Acc) ->
+    lists:reverse(Acc);
+unquote_header("\"", Acc) ->
+    lists:reverse(Acc);
+unquote_header([$\\, C | Rest], Acc) ->
+    unquote_header(Rest, [C | Acc]);
+unquote_header([C | Rest], Acc) ->
+    unquote_header(Rest, [C | Acc]).
+
+%% @spec record_to_proplist(Record, Fields) -> proplist()
+%% @doc calls record_to_proplist/3 with a default TypeKey of '__record'
+record_to_proplist(Record, Fields) ->
+    record_to_proplist(Record, Fields, '__record').
+
+%% @spec record_to_proplist(Record, Fields, TypeKey) -> proplist()
+%% @doc Return a proplist of the given Record with each field in the
+%%      Fields list set as a key with the corresponding value in the Record.
+%%      TypeKey is the key that is used to store the record type
+%%      Fields should be obtained by calling record_info(fields, record_type)
+%%      where record_type is the record type of Record
+record_to_proplist(Record, Fields, TypeKey)
+  %% Guard asserts Fields matches the record's arity (element 1 is the tag).
+  when tuple_size(Record) - 1 =:= length(Fields) ->
+    lists:zip([TypeKey | Fields], tuple_to_list(Record)).
+
+
+%% Worker for shell_quote/1: backslash-escape ", `, \ and $ while
+%% accumulating in reverse; the base case appends the closing quote
+%% (the opening quote was seeded by shell_quote/1).
+shell_quote([], Acc) ->
+    lists:reverse([$\" | Acc]);
+shell_quote([C | Rest], Acc) when C =:= $\" orelse C =:= $\` orelse
+                                  C =:= $\\ orelse C =:= $\$ ->
+    shell_quote(Rest, [C, $\\ | Acc]);
+shell_quote([C | Rest], Acc) ->
+    shell_quote(Rest, [C | Acc]).
+
+%% @spec parse_qvalues(string()) -> [qvalue()] | invalid_qvalue_string
+%% @type qvalue() = {media_type() | encoding() , float()}.
+%% @type media_type() = string().
+%% @type encoding() = string().
+%%
+%% @doc Parses a list (given as a string) of elements with Q values associated
+%%      to them. Elements are separated by commas and each element is separated
+%%      from its Q value by a semicolon. Q values are optional but when missing
+%%      the value of an element is considered as 1.0. A Q value is always in the
+%%      range [0.0, 1.0]. A Q value list is used for example as the value of the
+%%      HTTP "Accept" and "Accept-Encoding" headers.
+%%
+%%      Q values are described in section 2.9 of the RFC 2616 (HTTP 1.1).
+%%
+%%      Example:
+%%
+%%      parse_qvalues("gzip; q=0.5, deflate, identity;q=0.0") ->
+%%          [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 0.0}]
+%%
+%% Implementation note: validation is crash-driven -- malformed input
+%% makes a match inside the helpers fail, and the try converts any
+%% exception into `invalid_qvalue_string'.
+parse_qvalues(QValuesStr) ->
+    try
+        lists:map(
+            fun(Pair) ->
+                [Type | Params] = string:tokens(Pair, ";"),
+                NormParams = normalize_media_params(Params),
+                {Q, NonQParams} = extract_q(NormParams),
+                {string:join([string:strip(Type) | NonQParams], ";"), Q}
+            end,
+            string:tokens(string:to_lower(QValuesStr), ",")
+        )
+    catch
+        _Type:_Error ->
+            invalid_qvalue_string
+    end.
+
+%% Strip all whitespace from each media parameter.
+normalize_media_params(Params) ->
+    {ok, Re} = re:compile("\\s"),
+    normalize_media_params(Re, Params, []).
+
+normalize_media_params(_Re, [], Acc) ->
+    lists:reverse(Acc);
+normalize_media_params(Re, [Param | Rest], Acc) ->
+    NormParam = re:replace(Param, Re, "", [global, {return, list}]),
+    normalize_media_params(Re, Rest, [NormParam | Acc]).
+
+%% Pull the "q=..." parameter (if any) out of the normalized parameter
+%% list, returning {Q, RemainingParams}. Missing q defaults to 1.0.
+extract_q(NormParams) ->
+    {ok, KVRe} = re:compile("^([^=]+)=([^=]+)$"),
+    {ok, QRe} = re:compile("^((?:0|1)(?:\\.\\d{1,3})?)$"),
+    extract_q(KVRe, QRe, NormParams, []).
+
+extract_q(_KVRe, _QRe, [], Acc) ->
+    {1.0, lists:reverse(Acc)};
+extract_q(KVRe, QRe, [Param | Rest], Acc) ->
+    %% No nomatch clause on purpose: a parameter that is not k=v crashes
+    %% and is reported as invalid by parse_qvalues/1.
+    case re:run(Param, KVRe, [{capture, [1, 2], list}]) of
+        {match, [Name, Value]} ->
+            case Name of
+                "q" ->
+                    {match, [Q]} = re:run(Value, QRe, [{capture, [1], list}]),
+                    QVal = case Q of
+                        "0" ->
+                            0.0;
+                        "1" ->
+                            1.0;
+                        Else ->
+                            list_to_float(Else)
+                    end,
+                    %% One-armed case: out-of-range Q crashes -> invalid.
+                    case QVal < 0.0 orelse QVal > 1.0 of
+                        false ->
+                            {QVal, lists:reverse(Acc) ++ Rest}
+                    end;
+                _ ->
+                    extract_q(KVRe, QRe, Rest, [Param | Acc])
+            end
+    end.
+
+%% @spec pick_accepted_encodings([qvalue()], [encoding()], encoding()) ->
+%%    [encoding()]
+%%
+%% @doc Determines which encodings specified in the given Q values list are
+%%      valid according to a list of supported encodings and a default encoding.
+%%
+%%      The returned list of encodings is sorted, descendingly, according to the
+%%      Q values of the given list. The last element of this list is the given
+%%      default encoding unless this encoding is explicitly or implicitly
+%%      marked with a Q value of 0.0 in the given Q values list.
+%%      Note: encodings with the same Q value are kept in the same order as
+%%            found in the input Q values list.
+%%
+%%      This encoding picking process is described in section 14.3 of the
+%%      RFC 2616 (HTTP 1.1).
+%%
+%%      Example:
+%%
+%%      pick_accepted_encodings(
+%%          [{"gzip", 0.5}, {"deflate", 1.0}],
+%%          ["gzip", "identity"],
+%%          "identity"
+%%      ) ->
+%%          ["gzip", "identity"]
+%%
+pick_accepted_encodings(AcceptedEncs, SupportedEncs, DefaultEnc) ->
+    %% Stable descending sort by Q (reverse of a stable ascending sort).
+    SortedQList = lists:reverse(
+        lists:sort(fun({_, Q1}, {_, Q2}) -> Q1 < Q2 end, AcceptedEncs)
+    ),
+    %% Partition into accepted (Q > 0) and refused (Q == 0) encodings.
+    {Accepted, Refused} = lists:foldr(
+        fun({E, Q}, {A, R}) ->
+            case Q > 0.0 of
+                true ->
+                    {[E | A], R};
+                false ->
+                    {A, [E | R]}
+            end
+        end,
+        {[], []},
+        SortedQList
+    ),
+    %% Expand a refused "*" into every supported encoding not already
+    %% explicitly accepted.
+    Refused1 = lists:foldr(
+        fun(Enc, Acc) ->
+            case Enc of
+                "*" ->
+                    lists:subtract(SupportedEncs, Accepted) ++ Acc;
+                _ ->
+                    [Enc | Acc]
+            end
+        end,
+        [],
+        Refused
+    ),
+    %% Expand an accepted "*" into every supported encoding not already
+    %% mentioned explicitly (accepted or refused).
+    Accepted1 = lists:foldr(
+        fun(Enc, Acc) ->
+            case Enc of
+                "*" ->
+                    lists:subtract(SupportedEncs, Accepted ++ Refused1) ++ Acc;
+                _ ->
+                    [Enc | Acc]
+            end
+        end,
+        [],
+        Accepted
+    ),
+    %% Default encoding is implicitly acceptable unless refused below.
+    Accepted2 = case lists:member(DefaultEnc, Accepted1) of
+        true ->
+            Accepted1;
+        false ->
+            Accepted1 ++ [DefaultEnc]
+    end,
+    [E || E <- Accepted2, lists:member(E, SupportedEncs),
+        not lists:member(E, Refused1)].
+
+%% Coerce an atom or integer to a string; lists and binaries are
+%% already valid iodata and pass through untouched.
+make_io(I) when is_integer(I) ->
+    integer_to_list(I);
+make_io(A) when is_atom(A) ->
+    atom_to_list(A);
+make_io(Io) when is_list(Io); is_binary(Io) ->
+    Io.
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+make_io_test() ->
+ ?assertEqual(
+ <<"atom">>,
+ iolist_to_binary(make_io(atom))),
+ ?assertEqual(
+ <<"20">>,
+ iolist_to_binary(make_io(20))),
+ ?assertEqual(
+ <<"list">>,
+ iolist_to_binary(make_io("list"))),
+ ?assertEqual(
+ <<"binary">>,
+ iolist_to_binary(make_io(<<"binary">>))),
+ ok.
+
+-record(test_record, {field1=f1, field2=f2}).
+record_to_proplist_test() ->
+ ?assertEqual(
+ [{'__record', test_record},
+ {field1, f1},
+ {field2, f2}],
+ record_to_proplist(#test_record{}, record_info(fields, test_record))),
+ ?assertEqual(
+ [{'typekey', test_record},
+ {field1, f1},
+ {field2, f2}],
+ record_to_proplist(#test_record{},
+ record_info(fields, test_record),
+ typekey)),
+ ok.
+
+shell_quote_test() ->
+ ?assertEqual(
+ "\"foo \\$bar\\\"\\`' baz\"",
+ shell_quote("foo $bar\"`' baz")),
+ ok.
+
+cmd_port_test_spool(Port, Acc) ->
+ receive
+ {Port, eof} ->
+ Acc;
+ {Port, {data, {eol, Data}}} ->
+ cmd_port_test_spool(Port, ["\n", Data | Acc]);
+ {Port, Unknown} ->
+ throw({unknown, Unknown})
+ after 1000 ->
+ throw(timeout)
+ end.
+
+cmd_port_test() ->
+ Port = cmd_port(["echo", "$bling$ `word`!"],
+ [eof, stream, {line, 4096}]),
+ Res = try lists:append(lists:reverse(cmd_port_test_spool(Port, [])))
+ after catch port_close(Port)
+ end,
+ self() ! {Port, wtf},
+ try cmd_port_test_spool(Port, [])
+ catch throw:{unknown, wtf} -> ok
+ end,
+ try cmd_port_test_spool(Port, [])
+ catch throw:timeout -> ok
+ end,
+ ?assertEqual(
+ "$bling$ `word`!\n",
+ Res).
+
+cmd_test() ->
+ ?assertEqual(
+ "$bling$ `word`!\n",
+ cmd(["echo", "$bling$ `word`!"])),
+ ok.
+
+cmd_string_test() ->
+ ?assertEqual(
+ "\"echo\" \"\\$bling\\$ \\`word\\`!\"",
+ cmd_string(["echo", "$bling$ `word`!"])),
+ ok.
+
+cmd_status_test() ->
+ ?assertEqual(
+ {0, <<"$bling$ `word`!\n">>},
+ cmd_status(["echo", "$bling$ `word`!"])),
+ ok.
+
+
+parse_header_test() ->
+ ?assertEqual(
+ {"multipart/form-data", [{"boundary", "AaB03x"}]},
+ parse_header("multipart/form-data; boundary=AaB03x")),
+ %% This tests (currently) intentionally broken behavior
+ ?assertEqual(
+ {"multipart/form-data",
+ [{"b", ""},
+ {"cgi", "is"},
+ {"broken", "true\"e"}]},
+ parse_header("multipart/form-data;b=;cgi=\"i\\s;broken=true\"e;=z;z")),
+ ok.
+
+path_split_test() ->
+ {"", "foo/bar"} = path_split("/foo/bar"),
+ {"foo", "bar"} = path_split("foo/bar"),
+ {"bar", ""} = path_split("bar"),
+ ok.
+
+urlsplit_test() ->
+ {"", "", "/foo", "", "bar?baz"} = urlsplit("/foo#bar?baz"),
+ {"https", "host:port", "/foo", "", "bar?baz"} =
+ urlsplit("https://host:port/foo#bar?baz"),
+ {"https", "host", "", "", ""} = urlsplit("https://host"),
+ {"", "", "/wiki/Category:Fruit", "", ""} =
+ urlsplit("/wiki/Category:Fruit"),
+ ok.
+
+urlsplit_path_test() ->
+ {"/foo/bar", "", ""} = urlsplit_path("/foo/bar"),
+ {"/foo", "baz", ""} = urlsplit_path("/foo?baz"),
+ {"/foo", "", "bar?baz"} = urlsplit_path("/foo#bar?baz"),
+ {"/foo", "", "bar?baz#wibble"} = urlsplit_path("/foo#bar?baz#wibble"),
+ {"/foo", "bar", "baz"} = urlsplit_path("/foo?bar#baz"),
+ {"/foo", "bar?baz", "baz"} = urlsplit_path("/foo?bar?baz#baz"),
+ ok.
+
+urlunsplit_test() ->
+ "/foo#bar?baz" = urlunsplit({"", "", "/foo", "", "bar?baz"}),
+ "https://host:port/foo#bar?baz" =
+ urlunsplit({"https", "host:port", "/foo", "", "bar?baz"}),
+ ok.
+
+urlunsplit_path_test() ->
+ "/foo/bar" = urlunsplit_path({"/foo/bar", "", ""}),
+ "/foo?baz" = urlunsplit_path({"/foo", "baz", ""}),
+ "/foo#bar?baz" = urlunsplit_path({"/foo", "", "bar?baz"}),
+ "/foo#bar?baz#wibble" = urlunsplit_path({"/foo", "", "bar?baz#wibble"}),
+ "/foo?bar#baz" = urlunsplit_path({"/foo", "bar", "baz"}),
+ "/foo?bar?baz#baz" = urlunsplit_path({"/foo", "bar?baz", "baz"}),
+ ok.
+
+join_test() ->
+ ?assertEqual("foo,bar,baz",
+ join(["foo", "bar", "baz"], $,)),
+ ?assertEqual("foo,bar,baz",
+ join(["foo", "bar", "baz"], ",")),
+ ?assertEqual("foo bar",
+ join([["foo", " bar"]], ",")),
+ ?assertEqual("foo bar,baz",
+ join([["foo", " bar"], "baz"], ",")),
+ ?assertEqual("foo",
+ join(["foo"], ",")),
+ ?assertEqual("foobarbaz",
+ join(["foo", "bar", "baz"], "")),
+ ?assertEqual("foo" ++ [<<>>] ++ "bar" ++ [<<>>] ++ "baz",
+ join(["foo", "bar", "baz"], <<>>)),
+ ?assertEqual("foobar" ++ [<<"baz">>],
+ join(["foo", "bar", <<"baz">>], "")),
+ ?assertEqual("",
+ join([], "any")),
+ ok.
+
+quote_plus_test() ->
+ "foo" = quote_plus(foo),
+ "1" = quote_plus(1),
+ "1.1" = quote_plus(1.1),
+ "foo" = quote_plus("foo"),
+ "foo+bar" = quote_plus("foo bar"),
+ "foo%0A" = quote_plus("foo\n"),
+ "foo%0A" = quote_plus("foo\n"),
+ "foo%3B%26%3D" = quote_plus("foo;&="),
+ "foo%3B%26%3D" = quote_plus(<<"foo;&=">>),
+ ok.
+
+unquote_test() ->
+ ?assertEqual("foo bar",
+ unquote("foo+bar")),
+ ?assertEqual("foo bar",
+ unquote("foo%20bar")),
+ ?assertEqual("foo\r\n",
+ unquote("foo%0D%0A")),
+ ?assertEqual("foo\r\n",
+ unquote(<<"foo%0D%0A">>)),
+ ok.
+
+urlencode_test() ->
+ "foo=bar&baz=wibble+%0D%0A&z=1" = urlencode([{foo, "bar"},
+ {"baz", "wibble \r\n"},
+ {z, 1}]),
+ ok.
+
+parse_qs_test() ->
+ ?assertEqual(
+ [{"foo", "bar"}, {"baz", "wibble \r\n"}, {"z", "1"}],
+ parse_qs("foo=bar&baz=wibble+%0D%0a&z=1")),
+ ?assertEqual(
+ [{"", "bar"}, {"baz", "wibble \r\n"}, {"z", ""}],
+ parse_qs("=bar&baz=wibble+%0D%0a&z=")),
+ ?assertEqual(
+ [{"foo", "bar"}, {"baz", "wibble \r\n"}, {"z", "1"}],
+ parse_qs(<<"foo=bar&baz=wibble+%0D%0a&z=1">>)),
+ ?assertEqual(
+ [],
+ parse_qs("")),
+ ?assertEqual(
+ [{"foo", ""}, {"bar", ""}, {"baz", ""}],
+ parse_qs("foo;bar&baz")),
+ ok.
+
+partition_test() ->
+ {"foo", "", ""} = partition("foo", "/"),
+ {"foo", "/", "bar"} = partition("foo/bar", "/"),
+ {"foo", "/", ""} = partition("foo/", "/"),
+ {"", "/", "bar"} = partition("/bar", "/"),
+ {"f", "oo/ba", "r"} = partition("foo/bar", "oo/ba"),
+ ok.
+
+safe_relative_path_test() ->
+ "foo" = safe_relative_path("foo"),
+ "foo/" = safe_relative_path("foo/"),
+ "foo" = safe_relative_path("foo/bar/.."),
+ "bar" = safe_relative_path("foo/../bar"),
+ "bar/" = safe_relative_path("foo/../bar/"),
+ "" = safe_relative_path("foo/.."),
+ "" = safe_relative_path("foo/../"),
+ undefined = safe_relative_path("/foo"),
+ undefined = safe_relative_path("../foo"),
+ undefined = safe_relative_path("foo/../.."),
+ undefined = safe_relative_path("foo//"),
+ undefined = safe_relative_path("foo\\bar"),
+ ok.
+
+parse_qvalues_test() ->
+ [] = parse_qvalues(""),
+ [{"identity", 0.0}] = parse_qvalues("identity;q=0"),
+ [{"identity", 0.0}] = parse_qvalues("identity ;q=0"),
+ [{"identity", 0.0}] = parse_qvalues(" identity; q =0 "),
+ [{"identity", 0.0}] = parse_qvalues("identity ; q = 0"),
+ [{"identity", 0.0}] = parse_qvalues("identity ; q= 0.0"),
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "gzip,deflate,identity;q=0.0"
+ ),
+ [{"deflate", 1.0}, {"gzip", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "deflate,gzip,identity;q=0.0"
+ ),
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"gzip", 1.0}, {"identity", 0.0}] =
+ parse_qvalues("gzip,deflate,gzip,identity;q=0"),
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "gzip, deflate , identity; q=0.0"
+ ),
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "gzip; q=1, deflate;q=1.0, identity;q=0.0"
+ ),
+ [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "gzip; q=0.5, deflate;q=1.0, identity;q=0"
+ ),
+ [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 0.0}] = parse_qvalues(
+ "gzip; q=0.5, deflate , identity;q=0.0"
+ ),
+ [{"gzip", 0.5}, {"deflate", 0.8}, {"identity", 0.0}] = parse_qvalues(
+ "gzip; q=0.5, deflate;q=0.8, identity;q=0.0"
+ ),
+ [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 1.0}] = parse_qvalues(
+ "gzip; q=0.5,deflate,identity"
+ ),
+ [{"gzip", 0.5}, {"deflate", 1.0}, {"identity", 1.0}, {"identity", 1.0}] =
+ parse_qvalues("gzip; q=0.5,deflate,identity, identity "),
+ [{"text/html;level=1", 1.0}, {"text/plain", 0.5}] =
+ parse_qvalues("text/html;level=1, text/plain;q=0.5"),
+ [{"text/html;level=1", 0.3}, {"text/plain", 1.0}] =
+ parse_qvalues("text/html;level=1;q=0.3, text/plain"),
+ [{"text/html;level=1", 0.3}, {"text/plain", 1.0}] =
+ parse_qvalues("text/html; level = 1; q = 0.3, text/plain"),
+ [{"text/html;level=1", 0.3}, {"text/plain", 1.0}] =
+ parse_qvalues("text/html;q=0.3;level=1, text/plain"),
+ invalid_qvalue_string = parse_qvalues("gzip; q=1.1, deflate"),
+ invalid_qvalue_string = parse_qvalues("gzip; q=0.5, deflate;q=2"),
+ invalid_qvalue_string = parse_qvalues("gzip, deflate;q=AB"),
+ invalid_qvalue_string = parse_qvalues("gzip; q=2.1, deflate"),
+ invalid_qvalue_string = parse_qvalues("gzip; q=0.1234, deflate"),
+ invalid_qvalue_string = parse_qvalues("text/html;level=1;q=0.3, text/html;level"),
+ ok.
+
+pick_accepted_encodings_test() ->
+ ["identity"] = pick_accepted_encodings(
+ [],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 1.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["identity"] = pick_accepted_encodings(
+ [{"gzip", 0.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"deflate", 1.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 0.5}, {"deflate", 1.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["identity"] = pick_accepted_encodings(
+ [{"gzip", 0.0}, {"deflate", 0.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["gzip"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}],
+ ["gzip", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate", "identity"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"deflate", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"deflate", 1.0}, {"identity", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["deflate", "gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 0.2}, {"deflate", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["deflate", "deflate", "gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 0.2}, {"deflate", 1.0}, {"deflate", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["deflate", "gzip", "gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 0.2}, {"deflate", 1.0}, {"gzip", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate", "gzip", "identity"] = pick_accepted_encodings(
+ [{"gzip", 0.2}, {"deflate", 0.9}, {"gzip", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ [] = pick_accepted_encodings(
+ [{"*", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate", "identity"] = pick_accepted_encodings(
+ [{"*", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate", "identity"] = pick_accepted_encodings(
+ [{"*", 0.6}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"*", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "deflate"] = pick_accepted_encodings(
+ [{"gzip", 1.0}, {"deflate", 0.6}, {"*", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["deflate", "gzip"] = pick_accepted_encodings(
+ [{"gzip", 0.5}, {"deflate", 1.0}, {"*", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "identity"] = pick_accepted_encodings(
+ [{"deflate", 0.0}, {"*", 1.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ["gzip", "identity"] = pick_accepted_encodings(
+ [{"*", 1.0}, {"deflate", 0.0}],
+ ["gzip", "deflate", "identity"],
+ "identity"
+ ),
+ ok.
+
+-endif.
diff --git a/deps/rabbit_common/src/rabbit_json.erl b/deps/rabbit_common/src/rabbit_json.erl
new file mode 100644
index 0000000000..a10569135b
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_json.erl
@@ -0,0 +1,63 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_json).
+
+-export([decode/1, decode/2, try_decode/1, try_decode/2,
+ encode/1, encode/2, try_encode/1, try_encode/2]).
+
+-define(DEFAULT_DECODE_OPTIONS, [return_maps]).
+
+
+%% Decode a JSON text to an Erlang term using the module default
+%% options (objects become maps).
+-spec decode(jsx:json_text()) -> jsx:json_term().
+decode(JSON) ->
+    decode(JSON, ?DEFAULT_DECODE_OPTIONS).
+
+
+%% Decode a JSON text with explicit jsx options; raises on invalid input
+%% (see try_decode/2 for a non-raising variant).
+-spec decode(jsx:json_text(), jsx_to_term:config()) -> jsx:json_term().
+decode(JSON, Opts) ->
+    jsx:decode(JSON, Opts).
+
+
+%% Like decode/1 but returns a tagged tuple instead of raising.
+-spec try_decode(jsx:json_text()) -> {ok, jsx:json_term()} |
+                                     {error, Reason :: term()}.
+try_decode(JSON) ->
+    try_decode(JSON, ?DEFAULT_DECODE_OPTIONS).
+
+
+%% Like decode/2 but converts `error'-class exceptions into
+%% {error, Reason} instead of letting them propagate.
+-spec try_decode(jsx:json_text(), jsx_to_term:config()) ->
+    {ok, jsx:json_term()} | {error, Reason :: term()}.
+try_decode(JSON, Opts) ->
+    try decode(JSON, Opts) of
+        Term ->
+            {ok, Term}
+    catch
+        error:Reason ->
+            {error, Reason}
+    end.
+
+%% Encode an Erlang term to JSON text with default jsx options.
+-spec encode(jsx:json_term()) -> jsx:json_text().
+encode(Term) ->
+    encode(Term, []).
+
+%% Encode with explicit jsx options; raises on unencodable terms
+%% (see try_encode/2 for a non-raising variant).
+-spec encode(jsx:json_term(), jsx_to_json:config()) -> jsx:json_text().
+encode(Term, Opts) ->
+    jsx:encode(Term, Opts).
+
+
+%% Like encode/1 but returns a tagged tuple instead of raising.
+-spec try_encode(jsx:json_term()) -> {ok, jsx:json_text()} |
+                                     {error, Reason :: term()}.
+try_encode(Term) ->
+    try_encode(Term, []).
+
+
+%% Like encode/2 but converts `error'-class exceptions into
+%% {error, Reason} instead of letting them propagate.
+-spec try_encode(jsx:json_term(), jsx_to_term:config()) ->
+    {ok, jsx:json_text()} | {error, Reason :: term()}.
+try_encode(Term, Opts) ->
+    try encode(Term, Opts) of
+        JSON ->
+            {ok, JSON}
+    catch
+        error:Reason ->
+            {error, Reason}
+    end.
diff --git a/deps/rabbit_common/src/rabbit_log.erl b/deps/rabbit_common/src/rabbit_log.erl
new file mode 100644
index 0000000000..22b4619d1c
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_log.erl
@@ -0,0 +1,164 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
%%

%% Logging facade. log/3,4 and the level helpers route messages to
%% per-category Lager sinks; log/2 additionally implements the OTP
%% logger(3) handler callback, forwarding logger events to the default
%% Lager sink.
-module(rabbit_log).

-export([log/2, log/3, log/4]).
-export([debug/1, debug/2, debug/3,
         info/1, info/2, info/3,
         notice/1, notice/2, notice/3,
         warning/1, warning/2, warning/3,
         error/1, error/2, error/3,
         critical/1, critical/2, critical/3,
         alert/1, alert/2, alert/3,
         emergency/1, emergency/2, emergency/3,
         none/1, none/2, none/3]).
-export([make_internal_sink_name/1]).

-include("rabbit_log.hrl").
%%----------------------------------------------------------------------------

%% Fix: 'default' is accepted by log/3,4 and make_internal_sink_name/1
%% (routing to the main ?LAGER_SINK) but was missing from this type,
%% making every log(default, ...) call a spec violation under Dialyzer.
-type category() :: default |
                    channel |
                    connection |
                    federation |
                    feature_flags |
                    ldap |
                    mirroring |
                    osiris |
                    prelaunch |
                    queue |
                    ra |
                    shovel |
                    upgrade.

-spec debug(string()) -> 'ok'.
-spec debug(string(), [any()]) -> 'ok'.
-spec debug(pid() | [tuple()], string(), [any()]) -> 'ok'.
-spec info(string()) -> 'ok'.
-spec info(string(), [any()]) -> 'ok'.
-spec info(pid() | [tuple()], string(), [any()]) -> 'ok'.
-spec notice(string()) -> 'ok'.
-spec notice(string(), [any()]) -> 'ok'.
-spec notice(pid() | [tuple()], string(), [any()]) -> 'ok'.
-spec warning(string()) -> 'ok'.
-spec warning(string(), [any()]) -> 'ok'.
-spec warning(pid() | [tuple()], string(), [any()]) -> 'ok'.
-spec error(string()) -> 'ok'.
-spec error(string(), [any()]) -> 'ok'.
-spec error(pid() | [tuple()], string(), [any()]) -> 'ok'.
-spec critical(string()) -> 'ok'.
-spec critical(string(), [any()]) -> 'ok'.
-spec critical(pid() | [tuple()], string(), [any()]) -> 'ok'.
-spec alert(string()) -> 'ok'.
-spec alert(string(), [any()]) -> 'ok'.
-spec alert(pid() | [tuple()], string(), [any()]) -> 'ok'.
-spec emergency(string()) -> 'ok'.
-spec emergency(string(), [any()]) -> 'ok'.
-spec emergency(pid() | [tuple()], string(), [any()]) -> 'ok'.
-spec none(string()) -> 'ok'.
-spec none(string(), [any()]) -> 'ok'.
-spec none(pid() | [tuple()], string(), [any()]) -> 'ok'.

%%----------------------------------------------------------------------------

%% Log a plain format string (no arguments) at Level on Category's sink.
-spec log(category(), lager:log_level(), string()) -> 'ok'.
log(Category, Level, Fmt) -> log(Category, Level, Fmt, []).

%% Log a formatted message at Level on the Lager sink dedicated to
%% Category; 'default' uses the main ?LAGER_SINK.
-spec log(category(), lager:log_level(), string(), [any()]) -> 'ok'.
log(Category, Level, Fmt, Args) when is_list(Args) ->
    Sink = case Category of
               default -> ?LAGER_SINK;
               _ -> make_internal_sink_name(Category)
           end,
    lager:log(Sink, Level, self(), Fmt, Args).

%% logger(3) handler callback: forwards OTP logger events to Lager,
%% dropping reports that would either loop back into the handler or
%% that Lager already emits on its own.
log(#{level := Level,
      msg := Msg,
      meta := #{pid := Pid}} = _LogEvent,
    _Config) ->
    case Msg of
        {report, #{label := {error_logger, _}}} ->
            %% Avoid recursive loop.
            ok;
        {report, #{label := {application_controller, progress}}} ->
            %% Already logged by Lager.
            ok;
        {report, #{label := {supervisor, progress}}} ->
            %% Already logged by Lager.
            ok;
        {report, #{report := Report}} ->
            %% FIXME: Is this code reached?
            error_logger:info_report(Report);
        {report, #{format := Format, args := Args}} when is_list(Format) ->
            lager:log(?LAGER_SINK, Level, Pid, Format, Args);
        {string, String} ->
            lager:log(?LAGER_SINK, Level, Pid, "~ts", [String]);
        {Format, Args} when is_list(Format) ->
            lager:log(?LAGER_SINK, Level, Pid, Format, Args)
    end.

%% Map a category to the registered name of its Lager sink process.
%% An unknown category is a programmer error.
make_internal_sink_name(channel) -> rabbit_log_channel_lager_event;
make_internal_sink_name(connection) -> rabbit_log_connection_lager_event;
make_internal_sink_name(default) -> rabbit_log_lager_event;
make_internal_sink_name(feature_flags) -> rabbit_log_feature_flags_lager_event;
make_internal_sink_name(federation) -> rabbit_log_federation_lager_event;
make_internal_sink_name(ldap) -> rabbit_log_ldap_lager_event;
make_internal_sink_name(mirroring) -> rabbit_log_mirroring_lager_event;
make_internal_sink_name(osiris) -> rabbit_log_osiris_lager_event;
make_internal_sink_name(prelaunch) -> rabbit_log_prelaunch_lager_event;
make_internal_sink_name(queue) -> rabbit_log_queue_lager_event;
make_internal_sink_name(ra) -> rabbit_log_ra_lager_event;
make_internal_sink_name(shovel) -> rabbit_log_shovel_lager_event;
make_internal_sink_name(upgrade) -> rabbit_log_upgrade_lager_event;
make_internal_sink_name(Category) ->
    erlang:error({unknown_category, Category}).

%% Per-level helpers: <level>(Fmt), <level>(Fmt, Args) and
%% <level>(Metadata, Fmt, Args) all log on the default sink. The error
%% helpers call through ?MODULE to avoid clashing with the
%% auto-imported erlang:error/1,2 BIFs.
debug(Format) -> debug(Format, []).
debug(Format, Args) -> debug(self(), Format, Args).
debug(Metadata, Format, Args) ->
    lager:log(?LAGER_SINK, debug, Metadata, Format, Args).

info(Format) -> info(Format, []).
info(Format, Args) -> info(self(), Format, Args).
info(Metadata, Format, Args) ->
    lager:log(?LAGER_SINK, info, Metadata, Format, Args).

notice(Format) -> notice(Format, []).
notice(Format, Args) -> notice(self(), Format, Args).
notice(Metadata, Format, Args) ->
    lager:log(?LAGER_SINK, notice, Metadata, Format, Args).

warning(Format) -> warning(Format, []).
warning(Format, Args) -> warning(self(), Format, Args).
warning(Metadata, Format, Args) ->
    lager:log(?LAGER_SINK, warning, Metadata, Format, Args).

error(Format) -> ?MODULE:error(Format, []).
error(Format, Args) -> ?MODULE:error(self(), Format, Args).
error(Metadata, Format, Args) ->
    lager:log(?LAGER_SINK, error, Metadata, Format, Args).

critical(Format) -> critical(Format, []).
critical(Format, Args) -> critical(self(), Format, Args).
critical(Metadata, Format, Args) ->
    lager:log(?LAGER_SINK, critical, Metadata, Format, Args).

alert(Format) -> alert(Format, []).
alert(Format, Args) -> alert(self(), Format, Args).
alert(Metadata, Format, Args) ->
    lager:log(?LAGER_SINK, alert, Metadata, Format, Args).

emergency(Format) -> emergency(Format, []).
emergency(Format, Args) -> emergency(self(), Format, Args).
emergency(Metadata, Format, Args) ->
    lager:log(?LAGER_SINK, emergency, Metadata, Format, Args).

none(Format) -> none(Format, []).
none(Format, Args) -> none(self(), Format, Args).
none(Metadata, Format, Args) ->
    lager:log(?LAGER_SINK, none, Metadata, Format, Args).
diff --git a/deps/rabbit_common/src/rabbit_log_osiris_shim.erl b/deps/rabbit_common/src/rabbit_log_osiris_shim.erl
new file mode 100644
index 0000000000..09d6a63431
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_log_osiris_shim.erl
@@ -0,0 +1,26 @@
%% The contents of this file are subject to the Mozilla Public License
%% Version 1.1 (the "License"); you may not use this file except in
%% compliance with the License. You may obtain a copy of the License
%% at https://www.mozilla.org/MPL/
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and
%% limitations under the License.
%%
%% The Original Code is RabbitMQ.
%%
%% The Initial Developer of the Original Code is GoPivotal, Inc.
%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
%%

-module(rabbit_log_osiris_shim).

%% Minimal adapter: forwards osiris log calls to rabbit_log under the
%% 'osiris' category, discarding the metadata argument.

-export([log/4]).

-spec log(lager:log_level(), string(), [any()], _) -> ok.
log(Level, Fmt, Args, _Metadata) ->
    _ = rabbit_log:log(osiris, Level, Fmt, Args),
    ok.
diff --git a/deps/rabbit_common/src/rabbit_log_ra_shim.erl b/deps/rabbit_common/src/rabbit_log_ra_shim.erl
new file mode 100644
index 0000000000..3d35ff6a07
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_log_ra_shim.erl
@@ -0,0 +1,16 @@
%% This Source Code Form is subject to the terms of the Mozilla Public
%% License, v. 2.0. If a copy of the MPL was not distributed with this
%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
%%
%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
%%

-module(rabbit_log_ra_shim).

%% just a shim to redirect logs from ra to rabbit_log

-export([log/4]).

%% Spec added for parity with rabbit_log_osiris_shim:log/4; the
%% metadata argument is accepted but ignored.
-spec log(lager:log_level(), string(), [any()], _) -> ok.
log(Level, Format, Args, _Meta) ->
    rabbit_log:log(ra, Level, Format, Args),
    ok.
diff --git a/deps/rabbit_common/src/rabbit_misc.erl b/deps/rabbit_common/src/rabbit_misc.erl
new file mode 100644
index 0000000000..c5fd86dcbb
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_misc.erl
@@ -0,0 +1,1434 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_misc).
+
+-ignore_xref([{maps, get, 2}]).
+
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+-include("rabbit_misc.hrl").
+
+-ifdef(TEST).
+-export([decompose_pid/1, compose_pid/4]).
+-endif.
+
+-export([method_record_type/1, polite_pause/0, polite_pause/1]).
+-export([die/1, frame_error/2, amqp_error/4, quit/1,
+ protocol_error/3, protocol_error/4, protocol_error/1]).
+-export([type_class/1, assert_args_equivalence/4, assert_field_equivalence/4]).
+-export([dirty_read/1]).
+-export([table_lookup/2, set_table_value/4, amqp_table/1, to_amqp_table/1]).
+-export([r/3, r/2, r_arg/4, rs/1]).
+-export([enable_cover/0, report_cover/0]).
+-export([enable_cover/1, report_cover/1]).
+-export([start_cover/1]).
+-export([throw_on_error/2, with_exit_handler/2, is_abnormal_exit/1,
+ filter_exit_map/2]).
+-export([with_user/2]).
+-export([execute_mnesia_transaction/1]).
+-export([execute_mnesia_transaction/2]).
+-export([execute_mnesia_tx_with_tail/1]).
+-export([ensure_ok/2]).
+-export([tcp_name/3, format_inet_error/1]).
+-export([upmap/2, map_in_order/2, utf8_safe/1]).
+-export([table_filter/3]).
+-export([dirty_read_all/1, dirty_foreach_key/2, dirty_dump_log/1]).
+-export([format/2, format_many/1, format_stderr/2]).
+-export([unfold/2, ceil/1, queue_fold/3]).
+-export([sort_field_table/1]).
+-export([atom_to_binary/1, parse_bool/1, parse_int/1]).
+-export([pid_to_string/1, string_to_pid/1,
+ pid_change_node/2, node_to_fake_pid/1]).
+-export([version_compare/2, version_compare/3]).
+-export([version_minor_equivalent/2, strict_version_minor_equivalent/2]).
+-export([dict_cons/3, orddict_cons/3, maps_cons/3, gb_trees_cons/3]).
+-export([gb_trees_fold/3, gb_trees_foreach/2]).
+-export([all_module_attributes/1,
+ rabbitmq_related_module_attributes/1,
+ module_attributes_from_apps/2,
+ build_acyclic_graph/3]).
+-export([const/1]).
+-export([ntoa/1, ntoab/1]).
+-export([is_process_alive/1]).
+-export([pget/2, pget/3, pupdate/3, pget_or_die/2, pmerge/3, pset/3, plmerge/2]).
+-export([format_message_queue/2]).
+-export([append_rpc_all_nodes/4, append_rpc_all_nodes/5]).
+-export([os_cmd/1]).
+-export([is_os_process_alive/1]).
+-export([gb_sets_difference/2]).
+-export([version/0, otp_release/0, platform_and_version/0, otp_system_version/0,
+ rabbitmq_and_erlang_versions/0, which_applications/0]).
+-export([sequence_error/1]).
+-export([check_expiry/1]).
+-export([base64url/1]).
+-export([interval_operation/5]).
+-export([ensure_timer/4, stop_timer/2, send_after/3, cancel_timer/1]).
+-export([get_parent/0]).
+-export([store_proc_name/1, store_proc_name/2, get_proc_name/0]).
+-export([moving_average/4]).
+-export([escape_html_tags/1, b64decode_or_throw/1]).
+-export([get_env/3]).
+-export([get_channel_operation_timeout/0]).
+-export([random/1]).
+-export([rpc_call/4, rpc_call/5]).
+-export([get_gc_info/1]).
+-export([group_proplists_by/2]).
+
+%% Horrible macro to use in guards
+-define(IS_BENIGN_EXIT(R),
+ R =:= noproc; R =:= noconnection; R =:= nodedown; R =:= normal;
+ R =:= shutdown).
+
+%%----------------------------------------------------------------------------
+
+-export_type([resource_name/0, thunk/1, channel_or_connection_exit/0]).
+
+-type ok_or_error() :: rabbit_types:ok_or_error(any()).
+-type thunk(T) :: fun(() -> T).
+-type resource_name() :: binary().
+-type channel_or_connection_exit()
+ :: rabbit_types:channel_exit() | rabbit_types:connection_exit().
+-type digraph_label() :: term().
+-type graph_vertex_fun() ::
+ fun (({atom(), [term()]}) -> [{digraph:vertex(), digraph_label()}]).
+-type graph_edge_fun() ::
+ fun (({atom(), [term()]}) -> [{digraph:vertex(), digraph:vertex()}]).
+-type tref() :: {'erlang', reference()} | {timer, timer:tref()}.
+
+-spec method_record_type(rabbit_framing:amqp_method_record()) ->
+ rabbit_framing:amqp_method_name().
+-spec polite_pause() -> 'done'.
+-spec polite_pause(non_neg_integer()) -> 'done'.
+-spec die(rabbit_framing:amqp_exception()) -> channel_or_connection_exit().
+
+-spec quit(integer()) -> no_return().
+
+-spec frame_error(rabbit_framing:amqp_method_name(), binary()) ->
+ rabbit_types:connection_exit().
+-spec amqp_error
+ (rabbit_framing:amqp_exception(), string(), [any()],
+ rabbit_framing:amqp_method_name()) ->
+ rabbit_types:amqp_error().
+-spec protocol_error(rabbit_framing:amqp_exception(), string(), [any()]) ->
+ channel_or_connection_exit().
+-spec protocol_error
+ (rabbit_framing:amqp_exception(), string(), [any()],
+ rabbit_framing:amqp_method_name()) ->
+ channel_or_connection_exit().
+-spec protocol_error(rabbit_types:amqp_error()) ->
+ channel_or_connection_exit().
+-spec type_class(rabbit_framing:amqp_field_type()) -> atom().
+-spec assert_args_equivalence
+ (rabbit_framing:amqp_table(), rabbit_framing:amqp_table(),
+ rabbit_types:r(any()), [binary()]) ->
+ 'ok' | rabbit_types:connection_exit().
+-spec assert_field_equivalence
+ (any(), any(), rabbit_types:r(any()), atom() | binary()) ->
+ 'ok' | rabbit_types:connection_exit().
+-spec equivalence_fail
+ (any(), any(), rabbit_types:r(any()), atom() | binary()) ->
+ rabbit_types:connection_exit().
+-spec dirty_read({atom(), any()}) ->
+ rabbit_types:ok_or_error2(any(), 'not_found').
+-spec table_lookup(rabbit_framing:amqp_table(), binary()) ->
+ 'undefined' | {rabbit_framing:amqp_field_type(), any()}.
+-spec set_table_value
+ (rabbit_framing:amqp_table(), binary(), rabbit_framing:amqp_field_type(),
+ rabbit_framing:amqp_value()) ->
+ rabbit_framing:amqp_table().
+-spec r(rabbit_types:vhost(), K) ->
+ rabbit_types:r3(rabbit_types:vhost(), K, '_')
+ when is_subtype(K, atom()).
+-spec r(rabbit_types:vhost() | rabbit_types:r(atom()), K, resource_name()) ->
+ rabbit_types:r3(rabbit_types:vhost(), K, resource_name())
+ when is_subtype(K, atom()).
+-spec r_arg
+ (rabbit_types:vhost() | rabbit_types:r(atom()), K,
+ rabbit_framing:amqp_table(), binary()) ->
+ undefined |
+ rabbit_types:error(
+ {invalid_type, rabbit_framing:amqp_field_type()}) |
+ rabbit_types:r(K) when is_subtype(K, atom()).
+-spec rs(rabbit_types:r(atom())) -> string().
+-spec enable_cover() -> ok_or_error().
+-spec start_cover([{string(), string()} | string()]) -> 'ok'.
+-spec report_cover() -> 'ok'.
+-spec enable_cover([file:filename() | atom()]) -> ok_or_error().
+-spec report_cover([file:filename() | atom()]) -> 'ok'.
+-spec throw_on_error
+ (atom(), thunk(rabbit_types:error(any()) | {ok, A} | A)) -> A.
+-spec with_exit_handler(thunk(A), thunk(A)) -> A.
+-spec is_abnormal_exit(any()) -> boolean().
+-spec filter_exit_map(fun ((A) -> B), [A]) -> [B].
+-spec with_user(rabbit_types:username(), thunk(A)) -> A.
+-spec execute_mnesia_transaction(thunk(A)) -> A.
+-spec execute_mnesia_transaction(thunk(A), fun ((A, boolean()) -> B)) -> B.
+-spec execute_mnesia_tx_with_tail
+ (thunk(fun ((boolean()) -> B))) -> B | (fun ((boolean()) -> B)).
+-spec ensure_ok(ok_or_error(), atom()) -> 'ok'.
+-spec tcp_name(atom(), inet:ip_address(), rabbit_net:ip_port()) ->
+ atom().
+-spec format_inet_error(atom()) -> string().
+-spec upmap(fun ((A) -> B), [A]) -> [B].
+-spec map_in_order(fun ((A) -> B), [A]) -> [B].
+-spec table_filter
+ (fun ((A) -> boolean()), fun ((A, boolean()) -> 'ok'), atom()) -> [A].
+-spec dirty_read_all(atom()) -> [any()].
+-spec dirty_foreach_key(fun ((any()) -> any()), atom()) ->
+ 'ok' | 'aborted'.
+-spec dirty_dump_log(file:filename()) -> ok_or_error().
+-spec format(string(), [any()]) -> string().
+-spec format_many([{string(), [any()]}]) -> string().
+-spec format_stderr(string(), [any()]) -> 'ok'.
+-spec unfold (fun ((A) -> ({'true', B, A} | 'false')), A) -> {[B], A}.
+-spec ceil(number()) -> integer().
+-spec queue_fold(fun ((any(), B) -> B), B, queue:queue()) -> B.
+-spec sort_field_table(rabbit_framing:amqp_table()) ->
+ rabbit_framing:amqp_table().
+-spec pid_to_string(pid()) -> string().
+-spec string_to_pid(string()) -> pid().
+-spec pid_change_node(pid(), node()) -> pid().
+-spec node_to_fake_pid(atom()) -> pid().
+-spec version_compare(string(), string()) -> 'lt' | 'eq' | 'gt'.
+-spec version_compare
+ (rabbit_semver:version_string(), rabbit_semver:version_string(),
+ ('lt' | 'lte' | 'eq' | 'gte' | 'gt')) -> boolean().
+-spec version_minor_equivalent(rabbit_semver:version_string(), rabbit_semver:version_string()) -> boolean().
+-spec dict_cons(any(), any(), dict:dict()) -> dict:dict().
+-spec orddict_cons(any(), any(), orddict:orddict()) -> orddict:orddict().
+-spec gb_trees_cons(any(), any(), gb_trees:tree()) -> gb_trees:tree().
+-spec gb_trees_fold(fun ((any(), any(), A) -> A), A, gb_trees:tree()) -> A.
+-spec gb_trees_foreach(fun ((any(), any()) -> any()), gb_trees:tree()) ->
+ 'ok'.
+-spec all_module_attributes(atom()) -> [{atom(), atom(), [term()]}].
+-spec build_acyclic_graph
+ (graph_vertex_fun(), graph_edge_fun(), [{atom(), [term()]}]) ->
+ rabbit_types:ok_or_error2(
+ digraph:graph(),
+ {'vertex', 'duplicate', digraph:vertex()} |
+ {'edge',
+ ({bad_vertex, digraph:vertex()} |
+ {bad_edge, [digraph:vertex()]}),
+ digraph:vertex(), digraph:vertex()}).
+-spec const(A) -> thunk(A).
+-spec ntoa(inet:ip_address()) -> string().
+-spec ntoab(inet:ip_address()) -> string().
+-spec is_process_alive(pid()) -> boolean().
+
+-spec pmerge(term(), term(), [term()]) -> [term()].
+-spec plmerge([term()], [term()]) -> [term()].
+-spec pset(term(), term(), [term()]) -> [term()].
+-spec format_message_queue(any(), priority_queue:q()) -> term().
+-spec os_cmd(string()) -> string().
+-spec is_os_process_alive(non_neg_integer()) -> boolean().
+-spec gb_sets_difference(gb_sets:set(), gb_sets:set()) -> gb_sets:set().
+-spec version() -> string().
+-spec otp_release() -> string().
+-spec otp_system_version() -> string().
+-spec platform_and_version() -> string().
+-spec rabbitmq_and_erlang_versions() -> {string(), string()}.
+-spec which_applications() -> [{atom(), string(), string()}].
+-spec sequence_error([({'error', any()} | any())]) ->
+ {'error', any()} | any().
+-spec check_expiry(integer()) -> rabbit_types:ok_or_error(any()).
+-spec base64url(binary()) -> string().
+-spec interval_operation
+ ({atom(), atom(), any()}, float(), non_neg_integer(), non_neg_integer(),
+ non_neg_integer()) ->
+ {any(), non_neg_integer()}.
+-spec ensure_timer(A, non_neg_integer(), non_neg_integer(), any()) -> A.
+-spec stop_timer(A, non_neg_integer()) -> A.
+-spec send_after(non_neg_integer(), pid(), any()) -> tref().
+-spec cancel_timer(tref()) -> 'ok'.
+-spec get_parent() -> pid().
+-spec store_proc_name(atom(), rabbit_types:proc_name()) -> ok.
+-spec store_proc_name(rabbit_types:proc_type_and_name()) -> ok.
+-spec get_proc_name() -> rabbit_types:proc_name().
+-spec moving_average(float(), float(), float(), float() | 'undefined') ->
+ float().
+-spec get_env(atom(), atom(), term()) -> term().
+-spec get_channel_operation_timeout() -> non_neg_integer().
+-spec random(non_neg_integer()) -> non_neg_integer().
+-spec get_gc_info(pid()) -> [any()].
+-spec group_proplists_by(fun((proplists:proplist()) -> any()),
+ list(proplists:proplist())) -> list(list(proplists:proplist())).
+
+
+%%----------------------------------------------------------------------------
+
%% An AMQP method record's name is simply its tuple tag.
method_record_type(MethodRecord) ->
    erlang:element(1, MethodRecord).
+
%% Sleep for the default 3 second grace period.
polite_pause() ->
    polite_pause(3000).

%% Sleep for Millis milliseconds, then return 'done'.
polite_pause(Millis) ->
    receive after Millis -> done end.
+
%% Raise a protocol error whose explanation is just the error name.
die(Error) ->
    protocol_error(Error, "~w", [Error]).

%% Signal that the payload of MethodName could not be decoded.
frame_error(MethodName, BinaryFields) ->
    protocol_error(frame_error, "cannot decode ~w", [BinaryFields], MethodName).

%% Build an #amqp_error{} with a formatted explanation string.
amqp_error(Name, ExplanationFormat, Params, Method) ->
    #amqp_error{name        = Name,
                explanation = format(ExplanationFormat, Params),
                method      = Method}.

protocol_error(Name, ExplanationFormat, Params) ->
    protocol_error(Name, ExplanationFormat, Params, none).

protocol_error(Name, ExplanationFormat, Params, Method) ->
    protocol_error(amqp_error(Name, ExplanationFormat, Params, Method)).

%% Terminate the calling channel/connection process with the error.
protocol_error(#amqp_error{} = Error) ->
    exit(Error).
+
%% Collapse AMQP field types into comparison classes so that, e.g., a
%% 'long' and a 'short' holding equal numbers compare as equivalent.
type_class(T) when T =:= byte; T =:= short; T =:= signedint; T =:= long;
                   T =:= decimal; T =:= unsignedbyte; T =:= unsignedshort;
                   T =:= unsignedint ->
    int;
type_class(T) when T =:= float; T =:= double ->
    float;
type_class(Other) ->
    Other.
+
%% Check that Orig and New agree on every key in Keys; raises a
%% precondition_failed protocol error on the first mismatch.
assert_args_equivalence(Orig, New, Name, Keys) ->
    lists:foreach(
      fun (Key) -> assert_args_equivalence1(Orig, New, Name, Key) end,
      Keys),
    ok.

%% Two entries are equivalent when both are absent, both identical, or
%% when their values compare equal within the same type class.
assert_args_equivalence1(Orig, New, Name, Key) ->
    OrigEntry = table_lookup(Orig, Key),
    NewEntry = table_lookup(New, Key),
    case {OrigEntry, NewEntry} of
        {Same, Same} ->
            ok;
        {{OrigType, OrigVal}, {NewType, NewVal}} ->
            Equivalent = type_class(OrigType) == type_class(NewType)
                andalso OrigVal == NewVal,
            case Equivalent of
                true  -> ok;
                false -> assert_field_equivalence(OrigVal, NewVal, Name, Key)
            end;
        {OrigTypeVal, NewTypeVal} ->
            assert_field_equivalence(OrigTypeVal, NewTypeVal, Name, Key)
    end.
+
%% Classic queues do not necessarily have an x-queue-type field associated with them
%% so we special-case that scenario here
%%
%% Fixes rabbitmq/rabbitmq-common#341
%%
%% Returns 'ok' when the two field values are equivalent, otherwise
%% raises a precondition_failed protocol error via equivalence_fail/4.
%% Clause order matters: identical terms (including two 'undefined's)
%% succeed first, then the x-queue-type/classic special cases.
assert_field_equivalence(_Orig, _Orig, _Name, _Key) ->
    ok;
assert_field_equivalence(undefined, {longstr, <<"classic">>}, _Name, <<"x-queue-type">>) ->
    ok;
assert_field_equivalence({longstr, <<"classic">>}, undefined, _Name, <<"x-queue-type">>) ->
    ok;
assert_field_equivalence(Orig, New, Name, Key) ->
    equivalence_fail(Orig, New, Name, Key).
+
%% Raise the protocol error reported when two argument values differ.
equivalence_fail(Orig, New, Name, Key) ->
    protocol_error(precondition_failed, "inequivalent arg '~s' "
                   "for ~s: received ~s but current is ~s",
                   [Key, rs(Name), val(New), val(Orig)]).

%% Render a (possibly typed) field value for an error message.
val(undefined) ->
    "none";
val({Type, Value}) ->
    Fmt = case is_binary(Value) of
              true  -> "the value '~s' of type '~s'";
              false -> "the value '~p' of type '~s'"
          end,
    format(Fmt, [Value, Type]);
val(Value) when is_binary(Value) ->
    format("'~s'", [Value]);
val(Value) ->
    format("'~p'", [Value]).
+
%% Normally we'd call mnesia:dirty_read/1 here, but that is quite
%% expensive due to general mnesia overheads (figuring out table types
%% and locations, etc). Reading the backing ETS table directly is safe
%% because every table we use this on:
%% - is not the schema table
%% - has a local ram copy
%% - has no indices
dirty_read({Table, Key}) ->
    case ets:lookup(Table, Key) of
        []    -> {error, not_found};
        [Row] -> {ok, Row}
    end.
+
%%
%% Attribute Tables
%%

%% Find Key in an AMQP table ([{Name, Type, Value}]); returns
%% {Type, Value}, or 'undefined' when the key is absent.
%% Uses lists:keyfind/3 instead of the older lists:keysearch/3, which
%% needlessly wraps its result in a {value, _} tuple.
table_lookup(Table, Key) ->
    case lists:keyfind(Key, 1, Table) of
        {_, TypeBin, ValueBin} -> {TypeBin, ValueBin};
        false                  -> undefined
    end.
+
%% Insert or replace Key in an AMQP table, keeping the table sorted.
set_table_value(Table, Key, Type, Value) ->
    Updated = lists:keystore(Key, 1, Table, {Key, Type, Value}),
    sort_field_table(Updated).
+
%% Convert a map (e.g. decoded JSON) into an AMQP table of
%% {Key, Type, Value} triples; a list is assumed to already be a table.
to_amqp_table(Map) when is_map(Map) ->
    Rows = maps:fold(fun (K, V, Acc) -> [to_amqp_table_row(K, V) | Acc] end,
                     [], Map),
    lists:reverse(Rows);
to_amqp_table(List) when is_list(List) ->
    List.

%% Build one table row, inferring the AMQP type from the value.
to_amqp_table_row(K, V) ->
    {Type, Value} = type_val(V),
    {K, Type, Value}.

%% Convert a list into an AMQP array of {Type, Value} pairs.
to_amqp_array(List) ->
    lists:map(fun type_val/1, List).

%% Map an Erlang term to its AMQP {Type, Value} representation.
%% Clause order matters: integers must be tested before the general
%% number clause. JSON null cannot be represented and is rejected.
type_val(V) when is_map(V)     -> {table, to_amqp_table(V)};
type_val(V) when is_list(V)    -> {array, to_amqp_array(V)};
type_val(V) when is_binary(V)  -> {longstr, V};
type_val(V) when is_integer(V) -> {long, V};
type_val(V) when is_number(V)  -> {double, V};
type_val(true)                 -> {bool, true};
type_val(false)                -> {bool, false};
type_val(null)                 -> throw({error, null_not_allowed});
type_val(V)                    -> throw({error, {unhandled_type, V}}).
+
%% Convert an AMQP table ([{Name, Type, Value}]) into a map, decoding
%% nested tables and arrays; 'unknown' passes through untouched.
%% NOTE(review): the #{} pattern matches ANY map, not just the empty
%% one, so a map argument always yields #{} -- confirm this is intended.
amqp_table(unknown)   -> unknown;
amqp_table(undefined) -> amqp_table([]);
amqp_table([])        -> #{};
amqp_table(#{})       -> #{};
amqp_table(Table)     ->
    maps:from_list([{Name, amqp_value(Type, Value)}
                    || {Name, Type, Value} <- Table]).

%% Decode one AMQP-typed value into its plain Erlang form.
amqp_value(array, Vs) ->
    lists:map(fun ({T, V}) -> amqp_value(T, V) end, Vs);
amqp_value(table, V) ->
    amqp_table(V);
amqp_value(decimal, {Before, After}) ->
    %% Render "<Before>.<After>" and parse it back as a float.
    erlang:list_to_float(
      lists:flatten(io_lib:format("~p.~p", [Before, After])));
amqp_value(_Type, V) when is_binary(V) ->
    utf8_safe(V);
amqp_value(_Type, V) ->
    V.
+
+
%%
%% Resources
%%

%% Build a #resource{} in the same vhost as an existing resource, or
%% directly from a vhost path.
r(#resource{virtual_host = VHostPath}, Kind, Name) ->
    r(VHostPath, Kind, Name);
r(VHostPath, Kind, Name) ->
    #resource{virtual_host = VHostPath, kind = Kind, name = Name}.

%% Wildcard resource: matches any name of the given kind in the vhost.
r(VHostPath, Kind) ->
    r(VHostPath, Kind, '_').

%% Build a resource from a name stored in an AMQP table: 'undefined' if
%% the key is absent, {error, {invalid_type, T}} if it is not a longstr.
r_arg(#resource{virtual_host = VHostPath}, Kind, Table, Key) ->
    r_arg(VHostPath, Kind, Table, Key);
r_arg(VHostPath, Kind, Table, Key) ->
    case table_lookup(Table, Key) of
        undefined          -> undefined;
        {longstr, NameBin} -> r(VHostPath, Kind, NameBin);
        {Type, _}          -> {error, {invalid_type, Type}}
    end.

%% Human-readable description of a resource for error messages; topics
%% omit the kind.
rs(#resource{virtual_host = VHostPath, kind = topic, name = Name}) ->
    format("'~s' in vhost '~s'", [Name, VHostPath]);
rs(#resource{virtual_host = VHostPath, kind = Kind, name = Name}) ->
    format("~s '~s' in vhost '~s'", [Kind, Name, VHostPath]).
+
%% Cover-compile the beams under <Dir>/ebin for each given directory
%% (default: the current directory); stops at the first error.
enable_cover() -> enable_cover(["."]).

enable_cover(Dirs) ->
    lists:foldl(
      fun (Dir, ok) ->
              EbinDir = filename:join(lists:concat([Dir]), "ebin"),
              case cover:compile_beam_directory(EbinDir) of
                  {error, _} = Err -> Err;
                  _                -> ok
              end;
          (_Dir, Err) ->
              Err
      end, ok, Dirs).
+
%% Start cover servers on each of the given nodes (node names in the
%% form accepted by rabbit_nodes_common:make/1).
start_cover(NodesS) ->
    Nodes = [rabbit_nodes_common:make(N) || N <- NodesS],
    {ok, _} = cover:start(Nodes),
    ok.
+
%% Write coverage reports for each directory (default: the cwd).
report_cover() -> report_cover(["."]).

report_cover(Dirs) ->
    lists:foreach(fun (Dir) -> report_cover1(lists:concat([Dir])) end, Dirs),
    ok.
+
%% Produce a coverage report for one source tree: writes one HTML file
%% per cover-compiled module plus a summary.txt into <Root>/cover,
%% removing any HTML left over from a previous run.
report_cover1(Root) ->
    Dir = filename:join(Root, "cover"),
    %% ensure_dir/1 only creates parent directories, hence the dummy
    %% "junk" leaf name.
    ok = filelib:ensure_dir(filename:join(Dir, "junk")),
    lists:foreach(fun (F) -> file:delete(F) end,
                  filelib:wildcard(filename:join(Dir, "*.html"))),
    {ok, SummaryFile} = file:open(filename:join(Dir, "summary.txt"), [write]),
    %% Fold over all cover-compiled modules, accumulating covered /
    %% not-covered line totals for the final TOTAL summary line.
    {CT, NCT} =
        lists:foldl(
          fun (M,{CovTot, NotCovTot}) ->
                  {ok, {M, {Cov, NotCov}}} = cover:analyze(M, module),
                  ok = report_coverage_percentage(SummaryFile,
                                                  Cov, NotCov, M),
                  {ok,_} = cover:analyze_to_file(
                             M,
                             filename:join(Dir, atom_to_list(M) ++ ".html"),
                             [html]),
                  {CovTot+Cov, NotCovTot+NotCov}
          end,
          {0, 0},
          lists:sort(cover:modules())),
    ok = report_coverage_percentage(SummaryFile, CT, NCT, 'TOTAL'),
    ok = file:close(SummaryFile),
    ok.
+
%% Write one "<percent> <module>" line to the summary file; a module
%% with no analysable lines counts as 100% covered.
report_coverage_percentage(File, Cov, NotCov, Mod) ->
    Total = Cov + NotCov,
    Percentage = case Total > 0 of
                     true  -> 100.0 * Cov / Total;
                     false -> 100.0
                 end,
    io:fwrite(File, "~6.2f ~p~n", [Percentage, Mod]).
+
%% @doc Halts the emulator returning the given status code to the os.
%% On Windows this function will block indefinitely so as to give the io
%% subsystem time to flush stdout completely.
quit(Status) ->
    case os:type() of
        {unix, _} ->
            halt(Status);
        {win32, _} ->
            init:stop(Status),
            receive after infinity -> ok end
    end.
+
%% Run Thunk: unwrap {ok, _}, pass other results straight through, and
%% convert {error, Reason} into throw({E, Reason}).
throw_on_error(E, Thunk) ->
    case Thunk() of
        {ok, Res}       -> Res;
        {error, Reason} -> throw({E, Reason});
        Res             -> Res
    end.
+
%% Run Thunk, substituting Handler's result if Thunk exits for a benign
%% reason (noproc, noconnection, nodedown, normal, shutdown) -- either
%% directly as {R, _} or nested as a call-style exit {{R, _}, _}.
%% Non-benign exits, errors and throws propagate unchanged.
with_exit_handler(Handler, Thunk) ->
    try
        Thunk()
    catch
        exit:{R, _} when ?IS_BENIGN_EXIT(R) -> Handler();
        exit:{{R, _}, _} when ?IS_BENIGN_EXIT(R) -> Handler()
    end.

%% An exit reason is "abnormal" unless it is one of the benign reasons,
%% possibly carrying extra detail as {Reason, _}.
is_abnormal_exit(R) when ?IS_BENIGN_EXIT(R) -> false;
is_abnormal_exit({R, _}) when ?IS_BENIGN_EXIT(R) -> false;
is_abnormal_exit(_) -> true.

%% Map F over L, silently dropping elements for which F(I) exits with a
%% benign reason (a unique ref marks, then filters out, those slots).
filter_exit_map(F, L) ->
    Ref = make_ref(),
    lists:filter(fun (R) -> R =/= Ref end,
                 [with_exit_handler(
                    fun () -> Ref end,
                    fun () -> F(I) end) || I <- L]).
+
+
%% Wrap Thunk so that, when run inside an mnesia transaction, it only
%% executes if Username exists; otherwise the transaction is aborted
%% with {no_such_user, Username}.
with_user(Username, Thunk) ->
    fun () ->
            case mnesia:read({rabbit_user, Username}) of
                [_U] -> Thunk();
                []   -> mnesia:abort({no_such_user, Username})
            end
    end.
+
execute_mnesia_transaction(TxFun) ->
    %% Making this a sync_transaction allows us to use dirty_read
    %% elsewhere and get a consistent result even when that read
    %% executes on a different node.
    %%
    %% The fun is submitted to the worker_pool to bound the number of
    %% concurrent mnesia transactions. Comparing the dumper's log-write
    %% counter before and after tells us whether the transaction hit
    %% the disk; if it did, the result is tagged 'sync' so that
    %% mnesia_sync:sync() runs before we return, and the tx flavour is
    %% recorded in the file handle cache stats either way.
    case worker_pool:submit(
        fun () ->
            case mnesia:is_transaction() of
                false -> DiskLogBefore = mnesia_dumper:get_log_writes(),
                         Res = mnesia:sync_transaction(TxFun),
                         DiskLogAfter = mnesia_dumper:get_log_writes(),
                         case DiskLogAfter == DiskLogBefore of
                             true -> file_handle_cache_stats:update(
                                       mnesia_ram_tx),
                                     Res;
                             false -> file_handle_cache_stats:update(
                                        mnesia_disk_tx),
                                      {sync, Res}
                         end;
                %% Already inside a transaction: just nest it.
                true -> mnesia:sync_transaction(TxFun)
            end
        end, single) of
        {sync, {atomic, Result}} -> mnesia_sync:sync(), Result;
        {sync, {aborted, Reason}} -> throw({error, Reason});
        {atomic, Result} -> Result;
        {aborted, Reason} -> throw({error, Reason})
    end.
+
%% Like execute_mnesia_transaction/1 with additional Pre- and Post-
%% commit function
%%
%% PrePostCommitFun(Result, true) runs inside the transaction (so it
%% may be retried or rolled back with it); PrePostCommitFun(Result,
%% false) runs exactly once after a successful commit. Calling this
%% from within an existing transaction is a programmer error.
execute_mnesia_transaction(TxFun, PrePostCommitFun) ->
    case mnesia:is_transaction() of
        true -> throw(unexpected_transaction);
        false -> ok
    end,
    PrePostCommitFun(execute_mnesia_transaction(
                       fun () ->
                               Result = TxFun(),
                               PrePostCommitFun(Result, true),
                               Result
                       end), false).
+
%% Like execute_mnesia_transaction/2, but TxFun is expected to return a
%% TailFun which gets called (only) immediately after the tx commit.
%% When already inside a transaction the TailFun is returned uncalled.
execute_mnesia_tx_with_tail(TxFun) ->
    case mnesia:is_transaction() of
        false ->
            TailFun = execute_mnesia_transaction(TxFun),
            TailFun();
        true ->
            execute_mnesia_transaction(TxFun)
    end.
+
%% Pass 'ok' through; rewrap any {error, _} with ErrorTag and throw it.
ensure_ok({error, Reason}, ErrorTag) -> throw({error, {ErrorTag, Reason}});
ensure_ok(ok, _ErrorTag)             -> ok.
+
%% Derive a registered-name style atom for a listener, e.g.
%% 'tcp_listener_1.2.3.4:5672'.
tcp_name(Prefix, IPAddress, Port)
  when is_atom(Prefix) andalso is_number(Port) ->
    Name = format("~w_~s:~w", [Prefix, inet_parse:ntoa(IPAddress), Port]),
    list_to_atom(Name).
+
%% Render an inet error as "Reason (human readable explanation)".
format_inet_error(E) -> format("~w (~s)", [E, format_inet_error0(E)]).

format_inet_error0(Error) ->
    case Error of
        address -> "cannot connect to host/port";
        timeout -> "timed out";
        _       -> inet:format_error(Error)
    end.
+
%% base64:decode throws lots of weird errors. Catch and convert to one
%% that will cause a bad_request.
b64decode_or_throw(B64) ->
    try base64:decode(B64)
    catch error:_ -> throw({error, {not_base64, B64}})
    end.
+
%% Return V unchanged when it is valid UTF-8; otherwise return a
%% placeholder embedding the base64 rendering of the raw bytes.
utf8_safe(V) ->
    try
        _ = xmerl_ucs:from_utf8(V),
        V
    catch exit:{ucs, _} ->
            Enc = split_lines(base64:encode(V)),
            <<"Not UTF-8, base64 is: ", Enc/binary>>
    end.

%% MIME enforces a limit on line length of base 64-encoded data to 76 characters,
%% so insert a newline after every 76 bytes.
split_lines(<<Line:76/binary, Rest/binary>>) ->
    <<Line/binary, $\n, (split_lines(Rest))/binary>>;
split_lines(Short) ->
    Short.
+
+
%% This is a modified version of Luke Gorrie's pmap -
%% https://lukego.livejournal.com/6753.html - that doesn't care about
%% the order in which results are received (so the order of the result
%% list is unspecified).
%%
%% WARNING: This is is deliberately lightweight rather than robust -- if F
%% throws, upmap will hang forever, so make sure F doesn't throw!
upmap(F, L) ->
    Parent = self(),
    Ref = make_ref(),
    Spawn = fun (X) -> spawn(fun () -> Parent ! {Ref, F(X)} end) end,
    Pids = [Spawn(X) || X <- L],
    [receive {Ref, Result} -> Result end || _ <- Pids].
+
%% Sequential map guaranteeing that F is applied to the elements
%% strictly left to right (useful when F has side effects).
map_in_order(F, L) ->
    map_in_order(F, L, []).

map_in_order(_F, [], Acc)     -> lists:reverse(Acc);
map_in_order(F, [H | T], Acc) -> map_in_order(F, T, [F(H) | Acc]).
+
%% Apply a pre-post-commit function to all entries in a table that
%% satisfy a predicate, and return those entries.
%%
%% We ignore entries that have been modified or removed.
%%
%% Each candidate row (obtained by a dirty read) is re-checked inside
%% its own transaction via match_object so that rows changed or deleted
%% concurrently are skipped. Per execute_mnesia_transaction/2,
%% PrePostCommitFun(E, true) runs inside the tx and
%% PrePostCommitFun(E, false) runs once after commit for each kept row.
table_filter(Pred, PrePostCommitFun, TableName) ->
    lists:foldl(
      fun (E, Acc) ->
              case execute_mnesia_transaction(
                     fun () -> mnesia:match_object(TableName, E, read) =/= []
                               andalso Pred(E) end,
                     fun (false, _Tx) -> false;
                         (true, Tx) -> PrePostCommitFun(E, Tx), true
                     end) of
                  false -> Acc;
                  true -> [E | Acc]
              end
      end, [], dirty_read_all(TableName)).
+
%% Return every record of an mnesia table via a dirty select ('$1'
%% matches, and returns, whole records).
dirty_read_all(TableName) ->
    mnesia:dirty_select(TableName, [{'$1',[],['$1']}]).
+
+dirty_foreach_key(F, TableName) ->
+ dirty_foreach_key1(F, TableName, mnesia:dirty_first(TableName)).
+
+dirty_foreach_key1(_F, _TableName, '$end_of_table') ->
+ ok;
+dirty_foreach_key1(F, TableName, K) ->
+ case catch mnesia:dirty_next(TableName, K) of
+ {'EXIT', _} ->
+ aborted;
+ NextKey ->
+ F(K),
+ dirty_foreach_key1(F, TableName, NextKey)
+ end.
+
+dirty_dump_log(FileName) ->
+ {ok, LH} = disk_log:open([{name, dirty_dump_log},
+ {mode, read_only},
+ {file, FileName}]),
+ dirty_dump_log1(LH, disk_log:chunk(LH, start)),
+ disk_log:close(LH).
+
+dirty_dump_log1(_LH, eof) ->
+ io:format("Done.~n");
+dirty_dump_log1(LH, {K, Terms}) ->
+ io:format("Chunk: ~p~n", [Terms]),
+ dirty_dump_log1(LH, disk_log:chunk(LH, K));
+dirty_dump_log1(LH, {K, Terms, BadBytes}) ->
+ io:format("Bad Chunk, ~p: ~p~n", [BadBytes, Terms]),
+ dirty_dump_log1(LH, disk_log:chunk(LH, K)).
+
%% io_lib:format/2 flattened into a plain string.
format(Fmt, Args) -> lists:flatten(io_lib:format(Fmt, Args)).

%% Format a list of {Fmt, Args} pairs, one per line, into a flat string.
format_many(List) ->
    lists:flatten([io_lib:format(F ++ "~n", A) || {F, A} <- List]).

%% Write a formatted message to standard error.
format_stderr(Fmt, Args) ->
    io:format(standard_error, Fmt, Args),
    ok.

%% Anamorphism: repeatedly apply Fun to a seed. Fun returns
%% {true, Element, NextSeed} to continue or false to stop. Returns
%% {Elements, FinalSeed} with the most recently produced element first.
unfold(Fun, Init) ->
    unfold(Fun, [], Init).

unfold(Fun, Acc, Init) ->
    case Fun(Init) of
        {true, E, I} -> unfold(Fun, [E | Acc], I);
        false        -> {Acc, Init}
    end.
+
%% Smallest integer >= N.
%% Fixed: the original added 1 whenever N /= trunc(N), which is wrong
%% for negative non-integers (ceil(-1.5) came out as 0 instead of -1,
%% since trunc/1 rounds towards zero). Behaviour for N >= 0 unchanged.
ceil(N) ->
    T = trunc(N),
    case N > T of
        true  -> T + 1;
        false -> T
    end.
+
%% Parse a boolean from a binary or atom; 'undefined' passes through.
%% Throws {error, {not_boolean, V}} on anything else.
parse_bool(<<"true">>)  -> true;
parse_bool(<<"false">>) -> false;
parse_bool(true)        -> true;
parse_bool(false)       -> false;
parse_bool(undefined)   -> undefined;
parse_bool(V)           -> throw({error, {not_boolean, V}}).

%% Parse an integer from an integer, a float (NB: silently truncated
%% towards zero) or a binary string. Throws {error, {not_integer, S}}
%% when the string does not parse.
parse_int(I) when is_integer(I) -> I;
parse_int(F) when is_number(F)  -> trunc(F);
parse_int(S) ->
    try
        list_to_integer(binary_to_list(S))
    catch error:badarg ->
            throw({error, {not_integer, S}})
    end.
+
+
%% Fold Fun over the items of queue Q, front to back, seeded with Init.
queue_fold(Fun, Init, Q) ->
    lists:foldl(Fun, Init, queue:to_list(Q)).
+
%% Sorts a list of AMQP 0-9-1 table fields as per the AMQP 0-9-1 spec
%% Accepts either a map or a proplist; always returns a proplist sorted
%% by field name.
sort_field_table([]) ->
    [];
sort_field_table(M) when is_map(M) andalso map_size(M) =:= 0 ->
    [];
sort_field_table(Arguments) when is_map(Arguments) ->
    sort_field_table(maps:to_list(Arguments));
sort_field_table(Arguments) ->
    lists:keysort(1, Arguments).

%% Latin-1-only atom-to-binary conversion (list_to_binary fails on code
%% points > 255). NOTE(review): shadows the auto-imported
%% atom_to_binary BIF -- presumably the module header carries a
%% no_auto_import directive; confirm.
atom_to_binary(A) ->
    list_to_binary(atom_to_list(A)).
+
%% This provides a string representation of a pid that is the same
%% regardless of what node we are running on. The representation also
%% permits easy identification of the pid's node.
pid_to_string(Pid) when is_pid(Pid) ->
    {Node, Cre, Id, Ser} = decompose_pid(Pid),
    format("<~s.~B.~B.~B>", [Node, Cre, Id, Ser]).

%% inverse of above
%% Throws {error, {invalid_pid_syntax, Str}} when Str does not look
%% like "<node.Creation.Id.Serial>".
string_to_pid(Str) ->
    Err = {error, {invalid_pid_syntax, Str}},
    %% The \ before the trailing $ is only there to keep emacs
    %% font-lock from getting confused.
    case re:run(Str, "^<(.*)\\.(\\d+)\\.(\\d+)\\.(\\d+)>\$",
                [{capture, all_but_first, list}]) of
        {match, [NodeStr, CreStr, IdStr, SerStr]} ->
            [Cre, Id, Ser] = lists:map(fun list_to_integer/1,
                                       [CreStr, IdStr, SerStr]),
            compose_pid(list_to_atom(NodeStr), Cre, Id, Ser);
        nomatch ->
            throw(Err)
    end.

%% Re-home Pid on NewNode, keeping creation/id/serial intact.
pid_change_node(Pid, NewNode) ->
    {_OldNode, Cre, Id, Ser} = decompose_pid(Pid),
    compose_pid(NewNode, Cre, Id, Ser).

%% node(node_to_fake_pid(Node)) =:= Node.
node_to_fake_pid(Node) ->
    compose_pid(Node, 0, 0, 0).
+
%% Extract {Node, Creation, Id, Serial} from a pid by picking apart its
%% external term encoding. The node-name prefix length is recovered
%% from the total size, since the trailing fields have fixed widths.
decompose_pid(Pid) when is_pid(Pid) ->
    %% see http://erlang.org/doc/apps/erts/erl_ext_dist.html (8.10 and
    %% 8.7)
    Node = node(Pid),
    BinPid0 = term_to_binary(Pid),
    case BinPid0 of
        %% NEW_PID_EXT (tag 88): 32-bit creation field
        <<131, 88, BinPid/bits>> ->
            NodeByteSize = byte_size(BinPid0) - 14,
            <<_NodePrefix:NodeByteSize/binary, Id:32, Ser:32, Cre:32>> = BinPid,
            {Node, Cre, Id, Ser};
        %% PID_EXT (tag 103): 8-bit creation field
        <<131, 103, BinPid/bits>> ->
            NodeByteSize = byte_size(BinPid0) - 11,
            <<_NodePrefix:NodeByteSize/binary, Id:32, Ser:32, Cre:8>> = BinPid,
            {Node, Cre, Id, Ser}
    end.

%% Inverse of decompose_pid/1: splice the encoded node name into a
%% NEW_PID_EXT external term and decode it back into a pid.
compose_pid(Node, Cre, Id, Ser) ->
    <<131, NodeEnc/binary>> = term_to_binary(Node),
    binary_to_term(<<131, 88, NodeEnc/binary, Id:32, Ser:32, Cre:32>>).
+
%% Compare two version strings under the given relational operator,
%% delegating to rabbit_semver.
version_compare(A, B, eq)  -> rabbit_semver:eql(A, B);
version_compare(A, B, lt)  -> rabbit_semver:lt(A, B);
version_compare(A, B, lte) -> rabbit_semver:lte(A, B);
version_compare(A, B, gt)  -> rabbit_semver:gt(A, B);
version_compare(A, B, gte) -> rabbit_semver:gte(A, B).

%% Three-way comparison: 'lt' | 'gt' | 'eq'.
version_compare(A, B) ->
    case version_compare(A, B, lt) of
        true  -> lt;
        false -> case version_compare(A, B, gt) of
                     true  -> gt;
                     false -> eq
                 end
    end.
+
+%% For versions starting from 3.7.x:
+%% Versions are considered compatible (except for special cases; see
+%% below). The feature flags will determine if they are actually
+%% compatible.
+%%
+%% For versions up-to 3.7.x:
+%% a.b.c and a.b.d match, but a.b.c and a.d.e don't. If
+%% versions do not match that pattern, just compare them.
+%%
+%% Special case for 3.6.6 because it introduced a change to the schema.
+%% e.g. 3.6.6 is not compatible with 3.6.5
+%% This special case can be removed once 3.6.x reaches EOL
%% See the comment block above for the compatibility rules.
version_minor_equivalent(A, B) ->
    {{MajA, MinA, PatchA, _}, _} = rabbit_semver:normalize(rabbit_semver:parse(A)),
    {{MajB, MinB, PatchB, _}, _} = rabbit_semver:normalize(rabbit_semver:parse(B)),

    case {MajA, MinA, MajB, MinB} of
        {3, 6, 3, 6} ->
            %% 3.6.6 changed the schema: compatible only when both
            %% versions sit on the same side of the 3.6.6 boundary.
            %% (The previous 'if' expressed this with two exhaustive
            %% integer guards plus an unreachable 'true -> false'
            %% clause; this is the same predicate without dead code.)
            (PatchA >= 6) =:= (PatchB >= 6);
        _
          when (MajA < 3 orelse (MajA =:= 3 andalso MinA =< 6))
               orelse
               (MajB < 3 orelse (MajB =:= 3 andalso MinB =< 6)) ->
            %% Pre-3.7 versions require an exact major.minor match.
            MajA =:= MajB andalso MinA =:= MinB;
        _ ->
            %% Starting with RabbitMQ 3.7.x, we consider this
            %% minor release series and all subsequent series to
            %% be possibly compatible, based on just the version.
            %% The real compatibility check is deferred to the
            %% rabbit_feature_flags module in rabbitmq-server.
            true
    end.
+
+%% This is the same as above except that e.g. 3.7.x and 3.8.x are
+%% considered incompatible (as if there were no feature flags). This is
+%% useful to check plugin compatibility (`broker_versions_requirement`
+%% field in plugins).
+
%% Strict variant (see comment above): minor series must match exactly.
strict_version_minor_equivalent(A, B) ->
    {{MajA, MinA, PatchA, _}, _} = rabbit_semver:normalize(rabbit_semver:parse(A)),
    {{MajB, MinB, PatchB, _}, _} = rabbit_semver:normalize(rabbit_semver:parse(B)),

    case {MajA, MinA, MajB, MinB} of
        %% Same 3.6.6 schema-change special case as in
        %% version_minor_equivalent/2. (The previous 'if' carried an
        %% unreachable 'true -> false' clause; removed.)
        {3, 6, 3, 6} -> (PatchA >= 6) =:= (PatchB >= 6);
        %% No feature-flag escape hatch here: same major.minor or bust.
        _            -> MajA =:= MajB andalso MinA =:= MinB
    end.
+
%% Prepend Value to the list stored under Key in a dict, creating the
%% entry as [Value] when absent.
dict_cons(Key, Value, Dict) ->
    dict:update(Key, fun (List) -> [Value | List] end, [Value], Dict).

%% As dict_cons/3, for orddicts.
orddict_cons(Key, Value, Dict) ->
    orddict:update(Key, fun (List) -> [Value | List] end, [Value], Dict).

%% As dict_cons/3, for maps.
maps_cons(Key, Value, Map) ->
    maps:update_with(Key, fun (List) -> [Value | List] end, [Value], Map).

%% As dict_cons/3, for gb_trees.
gb_trees_cons(Key, Value, Tree) ->
    case gb_trees:lookup(Key, Tree) of
        {value, Values} -> gb_trees:update(Key, [Value | Values], Tree);
        none            -> gb_trees:insert(Key, [Value], Tree)
    end.

%% Fold Fun(Key, Val, Acc) over Tree in key order.
gb_trees_fold(Fun, Acc, Tree) ->
    gb_trees_fold1(Fun, Acc, gb_trees:next(gb_trees:iterator(Tree))).

gb_trees_fold1(_Fun, Acc, none) ->
    Acc;
gb_trees_fold1(Fun, Acc, {Key, Val, It}) ->
    gb_trees_fold1(Fun, Fun(Key, Val, Acc), gb_trees:next(It)).

%% Run Fun(Key, Val) for its side effects over Tree in key order.
gb_trees_foreach(Fun, Tree) ->
    gb_trees_fold(fun (Key, Val, Acc) -> Fun(Key, Val), Acc end, ok, Tree).

%% Module:module_info(attributes), or [] (with a warning on stdout)
%% when the module cannot be loaded. The _:undef pattern matches the
%% error:undef raised for a missing module.
module_attributes(Module) ->
    try
        Module:module_info(attributes)
    catch
        _:undef ->
            io:format("WARNING: module ~p not found, so not scanned for boot steps.~n",
                      [Module]),
            []
    end.
+
%% Collect attribute Name from the modules of all loaded applications.
all_module_attributes(Name) ->
    Apps = [App || {App, _, _} <- application:loaded_applications()],
    module_attributes_from_apps(Name, Apps).

%% As all_module_attributes/1, restricted to RabbitMQ applications.
rabbitmq_related_module_attributes(Name) ->
    Apps = rabbitmq_related_apps(),
    module_attributes_from_apps(Name, Apps).

rabbitmq_related_apps() ->
    [App
     || {App, _, _} <- application:loaded_applications(),
        %% Only select RabbitMQ-related applications.
        App =:= rabbit_common orelse
            App =:= rabbitmq_prelaunch orelse
            App =:= rabbit orelse
            lists:member(
              rabbit,
              element(2, application:get_key(App, applications)))].

%% Return [{App, Module, Attributes}] for every module of Apps that
%% carries attribute Name.
module_attributes_from_apps(Name, Apps) ->
    %% usort both dedupes and yields a deterministic scan order
    Targets =
        lists:usort(
          lists:append(
            [[{App, Module} || Module <- Modules] ||
                App <- Apps,
                {ok, Modules} <- [application:get_key(App, modules)]])),
    lists:foldl(
      fun ({App, Module}, Acc) ->
              case lists:append([Atts || {N, Atts} <- module_attributes(Module),
                                         N =:= Name]) of
                  []   -> Acc;
                  Atts -> [{App, Module, Atts} | Acc]
              end
      end, [], Targets).
+
%% Build a digraph from Graph elements; VertexFun/EdgeFun map an
%% element to its {Vertex, Label} pairs / {From, To} edges. Returns
%% {ok, Digraph}, or {error, Reason} on a duplicate vertex or a bad
%% edge (including cycles, rejected by the 'acyclic' option). The
%% digraph is ETS-backed and mutable, so it is deleted before an error
%% return.
build_acyclic_graph(VertexFun, EdgeFun, Graph) ->
    G = digraph:new([acyclic]),
    try
        _ = [case digraph:vertex(G, Vertex) of
                 false -> digraph:add_vertex(G, Vertex, Label);
                 _     -> ok = throw({graph_error, {vertex, duplicate, Vertex}})
             end || GraphElem       <- Graph,
                    {Vertex, Label} <- VertexFun(GraphElem)],
        [case digraph:add_edge(G, From, To) of
             {error, E} -> throw({graph_error, {edge, E, From, To}});
             _          -> ok
         end || GraphElem  <- Graph,
                {From, To} <- EdgeFun(GraphElem)],
        {ok, G}
    %% NB: 'catch Pattern ->' without a class catches throws only,
    %% which is exactly what we want here.
    catch {graph_error, Reason} ->
            true = digraph:delete(G),
            {error, Reason}
    end.

%% A constant function returning X.
const(X) -> fun () -> X end.
+
%% Format IPv4-mapped IPv6 addresses as IPv4, since they're what we see
%% when IPv6 is enabled but not used (i.e. 99% of the time).
ntoa({0,0,0,0,0,16#ffff,AB,CD}) ->
    inet_parse:ntoa({AB bsr 8, AB rem 256, CD bsr 8, CD rem 256});
ntoa(IP) ->
    inet_parse:ntoa(IP).

%% As ntoa/1, but wrap IPv6 addresses in brackets so the result can be
%% joined with ":Port" unambiguously.
ntoab(IP) ->
    Str = ntoa(IP),
    case string:str(Str, ":") of
        0 -> Str;
        _ -> "[" ++ Str ++ "]"
    end.

%% We try to avoid reconnecting to down nodes here; this is used in a
%% loop in rabbit_amqqueue:on_node_down/1 and any delays we incur
%% would be bad news.
%%
%% See also rabbit_mnesia:is_process_alive/1 which also requires the
%% process be in the same running cluster as us (i.e. not partitioned
%% or some random node).
is_process_alive(Pid) when node(Pid) =:= node() ->
    erlang:is_process_alive(Pid);
is_process_alive(Pid) ->
    Node = node(Pid),
    %% only rpc into nodes we are already connected to; the =:= true
    %% comparison makes {badrpc, _} results count as "not alive"
    lists:member(Node, [node() | nodes(connected)]) andalso
        rpc:call(Node, erlang, is_process_alive, [Pid]) =:= true.
+
-spec pget(term(), list() | map()) -> term().
%% Look up Key in a map or proplist; 'undefined' when absent. NB: an
%% entry whose stored value is 'undefined' is indistinguishable from a
%% miss -- use pget_or_die/2 if that matters.
pget(Key, Map) when is_map(Map) ->
    maps:get(Key, Map, undefined);

pget(Key, PropList) ->
    case lists:keyfind(Key, 1, PropList) of
        {Key, Value} -> Value;
        false        -> undefined
    end.
+
-spec pget(term(), list() | map(), term()) -> term().
%% As pget/2, but with a caller-supplied default for missing keys.
pget(Key, Map, Default) when is_map(Map) ->
    maps:get(Key, Map, Default);

pget(Key, PropList, Default) ->
    case lists:keyfind(Key, 1, PropList) of
        {Key, Value} -> Value;
        false        -> Default
    end.
+
-spec pget_or_die(term(), list() | map()) -> term() | no_return().
%% As pget/2, but exits with {error, key_missing, K} when K is absent.
pget_or_die(K, M) when is_map(M) ->
    case maps:find(K, M) of
        error   -> exit({error, key_missing, K});
        {ok, V} -> V
    end;

pget_or_die(K, P) ->
    case proplists:get_value(K, P) of
        undefined -> exit({error, key_missing, K});
        V         -> V
    end.

%% Apply UpdateFun to the value stored under K in proplist P.
%% NB: returns 'undefined' (not P) when K is absent.
pupdate(K, UpdateFun, P) ->
    case lists:keyfind(K, 1, P) of
        {K, V} ->
            pset(K, UpdateFun(V), P);
        _ ->
            undefined
    end.

%% property merge
%% Add {Key, Val} unless Key is already defined in List.
pmerge(Key, Val, List) ->
    case proplists:is_defined(Key, List) of
        true -> List;
        _    -> [{Key, Val} | List]
    end.

%% proplists merge
plmerge(P1, P2) ->
    %% Value from P1 suppresses value from P2
    maps:to_list(maps:merge(maps:from_list(P2),
                            maps:from_list(P1))).
+
%% Group a list of proplists into sub-lists keyed by KeyFun(Proplist);
%% returns only the groups (keys are discarded), in no particular
%% order.
group_proplists_by(KeyFun, ListOfPropLists) ->
    Grouped =
        lists:foldl(
          fun (Props, Acc) ->
                  maps:update_with(KeyFun(Props),
                                   fun (Existing) -> [Props | Existing] end,
                                   [Props], Acc)
          end, #{}, ListOfPropLists),
    maps:values(Grouped).
+
%% Set Key to Value in proplist List, dropping any previous entries.
pset(Key, Value, List) -> [{Key, Value} | proplists:delete(Key, List)].

%% Render a priority_queue mailbox as {Len, Contents}; queues longer
%% than 100 entries are summarised as counts per {Priority, Shape} to
%% keep the output bounded. NOTE(review): appears to be a format_status
%% helper (the _Opt argument matches that calling convention) --
%% confirm against callers.
format_message_queue(_Opt, MQ) ->
    Len = priority_queue:len(MQ),
    {Len,
     case Len > 100 of
         false -> priority_queue:to_list(MQ);
         true  -> {summary,
                   maps:to_list(
                     lists:foldl(
                       fun ({P, V}, Counts) ->
                               maps:update_with(
                                 {P, format_message_queue_entry(V)},
                                 fun (Old) -> Old + 1 end, 1, Counts)
                       end, maps:new(), priority_queue:to_list(MQ)))}
     end}.

%% Abstract a message to its shape: atoms kept as-is, tuples recursed
%% into, anything else collapsed to '_'.
format_message_queue_entry(V) when is_atom(V) ->
    V;
format_message_queue_entry(V) when is_tuple(V) ->
    list_to_tuple([format_message_queue_entry(E) || E <- tuple_to_list(V)]);
format_message_queue_entry(_V) ->
    '_'.
+
%% Same as rpc:multicall/4 but concatenates all results.
%% M, F, A is expected to return a list. If it does not,
%% its return value will be wrapped in a list.
-spec append_rpc_all_nodes([node()], atom(), atom(), [any()]) -> [any()].
append_rpc_all_nodes(Nodes, M, F, A) ->
    do_append_rpc_all_nodes(Nodes, M, F, A, ?RPC_INFINITE_TIMEOUT).

-spec append_rpc_all_nodes([node()], atom(), atom(), [any()], timeout()) -> [any()].
append_rpc_all_nodes(Nodes, M, F, A, Timeout) ->
    do_append_rpc_all_nodes(Nodes, M, F, A, Timeout).

do_append_rpc_all_nodes(Nodes, M, F, A, ?RPC_INFINITE_TIMEOUT) ->
    {ResL, _} = rpc:multicall(Nodes, M, F, A, ?RPC_INFINITE_TIMEOUT),
    process_rpc_multicall_result(ResL);
do_append_rpc_all_nodes(Nodes, M, F, A, Timeout) ->
    %% with a finite timeout rpc:multicall can raise internal_error;
    %% treat that as "no node replied"
    {ResL, _} = try
                    rpc:multicall(Nodes, M, F, A, Timeout)
                catch
                    error:internal_error -> {[], Nodes}
                end,
    process_rpc_multicall_result(ResL).

%% Drop badrpc results, keep lists as-is, wrap anything else in a list.
process_rpc_multicall_result(ResL) ->
    lists:append([case Res of
                      {badrpc, _}         -> [];
                      Xs when is_list(Xs) -> Xs;
                      %% wrap it in a list
                      Other               -> [Other]
                  end || Res <- ResL]).
+
%% Run a shell command and return its output as a string.
%% On Unix, throws {command_not_found, Exec} when the executable is not
%% on PATH instead of returning the shell's "not found" message.
os_cmd(Command) ->
    case os:type() of
        {win32, _} ->
            %% Clink workaround; see
            %% https://code.google.com/p/clink/issues/detail?id=141
            os:cmd(" " ++ Command);
        _ ->
            %% Don't just return "/bin/sh: <cmd>: not found" if not found
            Exec = hd(string:tokens(Command, " ")),
            case os:find_executable(Exec) of
                false -> throw({command_not_found, Exec});
                _     -> os:cmd(Command)
            end
    end.
+
%% Check whether an OS pid is alive -- and, on Windows, that it is an
%% Erlang VM ("erl"/"werl"/"erl.exe"). Uses ps(1) on Unix and
%% PowerShell or tasklist on Windows.
is_os_process_alive(Pid) ->
    with_os([{unix, fun () ->
                            run_ps(Pid) =:= 0
                    end},
             {win32, fun () ->
                             PidS = rabbit_data_coercion:to_list(Pid),
                             case os:find_executable("tasklist.exe") of
                                 false ->
                                     Cmd =
                                         format(
                                           "PowerShell -Command "
                                           "\"(Get-Process -Id ~s).ProcessName\"",
                                           [PidS]),
                                     Res =
                                         os_cmd(Cmd ++ " 2>&1") -- [$\r, $\n],
                                     case Res of
                                         "erl"  -> true;
                                         "werl" -> true;
                                         _      -> false
                                     end;
                                 _ ->
                                     Cmd =
                                         "tasklist /nh /fi "
                                         "\"pid eq " ++ PidS ++ "\"",
                                     Res = os_cmd(Cmd ++ " 2>&1"),
                                     match =:= re:run(Res,
                                                      "erl\\.exe",
                                                      [{capture, none}])
                             end
                     end}]).

%% Dispatch on OS family; Handlers is a proplist of {Family, fun/0}.
%% Throws {unsupported_os, Family} when no handler matches.
with_os(Handlers) ->
    {OsFamily, _} = os:type(),
    case proplists:get_value(OsFamily, Handlers) of
        undefined -> throw({unsupported_os, OsFamily});
        Handler   -> Handler()
    end.

%% Run "ps -p Pid" and return its exit status (0 iff the pid exists).
run_ps(Pid) ->
    Cmd = "ps -p " ++ rabbit_data_coercion:to_list(Pid),
    Port = erlang:open_port({spawn, Cmd},
                            [exit_status, {line, 16384},
                             use_stdio, stderr_to_stdout]),
    exit_loop(Port).

%% Drain port output until the exit status message arrives.
exit_loop(Port) ->
    receive
        {Port, {exit_status, Rc}} -> Rc;
        {Port, _}                 -> exit_loop(Port)
    end.
+
%% Set difference: S1 \ S2.
gb_sets_difference(S1, S2) ->
    gb_sets:fold(fun gb_sets:delete_any/2, S1, S2).

%% RabbitMQ server version string (requires the 'rabbit' application
%% to be loaded; crashes otherwise).
version() ->
    {ok, VSN} = application:get_key(rabbit, vsn),
    VSN.

%% See https://www.erlang.org/doc/system_principles/versions.html
otp_release() ->
    File = filename:join([code:root_dir(), "releases",
                          erlang:system_info(otp_release), "OTP_VERSION"]),
    case file:read_file(File) of
        {ok, VerBin} ->
            %% 17.0 or later, we need the file for the minor version
            string:strip(binary_to_list(VerBin), both, $\n);
        {error, _} ->
            %% R16B03 or earlier (no file, otp_release is correct)
            %% or we couldn't read the file (so this is best we can do)
            erlang:system_info(otp_release)
    end.

%% e.g. "Erlang/OTP 24.3.4".
platform_and_version() ->
    string:join(["Erlang/OTP", otp_release()], " ").

%% Full system version banner, stripped of its trailing newline.
otp_system_version() ->
    string:strip(erlang:system_info(system_version), both, $\n).

rabbitmq_and_erlang_versions() ->
    {version(), otp_release()}.

%% application:which_applications(infinity) is dangerous, since it can
%% cause deadlocks on shutdown. So we have to use a timeout variant,
%% but w/o creating spurious timeout errors. The timeout value is twice
%% that of gen_server:call/2.
which_applications() ->
    try
        application:which_applications(10000)
    catch
        exit:{timeout, _} -> []
    end.

%% First {error, _} in the list, or the final element if there is none.
sequence_error([T])                      -> T;
sequence_error([{error, _} = Error | _]) -> Error;
sequence_error([_ | Rest])               -> sequence_error(Rest).

%% Expiry values must be non-negative.
check_expiry(N) when N < 0 -> {error, {value_negative, N}};
check_expiry(_N)           -> ok.
+
%% RFC 4648 "URL and filename safe" variant of standard base64:
%% '+' becomes '-', '/' becomes '_', and padding '=' is stripped.
base64url(In) ->
    lists:filtermap(fun ($+) -> {true, $-};
                        ($/) -> {true, $_};
                        ($=) -> false;
                        (C)  -> {true, C}
                    end, base64:encode_to_string(In)).
+
%% Ideally, you'd want Fun to run every IdealInterval. but you don't
%% want it to take more than MaxRatio of IdealInterval. So if it takes
%% more then you want to run it less often. So we time how long it
%% takes to run, and then suggest how long you should wait before
%% running it again with a user specified max interval. Times are in millis.
%% NOTE(review): the {false, true} pair has no clause; this assumes
%% LastInterval >= IdealInterval always holds (which the {false, false}
%% branch preserves) -- confirm initial callers seed it that way.
interval_operation({M, F, A}, MaxRatio, MaxInterval, IdealInterval, LastInterval) ->
    {Micros, Res} = timer:tc(M, F, A),
    {Res, case {Micros > 1000 * (MaxRatio * IdealInterval),
                Micros > 1000 * (MaxRatio * LastInterval)} of
              %% slow on both scales: back off, capped at MaxInterval
              {true,  true}  -> lists:min([MaxInterval,
                                           round(LastInterval * 1.5)]);
              {true,  false} -> LastInterval;
              %% fast again: creep back down towards IdealInterval
              {false, false} -> lists:max([IdealInterval,
                                           round(LastInterval / 1.5)])
          end}.

%% Start a timer delivering Msg to self() unless the state tuple
%% already holds one at position Idx.
ensure_timer(State, Idx, After, Msg) ->
    case element(Idx, State) of
        undefined -> TRef = send_after(After, self(), Msg),
                     setelement(Idx, State, TRef);
        _         -> State
    end.

%% Cancel and clear the timer stored at position Idx, if any.
stop_timer(State, Idx) ->
    case element(Idx, State) of
        undefined -> State;
        TRef      -> cancel_timer(TRef),
                     setelement(Idx, State, undefined)
    end.

%% timer:send_after/3 goes through a single timer process but allows
%% long delays. erlang:send_after/3 does not have a bottleneck but
%% only allows max 2^32-1 millis.
-define(MAX_ERLANG_SEND_AFTER, 4294967295).
%% Returns a tagged ref so cancel_timer/1 knows which API cancels it.
send_after(Millis, Pid, Msg) when Millis > ?MAX_ERLANG_SEND_AFTER ->
    {ok, Ref} = timer:send_after(Millis, Pid, Msg),
    {timer, Ref};
send_after(Millis, Pid, Msg) ->
    {erlang, erlang:send_after(Millis, Pid, Msg)}.

cancel_timer({erlang, Ref}) -> _ = erlang:cancel_timer(Ref),
                               ok;
cancel_timer({timer, Ref})  -> {ok, cancel} = timer:cancel(Ref),
                               ok.

%% Record a {Type, Name} label for this process in its process
%% dictionary, retrievable via get_proc_name/0.
store_proc_name(Type, ProcName) -> store_proc_name({Type, ProcName}).
store_proc_name(TypeProcName)   -> put(process_name, TypeProcName).

%% {ok, Name} if store_proc_name was called, else 'undefined'.
get_proc_name() ->
    case get(process_name) of
        undefined ->
            undefined;
        {_Type, Name} ->
            {ok, Name}
    end.
+
%% Thin wrapper kept for callers that predate application:get_env/3
%% (available since OTP R16B01, which this codebase is well past);
%% behaviour is identical: Def is returned when Key is unset.
get_env(Application, Key, Def) ->
    application:get_env(Application, Key, Def).
+
%% Milliseconds; configurable via the rabbit application environment.
get_channel_operation_timeout() ->
    %% Default channel_operation_timeout set to net_ticktime + 10s to
    %% give allowance for any down messages to be received first,
    %% whenever it is used for cross-node calls with timeouts.
    Default = (net_kernel:get_net_ticktime() + 10) * 1000,
    application:get_env(rabbit, channel_operation_timeout, Default).

%% Exponentially-weighted moving average with a configurable half-life;
%% 'undefined' seeds the average with the first sample.
moving_average(_Time, _HalfLife, Next, undefined) ->
    Next;
%% We want the Weight to decrease as Time goes up (since Weight is the
%% weight for the current sample, not the new one), so that the moving
%% average decays at the same speed regardless of how long the time is
%% between samplings. So we want Weight = math:exp(Something), where
%% Something turns out to be negative.
%%
%% We want to determine Something here in terms of the Time taken
%% since the last measurement, and a HalfLife. So we want Weight =
%% math:exp(Time * Constant / HalfLife). What should Constant be? We
%% want Weight to be 0.5 when Time = HalfLife.
%%
%% Plug those numbers in and you get 0.5 = math:exp(Constant). Take
%% the log of each side and you get math:log(0.5) = Constant.
moving_average(Time, HalfLife, Next, Current) ->
    Weight = math:exp(Time * math:log(0.5) / HalfLife),
    Next * (1 - Weight) + Current * Weight.

%% Uniform random integer in 1..N.
random(N) ->
    rand:uniform(N).

-spec escape_html_tags(string()) -> binary().

%% Minimal HTML escaping: '<', '>' and '&' only.
escape_html_tags(S) ->
    escape_html_tags(rabbit_data_coercion:to_list(S), []).


-spec escape_html_tags(string(), string()) -> binary().

escape_html_tags([], Acc) ->
    rabbit_data_coercion:to_binary(lists:reverse(Acc));
%% lists:reverse/2 prepends the reversed entity onto Acc, so overall
%% order is restored by the final reverse above.
escape_html_tags("<" ++ Rest, Acc) ->
    escape_html_tags(Rest, lists:reverse("&lt;", Acc));
escape_html_tags(">" ++ Rest, Acc) ->
    escape_html_tags(Rest, lists:reverse("&gt;", Acc));
escape_html_tags("&" ++ Rest, Acc) ->
    escape_html_tags(Rest, lists:reverse("&amp;", Acc));
escape_html_tags([C | Rest], Acc) ->
    escape_html_tags(Rest, [C | Acc]).

%% If the server we are talking to has non-standard net_ticktime, and
%% our connection lasts a while, we could get disconnected because of
%% a timeout unless we set our ticktime to be the same. So let's do
%% that.
%% TODO: do not use an infinite timeout!
-spec rpc_call(node(), atom(), atom(), [any()]) -> any() | {badrpc, term()}.
rpc_call(Node, Mod, Fun, Args) ->
    rpc_call(Node, Mod, Fun, Args, ?RPC_INFINITE_TIMEOUT).

-spec rpc_call(node(), atom(), atom(), [any()], infinity | non_neg_integer()) -> any() | {badrpc, term()}.
rpc_call(Node, Mod, Fun, Args, Timeout) ->
    %% probe the remote ticktime, adopt it locally, then do the real call
    case rpc:call(Node, net_kernel, get_net_ticktime, [], Timeout) of
        {badrpc, _} = E -> E;
        ignored ->
            rpc:call(Node, Mod, Fun, Args, Timeout);
        {ongoing_change_to, NewValue} ->
            _ = net_kernel:set_net_ticktime(NewValue, 0),
            rpc:call(Node, Mod, Fun, Args, Timeout);
        Time ->
            _ = net_kernel:set_net_ticktime(Time, 0),
            rpc:call(Node, Mod, Fun, Args, Timeout)
    end.

%% Delegate to the runtime-specific implementation.
get_gc_info(Pid) ->
    rabbit_runtime:get_gc_info(Pid).
+
%% -------------------------------------------------------------------------
%% Begin copypasta from gen_server2.erl

%% Parent pid as recorded by proc_lib in '$ancestors'; exits if this
%% process was not started via proc_lib.
get_parent() ->
    case get('$ancestors') of
        [Parent | _] when is_pid(Parent)  -> Parent;
        [Parent | _] when is_atom(Parent) -> name_to_pid(Parent);
        _ -> exit(process_was_not_started_by_proc_lib)
    end.

%% Resolve a locally- or globally-registered name; exits when unknown.
name_to_pid(Name) ->
    case whereis(Name) of
        undefined -> case whereis_name(Name) of
                         undefined -> exit(could_not_find_registered_name);
                         Pid       -> Pid
                     end;
        Pid       -> Pid
    end.

%% Peek directly into the 'global' module's ETS table; for local pids
%% also check liveness so a stale registration reads as 'undefined'.
whereis_name(Name) ->
    case ets:lookup(global_names, Name) of
        [{_Name, Pid, _Method, _RPid, _Ref}] ->
            if node(Pid) == node() -> case erlang:is_process_alive(Pid) of
                                          true  -> Pid;
                                          false -> undefined
                                      end;
               true                -> Pid
            end;
        [] -> undefined
    end.

%% End copypasta from gen_server2.erl
%% -------------------------------------------------------------------------
diff --git a/deps/rabbit_common/src/rabbit_msg_store_index.erl b/deps/rabbit_common/src/rabbit_msg_store_index.erl
new file mode 100644
index 0000000000..ce9abe97a6
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_msg_store_index.erl
@@ -0,0 +1,89 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_msg_store_index).
+
+-include("rabbit_msg_store.hrl").
+
+%% Behaviour module to provide pluggable message store index.
+%% The index is used to locate message on disk and for reference-counting.
+
+%% The message store makes several additional assumptions about the
+%% performance and atomicity of some operations. See the comments on each callback.
+
+-type(dir() :: string()).
+-type(index_state() :: any()).
+-type(fieldpos() :: non_neg_integer()).
+-type(fieldvalue() :: any()).
+-type(msg_location() :: #msg_location{}).
+
+
+%% There are two ways of starting an index:
+%% - `new` - starts a clean index
+%% - `recover` - attempts to read a saved index
+%% In both cases the old saved state should be deleted from directory.
+
+%% Initialize a fresh index state for msg store directory.
+-callback new(dir()) -> index_state().
+%% Try to recover gracefully stopped index state.
+-callback recover(dir()) -> rabbit_types:ok_or_error2(index_state(), any()).
+%% Gracefully shutdown the index.
+%% Should save the index state, which will be loaded by the 'recover' function.
+-callback terminate(index_state()) -> any().
+
+%% Lookup an entry in the index.
+%% Is called concurrently by the msg_store, its clients and GC processes.
+%% This function is called multiple times for each message store operation.
+%% Message store tries to avoid writing messages on disk if consumers can
+%% process them fast, so there will be a lot of lookups for non-existent
+%% entries, which should be as fast as possible.
+-callback lookup(rabbit_types:msg_id(), index_state()) -> ('not_found' | msg_location()).
+
+%% Insert an entry into the index.
+%% Is called by a msg_store process only.
+%% This function can exit if there is already an entry with the same ID
+-callback insert(msg_location(), index_state()) -> 'ok'.
+
+%% Update an entry in the index.
+%% Is called by a msg_store process only.
+%% The function is called during message store recovery after crash.
+%% The difference between the update and insert functions is that update
+%% must not fail if the entry already exists, and must be atomic.
+-callback update(msg_location(), index_state()) -> 'ok'.
+
+%% Update positional fields in the entry tuple.
+%% Is called by msg_store and GC processes concurrently.
+%% This function can exit if there is no entry with specified ID
+%% This function is called to update reference-counters and file locations.
+%% File locations are updated from a GC process, reference-counters are
+%% updated from a message store process.
+%% This function should be atomic.
+-callback update_fields(rabbit_types:msg_id(), ({fieldpos(), fieldvalue()} |
+ [{fieldpos(), fieldvalue()}]),
+ index_state()) -> 'ok'.
+
+%% Delete an entry from the index by ID.
+%% Is called from a msg_store process only.
+%% This function should be atomic.
+-callback delete(rabbit_types:msg_id(), index_state()) -> 'ok'.
+
+%% Delete an exactly matching entry from the index.
+%% Is called by GC process only.
+%% This function should match the exact object, to avoid deleting a
+%% zero-reference object whose reference counter is being concurrently updated.
+%% This function should be atomic.
+-callback delete_object(msg_location(), index_state()) -> 'ok'.
+
+%% Delete temporary reference count entries with the 'file' record field equal to 'undefined'.
+%% Is called during index rebuild from scratch (e.g. after non-clean stop)
+%% During recovery after non-clean stop or file corruption, reference-counters
+%% are added to the index with `undefined` value for the `file` field.
+%% If a message is found in a message store file, its file field is updated.
+%% If some reference-counters miss the message location after recovery - they
+%% should be deleted.
+-callback clean_up_temporary_reference_count_entries_without_file(index_state()) -> 'ok'.
+
diff --git a/deps/rabbit_common/src/rabbit_net.erl b/deps/rabbit_common/src/rabbit_net.erl
new file mode 100644
index 0000000000..7685687ff0
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_net.erl
@@ -0,0 +1,321 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_net).
+-include("rabbit.hrl").
+
+-include_lib("kernel/include/inet.hrl").
+
+-export([is_ssl/1, ssl_info/1, controlling_process/2, getstat/2,
+ recv/1, sync_recv/2, async_recv/3, port_command/2, getopts/2,
+ setopts/2, send/2, close/1, fast_close/1, sockname/1, peername/1,
+ peercert/1, connection_string/2, socket_ends/2, is_loopback/1,
+ tcp_host/1, unwrap_socket/1, maybe_get_proxy_socket/1,
+ hostname/0, getifaddrs/0]).
+
+%%---------------------------------------------------------------------------
+
+-export_type([socket/0, ip_port/0, hostname/0]).
+
+-type stat_option() ::
+ 'recv_cnt' | 'recv_max' | 'recv_avg' | 'recv_oct' | 'recv_dvi' |
+ 'send_cnt' | 'send_max' | 'send_avg' | 'send_oct' | 'send_pend'.
+-type ok_val_or_error(A) :: rabbit_types:ok_or_error2(A, any()).
+-type ok_or_any_error() :: rabbit_types:ok_or_error(any()).
+-type socket() :: port() | ssl:sslsocket().
+-type opts() :: [{atom(), any()} |
+ {raw, non_neg_integer(), non_neg_integer(), binary()}].
+-type hostname() :: inet:hostname().
+-type ip_port() :: inet:port_number().
+% -type host_or_ip() :: binary() | inet:ip_address().
+-spec is_ssl(socket()) -> boolean().
+-spec ssl_info(socket()) -> 'nossl' | ok_val_or_error([{atom(), any()}]).
+-spec controlling_process(socket(), pid()) -> ok_or_any_error().
+-spec getstat(socket(), [stat_option()]) ->
+ ok_val_or_error([{stat_option(), integer()}]).
+-spec recv(socket()) ->
+ {'data', [char()] | binary()} |
+ 'closed' |
+ rabbit_types:error(any()) |
+ {'other', any()}.
+-spec sync_recv(socket(), integer()) ->
+ rabbit_types:ok(binary()) |
+ rabbit_types:error(any()).
+-spec async_recv(socket(), integer(), timeout()) ->
+ rabbit_types:ok(any()).
+-spec port_command(socket(), iolist()) -> 'true'.
+-spec getopts
+ (socket(),
+ [atom() |
+ {raw, non_neg_integer(), non_neg_integer(),
+ non_neg_integer() | binary()}]) ->
+ ok_val_or_error(opts()).
+-spec setopts(socket(), opts()) -> ok_or_any_error().
+-spec send(socket(), binary() | iolist()) -> ok_or_any_error().
+-spec close(socket()) -> ok_or_any_error().
+-spec fast_close(socket()) -> ok_or_any_error().
+-spec sockname(socket()) ->
+ ok_val_or_error({inet:ip_address(), ip_port()}).
+-spec peername(socket()) ->
+ ok_val_or_error({inet:ip_address(), ip_port()}).
+-spec peercert(socket()) ->
+ 'nossl' | ok_val_or_error(rabbit_ssl:certificate()).
+-spec connection_string(socket(), 'inbound' | 'outbound') ->
+ ok_val_or_error(string()).
+% -spec socket_ends(socket() | ranch_proxy:proxy_socket() | ranch_proxy_ssl:ssl_socket(),
+% 'inbound' | 'outbound') ->
+% ok_val_or_error({host_or_ip(), ip_port(),
+% host_or_ip(), ip_port()}).
+-spec is_loopback(socket() | inet:ip_address()) -> boolean().
+% -spec unwrap_socket(socket() | ranch_proxy:proxy_socket() | ranch_proxy_ssl:ssl_socket()) -> socket().
+
+-dialyzer({nowarn_function, [socket_ends/2, unwrap_socket/1]}).
+
+%%---------------------------------------------------------------------------
+
+-define(SSL_CLOSE_TIMEOUT, 5000).
+
+-define(IS_SSL(Sock), is_tuple(Sock)
+ andalso (tuple_size(Sock) =:= 3)
+ andalso (element(1, Sock) =:= sslsocket)).
+
+is_ssl(Sock) -> ?IS_SSL(Sock).
+
+%% Seems hackish. Is hackish. But the structure is stable and
+%% kept this way for backward compatibility reasons. We need
+%% it for two reasons: there are no ssl:getstat(Sock) function,
+%% and no ssl:close(Timeout) function. Both of them are being
+%% worked on as we speak.
+%%
+%% Extracts the underlying TCP port from an #sslsocket{} by raw tuple
+%% indexing (element 2 of the record's second field).
+%% NOTE(review): relies on an undocumented internal layout of the ssl
+%% application — verify against the OTP version in use.
+ssl_get_socket(Sock) ->
+    element(2, element(2, Sock)).
+
+%% TLS connection information for an ssl socket; the atom 'nossl'
+%% for any other term.
+ssl_info(Sock) ->
+    case ?IS_SSL(Sock) of
+        true  -> ssl:connection_information(Sock);
+        false -> nossl
+    end.
+
+%% Hand socket ownership to NewOwner via the matching transport module.
+controlling_process(Socket, NewOwner) when ?IS_SSL(Socket) ->
+    ssl:controlling_process(Socket, NewOwner);
+controlling_process(Socket, NewOwner) when is_port(Socket) ->
+    gen_tcp:controlling_process(Socket, NewOwner).
+
+%% Socket statistics (see inet:getstat/2). TLS sockets are routed
+%% through ssl_get_socket/1 because there is no ssl:getstat/2;
+%% proxy-protocol wrappers are unwrapped to the inner socket first.
+getstat(Sock, Stats) when ?IS_SSL(Sock) ->
+    inet:getstat(ssl_get_socket(Sock), Stats);
+getstat(Sock, Stats) when is_port(Sock) ->
+    inet:getstat(Sock, Stats);
+%% Used by Proxy protocol support in plugins
+getstat({rabbit_proxy_socket, Sock, _}, Stats) when ?IS_SSL(Sock) ->
+    inet:getstat(ssl_get_socket(Sock), Stats);
+getstat({rabbit_proxy_socket, Sock, _}, Stats) when is_port(Sock) ->
+    inet:getstat(Sock, Stats).
+
+%% Pull one active-mode message for this socket from the mailbox,
+%% selecting the message-tag triple that matches the transport.
+recv(Sock) when ?IS_SSL(Sock) -> recv(Sock, {ssl, ssl_closed, ssl_error});
+recv(Sock) when is_port(Sock) -> recv(Sock, {tcp, tcp_closed, tcp_error}).
+
+%% Selective receive that normalises transport-specific messages for
+%% socket S into {data, D} | closed | {error, R}; any unrelated
+%% mailbox message is surfaced as {other, Msg} rather than left queued.
+recv(S, {DataTag, ClosedTag, ErrorTag}) ->
+    receive
+        {DataTag, S, Data} -> {data, Data};
+        {ClosedTag, S} -> closed;
+        {ErrorTag, S, Reason} -> {error, Reason};
+        Other -> {other, Other}
+    end.
+
+%% Blocking (passive-mode) read of exactly Length bytes.
+sync_recv(Socket, Length) ->
+    case ?IS_SSL(Socket) of
+        true  -> ssl:recv(Socket, Length);
+        false -> gen_tcp:recv(Socket, Length)
+    end.
+
+%% Non-blocking receive returning {ok, Ref}; the result later arrives
+%% as an {inet_async, Sock, Ref, Result} message. TLS has no
+%% prim_inet equivalent, so we emulate it with a helper process doing
+%% a blocking ssl:recv/3 and forwarding the result.
+%% NOTE(review): the helper is spawned unlinked, so it is orphaned
+%% (not killed) if the caller terminates first — confirm intentional.
+async_recv(Sock, Length, Timeout) when ?IS_SSL(Sock) ->
+    Pid = self(),
+    Ref = make_ref(),
+
+    spawn(fun () -> Pid ! {inet_async, Sock, Ref,
+                           ssl:recv(Sock, Length, Timeout)}
+          end),
+
+    {ok, Ref};
+%% prim_inet encodes "no timeout" as -1.
+async_recv(Sock, Length, infinity) when is_port(Sock) ->
+    prim_inet:async_recv(Sock, Length, -1);
+async_recv(Sock, Length, Timeout) when is_port(Sock) ->
+    prim_inet:async_recv(Sock, Length, Timeout).
+
+%% Emulates erlang:port_command/2 for TLS sockets: send Data and, on
+%% success, post the {inet_reply, Sock, ok} message the caller expects
+%% from a real port; a send failure is raised as an error. Plain
+%% sockets use the real BIF.
+port_command(Sock, Data) when ?IS_SSL(Sock) ->
+    case ssl:send(Sock, Data) of
+        ok -> self() ! {inet_reply, Sock, ok},
+              true;
+        {error, Reason} -> erlang:error(Reason)
+    end;
+port_command(Sock, Data) when is_port(Sock) ->
+    erlang:port_command(Sock, Data).
+
+%% Read socket options through the matching transport module.
+getopts(Socket, Opts) when ?IS_SSL(Socket) ->
+    ssl:getopts(Socket, Opts);
+getopts(Socket, Opts) when is_port(Socket) ->
+    inet:getopts(Socket, Opts).
+
+%% Set socket options through the matching transport module.
+setopts(Socket, Opts) when ?IS_SSL(Socket) ->
+    ssl:setopts(Socket, Opts);
+setopts(Socket, Opts) when is_port(Socket) ->
+    inet:setopts(Socket, Opts).
+
+%% Write Data on the socket via the matching transport module.
+send(Socket, Data) when ?IS_SSL(Socket) ->
+    ssl:send(Socket, Data);
+send(Socket, Data) when is_port(Socket) ->
+    gen_tcp:send(Socket, Data).
+
+%% Graceful close via the matching transport module (may block on TLS;
+%% see fast_close/1 for the bounded variant).
+close(Socket) when ?IS_SSL(Socket) ->
+    ssl:close(Socket);
+close(Socket) when is_port(Socket) ->
+    gen_tcp:close(Socket).
+
+%% Close with a bounded wait: TLS close gets ?SSL_CLOSE_TIMEOUT ms in
+%% a monitored helper before we kill it and force-close the port.
+fast_close(Sock) when ?IS_SSL(Sock) ->
+    %% We cannot simply port_close the underlying tcp socket since the
+    %% TLS protocol is quite insistent that a proper closing handshake
+    %% should take place (see RFC 5246 s7.2.1). So we call ssl:close
+    %% instead, but that can block for a very long time, e.g. when
+    %% there is lots of pending output and there is tcp backpressure,
+    %% or the ssl_connection process has entered the
+    %% workaround_transport_delivery_problems function during
+    %% termination, which, inexplicably, does a gen_tcp:recv(Socket,
+    %% 0), which may never return if the client doesn't send a FIN or
+    %% that gets swallowed by the network. Since there is no timeout
+    %% variant of ssl:close, we construct our own.
+    {Pid, MRef} = spawn_monitor(fun () -> ssl:close(Sock) end),
+    erlang:send_after(?SSL_CLOSE_TIMEOUT, self(), {Pid, ssl_close_timeout}),
+    receive
+        {Pid, ssl_close_timeout} ->
+            erlang:demonitor(MRef, [flush]),
+            exit(Pid, kill);
+        {'DOWN', MRef, process, Pid, _Reason} ->
+            ok
+    end,
+    %% Force-close the underlying port either way; ignore failures
+    %% (it may already be closed).
+    catch port_close(ssl_get_socket(Sock)),
+    ok;
+fast_close(Sock) when is_port(Sock) ->
+    catch port_close(Sock), ok.
+
+%% Local {Address, Port} of the socket.
+sockname(Socket) when ?IS_SSL(Socket) ->
+    ssl:sockname(Socket);
+sockname(Socket) when is_port(Socket) ->
+    inet:sockname(Socket).
+
+%% Remote {Address, Port} of the socket.
+peername(Socket) when ?IS_SSL(Socket) ->
+    ssl:peername(Socket);
+peername(Socket) when is_port(Socket) ->
+    inet:peername(Socket).
+
+%% Peer certificate of a TLS socket; plain TCP sockets report 'nossl'.
+peercert(Socket) when ?IS_SSL(Socket) ->
+    ssl:peercert(Socket);
+peercert(Socket) when is_port(Socket) ->
+    nossl.
+
+%% Render the connection's two endpoints as "host:port -> host:port",
+%% ordered according to Direction (inbound | outbound).
+connection_string(Sock, Direction) ->
+    case socket_ends(Sock, Direction) of
+        {ok, {FromAddr, FromPort, ToAddr, ToPort}} ->
+            Str = rabbit_misc:format("~s:~p -> ~s:~p",
+                                     [maybe_ntoab(FromAddr), FromPort,
+                                      maybe_ntoab(ToAddr), ToPort]),
+            {ok, Str};
+        Error ->
+            Error
+    end.
+
+%% Resolve both endpoint addresses of a connection, ordered by
+%% Direction (see sock_funs/1). Addresses go through rdns/1, so they
+%% may come back as hostname binaries when reverse DNS is enabled.
+socket_ends(Sock, Direction) when ?IS_SSL(Sock);
+                                  is_port(Sock) ->
+    {From, To} = sock_funs(Direction),
+    case {From(Sock), To(Sock)} of
+        {{ok, {FromAddress, FromPort}}, {ok, {ToAddress, ToPort}}} ->
+            {ok, {rdns(FromAddress), FromPort,
+                  rdns(ToAddress), ToPort}};
+        {{error, _Reason} = Error, _} ->
+            Error;
+        {_, {error, _Reason} = Error} ->
+            Error
+    end;
+%% For proxy-protocol connections the source end comes from the proxy
+%% header map (the TCP peer is the proxy itself); only the inbound
+%% direction is handled for proxied sockets.
+socket_ends({rabbit_proxy_socket, CSocket, ProxyInfo}, Direction = inbound) ->
+    #{
+        src_address := FromAddress,
+        src_port := FromPort
+    } = ProxyInfo,
+    {_From, To} = sock_funs(Direction),
+    case To(CSocket) of
+        {ok, {ToAddress, ToPort}} ->
+            {ok, {rdns(FromAddress), FromPort,
+                  rdns(ToAddress), ToPort}};
+        {error, _Reason} = Error ->
+            Error
+    end.
+
+%% Format an ip_address() tuple as a (possibly bracketed) string;
+%% anything that is not a tuple is assumed to already be printable
+%% and passed through unchanged.
+maybe_ntoab(Address) when is_tuple(Address) ->
+    rabbit_misc:ntoab(Address);
+maybe_ntoab(Host) ->
+    Host.
+
+%% Best-effort hostname for an IP address: the wildcard addresses map
+%% to the local hostname, everything else is reverse-resolved, with
+%% the textual address form as a fallback.
+tcp_host({0,0,0,0}) ->
+    hostname();
+tcp_host({0,0,0,0,0,0,0,0}) ->
+    hostname();
+tcp_host(Addr) ->
+    case inet:gethostbyaddr(Addr) of
+        {ok, #hostent{h_name = Name}} -> Name;
+        {error, _} -> rabbit_misc:ntoa(Addr)
+    end.
+
+%% The resolver's name for this host when it can be looked up,
+%% otherwise the short name reported by inet:gethostname/0.
+hostname() ->
+    {ok, Short} = inet:gethostname(),
+    case inet:gethostbyname(Short) of
+        {ok, #hostent{h_name = Resolved}} -> Resolved;
+        {error, _} -> Short
+    end.
+
+%% Render one network-interface attribute as {Key, printable value}:
+%% 'flags' become a comma-separated binary, address tuples are
+%% rendered with inet_parse:ntoa/1, and hardware (MAC) addresses as
+%% colon-separated, zero-padded hex octets.
+format_nic_attribute({Key, undefined}) ->
+    {Key, undefined};
+format_nic_attribute({Key = flags, List}) when is_list(List) ->
+    Val = string:join(lists:map(fun rabbit_data_coercion:to_list/1, List), ", "),
+    {Key, rabbit_data_coercion:to_binary(Val)};
+format_nic_attribute({Key, Tuple}) when is_tuple(Tuple) and (Key =:= addr orelse
+                                                             Key =:= broadaddr orelse
+                                                             Key =:= netmask orelse
+                                                             Key =:= dstaddr) ->
+    Val = inet_parse:ntoa(Tuple),
+    {Key, rabbit_data_coercion:to_binary(Val)};
+format_nic_attribute({Key = hwaddr, List}) when is_list(List) ->
+    %% [140, 133, 144, 28, 241, 121] => 8C:85:90:1C:F1:79
+    %% Each octet is zero-padded to two hex digits: the unpadded
+    %% integer_to_list(N, 16) produced malformed MACs (e.g. "8"
+    %% instead of "08") for octets below 16.
+    Val = string:join(lists:map(fun hex_octet/1, List), ":"),
+    {Key, rabbit_data_coercion:to_binary(Val)}.
+
+%% Two-digit upper-case hex rendering of a byte (0..255).
+hex_octet(N) when N < 16 -> [$0 | integer_to_list(N, 16)];
+hex_octet(N)             -> integer_to_list(N, 16).
+
+%% All network interfaces as a map keyed by interface name, with each
+%% attribute rendered to a printable value via format_nic_attribute/1.
+getifaddrs() ->
+    {ok, AddrList} = inet:getifaddrs(),
+    Addrs0 = maps:from_list(AddrList),
+    maps:map(fun (_Key, Proplist) ->
+                     lists:map(fun format_nic_attribute/1, Proplist)
+             end, Addrs0).
+
+%% Reverse-resolve Addr to a hostname binary when the 'rabbit'
+%% application's 'reverse_dns_lookups' setting is true; otherwise
+%% return the address untouched.
+rdns(Addr) ->
+    Setting = application:get_env(rabbit, reverse_dns_lookups),
+    case Setting of
+        {ok, true} -> list_to_binary(tcp_host(Addr));
+        _Other     -> Addr
+    end.
+
+%% {SourceFun, DestinationFun} address accessors for a direction:
+%% for inbound connections the source end is the peer.
+sock_funs(inbound) -> {fun peername/1, fun sockname/1};
+sock_funs(outbound) -> {fun sockname/1, fun peername/1}.
+
+%% True when the socket's local address (or the address itself) is a
+%% loopback address: 127.0.0.0/8, ::1, or the IPv4-mapped IPv6 form
+%% of a 127.x.y.z address. Sockname failures count as non-loopback.
+is_loopback(Sock) when is_port(Sock) ; ?IS_SSL(Sock) ->
+    case sockname(Sock) of
+        {ok, {Addr, _Port}} -> is_loopback(Addr);
+        {error, _} -> false
+    end;
+%% We could parse the results of inet:getifaddrs() instead. But that
+%% would be more complex and less maybe Windows-compatible...
+is_loopback({127,_,_,_}) -> true;
+is_loopback({0,0,0,0,0,0,0,1}) -> true;
+is_loopback({0,0,0,0,0,65535,AB,CD}) -> is_loopback(ipv4(AB, CD));
+is_loopback(_) -> false.
+
+ipv4(AB, CD) -> {AB bsr 8, AB band 255, CD bsr 8, CD band 255}.
+
+%% Strip a proxy-protocol wrapper, if present, returning the real
+%% underlying socket.
+unwrap_socket({rabbit_proxy_socket, Inner, _ProxyInfo}) -> Inner;
+unwrap_socket(Socket)                                   -> Socket.
+
+%% Return the proxy-protocol wrapper itself when Sock is one,
+%% 'undefined' otherwise.
+maybe_get_proxy_socket({rabbit_proxy_socket, _, _} = Proxy) -> Proxy;
+maybe_get_proxy_socket(_Sock)                               -> undefined.
diff --git a/deps/rabbit_common/src/rabbit_nodes_common.erl b/deps/rabbit_common/src/rabbit_nodes_common.erl
new file mode 100644
index 0000000000..7e87ce2ea4
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_nodes_common.erl
@@ -0,0 +1,227 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_nodes_common).
+
+-define(EPMD_TIMEOUT, 30000).
+-define(TCP_DIAGNOSTIC_TIMEOUT, 5000).
+-define(ERROR_LOGGER_HANDLER, rabbit_error_logger_handler).
+
+-include_lib("kernel/include/inet.hrl").
+
+%%
+%% API
+%%
+
+-export([make/1, parts/1, names/1, name_type/1, ensure_epmd/0, is_running/2, is_process_running/2]).
+-export([cookie_hash/0, epmd_port/0, diagnostics/1]).
+
+-spec make({string(), string()} | string()) -> node().
+-spec parts(node() | string()) -> {string(), string()}.
+-spec ensure_epmd() -> 'ok'.
+-spec epmd_port() -> string().
+
+-spec names(string()) ->
+ rabbit_types:ok_or_error2([{string(), integer()}], term()).
+-spec diagnostics([node()]) -> string().
+-spec cookie_hash() -> string().
+
+%% net_adm:names/1 returns a new value, 'noport', in Erlang 24. This value being
+%% absent in the function spec in previous versions of Erlang, we get a warning
+%% from Dialyzer until we start to use the yet-to-be-released Erlang 24 in CI.
+%% Therefore we disable this specific warning.
+-dialyzer({nowarn_function, diagnostics_node/1}).
+
+%% Names registered with epmd on Hostname, forwarding whatever
+%% net_adm:names/1 returns. The lookup runs in a monitored helper
+%% process that is killed after ?EPMD_TIMEOUT, yielding
+%% {error, timeout} (or {error, Reason} for any other helper exit).
+names(Hostname) ->
+    Self = self(),
+    Ref = make_ref(),
+    {Pid, MRef} = spawn_monitor(
+                    fun () -> Self ! {Ref, net_adm:names(Hostname)} end),
+    _ = timer:exit_after(?EPMD_TIMEOUT, Pid, timeout),
+    receive
+        {Ref, Names} -> erlang:demonitor(MRef, [flush]),
+                        Names;
+        {'DOWN', MRef, process, Pid, Reason} -> {error, Reason}
+    end.
+
+%% Build a node atom from {NamePart, HostPart}, or from a "name@host"
+%% string / atom (a missing host part defaults to this node's host,
+%% via parts/1).
+make({Prefix, Suffix}) ->
+    NodeName = rabbit_data_coercion:to_list(Prefix)
+               ++ "@"
+               ++ rabbit_data_coercion:to_list(Suffix),
+    rabbit_data_coercion:to_atom(NodeName);
+make(NodeStr) ->
+    make(parts(NodeStr)).
+
+%% Split a node name into {NamePart, HostPart}. When there is no "@"
+%% the host part of the local node is used.
+parts(Node) when is_atom(Node) ->
+    parts(atom_to_list(Node));
+parts(NodeStr) ->
+    {Prefix, Rest} = lists:splitwith(fun (C) -> C =/= $@ end, NodeStr),
+    case Rest of
+        [] ->
+            {_, Suffix} = parts(node()),
+            {Prefix, Suffix};
+        [$@ | Suffix] ->
+            {Prefix, Suffix}
+    end.
+
+%% Distribution naming mode implied by a node name: a dotted host
+%% part means longnames, otherwise shortnames.
+name_type(Node) ->
+    {_Name, Host} = parts(Node),
+    case lists:member($., Host) of
+        true  -> longnames;
+        false -> shortnames
+    end.
+
+%% The epmd port this VM was given via the -epmd_port argument, as a
+%% string; defaults to the standard "4369".
+epmd_port() ->
+    case init:get_argument(epmd_port) of
+        {ok, [[Port | _] | _]} when is_list(Port) -> Port;
+        error -> "4369"
+    end.
+
+%% Ensure the epmd daemon is running by briefly launching a throwaway
+%% distributed VM ("-sname epmd-starter-<random>") that halts
+%% immediately; starting a distributed node has the side effect of
+%% starting epmd if needed. Blocks until the child VM exits.
+ensure_epmd() ->
+    Exe = rabbit_runtime:get_erl_path(),
+    ID = rabbit_misc:random(1000000000),
+    Port = open_port(
+             {spawn_executable, Exe},
+             [{args, ["-boot", "no_dot_erlang",
+                      "-sname", rabbit_misc:format("epmd-starter-~b", [ID]),
+                      "-noinput", "-s", "erlang", "halt"]},
+              exit_status, stderr_to_stdout, use_stdio]),
+    port_shutdown_loop(Port).
+
+%% Drain output messages from Port until it reports its exit status.
+port_shutdown_loop(Port) ->
+    receive
+        {Port, {exit_status, _Rc}} -> ok;
+        {Port, _} -> port_shutdown_loop(Port)
+    end.
+
+%% Short fingerprint of the Erlang cookie (base64 of its MD5),
+%% shown in diagnostics so cookies can be compared without being
+%% printed in clear text.
+cookie_hash() ->
+    Cookie = atom_to_list(erlang:get_cookie()),
+    base64:encode_to_string(erlang:md5(Cookie)).
+
+%% Build a human-readable multi-line report about why the given nodes
+%% could not be contacted. Distribution verbosity (and the report
+%% handler) is enabled for the duration so per-node probes can capture
+%% connection reports.
+diagnostics(Nodes) ->
+    verbose_erlang_distribution(true),
+    NodeDiags = [{"~nDIAGNOSTICS~n===========~n~n"
+                  "attempted to contact: ~p~n", [Nodes]}] ++
+        [diagnostics_node(Node) || Node <- Nodes] ++
+        current_node_details(),
+    verbose_erlang_distribution(false),
+    rabbit_misc:format_many(lists:flatten(NodeDiags)).
+
+%% Toggle verbose logging of distribution events; while enabled we
+%% also attach ?ERROR_LOGGER_HANDLER so connection attempts can later
+%% be queried (see connection_succeeded_diagnostics/0).
+verbose_erlang_distribution(true) ->
+    net_kernel:verbose(1),
+    error_logger:add_report_handler(?ERROR_LOGGER_HANDLER);
+verbose_erlang_distribution(false) ->
+    net_kernel:verbose(0),
+    error_logger:delete_report_handler(?ERROR_LOGGER_HANDLER).
+
+%% Local-node facts appended to every diagnostics report: node name,
+%% the effective home directory (from the -home argument), and the
+%% cookie hash.
+current_node_details() ->
+    [{"~nCurrent node details:~n * node name: ~w", [node()]},
+     case init:get_argument(home) of
+         {ok, [[Home]]} -> {" * effective user's home directory: ~s", [Home]};
+         Other          -> {" * effective user has no home directory: ~p", [Other]}
+     end,
+     {" * Erlang cookie hash: ~s", [cookie_hash()]}].
+
+%% One report section per target node: epmd reachability first, then,
+%% if epmd answered, a distribution-level ping with deeper checks on
+%% failure. The 'noport' clause covers the Erlang 24+ return value of
+%% net_adm:names/1 (see the dialyzer note near the top of the module).
+diagnostics_node(Node) ->
+    {Name, Host} = parts(Node),
+    [{"~s:", [Node]} |
+     case names(Host) of
+         {error, Reason} ->
+             [{"  * unable to connect to epmd (port ~s) on ~s: ~s~n",
+               [epmd_port(), Host, rabbit_misc:format_inet_error(Reason)]}];
+         noport ->
+             [{"  * unable to connect to epmd (port ~s) on ~s: "
+               "couldn't resolve hostname~n",
+               [epmd_port(), Host]}];
+         {ok, NamePorts} ->
+             [{"  * connected to epmd (port ~s) on ~s",
+               [epmd_port(), Host]}] ++
+                 case net_adm:ping(Node) of
+                     pong -> dist_working_diagnostics(Node);
+                     pang -> dist_broken_diagnostics(Name, Host, NamePorts)
+                 end
+     end].
+
+%% Distribution works: report whether the 'rabbit' application is
+%% actually running on the node, listing its applications if not.
+dist_working_diagnostics(Node) ->
+    case is_process_running(Node, rabbit) of
+        true  -> [{"  * node ~s up, 'rabbit' application running", [Node]}];
+        false -> [{"  * node ~s up, 'rabbit' application not running~n"
+                   "  * running applications on ~s: ~p~n"
+                   "  * suggestion: use rabbitmqctl start_app on ~s",
+                   [Node, Node, remote_apps(Node), Node]}]
+    end.
+
+%% Names of applications running on Node, or {badrpc, _} on failure.
+remote_apps(Node) ->
+    %% We want a timeout here because really, we don't trust the node,
+    %% the last thing we want to do is hang. The 5000 is the Timeout
+    %% argument of application:which_applications/1, not an rpc option.
+    case rpc:call(Node, application, which_applications, [5000]) of
+        {badrpc, _} = E -> E;
+        Apps            -> [App || {App, _, _} <- Apps]
+    end.
+
+%% epmd answered but the distribution ping failed. Report whether the
+%% target name is registered with epmd at all; if it is, try a raw TCP
+%% connection to its distribution port to separate network problems
+%% from distribution-level ones.
+dist_broken_diagnostics(Name, Host, NamePorts) ->
+    case [{N, P} || {N, P} <- NamePorts, N =:= Name] of
+        [] ->
+            %% When diagnosing the local host, our own registered name
+            %% is excluded from the "other nodes" listing.
+            {SelfName, SelfHost} = parts(node()),
+            Others = [list_to_atom(N) || {N, _} <- NamePorts,
+                                         N =/= case SelfHost of
+                                                   Host -> SelfName;
+                                                   _    -> never_matches
+                                               end],
+            OthersDiag = case Others of
+                             [] -> [{"    no other nodes on ~s",
+                                     [Host]}];
+                             _  -> [{"    other nodes on ~s: ~p",
+                                     [Host, Others]}]
+                         end,
+            [{"  * epmd reports: node '~s' not running at all", [Name]},
+             OthersDiag, {"  * suggestion: start the node", []}];
+        [{Name, Port}] ->
+            [{"  * epmd reports node '~s' uses port ~b for inter-node and CLI tool traffic ", [Name, Port]} |
+             case diagnose_connect(Host, Port) of
+                 ok ->
+                     connection_succeeded_diagnostics();
+                 {error, Reason} ->
+                     [{"  * can't establish TCP connection to the target node, reason: ~s~n"
+                       "  * suggestion: check if host '~s' resolves, is reachable and ports ~b, 4369 are not blocked by firewall",
+                       [rabbit_misc:format_inet_error(Reason), Host, Port]}]
+             end]
+    end.
+
+%% TCP connectivity to the remote distribution port works, so the
+%% failure is at the Erlang distribution layer. Include any connection
+%% report captured by ?ERROR_LOGGER_HANDLER; with no report, fall back
+%% to listing the usual suspects (cookie mismatch, inconsistent
+%% hostnames, TLS-for-distribution mismatch). The first suggestion's
+%% wording is fixed here ("cookie is identical", previously "cookie
+%% identical").
+connection_succeeded_diagnostics() ->
+    case gen_event:call(error_logger, ?ERROR_LOGGER_HANDLER, get_connection_report) of
+        [] ->
+            [{"  * TCP connection succeeded but Erlang distribution failed ~n"
+              "  * suggestion: check if the Erlang cookie is identical for all server nodes and CLI tools~n"
+              "  * suggestion: check if all server nodes and CLI tools use consistent hostnames when addressing each other~n"
+              "  * suggestion: check if inter-node connections may be configured to use TLS. If so, all nodes and CLI tools must do that~n"
+              "  * suggestion: see the CLI, clustering and networking guides on https://rabbitmq.com/documentation.html to learn more~n", []}];
+        Report ->
+            [{"  * TCP connection succeeded but Erlang distribution "
+              "failed ~n", []}]
+            ++ Report
+    end.
+
+%% Probe plain TCP reachability of Host:Port, resolving the host first
+%% to pick the right address family; returns ok or {error, Reason}.
+diagnose_connect(Host, Port) ->
+    case inet:gethostbyname(Host) of
+        {ok, #hostent{h_addrtype = Family}} ->
+            case gen_tcp:connect(Host, Port, [Family],
+                                 ?TCP_DIAGNOSTIC_TIMEOUT) of
+                {ok, Socket}   -> gen_tcp:close(Socket),
+                                  ok;
+                {error, _} = E -> E
+            end;
+        {error, _} = E ->
+            E
+    end.
+
+%% True when Application appears in the remote node's
+%% rabbit_misc:which_applications/0 listing; false on rpc failure.
+is_running(Node, Application) ->
+    case rpc:call(Node, rabbit_misc, which_applications, []) of
+        {badrpc, _} -> false;
+        Apps        -> proplists:is_defined(Application, Apps)
+    end.
+
+%% True when a process registered under the name Process exists on
+%% Node; rpc failures count as "not running".
+is_process_running(Node, Process) ->
+    case rpc:call(Node, erlang, whereis, [Process]) of
+        Pid when is_pid(Pid) -> true;
+        undefined            -> false;
+        {badrpc, _}          -> false
+    end.
diff --git a/deps/rabbit_common/src/rabbit_numerical.erl b/deps/rabbit_common/src/rabbit_numerical.erl
new file mode 100644
index 0000000000..45cc67fda6
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_numerical.erl
@@ -0,0 +1,358 @@
+%% This file is a copy of `mochijson2.erl' from mochiweb, revision
+%% d541e9a0f36c00dcadc2e589f20e47fbf46fc76f. For the license, see
+%% `LICENSE-MIT-Mochi'.
+
+%% @copyright 2007 Mochi Media, Inc.
+%% @author Bob Ippolito <bob@mochimedia.com>
+
+%% @doc Useful numeric algorithms for floats that cover some deficiencies
+%% in the math module. More interesting is digits/1, which implements
+%% the algorithm from:
+%% https://cs.indiana.edu/~burger/fp/index.html
+%% See also "Printing Floating-Point Numbers Quickly and Accurately"
+%% in Proceedings of the SIGPLAN '96 Conference on Programming Language
+%% Design and Implementation.
+
+-module(rabbit_numerical).
+-author("Bob Ippolito <bob@mochimedia.com>").
+-export([digits/1, frexp/1, int_pow/2, int_ceil/1]).
+
+%% IEEE 754 Float exponent bias
+-define(FLOAT_BIAS, 1022).
+-define(MIN_EXP, -1074).
+-define(BIG_POW, 4503599627370496).
+
+%% External API
+
+%% @spec digits(number()) -> string()
+%% @doc Returns a string that accurately represents the given integer or float
+%%      using a conservative amount of digits. Great for generating
+%%      human-readable output, or compact ASCII serializations for floats.
+digits(N) when is_integer(N) ->
+    integer_to_list(N);
+digits(0.0) ->
+    "0.0";
+digits(Float) ->
+    %% Shortest-representation algorithm: split the float into an
+    %% integer mantissa and binary exponent, generate decimal digits
+    %% (digits1/3), then place the decimal point or switch to
+    %% exponent notation (insert_decimal/2).
+    {Frac1, Exp1} = frexp_int(Float),
+    [Place0 | Digits0] = digits1(Float, Exp1, Frac1),
+    {Place, Digits} = transform_digits(Place0, Digits0),
+    R = insert_decimal(Place, Digits),
+    case Float < 0 of
+        true ->
+            [$- | R];
+        _ ->
+            R
+    end.
+
+%% @spec frexp(F::float()) -> {Frac::float(), Exp::float()}
+%% @doc Return the fractional and exponent part of an IEEE 754 double,
+%%      equivalent to the libc function of the same name.
+%%      F = Frac * pow(2, Exp).
+%% See frexp1/1 for the clause-by-clause handling of zero and
+%% subnormal values.
+frexp(F) ->
+    frexp1(unpack(F)).
+
+%% @spec int_pow(X::integer(), N::integer()) -> Y::integer()
+%% @doc Moderately efficient way to exponentiate integers.
+%%      int_pow(10, 2) = 100.
+%% N must be non-negative: negative exponents have no matching clause
+%% and raise function_clause.
+int_pow(_X, 0) ->
+    1;
+int_pow(X, N) when N > 0 ->
+    int_pow(X, N, 1).
+
+%% @spec int_ceil(F::float()) -> integer()
+%% @doc Return the ceiling of F as an integer. The ceiling is defined as
+%%      F when F == trunc(F);
+%%      trunc(F) when F &lt; 0;
+%%      trunc(F) + 1 when F &gt; 0.
+int_ceil(X) ->
+    Truncated = trunc(X),
+    if
+        X > Truncated -> Truncated + 1;
+        true          -> Truncated
+    end.
+
+
+%% Internal API
+
+%% Exponentiation by squaring, accumulating the result in R.
+int_pow(X, N, R) when N < 2 ->
+    R * X;
+int_pow(X, N, R) ->
+    int_pow(X * X, N bsr 1, case N band 1 of 1 -> R * X; 0 -> R end).
+
+%% Insert the decimal point into digit string S at decimal position
+%% Place (as produced by digits1/3), switching to exponent notation
+%% when the point would sit far outside the digits.
+insert_decimal(0, S) ->
+    "0." ++ S;
+insert_decimal(Place, S) when Place > 0 ->
+    L = length(S),
+    case Place - L of
+        0 ->
+            S ++ ".0";
+        N when N < 0 ->
+            {S0, S1} = lists:split(L + N, S),
+            S0 ++ "." ++ S1;
+        N when N < 6 ->
+            %% More places than digits
+            S ++ lists:duplicate(N, $0) ++ ".0";
+        _ ->
+            insert_decimal_exp(Place, S)
+    end;
+insert_decimal(Place, S) when Place > -6 ->
+    "0." ++ lists:duplicate(abs(Place), $0) ++ S;
+insert_decimal(Place, S) ->
+    insert_decimal_exp(Place, S).
+
+%% Scientific notation: first digit, ".", the remaining digits (or
+%% "0" when there is only one), then "e+"/"e-" and the decimal
+%% exponent |Place - 1|.
+insert_decimal_exp(Place, S) ->
+    [C | S0] = S,
+    S1 = case S0 of
+             [] ->
+                 "0";
+             _ ->
+                 S0
+         end,
+    Exp = case Place < 0 of
+              true ->
+                  "e-";
+              false ->
+                  "e+"
+          end,
+    [C] ++ "." ++ S1 ++ Exp ++ integer_to_list(abs(Place - 1)).
+
+
+%% Core digit generation: returns [DecimalPlace | Digits] for Float
+%% with integer mantissa Frac and binary exponent Exp. Round (mantissa
+%% even) selects inclusive rounding boundaries; the Frac =/= ?BIG_POW
+%% tests widen the upper boundary at powers of two, where the gap to
+%% the next representable float is asymmetric.
+digits1(Float, Exp, Frac) ->
+    Round = ((Frac band 1) =:= 0),
+    case Exp >= 0 of
+        true ->
+            BExp = 1 bsl Exp,
+            case (Frac =/= ?BIG_POW) of
+                true ->
+                    scale((Frac * BExp * 2), 2, BExp, BExp,
+                          Round, Round, Float);
+                false ->
+                    scale((Frac * BExp * 4), 4, (BExp * 2), BExp,
+                          Round, Round, Float)
+            end;
+        false ->
+            case (Exp =:= ?MIN_EXP) orelse (Frac =/= ?BIG_POW) of
+                true ->
+                    scale((Frac * 2), 1 bsl (1 - Exp), 1, 1,
+                          Round, Round, Float);
+                false ->
+                    scale((Frac * 4), 1 bsl (2 - Exp), 2, 1,
+                          Round, Round, Float)
+            end
+    end.
+
+%% Scale the ratio R/S and the boundary widths MPlus/MMinus by a power
+%% of ten so the first emitted digit lands next to the decimal point;
+%% Est estimates the decimal exponent and fixup/7 corrects it.
+scale(R, S, MPlus, MMinus, LowOk, HighOk, Float) ->
+    Est = int_ceil(math:log10(abs(Float)) - 1.0e-10),
+    %% Note that the scheme implementation uses a 326 element look-up table
+    %% for int_pow(10, N) where we do not.
+    case Est >= 0 of
+        true ->
+            fixup(R, S * int_pow(10, Est), MPlus, MMinus, Est,
+                  LowOk, HighOk);
+        false ->
+            Scale = int_pow(10, -Est),
+            fixup(R * Scale, S, MPlus * Scale, MMinus * Scale, Est,
+                  LowOk, HighOk)
+    end.
+
+%% Correct scale/7's exponent estimate: when R/S (plus the upper
+%% boundary) would already produce a digit >= 10 the estimate was too
+%% low, so bump the decimal place K instead of scaling R up by ten.
+fixup(R, S, MPlus, MMinus, K, LowOk, HighOk) ->
+    TooLow = case HighOk of
+                 true ->
+                     (R + MPlus) >= S;
+                 false ->
+                     (R + MPlus) > S
+             end,
+    case TooLow of
+        true ->
+            [(K + 1) | generate(R, S, MPlus, MMinus, LowOk, HighOk)];
+        false ->
+            [K | generate(R * 10, S, MPlus * 10, MMinus * 10, LowOk, HighOk)]
+    end.
+
+%% Emit decimal digits of R0/S until the remainder falls inside the
+%% rounding boundaries: TC1 fires when the remainder is small enough
+%% to round down, TC2 when rounding up still stays within the value's
+%% boundary. The final digit is chosen by which condition fired, with
+%% a midpoint tie (both fired) resolved by comparing R * 2 with S.
+generate(R0, S, MPlus, MMinus, LowOk, HighOk) ->
+    D = R0 div S,
+    R = R0 rem S,
+    TC1 = case LowOk of
+              true ->
+                  R =< MMinus;
+              false ->
+                  R < MMinus
+          end,
+    TC2 = case HighOk of
+              true ->
+                  (R + MPlus) >= S;
+              false ->
+                  (R + MPlus) > S
+          end,
+    case TC1 of
+        false ->
+            case TC2 of
+                false ->
+                    [D | generate(R * 10, S, MPlus * 10, MMinus * 10,
+                                  LowOk, HighOk)];
+                true ->
+                    [D + 1]
+            end;
+        true ->
+            case TC2 of
+                false ->
+                    [D];
+                true ->
+                    case R * 2 < S of
+                        true ->
+                            [D];
+                        false ->
+                            [D + 1]
+                    end
+            end
+    end.
+
+%% Raw IEEE 754 double fields: {SignBit, BiasedExponent, Mantissa}.
+unpack(Float) ->
+    <<Sign:1, Exp:11, Frac:52>> = <<Float:64/float>>,
+    {Sign, Exp, Frac}.
+
+%% Turn unpacked {Sign, Exp, Frac} fields into libc-frexp form
+%% {Fraction, Exponent} with F = Fraction * 2^Exponent. The Exp =:= 0
+%% clauses handle zero and subnormal values, which have no implicit
+%% leading mantissa bit; the fraction is rebuilt by re-packing the
+%% bits with the ?FLOAT_BIAS exponent.
+frexp1({_Sign, 0, 0}) ->
+    {0.0, 0};
+frexp1({Sign, 0, Frac}) ->
+    Exp = log2floor(Frac),
+    <<Frac1:64/float>> = <<Sign:1, ?FLOAT_BIAS:11, (Frac-1):52>>,
+    {Frac1, -(?FLOAT_BIAS) - 52 + Exp};
+frexp1({Sign, Exp, Frac}) ->
+    <<Frac1:64/float>> = <<Sign:1, ?FLOAT_BIAS:11, Frac:52>>,
+    {Frac1, Exp - ?FLOAT_BIAS}.
+
+%% Number of right shifts needed to reduce Int to zero, i.e. the bit
+%% width of Int (0 for 0).
+log2floor(Int) ->
+    log2floor(Int, 0).
+
+log2floor(0, Acc) ->
+    Acc;
+log2floor(Int, Acc) ->
+    log2floor(Int bsr 1, Acc + 1).
+
+
+%% Strip leading zero digits, then convert the remaining digit values
+%% to their ASCII characters.
+transform_digits(Place, [0 | Rest]) ->
+    transform_digits(Place, Rest);
+transform_digits(Place, Digits) ->
+    {Place, [$0 + D || D <- Digits]}.
+
+
+%% Integer mantissa and binary exponent of F, with the implicit
+%% leading 1 bit restored for normalised values; subnormal values
+%% (biased exponent 0) use the fixed ?MIN_EXP exponent.
+frexp_int(F) ->
+    case unpack(F) of
+        {_Sign, 0, Frac} ->
+            {Frac, ?MIN_EXP};
+        {_Sign, Exp, Frac} ->
+            {Frac + (1 bsl 52), Exp - 53 - ?FLOAT_BIAS}
+    end.
+
+%%
+%% Tests
+%%
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+int_ceil_test() ->
+ ?assertEqual(1, int_ceil(0.0001)),
+ ?assertEqual(0, int_ceil(0.0)),
+ ?assertEqual(1, int_ceil(0.99)),
+ ?assertEqual(1, int_ceil(1.0)),
+ ?assertEqual(-1, int_ceil(-1.5)),
+ ?assertEqual(-2, int_ceil(-2.0)),
+ ok.
+
+int_pow_test() ->
+ ?assertEqual(1, int_pow(1, 1)),
+ ?assertEqual(1, int_pow(1, 0)),
+ ?assertEqual(1, int_pow(10, 0)),
+ ?assertEqual(10, int_pow(10, 1)),
+ ?assertEqual(100, int_pow(10, 2)),
+ ?assertEqual(1000, int_pow(10, 3)),
+ ok.
+
+digits_test() ->
+ ?assertEqual("0",
+ digits(0)),
+ ?assertEqual("0.0",
+ digits(0.0)),
+ ?assertEqual("1.0",
+ digits(1.0)),
+ ?assertEqual("-1.0",
+ digits(-1.0)),
+ ?assertEqual("0.1",
+ digits(0.1)),
+ ?assertEqual("0.01",
+ digits(0.01)),
+ ?assertEqual("0.001",
+ digits(0.001)),
+ ?assertEqual("1.0e+6",
+ digits(1000000.0)),
+ ?assertEqual("0.5",
+ digits(0.5)),
+ ?assertEqual("4503599627370496.0",
+ digits(4503599627370496.0)),
+ %% small denormalized number
+ %% 4.94065645841246544177e-324 =:= 5.0e-324
+ <<SmallDenorm/float>> = <<0,0,0,0,0,0,0,1>>,
+ ?assertEqual("5.0e-324",
+ digits(SmallDenorm)),
+ ?assertEqual(SmallDenorm,
+ list_to_float(digits(SmallDenorm))),
+ %% large denormalized number
+ %% 2.22507385850720088902e-308
+ <<BigDenorm/float>> = <<0,15,255,255,255,255,255,255>>,
+ ?assertEqual("2.225073858507201e-308",
+ digits(BigDenorm)),
+ ?assertEqual(BigDenorm,
+ list_to_float(digits(BigDenorm))),
+ %% small normalized number
+ %% 2.22507385850720138309e-308
+ <<SmallNorm/float>> = <<0,16,0,0,0,0,0,0>>,
+ ?assertEqual("2.2250738585072014e-308",
+ digits(SmallNorm)),
+ ?assertEqual(SmallNorm,
+ list_to_float(digits(SmallNorm))),
+ %% large normalized number
+ %% 1.79769313486231570815e+308
+ <<LargeNorm/float>> = <<127,239,255,255,255,255,255,255>>,
+ ?assertEqual("1.7976931348623157e+308",
+ digits(LargeNorm)),
+ ?assertEqual(LargeNorm,
+ list_to_float(digits(LargeNorm))),
+ %% issue #10 - mochinum:frexp(math:pow(2, -1074)).
+ ?assertEqual("5.0e-324",
+ digits(math:pow(2, -1074))),
+ ok.
+
+frexp_test() ->
+ %% zero
+ ?assertEqual({0.0, 0}, frexp(0.0)),
+ %% one
+ ?assertEqual({0.5, 1}, frexp(1.0)),
+ %% negative one
+ ?assertEqual({-0.5, 1}, frexp(-1.0)),
+ %% small denormalized number
+ %% 4.94065645841246544177e-324
+ <<SmallDenorm/float>> = <<0,0,0,0,0,0,0,1>>,
+ ?assertEqual({0.5, -1073}, frexp(SmallDenorm)),
+ %% large denormalized number
+ %% 2.22507385850720088902e-308
+ <<BigDenorm/float>> = <<0,15,255,255,255,255,255,255>>,
+ ?assertEqual(
+ {0.99999999999999978, -1022},
+ frexp(BigDenorm)),
+ %% small normalized number
+ %% 2.22507385850720138309e-308
+ <<SmallNorm/float>> = <<0,16,0,0,0,0,0,0>>,
+ ?assertEqual({0.5, -1021}, frexp(SmallNorm)),
+ %% large normalized number
+ %% 1.79769313486231570815e+308
+ <<LargeNorm/float>> = <<127,239,255,255,255,255,255,255>>,
+ ?assertEqual(
+ {0.99999999999999989, 1024},
+ frexp(LargeNorm)),
+ %% issue #10 - mochinum:frexp(math:pow(2, -1074)).
+ ?assertEqual(
+ {0.5, -1073},
+ frexp(math:pow(2, -1074))),
+ ok.
+
+-endif.
diff --git a/deps/rabbit_common/src/rabbit_password_hashing.erl b/deps/rabbit_common/src/rabbit_password_hashing.erl
new file mode 100644
index 0000000000..53d4d04e10
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_password_hashing.erl
@@ -0,0 +1,11 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% Behaviour for pluggable password hashing: an implementation maps a
+%% clear-text password to the hash form that gets stored.
+-module(rabbit_password_hashing).
+-include("rabbit.hrl").
+
+-callback hash(rabbit_types:password()) -> rabbit_types:password_hash().
diff --git a/deps/rabbit_common/src/rabbit_pbe.erl b/deps/rabbit_common/src/rabbit_pbe.erl
new file mode 100644
index 0000000000..d999d520a4
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_pbe.erl
@@ -0,0 +1,54 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_pbe).
+
+-export([supported_ciphers/0, supported_hashes/0, default_cipher/0, default_hash/0, default_iterations/0]).
+-export([encrypt_term/5, decrypt_term/5]).
+-export([encrypt/5, decrypt/5]).
+
+-export_type([encryption_result/0]).
+
+%% Ciphers accepted by the underlying credentials_obfuscation library.
+supported_ciphers() ->
+    credentials_obfuscation_pbe:supported_ciphers().
+
+%% Hashes accepted by the underlying credentials_obfuscation library.
+supported_hashes() ->
+    credentials_obfuscation_pbe:supported_hashes().
+
+%% Default encryption parameters.
+default_cipher() ->
+    credentials_obfuscation_pbe:default_cipher().
+
+default_hash() ->
+    credentials_obfuscation_pbe:default_hash().
+
+default_iterations() ->
+    credentials_obfuscation_pbe:default_iterations().
+
+%% Encryption/decryption of arbitrary Erlang terms.
+
+%% Delegates to credentials_obfuscation_pbe; parameters mirror
+%% encrypt/5 below.
+encrypt_term(Cipher, Hash, Iterations, PassPhrase, Term) ->
+    credentials_obfuscation_pbe:encrypt_term(Cipher, Hash, Iterations, PassPhrase, Term).
+
+%% {plaintext, Term} values pass through untouched; only
+%% {encrypted, _} values are actually decrypted.
+decrypt_term(_Cipher, _Hash, _Iterations, _PassPhrase, {plaintext, Term}) ->
+    Term;
+decrypt_term(Cipher, Hash, Iterations, PassPhrase, {encrypted, _Base64Binary}=Encrypted) ->
+    credentials_obfuscation_pbe:decrypt_term(Cipher, Hash, Iterations, PassPhrase, Encrypted).
+
+-type encryption_result() :: {'encrypted', binary()} | {'plaintext', binary()}.
+
+%% PBE-encrypt a binary. Per the spec, PassPhrase may also be the
+%% special atom '$pending-secret' accepted by the underlying library.
+-spec encrypt(crypto:block_cipher(), crypto:hash_algorithms(),
+    pos_integer(), iodata() | '$pending-secret', binary()) -> encryption_result().
+encrypt(Cipher, Hash, Iterations, PassPhrase, ClearText) ->
+    credentials_obfuscation_pbe:encrypt(Cipher, Hash, Iterations, PassPhrase, ClearText).
+
+%% Inverse of encrypt/5; {plaintext, Term} values pass through untouched.
+-spec decrypt(crypto:block_cipher(), crypto:hash_algorithms(),
+    pos_integer(), iodata(), encryption_result()) -> any().
+decrypt(_Cipher, _Hash, _Iterations, _PassPhrase, {plaintext, Term}) ->
+    Term;
+decrypt(Cipher, Hash, Iterations, PassPhrase, {encrypted, _Base64Binary}=Encrypted) ->
+    credentials_obfuscation_pbe:decrypt(Cipher, Hash, Iterations, PassPhrase, Encrypted).
diff --git a/deps/rabbit_common/src/rabbit_peer_discovery_backend.erl b/deps/rabbit_common/src/rabbit_peer_discovery_backend.erl
new file mode 100644
index 0000000000..af3683e72b
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_peer_discovery_backend.erl
@@ -0,0 +1,59 @@
+%% This module is based on the autocluster_backend module
+%% from rabbitmq-autocluster by Gavin Roy.
+%%
+%% Copyright (c) 2014-2015 AWeber Communications
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates
+%% All rights reserved.
+%%
+%% Redistribution and use in source and binary forms, with or without modification,
+%% are permitted provided that the following conditions are met:
+%%
+%% * Redistributions of source code must retain the above copyright notice, this
+%% list of conditions and the following disclaimer.
+%% * Redistributions in binary form must reproduce the above copyright notice,
+%% this list of conditions and the following disclaimer in the documentation
+%% and/or other materials provided with the distribution.
+%% * Neither the name of the project nor the names of its
+%% contributors may be used to endorse or promote products derived from this
+%% software without specific prior written permission.
+%%
+%% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+%% ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+%% WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+%% IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+%% INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+%% BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+%% DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+%% LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+%% OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+%% ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+%%
+%% The Original Code is rabbitmq-autocluster.
+%%
+%% The Initial Developer of the Original Code is AWeber Communications.
+%% Copyright (c) 2014-2015 AWeber Communications
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_peer_discovery_backend).
+
+-include("rabbit.hrl").
+
+%% Behaviour contract for peer discovery backends. Callbacks return
+%% ok/result tuples on success and {error, Reason} with a descriptive
+%% string on failure.
+
+%% One-time backend initialisation; optional (see -optional_callbacks
+%% at the bottom of this module).
+-callback init() -> ok | {error, Reason :: string()}.
+
+%% Lists the discovered nodes together with the node type to use when
+%% joining them.
+-callback list_nodes() -> {ok, {Nodes :: list(), NodeType :: rabbit_types:node_type()}} |
+ {error, Reason :: string()}.
+
+%% Whether this backend supports node (un)registration.
+-callback supports_registration() -> boolean().
+
+%% Registers the current node with the backend.
+-callback register() -> ok | {error, Reason :: string()}.
+
+%% Removes the current node from the backend.
+-callback unregister() -> ok | {error, Reason :: string()}.
+
+%% Hook invoked after registration has completed.
+-callback post_registration() -> ok | {error, Reason :: string()}.
+
+%% Acquires a backend-specific lock on behalf of Node; backends
+%% without locking support return not_supported.
+-callback lock(Node :: atom()) -> {ok, Data :: term()} | not_supported | {error, Reason :: string()}.
+
+%% Releases a lock previously returned by lock/1.
+-callback unlock(Data :: term()) -> ok | {error, Reason :: string()}.
+
+-optional_callbacks([init/0]).
diff --git a/deps/rabbit_common/src/rabbit_policy_validator.erl b/deps/rabbit_common/src/rabbit_policy_validator.erl
new file mode 100644
index 0000000000..32b7a44fd9
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_policy_validator.erl
@@ -0,0 +1,22 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_policy_validator).
+
+%% Behaviour for policy validators registered with rabbit_registry
+%% (used by the policy_validator and operator_policy_validator
+%% registry classes; see rabbit_registry:class_module/1).
+
+-behaviour(rabbit_registry_class).
+
+-export([added_to_rabbit_registry/2, removed_from_rabbit_registry/1]).
+
+-export_type([validate_results/0]).
+
+%% 'ok', an error carrying a format string plus arguments, or a list
+%% of nested results (one per validated item).
+-type(validate_results() ::
+ 'ok' | {error, string(), [term()]} | [validate_results()]).
+
+%% Validates a policy definition given as key/value pairs.
+-callback validate_policy([{binary(), term()}]) -> validate_results().
+
+%% rabbit_registry_class callbacks: nothing to do for this class.
+added_to_rabbit_registry(_Type, _ModuleName) -> ok.
+removed_from_rabbit_registry(_Type) -> ok.
diff --git a/deps/rabbit_common/src/rabbit_queue_collector.erl b/deps/rabbit_common/src/rabbit_queue_collector.erl
new file mode 100644
index 0000000000..ffc94ba6fb
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_queue_collector.erl
@@ -0,0 +1,80 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_queue_collector).
+
+%% Queue collector keeps track of exclusive queues and cleans them
+%% up e.g. when their connection is closed.
+
+-behaviour(gen_server).
+
+-export([start_link/1, register/2, delete_all/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%% monitors: pmon set of registered (monitored) queue pids.
+%% delete_from: 'undefined' until delete_all/1 is called, then the
+%% gen_server 'From' of that caller, used both as the deletion
+%% requester passed to delete_exclusive/2 and as the reply target.
+-record(state, {monitors, delete_from}).
+
+-include("rabbit.hrl").
+
+%%----------------------------------------------------------------------------
+
+-spec start_link(rabbit_types:proc_name()) -> rabbit_types:ok_pid_or_error().
+
+%% Starts an unregistered collector; ProcName is only used for
+%% process naming/diagnostics (see ?store_proc_name in init/1).
+start_link(ProcName) ->
+ gen_server:start_link(?MODULE, [ProcName], []).
+
+-spec register(pid(), pid()) -> 'ok'.
+
+%% Registers queue process Q with the collector (synchronous).
+register(CollectorPid, Q) ->
+ gen_server:call(CollectorPid, {register, Q}, infinity).
+
+%% Requests deletion of all registered queues; blocks until done
+%% (see the delete_all clause of handle_call/3 below).
+delete_all(CollectorPid) ->
+ gen_server:call(CollectorPid, delete_all, infinity).
+
+%%----------------------------------------------------------------------------
+
+init([ProcName]) ->
+ ?LG_PROCESS_TYPE(queue_collector),
+ ?store_proc_name(ProcName),
+ {ok, #state{monitors = pmon:new(), delete_from = undefined}}.
+
+%%--------------------------------------------------------------------------
+
+%% If a delete_all is already in progress, a newly registered queue is
+%% asked to delete itself immediately; either way the pid is monitored
+%% so its DOWN can be accounted for.
+handle_call({register, QPid}, _From,
+ State = #state{monitors = QMons, delete_from = Deleting}) ->
+ case Deleting of
+ undefined -> ok;
+ _ -> ok = rabbit_amqqueue_common:delete_exclusive([QPid], Deleting)
+ end,
+ {reply, ok, State#state{monitors = pmon:monitor(QPid, QMons)}};
+
+%% With no registered queues the caller gets an immediate 'ok';
+%% otherwise deletion is requested and the reply is deferred until the
+%% last DOWN arrives (see handle_info/2). NOTE(review): this clause
+%% only matches delete_from = undefined, so a second concurrent
+%% delete_all would hit a function_clause — presumably called once per
+%% connection; confirm with callers.
+handle_call(delete_all, From, State = #state{monitors = QMons,
+ delete_from = undefined}) ->
+ case pmon:monitored(QMons) of
+ [] -> {reply, ok, State#state{delete_from = From}};
+ QPids -> ok = rabbit_amqqueue_common:delete_exclusive(QPids, From),
+ {noreply, State#state{delete_from = From}}
+ end.
+
+handle_cast(Msg, State) ->
+ {stop, {unhandled_cast, Msg}, State}.
+
+%% A monitored queue terminated: drop it, and if a delete_all is
+%% pending and this was the last queue, release the blocked caller.
+handle_info({'DOWN', _MRef, process, DownPid, _Reason},
+ State = #state{monitors = QMons, delete_from = Deleting}) ->
+ QMons1 = pmon:erase(DownPid, QMons),
+ case Deleting =/= undefined andalso pmon:is_empty(QMons1) of
+ true -> gen_server:reply(Deleting, ok);
+ false -> ok
+ end,
+ {noreply, State#state{monitors = QMons1}}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbit_common/src/rabbit_registry.erl b/deps/rabbit_common/src/rabbit_registry.erl
new file mode 100644
index 0000000000..e68574828c
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_registry.erl
@@ -0,0 +1,165 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_registry).
+
+%% Registry of pluggable implementations (exchange types, auth
+%% mechanisms, policy validators, etc. — see class_module/1), kept in
+%% a named ETS table. Writes are serialised through this gen_server;
+%% lookups read the protected table directly from the caller.
+
+-behaviour(gen_server).
+
+-export([start_link/0]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2,
+ code_change/3]).
+
+-export([register/3, unregister/2,
+ binary_to_type/1, lookup_module/2, lookup_all/1]).
+
+-define(SERVER, ?MODULE).
+-define(ETS_NAME, ?MODULE).
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+-spec register(atom(), binary(), atom()) -> 'ok'.
+-spec unregister(atom(), binary()) -> 'ok'.
+-spec binary_to_type(binary()) -> atom() | rabbit_types:error('not_found').
+-spec lookup_module(atom(), atom()) ->
+ rabbit_types:ok_or_error2(atom(), 'not_found').
+-spec lookup_all(atom()) -> [{atom(), atom()}].
+
+%%---------------------------------------------------------------------------
+
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+%%---------------------------------------------------------------------------
+
+%% Registers ModuleName as the implementation of type TypeName (a
+%% binary) within registry class Class (synchronous).
+register(Class, TypeName, ModuleName) ->
+ gen_server:call(?SERVER, {register, Class, TypeName, ModuleName}, infinity).
+
+unregister(Class, TypeName) ->
+ gen_server:call(?SERVER, {unregister, Class, TypeName}, infinity).
+
+%% This is used with user-supplied arguments (e.g., on exchange
+%% declare), so we restrict it to existing atoms only. This means it
+%% can throw a badarg, indicating that the type cannot have been
+%% registered.
+binary_to_type(TypeBin) when is_binary(TypeBin) ->
+ case catch list_to_existing_atom(binary_to_list(TypeBin)) of
+ {'EXIT', {badarg, _}} -> {error, not_found};
+ TypeAtom -> TypeAtom
+ end.
+
+%% Looks up the module registered for type T within Class; reads the
+%% ETS table directly (no gen_server round trip).
+lookup_module(Class, T) when is_atom(T) ->
+ case ets:lookup(?ETS_NAME, {Class, T}) of
+ [{_, Module}] ->
+ {ok, Module};
+ [] ->
+ {error, not_found}
+ end.
+
+%% Returns all {Type, Module} registrations for the given class.
+lookup_all(Class) ->
+ [{K, V} || [K, V] <- ets:match(?ETS_NAME, {{Class, '$1'}, '$2'})].
+
+%%---------------------------------------------------------------------------
+
+%% Unlike binary_to_type/1 this may create a new atom: it only runs on
+%% registration, where the type name is trusted.
+internal_binary_to_type(TypeBin) when is_binary(TypeBin) ->
+ list_to_atom(binary_to_list(TypeBin)).
+
+%% Validates the module against its class behaviour, stores the
+%% mapping, mirrors route-capable exchange decorators (see
+%% conditional_register/1), then notifies the class module.
+internal_register(Class, TypeName, ModuleName)
+ when is_atom(Class), is_binary(TypeName), is_atom(ModuleName) ->
+ ClassModule = class_module(Class),
+ Type = internal_binary_to_type(TypeName),
+ RegArg = {{Class, Type}, ModuleName},
+ ok = sanity_check_module(ClassModule, ModuleName),
+ true = ets:insert(?ETS_NAME, RegArg),
+ conditional_register(RegArg),
+ ok = ClassModule:added_to_rabbit_registry(Type, ModuleName),
+ ok.
+
+internal_unregister(Class, TypeName) ->
+ ClassModule = class_module(Class),
+ Type = internal_binary_to_type(TypeName),
+ UnregArg = {Class, Type},
+ conditional_unregister(UnregArg),
+ true = ets:delete(?ETS_NAME, UnregArg),
+ ok = ClassModule:removed_from_rabbit_registry(Type),
+ ok.
+
+%% register exchange decorator route callback only when implemented,
+%% in order to avoid unnecessary decorator calls on the fast
+%% publishing path
+conditional_register({{exchange_decorator, Type}, ModuleName}) ->
+ case erlang:function_exported(ModuleName, route, 2) of
+ true -> true = ets:insert(?ETS_NAME,
+ {{exchange_decorator_route, Type},
+ ModuleName});
+ false -> ok
+ end;
+conditional_register(_) ->
+ ok.
+
+%% Mirror of conditional_register/1: drop the route entry regardless
+%% of whether one was inserted (ets:delete/2 succeeds either way).
+conditional_unregister({exchange_decorator, Type}) ->
+ true = ets:delete(?ETS_NAME, {exchange_decorator_route, Type}),
+ ok;
+conditional_unregister(_) ->
+ ok.
+
+%% Checks that Module declares ClassModule among its behaviour (or US
+%% spelling 'behavior') attributes. The old-style catch turns a module
+%% that cannot be loaded (undef on module_info/1) into
+%% {error, not_module} instead of crashing.
+sanity_check_module(ClassModule, Module) ->
+ case catch lists:member(ClassModule,
+ lists:flatten(
+ [Bs || {Attr, Bs} <-
+ Module:module_info(attributes),
+ Attr =:= behavior orelse
+ Attr =:= behaviour])) of
+ {'EXIT', {undef, _}} -> {error, not_module};
+ false -> {error, {not_type, ClassModule}};
+ true -> ok
+ end.
+
+
+% Registry class modules. There should exist module for each registry class.
+% Class module should be behaviour (export behaviour_info/1) and implement
+% rabbit_registry_class behaviour itself: export added_to_rabbit_registry/2
+% and removed_from_rabbit_registry/1 functions.
+class_module(exchange) -> rabbit_exchange_type;
+class_module(auth_mechanism) -> rabbit_auth_mechanism;
+class_module(runtime_parameter) -> rabbit_runtime_parameter;
+class_module(exchange_decorator) -> rabbit_exchange_decorator;
+class_module(queue_decorator) -> rabbit_queue_decorator;
+class_module(policy_validator) -> rabbit_policy_validator;
+class_module(operator_policy_validator) -> rabbit_policy_validator;
+class_module(policy_merge_strategy) -> rabbit_policy_merge_strategy;
+class_module(ha_mode) -> rabbit_mirror_queue_mode;
+class_module(channel_interceptor) -> rabbit_channel_interceptor;
+class_module(queue_master_locator) -> rabbit_queue_master_locator.
+
+%%---------------------------------------------------------------------------
+
+%% The table is 'protected': only this process may write, but any
+%% process may read (required by lookup_module/2 and lookup_all/1).
+init([]) ->
+ ?ETS_NAME = ets:new(?ETS_NAME, [protected, set, named_table]),
+ {ok, none}.
+
+handle_call({register, Class, TypeName, ModuleName}, _From, State) ->
+ ok = internal_register(Class, TypeName, ModuleName),
+ {reply, ok, State};
+
+handle_call({unregister, Class, TypeName}, _From, State) ->
+ ok = internal_unregister(Class, TypeName),
+ {reply, ok, State};
+
+handle_call(Request, _From, State) ->
+ {stop, {unhandled_call, Request}, State}.
+
+handle_cast(Request, State) ->
+ {stop, {unhandled_cast, Request}, State}.
+
+handle_info(Message, State) ->
+ {stop, {unhandled_info, Message}, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
diff --git a/deps/rabbit_common/src/rabbit_registry_class.erl b/deps/rabbit_common/src/rabbit_registry_class.erl
new file mode 100644
index 0000000000..c302dc2311
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_registry_class.erl
@@ -0,0 +1,12 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_registry_class).
+
+%% Behaviour implemented by registry "class modules" (see
+%% rabbit_registry:class_module/1): they are notified with the type
+%% and implementing module when an entry is added, and with the type
+%% when an entry is removed.
+
+-callback added_to_rabbit_registry(atom(), atom()) -> ok.
+
+-callback removed_from_rabbit_registry(atom()) -> ok.
diff --git a/deps/rabbit_common/src/rabbit_resource_monitor_misc.erl b/deps/rabbit_common/src/rabbit_resource_monitor_misc.erl
new file mode 100644
index 0000000000..6661706998
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_resource_monitor_misc.erl
@@ -0,0 +1,39 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+
+-module(rabbit_resource_monitor_misc).
+
+-export([parse_information_unit/1]).
+
+%% Parses a size given either as an integer (returned unchanged, in
+%% bytes) or as a string/binary of digits with an optional unit
+%% suffix, e.g. "1024", "10MB", "512MiB". IEC-style suffixes
+%% (k/K/KiB/kib/...) multiply by powers of 1024; SI-style suffixes
+%% (kB/KB/MB/...) multiply by powers of 1000. Returns the byte count,
+%% or {error, parse_error} when the input does not match.
+-spec parse_information_unit(integer() | string()) ->
+ {ok, integer()} | {error, parse_error}.
+
+parse_information_unit(Value) when is_integer(Value) -> {ok, Value};
+parse_information_unit(Value0) ->
+ Value = rabbit_data_coercion:to_list(Value0),
+ case re:run(Value,
+ "^(?<VAL>[0-9]+)(?<UNIT>kB|KB|MB|GB|kb|mb|gb|Kb|Mb|Gb|kiB|KiB|MiB|GiB|kib|mib|gib|KIB|MIB|GIB|k|K|m|M|g|G)?$",
+ [{capture, all_but_first, list}]) of
+ {match, [[], _]} ->
+ {ok, list_to_integer(Value)};
+ {match, [Num]} ->
+ %% No unit suffix: the value is already a byte count.
+ {ok, list_to_integer(Num)};
+ {match, [Num, Unit]} ->
+ %% Every alternative accepted by the UNIT group above must
+ %% be covered by exactly one guard below — a missing guard
+ %% turns a valid input into a case_clause crash instead of
+ %% a parsed value.
+ Multiplier = case Unit of
+ %% Bug fix: "KiB" was accepted by the regex but
+ %% missing from this guard, so inputs such as
+ %% "512KiB" crashed with case_clause.
+ KiB when KiB =:= "k"; KiB =:= "kiB"; KiB =:= "KiB"; KiB =:= "K"; KiB =:= "KIB"; KiB =:= "kib" -> 1024;
+ MiB when MiB =:= "m"; MiB =:= "MiB"; MiB =:= "M"; MiB =:= "MIB"; MiB =:= "mib" -> 1024*1024;
+ GiB when GiB =:= "g"; GiB =:= "GiB"; GiB =:= "G"; GiB =:= "GIB"; GiB =:= "gib" -> 1024*1024*1024;
+ KB when KB =:= "KB"; KB =:= "kB"; KB =:= "kb"; KB =:= "Kb" -> 1000;
+ MB when MB =:= "MB"; MB =:= "mB"; MB =:= "mb"; MB =:= "Mb" -> 1000000;
+ GB when GB =:= "GB"; GB =:= "gB"; GB =:= "gb"; GB =:= "Gb" -> 1000000000
+ end,
+ {ok, list_to_integer(Num) * Multiplier};
+ nomatch ->
+ % log error
+ {error, parse_error}
+ end.
diff --git a/deps/rabbit_common/src/rabbit_runtime.erl b/deps/rabbit_common/src/rabbit_runtime.erl
new file mode 100644
index 0000000000..94a5a5fcfe
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_runtime.erl
@@ -0,0 +1,66 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% This module provides access to runtime metrics that are exposed
+%% via CLI tools, management UI or otherwise used by the broker.
+
+-module(rabbit_runtime).
+
+%%
+%% API
+%%
+
+-export([guess_number_of_cpu_cores/0, msacc_stats/1]).
+-export([get_gc_info/1, gc_all_processes/0]).
+-export([get_erl_path/0]).
+
+%% Best-effort CPU core count: logical_processors_available when the
+%% VM can determine it, otherwise the scheduler count.
+-spec guess_number_of_cpu_cores() -> pos_integer().
+guess_number_of_cpu_cores() ->
+ case erlang:system_info(logical_processors_available) of
+ unknown -> % Happens on Mac OS X.
+ erlang:system_info(schedulers);
+ N -> N
+ end.
+
+%% Triggers a garbage collection of every process from a throwaway
+%% helper process; returns immediately without waiting for completion.
+-spec gc_all_processes() -> ok.
+gc_all_processes() ->
+ %% Run GC asynchronously. We don't care for completion notifications, so
+ %% don't use the asynchronous execution option.
+ spawn(fun() -> [erlang:garbage_collect(P, []) || P <- erlang:processes()] end),
+ ok.
+
+%% Returns Pid's garbage_collection info with max_heap_size flattened
+%% to a plain integer: newer OTP releases report it as a map whose
+%% 'size' key carries the limit, older ones as an integer.
+-spec get_gc_info(pid()) -> nonempty_list(tuple()).
+get_gc_info(Pid) ->
+ {garbage_collection, GC} = erlang:process_info(Pid, garbage_collection),
+ case proplists:get_value(max_heap_size, GC) of
+ I when is_integer(I) ->
+ GC;
+ undefined ->
+ GC;
+ Map ->
+ lists:keyreplace(max_heap_size, 1, GC,
+ {max_heap_size, maps:get(size, Map)})
+ end.
+
+%% Collects microstate accounting stats over a window of TimeInMs
+%% milliseconds; blocks for that long (msacc:start/1 waits).
+-spec msacc_stats(integer()) -> nonempty_list(#{atom() => any()}).
+msacc_stats(TimeInMs) ->
+ msacc:start(TimeInMs),
+ S = msacc:stats(),
+ msacc:stop(),
+ S.
+
+% get the full path to the erl executable used to start this VM
+-spec get_erl_path() -> file:filename_all().
+get_erl_path() ->
+ {ok, [[Root]]} = init:get_argument(root),
+ Bin = filename:join(Root, "bin"),
+ case os:type() of
+ {win32, _} ->
+ filename:join(Bin, "erl.exe");
+ _ ->
+ filename:join(Bin, "erl")
+ end.
diff --git a/deps/rabbit_common/src/rabbit_runtime_parameter.erl b/deps/rabbit_common/src/rabbit_runtime_parameter.erl
new file mode 100644
index 0000000000..5f9970d25d
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_runtime_parameter.erl
@@ -0,0 +1,25 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_runtime_parameter).
+
+-behaviour(rabbit_registry_class).
+
+-export([added_to_rabbit_registry/2, removed_from_rabbit_registry/1]).
+
+-type(validate_results() ::
+ 'ok' | {error, string(), [term()]} | [validate_results()]).
+
+-callback validate(rabbit_types:vhost(), binary(), binary(),
+ term(), rabbit_types:user()) -> validate_results().
+-callback notify(rabbit_types:vhost(), binary(), binary(), term(),
+ rabbit_types:username()) -> 'ok'.
+-callback notify_clear(rabbit_types:vhost(), binary(), binary(),
+ rabbit_types:username()) -> 'ok'.
+
+added_to_rabbit_registry(_Type, _ModuleName) -> ok.
+removed_from_rabbit_registry(_Type) -> ok.
diff --git a/deps/rabbit_common/src/rabbit_semver.erl b/deps/rabbit_common/src/rabbit_semver.erl
new file mode 100644
index 0000000000..c80db0c27a
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_semver.erl
@@ -0,0 +1,730 @@
+%%% vi:ts=4 sw=4 et
+
+%%% Imported from https://github.com/erlware/erlware_commons.git
+%%% Commit 09168347525916e291c8aa6e3073e260e5f4a116
+%%% - We export normalize/1.
+%%% - We add a few more testcases around string/binary comparison.
+
+%%%-------------------------------------------------------------------
+%%% @copyright (C) 2011, Erlware LLC
+%%% @doc
+%%% Helper functions for working with semver versioning strings.
+%%% See https://semver.org/ for the spec.
+%%% @end
+%%%-------------------------------------------------------------------
+-module(rabbit_semver).
+
+%% Comparison/formatting API; all comparators accept raw strings,
+%% binaries or already-parsed semver() terms (see any_version()).
+-export([parse/1,
+ format/1,
+ eql/2,
+ gt/2,
+ gte/2,
+ lt/2,
+ lte/2,
+ pes/2,
+ normalize/1,
+ between/3]).
+
+%% For internal use by the rabbit_semver_parser peg
+-export([internal_parse_version/1]).
+
+-export_type([semver/0,
+ version_string/0,
+ any_version/0]).
+
+%%%===================================================================
+%%% Public Types
+%%%===================================================================
+
+%% A single version component: a number, or a binary for
+%% non-numeric components (e.g. the "aa" in version "aa.bb").
+-type version_element() :: non_neg_integer() | binary().
+
+%% 1 to 4 version components: major, minor, patch and a non-standard
+%% fourth "minpatch" component (e.g. "1.0.0.1").
+-type major_minor_patch_minpatch() ::
+ version_element()
+ | {version_element(), version_element()}
+ | {version_element(), version_element(), version_element()}
+ | {version_element(), version_element(),
+ version_element(), version_element()}.
+
+%% One dot-separated element of a pre-release or build suffix.
+-type alpha_part() :: integer() | binary() | string().
+-type alpha_info() :: {PreRelease::[alpha_part()],
+ BuildVersion::[alpha_part()]}.
+
+%% Parsed version: components plus {pre-release, build} suffixes.
+-type semver() :: {major_minor_patch_minpatch(), alpha_info()}.
+
+-type version_string() :: string() | binary().
+
+-type any_version() :: version_string() | semver().
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+%% @doc parse a string or binary into a valid semver representation
+-spec parse(any_version()) -> semver().
+parse(Version) when erlang:is_list(Version) ->
+ case rabbit_semver_parser:parse(Version) of
+ {fail, _} ->
+ %% Unparsable input degrades to an opaque binary version
+ %% with empty pre-release/build parts, so it can still be
+ %% compared (binaries sort against integers/tuples).
+ {erlang:iolist_to_binary(Version), {[],[]}};
+ Good ->
+ Good
+ end;
+parse(Version) when erlang:is_binary(Version) ->
+ case rabbit_semver_parser:parse(Version) of
+ {fail, _} ->
+ {Version, {[],[]}};
+ Good ->
+ Good
+ end;
+%% Anything else is assumed to already be a parsed semver() term and
+%% is passed through unchanged.
+parse(Version) ->
+ Version.
+
+%% Renders a parsed semver() back into an iolist; one clause per
+%% number of version components (1 to 4). The inverse of parse/1 up
+%% to component normalization.
+-spec format(semver()) -> iolist().
+format({Maj, {AlphaPart, BuildPart}})
+ when erlang:is_integer(Maj);
+ erlang:is_binary(Maj) ->
+ [format_version_part(Maj),
+ format_vsn_rest(<<"-">>, AlphaPart),
+ format_vsn_rest(<<"+">>, BuildPart)];
+format({{Maj, Min}, {AlphaPart, BuildPart}}) ->
+ [format_version_part(Maj), ".",
+ format_version_part(Min),
+ format_vsn_rest(<<"-">>, AlphaPart),
+ format_vsn_rest(<<"+">>, BuildPart)];
+format({{Maj, Min, Patch}, {AlphaPart, BuildPart}}) ->
+ [format_version_part(Maj), ".",
+ format_version_part(Min), ".",
+ format_version_part(Patch),
+ format_vsn_rest(<<"-">>, AlphaPart),
+ format_vsn_rest(<<"+">>, BuildPart)];
+format({{Maj, Min, Patch, MinPatch}, {AlphaPart, BuildPart}}) ->
+ [format_version_part(Maj), ".",
+ format_version_part(Min), ".",
+ format_version_part(Patch), ".",
+ format_version_part(MinPatch),
+ format_vsn_rest(<<"-">>, AlphaPart),
+ format_vsn_rest(<<"+">>, BuildPart)].
+
+%% Renders one version component: integers are stringified, binaries
+%% are already printable.
+-spec format_version_part(integer() | binary()) -> iolist().
+format_version_part(Vsn)
+ when erlang:is_integer(Vsn) ->
+ erlang:integer_to_list(Vsn);
+format_version_part(Vsn)
+ when erlang:is_binary(Vsn) ->
+ Vsn.
+
+
+
+%% @doc test for equality between semver versions (compared after
+%% normalization, so e.g. "1.0" and "1.0.0.0" are equal)
+-spec eql(any_version(), any_version()) -> boolean().
+eql(VsnA, VsnB) ->
+ NVsnA = normalize(parse(VsnA)),
+ NVsnB = normalize(parse(VsnB)),
+ NVsnA =:= NVsnB.
+
+%% @doc Test that VsnA is greater than VsnB
+%%
+%% After normalization: compare the numeric components first; on a
+%% tie, a version WITHOUT a pre-release part outranks one WITH a
+%% pre-release part (a release beats its pre-releases), otherwise
+%% pre-release parts compare by term order; on a further tie, a
+%% version WITH build metadata outranks one without, otherwise build
+%% parts compare by term order. (Note: unlike the semver spec, build
+%% metadata participates in ordering here.)
+-spec gt(any_version(), any_version()) -> boolean().
+gt(VsnA, VsnB) ->
+ {MMPA, {AlphaA, PatchA}} = normalize(parse(VsnA)),
+ {MMPB, {AlphaB, PatchB}} = normalize(parse(VsnB)),
+ ((MMPA > MMPB)
+ orelse
+ ((MMPA =:= MMPB)
+ andalso
+ ((AlphaA =:= [] andalso AlphaB =/= [])
+ orelse
+ ((not (AlphaB =:= [] andalso AlphaA =/= []))
+ andalso
+ (AlphaA > AlphaB))))
+ orelse
+ ((MMPA =:= MMPB)
+ andalso
+ (AlphaA =:= AlphaB)
+ andalso
+ ((PatchB =:= [] andalso PatchA =/= [])
+ orelse
+ PatchA > PatchB))).
+
+%% @doc Test that VsnA is greater than or equal to VsnB
+%% (already-normalized terms are passed on; normalize/parse are
+%% idempotent on parsed input, so re-normalizing in gt/eql is safe)
+-spec gte(any_version(), any_version()) -> boolean().
+gte(VsnA, VsnB) ->
+ NVsnA = normalize(parse(VsnA)),
+ NVsnB = normalize(parse(VsnB)),
+ gt(NVsnA, NVsnB) orelse eql(NVsnA, NVsnB).
+
+%% @doc Test that VsnA is less than VsnB
+%% (mirror image of gt/2 — see the ordering notes there)
+-spec lt(any_version(), any_version()) -> boolean().
+lt(VsnA, VsnB) ->
+ {MMPA, {AlphaA, PatchA}} = normalize(parse(VsnA)),
+ {MMPB, {AlphaB, PatchB}} = normalize(parse(VsnB)),
+ ((MMPA < MMPB)
+ orelse
+ ((MMPA =:= MMPB)
+ andalso
+ ((AlphaB =:= [] andalso AlphaA =/= [])
+ orelse
+ ((not (AlphaA =:= [] andalso AlphaB =/= []))
+ andalso
+ (AlphaA < AlphaB))))
+ orelse
+ ((MMPA =:= MMPB)
+ andalso
+ (AlphaA =:= AlphaB)
+ andalso
+ ((PatchA =:= [] andalso PatchB =/= [])
+ orelse
+ PatchA < PatchB))).
+
+%% @doc Test that VsnA is less than or equal to VsnB
+-spec lte(any_version(), any_version()) -> boolean().
+lte(VsnA, VsnB) ->
+ NVsnA = normalize(parse(VsnA)),
+ NVsnB = normalize(parse(VsnB)),
+ lt(NVsnA, NVsnB) orelse eql(NVsnA, NVsnB).
+
+%% @doc Test that VsnMatch is greater than or equal to Vsn1 and
+%% less than or equal to Vsn2 (i.e. inclusive on both ends)
+-spec between(any_version(), any_version(), any_version()) -> boolean().
+between(Vsn1, Vsn2, VsnMatch) ->
+ NVsnA = normalize(parse(Vsn1)),
+ NVsnB = normalize(parse(Vsn2)),
+ NVsnMatch = normalize(parse(VsnMatch)),
+ gte(NVsnMatch, NVsnA) andalso
+ lte(NVsnMatch, NVsnB).
+
+%% @doc check that VsnA is Approximately greater than VsnB
+%%
+%% Specifying ">= 2.6.5" is an optimistic version constraint. All
+%% versions greater than the one specified, including major releases
+%% (e.g. 3.0.0) are allowed.
+%%
+%% Conversely, specifying "~> 2.6" is pessimistic about future major
+%% revisions and "~> 2.6.5" is pessimistic about future minor
+%% revisions.
+%%
+%% "~> 2.6" matches cookbooks >= 2.6.0 AND &lt; 3.0.0
+%% "~> 2.6.5" matches cookbooks >= 2.6.5 AND &lt; 2.7.0
+pes(VsnA, VsnB) ->
+ internal_pes(parse(VsnA), parse(VsnB)).
+
+%%%===================================================================
+%%% Friend Functions
+%%%===================================================================
+%% @doc helper function for the peg grammar to parse the iolist into a semver
+-spec internal_parse_version(iolist()) -> semver().
+internal_parse_version([MMP, AlphaPart, BuildPart, _]) ->
+ {parse_major_minor_patch_minpatch(MMP), {parse_alpha_part(AlphaPart),
+ parse_alpha_part(BuildPart)}}.
+
+%% @doc helper function for the peg grammar to parse the iolist into a major_minor_patch
+%% One clause per number of present components; missing trailing
+%% components arrive from the grammar as empty lists.
+-spec parse_major_minor_patch_minpatch(iolist()) -> major_minor_patch_minpatch().
+parse_major_minor_patch_minpatch([MajVsn, [], [], []]) ->
+ strip_maj_version(MajVsn);
+parse_major_minor_patch_minpatch([MajVsn, [<<".">>, MinVsn], [], []]) ->
+ {strip_maj_version(MajVsn), MinVsn};
+parse_major_minor_patch_minpatch([MajVsn,
+ [<<".">>, MinVsn],
+ [<<".">>, PatchVsn], []]) ->
+ {strip_maj_version(MajVsn), MinVsn, PatchVsn};
+parse_major_minor_patch_minpatch([MajVsn,
+ [<<".">>, MinVsn],
+ [<<".">>, PatchVsn],
+ [<<".">>, MinPatch]]) ->
+ {strip_maj_version(MajVsn), MinVsn, PatchVsn, MinPatch}.
+
+%% @doc helper function for the peg grammar to parse the iolist into an alpha part
+%% The first element is kept as a binary; subsequent dot-separated
+%% elements go through format_alpha_part/1 (numeric where possible).
+-spec parse_alpha_part(iolist()) -> [alpha_part()].
+parse_alpha_part([]) ->
+ [];
+parse_alpha_part([_, AV1, Rest]) ->
+ [erlang:iolist_to_binary(AV1) |
+ [format_alpha_part(Part) || Part <- Rest]].
+
+%% @doc according to semver alpha parts that can be treated like
+%% numbers must be. We implement that here by taking the alpha part
+%% and trying to convert it to a number, if it succeeds we use
+%% it. Otherwise we do not.
+-spec format_alpha_part(iolist()) -> integer() | binary().
+format_alpha_part([<<".">>, AlphaPart]) ->
+ Bin = erlang:iolist_to_binary(AlphaPart),
+ try
+ erlang:list_to_integer(erlang:binary_to_list(Bin))
+ catch
+ error:badarg ->
+ Bin
+ end.
+
+%%%===================================================================
+%%% Internal Functions
+%%%===================================================================
+%% Drops an optional leading "v" produced by the grammar (e.g.
+%% "v1.0.0"); other shapes pass through unchanged.
+-spec strip_maj_version(iolist()) -> version_element().
+strip_maj_version([<<"v">>, MajVsn]) ->
+ MajVsn;
+strip_maj_version([[], MajVsn]) ->
+ MajVsn;
+strip_maj_version(MajVsn) ->
+ MajVsn.
+
+%% Renders a pre-release/build element for iolist output; lists and
+%% binaries are already valid iolist members.
+-spec to_list(integer() | binary() | string()) -> string() | binary().
+to_list(Detail) when erlang:is_integer(Detail) ->
+ erlang:integer_to_list(Detail);
+to_list(Detail) when erlang:is_list(Detail); erlang:is_binary(Detail) ->
+ Detail.
+
+%% Renders a pre-release ("-") or build ("+") suffix; empty suffixes
+%% produce nothing at all (no dangling marker).
+-spec format_vsn_rest(binary() | string(), [integer() | binary()]) -> iolist().
+format_vsn_rest(_TypeMark, []) ->
+ [];
+format_vsn_rest(TypeMark, [Head | Rest]) ->
+ [TypeMark, Head |
+ [[".", to_list(Detail)] || Detail <- Rest]].
+
+%% @doc normalize the semver so they can be compared
+%% (pads missing minor/patch/minpatch components with zeros so every
+%% version becomes a 4-tuple)
+-spec normalize(semver()) -> semver().
+normalize({Vsn, Rest})
+ when erlang:is_binary(Vsn);
+ erlang:is_integer(Vsn) ->
+ {{Vsn, 0, 0, 0}, Rest};
+normalize({{Maj, Min}, Rest}) ->
+ {{Maj, Min, 0, 0}, Rest};
+normalize({{Maj, Min, Patch}, Rest}) ->
+ {{Maj, Min, Patch, 0}, Rest};
+normalize(Other = {{_, _, _, _}, {_,_}}) ->
+ Other.
+
+%% @doc to do the pessimistic compare we need a parsed semver. This is
+%% the internal implementation of the of the pessimistic run. The
+%% external just ensures that versions are parsed.
+%% The upper bound depends on how many components the constraint has:
+%% "~> X.Y" allows up to the next major, "~> X.Y.Z" up to the next
+%% minor, "~> X.Y.Z.M" up to the next patch; non-numeric constraints
+%% fall back to a plain gte comparison.
+-spec internal_pes(semver(), semver()) -> boolean().
+internal_pes(VsnA, {{LM, LMI}, _})
+ when erlang:is_integer(LM),
+ erlang:is_integer(LMI) ->
+ gte(VsnA, {{LM, LMI, 0}, {[], []}}) andalso
+ lt(VsnA, {{LM + 1, 0, 0, 0}, {[], []}});
+internal_pes(VsnA, {{LM, LMI, LP}, _})
+ when erlang:is_integer(LM),
+ erlang:is_integer(LMI),
+ erlang:is_integer(LP) ->
+ gte(VsnA, {{LM, LMI, LP}, {[], []}})
+ andalso
+ lt(VsnA, {{LM, LMI + 1, 0, 0}, {[], []}});
+internal_pes(VsnA, {{LM, LMI, LP, LMP}, _})
+ when erlang:is_integer(LM),
+ erlang:is_integer(LMI),
+ erlang:is_integer(LP),
+ erlang:is_integer(LMP) ->
+ gte(VsnA, {{LM, LMI, LP, LMP}, {[], []}})
+ andalso
+ lt(VsnA, {{LM, LMI, LP + 1, 0}, {[], []}});
+internal_pes(Vsn, LVsn) ->
+ gte(Vsn, LVsn).
+
+%%%===================================================================
+%%% Test Functions
+%%%===================================================================
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+%% eql/2 must treat missing minor/patch/minpatch components as zero,
+%% accept an optional leading "v", compare strings and binaries
+%% interchangeably, and distinguish differing pre-release/build parts.
+eql_test() ->
+ ?assertMatch(true, eql("1.0.0-alpha",
+ "1.0.0-alpha")),
+ ?assertMatch(true, eql(<<"1.0.0-alpha">>,
+ "1.0.0-alpha")),
+ ?assertMatch(true, eql("1.0.0-alpha",
+ <<"1.0.0-alpha">>)),
+ ?assertMatch(true, eql(<<"1.0.0-alpha">>,
+ <<"1.0.0-alpha">>)),
+ ?assertMatch(true, eql("v1.0.0-alpha",
+ "1.0.0-alpha")),
+ ?assertMatch(true, eql("1",
+ "1.0.0")),
+ ?assertMatch(true, eql("v1",
+ "v1.0.0")),
+ ?assertMatch(true, eql("1.0",
+ "1.0.0")),
+ ?assertMatch(true, eql("1.0.0",
+ "1")),
+ ?assertMatch(true, eql("1.0.0.0",
+ "1")),
+ ?assertMatch(true, eql("1.0+alpha.1",
+ "1.0.0+alpha.1")),
+ ?assertMatch(true, eql("1.0-alpha.1+build.1",
+ "1.0.0-alpha.1+build.1")),
+ ?assertMatch(true, eql("1.0-alpha.1+build.1",
+ "1.0.0.0-alpha.1+build.1")),
+ ?assertMatch(true, eql("1.0-alpha.1+build.1",
+ "v1.0.0.0-alpha.1+build.1")),
+ ?assertMatch(true, eql("aa", "aa")),
+ ?assertMatch(true, eql("AA.BB", "AA.BB")),
+ ?assertMatch(true, eql("BBB-super", "BBB-super")),
+ ?assertMatch(true, not eql("1.0.0",
+ "1.0.1")),
+ ?assertMatch(true, not eql(<<"1.0.0">>,
+ "1.0.1")),
+ ?assertMatch(true, not eql("1.0.0",
+ <<"1.0.1">>)),
+ ?assertMatch(true, not eql(<<"1.0.0">>,
+ <<"1.0.1">>)),
+ ?assertMatch(true, not eql("1.0.0-alpha",
+ "1.0.1+alpha")),
+ ?assertMatch(true, not eql("1.0.0+build.1",
+ "1.0.1+build.2")),
+ ?assertMatch(true, not eql("1.0.0.0+build.1",
+ "1.0.0.1+build.2")),
+ ?assertMatch(true, not eql("FFF", "BBB")),
+ ?assertMatch(true, not eql("1", "1BBBB")).
+
+%% gt/2 must order pre-release < release < build-metadata for the same
+%% base version, order pre-release/build parts element-wise, compare
+%% non-numeric versions lexically, and never report a version as
+%% greater than itself or its zero-padded equivalent.
+gt_test() ->
+ ?assertMatch(true, gt("1.0.0-alpha.1",
+ "1.0.0-alpha")),
+ ?assertMatch(true, gt("1.0.0.1-alpha.1",
+ "1.0.0.1-alpha")),
+ ?assertMatch(true, gt("1.0.0.4-alpha.1",
+ "1.0.0.2-alpha")),
+ ?assertMatch(true, gt("1.0.0.0-alpha.1",
+ "1.0.0-alpha")),
+ ?assertMatch(true, gt("1.0.0-beta.2",
+ "1.0.0-alpha.1")),
+ ?assertMatch(true, gt("1.0.0-beta.11",
+ "1.0.0-beta.2")),
+ ?assertMatch(true, gt("1.0.0-beta.11",
+ "1.0.0.0-beta.2")),
+ ?assertMatch(true, gt("1.0.0-rc.1", "1.0.0-beta.11")),
+ ?assertMatch(true, gt("1.0.0-rc.1+build.1", "1.0.0-rc.1")),
+ ?assertMatch(true, gt("1.0.0", "1.0.0-rc.1+build.1")),
+ ?assertMatch(true, gt("1.0.0+0.3.7", "1.0.0")),
+ ?assertMatch(true, gt("1.3.7+build", "1.0.0+0.3.7")),
+ ?assertMatch(true, gt("1.3.7+build.2.b8f12d7",
+ "1.3.7+build")),
+ ?assertMatch(true, gt("1.3.7+build.2.b8f12d7",
+ "1.3.7.0+build")),
+ ?assertMatch(true, gt("1.3.7+build.11.e0f985a",
+ "1.3.7+build.2.b8f12d7")),
+ ?assertMatch(true, gt("aa.cc",
+ "aa.bb")),
+ ?assertMatch(true, not gt("1.0.0-alpha",
+ "1.0.0-alpha.1")),
+ ?assertMatch(true, not gt("1.0.0-alpha",
+ "1.0.0.0-alpha.1")),
+ ?assertMatch(true, not gt("1.0.0-alpha.1",
+ "1.0.0-beta.2")),
+ ?assertMatch(true, not gt("1.0.0-beta.2",
+ "1.0.0-beta.11")),
+ ?assertMatch(true, not gt("1.0.0-beta.11",
+ "1.0.0-rc.1")),
+ ?assertMatch(true, not gt("1.0.0-rc.1",
+ "1.0.0-rc.1+build.1")),
+ ?assertMatch(true, not gt("1.0.0-rc.1+build.1",
+ "1.0.0")),
+ ?assertMatch(true, not gt("1.0.0",
+ "1.0.0+0.3.7")),
+ ?assertMatch(true, not gt("1.0.0+0.3.7",
+ "1.3.7+build")),
+ ?assertMatch(true, not gt("1.3.7+build",
+ "1.3.7+build.2.b8f12d7")),
+ ?assertMatch(true, not gt("1.3.7+build.2.b8f12d7",
+ "1.3.7+build.11.e0f985a")),
+ ?assertMatch(true, not gt("1.0.0-alpha",
+ "1.0.0-alpha")),
+ ?assertMatch(true, not gt("1",
+ "1.0.0")),
+ ?assertMatch(true, not gt("aa.bb",
+ "aa.bb")),
+ ?assertMatch(true, not gt("aa.cc",
+ "aa.dd")),
+ ?assertMatch(true, not gt("1.0",
+ "1.0.0")),
+ ?assertMatch(true, not gt("1.0.0",
+ "1")),
+ ?assertMatch(true, not gt("1.0+alpha.1",
+ "1.0.0+alpha.1")),
+ ?assertMatch(true, not gt("1.0-alpha.1+build.1",
+ "1.0.0-alpha.1+build.1")).
+
+%% lt/2 is the strict mirror of gt/2: same precedence chain
+%% (pre-release < release < build-tagged), numeric-then-lexical part
+%% comparison, and zero-padded forms ("1" vs "1.0.0") are equal, so
+%% neither side is lower.
+lt_test() ->
+ ?assertMatch(true, lt("1.0.0-alpha",
+ "1.0.0-alpha.1")),
+ ?assertMatch(true, lt("1.0.0-alpha",
+ "1.0.0.0-alpha.1")),
+ ?assertMatch(true, lt("1.0.0-alpha.1",
+ "1.0.0-beta.2")),
+ ?assertMatch(true, lt("1.0.0-beta.2",
+ "1.0.0-beta.11")),
+ ?assertMatch(true, lt("1.0.0-beta.11",
+ "1.0.0-rc.1")),
+ ?assertMatch(true, lt("1.0.0.1-beta.11",
+ "1.0.0.1-rc.1")),
+ ?assertMatch(true, lt("1.0.0-rc.1",
+ "1.0.0-rc.1+build.1")),
+ ?assertMatch(true, lt("1.0.0-rc.1+build.1",
+ "1.0.0")),
+ ?assertMatch(true, lt("1.0.0",
+ "1.0.0+0.3.7")),
+ ?assertMatch(true, lt("1.0.0+0.3.7",
+ "1.3.7+build")),
+ ?assertMatch(true, lt("1.3.7+build",
+ "1.3.7+build.2.b8f12d7")),
+ ?assertMatch(true, lt("1.3.7+build.2.b8f12d7",
+ "1.3.7+build.11.e0f985a")),
+ ?assertMatch(true, not lt("1.0.0-alpha",
+ "1.0.0-alpha")),
+ ?assertMatch(true, not lt("1",
+ "1.0.0")),
+ %% "1" pads to "1.0.0.0", which is below "1.0.0.1".
+ ?assertMatch(true, lt("1",
+ "1.0.0.1")),
+ ?assertMatch(true, lt("AA.DD",
+ "AA.EE")),
+ ?assertMatch(true, not lt("1.0",
+ "1.0.0")),
+ ?assertMatch(true, not lt("1.0.0.0",
+ "1")),
+ ?assertMatch(true, not lt("1.0+alpha.1",
+ "1.0.0+alpha.1")),
+ ?assertMatch(true, not lt("AA.DD", "AA.CC")),
+ ?assertMatch(true, not lt("1.0-alpha.1+build.1",
+ "1.0.0-alpha.1+build.1")),
+ ?assertMatch(true, not lt("1.0.0-alpha.1",
+ "1.0.0-alpha")),
+ ?assertMatch(true, not lt("1.0.0-beta.2",
+ "1.0.0-alpha.1")),
+ ?assertMatch(true, not lt("1.0.0-beta.11",
+ "1.0.0-beta.2")),
+ ?assertMatch(true, not lt("1.0.0-rc.1", "1.0.0-beta.11")),
+ ?assertMatch(true, not lt("1.0.0-rc.1+build.1", "1.0.0-rc.1")),
+ ?assertMatch(true, not lt("1.0.0", "1.0.0-rc.1+build.1")),
+ ?assertMatch(true, not lt("1.0.0+0.3.7", "1.0.0")),
+ ?assertMatch(true, not lt("1.3.7+build", "1.0.0+0.3.7")),
+ ?assertMatch(true, not lt("1.3.7+build.2.b8f12d7",
+ "1.3.7+build")),
+ ?assertMatch(true, not lt("1.3.7+build.11.e0f985a",
+ "1.3.7+build.2.b8f12d7")).
+
+%% gte/2 = gt/2 plus equality. The leading group checks that all the
+%% "equal under zero-padding" pairs ("1" / "1.0.0", "1.0" / "1.0.0",
+%% etc.) satisfy gte in both directions.
+gte_test() ->
+ ?assertMatch(true, gte("1.0.0-alpha",
+ "1.0.0-alpha")),
+
+ ?assertMatch(true, gte("1",
+ "1.0.0")),
+
+ ?assertMatch(true, gte("1.0",
+ "1.0.0")),
+
+ ?assertMatch(true, gte("1.0.0",
+ "1")),
+
+ ?assertMatch(true, gte("1.0.0.0",
+ "1")),
+
+ ?assertMatch(true, gte("1.0+alpha.1",
+ "1.0.0+alpha.1")),
+
+ ?assertMatch(true, gte("1.0-alpha.1+build.1",
+ "1.0.0-alpha.1+build.1")),
+
+ ?assertMatch(true, gte("1.0.0-alpha.1+build.1",
+ "1.0.0.0-alpha.1+build.1")),
+ ?assertMatch(true, gte("1.0.0-alpha.1",
+ "1.0.0-alpha")),
+ ?assertMatch(true, gte("1.0.0-beta.2",
+ "1.0.0-alpha.1")),
+ ?assertMatch(true, gte("1.0.0-beta.11",
+ "1.0.0-beta.2")),
+ ?assertMatch(true, gte("aa.bb", "aa.bb")),
+ ?assertMatch(true, gte("dd", "aa")),
+ ?assertMatch(true, gte("1.0.0-rc.1", "1.0.0-beta.11")),
+ ?assertMatch(true, gte("1.0.0-rc.1+build.1", "1.0.0-rc.1")),
+ ?assertMatch(true, gte("1.0.0", "1.0.0-rc.1+build.1")),
+ ?assertMatch(true, gte("1.0.0+0.3.7", "1.0.0")),
+ ?assertMatch(true, gte("1.3.7+build", "1.0.0+0.3.7")),
+ ?assertMatch(true, gte("1.3.7+build.2.b8f12d7",
+ "1.3.7+build")),
+ ?assertMatch(true, gte("1.3.7+build.11.e0f985a",
+ "1.3.7+build.2.b8f12d7")),
+ %% Negative cases: strictly-lower versions must fail gte.
+ ?assertMatch(true, not gte("1.0.0-alpha",
+ "1.0.0-alpha.1")),
+ ?assertMatch(true, not gte("CC", "DD")),
+ ?assertMatch(true, not gte("1.0.0-alpha.1",
+ "1.0.0-beta.2")),
+ ?assertMatch(true, not gte("1.0.0-beta.2",
+ "1.0.0-beta.11")),
+ ?assertMatch(true, not gte("1.0.0-beta.11",
+ "1.0.0-rc.1")),
+ ?assertMatch(true, not gte("1.0.0-rc.1",
+ "1.0.0-rc.1+build.1")),
+ ?assertMatch(true, not gte("1.0.0-rc.1+build.1",
+ "1.0.0")),
+ ?assertMatch(true, not gte("1.0.0",
+ "1.0.0+0.3.7")),
+ ?assertMatch(true, not gte("1.0.0+0.3.7",
+ "1.3.7+build")),
+ ?assertMatch(true, not gte("1.0.0",
+ "1.0.0+build.1")),
+ ?assertMatch(true, not gte("1.3.7+build",
+ "1.3.7+build.2.b8f12d7")),
+ ?assertMatch(true, not gte("1.3.7+build.2.b8f12d7",
+ "1.3.7+build.11.e0f985a")).
+%% lte/2 = lt/2 plus equality; mirrors gte_test above, including the
+%% zero-padded-equal pairs and the alphanumeric comparisons.
+lte_test() ->
+ ?assertMatch(true, lte("1.0.0-alpha",
+ "1.0.0-alpha.1")),
+ ?assertMatch(true, lte("1.0.0-alpha.1",
+ "1.0.0-beta.2")),
+ ?assertMatch(true, lte("1.0.0-beta.2",
+ "1.0.0-beta.11")),
+ ?assertMatch(true, lte("1.0.0-beta.11",
+ "1.0.0-rc.1")),
+ ?assertMatch(true, lte("1.0.0-rc.1",
+ "1.0.0-rc.1+build.1")),
+ ?assertMatch(true, lte("1.0.0-rc.1+build.1",
+ "1.0.0")),
+ ?assertMatch(true, lte("1.0.0",
+ "1.0.0+0.3.7")),
+ ?assertMatch(true, lte("1.0.0+0.3.7",
+ "1.3.7+build")),
+ ?assertMatch(true, lte("1.3.7+build",
+ "1.3.7+build.2.b8f12d7")),
+ ?assertMatch(true, lte("1.3.7+build.2.b8f12d7",
+ "1.3.7+build.11.e0f985a")),
+ ?assertMatch(true, lte("1.0.0-alpha",
+ "1.0.0-alpha")),
+ ?assertMatch(true, lte("1",
+ "1.0.0")),
+ ?assertMatch(true, lte("1.0",
+ "1.0.0")),
+ ?assertMatch(true, lte("1.0.0",
+ "1")),
+ ?assertMatch(true, lte("1.0+alpha.1",
+ "1.0.0+alpha.1")),
+ ?assertMatch(true, lte("1.0.0.0+alpha.1",
+ "1.0.0+alpha.1")),
+ ?assertMatch(true, lte("1.0-alpha.1+build.1",
+ "1.0.0-alpha.1+build.1")),
+ ?assertMatch(true, lte("aa","cc")),
+ ?assertMatch(true, lte("cc","cc")),
+ %% Negative cases: strictly-greater versions must fail lte.
+ ?assertMatch(true, not lte("1.0.0-alpha.1",
+ "1.0.0-alpha")),
+ ?assertMatch(true, not lte("cc", "aa")),
+ ?assertMatch(true, not lte("1.0.0-beta.2",
+ "1.0.0-alpha.1")),
+ ?assertMatch(true, not lte("1.0.0-beta.11",
+ "1.0.0-beta.2")),
+ ?assertMatch(true, not lte("1.0.0-rc.1", "1.0.0-beta.11")),
+ ?assertMatch(true, not lte("1.0.0-rc.1+build.1", "1.0.0-rc.1")),
+ ?assertMatch(true, not lte("1.0.0", "1.0.0-rc.1+build.1")),
+ ?assertMatch(true, not lte("1.0.0+0.3.7", "1.0.0")),
+ ?assertMatch(true, not lte("1.3.7+build", "1.0.0+0.3.7")),
+ ?assertMatch(true, not lte("1.3.7+build.2.b8f12d7",
+ "1.3.7+build")),
+ ?assertMatch(true, not lte("1.3.7+build.11.e0f985a",
+ "1.3.7+build.2.b8f12d7")).
+
+%% between/3(Low, High, Vsn) must hold when Low =< Vsn =< High under
+%% the same ordering as lt/gt, inclusive at both ends (the all-equal
+%% cases below), with zero-padding equality applied to every argument.
+between_test() ->
+ ?assertMatch(true, between("1.0.0-alpha",
+ "1.0.0-alpha.3",
+ "1.0.0-alpha.2")),
+ ?assertMatch(true, between("1.0.0-alpha.1",
+ "1.0.0-beta.2",
+ "1.0.0-alpha.25")),
+ ?assertMatch(true, between("1.0.0-beta.2",
+ "1.0.0-beta.11",
+ "1.0.0-beta.7")),
+ ?assertMatch(true, between("1.0.0-beta.11",
+ "1.0.0-rc.3",
+ "1.0.0-rc.1")),
+ ?assertMatch(true, between("1.0.0-rc.1",
+ "1.0.0-rc.1+build.3",
+ "1.0.0-rc.1+build.1")),
+
+ ?assertMatch(true, between("1.0.0.0-rc.1",
+ "1.0.0-rc.1+build.3",
+ "1.0.0-rc.1+build.1")),
+ ?assertMatch(true, between("1.0.0-rc.1+build.1",
+ "1.0.0",
+ "1.0.0-rc.33")),
+ ?assertMatch(true, between("1.0.0",
+ "1.0.0+0.3.7",
+ "1.0.0+0.2")),
+ ?assertMatch(true, between("1.0.0+0.3.7",
+ "1.3.7+build",
+ "1.2")),
+ ?assertMatch(true, between("1.3.7+build",
+ "1.3.7+build.2.b8f12d7",
+ "1.3.7+build.1")),
+ ?assertMatch(true, between("1.3.7+build.2.b8f12d7",
+ "1.3.7+build.11.e0f985a",
+ "1.3.7+build.10.a36faa")),
+ %% Inclusive bounds: Low == High == Vsn is in range.
+ ?assertMatch(true, between("1.0.0-alpha",
+ "1.0.0-alpha",
+ "1.0.0-alpha")),
+ ?assertMatch(true, between("1",
+ "1.0.0",
+ "1.0.0")),
+ ?assertMatch(true, between("1.0",
+ "1.0.0",
+ "1.0.0")),
+
+ ?assertMatch(true, between("1.0",
+ "1.0.0.0",
+ "1.0.0.0")),
+ ?assertMatch(true, between("1.0.0",
+ "1",
+ "1")),
+ ?assertMatch(true, between("1.0+alpha.1",
+ "1.0.0+alpha.1",
+ "1.0.0+alpha.1")),
+ ?assertMatch(true, between("1.0-alpha.1+build.1",
+ "1.0.0-alpha.1+build.1",
+ "1.0.0-alpha.1+build.1")),
+ ?assertMatch(true, between("aaa",
+ "ddd",
+ "cc")),
+ %% Out-of-range versions (above High or below Low) must fail.
+ ?assertMatch(true, not between("1.0.0-alpha.1",
+ "1.0.0-alpha.22",
+ "1.0.0")),
+ ?assertMatch(true, not between("1.0.0",
+ "1.0.0-alpha.1",
+ "2.0")),
+ ?assertMatch(true, not between("1.0.0-beta.1",
+ "1.0.0-beta.11",
+ "1.0.0-alpha")),
+ ?assertMatch(true, not between("1.0.0-beta.11", "1.0.0-rc.1",
+ "1.0.0-rc.22")),
+ ?assertMatch(true, not between("aaa", "ddd", "zzz")).
+
+%% pes/2 ("pessimistic" / "~>"-style constraint): pes(Vsn, Req) holds
+%% when Vsn >= Req but stays within the release series fixed by all but
+%% Req's last component (e.g. "~> 2.6" admits 2.6.x-2.9, not 3.0.0;
+%% "~> 2.6.5" admits 2.6.5-2.6.9, not 2.7).
+pes_test() ->
+ ?assertMatch(true, pes("2.6.0", "2.6")),
+ ?assertMatch(true, pes("2.7", "2.6")),
+ ?assertMatch(true, pes("2.8", "2.6")),
+ ?assertMatch(true, pes("2.9", "2.6")),
+ ?assertMatch(true, pes("A.B", "A.A")),
+ ?assertMatch(true, not pes("3.0.0", "2.6")),
+ ?assertMatch(true, not pes("2.5", "2.6")),
+ ?assertMatch(true, pes("2.6.5", "2.6.5")),
+ ?assertMatch(true, pes("2.6.6", "2.6.5")),
+ ?assertMatch(true, pes("2.6.7", "2.6.5")),
+ ?assertMatch(true, pes("2.6.8", "2.6.5")),
+ ?assertMatch(true, pes("2.6.9", "2.6.5")),
+ ?assertMatch(true, pes("2.6.0.9", "2.6.0.5")),
+ ?assertMatch(true, not pes("2.7", "2.6.5")),
+ ?assertMatch(true, not pes("2.1.7", "2.1.6.5")),
+ ?assertMatch(true, not pes("A.A", "A.B")),
+ ?assertMatch(true, not pes("2.5", "2.6.5")).
+
+%% format/1 renders the internal {Core, {PreRelease, Build}} tuple back
+%% to text as an iolist (hence iolist_to_binary in most assertions):
+%% core parts joined with ".", pre-release appended after "-", build
+%% metadata after "+". The first two assertions pin the raw iolist
+%% shape, including the empty-list placeholders for the two suffixes.
+version_format_test() ->
+ ?assertEqual(["1", [], []], format({1, {[],[]}})),
+ ?assertEqual(["1", ".", "2", ".", "34", [], []], format({{1,2,34},{[],[]}})),
+ ?assertEqual(<<"a">>, erlang:iolist_to_binary(format({<<"a">>, {[],[]}}))),
+ ?assertEqual(<<"a.b">>, erlang:iolist_to_binary(format({{<<"a">>,<<"b">>}, {[],[]}}))),
+ ?assertEqual(<<"1">>, erlang:iolist_to_binary(format({1, {[],[]}}))),
+ ?assertEqual(<<"1.2">>, erlang:iolist_to_binary(format({{1,2}, {[],[]}}))),
+ ?assertEqual(<<"1.2.2">>, erlang:iolist_to_binary(format({{1,2,2}, {[],[]}}))),
+ ?assertEqual(<<"1.99.2">>, erlang:iolist_to_binary(format({{1,99,2}, {[],[]}}))),
+ ?assertEqual(<<"1.99.2-alpha">>, erlang:iolist_to_binary(format({{1,99,2}, {[<<"alpha">>],[]}}))),
+ ?assertEqual(<<"1.99.2-alpha.1">>, erlang:iolist_to_binary(format({{1,99,2}, {[<<"alpha">>,1], []}}))),
+ ?assertEqual(<<"1.99.2+build.1.a36">>,
+ erlang:iolist_to_binary(format({{1,99,2}, {[], [<<"build">>, 1, <<"a36">>]}}))),
+ ?assertEqual(<<"1.99.2.44+build.1.a36">>,
+ erlang:iolist_to_binary(format({{1,99,2,44}, {[], [<<"build">>, 1, <<"a36">>]}}))),
+ ?assertEqual(<<"1.99.2-alpha.1+build.1.a36">>,
+ erlang:iolist_to_binary(format({{1,99,2}, {[<<"alpha">>, 1], [<<"build">>, 1, <<"a36">>]}}))),
+ ?assertEqual(<<"1">>, erlang:iolist_to_binary(format({1, {[],[]}}))).
+
+-endif.
diff --git a/deps/rabbit_common/src/rabbit_semver_parser.erl b/deps/rabbit_common/src/rabbit_semver_parser.erl
new file mode 100644
index 0000000000..3a036021f7
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_semver_parser.erl
@@ -0,0 +1,306 @@
+%%% Imported from https://github.com/erlware/erlware_commons.git
+%%% Commit 09168347525916e291c8aa6e3073e260e5f4a116
+%%%
+%%% NOTE(review): this reads like the output of a PEG parser generator
+%%% (the p_* combinators below come from a "peg_includes.hrl", which is
+%%% neotoma's layout) -- presumably it should be regenerated from the
+%%% grammar rather than hand-edited; confirm where the grammar lives
+%%% before changing anything here.
+
+-module(rabbit_semver_parser).
+-export([parse/1,file/1]).
+%% Feature flags for the -ifdef'd combinator sections below: only the
+%% combinators this grammar actually uses are compiled in.
+-define(p_anything,true).
+-define(p_charclass,true).
+-define(p_choose,true).
+-define(p_not,true).
+-define(p_one_or_more,true).
+-define(p_optional,true).
+-define(p_scan,true).
+-define(p_seq,true).
+-define(p_string,true).
+-define(p_zero_or_more,true).
+
+
+
+%% Parse the contents of Filename as a semver; any {error, _} from
+%% file:read_file/1 is returned untouched.
+-spec file(file:name()) -> any().
+file(Filename) -> case file:read_file(Filename) of {ok,Bin} -> parse(Bin); Err -> Err end.
+
+%% Entry point. Lists are converted to UTF-8 binaries first. A memo
+%% table is created per call (packrat parsing) and always released
+%% afterwards. Only a complete match (empty remainder) yields the bare
+%% AST; otherwise the raw result / failure tuple is returned as-is.
+-spec parse(binary() | list()) -> any().
+parse(List) when is_list(List) -> parse(unicode:characters_to_binary(List));
+parse(Input) when is_binary(Input) ->
+ _ = setup_memo(),
+ Result = case 'semver'(Input,{{line,1},{column,1}}) of
+ {AST, <<>>, _Index} -> AST;
+ Any -> Any
+ end,
+ release_memo(), Result.
+
+%% Root rule: version core, optional "-"-prefixed pre-release parts,
+%% optional "+"-prefixed build parts, then end-of-input (p_not of
+%% p_anything). The matched node is handed to
+%% rabbit_semver:internal_parse_version/1 for shaping.
+-spec 'semver'(input(), index()) -> parse_result().
+'semver'(Input, Index) ->
+ p(Input, Index, 'semver', fun(I,D) -> (p_seq([fun 'major_minor_patch_min_patch'/2, p_optional(p_seq([p_string(<<"-">>), fun 'alpha_part'/2, p_zero_or_more(p_seq([p_string(<<".">>), fun 'alpha_part'/2]))])), p_optional(p_seq([p_string(<<"+">>), fun 'alpha_part'/2, p_zero_or_more(p_seq([p_string(<<".">>), fun 'alpha_part'/2]))])), p_not(p_anything())]))(I,D) end, fun(Node, _Idx) -> rabbit_semver:internal_parse_version(Node) end).
+
+%% Version core: up to four dot-separated components; the first may be
+%% "v"-prefixed numeric or purely alphanumeric, the rest are optional.
+-spec 'major_minor_patch_min_patch'(input(), index()) -> parse_result().
+'major_minor_patch_min_patch'(Input, Index) ->
+ p(Input, Index, 'major_minor_patch_min_patch', fun(I,D) -> (p_seq([p_choose([p_seq([p_optional(p_string(<<"v">>)), fun 'numeric_part'/2]), fun 'alpha_part'/2]), p_optional(p_seq([p_string(<<".">>), fun 'version_part'/2])), p_optional(p_seq([p_string(<<".">>), fun 'version_part'/2])), p_optional(p_seq([p_string(<<".">>), fun 'version_part'/2]))]))(I,D) end, fun(Node, Idx) ->transform('major_minor_patch_min_patch', Node, Idx) end).
+
+%% One version component: numeric wins over alphanumeric.
+-spec 'version_part'(input(), index()) -> parse_result().
+'version_part'(Input, Index) ->
+ p(Input, Index, 'version_part', fun(I,D) -> (p_choose([fun 'numeric_part'/2, fun 'alpha_part'/2]))(I,D) end, fun(Node, Idx) ->transform('version_part', Node, Idx) end).
+
+%% One or more ASCII digits, converted to an integer.
+-spec 'numeric_part'(input(), index()) -> parse_result().
+'numeric_part'(Input, Index) ->
+ p(Input, Index, 'numeric_part', fun(I,D) -> (p_one_or_more(p_charclass(<<"[0-9]">>)))(I,D) end, fun(Node, _Idx) ->erlang:list_to_integer(erlang:binary_to_list(erlang:iolist_to_binary(Node))) end).
+
+%% One or more ASCII alphanumerics, kept as a binary.
+-spec 'alpha_part'(input(), index()) -> parse_result().
+'alpha_part'(Input, Index) ->
+ p(Input, Index, 'alpha_part', fun(I,D) -> (p_one_or_more(p_charclass(<<"[A-Za-z0-9]">>)))(I,D) end, fun(Node, _Idx) ->erlang:iolist_to_binary(Node) end).
+
+
+%% Identity transform for rules with no custom semantic action.
+transform(_,Node,_Index) -> Node.
+-file("peg_includes.hrl", 1).
+-type index() :: {{line, pos_integer()}, {column, pos_integer()}}.
+-type input() :: binary().
+-type parse_failure() :: {fail, term()}.
+-type parse_success() :: {term(), input(), index()}.
+-type parse_result() :: parse_failure() | parse_success().
+-type parse_fun() :: fun((input(), index()) -> parse_result()).
+-type xform_fun() :: fun((input(), index()) -> term()).
+
+%% Packrat driver: look up (Index, Name) in the memo table; on a miss,
+%% run ParseFun and, on success, apply TransformFun to the match before
+%% memoizing. Failures are memoized too, so no rule re-runs at a given
+%% position.
+-spec p(input(), index(), atom(), parse_fun(), xform_fun()) -> parse_result().
+p(Inp, StartIndex, Name, ParseFun, TransformFun) ->
+ case get_memo(StartIndex, Name) of % See if the current reduction is memoized
+ {ok, Memo} -> %Memo; % If it is, return the stored result
+ Memo;
+ _ -> % If not, attempt to parse
+ Result = case ParseFun(Inp, StartIndex) of
+ {fail,_} = Failure -> % If it fails, memoize the failure
+ Failure;
+ {Match, InpRem, NewIndex} -> % If it passes, transform and memoize the result.
+ Transformed = TransformFun(Match, StartIndex),
+ {Transformed, InpRem, NewIndex}
+ end,
+ memoize(StartIndex, Name, Result),
+ Result
+ end.
+
+%% Creates the per-call ETS memo table; the tid is stashed in the
+%% process dictionary (keyed by module) so the combinators can reach it
+%% without threading it through every call.
+-spec setup_memo() -> ets:tid().
+setup_memo() ->
+ put({parse_memo_table, ?MODULE}, ets:new(?MODULE, [set])).
+
+%% Drops the memo table created by setup_memo/0.
+-spec release_memo() -> true.
+release_memo() ->
+ ets:delete(memo_table_name()).
+
+%% Prepends {Name, Result} to the proplist stored under Index.
+-spec memoize(index(), atom(), parse_result()) -> true.
+memoize(Index, Name, Result) ->
+ Memo = case ets:lookup(memo_table_name(), Index) of
+ [] -> [];
+ [{Index, Plist}] -> Plist
+ end,
+ ets:insert(memo_table_name(), {Index, [{Name, Result}|Memo]}).
+
+%% Fetches the memoized result for rule Name at position Index, if any.
+-spec get_memo(index(), atom()) -> {ok, term()} | {error, not_found}.
+get_memo(Index, Name) ->
+ case ets:lookup(memo_table_name(), Index) of
+ [] -> {error, not_found};
+ [{Index, Plist}] ->
+ case proplists:lookup(Name, Plist) of
+ {Name, Result} -> {ok, Result};
+ _ -> {error, not_found}
+ end
+ end.
+
+%% Reads the memo table tid back out of the process dictionary.
+-spec memo_table_name() -> ets:tid().
+memo_table_name() ->
+ get({parse_memo_table, ?MODULE}).
+
+-ifdef(p_eof).
+%% Matches only at end of input.
+-spec p_eof() -> parse_fun().
+p_eof() ->
+ fun(<<>>, Index) -> {eof, [], Index};
+ (_, Index) -> {fail, {expected, eof, Index}} end.
+-endif.
+
+-ifdef(p_optional).
+%% Zero-or-one: a failure of P becomes an empty match, consuming nothing.
+-spec p_optional(parse_fun()) -> parse_fun().
+p_optional(P) ->
+ fun(Input, Index) ->
+ case P(Input, Index) of
+ {fail,_} -> {[], Input, Index};
+ {_, _, _} = Success -> Success
+ end
+ end.
+-endif.
+
+-ifdef(p_not).
+%% Negative lookahead: succeeds (consuming nothing) iff P fails.
+-spec p_not(parse_fun()) -> parse_fun().
+p_not(P) ->
+ fun(Input, Index)->
+ case P(Input,Index) of
+ {fail,_} ->
+ {[], Input, Index};
+ {Result, _, _} -> {fail, {expected, {no_match, Result},Index}}
+ end
+ end.
+-endif.
+
+-ifdef(p_assert).
+%% Positive lookahead: succeeds iff P succeeds, but consumes nothing.
+-spec p_assert(parse_fun()) -> parse_fun().
+p_assert(P) ->
+ fun(Input,Index) ->
+ case P(Input,Index) of
+ {fail,_} = Failure-> Failure;
+ _ -> {[], Input, Index}
+ end
+ end.
+-endif.
+
+-ifdef(p_seq).
+%% Sequence: all parsers must match in order; results are collected in
+%% order (hence the reverse in p_all). First failure aborts the chain.
+-spec p_seq([parse_fun()]) -> parse_fun().
+p_seq(P) ->
+ fun(Input, Index) ->
+ p_all(P, Input, Index, [])
+ end.
+
+-spec p_all([parse_fun()], input(), index(), [term()]) -> parse_result().
+p_all([], Inp, Index, Accum ) -> {lists:reverse( Accum ), Inp, Index};
+p_all([P|Parsers], Inp, Index, Accum) ->
+ case P(Inp, Index) of
+ {fail, _} = Failure -> Failure;
+ {Result, InpRem, NewIndex} -> p_all(Parsers, InpRem, NewIndex, [Result|Accum])
+ end.
+-endif.
+
+-ifdef(p_choose).
+%% Ordered choice: first parser to succeed wins; if all fail, the
+%% FIRST failure (not the last) is reported.
+-spec p_choose([parse_fun()]) -> parse_fun().
+p_choose(Parsers) ->
+ fun(Input, Index) ->
+ p_attempt(Parsers, Input, Index, none)
+ end.
+
+-spec p_attempt([parse_fun()], input(), index(), none | parse_failure()) -> parse_result().
+p_attempt([], _Input, _Index, Failure) -> Failure;
+p_attempt([P|Parsers], Input, Index, FirstFailure)->
+ case P(Input, Index) of
+ {fail, _} = Failure ->
+ case FirstFailure of
+ none -> p_attempt(Parsers, Input, Index, Failure);
+ _ -> p_attempt(Parsers, Input, Index, FirstFailure)
+ end;
+ Result -> Result
+ end.
+-endif.
+
+-ifdef(p_zero_or_more).
+%% Kleene star: repeat P greedily; never fails (may match nothing).
+-spec p_zero_or_more(parse_fun()) -> parse_fun().
+p_zero_or_more(P) ->
+ fun(Input, Index) ->
+ p_scan(P, Input, Index, [])
+ end.
+-endif.
+
+-ifdef(p_one_or_more).
+%% Like p_zero_or_more but requires at least one match. On an empty
+%% scan, P is run once more purely to recover its failure reason for
+%% the {at_least_one, _} error report.
+-spec p_one_or_more(parse_fun()) -> parse_fun().
+p_one_or_more(P) ->
+ fun(Input, Index)->
+ Result = p_scan(P, Input, Index, []),
+ case Result of
+ {[_|_], _, _} ->
+ Result;
+ _ ->
+ {fail, {expected, Failure, _}} = P(Input,Index),
+ {fail, {expected, {at_least_one, Failure}, Index}}
+ end
+ end.
+-endif.
+
+-ifdef(p_label).
+%% Wraps P's result as {Tag, Result} for named captures.
+-spec p_label(atom(), parse_fun()) -> parse_fun().
+p_label(Tag, P) ->
+ fun(Input, Index) ->
+ case P(Input, Index) of
+ {fail,_} = Failure ->
+ Failure;
+ {Result, InpRem, NewIndex} ->
+ {{Tag, Result}, InpRem, NewIndex}
+ end
+ end.
+-endif.
+
+-ifdef(p_scan).
+%% Repeated-application helper for the * and + combinators: applies P
+%% until it fails or input is exhausted, accumulating matches in order.
+-spec p_scan(parse_fun(), input(), index(), [term()]) -> {[term()], input(), index()}.
+p_scan(_, <<>>, Index, Accum) -> {lists:reverse(Accum), <<>>, Index};
+p_scan(P, Inp, Index, Accum) ->
+ case P(Inp, Index) of
+ {fail,_} -> {lists:reverse(Accum), Inp, Index};
+ {Result, InpRem, NewIndex} -> p_scan(P, InpRem, NewIndex, [Result | Accum])
+ end.
+-endif.
+
+-ifdef(p_string).
+%% Literal match: binary prefix comparison via pattern match; the
+%% badmatch on a non-matching prefix is converted into a parse failure.
+-spec p_string(binary()) -> parse_fun().
+p_string(S) ->
+ Length = erlang:byte_size(S),
+ fun(Input, Index) ->
+ try
+ <<S:Length/binary, Rest/binary>> = Input,
+ {S, Rest, p_advance_index(S, Index)}
+ catch
+ error:{badmatch,_} -> {fail, {expected, {string, S}, Index}}
+ end
+ end.
+-endif.
+
+-ifdef(p_anything).
+%% Matches any single UTF-8 character; fails only at end of input.
+-spec p_anything() -> parse_fun().
+p_anything() ->
+ fun(<<>>, Index) -> {fail, {expected, any_character, Index}};
+ (Input, Index) when is_binary(Input) ->
+ <<C/utf8, Rest/binary>> = Input,
+ {<<C/utf8>>, Rest, p_advance_index(<<C/utf8>>, Index)}
+ end.
+-endif.
+
+-ifdef(p_charclass).
+%% Regex character class, compiled once per combinator instance and
+%% applied anchored at the current position.
+-spec p_charclass(string() | binary()) -> parse_fun().
+p_charclass(Class) ->
+ {ok, RE} = re:compile(Class, [unicode, dotall]),
+ fun(Inp, Index) ->
+ case re:run(Inp, RE, [anchored]) of
+ {match, [{0, Length}|_]} ->
+ {Head, Tail} = erlang:split_binary(Inp, Length),
+ {Head, Tail, p_advance_index(Head, Index)};
+ _ -> {fail, {expected, {character_class, binary_to_list(Class)}, Index}}
+ end
+ end.
+-endif.
+
+-ifdef(p_regexp).
+%% Arbitrary anchored regular expression match.
+-spec p_regexp(binary()) -> parse_fun().
+p_regexp(Regexp) ->
+ {ok, RE} = re:compile(Regexp, [unicode, dotall, anchored]),
+ fun(Inp, Index) ->
+ case re:run(Inp, RE) of
+ {match, [{0, Length}|_]} ->
+ {Head, Tail} = erlang:split_binary(Inp, Length),
+ {Head, Tail, p_advance_index(Head, Index)};
+ _ -> {fail, {expected, {regexp, binary_to_list(Regexp)}, Index}}
+ end
+ end.
+-endif.
+
+-ifdef(line).
+%% Extracts the line number from an index tuple.
+-spec line(index() | term()) -> pos_integer() | undefined.
+line({{line,L},_}) -> L;
+line(_) -> undefined.
+-endif.
+
+-ifdef(column).
+%% Extracts the column number from an index tuple.
+-spec column(index() | term()) -> pos_integer() | undefined.
+column({_,{column,C}}) -> C;
+column(_) -> undefined.
+-endif.
+
+%% Advances the {line, column} index over matched input: a newline bumps
+%% the line and resets the column to 1, anything else bumps the column.
+-spec p_advance_index(input() | unicode:charlist() | pos_integer(), index()) -> index().
+p_advance_index(MatchedInput, Index) when is_list(MatchedInput) orelse is_binary(MatchedInput)-> % strings
+ lists:foldl(fun p_advance_index/2, Index, unicode:characters_to_list(MatchedInput));
+p_advance_index(MatchedInput, Index) when is_integer(MatchedInput) -> % single characters
+ {{line, Line}, {column, Col}} = Index,
+ case MatchedInput of
+ $\n -> {{line, Line+1}, {column, 1}};
+ _ -> {{line, Line}, {column, Col+1}}
+ end.
diff --git a/deps/rabbit_common/src/rabbit_ssl_options.erl b/deps/rabbit_common/src/rabbit_ssl_options.erl
new file mode 100644
index 0000000000..4c2967df97
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_ssl_options.erl
@@ -0,0 +1,86 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% Normalises a configured SSL/TLS option proplist before it is handed
+%% to the ssl application: rewrites the various verify_fun
+%% configuration shapes into real funs and strips protocol versions
+%% considered unsafe (currently SSLv3, because of POODLE).
+-module(rabbit_ssl_options).
+
+-export([fix/1]).
+
+
+-define(BAD_SSL_PROTOCOL_VERSIONS, [
+ %% POODLE
+ sslv3
+ ]).
+
+-spec fix(rabbit_types:infos()) -> rabbit_types:infos().
+
+%% Apply both fix-ups: protocol-version filtering first, then the
+%% verify_fun rewrite on the resulting proplist.
+fix(Config) ->
+ fix_verify_fun(fix_ssl_protocol_versions(Config)).
+
+%% Accepts verify_fun configured as {M, F, InitialState}, {M, F}
+%% (initial state defaults to 'none'), or an already-usable
+%% {fun/3, State} pair; absent or already-correct values pass through.
+fix_verify_fun(SslOptsConfig) ->
+ %% Starting with ssl 4.0.1 in Erlang R14B, the verify_fun function
+ %% takes 3 arguments and returns a tuple.
+ case rabbit_misc:pget(verify_fun, SslOptsConfig) of
+ {Module, Function, InitialUserState} ->
+ Fun = make_verify_fun(Module, Function, InitialUserState),
+ rabbit_misc:pset(verify_fun, Fun, SslOptsConfig);
+ {Module, Function} when is_atom(Module) ->
+ Fun = make_verify_fun(Module, Function, none),
+ rabbit_misc:pset(verify_fun, Fun, SslOptsConfig);
+ {Verifyfun, _InitialUserState} when is_function(Verifyfun, 3) ->
+ SslOptsConfig;
+ undefined ->
+ SslOptsConfig
+ end.
+
+%% Builds the verify_fun value the ssl application expects from an
+%% {M, F[, State]} configuration. Prefers the modern 3-arity callback
+%% (returned as {Fun, State}); falls back to the legacy 1-arity form
+%% (returned as a bare fun, discarding InitialUserState). Throws
+%% {error, {invalid_verify_fun, _}} if the module is missing or exports
+%% neither arity.
+make_verify_fun(Module, Function, InitialUserState) ->
+ try
+ %% Preload the module: it is required to use
+ %% erlang:function_exported/3.
+ Module:module_info()
+ catch
+ _:Exception ->
+ rabbit_log:error("SSL verify_fun: module ~s missing: ~p~n",
+ [Module, Exception]),
+ throw({error, {invalid_verify_fun, missing_module}})
+ end,
+ NewForm = erlang:function_exported(Module, Function, 3),
+ OldForm = erlang:function_exported(Module, Function, 1),
+ case {NewForm, OldForm} of
+ {true, _} ->
+ %% This verify_fun is supported by Erlang R14B+ (ssl
+ %% 4.0.1 and later).
+ Fun = fun(OtpCert, Event, UserState) ->
+ Module:Function(OtpCert, Event, UserState)
+ end,
+ {Fun, InitialUserState};
+ {_, true} ->
+ %% This verify_fun is supported by Erlang R14B+ for
+ %% undocumented backward compatibility.
+ %%
+ %% InitialUserState is ignored in this case.
+ fun(Args) ->
+ Module:Function(Args)
+ end;
+ _ ->
+ rabbit_log:error("SSL verify_fun: no ~s:~s/3 exported~n",
+ [Module, Function]),
+ throw({error, {invalid_verify_fun, function_not_exported}})
+ end.
+
+%% Removes ?BAD_SSL_PROTOCOL_VERSIONS from the effective 'versions'
+%% list (defaulting to ssl:versions()'s 'available' set when none is
+%% configured), unless the rabbit 'ssl_allow_poodle_attack' escape
+%% hatch is explicitly set to true.
+fix_ssl_protocol_versions(Config) ->
+ case application:get_env(rabbit, ssl_allow_poodle_attack) of
+ {ok, true} ->
+ Config;
+ _ ->
+ Configured = case rabbit_misc:pget(versions, Config) of
+ undefined -> rabbit_misc:pget(available,
+ ssl:versions(),
+ []);
+ Vs -> Vs
+ end,
+ rabbit_misc:pset(versions, Configured -- ?BAD_SSL_PROTOCOL_VERSIONS, Config)
+ end.
diff --git a/deps/rabbit_common/src/rabbit_types.erl b/deps/rabbit_common/src/rabbit_types.erl
new file mode 100644
index 0000000000..c11004fdf4
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_types.erl
@@ -0,0 +1,196 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% Shared type definitions for the RabbitMQ code base. This module
+%% contains no functions: it only exports types (largely built on the
+%% records in rabbit.hrl) for use in -spec's across other modules.
+-module(rabbit_types).
+
+-include("rabbit.hrl").
+
+-export_type([maybe/1, info/0, infos/0, info_key/0, info_keys/0,
+ message/0, msg_id/0, basic_message/0,
+ delivery/0, content/0, decoded_content/0, undecoded_content/0,
+ unencoded_content/0, encoded_content/0, message_properties/0,
+ vhost/0, ctag/0, amqp_error/0, r/1, r2/2, r3/3, listener/0,
+ binding/0, binding_source/0, binding_destination/0,
+ exchange/0,
+ connection/0, connection_name/0, channel/0, channel_name/0,
+ protocol/0, auth_user/0, user/0,
+ username/0, password/0, password_hash/0,
+ ok/1, error/1, error/2, ok_or_error/1, ok_or_error2/2, ok_pid_or_error/0,
+ channel_exit/0, connection_exit/0, mfargs/0, proc_name/0,
+ proc_type_and_name/0, timestamp/0, tracked_connection_id/0,
+ tracked_connection/0, tracked_channel_id/0, tracked_channel/0,
+ node_type/0, topic_access_context/0,
+ authz_data/0, authz_context/0]).
+
+%% NOTE(review): 'maybe' is a reserved word once the maybe_expr feature
+%% is enabled (default from OTP 27), so this type name may need
+%% renaming when targeting newer OTP -- confirm the supported OTP range.
+-type(maybe(T) :: T | 'none').
+-type(timestamp() :: {non_neg_integer(), non_neg_integer(), non_neg_integer()}).
+
+-type(vhost() :: vhost:name()).
+-type(ctag() :: binary()).
+
+%% TODO: make this more precise by tying specific class_ids to
+%% specific properties
+-type(undecoded_content() ::
+ #content{class_id :: rabbit_framing:amqp_class_id(),
+ properties :: 'none',
+ properties_bin :: binary(),
+ payload_fragments_rev :: [binary()]} |
+ #content{class_id :: rabbit_framing:amqp_class_id(),
+ properties :: rabbit_framing:amqp_property_record(),
+ properties_bin :: 'none',
+ payload_fragments_rev :: [binary()]}).
+-type(unencoded_content() :: undecoded_content()).
+-type(decoded_content() ::
+ #content{class_id :: rabbit_framing:amqp_class_id(),
+ properties :: rabbit_framing:amqp_property_record(),
+ properties_bin :: maybe(binary()),
+ payload_fragments_rev :: [binary()]}).
+-type(encoded_content() ::
+ #content{class_id :: rabbit_framing:amqp_class_id(),
+ properties :: maybe(rabbit_framing:amqp_property_record()),
+ properties_bin :: binary(),
+ payload_fragments_rev :: [binary()]}).
+-type(content() :: undecoded_content() | decoded_content()).
+-type(msg_id() :: rabbit_guid:guid()).
+-type(basic_message() ::
+ #basic_message{exchange_name :: rabbit_exchange:name(),
+ routing_keys :: [rabbit_router:routing_key()],
+ content :: content(),
+ id :: msg_id(),
+ is_persistent :: boolean()}).
+-type(message() :: basic_message()).
+-type(delivery() ::
+ #delivery{mandatory :: boolean(),
+ sender :: pid(),
+ message :: message()}).
+-type(message_properties() ::
+ #message_properties{expiry :: pos_integer() | 'undefined',
+ needs_confirming :: boolean()}).
+
+-type(info_key() :: atom()).
+-type(info_keys() :: [info_key()]).
+
+-type(info() :: {info_key(), any()}).
+-type(infos() :: [info()]).
+
+-type(amqp_error() ::
+ #amqp_error{name :: rabbit_framing:amqp_exception(),
+ explanation :: string(),
+ method :: rabbit_framing:amqp_method_name()}).
+
+%% Resource names: r/1 fixes the vhost type, r2/2 additionally the
+%% kind, r3/3 parameterises all three #resource fields.
+-type(r(Kind) ::
+ r2(vhost(), Kind)).
+-type(r2(VirtualHost, Kind) ::
+ r3(VirtualHost, Kind, rabbit_misc:resource_name())).
+-type(r3(VirtualHost, Kind, Name) ::
+ #resource{virtual_host :: VirtualHost,
+ kind :: Kind,
+ name :: Name}).
+
+-type(listener() ::
+ #listener{node :: node(),
+ protocol :: atom(),
+ host :: rabbit_net:hostname(),
+ port :: rabbit_net:ip_port()}).
+
+-type(binding_source() :: rabbit_exchange:name()).
+-type(binding_destination() :: rabbit_amqqueue:name() | rabbit_exchange:name()).
+
+-type(binding() ::
+ #binding{source :: rabbit_exchange:name(),
+ destination :: binding_destination(),
+ key :: rabbit_binding:key(),
+ args :: rabbit_framing:amqp_table()}).
+
+-type(exchange() ::
+ #exchange{name :: rabbit_exchange:name(),
+ type :: rabbit_exchange:type(),
+ durable :: boolean(),
+ auto_delete :: boolean(),
+ arguments :: rabbit_framing:amqp_table()}).
+
+-type(connection_name() :: binary()).
+
+%% used e.g. by rabbit_networking
+-type(connection() :: pid()).
+
+%% used e.g. by rabbit_connection_tracking
+-type(tracked_connection_id() :: {node(), connection_name()}).
+
+-type(tracked_connection() ::
+ #tracked_connection{id :: tracked_connection_id(),
+ node :: node(),
+ vhost :: vhost(),
+ name :: connection_name(),
+ pid :: connection(),
+ protocol :: protocol_name(),
+ peer_host :: rabbit_networking:hostname(),
+ peer_port :: rabbit_networking:ip_port(),
+ username :: username(),
+ connected_at :: integer()}).
+
+-type(channel_name() :: binary()).
+
+-type(channel() :: pid()).
+
+%% used e.g. by rabbit_channel_tracking
+-type(tracked_channel_id() :: {node(), channel_name()}).
+
+-type(tracked_channel() ::
+ #tracked_channel{ id :: tracked_channel_id(),
+ node :: node(),
+ vhost :: vhost(),
+ name :: channel_name(),
+ pid :: channel(),
+ username :: username(),
+ connection :: connection()}).
+
+%% old AMQP 0-9-1-centric type, avoid when possible
+-type(protocol() :: rabbit_framing:protocol()).
+
+-type(protocol_name() :: 'amqp0_8' | 'amqp0_9_1' | 'amqp1_0' | 'mqtt' | 'stomp' | any()).
+
+-type(node_type() :: 'disc' | 'ram').
+
+-type(auth_user() ::
+ #auth_user{username :: username(),
+ tags :: [atom()],
+ impl :: any()}).
+
+-type(authz_data() ::
+ #{peeraddr := inet:ip_address() | binary(),
+ _ => _ } | undefined).
+
+-type(user() ::
+ #user{username :: username(),
+ tags :: [atom()],
+ authz_backends :: [{atom(), any()}]}).
+
+-type(username() :: binary()).
+-type(password() :: binary()).
+-type(password_hash() :: binary()).
+
+%% Conventional ok/error result shapes used throughout the code base.
+-type(ok(A) :: {'ok', A}).
+-type(error(A) :: {'error', A}).
+-type(error(A, B) :: {'error', A, B}).
+-type(ok_or_error(A) :: 'ok' | error(A)).
+-type(ok_or_error2(A, B) :: ok(A) | error(B)).
+-type(ok_pid_or_error() :: ok_or_error2(pid(), any())).
+
+-type(channel_exit() :: no_return()).
+-type(connection_exit() :: no_return()).
+
+-type(mfargs() :: {atom(), atom(), [any()]}).
+
+-type(proc_name() :: term()).
+-type(proc_type_and_name() :: {atom(), proc_name()}).
+
+-type(topic_access_context() :: #{routing_key => rabbit_router:routing_key(),
+ variable_map => map(),
+ _ => _}).
+
+-type(authz_context() :: map()).
diff --git a/deps/rabbit_common/src/rabbit_writer.erl b/deps/rabbit_common/src/rabbit_writer.erl
new file mode 100644
index 0000000000..5bce50c87a
--- /dev/null
+++ b/deps/rabbit_common/src/rabbit_writer.erl
@@ -0,0 +1,437 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_writer).
+
+%% This module backs writer processes ("writers"). The responsibility of
+%% a writer is to serialise protocol methods and write them to the socket.
+%% Every writer is associated with a channel and normally it's the channel
+%% that delegates method delivery to it. However, rabbit_reader
+%% (connection process) can use this module's functions to send data
+%% on channel 0, which is only used for connection negotiation and
+%% other "special" purposes.
+%%
+%% This module provides multiple functions that send protocol commands,
+%% including some that are credit flow-aware.
+%%
+%% Writers perform internal buffering. When the amount of data
+%% buffered exceeds a threshold, a socket flush is performed.
+%% See FLUSH_THRESHOLD for details.
+%%
+%% When a socket write fails, writer will exit.
+
+-include("rabbit.hrl").
+-include("rabbit_framing.hrl").
+
+-export([start/6, start_link/6, start/7, start_link/7, start/8, start_link/8]).
+
+-export([system_continue/3, system_terminate/4, system_code_change/4]).
+
+-export([send_command/2, send_command/3,
+ send_command_sync/2, send_command_sync/3,
+ send_command_and_notify/4, send_command_and_notify/5,
+ send_command_flow/2, send_command_flow/3,
+ flush/1]).
+-export([internal_send_command/4, internal_send_command/6]).
+-export([msg_size/1, maybe_gc_large_msg/1, maybe_gc_large_msg/2]).
+
+%% internal
+-export([enter_mainloop/2, mainloop/2, mainloop1/2]).
+
+%% Internal state of a writer process; threaded through the main loop.
+-record(wstate, {
+ %% socket (port)
+ sock,
+ %% channel number
+ channel,
+ %% connection-negotiated frame_max setting
+ frame_max,
+ %% see #connection.protocol in rabbit_reader
+ protocol,
+ %% connection (rabbit_reader) process
+ reader,
+ %% statistics emission timer
+ stats_timer,
+ %% data pending delivery (between socket
+ %% flushes); a reverse-ordered list of frame iolists,
+ %% reversed by internal_flush/1 before sending
+ pending,
+ %% how many bytes of message payload may be processed before a
+ %% forced GC (see maybe_gc_large_msg/2); 'undefined' disables it
+ writer_gc_threshold
+}).
+
+-define(HIBERNATE_AFTER, 5000).
+%% 1GB
+-define(DEFAULT_GC_THRESHOLD, 1000000000).
+
+%%---------------------------------------------------------------------------
+
+-spec start
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), rabbit_types:protocol(), pid(),
+ rabbit_types:proc_name()) ->
+ rabbit_types:ok(pid()).
+-spec start_link
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), rabbit_types:protocol(), pid(),
+ rabbit_types:proc_name()) ->
+ rabbit_types:ok(pid()).
+-spec start
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), rabbit_types:protocol(), pid(),
+ rabbit_types:proc_name(), boolean()) ->
+ rabbit_types:ok(pid()).
+-spec start_link
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), rabbit_types:protocol(), pid(),
+ rabbit_types:proc_name(), boolean()) ->
+ rabbit_types:ok(pid()).
+-spec start
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), rabbit_types:protocol(), pid(),
+ rabbit_types:proc_name(), boolean(), undefined|non_neg_integer()) ->
+ rabbit_types:ok(pid()).
+-spec start_link
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ non_neg_integer(), rabbit_types:protocol(), pid(),
+ rabbit_types:proc_name(), boolean(), undefined|non_neg_integer()) ->
+ rabbit_types:ok(pid()).
+
+-spec system_code_change(_,_,_,_) -> {'ok',_}.
+-spec system_continue(_,_,#wstate{}) -> any().
+-spec system_terminate(_,_,_,_) -> no_return().
+
+-spec send_command(pid(), rabbit_framing:amqp_method_record()) -> 'ok'.
+-spec send_command
+ (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content()) ->
+ 'ok'.
+-spec send_command_sync(pid(), rabbit_framing:amqp_method_record()) -> 'ok'.
+-spec send_command_sync
+ (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content()) ->
+ 'ok'.
+-spec send_command_and_notify
+ (pid(), pid(), pid(), rabbit_framing:amqp_method_record()) -> 'ok'.
+-spec send_command_and_notify
+ (pid(), pid(), pid(), rabbit_framing:amqp_method_record(),
+ rabbit_types:content()) ->
+ 'ok'.
+-spec send_command_flow(pid(), rabbit_framing:amqp_method_record()) -> 'ok'.
+-spec send_command_flow
+ (pid(), rabbit_framing:amqp_method_record(), rabbit_types:content()) ->
+ 'ok'.
+-spec flush(pid()) -> 'ok'.
+-spec internal_send_command
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ rabbit_framing:amqp_method_record(), rabbit_types:protocol()) ->
+ 'ok'.
+-spec internal_send_command
+ (rabbit_net:socket(), rabbit_channel:channel_number(),
+ rabbit_framing:amqp_method_record(), rabbit_types:content(),
+ non_neg_integer(), rabbit_types:protocol()) ->
+ 'ok'.
+
+-spec msg_size
+ (rabbit_types:content() | rabbit_types:message()) -> non_neg_integer().
+
+-spec maybe_gc_large_msg
+ (rabbit_types:content() | rabbit_types:message()) -> non_neg_integer().
+-spec maybe_gc_large_msg
+ (rabbit_types:content() | rabbit_types:message(),
+ undefined | non_neg_integer()) -> undefined | non_neg_integer().
+
+%%---------------------------------------------------------------------------
+
+%% start/6, start_link/6: spawn a writer with stats collection
+%% disabled (ReaderWantsStats = false).
+start(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity) ->
+ start(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity, false).
+
+start_link(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity) ->
+ start_link(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity, false).
+
+%% start/7, start_link/7: as above but with an explicit
+%% ReaderWantsStats flag; use the default large-message GC threshold.
+start(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity,
+ ReaderWantsStats) ->
+ start(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity,
+ ReaderWantsStats, ?DEFAULT_GC_THRESHOLD).
+
+start_link(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity,
+ ReaderWantsStats) ->
+ start_link(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity,
+ ReaderWantsStats, ?DEFAULT_GC_THRESHOLD).
+
+%% start/8, start_link/8: fully-parameterised variants. The writer is
+%% spawned via proc_lib (an OTP "special process", hence the sys
+%% callbacks above) and enters enter_mainloop/2.
+start(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity,
+ ReaderWantsStats, GCThreshold) ->
+ State = initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid,
+ ReaderWantsStats, GCThreshold),
+ {ok, proc_lib:spawn(?MODULE, enter_mainloop, [Identity, State])}.
+
+start_link(Sock, Channel, FrameMax, Protocol, ReaderPid, Identity,
+ ReaderWantsStats, GCThreshold) ->
+ State = initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid,
+ ReaderWantsStats, GCThreshold),
+ {ok, proc_lib:spawn_link(?MODULE, enter_mainloop, [Identity, State])}.
+
+%% Build the initial #wstate{}. The stats timer field is initialised by
+%% whichever rabbit_event helper matches ReaderWantsStats: an active
+%% timer when the reader wants stats, a disabled one otherwise.
+initial_state(Sock, Channel, FrameMax, Protocol, ReaderPid, ReaderWantsStats, GCThreshold) ->
+ (case ReaderWantsStats of
+ true -> fun rabbit_event:init_stats_timer/2;
+ false -> fun rabbit_event:init_disabled_stats_timer/2
+ end)(#wstate{sock = Sock,
+ channel = Channel,
+ frame_max = FrameMax,
+ protocol = Protocol,
+ reader = ReaderPid,
+ pending = [],
+ writer_gc_threshold = GCThreshold},
+ #wstate.stats_timer).
+
+%% sys callbacks: required because the writer is started via proc_lib
+%% and handles {system, ...} messages itself (see handle_message/3).
+system_continue(Parent, Deb, State) ->
+ mainloop(Deb, State#wstate{reader = Parent}).
+
+system_terminate(Reason, _Parent, _Deb, _State) ->
+ exit(Reason).
+
+system_code_change(Misc, _Module, _OldVsn, _Extra) ->
+ {ok, Misc}.
+
+%% Entry point of the spawned writer process: record the process type
+%% and name, set up sys debug state, and enter the main loop.
+enter_mainloop(Identity, State) ->
+ ?LG_PROCESS_TYPE(writer),
+ Deb = sys:debug_options([]),
+ ?store_proc_name(Identity),
+ mainloop(Deb, State).
+
+%% Run mainloop1/2 and, should it exit, inform the reader (connection
+%% process) that this channel has died rather than crashing silently.
+mainloop(Deb, State) ->
+ try
+ mainloop1(Deb, State)
+ catch
+ exit:Error -> #wstate{reader = ReaderPid, channel = Channel} = State,
+ ReaderPid ! {channel_exit, Channel, Error}
+ end,
+ done.
+
+%% Main receive loop. With nothing pending we may block indefinitely,
+%% hibernating after ?HIBERNATE_AFTER ms of inactivity. With pending
+%% data we only poll the mailbox (after 0) and flush as soon as it is
+%% empty, so buffered frames never linger while the writer is idle.
+%% Fully-qualified ?MODULE calls keep the loop code-upgrade friendly.
+mainloop1(Deb, State = #wstate{pending = []}) ->
+ receive
+ Message -> {Deb1, State1} = handle_message(Deb, Message, State),
+ ?MODULE:mainloop1(Deb1, State1)
+ after ?HIBERNATE_AFTER ->
+ erlang:hibernate(?MODULE, mainloop, [Deb, State])
+ end;
+mainloop1(Deb, State) ->
+ receive
+ Message -> {Deb1, State1} = handle_message(Deb, Message, State),
+ ?MODULE:mainloop1(Deb1, State1)
+ after 0 ->
+ ?MODULE:mainloop1(Deb, internal_flush(State))
+ end.
+
+%% handle_message/3: route OTP system messages to sys (which re-enters
+%% via the system_* callbacks); anything else goes to handle_message/2.
+handle_message(Deb, {system, From, Req}, State = #wstate{reader = Parent}) ->
+ sys:handle_system_msg(Req, From, Parent, ?MODULE, Deb, State);
+handle_message(Deb, Message, State) ->
+ {Deb, handle_message(Message, State)}.
+
+%% handle_message/2: one clause per writer protocol message; each
+%% returns the new #wstate{}.
+%%
+%% Asynchronous sends buffer the frames (see maybe_flush/1).
+handle_message({send_command, MethodRecord}, State) ->
+ internal_send_command_async(MethodRecord, State);
+handle_message({send_command, MethodRecord, Content}, State) ->
+ internal_send_command_async(MethodRecord, Content, State);
+%% Credit flow-aware variants: ack the sender's credit before sending.
+handle_message({send_command_flow, MethodRecord, Sender}, State) ->
+ credit_flow:ack(Sender),
+ internal_send_command_async(MethodRecord, State);
+handle_message({send_command_flow, MethodRecord, Content, Sender}, State) ->
+ credit_flow:ack(Sender),
+ internal_send_command_async(MethodRecord, Content, State);
+%% Synchronous variants (gen-style calls): flush before replying 'ok'.
+handle_message({'$gen_call', From, {send_command_sync, MethodRecord}}, State) ->
+ State1 = internal_flush(
+ internal_send_command_async(MethodRecord, State)),
+ gen_server:reply(From, ok),
+ State1;
+handle_message({'$gen_call', From, {send_command_sync, MethodRecord, Content}},
+ State) ->
+ State1 = internal_flush(
+ internal_send_command_async(MethodRecord, Content, State)),
+ gen_server:reply(From, ok),
+ State1;
+handle_message({'$gen_call', From, flush}, State) ->
+ State1 = internal_flush(State),
+ gen_server:reply(From, ok),
+ State1;
+%% Send, then notify a queue process that delivery happened.
+handle_message({send_command_and_notify, QPid, ChPid, MethodRecord}, State) ->
+ State1 = internal_send_command_async(MethodRecord, State),
+ rabbit_amqqueue_common:notify_sent(QPid, ChPid),
+ State1;
+handle_message({send_command_and_notify, QPid, ChPid, MethodRecord, Content},
+ State) ->
+ State1 = internal_send_command_async(MethodRecord, Content, State),
+ rabbit_amqqueue_common:notify_sent(QPid, ChPid),
+ State1;
+handle_message({'DOWN', _MRef, process, QPid, _Reason}, State) ->
+ rabbit_amqqueue_common:notify_sent_queue_down(QPid),
+ State;
+%% Deferred result of port_cmd/2 (see the comment above port_cmd/2):
+%% a successful write arms the stats timer, a failure kills the writer.
+handle_message({inet_reply, _, ok}, State) ->
+ rabbit_event:ensure_stats_timer(State, #wstate.stats_timer, emit_stats);
+handle_message({inet_reply, _, Status}, _State) ->
+ exit({writer, send_failed, Status});
+handle_message(emit_stats, State = #wstate{reader = ReaderPid}) ->
+ ReaderPid ! ensure_stats,
+ rabbit_event:reset_stats_timer(State, #wstate.stats_timer);
+handle_message(Message, _State) ->
+ exit({writer, message_not_understood, Message}).
+
+%%---------------------------------------------------------------------------
+
+%% Public API. W is a writer pid. The asynchronous functions are plain
+%% casts (message sends) and always return 'ok' immediately; the _sync
+%% and flush variants block until the writer has flushed the socket.
+
+send_command(W, MethodRecord) ->
+ W ! {send_command, MethodRecord},
+ ok.
+
+send_command(W, MethodRecord, Content) ->
+ W ! {send_command, MethodRecord, Content},
+ ok.
+
+%% Credit flow-aware sends: consume one unit of credit towards W; the
+%% writer acks it when it processes the message (see handle_message/2).
+send_command_flow(W, MethodRecord) ->
+ credit_flow:send(W),
+ W ! {send_command_flow, MethodRecord, self()},
+ ok.
+
+send_command_flow(W, MethodRecord, Content) ->
+ credit_flow:send(W),
+ W ! {send_command_flow, MethodRecord, Content, self()},
+ ok.
+
+send_command_sync(W, MethodRecord) ->
+ call(W, {send_command_sync, MethodRecord}).
+
+send_command_sync(W, MethodRecord, Content) ->
+ call(W, {send_command_sync, MethodRecord, Content}).
+
+%% Send asynchronously, then have the writer notify queue process Q
+%% on behalf of channel ChPid once the method has been buffered.
+send_command_and_notify(W, Q, ChPid, MethodRecord) ->
+ W ! {send_command_and_notify, Q, ChPid, MethodRecord},
+ ok.
+
+send_command_and_notify(W, Q, ChPid, MethodRecord, Content) ->
+ W ! {send_command_and_notify, Q, ChPid, MethodRecord, Content},
+ ok.
+
+%% Block until all pending data has been written to the socket.
+flush(W) -> call(W, flush).
+
+%%---------------------------------------------------------------------------
+
+%% gen-style synchronous call without a gen_server behind it: the
+%% writer loop answers '$gen_call' messages itself via gen_server:reply.
+call(Pid, Msg) ->
+ {ok, Res} = gen:call(Pid, '$gen_call', Msg, infinity),
+ Res.
+
+%%---------------------------------------------------------------------------
+
+%% Serialise a content-less method into a single frame iolist.
+assemble_frame(Channel, MethodRecord, Protocol) ->
+ rabbit_binary_generator:build_simple_method_frame(
+ Channel, MethodRecord, Protocol).
+
+%% Serialise a method that carries content: one method frame followed
+%% by content header/body frames split according to FrameMax.
+assemble_frames(Channel, MethodRecord, Content, FrameMax, Protocol) ->
+ MethodName = rabbit_misc:method_record_type(MethodRecord),
+ true = Protocol:method_has_content(MethodName), % assertion
+ MethodFrame = rabbit_binary_generator:build_simple_method_frame(
+ Channel, MethodRecord, Protocol),
+ ContentFrames = rabbit_binary_generator:build_simple_content_frames(
+ Channel, Content, FrameMax, Protocol),
+ [MethodFrame | ContentFrames].
+
+%% Blocking socket write; converts {error, Reason} into a thrown
+%% {inet_error, Reason} (used by the internal_send_command variants
+%% that write directly rather than via the writer's pending buffer).
+tcp_send(Sock, Data) ->
+ rabbit_misc:throw_on_error(inet_error,
+ fun () -> rabbit_net:send(Sock, Data) end).
+
+%% Direct (unbuffered) sends on a socket, used e.g. by rabbit_reader on
+%% channel 0; they bypass the writer process and its pending buffer.
+internal_send_command(Sock, Channel, MethodRecord, Protocol) ->
+ ok = tcp_send(Sock, assemble_frame(Channel, MethodRecord, Protocol)).
+
+%% As above but with content; frames are sent one by one, stopping the
+%% fold at the first non-ok result.
+internal_send_command(Sock, Channel, MethodRecord, Content, FrameMax,
+ Protocol) ->
+ ok = lists:foldl(fun (Frame, ok) -> tcp_send(Sock, Frame);
+ (_Frame, Other) -> Other
+ end, ok, assemble_frames(Channel, MethodRecord,
+ Content, FrameMax, Protocol)).
+
+%% Serialise a method and prepend it to the pending buffer (which is
+%% kept in reverse order), flushing if the buffered size crosses
+%% ?FLUSH_THRESHOLD.
+internal_send_command_async(MethodRecord,
+ State = #wstate{channel = Channel,
+ protocol = Protocol,
+ pending = Pending}) ->
+ Frame = assemble_frame(Channel, MethodRecord, Protocol),
+ maybe_flush(State#wstate{pending = [Frame | Pending]}).
+
+%% As above for a method with content. Also counts the content's bytes
+%% towards the forced-GC threshold (see maybe_gc_large_msg/2).
+internal_send_command_async(MethodRecord, Content,
+ State = #wstate{channel = Channel,
+ frame_max = FrameMax,
+ protocol = Protocol,
+ pending = Pending,
+ writer_gc_threshold = GCThreshold}) ->
+ Frames = assemble_frames(Channel, MethodRecord, Content, FrameMax,
+ Protocol),
+ maybe_gc_large_msg(Content, GCThreshold),
+ maybe_flush(State#wstate{pending = [Frames | Pending]}).
+
+%% When the amount of protocol method data buffered exceeds
+%% this threshold, a socket flush is performed.
+%%
+%% This magic number is the tcp-over-ethernet MSS (1460) minus the
+%% minimum size of an AMQP 0-9-1 basic.deliver method frame (24) plus basic
+%% content header (22). The idea is that we want to flush just before
+%% exceeding the MSS.
+-define(FLUSH_THRESHOLD, 1414).
+
+%% Flush if the pending buffer has grown past ?FLUSH_THRESHOLD bytes,
+%% otherwise keep accumulating.
+maybe_flush(State = #wstate{pending = Pending}) ->
+ case iolist_size(Pending) >= ?FLUSH_THRESHOLD of
+ true -> internal_flush(State);
+ false -> State
+ end.
+
+%% Write all pending frames to the socket. Pending is accumulated in
+%% reverse, so it is reversed here to restore send order.
+internal_flush(State = #wstate{pending = []}) ->
+ State;
+internal_flush(State = #wstate{sock = Sock, pending = Pending}) ->
+ ok = port_cmd(Sock, lists:reverse(Pending)),
+ State#wstate{pending = []}.
+
+%% gen_tcp:send/2 does a selective receive of {inet_reply, Sock,
+%% Status} to obtain the result. That is bad when it is called from
+%% the writer since it requires scanning of the writers possibly quite
+%% large message queue.
+%%
+%% So instead we lift the code from prim_inet:send/2, which is what
+%% gen_tcp:send/2 calls, do the first half here and then just process
+%% the result code in handle_message/2 as and when it arrives.
+%%
+%% This means we may end up happily sending data down a closed/broken
+%% socket, but that's ok since a) data in the buffers will be lost in
+%% any case (so qualitatively we are no worse off than if we used
+%% gen_tcp:send/2), and b) we do detect the changed socket status
+%% eventually, i.e. when we get round to handling the result code.
+%%
+%% Also note that the port has bounded buffers and port_command blocks
+%% when these are full. So the fact that we process the result
+%% asynchronously does not impact flow control.
+%% Fire-and-forget port write; the {inet_reply, _, Status} result is
+%% picked up later in handle_message/2. A synchronous failure (e.g. a
+%% closed port) becomes a writer-tagged exit immediately.
+port_cmd(Sock, Data) ->
+ true = try rabbit_net:port_command(Sock, Data)
+ catch error:Error -> exit({writer, send_failed, Error})
+ end,
+ ok.
+
+%% Some processes (channel, writer) can get huge amounts of binary
+%% garbage when processing huge messages at high speed (since we only
+%% do enough reductions to GC every few hundred messages, and if each
+%% message is 1MB then that's ugly). So count how many bytes of
+%% message we have processed, and force a GC every so often.
+%% maybe_gc_large_msg/1: as maybe_gc_large_msg/2 with the default
+%% threshold.
+maybe_gc_large_msg(Content) ->
+ maybe_gc_large_msg(Content, ?DEFAULT_GC_THRESHOLD).
+
+%% Accumulate the size of Content in the process dictionary (key
+%% 'msg_size_for_gc'); once the running total exceeds GCThreshold,
+%% force a garbage collection and reset the counter. Returns the size
+%% of Content, or 'undefined' when the feature is disabled
+%% (GCThreshold =:= undefined).
+maybe_gc_large_msg(_Content, undefined) ->
+ undefined;
+maybe_gc_large_msg(Content, GCThreshold) ->
+ Size = msg_size(Content),
+ Current = case get(msg_size_for_gc) of
+ undefined -> 0;
+ C -> C
+ end,
+ New = Current + Size,
+ put(msg_size_for_gc, case New > GCThreshold of
+ true -> erlang:garbage_collect(),
+ 0;
+ false -> New
+ end),
+ Size.
+
+%% Payload size in bytes of a #content{} or #basic_message{}.
+msg_size(#content{payload_fragments_rev = PFR}) -> iolist_size(PFR);
+msg_size(#basic_message{content = Content}) -> msg_size(Content).
diff --git a/deps/rabbit_common/src/supervisor2.erl b/deps/rabbit_common/src/supervisor2.erl
new file mode 100644
index 0000000000..08c764d0d8
--- /dev/null
+++ b/deps/rabbit_common/src/supervisor2.erl
@@ -0,0 +1,1651 @@
+%% This file is a copy of supervisor.erl from the Erlang/OTP
+%% distribution, with the following modifications:
+%%
+%% 1) the module name is supervisor2
+%%
+%% 2) a find_child/2 utility function has been added
+%%
+%% 3) Added an 'intrinsic' restart type. Like the transient type, this
+%% type means the child should only be restarted if the child exits
+%% abnormally. Unlike the transient type, if the child exits
+%% normally, the supervisor itself also exits normally. If the
+%% child is a supervisor and it exits normally (i.e. with reason of
+%% 'shutdown') then the child's parent also exits normally.
+%%
+%% 4) child specifications can contain, as the restart type, a tuple
+%% {permanent, Delay} | {transient, Delay} | {intrinsic, Delay}
+%% where Delay >= 0 (see point (3) above for intrinsic). The delay,
+%% in seconds, indicates what should happen if a child, upon being
+%% restarted, exceeds the MaxT and MaxR parameters. Thus, if a
+%% child exits, it is restarted as normal. If it exits sufficiently
+%% quickly and often to exceed the boundaries set by the MaxT and
+%% MaxR parameters, and a Delay is specified, then rather than
+%% stopping the supervisor, the supervisor instead continues and
+%% tries to start up the child again, Delay seconds later.
+%%
+%% Note that if a child is delay-restarted this will reset the
+%% count of restarts towards MaxR and MaxT. This matters if MaxT >
+%% Delay, since otherwise we would fail to restart after the delay.
+%%
+%% Sometimes, you may wish for a transient or intrinsic child to
+%% exit abnormally so that it gets restarted, but still log
+%% nothing. gen_server will log any exit reason other than
+%% 'normal', 'shutdown' or {'shutdown', _}. Thus the exit reason of
+%% {'shutdown', 'restart'} is interpreted to mean you wish the
+%% child to be restarted according to the delay parameters, but
+%% gen_server will not log the error. Thus from gen_server's
+%% perspective it's a normal exit, whilst from supervisor's
+%% perspective, it's an abnormal exit.
+%%
+%% 5) normal, and {shutdown, _} exit reasons are all treated the same
+%% (i.e. are regarded as normal exits)
+%%
+%% All modifications are (C) 2010-2020 VMware, Inc. or its affiliates.
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(supervisor2).
+
+-behaviour(gen_server).
+
+%% External exports
+-export([start_link/2, start_link/3,
+ start_child/2, restart_child/2,
+ delete_child/2, terminate_child/2,
+ which_children/1, count_children/1,
+ check_childspecs/1, get_childspec/2,
+ find_child/2]).
+
+%% Internal exports
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3, format_status/2]).
+
+%% For release_handler only
+-export([get_callback_module/1]).
+
+-include_lib("kernel/include/logger.hrl").
+
+-define(report_error(Error, Reason, Child, SupName),
+ ?LOG_ERROR(#{label=>{supervisor,Error},
+ report=>[{supervisor,SupName},
+ {errorContext,Error},
+ {reason,Reason},
+ {offender,extract_child(Child)}]},
+ #{domain=>[otp,sasl],
+ report_cb=>fun logger:format_otp_report/1,
+ logger_formatter=>#{title=>"SUPERVISOR REPORT"},
+ error_logger=>#{tag=>error_report,
+ type=>supervisor_report}})).
+
+%%--------------------------------------------------------------------------
+
+-export_type([sup_flags/0, child_spec/0, startchild_ret/0, strategy/0]).
+
+%%--------------------------------------------------------------------------
+
+-type child() :: 'undefined' | pid().
+-type child_id() :: term().
+-type mfargs() :: {M :: module(), F :: atom(), A :: [term()] | undefined}.
+-type modules() :: [module()] | 'dynamic'.
+-type delay() :: non_neg_integer().
+-type restart() :: 'permanent' | 'transient' | 'temporary' | 'intrinsic' | {'permanent', delay()} | {'transient', delay()} | {'intrinsic', delay()}.
+-type shutdown() :: 'brutal_kill' | timeout().
+-type worker() :: 'worker' | 'supervisor'.
+-type sup_name() :: {'local', Name :: atom()}
+ | {'global', Name :: atom()}
+ | {'via', Module :: module(), Name :: any()}.
+-type sup_ref() :: (Name :: atom())
+ | {Name :: atom(), Node :: node()}
+ | {'global', Name :: atom()}
+ | {'via', Module :: module(), Name :: any()}
+ | pid().
+-type child_spec() :: #{id := child_id(), % mandatory
+ start := mfargs(), % mandatory
+ restart => restart(), % optional
+ shutdown => shutdown(), % optional
+ type => worker(), % optional
+ modules => modules()} % optional
+ | {Id :: child_id(),
+ StartFunc :: mfargs(),
+ Restart :: restart(),
+ Shutdown :: shutdown(),
+ Type :: worker(),
+ Modules :: modules()}.
+
+-type strategy() :: 'one_for_all' | 'one_for_one'
+ | 'rest_for_one' | 'simple_one_for_one'.
+
+-type sup_flags() :: #{strategy => strategy(), % optional
+ intensity => non_neg_integer(), % optional
+ period => pos_integer()} % optional
+ | {RestartStrategy :: strategy(),
+ Intensity :: non_neg_integer(),
+ Period :: pos_integer()}.
+-type children() :: {Ids :: [child_id()], Db :: #{child_id() => child_rec()}}.
+
+%%--------------------------------------------------------------------------
+%% Defaults
+-define(default_flags, #{strategy => one_for_one,
+ intensity => 1,
+ period => 5}).
+-define(default_child_spec, #{restart => permanent,
+ type => worker}).
+%% Default 'shutdown' is 5000 for workers and infinity for supervisors.
+%% Default 'modules' is [M], where M comes from the child's start {M,F,A}.
+
+%%--------------------------------------------------------------------------
+
+-record(child, {% pid is undefined when child is not running
+ pid = undefined :: child()
+ | {restarting, pid() | undefined}
+ | [pid()],
+ id :: child_id(),
+ mfargs :: mfargs(),
+ restart_type :: restart(),
+ shutdown :: shutdown(),
+ child_type :: worker(),
+ modules = [] :: modules()}).
+-type child_rec() :: #child{}.
+
+-record(state, {name,
+ strategy :: strategy() | 'undefined',
+ children = {[],#{}} :: children(), % Ids in start order
+ dynamics :: {'maps', #{pid() => list()}}
+ | {'sets', sets:set(pid())}
+ | 'undefined',
+ intensity :: non_neg_integer() | 'undefined',
+ period :: pos_integer() | 'undefined',
+ restarts = [],
+ dynamic_restarts = 0 :: non_neg_integer(),
+ module,
+ args}).
+-type state() :: #state{}.
+
+-define(is_simple(State), State#state.strategy=:=simple_one_for_one).
+-define(is_temporary(_Child_), _Child_#child.restart_type=:=temporary).
+-define(is_permanent(_Child_), ((_Child_#child.restart_type=:=permanent) orelse
+ (is_tuple(_Child_#child.restart_type) andalso
+ tuple_size(_Child_#child.restart_type) =:= 2 andalso
+ element(1, _Child_#child.restart_type) =:= permanent))).
+
+-define(is_explicit_restart(R),
+ R == {shutdown, restart}).
+
+-callback init(Args :: term()) ->
+ {ok, {SupFlags :: sup_flags(), [ChildSpec :: child_spec()]}}
+ | ignore.
+
+-define(restarting(_Pid_), {restarting,_Pid_}).
+
+%%% ---------------------------------------------------
+%%% This is a general process supervisor built upon gen_server.erl.
+%%% Servers/processes should/could also be built using gen_server.erl.
+%%% SupName = {local, atom()} | {global, atom()}.
+%%% ---------------------------------------------------
+
+-type startlink_err() :: {'already_started', pid()}
+ | {'shutdown', term()}
+ | term().
+-type startlink_ret() :: {'ok', pid()} | 'ignore' | {'error', startlink_err()}.
+
+%% Start an anonymous supervisor; the atom 'self' marks the unnamed
+%% case in the {SupName | self, Mod, Args} tuple handed to init/1.
+-spec start_link(Module, Args) -> startlink_ret() when
+ Module :: module(),
+ Args :: term().
+start_link(Mod, Args) ->
+ gen_server:start_link(?MODULE, {self, Mod, Args}, []).
+
+%% Start a registered supervisor under SupName.
+-spec start_link(SupName, Module, Args) -> startlink_ret() when
+ SupName :: sup_name(),
+ Module :: module(),
+ Args :: term().
+start_link(SupName, Mod, Args) ->
+ gen_server:start_link(SupName, ?MODULE, {SupName, Mod, Args}, []).
+
+%%% ---------------------------------------------------
+%%% Interface functions.
+%%% ---------------------------------------------------
+
+-type startchild_err() :: 'already_present' | {'already_started', Child :: child()} | term().
+-type startchild_ret() :: {'ok', Child :: child()} | {'ok', Child :: child(), Info :: term()} | {'error', startchild_err()}.
+
+%% All interface functions below are synchronous gen_server calls to
+%% the supervisor process (see call/2).
+-spec start_child(SupRef, ChildSpec) -> startchild_ret() when
+ SupRef :: sup_ref(),
+ ChildSpec :: child_spec() | (List :: [term()]).
+start_child(Supervisor, ChildSpec) ->
+ call(Supervisor, {start_child, ChildSpec}).
+
+-spec restart_child(SupRef, Id) -> Result when
+ SupRef :: sup_ref(),
+ Id :: child_id(),
+ Result :: {'ok', Child :: child()}
+ | {'ok', Child :: child(), Info :: term()}
+ | {'error', Error},
+ Error :: 'running' | 'restarting' | 'not_found' | 'simple_one_for_one' | term().
+restart_child(Supervisor, Id) ->
+ call(Supervisor, {restart_child, Id}).
+
+-spec delete_child(SupRef, Id) -> Result when
+ SupRef :: sup_ref(),
+ Id :: child_id(),
+ Result :: 'ok' | {'error', Error},
+ Error :: 'running' | 'restarting' | 'not_found' | 'simple_one_for_one'.
+delete_child(Supervisor, Id) ->
+ call(Supervisor, {delete_child, Id}).
+
+%%-----------------------------------------------------------------
+%% Func: terminate_child/2
+%% Returns: ok | {error, Reason}
+%% Note that the child is *always* terminated in some
+%% way (maybe killed).
+%%-----------------------------------------------------------------
+
+-spec terminate_child(SupRef, Id) -> Result when
+ SupRef :: sup_ref(),
+ Id :: pid() | child_id(),
+ Result :: 'ok' | {'error', Error},
+ Error :: 'not_found' | 'simple_one_for_one'.
+terminate_child(Supervisor, Id) ->
+ call(Supervisor, {terminate_child, Id}).
+
+-spec get_childspec(SupRef, Id) -> Result when
+ SupRef :: sup_ref(),
+ Id :: pid() | child_id(),
+ Result :: {'ok', child_spec()} | {'error', Error},
+ Error :: 'not_found'.
+get_childspec(Supervisor, Id) ->
+ call(Supervisor, {get_childspec, Id}).
+
+-spec which_children(SupRef) -> [{Id,Child,Type,Modules}] when
+ SupRef :: sup_ref(),
+ Id :: child_id() | undefined,
+ Child :: child() | 'restarting',
+ Type :: worker(),
+ Modules :: modules().
+which_children(Supervisor) ->
+ call(Supervisor, which_children).
+
+-spec count_children(SupRef) -> PropListOfCounts when
+ SupRef :: sup_ref(),
+ PropListOfCounts :: [Count],
+ Count :: {specs, ChildSpecCount :: non_neg_integer()}
+ | {active, ActiveProcessCount :: non_neg_integer()}
+ | {supervisors, ChildSupervisorCount :: non_neg_integer()}
+ |{workers, ChildWorkerCount :: non_neg_integer()}.
+count_children(Supervisor) ->
+ call(Supervisor, count_children).
+
+%% find_child/2: supervisor2 extension (modification 2 in the module
+%% header) — the pids of all children whose id matches Name.
+-spec find_child(Supervisor, Name) -> [pid()] when
+ Supervisor :: sup_ref(),
+ Name :: child_id().
+find_child(Supervisor, Name) ->
+ [Pid || {Name1, Pid, _Type, _Modules} <- which_children(Supervisor),
+ Name1 =:= Name].
+
+%% Infinity timeout: supervisor operations must not time out.
+call(Supervisor, Req) ->
+ gen_server:call(Supervisor, Req, infinity).
+
+%% Validate a list of child specs without starting anything; runs in
+%% the caller, not in any supervisor process.
+-spec check_childspecs(ChildSpecs) -> Result when
+ ChildSpecs :: [child_spec()],
+ Result :: 'ok' | {'error', Error :: term()}.
+check_childspecs(ChildSpecs) when is_list(ChildSpecs) ->
+ case check_startspec(ChildSpecs) of
+ {ok, _} -> ok;
+ Error -> {error, Error}
+ end;
+check_childspecs(X) -> {error, {badarg, X}}.
+
+%%%-----------------------------------------------------------------
+%%% Called by release_handler during upgrade
+-spec get_callback_module(Pid) -> Module when
+ Pid :: pid(),
+ Module :: atom().
+%% Extract the callback module from a running supervisor's sys status.
+%% Tries the {?MODULE, [{"Callback", Mod}]} entry first and falls back
+%% to digging the #state{} record out of the format_status data
+%% (layouts differ across OTP versions — NOTE(review): fallback shape
+%% assumed from format_status conventions, not visible here).
+get_callback_module(Pid) ->
+ {status, _Pid, {module, _Mod},
+ [_PDict, _SysState, _Parent, _Dbg, Misc]} = sys:get_status(Pid),
+ case lists:keyfind(?MODULE, 1, Misc) of
+ {?MODULE, [{"Callback", Mod}]} ->
+ Mod;
+ _ ->
+ [_Header, _Data, {data, [{"State", State}]} | _] = Misc,
+ State#state.module
+ end.
+
+%%% ---------------------------------------------------
+%%%
+%%% Initialize the supervisor.
+%%%
+%%% ---------------------------------------------------
+
+-type init_sup_name() :: sup_name() | 'self'.
+
+-type stop_rsn() :: {'shutdown', term()}
+ | {'bad_return', {module(),'init', term()}}
+ | {'bad_start_spec', term()}
+ | {'start_spec', term()}
+ | {'supervisor_data', term()}.
+
+-spec init({init_sup_name(), module(), [term()]}) ->
+ {'ok', state()} | 'ignore' | {'stop', stop_rsn()}.
+
+%% gen_server init: trap exits (children are linked), ask the callback
+%% module for flags and child specs, then take the simple_one_for_one
+%% or regular initialisation path.
+init({SupName, Mod, Args}) ->
+ process_flag(trap_exit, true),
+ case Mod:init(Args) of
+ {ok, {SupFlags, StartSpec}} ->
+ case init_state(SupName, SupFlags, Mod, Args) of
+ {ok, State} when ?is_simple(State) ->
+ init_dynamic(State, StartSpec);
+ {ok, State} ->
+ init_children(State, StartSpec);
+ Error ->
+ {stop, {supervisor_data, Error}}
+ end;
+ ignore ->
+ ignore;
+ Error ->
+ {stop, {bad_return, {Mod, init, Error}}}
+ end.
+
+%% Regular supervisor: validate the specs and start every child; on
+%% any start failure, tear down whatever did start and stop.
+init_children(State, StartSpec) ->
+ SupName = State#state.name,
+ case check_startspec(StartSpec) of
+ {ok, Children} ->
+ case start_children(Children, SupName) of
+ {ok, NChildren} ->
+ {ok, State#state{children = NChildren}};
+ {error, NChildren, Reason} ->
+ _ = terminate_children(NChildren, SupName),
+ {stop, {shutdown, Reason}}
+ end;
+ Error ->
+ {stop, {start_spec, Error}}
+ end.
+
+%% simple_one_for_one: exactly one child spec template is allowed and
+%% no child is started at init time.
+init_dynamic(State, [StartSpec]) ->
+ case check_startspec([StartSpec]) of
+ {ok, Children} ->
+ {ok, dyn_init(State#state{children = Children})};
+ Error ->
+ {stop, {start_spec, Error}}
+ end;
+init_dynamic(_State, StartSpec) ->
+ {stop, {bad_start_spec, StartSpec}}.
+
+%%-----------------------------------------------------------------
+%% Func: start_children/2
+%% Args: Children = children() % Ids in start order
+%% SupName = {local, atom()} | {global, atom()} | {pid(), Mod}
+%% Purpose: Start all children. The new map contains #child's
+%% with pids.
+%% Returns: {ok, NChildren} | {error, NChildren, Reason}
+%% NChildren = children() % Ids in termination order
+%% (reversed start order)
+%%-----------------------------------------------------------------
+start_children(Children, SupName) ->
+ Start =
+ fun(Id,Child) ->
+ case do_start_child(SupName, Child) of
+ %% a temporary child that returned 'ignore' is dropped
+ %% from the children structure entirely
+ {ok, undefined} when ?is_temporary(Child) ->
+ remove;
+ {ok, Pid} ->
+ {update,Child#child{pid = Pid}};
+ {ok, Pid, _Extra} ->
+ {update,Child#child{pid = Pid}};
+ {error, Reason} ->
+ ?report_error(start_error, Reason, Child, SupName),
+ %% abort start-up of the whole supervisor
+ {abort,{failed_to_start_child,Id,Reason}}
+ end
+ end,
+ children_map(Start,Children).
+
+%% Start a single child via its {M, F, Args} and report progress to
+%% the SASL/logger infrastructure on success.
+do_start_child(SupName, Child) ->
+ #child{mfargs = {M, F, Args}} = Child,
+ case do_start_child_i(M, F, Args) of
+ {ok, Pid} when is_pid(Pid) ->
+ NChild = Child#child{pid = Pid},
+ report_progress(NChild, SupName),
+ {ok, Pid};
+ {ok, Pid, Extra} when is_pid(Pid) ->
+ NChild = Child#child{pid = Pid},
+ report_progress(NChild, SupName),
+ {ok, Pid, Extra};
+ Other ->
+ Other
+ end.
+
+%% Invoke the start MFA, normalising every possible outcome to
+%% {ok, Pid [, Extra]} | {error, Reason}; 'ignore' maps to
+%% {ok, undefined} and any other term (including a caught exit,
+%% via the old-style catch) becomes an error.
+do_start_child_i(M, F, A) ->
+ case catch apply(M, F, A) of
+ {ok, Pid} when is_pid(Pid) ->
+ {ok, Pid};
+ {ok, Pid, Extra} when is_pid(Pid) ->
+ {ok, Pid, Extra};
+ ignore ->
+ {ok, undefined};
+ {error, Error} ->
+ {error, Error};
+ What ->
+ {error, What}
+ end.
+
%%% ---------------------------------------------------
%%%
%%% Callback functions.
%%%
%%% ---------------------------------------------------
-type call() :: 'which_children' | 'count_children' | {_, _}. % XXX: refine
-spec handle_call(call(), term(), state()) -> {'reply', term(), state()}.

%% start_child on a simple_one_for_one supervisor: the caller's extra
%% arguments are appended to the template child's argument list.
handle_call({start_child, EArgs}, _From, State) when ?is_simple(State) ->
    Child = get_dynamic_child(State),
    #child{mfargs = {M, F, A}} = Child,
    Args = A ++ EArgs,
    case do_start_child_i(M, F, Args) of
        {ok, undefined} ->
            %% start function returned 'ignore'; nothing to track.
            {reply, {ok, undefined}, State};
        {ok, Pid} ->
            NState = dyn_store(Pid, Args, State),
            {reply, {ok, Pid}, NState};
        {ok, Pid, Extra} ->
            NState = dyn_store(Pid, Args, State),
            {reply, {ok, Pid, Extra}, NState};
        What ->
            {reply, What, State}
    end;

%% start_child with a full child spec (non-simple supervisors).
handle_call({start_child, ChildSpec}, _From, State) ->
    case check_childspec(ChildSpec) of
        {ok, Child} ->
            {Resp, NState} = handle_start_child(Child, State),
            {reply, Resp, NState};
        What ->
            {reply, {error, What}, State}
    end;

%% terminate_child for simple_one_for_one can only be done with pid
handle_call({terminate_child, Id}, _From, State) when not is_pid(Id),
                                                      ?is_simple(State) ->
    {reply, {error, simple_one_for_one}, State};

handle_call({terminate_child, Id}, _From, State) ->
    case internal_find_child(Id, State) of
        {ok, Child} ->
            do_terminate(Child, State#state.name),
            {reply, ok, del_child(Child, State)};
        error ->
            {reply, {error, not_found}, State}
    end;

%% restart_child request is invalid for simple_one_for_one supervisors
handle_call({restart_child, _Id}, _From, State) when ?is_simple(State) ->
    {reply, {error, simple_one_for_one}, State};

%% restart_child only applies to a child that is currently not running
%% (pid =:= undefined); running or restarting children are rejected.
handle_call({restart_child, Id}, _From, State) ->
    case internal_find_child(Id, State) of
        {ok, Child} when Child#child.pid =:= undefined ->
            case do_start_child(State#state.name, Child) of
                {ok, Pid} ->
                    NState = set_pid(Pid, Id, State),
                    {reply, {ok, Pid}, NState};
                {ok, Pid, Extra} ->
                    NState = set_pid(Pid, Id, State),
                    {reply, {ok, Pid, Extra}, NState};
                Error ->
                    {reply, Error, State}
            end;
        {ok, #child{pid=?restarting(_)}} ->
            {reply, {error, restarting}, State};
        {ok, _} ->
            {reply, {error, running}, State};
        _ ->
            {reply, {error, not_found}, State}
    end;

%% delete_child request is invalid for simple_one_for_one supervisors
handle_call({delete_child, _Id}, _From, State) when ?is_simple(State) ->
    {reply, {error, simple_one_for_one}, State};

%% delete_child only applies to a child that is currently not running.
handle_call({delete_child, Id}, _From, State) ->
    case internal_find_child(Id, State) of
        {ok, Child} when Child#child.pid =:= undefined ->
            NState = remove_child(Id, State),
            {reply, ok, NState};
        {ok, #child{pid=?restarting(_)}} ->
            {reply, {error, restarting}, State};
        {ok, _} ->
            {reply, {error, running}, State};
        _ ->
            {reply, {error, not_found}, State}
    end;

handle_call({get_childspec, Id}, _From, State) ->
    case internal_find_child(Id, State) of
        {ok, Child} ->
            {reply, {ok, child_to_spec(Child)}, State};
        error ->
            {reply, {error, not_found}, State}
    end;

%% which_children for a simple supervisor: every dynamic child shares
%% the single template's type and modules; ids are always 'undefined'.
handle_call(which_children, _From, State) when ?is_simple(State) ->
    #child{child_type = CT,modules = Mods} = get_dynamic_child(State),
    Reply = dyn_map(fun(?restarting(_)) -> {undefined, restarting, CT, Mods};
                       (Pid) -> {undefined, Pid, CT, Mods}
                    end, State),
    {reply, Reply, State};

handle_call(which_children, _From, State) ->
    Resp =
        children_to_list(
          fun(Id,#child{pid = ?restarting(_),
                        child_type = ChildType, modules = Mods}) ->
                  {Id, restarting, ChildType, Mods};
             (Id,#child{pid = Pid,
                        child_type = ChildType, modules = Mods}) ->
                  {Id, Pid, ChildType, Mods}
          end,
          State#state.children),
    {reply, Resp, State};

handle_call(count_children, _From, #state{dynamic_restarts = Restarts} = State)
  when ?is_simple(State) ->
    #child{child_type = CT} = get_dynamic_child(State),
    Sz = dyn_size(State),
    Active = Sz - Restarts, % Restarts is always 0 for temporary children
    Reply = case CT of
                supervisor -> [{specs, 1}, {active, Active},
                               {supervisors, Sz}, {workers, 0}];
                worker -> [{specs, 1}, {active, Active},
                           {supervisors, 0}, {workers, Sz}]
            end,
    {reply, Reply, State};

handle_call(count_children, _From, State) ->
    %% Specs and children are together on the children list...
    {Specs, Active, Supers, Workers} =
        children_fold(fun(_Id, Child, Counts) ->
                              count_child(Child, Counts)
                      end, {0,0,0,0}, State#state.children),

    %% Reformat counts to a property list.
    Reply = [{specs, Specs}, {active, Active},
             {supervisors, Supers}, {workers, Workers}],
    {reply, Reply, State}.
+
%% Fold one child record into the {Specs, Active, Supers, Workers}
%% counters that back the count_children call. A child is counted as
%% active only when its pid refers to a currently-alive process.
count_child(#child{pid = Pid, child_type = Type},
            {Specs, Active, Supers, Workers}) ->
    Alive = is_pid(Pid) andalso is_process_alive(Pid),
    Active1 = case Alive of
                  true  -> Active + 1;
                  false -> Active
              end,
    case Type of
        worker     -> {Specs + 1, Active1, Supers, Workers + 1};
        supervisor -> {Specs + 1, Active1, Supers + 1, Workers}
    end.
+
%%% If a restart attempt failed, this message is cast
%%% from restart/2 in order to give gen_server the chance to
%%% check its inbox before trying again.
-spec handle_cast({try_again_restart, child_id() | {'restarting',pid()}}, state()) ->
    {'noreply', state()} | {stop, shutdown, state()}.

handle_cast({try_again_restart,TryAgainId}, State) ->
    case find_child_and_args(TryAgainId, State) of
        {ok, Child = #child{pid=?restarting(_)}} ->
            %% Only retry while the child is still marked as restarting;
            %% anything else means it was meanwhile terminated/deleted.
            case restart(Child,State) of
                {ok, State1} ->
                    {noreply, State1};
                {shutdown, State1} ->
                    {stop, shutdown, State1}
            end;
        _ ->
            {noreply,State}
    end.
+
%%
%% Take care of terminated children.
%%
-spec handle_info(term(), state()) ->
    {'noreply', state()} | {'stop', 'shutdown', state()}.

%% A linked child (or other linked process) exited.
handle_info({'EXIT', Pid, Reason}, State) ->
    case restart_child(Pid, Reason, State) of
        {ok, State1} ->
            {noreply, State1};
        {shutdown, State1} ->
            {stop, shutdown, State1}
    end;

%% Delayed-restart timer fired (see do_restart_delay/3).
handle_info({delayed_restart, {Reason, Child}}, State) when ?is_simple(State) ->
    try_restart(Reason, Child, State#state{restarts = []}); %% [1]
handle_info({delayed_restart, {Reason, Child}}, State) ->
    ChildId = Child#child.id,
    case internal_find_child(ChildId, State) of
        {ok, Child1} ->
            try_restart(Reason, Child1, State#state{restarts = []}); %% [1]
        _What ->
            %% Child was deleted while the delay timer was pending.
            {noreply, State}
    end;
%% [1] When we receive a delayed_restart message we want to reset the
%% restarts field since otherwise the MaxT might not have elapsed and
%% we would just delay again and again. Since a common use of the
%% delayed restart feature is for MaxR = 1, MaxT = some huge number
%% (so that we don't end up bouncing around in non-delayed restarts)
%% this is important.

%% Drain and log anything else so the mailbox cannot accumulate.
handle_info(Msg, State) ->
    ?LOG_ERROR("Supervisor received unexpected message: ~tp~n",[Msg],
               #{domain=>[otp],
                 error_logger=>#{tag=>error}}),
    {noreply, State}.
+
%%
%% Terminate this server.
%%
-spec terminate(term(), state()) -> 'ok'.

%% Dynamic (simple_one_for_one) children are shut down in bulk;
%% all other children are terminated in termination order.
terminate(_Reason, State) when ?is_simple(State) ->
    terminate_dynamic_children(State);
terminate(_Reason, State) ->
    terminate_children(State#state.children, State#state.name).
+
%%
%% Change code for the supervisor.
%% Call the new call-back module and fetch the new start specification.
%% Combine the new spec. with the old. If the new start spec. is
%% not valid the code change will not succeed.
%% Use the old Args as argument to Module:init/1.
%% NOTE: This requires that the init function of the call-back module
%% does not have any side effects.
%%
-spec code_change(term(), state(), term()) ->
    {'ok', state()} | {'error', term()}.

code_change(_, State, _) ->
    case (State#state.module):init(State#state.args) of
        {ok, {SupFlags, StartSpec}} ->
            case set_flags(SupFlags, State) of
                {ok, State1}  ->
                    update_childspec(State1, StartSpec);
                {invalid_type, SupFlags} ->
                    {error, {bad_flags, SupFlags}}; % backwards compatibility
                Error ->
                    {error, Error}
            end;
        ignore ->
            %% 'ignore' from init/1 leaves the running state untouched.
            {ok, State};
        Error ->
            Error
    end.
+
%% Merge a new start spec (from code_change) into the running state.
%% For a simple supervisor the new spec must contain exactly one child
%% template; otherwise the old and new children are reconciled by
%% update_childspec1/3.
update_childspec(State, StartSpec) when ?is_simple(State) ->
    case check_startspec(StartSpec) of
        {ok, {[_],_}=Children} ->
            {ok, State#state{children = Children}};
        Error ->
            {error, Error}
    end;
update_childspec(State, StartSpec) ->
    case check_startspec(StartSpec) of
        {ok, Children} ->
            OldC = State#state.children, % In reverse start order !
            NewC = update_childspec1(OldC, Children, []),
            {ok, State#state{children = NewC}};
        Error ->
            {error, Error}
    end.
+
%% Walk the old children (reverse start order): a child whose id also
%% appears in the new spec has its pid carried over into the new record;
%% a child only present in the old spec is remembered in KeepOld and
%% retained alongside the new children.
update_childspec1({[Id|OldIds], OldDb}, {Ids,Db}, KeepOld) ->
    case update_chsp(maps:get(Id,OldDb), Db) of
        {ok,NewDb} ->
            update_childspec1({OldIds,OldDb}, {Ids,NewDb}, KeepOld);
        false ->
            update_childspec1({OldIds,OldDb}, {Ids,Db}, [Id|KeepOld])
    end;
update_childspec1({[],OldDb}, {Ids,Db}, KeepOld) ->
    KeepOldDb = maps:with(KeepOld,OldDb),
    %% Return them in (kept) reverse start order.
    {lists:reverse(Ids ++ KeepOld),maps:merge(KeepOldDb,Db)}.
+
%% If the old child's id still exists in the new spec db, carry the old
%% pid over into the new record and return the updated db; otherwise
%% return 'false' so the caller keeps the old child separately.
update_chsp(#child{id = Id, pid = OldPid}, NewDb) ->
    case NewDb of
        #{Id := NewChild} ->
            {ok, NewDb#{Id => NewChild#child{pid = OldPid}}};
        _ ->
            %% Id not found in new spec.
            false
    end.
+
+
%%% ---------------------------------------------------
%%% Start a new child.
%%% ---------------------------------------------------

%% Start a child that was added via the start_child API call.
%% Returns {Reply, NState}; a child id that already exists yields
%% {error, already_started, Pid} (running) or {error, already_present}.
handle_start_child(Child, State) ->
    case internal_find_child(Child#child.id, State) of
        error ->
            case do_start_child(State#state.name, Child) of
                {ok, undefined} when ?is_temporary(Child) ->
                    %% 'ignore' from a temporary child: do not save it.
                    {{ok, undefined}, State};
                {ok, Pid} ->
                    {{ok, Pid}, save_child(Child#child{pid = Pid}, State)};
                {ok, Pid, Extra} ->
                    {{ok, Pid, Extra}, save_child(Child#child{pid = Pid}, State)};
                {error, What} ->
                    {{error, {What, Child}}, State}
            end;
        {ok, OldChild} when is_pid(OldChild#child.pid) ->
            {{error, {already_started, OldChild#child.pid}}, State};
        {ok, _OldChild} ->
            {{error, already_present}, State}
    end.
+
%%% ---------------------------------------------------
%%% Restart. A process has terminated.
%%% Returns: {ok, state()} | {shutdown, state()}
%%% ---------------------------------------------------

%% Entry point for an 'EXIT' from a child pid. Unknown pids (e.g. other
%% linked processes) are silently ignored.
restart_child(Pid, Reason, State) ->
    case find_child_and_args(Pid, State) of
        {ok, Child} ->
            do_restart(Reason, Child, State);
        error ->
            {ok, State}
    end.
+
%% Run a restart attempt and translate its outcome into a gen_server
%% callback return: {noreply, _} on success, {stop, shutdown, _} when
%% the supervisor itself must shut down.
try_restart(Reason, Child, State) ->
    case do_restart(Reason, Child, State) of
        {ok, NewState} ->
            {noreply, NewState};
        {shutdown, NewState} ->
            {stop, shutdown, NewState}
    end.
+
%% Decide, from the child's restart_type and the exit Reason, whether to
%% restart the child (immediately or after a delay), delete it, or shut
%% the supervisor down. Returns {ok, state()} | {shutdown, state()}.
do_restart(Reason, Child=#child{restart_type=permanent}, State) -> % is_permanent
    ?report_error(child_terminated, Reason, Child, State#state.name),
    restart(Child, State);
do_restart(Reason, Child=#child{restart_type={permanent,_Delay}}, State) -> % is_permanent_delay
    ?report_error(child_terminated, Reason, Child, State#state.name),
    do_restart_delay(Reason, Child, State);
do_restart(Reason, Child=#child{restart_type=transient}, State) -> % is_transient
    maybe_report_error(Reason, Child, State),
    restart_if_explicit_or_abnormal(fun restart/2,
                                    fun delete_child_and_continue/2,
                                    Reason, Child, State);
do_restart(Reason, Child=#child{restart_type={transient,_Delay}}, State) -> % is_transient_delay
    maybe_report_error(Reason, Child, State),
    restart_if_explicit_or_abnormal(defer_to_restart_delay(Reason),
                                    fun delete_child_and_continue/2,
                                    Reason, Child, State);
do_restart(Reason, Child=#child{restart_type=intrinsic}, State) -> % is_intrinsic
    maybe_report_error(Reason, Child, State),
    restart_if_explicit_or_abnormal(fun restart/2,
                                    fun delete_child_and_stop/2,
                                    Reason, Child, State);
do_restart(Reason, Child=#child{restart_type={intrinsic,_Delay}}, State) -> % is_intrinsic_delay
    maybe_report_error(Reason, Child, State),
    restart_if_explicit_or_abnormal(defer_to_restart_delay(Reason),
                                    fun delete_child_and_stop/2,
                                    Reason, Child, State);
%% Remaining clauses cover temporary children (any reason) and clean
%% exits of children not matched above: just forget the child.
do_restart(normal, Child, State) ->
    NState = del_child(Child, State),
    {ok, NState};
do_restart(shutdown, Child, State) ->
    NState = del_child(Child, State),
    {ok, NState};
do_restart({shutdown, _Term}, Child, State) ->
    NState = del_child(Child, State),
    {ok, NState};
do_restart(Reason, Child, State) when ?is_temporary(Child) ->
    ?report_error(child_terminated, Reason, Child, State#state.name),
    NState = del_child(Child, State),
    {ok, NState}.
+
%% Report child_terminated only for abnormal exit reasons; clean exits
%% (normal/shutdown) are not worth a log entry.
maybe_report_error(Reason, Child, State) ->
    case is_abnormal_termination(Reason) of
        true ->
            ?report_error(child_terminated, Reason, Child, State#state.name);
        false ->
            ok
    end.
+
%% Dispatch on the exit reason: restart (via RestartHow) for explicit
%% restart requests or abnormal exits, otherwise run the Otherwise
%% continuation (delete-and-continue / delete-and-stop).
restart_if_explicit_or_abnormal(RestartHow, Otherwise, Reason, Child, State) ->
    case ?is_explicit_restart(Reason) orelse is_abnormal_termination(Reason) of
        true  -> RestartHow(Child, State);
        false -> Otherwise(Child, State)
    end.
+
%% Wrap do_restart_delay/3 so it can be used where a restart
%% continuation fun(Child, State) is expected.
defer_to_restart_delay(Reason) ->
    fun(DelayedChild, DelayedState) ->
            do_restart_delay(Reason, DelayedChild, DelayedState)
    end.

%% Forget the child and keep the supervisor running.
delete_child_and_continue(ChildToDrop, SupState) ->
    {ok, del_child(ChildToDrop, SupState)}.

%% Forget the child and request supervisor shutdown (intrinsic children).
delete_child_and_stop(ChildToDrop, SupState) ->
    {shutdown, del_child(ChildToDrop, SupState)}.
+
%% 'normal', 'shutdown' and {'shutdown', _} are clean exits; every other
%% reason counts as abnormal termination.
is_abnormal_termination(Reason) ->
    case Reason of
        normal        -> false;
        shutdown      -> false;
        {shutdown, _} -> false;
        _Other        -> true
    end.
+
%% Delayed-restart variant: if the restart budget allows, restart now;
%% otherwise schedule a {delayed_restart, _} message after Delay seconds
%% and mark the child as restarting in the meantime.
do_restart_delay(Reason,
                 Child = #child{id = ChildId,
                                pid = ChildPid0,
                                restart_type = {_RestartType, Delay}},
                 State0) ->
    case add_restart(State0) of
        {ok, State1} ->
            Strategy = State1#state.strategy,
            maybe_restart(Strategy, Child, State1);
        {terminate, State1} ->
            %% we've reached the max restart intensity, but the
            %% add_restart will have added to the restarts
            %% field. Given we don't want to die here, we need to go
            %% back to the old restarts field otherwise we'll never
            %% attempt to restart later.
            Msg = {delayed_restart, {Reason, Child}},
            %% Delay is in seconds; send_after takes milliseconds.
            _TRef = erlang:send_after(trunc(Delay*1000), self(), Msg),
            ChildPid1 = restarting(ChildPid0),
            %% NOTE(review): a previous comment here claimed State0 (the
            %% pre-add_restart state) was intentionally used, yet the
            %% code threads State1, which does include the new restart
            %% entry. Per the rationale above, State0 may have been
            %% intended -- confirm which is correct (was "TODO LRB").
            State2 = set_pid(ChildPid1, ChildId, State1),
            {ok, State2}
    end.
+
%% Attempt the restart immediately; on failure, schedule a
%% try_again_restart via timer so that control returns to gen_server
%% first and other requests can be served in between attempts.
maybe_restart(Strategy, Child, State) ->
    case restart(Strategy, Child, State) of
        {{try_again, Reason}, NState2} ->
            %% Leaving control back to gen_server before
            %% trying again. This way other incoming requests
            %% for the supervisor can be handled - e.g. a
            %% shutdown request for the supervisor or the
            %% child.
            %% Dynamic children are identified by pid, static ones by id.
            Id = if ?is_simple(State) -> Child#child.pid;
                    true -> Child#child.id
                 end,
            Args = [self(), Id, Reason],
            {ok, _TRef} = timer:apply_after(0, ?MODULE, try_again_restart, Args),
            {ok, NState2};
        Other ->
            Other
    end.
+
%% Standard (non-delayed) restart path: consume one unit of restart
%% budget and run the strategy-specific restart; terminate the
%% supervisor when the max restart intensity has been reached.
restart(Child, State) ->
    case add_restart(State) of
        {ok, NState} ->
            case restart(NState#state.strategy, Child, NState) of
                {{try_again, TryAgainId}, NState2} ->
                    %% Leaving control back to gen_server before
                    %% trying again. This way other incoming requests
                    %% for the supervisor can be handled - e.g. a
                    %% shutdown request for the supervisor or the
                    %% child.
                    try_again_restart(TryAgainId),
                    {ok,NState2};
                Other ->
                    Other
            end;
        {terminate, NState} ->
            ?report_error(shutdown, reached_max_restart_intensity,
                          Child, State#state.name),
            {shutdown, del_child(Child, NState)}
    end.
+
%% Strategy-specific restart of one child.
%% Returns {ok, state()} | {{try_again, IdOrPid}, state()}.
restart(simple_one_for_one, Child, State0) ->
    #child{pid = OldPid, mfargs = {M, F, A}} = Child,
    %% If this child was already marked restarting, its pending restart
    %% is being consumed now: decrement the dynamic_restarts counter.
    State1 = case OldPid of
                 ?restarting(_) ->
                     NRes = State0#state.dynamic_restarts - 1,
                     State0#state{dynamic_restarts = NRes};
                 _ ->
                     State0
             end,
    State2 = dyn_erase(OldPid, State1),
    case do_start_child_i(M, F, A) of
        {ok, Pid} ->
            NState = dyn_store(Pid, A, State2),
            {ok, NState};
        {ok, Pid, _Extra} ->
            NState = dyn_store(Pid, A, State2),
            {ok, NState};
        {error, Error} ->
            %% Re-register the child under its ?restarting marker and
            %% ask the caller to retry later.
            ROldPid = restarting(OldPid),
            NRestarts = State2#state.dynamic_restarts + 1,
            State3 = State2#state{dynamic_restarts = NRestarts},
            NState = dyn_store(ROldPid, A, State3),
            ?report_error(start_error, Error, Child, NState#state.name),
            {{try_again, ROldPid}, NState}
    end;
restart(one_for_one, #child{id=Id} = Child, State) ->
    OldPid = Child#child.pid,
    case do_start_child(State#state.name, Child) of
        {ok, Pid} ->
            NState = set_pid(Pid, Id, State),
            {ok, NState};
        {ok, Pid, _Extra} ->
            NState = set_pid(Pid, Id, State),
            {ok, NState};
        {error, Reason} ->
            NState = set_pid(restarting(OldPid), Id, State),
            ?report_error(start_error, Reason, Child, State#state.name),
            {{try_again,Id}, NState}
    end;
%% rest_for_one also restarts every child started after the failed one.
restart(rest_for_one, #child{id=Id} = Child, #state{name=SupName} = State) ->
    {ChAfter, ChBefore} = split_child(Id, State#state.children),
    {Return, ChAfter2} = restart_multiple_children(Child, ChAfter, SupName),
    {Return, State#state{children = append(ChAfter2,ChBefore)}};
%% one_for_all restarts every child in the supervisor.
restart(one_for_all, Child, #state{name=SupName} = State) ->
    Children1 = del_child(Child#child.id, State#state.children),
    {Return, NChildren} = restart_multiple_children(Child, Children1, SupName),
    {Return, State#state{children = NChildren}}.
+
%% Terminate the given children and start them all again (used by
%% rest_for_one and one_for_all). If one of them fails to start, the
%% failing child is marked restarting and a try_again is returned.
restart_multiple_children(Child, Children, SupName) ->
    Children1 = terminate_children(Children, SupName),
    case start_children(Children1, SupName) of
        {ok, NChildren} ->
            {ok, NChildren};
        {error, NChildren, {failed_to_start_child, FailedId, _Reason}} ->
            %% Only the originally-failed child keeps its old pid inside
            %% the restarting marker; any other failure uses 'undefined'.
            NewPid = if FailedId =:= Child#child.id ->
                             restarting(Child#child.pid);
                        true ->
                             ?restarting(undefined)
                     end,
            {{try_again, FailedId}, set_pid(NewPid,FailedId,NChildren)}
    end.
+
%% Wrap a live pid in the ?restarting marker; anything already wrapped
%% (or 'undefined') is passed through unchanged.
restarting(Pid) when is_pid(Pid) -> ?restarting(Pid);
restarting(RPid) -> RPid.
+
%% Queue a retry of a failed restart behind any pending requests by
%% casting to ourselves (see handle_cast/2).
-spec try_again_restart(child_id() | {'restarting',pid()}) -> 'ok'.
try_again_restart(TryAgainId) ->
    gen_server:cast(self(), {try_again_restart, TryAgainId}).
+
%%-----------------------------------------------------------------
%% Func: terminate_children/2
%% Args: Children = children() % Ids in termination order
%%       SupName = {local, atom()} | {global, atom()} | {pid(),Mod}
%% Returns: NChildren = children() % Ids in startup order
%%                                 % (reversed termination order)
%%-----------------------------------------------------------------
terminate_children(Children, SupName) ->
    Terminate =
        fun(_Id,Child) when ?is_temporary(Child) ->
                %% Temporary children should not be restarted and thus should
                %% be skipped when building the list of terminated children.
                do_terminate(Child, SupName),
                remove;
           (_Id,Child) ->
                do_terminate(Child, SupName),
                {update,Child#child{pid=undefined}}
        end,
    %% children_map/2 cannot abort here, so the match is total.
    {ok,NChildren} = children_map(Terminate, Children),
    NChildren.
+
%% Shut down one running child; a child with no live pid is a no-op.
%% A 'normal' exit is only an error worth reporting for permanent
%% children, which are never expected to stop on their own.
do_terminate(Child, SupName) when is_pid(Child#child.pid) ->
    case shutdown(Child#child.pid, Child#child.shutdown) of
        ok ->
            ok;
        {error, normal} when not (?is_permanent(Child)) ->
            ok;
        {error, OtherReason} ->
            ?report_error(shutdown_error, OtherReason, Child, SupName)
    end,
    ok;
do_terminate(_Child, _SupName) ->
    ok.
+
%%-----------------------------------------------------------------
%% Shuts down a child. We must check the EXIT value
%% of the child, because it might have died with another reason than
%% the wanted. In that case we want to report the error. We put a
%% monitor on the child and check for the 'DOWN' message instead of
%% checking for the 'EXIT' message, because if we check the 'EXIT'
%% message a "naughty" child, who does unlink(Sup), could hang the
%% supervisor.
%% Returns: ok | {error, OtherReason} (this should be reported)
%%-----------------------------------------------------------------
shutdown(Pid, brutal_kill) ->
    case monitor_child(Pid) of
        ok ->
            exit(Pid, kill),
            receive
                {'DOWN', _MRef, process, Pid, killed} ->
                    ok;
                {'DOWN', _MRef, process, Pid, OtherReason} ->
                    {error, OtherReason}
            end;
        {error, Reason} ->
            {error, Reason}
    end;
%% Time is the shutdown timeout in milliseconds (or 'infinity', which
%% simply never triggers the 'after' branch).
shutdown(Pid, Time) ->
    case monitor_child(Pid) of
        ok ->
            exit(Pid, shutdown), %% Try to shutdown gracefully
            receive
                {'DOWN', _MRef, process, Pid, shutdown} ->
                    ok;
                {'DOWN', _MRef, process, Pid, OtherReason} ->
                    {error, OtherReason}
            after Time ->
                    exit(Pid, kill), %% Force termination.
                    receive
                        {'DOWN', _MRef, process, Pid, OtherReason} ->
                            {error, OtherReason}
                    end
            end;
        {error, Reason} ->
            {error, Reason}
    end.
+
%% Help function to shutdown/2 switches from link to monitor approach
monitor_child(Pid) ->

    %% Do the monitor operation first so that if the child dies
    %% before the monitoring is done causing a 'DOWN'-message with
    %% reason noproc, we will get the real reason in the 'EXIT'-message
    %% unless a naughty child has already done unlink...
    erlang:monitor(process, Pid),
    unlink(Pid),

    receive
        %% If the child dies before the unlink we must empty
        %% the mail-box of the 'EXIT'-message and the 'DOWN'-message.
        {'EXIT', Pid, Reason} ->
            receive
                {'DOWN', _, process, Pid, _} ->
                    {error, Reason}
            end
    after 0 ->
            %% If a naughty child did unlink and the child dies before
            %% monitor the result will be that shutdown/2 receives a
            %% 'DOWN'-message with reason noproc.
            %% If the child should die after the unlink there
            %% will be a 'DOWN'-message with a correct reason
            %% that will be handled in shutdown/2.
            ok
    end.
+
%%-----------------------------------------------------------------
%% Func: terminate_dynamic_children/1
%% Args: State
%% Returns: ok
%%
%% Shutdown all dynamic children. This happens when the supervisor is
%% stopped. Because the supervisor can have millions of dynamic children, we
%% can have a significant overhead here.
%%-----------------------------------------------------------------
terminate_dynamic_children(State) ->
    Child = get_dynamic_child(State),
    {Pids, EStack0} = monitor_dynamic_children(Child,State),
    Sz = sets:size(Pids),
    EStack = case Child#child.shutdown of
                 brutal_kill ->
                     sets:fold(fun(P, _) -> exit(P, kill) end, ok, Pids),
                     wait_dynamic_children(Child, Pids, Sz, undefined, EStack0);
                 infinity ->
                     sets:fold(fun(P, _) -> exit(P, shutdown) end, ok, Pids),
                     wait_dynamic_children(Child, Pids, Sz, undefined, EStack0);
                 Time ->
                     %% Graceful shutdown with a single shared kill timer
                     %% covering all children.
                     sets:fold(fun(P, _) -> exit(P, shutdown) end, ok, Pids),
                     TRef = erlang:start_timer(Time, self(), kill),
                     wait_dynamic_children(Child, Pids, Sz, TRef, EStack0)
             end,
    %% Unroll stacked errors and report them
    dict:fold(fun(Reason, Ls, _) ->
                      ?report_error(shutdown_error, Reason,
                                    Child#child{pid=Ls}, State#state.name)
              end, ok, EStack).
+
%% Monitor every live dynamic child; returns {Pids, EStack} where Pids
%% is the set of successfully-monitored children and EStack accumulates
%% early termination errors (Reason => [Pid]). Children already marked
%% as restarting have no process to monitor and are skipped.
monitor_dynamic_children(Child,State) ->
    dyn_fold(fun(P,{Pids, EStack}) when is_pid(P) ->
                     case monitor_child(P) of
                         ok ->
                             {sets:add_element(P, Pids), EStack};
                         {error, normal} when not (?is_permanent(Child)) ->
                             {Pids, EStack};
                         {error, Reason} ->
                             {Pids, dict:append(Reason, P, EStack)}
                     end;
                (?restarting(_), {Pids, EStack}) ->
                     {Pids, EStack}
             end, {sets:new(), dict:new()}, State).
+
%% Collect 'DOWN' messages for Sz monitored dynamic children, folding
%% unexpected exit reasons into EStack. TRef, when defined, is the
%% shared kill timer started in terminate_dynamic_children/1.
wait_dynamic_children(_Child, _Pids, 0, undefined, EStack) ->
    EStack;
wait_dynamic_children(_Child, _Pids, 0, TRef, EStack) ->
    %% If the timer has expired before its cancellation, we must empty the
    %% mail-box of the 'timeout'-message.
    _ = erlang:cancel_timer(TRef),
    receive
        {timeout, TRef, kill} ->
            EStack
    after 0 ->
            EStack
    end;
%% brutal_kill: only 'killed' is an expected exit reason.
wait_dynamic_children(#child{shutdown=brutal_kill} = Child, Pids, Sz,
                      TRef, EStack) ->
    receive
        {'DOWN', _MRef, process, Pid, killed} ->
            wait_dynamic_children(Child, sets:del_element(Pid, Pids), Sz-1,
                                  TRef, EStack);

        {'DOWN', _MRef, process, Pid, Reason} ->
            wait_dynamic_children(Child, sets:del_element(Pid, Pids), Sz-1,
                                  TRef, dict:append(Reason, Pid, EStack))
    end;
%% Graceful shutdown: 'shutdown', {'shutdown', _} and (for non-permanent
%% children) 'normal' are clean; a kill-timer timeout escalates to kill.
wait_dynamic_children(Child, Pids, Sz, TRef, EStack) ->
    receive
        {'DOWN', _MRef, process, Pid, shutdown} ->
            wait_dynamic_children(Child, sets:del_element(Pid, Pids), Sz-1,
                                  TRef, EStack);

        {'DOWN', _MRef, process, Pid, {shutdown, _}} ->
            wait_dynamic_children(Child, sets:del_element(Pid, Pids), Sz-1,
                                  TRef, EStack);

        {'DOWN', _MRef, process, Pid, normal} when not (?is_permanent(Child)) ->
            wait_dynamic_children(Child, sets:del_element(Pid, Pids), Sz-1,
                                  TRef, EStack);

        {'DOWN', _MRef, process, Pid, Reason} ->
            wait_dynamic_children(Child, sets:del_element(Pid, Pids), Sz-1,
                                  TRef, dict:append(Reason, Pid, EStack));

        {timeout, TRef, kill} ->
            sets:fold(fun(P, _) -> exit(P, kill) end, ok, Pids),
            wait_dynamic_children(Child, Pids, Sz, undefined, EStack)
    end.
+
%%-----------------------------------------------------------------
%% Access #state.children
%%-----------------------------------------------------------------

%% Note we do not want to save the parameter list for temporary processes as
%% they will not be restarted, and hence we do not need this information.
%% Especially for dynamic children to simple_one_for_one supervisors
%% it could become very costly as it is not uncommon to spawn
%% very many such processes.
-spec save_child(child_rec(), state()) -> state().
save_child(#child{mfargs = {M, F, _}} = Child, State) when ?is_temporary(Child) ->
    do_save_child(Child#child{mfargs = {M, F, undefined}}, State);
save_child(Child, State) ->
    do_save_child(Child, State).
+
-spec do_save_child(child_rec(), state()) -> state().
%% Prepend the child's id to the start-order list and insert the record
%% into the children db.
do_save_child(#child{id = Id} = Child, #state{children = {Ids, Db}} = State) ->
    NewChildren = {[Id | Ids], Db#{Id => Child}},
    State#state{children = NewChildren}.
+
%% Forget a terminated child. Dynamic children are erased from the dyn
%% store; temporary static children are removed entirely; other static
%% children keep their spec with pid reset to 'undefined'.
-spec del_child(child_rec(), state()) -> state();
               (child_id(), children()) -> children().
del_child(#child{pid = Pid}, State) when ?is_simple(State) ->
    dyn_erase(Pid,State);
del_child(Child, State) when is_record(Child,child), is_record(State,state) ->
    NChildren = del_child(Child#child.id, State#state.children),
    State#state{children = NChildren};
del_child(Id, {Ids,Db}) ->
    case maps:get(Id, Db) of
        Child when Child#child.restart_type =:= temporary ->
            {lists:delete(Id, Ids), maps:remove(Id, Db)};
        Child ->
            {Ids, Db#{Id=>Child#child{pid=undefined}}}
    end.
+
%% In:  {[S4, S3, Ch, S1, S0],Db}
%% Ret: {{[S4, S3, Ch],Db1}, {[S1, S0],Db2}}
%% Db1 and Db2 contain the keys in the lists they are associated with.
%% Used by rest_for_one to separate the children started after Id
%% (inclusive; its pid is reset) from those started before it.
-spec split_child(child_id(), children()) -> {children(), children()}.
split_child(Id, {Ids,Db}) ->
    {IdsAfter,IdsBefore} = split_ids(Id, Ids, []),
    DbBefore = maps:with(IdsBefore,Db),
    #{Id:=Ch} = DbAfter = maps:with(IdsAfter,Db),
    {{IdsAfter,DbAfter#{Id=>Ch#child{pid=undefined}}},{IdsBefore,DbBefore}}.
+
%% Split the id list at Target (which must be present): returns
%% {Prefix, Suffix} where Prefix is everything up to and including
%% Target in the original order, and Suffix is the remainder.
split_ids(Target, [Target | Rest], Seen) ->
    {lists:reverse([Target | Seen]), Rest};
split_ids(Target, [Head | Rest], Seen) ->
    split_ids(Target, Rest, [Head | Seen]).
+
%% Find the child record for a given Pid (dynamic child) or Id
%% (non-dynamic child). This is called from the API functions.
-spec internal_find_child(pid() | child_id(), state()) -> {ok,child_rec()} | error.
internal_find_child(Pid, State) when is_pid(Pid), ?is_simple(State) ->
    case find_dynamic_child(Pid, State) of
        error ->
            %% Not stored under the bare pid; it may be stored under the
            %% ?restarting(Pid) marker instead.
            case find_dynamic_child(restarting(Pid), State) of
                error ->
                    %% A dead pid of a simple supervisor is assumed to
                    %% have been one of its (template) children.
                    case erlang:is_process_alive(Pid) of
                        true -> error;
                        false -> {ok, get_dynamic_child(State)}
                    end;
                Other ->
                    Other
            end;
        Other ->
            Other
    end;
internal_find_child(Id, #state{children = {_Ids,Db}}) ->
    maps:find(Id, Db).
+
%% Get the child record - either by child id or by pid. If
%% simple_one_for_one, then insert the pid and args into the returned
%% child record. This is called when trying to restart the child.
-spec find_child_and_args(IdOrPid, state()) -> {ok, child_rec()} | error when
      IdOrPid :: pid() | {restarting,pid()} | child_id().
find_child_and_args(Pid, State) when ?is_simple(State) ->
    case find_dynamic_child(Pid, State) of
        {ok,#child{mfargs={M,F,_}} = Child} ->
            %% Re-attach the saved start arguments of this particular
            %% dynamic child to the shared template record.
            {ok, Args} = dyn_args(Pid, State),
            {ok, Child#child{mfargs = {M, F, Args}}};
        error ->
            error
    end;
find_child_and_args(Pid, State) when is_pid(Pid) ->
    find_child_by_pid(Pid, State);
find_child_and_args(Id, #state{children={_Ids,Db}}) ->
    maps:find(Id, Db).
+
%% Given the pid, find the child record for a dynamic child, and
%% include the pid in the returned record.
-spec find_dynamic_child(IdOrPid, state()) -> {ok, child_rec()} | error when
      IdOrPid :: pid() | {restarting,pid()} | child_id().
find_dynamic_child(Pid, State) ->
    case dyn_exists(Pid, State) of
        true ->
            %% All dynamic children share one template record; stamp it
            %% with this child's pid before returning it.
            Child = get_dynamic_child(State),
            {ok, Child#child{pid=Pid}};
        false ->
            error
    end.
+
%% Given the pid, find the child record for a non-dynamic child.
%% The throw is a deliberate non-local return used to stop the fold as
%% soon as a match is found; it is caught immediately below.
-spec find_child_by_pid(IdOrPid, state()) -> {ok,child_rec()} | error when
      IdOrPid :: pid() | {restarting,pid()}.
find_child_by_pid(Pid,#state{children={_Ids,Db}}) ->
    Fun = fun(_Id,#child{pid=P}=Ch,_) when P =:= Pid ->
                  throw(Ch);
             (_,_,error) ->
                  error
          end,
    try maps:fold(Fun,error,Db)
    catch throw:Child -> {ok,Child}
    end.
+
%% Get the child record from a simple_one_for_one supervisor - no pid
%% It is assumed that the child can always be found: a simple
%% supervisor's children structure holds exactly one template entry.
-spec get_dynamic_child(state()) -> child_rec().
get_dynamic_child(#state{children={[Id],Db}}) ->
    #{Id := Child} = Db,
    Child.
+
%% Update pid in the given child record and store it in the process state
%% (or directly in a children() structure). The Id must already exist;
%% maps:update_with/3 crashes otherwise, which is intended.
-spec set_pid(term(), child_id(), state()) -> state();
             (term(), child_id(), children()) -> children().
set_pid(Pid, Id, #state{children=Children} = State) ->
    State#state{children = set_pid(Pid, Id, Children)};
set_pid(Pid, Id, {Ids, Db}) ->
    NewDb = maps:update_with(Id, fun(Child) -> Child#child{pid=Pid} end, Db),
    {Ids,NewDb}.
+
%% Remove a child spec entirely: delete Id from both the start-order
%% list and the children db.
-spec remove_child(child_id(), state()) -> state().
remove_child(Id, #state{children = {Ids, Db}} = State) ->
    State#state{children = {lists:delete(Id, Ids), maps:remove(Id, Db)}}.
+
%% In the order of Ids, traverse the children and update each child
%% according to the return value of the Fun.
%% On error, abort and return the merge of the old and the updated map.
%% NOTE: The returned list of Ids is reverted compared to the input.
-spec children_map(Fun, children()) -> {ok, children()} |
                                       {error,children(),Reason} when
      Fun :: fun((child_id(),child_rec()) -> {update,child_rec()} |
                                             remove |
                                             {abort, Reason}),
      Reason :: term().
children_map(Fun,{Ids,Db}) ->
    children_map(Fun, Ids, Db, []).

children_map(Fun,[Id|Ids],Db,Acc) ->
    case Fun(Id,maps:get(Id,Db)) of
        {update,Child} ->
            children_map(Fun,Ids,Db#{Id => Child},[Id|Acc]);
        remove ->
            children_map(Fun,Ids,maps:remove(Id,Db),Acc);
        {abort,Reason} ->
            %% Rebuild a coherent children() value: the not-yet-visited
            %% tail (reversed) plus the aborting Id plus the already
            %% processed (reversed) prefix.
            {error,{lists:reverse(Ids)++[Id|Acc],Db},Reason}
    end;
children_map(_Fun,[],Db,Acc) ->
    {ok,{Acc,Db}}.
+
%% In the order of Ids, map Fun over all children and return the
%% resulting list (same element order as Ids).
-spec children_to_list(Fun, children()) -> List when
      Fun :: fun((child_id(), child_rec()) -> Elem),
      List :: list(Elem),
      Elem :: term().
children_to_list(Fun, {Ids, Db}) ->
    [Fun(Id, maps:get(Id, Db)) || Id <- Ids].
+
%% The order is not important - so ignore Ids and fold straight over
%% the underlying map of child records.
-spec children_fold(Fun, Acc0, children()) -> Acc1 when
      Fun :: fun((child_id(), child_rec(), AccIn) -> AccOut),
      Acc0 :: term(),
      Acc1 :: term(),
      AccIn :: term(),
      AccOut :: term().
children_fold(Fun,Init,{_Ids,Db}) ->
    maps:fold(Fun, Init, Db).
+
-spec append(children(), children()) -> children().
%% Concatenate two children() structures: ids of the first group come
%% first; on (unexpected) key collision the second db wins, matching
%% maps:merge/2 semantics.
append({IdsA, DbA}, {IdsB, DbB}) ->
    {IdsA ++ IdsB, maps:merge(DbA, DbB)}.
+
%%-----------------------------------------------------------------
%% Func: init_state/4
%% Args: SupName = {local, atom()} | {global, atom()} | self
%%       Type = {Strategy, MaxIntensity, Period}
%%       Strategy = one_for_one | one_for_all | simple_one_for_one |
%%                  rest_for_one
%%       MaxIntensity = integer() >= 0
%%       Period = integer() > 0
%%       Mod :== atom()
%%       Args :== term()
%% Purpose: Check that Type is of correct type (!)
%% Returns: {ok, state()} | Error
%%-----------------------------------------------------------------
init_state(SupName, Type, Mod, Args) ->
    set_flags(Type, #state{name = supname(SupName,Mod),
                           module = Mod,
                           args = Args}).
+
%% Validate the supervisor flags and store them in the state.
%% check_flags/1 signals invalid input by throwing a tagged tuple,
%% which is caught here and returned as the error value.
set_flags(Flags, State) ->
    try check_flags(Flags) of
        #{strategy := Strategy, intensity := MaxIntensity, period := Period} ->
            {ok, State#state{strategy = Strategy,
                             intensity = MaxIntensity,
                             period = Period}}
    catch
        Thrown -> Thrown
    end.
+
+%% Accept either the map form (merged with ?default_flags) or the
+%% old 3-tuple form, which is normalised to a map; anything else
+%% throws {invalid_type, _}.
+check_flags(SupFlags) when is_map(SupFlags) ->
+    do_check_flags(maps:merge(?default_flags,SupFlags));
+check_flags({Strategy, MaxIntensity, Period}) ->
+    check_flags(#{strategy => Strategy,
+ intensity => MaxIntensity,
+ period => Period});
+check_flags(What) ->
+    throw({invalid_type, What}).
+
+%% Each valid*/1 helper throws on bad input; on success the full
+%% (defaults-merged) flags map is returned unchanged.
+do_check_flags(#{strategy := Strategy,
+ intensity := MaxIntensity,
+ period := Period} = Flags) ->
+    validStrategy(Strategy),
+    validIntensity(MaxIntensity),
+    validPeriod(Period),
+    Flags.
+
+%% Flag validators: return true for valid input, throw a tagged
+%% error tuple otherwise (caught in set_flags/2).
+validStrategy(simple_one_for_one) -> true;
+validStrategy(one_for_one) -> true;
+validStrategy(one_for_all) -> true;
+validStrategy(rest_for_one) -> true;
+validStrategy(What) -> throw({invalid_strategy, What}).
+
+validIntensity(Max) when is_integer(Max),
+ Max >= 0 -> true;
+validIntensity(What) -> throw({invalid_intensity, What}).
+
+validPeriod(Period) when is_integer(Period),
+ Period > 0 -> true;
+validPeriod(What) -> throw({invalid_period, What}).
+
+%% An unregistered supervisor (SupName = self) is identified by
+%% {Pid, CallbackModule}; a registered one by its given name.
+supname(self, Mod) -> {self(), Mod};
+supname(N, _) -> N.
+
+%%% ------------------------------------------------------
+%%% Check that the children start specification is valid.
+%%% Input: [child_spec()]
+%%% Returns: {ok, [child_rec()]} | Error
+%%% ------------------------------------------------------
+
+check_startspec(Children) -> check_startspec(Children, [], #{}).
+
+%% Validate each child spec in turn, accumulating ids (reversed) and
+%% a map of id -> #child{}; duplicate ids are rejected.
+check_startspec([ChildSpec|T], Ids, Db) ->
+    case check_childspec(ChildSpec) of
+        {ok, #child{id=Id}=Child} ->
+            case maps:is_key(Id, Db) of
+ %% The error message duplicate_child_name is kept for
+ %% backwards compatibility, although
+ %% duplicate_child_id would be more correct.
+ true -> {duplicate_child_name, Id};
+ false -> check_startspec(T, [Id | Ids], Db#{Id=>Child})
+            end;
+        Error -> Error
+    end;
+check_startspec([], Ids, Db) ->
+    {ok, {lists:reverse(Ids),Db}}.
+
+%% Accept either the map child spec (merged with ?default_child_spec)
+%% or the old 6-tuple form, normalised to a map. The `catch` converts
+%% a thrown validation error into the return value.
+check_childspec(ChildSpec) when is_map(ChildSpec) ->
+    catch do_check_childspec(maps:merge(?default_child_spec,ChildSpec));
+check_childspec({Id, Func, RestartType, Shutdown, ChildType, Mods}) ->
+    check_childspec(#{id => Id,
+ start => Func,
+ restart => RestartType,
+ shutdown => Shutdown,
+ type => ChildType,
+ modules => Mods});
+check_childspec(X) -> {invalid_child_spec, X}.
+
+%% Validate all fields of a (defaults-merged) child spec map and
+%% build the internal #child{} record. `id` and `start` are mandatory;
+%% `shutdown` defaults per child type (5000 ms worker / infinity
+%% supervisor); `modules` defaults to the start MFA's module.
+do_check_childspec(#{restart := RestartType,
+ type := ChildType} = ChildSpec)->
+    Id = case ChildSpec of
+ #{id := I} -> I;
+ _ -> throw(missing_id)
+ end,
+    Func = case ChildSpec of
+ #{start := F} -> F;
+ _ -> throw(missing_start)
+ end,
+    validId(Id),
+    validFunc(Func),
+    validRestartType(RestartType),
+    validChildType(ChildType),
+    Shutdown = case ChildSpec of
+ #{shutdown := S} -> S;
+ #{type := worker} -> 5000;
+ #{type := supervisor} -> infinity
+ end,
+    validShutdown(Shutdown),
+    Mods = case ChildSpec of
+ #{modules := Ms} -> Ms;
+ _ -> {M,_,_} = Func, [M]
+ end,
+    validMods(Mods),
+    {ok, #child{id = Id, mfargs = Func, restart_type = RestartType,
+ shutdown = Shutdown, child_type = ChildType, modules = Mods}}.
+
+%% Child-spec field validators: true on success, throw on failure
+%% (caught by the `catch` in check_childspec/1).
+validChildType(supervisor) -> true;
+validChildType(worker) -> true;
+validChildType(What) -> throw({invalid_child_type, What}).
+
+%% Any term is allowed as a child id.
+validId(_Id) -> true.
+
+validFunc({M, F, A}) when is_atom(M),
+ is_atom(F),
+ is_list(A) -> true;
+validFunc(Func) -> throw({invalid_mfa, Func}).
+
+%% Besides the standard restart types, the delayed variants
+%% {permanent|transient|intrinsic, Delay} are accepted here.
+validRestartType(permanent) -> true;
+validRestartType({permanent, Delay}) -> validDelay(Delay);
+validRestartType(temporary) -> true;
+validRestartType(transient) -> true;
+validRestartType({transient, Delay}) -> validDelay(Delay);
+validRestartType(intrinsic) -> true;
+validRestartType({intrinsic, Delay}) -> validDelay(Delay);
+validRestartType(RestartType) -> throw({invalid_restart_type, RestartType}).
+
+validDelay(Delay) when is_number(Delay), Delay >= 0 ->
+    true;
+validDelay(What) ->
+    throw({invalid_delay, What}).
+
+validShutdown(Shutdown)
+ when is_integer(Shutdown), Shutdown > 0 -> true;
+validShutdown(infinity) -> true;
+validShutdown(brutal_kill) -> true;
+validShutdown(Shutdown) -> throw({invalid_shutdown, Shutdown}).
+
+validMods(dynamic) -> true;
+validMods(Mods) when is_list(Mods) ->
+    lists:foreach(fun(Mod) ->
+ if
+ is_atom(Mod) -> ok;
+ true -> throw({invalid_module, Mod})
+ end
+ end,
+ Mods);
+validMods(Mods) -> throw({invalid_modules, Mods}).
+
+%% Inverse of do_check_childspec/1: convert the internal #child{}
+%% record back into the public map child-spec form.
+child_to_spec(#child{id = Id,
+ mfargs = Func,
+ restart_type = RestartType,
+ shutdown = Shutdown,
+ child_type = ChildType,
+ modules = Mods}) ->
+    #{id => Id,
+ start => Func,
+ restart => RestartType,
+ shutdown => Shutdown,
+ type => ChildType,
+ modules => Mods}.
+
+%%% ------------------------------------------------------
+%%% Add a new restart and calculate if the max restart
+%%% intensity has been reached (in that case the supervisor
+%%% shall terminate).
+%%% All restarts occurred inside the period amount of seconds
+%%% are kept in the #state.restarts list.
+%%% Returns: {ok, State'} | {terminate, State'}
+%%% ------------------------------------------------------
+
+add_restart(State) ->
+    I = State#state.intensity,
+    P = State#state.period,
+    R = State#state.restarts,
+    %% monotonic_time(1) = seconds; immune to wall-clock changes.
+    Now = erlang:monotonic_time(1),
+    R1 = add_restart([Now|R], Now, P),
+    State1 = State#state{restarts = R1},
+    case length(R1) of
+        CurI when CurI =< I ->
+            {ok, State1};
+        _ ->
+            {terminate, State1}
+    end.
+
+%% Prune the (newest-first) restart list: as soon as one entry falls
+%% outside the period, all older entries are outside it too, so the
+%% rest of the list is dropped.
+add_restart([R|Restarts], Now, Period) ->
+    case inPeriod(R, Now, Period) of
+        true ->
+            [R|add_restart(Restarts, Now, Period)];
+        _ ->
+            []
+    end;
+add_restart([], _, _) ->
+    [].
+
+inPeriod(Then, Now, Period) ->
+    Now =< Then + Period.
+
+%%% ------------------------------------------------------
+%%% Error and progress reporting.
+%%% ------------------------------------------------------
+%% Build the report proplist for a child. A list in #child.pid means
+%% a simple_one_for_one child with many pids, so report the count
+%% instead of a single pid.
+extract_child(Child) when is_list(Child#child.pid) ->
+    [{nb_children, length(Child#child.pid)},
+ {id, Child#child.id},
+ {mfargs, Child#child.mfargs},
+ {restart_type, Child#child.restart_type},
+ {shutdown, Child#child.shutdown},
+ {child_type, Child#child.child_type}];
+extract_child(Child) ->
+    [{pid, Child#child.pid},
+ {id, Child#child.id},
+ {mfargs, Child#child.mfargs},
+ {restart_type, Child#child.restart_type},
+ {shutdown, Child#child.shutdown},
+ {child_type, Child#child.child_type}].
+
+%% Emit a SASL-style progress report through the OTP logger when a
+%% child has been started successfully.
+report_progress(Child, SupName) ->
+    ?LOG_INFO(#{label=>{supervisor,progress},
+ report=>[{supervisor,SupName},
+ {started,extract_child(Child)}]},
+ #{domain=>[otp,sasl],
+ report_cb=>fun logger:format_otp_report/1,
+ logger_formatter=>#{title=>"PROGRESS REPORT"},
+ error_logger=>#{tag=>info_report,type=>progress}}).
+
+%% gen_server format_status callback: return raw state for crash
+%% (terminate) reports, and a labelled form for sys:get_status/1.
+format_status(terminate, [_PDict, State]) ->
+    State;
+format_status(_, [_PDict, State]) ->
+    [{data, [{"State", State}]},
+ {supervisor, [{"Callback", State#state.module}]}].
+
+%%%-----------------------------------------------------------------
+%%% Dynamics database access
+%%% The dynamics DB tracks children of a simple_one_for_one
+%%% supervisor. It is {sets, Set} for temporary children (only pids
+%%% are needed) and {maps, Map} for restartable ones (pid -> start
+%%% args, needed to restart). dyn_init/2 below picks the backend.
+dyn_size(#state{dynamics = {Mod,Db}}) ->
+    Mod:size(Db).
+
+dyn_erase(Pid,#state{dynamics={sets,Db}}=State) ->
+    State#state{dynamics={sets,sets:del_element(Pid,Db)}};
+dyn_erase(Pid,#state{dynamics={maps,Db}}=State) ->
+    State#state{dynamics={maps,maps:remove(Pid,Db)}}.
+
+%% In the sets backend the start args are not kept, so they are
+%% ignored on store.
+dyn_store(Pid,_,#state{dynamics={sets,Db}}=State) ->
+    State#state{dynamics={sets,sets:add_element(Pid,Db)}};
+dyn_store(Pid,Args,#state{dynamics={maps,Db}}=State) ->
+    State#state{dynamics={maps,Db#{Pid => Args}}}.
+
+%% Fold/map over stored pids, regardless of backend.
+dyn_fold(Fun,Init,#state{dynamics={sets,Db}}) ->
+    sets:fold(Fun,Init,Db);
+dyn_fold(Fun,Init,#state{dynamics={maps,Db}}) ->
+    maps:fold(fun(Pid,_,Acc) -> Fun(Pid,Acc) end, Init, Db).
+
+dyn_map(Fun, #state{dynamics={sets,Db}}) ->
+    lists:map(Fun, sets:to_list(Db));
+dyn_map(Fun, #state{dynamics={maps,Db}}) ->
+    lists:map(Fun, maps:keys(Db)).
+
+dyn_exists(Pid, #state{dynamics={sets, Db}}) ->
+    sets:is_element(Pid, Db);
+dyn_exists(Pid, #state{dynamics={maps, Db}}) ->
+    maps:is_key(Pid, Db).
+
+%% Temporary children (sets backend) have no stored args.
+dyn_args(_Pid, #state{dynamics={sets, _Db}}) ->
+    {ok,undefined};
+dyn_args(Pid, #state{dynamics={maps, Db}}) ->
+    maps:find(Pid, Db).
+
+dyn_init(State) ->
+    dyn_init(get_dynamic_child(State),State).
+
+dyn_init(Child,State) when ?is_temporary(Child) ->
+    State#state{dynamics={sets,sets:new()}};
+dyn_init(_Child,State) ->
+    State#state{dynamics={maps,maps:new()}}.
diff --git a/deps/rabbit_common/src/vm_memory_monitor.erl b/deps/rabbit_common/src/vm_memory_monitor.erl
new file mode 100644
index 0000000000..73b5a23b78
--- /dev/null
+++ b/deps/rabbit_common/src/vm_memory_monitor.erl
@@ -0,0 +1,576 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% In practice Erlang shouldn't be allowed to grow to more than a half
+%% of available memory. The pessimistic scenario is when the Erlang VM
+%% has a single process that's consuming all memory. In such a case,
+%% during garbage collection, Erlang tries to allocate a huge chunk of
+%% continuous memory, which can result in a crash or heavy swapping.
+%%
+%% This module tries to warn Rabbit before such situations occur, so
+%% that it has a higher chance to avoid running out of memory.
+
+-module(vm_memory_monitor).
+
+-behaviour(gen_server).
+
+-export([start_link/1, start_link/3]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-export([get_total_memory/0, get_vm_limit/0,
+ get_check_interval/0, set_check_interval/1,
+ get_vm_memory_high_watermark/0, set_vm_memory_high_watermark/1,
+ get_memory_limit/0,
+ %% TODO: refactor in master
+ get_memory_use/1,
+ get_process_memory/0,
+ get_process_memory/1,
+ get_memory_calculation_strategy/0,
+ get_rss_memory/0]).
+
+%% for tests
+-export([parse_line_linux/1, parse_mem_limit/1]).
+
+-define(SERVER, ?MODULE).
+
+%% gen_server state. The os_type/os_pid/page_size/proc_file fields
+%% cache platform data used when sampling process memory (see
+%% init_state_by_os/1); the timer drives periodic `update` messages.
+-record(state, {total_memory,
+ memory_limit,
+ process_memory,
+ memory_config_limit,
+ timeout,
+ timer,
+ alarmed,
+ alarm_funs,
+ os_type = undefined,
+ os_pid = undefined,
+ page_size = undefined,
+ proc_file = undefined}).
+
+-include("rabbit_memory.hrl").
+
+%%----------------------------------------------------------------------------
+
+%% Strategy used to measure the Erlang VM's memory footprint; see
+%% get_memory_calculation_strategy/0 for the mapping of config values.
+-type memory_calculation_strategy() :: rss | erlang | allocated.
+-type vm_memory_high_watermark() :: (float() | {'absolute', integer() | string()}).
+-spec start_link(float()) -> rabbit_types:ok_pid_or_error().
+-spec start_link(float(), fun ((any()) -> 'ok'),
+ fun ((any()) -> 'ok')) -> rabbit_types:ok_pid_or_error().
+-spec get_total_memory() -> (non_neg_integer() | 'unknown').
+-spec get_vm_limit() -> non_neg_integer().
+-spec get_check_interval() -> non_neg_integer().
+-spec set_check_interval(non_neg_integer()) -> 'ok'.
+-spec get_vm_memory_high_watermark() -> vm_memory_high_watermark().
+-spec set_vm_memory_high_watermark(vm_memory_high_watermark()) -> 'ok'.
+-spec get_memory_limit() -> non_neg_integer().
+-spec get_memory_use(bytes) -> {non_neg_integer(), float() | infinity};
+ (ratio) -> float() | infinity.
+-spec get_cached_process_memory_and_limit() -> {non_neg_integer(),
+ float() | infinity}.
+-spec get_rss_memory() -> non_neg_integer().
+
+-export_type([memory_calculation_strategy/0]).
+%%----------------------------------------------------------------------------
+%% Public API
+%%----------------------------------------------------------------------------
+
+%% Total system memory in bytes, or 'unknown'. An operator-supplied
+%% override (rabbit.total_memory_available_override_value, either an
+%% integer or an information-unit string such as "4GiB") takes
+%% precedence; on a missing or unparsable override we fall back to
+%% querying the OS via get_total_memory_from_os/0.
+get_total_memory() ->
+    case application:get_env(rabbit, total_memory_available_override_value) of
+        {ok, Value} ->
+            case rabbit_resource_monitor_misc:parse_information_unit(Value) of
+                {ok, ParsedTotal} ->
+                    ParsedTotal;
+                {error, parse_error} ->
+                    %% fixed typo in the warning text ("memmory")
+                    rabbit_log:warning(
+                      "The override value for the total memory available is "
+                      "not a valid value: ~p, getting total from the system.~n",
+                      [Value]),
+                    get_total_memory_from_os()
+            end;
+        undefined ->
+            get_total_memory_from_os()
+    end.
+
+%% Address-space limit for the current OS (see get_vm_limit/1 below).
+get_vm_limit() -> get_vm_limit(os:type()).
+
+%% Synchronous accessors/mutators delegating to the monitor process.
+%% All use an infinite timeout: the server is expected to be local
+%% and responsive.
+get_check_interval() ->
+    gen_server:call(?MODULE, get_check_interval, infinity).
+
+set_check_interval(Fraction) ->
+    gen_server:call(?MODULE, {set_check_interval, Fraction}, infinity).
+
+get_vm_memory_high_watermark() ->
+    gen_server:call(?MODULE, get_vm_memory_high_watermark, infinity).
+
+set_vm_memory_high_watermark(Fraction) ->
+    gen_server:call(?MODULE, {set_vm_memory_high_watermark, Fraction},
+ infinity).
+
+get_memory_limit() ->
+    gen_server:call(?MODULE, get_memory_limit, infinity).
+
+%% Current (cached) memory use. With `bytes` returns {UsedBytes,
+%% Limit}; with `ratio` returns Used/Limit. A non-positive limit
+%% means "unlimited" and is reported as `infinity`.
+get_memory_use(bytes) ->
+    {ProcessMemory, MemoryLimit} = get_cached_process_memory_and_limit(),
+    {ProcessMemory, case MemoryLimit > 0.0 of
+ true -> MemoryLimit;
+ false -> infinity
+ end};
+get_memory_use(ratio) ->
+    {ProcessMemory, MemoryLimit} = get_cached_process_memory_and_limit(),
+    case MemoryLimit > 0.0 of
+        true -> ProcessMemory / MemoryLimit;
+        false -> infinity
+    end.
+
+%% Memory reported by erlang:memory(total) is not supposed to
+%% be equal to the total size of all pages mapped to the emulator,
+%% according to http://erlang.org/doc/man/erlang.html#memory-0
+%% erlang:memory(total) under-reports memory usage by around 20%
+%%
+%% Win32 Note: 3.6.12 shipped with code that used wmic.exe to get the
+%% WorkingSetSize value for the running erl.exe process. Unfortunately
+%% even with a moderate invocation rate of 1 ops/second that uses more
+%% CPU resources than some Windows users are willing to tolerate.
+%% See rabbitmq/rabbitmq-server#1343 and rabbitmq/rabbitmq-common#224
+%% for details.
+%% Returns the cached process memory reading, in bytes.
+-spec get_process_memory() -> Bytes :: integer().
+get_process_memory() ->
+    {ProcMem, _} = get_memory_use(bytes),
+    ProcMem.
+
+%% `cached` reads the last value sampled by the monitor; `current`
+%% samples the OS right now (more accurate, more expensive).
+-spec get_process_memory(cached | current) -> Bytes :: integer().
+get_process_memory(cached) ->
+    {ProcMem, _} = get_memory_use(bytes),
+    ProcMem;
+get_process_memory(current) ->
+    get_process_memory_uncached().
+
+%% Read rabbit.vm_memory_calculation_strategy, defaulting to `rss`.
+%% `legacy` is mapped to `erlang` for backwards compatibility; any
+%% unrecognised value is logged and falls back to `rss`.
+-spec get_memory_calculation_strategy() -> memory_calculation_strategy().
+get_memory_calculation_strategy() ->
+    case rabbit_misc:get_env(rabbit, vm_memory_calculation_strategy, rss) of
+        allocated -> allocated;
+        erlang -> erlang;
+        legacy -> erlang; %% backwards compatibility
+        rss -> rss;
+        UnsupportedValue ->
+            rabbit_log:warning(
+              "Unsupported value '~p' for vm_memory_calculation_strategy. "
+              "Supported values: (allocated|erlang|legacy|rss). "
+              "Defaulting to 'rss'",
+              [UnsupportedValue]
+            ),
+            rss
+    end.
+
+%%----------------------------------------------------------------------------
+%% gen_server callbacks
+%%----------------------------------------------------------------------------
+
+%% Default alarm handlers are OTP's alarm_handler set/clear.
+start_link(MemFraction) ->
+    start_link(MemFraction,
+ fun alarm_handler:set_alarm/1, fun alarm_handler:clear_alarm/1).
+
+start_link(MemFraction, AlarmSet, AlarmClear) ->
+    gen_server:start_link({local, ?SERVER}, ?MODULE,
+ [MemFraction, {AlarmSet, AlarmClear}], []).
+
+%% Schedules the first periodic `update`, then takes an initial
+%% memory sample and computes the limits before returning - so the
+%% server starts with real data rather than zeroes.
+init([MemFraction, AlarmFuns]) ->
+    TRef = erlang:send_after(?DEFAULT_MEMORY_CHECK_INTERVAL, self(), update),
+    State0 = #state{timeout = ?DEFAULT_MEMORY_CHECK_INTERVAL,
+ timer = TRef,
+ alarmed = false,
+ alarm_funs = AlarmFuns},
+    State1 = update_process_memory(init_state_by_os(State0)),
+    {ok, set_mem_limits(State1, MemFraction)}.
+
+%% Note: returns the configured limit (fraction or absolute spec),
+%% not the computed byte limit (that is get_memory_limit).
+handle_call(get_vm_memory_high_watermark, _From,
+ #state{memory_config_limit = MemLimit} = State) ->
+    {reply, MemLimit, State};
+
+handle_call({set_vm_memory_high_watermark, MemLimit}, _From, State) ->
+    {reply, ok, set_mem_limits(State, MemLimit)};
+
+handle_call(get_check_interval, _From, State) ->
+    {reply, State#state.timeout, State};
+
+%% Cancel the pending timer and re-arm with the new interval; if the
+%% timer already fired (cancel_timer -> false) just store the new
+%% timeout, as the in-flight `update` will re-arm it.
+handle_call({set_check_interval, Timeout}, _From, State) ->
+    State1 = case erlang:cancel_timer(State#state.timer) of
+ false ->
+ State#state{timeout = Timeout};
+ _ ->
+ State#state{timeout = Timeout,
+ timer = erlang:send_after(Timeout, self(), update)}
+ end,
+    {reply, ok, State1};
+
+handle_call(get_memory_limit, _From, State) ->
+    {reply, State#state.memory_limit, State};
+
+handle_call(get_cached_process_memory_and_limit, _From, State) ->
+    {reply, {State#state.process_memory, State#state.memory_limit}, State};
+
+%% NOTE(review): unknown calls get {noreply, _}, so the caller's
+%% gen_server:call blocks until its timeout rather than crashing the
+%% monitor - presumably deliberate; confirm before changing.
+handle_call(_Request, _From, State) ->
+    {noreply, State}.
+
+handle_cast(_Request, State) ->
+    {noreply, State}.
+
+%% Periodic tick: re-sample memory, raise/clear the alarm as needed
+%% and re-arm the timer with the (possibly updated) interval.
+handle_info(update, State) ->
+    _ = erlang:cancel_timer(State#state.timer),
+    State1 = internal_update(State),
+    TRef = erlang:send_after(State1#state.timeout, self(), update),
+    {noreply, State1#state{ timer = TRef }};
+
+handle_info(_Info, State) ->
+    {noreply, State}.
+
+terminate(_Reason, _State) ->
+    ok.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+%%----------------------------------------------------------------------------
+%% Server Internals
+%%----------------------------------------------------------------------------
+%% One-off RSS reading using a throwaway, OS-initialised state (does
+%% not require the monitor process to be running).
+get_rss_memory() ->
+    TmpState = init_state_by_os(#state{}),
+    {ok, ProcMem} = get_process_memory_using_strategy(rss, TmpState),
+    ProcMem.
+
+%% Ask the monitor for its cached reading; if it is not (yet) running
+%% fall back to an uncached sample with an `infinity` limit.
+get_cached_process_memory_and_limit() ->
+    try
+        gen_server:call(?MODULE, get_cached_process_memory_and_limit, infinity)
+    catch exit:{noproc, Error} ->
+        rabbit_log:warning("Memory monitor process not yet started: ~p~n", [Error]),
+        ProcessMemory = get_process_memory_uncached(),
+        {ProcessMemory, infinity}
+    end.
+
+get_process_memory_uncached() ->
+    TmpState = update_process_memory(init_state_by_os(#state{})),
+    TmpState#state.process_memory.
+
+%% Sample current process memory using the configured strategy and
+%% store it in the state.
+update_process_memory(State) ->
+    Strategy = get_memory_calculation_strategy(),
+    {ok, ProcMem} = get_process_memory_using_strategy(Strategy, State),
+    State#state{process_memory = ProcMem}.
+
+%% Populate the OS-specific fields once; on Linux, cache the page
+%% size and the path to this OS process's /proc/<pid>/statm file.
+init_state_by_os(State = #state{os_type = undefined}) ->
+    OsType = os:type(),
+    OsPid = os:getpid(),
+    init_state_by_os(State#state{os_type = OsType, os_pid = OsPid});
+init_state_by_os(State0 = #state{os_type = {unix, linux}, os_pid = OsPid}) ->
+    PageSize = get_linux_pagesize(),
+    ProcFile = io_lib:format("/proc/~s/statm", [OsPid]),
+    State0#state{page_size = PageSize, proc_file = ProcFile};
+init_state_by_os(State) ->
+    State.
+
+%% rss on Linux: second field of /proc/<pid>/statm is resident pages,
+%% multiplied by the cached page size.
+get_process_memory_using_strategy(rss, #state{os_type = {unix, linux},
+ page_size = PageSize,
+ proc_file = ProcFile}) ->
+    Data = read_proc_file(ProcFile),
+    [_|[RssPagesStr|_]] = string:tokens(Data, " "),
+    ProcMem = list_to_integer(RssPagesStr) * PageSize,
+    {ok, ProcMem};
+%% rss on other unixes: `ps -o rss=` reports kilobytes.
+get_process_memory_using_strategy(rss, #state{os_type = {unix, _},
+ os_pid = OsPid}) ->
+    Cmd = "ps -p " ++ OsPid ++ " -o rss=",
+    CmdOutput = os:cmd(Cmd),
+    case re:run(CmdOutput, "[0-9]+", [{capture, first, list}]) of
+        {match, [Match]} ->
+            ProcMem = list_to_integer(Match) * 1024,
+            {ok, ProcMem};
+        _ ->
+            {error, {unexpected_output_from_command, Cmd, CmdOutput}}
+    end;
+%% rss elsewhere (e.g. Windows) falls back to allocator totals.
+get_process_memory_using_strategy(rss, _State) ->
+    {ok, recon_alloc:memory(allocated)};
+get_process_memory_using_strategy(allocated, _State) ->
+    {ok, recon_alloc:memory(allocated)};
+get_process_memory_using_strategy(erlang, _State) ->
+    {ok, erlang:memory(total)}.
+
+%% Query the OS for total memory; any failure (missing tools,
+%% unparsable output) is logged and reported as `unknown` rather
+%% than crashing the monitor.
+get_total_memory_from_os() ->
+    try
+        get_total_memory(os:type())
+    catch _:Error:Stacktrace ->
+        rabbit_log:warning(
+          "Failed to get total system memory: ~n~p~n~p~n",
+          [Error, Stacktrace]),
+        unknown
+    end.
+
+%% Compute and store the absolute high-watermark in bytes from the
+%% configured limit (fraction or {absolute, _}). Warns once about
+%% 32-bit VMs and unknown OS totals, caps the total by the VM's
+%% addressable space, then runs internal_update/1 so alarms reflect
+%% the new limit immediately.
+set_mem_limits(State, MemLimit) ->
+    case erlang:system_info(wordsize) of
+        4 ->
+            rabbit_log:warning(
+              "You are using a 32-bit version of Erlang: you may run into "
+              "memory address~n"
+              "space exhaustion or statistic counters overflow.~n");
+        _ ->
+            ok
+    end,
+    TotalMemory =
+        case get_total_memory() of
+            unknown ->
+                %% only warn on the very first call (fields still
+                %% undefined), not on every watermark change
+                case State of
+                    #state { total_memory = undefined,
+ memory_limit = undefined } ->
+                        rabbit_log:warning(
+                          "Unknown total memory size for your OS ~p. "
+                          "Assuming memory size is ~p MiB (~p bytes).~n",
+                          [os:type(),
+ trunc(?MEMORY_SIZE_FOR_UNKNOWN_OS/?ONE_MiB),
+ ?MEMORY_SIZE_FOR_UNKNOWN_OS]);
+                    _ ->
+                        ok
+                end,
+                ?MEMORY_SIZE_FOR_UNKNOWN_OS;
+            Memory -> Memory
+        end,
+    UsableMemory =
+        case get_vm_limit() of
+            Limit when Limit < TotalMemory ->
+                rabbit_log:warning(
+                  "Only ~p MiB (~p bytes) of ~p MiB (~p bytes) memory usable due to "
+                  "limited address space.~n"
+                  "Crashes due to memory exhaustion are possible - see~n"
+                  "https://www.rabbitmq.com/memory.html#address-space~n",
+                  [trunc(Limit/?ONE_MiB), Limit, trunc(TotalMemory/?ONE_MiB),
+ TotalMemory]),
+                Limit;
+            _ ->
+                TotalMemory
+        end,
+    MemLim = interpret_limit(parse_mem_limit(MemLimit), UsableMemory),
+    rabbit_log:info(
+      "Memory high watermark set to ~p MiB (~p bytes)"
+      " of ~p MiB (~p bytes) total~n",
+      [trunc(MemLim/?ONE_MiB), MemLim,
+ trunc(TotalMemory/?ONE_MiB), TotalMemory]
+    ),
+    internal_update(State #state { total_memory = TotalMemory,
+ memory_limit = MemLim,
+ memory_config_limit = MemLimit}).
+
+%% Turn a parsed limit into bytes: an absolute limit is capped by the
+%% usable memory; a fraction is applied to it.
+interpret_limit({'absolute', MemLim}, UsableMemory) ->
+    erlang:min(MemLim, UsableMemory);
+interpret_limit(MemFraction, UsableMemory) ->
+    trunc(MemFraction * UsableMemory).
+
+%% Normalise a configured watermark into either {absolute, Bytes} or
+%% a float fraction, clamping fractions above the allowed maximum and
+%% falling back to the default for anything unparsable.
+parse_mem_limit({absolute, Limit}) ->
+    case rabbit_resource_monitor_misc:parse_information_unit(Limit) of
+        {ok, ParsedLimit} -> {absolute, ParsedLimit};
+        {error, parse_error} ->
+            rabbit_log:error("Unable to parse vm_memory_high_watermark value ~p", [Limit]),
+            ?DEFAULT_VM_MEMORY_HIGH_WATERMARK
+    end;
+parse_mem_limit(MemLimit) when is_integer(MemLimit) ->
+    parse_mem_limit(float(MemLimit));
+parse_mem_limit(MemLimit) when is_float(MemLimit), MemLimit =< ?MAX_VM_MEMORY_HIGH_WATERMARK ->
+    MemLimit;
+parse_mem_limit(MemLimit) when is_float(MemLimit), MemLimit > ?MAX_VM_MEMORY_HIGH_WATERMARK ->
+    rabbit_log:warning(
+      "Memory high watermark of ~p is above the allowed maximum, falling back to ~p~n",
+      [MemLimit, ?MAX_VM_MEMORY_HIGH_WATERMARK]
+    ),
+    ?MAX_VM_MEMORY_HIGH_WATERMARK;
+parse_mem_limit(MemLimit) ->
+    rabbit_log:warning(
+      "Memory high watermark of ~p is invalid, defaulting to ~p~n",
+      [MemLimit, ?DEFAULT_VM_MEMORY_HIGH_WATERMARK]
+    ),
+    ?DEFAULT_VM_MEMORY_HIGH_WATERMARK.
+
+%% Re-sample process memory and raise/clear the memory resource
+%% alarm on the false->true / true->false transitions only.
+internal_update(State0 = #state{memory_limit = MemLimit,
+ alarmed = Alarmed,
+ alarm_funs = {AlarmSet, AlarmClear}}) ->
+    State1 = update_process_memory(State0),
+    ProcMem = State1#state.process_memory,
+    NewAlarmed = ProcMem > MemLimit,
+    case {Alarmed, NewAlarmed} of
+        {false, true} -> emit_update_info(set, ProcMem, MemLimit),
+ AlarmSet({{resource_limit, memory, node()}, []});
+        {true, false} -> emit_update_info(clear, ProcMem, MemLimit),
+ AlarmClear({resource_limit, memory, node()});
+        _ -> ok
+    end,
+    State1#state{alarmed = NewAlarmed}.
+
+emit_update_info(AlarmState, MemUsed, MemLimit) ->
+    rabbit_log:info(
+      "vm_memory_high_watermark ~p. Memory used:~p allowed:~p~n",
+      [AlarmState, MemUsed, MemLimit]).
+
+%% According to https://msdn.microsoft.com/en-us/library/aa366778(VS.85).aspx
+%% Windows has 2GB and 8TB of address space for 32 and 64 bit accordingly.
+get_vm_limit({win32,_OSname}) ->
+    case erlang:system_info(wordsize) of
+        4 -> 2*1024*1024*1024; %% 2 GB for 32 bits 2^31
+        8 -> 8*1024*1024*1024*1024 %% 8 TB for 64 bits 2^42
+    end;
+
+%% On a 32-bit machine, if you're using more than 2 gigs of RAM you're
+%% in big trouble anyway.
+get_vm_limit(_OsType) ->
+    case erlang:system_info(wordsize) of
+        4 -> 2*1024*1024*1024; %% 2 GB for 32 bits 2^31
+        8 -> 256*1024*1024*1024*1024 %% 256 TB for 64 bits 2^48
+ %%https://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details
+    end.
+
+%%----------------------------------------------------------------------------
+%% Internal Helpers
+%%----------------------------------------------------------------------------
+%% Run an external command; by default a missing executable throws.
+cmd(Command) ->
+    cmd(Command, true).
+
+%% With ThrowIfMissing = false a missing executable is reported as
+%% {error, command_not_found} instead of throwing.
+cmd(Command, ThrowIfMissing) ->
+    Exec = hd(string:tokens(Command, " ")),
+    case {ThrowIfMissing, os:find_executable(Exec)} of
+        {true, false} ->
+            throw({command_not_found, Exec});
+        {false, false} ->
+            {error, command_not_found};
+        {_, _Filename} ->
+            os:cmd(Command)
+    end.
+
+%% Fallback when the page size cannot be determined: log and assume
+%% the common 4 KiB page.
+default_linux_pagesize(CmdOutput) ->
+    rabbit_log:warning(
+      "Failed to get memory page size, using 4096. Reason: ~s",
+      [CmdOutput]),
+    4096.
+
+%% Page size via `getconf PAGESIZE`, with the 4096 fallback above
+%% when getconf is missing or prints something unexpected.
+get_linux_pagesize() ->
+    case cmd("getconf PAGESIZE", false) of
+        {error, command_not_found} ->
+            default_linux_pagesize("getconf not found in PATH");
+        CmdOutput ->
+            case re:run(CmdOutput, "^[0-9]+", [{capture, first, list}]) of
+                {match, [Match]} -> list_to_integer(Match);
+                _ ->
+                    default_linux_pagesize(CmdOutput)
+            end
+    end.
+
+%% get_total_memory(OS) -> Total
+%% Windows and Freebsd code based on: memsup:get_memory_usage/1
+%% Original code was part of OTP and released under "Erlang Public License".
+%% All clauses return total physical memory in bytes, or `unknown`
+%% for unsupported platforms.
+
+get_total_memory({unix, darwin}) ->
+    sysctl("hw.memsize");
+
+get_total_memory({unix, freebsd}) ->
+    PageSize = sysctl("vm.stats.vm.v_page_size"),
+    PageCount = sysctl("vm.stats.vm.v_page_count"),
+    PageCount * PageSize;
+
+get_total_memory({unix, openbsd}) ->
+    sysctl("hw.usermem");
+
+get_total_memory({win32, _OSname}) ->
+    [Result|_] = os_mon_sysinfo:get_mem_info(),
+    {ok, [_MemLoad, TotPhys, _AvailPhys, _TotPage, _AvailPage, _TotV, _AvailV],
+ _RestStr} =
+        io_lib:fread("~d~d~d~d~d~d~d", Result),
+    TotPhys;
+
+get_total_memory({unix, linux}) ->
+    File = read_proc_file("/proc/meminfo"),
+    Lines = string:tokens(File, "\n"),
+    Dict = dict:from_list(lists:map(fun parse_line_linux/1, Lines)),
+    dict:fetch('MemTotal', Dict);
+
+get_total_memory({unix, sunos}) ->
+    File = cmd("/usr/sbin/prtconf"),
+    Lines = string:tokens(File, "\n"),
+    Dict = dict:from_list(lists:map(fun parse_line_sunos/1, Lines)),
+    dict:fetch('Memory size', Dict);
+
+get_total_memory({unix, aix}) ->
+    File = cmd("/usr/bin/vmstat -v"),
+    Lines = string:tokens(File, "\n"),
+    Dict = dict:from_list(lists:map(fun parse_line_aix/1, Lines)),
+    %% vmstat -v reports 4 KiB pages
+    dict:fetch('memory pages', Dict) * 4096;
+
+get_total_memory(_OsType) ->
+    unknown.
+
+%% A line looks like "MemTotal: 502968 kB"
+%% or (with broken OS/modules) "Readahead 123456 kB"
+%% Returns {NameAtom, Bytes} (kB/KB scaled by 1024; unitless as-is).
+parse_line_linux(Line) ->
+    {Name, Value, UnitRest} =
+        case string:tokens(Line, ":") of
+ %% no colon in the line
+ [S] ->
+ [K, RHS] = re:split(S, "\s", [{parts, 2}, {return, list}]),
+ [V | Unit] = string:tokens(RHS, " "),
+ {K, V, Unit};
+ [K, RHS | _Rest] ->
+ [V | Unit] = string:tokens(RHS, " "),
+ {K, V, Unit}
+        end,
+    Value1 = case UnitRest of
+ [] -> list_to_integer(Value); %% no units
+ ["kB"] -> list_to_integer(Value) * 1024;
+ ["KB"] -> list_to_integer(Value) * 1024
+ end,
+    {list_to_atom(Name), Value1}.
+
+%% A line looks like "Memory size: 1024 Megabytes"
+%% Known units are scaled to bytes; unknown units are passed through.
+parse_line_sunos(Line) ->
+    case string:tokens(Line, ":") of
+        [Name, RHS | _Rest] ->
+            [Value1 | UnitsRest] = string:tokens(RHS, " "),
+            Value2 = case UnitsRest of
+ ["Gigabytes"] ->
+ list_to_integer(Value1) * ?ONE_MiB * 1024;
+ ["Megabytes"] ->
+ list_to_integer(Value1) * ?ONE_MiB;
+ ["Kilobytes"] ->
+ list_to_integer(Value1) * 1024;
+ _ ->
+ Value1 ++ UnitsRest %% no known units
+ end,
+            {list_to_atom(Name), Value2};
+        [Name] -> {list_to_atom(Name), none}
+    end.
+
+%% Lines look like " 12345 memory pages"
+%% or " 80.1 maxpin percentage"
+parse_line_aix(Line) ->
+    [Value | NameWords] = string:tokens(Line, " "),
+    Name = string:join(NameWords, " "),
+    {list_to_atom(Name),
+ case lists:member($., Value) of
+ true -> trunc(list_to_float(Value));
+ false -> list_to_integer(Value)
+ end}.
+
+%% Run sysctl -n for a key and parse its (newline-terminated) value.
+sysctl(Def) ->
+    list_to_integer(cmd("/usr/bin/env sysctl -n " ++ Def) -- "\n").
+
+%% file:read_file does not work on files in /proc as it seems to get
+%% the size of the file first and then read that many bytes. But files
+%% in /proc always have length 0, we just have to read until we get
+%% eof.
+read_proc_file(File) ->
+    {ok, IoDevice} = file:open(File, [read, raw]),
+    Res = read_proc_file(IoDevice, []),
+    _ = file:close(IoDevice),
+    lists:flatten(lists:reverse(Res)).
+
+-define(BUFFER_SIZE, 1024).
+%% Accumulate ?BUFFER_SIZE chunks (newest first) until eof.
+read_proc_file(IoDevice, Acc) ->
+    case file:read(IoDevice, ?BUFFER_SIZE) of
+        {ok, Res} -> read_proc_file(IoDevice, [Res | Acc]);
+        eof -> Acc
+    end.
diff --git a/deps/rabbit_common/src/worker_pool.erl b/deps/rabbit_common/src/worker_pool.erl
new file mode 100644
index 0000000000..f81e924653
--- /dev/null
+++ b/deps/rabbit_common/src/worker_pool.erl
@@ -0,0 +1,172 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(worker_pool).
+
+%% Generic worker pool manager.
+%%
+%% Submitted jobs are functions. They can be executed synchronously
+%% (using worker_pool:submit/1, worker_pool:submit/2) or asynchronously
+%% (using worker_pool:submit_async/1).
+%%
+%% We typically use the worker pool if we want to limit the maximum
+%% parallelism of some job. We are not trying to dodge the cost of
+%% creating Erlang processes.
+%%
+%% Supports nested submission of jobs and two execution modes:
+%% 'single' and 'reuse'. Jobs executed in 'single' mode are invoked in
+%% a one-off process. Those executed in 'reuse' mode are invoked in a
+%% worker process out of the pool. Nested jobs are always executed
+%% immediately in current worker process.
+%%
+%% 'single' mode is offered to work around a bug in Mnesia: after
+%% network partitions reply messages for prior failed requests can be
+%% sent to Mnesia clients - a reused worker pool process can crash on
+%% receiving one.
+%%
+%% Caller submissions are enqueued internally. When the next worker
+%% process is available, it communicates it to the pool and is
+%% assigned a job to execute. If job execution fails with an error, no
+%% response is returned to the caller.
+%%
+%% Worker processes prioritise certain command-and-control messages
+%% from the pool.
+%%
+%% Future improvement points: job prioritisation.
+
+-behaviour(gen_server2).
+
+-export([start_link/1,
+ submit/1, submit/2, submit/3,
+ submit_async/1, submit_async/2,
+ dispatch_sync/1, dispatch_sync/2,
+ ready/2,
+ idle/2,
+ default_pool/0]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%%----------------------------------------------------------------------------
+
+-type mfargs() :: {atom(), atom(), [any()]}.
+
+-spec start_link(atom()) -> {'ok', pid()} | {'error', any()}.
+-spec submit(fun (() -> A) | mfargs()) -> A.
+-spec submit(fun (() -> A) | mfargs(), 'reuse' | 'single') -> A.
+-spec submit(atom(), fun (() -> A) | mfargs(), 'reuse' | 'single') -> A.
+-spec submit_async(fun (() -> any()) | mfargs()) -> 'ok'.
+-spec dispatch_sync(fun(() -> any()) | mfargs()) -> 'ok'.
+-spec ready(atom(), pid()) -> 'ok'.
+-spec idle(atom(), pid()) -> 'ok'.
+-spec default_pool() -> atom().
+
+%%----------------------------------------------------------------------------
+
+-define(DEFAULT_POOL, ?MODULE).
+%% gen_server2 hibernation backoff bounds (milliseconds).
+-define(HIBERNATE_AFTER_MIN, 1000).
+-define(DESIRED_HIBERNATE, 10000).
+
+%% available: ordset of idle worker pids; pending: queue of jobs /
+%% callers waiting for a worker.
+-record(state, { available, pending }).
+
+%%----------------------------------------------------------------------------
+
+start_link(Name) -> gen_server2:start_link({local, Name}, ?MODULE, [],
+ [{timeout, infinity}]).
+
+%% Synchronous submission to the default pool, reusing a pool worker.
+submit(Fun) ->
+    submit(?DEFAULT_POOL, Fun, reuse).
+
+%% ProcessModel =:= single is for working around the mnesia_locker bug.
+submit(Fun, ProcessModel) ->
+    submit(?DEFAULT_POOL, Fun, ProcessModel).
+
+%% Nested submission (process dictionary flag set by the worker) runs
+%% the job immediately in the current worker, avoiding deadlock;
+%% otherwise ask the pool for the next free worker and hand it the job.
+submit(Server, Fun, ProcessModel) ->
+    case get(worker_pool_worker) of
+        true -> worker_pool_worker:run(Fun);
+        _ -> Pid = gen_server2:call(Server, {next_free, self()}, infinity),
+ worker_pool_worker:submit(Pid, Fun, ProcessModel)
+    end.
+
+%% Fire-and-forget submission: no result is returned to the caller.
+submit_async(Fun) -> submit_async(?DEFAULT_POOL, Fun).
+
+submit_async(Server, Fun) -> gen_server2:cast(Server, {run_async, Fun}).
+
+%% Block until a worker is assigned, then hand off asynchronously.
+dispatch_sync(Fun) ->
+    dispatch_sync(?DEFAULT_POOL, Fun).
+
+dispatch_sync(Server, Fun) ->
+    Pid = gen_server2:call(Server, {next_free, self()}, infinity),
+    worker_pool_worker:submit_async(Pid, Fun).
+
+%% Called by workers to announce themselves (ready) and to report
+%% they are free again (idle).
+ready(Server, WPid) -> gen_server2:cast(Server, {ready, WPid}).
+
+idle(Server, WPid) -> gen_server2:cast(Server, {idle, WPid}).
+
+default_pool() -> ?DEFAULT_POOL.
+
+%%----------------------------------------------------------------------------
+
+%% Start empty and hibernating; workers register via {ready, WPid}.
+init([]) ->
+    {ok, #state { pending = queue:new(), available = ordsets:new() }, hibernate,
+ {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+%% No free worker: queue the caller (reply is deferred until a worker
+%% goes idle - see handle_cast({idle,...})).
+handle_call({next_free, CPid}, From, State = #state { available = [],
+ pending = Pending }) ->
+    {noreply, State#state{pending = queue:in({next_free, From, CPid}, Pending)},
+ hibernate};
+handle_call({next_free, CPid}, _From, State = #state { available =
+ [WPid | Avail1] }) ->
+    worker_pool_worker:next_job_from(WPid, CPid),
+    {reply, WPid, State #state { available = Avail1 }, hibernate};
+
+handle_call(Msg, _From, State) ->
+    {stop, {unexpected_call, Msg}, State}.
+
+%% New worker: monitor it (so we can drop it on death) and treat it
+%% as idle.
+handle_cast({ready, WPid}, State) ->
+    erlang:monitor(process, WPid),
+    handle_cast({idle, WPid}, State);
+
+%% A worker became free: either park it in `available` or immediately
+%% hand it the oldest pending job/caller.
+handle_cast({idle, WPid}, State = #state { available = Avail,
+ pending = Pending }) ->
+    {noreply,
+ case queue:out(Pending) of
+ {empty, _Pending} ->
+ State #state { available = ordsets:add_element(WPid, Avail) };
+ {{value, {next_free, From, CPid}}, Pending1} ->
+ worker_pool_worker:next_job_from(WPid, CPid),
+ gen_server2:reply(From, WPid),
+ State #state { pending = Pending1 };
+ {{value, {run_async, Fun}}, Pending1} ->
+ worker_pool_worker:submit_async(WPid, Fun),
+ State #state { pending = Pending1 }
+ end, hibernate};
+
+%% Async job: queue it if no worker is free, otherwise dispatch now.
+handle_cast({run_async, Fun}, State = #state { available = [],
+ pending = Pending }) ->
+    {noreply, State #state { pending = queue:in({run_async, Fun}, Pending)},
+ hibernate};
+handle_cast({run_async, Fun}, State = #state { available = [WPid | Avail1] }) ->
+    worker_pool_worker:submit_async(WPid, Fun),
+    {noreply, State #state { available = Avail1 }, hibernate};
+
+handle_cast(Msg, State) ->
+    {stop, {unexpected_cast, Msg}, State}.
+
+%% A monitored worker died: forget it; its supervisor restarts it and
+%% the replacement re-registers via {ready, _}.
+handle_info({'DOWN', _MRef, process, WPid, _Reason},
+ State = #state { available = Avail }) ->
+    {noreply, State #state { available = ordsets:del_element(WPid, Avail) },
+ hibernate};
+
+handle_info(Msg, State) ->
+    {stop, {unexpected_info, Msg}, State}.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+%% Returning State (not ok) is harmless: gen_server2 ignores
+%% terminate/2's return value.
+terminate(_Reason, State) ->
+    State.
diff --git a/deps/rabbit_common/src/worker_pool_sup.erl b/deps/rabbit_common/src/worker_pool_sup.erl
new file mode 100644
index 0000000000..96dbbb2357
--- /dev/null
+++ b/deps/rabbit_common/src/worker_pool_sup.erl
@@ -0,0 +1,69 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(worker_pool_sup).
+
+-behaviour(supervisor).
+
+-export([start_link/0, start_link/1, start_link/2]).
+
+-export([init/1]).
+
+-export([default_pool_size/0]).
+
+%%----------------------------------------------------------------------------
+
+-spec start_link() -> rabbit_types:ok_pid_or_error().
+-spec start_link(non_neg_integer()) -> rabbit_types:ok_pid_or_error().
+-spec start_link(non_neg_integer(), atom())
+ -> rabbit_types:ok_pid_or_error().
+
+%%----------------------------------------------------------------------------
+
+%% Start the default pool supervisor with an automatically chosen size.
+start_link() ->
+    Size = default_pool_size(),
+    start_link(Size).
+
+%% Start the default-named pool with an explicit size.
+start_link(PoolSize) ->
+    rabbit_log:info("Will use ~p processes for default worker pool", [PoolSize]),
+    start_link(PoolSize, worker_pool:default_pool()).
+
+%% Start a named pool of PoolSize workers. The supervisor registers itself
+%% as '<PoolName>_sup'. list_to_atom/1 is safe here: pool names come from
+%% code, not from untrusted input, so the atom table cannot be flooded.
+start_link(PoolSize, PoolName) ->
+    rabbit_log:info("Starting worker pool '~p' with ~p processes in it", [PoolName, PoolSize]),
+    SupName = list_to_atom(atom_to_list(PoolName) ++ "_sup"),
+    supervisor:start_link({local, SupName}, ?MODULE, [PoolSize, PoolName]).
+
+%%----------------------------------------------------------------------------
+
+%% Supervisor spec: one worker_pool coordinator plus PoolSize
+%% worker_pool_worker children, all transient. The 16#ffffffff shutdown
+%% value is an effectively-infinite timeout, giving workers time to finish
+%% their current job.
+init([PoolSize, PoolName]) ->
+    %% we want to survive up to 1K of worker restarts per second,
+    %% e.g. when a large worker pool used for network connections
+    %% encounters a network failure. This is the case in the LDAP authentication
+    %% backend plugin.
+    {ok, {{one_for_one, 1000, 1},
+          [{worker_pool, {worker_pool, start_link, [PoolName]}, transient,
+            16#ffffffff, worker, [worker_pool]} |
+           [{N, {worker_pool_worker, start_link, [PoolName]}, transient,
+             16#ffffffff, worker, [worker_pool_worker]}
+            || N <- lists:seq(1, PoolSize)]]}}.
+
+%%
+%% Implementation
+%%
+
+-spec default_pool_size() -> integer().
+
+%% Pool size comes from the 'default_worker_pool_size' app env when it is
+%% an integer; any other value (including 'undefined') falls back to a
+%% scheduler-count-based guess.
+default_pool_size() ->
+    case rabbit_misc:get_env(rabbit, default_worker_pool_size, undefined) of
+        N when is_integer(N) -> N;
+        _                    -> guess_default_pool_size()
+    end.
+
+-spec guess_default_pool_size() -> integer().
+
+%% One worker per scheduler thread.
+guess_default_pool_size() ->
+    erlang:system_info(schedulers).
diff --git a/deps/rabbit_common/src/worker_pool_worker.erl b/deps/rabbit_common/src/worker_pool_worker.erl
new file mode 100644
index 0000000000..79436e0773
--- /dev/null
+++ b/deps/rabbit_common/src/worker_pool_worker.erl
@@ -0,0 +1,192 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(worker_pool_worker).
+
+%% Executes jobs (functions) submitted to a worker pool with worker_pool:submit/1,
+%% worker_pool:submit/2 or worker_pool:submit_async/1.
+%%
+%% See worker_pool for an overview.
+
+-behaviour(gen_server2).
+
+-export([start_link/1, next_job_from/2, submit/3, submit_async/2,
+ run/1]).
+
+-export([set_maximum_since_use/2]).
+-export([set_timeout/2, set_timeout/3, clear_timeout/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3, prioritise_cast/3]).
+
+%%----------------------------------------------------------------------------
+
+-type mfargs() :: {atom(), atom(), [any()]}.
+
+%% Public API specs. Note: the pool name passed to start_link/1 is an
+%% arbitrary atom, so the spec must use the type atom(); the previous
+%% bare 'atom' denoted the singleton literal type (the atom 'atom'),
+%% which was wrong and would mislead Dialyzer.
+-spec start_link(atom()) -> {'ok', pid()} | {'error', any()}.
+-spec next_job_from(pid(), pid()) -> 'ok'.
+-spec submit(pid(), fun (() -> A) | mfargs(), 'reuse' | 'single') -> A.
+-spec submit_async(pid(), fun (() -> any()) | mfargs()) -> 'ok'.
+-spec run(fun (() -> A)) -> A; (mfargs()) -> any().
+-spec set_maximum_since_use(pid(), non_neg_integer()) -> 'ok'.
+
+%%----------------------------------------------------------------------------
+
+-define(HIBERNATE_AFTER_MIN, 1000).
+-define(DESIRED_HIBERNATE, 10000).
+
+%%----------------------------------------------------------------------------
+
+%% Start a worker attached to the given pool. Not registered by name:
+%% workers are addressed by pid via the pool coordinator.
+start_link(PoolName) ->
+    gen_server2:start_link(?MODULE, [PoolName], [{timeout, infinity}]).
+
+%% Tell worker Pid that its next job will come from caller CPid
+%% (asynchronous; prioritised above ordinary casts, see prioritise_cast/3).
+next_job_from(Pid, CPid) ->
+    gen_server2:cast(Pid, {next_job_from, CPid}).
+
+%% Synchronously run Fun on worker Pid and return its result.
+%% ProcessModel is 'reuse' (run in the worker) or 'single' (run in a
+%% fresh spawned process); see run/2.
+submit(Pid, Fun, ProcessModel) ->
+    gen_server2:call(Pid, {submit, Fun, self(), ProcessModel}, infinity).
+
+%% Fire-and-forget execution of Fun on worker Pid.
+submit_async(Pid, Fun) ->
+    gen_server2:cast(Pid, {submit_async, Fun, self()}).
+
+%% file_handle_cache callback relay (registered in init/1); prioritised
+%% highest so FD reclamation is not stuck behind queued jobs.
+set_maximum_since_use(Pid, Age) ->
+    gen_server2:cast(Pid, {set_maximum_since_use, Age}).
+
+%% Execute a job given either as an {M, F, A} triple or a 0-arity fun.
+run({M, F, A}) -> apply(M, F, A);
+run(Fun)       -> Fun().
+
+%% 'reuse': run the job directly in the calling (worker) process.
+run(Fun, reuse) ->
+    run(Fun);
+%% 'single': run the job in a dedicated, linked, throwaway process and
+%% wait for its result. Because of the link, a crash in Fun propagates to
+%% the worker (crash-on-error semantics); on success the helper unlinks
+%% before exiting normally.
+run(Fun, single) ->
+    Self = self(),
+    Ref = make_ref(),
+    spawn_link(fun () ->
+                       put(worker_pool_worker, true),
+                       Self ! {Ref, run(Fun)},
+                       unlink(Self)
+               end),
+    receive
+        {Ref, Res} -> Res
+    end.
+
+%%----------------------------------------------------------------------------
+
+%% Register with the file handle cache, announce readiness to the pool
+%% coordinator, and stash identity flags in the process dictionary
+%% (read back via get/1 in the job-completion paths).
+init([PoolName]) ->
+    ok = file_handle_cache:register_callback(?MODULE, set_maximum_since_use,
+                                             [self()]),
+    ok = worker_pool:ready(PoolName, self()),
+    put(worker_pool_worker, true),
+    put(worker_pool_name, PoolName),
+    {ok, undefined, hibernate,
+     {backoff, ?HIBERNATE_AFTER_MIN, ?HIBERNATE_AFTER_MIN, ?DESIRED_HIBERNATE}}.
+
+%% gen_server2 message priorities: FD-cache maintenance first (8), then
+%% the next_job_from designation (7) so it is seen before the matching
+%% submit, then everything else (0).
+prioritise_cast({set_maximum_since_use, _Age}, _Len, _State) -> 8;
+prioritise_cast({next_job_from, _CPid},        _Len, _State) -> 7;
+prioritise_cast(_Msg,                          _Len, _State) -> 0.
+
+%% submit arrived before the {next_job_from, CPid} cast: stash the job in
+%% the state and defer the reply until the designation arrives (see the
+%% matching handle_cast clause).
+handle_call({submit, Fun, CPid, ProcessModel}, From, undefined) ->
+    {noreply, {job, CPid, From, Fun, ProcessModel}, hibernate};
+
+%% Designation already received for this exact caller CPid: drop the
+%% monitor, run the job, reply, and report back to the pool as idle.
+handle_call({submit, Fun, CPid, ProcessModel}, From, {from, CPid, MRef}) ->
+    erlang:demonitor(MRef),
+    gen_server2:reply(From, run(Fun, ProcessModel)),
+    ok = worker_pool:idle(get(worker_pool_name), self()),
+    {noreply, undefined, hibernate};
+
+handle_call(Msg, _From, State) ->
+    {stop, {unexpected_call, Msg}, State}.
+
+%% Designation arrived first: monitor the prospective caller (so we can
+%% recover if it dies before submitting, see handle_info 'DOWN') and wait.
+handle_cast({next_job_from, CPid}, undefined) ->
+    MRef = erlang:monitor(process, CPid),
+    {noreply, {from, CPid, MRef}, hibernate};
+
+%% The matching submit was stashed earlier: run it now and go idle.
+handle_cast({next_job_from, CPid}, {job, CPid, From, Fun, ProcessModel}) ->
+    gen_server2:reply(From, run(Fun, ProcessModel)),
+    ok = worker_pool:idle(get(worker_pool_name), self()),
+    {noreply, undefined, hibernate};
+
+%% Async job with no outstanding designation: just run it.
+handle_cast({submit_async, Fun, _CPid}, undefined) ->
+    run(Fun),
+    ok = worker_pool:idle(get(worker_pool_name), self()),
+    {noreply, undefined, hibernate};
+
+%% Async job from the designated caller: drop the monitor first.
+handle_cast({submit_async, Fun, CPid}, {from, CPid, MRef}) ->
+    erlang:demonitor(MRef),
+    run(Fun),
+    ok = worker_pool:idle(get(worker_pool_name), self()),
+    {noreply, undefined, hibernate};
+
+handle_cast({set_maximum_since_use, Age}, State) ->
+    ok = file_handle_cache:set_maximum_since_use(Age),
+    {noreply, State, hibernate};
+
+handle_cast(Msg, State) ->
+    {stop, {unexpected_cast, Msg}, State}.
+
+%% The designated caller died before submitting its job: return to the
+%% idle pool rather than waiting forever.
+handle_info({'DOWN', MRef, process, CPid, _Reason}, {from, CPid, MRef}) ->
+    ok = worker_pool:idle(get(worker_pool_name), self()),
+    {noreply, undefined, hibernate};
+
+%% Stale 'DOWN' for anything else: ignore.
+handle_info({'DOWN', _MRef, process, _Pid, _Reason}, State) ->
+    {noreply, State, hibernate};
+
+%% Timer set via set_timeout/2,3 fired: remove the bookkeeping entry,
+%% then run the stored fun.
+handle_info({timeout, Key, Fun}, State) ->
+    clear_timeout(Key),
+    Fun(),
+    {noreply, State, hibernate};
+
+handle_info(Msg, State) ->
+    {stop, {unexpected_info, Msg}, State}.
+
+%% No state migration needed across code upgrades.
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+%% NOTE(review): returns State rather than the conventional 'ok'; the
+%% return value of terminate/2 is ignored by the behaviour.
+terminate(_Reason, State) ->
+    State.
+
+-spec set_timeout(non_neg_integer(), fun(() -> any())) ->
+    {ok, reference()}.
+%% Schedule Fun to run in this worker after Time ms, under a fresh key.
+set_timeout(Time, Fun) ->
+    Key = make_ref(),
+    set_timeout(Key, Time, Fun).
+
+-spec set_timeout(Key, non_neg_integer(), fun(() -> any())) ->
+    {ok, Key} when Key :: any().
+%% Schedule (or reschedule, replacing any existing timer under the same
+%% Key) Fun to run after Time ms. Must be called from the worker itself:
+%% bookkeeping lives in the process dictionary.
+set_timeout(Key, Time, Fun) ->
+    Timeouts = get_timeouts(),
+    set_timeout(Key, Time, Fun, Timeouts).
+
+-spec clear_timeout(any()) -> ok.
+%% Cancel the timer registered under Key (no-op if absent).
+clear_timeout(Key) ->
+    NewTimeouts = cancel_timeout(Key, get_timeouts()),
+    put(timeouts, NewTimeouts),
+    ok.
+
+%% Lazily-initialised Key -> TRef table in the process dictionary.
+%% NOTE(review): uses the legacy 'dict' module; maps would be the modern
+%% choice, but changing it would alter the stored term's shape.
+get_timeouts() ->
+    case get(timeouts) of
+        undefined -> dict:new();
+        Dict      -> Dict
+    end.
+
+%% Arm the timer. Any previous timer under Key is cancelled first; the
+%% cancelled table is deliberately discarded because dict:store/3
+%% overwrites the entry anyway.
+set_timeout(Key, Time, Fun, Timeouts) ->
+    _ = cancel_timeout(Key, Timeouts),
+    TRef = erlang:send_after(Time, self(), {timeout, Key, Fun}),
+    NewTimeouts = dict:store(Key, TRef, Timeouts),
+    put(timeouts, NewTimeouts),
+    {ok, Key}.
+
+%% Cancel Key's timer and return the table without it. The zero-timeout
+%% receive flushes a {timeout, Key, _} message that may already have been
+%% delivered before cancel_timer took effect, so a stale firing can never
+%% be processed later.
+cancel_timeout(Key, Timeouts) ->
+    case dict:find(Key, Timeouts) of
+        {ok, TRef} ->
+            _ = erlang:cancel_timer(TRef),
+            receive {timeout, Key, _} -> ok
+            after 0 -> ok
+            end,
+            dict:erase(Key, Timeouts);
+        error ->
+            Timeouts
+    end.
diff --git a/deps/rabbit_common/test/gen_server2_test_server.erl b/deps/rabbit_common/test/gen_server2_test_server.erl
new file mode 100644
index 0000000000..0d68df8f7e
--- /dev/null
+++ b/deps/rabbit_common/test/gen_server2_test_server.erl
@@ -0,0 +1,72 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2017-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(gen_server2_test_server).
+-behaviour(gen_server2).
+-record(gs2_state, {parent, name, state, mod, time,
+ timeout_state, queue, debug, prioritisers,
+ timer, emit_stats_fun, stop_stats_fun}).
+
+-export([start_link/0, start_link/1, start_link/2, stats_count/1]).
+
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3, handle_post_hibernate/1]).
+
+%% Start a stats-counting test server with no hibernation backoff.
+start_link(count_stats) ->
+    start_link(count_stats, infinity).
+
+%% Start the server, then reach into gen_server2's internal loop state
+%% (via sys:replace_state/2 and the mirrored #gs2_state{} record above) to
+%% install an emit_stats_fun that bumps the ETS counter on every stats
+%% emission — letting tests observe how often gen_server2 emits stats.
+start_link(count_stats, Time) ->
+    {ok, Server} = gen_server2:start_link(gen_server2_test_server, [Time], []),
+    Counter = gen_server2:call(Server, get_counter),
+    sys:replace_state(Server,
+        fun(GSState) ->
+            GSState#gs2_state{
+                emit_stats_fun = fun(State) -> count_stats(Counter), State end
+            }
+        end),
+    {ok, Server}.
+
+%% Plain test server, no stats counting, infinite timeout.
+start_link() ->
+    gen_server2:start_link(gen_server2_test_server, [], []).
+
+%% Read the current number of recorded stats emissions for Server.
+stats_count(Server) ->
+    Counter = gen_server2:call(Server, get_counter),
+    [{count, Count}] = ets:lookup(Counter, count),
+    Count.
+
+init([]) ->
+    init([infinity]);
+%% Create a public ETS counter (owned by this server process) and embed
+%% its tid in the state. A {backoff, ...} Time is passed through as the
+%% gen_server2 4-tuple init return; anything else as the plain timeout.
+init([Time]) ->
+    Counter = ets:new(stats_count, [public]),
+    ets:insert(Counter, {count, 0}),
+    case Time of
+        {backoff, _, _, _} ->
+            {ok, {counter, Counter}, hibernate, Time};
+        _ ->
+            {ok, {counter, Counter}, Time}
+    end.
+
+%% Atomically increment the stats counter.
+count_stats(Counter) ->
+    ets:update_counter(Counter, count, {2, 1}).
+
+%% 'get_counter' exposes the ETS tid; 'hibernate' forces an immediate
+%% hibernation (used by tests); anything else is acknowledged and ignored.
+handle_call(get_counter,_, {counter, Counter} = State) ->
+    {reply, Counter, State};
+handle_call(hibernate, _, State) ->
+    {reply, ok, State, hibernate};
+handle_call(_,_,State) ->
+    {reply, ok, State}.
+
+%% {sleep, Time} blocks the server loop for Time ms (lets tests simulate a
+%% busy server); remaining callbacks are pass-through no-ops.
+handle_cast({sleep, Time}, State) -> timer:sleep(Time), {noreply, State};
+handle_cast(_,State) -> {noreply, State}.
+
+handle_post_hibernate(State) -> {noreply, State}.
+
+handle_info(_,State) -> {noreply, State}.
+
+terminate(_,_State) -> ok.
+
+code_change(_,State,_) -> {ok, State}.
diff --git a/deps/rabbit_common/test/rabbit_env_SUITE.erl b/deps/rabbit_common/test/rabbit_env_SUITE.erl
new file mode 100644
index 0000000000..a881097e6b
--- /dev/null
+++ b/deps/rabbit_common/test/rabbit_env_SUITE.erl
@@ -0,0 +1,1098 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2019-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(rabbit_env_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-export([all/0,
+ suite/0,
+ groups/0,
+ init_per_suite/1,
+ end_per_suite/1,
+ init_per_group/2,
+ end_per_group/2,
+ init_per_testcase/2,
+ end_per_testcase/2,
+ check_data_dir/1,
+ check_default_values/1,
+ check_values_from_reachable_remote_node/1,
+ check_values_from_offline_remote_node/1,
+ check_context_to_app_env_vars/1,
+ check_context_to_code_path/1,
+ check_RABBITMQ_ADVANCED_CONFIG_FILE/1,
+ check_RABBITMQ_CONFIG_FILE/1,
+ check_RABBITMQ_CONFIG_FILES/1,
+ check_RABBITMQ_DIST_PORT/1,
+ check_RABBITMQ_ENABLED_PLUGINS/1,
+ check_RABBITMQ_ENABLED_PLUGINS_FILE/1,
+ check_RABBITMQ_FEATURE_FLAGS_FILE/1,
+ check_RABBITMQ_KEEP_PID_FILE_ON_EXIT/1,
+ check_RABBITMQ_LOG/1,
+ check_RABBITMQ_LOG_BASE/1,
+ check_RABBITMQ_LOGS/1,
+ check_RABBITMQ_MNESIA_BASE/1,
+ check_RABBITMQ_MNESIA_DIR/1,
+ check_RABBITMQ_MOTD_FILE/1,
+ check_RABBITMQ_NODE_IP_ADDRESS/1,
+ check_RABBITMQ_NODE_PORT/1,
+ check_RABBITMQ_NODENAME/1,
+ check_RABBITMQ_PID_FILE/1,
+ check_RABBITMQ_PLUGINS_DIR/1,
+ check_RABBITMQ_PLUGINS_EXPAND_DIR/1,
+ check_RABBITMQ_PRODUCT_NAME/1,
+ check_RABBITMQ_PRODUCT_VERSION/1,
+ check_RABBITMQ_QUORUM_DIR/1,
+ check_RABBITMQ_STREAM_DIR/1,
+ check_RABBITMQ_UPGRADE_LOG/1,
+ check_RABBITMQ_USE_LOGNAME/1,
+ check_value_is_yes/1,
+ check_log_process_env/1,
+ check_log_context/1,
+ check_get_used_env_vars/1,
+ check_parse_conf_env_file_output/1
+ ]).
+
+%% Every test case run by Common Test. check_RABBITMQ_STREAM_DIR/1 is
+%% exported above alongside its quorum-dir twin but was missing from this
+%% list, so it never ran; it is now included.
+all() ->
+    [
+      check_data_dir,
+      check_default_values,
+      check_values_from_reachable_remote_node,
+      check_values_from_offline_remote_node,
+      check_context_to_app_env_vars,
+      check_context_to_code_path,
+      check_RABBITMQ_ADVANCED_CONFIG_FILE,
+      check_RABBITMQ_CONFIG_FILE,
+      check_RABBITMQ_CONFIG_FILES,
+      check_RABBITMQ_DIST_PORT,
+      check_RABBITMQ_ENABLED_PLUGINS,
+      check_RABBITMQ_ENABLED_PLUGINS_FILE,
+      check_RABBITMQ_FEATURE_FLAGS_FILE,
+      check_RABBITMQ_KEEP_PID_FILE_ON_EXIT,
+      check_RABBITMQ_LOG,
+      check_RABBITMQ_LOG_BASE,
+      check_RABBITMQ_LOGS,
+      check_RABBITMQ_MNESIA_BASE,
+      check_RABBITMQ_MNESIA_DIR,
+      check_RABBITMQ_MOTD_FILE,
+      check_RABBITMQ_NODE_IP_ADDRESS,
+      check_RABBITMQ_NODE_PORT,
+      check_RABBITMQ_NODENAME,
+      check_RABBITMQ_PID_FILE,
+      check_RABBITMQ_PLUGINS_DIR,
+      check_RABBITMQ_PLUGINS_EXPAND_DIR,
+      check_RABBITMQ_PRODUCT_NAME,
+      check_RABBITMQ_PRODUCT_VERSION,
+      check_RABBITMQ_QUORUM_DIR,
+      check_RABBITMQ_STREAM_DIR,
+      check_RABBITMQ_UPGRADE_LOG,
+      check_RABBITMQ_USE_LOGNAME,
+      check_value_is_yes,
+      check_log_process_env,
+      check_log_context,
+      check_get_used_env_vars,
+      check_parse_conf_env_file_output
+    ].
+
+%% Per-testcase time limit; bounds the polling loop in
+%% wait_for_remote_node/1 among other things.
+suite() ->
+    [{timetrap, {seconds, 10}}].
+
+%% All cases may run in parallel.
+groups() ->
+    [
+      {parallel_tests, [parallel], all()}
+    ].
+
+%% Disable rabbitmq-env.conf loading for the whole suite via the
+%% persistent_term knob rabbit_env honours in TEST builds.
+init_per_suite(Config) ->
+    persistent_term:put({rabbit_env, load_conf_env_file}, false),
+    Config.
+
+end_per_suite(Config) ->
+    persistent_term:erase({rabbit_env, load_conf_env_file}),
+    Config.
+
+%% No per-group / per-testcase setup needed.
+init_per_group(_, Config) -> Config.
+end_per_group(_, Config) -> Config.
+
+init_per_testcase(_, Config) -> Config.
+end_per_testcase(_, Config) -> Config.
+
+%% data_dir must follow RABBITMQ_BASE on Windows and SYS_PREFIX on Unix;
+%% with the variable unset, some other (default) value must be used.
+%% Matching against the bound ExpValue makes ?assertMatch an equality
+%% check on just that one map key.
+check_data_dir(_) ->
+    {Variable, ExpValue} = case os:type() of
+                               {win32, _} ->
+                                   {"RABBITMQ_BASE",
+                                    "value of RABBITMQ_BASE"};
+                               {unix, _} ->
+                                   {"SYS_PREFIX",
+                                    "value of SYS_PREFIX/var/lib/rabbitmq"}
+                           end,
+    Value = "value of " ++ Variable,
+    os:putenv(Variable, Value),
+    ?assertMatch(#{data_dir := ExpValue}, rabbit_env:get_context()),
+
+    os:unsetenv(Variable),
+    ?assertNotMatch(#{data_dir := ExpValue}, rabbit_env:get_context()),
+    ?assertMatch(#{data_dir := _}, rabbit_env:get_context()),
+
+    os:unsetenv(Variable).
+
+%% With no environment variables set, rabbit_env:get_context/0 must
+%% produce the full documented default context, once with the OS type
+%% forced to Unix and once to Windows (the whole expected map is pinned
+%% with ?assertEqual, including the var_origins sub-map).
+check_default_values(_) ->
+    %% When `rabbit_env` is built with `TEST` defined, we can override
+    %% the OS type.
+    persistent_term:put({rabbit_env, os_type}, {unix, undefined}),
+    UnixContext = rabbit_env:get_context(),
+
+    persistent_term:put({rabbit_env, os_type}, {win32, undefined}),
+    SavedAppData = os:getenv("APPDATA"),
+    os:putenv("APPDATA", "%APPDATA%"),
+    Win32Context = rabbit_env:get_context(),
+    case SavedAppData of
+        false -> os:unsetenv("APPDATA");
+        _     -> os:putenv("APPDATA", SavedAppData)
+    end,
+
+    persistent_term:erase({rabbit_env, os_type}),
+
+    {RFFValue, RFFOrigin} = forced_feature_flags_on_init_expect(),
+
+    Node = get_default_nodename(),
+    NodeS = atom_to_list(Node),
+
+    Origins = #{
+      additional_config_files => default,
+      advanced_config_file => default,
+      amqp_ipaddr => default,
+      amqp_tcp_port => default,
+      conf_env_file => default,
+      enabled_plugins => default,
+      enabled_plugins_file => default,
+      erlang_dist_tcp_port => default,
+      feature_flags_file => default,
+      forced_feature_flags_on_init => RFFOrigin,
+      interactive_shell => default,
+      keep_pid_file_on_exit => default,
+      log_base_dir => default,
+      log_feature_flags_registry => default,
+      log_levels => default,
+      main_config_file => default,
+      main_log_file => default,
+      mnesia_base_dir => default,
+      mnesia_dir => default,
+      motd_file => default,
+      nodename => default,
+      nodename_type => default,
+      os_type => environment,
+      output_supports_colors => default,
+      pid_file => default,
+      plugins_expand_dir => default,
+      plugins_path => default,
+      product_name => default,
+      product_version => default,
+      quorum_queue_dir => default,
+      rabbitmq_home => default,
+      stream_queue_dir => default,
+      upgrade_log_file => default
+     },
+
+    ?assertEqual(
+       #{additional_config_files => "/etc/rabbitmq/conf.d/*.conf",
+         advanced_config_file => "/etc/rabbitmq/advanced.config",
+         amqp_ipaddr => "auto",
+         amqp_tcp_port => 5672,
+         conf_env_file => "/etc/rabbitmq/rabbitmq-env.conf",
+         config_base_dir => "/etc/rabbitmq",
+         data_dir => "/var/lib/rabbitmq",
+         dbg_mods => [],
+         dbg_output => stdout,
+         enabled_plugins => undefined,
+         enabled_plugins_file => "/etc/rabbitmq/enabled_plugins",
+         erlang_dist_tcp_port => 25672,
+         feature_flags_file =>
+           "/var/lib/rabbitmq/mnesia/" ++ NodeS ++ "-feature_flags",
+         forced_feature_flags_on_init => RFFValue,
+         interactive_shell => false,
+         keep_pid_file_on_exit => false,
+         log_base_dir => "/var/log/rabbitmq",
+         log_feature_flags_registry => false,
+         log_levels => undefined,
+         main_config_file => "/etc/rabbitmq/rabbitmq",
+         main_log_file => "/var/log/rabbitmq/" ++ NodeS ++ ".log",
+         mnesia_base_dir => "/var/lib/rabbitmq/mnesia",
+         mnesia_dir => "/var/lib/rabbitmq/mnesia/" ++ NodeS,
+         motd_file => "/etc/rabbitmq/motd",
+         nodename => Node,
+         nodename_type => shortnames,
+         os_type => {unix, undefined},
+         output_supports_colors => true,
+         pid_file => "/var/lib/rabbitmq/mnesia/" ++ NodeS ++ ".pid",
+         plugins_expand_dir =>
+           "/var/lib/rabbitmq/mnesia/" ++ NodeS ++ "-plugins-expand",
+         plugins_path => maps:get(plugins_path, UnixContext),
+         product_name => undefined,
+         product_version => undefined,
+         quorum_queue_dir =>
+           "/var/lib/rabbitmq/mnesia/" ++ NodeS ++ "/quorum",
+         rabbitmq_home => maps:get(rabbitmq_home, UnixContext),
+         stream_queue_dir =>
+           "/var/lib/rabbitmq/mnesia/" ++ NodeS ++ "/stream",
+         split_nodename => rabbit_nodes_common:parts(Node),
+         sys_prefix => "",
+         upgrade_log_file =>
+           "/var/log/rabbitmq/" ++ NodeS ++ "_upgrade.log",
+
+         var_origins => Origins#{sys_prefix => default}},
+       UnixContext),
+
+    ?assertEqual(
+       #{additional_config_files => "%APPDATA%/RabbitMQ/conf.d/*.conf",
+         advanced_config_file => "%APPDATA%/RabbitMQ/advanced.config",
+         amqp_ipaddr => "auto",
+         amqp_tcp_port => 5672,
+         conf_env_file => "%APPDATA%/RabbitMQ/rabbitmq-env-conf.bat",
+         config_base_dir => "%APPDATA%/RabbitMQ",
+         data_dir => "%APPDATA%/RabbitMQ",
+         dbg_mods => [],
+         dbg_output => stdout,
+         enabled_plugins => undefined,
+         enabled_plugins_file => "%APPDATA%/RabbitMQ/enabled_plugins",
+         erlang_dist_tcp_port => 25672,
+         feature_flags_file =>
+           "%APPDATA%/RabbitMQ/db/" ++ NodeS ++ "-feature_flags",
+         forced_feature_flags_on_init => RFFValue,
+         interactive_shell => false,
+         keep_pid_file_on_exit => false,
+         log_base_dir => "%APPDATA%/RabbitMQ/log",
+         log_feature_flags_registry => false,
+         log_levels => undefined,
+         main_config_file => "%APPDATA%/RabbitMQ/rabbitmq",
+         main_log_file => "%APPDATA%/RabbitMQ/log/" ++ NodeS ++ ".log",
+         mnesia_base_dir => "%APPDATA%/RabbitMQ/db",
+         mnesia_dir => "%APPDATA%/RabbitMQ/db/" ++ NodeS ++ "-mnesia",
+         motd_file => "%APPDATA%/RabbitMQ/motd.txt",
+         nodename => Node,
+         nodename_type => shortnames,
+         os_type => {win32, undefined},
+         output_supports_colors => false,
+         pid_file => "%APPDATA%/RabbitMQ/db/" ++ NodeS ++ ".pid",
+         plugins_expand_dir =>
+           "%APPDATA%/RabbitMQ/db/" ++ NodeS ++ "-plugins-expand",
+         plugins_path => maps:get(plugins_path, Win32Context),
+         product_name => undefined,
+         product_version => undefined,
+         quorum_queue_dir =>
+           "%APPDATA%/RabbitMQ/db/" ++ NodeS ++ "-mnesia/quorum",
+         rabbitmq_base => "%APPDATA%/RabbitMQ",
+         rabbitmq_home => maps:get(rabbitmq_home, Win32Context),
+         stream_queue_dir =>
+           "%APPDATA%/RabbitMQ/db/" ++ NodeS ++ "-mnesia/stream",
+         split_nodename => rabbit_nodes_common:parts(Node),
+         upgrade_log_file =>
+           "%APPDATA%/RabbitMQ/log/" ++ NodeS ++ "_upgrade.log",
+
+         var_origins => Origins#{rabbitmq_base => default}},
+       Win32Context).
+
+%% Expected {value, origin} for forced_feature_flags_on_init, depending on
+%% whether CI exported RABBITMQ_FEATURE_FLAGS="" (mixed-version testing).
+%% NOTE(review): a non-empty RABBITMQ_FEATURE_FLAGS value has no clause
+%% and would raise case_clause here — presumably intentional, so that an
+%% unexpected CI environment fails loudly; confirm before "fixing".
+forced_feature_flags_on_init_expect() ->
+    %% In the case of mixed-versions-cluster testing in CI, the test
+    %% sets $RABBITMQ_FEATURE_FLAGS to an empty string. This obviously
+    %% changes the context returned by rabbit_env.
+    case os:getenv("RABBITMQ_FEATURE_FLAGS") of
+        false -> {undefined, default};
+        ""    -> {[], environment}
+    end.
+
+%% Boot a real secondary Erlang node (via open_port/spawn_executable) that
+%% loads a fake 'rabbit' application with known mnesia/feature-flags/
+%% plugins settings, then verify rabbit_env:get_context(Node) picks those
+%% values up over RPC (their var_origins become 'remote_node'). The port
+%% owner runs in a linked helper so stdout is drained continuously; the
+%% remote node is halted in the 'after' clause even on failure.
+check_values_from_reachable_remote_node(Config) ->
+    PrivDir = ?config(priv_dir, Config),
+
+    MnesiaDir = filename:join(PrivDir, "mnesia"),
+    RabbitAppDir = filename:join(PrivDir, "rabbit"),
+    RabbitEbinDir = filename:join(RabbitAppDir, "ebin"),
+
+    FeatureFlagsFile = filename:join(PrivDir, "feature_flags"),
+    PluginsDir = filename:join(PrivDir, "plugins"),
+    EnabledPluginsFile = filename:join(PrivDir, "enabled_plugins"),
+
+    ok = file:make_dir(MnesiaDir),
+    ok = file:make_dir(RabbitAppDir),
+    ok = file:make_dir(RabbitEbinDir),
+
+    %% Create a fake `rabbit` application.
+    App = {application,
+           rabbit,
+           [{vsn, "fake-rabbit"}]},
+    AppFile = filename:join(RabbitEbinDir, "rabbit.app"),
+    AppContent = io_lib:format("~p.~n", [App]),
+    ok = file:write_file(AppFile, AppContent),
+
+    %% Start a fake RabbitMQ node.
+    Node = rabbit_nodes_common:make(
+             {atom_to_list(?FUNCTION_NAME), "localhost"}),
+    NodeS = atom_to_list(Node),
+    true = os:putenv("RABBITMQ_NODENAME", NodeS),
+    RabbitCommonEbinDir = filename:dirname(code:which(rabbit_env)),
+    Args = ["-noinput",
+            "-sname", atom_to_list(Node),
+            "-pa", RabbitCommonEbinDir,
+            "-pa", RabbitEbinDir,
+            "-mnesia", "dir",
+            rabbit_misc:format("~p", [MnesiaDir]),
+            "-rabbit", "feature_flags_file",
+            rabbit_misc:format("~p", [FeatureFlagsFile]),
+            "-rabbit", "plugins_dir",
+            rabbit_misc:format("~p", [PluginsDir]),
+            "-rabbit", "enabled_plugins_file",
+            rabbit_misc:format("~p", [EnabledPluginsFile]),
+            "-eval",
+            "ok = application:load(mnesia),"
+            "ok = application:load(rabbit)."],
+    PortName = {spawn_executable, os:find_executable("erl")},
+    PortSettings = [{cd, PrivDir},
+                    {args, Args},
+                    {env, [{"ERL_LIBS", false}]},
+                    {line, 512},
+                    exit_status,
+                    stderr_to_stdout],
+    ct:pal(
+      "Starting fake RabbitMQ node with the following settings:~n~p",
+      [PortSettings]),
+    Pid = spawn_link(
+            fun() ->
+                    Port = erlang:open_port(PortName, PortSettings),
+                    consume_stdout(Port, Node)
+            end),
+    wait_for_remote_node(Node),
+
+    try
+        persistent_term:put({rabbit_env, os_type}, {unix, undefined}),
+        UnixContext = rabbit_env:get_context(Node),
+
+        persistent_term:erase({rabbit_env, os_type}),
+
+        {RFFValue, RFFOrigin} = forced_feature_flags_on_init_expect(),
+
+        Origins = #{
+          additional_config_files => default,
+          advanced_config_file => default,
+          amqp_ipaddr => default,
+          amqp_tcp_port => default,
+          conf_env_file => default,
+          enabled_plugins => default,
+          enabled_plugins_file => remote_node,
+          erlang_dist_tcp_port => default,
+          feature_flags_file => remote_node,
+          forced_feature_flags_on_init => RFFOrigin,
+          interactive_shell => default,
+          keep_pid_file_on_exit => default,
+          log_base_dir => default,
+          log_feature_flags_registry => default,
+          log_levels => default,
+          main_config_file => default,
+          main_log_file => default,
+          mnesia_base_dir => default,
+          mnesia_dir => remote_node,
+          motd_file => default,
+          nodename => environment,
+          nodename_type => default,
+          os_type => environment,
+          output_supports_colors => default,
+          pid_file => default,
+          plugins_expand_dir => default,
+          plugins_path => remote_node,
+          product_name => default,
+          product_version => default,
+          quorum_queue_dir => default,
+          rabbitmq_home => default,
+          stream_queue_dir => default,
+          upgrade_log_file => default
+         },
+
+        ?assertEqual(
+           #{additional_config_files => "/etc/rabbitmq/conf.d/*.conf",
+             advanced_config_file => "/etc/rabbitmq/advanced.config",
+             amqp_ipaddr => "auto",
+             amqp_tcp_port => 5672,
+             conf_env_file => "/etc/rabbitmq/rabbitmq-env.conf",
+             config_base_dir => "/etc/rabbitmq",
+             data_dir => "/var/lib/rabbitmq",
+             dbg_mods => [],
+             dbg_output => stdout,
+             enabled_plugins => undefined,
+             enabled_plugins_file => EnabledPluginsFile,
+             erlang_dist_tcp_port => 25672,
+             feature_flags_file => FeatureFlagsFile,
+             forced_feature_flags_on_init => RFFValue,
+             from_remote_node => {Node, 10000},
+             interactive_shell => false,
+             keep_pid_file_on_exit => false,
+             log_base_dir => "/var/log/rabbitmq",
+             log_feature_flags_registry => false,
+             log_levels => undefined,
+             main_config_file => "/etc/rabbitmq/rabbitmq",
+             main_log_file => "/var/log/rabbitmq/" ++ NodeS ++ ".log",
+             mnesia_base_dir => undefined,
+             mnesia_dir => MnesiaDir,
+             motd_file => undefined,
+             nodename => Node,
+             nodename_type => shortnames,
+             os_type => {unix, undefined},
+             output_supports_colors => true,
+             pid_file => undefined,
+             plugins_expand_dir => undefined,
+             plugins_path => PluginsDir,
+             product_name => undefined,
+             product_version => undefined,
+             quorum_queue_dir => MnesiaDir ++ "/quorum",
+             rabbitmq_home => maps:get(rabbitmq_home, UnixContext),
+             stream_queue_dir => MnesiaDir ++ "/stream",
+             split_nodename => rabbit_nodes_common:parts(Node),
+             sys_prefix => "",
+             upgrade_log_file =>
+               "/var/log/rabbitmq/" ++ NodeS ++ "_upgrade.log",
+
+             var_origins => Origins#{sys_prefix => default}},
+           UnixContext)
+    after
+        os:unsetenv("RABBITMQ_NODENAME"),
+        unlink(Pid),
+        rpc:call(Node, erlang, halt, [])
+    end.
+
+%% Drain the fake node's stdout (logging each chunk) until the port
+%% closes; a non-zero exit status fails the test.
+consume_stdout(Port, Nodename) ->
+    receive
+        {Port, {exit_status, X}} ->
+            ?assertEqual(0, X);
+        {Port, {data, Out}} ->
+            ct:pal("stdout: ~p", [Out]),
+            consume_stdout(Port, Nodename)
+    end.
+
+%% Poll every 200 ms until the node answers a net_adm ping. Unbounded on
+%% its own; the suite's 10-second timetrap is what terminates it if the
+%% node never comes up.
+wait_for_remote_node(Nodename) ->
+    case net_adm:ping(Nodename) of
+        pong -> ok;
+        pang -> timer:sleep(200),
+                wait_for_remote_node(Nodename)
+    end.
+
+%% With get_context(offline), every remote-node-sourced value must come
+%% back 'undefined' (and quorum/stream dirs, derived from mnesia_dir,
+%% likewise), while purely local defaults are unchanged.
+check_values_from_offline_remote_node(_) ->
+    Node = rabbit_nodes_common:make(
+             {atom_to_list(?FUNCTION_NAME), "localhost"}),
+    NodeS = atom_to_list(Node),
+    true = os:putenv("RABBITMQ_NODENAME", NodeS),
+
+    persistent_term:put({rabbit_env, os_type}, {unix, undefined}),
+    UnixContext = rabbit_env:get_context(offline),
+
+    persistent_term:erase({rabbit_env, os_type}),
+    os:unsetenv("RABBITMQ_NODENAME"),
+
+    {RFFValue, RFFOrigin} = forced_feature_flags_on_init_expect(),
+
+    Origins = #{
+      additional_config_files => default,
+      advanced_config_file => default,
+      amqp_ipaddr => default,
+      amqp_tcp_port => default,
+      conf_env_file => default,
+      enabled_plugins => default,
+      enabled_plugins_file => default,
+      erlang_dist_tcp_port => default,
+      feature_flags_file => default,
+      forced_feature_flags_on_init => RFFOrigin,
+      interactive_shell => default,
+      keep_pid_file_on_exit => default,
+      log_base_dir => default,
+      log_feature_flags_registry => default,
+      log_levels => default,
+      main_config_file => default,
+      main_log_file => default,
+      mnesia_base_dir => default,
+      mnesia_dir => default,
+      motd_file => default,
+      nodename => environment,
+      nodename_type => default,
+      os_type => environment,
+      output_supports_colors => default,
+      pid_file => default,
+      plugins_expand_dir => default,
+      plugins_path => default,
+      product_name => default,
+      product_version => default,
+      quorum_queue_dir => default,
+      rabbitmq_home => default,
+      stream_queue_dir => default,
+      upgrade_log_file => default
+     },
+
+    ?assertEqual(
+       #{additional_config_files => "/etc/rabbitmq/conf.d/*.conf",
+         advanced_config_file => "/etc/rabbitmq/advanced.config",
+         amqp_ipaddr => "auto",
+         amqp_tcp_port => 5672,
+         conf_env_file => "/etc/rabbitmq/rabbitmq-env.conf",
+         config_base_dir => "/etc/rabbitmq",
+         data_dir => "/var/lib/rabbitmq",
+         dbg_mods => [],
+         dbg_output => stdout,
+         enabled_plugins => undefined,
+         enabled_plugins_file => undefined,
+         erlang_dist_tcp_port => 25672,
+         feature_flags_file => undefined,
+         forced_feature_flags_on_init => RFFValue,
+         from_remote_node => offline,
+         interactive_shell => false,
+         keep_pid_file_on_exit => false,
+         log_base_dir => "/var/log/rabbitmq",
+         log_feature_flags_registry => false,
+         log_levels => undefined,
+         main_config_file => "/etc/rabbitmq/rabbitmq",
+         main_log_file => "/var/log/rabbitmq/" ++ NodeS ++ ".log",
+         mnesia_base_dir => undefined,
+         mnesia_dir => undefined,
+         motd_file => undefined,
+         nodename => Node,
+         nodename_type => shortnames,
+         os_type => {unix, undefined},
+         output_supports_colors => true,
+         pid_file => undefined,
+         plugins_expand_dir => undefined,
+         plugins_path => undefined,
+         product_name => undefined,
+         product_version => undefined,
+         quorum_queue_dir => undefined,
+         rabbitmq_home => maps:get(rabbitmq_home, UnixContext),
+         stream_queue_dir => undefined,
+         split_nodename => rabbit_nodes_common:parts(Node),
+         sys_prefix => "",
+         upgrade_log_file =>
+           "/var/log/rabbitmq/" ++ NodeS ++ "_upgrade.log",
+
+         var_origins => Origins#{sys_prefix => default}},
+       UnixContext).
+
+%% context_to_app_env_vars/1 (and its no-logging variant) must copy the
+%% context's directory settings into the mnesia/ra/osiris/rabbit app
+%% environments; each Var entry is (App, Param, expected value).
+check_context_to_app_env_vars(_) ->
+    %% When `rabbit_env` is built with `TEST` defined, we can override
+    %% the OS type.
+    persistent_term:put({rabbit_env, os_type}, {unix, undefined}),
+    UnixContext = rabbit_env:get_context(),
+
+    persistent_term:erase({rabbit_env, os_type}),
+
+    Vars = [{mnesia, dir, maps:get(mnesia_dir, UnixContext)},
+            {ra, data_dir, maps:get(quorum_queue_dir, UnixContext)},
+            {osiris, data_dir, maps:get(stream_queue_dir, UnixContext)},
+            {rabbit, feature_flags_file,
+             maps:get(feature_flags_file, UnixContext)},
+            {rabbit, plugins_dir, maps:get(plugins_path, UnixContext)},
+            {rabbit, plugins_expand_dir,
+             maps:get(plugins_expand_dir, UnixContext)},
+            {rabbit, enabled_plugins_file,
+             maps:get(enabled_plugins_file, UnixContext)}],
+
+    %% Precondition: none of the app env keys are set yet.
+    lists:foreach(
+      fun({App, Param, _}) ->
+              ?assertEqual(undefined, application:get_env(App, Param))
+      end,
+      Vars),
+
+    rabbit_env:context_to_app_env_vars(UnixContext),
+    lists:foreach(
+      fun({App, Param, Value}) ->
+              ?assertEqual({ok, Value}, application:get_env(App, Param))
+      end,
+      Vars),
+
+    %% Reset, then check the no-logging variant does the same thing.
+    lists:foreach(
+      fun({App, Param, _}) ->
+              application:unset_env(App, Param),
+              ?assertEqual(undefined, application:get_env(App, Param))
+      end,
+      Vars),
+
+    rabbit_env:context_to_app_env_vars_no_logging(UnixContext),
+    lists:foreach(
+      fun({App, Param, Value}) ->
+              ?assertEqual({ok, Value}, application:get_env(App, Param))
+      end,
+      Vars).
+
+%% context_to_code_path/1 must add each plugin's ebin dir to the code
+%% path, in RABBITMQ_PLUGINS_DIR order, for both the ':'-separated Unix
+%% path syntax and the ';'-separated Windows one. Added dirs are removed
+%% again at the end so other cases see a clean code path.
+check_context_to_code_path(Config) ->
+    PrivDir = ?config(priv_dir, Config),
+    PluginsDir1 = filename:join(
+                    PrivDir, rabbit_misc:format("~s-1", [?FUNCTION_NAME])),
+    MyPlugin1Dir = filename:join(PluginsDir1, "my_plugin1"),
+    MyPlugin1EbinDir = filename:join(MyPlugin1Dir, "ebin"),
+    PluginsDir2 = filename:join(
+                    PrivDir, rabbit_misc:format("~s-2", [?FUNCTION_NAME])),
+    MyPlugin2Dir = filename:join(PluginsDir2, "my_plugin2"),
+    MyPlugin2EbinDir = filename:join(MyPlugin2Dir, "ebin"),
+
+    ok = file:make_dir(PluginsDir1),
+    ok = file:make_dir(MyPlugin1Dir),
+    ok = file:make_dir(MyPlugin1EbinDir),
+    ok = file:make_dir(PluginsDir2),
+    ok = file:make_dir(MyPlugin2Dir),
+    ok = file:make_dir(MyPlugin2EbinDir),
+
+    %% On Unix.
+    %%
+    %% We can't test the Unix codepath on Windows because the drive letter
+    %% separator conflicts with the path separator (they are both ':').
+    %% However, the Windows codepath can be tested on both Unix and Windows.
+    case os:type() of
+        {unix, _} ->
+            UnixPluginsPath = PluginsDir1 ++ ":" ++ PluginsDir2,
+            true = os:putenv("RABBITMQ_PLUGINS_DIR", UnixPluginsPath),
+            persistent_term:put({rabbit_env, os_type}, {unix, undefined}),
+            UnixContext = rabbit_env:get_context(),
+
+            persistent_term:erase({rabbit_env, os_type}),
+            os:unsetenv("RABBITMQ_PLUGINS_DIR"),
+
+            ?assertEqual(UnixPluginsPath, maps:get(plugins_path, UnixContext)),
+
+            OldCodePath1 = code:get_path(),
+            ?assertNot(lists:member(MyPlugin1EbinDir, OldCodePath1)),
+            ?assertNot(lists:member(MyPlugin2EbinDir, OldCodePath1)),
+
+            rabbit_env:context_to_code_path(UnixContext),
+
+            NewCodePath1 = code:get_path(),
+            ?assert(lists:member(MyPlugin1EbinDir, NewCodePath1)),
+            ?assert(lists:member(MyPlugin2EbinDir, NewCodePath1)),
+            ?assertEqual(
+               [MyPlugin1EbinDir, MyPlugin2EbinDir],
+               lists:filter(
+                 fun(Dir) ->
+                         Dir =:= MyPlugin1EbinDir orelse
+                         Dir =:= MyPlugin2EbinDir
+                 end, NewCodePath1)),
+
+            true = code:del_path(MyPlugin1EbinDir),
+            true = code:del_path(MyPlugin2EbinDir);
+        _ ->
+            ok
+    end,
+
+    %% On Windows.
+    Win32PluginsPath = PluginsDir1 ++ ";" ++ PluginsDir2,
+    true = os:putenv("RABBITMQ_PLUGINS_DIR", Win32PluginsPath),
+    persistent_term:put({rabbit_env, os_type}, {win32, undefined}),
+    Win32Context = rabbit_env:get_context(),
+
+    persistent_term:erase({rabbit_env, os_type}),
+    os:unsetenv("RABBITMQ_PLUGINS_DIR"),
+
+    ?assertEqual(Win32PluginsPath, maps:get(plugins_path, Win32Context)),
+
+    OldCodePath2 = code:get_path(),
+    ?assertNot(lists:member(MyPlugin1EbinDir, OldCodePath2)),
+    ?assertNot(lists:member(MyPlugin2EbinDir, OldCodePath2)),
+
+    rabbit_env:context_to_code_path(Win32Context),
+
+    NewCodePath2 = code:get_path(),
+    ?assert(lists:member(MyPlugin1EbinDir, NewCodePath2)),
+    ?assert(lists:member(MyPlugin2EbinDir, NewCodePath2)),
+    ?assertEqual(
+       [MyPlugin1EbinDir, MyPlugin2EbinDir],
+       lists:filter(
+         fun(Dir) ->
+                 Dir =:= MyPlugin1EbinDir orelse
+                 Dir =:= MyPlugin2EbinDir
+         end, NewCodePath2)),
+
+    true = code:del_path(MyPlugin1EbinDir),
+    true = code:del_path(MyPlugin2EbinDir).
+
+%% ---------------------------------------------------------------------
+%% Per-variable checks.
+%%
+%% Each testcase below verifies that one environment variable is picked
+%% up by rabbit_env:get_context/0 and mapped to the expected context key,
+%% both in its prefixed (RABBITMQ_*) and unprefixed form — see
+%% check_prefixed_variable/7. A default of '_' means "no fixed default to
+%% compare against"; see that helper for the exact semantics.
+%% ---------------------------------------------------------------------
+check_RABBITMQ_ADVANCED_CONFIG_FILE(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_ADVANCED_CONFIG_FILE",
+ advanced_config_file,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_CONFIG_FILE(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_CONFIG_FILE",
+ main_config_file,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_CONFIG_FILES(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_CONFIG_FILES",
+ additional_config_files,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_DIST_PORT(_) ->
+ Value1 = random_int(),
+ Value2 = random_int(),
+ check_prefixed_variable("RABBITMQ_DIST_PORT",
+ erlang_dist_tcp_port,
+ 25672,
+ integer_to_list(Value1), Value1,
+ integer_to_list(Value2), Value2).
+
+%% A comma-separated plugin list is parsed into a list of atoms; an empty
+%% string yields an empty list.
+check_RABBITMQ_ENABLED_PLUGINS(_) ->
+ Value1 = [random_atom(), random_atom()],
+ Value2 = [random_atom(), random_atom()],
+ check_prefixed_variable("RABBITMQ_ENABLED_PLUGINS",
+ enabled_plugins,
+ '_',
+ "", [],
+ "", []),
+ %% Note: Value1/Value2 are already two-element lists, so they are passed
+ %% directly as the argument list of format("~s,~s", ...).
+ check_prefixed_variable("RABBITMQ_ENABLED_PLUGINS",
+ enabled_plugins,
+ '_',
+ rabbit_misc:format("~s,~s", Value1), Value1,
+ rabbit_misc:format("~s,~s", Value2), Value2).
+
+check_RABBITMQ_ENABLED_PLUGINS_FILE(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_ENABLED_PLUGINS_FILE",
+ enabled_plugins_file,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+%% This variable has no unprefixed fallback, hence check_variable/4.
+check_RABBITMQ_FEATURE_FLAGS_FILE(_) ->
+ Value1 = random_string(),
+ check_variable("RABBITMQ_FEATURE_FLAGS_FILE",
+ feature_flags_file,
+ Value1, Value1).
+
+check_RABBITMQ_KEEP_PID_FILE_ON_EXIT(_) ->
+ Value1 = true,
+ Value2 = false,
+ check_prefixed_variable("RABBITMQ_KEEP_PID_FILE_ON_EXIT",
+ keep_pid_file_on_exit,
+ false,
+ atom_to_list(Value1), Value1,
+ atom_to_list(Value2), Value2).
+
+%% RABBITMQ_LOG parses a comma-separated list of "[category][=level]"
+%% items plus "+color"/"-color" flags into a map of log levels. A bare
+%% level name sets the `global' level; a bare category name defaults that
+%% category to `info'.
+check_RABBITMQ_LOG(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ %% A bare level name applies globally.
+ check_prefixed_variable("RABBITMQ_LOG",
+ log_levels,
+ '_',
+ "critical", #{global => critical},
+ "emergency", #{global => emergency}),
+ %% A bare category name defaults to `info'.
+ check_prefixed_variable("RABBITMQ_LOG",
+ log_levels,
+ '_',
+ Value1, #{Value1 => info},
+ Value2, #{Value2 => info}),
+ %% Category plus a global level.
+ check_prefixed_variable("RABBITMQ_LOG",
+ log_levels,
+ '_',
+ Value1 ++ ",none", #{global => none,
+ Value1 => info},
+ Value2 ++ ",none", #{global => none,
+ Value2 => info}),
+ %% Explicit "category=level".
+ check_prefixed_variable("RABBITMQ_LOG",
+ log_levels,
+ '_',
+ Value1 ++ "=debug", #{Value1 => debug},
+ Value2 ++ "=info", #{Value2 => info}),
+ %% "+color"/"-color" toggles the `color' flag.
+ check_prefixed_variable("RABBITMQ_LOG",
+ log_levels,
+ '_',
+ Value1 ++ ",-color", #{Value1 => info,
+ color => false},
+ Value2 ++ ",+color", #{Value2 => info,
+ color => true}),
+ check_prefixed_variable("RABBITMQ_LOG",
+ log_levels,
+ '_',
+ Value1 ++ "=notice,-color", #{Value1 => notice,
+ color => false},
+ Value2 ++ "=warning,+color", #{Value2 => warning,
+ color => true}),
+ %% Two categories in a single value.
+ check_prefixed_variable("RABBITMQ_LOG",
+ log_levels,
+ '_',
+ Value1 ++ "=error," ++ Value2, #{Value1 => error,
+ Value2 => info},
+ Value2 ++ "=alert," ++ Value1, #{Value1 => info,
+ Value2 => alert}).
+
+%% --- More straightforward string/int-valued variables (same pattern as
+%% --- the checks above): variable -> context key, no parsing involved
+%% --- beyond integer conversion for port numbers.
+check_RABBITMQ_LOG_BASE(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_LOG_BASE",
+ log_base_dir,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_LOGS(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_LOGS",
+ main_log_file,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_UPGRADE_LOG(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_UPGRADE_LOG",
+ upgrade_log_file,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_MNESIA_BASE(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_MNESIA_BASE",
+ mnesia_base_dir,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_MNESIA_DIR(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_MNESIA_DIR",
+ mnesia_dir,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_NODE_IP_ADDRESS(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_NODE_IP_ADDRESS",
+ amqp_ipaddr,
+ "auto",
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_NODE_PORT(_) ->
+ Value1 = random_int(),
+ Value2 = random_int(),
+ check_prefixed_variable("RABBITMQ_NODE_PORT",
+ amqp_tcp_port,
+ 5672,
+ integer_to_list(Value1), Value1,
+ integer_to_list(Value2), Value2).
+
+%% RABBITMQ_NODENAME: a short name (no '@') gets the default hostname
+%% appended; a fully-qualified "name@host" value is used verbatim.
+check_RABBITMQ_NODENAME(_) ->
+ DefaultNodename = get_default_nodename(),
+ {_, DefaultHostname} = rabbit_nodes_common:parts(DefaultNodename),
+
+ %% Short names: the default hostname is appended.
+ Value1 = random_atom(),
+ Value2 = random_atom(),
+ check_prefixed_variable("RABBITMQ_NODENAME",
+ nodename,
+ DefaultNodename,
+ atom_to_list(Value1),
+ list_to_atom(
+ atom_to_list(Value1) ++ "@" ++ DefaultHostname),
+ atom_to_list(Value2),
+ list_to_atom(
+ atom_to_list(Value2) ++ "@" ++ DefaultHostname)),
+
+ %% Fully-qualified names: used as-is.
+ Value3 = list_to_atom(random_string() ++ "@" ++ random_string()),
+ Value4 = list_to_atom(random_string() ++ "@" ++ random_string()),
+ check_prefixed_variable("RABBITMQ_NODENAME",
+ nodename,
+ DefaultNodename,
+ atom_to_list(Value3), Value3,
+ atom_to_list(Value4), Value4).
+
+%% --- Remaining simple string-valued variables, same pattern as above.
+check_RABBITMQ_PID_FILE(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_PID_FILE",
+ pid_file,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_PLUGINS_DIR(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_PLUGINS_DIR",
+ plugins_path,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_PLUGINS_EXPAND_DIR(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_PLUGINS_EXPAND_DIR",
+ plugins_expand_dir,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_PRODUCT_NAME(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_PRODUCT_NAME",
+ product_name,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_PRODUCT_VERSION(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_PRODUCT_VERSION",
+ product_version,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_MOTD_FILE(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_MOTD_FILE",
+ motd_file,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_QUORUM_DIR(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_QUORUM_DIR",
+ quorum_queue_dir,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+check_RABBITMQ_STREAM_DIR(_) ->
+ Value1 = random_string(),
+ Value2 = random_string(),
+ check_prefixed_variable("RABBITMQ_STREAM_DIR",
+ stream_queue_dir,
+ '_',
+ Value1, Value1,
+ Value2, Value2).
+
+%% NOTE(review): the function name reads "LOGNAME" while the variable it
+%% checks is RABBITMQ_USE_LONGNAME — this looks like a typo in the
+%% testcase name. Confirm against the all()/groups() lists before
+%% renaming, since CT dispatches testcases by function name.
+check_RABBITMQ_USE_LOGNAME(_) ->
+ check_prefixed_variable("RABBITMQ_USE_LONGNAME",
+ nodename_type,
+ shortnames,
+ "true", longnames,
+ "false", shortnames).
+
+%% rabbit_env:value_is_yes/1 accepts "1"/"yes"/"true" as truthy and
+%% rejects everything else, including "0"/"no"/"false" and arbitrary
+%% strings.
+check_value_is_yes(_) ->
+    Truthy = ["1", "yes", "true"],
+    Falsy = ["0", "no", "false", random_string() ++ "."],
+    [?assert(rabbit_env:value_is_yes(Value)) || Value <- Truthy],
+    [?assertNot(rabbit_env:value_is_yes(Value)) || Value <- Falsy],
+    ok.
+
+%% Smoke test: logging the process environment must not crash.
+check_log_process_env(_) ->
+ ok = rabbit_env:log_process_env().
+
+%% Smoke test: logging a freshly-computed context must not crash.
+check_log_context(_) ->
+ Context = rabbit_env:get_context(),
+ ok = rabbit_env:log_context(Context).
+
+%% get_used_env_vars/0 returns only variables RabbitMQ actually consults
+%% (RABBITMQ_*-prefixed and known unprefixed ones), not unrelated ones
+%% such as HOME or PATH.
+%% NOTE(review): pre-existing RABBITMQ_LOGS/CONFIG_FILE values are not
+%% saved and restored; the final unsetenv may clobber an outer setting.
+check_get_used_env_vars(_) ->
+ os:putenv("RABBITMQ_LOGS", "-"),
+ os:putenv("CONFIG_FILE", "filename"),
+ Vars = rabbit_env:get_used_env_vars(),
+ ?assert(lists:keymember("RABBITMQ_LOGS", 1, Vars)),
+ ?assert(lists:keymember("CONFIG_FILE", 1, Vars)),
+ ?assertNot(lists:keymember("HOME", 1, Vars)),
+ ?assertNot(lists:keymember("PATH", 1, Vars)),
+ os:unsetenv("RABBITMQ_LOGS"),
+ os:unsetenv("CONFIG_FILE").
+
+%% Sets Variable, asserts the context maps Key to Comparison, then unsets
+%% it and asserts Key still exists but no longer carries that value.
+check_variable(Variable, Key, ValueToSet, Comparison) ->
+ os:putenv(Variable, ValueToSet),
+ ?assertMatch(#{Key := Comparison}, rabbit_env:get_context()),
+
+ os:unsetenv(Variable),
+ Context = rabbit_env:get_context(),
+ ?assertNotMatch(#{Key := Comparison}, Context),
+ ?assertMatch(#{Key := _}, Context).
+
+%% Like check_variable/4, but exercises the prefixed/unprefixed pair:
+%% 1. only the unprefixed variable set -> Comparison1 is picked up;
+%% 2. both set -> the RABBITMQ_-prefixed one (Comparison2) wins;
+%% 3. both unset -> Key falls back to DefaultValue, where the sentinel
+%%    '_' means "no fixed default: just assert Key exists and differs
+%%    from both previously-set values".
+check_prefixed_variable("RABBITMQ_" ++ Variable = PrefixedVariable,
+ Key,
+ DefaultValue,
+ Value1ToSet, Comparison1,
+ Value2ToSet, Comparison2) ->
+ os:putenv(Variable, Value1ToSet),
+ os:unsetenv(PrefixedVariable),
+ ?assertMatch(#{Key := Comparison1}, rabbit_env:get_context()),
+
+ os:putenv(PrefixedVariable, Value2ToSet),
+ ?assertMatch(#{Key := Comparison2}, rabbit_env:get_context()),
+
+ os:unsetenv(Variable),
+ os:unsetenv(PrefixedVariable),
+ Context = rabbit_env:get_context(),
+ case DefaultValue of
+ '_' ->
+ ?assertNotMatch(#{Key := Comparison1}, Context),
+ ?assertNotMatch(#{Key := Comparison2}, Context),
+ ?assertMatch(#{Key := _}, Context);
+ _ ->
+ ?assertMatch(#{Key := DefaultValue}, Context)
+ end.
+
+%% Test-data generators.
+%%
+%% random_string/0 appends a positive unique integer so that two values
+%% generated in the same testcase can never collide. Several checks
+%% (e.g. check_RABBITMQ_LOG/1) build maps keyed by two generated values
+%% and would be flaky if rand:uniform/1 returned the same number twice
+%% (probability ~1/50000 per pair with the previous implementation).
+random_int() -> rand:uniform(50000).
+random_string() ->
+    integer_to_list(random_int())
+    ++ integer_to_list(erlang:unique_integer([positive])).
+random_atom() -> list_to_atom(random_string()).
+
+%% Derives the default RabbitMQ node name by replacing the local part of
+%% the current common_test node name with "rabbit", keeping the host part
+%% untouched (e.g. 'ct@myhost' -> 'rabbit@myhost').
+get_default_nodename() ->
+    list_to_atom(
+      re:replace(
+        atom_to_list(node()),
+        "^[^@]+@(.*)$",
+        "rabbit@\\1",
+        [{return, list}])).
+
+%% parse_conf_env_file_output2/2 turns the "KEY=VALUE" lines produced by
+%% sourcing rabbitmq-env.conf into a map, stripping shell quoting.
+check_parse_conf_env_file_output(_) ->
+ %% Empty input yields an empty map.
+ ?assertEqual(
+ #{},
+ rabbit_env:parse_conf_env_file_output2(
+ [],
+ #{}
+ )),
+ %% Unquoted, single-quoted, double-quoted and $'...' values are all
+ %% unwrapped to their plain string contents.
+ ?assertEqual(
+ #{"UNQUOTED" => "a",
+ "SINGLE_QUOTED" => "b",
+ "DOUBLE_QUOTED" => "c",
+ "SINGLE_DOLLAR" => "d"},
+ rabbit_env:parse_conf_env_file_output2(
+ ["UNQUOTED=a",
+ "SINGLE_QUOTED='b'",
+ "DOUBLE_QUOTED=\"c\"",
+ "SINGLE_DOLLAR=$'d'"],
+ #{}
+ )),
+ %% A value spanning several lines (opened quote) is reassembled,
+ %% including the embedded newline and nested quoting.
+ ?assertEqual(
+ #{"A" => "a",
+ "B" => "b",
+ "MULTI_LINE" => "\n'foobar'"},
+ rabbit_env:parse_conf_env_file_output2(
+ ["A=a",
+ "MULTI_LINE='",
+ "'\"'\"'foobar'\"'\"",
+ "B=b"],
+ #{}
+ )).
diff --git a/deps/rabbit_common/test/supervisor2_SUITE.erl b/deps/rabbit_common/test/supervisor2_SUITE.erl
new file mode 100644
index 0000000000..7b89363999
--- /dev/null
+++ b/deps/rabbit_common/test/supervisor2_SUITE.erl
@@ -0,0 +1,128 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+%% This suite doubles as its own supervisor2 callback module (see init/1
+%% below), which is why it declares the supervisor2 behaviour.
+-module(supervisor2_SUITE).
+
+-behaviour(supervisor2).
+
+-include_lib("common_test/include/ct.hrl").
+
+%% export_all keeps CT testcases and supervisor callbacks exported
+%% without maintaining an explicit export list in this test module.
+-compile(export_all).
+
+all() -> [intrinsic, delayed_restart].
+
+%% An `intrinsic' child that exits with reason `shutdown' must take the
+%% whole supervisor down. We trap exits to observe the supervisor's own
+%% 'EXIT' message after the child terminates.
+intrinsic(_Config) ->
+ false = process_flag(trap_exit, true),
+ Intensity = 5,
+ Args = {one_for_one, intrinsic, Intensity},
+ {passed, SupPid} = with_sup(Args, fun test_supervisor_intrinsic/1),
+ receive
+ {'EXIT', SupPid, shutdown} -> ok
+ end,
+ false = is_process_alive(SupPid).
+
+%% A `{permanent, Delay}' child whose restart intensity is exceeded must
+%% be restarted only after Delay seconds (supervisor2's delayed-restart
+%% extension), for both simple_one_for_one and one_for_one strategies.
+delayed_restart(_Config) ->
+ DelayInSeconds = 1,
+ Intensity = 1,
+ Args0 = {simple_one_for_one, {permanent, DelayInSeconds}, Intensity},
+ F = fun(SupPid) ->
+ {ok, _ChildPid} =
+ supervisor2:start_child(SupPid, []),
+ test_supervisor_delayed_restart(SupPid)
+ end,
+ {passed, _} = with_sup(Args0, F),
+
+ Args1 = {one_for_one, {permanent, DelayInSeconds}, Intensity},
+ {passed, _} = with_sup(Args1, fun test_supervisor_delayed_restart/1).
+
+%% Abnormal and {shutdown, restart} exits restart an intrinsic child;
+%% a plain `shutdown' exit does not (the caller then checks that the
+%% supervisor itself terminated). The sleeps give the supervisor time to
+%% process the exit before we probe the child again.
+test_supervisor_intrinsic(SupPid) ->
+ ok = ping_child(SupPid),
+
+ ok = exit_child(SupPid, abnormal),
+ ok = timer:sleep(100),
+ ok = ping_child(SupPid),
+
+ ok = exit_child(SupPid, {shutdown, restart}),
+ ok = timer:sleep(100),
+ ok = ping_child(SupPid),
+
+ ok = exit_child(SupPid, shutdown),
+ ok = timer:sleep(100),
+ passed.
+
+%% With intensity 1, the second abnormal exit within the period triggers
+%% the delayed restart: the child is absent while the delay (1s) runs
+%% (ping times out), then comes back once the delay has elapsed.
+test_supervisor_delayed_restart(SupPid) ->
+ ok = ping_child(SupPid),
+
+ ok = exit_child(SupPid, abnormal),
+ ok = timer:sleep(100),
+ ok = ping_child(SupPid),
+
+ ok = exit_child(SupPid, abnormal),
+ ok = timer:sleep(100),
+ timeout = ping_child(SupPid),
+
+ ok = timer:sleep(1010),
+ ok = ping_child(SupPid),
+ passed.
+
+%% Starts a supervisor2 instance configured via init/1 below, runs the
+%% given test fun against it, then unlinks so the caller can observe (or
+%% ignore) the supervisor's fate without being taken down with it.
+%% Returns {TestResult, SupPid}.
+with_sup({Strategy, Restart, Intensity}, TestFun) ->
+    {ok, Sup} = supervisor2:start_link(?MODULE, [Strategy, Restart, Intensity]),
+    Result = TestFun(Sup),
+    true = unlink(Sup),
+    {Result, Sup}.
+
+%% supervisor2 callback: a single worker child whose restart spec is the
+%% value under test (`intrinsic' or `{permanent, Delay}', a supervisor2
+%% extension). The period is 1 second, so Intensity restarts per second
+%% are tolerated; 16#ffffffff is effectively an unbounded shutdown wait.
+init([RestartStrategy, Restart, Intensity]) ->
+ SupFlags = #{
+ strategy => RestartStrategy,
+ intensity => Intensity,
+ period => 1
+ },
+ ChildSpec = #{
+ id => test,
+ start => {?MODULE, start_child, []},
+ restart => Restart,
+ shutdown => 16#ffffffff,
+ type => worker,
+ modules => [?MODULE]
+ },
+ {ok, {SupFlags, [ChildSpec]}}.
+
+%% Child start function referenced by the child spec in init/1.
+start_child() ->
+ {ok, proc_lib:spawn_link(fun run_child/0)}.
+
+%% Round-trips a {ping, Ref, Self} message to the current child, if any.
+%% Returns ok on a reply within 1s, timeout otherwise (also the outcome
+%% when no live child exists, since nothing is pinged).
+ping_child(SupPid) ->
+ Ref = make_ref(),
+ F = fun(ChildPid) ->
+ ChildPid ! {ping, Ref, self()}
+ end,
+ with_child_pid(SupPid, F),
+ receive
+ {pong, Ref} -> ok
+ after 1000 -> timeout
+ end.
+
+%% Sends the given exit signal to the current child, if any.
+exit_child(SupPid, ExitType) ->
+ F = fun(ChildPid) ->
+ exit(ChildPid, ExitType)
+ end,
+ with_child_pid(SupPid, F),
+ ok.
+
+%% Applies Fun to the child's pid only when the supervisor reports a live
+%% pid; `undefined'/`restarting' placeholders and an empty child list are
+%% silently ignored.
+with_child_pid(SupPid, Fun) ->
+ case supervisor2:which_children(SupPid) of
+ [{_Id, undefined, worker, [?MODULE]}] -> ok;
+ [{_Id, restarting, worker, [?MODULE]}] -> ok;
+ [{_Id, ChildPid, worker, [?MODULE]}] -> Fun(ChildPid);
+ [] -> ok
+ end.
+
+%% Minimal worker loop: answers every {ping, Ref, Pid} with {pong, Ref}
+%% and keeps looping until killed or shut down by the supervisor.
+run_child() ->
+    receive
+        {ping, Ref, From} ->
+            From ! {pong, Ref},
+            run_child()
+    end.
diff --git a/deps/rabbit_common/test/unit_SUITE.erl b/deps/rabbit_common/test/unit_SUITE.erl
new file mode 100644
index 0000000000..925155211f
--- /dev/null
+++ b/deps/rabbit_common/test/unit_SUITE.erl
@@ -0,0 +1,446 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2016-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("proper/include/proper.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-include("rabbit_memory.hrl").
+-include("rabbit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ {group, parallel_tests},
+ {group, parse_mem_limit},
+ {group, gen_server2}
+ ].
+
+%% All three groups run their testcases in parallel; the gen_server2
+%% group's cases each manage their own stats-interval state (see
+%% end_per_testcase/2 below).
+groups() ->
+ [
+ {parallel_tests, [parallel], [
+ data_coercion_to_proplist,
+ data_coercion_to_list,
+ data_coercion_to_map,
+ pget,
+ encrypt_decrypt,
+ encrypt_decrypt_term,
+ version_equivalence,
+ pid_decompose_compose,
+ platform_and_version,
+ frame_encoding_does_not_fail_with_empty_binary_payload,
+ amqp_table_conversion,
+ name_type,
+ get_erl_path
+ ]},
+ {parse_mem_limit, [parallel], [
+ parse_mem_limit_relative_exactly_max,
+ parse_mem_relative_above_max,
+ parse_mem_relative_integer,
+ parse_mem_relative_invalid
+ ]},
+ {gen_server2, [parallel], [
+ stats_timer_is_working,
+ stats_timer_writes_gen_server2_metrics_if_core_metrics_ets_exists,
+ stop_stats_timer_on_hibernation,
+ stop_stats_timer_on_backoff,
+ stop_stats_timer_on_backoff_when_backoff_less_than_stats_timeout,
+ gen_server2_stop
+ ]}
+ ].
+
+%% No shared fixtures: setup is per-testcase where needed.
+init_per_group(_, Config) -> Config.
+end_per_group(_, Config) -> Config.
+
+init_per_testcase(_, Config) -> Config.
+
+%% Undo per-testcase global state. Every gen_server2 stats testcase
+%% shares the same cleanup (resetting the statistics interval); the
+%% core-metrics testcase additionally tears down the metrics ETS tables
+%% it created.
+end_per_testcase(stats_timer_writes_gen_server2_metrics_if_core_metrics_ets_exists,
+                 Config) ->
+    rabbit_core_metrics:terminate(),
+    reset_stats_interval(),
+    Config;
+end_per_testcase(Testcase, Config)
+  when Testcase =:= stats_timer_is_working;
+       Testcase =:= stop_stats_timer_on_hibernation;
+       Testcase =:= stop_stats_timer_on_backoff;
+       Testcase =:= stop_stats_timer_on_backoff_when_backoff_less_than_stats_timeout ->
+    reset_stats_interval(),
+    Config;
+end_per_testcase(_, Config) -> Config.
+
+%% The stats timer must emit once per configured interval without any
+%% external trigger: after ~4 intervals we expect exactly 4 emissions.
+stats_timer_is_working(_) ->
+ StatsInterval = 300,
+ set_stats_interval(StatsInterval),
+
+ {ok, TestServer} = gen_server2_test_server:start_link(count_stats),
+ %% Start the emission
+ %% (kept for reference: emission starts on its own, no manual kick
+ %% is needed)
+ % TestServer ! emit_gen_server2_stats,
+
+ timer:sleep(StatsInterval * 4 + 100),
+ StatsCount = gen_server2_test_server:stats_count(TestServer),
+ ?assertEqual(4, StatsCount).
+
+%% When the core-metrics ETS tables exist, emitted gen_server2 stats are
+%% the server's message-buffer length. Casts queued while the server
+%% sleeps should be visible as a buffer length of 4 at the next emission.
+stats_timer_writes_gen_server2_metrics_if_core_metrics_ets_exists(_) ->
+ rabbit_core_metrics:init(),
+
+ StatsInterval = 300,
+ set_stats_interval(StatsInterval),
+
+ {ok, TestServer} = gen_server2_test_server:start_link(),
+ timer:sleep(StatsInterval * 4),
+
+ %% No messages in the buffer
+ ?assertEqual(0, rabbit_core_metrics:get_gen_server2_stats(TestServer)),
+
+ %% Sleep to accumulate messages
+ gen_server2:cast(TestServer, {sleep, StatsInterval + 100}),
+
+ %% Sleep to get results
+ gen_server2:cast(TestServer, {sleep, 1000}),
+ gen_server2:cast(TestServer, ignore),
+ gen_server2:cast(TestServer, ignore),
+ gen_server2:cast(TestServer, ignore),
+
+ timer:sleep(StatsInterval + 150),
+ ?assertEqual(4, rabbit_core_metrics:get_gen_server2_stats(TestServer)).
+
+%% Hibernating must stop the stats timer (a hibernated process would
+%% otherwise be woken by every tick); each call that wakes the server
+%% emits stats once, and hibernation itself emits a final time.
+stop_stats_timer_on_hibernation(_) ->
+ StatsInterval = 300,
+ set_stats_interval(StatsInterval),
+
+ %% No backoff configured
+ {ok, TestServer} = gen_server2_test_server:start_link(count_stats),
+
+ ?assertEqual(ok, gen_server2:call(TestServer, hibernate)),
+
+ timer:sleep(50),
+
+ ?assertEqual({current_function,{erlang, hibernate, 3}},
+ erlang:process_info(TestServer, current_function)),
+
+ timer:sleep(StatsInterval * 6 + 100),
+ StatsCount1 = gen_server2_test_server:stats_count(TestServer),
+ %% The timer was stopped. No stats collected
+ %% The count is 1 because hibernation emits stats
+ ?assertEqual(1, StatsCount1),
+
+ %% A message will wake up the process
+ gen_server2:call(TestServer, wake_up),
+ gen_server2:call(TestServer, wake_up),
+ gen_server2:call(TestServer, wake_up),
+
+ timer:sleep(StatsInterval * 4 + 100),
+ StatsCount5 = gen_server2_test_server:stats_count(TestServer),
+ ?assertEqual(5, StatsCount5),
+ ?assertEqual(ok, gen_server2:call(TestServer, hibernate)),
+
+ timer:sleep(50),
+
+ {current_function,{erlang,hibernate,3}} =
+ erlang:process_info(TestServer, current_function),
+
+ timer:sleep(StatsInterval * 4 + 100),
+ StatsCount6 = gen_server2_test_server:stats_count(TestServer),
+ %% The timer was stopped again: no further periodic stats, only the
+ %% single emission from this second hibernation (5 + 1 = 6).
+ 6 = StatsCount6.
+
+%% With a hibernate backoff configured, the server first idles in
+%% process_next_msg/1 for the backoff period and only then hibernates;
+%% the stats timer must stop at hibernation, not during the backoff.
+stop_stats_timer_on_backoff(_) ->
+ StatsInterval = 300,
+ set_stats_interval(StatsInterval),
+
+ Backoff = 1000,
+ {ok, TestServer} =
+ gen_server2_test_server:start_link(
+ count_stats,
+ {backoff, Backoff, Backoff, 10000}),
+
+ ok = gen_server2:call(TestServer, hibernate),
+
+ {current_function,{gen_server2,process_next_msg,1}} =
+ erlang:process_info(TestServer, current_function),
+
+ %% Receiving messages during backoff period does not emit stats
+ timer:sleep(Backoff div 2),
+ ok = gen_server2:call(TestServer, hibernate),
+
+ %% Each message restarts the backoff window: half a period after the
+ %% second call, the server is still awake.
+ timer:sleep(Backoff div 2 + 50),
+ ?assertEqual({current_function,{gen_server2,process_next_msg,1}},
+ erlang:process_info(TestServer, current_function)),
+
+ %% Hibernate after backoff time after last message
+ timer:sleep(Backoff div 2),
+ ?assertEqual({current_function,{erlang,hibernate,3}},
+ erlang:process_info(TestServer, current_function)),
+
+ timer:sleep(StatsInterval * 4 + 100),
+ StatsCount = gen_server2_test_server:stats_count(TestServer),
+ %% The timer was stopped. No stats collected
+ %% The count is 1 because hibernation emits stats
+ ?assertEqual(1, StatsCount),
+
+ %% A message will wake up the process
+ gen_server2:call(TestServer, wake_up),
+
+ timer:sleep(StatsInterval * 4 + 100),
+ StatsCount5 = gen_server2_test_server:stats_count(TestServer),
+ ?assertEqual(5, StatsCount5).
+
+%% Same as above, but with a backoff shorter than the stats interval:
+%% the server must still hibernate (and stop the timer) after the
+%% backoff, even though no stats tick has fired yet.
+stop_stats_timer_on_backoff_when_backoff_less_than_stats_timeout(_) ->
+ StatsInterval = 300,
+ set_stats_interval(StatsInterval),
+
+ Backoff = 200,
+ {ok, TestServer} =
+ gen_server2_test_server:start_link(
+ count_stats,
+ {backoff, Backoff, Backoff, 10000}),
+
+ ?assertEqual(ok, gen_server2:call(TestServer, hibernate)),
+
+ ?assertEqual({current_function, {gen_server2, process_next_msg, 1}},
+ erlang:process_info(TestServer, current_function)),
+
+ timer:sleep(Backoff + 50),
+
+ ?assertEqual({current_function, {erlang, hibernate, 3}},
+ erlang:process_info(TestServer, current_function)),
+
+ timer:sleep(StatsInterval * 4 + 100),
+ StatsCount = gen_server2_test_server:stats_count(TestServer),
+ %% The timer was stopped. No stats collected
+ %% The count is 1 because hibernation emits stats
+ ?assertEqual(1, StatsCount),
+
+ %% A message will wake up the process
+ gen_server2:call(TestServer, wake_up),
+
+ timer:sleep(StatsInterval * 4 + 100),
+ StatsCount5 = gen_server2_test_server:stats_count(TestServer),
+ ?assertEqual(5, StatsCount5).
+
+%% gen_server2:stop/1 must terminate the server; a second stop — issued
+%% through plain gen_server:stop/1 here — must exit with `noproc' since
+%% the process is already gone.
+gen_server2_stop(_) ->
+ {ok, TestServer} = gen_server2_test_server:start_link(),
+ ?assertEqual(ok, gen_server2:stop(TestServer)),
+ ?assertEqual(false, erlang:is_process_alive(TestServer)),
+ ?assertEqual({'EXIT', noproc}, (catch gen_server:stop(TestServer))),
+ ok.
+
+%% vm_memory_monitor:parse_mem_limit/1 must clamp relative limits at the
+%% documented maximum and fall back to the default for unparseable input.
+%% ?assertEqual keeps these checks consistent with the eunit-style
+%% assertions used throughout this suite and still reports the expected
+%% and actual values on failure, replacing the verbose case/ct:fail form.
+parse_mem_limit_relative_exactly_max(_Config) ->
+    ?assertEqual(?MAX_VM_MEMORY_HIGH_WATERMARK,
+                 vm_memory_monitor:parse_mem_limit(1.0)).
+
+%% Relative values above 1.0 are clamped to the maximum watermark.
+parse_mem_relative_above_max(_Config) ->
+    ?assertEqual(?MAX_VM_MEMORY_HIGH_WATERMARK,
+                 vm_memory_monitor:parse_mem_limit(1.01)).
+
+%% The integer 1 is treated as the relative value 1.0, i.e. the maximum.
+parse_mem_relative_integer(_Config) ->
+    ?assertEqual(?MAX_VM_MEMORY_HIGH_WATERMARK,
+                 vm_memory_monitor:parse_mem_limit(1)).
+
+%% Invalid input falls back to the default high watermark.
+parse_mem_relative_invalid(_Config) ->
+    ?assertEqual(?DEFAULT_VM_MEMORY_HIGH_WATERMARK,
+                 vm_memory_monitor:parse_mem_limit([255])).
+
+%% platform_and_version/0 must report "Erlang/OTP <major>..." for the
+%% running VM. Note "\s" is the Erlang string escape for a space.
+platform_and_version(_Config) ->
+ MajorVersion = erlang:system_info(otp_release),
+ Result = rabbit_misc:platform_and_version(),
+ RegExp = "^Erlang/OTP\s" ++ MajorVersion,
+ case re:run(Result, RegExp) of
+ nomatch -> ct:fail("~p does not match ~p", [Result, RegExp]);
+ {error, ErrType} -> ct:fail("~p", [ErrType]);
+ _ -> ok
+ end.
+
+%% The coercion helpers accept both proplists and maps and normalise to
+%% the requested representation; same-type input passes through.
+data_coercion_to_map(_Config) ->
+ ?assertEqual(#{a => 1}, rabbit_data_coercion:to_map([{a, 1}])),
+ ?assertEqual(#{a => 1}, rabbit_data_coercion:to_map(#{a => 1})).
+
+data_coercion_to_proplist(_Config) ->
+ ?assertEqual([{a, 1}], rabbit_data_coercion:to_proplist([{a, 1}])),
+ ?assertEqual([{a, 1}], rabbit_data_coercion:to_proplist(#{a => 1})).
+
+data_coercion_to_list(_Config) ->
+ ?assertEqual([{a, 1}], rabbit_data_coercion:to_list([{a, 1}])),
+ ?assertEqual([{a, 1}], rabbit_data_coercion:to_list(#{a => 1})).
+
+%% rabbit_misc:pget/2 looks keys up in both proplists and maps, returning
+%% `undefined' on a miss.
+pget(_Config) ->
+ ?assertEqual(1, rabbit_misc:pget(a, [{a, 1}])),
+ ?assertEqual(undefined, rabbit_misc:pget(b, [{a, 1}])),
+
+ ?assertEqual(1, rabbit_misc:pget(a, #{a => 1})),
+ ?assertEqual(undefined, rabbit_misc:pget(b, #{a => 1})).
+
+%% decompose_pid/compose_pid must round-trip a pid, and pid_change_node
+%% must rewrite only the node component.
+pid_decompose_compose(_Config) ->
+ Pid = self(),
+ {Node, Cre, Id, Ser} = rabbit_misc:decompose_pid(Pid),
+ Node = node(Pid),
+ Pid = rabbit_misc:compose_pid(Node, Cre, Id, Ser),
+ OtherNode = 'some_node@localhost',
+ PidOnOtherNode = rabbit_misc:pid_change_node(Pid, OtherNode),
+ {OtherNode, Cre, Id, Ser} = rabbit_misc:decompose_pid(PidOnOtherNode).
+
+%% For every supported hash/cipher pair, encrypt-then-decrypt must be the
+%% identity on binaries of every length from 0 to 64 bytes, using a
+%% random passphrase and iteration count each time.
+encrypt_decrypt(_Config) ->
+ %% Take all available block ciphers.
+ Hashes = rabbit_pbe:supported_hashes(),
+ Ciphers = rabbit_pbe:supported_ciphers(),
+ %% For each cipher, try to encrypt and decrypt data sizes from 0 to 64 bytes
+ %% with a random passphrase.
+ _ = [begin
+ PassPhrase = crypto:strong_rand_bytes(16),
+ Iterations = rand:uniform(100),
+ Data = crypto:strong_rand_bytes(64),
+ [begin
+ Expected = binary:part(Data, 0, Len),
+ Enc = rabbit_pbe:encrypt(C, H, Iterations, PassPhrase, Expected),
+ Expected = iolist_to_binary(rabbit_pbe:decrypt(C, H, Iterations, PassPhrase, Enc))
+ end || Len <- lists:seq(0, byte_size(Data))]
+ end || H <- Hashes, C <- Ciphers],
+ ok.
+
+%% Same property for arbitrary Erlang terms via encrypt_term/decrypt_term,
+%% over a data set shaped like typical RabbitMQ configuration values.
+encrypt_decrypt_term(_Config) ->
+ %% Take all available block ciphers.
+ Hashes = rabbit_pbe:supported_hashes(),
+ Ciphers = rabbit_pbe:supported_ciphers(),
+ %% Different Erlang terms to try encrypting.
+ DataSet = [
+ 10000,
+ [5672],
+ [{"127.0.0.1", 5672},
+ {"::1", 5672}],
+ [{connection, info}, {channel, info}],
+ [{cacertfile, "/path/to/testca/cacert.pem"},
+ {certfile, "/path/to/server/cert.pem"},
+ {keyfile, "/path/to/server/key.pem"},
+ {verify, verify_peer},
+ {fail_if_no_peer_cert, false}],
+ [<<".*">>, <<".*">>, <<".*">>]
+ ],
+ _ = [begin
+ PassPhrase = crypto:strong_rand_bytes(16),
+ Iterations = rand:uniform(100),
+ Enc = rabbit_pbe:encrypt_term(C, H, Iterations, PassPhrase, Data),
+ Data = rabbit_pbe:decrypt_term(C, H, Iterations, PassPhrase, Enc)
+ end || H <- Hashes, C <- Ciphers, Data <- DataSet],
+ ok.
+
+%% version_minor_equivalent/2 compares versions down to the minor
+%% component, accepting both strings and binaries.
+version_equivalence(_Config) ->
+ true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.0"),
+ true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.1"),
+ true = rabbit_misc:version_minor_equivalent("%%VSN%%", "%%VSN%%"),
+ true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0"),
+ true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.0.1"),
+ true = rabbit_misc:version_minor_equivalent("3.0.0.1", "3.0.0.3"),
+ true = rabbit_misc:version_minor_equivalent("3.0.0.1", "3.0.1.3"),
+ true = rabbit_misc:version_minor_equivalent("3.0.0", "3.0.foo"),
+ false = rabbit_misc:version_minor_equivalent("3.0.0", "3.1.0"),
+ false = rabbit_misc:version_minor_equivalent("3.0.0.1", "3.1.0.1"),
+
+ false = rabbit_misc:version_minor_equivalent("3.5.7", "3.6.7"),
+ false = rabbit_misc:version_minor_equivalent("3.6.5", "3.6.6"),
+ false = rabbit_misc:version_minor_equivalent("3.6.6", "3.7.0"),
+ true = rabbit_misc:version_minor_equivalent("3.6.7", "3.6.6"),
+
+ %% Starting with RabbitMQ 3.7.x and feature flags introduced in
+ %% RabbitMQ 3.8.x, versions are considered equivalent and the actual
+ %% check is deferred to the feature flags module.
+ false = rabbit_misc:version_minor_equivalent("3.6.0", "3.8.0"),
+ true = rabbit_misc:version_minor_equivalent("3.7.0", "3.8.0"),
+ true = rabbit_misc:version_minor_equivalent("3.7.0", "3.10.0"),
+
+ true = rabbit_misc:version_minor_equivalent(<<"3.0.0">>, <<"3.0.0">>),
+ true = rabbit_misc:version_minor_equivalent(<<"3.0.0">>, <<"3.0.1">>),
+ true = rabbit_misc:version_minor_equivalent(<<"%%VSN%%">>, <<"%%VSN%%">>),
+ true = rabbit_misc:version_minor_equivalent(<<"3.0.0">>, <<"3.0">>),
+ true = rabbit_misc:version_minor_equivalent(<<"3.0.0">>, <<"3.0.0.1">>),
+ false = rabbit_misc:version_minor_equivalent(<<"3.0.0">>, <<"3.1.0">>),
+ false = rabbit_misc:version_minor_equivalent(<<"3.0.0.1">>, <<"3.1.0.1">>).
+
+%% Frame generation must cope with empty payload fragment lists and empty
+%% binaries. ExpectedFrames is bound by the generator, so the `=' inside
+%% the comprehension is a pattern-match assertion on the built frames.
+frame_encoding_does_not_fail_with_empty_binary_payload(_Config) ->
+ [begin
+ Content = #content{
+ class_id = 60, properties = none, properties_bin = <<0,0>>, protocol = rabbit_framing_amqp_0_9_1,
+ payload_fragments_rev = P
+ },
+ ExpectedFrames = rabbit_binary_generator:build_simple_content_frames(1, Content, 0, rabbit_framing_amqp_0_9_1)
+ end || {P, ExpectedFrames} <- [
+ {[], [[<<2,0,1,0,0,0,14>>,[<<0,60,0,0,0,0,0,0,0,0,0,0>>,<<0,0>>],206]]},
+ {[<<>>], [[<<2,0,1,0,0,0,14>>,[<<0,60,0,0,0,0,0,0,0,0,0,0>>,<<0,0>>],206]]},
+ {[<<"payload">>], [[<<2,0,1,0,0,0,14>>,[<<0,60,0,0,0,0,0,0,0,0,0,7>>,<<0,0>>],206],
+ [<<3,0,1,0,0,0,7>>,[<<"payload">>],206]]}
+ ]],
+ ok.
+
+%% amqp_table/to_amqp_table must round-trip between AMQP 0-9-1 field
+%% tables and their map representation, including nested arrays/tables.
+amqp_table_conversion(_Config) ->
+ assert_table(#{}, []),
+ assert_table(#{<<"x-expires">> => 1000},
+ [{<<"x-expires">>, long, 1000}]),
+ assert_table(#{<<"x-forwarding">> =>
+ [#{<<"uri">> => <<"amqp://localhost/%2F/upstream">>}]},
+ [{<<"x-forwarding">>, array,
+ [{table, [{<<"uri">>, longstr,
+ <<"amqp://localhost/%2F/upstream">>}]}]}]).
+
+%% Asserts both conversion directions. Despite the name, the first
+%% argument is the map representation, not a JSON document.
+assert_table(JSON, AMQP) ->
+ ?assertEqual(JSON, rabbit_misc:amqp_table(AMQP)),
+ ?assertEqual(AMQP, rabbit_misc:to_amqp_table(JSON)).
+
+
+%% Enables coarse statistics collection with the given emission interval
+%% (in milliseconds) via the `rabbit' application environment.
+set_stats_interval(Interval) ->
+    ok = application:set_env(rabbit, collect_statistics, coarse),
+    ok = application:set_env(rabbit, collect_statistics_interval, Interval).
+
+%% Reverts both statistics settings to their unset state.
+reset_stats_interval() ->
+    ok = application:unset_env(rabbit, collect_statistics),
+    ok = application:unset_env(rabbit, collect_statistics_interval).
+
+%% name_type/1 classifies node names: no '@' or a dotless host part are
+%% short names, a dotted host part means long names.
+name_type(_) ->
+ ?assertEqual(shortnames, rabbit_nodes_common:name_type(rabbit)),
+ ?assertEqual(shortnames, rabbit_nodes_common:name_type(rabbit@localhost)),
+ ?assertEqual(longnames, rabbit_nodes_common:name_type('rabbit@localhost.example.com')),
+ ok.
+
+%% get_erl_path/0 must point at the Erlang executable for the host OS.
+%% NOTE(review): the non-Windows branch's "erl" substring check is weak —
+%% it would also pass for a path containing "erl.exe".
+get_erl_path(_) ->
+ Exe = rabbit_runtime:get_erl_path(),
+ case os:type() of
+ {win32, _} ->
+ ?assertNotMatch(nomatch, string:find(Exe, "erl.exe"));
+ _ ->
+ ?assertNotMatch(nomatch, string:find(Exe, "erl"))
+ end,
+ ok.
diff --git a/deps/rabbit_common/test/unit_priority_queue_SUITE.erl b/deps/rabbit_common/test/unit_priority_queue_SUITE.erl
new file mode 100644
index 0000000000..8d58c72f10
--- /dev/null
+++ b/deps/rabbit_common/test/unit_priority_queue_SUITE.erl
@@ -0,0 +1,35 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2018-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(unit_priority_queue_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+all() ->
+ [
+ member,
+ member_priority_queue
+ ].
+
+%% member/2 on a single-priority queue: present elements are found,
+%% absent elements and the empty queue report false.
+member(_Config) ->
+ Q = lists:foldl(fun(V, Acc) -> priority_queue:in(V, Acc) end, priority_queue:new(), lists:seq(1, 10)),
+ ?assert(priority_queue:member(1, Q)),
+ ?assert(priority_queue:member(2, Q)),
+ ?assertNot(priority_queue:member(100, Q)),
+ ?assertNot(priority_queue:member(1, priority_queue:new())),
+ ok.
+
+%% member/2 must also search across multiple priority bands (elements are
+%% spread over priorities 0..3 here).
+member_priority_queue(_Config) ->
+ Q = lists:foldl(fun(V, Acc) -> priority_queue:in(V, V rem 4, Acc) end, priority_queue:new(),
+ lists:seq(1, 100)),
+ ?assert(priority_queue:member(1, Q)),
+ ?assert(priority_queue:member(50, Q)),
+ ?assertNot(priority_queue:member(200, Q)),
+ ok.
diff --git a/deps/rabbit_common/test/worker_pool_SUITE.erl b/deps/rabbit_common/test/worker_pool_SUITE.erl
new file mode 100644
index 0000000000..a50104f6c7
--- /dev/null
+++ b/deps/rabbit_common/test/worker_pool_SUITE.erl
@@ -0,0 +1,220 @@
+%% This Source Code Form is subject to the terms of the Mozilla Public
+%% License, v. 2.0. If a copy of the MPL was not distributed with this
+%% file, You can obtain one at https://mozilla.org/MPL/2.0/.
+%%
+%% Copyright (c) 2007-2020 VMware, Inc. or its affiliates. All rights reserved.
+%%
+
+-module(worker_pool_SUITE).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("eunit/include/eunit.hrl").
+
+-compile(export_all).
+
+-define(POOL_SIZE, 1).
+-define(POOL_NAME, test_pool).
+
+all() ->
+ [
+ run_code_synchronously,
+ run_code_asynchronously,
+ set_timeout,
+ cancel_timeout,
+ cancel_timeout_by_setting,
+ dispatch_async_blocks_until_task_begins
+ ].
+
+init_per_testcase(_, Config) ->
+ {ok, Pool} = worker_pool_sup:start_link(?POOL_SIZE, ?POOL_NAME),
+ [{pool_sup, Pool} | Config].
+
+end_per_testcase(_, Config) ->
+ Pool = ?config(pool_sup, Config),
+ unlink(Pool),
+ exit(Pool, kill).
+
+run_code_synchronously(_) ->
+ Self = self(),
+ Test = make_ref(),
+ Sleep = 200,
+ {Time, Result} = timer:tc(fun() ->
+ worker_pool:submit(?POOL_NAME,
+ fun() ->
+ timer:sleep(Sleep),
+ Self ! {hi, Test},
+ self()
+ end,
+ reuse)
+ end),
+    % Worker ran synchronously
+ true = Time > Sleep,
+    % Worker has sent the message
+ receive {hi, Test} -> ok
+ after 0 -> error(no_message_from_worker)
+ end,
+ % Worker is a separate process
+ true = (Self /= Result).
+
+run_code_asynchronously(_) ->
+ Self = self(),
+ Test = make_ref(),
+ Sleep = 200,
+ {Time, Result} = timer:tc(fun() ->
+ worker_pool:submit_async(?POOL_NAME,
+ fun() ->
+ timer:sleep(Sleep),
+ Self ! {hi, Test},
+ self()
+ end)
+ end),
+    % Worker ran asynchronously (submit_async returned before the sleep elapsed)
+ true = Time < Sleep,
+    % Worker has sent the message
+ receive {hi, Test} -> ok
+ after Sleep + 100 -> error(no_message_from_worker)
+ end,
+ % Worker is a separate process
+ true = (Self /= Result).
+
+set_timeout(_) ->
+ Self = self(),
+ Test = make_ref(),
+ Worker = worker_pool:submit(?POOL_NAME,
+ fun() ->
+ Worker = self(),
+ timer:sleep(100),
+ worker_pool_worker:set_timeout(
+ my_timeout, 1000,
+ fun() ->
+ Self ! {hello, self(), Test}
+ end),
+ Worker
+ end,
+ reuse),
+
+ % Timeout will occur after 1000 ms only
+ receive {hello, Worker, Test} -> exit(timeout_should_wait)
+ after 0 -> ok
+ end,
+
+ timer:sleep(1000),
+
+ receive {hello, Worker, Test} -> ok
+ after 1000 -> exit(timeout_is_late)
+ end.
+
+
+cancel_timeout(_) ->
+ Self = self(),
+ Test = make_ref(),
+ Worker = worker_pool:submit(?POOL_NAME,
+ fun() ->
+ Worker = self(),
+ timer:sleep(100),
+ worker_pool_worker:set_timeout(
+ my_timeout, 1000,
+ fun() ->
+ Self ! {hello, self(), Test}
+ end),
+ Worker
+ end,
+ reuse),
+
+ % Timeout will occur after 1000 ms only
+ receive {hello, Worker, Test} -> exit(timeout_should_wait)
+ after 0 -> ok
+ end,
+
+ worker_pool_worker:next_job_from(Worker, Self),
+ Worker = worker_pool_worker:submit(Worker,
+ fun() ->
+ worker_pool_worker:clear_timeout(my_timeout),
+ Worker
+ end,
+ reuse),
+
+ timer:sleep(1000),
+ receive {hello, Worker, Test} -> exit(timeout_is_not_cancelled)
+ after 0 -> ok
+ end.
+
+cancel_timeout_by_setting(_) ->
+ Self = self(),
+ Test = make_ref(),
+ Worker = worker_pool:submit(?POOL_NAME,
+ fun() ->
+ Worker = self(),
+ timer:sleep(100),
+ worker_pool_worker:set_timeout(
+ my_timeout, 1000,
+ fun() ->
+ Self ! {hello, self(), Test}
+ end),
+ Worker
+ end,
+ reuse),
+
+ % Timeout will occur after 1000 ms only
+ receive {hello, Worker, Test} -> exit(timeout_should_wait)
+ after 0 -> ok
+ end,
+
+ worker_pool_worker:next_job_from(Worker, Self),
+ Worker = worker_pool_worker:submit(Worker,
+ fun() ->
+ worker_pool_worker:set_timeout(my_timeout, 1000,
+ fun() ->
+ Self ! {hello_reset, self(), Test}
+ end),
+ Worker
+ end,
+ reuse),
+
+ timer:sleep(1000),
+ receive {hello, Worker, Test} -> exit(timeout_is_not_cancelled)
+ after 0 -> ok
+ end,
+
+ receive {hello_reset, Worker, Test} -> ok
+ after 1000 -> exit(timeout_is_late)
+ end.
+
+dispatch_async_blocks_until_task_begins(_) ->
+ Self = self(),
+
+ Waiter = fun() ->
+ Self ! {register, self()},
+ receive
+ go -> ok
+ end
+ end,
+
+ ok = worker_pool:dispatch_sync(?POOL_NAME, Waiter),
+ SomeWorker = receive
+ {register, WPid} -> WPid
+ after 250 ->
+ none
+ end,
+ ?assert(is_process_alive(SomeWorker), "Dispatched tasks should be running"),
+ spawn(fun() ->
+ ok = worker_pool:dispatch_sync(?POOL_NAME,
+ Waiter),
+ Self ! done_waiting,
+ exit(normal)
+ end),
+ DidWait = receive
+ done_waiting ->
+ false
+ after 250 ->
+ true
+ end,
+ ?assert(DidWait, "dispatch_sync should block until there is a free worker"),
+ SomeWorker ! go,
+ DidFinish = receive
+ done_waiting ->
+ true
+ after 250 ->
+ false
+ end,
+ ?assert(DidFinish, "appearance of a free worker should unblock the dispatcher").