author     Stephen D. Huston <shuston@apache.org>    2011-10-21 01:19:00 +0000
committer  Stephen D. Huston <shuston@apache.org>    2011-10-21 01:19:00 +0000
commit     ebfd9ff053b04ab379acfc0fefedee5a31b6d8a5 (patch)
tree       dcfb94e75656c6c239fc3dcb754cd2015126424d /cpp/src/tests
parent     5eb354b338bb8d8fcd35b6ac3fb33f8103e757c3 (diff)
download   qpid-python-ebfd9ff053b04ab379acfc0fefedee5a31b6d8a5.tar.gz
Undo bad merge from trunk - merged at wrong level.
git-svn-id: https://svn.apache.org/repos/asf/qpid/branches/QPID-2519@1187150 13f79535-47bb-0310-9956-ffa450edef68
Diffstat (limited to 'cpp/src/tests')
-rw-r--r--  cpp/src/tests/.valgrind.supp | 74
-rw-r--r--  cpp/src/tests/Address.cpp | 11
-rw-r--r--  cpp/src/tests/BrokerFixture.h | 32
-rw-r--r--  cpp/src/tests/BrokerMgmtAgent.cpp | 3
-rw-r--r--  cpp/src/tests/BrokerOptions.cpp | 79
-rw-r--r--  cpp/src/tests/CMakeLists.txt | 18
-rw-r--r--  cpp/src/tests/ClientSessionTest.cpp | 33
-rw-r--r--  cpp/src/tests/ExchangeTest.cpp | 2
-rw-r--r--  cpp/src/tests/ForkedBroker.cpp | 3
-rw-r--r--  cpp/src/tests/IncompleteMessageList.cpp | 134
-rw-r--r--  cpp/src/tests/Makefile.am | 47
-rw-r--r--  cpp/src/tests/MessageReplayTracker.cpp | 4
-rw-r--r--  cpp/src/tests/MessagingFixture.h | 117
-rw-r--r--  cpp/src/tests/MessagingSessionTests.cpp | 228
-rw-r--r--  cpp/src/tests/Qmf2.cpp | 104
-rw-r--r--  cpp/src/tests/QueueEvents.cpp | 4
-rw-r--r--  cpp/src/tests/QueueFlowLimitTest.cpp | 463
-rw-r--r--  cpp/src/tests/QueuePolicyTest.cpp | 21
-rw-r--r--  cpp/src/tests/QueueTest.cpp | 444
-rw-r--r--  cpp/src/tests/ReplicationTest.cpp | 2
-rw-r--r--  cpp/src/tests/SessionState.cpp | 8
-rw-r--r--  cpp/src/tests/SocketProxy.h | 181
-rw-r--r--  cpp/src/tests/TimerTest.cpp | 4
-rw-r--r--  cpp/src/tests/TxPublishTest.cpp | 7
-rw-r--r--  cpp/src/tests/Url.cpp | 26
-rw-r--r--  cpp/src/tests/Variant.cpp | 58
-rw-r--r--  cpp/src/tests/XmlClientSessionTest.cpp | 2
-rwxr-xr-x  cpp/src/tests/acl.py | 219
-rwxr-xr-x  cpp/src/tests/allhosts | 4
-rw-r--r--  cpp/src/tests/brokertest.py | 312
-rwxr-xr-x  cpp/src/tests/cli_tests.py | 24
-rw-r--r--  cpp/src/tests/cluster_python_tests_failing.txt | 28
-rwxr-xr-x  cpp/src/tests/cluster_test_logs.py | 16
-rwxr-xr-x  cpp/src/tests/cluster_tests.py | 1033
-rw-r--r--  cpp/src/tests/exception_test.cpp | 14
-rwxr-xr-x  cpp/src/tests/federated_topic_test | 27
-rwxr-xr-x  cpp/src/tests/federation.py | 398
-rwxr-xr-x  cpp/src/tests/federation_sys.py | 1900
-rwxr-xr-x  cpp/src/tests/ipv6_test | 150
-rw-r--r--  cpp/src/tests/msg_group_test.cpp | 618
-rwxr-xr-x  cpp/src/tests/python_tests | 2
-rwxr-xr-x  cpp/src/tests/qpid-cluster-benchmark | 47
-rwxr-xr-x  cpp/src/tests/qpid-cpp-benchmark | 71
-rwxr-xr-x  cpp/src/tests/qpid-ctrl | 5
-rw-r--r--  cpp/src/tests/qpid-perftest.cpp | 18
-rw-r--r--  cpp/src/tests/qpid-receive.cpp | 9
-rw-r--r--  cpp/src/tests/qpid-send.cpp | 104
-rw-r--r--  cpp/src/tests/qrsh.cpp | 169
-rw-r--r--  cpp/src/tests/qrsh_run.cpp | 321
-rw-r--r--  cpp/src/tests/qrsh_server.cpp | 1068
-rwxr-xr-x  cpp/src/tests/qrsh_utils/10_all | 30
-rwxr-xr-x  cpp/src/tests/qrsh_utils/1_remote_run | 26
-rwxr-xr-x  cpp/src/tests/qrsh_utils/2_forever | 26
-rwxr-xr-x  cpp/src/tests/qrsh_utils/3_kill_it | 27
-rwxr-xr-x  cpp/src/tests/qrsh_utils/4_wait_for_it | 26
-rwxr-xr-x  cpp/src/tests/qrsh_utils/5_exited | 64
-rwxr-xr-x  cpp/src/tests/qrsh_utils/6_get | 29
-rwxr-xr-x  cpp/src/tests/qrsh_utils/7_get_output | 44
-rwxr-xr-x  cpp/src/tests/qrsh_utils/8_any | 43
-rwxr-xr-x  cpp/src/tests/qrsh_utils/9_alias | 38
-rw-r--r--  cpp/src/tests/qrsh_utils/qrsh_example_command.cpp | 52
-rw-r--r--  cpp/src/tests/qrsh_utils/qrsh_forever.cpp | 50
-rw-r--r--  cpp/src/tests/qrsh_utils/qsh_doc.txt | 309
-rw-r--r--  cpp/src/tests/queue_flow_limit_tests.py | 371
-rwxr-xr-x  cpp/src/tests/replication_test | 2
-rwxr-xr-x  cpp/src/tests/run_acl_tests | 2
-rwxr-xr-x  cpp/src/tests/run_cli_tests | 5
-rwxr-xr-x  cpp/src/tests/run_federation_sys_tests | 97
-rwxr-xr-x  cpp/src/tests/run_federation_tests | 4
-rwxr-xr-x  cpp/src/tests/run_header_test | 2
-rw-r--r--  cpp/src/tests/run_long_federation_sys_tests | 24
-rwxr-xr-x  cpp/src/tests/run_msg_group_tests | 66
-rwxr-xr-x  cpp/src/tests/run_msg_group_tests_soak | 60
-rwxr-xr-x  cpp/src/tests/run_queue_flow_limit_tests | 57
-rw-r--r--  cpp/src/tests/run_store_tests.ps1 | 2
-rwxr-xr-x  cpp/src/tests/run_test | 2
-rw-r--r--  cpp/src/tests/sasl.mk | 16
-rwxr-xr-x  cpp/src/tests/sasl_fed | 2
-rwxr-xr-x  cpp/src/tests/sasl_fed_ex | 306
-rwxr-xr-x  cpp/src/tests/sasl_fed_ex_dynamic | 27
-rwxr-xr-x  cpp/src/tests/sasl_fed_ex_dynamic_cluster | 28
-rwxr-xr-x  cpp/src/tests/sasl_fed_ex_link | 27
-rwxr-xr-x  cpp/src/tests/sasl_fed_ex_link_cluster | 28
-rwxr-xr-x  cpp/src/tests/sasl_fed_ex_queue | 27
-rwxr-xr-x  cpp/src/tests/sasl_fed_ex_queue_cluster | 28
-rwxr-xr-x  cpp/src/tests/sasl_fed_ex_route | 27
-rwxr-xr-x  cpp/src/tests/sasl_fed_ex_route_cluster | 28
-rwxr-xr-x  cpp/src/tests/sasl_no_dir | 218
-rwxr-xr-x  cpp/src/tests/sasl_test_setup.sh | 1
-rw-r--r--  cpp/src/tests/sender.cpp | 2
-rwxr-xr-x  cpp/src/tests/ssl_test | 14
-rw-r--r--  cpp/src/tests/windows/DisableWin32ErrorWindows.cpp | 4
92 files changed, 3365 insertions, 7542 deletions
diff --git a/cpp/src/tests/.valgrind.supp b/cpp/src/tests/.valgrind.supp
index 2c6a1509ff..0e3e045437 100644
--- a/cpp/src/tests/.valgrind.supp
+++ b/cpp/src/tests/.valgrind.supp
@@ -73,6 +73,61 @@
}
{
+ boost 103200 -- we think Boost is responsible for these leaks.
+ Memcheck:Leak
+ fun:_Znwm
+ fun:_ZN5boost15program_options??options_description*
+}
+
+{
+ boost 103200 -- we think Boost is responsible for these leaks.
+ Memcheck:Leak
+ fun:_Znwm
+ fun:_ZN5boost9unit_test9test_case*
+}
+
+{
+ boost 103200 -- we think Boost is responsible for these leaks.
+ Memcheck:Leak
+ fun:calloc
+ fun:_dlerror_run
+ fun:dlopen@@GLIBC_2.2.5
+ fun:_ZN4qpid3sys5Shlib4loadEPKc
+ fun:_Z9testShlibv
+ fun:_ZN5boost9unit_test9ut_detail17unit_test_monitor8functionEv
+ obj:/usr/lib64/libboost_unit_test_framework.so.1.32.0
+ fun:_ZN5boost17execution_monitor7executeEbi
+ fun:_ZN5boost9unit_test9ut_detail17unit_test_monitor21execute_and_translateEPNS0_9test_caseEMS3_FvvEi
+ fun:_ZN5boost9unit_test9test_case3runEv
+ fun:_ZN5boost9unit_test10test_suite6do_runEv
+ fun:_ZN5boost9unit_test9test_case3runEv
+ fun:main
+}
+
+{
+ boost 103200 -- we think Boost is responsible for these leaks.
+ Memcheck:Leak
+ fun:calloc
+ fun:_dl_allocate_tls
+ fun:pthread_create@@GLIBC_2.2.5
+ fun:_ZN4qpid6broker5Timer5startEv
+ fun:_ZN4qpid6broker5TimerC1Ev
+ fun:_ZN4qpid6broker10DtxManagerC1Ev
+ fun:_ZN4qpid6broker6BrokerC1ERKNS1_7OptionsE
+ fun:_ZN4qpid6broker6Broker6createERKNS1_7OptionsE
+ fun:_ZN15SessionFixtureTI15ProxyConnectionEC2Ev
+ fun:_Z14testQueueQueryv
+ fun:_ZN5boost9unit_test9ut_detail17unit_test_monitor8functionEv
+ obj:/usr/lib64/libboost_unit_test_framework.so.1.32.0
+ fun:_ZN5boost17execution_monitor7executeEbi
+ fun:_ZN5boost9unit_test9ut_detail17unit_test_monitor21execute_and_translateEPNS0_9test_caseEMS3_FvvEi
+ fun:_ZN5boost9unit_test9test_case3runEv
+ fun:_ZN5boost9unit_test10test_suite6do_runEv
+ fun:_ZN5boost9unit_test9test_case3runEv
+ fun:main
+}
+
+{
INVESTIGATE
Memcheck:Leak
fun:calloc
@@ -100,6 +155,25 @@
}
{
+ boost 103200 -- mgoulish -- fix this, sometime
+ Memcheck:Leak
+ fun:*
+ fun:*
+ obj:*
+ fun:*
+ fun:_ZN4qpid34options_description_less_easy_initclEPKcPKN5boost15program_options14value_semanticES2_
+}
+
+{
+ boost 103200 -- mgoulish -- fix this, sometime
+ Memcheck:Leak
+ fun:*
+ fun:*
+ fun:*
+ fun:_ZN4qpid34options_description_less_easy_initclEPKcPKN5boost15program_options14value_semanticES2_
+}
+
+{
INVESTIGATE
Memcheck:Param
socketcall.sendto(msg)
diff --git a/cpp/src/tests/Address.cpp b/cpp/src/tests/Address.cpp
index 0fd3585958..f41f27b6df 100644
--- a/cpp/src/tests/Address.cpp
+++ b/cpp/src/tests/Address.cpp
@@ -119,17 +119,6 @@ QPID_AUTO_TEST_CASE(testParseQuotedNameAndSubject)
BOOST_CHECK_EQUAL(std::string("my subject with ; in it"), address.getSubject());
}
-QPID_AUTO_TEST_CASE(testParseOptionsWithEmptyStringAsValue)
-{
- Address address("my-topic; {a:'', x:101}");
- BOOST_CHECK_EQUAL(std::string("my-topic"), address.getName());
- Variant a = address.getOptions()["a"];
- BOOST_CHECK_EQUAL(VAR_STRING, a.getType());
- std::string aVal = a;
- BOOST_CHECK(aVal.size() == 0);
- BOOST_CHECK_EQUAL((uint16_t) 101, address.getOptions()["x"].asInt64());
-}
-
QPID_AUTO_TEST_SUITE_END()
}}
diff --git a/cpp/src/tests/BrokerFixture.h b/cpp/src/tests/BrokerFixture.h
index 92c6d22b57..672d954572 100644
--- a/cpp/src/tests/BrokerFixture.h
+++ b/cpp/src/tests/BrokerFixture.h
@@ -22,6 +22,8 @@
*
*/
+#include "SocketProxy.h"
+
#include "qpid/broker/Broker.h"
#include "qpid/client/Connection.h"
#include "qpid/client/ConnectionImpl.h"
@@ -69,15 +71,16 @@ struct BrokerFixture : private boost::noncopyable {
brokerThread = qpid::sys::Thread(*broker);
};
- void shutdownBroker() {
- if (broker) {
- broker->shutdown();
- brokerThread.join();
- broker = BrokerPtr();
- }
+ void shutdownBroker()
+ {
+ broker->shutdown();
+ broker = BrokerPtr();
}
- ~BrokerFixture() { shutdownBroker(); }
+ ~BrokerFixture() {
+ if (broker) broker->shutdown();
+ brokerThread.join();
+ }
/** Open a connection to the broker. */
void open(qpid::client::Connection& c) {
@@ -94,6 +97,20 @@ struct LocalConnection : public qpid::client::Connection {
~LocalConnection() { close(); }
};
+/** A local client connection via a socket proxy. */
+struct ProxyConnection : public qpid::client::Connection {
+ SocketProxy proxy;
+ ProxyConnection(int brokerPort) : proxy(brokerPort) {
+ open("localhost", proxy.getPort());
+ }
+ ProxyConnection(const qpid::client::ConnectionSettings& s) : proxy(s.port) {
+ qpid::client::ConnectionSettings proxySettings(s);
+ proxySettings.port = proxy.getPort();
+ open(proxySettings);
+ }
+ ~ProxyConnection() { close(); }
+};
+
/** Convenience class to create and open a connection and session
* and some related useful objects.
*/
@@ -130,6 +147,7 @@ struct SessionFixtureT : BrokerFixture, ClientT<ConnectionType,SessionType> {
};
typedef SessionFixtureT<LocalConnection> SessionFixture;
+typedef SessionFixtureT<ProxyConnection> ProxySessionFixture;
}} // namespace qpid::tests
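
Note: the restored ProxyConnection and ProxySessionFixture route all client traffic through a SocketProxy that sits between the client and the in-process broker, which is what lets the reinstated tests interfere with a connection at the socket level. A minimal sketch of how a unit test might use the fixture (illustrative only, not part of this patch; it assumes only the fixture members shown above and the usual unit_test.h macros):

#include "BrokerFixture.h"
#include "unit_test.h"

namespace qpid {
namespace tests {

QPID_AUTO_TEST_CASE(exampleProxySessionUse)
{
    // Starts an in-process broker and opens a client connection via SocketProxy.
    ProxySessionFixture fix;
    // The session reaches the broker through the proxy port, not the broker port.
    fix.session.queueDeclare(qpid::client::arg::queue="demo-q");
    BOOST_CHECK_EQUAL(fix.session.queueQuery("demo-q").getMessageCount(), 0u);
}

}} // namespace qpid::tests
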
diff --git a/cpp/src/tests/BrokerMgmtAgent.cpp b/cpp/src/tests/BrokerMgmtAgent.cpp
index 1d5289dc90..d0c6668b72 100644
--- a/cpp/src/tests/BrokerMgmtAgent.cpp
+++ b/cpp/src/tests/BrokerMgmtAgent.cpp
@@ -599,12 +599,13 @@ namespace qpid {
// populate the agent with multiple test objects
const size_t objCount = 50;
std::vector<TestManageable *> tmv;
+ uint32_t objLen;
for (size_t i = 0; i < objCount; i++) {
std::stringstream key;
key << "testobj-" << i;
TestManageable *tm = new TestManageable(agent, key.str());
- (void) tm->GetManagementObject()->writePropertiesSize();
+ objLen = tm->GetManagementObject()->writePropertiesSize();
agent->addObject(tm->GetManagementObject(), key.str());
tmv.push_back(tm);
}
diff --git a/cpp/src/tests/BrokerOptions.cpp b/cpp/src/tests/BrokerOptions.cpp
deleted file mode 100644
index b36d96916a..0000000000
--- a/cpp/src/tests/BrokerOptions.cpp
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- *
- * Copyright (c) 2006 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/** Unit tests for various broker configuration options **/
-
-#include "unit_test.h"
-#include "test_tools.h"
-#include "MessagingFixture.h"
-
-#include "qpid/messaging/Address.h"
-#include "qpid/messaging/Connection.h"
-#include "qpid/messaging/Message.h"
-#include "qpid/messaging/Receiver.h"
-#include "qpid/messaging/Sender.h"
-#include "qpid/messaging/Session.h"
-
-namespace qpid {
-namespace tests {
-
-QPID_AUTO_TEST_SUITE(BrokerOptionsTestSuite)
-
-using namespace qpid::broker;
-using namespace qpid::messaging;
-using namespace qpid::types;
-using namespace qpid;
-
-QPID_AUTO_TEST_CASE(testDisabledTimestamp)
-{
- // by default, there should be no timestamp added by the broker
- MessagingFixture fix;
-
- Sender sender = fix.session.createSender("test-q; {create:always, delete:sender}");
- messaging::Message msg("hi");
- sender.send(msg);
-
- Receiver receiver = fix.session.createReceiver("test-q");
- messaging::Message in;
- BOOST_CHECK(receiver.fetch(in, Duration::IMMEDIATE));
- Variant::Map props = in.getProperties();
- BOOST_CHECK(props.find("x-amqp-0-10.timestamp") == props.end());
-}
-
-QPID_AUTO_TEST_CASE(testEnabledTimestamp)
-{
- // when enabled, the 0.10 timestamp is added by the broker
- Broker::Options opts;
- opts.timestampRcvMsgs = true;
- MessagingFixture fix(opts, true);
-
- Sender sender = fix.session.createSender("test-q; {create:always, delete:sender}");
- messaging::Message msg("one");
- sender.send(msg);
-
- Receiver receiver = fix.session.createReceiver("test-q");
- messaging::Message in;
- BOOST_CHECK(receiver.fetch(in, Duration::IMMEDIATE));
- Variant::Map props = in.getProperties();
- BOOST_CHECK(props.find("x-amqp-0-10.timestamp") != props.end());
- BOOST_CHECK(props["x-amqp-0-10.timestamp"]);
-}
-
-QPID_AUTO_TEST_SUITE_END()
-
-}}
diff --git a/cpp/src/tests/CMakeLists.txt b/cpp/src/tests/CMakeLists.txt
index 7d781e5eb3..3b3b232671 100644
--- a/cpp/src/tests/CMakeLists.txt
+++ b/cpp/src/tests/CMakeLists.txt
@@ -107,6 +107,7 @@ set(unit_tests_to_build
MessagingSessionTests
SequenceSet
StringUtils
+ IncompleteMessageList
RangeSet
AtomicValue
QueueTest
@@ -118,7 +119,6 @@ set(unit_tests_to_build
MessageTest
QueueRegistryTest
QueuePolicyTest
- QueueFlowLimitTest
FramingTest
HeaderTest
SequenceNumberTest
@@ -264,19 +264,6 @@ add_executable (qpid-send qpid-send.cpp Statistics.cpp ${platform_test_additions
target_link_libraries (qpid-send qpidmessaging)
remember_location(qpid-send)
-add_executable (qpid-ping qpid-ping.cpp ${platform_test_additions})
-target_link_libraries (qpid-ping qpidclient)
-remember_location(qpid-ping)
-
-add_executable (datagen datagen.cpp ${platform_test_additions})
-target_link_libraries (datagen qpidclient)
-remember_location(datagen)
-
-add_executable (msg_group_test msg_group_test.cpp ${platform_test_additions})
-target_link_libraries (msg_group_test qpidmessaging)
-remember_location(msg_group_test)
-
-
# qpid-perftest and qpid-latency-test are generally useful so install them
install (TARGETS qpid-perftest qpid-latency-test RUNTIME
DESTINATION ${QPID_INSTALL_BINDIR})
@@ -291,7 +278,7 @@ set(test_wrap ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/run_test${test_script_suffix}
add_test (unit_test ${test_wrap} ${unit_test_LOCATION})
add_test (start_broker ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/start_broker${test_script_suffix})
-add_test (qpid-client-test ${test_wrap} ${qpid-client-test_LOCATION})
+add_test (qpid-client-test ${test_wrap} ${qpid-client_test_LOCATION})
add_test (quick_perftest ${test_wrap} ${qpid-perftest_LOCATION} --summary --count 100)
add_test (quick_topictest ${test_wrap} ${CMAKE_CURRENT_SOURCE_DIR}/quick_topictest${test_script_suffix})
add_test (quick_txtest ${test_wrap} ${qpid-txtest_LOCATION} --queues 4 --tx-count 10 --quiet)
@@ -301,7 +288,6 @@ if (PYTHON_EXECUTABLE)
endif (PYTHON_EXECUTABLE)
add_test (stop_broker ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/stop_broker${test_script_suffix})
if (PYTHON_EXECUTABLE)
- add_test (ipv6_test ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/ipv6_test${test_script_suffix})
add_test (federation_tests ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/run_federation_tests${test_script_suffix})
if (BUILD_ACL)
add_test (acl_tests ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/run_acl_tests${test_script_suffix})
diff --git a/cpp/src/tests/ClientSessionTest.cpp b/cpp/src/tests/ClientSessionTest.cpp
index 30441cd03c..939f8f2b88 100644
--- a/cpp/src/tests/ClientSessionTest.cpp
+++ b/cpp/src/tests/ClientSessionTest.cpp
@@ -102,9 +102,9 @@ struct SimpleListener : public MessageListener
}
};
-struct ClientSessionFixture : public SessionFixture
+struct ClientSessionFixture : public ProxySessionFixture
{
- ClientSessionFixture(Broker::Options opts = Broker::Options()) : SessionFixture(opts) {
+ ClientSessionFixture(Broker::Options opts = Broker::Options()) : ProxySessionFixture(opts) {
session.queueDeclare(arg::queue="my-queue");
}
};
@@ -150,6 +150,16 @@ QPID_AUTO_TEST_CASE(testDispatcherThread)
BOOST_CHECK_EQUAL(boost::lexical_cast<string>(i), listener.messages[i].getData());
}
+// FIXME aconway 2009-06-17: test for unimplemented feature, enable when implemented.
+void testSuspend0Timeout() {
+ ClientSessionFixture fix;
+ fix.session.suspend(); // session has 0 timeout.
+ try {
+ fix.connection.resume(fix.session);
+ BOOST_FAIL("Expected InvalidArgumentException.");
+ } catch(const InternalErrorException&) {}
+}
+
QPID_AUTO_TEST_CASE(testUseSuspendedError)
{
ClientSessionFixture fix;
@@ -161,6 +171,18 @@ QPID_AUTO_TEST_CASE(testUseSuspendedError)
} catch(const NotAttachedException&) {}
}
+// FIXME aconway 2009-06-17: test for unimplemented feature, enable when implemented.
+void testSuspendResume() {
+ ClientSessionFixture fix;
+ fix.session.timeout(60);
+ fix.session.suspend();
+ // Make sure we are still subscribed after resume.
+ fix.connection.resume(fix.session);
+ fix.session.messageTransfer(arg::content=Message("my-message", "my-queue"));
+ BOOST_CHECK_EQUAL("my-message", fix.subs.get("my-queue", TIME_SEC).getData());
+}
+
+
QPID_AUTO_TEST_CASE(testSendToSelf) {
ClientSessionFixture fix;
SimpleListener mylistener;
@@ -249,12 +271,8 @@ QPID_AUTO_TEST_CASE(testOpenFailure) {
QPID_AUTO_TEST_CASE(testPeriodicExpiration) {
Broker::Options opts;
opts.queueCleanInterval = 1;
- opts.queueFlowStopRatio = 0;
- opts.queueFlowResumeRatio = 0;
ClientSessionFixture fix(opts);
- FieldTable args;
- args.setInt("qpid.max_count",10);
- fix.session.queueDeclare(arg::queue="my-queue", arg::exclusive=true, arg::autoDelete=true, arg::arguments=args);
+ fix.session.queueDeclare(arg::queue="my-queue", arg::exclusive=true, arg::autoDelete=true);
for (uint i = 0; i < 10; i++) {
Message m((boost::format("Message_%1%") % (i+1)).str(), "my-queue");
@@ -265,7 +283,6 @@ QPID_AUTO_TEST_CASE(testPeriodicExpiration) {
BOOST_CHECK_EQUAL(fix.session.queueQuery(string("my-queue")).getMessageCount(), 10u);
qpid::sys::sleep(2);
BOOST_CHECK_EQUAL(fix.session.queueQuery(string("my-queue")).getMessageCount(), 5u);
- fix.session.messageTransfer(arg::content=Message("Message_11", "my-queue"));//ensure policy is also updated
}
QPID_AUTO_TEST_CASE(testExpirationOnPop) {
diff --git a/cpp/src/tests/ExchangeTest.cpp b/cpp/src/tests/ExchangeTest.cpp
index fe72f42a46..88a1cd99c2 100644
--- a/cpp/src/tests/ExchangeTest.cpp
+++ b/cpp/src/tests/ExchangeTest.cpp
@@ -253,7 +253,7 @@ QPID_AUTO_TEST_CASE(testIVEOption)
TopicExchange topic ("topic1", false, args);
intrusive_ptr<Message> msg1 = cmessage("direct1", "abc");
- msg1->insertCustomProperty("a", "abc");
+ msg1->getProperties<MessageProperties>()->getApplicationHeaders().setString("a", "abc");
DeliverableMessage dmsg1(msg1);
FieldTable args2;
diff --git a/cpp/src/tests/ForkedBroker.cpp b/cpp/src/tests/ForkedBroker.cpp
index 10674b5175..53eaa7e1ce 100644
--- a/cpp/src/tests/ForkedBroker.cpp
+++ b/cpp/src/tests/ForkedBroker.cpp
@@ -68,7 +68,8 @@ ForkedBroker::~ForkedBroker() {
}
if (!dataDir.empty())
{
- (void) ::system(("rm -rf "+dataDir).c_str());
+ int unused_ret; // Suppress warnings about ignoring return value.
+ unused_ret = ::system(("rm -rf "+dataDir).c_str());
}
}
diff --git a/cpp/src/tests/IncompleteMessageList.cpp b/cpp/src/tests/IncompleteMessageList.cpp
new file mode 100644
index 0000000000..10782572e5
--- /dev/null
+++ b/cpp/src/tests/IncompleteMessageList.cpp
@@ -0,0 +1,134 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+#include <iostream>
+#include <sstream>
+#include "qpid/broker/Message.h"
+#include "qpid/broker/NullMessageStore.h"
+#include "qpid/broker/Queue.h"
+#include "qpid/broker/IncompleteMessageList.h"
+
+#include "unit_test.h"
+
+namespace qpid {
+namespace tests {
+
+QPID_AUTO_TEST_SUITE(IncompleteMessageListTestSuite)
+
+using namespace qpid::broker;
+using namespace qpid::framing;
+
+struct Checker
+{
+ std::list<SequenceNumber> ids;
+
+ Checker() { }
+
+ Checker(uint start, uint end) {
+ for (uint i = start; i <= end; i++) {
+ ids.push_back(i);
+ }
+ }
+
+ Checker& expect(const SequenceNumber& id) {
+ ids.push_back(id);
+ return *this;
+ }
+
+ void operator()(boost::intrusive_ptr<Message> msg) {
+ BOOST_CHECK(!ids.empty());
+ BOOST_CHECK_EQUAL(msg->getCommandId(), ids.front());
+ ids.pop_front();
+ }
+};
+
+QPID_AUTO_TEST_CASE(testProcessSimple)
+{
+ IncompleteMessageList list;
+ SequenceNumber counter(1);
+ //fill up list with messages
+ for (int i = 0; i < 5; i++) {
+ boost::intrusive_ptr<Message> msg(new Message(counter++));
+ list.add(msg);
+ }
+ //process and ensure they are all passed to completion listener
+ list.process(Checker(1, 5), false);
+ //process again and ensure none are resent to listener
+ list.process(Checker(), false);
+}
+
+QPID_AUTO_TEST_CASE(testProcessWithIncomplete)
+{
+ Queue::shared_ptr queue;
+ IncompleteMessageList list;
+ SequenceNumber counter(1);
+ boost::intrusive_ptr<Message> middle;
+ //fill up list with messages
+ for (int i = 0; i < 5; i++) {
+ boost::intrusive_ptr<Message> msg(new Message(counter++));
+ list.add(msg);
+ if (i == 2) {
+ //mark a message in the middle as incomplete
+ msg->enqueueAsync(queue, 0);
+ middle = msg;
+ }
+ }
+ //process and ensure only message upto incomplete message are passed to listener
+ list.process(Checker(1, 2), false);
+ //mark message complete and re-process to get remaining messages sent to listener
+ middle->enqueueComplete();
+ list.process(Checker(3, 5), false);
+}
+
+
+struct MockStore : public NullMessageStore
+{
+ Queue::shared_ptr queue;
+ boost::intrusive_ptr<Message> msg;
+
+ void flush(const qpid::broker::PersistableQueue& q) {
+ BOOST_CHECK_EQUAL(queue.get(), &q);
+ msg->enqueueComplete();
+ }
+};
+
+QPID_AUTO_TEST_CASE(testSyncProcessWithIncomplete)
+{
+ IncompleteMessageList list;
+ SequenceNumber counter(1);
+ MockStore store;
+ store.queue = Queue::shared_ptr(new Queue("mock-queue", false, &store));
+ //fill up list with messages
+ for (int i = 0; i < 5; i++) {
+ boost::intrusive_ptr<Message> msg(new Message(counter++));
+ list.add(msg);
+ if (i == 2) {
+ //mark a message in the middle as incomplete
+ msg->enqueueAsync(store.queue, &store);
+ store.msg = msg;
+ }
+ }
+ //process with sync bit specified and ensure that all messages are passed to listener
+ list.process(Checker(1, 5), true);
+}
+
+QPID_AUTO_TEST_SUITE_END()
+
+}} // namespace qpid::tests
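
The Checker functor in the restored test also documents the completion-listener contract: IncompleteMessageList::process() accepts a callable taking a boost::intrusive_ptr<Message> and invokes it once per message whose enqueue has completed (or for every pending message when the sync flag forces a store flush). A trimmed sketch of an alternative listener, assuming the same headers as the test above:

struct CountingListener
{
    size_t delivered;
    CountingListener() : delivered(0) {}
    // Invoked by IncompleteMessageList::process() for each completed message.
    void operator()(boost::intrusive_ptr<qpid::broker::Message>) { ++delivered; }
};
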
diff --git a/cpp/src/tests/Makefile.am b/cpp/src/tests/Makefile.am
index 3c9ca1b70f..07405bcd8f 100644
--- a/cpp/src/tests/Makefile.am
+++ b/cpp/src/tests/Makefile.am
@@ -75,7 +75,7 @@ unit_test_SOURCES= unit_test.cpp unit_test.h \
MessagingThreadTests.cpp \
MessagingFixture.h \
ClientSessionTest.cpp \
- BrokerFixture.h \
+ BrokerFixture.h SocketProxy.h \
exception_test.cpp \
RefCounted.cpp \
SessionState.cpp logging.cpp \
@@ -87,6 +87,7 @@ unit_test_SOURCES= unit_test.cpp unit_test.h \
InlineVector.cpp \
SequenceSet.cpp \
StringUtils.cpp \
+ IncompleteMessageList.cpp \
RangeSet.cpp \
AtomicValue.cpp \
QueueTest.cpp \
@@ -98,7 +99,6 @@ unit_test_SOURCES= unit_test.cpp unit_test.h \
MessageTest.cpp \
QueueRegistryTest.cpp \
QueuePolicyTest.cpp \
- QueueFlowLimitTest.cpp \
FramingTest.cpp \
HeaderTest.cpp \
SequenceNumberTest.cpp \
@@ -124,8 +124,7 @@ unit_test_SOURCES= unit_test.cpp unit_test.h \
Variant.cpp \
Address.cpp \
ClientMessage.cpp \
- Qmf2.cpp \
- BrokerOptions.cpp
+ Qmf2.cpp
if HAVE_XML
unit_test_SOURCES+= XmlClientSessionTest.cpp
@@ -287,27 +286,31 @@ check_PROGRAMS+=datagen
datagen_SOURCES=datagen.cpp
datagen_LDADD=$(lib_common) $(lib_client)
+check_PROGRAMS+=qrsh_server
+qrsh_server_SOURCES=qrsh_server.cpp
+qrsh_server_LDADD=$(lib_client)
+
+check_PROGRAMS+=qrsh_run
+qrsh_run_SOURCES=qrsh_run.cpp
+qrsh_run_LDADD=$(lib_client)
+
+check_PROGRAMS+=qrsh
+qrsh_SOURCES=qrsh.cpp
+qrsh_LDADD=$(lib_client)
+
check_PROGRAMS+=qpid-stream
qpid_stream_INCLUDES=$(PUBLIC_INCLUDES)
qpid_stream_SOURCES=qpid-stream.cpp
qpid_stream_LDADD=$(lib_messaging)
-check_PROGRAMS+=msg_group_test
-msg_group_test_INCLUDES=$(PUBLIC_INCLUDES)
-msg_group_test_SOURCES=msg_group_test.cpp
-msg_group_test_LDADD=$(lib_messaging)
-
TESTS_ENVIRONMENT = \
VALGRIND=$(VALGRIND) \
LIBTOOL="$(LIBTOOL)" \
QPID_DATA_DIR= \
$(srcdir)/run_test
-system_tests = qpid-client-test quick_perftest quick_topictest run_header_test quick_txtest \
- run_msg_group_tests
-TESTS += start_broker $(system_tests) python_tests stop_broker run_federation_tests run_federation_sys_tests \
- run_acl_tests run_cli_tests replication_test dynamic_log_level_test \
- run_queue_flow_limit_tests ipv6_test
+system_tests = qpid-client-test quick_perftest quick_topictest run_header_test quick_txtest
+TESTS += start_broker $(system_tests) python_tests stop_broker run_federation_tests run_acl_tests run_cli_tests replication_test dynamic_log_level_test
EXTRA_DIST += \
run_test vg_check \
@@ -322,8 +325,6 @@ EXTRA_DIST += \
config.null \
ais_check \
run_federation_tests \
- run_federation_sys_tests \
- run_long_federation_sys_tests \
run_cli_tests \
run_acl_tests \
.valgrind.supp \
@@ -348,10 +349,7 @@ EXTRA_DIST += \
run_test.ps1 \
start_broker.ps1 \
stop_broker.ps1 \
- topictest.ps1 \
- run_queue_flow_limit_tests \
- run_msg_group_tests \
- ipv6_test
+ topictest.ps1
check_LTLIBRARIES += libdlclose_noop.la
libdlclose_noop_la_LDFLAGS = -module -rpath $(abs_builddir)
@@ -362,11 +360,7 @@ CLEANFILES+=valgrind.out *.log *.vglog* dummy_test qpidd.port $(unit_wrappers)
# Longer running stability tests, not run by default check: target.
# Not run under valgrind, too slow
-LONG_TESTS+=start_broker \
- fanout_perftest shared_perftest multiq_perftest topic_perftest run_ring_queue_test \
- run_msg_group_tests_soak \
- stop_broker \
- run_long_federation_sys_tests \
+LONG_TESTS+=start_broker fanout_perftest shared_perftest multiq_perftest topic_perftest run_ring_queue_test stop_broker \
run_failover_soak reliable_replication_test \
federated_cluster_test_with_node_failure
@@ -378,8 +372,7 @@ EXTRA_DIST+= \
run_failover_soak \
reliable_replication_test \
federated_cluster_test_with_node_failure \
- sasl_test_setup.sh \
- run_msg_group_tests_soak
+ sasl_test_setup.sh
check-long:
$(MAKE) check TESTS="$(LONG_TESTS)" VALGRIND=
diff --git a/cpp/src/tests/MessageReplayTracker.cpp b/cpp/src/tests/MessageReplayTracker.cpp
index e35f673683..3d79ee53c2 100644
--- a/cpp/src/tests/MessageReplayTracker.cpp
+++ b/cpp/src/tests/MessageReplayTracker.cpp
@@ -51,7 +51,7 @@ class ReplayBufferChecker
QPID_AUTO_TEST_CASE(testReplay)
{
- SessionFixture fix;
+ ProxySessionFixture fix;
fix.session.queueDeclare(arg::queue="my-queue", arg::exclusive=true, arg::autoDelete=true);
MessageReplayTracker tracker(10);
@@ -77,7 +77,7 @@ QPID_AUTO_TEST_CASE(testReplay)
QPID_AUTO_TEST_CASE(testCheckCompletion)
{
- SessionFixture fix;
+ ProxySessionFixture fix;
fix.session.queueDeclare(arg::queue="my-queue", arg::exclusive=true, arg::autoDelete=true);
MessageReplayTracker tracker(10);
diff --git a/cpp/src/tests/MessagingFixture.h b/cpp/src/tests/MessagingFixture.h
index 2312a87e9d..715de09bad 100644
--- a/cpp/src/tests/MessagingFixture.h
+++ b/cpp/src/tests/MessagingFixture.h
@@ -27,19 +27,15 @@
#include "qpid/client/Connection.h"
#include "qpid/client/Session.h"
#include "qpid/framing/Uuid.h"
-#include "qpid/messaging/Address.h"
#include "qpid/messaging/Connection.h"
#include "qpid/messaging/Session.h"
#include "qpid/messaging/Sender.h"
#include "qpid/messaging/Receiver.h"
#include "qpid/messaging/Message.h"
-#include "qpid/types/Variant.h"
namespace qpid {
namespace tests {
-using qpid::types::Variant;
-
struct BrokerAdmin
{
qpid::client::Connection connection;
@@ -227,119 +223,6 @@ inline void receive(messaging::Receiver& receiver, uint count = 1, uint start =
}
}
-
-class MethodInvoker
-{
- public:
- MethodInvoker(messaging::Session& session) : replyTo("#; {create:always, node:{x-declare:{auto-delete:true}}}"),
- sender(session.createSender("qmf.default.direct/broker")),
- receiver(session.createReceiver(replyTo)) {}
-
- void createExchange(const std::string& name, const std::string& type, bool durable=false)
- {
- Variant::Map params;
- params["name"]=name;
- params["type"]="exchange";
- params["properties"] = Variant::Map();
- params["properties"].asMap()["exchange-type"] = type;
- params["properties"].asMap()["durable"] = durable;
- methodRequest("create", params);
- }
-
- void deleteExchange(const std::string& name)
- {
- Variant::Map params;
- params["name"]=name;
- params["type"]="exchange";
- methodRequest("delete", params);
- }
-
- void createQueue(const std::string& name, bool durable=false, bool autodelete=false,
- const Variant::Map& options=Variant::Map())
- {
- Variant::Map params;
- params["name"]=name;
- params["type"]="queue";
- params["properties"] = options;
- params["properties"].asMap()["durable"] = durable;
- params["properties"].asMap()["auto-delete"] = autodelete;
- methodRequest("create", params);
- }
-
- void deleteQueue(const std::string& name)
- {
- Variant::Map params;
- params["name"]=name;
- params["type"]="queue";
- methodRequest("delete", params);
- }
-
- void bind(const std::string& exchange, const std::string& queue, const std::string& key,
- const Variant::Map& options=Variant::Map())
- {
- Variant::Map params;
- params["name"]=(boost::format("%1%/%2%/%3%") % (exchange) % (queue) % (key)).str();
- params["type"]="binding";
- params["properties"] = options;
- methodRequest("create", params);
- }
-
- void unbind(const std::string& exchange, const std::string& queue, const std::string& key)
- {
- Variant::Map params;
- params["name"]=(boost::format("%1%/%2%/%3%") % (exchange) % (queue) % (key)).str();
- params["type"]="binding";
- methodRequest("delete", params);
- }
-
- void methodRequest(const std::string& method, const Variant::Map& inParams, Variant::Map* outParams = 0)
- {
- Variant::Map content;
- Variant::Map objectId;
- objectId["_object_name"] = "org.apache.qpid.broker:broker:amqp-broker";
- content["_object_id"] = objectId;
- content["_method_name"] = method;
- content["_arguments"] = inParams;
-
- messaging::Message request;
- request.setReplyTo(replyTo);
- request.getProperties()["x-amqp-0-10.app-id"] = "qmf2";
- request.getProperties()["qmf.opcode"] = "_method_request";
- encode(content, request);
-
- sender.send(request);
-
- messaging::Message response;
- if (receiver.fetch(response, messaging::Duration::SECOND*5)) {
- if (response.getProperties()["x-amqp-0-10.app-id"] == "qmf2") {
- std::string opcode = response.getProperties()["qmf.opcode"];
- if (opcode == "_method_response") {
- if (outParams) {
- Variant::Map m;
- decode(response, m);
- *outParams = m["_arguments"].asMap();
- }
- } else if (opcode == "_exception") {
- Variant::Map m;
- decode(response, m);
- throw Exception(QPID_MSG("Error: " << m["_values"]));
- } else {
- throw Exception(QPID_MSG("Invalid response received, unexpected opcode: " << opcode));
- }
- } else {
- throw Exception(QPID_MSG("Invalid response received, not a qmfv2 message: app-id="
- << response.getProperties()["x-amqp-0-10.app-id"]));
- }
- } else {
- throw Exception(QPID_MSG("No response received"));
- }
- }
- private:
- messaging::Address replyTo;
- messaging::Sender sender;
- messaging::Receiver receiver;
-};
-
}} // namespace qpid::tests
#endif /*!TESTS_MESSAGINGFIXTURE_H*/
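
For reference, the MethodInvoker removed above was the only helper in this tree that speaks QMF2 directly: it posts a map-encoded "_method_request" to qmf.default.direct/broker and waits for the "_method_response" on a private reply address. A condensed sketch of that request shape, using only the qpid::messaging and qpid::types calls visible in the removed code (illustrative, not a replacement for the helper):

#include <string>
#include "qpid/messaging/Address.h"
#include "qpid/messaging/Message.h"
#include "qpid/types/Variant.h"

using qpid::types::Variant;
using namespace qpid::messaging;

// Build a QMF2 method request against the broker management object;
// 'method' and 'inParams' follow the same convention as the removed MethodInvoker.
Message makeMethodRequest(const std::string& method, const Variant::Map& inParams,
                          const Address& replyTo)
{
    Variant::Map objectId, content;
    objectId["_object_name"] = "org.apache.qpid.broker:broker:amqp-broker";
    content["_object_id"] = objectId;
    content["_method_name"] = method;
    content["_arguments"] = inParams;

    Message request;
    request.setReplyTo(replyTo);
    request.getProperties()["x-amqp-0-10.app-id"] = "qmf2";
    request.getProperties()["qmf.opcode"] = "_method_request";
    encode(content, request);   // map-encode the body, as in the removed helper
    return request;
}
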
diff --git a/cpp/src/tests/MessagingSessionTests.cpp b/cpp/src/tests/MessagingSessionTests.cpp
index 9d5db84bb4..991ec847bf 100644
--- a/cpp/src/tests/MessagingSessionTests.cpp
+++ b/cpp/src/tests/MessagingSessionTests.cpp
@@ -611,28 +611,6 @@ QPID_AUTO_TEST_CASE(testAssertPolicyQueue)
fix.admin.deleteQueue("q");
}
-QPID_AUTO_TEST_CASE(testAssertExchangeOption)
-{
- MessagingFixture fix;
- std::string a1 = "e; {create:always, assert:always, node:{type:topic, x-declare:{type:direct, arguments:{qpid.msg_sequence:True}}}}";
- Sender s1 = fix.session.createSender(a1);
- s1.close();
- Receiver r1 = fix.session.createReceiver(a1);
- r1.close();
-
- std::string a2 = "e; {assert:receiver, node:{type:topic, x-declare:{type:fanout, arguments:{qpid.msg_sequence:True}}}}";
- Sender s2 = fix.session.createSender(a2);
- s2.close();
- BOOST_CHECK_THROW(fix.session.createReceiver(a2), qpid::messaging::AssertionFailed);
-
- std::string a3 = "e; {assert:sender, node:{x-declare:{arguments:{qpid.msg_sequence:False}}}}";
- BOOST_CHECK_THROW(fix.session.createSender(a3), qpid::messaging::AssertionFailed);
- Receiver r3 = fix.session.createReceiver(a3);
- r3.close();
-
- fix.admin.deleteExchange("e");
-}
-
QPID_AUTO_TEST_CASE(testGetSender)
{
QueueFixture fix;
@@ -912,212 +890,6 @@ QPID_AUTO_TEST_CASE(testAcknowledge)
BOOST_CHECK(!fix.session.createReceiver(fix.queue).fetch(m, Duration::IMMEDIATE));
}
-QPID_AUTO_TEST_CASE(testQmfCreateAndDelete)
-{
- MessagingFixture fix(Broker::Options(), true/*enable management*/);
- MethodInvoker control(fix.session);
- control.createQueue("my-queue");
- control.createExchange("my-exchange", "topic");
- control.bind("my-exchange", "my-queue", "subject1");
-
- Sender sender = fix.session.createSender("my-exchange");
- Receiver receiver = fix.session.createReceiver("my-queue");
- Message out;
- out.setSubject("subject1");
- out.setContent("one");
- sender.send(out);
- Message in;
- BOOST_CHECK(receiver.fetch(in, Duration::SECOND*5));
- BOOST_CHECK_EQUAL(out.getContent(), in.getContent());
- control.unbind("my-exchange", "my-queue", "subject1");
- control.bind("my-exchange", "my-queue", "subject2");
-
- out.setContent("two");
- sender.send(out);//should be dropped
-
- out.setSubject("subject2");
- out.setContent("three");
- sender.send(out);//should not be dropped
-
- BOOST_CHECK(receiver.fetch(in, Duration::SECOND*5));
- BOOST_CHECK_EQUAL(out.getContent(), in.getContent());
- BOOST_CHECK(!receiver.fetch(in, Duration::IMMEDIATE));
- sender.close();
- receiver.close();
-
- control.deleteExchange("my-exchange");
- messaging::Session other = fix.connection.createSession();
- {
- ScopedSuppressLogging sl;
- BOOST_CHECK_THROW(other.createSender("my-exchange"), qpid::messaging::NotFound);
- }
- control.deleteQueue("my-queue");
- other = fix.connection.createSession();
- {
- ScopedSuppressLogging sl;
- BOOST_CHECK_THROW(other.createReceiver("my-queue"), qpid::messaging::NotFound);
- }
-}
-
-QPID_AUTO_TEST_CASE(testRejectAndCredit)
-{
- //Ensure credit is restored on completing rejected messages
- QueueFixture fix;
- Sender sender = fix.session.createSender(fix.queue);
- Receiver receiver = fix.session.createReceiver(fix.queue);
-
- const uint count(10);
- receiver.setCapacity(count);
- for (uint i = 0; i < count; i++) {
- sender.send(Message((boost::format("Message_%1%") % (i+1)).str()));
- }
-
- Message in;
- for (uint i = 0; i < count; ++i) {
- if (receiver.fetch(in, Duration::SECOND)) {
- BOOST_CHECK_EQUAL(in.getContent(), (boost::format("Message_%1%") % (i+1)).str());
- fix.session.reject(in);
- } else {
- BOOST_FAIL((boost::format("Message_%1% not received as expected") % (i+1)).str());
- break;
- }
- }
- //send another batch of messages
- for (uint i = 0; i < count; i++) {
- sender.send(Message((boost::format("Message_%1%") % (i+count)).str()));
- }
-
- for (uint i = 0; i < count; ++i) {
- if (receiver.fetch(in, Duration::SECOND)) {
- BOOST_CHECK_EQUAL(in.getContent(), (boost::format("Message_%1%") % (i+count)).str());
- } else {
- BOOST_FAIL((boost::format("Message_%1% not received as expected") % (i+count)).str());
- break;
- }
- }
- fix.session.acknowledge();
- receiver.close();
- sender.close();
-}
-
-QPID_AUTO_TEST_CASE(testTtlForever)
-{
- QueueFixture fix;
- Sender sender = fix.session.createSender(fix.queue);
- Message out("I want to live forever!");
- out.setTtl(Duration::FOREVER);
- sender.send(out, true);
- Receiver receiver = fix.session.createReceiver(fix.queue);
- Message in = receiver.fetch(Duration::IMMEDIATE);
- fix.session.acknowledge();
- BOOST_CHECK_EQUAL(in.getContent(), out.getContent());
- BOOST_CHECK(in.getTtl() == Duration::FOREVER);
-}
-
-QPID_AUTO_TEST_CASE(testExclusiveTopicSubscriber)
-{
- TopicFixture fix;
- std::string address = (boost::format("%1%; { link: { name: 'my-subscription', x-declare: { auto-delete: true, exclusive: true }}}") % fix.topic).str();
- Sender sender = fix.session.createSender(fix.topic);
- Receiver receiver1 = fix.session.createReceiver(address);
- {
- ScopedSuppressLogging sl;
- try {
- fix.session.createReceiver(address);
- fix.session.sync();
- BOOST_FAIL("Expected exception.");
- } catch (const MessagingException& /*e*/) {}
- }
-}
-
-QPID_AUTO_TEST_CASE(testNonExclusiveSubscriber)
-{
- TopicFixture fix;
- std::string address = (boost::format("%1%; {node:{type:topic}, link:{name:'my-subscription', x-declare:{auto-delete:true, exclusive:false}}}") % fix.topic).str();
- Receiver receiver1 = fix.session.createReceiver(address);
- Receiver receiver2 = fix.session.createReceiver(address);
- Sender sender = fix.session.createSender(fix.topic);
- sender.send(Message("one"), true);
- Message in = receiver1.fetch(Duration::IMMEDIATE);
- BOOST_CHECK_EQUAL(in.getContent(), std::string("one"));
- sender.send(Message("two"), true);
- in = receiver2.fetch(Duration::IMMEDIATE);
- BOOST_CHECK_EQUAL(in.getContent(), std::string("two"));
- fix.session.acknowledge();
-}
-
-QPID_AUTO_TEST_CASE(testAcknowledgeUpTo)
-{
- QueueFixture fix;
- Sender sender = fix.session.createSender(fix.queue);
- const uint count(20);
- for (uint i = 0; i < count; ++i) {
- sender.send(Message((boost::format("Message_%1%") % (i+1)).str()));
- }
-
- Session other = fix.connection.createSession();
- Receiver receiver = other.createReceiver(fix.queue);
- std::vector<Message> messages;
- for (uint i = 0; i < count; ++i) {
- Message msg = receiver.fetch();
- BOOST_CHECK_EQUAL(msg.getContent(), (boost::format("Message_%1%") % (i+1)).str());
- messages.push_back(msg);
- }
- const uint batch = 10;
- other.acknowledgeUpTo(messages[batch-1]);//acknowledge first 10 messages only
-
- messages.clear();
- other.sync();
- other.close();
-
- other = fix.connection.createSession();
- receiver = other.createReceiver(fix.queue);
- Message msg;
- for (uint i = 0; i < (count-batch); ++i) {
- msg = receiver.fetch();
- BOOST_CHECK_EQUAL(msg.getContent(), (boost::format("Message_%1%") % (i+1+batch)).str());
- }
- other.acknowledgeUpTo(msg);
- other.sync();
- other.close();
-
- Message m;
- //check queue is empty
- BOOST_CHECK(!fix.session.createReceiver(fix.queue).fetch(m, Duration::IMMEDIATE));
-}
-
-QPID_AUTO_TEST_CASE(testCreateBindingsOnStandardExchange)
-{
- QueueFixture fix;
- Sender sender = fix.session.createSender((boost::format("amq.direct; {create:always, node:{type:topic, x-bindings:[{queue:%1%, key:my-subject}]}}") % fix.queue).str());
- Message out("test-message");
- out.setSubject("my-subject");
- sender.send(out);
- Receiver receiver = fix.session.createReceiver(fix.queue);
- Message in = receiver.fetch(Duration::SECOND * 5);
- fix.session.acknowledge();
- BOOST_CHECK_EQUAL(in.getContent(), out.getContent());
- BOOST_CHECK_EQUAL(in.getSubject(), out.getSubject());
-}
-
-QPID_AUTO_TEST_CASE(testUnsubscribeOnClose)
-{
- MessagingFixture fix;
- Sender sender = fix.session.createSender("my-exchange/my-subject; {create: always, delete:sender, node:{type:topic, x-declare:{alternate-exchange:amq.fanout}}}");
- Receiver receiver = fix.session.createReceiver("my-exchange/my-subject");
- Receiver deadletters = fix.session.createReceiver("amq.fanout");
-
- sender.send(Message("first"));
- Message in = receiver.fetch(Duration::SECOND);
- BOOST_CHECK_EQUAL(in.getContent(), std::string("first"));
- fix.session.acknowledge();
- receiver.close();
- sender.send(Message("second"));
- in = deadletters.fetch(Duration::SECOND);
- BOOST_CHECK_EQUAL(in.getContent(), std::string("second"));
- fix.session.acknowledge();
-}
-
QPID_AUTO_TEST_SUITE_END()
}} // namespace qpid::tests
diff --git a/cpp/src/tests/Qmf2.cpp b/cpp/src/tests/Qmf2.cpp
index bc263d5c6d..66c774accd 100644
--- a/cpp/src/tests/Qmf2.cpp
+++ b/cpp/src/tests/Qmf2.cpp
@@ -23,36 +23,12 @@
#include "qmf/QueryImpl.h"
#include "qmf/SchemaImpl.h"
#include "qmf/exceptions.h"
-#include "qpid/messaging/Connection.h"
-#include "qmf/PosixEventNotifierImpl.h"
-#include "qmf/AgentSession.h"
-#include "qmf/AgentSessionImpl.h"
-#include "qmf/ConsoleSession.h"
-#include "qmf/ConsoleSessionImpl.h"
+
#include "unit_test.h"
-using namespace std;
using namespace qpid::types;
-using namespace qpid::messaging;
using namespace qmf;
-bool isReadable(int fd)
-{
- fd_set rfds;
- struct timeval tv;
- int nfds, result;
-
- FD_ZERO(&rfds);
- FD_SET(fd, &rfds);
- nfds = fd + 1;
- tv.tv_sec = 0;
- tv.tv_usec = 0;
-
- result = select(nfds, &rfds, NULL, NULL, &tv);
-
- return result > 0;
-}
-
namespace qpid {
namespace tests {
@@ -339,84 +315,6 @@ QPID_AUTO_TEST_CASE(testSchema)
BOOST_CHECK_THROW(method.getArgument(3), QmfException);
}
-QPID_AUTO_TEST_CASE(testAgentSessionEventListener)
-{
- Connection connection("localhost");
- AgentSession session(connection, "");
- posix::EventNotifier notifier(session);
-
- AgentSessionImpl& sessionImpl = AgentSessionImplAccess::get(session);
-
- BOOST_CHECK(sessionImpl.getEventNotifier() != 0);
-}
-
-QPID_AUTO_TEST_CASE(testConsoleSessionEventListener)
-{
- Connection connection("localhost");
- ConsoleSession session(connection, "");
- posix::EventNotifier notifier(session);
-
- ConsoleSessionImpl& sessionImpl = ConsoleSessionImplAccess::get(session);
-
- BOOST_CHECK(sessionImpl.getEventNotifier() != 0);
-}
-
-QPID_AUTO_TEST_CASE(testGetHandle)
-{
- Connection connection("localhost");
- ConsoleSession session(connection, "");
- posix::EventNotifier notifier(session);
-
- BOOST_CHECK(notifier.getHandle() > 0);
-}
-
-QPID_AUTO_TEST_CASE(testSetReadableToFalse)
-{
- Connection connection("localhost");
- ConsoleSession session(connection, "");
- posix::EventNotifier notifier(session);
- PosixEventNotifierImplAccess::get(notifier).setReadable(false);
-
- bool readable(isReadable(notifier.getHandle()));
- BOOST_CHECK(!readable);
-}
-
-QPID_AUTO_TEST_CASE(testSetReadable)
-{
- Connection connection("localhost");
- ConsoleSession session(connection, "");
- posix::EventNotifier notifier(session);
- PosixEventNotifierImplAccess::get(notifier).setReadable(true);
-
- bool readable(isReadable(notifier.getHandle()));
- BOOST_CHECK(readable);
-}
-
-QPID_AUTO_TEST_CASE(testSetReadableMultiple)
-{
- Connection connection("localhost");
- ConsoleSession session(connection, "");
- posix::EventNotifier notifier(session);
- for (int i = 0; i < 15; i++)
- PosixEventNotifierImplAccess::get(notifier).setReadable(true);
- PosixEventNotifierImplAccess::get(notifier).setReadable(false);
-
- bool readable(isReadable(notifier.getHandle()));
- BOOST_CHECK(!readable);
-}
-
-QPID_AUTO_TEST_CASE(testDeleteNotifier)
-{
- Connection connection("localhost");
- ConsoleSession session(connection, "");
- ConsoleSessionImpl& sessionImpl = ConsoleSessionImplAccess::get(session);
- {
- posix::EventNotifier notifier(session);
- BOOST_CHECK(sessionImpl.getEventNotifier() != 0);
- }
- BOOST_CHECK(sessionImpl.getEventNotifier() == 0);
-}
-
QPID_AUTO_TEST_SUITE_END()
}} // namespace qpid::tests
diff --git a/cpp/src/tests/QueueEvents.cpp b/cpp/src/tests/QueueEvents.cpp
index cea8bbf0db..bd18fa45fb 100644
--- a/cpp/src/tests/QueueEvents.cpp
+++ b/cpp/src/tests/QueueEvents.cpp
@@ -147,7 +147,7 @@ struct EventRecorder
QPID_AUTO_TEST_CASE(testSystemLevelEventProcessing)
{
- SessionFixture fixture;
+ ProxySessionFixture fixture;
//register dummy event listener to broker
EventRecorder listener;
fixture.broker->getQueueEvents().registerListener("recorder", boost::bind(&EventRecorder::handle, &listener, _1));
@@ -194,7 +194,7 @@ QPID_AUTO_TEST_CASE(testSystemLevelEventProcessing)
QPID_AUTO_TEST_CASE(testSystemLevelEventProcessing_enqueuesOnly)
{
- SessionFixture fixture;
+ ProxySessionFixture fixture;
//register dummy event listener to broker
EventRecorder listener;
fixture.broker->getQueueEvents().registerListener("recorder", boost::bind(&EventRecorder::handle, &listener, _1));
diff --git a/cpp/src/tests/QueueFlowLimitTest.cpp b/cpp/src/tests/QueueFlowLimitTest.cpp
deleted file mode 100644
index 8a6923fb09..0000000000
--- a/cpp/src/tests/QueueFlowLimitTest.cpp
+++ /dev/null
@@ -1,463 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-#include <sstream>
-#include <deque>
-#include "unit_test.h"
-#include "test_tools.h"
-
-#include "qpid/broker/QueuePolicy.h"
-#include "qpid/broker/QueueFlowLimit.h"
-#include "qpid/sys/Time.h"
-#include "qpid/framing/reply_exceptions.h"
-#include "MessageUtils.h"
-#include "BrokerFixture.h"
-
-using namespace qpid::broker;
-using namespace qpid::framing;
-
-namespace qpid {
-namespace tests {
-
-QPID_AUTO_TEST_SUITE(QueueFlowLimitTestSuite)
-
-namespace {
-
-class TestFlow : public QueueFlowLimit
-{
-public:
- TestFlow(uint32_t flowStopCount, uint32_t flowResumeCount,
- uint64_t flowStopSize, uint64_t flowResumeSize) :
- QueueFlowLimit(0, flowStopCount, flowResumeCount, flowStopSize, flowResumeSize)
- {}
- virtual ~TestFlow() {}
-
- static TestFlow *createTestFlow(const qpid::framing::FieldTable& settings)
- {
- FieldTable::ValuePtr v;
-
- v = settings.get(flowStopCountKey);
- uint32_t flowStopCount = (v) ? (uint32_t)v->get<int64_t>() : 0;
- v = settings.get(flowResumeCountKey);
- uint32_t flowResumeCount = (v) ? (uint32_t)v->get<int64_t>() : 0;
- v = settings.get(flowStopSizeKey);
- uint64_t flowStopSize = (v) ? (uint64_t)v->get<int64_t>() : 0;
- v = settings.get(flowResumeSizeKey);
- uint64_t flowResumeSize = (v) ? (uint64_t)v->get<int64_t>() : 0;
-
- return new TestFlow(flowStopCount, flowResumeCount, flowStopSize, flowResumeSize);
- }
-
- static QueueFlowLimit *getQueueFlowLimit(const qpid::framing::FieldTable& settings)
- {
- return QueueFlowLimit::createLimit(0, settings);
- }
-};
-
-
-
-QueuedMessage createMessage(uint32_t size)
-{
- static uint32_t seqNum;
- QueuedMessage msg;
- msg.payload = MessageUtils::createMessage();
- msg.position = ++seqNum;
- MessageUtils::addContent(msg.payload, std::string (size, 'x'));
- return msg;
-}
-}
-
-QPID_AUTO_TEST_CASE(testFlowCount)
-{
- FieldTable args;
- args.setInt(QueueFlowLimit::flowStopCountKey, 7);
- args.setInt(QueueFlowLimit::flowResumeCountKey, 5);
-
- std::auto_ptr<TestFlow> flow(TestFlow::createTestFlow(args));
-
- BOOST_CHECK_EQUAL((uint32_t) 7, flow->getFlowStopCount());
- BOOST_CHECK_EQUAL((uint32_t) 5, flow->getFlowResumeCount());
- BOOST_CHECK_EQUAL((uint32_t) 0, flow->getFlowStopSize());
- BOOST_CHECK_EQUAL((uint32_t) 0, flow->getFlowResumeSize());
- BOOST_CHECK(!flow->isFlowControlActive());
- BOOST_CHECK(flow->monitorFlowControl());
-
- std::deque<QueuedMessage> msgs;
- for (size_t i = 0; i < 6; i++) {
- msgs.push_back(createMessage(10));
- flow->enqueued(msgs.back());
- BOOST_CHECK(!flow->isFlowControlActive());
- }
- BOOST_CHECK(!flow->isFlowControlActive()); // 6 on queue
- msgs.push_back(createMessage(10));
- flow->enqueued(msgs.back());
- BOOST_CHECK(!flow->isFlowControlActive()); // 7 on queue
- msgs.push_back(createMessage(10));
- flow->enqueued(msgs.back());
- BOOST_CHECK(flow->isFlowControlActive()); // 8 on queue, ON
- msgs.push_back(createMessage(10));
- flow->enqueued(msgs.back());
- BOOST_CHECK(flow->isFlowControlActive()); // 9 on queue, no change to flow control
-
- flow->dequeued(msgs.front());
- msgs.pop_front();
- BOOST_CHECK(flow->isFlowControlActive()); // 8 on queue
- flow->dequeued(msgs.front());
- msgs.pop_front();
- BOOST_CHECK(flow->isFlowControlActive()); // 7 on queue
- flow->dequeued(msgs.front());
- msgs.pop_front();
- BOOST_CHECK(flow->isFlowControlActive()); // 6 on queue
- flow->dequeued(msgs.front());
- msgs.pop_front();
- BOOST_CHECK(flow->isFlowControlActive()); // 5 on queue, no change
-
- flow->dequeued(msgs.front());
- msgs.pop_front();
- BOOST_CHECK(!flow->isFlowControlActive()); // 4 on queue, OFF
-}
-
-
-QPID_AUTO_TEST_CASE(testFlowSize)
-{
- FieldTable args;
- args.setUInt64(QueueFlowLimit::flowStopSizeKey, 70);
- args.setUInt64(QueueFlowLimit::flowResumeSizeKey, 50);
-
- std::auto_ptr<TestFlow> flow(TestFlow::createTestFlow(args));
-
- BOOST_CHECK_EQUAL((uint32_t) 0, flow->getFlowStopCount());
- BOOST_CHECK_EQUAL((uint32_t) 0, flow->getFlowResumeCount());
- BOOST_CHECK_EQUAL((uint32_t) 70, flow->getFlowStopSize());
- BOOST_CHECK_EQUAL((uint32_t) 50, flow->getFlowResumeSize());
- BOOST_CHECK(!flow->isFlowControlActive());
- BOOST_CHECK(flow->monitorFlowControl());
-
- std::deque<QueuedMessage> msgs;
- for (size_t i = 0; i < 6; i++) {
- msgs.push_back(createMessage(10));
- flow->enqueued(msgs.back());
- BOOST_CHECK(!flow->isFlowControlActive());
- }
- BOOST_CHECK(!flow->isFlowControlActive()); // 60 on queue
- BOOST_CHECK_EQUAL(6u, flow->getFlowCount());
- BOOST_CHECK_EQUAL(60u, flow->getFlowSize());
-
- QueuedMessage msg_9 = createMessage(9);
- flow->enqueued(msg_9);
- BOOST_CHECK(!flow->isFlowControlActive()); // 69 on queue
- QueuedMessage tinyMsg_1 = createMessage(1);
- flow->enqueued(tinyMsg_1);
- BOOST_CHECK(!flow->isFlowControlActive()); // 70 on queue
-
- QueuedMessage tinyMsg_2 = createMessage(1);
- flow->enqueued(tinyMsg_2);
- BOOST_CHECK(flow->isFlowControlActive()); // 71 on queue, ON
- msgs.push_back(createMessage(10));
- flow->enqueued(msgs.back());
- BOOST_CHECK(flow->isFlowControlActive()); // 81 on queue
- BOOST_CHECK_EQUAL(10u, flow->getFlowCount());
- BOOST_CHECK_EQUAL(81u, flow->getFlowSize());
-
- flow->dequeued(msgs.front());
- msgs.pop_front();
- BOOST_CHECK(flow->isFlowControlActive()); // 71 on queue
- flow->dequeued(msgs.front());
- msgs.pop_front();
- BOOST_CHECK(flow->isFlowControlActive()); // 61 on queue
- flow->dequeued(msgs.front());
- msgs.pop_front();
- BOOST_CHECK(flow->isFlowControlActive()); // 51 on queue
-
- flow->dequeued(tinyMsg_1);
- BOOST_CHECK(flow->isFlowControlActive()); // 50 on queue
- flow->dequeued(tinyMsg_2);
- BOOST_CHECK(!flow->isFlowControlActive()); // 49 on queue, OFF
-
- flow->dequeued(msg_9);
- BOOST_CHECK(!flow->isFlowControlActive()); // 40 on queue
- flow->dequeued(msgs.front());
- msgs.pop_front();
- BOOST_CHECK(!flow->isFlowControlActive()); // 30 on queue
- flow->dequeued(msgs.front());
- msgs.pop_front();
- BOOST_CHECK(!flow->isFlowControlActive()); // 20 on queue
- BOOST_CHECK_EQUAL(2u, flow->getFlowCount());
- BOOST_CHECK_EQUAL(20u, flow->getFlowSize());
-}
-
-QPID_AUTO_TEST_CASE(testFlowArgs)
-{
- FieldTable args;
- const uint64_t stop(0x2FFFFFFFFull);
- const uint64_t resume(0x1FFFFFFFFull);
- args.setInt(QueueFlowLimit::flowStopCountKey, 30);
- args.setInt(QueueFlowLimit::flowResumeCountKey, 21);
- args.setUInt64(QueueFlowLimit::flowStopSizeKey, stop);
- args.setUInt64(QueueFlowLimit::flowResumeSizeKey, resume);
-
- std::auto_ptr<TestFlow> flow(TestFlow::createTestFlow(args));
-
- BOOST_CHECK_EQUAL((uint32_t) 30, flow->getFlowStopCount());
- BOOST_CHECK_EQUAL((uint32_t) 21, flow->getFlowResumeCount());
- BOOST_CHECK_EQUAL(stop, flow->getFlowStopSize());
- BOOST_CHECK_EQUAL(resume, flow->getFlowResumeSize());
- BOOST_CHECK(!flow->isFlowControlActive());
- BOOST_CHECK(flow->monitorFlowControl());
-}
-
-
-QPID_AUTO_TEST_CASE(testFlowCombo)
-{
- FieldTable args;
- args.setInt(QueueFlowLimit::flowStopCountKey, 10);
- args.setInt(QueueFlowLimit::flowResumeCountKey, 5);
- args.setUInt64(QueueFlowLimit::flowStopSizeKey, 200);
- args.setUInt64(QueueFlowLimit::flowResumeSizeKey, 100);
-
- std::deque<QueuedMessage> msgs_1;
- std::deque<QueuedMessage> msgs_10;
- std::deque<QueuedMessage> msgs_50;
- std::deque<QueuedMessage> msgs_100;
-
- QueuedMessage msg;
-
- std::auto_ptr<TestFlow> flow(TestFlow::createTestFlow(args));
- BOOST_CHECK(!flow->isFlowControlActive()); // count:0 size:0
-
- // verify flow control comes ON when only count passes its stop point.
-
- for (size_t i = 0; i < 10; i++) {
- msgs_10.push_back(createMessage(10));
- flow->enqueued(msgs_10.back());
- BOOST_CHECK(!flow->isFlowControlActive());
- }
- // count:10 size:100
-
- msgs_1.push_back(createMessage(1));
- flow->enqueued(msgs_1.back()); // count:11 size: 101 ->ON
- BOOST_CHECK(flow->isFlowControlActive());
-
- for (size_t i = 0; i < 6; i++) {
- flow->dequeued(msgs_10.front());
- msgs_10.pop_front();
- BOOST_CHECK(flow->isFlowControlActive());
- }
- // count:5 size: 41
-
- flow->dequeued(msgs_1.front()); // count: 4 size: 40 ->OFF
- msgs_1.pop_front();
- BOOST_CHECK(!flow->isFlowControlActive());
-
- for (size_t i = 0; i < 4; i++) {
- flow->dequeued(msgs_10.front());
- msgs_10.pop_front();
- BOOST_CHECK(!flow->isFlowControlActive());
- }
- // count:0 size:0
-
- // verify flow control comes ON when only size passes its stop point.
-
- msgs_100.push_back(createMessage(100));
- flow->enqueued(msgs_100.back()); // count:1 size: 100
- BOOST_CHECK(!flow->isFlowControlActive());
-
- msgs_50.push_back(createMessage(50));
- flow->enqueued(msgs_50.back()); // count:2 size: 150
- BOOST_CHECK(!flow->isFlowControlActive());
-
- msgs_50.push_back(createMessage(50));
- flow->enqueued(msgs_50.back()); // count:3 size: 200
- BOOST_CHECK(!flow->isFlowControlActive());
-
- msgs_1.push_back(createMessage(1));
- flow->enqueued(msgs_1.back()); // count:4 size: 201 ->ON
- BOOST_CHECK(flow->isFlowControlActive());
-
- flow->dequeued(msgs_100.front()); // count:3 size:101
- msgs_100.pop_front();
- BOOST_CHECK(flow->isFlowControlActive());
-
- flow->dequeued(msgs_1.front()); // count:2 size:100
- msgs_1.pop_front();
- BOOST_CHECK(flow->isFlowControlActive());
-
- flow->dequeued(msgs_50.front()); // count:1 size:50 ->OFF
- msgs_50.pop_front();
- BOOST_CHECK(!flow->isFlowControlActive());
-
- // verify flow control remains ON until both thresholds drop below their
- // resume point.
-
- for (size_t i = 0; i < 8; i++) {
- msgs_10.push_back(createMessage(10));
- flow->enqueued(msgs_10.back());
- BOOST_CHECK(!flow->isFlowControlActive());
- }
- // count:9 size:130
-
- msgs_10.push_back(createMessage(10));
- flow->enqueued(msgs_10.back()); // count:10 size: 140
- BOOST_CHECK(!flow->isFlowControlActive());
-
- msgs_1.push_back(createMessage(1));
- flow->enqueued(msgs_1.back()); // count:11 size: 141 ->ON
- BOOST_CHECK(flow->isFlowControlActive());
-
- msgs_100.push_back(createMessage(100));
- flow->enqueued(msgs_100.back()); // count:12 size: 241 (both thresholds crossed)
- BOOST_CHECK(flow->isFlowControlActive());
-
- // at this point: 9@10 + 1@50 + 1@100 + 1@1 == 12@241
-
- flow->dequeued(msgs_50.front()); // count:11 size:191
- msgs_50.pop_front();
- BOOST_CHECK(flow->isFlowControlActive());
-
- for (size_t i = 0; i < 9; i++) {
- flow->dequeued(msgs_10.front());
- msgs_10.pop_front();
- BOOST_CHECK(flow->isFlowControlActive());
- }
- // count:2 size:101
- flow->dequeued(msgs_1.front()); // count:1 size:100
- msgs_1.pop_front();
- BOOST_CHECK(flow->isFlowControlActive()); // still active due to size
-
- flow->dequeued(msgs_100.front()); // count:0 size:0 ->OFF
- msgs_100.pop_front();
- BOOST_CHECK(!flow->isFlowControlActive());
-}
-
-
-QPID_AUTO_TEST_CASE(testFlowDefaultArgs)
-{
- QueueFlowLimit::setDefaults(2950001, // max queue byte count
- 80, // 80% stop threshold
- 70); // 70% resume threshold
- FieldTable args;
- QueueFlowLimit *ptr = TestFlow::getQueueFlowLimit(args);
-
- BOOST_CHECK(ptr);
- std::auto_ptr<QueueFlowLimit> flow(ptr);
- BOOST_CHECK_EQUAL((uint64_t) 2360001, flow->getFlowStopSize());
- BOOST_CHECK_EQUAL((uint64_t) 2065000, flow->getFlowResumeSize());
- BOOST_CHECK_EQUAL( 0u, flow->getFlowStopCount());
- BOOST_CHECK_EQUAL( 0u, flow->getFlowResumeCount());
- BOOST_CHECK(!flow->isFlowControlActive());
- BOOST_CHECK(flow->monitorFlowControl());
-}
-
-
-QPID_AUTO_TEST_CASE(testFlowOverrideArgs)
-{
- QueueFlowLimit::setDefaults(2950001, // max queue byte count
- 80, // 80% stop threshold
- 70); // 70% resume threshold
- {
- FieldTable args;
- args.setInt(QueueFlowLimit::flowStopCountKey, 35000);
- args.setInt(QueueFlowLimit::flowResumeCountKey, 30000);
-
- QueueFlowLimit *ptr = TestFlow::getQueueFlowLimit(args);
- BOOST_CHECK(ptr);
- std::auto_ptr<QueueFlowLimit> flow(ptr);
-
- BOOST_CHECK_EQUAL((uint32_t) 35000, flow->getFlowStopCount());
- BOOST_CHECK_EQUAL((uint32_t) 30000, flow->getFlowResumeCount());
- BOOST_CHECK_EQUAL((uint64_t) 0, flow->getFlowStopSize());
- BOOST_CHECK_EQUAL((uint64_t) 0, flow->getFlowResumeSize());
- BOOST_CHECK(!flow->isFlowControlActive());
- BOOST_CHECK(flow->monitorFlowControl());
- }
- {
- FieldTable args;
- args.setInt(QueueFlowLimit::flowStopSizeKey, 350000);
- args.setInt(QueueFlowLimit::flowResumeSizeKey, 300000);
-
- QueueFlowLimit *ptr = TestFlow::getQueueFlowLimit(args);
- BOOST_CHECK(ptr);
- std::auto_ptr<QueueFlowLimit> flow(ptr);
-
- BOOST_CHECK_EQUAL((uint32_t) 0, flow->getFlowStopCount());
- BOOST_CHECK_EQUAL((uint32_t) 0, flow->getFlowResumeCount());
- BOOST_CHECK_EQUAL((uint64_t) 350000, flow->getFlowStopSize());
- BOOST_CHECK_EQUAL((uint64_t) 300000, flow->getFlowResumeSize());
- BOOST_CHECK(!flow->isFlowControlActive());
- BOOST_CHECK(flow->monitorFlowControl());
- }
- {
- FieldTable args;
- args.setInt(QueueFlowLimit::flowStopCountKey, 35000);
- args.setInt(QueueFlowLimit::flowResumeCountKey, 30000);
- args.setInt(QueueFlowLimit::flowStopSizeKey, 350000);
- args.setInt(QueueFlowLimit::flowResumeSizeKey, 300000);
-
- QueueFlowLimit *ptr = TestFlow::getQueueFlowLimit(args);
- BOOST_CHECK(ptr);
- std::auto_ptr<QueueFlowLimit> flow(ptr);
-
- BOOST_CHECK_EQUAL((uint32_t) 35000, flow->getFlowStopCount());
- BOOST_CHECK_EQUAL((uint32_t) 30000, flow->getFlowResumeCount());
- BOOST_CHECK_EQUAL((uint64_t) 350000, flow->getFlowStopSize());
- BOOST_CHECK_EQUAL((uint64_t) 300000, flow->getFlowResumeSize());
- BOOST_CHECK(!flow->isFlowControlActive());
- BOOST_CHECK(flow->monitorFlowControl());
- }
-}
-
-
-QPID_AUTO_TEST_CASE(testFlowOverrideDefaults)
-{
- QueueFlowLimit::setDefaults(2950001, // max queue byte count
- 97, // stop threshold
- 73); // resume threshold
- FieldTable args;
- QueueFlowLimit *ptr = TestFlow::getQueueFlowLimit(args);
- BOOST_CHECK(ptr);
- std::auto_ptr<QueueFlowLimit> flow(ptr);
-
- BOOST_CHECK_EQUAL((uint32_t) 2861501, flow->getFlowStopSize());
- BOOST_CHECK_EQUAL((uint32_t) 2153500, flow->getFlowResumeSize());
- BOOST_CHECK(!flow->isFlowControlActive());
- BOOST_CHECK(flow->monitorFlowControl());
-}
-
-
-QPID_AUTO_TEST_CASE(testFlowDisable)
-{
- {
- FieldTable args;
- args.setInt(QueueFlowLimit::flowStopCountKey, 0);
- QueueFlowLimit *ptr = TestFlow::getQueueFlowLimit(args);
- BOOST_CHECK(!ptr);
- }
- {
- FieldTable args;
- args.setInt(QueueFlowLimit::flowStopSizeKey, 0);
- QueueFlowLimit *ptr = TestFlow::getQueueFlowLimit(args);
- BOOST_CHECK(!ptr);
- }
-}
-
-
-QPID_AUTO_TEST_SUITE_END()
-
-}} // namespace qpid::tests
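
The deleted testFlowDefaultArgs/testFlowOverrideDefaults checks above pin down how QueueFlowLimit derives its default byte thresholds from the configured maximum queue byte count and the stop/resume percentages. A minimal standalone sketch that reproduces those expected numbers, assuming the stop threshold rounds up and the resume threshold truncates (an inference from the asserted values, not the broker's actual arithmetic):

    #include <cstdint>
    #include <cassert>

    // Hypothetical helpers that reproduce the values the removed checks expect.
    static uint64_t stopThreshold(uint64_t maxBytes, uint64_t pct)   { return (maxBytes * pct + 99) / 100; } // rounds up
    static uint64_t resumeThreshold(uint64_t maxBytes, uint64_t pct) { return (maxBytes * pct) / 100; }      // truncates

    int main() {
        assert(stopThreshold(2950001, 80)   == 2360001); // matches the expected getFlowStopSize() default
        assert(resumeThreshold(2950001, 70) == 2065000); // matches the expected getFlowResumeSize() default
        assert(stopThreshold(2950001, 97)   == 2861501); // override-defaults case
        assert(resumeThreshold(2950001, 73) == 2153500);
        return 0;
    }
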
diff --git a/cpp/src/tests/QueuePolicyTest.cpp b/cpp/src/tests/QueuePolicyTest.cpp
index f735e09449..90af9c7dd9 100644
--- a/cpp/src/tests/QueuePolicyTest.cpp
+++ b/cpp/src/tests/QueuePolicyTest.cpp
@@ -23,7 +23,6 @@
#include "test_tools.h"
#include "qpid/broker/QueuePolicy.h"
-#include "qpid/broker/QueueFlowLimit.h"
#include "qpid/client/QueueOptions.h"
#include "qpid/sys/Time.h"
#include "qpid/framing/reply_exceptions.h"
@@ -39,7 +38,6 @@ namespace tests {
QPID_AUTO_TEST_SUITE(QueuePolicyTestSuite)
-namespace {
QueuedMessage createMessage(uint32_t size)
{
QueuedMessage msg;
@@ -47,7 +45,7 @@ QueuedMessage createMessage(uint32_t size)
MessageUtils::addContent(msg.payload, std::string (size, 'x'));
return msg;
}
-}
+
QPID_AUTO_TEST_CASE(testCount)
{
@@ -152,7 +150,7 @@ QPID_AUTO_TEST_CASE(testRingPolicyCount)
std::auto_ptr<QueuePolicy> policy = QueuePolicy::createQueuePolicy("test", 5, 0, QueuePolicy::RING);
policy->update(args);
- SessionFixture f;
+ ProxySessionFixture f;
std::string q("my-ring-queue");
f.session.queueDeclare(arg::queue=q, arg::exclusive=true, arg::autoDelete=true, arg::arguments=args);
for (int i = 0; i < 10; i++) {
@@ -187,7 +185,7 @@ QPID_AUTO_TEST_CASE(testRingPolicySize)
std::auto_ptr<QueuePolicy> policy = QueuePolicy::createQueuePolicy("test", 0, 500, QueuePolicy::RING);
policy->update(args);
- SessionFixture f;
+ ProxySessionFixture f;
std::string q("my-ring-queue");
f.session.queueDeclare(arg::queue=q, arg::exclusive=true, arg::autoDelete=true, arg::arguments=args);
@@ -259,7 +257,7 @@ QPID_AUTO_TEST_CASE(testStrictRingPolicy)
std::auto_ptr<QueuePolicy> policy = QueuePolicy::createQueuePolicy("test", 5, 0, QueuePolicy::RING_STRICT);
policy->update(args);
- SessionFixture f;
+ ProxySessionFixture f;
std::string q("my-ring-queue");
f.session.queueDeclare(arg::queue=q, arg::exclusive=true, arg::autoDelete=true, arg::arguments=args);
LocalQueue incoming;
@@ -285,7 +283,7 @@ QPID_AUTO_TEST_CASE(testPolicyWithDtx)
std::auto_ptr<QueuePolicy> policy = QueuePolicy::createQueuePolicy("test", 5, 0, QueuePolicy::REJECT);
policy->update(args);
- SessionFixture f;
+ ProxySessionFixture f;
std::string q("my-policy-queue");
f.session.queueDeclare(arg::queue=q, arg::exclusive=true, arg::autoDelete=true, arg::arguments=args);
LocalQueue incoming;
@@ -342,10 +340,8 @@ QPID_AUTO_TEST_CASE(testFlowToDiskWithNoStore)
//fallback to rejecting messages
QueueOptions args;
args.setSizePolicy(FLOW_TO_DISK, 0, 5);
- // Disable flow control, or else we'll never hit the max limit
- args.setInt(QueueFlowLimit::flowStopCountKey, 0);
- SessionFixture f;
+ ProxySessionFixture f;
std::string q("my-queue");
f.session.queueDeclare(arg::queue=q, arg::exclusive=true, arg::autoDelete=true, arg::arguments=args);
LocalQueue incoming;
@@ -371,7 +367,7 @@ QPID_AUTO_TEST_CASE(testPolicyFailureOnCommit)
std::auto_ptr<QueuePolicy> policy = QueuePolicy::createQueuePolicy("test", 5, 0, QueuePolicy::REJECT);
policy->update(args);
- SessionFixture f;
+ ProxySessionFixture f;
std::string q("q");
f.session.queueDeclare(arg::queue=q, arg::exclusive=true, arg::autoDelete=true, arg::arguments=args);
f.session.txSelect();
@@ -386,9 +382,8 @@ QPID_AUTO_TEST_CASE(testCapacityConversion)
{
FieldTable args;
args.setString("qpid.max_count", "5");
- args.setString("qpid.flow_stop_count", "0");
- SessionFixture f;
+ ProxySessionFixture f;
std::string q("q");
f.session.queueDeclare(arg::queue=q, arg::exclusive=true, arg::autoDelete=true, arg::arguments=args);
for (int i = 0; i < 5; i++) {
diff --git a/cpp/src/tests/QueueTest.cpp b/cpp/src/tests/QueueTest.cpp
index aaa2721021..80c69ac386 100644
--- a/cpp/src/tests/QueueTest.cpp
+++ b/cpp/src/tests/QueueTest.cpp
@@ -36,9 +36,6 @@
#include "qpid/framing/AMQFrame.h"
#include "qpid/framing/MessageTransferBody.h"
#include "qpid/framing/reply_exceptions.h"
-#include "qpid/broker/QueuePolicy.h"
-#include "qpid/broker/QueueFlowLimit.h"
-
#include <iostream>
#include "boost/format.hpp"
@@ -56,12 +53,12 @@ class TestConsumer : public virtual Consumer{
public:
typedef boost::shared_ptr<TestConsumer> shared_ptr;
- QueuedMessage last;
+ intrusive_ptr<Message> last;
bool received;
- TestConsumer(std::string name="test", bool acquire = true):Consumer(name, acquire), received(false) {};
+ TestConsumer(bool acquire = true):Consumer(acquire), received(false) {};
virtual bool deliver(QueuedMessage& msg){
- last = msg;
+ last = msg.payload;
received = true;
return true;
};
@@ -81,14 +78,13 @@ public:
Message& getMessage() { return *(msg.get()); }
};
-intrusive_ptr<Message> create_message(std::string exchange, std::string routingKey, uint64_t ttl = 0) {
+intrusive_ptr<Message> create_message(std::string exchange, std::string routingKey) {
intrusive_ptr<Message> msg(new Message());
AMQFrame method((MessageTransferBody(ProtocolVersion(), exchange, 0, 0)));
AMQFrame header((AMQHeaderBody()));
msg->getFrames().append(method);
msg->getFrames().append(header);
msg->getFrames().getHeaders()->get<DeliveryProperties>(true)->setRoutingKey(routingKey);
- if (ttl) msg->getFrames().getHeaders()->get<DeliveryProperties>(true)->setTtl(ttl);
return msg;
}
@@ -149,16 +145,16 @@ QPID_AUTO_TEST_CASE(testConsumers){
queue->deliver(msg1);
BOOST_CHECK(queue->dispatch(c1));
- BOOST_CHECK_EQUAL(msg1.get(), c1->last.payload.get());
+ BOOST_CHECK_EQUAL(msg1.get(), c1->last.get());
queue->deliver(msg2);
BOOST_CHECK(queue->dispatch(c2));
- BOOST_CHECK_EQUAL(msg2.get(), c2->last.payload.get());
+ BOOST_CHECK_EQUAL(msg2.get(), c2->last.get());
c1->received = false;
queue->deliver(msg3);
BOOST_CHECK(queue->dispatch(c1));
- BOOST_CHECK_EQUAL(msg3.get(), c1->last.payload.get());
+ BOOST_CHECK_EQUAL(msg3.get(), c1->last.get());
//Test cancellation:
queue->cancel(c1);
@@ -214,7 +210,7 @@ QPID_AUTO_TEST_CASE(testDequeue){
if (!consumer->received)
sleep(2);
- BOOST_CHECK_EQUAL(msg3.get(), consumer->last.payload.get());
+ BOOST_CHECK_EQUAL(msg3.get(), consumer->last.get());
BOOST_CHECK_EQUAL(uint32_t(0), queue->getMessageCount());
received = queue->get().payload;
@@ -248,7 +244,7 @@ QPID_AUTO_TEST_CASE(testBound){
exchange2.reset();
//unbind the queue from all exchanges it knows it has been bound to:
- queue->unbind(exchanges);
+ queue->unbind(exchanges, queue);
//ensure the remaining exchanges don't still have the queue bound to them:
FailOnDeliver deliverable;
@@ -258,26 +254,26 @@ QPID_AUTO_TEST_CASE(testBound){
QPID_AUTO_TEST_CASE(testPersistLastNodeStanding){
client::QueueOptions args;
- args.setPersistLastNode();
+ args.setPersistLastNode();
- Queue::shared_ptr queue(new Queue("my-queue", true));
+ Queue::shared_ptr queue(new Queue("my-queue", true));
queue->configure(args);
intrusive_ptr<Message> msg1 = create_message("e", "A");
intrusive_ptr<Message> msg2 = create_message("e", "B");
intrusive_ptr<Message> msg3 = create_message("e", "C");
- //enqueue 2 messages
+ //enqueue 2 messages
queue->deliver(msg1);
queue->deliver(msg2);
- //change mode
- queue->setLastNodeFailure();
+ //change mode
+ queue->setLastNodeFailure();
- //enqueue 1 message
+ //enqueue 1 message
queue->deliver(msg3);
- //check all have persistent ids.
+ //check all have persistent ids.
BOOST_CHECK(msg1->isPersistent());
BOOST_CHECK(msg2->isPersistent());
BOOST_CHECK(msg3->isPersistent());
@@ -287,58 +283,54 @@ QPID_AUTO_TEST_CASE(testPersistLastNodeStanding){
QPID_AUTO_TEST_CASE(testSeek){
- Queue::shared_ptr queue(new Queue("my-queue", true));
+ Queue::shared_ptr queue(new Queue("my-queue", true));
intrusive_ptr<Message> msg1 = create_message("e", "A");
intrusive_ptr<Message> msg2 = create_message("e", "B");
intrusive_ptr<Message> msg3 = create_message("e", "C");
- //enqueue 2 messages
+    //enqueue 3 messages
queue->deliver(msg1);
queue->deliver(msg2);
queue->deliver(msg3);
- TestConsumer::shared_ptr consumer(new TestConsumer("test", false));
+ TestConsumer::shared_ptr consumer(new TestConsumer(false));
SequenceNumber seq(2);
consumer->position = seq;
QueuedMessage qm;
queue->dispatch(consumer);
-
- BOOST_CHECK_EQUAL(msg3.get(), consumer->last.payload.get());
+
+ BOOST_CHECK_EQUAL(msg3.get(), consumer->last.get());
queue->dispatch(consumer);
queue->dispatch(consumer); // make sure over-run is safe
-
+
}
QPID_AUTO_TEST_CASE(testSearch){
- Queue::shared_ptr queue(new Queue("my-queue", true));
+ Queue::shared_ptr queue(new Queue("my-queue", true));
intrusive_ptr<Message> msg1 = create_message("e", "A");
intrusive_ptr<Message> msg2 = create_message("e", "B");
intrusive_ptr<Message> msg3 = create_message("e", "C");
- //enqueue 2 messages
+    //enqueue 3 messages
queue->deliver(msg1);
queue->deliver(msg2);
queue->deliver(msg3);
SequenceNumber seq(2);
- QueuedMessage qm;
- TestConsumer::shared_ptr c1(new TestConsumer());
-
- BOOST_CHECK(queue->find(seq, qm));
-
+ QueuedMessage qm = queue->find(seq);
+
BOOST_CHECK_EQUAL(seq.getValue(), qm.position.getValue());
-
- queue->acquire(qm, c1->getName());
+
+ queue->acquire(qm);
BOOST_CHECK_EQUAL(queue->getMessageCount(), 2u);
SequenceNumber seq1(3);
- QueuedMessage qm1;
- BOOST_CHECK(queue->find(seq1, qm1));
+ QueuedMessage qm1 = queue->find(seq1);
BOOST_CHECK_EQUAL(seq1.getValue(), qm1.position.getValue());
-
+
}
const std::string nullxid = "";
@@ -424,10 +416,10 @@ QPID_AUTO_TEST_CASE(testLVQOrdering){
client::QueueOptions args;
// set queue mode
- args.setOrdering(client::LVQ);
+ args.setOrdering(client::LVQ);
- Queue::shared_ptr queue(new Queue("my-queue", true ));
- queue->configure(args);
+ Queue::shared_ptr queue(new Queue("my-queue", true ));
+ queue->configure(args);
intrusive_ptr<Message> msg1 = create_message("e", "A");
intrusive_ptr<Message> msg2 = create_message("e", "B");
@@ -438,16 +430,16 @@ QPID_AUTO_TEST_CASE(testLVQOrdering){
//set deliver match for LVQ a,b,c,a
string key;
- args.getLVQKey(key);
+ args.getLVQKey(key);
BOOST_CHECK_EQUAL(key, "qpid.LVQ_key");
- msg1->insertCustomProperty(key,"a");
- msg2->insertCustomProperty(key,"b");
- msg3->insertCustomProperty(key,"c");
- msg4->insertCustomProperty(key,"a");
+ msg1->getProperties<MessageProperties>()->getApplicationHeaders().setString(key,"a");
+ msg2->getProperties<MessageProperties>()->getApplicationHeaders().setString(key,"b");
+ msg3->getProperties<MessageProperties>()->getApplicationHeaders().setString(key,"c");
+ msg4->getProperties<MessageProperties>()->getApplicationHeaders().setString(key,"a");
- //enqueue 4 message
+    //enqueue 4 messages
queue->deliver(msg1);
queue->deliver(msg2);
queue->deliver(msg3);
@@ -467,9 +459,9 @@ QPID_AUTO_TEST_CASE(testLVQOrdering){
intrusive_ptr<Message> msg5 = create_message("e", "A");
intrusive_ptr<Message> msg6 = create_message("e", "B");
intrusive_ptr<Message> msg7 = create_message("e", "C");
- msg5->insertCustomProperty(key,"a");
- msg6->insertCustomProperty(key,"b");
- msg7->insertCustomProperty(key,"c");
+ msg5->getProperties<MessageProperties>()->getApplicationHeaders().setString(key,"a");
+ msg6->getProperties<MessageProperties>()->getApplicationHeaders().setString(key,"b");
+ msg7->getProperties<MessageProperties>()->getApplicationHeaders().setString(key,"c");
queue->deliver(msg5);
queue->deliver(msg6);
queue->deliver(msg7);
@@ -504,7 +496,7 @@ QPID_AUTO_TEST_CASE(testLVQEmptyKey){
BOOST_CHECK_EQUAL(key, "qpid.LVQ_key");
- msg1->insertCustomProperty(key,"a");
+ msg1->getProperties<MessageProperties>()->getApplicationHeaders().setString(key,"a");
queue->deliver(msg1);
queue->deliver(msg2);
BOOST_CHECK_EQUAL(queue->getMessageCount(), 2u);
@@ -516,8 +508,6 @@ QPID_AUTO_TEST_CASE(testLVQAcquire){
client::QueueOptions args;
// set queue mode
args.setOrdering(client::LVQ);
- // disable flow control, as this test violates the enqueue/dequeue sequence.
- args.setInt(QueueFlowLimit::flowStopCountKey, 0);
Queue::shared_ptr queue(new Queue("my-queue", true ));
queue->configure(args);
@@ -536,12 +526,12 @@ QPID_AUTO_TEST_CASE(testLVQAcquire){
BOOST_CHECK_EQUAL(key, "qpid.LVQ_key");
- msg1->insertCustomProperty(key,"a");
- msg2->insertCustomProperty(key,"b");
- msg3->insertCustomProperty(key,"c");
- msg4->insertCustomProperty(key,"a");
- msg5->insertCustomProperty(key,"b");
- msg6->insertCustomProperty(key,"c");
+ msg1->getProperties<MessageProperties>()->getApplicationHeaders().setString(key,"a");
+ msg2->getProperties<MessageProperties>()->getApplicationHeaders().setString(key,"b");
+ msg3->getProperties<MessageProperties>()->getApplicationHeaders().setString(key,"c");
+ msg4->getProperties<MessageProperties>()->getApplicationHeaders().setString(key,"a");
+ msg5->getProperties<MessageProperties>()->getApplicationHeaders().setString(key,"b");
+ msg6->getProperties<MessageProperties>()->getApplicationHeaders().setString(key,"c");
//enqueue 4 messages
queue->deliver(msg1);
@@ -556,13 +546,12 @@ QPID_AUTO_TEST_CASE(testLVQAcquire){
QueuedMessage qmsg2(queue.get(), msg2, ++sequence);
framing::SequenceNumber sequence1(10);
QueuedMessage qmsg3(queue.get(), 0, sequence1);
- TestConsumer::shared_ptr dummy(new TestConsumer());
- BOOST_CHECK(!queue->acquire(qmsg, dummy->getName()));
- BOOST_CHECK(queue->acquire(qmsg2, dummy->getName()));
+ BOOST_CHECK(!queue->acquire(qmsg));
+ BOOST_CHECK(queue->acquire(qmsg2));
// Acquire the message again to test failure case.
- BOOST_CHECK(!queue->acquire(qmsg2, dummy->getName()));
- BOOST_CHECK(!queue->acquire(qmsg3, dummy->getName()));
+ BOOST_CHECK(!queue->acquire(qmsg2));
+ BOOST_CHECK(!queue->acquire(qmsg3));
BOOST_CHECK_EQUAL(queue->getMessageCount(), 2u);
@@ -572,7 +561,7 @@ QPID_AUTO_TEST_CASE(testLVQAcquire){
// set mode to no browse and check
args.setOrdering(client::LVQ_NO_BROWSE);
queue->configure(args);
- TestConsumer::shared_ptr c1(new TestConsumer("test", false));
+ TestConsumer::shared_ptr c1(new TestConsumer(false));
queue->dispatch(c1);
queue->dispatch(c1);
@@ -606,8 +595,8 @@ QPID_AUTO_TEST_CASE(testLVQMultiQueue){
args.getLVQKey(key);
BOOST_CHECK_EQUAL(key, "qpid.LVQ_key");
- msg1->insertCustomProperty(key,"a");
- msg2->insertCustomProperty(key,"a");
+ msg1->getProperties<MessageProperties>()->getApplicationHeaders().setString(key,"a");
+ msg2->getProperties<MessageProperties>()->getApplicationHeaders().setString(key,"a");
queue1->deliver(msg1);
queue2->deliver(msg1);
@@ -641,7 +630,7 @@ QPID_AUTO_TEST_CASE(testLVQRecover){
Queue::shared_ptr queue1(new Queue("my-queue", true, &testStore));
intrusive_ptr<Message> received;
- queue1->create(args);
+ queue1->configure(args);
intrusive_ptr<Message> msg1 = create_message("e", "A");
intrusive_ptr<Message> msg2 = create_message("e", "A");
@@ -650,9 +639,9 @@ QPID_AUTO_TEST_CASE(testLVQRecover){
args.getLVQKey(key);
BOOST_CHECK_EQUAL(key, "qpid.LVQ_key");
- msg1->insertCustomProperty(key,"a");
- msg2->insertCustomProperty(key,"a");
- // 3
+ msg1->getProperties<MessageProperties>()->getApplicationHeaders().setString(key,"a");
+ msg2->getProperties<MessageProperties>()->getApplicationHeaders().setString(key,"a");
+ // 3
queue1->deliver(msg1);
// 4
queue1->setLastNodeFailure();
@@ -671,8 +660,13 @@ QPID_AUTO_TEST_CASE(testLVQRecover){
void addMessagesToQueue(uint count, Queue& queue, uint oddTtl = 200, uint evenTtl = 0)
{
for (uint i = 0; i < count; i++) {
- intrusive_ptr<Message> m = create_message("exchange", "key", i % 2 ? oddTtl : evenTtl);
- m->computeExpiration(new broker::ExpiryPolicy);
+ intrusive_ptr<Message> m = create_message("exchange", "key");
+ if (i % 2) {
+ if (oddTtl) m->getProperties<DeliveryProperties>()->setTtl(oddTtl);
+ } else {
+ if (evenTtl) m->getProperties<DeliveryProperties>()->setTtl(evenTtl);
+ }
+ m->setTimestamp(new broker::ExpiryPolicy);
queue.deliver(m);
}
}
@@ -682,7 +676,7 @@ QPID_AUTO_TEST_CASE(testPurgeExpired) {
addMessagesToQueue(10, queue);
BOOST_CHECK_EQUAL(queue.getMessageCount(), 10u);
::usleep(300*1000);
- queue.purgeExpired(0);
+ queue.purgeExpired();
BOOST_CHECK_EQUAL(queue.getMessageCount(), 5u);
}
@@ -693,7 +687,7 @@ QPID_AUTO_TEST_CASE(testQueueCleaner) {
addMessagesToQueue(10, *queue, 200, 400);
BOOST_CHECK_EQUAL(queue->getMessageCount(), 10u);
- QueueCleaner cleaner(queues, &timer);
+ QueueCleaner cleaner(queues, timer);
cleaner.start(100 * qpid::sys::TIME_MSEC);
::usleep(300*1000);
BOOST_CHECK_EQUAL(queue->getMessageCount(), 5u);
@@ -701,280 +695,6 @@ QPID_AUTO_TEST_CASE(testQueueCleaner) {
BOOST_CHECK_EQUAL(queue->getMessageCount(), 0u);
}
-
-namespace {
- // helper for group tests
- void verifyAcquire( Queue::shared_ptr queue,
- TestConsumer::shared_ptr c,
- std::deque<QueuedMessage>& results,
- const std::string& expectedGroup,
- const int expectedId )
- {
- queue->dispatch(c);
- results.push_back(c->last);
- std::string group = c->last.payload->getProperties<MessageProperties>()->getApplicationHeaders().getAsString("GROUP-ID");
- int id = c->last.payload->getProperties<MessageProperties>()->getApplicationHeaders().getAsInt("MY-ID");
- BOOST_CHECK_EQUAL( group, expectedGroup );
- BOOST_CHECK_EQUAL( id, expectedId );
- }
-}
-
-QPID_AUTO_TEST_CASE(testGroupsMultiConsumer) {
- //
- // Verify that consumers of grouped messages own the groups once a message is acquired,
- // and release the groups once all acquired messages have been dequeued or requeued
- //
- FieldTable args;
- Queue::shared_ptr queue(new Queue("my_queue", true));
- args.setString("qpid.group_header_key", "GROUP-ID");
- args.setInt("qpid.shared_msg_group", 1);
- queue->configure(args);
-
- std::string groups[] = { std::string("a"), std::string("a"), std::string("a"),
- std::string("b"), std::string("b"), std::string("b"),
- std::string("c"), std::string("c"), std::string("c") };
- for (int i = 0; i < 9; ++i) {
- intrusive_ptr<Message> msg = create_message("e", "A");
- msg->insertCustomProperty("GROUP-ID", groups[i]);
- msg->insertCustomProperty("MY-ID", i);
- queue->deliver(msg);
- }
-
- // Queue = a-0, a-1, a-2, b-3, b-4, b-5, c-6, c-7, c-8...
- // Owners= ---, ---, ---, ---, ---, ---, ---, ---, ---,
-
- BOOST_CHECK_EQUAL(uint32_t(9), queue->getMessageCount());
-
- TestConsumer::shared_ptr c1(new TestConsumer("C1"));
- TestConsumer::shared_ptr c2(new TestConsumer("C2"));
-
- queue->consume(c1);
- queue->consume(c2);
-
- std::deque<QueuedMessage> dequeMeC1;
- std::deque<QueuedMessage> dequeMeC2;
-
-
- verifyAcquire(queue, c1, dequeMeC1, "a", 0 ); // c1 now owns group "a" (acquire a-0)
- verifyAcquire(queue, c2, dequeMeC2, "b", 3 ); // c2 should now own group "b" (acquire b-3)
-
- // now let c1 complete the 'a-0' message - this should free the 'a' group
- queue->dequeue( 0, dequeMeC1.front() );
- dequeMeC1.pop_front();
-
- // Queue = a-1, a-2, b-3, b-4, b-5, c-6, c-7, c-8...
- // Owners= ---, ---, ^C2, ^C2, ^C2, ---, ---, ---
-
- // now c2 should pick up the next 'a-1', since it is oldest free
- verifyAcquire(queue, c2, dequeMeC2, "a", 1 ); // c2 should now own groups "a" and "b"
-
- // Queue = a-1, a-2, b-3, b-4, b-5, c-6, c-7, c-8...
- // Owners= ^C2, ^C2, ^C2, ^C2, ^C2, ---, ---, ---
-
- // c1 should only be able to snarf up the first "c" message now...
- verifyAcquire(queue, c1, dequeMeC1, "c", 6 ); // should skip to the first "c"
-
- // Queue = a-1, a-2, b-3, b-4, b-5, c-6, c-7, c-8...
- // Owners= ^C2, ^C2, ^C2, ^C2, ^C2, ^C1, ^C1, ^C1
-
- // hmmm... what if c2 now dequeues "b-3"? (now only has a-1 acquired)
- queue->dequeue( 0, dequeMeC2.front() );
- dequeMeC2.pop_front();
-
- // Queue = a-1, a-2, b-4, b-5, c-6, c-7, c-8...
- // Owners= ^C2, ^C2, ---, ---, ^C1, ^C1, ^C1
-
- // b group is free, c is owned by c1 - c1's next get should grab 'b-4'
- verifyAcquire(queue, c1, dequeMeC1, "b", 4 );
-
- // Queue = a-1, a-2, b-4, b-5, c-6, c-7, c-8...
- // Owners= ^C2, ^C2, ^C1, ^C1, ^C1, ^C1, ^C1
-
- // c2 can now only grab a-2, and that's all
- verifyAcquire(queue, c2, dequeMeC2, "a", 2 );
-
- // now C2 can't get any more, since C1 owns "b" and "c" group...
- bool gotOne = queue->dispatch(c2);
- BOOST_CHECK( !gotOne );
-
-    // hmmm... what if c1 now dequeues "c-6"? (now only owns b-4)
- queue->dequeue( 0, dequeMeC1.front() );
- dequeMeC1.pop_front();
-
- // Queue = a-1, a-2, b-4, b-5, c-7, c-8...
- // Owners= ^C2, ^C2, ^C1, ^C1, ---, ---
-
- // c2 can now grab c-7
- verifyAcquire(queue, c2, dequeMeC2, "c", 7 );
-
- // Queue = a-1, a-2, b-4, b-5, c-7, c-8...
- // Owners= ^C2, ^C2, ^C1, ^C1, ^C2, ^C2
-
- // what happens if C-2 "requeues" a-1 and a-2?
- queue->requeue( dequeMeC2.front() );
- dequeMeC2.pop_front();
- queue->requeue( dequeMeC2.front() );
- dequeMeC2.pop_front(); // now just has c-7 acquired
-
- // Queue = a-1, a-2, b-4, b-5, c-7, c-8...
- // Owners= ---, ---, ^C1, ^C1, ^C2, ^C2
-
- // now c1 will grab a-1 and a-2...
- verifyAcquire(queue, c1, dequeMeC1, "a", 1 );
- verifyAcquire(queue, c1, dequeMeC1, "a", 2 );
-
- // Queue = a-1, a-2, b-4, b-5, c-7, c-8...
- // Owners= ^C1, ^C1, ^C1, ^C1, ^C2, ^C2
-
- // c2 can now acquire c-8 only
- verifyAcquire(queue, c2, dequeMeC2, "c", 8 );
-
- // and c1 can get b-5
- verifyAcquire(queue, c1, dequeMeC1, "b", 5 );
-
- // should be no more acquire-able for anyone now:
- gotOne = queue->dispatch(c1);
- BOOST_CHECK( !gotOne );
- gotOne = queue->dispatch(c2);
- BOOST_CHECK( !gotOne );
-
- // requeue all of C1's acquired messages, then cancel C1
- while (!dequeMeC1.empty()) {
- queue->requeue(dequeMeC1.front());
- dequeMeC1.pop_front();
- }
- queue->cancel(c1);
-
- // Queue = a-1, a-2, b-4, b-5, c-7, c-8...
- // Owners= ---, ---, ---, ---, ^C2, ^C2
-
- // b-4, a-1, a-2, b-5 all should be available, right?
- verifyAcquire(queue, c2, dequeMeC2, "a", 1 );
-
- while (!dequeMeC2.empty()) {
- queue->dequeue(0, dequeMeC2.front());
- dequeMeC2.pop_front();
- }
-
- // Queue = a-2, b-4, b-5
- // Owners= ---, ---, ---
-
- TestConsumer::shared_ptr c3(new TestConsumer("C3"));
- std::deque<QueuedMessage> dequeMeC3;
-
- verifyAcquire(queue, c3, dequeMeC3, "a", 2 );
- verifyAcquire(queue, c2, dequeMeC2, "b", 4 );
-
- // Queue = a-2, b-4, b-5
- // Owners= ^C3, ^C2, ^C2
-
- gotOne = queue->dispatch(c3);
- BOOST_CHECK( !gotOne );
-
- verifyAcquire(queue, c2, dequeMeC2, "b", 5 );
-
- while (!dequeMeC2.empty()) {
- queue->dequeue(0, dequeMeC2.front());
- dequeMeC2.pop_front();
- }
-
- // Queue = a-2,
- // Owners= ^C3,
-
- intrusive_ptr<Message> msg = create_message("e", "A");
- msg->insertCustomProperty("GROUP-ID", "a");
- msg->insertCustomProperty("MY-ID", 9);
- queue->deliver(msg);
-
- // Queue = a-2, a-9
- // Owners= ^C3, ^C3
-
- gotOne = queue->dispatch(c2);
- BOOST_CHECK( !gotOne );
-
- msg = create_message("e", "A");
- msg->insertCustomProperty("GROUP-ID", "b");
- msg->insertCustomProperty("MY-ID", 10);
- queue->deliver(msg);
-
- // Queue = a-2, a-9, b-10
- // Owners= ^C3, ^C3, ----
-
- verifyAcquire(queue, c2, dequeMeC2, "b", 10 );
- verifyAcquire(queue, c3, dequeMeC3, "a", 9 );
-
- gotOne = queue->dispatch(c3);
- BOOST_CHECK( !gotOne );
-
- queue->cancel(c2);
- queue->cancel(c3);
-}
-
-
-QPID_AUTO_TEST_CASE(testGroupsMultiConsumerDefaults) {
- //
- // Verify that the same default group name is automatically applied to messages that
- // do not specify a group name.
- //
- FieldTable args;
- Queue::shared_ptr queue(new Queue("my_queue", true));
- args.setString("qpid.group_header_key", "GROUP-ID");
- args.setInt("qpid.shared_msg_group", 1);
- queue->configure(args);
-
- for (int i = 0; i < 3; ++i) {
- intrusive_ptr<Message> msg = create_message("e", "A");
- // no "GROUP-ID" header
- msg->insertCustomProperty("MY-ID", i);
- queue->deliver(msg);
- }
-
- // Queue = 0, 1, 2
-
- BOOST_CHECK_EQUAL(uint32_t(3), queue->getMessageCount());
-
- TestConsumer::shared_ptr c1(new TestConsumer("C1"));
- TestConsumer::shared_ptr c2(new TestConsumer("C2"));
-
- queue->consume(c1);
- queue->consume(c2);
-
- std::deque<QueuedMessage> dequeMeC1;
- std::deque<QueuedMessage> dequeMeC2;
-
- queue->dispatch(c1); // c1 now owns default group (acquired 0)
- dequeMeC1.push_back(c1->last);
- int id = c1->last.payload->getProperties<MessageProperties>()->getApplicationHeaders().getAsInt("MY-ID");
- BOOST_CHECK_EQUAL( id, 0 );
-
- bool gotOne = queue->dispatch(c2); // c2 should get nothing
- BOOST_CHECK( !gotOne );
-
- queue->dispatch(c1); // c1 now acquires 1
- dequeMeC1.push_back(c1->last);
- id = c1->last.payload->getProperties<MessageProperties>()->getApplicationHeaders().getAsInt("MY-ID");
- BOOST_CHECK_EQUAL( id, 1 );
-
- gotOne = queue->dispatch(c2); // c2 should still get nothing
- BOOST_CHECK( !gotOne );
-
- while (!dequeMeC1.empty()) {
- queue->dequeue(0, dequeMeC1.front());
- dequeMeC1.pop_front();
- }
-
- // now default group should be available...
- queue->dispatch(c2); // c2 now owns default group (acquired 2)
- id = c2->last.payload->getProperties<MessageProperties>()->getApplicationHeaders().getAsInt("MY-ID");
- BOOST_CHECK_EQUAL( id, 2 );
-
- gotOne = queue->dispatch(c1); // c1 should get nothing
- BOOST_CHECK( !gotOne );
-
- queue->cancel(c1);
- queue->cancel(c2);
-}
-
QPID_AUTO_TEST_CASE(testMultiQueueLastNode){
TestMessageStoreOC testStore;
@@ -982,9 +702,9 @@ QPID_AUTO_TEST_CASE(testMultiQueueLastNode){
args.setPersistLastNode();
Queue::shared_ptr queue1(new Queue("queue1", true, &testStore ));
- queue1->create(args);
+ queue1->configure(args);
Queue::shared_ptr queue2(new Queue("queue2", true, &testStore ));
- queue2->create(args);
+ queue2->configure(args);
intrusive_ptr<Message> msg1 = create_message("e", "A");
@@ -1070,7 +790,7 @@ not requeued to the store.
Queue::shared_ptr queue1(new Queue("my-queue", true, &testStore));
intrusive_ptr<Message> received;
- queue1->create(args);
+ queue1->configure(args);
// check requeue 1
intrusive_ptr<Message> msg1 = create_message("e", "C");
@@ -1150,40 +870,28 @@ QPID_AUTO_TEST_CASE(testFlowToDiskBlocking){
intrusive_ptr<Message> msg02 = mkMsg(testStore, std::string(5, 'X')); // transient w/ content
DeliverableMessage dmsg02(msg02);
- {
- ScopedSuppressLogging sl; // suppress expected error messages.
- BOOST_CHECK_THROW(sbtFanout1.route(dmsg02, "", 0), ResourceLimitExceededException);
- }
+ BOOST_CHECK_THROW(sbtFanout1.route(dmsg02, "", 0), ResourceLimitExceededException);
msg02->tryReleaseContent();
BOOST_CHECK_EQUAL(msg02->isContentReleased(), false);
BOOST_CHECK_EQUAL(1u, tq1->getMessageCount());
intrusive_ptr<Message> msg03 = mkMsg(testStore, std::string(5, 'X'), true); // durable w/ content
DeliverableMessage dmsg03(msg03);
- {
- ScopedSuppressLogging sl; // suppress expected error messages.
- BOOST_CHECK_THROW(sbtFanout1.route(dmsg03, "", 0), ResourceLimitExceededException);
- }
+ BOOST_CHECK_THROW(sbtFanout1.route(dmsg03, "", 0), ResourceLimitExceededException);
msg03->tryReleaseContent();
BOOST_CHECK_EQUAL(msg03->isContentReleased(), false);
BOOST_CHECK_EQUAL(1u, tq1->getMessageCount());
intrusive_ptr<Message> msg04 = mkMsg(testStore); // transient no content
DeliverableMessage dmsg04(msg04);
- {
- ScopedSuppressLogging sl; // suppress expected error messages.
- BOOST_CHECK_THROW(sbtFanout1.route(dmsg04, "", 0), ResourceLimitExceededException);
- }
+ BOOST_CHECK_THROW(sbtFanout1.route(dmsg04, "", 0), ResourceLimitExceededException);
msg04->tryReleaseContent();
BOOST_CHECK_EQUAL(msg04->isContentReleased(), false);
BOOST_CHECK_EQUAL(1u, tq1->getMessageCount());
intrusive_ptr<Message> msg05 = mkMsg(testStore, "", true); // durable no content
DeliverableMessage dmsg05(msg05);
- {
- ScopedSuppressLogging sl; // suppress expected error messages.
- BOOST_CHECK_THROW(sbtFanout1.route(dmsg05, "", 0), ResourceLimitExceededException);
- }
+ BOOST_CHECK_THROW(sbtFanout1.route(dmsg05, "", 0), ResourceLimitExceededException);
msg05->tryReleaseContent();
BOOST_CHECK_EQUAL(msg05->isContentReleased(), false);
BOOST_CHECK_EQUAL(1u, tq1->getMessageCount());
diff --git a/cpp/src/tests/ReplicationTest.cpp b/cpp/src/tests/ReplicationTest.cpp
index 1219a6b59e..7310a3fe20 100644
--- a/cpp/src/tests/ReplicationTest.cpp
+++ b/cpp/src/tests/ReplicationTest.cpp
@@ -74,7 +74,7 @@ QPID_AUTO_TEST_CASE(testReplicationExchange)
{
qpid::broker::Broker::Options brokerOpts(getBrokerOpts(list_of<string>("qpidd")
("--replication-exchange-name=qpid.replication")));
- SessionFixture f(brokerOpts);
+ ProxySessionFixture f(brokerOpts);
std::string dataQ("queue-1");
diff --git a/cpp/src/tests/SessionState.cpp b/cpp/src/tests/SessionState.cpp
index 3be9bb0cbc..157cabfb63 100644
--- a/cpp/src/tests/SessionState.cpp
+++ b/cpp/src/tests/SessionState.cpp
@@ -43,7 +43,7 @@ using namespace qpid::framing;
// Apply f to [begin, end) and accumulate the result
template <class Iter, class T, class F>
T applyAccumulate(Iter begin, Iter end, T seed, const F& f) {
- return std::accumulate(begin, end, seed, boost::bind(std::plus<T>(), _1, boost::bind(f, _2)));
+ return std::accumulate(begin, end, seed, bind(std::plus<T>(), _1, bind(f, _2)));
}
// Create a frame with a one-char string.
@@ -105,8 +105,8 @@ size_t transferN(qpid::SessionState& s, string content) {
char last = content[content.size()-1];
content.resize(content.size()-1);
size += applyAccumulate(content.begin(), content.end(), 0,
- boost::bind(&send, boost::ref(s),
- boost::bind(contentFrameChar, _1, false)));
+ bind(&send, ref(s),
+ bind(contentFrameChar, _1, false)));
size += send(s, contentFrameChar(last, true));
}
return size;
@@ -115,7 +115,7 @@ size_t transferN(qpid::SessionState& s, string content) {
// Send multiple transfers with single-byte content.
size_t transfers(qpid::SessionState& s, string content) {
return applyAccumulate(content.begin(), content.end(), 0,
- boost::bind(transfer1Char, boost::ref(s), _1));
+ bind(transfer1Char, ref(s), _1));
}
size_t contentFrameSize(size_t n=1) { return AMQFrame(( AMQContentBody())).encodedSize() + n; }
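
The applyAccumulate template at the top of this file folds a function over a range and sums the results; the transfer-size helpers below it use that to total frame sizes. A standalone illustration of the same idiom, written against std::bind (the test itself may resolve bind from Boost):

    #include <numeric>
    #include <functional>
    #include <vector>
    #include <string>
    #include <cstddef>
    #include <cassert>

    // Fold f over [begin, end) and sum the results, as in the test helper above.
    template <class Iter, class T, class F>
    T applyAccumulate(Iter begin, Iter end, T seed, const F& f) {
        using namespace std::placeholders;
        return std::accumulate(begin, end, seed,
                               std::bind(std::plus<T>(), _1, std::bind(f, _2)));
    }

    static size_t length(const std::string& s) { return s.size(); }

    int main() {
        std::vector<std::string> frames = {"ab", "cde", "f"};
        // Total "payload" size of all frames: 2 + 3 + 1.
        assert(applyAccumulate(frames.begin(), frames.end(), size_t(0), &length) == 6u);
        return 0;
    }
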
diff --git a/cpp/src/tests/SocketProxy.h b/cpp/src/tests/SocketProxy.h
new file mode 100644
index 0000000000..0c6f39d62e
--- /dev/null
+++ b/cpp/src/tests/SocketProxy.h
@@ -0,0 +1,181 @@
+#ifndef SOCKETPROXY_H
+#define SOCKETPROXY_H
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include "qpid/sys/IOHandle.h"
+#ifdef _WIN32
+# include "qpid/sys/windows/IoHandlePrivate.h"
+ typedef SOCKET FdType;
+#else
+# include "qpid/sys/posix/PrivatePosix.h"
+ typedef int FdType;
+#endif
+#include "qpid/sys/Socket.h"
+#include "qpid/sys/Runnable.h"
+#include "qpid/sys/Thread.h"
+#include "qpid/sys/Mutex.h"
+#include "qpid/log/Statement.h"
+
+namespace qpid {
+namespace tests {
+
+/**
+ * A simple socket proxy that forwards to another socket.
+ * Used between client & local broker to simulate network failures.
+ */
+class SocketProxy : private qpid::sys::Runnable
+{
+ // Need a Socket we can get the fd from
+ class LowSocket : public qpid::sys::Socket {
+ public:
+#ifdef _WIN32
+ FdType getFd() { return toSocketHandle(*this); }
+#else
+ FdType getFd() { return toFd(impl); }
+#endif
+ };
+
+ public:
+ /** Connect to connectPort on host, start a forwarding thread.
+ * Listen for connection on getPort().
+ */
+ SocketProxy(int connectPort, const std::string host="localhost")
+ : closed(false), joined(true),
+ port(listener.listen()), dropClient(), dropServer()
+ {
+ client.connect(host, connectPort);
+ joined = false;
+ thread = qpid::sys::Thread(static_cast<qpid::sys::Runnable*>(this));
+ }
+
+ ~SocketProxy() { close(); if (!joined) thread.join(); }
+
+ /** Simulate a network disconnect. */
+ void close() {
+ {
+ qpid::sys::Mutex::ScopedLock l(lock);
+ if (closed) { return; }
+ closed=true;
+ }
+ if (thread && thread != qpid::sys::Thread::current()) {
+ thread.join();
+ joined = true;
+ }
+ client.close();
+ }
+
+ /** Simulate lost packets, drop data from client */
+ void dropClientData(bool drop=true) { dropClient=drop; }
+
+ /** Simulate lost packets, drop data from server */
+ void dropServerData(bool drop=true) { dropServer=drop; }
+
+ bool isClosed() const {
+ qpid::sys::Mutex::ScopedLock l(lock);
+ return closed;
+ }
+
+ uint16_t getPort() const { return port; }
+
+ private:
+ static void throwErrno(const std::string& msg) {
+ throw qpid::Exception(msg+":"+qpid::sys::strError(errno));
+ }
+ static void throwIf(bool condition, const std::string& msg) {
+ if (condition) throw qpid::Exception(msg);
+ }
+
+ void run() {
+ std::auto_ptr<LowSocket> server;
+ try {
+ fd_set socks;
+ FdType maxFd = listener.getFd();
+ struct timeval tmo;
+ for (;;) {
+ FD_ZERO(&socks);
+ FD_SET(maxFd, &socks);
+ tmo.tv_sec = 0;
+ tmo.tv_usec = 500 * 1000;
+ if (select(maxFd+1, &socks, 0, 0, &tmo) == 0) {
+ qpid::sys::Mutex::ScopedLock l(lock);
+ throwIf(closed, "SocketProxy: Closed by close()");
+ continue;
+ }
+ throwIf(!FD_ISSET(maxFd, &socks), "SocketProxy: Accept failed");
+ break; // Accept ready... go to next step
+ }
+ server.reset(reinterpret_cast<LowSocket *>(listener.accept()));
+ maxFd = server->getFd();
+ if (client.getFd() > maxFd)
+ maxFd = client.getFd();
+ char buffer[1024];
+ for (;;) {
+ FD_ZERO(&socks);
+ tmo.tv_sec = 0;
+ tmo.tv_usec = 500 * 1000;
+ FD_SET(client.getFd(), &socks);
+ FD_SET(server->getFd(), &socks);
+ if (select(maxFd+1, &socks, 0, 0, &tmo) == 0) {
+ qpid::sys::Mutex::ScopedLock l(lock);
+ throwIf(closed, "SocketProxy: Closed by close()");
+ continue;
+ }
+ // Something is set; relay data as needed until something closes
+ if (FD_ISSET(server->getFd(), &socks)) {
+ int n = server->read(buffer, sizeof(buffer));
+ throwIf(n <= 0, "SocketProxy: server disconnected");
+ if (!dropServer) client.write(buffer, n);
+ }
+ if (FD_ISSET(client.getFd(), &socks)) {
+ int n = client.read(buffer, sizeof(buffer));
+ throwIf(n <= 0, "SocketProxy: client disconnected");
+                if (!dropClient) server->write(buffer, n);
+ }
+ if (!FD_ISSET(client.getFd(), &socks) &&
+ !FD_ISSET(server->getFd(), &socks))
+ throwIf(true, "SocketProxy: No handle ready");
+ }
+ }
+ catch (const std::exception& e) {
+ QPID_LOG(debug, "SocketProxy::run exception: " << e.what());
+ }
+ try {
+ if (server.get()) server->close();
+ close();
+ }
+ catch (const std::exception& e) {
+ QPID_LOG(debug, "SocketProxy::run exception in client/server close()" << e.what());
+ }
+ }
+
+ mutable qpid::sys::Mutex lock;
+ mutable bool closed;
+ bool joined;
+ LowSocket client, listener;
+ uint16_t port;
+ qpid::sys::Thread thread;
+ bool dropClient, dropServer;
+};
+
+}} // namespace qpid::tests
+
+#endif
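
SocketProxy is meant to sit between a client and a local broker so a test can sever or degrade the link on demand. A usage sketch, assuming only the API declared in the header above (the broker port and the surrounding test fixture are placeholders):

    #include "SocketProxy.h"
    #include <cstdint>

    void exampleNetworkFailure(uint16_t brokerPort) {
        qpid::tests::SocketProxy proxy(brokerPort);   // forwards to the real broker

        // A client would connect to proxy.getPort() instead of brokerPort here.
        uint16_t clientPort = proxy.getPort();
        (void)clientPort;

        proxy.dropServerData();   // simulate lost packets coming from the broker
        proxy.close();            // simulate a full network disconnect
        // isClosed() now returns true; the client should observe a dropped connection.
    }
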
diff --git a/cpp/src/tests/TimerTest.cpp b/cpp/src/tests/TimerTest.cpp
index 6a0a196f4e..7df94164e0 100644
--- a/cpp/src/tests/TimerTest.cpp
+++ b/cpp/src/tests/TimerTest.cpp
@@ -77,10 +77,8 @@ class TestTask : public TimerTask
BOOST_CHECK(fired);
BOOST_CHECK_EQUAL(expected_position, position);
Duration actual(start, end);
-#ifdef _MSC_VER
+#ifdef _WIN32
uint64_t difference = _abs64(expected - actual);
-#elif defined(_WIN32)
- uint64_t difference = labs(expected - actual);
#else
uint64_t difference = abs(expected - actual);
#endif
diff --git a/cpp/src/tests/TxPublishTest.cpp b/cpp/src/tests/TxPublishTest.cpp
index 152581e4ba..6b44d95baa 100644
--- a/cpp/src/tests/TxPublishTest.cpp
+++ b/cpp/src/tests/TxPublishTest.cpp
@@ -50,9 +50,10 @@ struct TxPublishTest
TxPublishTest() :
queue1(new Queue("queue1", false, &store, 0)),
queue2(new Queue("queue2", false, &store, 0)),
- msg(MessageUtils::createMessage("exchange", "routing_key", true)),
+ msg(MessageUtils::createMessage("exchange", "routing_key", false, "id")),
op(msg)
{
+ msg->getProperties<DeliveryProperties>()->setDeliveryMode(PERSISTENT);
op.deliverTo(queue1);
op.deliverTo(queue2);
}
@@ -73,7 +74,7 @@ QPID_AUTO_TEST_CASE(testPrepare)
BOOST_CHECK_EQUAL(pmsg, t.store.enqueued[0].second);
BOOST_CHECK_EQUAL(string("queue2"), t.store.enqueued[1].first);
BOOST_CHECK_EQUAL(pmsg, t.store.enqueued[1].second);
- BOOST_CHECK_EQUAL( true, ( boost::static_pointer_cast<PersistableMessage>(t.msg))->isIngressComplete());
+ BOOST_CHECK_EQUAL( true, ( boost::static_pointer_cast<PersistableMessage>(t.msg))->isEnqueueComplete());
}
QPID_AUTO_TEST_CASE(testCommit)
@@ -86,7 +87,7 @@ QPID_AUTO_TEST_CASE(testCommit)
BOOST_CHECK_EQUAL((uint32_t) 1, t.queue1->getMessageCount());
intrusive_ptr<Message> msg_dequeue = t.queue1->get().payload;
- BOOST_CHECK_EQUAL( true, (boost::static_pointer_cast<PersistableMessage>(msg_dequeue))->isIngressComplete());
+ BOOST_CHECK_EQUAL( true, (boost::static_pointer_cast<PersistableMessage>(msg_dequeue))->isEnqueueComplete());
BOOST_CHECK_EQUAL(t.msg, msg_dequeue);
BOOST_CHECK_EQUAL((uint32_t) 1, t.queue2->getMessageCount());
diff --git a/cpp/src/tests/Url.cpp b/cpp/src/tests/Url.cpp
index b30de682bc..234a62ee91 100644
--- a/cpp/src/tests/Url.cpp
+++ b/cpp/src/tests/Url.cpp
@@ -60,32 +60,6 @@ QPID_AUTO_TEST_CASE(TestParseXyz) {
BOOST_CHECK_EQUAL(Url("xyz:host").str(), "amqp:xyz:host:5672");
}
-QPID_AUTO_TEST_CASE(TestParseTricky) {
- BOOST_CHECK_EQUAL(Url("amqp").str(), "amqp:tcp:amqp:5672");
- BOOST_CHECK_EQUAL(Url("amqp:tcp").str(), "amqp:tcp:tcp:5672");
- // These are ambiguous parses and arguably not the best result
- BOOST_CHECK_EQUAL(Url("amqp:876").str(), "amqp:tcp:876:5672");
- BOOST_CHECK_EQUAL(Url("tcp:567").str(), "amqp:tcp:567:5672");
-}
-
-QPID_AUTO_TEST_CASE(TestParseIPv6) {
- Url u1("[::]");
- BOOST_CHECK_EQUAL(u1[0].host, "::");
- BOOST_CHECK_EQUAL(u1[0].port, 5672);
- Url u2("[::1]");
- BOOST_CHECK_EQUAL(u2[0].host, "::1");
- BOOST_CHECK_EQUAL(u2[0].port, 5672);
- Url u3("[::127.0.0.1]");
- BOOST_CHECK_EQUAL(u3[0].host, "::127.0.0.1");
- BOOST_CHECK_EQUAL(u3[0].port, 5672);
- Url u4("[2002::222:68ff:fe0b:e61a]");
- BOOST_CHECK_EQUAL(u4[0].host, "2002::222:68ff:fe0b:e61a");
- BOOST_CHECK_EQUAL(u4[0].port, 5672);
- Url u5("[2002::222:68ff:fe0b:e61a]:123");
- BOOST_CHECK_EQUAL(u5[0].host, "2002::222:68ff:fe0b:e61a");
- BOOST_CHECK_EQUAL(u5[0].port, 123);
-}
-
QPID_AUTO_TEST_CASE(TestParseMultiAddress) {
Url::addProtocol("xyz");
URL_CHECK_STR("amqp:tcp:host:0,xyz:foo:123,tcp:foo:0,xyz:bar:1");
diff --git a/cpp/src/tests/Variant.cpp b/cpp/src/tests/Variant.cpp
index 40f1c0cf75..b4188f524b 100644
--- a/cpp/src/tests/Variant.cpp
+++ b/cpp/src/tests/Variant.cpp
@@ -86,64 +86,6 @@ QPID_AUTO_TEST_CASE(testConversions)
BOOST_CHECK_THROW(value.asBool(), InvalidConversion);
}
-QPID_AUTO_TEST_CASE(testConversionsFromString)
-{
- Variant value;
- value = "5";
- BOOST_CHECK_EQUAL(5, value.asInt16());
- BOOST_CHECK_EQUAL(5u, value.asUint16());
-
- value = "-5";
- BOOST_CHECK_EQUAL(-5, value.asInt16());
- BOOST_CHECK_THROW(value.asUint16(), InvalidConversion);
-
- value = "18446744073709551615";
- BOOST_CHECK_EQUAL(18446744073709551615ull, value.asUint64());
- BOOST_CHECK_THROW(value.asInt64(), InvalidConversion);
-
- value = "9223372036854775808";
- BOOST_CHECK_EQUAL(9223372036854775808ull, value.asUint64());
- BOOST_CHECK_THROW(value.asInt64(), InvalidConversion);
-
- value = "-9223372036854775809";
- BOOST_CHECK_THROW(value.asUint64(), InvalidConversion);
- BOOST_CHECK_THROW(value.asInt64(), InvalidConversion);
-
- value = "2147483648";
- BOOST_CHECK_EQUAL(2147483648ul, value.asUint32());
- BOOST_CHECK_THROW(value.asInt32(), InvalidConversion);
-
- value = "-2147483649";
- BOOST_CHECK_THROW(value.asUint32(), InvalidConversion);
- BOOST_CHECK_THROW(value.asInt32(), InvalidConversion);
-
- value = "32768";
- BOOST_CHECK_EQUAL(32768u, value.asUint16());
- BOOST_CHECK_THROW(value.asInt16(), InvalidConversion);
-
- value = "-32769";
- BOOST_CHECK_THROW(value.asUint16(), InvalidConversion);
- BOOST_CHECK_THROW(value.asInt16(), InvalidConversion);
-
- value = "-2.5";
- BOOST_CHECK_EQUAL(-2.5, value.asFloat());
-
- value = "-0.875432e10";
- BOOST_CHECK_EQUAL(-0.875432e10, value.asDouble());
-
- value = "-0";
- BOOST_CHECK_EQUAL(0, value.asInt16());
- BOOST_CHECK_EQUAL(0u, value.asUint16());
-
- value = "-000";
- BOOST_CHECK_EQUAL(0, value.asInt16());
- BOOST_CHECK_EQUAL(0u, value.asUint16());
-
- value = "-0010";
- BOOST_CHECK_EQUAL(-10, value.asInt16());
- BOOST_CHECK_THROW(value.asUint16(), InvalidConversion);
-}
-
QPID_AUTO_TEST_CASE(testSizeConversionsUint)
{
Variant value;
diff --git a/cpp/src/tests/XmlClientSessionTest.cpp b/cpp/src/tests/XmlClientSessionTest.cpp
index b94c35ece0..b3b7f12b53 100644
--- a/cpp/src/tests/XmlClientSessionTest.cpp
+++ b/cpp/src/tests/XmlClientSessionTest.cpp
@@ -90,7 +90,7 @@ struct SimpleListener : public MessageListener
}
};
-struct ClientSessionFixture : public SessionFixture
+struct ClientSessionFixture : public ProxySessionFixture
{
void declareSubscribe(const string& q="odd_blue",
const string& dest="xml")
diff --git a/cpp/src/tests/acl.py b/cpp/src/tests/acl.py
index 65d5242e51..2d6a5b489d 100755
--- a/cpp/src/tests/acl.py
+++ b/cpp/src/tests/acl.py
@@ -26,11 +26,10 @@ from qpid.datatypes import uuid4
from qpid.testlib import TestBase010
from qmf.console import Session
from qpid.datatypes import Message
-import qpid.messaging
class ACLFile:
- def __init__(self, policy='data_dir/policy.acl'):
- self.f = open(policy,'w')
+ def __init__(self):
+ self.f = open('data_dir/policy.acl','w');
def write(self,line):
self.f.write(line)
@@ -51,24 +50,14 @@ class ACLTests(TestBase010):
acl = self.qmf.getObjects(_class="acl")[0]
return acl.reloadACLFile()
- def get_acl_file(self):
- return ACLFile(self.config.defines.get("policy-file", "data_dir/policy.acl"))
-
def setUp(self):
- aclf = self.get_acl_file()
+ aclf = ACLFile()
aclf.write('acl allow all all\n')
aclf.close()
TestBase010.setUp(self)
self.startQmf()
self.reload_acl()
-
- def tearDown(self):
- aclf = self.get_acl_file()
- aclf.write('acl allow all all\n')
- aclf.close()
- self.reload_acl()
- TestBase010.tearDown(self)
-
+
#=====================================
# ACL general tests
#=====================================
@@ -77,7 +66,7 @@ class ACLTests(TestBase010):
"""
Test the deny all mode
"""
- aclf = self.get_acl_file()
+ aclf = ACLFile()
aclf.write('acl allow anonymous all all\n')
aclf.write('acl allow bob@QPID create queue\n')
aclf.write('acl deny all all')
@@ -105,7 +94,7 @@ class ACLTests(TestBase010):
"""
Test the allow all mode
"""
- aclf = self.get_acl_file()
+ aclf = ACLFile()
aclf.write('acl deny bob@QPID bind exchange\n')
aclf.write('acl allow all all')
aclf.close()
@@ -137,7 +126,7 @@ class ACLTests(TestBase010):
"""
Test empty groups
"""
- aclf = self.get_acl_file()
+ aclf = ACLFile()
aclf.write('acl group\n')
aclf.write('acl group admins bob@QPID joe@QPID\n')
aclf.write('acl allow all all')
@@ -151,7 +140,7 @@ class ACLTests(TestBase010):
"""
Test illegal acl formats
"""
- aclf = self.get_acl_file()
+ aclf = ACLFile()
aclf.write('acl group admins bob@QPID joe@QPID\n')
aclf.write('acl allow all all')
aclf.close()
@@ -165,7 +154,7 @@ class ACLTests(TestBase010):
Test illegal extension lines
"""
- aclf = self.get_acl_file()
+ aclf = ACLFile()
aclf.write('group admins bob@QPID \n')
aclf.write(' \ \n')
aclf.write('joe@QPID \n')
@@ -183,7 +172,7 @@ class ACLTests(TestBase010):
"""
Test proper extension lines
"""
- aclf = self.get_acl_file()
+ aclf = ACLFile()
aclf.write('group test1 joe@EXAMPLE.com \\ \n') # should be allowed
aclf.write(' jack@EXAMPLE.com \\ \n') # should be allowed
aclf.write('jill@TEST.COM \\ \n') # should be allowed
@@ -200,7 +189,7 @@ class ACLTests(TestBase010):
Test a user defined without a realm
Ex. group admin rajith
"""
- aclf = self.get_acl_file()
+ aclf = ACLFile()
aclf.write('group admin bob\n') # shouldn't be allowed
aclf.write('acl deny admin bind exchange\n')
aclf.write('acl allow all all')
@@ -215,7 +204,7 @@ class ACLTests(TestBase010):
Test user names built from valid and invalid characters
Ex. group test1 joe@EXAMPLE.com
"""
- aclf = self.get_acl_file()
+ aclf = ACLFile()
aclf.write('group test1 joe@EXAMPLE.com\n') # should be allowed
aclf.write('group test2 jack_123-jill@EXAMPLE.com\n') # should be allowed
aclf.write('group test4 host/somemachine.example.com@EXAMPLE.COM\n') # should be allowed
@@ -226,7 +215,7 @@ class ACLTests(TestBase010):
if (result.text.find("ACL format error",0,len(result.text)) != -1):
self.fail(result)
- aclf = self.get_acl_file()
+ aclf = ACLFile()
aclf.write('group test1 joe$H@EXAMPLE.com\n') # shouldn't be allowed
aclf.write('acl allow all all')
aclf.close()
@@ -244,7 +233,7 @@ class ACLTests(TestBase010):
Test illegal queue policy
"""
- aclf = self.get_acl_file()
+ aclf = ACLFile()
aclf.write('acl deny bob@QPID create queue name=q2 exclusive=true policytype=ding\n')
aclf.write('acl allow all all')
aclf.close()
@@ -260,7 +249,7 @@ class ACLTests(TestBase010):
Test illegal queue policy
"""
- aclf = self.get_acl_file()
+ aclf = ACLFile()
aclf.write('acl deny bob@QPID create queue name=q2 maxqueuesize=-1\n')
aclf.write('acl allow all all')
aclf.close()
@@ -271,7 +260,7 @@ class ACLTests(TestBase010):
if (result.text != expected):
self.fail(result)
- aclf = self.get_acl_file()
+ aclf = ACLFile()
aclf.write('acl deny bob@QPID create queue name=q2 maxqueuesize=9223372036854775808\n')
aclf.write('acl allow all all')
aclf.close()
@@ -288,7 +277,7 @@ class ACLTests(TestBase010):
Test illegal queue policy
"""
- aclf = self.get_acl_file()
+ aclf = ACLFile()
aclf.write('acl deny bob@QPID create queue name=q2 maxqueuecount=-1\n')
aclf.write('acl allow all all')
aclf.close()
@@ -299,7 +288,7 @@ class ACLTests(TestBase010):
if (result.text != expected):
self.fail(result)
- aclf = self.get_acl_file()
+ aclf = ACLFile()
aclf.write('acl deny bob@QPID create queue name=q2 maxqueuecount=9223372036854775808\n')
aclf.write('acl allow all all')
aclf.close()
@@ -319,7 +308,7 @@ class ACLTests(TestBase010):
"""
Test cases for queue acl in allow mode
"""
- aclf = self.get_acl_file()
+ aclf = ACLFile()
aclf.write('acl deny bob@QPID create queue name=q1 durable=true passive=true\n')
aclf.write('acl deny bob@QPID create queue name=q2 exclusive=true policytype=ring\n')
aclf.write('acl deny bob@QPID access queue name=q3\n')
@@ -422,7 +411,7 @@ class ACLTests(TestBase010):
"""
Test cases for queue acl in deny mode
"""
- aclf = self.get_acl_file()
+ aclf = ACLFile()
aclf.write('acl allow bob@QPID create queue name=q1 durable=true passive=true\n')
aclf.write('acl allow bob@QPID create queue name=q2 exclusive=true policytype=ring\n')
aclf.write('acl allow bob@QPID access queue name=q3\n')
@@ -545,7 +534,7 @@ class ACLTests(TestBase010):
"""
Test cases for exchange acl in allow mode
"""
- aclf = self.get_acl_file()
+ aclf = ACLFile()
aclf.write('acl deny bob@QPID create exchange name=testEx durable=true passive=true\n')
aclf.write('acl deny bob@QPID create exchange name=ex1 type=direct\n')
aclf.write('acl deny bob@QPID access exchange name=myEx queuename=q1 routingkey=rk1.*\n')
@@ -676,7 +665,7 @@ class ACLTests(TestBase010):
"""
Test cases for exchange acl in deny mode
"""
- aclf = self.get_acl_file()
+ aclf = ACLFile()
aclf.write('acl allow bob@QPID create exchange name=myEx durable=true passive=false\n')
aclf.write('acl allow bob@QPID bind exchange name=amq.topic queuename=bar routingkey=foo.*\n')
aclf.write('acl allow bob@QPID unbind exchange name=amq.topic queuename=bar routingkey=foo.*\n')
@@ -783,52 +772,6 @@ class ACLTests(TestBase010):
if (403 == e.args[0].error_code):
self.fail("ACL should allow exchange delete request for myEx");
- def test_create_and_delete_exchange_via_qmf(self):
- """
- Test acl is enforced when creating/deleting via QMF
- methods. Note that in order to be able to send the QMF methods
- and receive the responses a significant amount of permissions
- need to be enabled (TODO: can the set below be narrowed down
- at all?)
- """
- aclf = self.get_acl_file()
- aclf.write('acl allow bob@QPID create exchange\n')
- aclf.write('acl allow admin@QPID delete exchange\n')
- aclf.write('acl allow all access exchange\n')
- aclf.write('acl allow all bind exchange\n')
- aclf.write('acl allow all create queue\n')
- aclf.write('acl allow all access queue\n')
- aclf.write('acl allow all delete queue\n')
- aclf.write('acl allow all consume queue\n')
- aclf.write('acl allow all access method\n')
- aclf.write('acl deny all all')
- aclf.close()
-
- result = self.reload_acl()
- if (result.text.find("format error",0,len(result.text)) != -1):
- self.fail(result)
-
- bob = BrokerAdmin(self.config.broker, "bob", "bob")
- bob.create_exchange("my-exchange") #should pass
- #cleanup by deleting exchange
- try:
- bob.delete_exchange("my-exchange") #should fail
- self.fail("ACL should deny exchange delete request for my-exchange");
- except Exception, e:
- self.assertEqual(7,e.args[0]["error_code"])
- assert e.args[0]["error_text"].find("unauthorized-access") == 0
- admin = BrokerAdmin(self.config.broker, "admin", "admin")
- admin.delete_exchange("my-exchange") #should pass
-
- anonymous = BrokerAdmin(self.config.broker)
- try:
- anonymous.create_exchange("another-exchange") #should fail
- self.fail("ACL should deny exchange create request for another-exchange");
- except Exception, e:
- self.assertEqual(7,e.args[0]["error_code"])
- assert e.args[0]["error_text"].find("unauthorized-access") == 0
-
-
#=====================================
# ACL consume tests
#=====================================
@@ -837,7 +780,7 @@ class ACLTests(TestBase010):
"""
Test cases for consume in allow mode
"""
- aclf = self.get_acl_file()
+ aclf = ACLFile()
aclf.write('acl deny bob@QPID consume queue name=q1\n')
aclf.write('acl deny bob@QPID consume queue name=q2\n')
aclf.write('acl allow all all')
@@ -883,7 +826,7 @@ class ACLTests(TestBase010):
"""
Test cases for consume in deny mode
"""
- aclf = self.get_acl_file()
+ aclf = ACLFile()
aclf.write('acl allow bob@QPID consume queue name=q1\n')
aclf.write('acl allow bob@QPID consume queue name=q2\n')
aclf.write('acl allow bob@QPID create queue\n')
@@ -929,7 +872,7 @@ class ACLTests(TestBase010):
"""
Test various publish acl
"""
- aclf = self.get_acl_file()
+ aclf = ACLFile()
aclf.write('acl deny bob@QPID publish exchange name=amq.direct routingkey=rk1\n')
aclf.write('acl deny bob@QPID publish exchange name=amq.topic\n')
aclf.write('acl deny bob@QPID publish exchange name=myEx routingkey=rk2\n')
@@ -978,7 +921,7 @@ class ACLTests(TestBase010):
"""
Test various publish acl
"""
- aclf = self.get_acl_file()
+ aclf = ACLFile()
aclf.write('acl allow bob@QPID publish exchange name=amq.direct routingkey=rk1\n')
aclf.write('acl allow bob@QPID publish exchange name=amq.topic\n')
aclf.write('acl allow bob@QPID publish exchange name=myEx routingkey=rk2\n')
@@ -1029,113 +972,3 @@ class ACLTests(TestBase010):
except qpid.session.SessionException, e:
if (403 == e.args[0].error_code):
self.fail("ACL should allow message transfer to exchange amq.direct with routing key rk1");
-
- #=====================================
- # ACL broker configuration tests
- #=====================================
-
- def test_broker_timestamp_config(self):
- """
- Test ACL control of the broker timestamp configuration
- """
- aclf = self.get_acl_file()
- # enable lots of stuff to allow QMF to work
- aclf.write('acl allow all create exchange\n')
- aclf.write('acl allow all access exchange\n')
- aclf.write('acl allow all bind exchange\n')
- aclf.write('acl allow all publish exchange\n')
- aclf.write('acl allow all create queue\n')
- aclf.write('acl allow all access queue\n')
- aclf.write('acl allow all delete queue\n')
- aclf.write('acl allow all consume queue\n')
- aclf.write('acl allow all access method\n')
- # this should let bob access the timestamp configuration
- aclf.write('acl allow bob@QPID access broker\n')
- aclf.write('acl allow admin@QPID all all\n')
- aclf.write('acl deny all all')
- aclf.close()
-
- result = self.reload_acl()
- if (result.text.find("format error",0,len(result.text)) != -1):
- self.fail(result)
-
- ts = None
- bob = BrokerAdmin(self.config.broker, "bob", "bob")
- ts = bob.get_timestamp_cfg() #should work
- bob.set_timestamp_cfg(ts); #should work
-
- obo = BrokerAdmin(self.config.broker, "obo", "obo")
- try:
- ts = obo.get_timestamp_cfg() #should fail
- failed = False
- except Exception, e:
- failed = True
- self.assertEqual(7,e.args[0]["error_code"])
- assert e.args[0]["error_text"].find("unauthorized-access") == 0
- assert(failed)
-
- try:
- obo.set_timestamp_cfg(ts) #should fail
- failed = False
- except Exception, e:
- failed = True
- self.assertEqual(7,e.args[0]["error_code"])
- assert e.args[0]["error_text"].find("unauthorized-access") == 0
- assert(failed)
-
- admin = BrokerAdmin(self.config.broker, "admin", "admin")
- ts = admin.get_timestamp_cfg() #should pass
- admin.set_timestamp_cfg(ts) #should pass
-
-
-class BrokerAdmin:
- def __init__(self, broker, username=None, password=None):
- self.connection = qpid.messaging.Connection(broker)
- if username:
- self.connection.username = username
- self.connection.password = password
- self.connection.sasl_mechanisms = "PLAIN"
- self.connection.open()
- self.session = self.connection.session()
- self.sender = self.session.sender("qmf.default.direct/broker")
- self.reply_to = "responses-#; {create:always}"
- self.receiver = self.session.receiver(self.reply_to)
-
- def invoke(self, method, arguments):
- content = {
- "_object_id": {"_object_name": "org.apache.qpid.broker:broker:amqp-broker"},
- "_method_name": method,
- "_arguments": arguments
- }
- request = qpid.messaging.Message(reply_to=self.reply_to, content=content)
- request.properties["x-amqp-0-10.app-id"] = "qmf2"
- request.properties["qmf.opcode"] = "_method_request"
- self.sender.send(request)
- response = self.receiver.fetch()
- self.session.acknowledge()
- if response.properties['x-amqp-0-10.app-id'] == 'qmf2':
- if response.properties['qmf.opcode'] == '_method_response':
- return response.content['_arguments']
- elif response.properties['qmf.opcode'] == '_exception':
- raise Exception(response.content['_values'])
- else: raise Exception("Invalid response received, unexpected opcode: %s" % response.properties['qmf.opcode'])
- else: raise Exception("Invalid response received, not a qmfv2 method: %s" % response.properties['x-amqp-0-10.app-id'])
- def create_exchange(self, name, exchange_type=None, options={}):
- properties = options
- if exchange_type: properties["exchange_type"] = exchange_type
- self.invoke("create", {"type": "exchange", "name":name, "properties":properties})
-
- def create_queue(self, name, properties={}):
- self.invoke("create", {"type": "queue", "name":name, "properties":properties})
-
- def delete_exchange(self, name):
- self.invoke("delete", {"type": "exchange", "name":name})
-
- def delete_queue(self, name):
- self.invoke("delete", {"type": "queue", "name":name})
-
- def get_timestamp_cfg(self):
- return self.invoke("getTimestampConfig", {})
-
- def set_timestamp_cfg(self, receive):
- return self.invoke("getTimestampConfig", {"receive":receive})
diff --git a/cpp/src/tests/allhosts b/cpp/src/tests/allhosts
index 4b4b943156..e43571aed4 100755
--- a/cpp/src/tests/allhosts
+++ b/cpp/src/tests/allhosts
@@ -29,12 +29,11 @@ Options:
-s SECONDS sleep between starting commands.
-q don't print banner lines for each host.
-o SUFFIX log output of each command to <host>.SUFFIX
- -X passed to ssh - forward X connection.
"
exit 1
}
-while getopts "tl:bs:dqo:X" opt; do
+while getopts "tl:bs:dqo:" opt; do
case $opt in
l) SSHOPTS="-l$OPTARG $SSHOPTS" ;;
t) SSHOPTS="-t $SSHOPTS" ;;
@@ -43,7 +42,6 @@ while getopts "tl:bs:dqo:X" opt; do
s) SLEEP="sleep $OPTARG" ;;
q) NOBANNER=1 ;;
o) SUFFIX=$OPTARG ;;
- X) SSHOPTS="-X $SSHOPTS" ;;
*) usage;;
esac
done
diff --git a/cpp/src/tests/brokertest.py b/cpp/src/tests/brokertest.py
index 16d7fb0b78..98f58ebfdd 100644
--- a/cpp/src/tests/brokertest.py
+++ b/cpp/src/tests/brokertest.py
@@ -29,7 +29,6 @@ from unittest import TestCase
from copy import copy
from threading import Thread, Lock, Condition
from logging import getLogger
-import qmf.console
log = getLogger("qpid.brokertest")
@@ -62,6 +61,24 @@ def is_running(pid):
class BadProcessStatus(Exception):
pass
+class ExceptionWrapper:
+ """Proxy object that adds a message to exceptions raised"""
+ def __init__(self, obj, msg):
+ self.obj = obj
+ self.msg = msg
+
+ def __getattr__(self, name):
+ func = getattr(self.obj, name)
+        if not callable(func):
+ return func
+ return lambda *args, **kwargs: self._wrap(func, args, kwargs)
+
+ def _wrap(self, func, args, kwargs):
+ try:
+ return func(*args, **kwargs)
+ except Exception, e:
+ raise Exception("%s: %s" %(self.msg, str(e)))
+
def error_line(filename, n=1):
"""Get the last n line(s) of filename for error messages"""
result = []
@@ -71,8 +88,7 @@ def error_line(filename, n=1):
for l in f:
if len(result) == n: result.pop(0)
result.append(" "+l)
- finally:
- f.close()
+ finally: f.close()
except: return ""
return ":\n" + "".join(result)
@@ -80,90 +96,111 @@ def retry(function, timeout=10, delay=.01):
"""Call function until it returns True or timeout expires.
Double the delay for each retry. Return True if function
returns true, False if timeout expires."""
- deadline = time.time() + timeout
while not function():
- remaining = deadline - time.time()
- if remaining <= 0: return False
- delay = min(delay, remaining)
+ if delay > timeout: delay = timeout
time.sleep(delay)
+ timeout -= delay
+ if timeout <= 0: return False
delay *= 2
return True
-class AtomicCounter:
- def __init__(self):
- self.count = 0
- self.lock = Lock()
-
- def next(self):
- self.lock.acquire();
- ret = self.count
- self.count += 1
- self.lock.release();
- return ret
-
-_popen_id = AtomicCounter() # Popen identifier for use in output file names.
-
-# Constants for file descriptor arguments to Popen
-FILE = "FILE" # Write to file named after process
-PIPE = subprocess.PIPE
-
class Popen(subprocess.Popen):
"""
Can set and verify expectation of process status at end of test.
Dumps command line, stdout, stderr to data dir for debugging.
"""
- def __init__(self, cmd, expect=EXPECT_EXIT_OK, stdin=None, stdout=FILE, stderr=FILE):
- """Run cmd (should be a list of program and arguments)
+ class DrainThread(Thread):
+ """Thread to drain a file object and write the data to a file."""
+ def __init__(self, infile, outname):
+ Thread.__init__(self)
+ self.infile, self.outname = infile, outname
+ self.outfile = None
+
+ def run(self):
+ try:
+ for line in self.infile:
+ if self.outfile is None:
+ self.outfile = open(self.outname, "w")
+ self.outfile.write(line)
+ finally:
+ self.infile.close()
+ if self.outfile is not None: self.outfile.close()
+
+ class OutStream(ExceptionWrapper):
+ """Wrapper for output streams, handles exceptions & draining output"""
+ def __init__(self, infile, outfile, msg):
+ ExceptionWrapper.__init__(self, infile, msg)
+ self.infile, self.outfile = infile, outfile
+ self.thread = None
+
+ def drain(self):
+ if self.thread is None:
+ self.thread = Popen.DrainThread(self.infile, self.outfile)
+ self.thread.start()
+
+ def outfile(self, ext): return "%s.%s" % (self.pname, ext)
+
+ def __init__(self, cmd, expect=EXPECT_EXIT_OK, drain=True):
+ """Run cmd (should be a list of arguments)
expect - if set verify expectation at end of test.
- stdout, stderr - can have the same values as for subprocess.Popen as well as
- FILE (the default) which means write to a file named after the process.
-        stdin - like subprocess.Popen but defaults to PIPE
+ drain - if true (default) drain stdout/stderr to files.
"""
self._clean = False
self._clean_lock = Lock()
assert find_exe(cmd[0]), "executable not found: "+cmd[0]
if type(cmd) is type(""): cmd = [cmd] # Make it a list.
self.cmd = [ str(x) for x in cmd ]
+ self.returncode = None
self.expect = expect
- self.id = _popen_id.next()
- self.pname = "%s-%d" % (os.path.split(self.cmd[0])[1], self.id)
- if stdout == FILE: stdout = open(self.outfile("out"), "w")
- if stderr == FILE: stderr = open(self.outfile("err"), "w")
try:
- subprocess.Popen.__init__(self, self.cmd, bufsize=0, executable=None,
- stdin=stdin, stdout=stdout, stderr=stderr,
- close_fds=True)
- except ValueError: # Windows can't do close_fds
- subprocess.Popen.__init__(self, self.cmd, bufsize=0, executable=None,
- stdin=stdin, stdout=stdout, stderr=stderr)
-
+ subprocess.Popen.__init__(self, self.cmd, 0, None, subprocess.PIPE, subprocess.PIPE, subprocess.PIPE, close_fds=True)
+ except ValueError: # Windows can't do close_fds
+ subprocess.Popen.__init__(self, self.cmd, 0, None, subprocess.PIPE, subprocess.PIPE, subprocess.PIPE)
+ self.pname = "%s-%d" % (os.path.split(self.cmd[0])[1], self.pid)
+ msg = "Process %s" % self.pname
+ self.stdin = ExceptionWrapper(self.stdin, msg)
+ self.stdout = Popen.OutStream(self.stdout, self.outfile("out"), msg)
+ self.stderr = Popen.OutStream(self.stderr, self.outfile("err"), msg)
f = open(self.outfile("cmd"), "w")
- try: f.write("%s\n%d"%(self.cmd_str(), self.pid))
+ try: f.write(self.cmd_str())
finally: f.close()
log.debug("Started process %s: %s" % (self.pname, " ".join(self.cmd)))
+ if drain: self.drain()
- def __str__(self): return "Popen<%s>"%(self.pname)
+ def __str__(self): return "Popen<%s>"%(self.pname)
- def outfile(self, ext): return "%s.%s" % (self.pname, ext)
+ def drain(self):
+ """Start threads to drain stdout/err"""
+ self.stdout.drain()
+ self.stderr.drain()
+
+ def _cleanup(self):
+ """Close pipes to sub-process"""
+ self._clean_lock.acquire()
+ try:
+ if self._clean: return
+ self._clean = True
+ self.stdin.close()
+ self.drain() # Drain output pipes.
+ self.stdout.thread.join() # Drain thread closes pipe.
+ self.stderr.thread.join()
+ finally: self._clean_lock.release()
def unexpected(self,msg):
err = error_line(self.outfile("err")) or error_line(self.outfile("out"))
raise BadProcessStatus("%s %s%s" % (self.pname, msg, err))
-
+
def stop(self): # Clean up at end of test.
try:
if self.expect == EXPECT_UNKNOWN:
try: self.kill() # Just make sure its dead
except: pass
elif self.expect == EXPECT_RUNNING:
- if self.poll() != None:
- self.unexpected("expected running, exit code %d" % self.returncode)
- else:
- try:
- self.kill()
- except Exception,e:
- self.unexpected("exception from kill: %s" % str(e))
+ try:
+ self.kill()
+ except:
+ self.unexpected("expected running, exit code %d" % self.wait())
else:
retry(lambda: self.poll() is not None)
if self.returncode is None: # Still haven't stopped
@@ -175,21 +212,40 @@ class Popen(subprocess.Popen):
self.unexpected("expected error")
finally:
self.wait() # Clean up the process.
-
+
def communicate(self, input=None):
- ret = subprocess.Popen.communicate(self, input)
- self.cleanup()
- return ret
+ if input:
+ self.stdin.write(input)
+ self.stdin.close()
+ outerr = (self.stdout.read(), self.stderr.read())
+ self.wait()
+ return outerr
- def is_running(self): return self.poll() is None
+ def is_running(self):
+ return self.poll() is None
def assert_running(self):
if not self.is_running(): self.unexpected("Exit code %d" % self.returncode)
+ def poll(self, _deadstate=None): # _deadstate required by base class in python 2.4
+ if self.returncode is None:
+            # Pass _deadstate only if it has been set; there is no _deadstate
+            # parameter in Python 2.6.
+ if _deadstate is None: ret = subprocess.Popen.poll(self)
+ else: ret = subprocess.Popen.poll(self, _deadstate)
+
+ if (ret != -1):
+ self.returncode = ret
+ self._cleanup()
+ return self.returncode
+
def wait(self):
- ret = subprocess.Popen.wait(self)
- self._cleanup()
- return ret
+ if self.returncode is None:
+ self.drain()
+ try: self.returncode = subprocess.Popen.wait(self)
+ except OSError,e: raise OSError("Wait failed %s: %s"%(self.pname, e))
+ self._cleanup()
+ return self.returncode
def terminate(self):
try: subprocess.Popen.terminate(self)
@@ -198,8 +254,7 @@ class Popen(subprocess.Popen):
os.kill( self.pid , signal.SIGTERM)
except AttributeError: # no os.kill, using taskkill.. (Windows only)
os.popen('TASKKILL /PID ' +str(self.pid) + ' /F')
- self._cleanup()
-
+
def kill(self):
try: subprocess.Popen.kill(self)
except AttributeError: # No terminate method
@@ -207,20 +262,6 @@ class Popen(subprocess.Popen):
os.kill( self.pid , signal.SIGKILL)
except AttributeError: # no os.kill, using taskkill.. (Windows only)
os.popen('TASKKILL /PID ' +str(self.pid) + ' /F')
- self._cleanup()
-
- def _cleanup(self):
- """Clean up after a dead process"""
- self._clean_lock.acquire()
- if not self._clean:
- self._clean = True
- try: self.stdin.close()
- except: pass
- try: self.stdout.close()
- except: pass
- try: self.stderr.close()
- except: pass
- self._clean_lock.release()
def cmd_str(self): return " ".join([str(s) for s in self.cmd])
@@ -247,11 +288,11 @@ class Broker(Popen):
while (os.path.exists(self.log)):
self.log = "%s-%d.log" % (self.name, i)
i += 1
-
+
def get_log(self):
return os.path.abspath(self.log)
- def __init__(self, test, args=[], name=None, expect=EXPECT_RUNNING, port=0, log_level=None, wait=None, show_cmd=False):
+ def __init__(self, test, args=[], name=None, expect=EXPECT_RUNNING, port=0, log_level=None, wait=None):
"""Start a broker daemon. name determines the data-dir and log
file names."""
@@ -277,20 +318,15 @@ class Broker(Popen):
cmd += ["--log-to-file", self.log]
cmd += ["--log-to-stderr=no"]
if log_level != None:
- cmd += ["--log-enable=%s" % log_level]
+ cmd += ["--log-enable=%s" % log_level]
self.datadir = self.name
cmd += ["--data-dir", self.datadir]
- if show_cmd: print cmd
- Popen.__init__(self, cmd, expect, stdout=PIPE)
+ Popen.__init__(self, cmd, expect, drain=False)
test.cleanup_stop(self)
self._host = "127.0.0.1"
log.debug("Started broker %s (%s, %s)" % (self.name, self.pname, self.log))
self._log_ready = False
- def startQmf(self, handler=None):
- self.qmf_session = qmf.console.Session(handler)
- self.qmf_broker = self.qmf_session.addBroker("%s:%s" % (self.host(), self.port()))
-
def host(self): return self._host
def port(self):
@@ -321,7 +357,7 @@ class Broker(Popen):
s = c.session(str(qpid.datatypes.uuid4()))
s.queue_declare(queue=queue)
c.close()
-
+
def _prep_sender(self, queue, durable, xprops):
s = queue + "; {create:always, node:{durable:" + str(durable)
if xprops != None: s += ", x-declare:{" + xprops + "}"
@@ -365,14 +401,13 @@ class Broker(Popen):
def log_ready(self):
"""Return true if the log file exists and contains a broker ready message"""
- if not self._log_ready:
- self._log_ready = find_in_file("notice Broker running", self.log)
- return self._log_ready
+ if self._log_ready: return True
+ self._log_ready = find_in_file("notice Broker running", self.log)
def ready(self, **kwargs):
"""Wait till broker is ready to serve clients"""
# First make sure the broker is listening by checking the log.
- if not retry(self.log_ready, timeout=60):
+ if not retry(self.log_ready, timeout=30):
raise Exception(
"Timed out waiting for broker %s%s"%(self.name, error_line(self.log,5)))
# Create a connection and a session. For a cluster broker this will
@@ -381,27 +416,23 @@ class Broker(Popen):
c = self.connect(**kwargs)
try: c.session()
finally: c.close()
- except Exception,e: raise RethrownException(
- "Broker %s not responding: (%s)%s"%(self.name,e,error_line(self.log, 5)))
+ except: raise RethrownException(
+ "Broker %s failed ready test%s"%(self.name,error_line(self.log, 5)))
def store_state(self):
- f = open(os.path.join(self.datadir, "cluster", "store.status"))
- try: uuids = f.readlines()
- finally: f.close()
+ uuids = open(os.path.join(self.datadir, "cluster", "store.status")).readlines()
null_uuid="00000000-0000-0000-0000-000000000000\n"
if len(uuids) < 2: return "unknown" # we looked while the file was being updated.
if uuids[0] == null_uuid: return "empty"
if uuids[1] == null_uuid: return "dirty"
return "clean"
-
+
class Cluster:
"""A cluster of brokers in a test."""
- # Client connection options for use in failover tests.
- CONNECTION_OPTIONS = "reconnect:true,reconnect-timeout:10,reconnect-urls-replace:true"
_cluster_count = 0
- def __init__(self, test, count=0, args=[], expect=EXPECT_RUNNING, wait=True, show_cmd=False):
+ def __init__(self, test, count=0, args=[], expect=EXPECT_RUNNING, wait=True):
self.test = test
self._brokers=[]
self.name = "cluster%d" % Cluster._cluster_count
@@ -412,19 +443,16 @@ class Cluster:
self.args += [ "--log-enable=info+", "--log-enable=debug+:cluster"]
assert BrokerTest.cluster_lib, "Cannot locate cluster plug-in"
self.args += [ "--load-module", BrokerTest.cluster_lib ]
- self.start_n(count, expect=expect, wait=wait, show_cmd=show_cmd)
+ self.start_n(count, expect=expect, wait=wait)
- def start(self, name=None, expect=EXPECT_RUNNING, wait=True, args=[], port=0, show_cmd=False):
+ def start(self, name=None, expect=EXPECT_RUNNING, wait=True, args=[], port=0):
"""Add a broker to the cluster. Returns the index of the new broker."""
if not name: name="%s-%d" % (self.name, len(self._brokers))
- self._brokers.append(self.test.broker(self.args+args, name, expect, wait, port=port, show_cmd=show_cmd))
+ self._brokers.append(self.test.broker(self.args+args, name, expect, wait, port=port))
return self._brokers[-1]
- def ready(self):
- for b in self: b.ready()
-
- def start_n(self, count, expect=EXPECT_RUNNING, wait=True, args=[], show_cmd=False):
- for i in range(count): self.start(expect=expect, wait=wait, args=args, show_cmd=show_cmd)
+ def start_n(self, count, expect=EXPECT_RUNNING, wait=True, args=[]):
+ for i in range(count): self.start(expect=expect, wait=wait, args=args)
# Behave like a list of brokers.
def __len__(self): return len(self._brokers)
@@ -453,7 +481,7 @@ class BrokerTest(TestCase):
rootdir = os.getcwd()
def configure(self, config): self.config=config
-
+
def setUp(self):
outdir = self.config.defines.get("OUTDIR") or "brokertest.tmp"
self.dir = os.path.join(self.rootdir, outdir, self.id())
@@ -474,49 +502,40 @@ class BrokerTest(TestCase):
"""Call thing.stop at end of test"""
self.stopem.append(stopable)
- def popen(self, cmd, expect=EXPECT_EXIT_OK, stdin=None, stdout=FILE, stderr=FILE):
+ def popen(self, cmd, expect=EXPECT_EXIT_OK, drain=True):
"""Start a process that will be killed at end of test, in the test dir."""
os.chdir(self.dir)
- p = Popen(cmd, expect, stdin=stdin, stdout=stdout, stderr=stderr)
+ p = Popen(cmd, expect, drain)
self.cleanup_stop(p)
return p
- def broker(self, args=[], name=None, expect=EXPECT_RUNNING, wait=True, port=0, log_level=None, show_cmd=False):
+ def broker(self, args=[], name=None, expect=EXPECT_RUNNING, wait=True, port=0, log_level=None):
"""Create and return a broker ready for use"""
- b = Broker(self, args=args, name=name, expect=expect, port=port, log_level=log_level, show_cmd=show_cmd)
+ b = Broker(self, args=args, name=name, expect=expect, port=port, log_level=log_level)
if (wait):
try: b.ready()
except Exception, e:
raise RethrownException("Failed to start broker %s(%s): %s" % (b.name, b.log, e))
return b
- def cluster(self, count=0, args=[], expect=EXPECT_RUNNING, wait=True, show_cmd=False):
+ def cluster(self, count=0, args=[], expect=EXPECT_RUNNING, wait=True):
"""Create and return a cluster ready for use"""
- cluster = Cluster(self, count, args, expect=expect, wait=wait, show_cmd=show_cmd)
+ cluster = Cluster(self, count, args, expect=expect, wait=wait)
return cluster
- def browse(self, session, queue, timeout=0):
- """Return a list with the contents of each message on queue."""
- r = session.receiver("%s;{mode:browse}"%(queue))
- r.capacity = 100
- try:
- contents = []
- try:
- while True: contents.append(r.fetch(timeout=timeout).content)
- except messaging.Empty: pass
- finally: r.close()
- return contents
-
def assert_browse(self, session, queue, expect_contents, timeout=0):
"""Assert that the contents of messages on queue (as retrieved
using session and timeout) exactly match the strings in
expect_contents"""
- actual_contents = self.browse(session, queue, timeout)
- self.assertEqual(expect_contents, actual_contents)
-def join(thread, timeout=10):
- thread.join(timeout)
- if thread.isAlive(): raise Exception("Timed out joining thread %s"%thread)
+ r = session.receiver("%s;{mode:browse}"%(queue))
+ actual_contents = []
+ try:
+ for c in expect_contents: actual_contents.append(r.fetch(timeout=timeout).content)
+ while True: actual_contents.append(r.fetch(timeout=0).content) # Check for extra messages.
+ except messaging.Empty: pass
+ r.close()
+ self.assertEqual(expect_contents, actual_contents)
class RethrownException(Exception):
"""Captures the stack trace of the current exception to be thrown later"""
@@ -535,16 +554,15 @@ class StoppableThread(Thread):
def stop(self):
self.stopped = True
- join(self)
+ self.join()
if self.error: raise self.error
-
+
class NumberedSender(Thread):
"""
Thread to run a sender client and send numbered messages until stopped.
"""
- def __init__(self, broker, max_depth=None, queue="test-queue",
- connection_options=Cluster.CONNECTION_OPTIONS):
+ def __init__(self, broker, max_depth=None, queue="test-queue"):
"""
max_depth: enable flow control, ensure sent - received <= max_depth.
Requires self.notify_received(n) to be called each time messages are received.
@@ -555,11 +573,9 @@ class NumberedSender(Thread):
"--broker", "localhost:%s"%broker.port(),
"--address", "%s;{create:always}"%queue,
"--failover-updates",
- "--connection-options", "{%s}"%(connection_options),
"--content-stdin"
],
- expect=EXPECT_RUNNING,
- stdin=PIPE)
+ expect=EXPECT_RUNNING)
self.condition = Condition()
self.max = max_depth
self.received = 0
@@ -574,7 +590,6 @@ class NumberedSender(Thread):
try:
self.sent = 0
while not self.stopped:
- self.sender.assert_running()
if self.max:
self.condition.acquire()
while not self.stopped and self.sent - self.received > self.max:
@@ -597,17 +612,16 @@ class NumberedSender(Thread):
self.stopped = True
self.condition.notify()
finally: self.condition.release()
- join(self)
+ self.join()
self.write_message(-1) # end-of-messages marker.
if self.error: raise self.error
-
+
class NumberedReceiver(Thread):
"""
Thread to run a receiver client and verify it receives
sequentially numbered messages.
"""
- def __init__(self, broker, sender = None, queue="test-queue",
- connection_options=Cluster.CONNECTION_OPTIONS):
+ def __init__(self, broker, sender = None, queue="test-queue"):
"""
sender: enable flow control. Call sender.received(n) for each message received.
"""
@@ -618,24 +632,22 @@ class NumberedReceiver(Thread):
"--broker", "localhost:%s"%broker.port(),
"--address", "%s;{create:always}"%queue,
"--failover-updates",
- "--connection-options", "{%s}"%(connection_options),
"--forever"
],
expect=EXPECT_RUNNING,
- stdout=PIPE)
+ drain=False)
self.lock = Lock()
self.error = None
self.sender = sender
- self.received = 0
def read_message(self):
return int(self.receiver.stdout.readline())
-
+
def run(self):
try:
+ self.received = 0
m = self.read_message()
while m != -1:
- self.receiver.assert_running()
assert(m <= self.received) # Check for missing messages
if (m == self.received): # Ignore duplicates
self.received += 1
@@ -647,7 +659,7 @@ class NumberedReceiver(Thread):
def stop(self):
"""Returns when termination message is received"""
- join(self)
+ self.join()
if self.error: raise self.error
class ErrorGenerator(StoppableThread):
@@ -662,7 +674,7 @@ class ErrorGenerator(StoppableThread):
self.broker=broker
broker.test.cleanup_stop(self)
self.start()
-
+
def run(self):
c = self.broker.connect_old()
try:
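
Both versions of retry() touched in this hunk implement the same idea: poll a predicate with an exponentially growing delay until it returns true or the time budget is exhausted; the version being removed tracks an absolute deadline, while the restored one decrements the remaining timeout. A self-contained sketch of the deadline-based form (the log file name in the usage line is only illustrative) is:

    import os, time

    def retry(function, timeout=10, delay=.01):
        """Call function until it returns a true value or timeout expires.
        Double the delay on each retry, but never sleep past the deadline."""
        deadline = time.time() + timeout
        while not function():
            remaining = deadline - time.time()
            if remaining <= 0:
                return False
            time.sleep(min(delay, remaining))
            delay *= 2
        return True

    # Usage in the style of Broker.ready(): wait for a broker log file to appear.
    if retry(lambda: os.path.exists("broker-0.log"), timeout=30):
        print("broker log found")
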
diff --git a/cpp/src/tests/cli_tests.py b/cpp/src/tests/cli_tests.py
index 6c75927461..deef03279d 100755
--- a/cpp/src/tests/cli_tests.py
+++ b/cpp/src/tests/cli_tests.py
@@ -365,26 +365,6 @@ class CliTests(TestBase010):
self.assertEqual(queue._altExchange_.name, altName)
self.assertEqual(found, True)
- def test_qpid_config_list_queues_arguments(self):
- """
- Test to verify that when the type of a policy limit is
- actually a string (though still a valid value), it does not
- upset qpid-config
- """
- self.startQmf();
- qmf = self.qmf
-
- names = ["queue_capacity%s" % (i) for i in range(1, 6)]
- for name in names:
- self.session.queue_declare(queue=name, exclusive=True,
- arguments={'qpid.max_count' : str(i), 'qpid.max_size': '100'})
-
- output = os.popen(self.qpid_config_command(" queues")).readlines()
- queues = [line.split()[0] for line in output[2:len(output)]] #ignore first two lines (header)
-
- for name in names:
- assert name in queues, "%s not in %s" % (name, queues)
-
def test_qpid_route(self):
self.startQmf();
qmf = self.qmf
@@ -425,7 +405,7 @@ class CliTests(TestBase010):
qmf = self.qmf
ret = self.qpid_route_api("dynamic add "
- + " --client-sasl-mechanism PLAIN "
+ + " --sasl-mechanism PLAIN "
+ "guest/guest@localhost:"+str(self.broker.port) + " "
+ str(self.remote_host())+":"+str(self.remote_port()) + " "
+"amq.direct")
@@ -444,7 +424,7 @@ class CliTests(TestBase010):
qmf = self.qmf
ret = self.qpid_route_api("dynamic add "
- + " --client-sasl-mechanism PLAIN "
+ + " --sasl-mechanism PLAIN "
+ "localhost:"+str(self.broker.port) + " "
+ str(self.remote_host())+":"+str(self.remote_port()) + " "
+"amq.direct")
diff --git a/cpp/src/tests/cluster_python_tests_failing.txt b/cpp/src/tests/cluster_python_tests_failing.txt
index f8639d7b59..7ba8089946 100644
--- a/cpp/src/tests/cluster_python_tests_failing.txt
+++ b/cpp/src/tests/cluster_python_tests_failing.txt
@@ -1,4 +1,32 @@
qpid_tests.broker_0_10.management.ManagementTest.test_purge_queue
qpid_tests.broker_0_10.management.ManagementTest.test_connection_close
+qpid_tests.broker_0_10.dtx.DtxTests.test_bad_resume
+qpid_tests.broker_0_10.dtx.DtxTests.test_commit_unknown
+qpid_tests.broker_0_10.dtx.DtxTests.test_end
+qpid_tests.broker_0_10.dtx.DtxTests.test_end_suspend_and_fail
+qpid_tests.broker_0_10.dtx.DtxTests.test_end_unknown_xid
+qpid_tests.broker_0_10.dtx.DtxTests.test_forget_xid_on_completion
+qpid_tests.broker_0_10.dtx.DtxTests.test_get_timeout
+qpid_tests.broker_0_10.dtx.DtxTests.test_get_timeout_unknown
+qpid_tests.broker_0_10.dtx.DtxTests.test_implicit_end
+qpid_tests.broker_0_10.dtx.DtxTests.test_invalid_commit_not_ended
+qpid_tests.broker_0_10.dtx.DtxTests.test_invalid_commit_one_phase_false
+qpid_tests.broker_0_10.dtx.DtxTests.test_invalid_commit_one_phase_true
+qpid_tests.broker_0_10.dtx.DtxTests.test_invalid_prepare_not_ended
+qpid_tests.broker_0_10.dtx.DtxTests.test_invalid_rollback_not_ended
+qpid_tests.broker_0_10.dtx.DtxTests.test_prepare_unknown
+qpid_tests.broker_0_10.dtx.DtxTests.test_recover
+qpid_tests.broker_0_10.dtx.DtxTests.test_rollback_unknown
+qpid_tests.broker_0_10.dtx.DtxTests.test_select_required
+qpid_tests.broker_0_10.dtx.DtxTests.test_set_timeout
+qpid_tests.broker_0_10.dtx.DtxTests.test_simple_commit
+qpid_tests.broker_0_10.dtx.DtxTests.test_simple_prepare_commit
+qpid_tests.broker_0_10.dtx.DtxTests.test_simple_prepare_rollback
+qpid_tests.broker_0_10.dtx.DtxTests.test_simple_rollback
+qpid_tests.broker_0_10.dtx.DtxTests.test_start_already_known
+qpid_tests.broker_0_10.dtx.DtxTests.test_start_join
+qpid_tests.broker_0_10.dtx.DtxTests.test_start_join_and_resume
+qpid_tests.broker_0_10.dtx.DtxTests.test_suspend_resume
+qpid_tests.broker_0_10.dtx.DtxTests.test_suspend_start_end_resume
qpid_tests.broker_0_10.message.MessageTests.test_ttl
qpid_tests.broker_0_10.management.ManagementTest.test_broker_connectivity_oldAPI
diff --git a/cpp/src/tests/cluster_test_logs.py b/cpp/src/tests/cluster_test_logs.py
index 3c7e8e8020..1fa9014d11 100755
--- a/cpp/src/tests/cluster_test_logs.py
+++ b/cpp/src/tests/cluster_test_logs.py
@@ -53,19 +53,16 @@ def filter_log(log):
'stall for update|unstall, ignore update|cancelled offer .* unstall',
'caught up',
'active for links|Passivating links|Activating links',
- 'info Connecting: .*', # UpdateClient connection
'info Connection.* connected to', # UpdateClient connection
- 'warning Connection \\[[-0-9.: ]+\\] closed', # UpdateClient connection
+ 'warning Connection [\d+ [0-9.:]+] closed', # UpdateClient connection
'warning Broker closed connection: 200, OK',
'task late',
'task overran',
'warning CLOSING .* unsent data',
'Inter-broker link ',
- 'Running in a cluster, marking store',
- 'debug Sending keepalive signal to watchdog', # Watchdog timer thread
- 'last broker standing joined by 1 replicas, updating queue policies.',
- 'Connection .* timed out: closing' # heartbeat connection close
+ 'Running in a cluster, marking store'
])
+ skip_re = re.compile(skip)
# Regex to match a UUID
uuid='\w\w\w\w\w\w\w\w-\w\w\w\w-\w\w\w\w-\w\w\w\w-\w\w\w\w\w\w\w\w\w\w\w\w'
# Substitutions to remove expected differences
@@ -83,13 +80,6 @@ def filter_log(log):
(r' map={.*_object_name:([^,}]*)[,}].*', r' \1'), # V2 map - just keep name
(r'\d+-\d+-\d+--\d+', 'X-X-X--X'), # V1 Object IDs
]
- # Substitutions to mask known issue: durable test shows inconsistent "changed stats for com.redhat.rhm.store:journal" messages.
- skip += '|Changed V[12] statistics com.redhat.rhm.store:journal'
- subs += [(r'to=console.obj.1.0.com.redhat.rhm.store.journal props=\d+ stats=\d+',
- 'to=console.obj.1.0.com.redhat.rhm.store.journal props=NN stats=NN')]
-
- skip_re = re.compile(skip)
- subs = [(re.compile(pattern), subst) for pattern, subst in subs]
for l in open(log):
if skip_re.search(l): continue
for pattern,subst in subs: l = re.sub(pattern,subst,l)
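
The filter_log() edits above all revolve around one normalisation recipe: join the expected-noise patterns into a single alternation compiled as a skip regex, keep a list of (pattern, replacement) substitutions for fields that legitimately differ between brokers (UUIDs, object ids, timestamps), and run every log line through both before the logs are compared. A reduced sketch of that recipe, using illustrative patterns rather than the real cluster ones, looks like:

    import re

    def normalise_log(path):
        """Return log lines with expected noise dropped and variable fields masked."""
        skip = '|'.join([
            'notice Broker running',      # illustrative noise patterns only
            'task late',
            'task overran',
        ])
        skip_re = re.compile(skip)
        subs = [
            (re.compile(r'[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}'), 'UUID'),
            (re.compile(r'\d{4}-\d{2}-\d{2} [0-9:.]+'), 'TIMESTAMP'),
        ]
        lines = []
        for l in open(path):
            if skip_re.search(l):
                continue
            for pattern, subst in subs:
                l = pattern.sub(subst, l)
            lines.append(l)
        return lines

    # Logs from two brokers of the same cluster should normalise identically:
    # assert normalise_log('cluster0-0.log') == normalise_log('cluster0-1.log')
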
diff --git a/cpp/src/tests/cluster_tests.py b/cpp/src/tests/cluster_tests.py
index 0e80e06d34..cbad4010b4 100755
--- a/cpp/src/tests/cluster_tests.py
+++ b/cpp/src/tests/cluster_tests.py
@@ -18,13 +18,12 @@
# under the License.
#
-import os, signal, sys, time, imp, re, subprocess, glob, random, logging
-import cluster_test_logs
+import os, signal, sys, time, imp, re, subprocess, glob, cluster_test_logs
from qpid import datatypes, messaging
from brokertest import *
from qpid.harness import Skipped
-from qpid.messaging import Message, Empty, Disposition, REJECTED, util
-from threading import Thread, Lock, Condition
+from qpid.messaging import Message, Empty
+from threading import Thread, Lock
from logging import getLogger
from itertools import chain
from tempfile import NamedTemporaryFile
@@ -97,15 +96,9 @@ class ShortTests(BrokerTest):
destination="amq.direct",
message=qpid.datatypes.Message(props, "content"))
-        # Try message with TTL and different headers/properties
- cluster[0].send_message("q", Message(durable=True, ttl=100000))
- cluster[0].send_message("q", Message(durable=True, properties={}, ttl=100000))
- cluster[0].send_message("q", Message(durable=True, properties={"x":10}, ttl=100000))
-
# Now update a new member and compare their dumps.
cluster.start(args=["--test-store-dump", "updatee.dump"])
assert readfile("direct.dump") == readfile("updatee.dump")
-
os.remove("direct.dump")
os.remove("updatee.dump")
@@ -115,22 +108,19 @@ class ShortTests(BrokerTest):
acl=os.path.join(os.getcwd(), "policy.acl")
aclf=file(acl,"w")
aclf.write("""
-acl allow zig@QPID all all
-acl deny all all
+acl deny zag@QPID create queue
+acl allow all all
""")
aclf.close()
- cluster = self.cluster(1, args=["--auth", "yes",
+ cluster = self.cluster(2, args=["--auth", "yes",
"--sasl-config", sasl_config,
"--load-module", os.getenv("ACL_LIB"),
"--acl-file", acl])
# Valid user/password, ensure queue is created.
c = cluster[0].connect(username="zig", password="zig")
- c.session().sender("ziggy;{create:always,node:{x-declare:{exclusive:true}}}")
+ c.session().sender("ziggy;{create:always}")
c.close()
- cluster.start() # Start second node.
-
- # Check queue is created on second node.
c = cluster[1].connect(username="zig", password="zig")
c.session().receiver("ziggy;{assert:always}")
c.close()
@@ -159,7 +149,7 @@ acl deny all all
self.fail("Expected exception")
except messaging.exceptions.UnauthorizedAccess: pass
# make sure the queue was not created at the other node.
- c = cluster[1].connect(username="zig", password="zig")
+ c = cluster[0].connect(username="zag", password="zag")
try:
s = c.session()
s.sender("zaggy;{assert:always}")
@@ -167,35 +157,6 @@ acl deny all all
self.fail("Expected exception")
except messaging.exceptions.NotFound: pass
- def test_sasl_join(self):
- """Verify SASL authentication between brokers when joining a cluster."""
- sasl_config=os.path.join(self.rootdir, "sasl_config")
- # Test with a valid username/password
- cluster = self.cluster(1, args=["--auth", "yes",
- "--sasl-config", sasl_config,
- "--load-module", os.getenv("ACL_LIB"),
- "--cluster-username=zig",
- "--cluster-password=zig",
- "--cluster-mechanism=PLAIN"
- ])
- cluster.start()
- cluster.ready()
- c = cluster[1].connect(username="zag", password="zag")
-
- # Test with an invalid username/password
- cluster = self.cluster(1, args=["--auth", "yes",
- "--sasl-config", sasl_config,
- "--load-module", os.getenv("ACL_LIB"),
- "--cluster-username=x",
- "--cluster-password=y",
- "--cluster-mechanism=PLAIN"
- ])
- try:
- cluster.start(expect=EXPECT_EXIT_OK)
- cluster[1].ready()
- self.fail("Expected exception")
- except: pass
-
def test_user_id_update(self):
"""Ensure that user-id of an open session is updated to new cluster members"""
sasl_config=os.path.join(self.rootdir, "sasl_config")
@@ -285,6 +246,25 @@ acl deny all all
session1 = cluster[1].connect().session()
for q in queues: self.assert_browse(session1, "q1", ["foo"])
+ def test_dr_no_message(self):
+ """Regression test for https://bugzilla.redhat.com/show_bug.cgi?id=655141
+ Joining broker crashes with 'error deliveryRecord no update message'
+ """
+
+ cluster = self.cluster(1)
+ session0 = cluster[0].connect().session()
+ s = session0.sender("q1;{create:always}")
+ s.send(Message("a", ttl=0.05), sync=False)
+ s.send(Message("b", ttl=0.05), sync=False)
+ r1 = session0.receiver("q1")
+ self.assertEqual("a", r1.fetch(timeout=0).content)
+ r2 = session0.receiver("q1;{mode:browse}")
+ self.assertEqual("b", r2.fetch(timeout=0).content)
+        # Leave messages un-acknowledged, let them expire, then start new broker.
+ time.sleep(.1)
+ cluster.start()
+ self.assertRaises(Empty, cluster[1].connect().session().receiver("q1").fetch,0)
+
def test_route_update(self):
"""Regression test for https://issues.apache.org/jira/browse/QPID-2982
Links and bridges associated with routes were not replicated on update.
@@ -292,7 +272,6 @@ acl deny all all
client was attached.
"""
args=["--mgmt-pub-interval=1","--log-enable=trace+:management"]
- # First broker will be killed.
cluster0 = self.cluster(1, args=args)
cluster1 = self.cluster(1, args=args)
assert 0 == subprocess.call(
@@ -322,695 +301,9 @@ acl deny all all
qpid_tool.wait()
scanner.join()
assert scanner.found
- # Regression test for https://issues.apache.org/jira/browse/QPID-3235
- # Inconsistent stats when changing elder.
-
- # Force a change of elder
- cluster0.start()
- cluster0[0].expect=EXPECT_EXIT_FAIL # About to die.
- cluster0[0].kill()
- time.sleep(2) # Allow a management interval to pass.
# Verify logs are consistent
cluster_test_logs.verify_logs()
- def test_redelivered(self):
- """Verify that redelivered flag is set correctly on replayed messages"""
- cluster = self.cluster(2, expect=EXPECT_EXIT_FAIL)
- url = "amqp:tcp:%s,tcp:%s" % (cluster[0].host_port(), cluster[1].host_port())
- queue = "my-queue"
- cluster[0].declare_queue(queue)
- self.sender = self.popen(
- ["qpid-send",
- "--broker", url,
- "--address", queue,
- "--sequence=true",
- "--send-eos=1",
- "--messages=100000",
- "--connection-options={%s}"%(Cluster.CONNECTION_OPTIONS)
- ])
- self.receiver = self.popen(
- ["qpid-receive",
- "--broker", url,
- "--address", queue,
- "--ignore-duplicates",
- "--check-redelivered",
- "--connection-options={%s}"%(Cluster.CONNECTION_OPTIONS),
- "--forever"
- ])
- time.sleep(1)#give sender enough time to have some messages to replay
- cluster[0].kill()
- self.sender.wait()
- self.receiver.wait()
- cluster[1].kill()
-
- class BlockedSend(Thread):
- """Send a message, send is expected to block.
- Verify that it does block (for a given timeout), then allow
- waiting till it unblocks when it is expected to do so."""
- def __init__(self, sender, msg):
- self.sender, self.msg = sender, msg
- self.blocked = True
- self.condition = Condition()
- self.timeout = 0.1 # Time to wait for expected results.
- Thread.__init__(self)
- def run(self):
- try:
- self.sender.send(self.msg, sync=True)
- self.condition.acquire()
- try:
- self.blocked = False
- self.condition.notify()
- finally: self.condition.release()
- except Exception,e: print "BlockedSend exception: %s"%e
- def start(self):
- Thread.start(self)
- time.sleep(self.timeout)
- assert self.blocked # Expected to block
- def assert_blocked(self): assert self.blocked
- def wait(self): # Now expecting to unblock
- self.condition.acquire()
- try:
- while self.blocked:
- self.condition.wait(self.timeout)
- if self.blocked: raise Exception("Timed out waiting for send to unblock")
- finally: self.condition.release()
- self.join()
-
- def queue_flowlimit_test(self, brokers):
- """Verify that the queue's flowlimit configuration and state are
- correctly replicated.
- The brokers argument allows this test to run on single broker,
- cluster of 2 pre-startd brokers or cluster where second broker
- starts after queue is in flow control.
- """
- # configure a queue with a specific flow limit on first broker
- ssn0 = brokers.first().connect().session()
- s0 = ssn0.sender("flq; {create:always, node:{type:queue, x-declare:{arguments:{'qpid.flow_stop_count':5, 'qpid.flow_resume_count':3}}}}")
- brokers.first().startQmf()
- q1 = [q for q in brokers.first().qmf_session.getObjects(_class="queue") if q.name == "flq"][0]
- oid = q1.getObjectId()
- self.assertEqual(q1.name, "flq")
- self.assertEqual(q1.arguments, {u'qpid.flow_stop_count': 5L, u'qpid.flow_resume_count': 3L})
- assert not q1.flowStopped
- self.assertEqual(q1.flowStoppedCount, 0)
-
- # fill the queue on one broker until flow control is active
- for x in range(5): s0.send(Message(str(x)))
- sender = ShortTests.BlockedSend(s0, Message(str(6)))
- sender.start() # Tests that sender does block
- # Verify the broker queue goes into a flowStopped state
- deadline = time.time() + 1
- while not q1.flowStopped and time.time() < deadline: q1.update()
- assert q1.flowStopped
- self.assertEqual(q1.flowStoppedCount, 1)
- sender.assert_blocked() # Still blocked
-
- # Now verify the both brokers in cluster have same configuration
- brokers.second().startQmf()
- qs = brokers.second().qmf_session.getObjects(_objectId=oid)
- self.assertEqual(len(qs), 1)
- q2 = qs[0]
- self.assertEqual(q2.name, "flq")
- self.assertEqual(q2.arguments, {u'qpid.flow_stop_count': 5L, u'qpid.flow_resume_count': 3L})
- assert q2.flowStopped
- self.assertEqual(q2.flowStoppedCount, 1)
-
- # now drain the queue using a session to the other broker
- ssn1 = brokers.second().connect().session()
- r1 = ssn1.receiver("flq", capacity=6)
- for x in range(4):
- r1.fetch(timeout=0)
- ssn1.acknowledge()
- sender.wait() # Verify no longer blocked.
-
- # and re-verify state of queue on both brokers
- q1.update()
- assert not q1.flowStopped
- q2.update()
- assert not q2.flowStopped
-
- ssn0.connection.close()
- ssn1.connection.close()
- cluster_test_logs.verify_logs()
-
- def test_queue_flowlimit(self):
- """Test flow limits on a standalone broker"""
- broker = self.broker()
- class Brokers:
- def first(self): return broker
- def second(self): return broker
- self.queue_flowlimit_test(Brokers())
-
- def test_queue_flowlimit_cluster(self):
- cluster = self.cluster(2)
- class Brokers:
- def first(self): return cluster[0]
- def second(self): return cluster[1]
- self.queue_flowlimit_test(Brokers())
-
- def test_queue_flowlimit_cluster_join(self):
- cluster = self.cluster(1)
- class Brokers:
- def first(self): return cluster[0]
- def second(self):
- if len(cluster) == 1: cluster.start()
- return cluster[1]
- self.queue_flowlimit_test(Brokers())
-
- def test_queue_flowlimit_replicate(self):
- """ Verify that a queue which is in flow control BUT has drained BELOW
-        the flow control 'stop' threshold is correctly replicated when a new
- broker is added to the cluster.
- """
-
- class AsyncSender(Thread):
- """Send a fixed number of msgs from a sender in a separate thread
- so it may block without blocking the test.
- """
- def __init__(self, broker, address, count=1, size=4):
- Thread.__init__(self)
- self.daemon = True
- self.broker = broker
- self.queue = address
- self.count = count
- self.size = size
- self.done = False
-
- def run(self):
- self.sender = subprocess.Popen(["qpid-send",
- "--capacity=1",
- "--content-size=%s" % self.size,
- "--messages=%s" % self.count,
- "--failover-updates",
- "--connection-options={%s}"%(Cluster.CONNECTION_OPTIONS),
- "--address=%s" % self.queue,
- "--broker=%s" % self.broker.host_port()])
- self.sender.wait()
- self.done = True
-
- cluster = self.cluster(2)
- # create a queue with rather draconian flow control settings
- ssn0 = cluster[0].connect().session()
- s0 = ssn0.sender("flq; {create:always, node:{type:queue, x-declare:{arguments:{'qpid.flow_stop_count':100, 'qpid.flow_resume_count':20}}}}")
-
- # fire off the sending thread to broker[0], and wait until the queue
- # hits flow control on broker[1]
- sender = AsyncSender(cluster[0], "flq", count=110);
- sender.start();
-
- cluster[1].startQmf()
- q_obj = [q for q in cluster[1].qmf_session.getObjects(_class="queue") if q.name == "flq"][0]
- deadline = time.time() + 10
- while not q_obj.flowStopped and time.time() < deadline:
- q_obj.update()
- assert q_obj.flowStopped
- assert not sender.done
- assert q_obj.msgDepth < 110
-
- # Now drain enough messages on broker[1] to drop below the flow stop
- # threshold, but not relieve flow control...
- receiver = subprocess.Popen(["qpid-receive",
- "--messages=15",
- "--timeout=1",
- "--print-content=no",
- "--failover-updates",
- "--connection-options={%s}"%(Cluster.CONNECTION_OPTIONS),
- "--ack-frequency=1",
- "--address=flq",
- "--broker=%s" % cluster[1].host_port()])
- receiver.wait()
- q_obj.update()
- assert q_obj.flowStopped
- assert not sender.done
- current_depth = q_obj.msgDepth
-
- # add a new broker to the cluster, and verify that the queue is in flow
- # control on that broker
- cluster.start()
- cluster[2].startQmf()
- q_obj = [q for q in cluster[2].qmf_session.getObjects(_class="queue") if q.name == "flq"][0]
- assert q_obj.flowStopped
- assert q_obj.msgDepth == current_depth
-
- # now drain the queue on broker[2], and verify that the sender becomes
- # unblocked
- receiver = subprocess.Popen(["qpid-receive",
- "--messages=95",
- "--timeout=1",
- "--print-content=no",
- "--failover-updates",
- "--connection-options={%s}"%(Cluster.CONNECTION_OPTIONS),
- "--ack-frequency=1",
- "--address=flq",
- "--broker=%s" % cluster[2].host_port()])
- receiver.wait()
- q_obj.update()
- assert not q_obj.flowStopped
- self.assertEqual(q_obj.msgDepth, 0)
-
- # verify that the sender has become unblocked
- sender.join(timeout=5)
- assert not sender.isAlive()
- assert sender.done
-
- def test_blocked_queue_delete(self):
- """Verify that producers which are blocked on a queue due to flow
- control are unblocked when that queue is deleted.
- """
-
- cluster = self.cluster(2)
- cluster[0].startQmf()
- cluster[1].startQmf()
-
- # configure a queue with a specific flow limit on first broker
- ssn0 = cluster[0].connect().session()
- s0 = ssn0.sender("flq; {create:always, node:{type:queue, x-declare:{arguments:{'qpid.flow_stop_count':5, 'qpid.flow_resume_count':3}}}}")
- q1 = [q for q in cluster[0].qmf_session.getObjects(_class="queue") if q.name == "flq"][0]
- oid = q1.getObjectId()
- self.assertEqual(q1.name, "flq")
- self.assertEqual(q1.arguments, {u'qpid.flow_stop_count': 5L, u'qpid.flow_resume_count': 3L})
- assert not q1.flowStopped
- self.assertEqual(q1.flowStoppedCount, 0)
-
- # fill the queue on one broker until flow control is active
- for x in range(5): s0.send(Message(str(x)))
- sender = ShortTests.BlockedSend(s0, Message(str(6)))
- sender.start() # Tests that sender does block
- # Verify the broker queue goes into a flowStopped state
- deadline = time.time() + 1
- while not q1.flowStopped and time.time() < deadline: q1.update()
- assert q1.flowStopped
- self.assertEqual(q1.flowStoppedCount, 1)
- sender.assert_blocked() # Still blocked
-
- # Now verify the both brokers in cluster have same configuration
- qs = cluster[1].qmf_session.getObjects(_objectId=oid)
- self.assertEqual(len(qs), 1)
- q2 = qs[0]
- self.assertEqual(q2.name, "flq")
- self.assertEqual(q2.arguments, {u'qpid.flow_stop_count': 5L, u'qpid.flow_resume_count': 3L})
- assert q2.flowStopped
- self.assertEqual(q2.flowStoppedCount, 1)
-
- # now delete the blocked queue from other broker
- ssn1 = cluster[1].connect().session()
- self.evaluate_address(ssn1, "flq;{delete:always}")
- sender.wait() # Verify no longer blocked.
-
- ssn0.connection.close()
- ssn1.connection.close()
- cluster_test_logs.verify_logs()
-
-
- def test_alternate_exchange_update(self):
- """Verify that alternate-exchange on exchanges and queues is propagated to new members of a cluster. """
- cluster = self.cluster(1)
- s0 = cluster[0].connect().session()
- # create alt queue bound to amq.fanout exchange, will be destination for alternate exchanges
- self.evaluate_address(s0, "alt;{create:always,node:{x-bindings:[{exchange:'amq.fanout',queue:alt}]}}")
- # create direct exchange ex with alternate-exchange amq.fanout and no queues bound
- self.evaluate_address(s0, "ex;{create:always,node:{type:topic, x-declare:{type:'direct', alternate-exchange:'amq.fanout'}}}")
- # create queue q with alternate-exchange amq.fanout
- self.evaluate_address(s0, "q;{create:always,node:{type:queue, x-declare:{alternate-exchange:'amq.fanout'}}}")
-
- def verify(broker):
- s = broker.connect().session()
- # Verify unmatched message goes to ex's alternate.
- s.sender("ex").send("foo")
- self.assertEqual("foo", s.receiver("alt").fetch(timeout=0).content)
- # Verify rejected message goes to q's alternate.
- s.sender("q").send("bar")
- msg = s.receiver("q").fetch(timeout=0)
- self.assertEqual("bar", msg.content)
- s.acknowledge(msg, Disposition(REJECTED)) # Reject the message
- self.assertEqual("bar", s.receiver("alt").fetch(timeout=0).content)
-
- verify(cluster[0])
- cluster.start()
- verify(cluster[1])
-
- def test_binding_order(self):
- """Regression test for binding order inconsistency in cluster"""
- cluster = self.cluster(1)
- c0 = cluster[0].connect()
- s0 = c0.session()
- # Declare multiple queues bound to same key on amq.topic
- def declare(q,max=0):
- if max: declare = 'x-declare:{arguments:{"qpid.max_count":%d, "qpid.flow_stop_count":0}}'%max
- else: declare = 'x-declare:{}'
- bind='x-bindings:[{queue:%s,key:key,exchange:"amq.topic"}]'%(q)
- s0.sender("%s;{create:always,node:{%s,%s}}" % (q,declare,bind))
- declare('d',max=4) # Only one with a limit
- for q in ['c', 'b','a']: declare(q)
- # Add a cluster member, send enough messages to exceed the max count
- cluster.start()
- try:
- s = s0.sender('amq.topic/key')
- for m in xrange(1,6): s.send(Message(str(m)))
- self.fail("Expected capacity exceeded exception")
- except messaging.exceptions.TargetCapacityExceeded: pass
- c1 = cluster[1].connect()
- s1 = c1.session()
- s0 = c0.session() # Old session s0 is broken by exception.
- # Verify queue contents are consistent.
- for q in ['a','b','c','d']:
- self.assertEqual(self.browse(s0, q), self.browse(s1, q))
- # Verify queue contents are "best effort"
- for q in ['a','b','c']: self.assert_browse(s1,q,[str(n) for n in xrange(1,6)])
- self.assert_browse(s1,'d',[str(n) for n in xrange(1,5)])
-
- def test_deleted_exchange(self):
- """QPID-3215: cached exchange reference can cause cluster inconsistencies
- if exchange is deleted/recreated
- Verify stand-alone case
- """
- cluster = self.cluster()
- # Verify we do not route message via an exchange that has been destroyed.
- cluster.start()
- s0 = cluster[0].connect().session()
- self.evaluate_address(s0, "ex;{create:always,node:{type:topic}}")
- self.evaluate_address(s0, "q;{create:always,node:{x-bindings:[{exchange:'ex',queue:q,key:foo}]}}")
- send0 = s0.sender("ex/foo")
- send0.send("foo")
- self.assert_browse(s0, "q", ["foo"])
- self.evaluate_address(s0, "ex;{delete:always}")
- try:
- send0.send("bar") # Should fail, exchange is deleted.
- self.fail("Expected not-found exception")
- except qpid.messaging.NotFound: pass
- self.assert_browse(cluster[0].connect().session(), "q", ["foo"])
-
- def test_deleted_exchange_inconsistent(self):
- """QPID-3215: cached exchange reference can cause cluster inconsistencies
- if exchange is deleted/recreated
-
- Verify cluster inconsistency.
- """
- cluster = self.cluster()
- cluster.start()
- s0 = cluster[0].connect().session()
- self.evaluate_address(s0, "ex;{create:always,node:{type:topic}}")
- self.evaluate_address(s0, "q;{create:always,node:{x-bindings:[{exchange:'ex',queue:q,key:foo}]}}")
- send0 = s0.sender("ex/foo")
- send0.send("foo")
- self.assert_browse(s0, "q", ["foo"])
-
- cluster.start()
- s1 = cluster[1].connect().session()
- self.evaluate_address(s0, "ex;{delete:always}")
- try:
- send0.send("bar")
- self.fail("Expected not-found exception")
- except qpid.messaging.NotFound: pass
-
- self.assert_browse(s1, "q", ["foo"])
-
-
- def test_ttl_consistent(self):
- """Ensure we don't get inconsistent errors with message that have TTL very close together"""
- messages = [ Message(str(i), ttl=i/1000.0) for i in xrange(0,1000)]
- messages.append(Message("x"))
- cluster = self.cluster(2)
- sender = cluster[0].connect().session().sender("q;{create:always}")
-
- def fetch(b):
- receiver = b.connect().session().receiver("q;{create:always}")
- while receiver.fetch().content != "x": pass
-
- for m in messages: sender.send(m, sync=False)
- for m in messages: sender.send(m, sync=False)
- fetch(cluster[0])
- fetch(cluster[1])
- for m in messages: sender.send(m, sync=False)
- cluster.start()
- fetch(cluster[2])
-
-# Some utility code for transaction tests
-XA_RBROLLBACK = 1
-XA_RBTIMEOUT = 2
-XA_OK = 0
-dtx_branch_counter = 0
-
-class DtxStatusException(Exception):
- def __init__(self, expect, actual):
- self.expect = expect
- self.actual = actual
-
- def str(self):
- return "DtxStatusException(expect=%s, actual=%s)"%(self.expect, self.actual)
-
-class DtxTestFixture:
- """Bundle together some common requirements for dtx tests."""
- def __init__(self, test, broker, name, exclusive=False):
- self.test = test
- self.broker = broker
- self.name = name
- # Use old API. DTX is not supported in messaging API.
- self.connection = broker.connect_old()
- self.session = self.connection.session(name, 1) # 1 second timeout
- self.queue = self.session.queue_declare(name, exclusive=exclusive)
- self.session.dtx_select()
- self.consumer = None
-
- def xid(self, id=None):
- if id is None: id = self.name
- return self.session.xid(format=0, global_id=id)
-
- def check_status(self, expect, actual):
- if expect != actual: raise DtxStatusException(expect, actual)
-
- def start(self, id=None, resume=False):
- self.check_status(XA_OK, self.session.dtx_start(xid=self.xid(id), resume=resume).status)
-
- def end(self, id=None, suspend=False):
- self.check_status(XA_OK, self.session.dtx_end(xid=self.xid(id), suspend=suspend).status)
-
- def prepare(self, id=None):
- self.check_status(XA_OK, self.session.dtx_prepare(xid=self.xid(id)).status)
-
- def commit(self, id=None, one_phase=True):
- self.check_status(
- XA_OK, self.session.dtx_commit(xid=self.xid(id), one_phase=one_phase).status)
-
- def rollback(self, id=None):
- self.check_status(XA_OK, self.session.dtx_rollback(xid=self.xid(id)).status)
-
- def set_timeout(self, timeout, id=None):
- self.session.dtx_set_timeout(xid=self.xid(id),timeout=timeout)
-
- def send(self, messages):
- for m in messages:
- dp=self.session.delivery_properties(routing_key=self.name)
- mp=self.session.message_properties()
- self.session.message_transfer(message=qpid.datatypes.Message(dp, mp, m))
-
- def accept(self):
- """Accept 1 message from queue"""
- consumer_tag="%s-consumer"%(self.name)
- self.session.message_subscribe(queue=self.name, destination=consumer_tag)
- self.session.message_flow(unit = self.session.credit_unit.message, value = 1, destination = consumer_tag)
- self.session.message_flow(unit = self.session.credit_unit.byte, value = 0xFFFFFFFFL, destination = consumer_tag)
- msg = self.session.incoming(consumer_tag).get(timeout=1)
- self.session.message_cancel(destination=consumer_tag)
- self.session.message_accept(qpid.datatypes.RangedSet(msg.id))
- return msg
-
-
- def verify(self, sessions, messages):
- for s in sessions:
- self.test.assert_browse(s, self.name, messages)
-
-class DtxTests(BrokerTest):
-
- def test_dtx_update(self):
- """Verify that DTX transaction state is updated to a new broker.
- Start a collection of transactions, then add a new cluster member,
- then verify they commit/rollback correctly on the new broker."""
-
-        # Note: multiple tests have been bundled into one to avoid the need to start/stop
- # multiple brokers per test.
-
- cluster=self.cluster(1)
- sessions = [cluster[0].connect().session()] # For verify
-
- # Transaction that will be open when new member joins, then committed.
- t1 = DtxTestFixture(self, cluster[0], "t1")
- t1.start()
- t1.send(["1", "2"])
- t1.verify(sessions, []) # Not visible outside of transaction
-
- # Transaction that will be open when new member joins, then rolled back.
- t2 = DtxTestFixture(self, cluster[0], "t2")
- t2.start()
- t2.send(["1", "2"])
-
- # Transaction that will be prepared when new member joins, then committed.
- t3 = DtxTestFixture(self, cluster[0], "t3")
- t3.start()
- t3.send(["1", "2"])
- t3.end()
- t3.prepare()
- t1.verify(sessions, []) # Not visible outside of transaction
-
- # Transaction that will be prepared when new member joins, then rolled back.
- t4 = DtxTestFixture(self, cluster[0], "t4")
- t4.start()
- t4.send(["1", "2"])
- t4.end()
- t4.prepare()
-
- # Transaction using an exclusive queue
- t5 = DtxTestFixture(self, cluster[0], "t5", exclusive=True)
- t5.start()
- t5.send(["1", "2"])
-
- # Accept messages in a transaction before/after join then commit
- t6 = DtxTestFixture(self, cluster[0], "t6")
- t6.send(["a","b","c"])
- t6.start()
- self.assertEqual(t6.accept().body, "a");
-
- # Accept messages in a transaction before/after join then roll back
- t7 = DtxTestFixture(self, cluster[0], "t7")
- t7.send(["a","b","c"])
- t7.start()
- self.assertEqual(t7.accept().body, "a");
-
- # Ended, suspended transactions across join.
- t8 = DtxTestFixture(self, cluster[0], "t8")
- t8.start(id="1")
- t8.send(["x"])
- t8.end(id="1", suspend=True)
- t8.start(id="2")
- t8.send(["y"])
- t8.end(id="2")
- t8.start()
- t8.send("z")
-
-
- # Start new cluster member
- cluster.start()
- sessions.append(cluster[1].connect().session())
-
- # Commit t1
- t1.send(["3","4"])
- t1.verify(sessions, [])
- t1.end()
- t1.commit(one_phase=True)
- t1.verify(sessions, ["1","2","3","4"])
-
- # Rollback t2
- t2.send(["3","4"])
- t2.end()
- t2.rollback()
- t2.verify(sessions, [])
-
- # Commit t3
- t3.commit(one_phase=False)
- t3.verify(sessions, ["1","2"])
-
- # Rollback t4
- t4.rollback()
- t4.verify(sessions, [])
-
- # Commit t5
- t5.send(["3","4"])
- t5.verify(sessions, [])
- t5.end()
- t5.commit(one_phase=True)
- t5.verify(sessions, ["1","2","3","4"])
-
- # Commit t6
- self.assertEqual(t6.accept().body, "b");
- t6.verify(sessions, ["c"])
- t6.end()
- t6.commit(one_phase=True)
- t6.session.close() # Make sure they're not requeued by the session.
- t6.verify(sessions, ["c"])
-
- # Rollback t7
- self.assertEqual(t7.accept().body, "b");
- t7.end()
- t7.rollback()
- t7.verify(sessions, ["a", "b", "c"])
-
- # Resume t8
- t8.end()
- t8.commit(one_phase=True)
- t8.start("1", resume=True)
- t8.end("1")
- t8.commit("1", one_phase=True)
- t8.commit("2", one_phase=True)
- t8.verify(sessions, ["z", "x","y"])
-
-
- def test_dtx_failover_rollback(self):
- """Kill a broker during a transaction, verify we roll back correctly"""
- cluster=self.cluster(1, expect=EXPECT_EXIT_FAIL)
- cluster.start(expect=EXPECT_RUNNING)
-
- # Test unprepared at crash
- t1 = DtxTestFixture(self, cluster[0], "t1")
- t1.send(["a"]) # Not in transaction
- t1.start()
- t1.send(["b"]) # In transaction
-
- # Test prepared at crash
- t2 = DtxTestFixture(self, cluster[0], "t2")
- t2.send(["a"]) # Not in transaction
- t2.start()
- t2.send(["b"]) # In transaction
- t2.end()
- t2.prepare()
-
- # Crash the broker
- cluster[0].kill()
-
- # Transactional changes should not appear
- s = cluster[1].connect().session();
- self.assert_browse(s, "t1", ["a"])
- self.assert_browse(s, "t2", ["a"])
-
- def test_dtx_timeout(self):
- """Verify that dtx timeout works"""
- cluster = self.cluster(1)
- t1 = DtxTestFixture(self, cluster[0], "t1")
- t1.start()
- t1.set_timeout(1)
- time.sleep(1.1)
- try:
- t1.end()
- self.fail("Expected rollback timeout.")
- except DtxStatusException, e:
- self.assertEqual(e.actual, XA_RBTIMEOUT)
-
-class TxTests(BrokerTest):
-
- def test_tx_update(self):
- """Verify that transaction state is updated to a new broker"""
-
- def make_message(session, body=None, key=None, id=None):
- dp=session.delivery_properties(routing_key=key)
- mp=session.message_properties(correlation_id=id)
- return qpid.datatypes.Message(dp, mp, body)
-
- cluster=self.cluster(1)
- # Use old API. TX is not supported in messaging API.
- c = cluster[0].connect_old()
- s = c.session("tx-session", 1)
- s.queue_declare(queue="q")
- # Start transaction
- s.tx_select()
- s.message_transfer(message=make_message(s, "1", "q"))
- # Start new member mid-transaction
- cluster.start()
- # Do more work
- s.message_transfer(message=make_message(s, "2", "q"))
- # Commit the transaction and verify the results.
- s.tx_commit()
- for b in cluster: self.assert_browse(b.connect().session(), "q", ["1","2"])
-
-
class LongTests(BrokerTest):
"""Tests that can run for a long time if -DDURATION=<minutes> is set"""
def duration(self):
@@ -1023,28 +316,22 @@ class LongTests(BrokerTest):
# Original cluster will all be killed so expect exit with failure
cluster = self.cluster(3, expect=EXPECT_EXIT_FAIL)
- for b in cluster: b.ready() # Wait for brokers to be ready
for b in cluster: ErrorGenerator(b)
# Start sender and receiver threads
cluster[0].declare_queue("test-queue")
- sender = NumberedSender(cluster[0], 1000) # Max queue depth
- receiver = NumberedReceiver(cluster[0], sender)
+ sender = NumberedSender(cluster[1], 1000) # Max queue depth
+ receiver = NumberedReceiver(cluster[2], sender)
receiver.start()
sender.start()
- # Wait for sender & receiver to get up and running
- retry(lambda: receiver.received > 0)
# Kill original brokers, start new ones for the duration.
endtime = time.time() + self.duration()
i = 0
while time.time() < endtime:
- sender.sender.assert_running()
- receiver.receiver.assert_running()
cluster[i].kill()
i += 1
b = cluster.start(expect=EXPECT_EXIT_FAIL)
- for b in cluster[i:]: b.ready()
ErrorGenerator(b)
time.sleep(5)
sender.stop()
@@ -1075,24 +362,24 @@ class LongTests(BrokerTest):
if self.stopped: break
self.process = self.broker.test.popen(
self.cmd, expect=EXPECT_UNKNOWN)
- finally:
- self.lock.release()
- try:
- exit = self.process.wait()
+ finally: self.lock.release()
+ try: exit = self.process.wait()
except OSError, e:
- # Process may already have been killed by self.stop()
- break
+ # Seems to be a race in wait(), it throws
+ # "no such process" during test shutdown.
+ # Doesn't indicate a test error, ignore.
+ return
except Exception, e:
self.process.unexpected(
"client of %s: %s"%(self.broker.name, e))
self.lock.acquire()
try:
+ # Quit and ignore errors if stopped or expecting failure.
if self.stopped: break
if exit != 0:
self.process.unexpected(
"client of %s exit code %s"%(self.broker.name, exit))
- finally:
- self.lock.release()
+ finally: self.lock.release()
except Exception, e:
self.error = RethrownException("Error in ClientLoop.run")
@@ -1114,7 +401,7 @@ class LongTests(BrokerTest):
args += ["--log-enable=trace+:management"]
# Use store if present.
if BrokerTest.store_lib: args +=["--load-module", BrokerTest.store_lib]
- cluster = self.cluster(3, args, expect=EXPECT_EXIT_FAIL) # brokers will be killed
+ cluster = self.cluster(3, args)
clients = [] # Per-broker list of clients that only connect to one broker.
mclients = [] # Management clients that connect to every broker in the cluster.
@@ -1123,12 +410,10 @@ class LongTests(BrokerTest):
"""Start ordinary clients for a broker."""
cmds=[
["qpid-tool", "localhost:%s"%(broker.port())],
- ["qpid-perftest", "--count=5000", "--durable=yes",
+ ["qpid-perftest", "--count", 50000,
"--base-name", str(qpid.datatypes.uuid4()), "--port", broker.port()],
- ["qpid-txtest", "--queue-base-name", "tx-%s"%str(qpid.datatypes.uuid4()),
- "--port", broker.port()],
- ["qpid-queue-stats", "-a", "localhost:%s" %(broker.port())]
- ]
+ ["qpid-queue-stats", "-a", "localhost:%s" %(broker.port())],
+ ["testagent", "localhost", str(broker.port())] ]
clients.append([ClientLoop(broker, cmd) for cmd in cmds])
def start_mclients(broker):
@@ -1137,8 +422,7 @@ class LongTests(BrokerTest):
mclients.append(ClientLoop(broker, cmd))
endtime = time.time() + self.duration()
- # For long duration, first run is a quarter of the duration.
- runtime = min(5.0, self.duration() / 3.0)
+ runtime = self.duration() / 4 # First run is longer, use quarter of duration.
alive = 0 # First live cluster member
for i in range(len(cluster)): start_clients(cluster[i])
start_mclients(cluster[alive])
@@ -1149,7 +433,7 @@ class LongTests(BrokerTest):
for b in cluster[alive:]: b.ready() # Check if a broker crashed.
# Kill the first broker, expect the clients to fail.
b = cluster[alive]
- b.ready()
+ b.expect = EXPECT_EXIT_FAIL
b.kill()
# Stop the brokers clients and all the mclients.
for c in clients[alive] + mclients:
@@ -1159,251 +443,26 @@ class LongTests(BrokerTest):
mclients = []
# Start another broker and clients
alive += 1
- cluster.start(expect=EXPECT_EXIT_FAIL)
- cluster[-1].ready() # Wait till it's ready
+ cluster.start()
start_clients(cluster[-1])
start_mclients(cluster[alive])
for c in chain(mclients, *clients):
c.stop()
- for b in cluster[alive:]:
- b.ready() # Verify still alive
- b.kill()
+
# Verify that logs are consistent
cluster_test_logs.verify_logs()
def test_management_qmf2(self):
self.test_management(args=["--mgmt-qmf2=yes"])
- def test_connect_consistent(self):
+ def test_connect_consistent(self): # FIXME aconway 2011-01-18:
args=["--mgmt-pub-interval=1","--log-enable=trace+:management"]
cluster = self.cluster(2, args=args)
end = time.time() + self.duration()
while (time.time() < end): # Get a management interval
for i in xrange(1000): cluster[0].connect().close()
- cluster_test_logs.verify_logs()
-
- def test_flowlimit_failover(self):
- """Test fail-over during continuous send-receive with flow control
- active.
- """
-
- # Original cluster will all be killed so expect exit with failure
- cluster = self.cluster(3, expect=EXPECT_EXIT_FAIL)
- for b in cluster: b.ready() # Wait for brokers to be ready
-
- # create a queue with rather draconian flow control settings
- ssn0 = cluster[0].connect().session()
- s0 = ssn0.sender("test-queue; {create:always, node:{type:queue, x-declare:{arguments:{'qpid.flow_stop_count':2000, 'qpid.flow_resume_count':100}}}}")
-
- receiver = NumberedReceiver(cluster[0])
- receiver.start()
- senders = [NumberedSender(cluster[0]) for i in range(1,3)]
- for s in senders:
- s.start()
- # Wait for senders & receiver to get up and running
- retry(lambda: receiver.received > 2*len(senders))
-
- # Kill original brokers, start new ones for the duration.
- endtime = time.time() + self.duration();
- i = 0
- while time.time() < endtime:
- for s in senders: s.sender.assert_running()
- receiver.receiver.assert_running()
- for b in cluster[i:]: b.ready() # Check if any broker crashed.
- cluster[i].kill()
- i += 1
- b = cluster.start(expect=EXPECT_EXIT_FAIL)
- time.sleep(5)
- for s in senders:
- s.stop()
- receiver.stop()
- for i in range(i, len(cluster)): cluster[i].kill()
-
- def test_ttl_failover(self):
- """Test that messages with TTL don't cause problems in a cluster with failover"""
-
- class Client(StoppableThread):
-
- def __init__(self, broker):
- StoppableThread.__init__(self)
- self.connection = broker.connect(reconnect=True)
- self.auto_fetch_reconnect_urls(self.connection)
- self.session = self.connection.session()
-
- def auto_fetch_reconnect_urls(self, conn):
- """Replacement for qpid.messaging.util version which is noisy"""
- ssn = conn.session("auto-fetch-reconnect-urls")
- rcv = ssn.receiver("amq.failover")
- rcv.capacity = 10
-
- def main():
- while True:
- try:
- msg = rcv.fetch()
- qpid.messaging.util.set_reconnect_urls(conn, msg)
- ssn.acknowledge(msg, sync=False)
- except messaging.exceptions.LinkClosed: return
- except messaging.exceptions.ConnectionError: return
-
- thread = Thread(name="auto-fetch-reconnect-urls", target=main)
- thread.setDaemon(True)
- thread.start()
-
- def stop(self):
- StoppableThread.stop(self)
- self.connection.detach()
-
- class Sender(Client):
- def __init__(self, broker, address):
- Client.__init__(self, broker)
- self.sent = 0 # Number of messages _reliably_ sent.
- self.sender = self.session.sender(address, capacity=1000)
-
- def send_counted(self, ttl):
- self.sender.send(Message(str(self.sent), ttl=ttl))
- self.sent += 1
-
- def run(self):
- while not self.stopped:
- choice = random.randint(0,4)
- if choice == 0: self.send_counted(None) # No ttl
- elif choice == 1: self.send_counted(100000) # Large ttl
- else: # Small ttl, might expire
- self.sender.send(Message("", ttl=random.random()/10))
- self.sender.send(Message("z"), sync=True) # Chaser.
-
- class Receiver(Client):
-
- def __init__(self, broker, address):
- Client.__init__(self, broker)
- self.received = 0 # Number of non-empty (reliable) messages received.
- self.receiver = self.session.receiver(address, capacity=1000)
- def run(self):
- try:
- while True:
- m = self.receiver.fetch(1)
- if m.content == "z": break
- if m.content: # Ignore unreliable messages
- # Ignore duplicates
- if int(m.content) == self.received: self.received += 1
- except Exception,e: self.error = e
-
- # def test_ttl_failover
-
- # Original cluster will all be killed so expect exit with failure
- # Set small purge interval.
- cluster = self.cluster(3, expect=EXPECT_EXIT_FAIL, args=["--queue-purge-interval=1"])
- for b in cluster: b.ready() # Wait for brokers to be ready
-
- # Python client failover produces noisy WARN logs, disable temporarily
- logger = logging.getLogger()
- log_level = logger.getEffectiveLevel()
- logger.setLevel(logging.ERROR)
- sender = None
- receiver = None
- try:
- # Start sender and receiver threads
- receiver = Receiver(cluster[0], "q;{create:always}")
- receiver.start()
- sender = Sender(cluster[0], "q;{create:always}")
- sender.start()
- # Wait for sender & receiver to get up and running
- retry(lambda: receiver.received > 0)
-
- # Kill brokers in a cycle.
- endtime = time.time() + self.duration()
- runtime = min(5.0, self.duration() / 4.0)
- i = 0
- while time.time() < endtime:
- for b in cluster[i:]: b.ready() # Check if any broker crashed.
- cluster[i].kill()
- i += 1
- b = cluster.start(expect=EXPECT_EXIT_FAIL)
- b.ready()
- time.sleep(runtime)
- sender.stop()
- receiver.stop()
- for b in cluster[i:]:
- b.ready() # Check it didn't crash
- b.kill()
- self.assertEqual(sender.sent, receiver.received)
cluster_test_logs.verify_logs()
- finally:
- # Detach to avoid slow reconnect attempts during shut-down if test fails.
- if sender: sender.connection.detach()
- if receiver: receiver.connection.detach()
- logger.setLevel(log_level)
-
- def test_msg_group_failover(self):
- """Test fail-over during continuous send-receive of grouped messages.
- """
-
- class GroupedTrafficGenerator(Thread):
- def __init__(self, url, queue, group_key):
- Thread.__init__(self)
- self.url = url
- self.queue = queue
- self.group_key = group_key
- self.status = -1
-
- def run(self):
- # generate traffic for approx 10 seconds (2011msgs / 200 per-sec)
- cmd = ["msg_group_test",
- "--broker=%s" % self.url,
- "--address=%s" % self.queue,
- "--connection-options={%s}" % (Cluster.CONNECTION_OPTIONS),
- "--group-key=%s" % self.group_key,
- "--receivers=2",
- "--senders=3",
- "--messages=2011",
- "--send-rate=200",
- "--capacity=11",
- "--ack-frequency=23",
- "--allow-duplicates",
- "--group-size=37",
- "--randomize-group-size",
- "--interleave=13"]
- # "--trace"]
- self.generator = Popen( cmd );
- self.status = self.generator.wait()
- return self.status
-
- def results(self):
- self.join(timeout=30) # 3x assumed duration
- if self.isAlive(): return -1
- return self.status
-
- # Original cluster will all be killed so expect exit with failure
- cluster = self.cluster(3, expect=EXPECT_EXIT_FAIL, args=["-t"])
- for b in cluster: b.ready() # Wait for brokers to be ready
-
- # create a queue configured for shared message groups
- ssn0 = cluster[0].connect().session()
- q_args = "{'qpid.group_header_key':'group-id', 'qpid.shared_msg_group':1}"
- s0 = ssn0.sender("test-group-q; {create:always, node:{type:queue, x-declare:{arguments:%s}}}" % q_args)
-
- # Kill original brokers, start new ones for the duration.
- endtime = time.time() + self.duration();
- i = 0
- while time.time() < endtime:
- traffic = GroupedTrafficGenerator( cluster[i].host_port(),
- "test-group-q", "group-id" )
- traffic.start()
- time.sleep(1)
-
- for x in range(2):
- for b in cluster[i:]: b.ready() # Check if any broker crashed.
- cluster[i].kill()
- i += 1
- b = cluster.start(expect=EXPECT_EXIT_FAIL)
- time.sleep(1)
-
- # wait for traffic to finish, verify success
- self.assertEqual(0, traffic.results())
-
- for i in range(i, len(cluster)): cluster[i].kill()
-
class StoreTests(BrokerTest):
"""
diff --git a/cpp/src/tests/exception_test.cpp b/cpp/src/tests/exception_test.cpp
index 3e844b4e58..3536ffddbe 100644
--- a/cpp/src/tests/exception_test.cpp
+++ b/cpp/src/tests/exception_test.cpp
@@ -92,30 +92,32 @@ QPID_AUTO_TEST_CASE(TestSessionBusy) {
}
QPID_AUTO_TEST_CASE(DisconnectedPop) {
- SessionFixture fix;
+ ProxySessionFixture fix;
+ ProxyConnection c(fix.broker->getPort(Broker::TCP_TRANSPORT));
fix.session.queueDeclare(arg::queue="q");
fix.subs.subscribe(fix.lq, "q");
Catcher<TransportFailure> pop(bind(&LocalQueue::pop, &fix.lq, sys::TIME_SEC));
- fix.shutdownBroker();
+ fix.connection.proxy.close();
BOOST_CHECK(pop.join());
}
QPID_AUTO_TEST_CASE(DisconnectedListen) {
- SessionFixture fix;
+ ProxySessionFixture fix;
struct NullListener : public MessageListener {
void received(Message&) { BOOST_FAIL("Unexpected message"); }
} l;
+ ProxyConnection c(fix.broker->getPort(Broker::TCP_TRANSPORT));
fix.session.queueDeclare(arg::queue="q");
fix.subs.subscribe(l, "q");
Catcher<TransportFailure> runner(bind(&SubscriptionManager::run, boost::ref(fix.subs)));
- fix.shutdownBroker();
- runner.join();
+ fix.connection.proxy.close();
+ runner.join();
BOOST_CHECK_THROW(fix.session.queueDeclare(arg::queue="x"), TransportFailure);
}
QPID_AUTO_TEST_CASE(NoSuchQueueTest) {
- SessionFixture fix;
+ ProxySessionFixture fix;
ScopedSuppressLogging sl; // Suppress messages for expected errors.
BOOST_CHECK_THROW(fix.subs.subscribe(fix.lq, "no such queue"), NotFoundException);
}
diff --git a/cpp/src/tests/federated_topic_test b/cpp/src/tests/federated_topic_test
index 2e55ddcfaa..b1063c7e8c 100755
--- a/cpp/src/tests/federated_topic_test
+++ b/cpp/src/tests/federated_topic_test
@@ -42,12 +42,13 @@ while getopts "s:m:b:" opt ; do
esac
done
+MY_DIR=$(dirname $(which $0))
source ./test_env.sh
trap stop_brokers EXIT
start_broker() {
- $QPIDD_EXEC --daemon --port 0 --no-module-dir --no-data-dir --auth no > qpidd.port
+ ${MY_DIR}/../qpidd --daemon --port 0 --no-module-dir --no-data-dir --auth no > qpidd.port
}
start_brokers() {
@@ -75,39 +76,39 @@ subscribe() {
echo Subscriber $1 connecting on $MY_PORT
LOG="subscriber_$1.log"
- ./qpid-topic-listener -p $MY_PORT > $LOG 2>&1 && rm -f $LOG
+ ${MY_DIR}/topic_listener -p $MY_PORT > $LOG 2>&1 && rm -f $LOG
}
publish() {
- ./qpid-topic-publisher --messages $MESSAGES --batches $BATCHES --subscribers $SUBSCRIBERS -p $PORT_A
+ ${MY_DIR}/topic_publisher --messages $MESSAGES --batches $BATCHES --subscribers $SUBSCRIBERS -p $PORT_A
}
setup_routes() {
- BROKER_A="daffodil:$PORT_A"
- BROKER_B="daffodil:$PORT_B"
- BROKER_C="daffodil:$PORT_C"
+ BROKER_A="localhost:$PORT_A"
+ BROKER_B="localhost:$PORT_B"
+ BROKER_C="localhost:$PORT_C"
if (($VERBOSE)); then
echo "Establishing routes for topic..."
fi
- $QPID_ROUTE_EXEC route add $BROKER_B $BROKER_A amq.topic topic_control B B
- $QPID_ROUTE_EXEC route add $BROKER_C $BROKER_B amq.topic topic_control C C
+ $PYTHON_COMMANDS/qpid-route route add $BROKER_B $BROKER_A amq.topic topic_control B B
+ $PYTHON_COMMANDS/qpid-route route add $BROKER_C $BROKER_B amq.topic topic_control C C
if (($VERBOSE)); then
echo "linked A->B->C"
fi
- $QPID_ROUTE_EXEC route add $BROKER_B $BROKER_C amq.topic topic_control B B
- $QPID_ROUTE_EXEC route add $BROKER_A $BROKER_B amq.topic topic_control A A
+ $PYTHON_COMMANDS/qpid-route route add $BROKER_B $BROKER_C amq.topic topic_control B B
+ $PYTHON_COMMANDS/qpid-route route add $BROKER_A $BROKER_B amq.topic topic_control A A
if (($VERBOSE)); then
echo "linked C->B->A"
echo "Establishing routes for response queue..."
fi
- $QPID_ROUTE_EXEC route add $BROKER_B $BROKER_C amq.direct response B B
- $QPID_ROUTE_EXEC route add $BROKER_A $BROKER_B amq.direct response A A
+ $PYTHON_COMMANDS/qpid-route route add $BROKER_B $BROKER_C amq.direct response B B
+ $PYTHON_COMMANDS/qpid-route route add $BROKER_A $BROKER_B amq.direct response A A
if (($VERBOSE)); then
echo "linked C->B->A"
for b in $BROKER_A $BROKER_B $BROKER_C; do
echo "Routes for $b"
- $QPID_ROUTE_EXEC route list $b
+ $PYTHON_COMMANDS/qpid-route route list $b
done
fi
}
diff --git a/cpp/src/tests/federation.py b/cpp/src/tests/federation.py
index 7d613b98ce..973a1d366c 100755
--- a/cpp/src/tests/federation.py
+++ b/cpp/src/tests/federation.py
@@ -23,7 +23,7 @@ from qpid.testlib import TestBase010
from qpid.datatypes import Message
from qpid.queue import Empty
from qpid.util import URL
-from time import sleep, time
+from time import sleep
class _FedBroker(object):
@@ -111,18 +111,18 @@ class FederationTests(TestBase010):
broker = qmf.getObjects(_class="broker")[0]
result = broker.connect(self.remote_host(), self.remote_port(), False, "PLAIN", "guest", "guest", "tcp")
- self.assertEqual(result.status, 0, result)
+ self.assertEqual(result.status, 0)
link = qmf.getObjects(_class="link")[0]
result = link.bridge(False, "amq.direct", "amq.direct", "my-key", "", "", False, False, False, 0)
- self.assertEqual(result.status, 0, result)
+ self.assertEqual(result.status, 0)
bridge = qmf.getObjects(_class="bridge")[0]
result = bridge.close()
- self.assertEqual(result.status, 0, result)
+ self.assertEqual(result.status, 0)
result = link.close()
- self.assertEqual(result.status, 0, result)
+ self.assertEqual(result.status, 0)
self.verify_cleanup()
@@ -133,11 +133,11 @@ class FederationTests(TestBase010):
qmf = self.qmf
broker = qmf.getObjects(_class="broker")[0]
result = broker.connect(self.remote_host(), self.remote_port(), False, "PLAIN", "guest", "guest", "tcp")
- self.assertEqual(result.status, 0, result)
+ self.assertEqual(result.status, 0)
link = qmf.getObjects(_class="link")[0]
result = link.bridge(False, "amq.direct", "amq.fanout", "my-key", "", "", False, False, False, 0)
- self.assertEqual(result.status, 0, result)
+ self.assertEqual(result.status, 0)
bridge = qmf.getObjects(_class="bridge")[0]
@@ -165,9 +165,9 @@ class FederationTests(TestBase010):
except Empty: None
result = bridge.close()
- self.assertEqual(result.status, 0, result)
+ self.assertEqual(result.status, 0)
result = link.close()
- self.assertEqual(result.status, 0, result)
+ self.assertEqual(result.status, 0)
self.verify_cleanup()
@@ -178,11 +178,11 @@ class FederationTests(TestBase010):
qmf = self.qmf
broker = qmf.getObjects(_class="broker")[0]
result = broker.connect(self.remote_host(), self.remote_port(), False, "PLAIN", "guest", "guest", "tcp")
- self.assertEqual(result.status, 0, result)
+ self.assertEqual(result.status, 0)
link = qmf.getObjects(_class="link")[0]
result = link.bridge(False, "amq.direct", "amq.fanout", "my-key", "", "", False, True, False, 0)
- self.assertEqual(result.status, 0, result)
+ self.assertEqual(result.status, 0)
bridge = qmf.getObjects(_class="bridge")[0]
@@ -209,9 +209,9 @@ class FederationTests(TestBase010):
except Empty: None
result = bridge.close()
- self.assertEqual(result.status, 0, result)
+ self.assertEqual(result.status, 0)
result = link.close()
- self.assertEqual(result.status, 0, result)
+ self.assertEqual(result.status, 0)
self.verify_cleanup()
@@ -236,11 +236,11 @@ class FederationTests(TestBase010):
qmf = self.qmf
broker = qmf.getObjects(_class="broker")[0]
result = broker.connect(self.remote_host(), self.remote_port(), False, "PLAIN", "guest", "guest", "tcp")
- self.assertEqual(result.status, 0, result)
+ self.assertEqual(result.status, 0)
link = qmf.getObjects(_class="link")[0]
result = link.bridge(False, "my-bridge-queue", "amq.fanout", "my-key", "", "", True, False, False, 1)
- self.assertEqual(result.status, 0, result)
+ self.assertEqual(result.status, 0)
bridge = qmf.getObjects(_class="bridge")[0]
sleep(3)
@@ -262,63 +262,6 @@ class FederationTests(TestBase010):
except Empty: None
result = bridge.close()
- self.assertEqual(result.status, 0, result)
- result = link.close()
- self.assertEqual(result.status, 0, result)
-
- self.verify_cleanup()
-
- def test_pull_from_queue_recovery(self):
- session = self.session
-
- #setup queue on remote broker and add some messages
- r_conn = self.connect(host=self.remote_host(), port=self.remote_port())
- r_session = r_conn.session("test_pull_from_queue_recovery")
- r_session.queue_declare(queue="my-bridge-queue", auto_delete=True)
- for i in range(1, 6):
- dp = r_session.delivery_properties(routing_key="my-bridge-queue")
- r_session.message_transfer(message=Message(dp, "Message %d" % i))
-
- #setup queue to receive messages from local broker
- session.queue_declare(queue="fed1", exclusive=True, auto_delete=True)
- session.exchange_bind(queue="fed1", exchange="amq.fanout")
- self.subscribe(queue="fed1", destination="f1")
- queue = session.incoming("f1")
-
- self.startQmf()
- qmf = self.qmf
- broker = qmf.getObjects(_class="broker")[0]
- result = broker.connect(self.remote_host(), self.remote_port(), False, "PLAIN", "guest", "guest", "tcp")
- self.assertEqual(result.status, 0, result)
-
- link = qmf.getObjects(_class="link")[0]
- result = link.bridge(False, "my-bridge-queue", "amq.fanout", "my-key", "", "", True, False, False, 1)
- self.assertEqual(result.status, 0, result)
-
- bridge = qmf.getObjects(_class="bridge")[0]
- sleep(5)
-
- #recreate the remote bridge queue to invalidate the bridge session
- r_session.queue_delete (queue="my-bridge-queue", if_empty=False, if_unused=False)
- r_session.queue_declare(queue="my-bridge-queue", auto_delete=True)
-
- #add some more messages (i.e. after bridge was created)
- for i in range(6, 11):
- dp = r_session.delivery_properties(routing_key="my-bridge-queue")
- r_session.message_transfer(message=Message(dp, "Message %d" % i))
-
- for i in range(1, 11):
- try:
- msg = queue.get(timeout=5)
- self.assertEqual("Message %d" % i, msg.body)
- except Empty:
- self.fail("Failed to find expected message containing 'Message %d'" % i)
- try:
- extra = queue.get(timeout=1)
- self.fail("Got unexpected message in queue: " + extra.body)
- except Empty: None
-
- result = bridge.close()
self.assertEqual(result.status, 0)
result = link.close()
self.assertEqual(result.status, 0)
@@ -706,17 +649,10 @@ class FederationTests(TestBase010):
self.verify_cleanup()
- def test_dynamic_headers_any(self):
- self.do_test_dynamic_headers('any')
-
- def test_dynamic_headers_all(self):
- self.do_test_dynamic_headers('all')
-
-
- def do_test_dynamic_headers(self, match_mode):
+ def test_dynamic_headers(self):
session = self.session
r_conn = self.connect(host=self.remote_host(), port=self.remote_port())
- r_session = r_conn.session("test_dynamic_headers_%s" % match_mode)
+ r_session = r_conn.session("test_dynamic_headers")
session.exchange_declare(exchange="fed.headers", type="headers")
r_session.exchange_declare(exchange="fed.headers", type="headers")
@@ -735,7 +671,7 @@ class FederationTests(TestBase010):
sleep(5)
session.queue_declare(queue="fed1", exclusive=True, auto_delete=True)
- session.exchange_bind(queue="fed1", exchange="fed.headers", binding_key="key1", arguments={'x-match':match_mode, 'class':'first'})
+ session.exchange_bind(queue="fed1", exchange="fed.headers", binding_key="key1", arguments={'x-match':'any', 'class':'first'})
self.subscribe(queue="fed1", destination="f1")
queue = session.incoming("f1")
@@ -1855,301 +1791,3 @@ class FederationTests(TestBase010):
if headers:
return headers[name]
return None
-
- def test_dynamic_topic_bounce(self):
- """ Bounce the connection between federated Topic Exchanges.
- """
- class Params:
- def exchange_type(self): return "topic"
- def bind_queue(self, ssn, qname, ename):
- ssn.exchange_bind(queue=qname, exchange=ename,
- binding_key="spud.*")
- def unbind_queue(self, ssn, qname, ename):
- ssn.exchange_unbind(queue=qname, exchange=ename, binding_key="spud.*")
- def delivery_properties(self, ssn):
- return ssn.delivery_properties(routing_key="spud.boy")
-
- self.generic_dynamic_bounce_test(Params())
-
- def test_dynamic_direct_bounce(self):
- """ Bounce the connection between federated Direct Exchanges.
- """
- class Params:
- def exchange_type(self): return "direct"
- def bind_queue(self, ssn, qname, ename):
- ssn.exchange_bind(queue=qname, exchange=ename, binding_key="spud")
- def unbind_queue(self, ssn, qname, ename):
- ssn.exchange_unbind(queue=qname, exchange=ename, binding_key="spud")
- def delivery_properties(self, ssn):
- return ssn.delivery_properties(routing_key="spud")
- self.generic_dynamic_bounce_test(Params())
-
- def test_dynamic_fanout_bounce(self):
- """ Bounce the connection between federated Fanout Exchanges.
- """
- class Params:
- def exchange_type(self): return "fanout"
- def bind_queue(self, ssn, qname, ename):
- ssn.exchange_bind(queue=qname, exchange=ename)
- def unbind_queue(self, ssn, qname, ename):
- ssn.exchange_unbind(queue=qname, exchange=ename)
- def delivery_properties(self, ssn):
- return ssn.delivery_properties(routing_key="spud")
- self.generic_dynamic_bounce_test(Params())
-
- def test_dynamic_headers_bounce(self):
- """ Bounce the connection between federated Headers Exchanges.
- """
- class Params:
- def exchange_type(self): return "headers"
- def bind_queue(self, ssn, qname, ename):
- ssn.exchange_bind(queue=qname, exchange=ename,
- binding_key="spud", arguments={'x-match':'any', 'class':'first'})
- def unbind_queue(self, ssn, qname, ename):
- ssn.exchange_unbind(queue=qname, exchange=ename, binding_key="spud")
- def delivery_properties(self, ssn):
- return ssn.message_properties(application_headers={'class':'first'})
- ## @todo KAG - re-enable once federation bugs with headers exchanges
- ## are fixed.
- #self.generic_dynamic_bounce_test(Params())
- return
-
-
- def generic_dynamic_bounce_test(self, params):
- """ Verify that a federated broker can maintain a binding to a local
- queue using the same key as a remote binding. Destroy and reconnect
- the federation link, and verify routes are restored correctly.
- See QPID-3170.
- Topology:
-
- Queue1 <---"Key"---B0<==[Federated Exchange]==>B1---"Key"--->Queue2
- """
- session = self.session
-
- # create the federation
-
- self.startQmf()
- qmf = self.qmf
-
- self._setup_brokers()
-
- # create exchange on each broker, and retrieve the corresponding
- # management object for that exchange
-
- exchanges=[]
- for _b in self._brokers[0:2]:
- _b.client_session.exchange_declare(exchange="fedX", type=params.exchange_type())
- self.assertEqual(_b.client_session.exchange_query(name="fedX").type,
- params.exchange_type(), "exchange_declare failed!")
- # pull the exchange out of qmf...
- retries = 0
- my_exchange = None
- timeout = time() + 10
- while my_exchange is None and time() <= timeout:
- objs = qmf.getObjects(_broker=_b.qmf_broker, _class="exchange")
- for ooo in objs:
- if ooo.name == "fedX":
- my_exchange = ooo
- break
- if my_exchange is None:
- self.fail("QMF failed to find new exchange!")
- exchanges.append(my_exchange)
-
- #
- # on each broker, create a local queue bound to the exchange with the
- # same key value.
- #
-
- self._brokers[0].client_session.queue_declare(queue="fedX1", exclusive=True, auto_delete=True)
- params.bind_queue(self._brokers[0].client_session, "fedX1", "fedX")
- self.subscribe(self._brokers[0].client_session, queue="fedX1", destination="f1")
- queue_0 = self._brokers[0].client_session.incoming("f1")
-
- self._brokers[1].client_session.queue_declare(queue="fedX1", exclusive=True, auto_delete=True)
- params.bind_queue(self._brokers[1].client_session, "fedX1", "fedX")
- self.subscribe(self._brokers[1].client_session, queue="fedX1", destination="f1")
- queue_1 = self._brokers[1].client_session.incoming("f1")
-
- # now federate the two brokers
-
- # connect B0 --> B1
- result = self._brokers[1].qmf_object.connect(self._brokers[0].host,
- self._brokers[0].port,
- False, "PLAIN", "guest", "guest", "tcp")
- self.assertEqual(result.status, 0)
-
- # connect B1 --> B0
- result = self._brokers[0].qmf_object.connect(self._brokers[1].host,
- self._brokers[1].port,
- False, "PLAIN", "guest", "guest", "tcp")
- self.assertEqual(result.status, 0)
-
- # for each link, bridge the "fedX" exchanges:
-
- for _l in qmf.getObjects(_class="link"):
- # print("Link=%s:%s %s" % (_l.host, _l.port, str(_l.getBroker())))
- result = _l.bridge(False, # durable
- "fedX", # src
- "fedX", # dst
- "", # key
- "", # tag
- "", # excludes
- False, # srcIsQueue
- False, # srcIsLocal
- True, # dynamic
- 0) # sync
- self.assertEqual(result.status, 0)
-
- # wait for all the inter-broker links to become operational
- operational = False
- timeout = time() + 10
- while not operational and time() <= timeout:
- operational = True
- for _l in qmf.getObjects(_class="link"):
- #print("Link=%s:%s %s" % (_l.host, _l.port, str(_l.state)))
- if _l.state != "Operational":
- operational = False
- self.failUnless(operational, "inter-broker links failed to become operational.")
-
- # @todo - There is no way to determine when the bridge objects become
- # active.
-
- # wait until the binding key has propagated to each broker - each
- # broker should see 2 bindings (1 local, 1 remote)
-
- binding_counts = [2, 2]
- self.assertEqual(len(binding_counts), len(exchanges), "Update Test!")
- for i in range(2):
- exchanges[i].update()
- timeout = time() + 10
- while exchanges[i].bindingCount < binding_counts[i] and time() <= timeout:
- exchanges[i].update()
- self.failUnless(exchanges[i].bindingCount == binding_counts[i])
-
- # send 10 msgs to B0
- for i in range(1, 11):
- # dp = self._brokers[0].client_session.delivery_properties(routing_key=params.routing_key())
- dp = params.delivery_properties(self._brokers[0].client_session)
- self._brokers[0].client_session.message_transfer(destination="fedX", message=Message(dp, "Message_trp %d" % i))
-
- # get exactly 10 msgs on B0's local queue and B1's queue
- for i in range(1, 11):
- try:
- msg = queue_0.get(timeout=5)
- self.assertEqual("Message_trp %d" % i, msg.body)
- msg = queue_1.get(timeout=5)
- self.assertEqual("Message_trp %d" % i, msg.body)
- except Empty:
- self.fail("Only got %d msgs - expected 10" % i)
- try:
- extra = queue_0.get(timeout=1)
- self.fail("Got unexpected message in queue_0: " + extra.body)
- except Empty: None
-
- try:
- extra = queue_1.get(timeout=1)
- self.fail("Got unexpected message in queue_1: " + extra.body)
- except Empty: None
-
- #
- # Tear down the bridges between the two exchanges, then wait
- # for the bindings to be cleaned up
- #
-
- for _b in qmf.getObjects(_class="bridge"):
- result = _b.close()
- self.assertEqual(result.status, 0)
-
- binding_counts = [1, 1]
- self.assertEqual(len(binding_counts), len(exchanges), "Update Test!")
- for i in range(2):
- exchanges[i].update()
- timeout = time() + 10
- while exchanges[i].bindingCount != binding_counts[i] and time() <= timeout:
- exchanges[i].update()
- self.failUnless(exchanges[i].bindingCount == binding_counts[i])
-
- #
- # restore the bridges between the two exchanges, and wait for the
- # bindings to propagate.
- #
-
- for _l in qmf.getObjects(_class="link"):
- # print("Link=%s:%s %s" % (_l.host, _l.port, str(_l.getBroker())))
- result = _l.bridge(False, # durable
- "fedX", # src
- "fedX", # dst
- "", # key
- "", # tag
- "", # excludes
- False, # srcIsQueue
- False, # srcIsLocal
- True, # dynamic
- 0) # sync
- self.assertEqual(result.status, 0)
-
- binding_counts = [2, 2]
- self.assertEqual(len(binding_counts), len(exchanges), "Update Test!")
- for i in range(2):
- exchanges[i].update()
- timeout = time() + 10
- while exchanges[i].bindingCount != binding_counts[i] and time() <= timeout:
- exchanges[i].update()
- self.failUnless(exchanges[i].bindingCount == binding_counts[i])
-
- #
- # verify traffic flows correctly
- #
-
- for i in range(1, 11):
- #dp = self._brokers[1].client_session.delivery_properties(routing_key=params.routing_key())
- dp = params.delivery_properties(self._brokers[1].client_session)
- self._brokers[1].client_session.message_transfer(destination="fedX", message=Message(dp, "Message_trp %d" % i))
-
- # get exactly 10 msgs on B0's queue and B1's queue
- for i in range(1, 11):
- try:
- msg = queue_0.get(timeout=5)
- self.assertEqual("Message_trp %d" % i, msg.body)
- msg = queue_1.get(timeout=5)
- self.assertEqual("Message_trp %d" % i, msg.body)
- except Empty:
- self.fail("Only got %d msgs - expected 10" % i)
- try:
- extra = queue_0.get(timeout=1)
- self.fail("Got unexpected message in queue_0: " + extra.body)
- except Empty: None
-
- try:
- extra = queue_1.get(timeout=1)
- self.fail("Got unexpected message in queue_1: " + extra.body)
- except Empty: None
-
-
- #
- # cleanup
- #
- params.unbind_queue(self._brokers[0].client_session, "fedX1", "fedX")
- self._brokers[0].client_session.message_cancel(destination="f1")
- self._brokers[0].client_session.queue_delete(queue="fedX1")
-
- params.unbind_queue(self._brokers[1].client_session, "fedX1", "fedX")
- self._brokers[1].client_session.message_cancel(destination="f1")
- self._brokers[1].client_session.queue_delete(queue="fedX1")
-
- for _b in qmf.getObjects(_class="bridge"):
- result = _b.close()
- self.assertEqual(result.status, 0)
-
- for _l in qmf.getObjects(_class="link"):
- result = _l.close()
- self.assertEqual(result.status, 0)
-
- for _b in self._brokers[0:2]:
- _b.client_session.exchange_delete(exchange="fedX")
-
- self._teardown_brokers()
-
- self.verify_cleanup()
-
-
diff --git a/cpp/src/tests/federation_sys.py b/cpp/src/tests/federation_sys.py
deleted file mode 100755
index 11590f684e..0000000000
--- a/cpp/src/tests/federation_sys.py
+++ /dev/null
@@ -1,1900 +0,0 @@
-#!/usr/bin/env python
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-from inspect import stack
-from qpid import messaging
-from qpid.messaging import Message
-from qpid.messaging.exceptions import Empty
-from qpid.testlib import TestBase010
-from random import randint
-from sys import stdout
-from time import sleep
-
-
-class Enum(object):
- def __init__(self, **entries):
- self.__dict__.update(entries)
- def __repr__(self):
- args = ['%s=%s' % (k, repr(v)) for (k,v) in vars(self).items()]
- return 'Enum(%s)' % ', '.join(args)
-
-
-class QmfTestBase010(TestBase010):
-
- _brokers = []
- _links = []
- _bridges = []
- _alt_exch_ops = Enum(none=0, create=1, delete=2)
-
- class _Broker(object):
- """
- This broker proxy object holds the Qmf proxy to a broker of known address as well as the QMF broker
- object, connection and sessions to the broker.
- """
- def __init__(self, url):
- self.url = url # format: "host:port"
- url_parts = url.split(':')
- self.host = url_parts[0]
- self.port = int(url_parts[1])
- self.qmf_broker = None
- self.connection = messaging.Connection.establish(self.url)
- self.sessions = []
- def __str__(self):
- return "_Broker %s:%s (%d open sessions)" % (self.host, self.port, len(self.sessions))
- def destroy(self, qmf = None):
- if qmf is not None:
- qmf.delBroker(self.qmf_broker.getBroker())
- for session in self.sessions:
- try: # Session may have been closed by broker error
- session.close()
- except Exception, e: print "WARNING: %s: Unable to close session %s (%s): %s %s" % (self, session, hex(id(session)), type(e), e)
- try: # Connection may have been closed by broker error
- self.connection.close()
- except Exception, e: print "WARNING: %s: Unable to close connection %s (%s): %s %s" % (self, self.connection, hex(id(self.connection)), type(e), e)
- def session(self, name, transactional_flag = False):
- session = self.connection.session(name, transactional_flag)
- self.sessions.append(session)
- return session
-
- def setUp(self):
- """
- Called once before each test starts
- """
- TestBase010.setUp(self)
- self.startQmf();
-
- def tearDown(self):
- """
- Called once after each test completes. Close all Qmf objects (bridges, links and brokers)
- """
- while len(self._bridges):
- self._bridges.pop().close()
- while len(self._links):
- self._links.pop().close()
- while len(self._brokers):
- b = self._brokers.pop()
- if len(self._brokers) <= 1:
- b.destroy(None)
- else:
- b.destroy(self.qmf)
- TestBase010.tearDown(self)
- self.qmf.close()
-
- #--- General test utility functions
-
- def _get_name(self):
- """
- Return the name of method which called this method stripped of "test_" prefix. Used for naming
- queues and exchanges on a per-test basis.
- """
- return stack()[1][3][5:]
-
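The stack()-based helper above is terse, so here is a standalone sketch of the same trick using only the standard-library inspect module (the function and test names are illustrative):

    from inspect import stack

    def _get_name():
        # stack()[1][3] is the name of the calling function;
        # [5:] drops the five-character "test_" prefix.
        return stack()[1][3][5:]

    def test_queue_route():
        return _get_name()

    assert test_queue_route() == "queue_route"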
- def _get_broker_port(self, key):
- """
- Get the port of a broker defined in the environment using -D<key>=portno
- """
- return int(self.defines[key])
-
- def _get_cluster_ports(self, key):
- """
- Get the cluster ports from the parameters of the test, which place them in the environment using
- -D<key>="port0 port1 ... portN" (space-separated)
- """
- ports = []
- ports_str = self.defines[key]
- if ports_str:
- for p in ports_str.split():
- ports.append(int(p))
- return ports
-
- def _get_send_address(self, exch_name, queue_name):
- """
- Get an address to which to send messages based on the exchange name and queue name, but taking into account
- that the exchange name may be "" (the default exchange), in which case the format changes slightly.
- """
- if len(exch_name) == 0: # Default exchange
- return queue_name
- return "%s/%s" % (exch_name, queue_name)
-
- def _get_broker(self, cluster_flag, broker_port_key, cluster_ports_key):
- """
- Read the port numbers for pre-started brokers from the environment using keys, then find or create and return
- the Qmf broker proxy for the appropriate broker
- """
- if cluster_flag:
- port = self._get_cluster_ports(cluster_ports_key)[0] # Always use the first node in the cluster
- else:
- port = self._get_broker_port(broker_port_key)
- return self._find_create_broker("localhost:%s" % port)
-
- def _get_msg_subject(self, topic_key):
- """
- Return an appropriate subject for sending a message to a known topic. Return None if there is no topic.
- """
- if len(topic_key) == 0: return None
- if "*" in topic_key: return topic_key.replace("*", "test")
- if "#" in topic_key: return topic_key.replace("#", "multipart.test")
- return topic_key
-
- def _send_msgs(self, session_name, broker, addr, msg_count, msg_content = "Message_%03d", topic_key = "",
- msg_durable_flag = False, enq_txn_size = 0):
- """
- Send messages to a broker using address addr
- """
- send_session = broker.session(session_name, transactional_flag = enq_txn_size > 0)
- sender = send_session.sender(addr)
- txn_cnt = 0
- for i in range(0, msg_count):
- sender.send(Message(msg_content % (i + 1), subject = self._get_msg_subject(topic_key), durable = msg_durable_flag))
- if enq_txn_size > 0:
- txn_cnt += 1
- if txn_cnt >= enq_txn_size:
- send_session.commit()
- txn_cnt = 0
- if enq_txn_size > 0 and txn_cnt > 0:
- send_session.commit()
- sender.close()
- send_session.close()
-
- def _receive_msgs(self, session_name, broker, addr, msg_count, msg_content = "Message_%03d", deq_txn_size = 0,
- timeout = 0):
- """
- Receive messages from a broker
- """
- receive_session = broker.session(session_name, transactional_flag = deq_txn_size > 0)
- receiver = receive_session.receiver(addr)
- txn_cnt = 0
- for i in range(0, msg_count):
- try:
- msg = receiver.fetch(timeout = timeout)
- if deq_txn_size > 0:
- txn_cnt += 1
- if txn_cnt >= deq_txn_size:
- receive_session.commit()
- txn_cnt = 0
- receive_session.acknowledge()
- except Empty:
- if deq_txn_size > 0: receive_session.rollback()
- receiver.close()
- receive_session.close()
- if i == 0:
- self.fail("Broker %s queue \"%s\" is empty" % (broker.qmf_broker.getBroker().getUrl(), addr))
- else:
- self.fail("Unable to receive message %d from broker %s queue \"%s\"" % (i, broker.qmf_broker.getBroker().getUrl(), addr))
- if msg.content != msg_content % (i + 1):
- receiver.close()
- receive_session.close()
- self.fail("Unexpected message \"%s\", was expecting \"%s\"." % (msg.content, msg_content % (i + 1)))
- try:
- msg = receiver.fetch(timeout = 0)
- if deq_txn_size > 0: receive_session.rollback()
- receiver.close()
- receive_session.close()
- self.fail("Extra message \"%s\" found on broker %s address \"%s\"" % (msg.content, broker.qmf_broker.getBroker().getUrl(), addr))
- except Empty:
- pass
- if deq_txn_size > 0 and txn_cnt > 0:
- receive_session.commit()
- receiver.close()
- receive_session.close()
-
- #--- QMF-specific utility functions
-
- def _get_qmf_property(self, props, key):
- """
- Get the value of a named property key kj from a property list [(k0, v0), (k1, v1), ... (kn, vn)].
- """
- for k,v in props:
- if k.name == key:
- return v
- return None
-
- def _check_qmf_return(self, method_result):
- """
- Check the result of a Qmf-defined method call
- """
- self.assertTrue(method_result.status == 0, method_result.text)
-
- def _check_optional_qmf_property(self, qmf_broker, type, qmf_object, key, expected_val, obj_ref_flag):
- """
- Optional Qmf properties don't show up in the properties list when they are not specified. Checks for
- these property types involve searching the properties list and making sure it is present or not as
- expected.
- """
- val = self._get_qmf_property(qmf_object.getProperties(), key)
- if val is None:
- if len(expected_val) > 0:
- self.fail("%s %s exists, but does not have %s property. Expected value: \"%s\"" %
- (type, qmf_object.name, key, expected_val))
- else:
- if len(expected_val) > 0:
- if obj_ref_flag:
- obj = self.qmf.getObjects(_objectId = val, _broker = qmf_broker.getBroker())
- self.assertEqual(len(obj), 1, "More than one object with the same objectId: %s" % obj)
- val = obj[0].name
- self.assertEqual(val, expected_val, "%s %s exists, but has incorrect %s property. Found \"%s\", expected \"%s\"" %
- (type, qmf_object.name, key, val, expected_val))
- else:
- self.fail("%s %s exists, but has an unexpected %s property \"%s\" set." % (type, qmf_object.name, key, val))
-
- #--- Find/create Qmf broker objects
-
- def _find_qmf_broker(self, url):
- """
- Find the Qmf broker object for the given broker URL. The broker must have been previously added to Qmf through
- addBroker()
- """
- for b in self.qmf.getObjects(_class="broker"):
- if b.getBroker().getUrl() == url:
- return b
- return None
-
- def _find_create_broker(self, url):
- """
- Find a running broker through Qmf. If it is not yet known to Qmf, add it (the broker itself is assumed to be already running).
- """
- broker = self._Broker(url)
- self._brokers.append(broker)
- if self.qmf is not None:
- qmf_broker = self._find_qmf_broker(broker.url)
- if qmf_broker is None:
- self.qmf.addBroker(broker.url)
- broker.qmf_broker = self._find_qmf_broker(broker.url)
- else:
- broker.qmf_broker = qmf_broker
- return broker
-
- #--- Find/create/delete exchanges
-
- def _find_qmf_exchange(self, qmf_broker, name, type, alternate, durable, auto_delete):
- """
- Find Qmf exchange object
- """
- for e in self.qmf.getObjects(_class="exchange", _broker = qmf_broker.getBroker()):
- if e.name == name:
- if len(name) == 0 or (len(name) >= 4 and name[:4] == "amq."): return e # skip checks for special exchanges
- self.assertEqual(e.type, type,
- "Exchange \"%s\" exists, but is of unexpected type %s; expected type %s." %
- (name, e.type, type))
- self._check_optional_qmf_property(qmf_broker, "Exchange", e, "altExchange", alternate, True)
- self.assertEqual(e.durable, durable,
- "Exchange \"%s\" exists, but has incorrect durability. Found durable=%s, expected durable=%s" %
- (name, e.durable, durable))
- self.assertEqual(e.autoDelete, auto_delete,
- "Exchange \"%s\" exists, but has incorrect auto-delete property. Found %s, expected %s" %
- (name, e.autoDelete, auto_delete))
- return e
- return None
-
- def _find_create_qmf_exchange(self, qmf_broker, name, type, alternate, durable, auto_delete, args):
- """
- Find Qmf exchange object if exchange exists, create exchange and return its Qmf object if not
- """
- e = self._find_qmf_exchange(qmf_broker, name, type, alternate, durable, auto_delete)
- if e is not None: return e
- # Does not exist, so create it
- props = dict({"exchange-type": type, "type": type, "durable": durable, "auto-delete": auto_delete, "alternate-exchange": alternate}, **args)
- self._check_qmf_return(qmf_broker.create(type="exchange", name=name, properties=props, strict=True))
- e = self._find_qmf_exchange(qmf_broker, name, type, alternate, durable, auto_delete)
- self.assertNotEqual(e, None, "Creation of exchange %s on broker %s failed" % (name, qmf_broker.getBroker().getUrl()))
- return e
-
- def _find_delete_qmf_exchange(self, qmf_broker, name, type, alternate, durable, auto_delete):
- """
- Find and delete Qmf exchange object if it exists
- """
- e = self._find_qmf_exchange(qmf_broker, name, type, alternate, durable, auto_delete)
- if e is not None and not auto_delete:
- self._check_qmf_return(qmf_broker.delete(type="exchange", name=name, options={}))
-
- #--- Find/create/delete queues
-
- def _find_qmf_queue(self, qmf_broker, name, alternate_exchange, durable, exclusive, auto_delete):
- """
- Find a Qmf queue object
- """
- for q in self.qmf.getObjects(_class="queue", _broker = qmf_broker.getBroker()):
- if q.name == name:
- self._check_optional_qmf_property(qmf_broker, "Queue", q, "altExchange", alternate_exchange, True)
- self.assertEqual(q.durable, durable,
- "Queue \"%s\" exists, but has incorrect durable property. Found %s, expected %s" %
- (name, q.durable, durable))
- self.assertEqual(q.exclusive, exclusive,
- "Queue \"%s\" exists, but has incorrect exclusive property. Found %s, expected %s" %
- (name, q.exclusive, exclusive))
- self.assertEqual(q.autoDelete, auto_delete,
- "Queue \"%s\" exists, but has incorrect auto-delete property. Found %s, expected %s" %
- (name, q.autoDelete, auto_delete))
- return q
- return None
-
- def _find_create_qmf_queue(self, qmf_broker, name, alternate_exchange, durable, exclusive, auto_delete, args):
- """
- Find Qmf queue object if queue exists, create queue and return its Qmf object if not
- """
- q = self._find_qmf_queue(qmf_broker, name, alternate_exchange, durable, exclusive, auto_delete)
- if q is not None: return q
- # Queue does not exist, so create it
- props = dict({"durable": durable, "auto-delete": auto_delete, "exclusive": exclusive, "alternate-exchange": alternate_exchange}, **args)
- self._check_qmf_return(qmf_broker.create(type="queue", name=name, properties=props, strict=True))
- q = self._find_qmf_queue(qmf_broker, name, alternate_exchange, durable, exclusive, auto_delete)
- self.assertNotEqual(q, None, "Creation of queue %s on broker %s failed" % (name, qmf_broker.getBroker().getUrl()))
- return q
-
- def _find_delete_qmf_queue(self, qmf_broker, name, alternate_exchange, durable, exclusive, auto_delete, args):
- """
- Find and delete Qmf queue object if it exists
- """
- q = self._find_qmf_queue(qmf_broker, name, alternate_exchange, durable, exclusive, auto_delete)
- if q is not None and not auto_delete:
- self._check_qmf_return(qmf_broker.delete(type="queue", name=name, options={}))
-
- #--- Find/create/delete bindings (between an exchange and a queue)
-
- def _find_qmf_binding(self, qmf_broker, qmf_exchange, qmf_queue, binding_key, binding_args):
- """
- Find a Qmf binding object
- """
- for b in self.qmf.getObjects(_class="binding", _broker = qmf_broker.getBroker()):
- if b.exchangeRef == qmf_exchange.getObjectId() and b.queueRef == qmf_queue.getObjectId():
- if qmf_exchange.type != "fanout": # Fanout ignores the binding key, and always returns "" as the key
- self.assertEqual(b.bindingKey, binding_key,
- "Binding between exchange %s and queue %s exists, but has mismatching binding key: Found %s, expected %s." %
- (qmf_exchange.name, qmf_queue.name, b.bindingKey, binding_key))
- self.assertEqual(b.arguments, binding_args,
- "Binding between exchange %s and queue %s exists, but has mismatching arguments: Found %s, expected %s" %
- (qmf_exchange.name, qmf_queue.name, b.arguments, binding_args))
- return b
- return None
-
- def _find_create_qmf_binding(self, qmf_broker, qmf_exchange, qmf_queue, binding_key, binding_args):
- """
- Find Qmf binding object if it exists, create binding and return its Qmf object if not
- """
- b = self._find_qmf_binding(qmf_broker, qmf_exchange, qmf_queue, binding_key, binding_args)
- if b is not None: return b
- # Does not exist, so create it
- self._check_qmf_return(qmf_broker.create(type="binding", name="%s/%s/%s" % (qmf_exchange.name, qmf_queue.name, binding_key), properties=binding_args, strict=True))
- b = self._find_qmf_binding(qmf_broker, qmf_exchange, qmf_queue, binding_key, binding_args)
- self.assertNotEqual(b, None, "Creation of binding between exchange %s and queue %s with key %s failed" %
- (qmf_exchange.name, qmf_queue.name, binding_key))
- return b
-
- def _find_delete_qmf_binding(self, qmf_broker, qmf_exchange, qmf_queue, binding_key, binding_args):
- """
- Find and delete Qmf binding object if it exists
- """
- b = self._find_qmf_binding(qmf_broker, qmf_exchange, qmf_queue, binding_key, binding_args)
- if b is not None:
- if len(qmf_exchange.name) > 0: # not default exchange
- self._check_qmf_return(qmf_broker.delete(type="binding", name="%s/%s/%s" % (qmf_exchange.name, qmf_queue.name, binding_key), options={}))
-
- #--- Find/create a link
-
- def _find_qmf_link(self, qmf_from_broker_proxy, host, port):
- """
- Find a Qmf link object
- """
- for l in self.qmf.getObjects(_class="link", _broker=qmf_from_broker_proxy):
- if l.host == host and l.port == port:
- return l
- return None
-
- def _find_create_qmf_link(self, qmf_from_broker, qmf_to_broker_proxy, link_durable_flag, auth_mechanism, user_id,
- password, transport, pause_interval, link_ready_timeout):
- """
- Find a Qmf link object if it exists, create it and return its Qmf link object if not
- """
- to_broker_host = qmf_to_broker_proxy.host
- to_broker_port = qmf_to_broker_proxy.port
- l = self._find_qmf_link(qmf_from_broker.getBroker(), to_broker_host, to_broker_port)
- if l is not None: return l
- # Does not exist, so create it
- self._check_qmf_return(qmf_from_broker.connect(to_broker_host, to_broker_port, link_durable_flag, auth_mechanism, user_id, password, transport))
- l = self._find_qmf_link(qmf_from_broker.getBroker(), to_broker_host, to_broker_port)
- self.assertNotEqual(l, None, "Creation of link from broker %s to broker %s failed" %
- (qmf_from_broker.getBroker().getUrl(), qmf_to_broker_proxy.getUrl()))
- self._wait_for_link(l, pause_interval, link_ready_timeout)
- return l
-
- def _wait_for_link(self, link, pause_interval, link_ready_timeout):
- """
- Wait for link to become active (state=Operational)
- """
- tot_time = 0
- link.update()
- if link.state == "":
- # Link mgmt updates for the c++ link object are disabled when in a cluster because of inconsistent state:
- # one is "Operational", the other "Passive". In this case, wait a bit and hope for the best...
- sleep(2*pause_interval)
- else:
- while link.state != "Operational" and tot_time < link_ready_timeout:
- sleep(pause_interval)
- tot_time += pause_interval
- link.update()
- self.assertEqual(link.state, "Operational", "Timeout: Link not operational, state=%s" % link.state)
-
- #--- Find/create a bridge
-
- def _find_qmf_bridge(self, qmf_broker_proxy, qmf_link, source, destination, key):
- """
- Find a Qmf link object
- """
- for b in self.qmf.getObjects(_class="bridge", _broker=qmf_broker_proxy):
- if b.linkRef == qmf_link.getObjectId() and b.src == source and b.dest == destination and b.key == key:
- return b
- return None
-
- def _find_create_qmf_bridge(self, qmf_broker_proxy, qmf_link, queue_name, exch_name, topic_key,
- queue_route_type_flag, bridge_durable_flag):
- """
- Find a Qmf bridge object if it exists, create it and return its Qmf object if not
- """
- if queue_route_type_flag:
- src = queue_name
- dest = exch_name
- key = ""
- else:
- src = exch_name
- dest = exch_name
- if len(topic_key) > 0:
- key = topic_key
- else:
- key = queue_name
- b = self._find_qmf_bridge(qmf_broker_proxy, qmf_link, src, dest, key)
- if b is not None:
- return b
- # Does not exist, so create it
- self._check_qmf_return(qmf_link.bridge(bridge_durable_flag, src, dest, key, "", "", queue_route_type_flag, False, False, 1))
- b = self._find_qmf_bridge(qmf_broker_proxy, qmf_link, src, dest, key)
- self.assertNotEqual(b, None, "Bridge creation failed: src=%s dest=%s key=%s" % (src, dest, key))
- return b
-
- def _wait_for_bridge(self, bridge, src_broker, dest_broker, exch_name, queue_name, topic_key, pause_interval,
- bridge_ready_timeout):
- """
- Wait for bridge to become active by sending messages over the bridge at 1 sec intervals until they are
- observed at the destination.
- """
- tot_time = 0
- active = False
- send_session = src_broker.session("tx")
- sender = send_session.sender(self._get_send_address(exch_name, queue_name))
- src_receive_session = src_broker.session("src_rx")
- src_receiver = src_receive_session.receiver(queue_name)
- dest_receive_session = dest_broker.session("dest_rx")
- dest_receiver = dest_receive_session.receiver(queue_name)
- while not active and tot_time < bridge_ready_timeout:
- sender.send(Message("xyz123", subject = self._get_msg_subject(topic_key)))
- try:
- src_receiver.fetch(timeout = 0)
- src_receive_session.acknowledge()
- # Keep receiving msgs, as several may have accumulated
- while True:
- dest_receiver.fetch(timeout = 0)
- dest_receive_session.acknowledge()
- sleep(1)
- active = True
- except Empty:
- sleep(pause_interval)
- tot_time += pause_interval
- dest_receiver.close()
- dest_receive_session.close()
- src_receiver.close()
- src_receive_session.close()
- sender.close()
- send_session.close()
- self.assertTrue(active, "Bridge failed to become active after %ds: %s" % (bridge_ready_timeout, bridge))
-
- #--- Find/create/delete utility functions
-
- def _create_and_bind(self, qmf_broker, exchange_args, queue_args, binding_args):
- """
- Create a binding between a named exchange and queue on a broker
- """
- e = self._find_create_qmf_exchange(qmf_broker, **exchange_args)
- q = self._find_create_qmf_queue(qmf_broker, **queue_args)
- return self._find_create_qmf_binding(qmf_broker, e, q, **binding_args)
-
- def _check_alt_exchange(self, qmf_broker, alt_exch_name, alt_exch_type, alt_exch_op):
- """
- Check for existence of alternate exchange. Return the Qmf exchange proxy object for the alternate exchange
- """
- if len(alt_exch_name) == 0: return None
- if alt_exch_op == self._alt_exch_ops.create:
- return self._find_create_qmf_exchange(qmf_broker=qmf_broker, name=alt_exch_name, type=alt_exch_type,
- alternate="", durable=False, auto_delete=False, args={})
- if alt_exch_op == self._alt_exch_ops.delete:
- return self._find_delete_qmf_exchange(qmf_broker=qmf_broker, name=alt_exch_name, type=alt_exch_type,
- alternate="", durable=False, auto_delete=False)
- return self._find_qmf_exchange(qmf_broker=qmf_broker, name=alt_exch_name, type=alt_exch_type,
- alternate="", durable=False, auto_delete=False)
-
- def _delete_queue_binding(self, qmf_broker, exchange_args, queue_args, binding_args):
- """
- Delete a queue and the binding between it and the exchange
- """
- e = self._find_qmf_exchange(qmf_broker, exchange_args["name"], exchange_args["type"], exchange_args["alternate"], exchange_args["durable"], exchange_args["auto_delete"])
- q = self._find_qmf_queue(qmf_broker, queue_args["name"], queue_args["alternate_exchange"], queue_args["durable"], queue_args["exclusive"], queue_args["auto_delete"])
- self._find_delete_qmf_binding(qmf_broker, e, q, **binding_args)
- self._find_delete_qmf_queue(qmf_broker, **queue_args)
-
- def _create_route(self, queue_route_type_flag, src_broker, dest_broker, exch_name, queue_name, topic_key,
- link_durable_flag, bridge_durable_flag, auth_mechanism, user_id, password, transport,
- pause_interval = 1, link_ready_timeout = 20, bridge_ready_timeout = 20):
- """
- Create a route from a source broker to a destination broker
- """
- l = self._find_create_qmf_link(dest_broker.qmf_broker, src_broker.qmf_broker.getBroker(), link_durable_flag,
- auth_mechanism, user_id, password, transport, pause_interval, link_ready_timeout)
- self._links.append(l)
- b = self._find_create_qmf_bridge(dest_broker.qmf_broker.getBroker(), l, queue_name, exch_name, topic_key,
- queue_route_type_flag, bridge_durable_flag)
- self._bridges.append(b)
- self._wait_for_bridge(b, src_broker, dest_broker, exch_name, queue_name, topic_key, pause_interval, bridge_ready_timeout)
-
- # Parameterized test - entry point for tests
-
- def _do_test(self,
- test_name, # Name of test
- exch_name = "amq.direct", # Remote exchange name
- exch_type = "direct", # Remote exchange type
- exch_alt_exch = "", # Remote exchange alternate exchange
- exch_alt_exch_type = "direct", # Remote exchange alternate exchange type
- exch_durable_flag = False, # Remote exchange durability
- exch_auto_delete_flag = False, # Remote exchange auto-delete property
- exch_x_args = {}, # Remote exchange args
- queue_alt_exch = "", # Remote queue alternate exchange
- queue_alt_exch_type = "direct", # Remote queue alternate exchange type
- queue_durable_flag = False, # Remote queue durability
- queue_exclusive_flag = False, # Remote queue exclusive property
- queue_auto_delete_flag = False, # Remote queue auto-delete property
- queue_x_args = {}, # Remote queue args
- binding_durable_flag = False, # Remote binding durability
- binding_x_args = {}, # Remote binding args
- topic_key = "", # Binding key For remote topic exchanges only
- msg_count = 10, # Number of messages to send
- msg_durable_flag = False, # Message durability
- link_durable_flag = False, # Route link durability
- bridge_durable_flag = False, # Route bridge durability
- queue_route_type_flag = False, # Route type: false = bridge route, true = queue route
- enq_txn_size = 0, # Enqueue transaction size, 0 = no transactions
- deq_txn_size = 0, # Dequeue transaction size, 0 = no transactions
- local_cluster_flag = False, # Use a node from the local cluster, otherwise use single local broker
- remote_cluster_flag = False, # Use a node from the remote cluster, otherwise use single remote broker
- alt_exch_op = _alt_exch_ops.create, # Operation on alternate exchange [create (ensure present), delete (ensure not present), none (neither create nor delete)]
- auth_mechanism = "", # Authentication mechanism for the linked broker
- user_id = "", # User ID for authentication on the linked broker
- password = "", # Password for authentication on the linked broker
- transport = "tcp" # Transport for the route to the linked broker
- ):
- """
- Parameterized federation test. Sets up a federated link between a source broker and a destination broker and
- checks that messages correctly pass over the link to the destination. Where appropriate (non-queue-routes), also
- checks for the presence of messages on the source broker.
-
- In these tests, the concept is to create a LOCAL broker, then create a link to a REMOTE broker using federation.
- In other words, the messages sent to the LOCAL broker will be replicated on the REMOTE broker, and tests are
- performed on the REMOTE broker to check that the required messages are present. In the case of regular routes,
- the LOCAL broker will also retain the messages, and a similar test is performed on this broker.
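- (An illustrative client-level sketch of this send-to-local/receive-from-remote flow appears after this method.)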
-
- TODO: There are several items to improve here:
- 1. _do_test() is rather general. Instead, create a version for each exchange type and test the exchange/queue
- interaction in more detail based on the exchange type.
- 2. Add headers and XML exchange types.
- 3. Restructure the tests to start and stop brokers and clusters directly rather than relying on previously
- started brokers. Then persistence can be checked by stopping and restarting the brokers/clusters. In particular,
- test the persistence of links and bridges, both of which take a durability flag.
- 4. Test the behavior of the alternate exchanges when messages are sourced through a link. Also check the behavior
- when the alternate exchange is not present or is deleted after the reference is made.
- 5. Test special queue types (e.g. LVQ).
- """
- local_broker = self._get_broker(local_cluster_flag, "local-port", "local-cluster-ports")
- remote_broker = self._get_broker(remote_cluster_flag, "remote-port", "remote-cluster-ports")
-
- # Check the alternate exchanges on both local and remote brokers, creating or deleting them as dictated by alt_exch_op
- self._check_alt_exchange(local_broker.qmf_broker, exch_alt_exch, exch_alt_exch_type, alt_exch_op)
- self._check_alt_exchange(local_broker.qmf_broker, queue_alt_exch, queue_alt_exch_type, alt_exch_op)
- self._check_alt_exchange(remote_broker.qmf_broker, exch_alt_exch, exch_alt_exch_type, alt_exch_op)
- self._check_alt_exchange(remote_broker.qmf_broker, queue_alt_exch, queue_alt_exch_type, alt_exch_op)
-
- queue_name = "queue_%s" % test_name
- exchange_args = {"name": exch_name, "type": exch_type, "alternate": exch_alt_exch,
- "durable": exch_durable_flag, "auto_delete": exch_auto_delete_flag, "args": exch_x_args}
- queue_args = {"name": queue_name, "alternate_exchange": queue_alt_exch, "durable": queue_durable_flag,
- "exclusive": queue_exclusive_flag, "auto_delete": queue_auto_delete_flag, "args": queue_x_args}
- binding_args = {"binding_args": binding_x_args}
- if exch_type == "topic":
- self.assertTrue(len(topic_key) > 0, "Topic exchange selected, but no topic key was set.")
- binding_args["binding_key"] = topic_key
- elif exch_type == "direct":
- binding_args["binding_key"] = queue_name
- else:
- binding_args["binding_key"] = ""
- self._create_and_bind(qmf_broker=local_broker.qmf_broker, exchange_args=exchange_args, queue_args=queue_args, binding_args=binding_args)
- self._create_and_bind(qmf_broker=remote_broker.qmf_broker, exchange_args=exchange_args, queue_args=queue_args, binding_args=binding_args)
- self._create_route(queue_route_type_flag, local_broker, remote_broker, exch_name, queue_name, topic_key,
- link_durable_flag, bridge_durable_flag, auth_mechanism, user_id, password, transport)
-
- self._send_msgs("send_session", local_broker, addr = self._get_send_address(exch_name, queue_name),
- msg_count = msg_count, topic_key = topic_key, msg_durable_flag = msg_durable_flag, enq_txn_size = enq_txn_size)
- if not queue_route_type_flag:
- self._receive_msgs("local_receive_session", local_broker, addr = queue_name, msg_count = msg_count, deq_txn_size = deq_txn_size)
- self._receive_msgs("remote_receive_session", remote_broker, addr = queue_name, msg_count = msg_count, deq_txn_size = deq_txn_size, timeout = 5)
-
- # Clean up
- self._delete_queue_binding(qmf_broker=local_broker.qmf_broker, exchange_args=exchange_args, queue_args=queue_args, binding_args=binding_args)
- self._delete_queue_binding(qmf_broker=remote_broker.qmf_broker, exchange_args=exchange_args, queue_args=queue_args, binding_args=binding_args)
-
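-
- # Illustrative sketch of the client-level flow that _do_test() verifies, as referenced in its
- # docstring above. This hypothetical helper is not called by any test; broker URLs and the
- # queue name are placeholders. It shows the send-to-LOCAL / receive-from-REMOTE pattern using
- # the same qpid.messaging API (Connection, session, sender/receiver, fetch, acknowledge) that
- # the tests rely on.
- def _federation_flow_sketch(local_url="localhost:10001", remote_url="localhost:10002", queue="queue_example"):
- from qpid.messaging import Connection, Message
- local = Connection(local_url)
- remote = Connection(remote_url)
- local.open()
- remote.open()
- try:
- # Publish to the LOCAL broker; the federation route replicates the message to the REMOTE broker.
- local.session().sender(queue).send(Message("test message"))
- # Fetch from the REMOTE broker to confirm the message crossed the route.
- session = remote.session()
- msg = session.receiver(queue).fetch(timeout=5)
- session.acknowledge()
- assert msg.content == "test message"
- finally:
- local.close()
- remote.close()
-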
-class A_ShortTests(QmfTestBase010):
-
- def test_route_defaultExch(self):
- self._do_test(self._get_name())
-
- def test_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True)
-
-
-class A_LongTests(QmfTestBase010):
-
- def test_route_amqDirectExch(self):
- self._do_test(self._get_name(), exch_name="amq.direct")
-
- def test_queueRoute_amqDirectExch(self):
- self._do_test(self._get_name(), exch_name="amq.direct", queue_route_type_flag=True)
-
-
- def test_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange")
-
- def test_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True)
-
-
- def test_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout")
-
- def test_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True)
-
-
- def test_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#")
-
- def test_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True)
-
-
-class B_ShortTransactionTests(QmfTestBase010):
-
- def test_txEnq01_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=1)
-
- def test_txEnq01_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=1)
-
- def test_txEnq01_txDeq01_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=1, deq_txn_size=1)
-
- def test_txEnq01_txDeq01_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
-
-class B_LongTransactionTests(QmfTestBase010):
-
- def test_txEnq10_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=10, msg_count = 103)
-
- def test_txEnq10_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
-
- def test_txEnq01_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=1)
-
- def test_txEnq01_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=1)
-
- def test_txEnq10_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=10, msg_count = 103)
-
- def test_txEnq10_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
- def test_txEnq01_txDeq01_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=1, deq_txn_size=1)
-
- def test_txEnq01_txDeq01_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
-
- def test_txEnq01_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=1)
-
- def test_txEnq01_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=1)
-
- def test_txEnq10_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=10, msg_count = 103)
-
- def test_txEnq10_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
- def test_txEnq01_txDeq01_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=1, deq_txn_size=1)
-
- def test_txEnq01_txDeq01_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
-
- def test_txEnq01_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=1)
-
- def test_txEnq01_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=1)
-
- def test_txEnq10_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=10, msg_count = 103)
-
- def test_txEnq10_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
- def test_txEnq01_txDeq01_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=1, deq_txn_size=1)
-
- def test_txEnq01_txDeq01_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
-
-class C_ShortClusterTests(QmfTestBase010):
-
- def test_locCluster_route_defaultExch(self):
- self._do_test(self._get_name(), local_cluster_flag=True)
-
- def test_locCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_remCluster_route_defaultExch(self):
- self._do_test(self._get_name(), remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_defaultExch(self):
- self._do_test(self._get_name(), local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
-
-class C_LongClusterTests(QmfTestBase010):
-
- def test_locCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", local_cluster_flag=True)
-
- def test_locCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_remCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_locCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", local_cluster_flag=True)
-
- def test_locCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_remCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_locCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", local_cluster_flag=True)
-
- def test_locCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_remCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
-
-class D_ShortClusterTransactionTests(QmfTestBase010):
-
- def test_txEnq01_locCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
-
-class D_LongClusterTransactionTests(QmfTestBase010):
-
- def test_txEnq10_locCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_txEnq01_locCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_txEnq01_locCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_txEnq01_locCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
-
-class E_ShortPersistenceTests(QmfTestBase010):
-
- def test_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True)
-
- def test_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True)
-
- def test_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True)
-
- def test_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True)
-
-
-class E_LongPersistenceTests(QmfTestBase010):
-
- def test_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True)
-
- def test_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True)
-
- def test_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True)
-
- def test_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True)
-
-
- def test_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True)
-
- def test_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True)
-
- def test_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True)
-
- def test_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True)
-
-
- def test_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True)
-
- def test_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True)
-
- def test_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True)
-
- def test_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True)
-
-
-class F_ShortPersistenceTransactionTests(QmfTestBase010):
-
- def test_txEnq01_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=1)
-
- def test_txEnq01_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1)
-
- def test_txEnq01_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1)
-
- def test_txEnq01_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1)
-
- def test_txEnq01_txDeq01_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1)
-
- def test_txEnq01_txDeq01_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1)
-
- def test_txEnq01_txDeq01_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
- def test_txEnq01_txDeq01_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
-
-class F_LongPersistenceTransactionTests(QmfTestBase010):
-
- def test_txEnq10_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=10, msg_count = 103)
-
- def test_txEnq10_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103)
-
- def test_txEnq10_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
- def test_txEnq10_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
-
- def test_txEnq01_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=1)
-
- def test_txEnq01_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1)
-
- def test_txEnq01_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1)
-
- def test_txEnq01_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1)
-
- def test_txEnq10_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=10, msg_count = 103)
-
- def test_txEnq10_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103)
-
- def test_txEnq10_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
- def test_txEnq10_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
- def test_txEnq01_txDeq01_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1)
-
- def test_txEnq01_txDeq01_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1)
-
- def test_txEnq01_txDeq01_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
- def test_txEnq01_txDeq01_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
-
- def test_txEnq01_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=1)
-
- def test_txEnq01_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1)
-
- def test_txEnq01_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1)
-
- def test_txEnq01_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1)
-
- def test_txEnq10_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=10, msg_count = 103)
-
- def test_txEnq10_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103)
-
- def test_txEnq10_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
- def test_txEnq10_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
- def test_txEnq01_txDeq01_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1)
-
- def test_txEnq01_txDeq01_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1)
-
- def test_txEnq01_txDeq01_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
- def test_txEnq01_txDeq01_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
-
- def test_txEnq01_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=1)
-
- def test_txEnq01_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1)
-
- def test_txEnq01_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1)
-
- def test_txEnq01_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1)
-
- def test_txEnq10_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=10, msg_count = 103)
-
- def test_txEnq10_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103)
-
- def test_txEnq10_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
- def test_txEnq10_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
- def test_txEnq01_txDeq01_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1)
-
- def test_txEnq01_txDeq01_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1)
-
- def test_txEnq01_txDeq01_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
- def test_txEnq01_txDeq01_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
-
-class G_ShortPersistenceClusterTests(QmfTestBase010):
-
- def test_locCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, local_cluster_flag=True)
-
- def test_locCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, local_cluster_flag=True)
-
- def test_locCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_locCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_remCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
-
-class G_LongPersistenceClusterTests(QmfTestBase010):
-
- def test_locCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, local_cluster_flag=True)
-
- def test_locCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, local_cluster_flag=True)
-
- def test_locCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_locCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_remCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_locCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, local_cluster_flag=True)
-
- def test_locCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, local_cluster_flag=True)
-
- def test_locCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_locCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_remCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_locCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, local_cluster_flag=True)
-
- def test_locCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, local_cluster_flag=True)
-
- def test_locCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_locCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_remCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
-
-class H_ShortPersistenceClusterTransactionTests(QmfTestBase010):
-
- def test_txEnq01_locCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
-
-class H_LongPersistenceClusterTransactionTests(QmfTestBase010):
-
- def test_txEnq10_locCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
-
-
-
- def test_txEnq01_locCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_txEnq01_locCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_txEnq01_locCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
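The transaction test classes removed above differ only in the keyword arguments each method forwards to _do_test: exchange name and type, message and queue durability, enqueue/dequeue transaction sizes, and the local/remote cluster flags. For anyone reconstructing these cases elsewhere, the same matrix could also be generated instead of hand-written; the sketch below is illustrative only and assumes a hypothetical stand-in for the QmfTestBase010 base class that exposes the same _get_name/_do_test interface as the file being deleted here.

    import itertools
    import unittest

    class FakeQmfTestBase(unittest.TestCase):
        """Hypothetical stand-in for QmfTestBase010; records the kwargs passed to _do_test."""
        calls = []

        def _get_name(self):
            return self.id().split(".")[-1]

        def _do_test(self, name, **kwargs):
            FakeQmfTestBase.calls.append((name, kwargs))

    def _make_test(**kwargs):
        def test(self):
            self._do_test(self._get_name(), **kwargs)
        return test

    class GeneratedClusterTransactionTests(FakeQmfTestBase):
        pass

    # Same flavour of matrix as the hand-written methods above:
    # {route, queueRoute} x {non-durable, durable msg} x {local, remote} cluster, enq_txn_size=1.
    for queue_route, msg_durable, cluster in itertools.product((False, True), (False, True), ("loc", "rem")):
        name = "test_txEnq01_%sCluster_%s_%sdurQueue_defaultExch" % (
            cluster, "queueRoute" if queue_route else "route", "durMsg_" if msg_durable else "")
        kwargs = dict(queue_durable_flag=True, enq_txn_size=1)
        if queue_route:
            kwargs["queue_route_type_flag"] = True
        if msg_durable:
            kwargs["msg_durable_flag"] = True
        kwargs["local_cluster_flag" if cluster == "loc" else "remote_cluster_flag"] = True
        setattr(GeneratedClusterTransactionTests, name, _make_test(**kwargs))

    if __name__ == "__main__":
        unittest.main()

The hand-written form used in the deleted file has one advantage over generation: every combination remains an individually named, greppable test method, which matters when selecting or excluding cases in a test runner.
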
diff --git a/cpp/src/tests/ipv6_test b/cpp/src/tests/ipv6_test
deleted file mode 100755
index d75d50fd0a..0000000000
--- a/cpp/src/tests/ipv6_test
+++ /dev/null
@@ -1,150 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Run a simple test over IPv6
-source ./test_env.sh
-
-CONFIG=$(dirname $0)/config.null
-TEST_HOSTNAME=::1
-COUNT=10
-
-trap cleanup EXIT
-
-error() { echo $*; exit 1; }
-
-# Don't need --no-module-dir or --no-data-dir as they are set as env vars in test_env.sh
-COMMON_OPTS="--daemon --auth no --config $CONFIG"
-
-# Record all broker ports started
-unset PORTS
-declare -a PORTS
-
-# Start new brokers:
-# $1 must be integer
-# $2 = extra opts
-# Append used ports to PORTS variable
-start_brokers() {
- local -a ports
- for (( i=0; $i<$1; i++)) do
- ports[$i]=$($QPIDD_EXEC --port 0 $COMMON_OPTS $2)
- done
- PORTS=( ${PORTS[@]} ${ports[@]} )
-}
-
-stop_brokers() {
- for port in "${PORTS[@]}";
- do
- $QPIDD_EXEC -qp $port
- done
- PORTS=()
-}
-
-cleanup() {
- stop_brokers
-}
-
-start_brokers 1
-PORT=${PORTS[0]}
-echo "Started IPv6 smoke perftest on broker port $PORT"
-
-## Test connection via connection settings
-./qpid-perftest --count ${COUNT} --port ${PORT} -b $TEST_HOSTNAME --summary
-
-## Test connection with a URL
-URL="amqp:[$TEST_HOSTNAME]:$PORT"
-
-./qpid-send -b $URL --content-string=hello -a "foo;{create:always}"
-MSG=`./qpid-receive -b $URL -a "foo;{create:always}" --messages 1`
-test "$MSG" = "hello" || { echo "receive failed '$MSG' != 'hello'"; exit 1; }
-
-stop_brokers
-
-# Federation smoke test follows
-
-# Start 2 brokers
-
-# In a distribution, the python tools will be absent.
-if [ ! -f $QPID_CONFIG_EXEC ] || [ ! -f $QPID_ROUTE_EXEC ] ; then
- echo "python tools absent - skipping federation test."
-else
-
- start_brokers 2
- echo "Started Federated brokers on ports ${PORTS[*]}"
- # Make broker urls
- BROKER0="[::1]:${PORTS[0]}"
- BROKER1="[::1]:${PORTS[1]}"
- TEST_QUEUE=ipv6-fed-test
-
- $QPID_CONFIG_EXEC -a $BROKER0 add queue $TEST_QUEUE
- $QPID_CONFIG_EXEC -a $BROKER1 add queue $TEST_QUEUE
- $QPID_ROUTE_EXEC dynamic add $BROKER1 $BROKER0 amq.direct
- $QPID_CONFIG_EXEC -a $BROKER1 bind amq.direct $TEST_QUEUE $TEST_QUEUE
- $QPID_ROUTE_EXEC route map $BROKER1
-
- ./datagen --count 100 | tee rdata-in |
- ./qpid-send -b amqp:$BROKER0 -a amq.direct/$TEST_QUEUE --content-stdin
- ./qpid-receive -b amqp:$BROKER1 -a $TEST_QUEUE --print-content yes -m 0 > rdata-out
-
- cmp rdata-in rdata-out || { echo "Federated data over IPv6 does not compare"; exit 1; }
-
- stop_brokers
- rm rdata-in rdata-out
-fi
-
-# Cluster smoke test follows
-test -z $CLUSTER_LIB && exit 0 # Exit if cluster not supported.
-
-## Test failover in a cluster using IPv6 only
-. $srcdir/ais_check # Will exit if clustering not enabled.
-
-pick_port() {
- # We need a fixed port to set --cluster-url. Use qpidd to pick a free port.
- # Note this method is racy
- PICK=$($QPIDD_EXEC -dp0)
- $QPIDD_EXEC -qp $PICK
- echo $PICK
-}
-
-ssl_cluster_broker() { # $1 = port
- $QPIDD_EXEC $COMMON_OPTS --load-module $CLUSTER_LIB --cluster-name ipv6_test.$HOSTNAME.$$ --cluster-url amqp:[$TEST_HOSTNAME]:$1 --port $1
- # Wait for broker to be ready
- ./qpid-ping -b $TEST_HOSTNAME -qp $1 || { echo "Cannot connect to broker on $1"; exit 1; }
- echo "Running IPv6 cluster broker on port $1"
-}
-
-PORT1=`pick_port`; ssl_cluster_broker $PORT1
-PORT2=`pick_port`; ssl_cluster_broker $PORT2
-
-# Pipe receive output to uniq to remove duplicates
-./qpid-receive --connection-options "{reconnect:true, reconnect-timeout:5}" --failover-updates -b amqp:[$TEST_HOSTNAME]:$PORT1 -a "foo;{create:always}" -f | uniq > ssl_test_receive.tmp &
-
-./qpid-send -b amqp:[$TEST_HOSTNAME]:$PORT2 --content-string=one -a "foo;{create:always}"
-
-$QPIDD_EXEC -qp $PORT1 # Kill broker 1; receiver should fail over.
-./qpid-send -b amqp:[$TEST_HOSTNAME]:$PORT2 --content-string=two -a "foo;{create:always}" --send-eos 1
-wait # Wait for qpid-receive
-{ echo one; echo two; } > ssl_test_receive.cmp
-diff ssl_test_receive.tmp ssl_test_receive.cmp || { echo "Failover failed"; exit 1; }
-
-$QPIDD_EXEC -qp $PORT2
-
-rm -f ssl_test_receive.*
-
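Throughout the removed ipv6_test script, broker URLs wrap IPv6 literals in square brackets (amqp:[::1]:PORT, [::1]:PORT) so the colons inside the address are not confused with the port separator; this is the same bracketing RFC 3986 uses for IP literals in URIs. A minimal sketch of that formatting rule, using only the Python 3 standard library and an invented helper name, follows.

    import ipaddress

    def broker_url(host, port, scheme="amqp"):
        """Format a broker URL, bracketing bare IPv6 literals as the deleted script does."""
        try:
            is_v6 = ipaddress.ip_address(host).version == 6
        except ValueError:
            is_v6 = False  # hostname or anything else that is not a bare IP literal
        host_part = "[%s]" % host if is_v6 else host
        return "%s:%s:%d" % (scheme, host_part, port)

    assert broker_url("::1", 5672) == "amqp:[::1]:5672"
    assert broker_url("127.0.0.1", 5672) == "amqp:127.0.0.1:5672"
    assert broker_url("localhost", 5672) == "amqp:localhost:5672"
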
diff --git a/cpp/src/tests/msg_group_test.cpp b/cpp/src/tests/msg_group_test.cpp
deleted file mode 100644
index 6b9d09b89a..0000000000
--- a/cpp/src/tests/msg_group_test.cpp
+++ /dev/null
@@ -1,618 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-#include <qpid/messaging/Address.h>
-#include <qpid/messaging/Connection.h>
-#include <qpid/messaging/Receiver.h>
-#include <qpid/messaging/Sender.h>
-#include <qpid/messaging/Session.h>
-#include <qpid/messaging/Message.h>
-#include <qpid/messaging/FailoverUpdates.h>
-#include <qpid/Options.h>
-#include <qpid/log/Logger.h>
-#include <qpid/log/Options.h>
-#include "qpid/log/Statement.h"
-#include "qpid/sys/Time.h"
-#include "qpid/sys/Runnable.h"
-#include "qpid/sys/Thread.h"
-#include "qpid/sys/SystemInfo.h"
-
-#include <iostream>
-#include <memory>
-#include <stdlib.h>
-
-using namespace qpid::messaging;
-using namespace qpid::types;
-using namespace std;
-
-namespace qpid {
-namespace tests {
-
-struct Options : public qpid::Options
-{
- bool help;
- std::string url;
- std::string address;
- std::string connectionOptions;
- uint messages;
- uint capacity;
- uint ackFrequency;
- bool failoverUpdates;
- qpid::log::Options log;
- uint senders;
- uint receivers;
- uint groupSize;
- bool printReport;
- std::string groupKey;
- bool durable;
- bool allowDuplicates;
- bool randomizeSize;
- bool stickyConsumer;
- uint timeout;
- uint interleave;
- std::string prefix;
- uint sendRate;
-
- Options(const std::string& argv0=std::string())
- : qpid::Options("Options"),
- help(false),
- url("amqp:tcp:127.0.0.1"),
- messages(10000),
- capacity(1000),
- ackFrequency(100),
- failoverUpdates(false),
- log(argv0),
- senders(2),
- receivers(2),
- groupSize(10),
- printReport(false),
- groupKey("qpid.no_group"),
- durable(false),
- allowDuplicates(false),
- randomizeSize(false),
- stickyConsumer(false),
- timeout(10),
- interleave(1),
- sendRate(0)
- {
- addOptions()
- ("ack-frequency", qpid::optValue(ackFrequency, "N"), "Ack frequency (0 implies none of the messages will get accepted)")
- ("address,a", qpid::optValue(address, "ADDRESS"), "address to send and receive from")
- ("allow-duplicates", qpid::optValue(allowDuplicates), "Ignore the delivery of duplicated messages")
- ("broker,b", qpid::optValue(url, "URL"), "url of broker to connect to")
- ("capacity", qpid::optValue(capacity, "N"), "Pre-fetch window (0 implies no pre-fetch)")
- ("connection-options", qpid::optValue(connectionOptions, "OPTIONS"), "options for the connection")
- ("durable", qpid::optValue(durable, "yes|no"), "Mark messages as durable.")
- ("failover-updates", qpid::optValue(failoverUpdates), "Listen for membership updates distributed via amq.failover")
- ("group-key", qpid::optValue(groupKey, "KEY"), "Key of the message header containing the group identifier.")
- ("group-prefix", qpid::optValue(prefix, "STRING"), "Add 'prefix' to the start of all generated group identifiers.")
-            ("group-size", qpid::optValue(groupSize, "N"), "Number of messages per group.")
-            ("interleave", qpid::optValue(interleave, "N"), "Simultaneously interleave messages from N different groups.")
-            ("messages,m", qpid::optValue(messages, "N"), "Number of messages to send per sender.")
- ("receivers,r", qpid::optValue(receivers, "N"), "Number of message consumers.")
- ("randomize-group-size", qpid::optValue(randomizeSize), "Randomize the number of messages per group to [1...group-size].")
- ("send-rate", qpid::optValue(sendRate,"N"), "Send at rate of N messages/second. 0 means send as fast as possible.")
- ("senders,s", qpid::optValue(senders, "N"), "Number of message producers.")
- ("sticky-consumers", qpid::optValue(stickyConsumer), "If set, verify that all messages in a group are consumed by the same client [TBD].")
- ("timeout", qpid::optValue(timeout, "N"), "Fail with a stall error should all consumers remain idle for timeout seconds.")
- ("print-report", qpid::optValue(printReport), "Dump message group statistics to stdout.")
- ("help", qpid::optValue(help), "print this usage statement");
- add(log);
- //("check-redelivered", qpid::optValue(checkRedelivered), "Fails with exception if a duplicate is not marked as redelivered (only relevant when ignore-duplicates is selected)")
- //("tx", qpid::optValue(tx, "N"), "batch size for transactions (0 implies transaction are not used)")
- //("rollback-frequency", qpid::optValue(rollbackFrequency, "N"), "rollback frequency (0 implies no transaction will be rolledback)")
- }
-
- bool parse(int argc, char** argv)
- {
- try {
- qpid::Options::parse(argc, argv);
- if (address.empty()) throw qpid::Exception("Address must be specified!");
- qpid::log::Logger::instance().configure(log);
- if (help) {
- std::ostringstream msg;
-                std::cout << *this << std::endl << std::endl
- << "Verifies the behavior of grouped messages." << std::endl;
- return false;
- } else {
- return true;
- }
- } catch (const std::exception& e) {
- std::cerr << *this << std::endl << std::endl << e.what() << std::endl;
- return false;
- }
- }
-};
-
-const string EOS("eos");
-const string SN("sn");
-
-
-// class that monitors group state across all publishers and consumers. tracks the next
-// expected sequence for each group, and total messages consumed.
-class GroupChecker
-{
- qpid::sys::Mutex lock;
-
- const uint totalMsgs;
- uint totalMsgsConsumed;
- uint totalMsgsPublished;
- bool allowDuplicates;
- uint duplicateMsgs;
-
- typedef std::map<std::string, uint> SequenceMap;
- SequenceMap sequenceMap;
-
- // Statistics - for each group, store the names of all clients that consumed messages
- // from that group, and the number of messages consumed per client.
- typedef std::map<std::string, uint> ClientCounter;
- typedef std::map<std::string, ClientCounter> GroupStatistics;
- GroupStatistics statistics;
-
-public:
-
- GroupChecker( uint t, bool d ) :
- totalMsgs(t), totalMsgsConsumed(0), totalMsgsPublished(0), allowDuplicates(d),
- duplicateMsgs(0) {}
-
- bool checkSequence( const std::string& groupId,
- uint sequence, const std::string& client )
- {
- qpid::sys::Mutex::ScopedLock l(lock);
-
- QPID_LOG(debug, "Client " << client << " has received " << groupId << ":" << sequence);
-
- GroupStatistics::iterator gs = statistics.find(groupId);
- if (gs == statistics.end()) {
- statistics[groupId][client] = 1;
- } else {
- gs->second[client]++;
- }
- // now verify
- SequenceMap::iterator s = sequenceMap.find(groupId);
- if (s == sequenceMap.end()) {
- QPID_LOG(debug, "Client " << client << " thinks this is the first message from group " << groupId << ":" << sequence);
- // if duplication allowed, it is possible that the last msg(s) of an old sequence are redelivered on reconnect.
- // in this case, set the sequence from the first msg.
- sequenceMap[groupId] = (allowDuplicates) ? sequence : 0;
- s = sequenceMap.find(groupId);
- } else if (sequence < s->second) {
- duplicateMsgs++;
- QPID_LOG(debug, "Client " << client << " thinks this message is a duplicate! " << groupId << ":" << sequence);
- return allowDuplicates;
- }
- totalMsgsConsumed++;
- return sequence == s->second++;
- }
-
- void sendingSequence( const std::string& groupId,
- uint sequence, bool eos,
- const std::string& client )
- {
- qpid::sys::Mutex::ScopedLock l(lock);
- ++totalMsgsPublished;
-
- QPID_LOG(debug, "Client " << client << " sending " << groupId << ":" << sequence <<
- ((eos) ? " (last)" : ""));
- }
-
- bool eraseGroup( const std::string& groupId, const std::string& name )
- {
- qpid::sys::Mutex::ScopedLock l(lock);
- QPID_LOG(debug, "Deleting group " << groupId << " (by client " << name << ")");
- return sequenceMap.erase( groupId ) == 1;
- }
-
- uint getNextExpectedSequence( const std::string& groupId )
- {
- qpid::sys::Mutex::ScopedLock l(lock);
- return sequenceMap[groupId];
- }
-
- bool allMsgsConsumed() // true when done processing msgs
- {
- qpid::sys::Mutex::ScopedLock l(lock);
- return (totalMsgsPublished >= totalMsgs) &&
- (totalMsgsConsumed >= totalMsgsPublished) &&
- sequenceMap.size() == 0;
- }
-
- uint getConsumedTotal()
- {
- qpid::sys::Mutex::ScopedLock l(lock);
- return totalMsgsConsumed;
- }
-
- uint getPublishedTotal()
- {
- qpid::sys::Mutex::ScopedLock l(lock);
- return totalMsgsPublished;
- }
-
- ostream& print(ostream& out)
- {
- qpid::sys::Mutex::ScopedLock l(lock);
- out << "Total Published: " << totalMsgsPublished << ", Total Consumed: " << totalMsgsConsumed <<
- ", Duplicates detected: " << duplicateMsgs << std::endl;
- out << "Total Groups: " << statistics.size() << std::endl;
- unsigned long consumers = 0;
- for (GroupStatistics::iterator gs = statistics.begin(); gs != statistics.end(); ++gs) {
- out << " GroupId: " << gs->first;
- consumers += gs->second.size(); // # of consumers that processed this group
- if (gs->second.size() == 1)
- out << " completely consumed by a single client." << std::endl;
- else
- out << " consumed by " << gs->second.size() << " different clients." << std::endl;
-
- for (ClientCounter::iterator cc = gs->second.begin(); cc != gs->second.end(); ++cc) {
- out << " Client: " << cc->first << " consumed " << cc->second << " messages from the group." << std::endl;
- }
- }
- out << "Average # of consumers per group: " << ((statistics.size() != 0) ? (double(consumers)/statistics.size()) : 0) << std::endl;
- return out;
- }
-};
-
-
-namespace {
- // rand() is not thread safe. Create a singleton obj to hold a lock while calling
- // rand() so it can be called safely by multiple concurrent clients.
- class Randomizer {
- qpid::sys::Mutex lock;
- public:
- uint operator()(uint max) {
- qpid::sys::Mutex::ScopedLock l(lock);
- return (rand() % max) + 1;
- }
- };
-
- static Randomizer randomizer;
-}
-
-
-// tag each generated message with a group identifier
-//
-class GroupGenerator {
-
- const std::string groupPrefix;
- const uint groupSize;
- const bool randomizeSize;
- const uint interleave;
-
- uint groupSuffix;
- uint total;
-
- struct GroupState {
- std::string id;
- const uint size;
- uint count;
- GroupState( const std::string& i, const uint s )
- : id(i), size(s), count(0) {}
- };
- typedef std::list<GroupState> GroupList;
- GroupList groups;
- GroupList::iterator current;
-
- // add a new group identifier to the list
- void newGroup() {
- std::ostringstream groupId(groupPrefix, ios_base::out|ios_base::ate);
- groupId << std::string(":") << groupSuffix++;
- uint size = (randomizeSize) ? randomizer(groupSize) : groupSize;
- QPID_LOG(trace, "New group: GROUPID=[" << groupId.str() << "] size=" << size << " this=" << this);
- GroupState group( groupId.str(), size );
- groups.push_back( group );
- }
-
-public:
- GroupGenerator( const std::string& prefix,
- const uint t,
- const uint size,
- const bool randomize,
- const uint i)
- : groupPrefix(prefix), groupSize(size),
- randomizeSize(randomize), interleave(i), groupSuffix(0), total(t)
- {
- QPID_LOG(trace, "New group generator: PREFIX=[" << prefix << "] total=" << total << " size=" << size << " rand=" << randomize << " interleave=" << interleave << " this=" << this);
- for (uint i = 0; i < 1 || i < interleave; ++i) {
- newGroup();
- }
- current = groups.begin();
- }
-
- bool genGroup(std::string& groupId, uint& seq, bool& eos)
- {
- if (!total) return false;
- --total;
- if (current == groups.end())
- current = groups.begin();
- groupId = current->id;
- seq = current->count++;
- if (current->count == current->size) {
- QPID_LOG(trace, "Last msg for " << current->id << ", " << current->count << " this=" << this);
- eos = true;
- if (total >= interleave) { // need a new group to replace this one
- newGroup();
- groups.erase(current++);
- } else ++current;
- } else {
- ++current;
- eos = total < interleave; // mark eos on the last message of each group
- }
- QPID_LOG(trace, "SENDING GROUPID=[" << groupId << "] seq=" << seq << " eos=" << eos << " this=" << this);
- return true;
- }
-};
-
-
-
-class Client : public qpid::sys::Runnable
-{
-public:
- typedef boost::shared_ptr<Client> shared_ptr;
- enum State {ACTIVE, DONE, FAILURE};
- Client( const std::string& n, const Options& o ) : name(n), opts(o), state(ACTIVE), stopped(false) {}
- virtual ~Client() {}
- State getState() { return state; }
- void testFailed( const std::string& reason ) { state = FAILURE; error << "Client '" << name << "' failed: " << reason; }
- void clientDone() { if (state == ACTIVE) state = DONE; }
- qpid::sys::Thread& getThread() { return thread; }
- const std::string getErrorMsg() { return error.str(); }
- void stop() {stopped = true;}
- const std::string& getName() { return name; }
-
-protected:
- const std::string name;
- const Options& opts;
- qpid::sys::Thread thread;
- ostringstream error;
- State state;
- bool stopped;
-};
-
-
-class Consumer : public Client
-{
- GroupChecker& checker;
-
-public:
- Consumer(const std::string& n, const Options& o, GroupChecker& c ) : Client(n, o), checker(c) {};
- virtual ~Consumer() {};
-
- void run()
- {
- Connection connection;
- try {
- connection = Connection(opts.url, opts.connectionOptions);
- connection.open();
- std::auto_ptr<FailoverUpdates> updates(opts.failoverUpdates ? new FailoverUpdates(connection) : 0);
- Session session = connection.createSession();
- Receiver receiver = session.createReceiver(opts.address);
- receiver.setCapacity(opts.capacity);
- Message msg;
- uint count = 0;
-
- while (!stopped) {
- if (receiver.fetch(msg, Duration::SECOND)) { // msg retrieved
- qpid::types::Variant::Map& properties = msg.getProperties();
- std::string groupId = properties[opts.groupKey];
- uint groupSeq = properties[SN];
- bool eof = properties[EOS];
-
- QPID_LOG(trace, "RECVING GROUPID=[" << groupId << "] seq=" << groupSeq << " eos=" << eof << " name=" << name);
-
- qpid::sys::usleep(10);
-
- if (!checker.checkSequence( groupId, groupSeq, name )) {
- ostringstream msg;
- msg << "Check sequence failed. Group=" << groupId << " rcvd seq=" << groupSeq << " expected=" << checker.getNextExpectedSequence( groupId );
- testFailed( msg.str() );
- break;
- } else if (eof) {
- if (!checker.eraseGroup( groupId, name )) {
- ostringstream msg;
- msg << "Erase group failed. Group=" << groupId << " rcvd seq=" << groupSeq;
- testFailed( msg.str() );
- break;
- }
- }
-
- ++count;
- if (opts.ackFrequency && (count % opts.ackFrequency == 0)) {
- session.acknowledge();
- }
- // Clear out message properties & content for next iteration.
- msg = Message(); // TODO aconway 2010-12-01: should be done by fetch
- } else if (checker.allMsgsConsumed()) // timed out, nothing else to do?
- break;
- }
- session.acknowledge();
- session.close();
- connection.close();
- } catch(const std::exception& error) {
- ostringstream msg;
- msg << "consumer error: " << error.what();
- testFailed( msg.str() );
- connection.close();
- }
- clientDone();
- QPID_LOG(trace, "Consuming client " << name << " completed.");
- }
-};
-
-
-
-class Producer : public Client
-{
- GroupChecker& checker;
- GroupGenerator generator;
-
-public:
- Producer(const std::string& n, const Options& o, GroupChecker& c)
- : Client(n, o), checker(c),
- generator( n, o.messages, o.groupSize, o.randomizeSize, o.interleave )
- {};
- virtual ~Producer() {};
-
- void run()
- {
- Connection connection;
- try {
- connection = Connection(opts.url, opts.connectionOptions);
- connection.open();
- std::auto_ptr<FailoverUpdates> updates(opts.failoverUpdates ? new FailoverUpdates(connection) : 0);
- Session session = connection.createSession();
- Sender sender = session.createSender(opts.address);
- if (opts.capacity) sender.setCapacity(opts.capacity);
- Message msg;
- msg.setDurable(opts.durable);
- std::string groupId;
- uint seq;
- bool eos;
- uint sent = 0;
-
- qpid::sys::AbsTime start = qpid::sys::now();
- int64_t interval = 0;
- if (opts.sendRate) interval = qpid::sys::TIME_SEC/opts.sendRate;
-
- while (!stopped && generator.genGroup(groupId, seq, eos)) {
- msg.getProperties()[opts.groupKey] = groupId;
- msg.getProperties()[SN] = seq;
- msg.getProperties()[EOS] = eos;
- checker.sendingSequence( groupId, seq, eos, name );
-
- sender.send(msg);
- ++sent;
-
- if (opts.sendRate) {
- qpid::sys::AbsTime waitTill(start, sent*interval);
- int64_t delay = qpid::sys::Duration(qpid::sys::now(), waitTill);
- if (delay > 0) qpid::sys::usleep(delay/qpid::sys::TIME_USEC);
- }
- }
- session.sync();
- session.close();
- connection.close();
- } catch(const std::exception& error) {
- ostringstream msg;
- msg << "producer '" << name << "' error: " << error.what();
- testFailed(msg.str());
- connection.close();
- }
- clientDone();
- QPID_LOG(trace, "Producing client " << name << " completed.");
- }
-};
-
-
-}} // namespace qpid::tests
-
-using namespace qpid::tests;
-
-int main(int argc, char ** argv)
-{
- int status = 0;
- try {
- Options opts;
- if (opts.parse(argc, argv)) {
-
- GroupChecker state( opts.senders * opts.messages,
- opts.allowDuplicates);
- std::vector<Client::shared_ptr> clients;
-
- if (opts.randomizeSize) srand((unsigned int)qpid::sys::SystemInfo::getProcessId());
-
- // fire off the producers && consumers
- for (size_t j = 0; j < opts.senders; ++j) {
- ostringstream name;
- name << opts.prefix << "P_" << j;
- clients.push_back(Client::shared_ptr(new Producer( name.str(), opts, state )));
- clients.back()->getThread() = qpid::sys::Thread(*clients.back());
- }
- for (size_t j = 0; j < opts.receivers; ++j) {
- ostringstream name;
- name << opts.prefix << "C_" << j;
- clients.push_back(Client::shared_ptr(new Consumer( name.str(), opts, state )));
- clients.back()->getThread() = qpid::sys::Thread(*clients.back());
- }
-
- // wait for all pubs/subs to finish.... or for consumers to fail or stall.
- uint stalledTime = 0;
- bool done;
- bool clientFailed = false;
- do {
- uint lastCount = state.getConsumedTotal();
- qpid::sys::usleep( 1000000 );
-
- // check each client for status
- done = true;
- for (std::vector<Client::shared_ptr>::iterator i = clients.begin();
- i != clients.end(); ++i) {
- QPID_LOG(debug, "Client " << (*i)->getName() << " state=" << (*i)->getState());
- if ((*i)->getState() == Client::FAILURE) {
- QPID_LOG(error, argv[0] << ": test failed with client error: " << (*i)->getErrorMsg());
- clientFailed = true;
- done = true;
- break; // exit test.
- } else if ((*i)->getState() != Client::DONE) {
- done = false;
- }
- }
-
- if (!done) {
- // check that consumers are still receiving messages
- if (lastCount == state.getConsumedTotal())
- stalledTime++;
- else {
- lastCount = state.getConsumedTotal();
- stalledTime = 0;
- }
- }
-
- QPID_LOG(debug, "Consumed to date = " << state.getConsumedTotal() <<
- " Published to date = " << state.getPublishedTotal() <<
- " total=" << opts.senders * opts.messages );
-
- } while (!done && stalledTime < opts.timeout);
-
- if (clientFailed) {
- status = 1;
- } else if (stalledTime >= opts.timeout) {
- QPID_LOG(error, argv[0] << ": test failed due to stalled consumer." );
- status = 2;
- }
-
- // Wait for started threads.
- for (std::vector<Client::shared_ptr>::iterator i = clients.begin();
- i != clients.end(); ++i) {
- (*i)->stop();
- (*i)->getThread().join();
- }
-
- if (opts.printReport && !status) state.print(std::cout);
- } else status = 4;
- } catch(const std::exception& error) {
- QPID_LOG(error, argv[0] << ": " << error.what());
- status = 3;
- }
- QPID_LOG(trace, "TEST DONE [" << status << "]");
-
- return status;
-}
diff --git a/cpp/src/tests/python_tests b/cpp/src/tests/python_tests
index 0216b5ca7b..e367004a71 100755
--- a/cpp/src/tests/python_tests
+++ b/cpp/src/tests/python_tests
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one
diff --git a/cpp/src/tests/qpid-cluster-benchmark b/cpp/src/tests/qpid-cluster-benchmark
index ff787a46dd..4408e63866 100755
--- a/cpp/src/tests/qpid-cluster-benchmark
+++ b/cpp/src/tests/qpid-cluster-benchmark
@@ -7,9 +7,9 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -19,40 +19,21 @@
#
# Benchmark script for comparing cluster performance.
+#PORT=":5555"
+BROKER=`echo $HOSTS | awk '{print $1}'` # Single broker
+BROKERS=`echo $HOSTS | sed "s/\>/$PORT/g;s/ /,/g"` # Broker URL list
+COUNT=100000
+RATE=20000 # Rate to throttle senders for latency results
+run_test() { echo $*; "$@"; echo; echo; echo; }
-# Default values
-PORT="5672"
-COUNT=10000
-FLOW=100 # Flow control limit on queue depth for latency.
-REPEAT=10
-QUEUES=4
-CLIENTS=3
-
-while getopts "p:c:f:r:t:b:q:c" opt; do
- case $opt in
- p) PORT=$OPTARG;;
- c) COUNT=$OPTARG;;
- f) FLOW=$OPTARG;;
- r) REPEAT=$OPTARG;;
- s) SCALE=$OPTARG;;
- b) BROKERS=$OPTARG;;
- q) QUEUES=$OPTARG;;
- c) CLIENTS=$OPTARG;;
- *) echo "Unknown option"; exit 1;;
- esac
-done
-
-BROKERS=${BROKERS:-$(echo $HOSTS | sed "s/\>/:$PORT/g;s/ /,/g")} # Broker URL list
-BROKER=`echo $BROKERS | awk -F, '{print $1}'` # First broker
+# Thruput, unshared queue
+run_test qpid-cpp-benchmark --repeat 10 -b $BROKER --no-timestamp -m $COUNT
-run_test() { echo $*; shift; "$@"; echo; echo; echo; }
+# Latency
+run_test qpid-cpp-benchmark --repeat 10 -b $BROKER --connection-options '{tcp-nodelay:true}' -m `expr $COUNT / 2` --send-rate $RATE
# Multiple pubs/subs connect via multiple brokers (active-active)
-run_test "multi-host-thruput" qpid-cpp-benchmark --repeat $REPEAT -b $BROKERS --no-timestamp --summarize -q$QUEUES -s$CLIENTS -r$CLIENTS -m $COUNT
+run_test qpid-cpp-benchmark --repeat 10 -b $BROKERS --no-timestamp --summarize -s10 -r10 -m `expr $COUNT / 10`
# Multiple pubs/subs connect via single broker (active-passive)
-run_test "single-host-thruput" qpid-cpp-benchmark --repeat $REPEAT -b $BROKER --no-timestamp --summarize -q$QUEUES -s$CLIENTS -r$CLIENTS -m $COUNT
-
-# Latency
-run_test "latency" qpid-cpp-benchmark --repeat $REPEAT -b $BROKER --connection-options '{tcp-nodelay:true}' -m $COUNT --flow-control $FLOW
-
+run_test qpid-cpp-benchmark --repeat 10 -b $BROKER --no-timestamp --summarize -s10 -r10 -m `expr $COUNT / 10`
diff --git a/cpp/src/tests/qpid-cpp-benchmark b/cpp/src/tests/qpid-cpp-benchmark
index 300d34774f..1f77226b4d 100755
--- a/cpp/src/tests/qpid-cpp-benchmark
+++ b/cpp/src/tests/qpid-cpp-benchmark
@@ -77,20 +77,6 @@ def ssh_command(host, command):
"""Convert command into an ssh command on host with quoting"""
return ["ssh", host] + [posix_quote(arg) for arg in command]
-class Clients:
- def __init__(self): self.clients=[]
-
- def add(self, client):
- self.clients.append(client)
- return client
-
- def kill(self):
- for c in self.clients:
- try: c.kill()
- except: pass
-
-clients = Clients()
-
def start_receive(queue, index, opts, ready_queue, broker, host):
address_opts=["create:receiver"] + opts.receive_option
if opts.durable: address_opts += ["node:{durable:true}"]
@@ -115,7 +101,7 @@ def start_receive(queue, index, opts, ready_queue, broker, host):
if opts.connection_options:
command += ["--connection-options",opts.connection_options]
if host: command = ssh_command(host, command)
- return clients.add(Popen(command, stdout=PIPE))
+ return Popen(command, stdout=PIPE)
def start_send(queue, opts, broker, host):
address="%s;{%s}"%(queue,",".join(opts.send_option))
@@ -136,7 +122,7 @@ def start_send(queue, opts, broker, host):
if opts.connection_options:
command += ["--connection-options",opts.connection_options]
if host: command = ssh_command(host, command)
- return clients.add(Popen(command, stdout=PIPE))
+ return Popen(command, stdout=PIPE)
def first_line(p):
out,err=p.communicate()
@@ -147,11 +133,7 @@ def delete_queues(queues, broker):
c = qpid.messaging.Connection(broker)
c.open()
for q in queues:
- try:
- s = c.session()
- snd = s.sender("%s;{delete:always}"%(q))
- snd.close()
- s.sync()
+ try: s = c.session().sender("%s;{delete:always}"%(q))
except qpid.messaging.exceptions.NotFound: pass # Ignore "no such queue"
c.close()
@@ -163,6 +145,7 @@ def print_header(timestamp):
def parse(parser, lines): # Parse sender/receiver output
for l in lines:
fn_val = zip(parser, l)
+
return [map(lambda p: p[0](p[1]), zip(parser,line.split())) for line in lines]
def parse_senders(senders):
@@ -173,12 +156,11 @@ def parse_receivers(receivers):
def print_data(send_stats, recv_stats):
for send,recv in map(None, send_stats, recv_stats):
- line=""
- if send: line += "%d"%send[0]
+ if send: print send[0],
if recv:
- line += "\t\t%d"%recv[0]
- if len(recv) == 4: line += "\t%.2f\t%.2f\t%.2f"%tuple(recv[1:])
- print line
+ print "\t\t%d"%recv[0],
+ if len(recv) == 4: print "\t%.2f\t%.2f\t%.2f"%tuple(recv[1:]),
+ print
def print_summary(send_stats, recv_stats):
def avg(s): sum(s) / len(s)
@@ -202,11 +184,11 @@ class ReadyReceiver:
self.receiver = self.connection.session().receiver(
"%s;{create:receiver,delete:receiver,node:{durable:false}}"%(queue))
self.receiver.session.sync()
- self.timeout=10
+ self.timeout=2
def wait(self, receivers):
try:
- for i in receivers: self.receiver.fetch(self.timeout)
+ for i in xrange(len(receivers)): self.receiver.fetch(self.timeout)
self.connection.close()
except qpid.messaging.Empty:
for r in receivers:
@@ -215,8 +197,7 @@ class ReadyReceiver:
raise Exception("Receiver error: %s"%(out))
raise Exception("Timed out waiting for receivers to be ready")
-def flatten(l):
- return sum(map(lambda s: re.split(re.compile("\s*,\s*|\s+"), s), l), [])
+def flatten(l): return sum(map(lambda s: s.split(","), l),[])
class RoundRobin:
def __init__(self,items):
@@ -240,22 +221,20 @@ def main():
receive_out = ""
ready_queue="%s-ready"%(opts.queue_name)
queues = ["%s-%s"%(opts.queue_name, i) for i in xrange(opts.queues)]
- try:
- for i in xrange(opts.repeat):
- delete_queues(queues, opts.broker[0])
- ready_receiver = ReadyReceiver(ready_queue, opts.broker[0])
- receivers = [start_receive(q, j, opts, ready_queue, brokers.next(), client_hosts.next())
- for q in queues for j in xrange(opts.receivers)]
- ready_receiver.wait(filter(None, receivers)) # Wait for receivers to be ready.
- senders = [start_send(q, opts,brokers.next(), client_hosts.next())
- for q in queues for j in xrange(opts.senders)]
- if opts.report_header and i == 0: print_header(opts.timestamp)
- send_stats=parse_senders(senders)
- recv_stats=parse_receivers(receivers)
- if opts.summarize: print_summary(send_stats, recv_stats)
- else: print_data(send_stats, recv_stats)
- delete_queues(queues, opts.broker[0])
- finally: clients.kill() # No strays
+ for i in xrange(opts.repeat):
+ delete_queues(queues, opts.broker[0])
+ ready_receiver = ReadyReceiver(ready_queue, opts.broker[0])
+ receivers = [start_receive(q, j, opts, ready_queue, brokers.next(), client_hosts.next())
+ for q in queues for j in xrange(opts.receivers)]
+ ready_receiver.wait(filter(None, receivers)) # Wait for receivers to be ready.
+ senders = [start_send(q, opts,brokers.next(), client_hosts.next())
+ for q in queues for j in xrange(opts.senders)]
+ if opts.report_header and i == 0: print_header(opts.timestamp)
+ send_stats=parse_senders(senders)
+ recv_stats=parse_receivers(receivers)
+ if opts.summarize: print_summary(send_stats, recv_stats)
+ else: print_data(send_stats, recv_stats)
+ delete_queues(queues, opts.broker[0])
if __name__ == "__main__": main()
diff --git a/cpp/src/tests/qpid-ctrl b/cpp/src/tests/qpid-ctrl
index 4246c57898..7b46c190fb 100755
--- a/cpp/src/tests/qpid-ctrl
+++ b/cpp/src/tests/qpid-ctrl
@@ -92,10 +92,7 @@ try:
arguments = {}
for a in args:
name, val = nameval(a)
- if val[0] == '{' or val[0] == '[':
- arguments[name] = eval(val)
- else:
- arguments[name] = val
+ arguments[name] = val
content = {
"_object_id": {"_object_name": object_name},
"_method_name": method_name,
diff --git a/cpp/src/tests/qpid-perftest.cpp b/cpp/src/tests/qpid-perftest.cpp
index dd81354adb..4d7b563c8c 100644
--- a/cpp/src/tests/qpid-perftest.cpp
+++ b/cpp/src/tests/qpid-perftest.cpp
@@ -396,7 +396,7 @@ struct Controller : public Client {
void run() { // Controller
try {
// Wait for subscribers to be ready.
- process(opts.totalSubs, fqn("sub_ready"), boost::bind(expect, _1, "ready"));
+ process(opts.totalSubs, fqn("sub_ready"), bind(expect, _1, "ready"));
LocalQueue pubDone;
LocalQueue subDone;
@@ -423,10 +423,8 @@ struct Controller : public Client {
process(opts.totalSubs, subDone, fqn("sub_done"), boost::ref(subRates));
AbsTime end=now();
+
double time=secs(start, end);
- if (time <= 0.0) {
- throw Exception("ERROR: Test completed in zero seconds. Try again with a larger message count.");
- }
double txrate=opts.transfers/time;
double mbytes=(txrate*opts.size)/(1024*1024);
@@ -510,11 +508,10 @@ struct PublishThread : public Client {
}
SubscriptionManager subs(session);
LocalQueue lq;
- subs.setFlowControl(0, SubscriptionManager::UNLIMITED, false);
- Subscription cs = subs.subscribe(lq, fqn("pub_start"));
+ subs.setFlowControl(1, SubscriptionManager::UNLIMITED, true);
+ subs.subscribe(lq, fqn("pub_start"));
for (size_t j = 0; j < opts.iterations; ++j) {
- cs.grantMessageCredit(1);
expect(lq.pop().getData(), "start");
AbsTime start=now();
for (size_t i=0; i<opts.count; i++) {
@@ -546,9 +543,6 @@ struct PublishThread : public Client {
if (opts.confirm) session.sync();
AbsTime end=now();
double time=secs(start,end);
- if (time <= 0.0) {
- throw Exception("ERROR: Test completed in zero seconds. Try again with a larger message count.");
- }
// Send result to controller.
Message report(lexical_cast<string>(opts.count/time), fqn("pub_done"));
@@ -644,9 +638,7 @@ struct SubscribeThread : public Client {
//
// For now verify order only for a single publisher.
size_t offset = opts.uniqueData ? 5 /*marker is 'data:'*/ : 0;
- size_t n;
- memcpy (&n, reinterpret_cast<const char*>(msg.getData().data() + offset),
- sizeof(n));
+ size_t n = *reinterpret_cast<const size_t*>(msg.getData().data() + offset);
if (opts.pubs == 1) {
if (opts.subs == 1 || opts.mode == FANOUT) verify(n==expect, "==", expect, n);
else verify(n>=expect, ">=", expect, n);
diff --git a/cpp/src/tests/qpid-receive.cpp b/cpp/src/tests/qpid-receive.cpp
index 9c713e872a..012d544a2e 100644
--- a/cpp/src/tests/qpid-receive.cpp
+++ b/cpp/src/tests/qpid-receive.cpp
@@ -53,7 +53,6 @@ struct Options : public qpid::Options
bool forever;
uint messages;
bool ignoreDuplicates;
- bool checkRedelivered;
uint capacity;
uint ackFrequency;
uint tx;
@@ -76,7 +75,6 @@ struct Options : public qpid::Options
forever(false),
messages(0),
ignoreDuplicates(false),
- checkRedelivered(false),
capacity(1000),
ackFrequency(100),
tx(0),
@@ -94,11 +92,10 @@ struct Options : public qpid::Options
("broker,b", qpid::optValue(url, "URL"), "url of broker to connect to")
("address,a", qpid::optValue(address, "ADDRESS"), "address to receive from")
("connection-options", qpid::optValue(connectionOptions, "OPTIONS"), "options for the connection")
- ("timeout", qpid::optValue(timeout, "TIMEOUT"), "timeout in seconds to wait before exiting")
+ ("timeout,t", qpid::optValue(timeout, "TIMEOUT"), "timeout in seconds to wait before exiting")
("forever,f", qpid::optValue(forever), "ignore timeout and wait forever")
("messages,m", qpid::optValue(messages, "N"), "Number of messages to receive; 0 means receive indefinitely")
("ignore-duplicates", qpid::optValue(ignoreDuplicates), "Detect and ignore duplicates (by checking 'sn' header)")
- ("check-redelivered", qpid::optValue(checkRedelivered), "Fails with exception if a duplicate is not marked as redelivered (only relevant when ignore-duplicates is selected)")
("capacity", qpid::optValue(capacity, "N"), "Pre-fetch window (0 implies no pre-fetch)")
("ack-frequency", qpid::optValue(ackFrequency, "N"), "Ack frequency (0 implies none of the messages will get accepted)")
("tx", qpid::optValue(tx, "N"), "batch size for transactions (0 implies transaction are not used)")
@@ -219,8 +216,6 @@ int main(int argc, char ** argv)
std::cout << msg.getContent() << std::endl;//TODO: handle map or list messages
if (opts.messages && count >= opts.messages) done = true;
}
- } else if (opts.checkRedelivered && !msg.getRedelivered()) {
- throw qpid::Exception("duplicate sequence number received, message not marked as redelivered!");
}
if (opts.tx && (count % opts.tx == 0)) {
if (opts.rollbackFrequency && (++txCount % opts.rollbackFrequency == 0)) {
@@ -262,7 +257,7 @@ int main(int argc, char ** argv)
return 0;
}
} catch(const std::exception& error) {
- std::cerr << "qpid-receive: " << error.what() << std::endl;
+ std::cerr << "Failure: " << error.what() << std::endl;
connection.close();
return 1;
}
diff --git a/cpp/src/tests/qpid-send.cpp b/cpp/src/tests/qpid-send.cpp
index b1213a484f..6a7e7838ce 100644
--- a/cpp/src/tests/qpid-send.cpp
+++ b/cpp/src/tests/qpid-send.cpp
@@ -28,7 +28,6 @@
#include <qpid/messaging/FailoverUpdates.h>
#include <qpid/sys/Time.h>
#include <qpid/sys/Monitor.h>
-#include <qpid/sys/SystemInfo.h>
#include "TestOptions.h"
#include "Statistics.h"
@@ -77,11 +76,6 @@ struct Options : public qpid::Options
uint flowControl;
bool sequence;
bool timestamp;
- std::string groupKey;
- std::string groupPrefix;
- uint groupSize;
- bool groupRandSize;
- uint groupInterleave;
Options(const std::string& argv0=std::string())
: qpid::Options("Options"),
@@ -106,23 +100,19 @@ struct Options : public qpid::Options
sendRate(0),
flowControl(0),
sequence(true),
- timestamp(true),
- groupPrefix("GROUP-"),
- groupSize(10),
- groupRandSize(false),
- groupInterleave(1)
+ timestamp(true)
{
addOptions()
("broker,b", qpid::optValue(url, "URL"), "url of broker to connect to")
- ("address,a", qpid::optValue(address, "ADDRESS"), "address to send to")
+ ("address,a", qpid::optValue(address, "ADDRESS"), "address to drain from")
("connection-options", qpid::optValue(connectionOptions, "OPTIONS"), "options for the connection")
("messages,m", qpid::optValue(messages, "N"), "stop after N messages have been sent, 0 means no limit")
("id,i", qpid::optValue(id, "ID"), "use the supplied id instead of generating one")
("reply-to", qpid::optValue(replyto, "REPLY-TO"), "specify reply-to address")
("send-eos", qpid::optValue(sendEos, "N"), "Send N EOS messages to mark end of input")
("durable", qpid::optValue(durable, "yes|no"), "Mark messages as durable.")
- ("ttl", qpid::optValue(ttl, "msecs"), "Time-to-live for messages, in milliseconds")
- ("priority", qpid::optValue(priority, "PRIORITY"), "Priority for messages (higher value implies higher priority)")
+ ("ttl", qpid::optValue(ttl, "msecs"), "Time-to-live for messages, in milliseconds")
+ ("priority", qpid::optValue(priority, "PRIORITY"), "Priority for messages (higher value implies higher priority)")
("property,P", qpid::optValue(properties, "NAME=VALUE"), "specify message property")
("correlation-id", qpid::optValue(correlationid, "ID"), "correlation-id for message")
("user-id", qpid::optValue(userid, "USERID"), "userid for message")
@@ -141,11 +131,6 @@ struct Options : public qpid::Options
("flow-control", qpid::optValue(flowControl,"N"), "Do end to end flow control to limit queue depth to 2*N. 0 means no flow control.")
("sequence", qpid::optValue(sequence, "yes|no"), "Add a sequence number messages property (required for duplicate/lost message detection)")
("timestamp", qpid::optValue(timestamp, "yes|no"), "Add a time stamp messages property (required for latency measurement)")
- ("group-key", qpid::optValue(groupKey, "KEY"), "Generate groups of messages using message header 'KEY' to hold the group identifier")
- ("group-prefix", qpid::optValue(groupPrefix, "STRING"), "Generate group identifers with 'STRING' prefix (if group-key specified)")
- ("group-size", qpid::optValue(groupSize, "N"), "Number of messages per a group (if group-key specified)")
- ("group-randomize-size", qpid::optValue(groupRandSize), "Randomize the number of messages per group to [1...group-size] (if group-key specified)")
- ("group-interleave", qpid::optValue(groupInterleave, "N"), "Simultaineously interleave messages from N different groups (if group-key specified)")
("help", qpid::optValue(help), "print this usage statement");
add(log);
}
@@ -267,68 +252,6 @@ class MapContentGenerator : public ContentGenerator {
const Options& opts;
};
-// tag each generated message with a group identifer
-//
-class GroupGenerator {
-public:
- GroupGenerator(const std::string& key,
- const std::string& prefix,
- const uint size,
- const bool randomize,
- const uint interleave)
- : groupKey(key), groupPrefix(prefix), groupSize(size),
- randomizeSize(randomize), groupSuffix(0)
- {
- if (randomize) srand((unsigned int)qpid::sys::SystemInfo::getProcessId());
-
- for (uint i = 0; i < 1 || i < interleave; ++i) {
- newGroup();
- }
- current = groups.begin();
- }
-
- void setGroupInfo(Message &msg)
- {
- if (current == groups.end())
- current = groups.begin();
- msg.getProperties()[groupKey] = current->id;
- // std::cout << "SENDING GROUPID=[" << current->id << "]" << std::endl;
- if (++(current->count) == current->size) {
- newGroup();
- groups.erase(current++);
- } else
- ++current;
- }
-
- private:
- const std::string& groupKey;
- const std::string& groupPrefix;
- const uint groupSize;
- const bool randomizeSize;
-
- uint groupSuffix;
-
- struct GroupState {
- std::string id;
- const uint size;
- uint count;
- GroupState( const std::string& i, const uint s )
- : id(i), size(s), count(0) {}
- };
- typedef std::list<GroupState> GroupList;
- GroupList groups;
- GroupList::iterator current;
-
- void newGroup() {
- std::ostringstream groupId(groupPrefix, ios_base::out|ios_base::ate);
- groupId << groupSuffix++;
- uint size = (randomizeSize) ? (rand() % groupSize) + 1 : groupSize;
- // std::cout << "New group: GROUPID=[" << groupId.str() << "] size=" << size << std::endl;
- GroupState group( groupId.str(), size );
- groups.push_back( group );
- }
-};
-
int main(int argc, char ** argv)
{
Connection connection;
@@ -373,14 +296,6 @@ int main(int argc, char ** argv)
else
contentGen.reset(new FixedContentGenerator(opts.contentString));
- std::auto_ptr<GroupGenerator> groupGen;
- if (!opts.groupKey.empty())
- groupGen.reset(new GroupGenerator(opts.groupKey,
- opts.groupPrefix,
- opts.groupSize,
- opts.groupRandSize,
- opts.groupInterleave));
-
qpid::sys::AbsTime start = qpid::sys::now();
int64_t interval = 0;
if (opts.sendRate) interval = qpid::sys::TIME_SEC/opts.sendRate;
@@ -397,6 +312,9 @@ int main(int argc, char ** argv)
++sent;
if (opts.sequence)
msg.getProperties()[SN] = sent;
+ if (opts.timestamp)
+ msg.getProperties()[TS] = int64_t(
+ qpid::sys::Duration(qpid::sys::EPOCH, qpid::sys::now()));
if (opts.flowControl) {
if ((sent % opts.flowControl) == 0) {
msg.setReplyTo(flowControlAddress);
@@ -405,12 +323,6 @@ int main(int argc, char ** argv)
else
msg.setReplyTo(Address()); // Clear the reply address.
}
- if (groupGen.get())
- groupGen->setGroupInfo(msg);
-
- if (opts.timestamp)
- msg.getProperties()[TS] = int64_t(
- qpid::sys::Duration(qpid::sys::EPOCH, qpid::sys::now()));
sender.send(msg);
reporter.message(msg);
@@ -456,7 +368,7 @@ int main(int argc, char ** argv)
return 0;
}
} catch(const std::exception& error) {
- std::cerr << "qpid-send: " << error.what() << std::endl;
+ std::cout << "Failed: " << error.what() << std::endl;
connection.close();
return 1;
}
diff --git a/cpp/src/tests/qrsh.cpp b/cpp/src/tests/qrsh.cpp
new file mode 100644
index 0000000000..0cb52b6b05
--- /dev/null
+++ b/cpp/src/tests/qrsh.cpp
@@ -0,0 +1,169 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include <qpid/client/Connection.h>
+#include <qpid/client/Session.h>
+#include <qpid/client/AsyncSession.h>
+#include <qpid/client/Message.h>
+#include <qpid/client/MessageListener.h>
+#include <qpid/client/SubscriptionManager.h>
+
+#include <stdio.h>
+#include <cstdlib>
+#include <iostream>
+
+#include <sstream>
+
+using namespace qpid::client;
+using namespace qpid::framing;
+
+using namespace std;
+
+namespace qpid {
+namespace tests {
+
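+// The two responses handled below are the ones qrsh_server sends back on
+// the amq.fanout exchange (see qrsh_server.cpp in this patch): a
+// "wait_response <exit code>" message and a "get_response <file contents>"
+// message.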
+class ResponseListener : public MessageListener
+{
+ public :
+
+ int exitCode;
+
+ ResponseListener ( SubscriptionManager & subscriptions )
+ : exitCode(-1),
+ subscriptions ( subscriptions )
+ {
+ }
+
+ virtual void
+ received ( Message & message )
+ {
+ char first_word[1000];
+ sscanf ( message.getData().c_str(), "%s", first_word );
+
+ if ( ! strcmp ( first_word, "wait_response" ) )
+ {
+ // If we receive a message here, parse out the exit code.
+ sscanf ( message.getData().c_str(), "%*s%d", & exitCode );
+ subscriptions.cancel(message.getDestination());
+ }
+ else
+ if ( ! strcmp ( first_word, "get_response" ) )
+ {
+ // The remainder of the message is the file we requested.
+ fprintf ( stdout,
+ "%s",
+ message.getData().c_str() + strlen("get_response" )
+ );
+ subscriptions.cancel(message.getDestination());
+ }
+ }
+
+
+ private :
+
+ SubscriptionManager & subscriptions;
+};
+
+}} // namespace qpid::tests
+
+using namespace qpid::tests;
+
+/*
+ * argv[1] host
+ * argv[2] port
+ * argv[3] server name
+ * argv[4] command name
+ * argv[5..N] args to the command
+ */
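+/*
+ * Example invocation (host, port, server name and command path are
+ * illustrative; compare the qrsh_utils scripts added by this patch):
+ *
+ *   qrsh 127.0.0.1 5813 mrg23 command_1 /path/to/my_command foo bar baz
+ */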
+int
+main ( int argc, char ** argv )
+{
+ const char* host = argv[1];
+ int port = atoi(argv[2]);
+
+
+ Connection connection;
+
+ try
+ {
+ connection.open ( host, port );
+ Session session = connection.newSession ( );
+
+ // Make a queue and bind it to fanout.
+ string myQueue = session.getId().getName();
+
+ session.queueDeclare ( arg::queue=myQueue,
+ arg::exclusive=true,
+ arg::autoDelete=true
+ );
+
+ session.exchangeBind ( arg::exchange="amq.fanout",
+ arg::queue=myQueue,
+ arg::bindingKey="my-key"
+ );
+
+    // Get ready to listen for a wait-response or a get-response.
+    // Even if this command is not one of the types that produces
+    // a response, get ready anyway.
+ SubscriptionManager subscriptions ( session );
+ ResponseListener responseListener ( subscriptions );
+ subscriptions.subscribe ( responseListener, myQueue );
+
+ bool response_command = false;
+ if(! strcmp("exec_wait", argv[4] ))
+ response_command = true;
+ else
+ if(! strcmp("exited", argv[4] ))
+ response_command = true;
+ else
+ if(! strcmp("get", argv[4] ))
+ response_command = true;
+
+ // Send the payload message.
+ // Skip "qrsh host_name port"
+ Message message;
+ stringstream ss;
+ for ( int i = 3; i < argc; ++ i )
+ ss << argv[i] << ' ';
+
+ message.setData ( ss.str() );
+
+ session.messageTransfer(arg::content=message,
+ arg::destination="amq.fanout");
+
+ if ( response_command )
+ subscriptions.run();
+
+ session.close();
+ connection.close();
+ return responseListener.exitCode;
+ }
+ catch ( exception const & e)
+ {
+ cerr << e.what() << endl;
+ }
+
+ return 1;
+}
+
+
+
diff --git a/cpp/src/tests/qrsh_run.cpp b/cpp/src/tests/qrsh_run.cpp
new file mode 100644
index 0000000000..cfdd0cef80
--- /dev/null
+++ b/cpp/src/tests/qrsh_run.cpp
@@ -0,0 +1,321 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include <iostream>
+#include <sstream>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+
+using namespace std;
+
+
+
+int
+main ( int argc, char ** argv )
+{
+ int exit_code = -1;
+ int fd[2];
+ int my_pid = getpid();
+ int child_pid;
+
+ pipe(fd);
+
+ char const * root_dir = argv[1]; // This arg is prepended by qrsh_server.
+ char const * child_name = argv[2]; // This arg comes from qrsh.
+ char const * child_path = argv[3]; // This arg comes from qrsh.
+
+    // Debug trace of the arguments we were given.
+ fprintf ( stderr, "MDEBUG qrsh_run: root_dir: |%s|\n", root_dir );
+ fprintf ( stderr, "MDEBUG qrsh_run: child_name: |%s|\n", child_name );
+ fprintf ( stderr, "MDEBUG qrsh_run: child_path: |%s|\n", child_path );
+
+ /*
+ * A named child is one for whom we will create a directory and
+ * store information. There are some magic names that are not
+ * real symbolic names -- but are instead the names of actions.
+ */
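+    /* The action names recognized just below are "exec", "exec_wait" and
+     * "exited"; any other name is treated as a symbolic child name and
+     * gets its own directory under root_dir. */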
+
+ bool named_child = true;
+
+ if ( ! strcmp ( child_name, "exec" ) )
+ named_child = false;
+ else
+ if ( ! strcmp ( child_name, "exec_wait" ) )
+ named_child = false;
+ else
+ if ( ! strcmp ( child_name, "exited" ) )
+ named_child = false;
+ else
+ named_child = true;
+
+ stringstream child_dir_name;
+
+ if ( named_child )
+ {
+ child_dir_name << root_dir
+ << '/'
+ << child_name;
+
+ /*
+ * Make the child directory before forking, or there is
+ * a race in which the child might be trying to make its
+         * stdout and stderr files while we are trying to make
+ * the directory.
+ */
+ if ( -1 == mkdir ( child_dir_name.str().c_str(), 0777 ) )
+ {
+ fprintf ( stderr,
+ "qrsh_run error: Can't mkdir |%s|\n",
+ child_dir_name.str().c_str()
+ );
+ exit ( 1 );
+ }
+
+ }
+ else
+ /*
+ * If this is an 'exited' command that means we are
+ * waiting for a pre-existing child.
+ */
+ if ( ! strcmp ( child_name, "exited" ) )
+ {
+ int wait_pid = atoi(child_path);
+
+ // Find the child's symbolic name.
+ stringstream pid_to_name_file_name;
+ pid_to_name_file_name << root_dir
+ << '/'
+ << wait_pid;
+ FILE * fp = fopen ( pid_to_name_file_name.str().c_str(), "r" );
+ if (! fp)
+ {
+ fprintf ( stderr,
+ "qrsh_run %d error: Can't open pid2name file |%s|.\n",
+ my_pid,
+ pid_to_name_file_name.str().c_str()
+ );
+ exit(1);
+ }
+ char symbolic_name[1000];
+ strcpy ( symbolic_name, "qrsh_no_name" );
+ fscanf ( fp, "%s", symbolic_name );
+ fclose ( fp );
+
+ // Make the name of the child's exit code file.
+ stringstream exit_code_file_name;
+ exit_code_file_name << root_dir
+ << '/'
+ << symbolic_name
+ << "/exit_code";
+
+ struct stat stat_buf;
+ int file_does_not_exist = stat ( exit_code_file_name.str().c_str(), & stat_buf );
+
+ /*
+ * If the result of stat is zero, the file exists, which means that
+ * the command has exited. The question we are being asked here is
+ * "has it exited yet?"
+ */
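+        // Exit status: 1 means the named child has already exited,
+        // 0 means it has not exited yet, 2 means the stat failed for
+        // some other reason.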
+ if ( ! file_does_not_exist )
+ return 1;
+ else
+ if ( errno == ENOENT )
+ return 0;
+ else
+ return 2 ;
+ }
+
+
+    // We are not waiting on a pre-existing child: we have a
+ // new child to create.
+
+ child_pid = fork();
+
+ if ( child_pid == 0 )
+ {
+ // This code is executed in the child process.
+
+ // If it's a *named* child, then redirect its stdout and stderr.
+ if ( named_child )
+ {
+ stringstream stdout_path,
+ stderr_path;
+
+ // Redirect the child's stdout. -----------------
+ stdout_path << root_dir
+ << '/'
+ << child_name
+ << '/'
+ << "stdout";
+
+ int redirected_stdout = open ( stdout_path.str().c_str(),
+ O_WRONLY|O_CREAT|O_TRUNC,
+ S_IRWXU|S_IRWXG|S_IRWXO
+ );
+ if ( redirected_stdout < 0 )
+ {
+ perror ( "qrsh_run: error opening redirected_stdout: " );
+ fprintf ( stderr, "stdout path: |%s|\n", stdout_path.str().c_str() );
+ exit ( 1 );
+ }
+ if ( -1 == dup2 ( redirected_stdout, 1 ) )
+ {
+ perror ( "qrsh_run: dup2 (stdout) error: " );
+ exit(1);
+ }
+
+ // Redirect the child's stderr. -----------------
+ stderr_path << root_dir
+ << '/'
+ << child_name
+ << '/'
+ << "stderr";
+
+ int redirected_stderr = open ( stderr_path.str().c_str(),
+ O_WRONLY|O_CREAT|O_TRUNC,
+ S_IRWXU|S_IRWXG|S_IRWXO
+ );
+ if ( redirected_stderr < 0 )
+ {
+ perror ( "qrsh_run: error opening redirected_stderr: " );
+ fprintf ( stderr, "stderr path: |%s|\n", stderr_path.str().c_str() );
+ exit ( 1 );
+ }
+ if(-1 == dup2 ( redirected_stderr, 2 ) )
+ {
+ perror ( "qrsh_run: dup2 (stderr) error: " );
+ exit(1);
+ }
+ }
+
+ fprintf ( stderr, "MDEBUG ------------- qrsh_run argv -------------\n" );
+ for ( int i = 0; i < argc; ++ i )
+ fprintf ( stderr, "MDEBUG argv[%d] : |%s|\n", i, argv[i] );
+
+ execv ( child_path, argv + 2 );
+ perror ( "qrsh_run: execv error: " );
+ fprintf ( stderr, "on path |%s|\n", child_path );
+ exit ( 1 );
+ }
+ else
+ {
+ // This code is executed in the parent process.
+
+ if ( named_child )
+ {
+ // Write the name-to-pid mapping.
+ stringstream pid_file_name;
+ pid_file_name << child_dir_name.str()
+ << "/pid";
+
+ FILE * fp;
+ if ( ! (fp = fopen ( pid_file_name.str().c_str(), "w") ) )
+ {
+ fprintf ( stderr,
+ "qrsh_run %d error: Can't open file |%s|\n",
+ my_pid,
+ pid_file_name.str().c_str()
+ );
+ exit(1);
+ }
+ fprintf ( fp, "%d\n", child_pid );
+ fclose ( fp );
+
+
+ // Write the pid-to-name mapping.
+ stringstream name_to_pid_file_name;
+ name_to_pid_file_name << root_dir
+ << '/'
+ << child_pid;
+ if(! (fp = fopen ( name_to_pid_file_name.str().c_str(), "w")))
+ {
+ fprintf ( stderr,
+ "qrsh_run %d error: Can't open file |%s|\n",
+ my_pid,
+ name_to_pid_file_name.str().c_str()
+ );
+ exit(1);
+ }
+ fprintf ( fp, "%s\n", child_name );
+ fclose(fp);
+ }
+
+ pid_t awaited_pid;
+ while ( 0 == (awaited_pid = waitpid ( child_pid, & exit_code, WNOHANG)) )
+ {
+ fprintf ( stderr,
+ "qrsh_run %d info: parent: waiting for child %d...\n",
+ my_pid,
+ child_pid
+ );
+ sleep(1);
+ }
+
+ if ( -1 == awaited_pid )
+ {
+ fprintf ( stderr, "qrsh_run error awaiting child!\n" );
+ exit ( 1 );
+ }
+
+ /*
+ * Write the exit code.
+ */
+ exit_code >>= 8;
+
+ if ( named_child )
+ {
+ if ( child_pid == awaited_pid )
+ {
+ stringstream exit_code_file_name;
+ exit_code_file_name << child_dir_name.str()
+ << "/exit_code";
+
+ FILE * fp;
+ if ( ! (fp = fopen ( exit_code_file_name.str().c_str(), "w") ) )
+ {
+ fprintf ( stderr,
+ "qrsh_run error: Can't open file |%s|\n",
+ exit_code_file_name.str().c_str()
+ );
+ exit(1);
+ }
+ fprintf ( fp, "%d\n", exit_code );
+ fclose ( fp );
+ }
+ }
+ }
+
+ fprintf ( stderr, "MDEBUG qrsh_run returning exit code %d\n", exit_code );
+ return exit_code;
+}
+
+
+
+
diff --git a/cpp/src/tests/qrsh_server.cpp b/cpp/src/tests/qrsh_server.cpp
new file mode 100644
index 0000000000..782f1e6c7c
--- /dev/null
+++ b/cpp/src/tests/qrsh_server.cpp
@@ -0,0 +1,1068 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include <stdio.h>
+#include <set>
+#include <string>
+#include <sstream>
+#include <unistd.h>
+#include <cstdlib>
+#include <iostream>
+#include <map>
+#include <dirent.h>
+
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+#include <qpid/client/Connection.h>
+#include <qpid/client/Session.h>
+#include <qpid/client/AsyncSession.h>
+#include <qpid/client/Message.h>
+#include <qpid/client/MessageListener.h>
+#include <qpid/client/SubscriptionManager.h>
+
+
+using namespace qpid::client;
+using namespace qpid::framing;
+using namespace std;
+
+
+namespace qpid {
+namespace tests {
+
+int
+mrand ( int max_desired_val )
+{
+ double zero_to_one = (double) rand() / (double) RAND_MAX;
+ return (int) (zero_to_one * (double) max_desired_val);
+}
+
+
+
+char *
+file2str ( char const * file_name )
+{
+ FILE * fp = fopen ( file_name, "r" );
+ if(! fp)
+ {
+ fprintf ( stderr, "file2str error: can't open file |%s|.\n", file_name );
+ return 0;
+ }
+
+ fseek ( fp, 0, SEEK_END );
+ size_t file_len = (size_t) ftell ( fp );
+ rewind ( fp );
+ char * content = (char *) malloc ( file_len + 1 );
+
+ if ( ! content )
+ {
+ fprintf ( stderr,
+ "file2str error: can't malloc %d bytes.\n",
+ (int)file_len
+ );
+ return 0;
+ }
+
+ size_t items_read = fread ( content, file_len, 1, fp );
+
+ if ( 1 != items_read )
+ {
+ fprintf ( stderr, "file2str error: read failed.\n" );
+ free ( content );
+ return 0;
+ }
+
+ fclose ( fp );
+ content[file_len] = 0;
+
+ return content;
+}
+
+
+
+
+
+class QrshServer : public MessageListener
+{
+ public:
+
+ QrshServer ( SubscriptionManager & subscriptions,
+ char const * name,
+ char const * qrsh_run_path,
+ char const * host,
+ int port
+ );
+
+ virtual void received ( Message & message);
+
+
+ private:
+
+ set<string> all_server_names;
+
+ stringstream data_dir;
+
+ SubscriptionManager & subscriptions;
+
+ // Is this message addressed to me?
+ bool myMessage ( Message const & message );
+
+ /* ----------------------------------------------
+ * Special Commands
+ * These are commands that the qrsh_server executes
+ * directly, rather than through a child process
+ * instance of qrsh_run.
+ */
+ void runCommand ( Message const & message );
+ void execute ( Message const & message );
+ void wait ( Message const & message );
+ void exited ( Message const & message );
+ void get ( Message const & message );
+ void rememberIntroduction ( Message const & message );
+ void getStraw ( Message const & message );
+ void addAlias ( Message const & message );
+
+ void start ( );
+ void sayHello ( );
+ void sayName ( );
+ // end Special Commands ------------------------
+
+
+ void saveCommand ( Message const & message );
+
+ void send ( string const & content );
+
+ void drawStraws ( );
+ void getNames ( );
+ void runSavedCommand ( );
+
+ char ** getArgs ( char const * s );
+ bool isProcessName ( char const * s );
+ int string_countWords ( char const * s );
+ char const * skipWord ( char const * s );
+
+
+ void string_replaceAll ( string & str,
+ string & target,
+ string & replacement
+ );
+
+
+ string name,
+ qrsh_run_path,
+ host;
+
+ vector<string *> aliases;
+
+ int port;
+
+ map < char *, int > abstract_name_map;
+
+ set < string > myFellowBrokers;
+
+ bool saidHello;
+
+ Message savedCommand;
+
+ vector < int > straws;
+ int myStraw;
+
+};
+
+
+
+QrshServer::QrshServer ( SubscriptionManager & subs,
+ char const * name,
+ char const * qrsh_run_path,
+ char const * host,
+ int port
+ )
+ : subscriptions ( subs ),
+ name ( name ),
+ qrsh_run_path ( qrsh_run_path ),
+ host ( host ),
+ port ( port ),
+ saidHello ( false ),
+ myStraw ( 0 )
+{
+ data_dir << "/tmp/qrsh_"
+ << getpid();
+
+ if(mkdir ( data_dir.str().c_str(), 0777 ) )
+ {
+ fprintf ( stderr,
+ "QrshServer::QrshServer error: can't mkdir |%s|\n",
+ data_dir.str().c_str()
+ );
+ exit ( 1 );
+ }
+}
+
+
+
+void
+QrshServer::saveCommand ( Message const & message )
+{
+ savedCommand = message;
+}
+
+
+
+void
+QrshServer::runSavedCommand ( )
+{
+ runCommand ( savedCommand );
+}
+
+
+
+void
+QrshServer::start ( )
+{
+ stringstream announcement_data;
+ announcement_data << "hello_my_name_is "
+ << name;
+
+ send ( announcement_data.str() );
+
+ saidHello = true;
+}
+
+
+
+
+void
+QrshServer::send ( string const & content )
+{
+ try
+ {
+ Message message;
+ message.setData ( content );
+
+ Connection connection;
+ connection.open ( host, port );
+ Session session = connection.newSession ( );
+ session.messageTransfer ( arg::content = message,
+ arg::destination = "amq.fanout"
+ );
+ session.close();
+ connection.close();
+ }
+ catch ( exception const & e )
+ {
+ fprintf ( stderr, "QrshServer::send error: |%s|\n", e.what() );
+ }
+}
+
+
+
+
+void
+QrshServer::sayHello ( )
+{
+ if ( saidHello )
+ return;
+
+ stringstream ss;
+
+ ss << "hello_my_name_is "
+ << name;
+
+ send ( ss.str() );
+ saidHello = true;
+}
+
+
+
+void
+QrshServer::sayName ( )
+{
+ fprintf ( stderr, "My name is: |%s|\n", name.c_str() );
+}
+
+
+
+
+void
+QrshServer::drawStraws ( )
+{
+ myStraw = mrand ( 1000000000 );
+ stringstream ss;
+ ss << "straw "
+ << name
+ << ' '
+ << myStraw;
+ send ( ss.str() );
+}
+
+
+
+void
+QrshServer::getStraw ( Message const & message )
+{
+ int straw;
+
+ char brokerName[1000];
+ sscanf ( message.getData().c_str(), "%*s%s", brokerName );
+
+ if ( ! strcmp ( brokerName, name.c_str() ) )
+ return;
+
+ sscanf ( message.getData().c_str(), "%*s%*s%d", & straw );
+ straws.push_back ( straw );
+
+ bool i_win = true;
+ int ties = 0;
+
+ if ( straws.size() >= myFellowBrokers.size() )
+ {
+ // All votes are in! Let's see if I win!
+ for ( unsigned int i = 0; i < straws.size(); ++ i )
+ {
+ if ( straws[i] == myStraw )
+ ++ ties;
+ else
+ if ( straws[i] > myStraw )
+ {
+ i_win = false;
+ break;
+ }
+ }
+
+ if ( i_win && (ties <= 0) )
+ {
+ myStraw = 0;
+ straws.clear();
+ runSavedCommand ( );
+ }
+ else
+ if ( i_win && (ties > 0) )
+ {
+ fprintf ( stderr, "MDEBUG oh no! drawStraws error: server %s tied with straw %d!\n", name.c_str(), straw );
+ }
+ }
+}
+
+
+
+
+/*
+ * "APB" command (all-points-bullitens (commands that are not addressed
+ * specifically to any server)) are handled directly, here.
+ * Because if I return simply "true", the normal command processing code
+ * will misinterpret the command.
+ */
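+/*
+ * Example broadcast messages (names and numbers are illustrative; the
+ * senders are elsewhere in this file and in the qrsh_utils scripts):
+ *
+ *   hello_my_name_is mrg23
+ *   straw mrg23 123456789
+ *   all sayName
+ *   any exec my_command
+ */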
+bool
+QrshServer::myMessage ( Message const & message )
+{
+ int const maxlen = 100;
+ char head[maxlen];
+ char first_word [ maxlen + 1 ];
+ strncpy ( head, message.getData().c_str(), maxlen );
+ sscanf ( head, "%s", first_word );
+
+ if ( ! strcmp ( name.c_str(), first_word ) )
+ {
+ return true;
+ }
+ else
+ {
+ // Is the given name one of my aliases?
+ char possibleAlias[1000];
+ if(1 == sscanf ( message.getData().c_str(), "%s", possibleAlias ))
+ {
+ for ( unsigned int i = 0; i < aliases.size(); ++ i )
+ {
+
+ if ( ! strcmp ( possibleAlias, aliases[i]->c_str() ))
+ {
+ return true;
+ }
+ }
+ }
+ }
+
+ if ( ! strcmp ( first_word, "hello_my_name_is" ) )
+ {
+ rememberIntroduction ( message );
+ sayHello ( );
+ return false;
+ }
+ else
+ if ( ! strcmp ( first_word, "straw" ) )
+ {
+ getStraw ( message );
+ return false;
+ }
+ else
+ if ( ! strcmp ( first_word, "all" ) )
+ {
+ return true;
+ }
+ else
+ if ( ! strcmp ( first_word, "any" ) )
+ {
+ straws.clear();
+ usleep ( 200000 );
+ saveCommand ( message );
+ drawStraws ( );
+ return false;
+ }
+ else
+ return false;
+}
+
+
+
+
+void
+QrshServer::rememberIntroduction ( Message const & message )
+{
+ char brokerName [ 1000 ];
+ sscanf ( message.getData().c_str(), "%*s%s", brokerName );
+
+ if ( strcmp ( brokerName, name.c_str() ) )
+ myFellowBrokers.insert ( string ( brokerName ) );
+}
+
+
+
+
+void
+QrshServer::addAlias ( Message const & message )
+{
+ char alias[1000];
+ sscanf ( message.getData().c_str(), "%*s%*s%s", alias );
+ aliases.push_back ( new string(alias) );
+}
+
+
+
+
+void
+QrshServer::getNames ( )
+{
+ abstract_name_map.clear();
+
+ DIR * dir = opendir ( data_dir.str().c_str() );
+
+ if ( ! dir )
+ {
+ fprintf ( stderr,
+ "QrshServer::getNames error: could not open dir |%s|.\n",
+ data_dir.str().c_str()
+ );
+ return;
+ }
+
+ struct dirent * file;
+ while ( (file = readdir ( dir ) ) )
+ {
+ if ( '.' != file->d_name[0] )
+ {
+ stringstream pid_file_name;
+ pid_file_name << data_dir.str()
+ << '/'
+ << file->d_name
+ << "/pid";
+
+ int pid = 0;
+ FILE * fp;
+ if ( (fp = fopen ( pid_file_name.str().c_str(), "r" ) ) )
+ {
+ fscanf ( fp, "%d", & pid );
+ fclose ( fp );
+ abstract_name_map.insert(pair<char*, int>(strdup(file->d_name), pid));
+ }
+ else
+ {
+ /*
+ * Fail silently. The non-existence of this file
+ * is not necessarily an error.
+ */
+ }
+ }
+ }
+ closedir ( dir );
+}
+
+
+
+void
+QrshServer::string_replaceAll ( string & str,
+ string & target,
+ string & replacement
+ )
+{
+ int target_size = target.size();
+ int found_pos = 0;
+
+ while ( 0 <= (found_pos = str.find ( target ) ) )
+ str.replace ( found_pos, target_size, replacement );
+}
+
+
+
+
+bool
+QrshServer::isProcessName ( char const * str )
+{
+ getNames();
+ map<char *, int>::iterator it;
+ for ( it = abstract_name_map.begin(); it != abstract_name_map.end(); ++ it )
+ {
+ if ( ! strcmp ( str, it->first ) )
+ return true;
+ }
+
+ return false;
+}
+
+
+
+
+
+int
+QrshServer::string_countWords ( char const * s1 )
+{
+ int count = 0;
+ char const * s2 = s1 + 1;
+
+ if ( ! isspace(* s1) )
+ {
+ ++ count;
+ }
+
+ for ( ; * s2; ++ s1, ++ s2 )
+ {
+ // count space-to-word transitions.
+ if ( isspace(*s1) && (! isspace(*s2)) )
+ ++ count;
+ }
+
+ return count;
+}
+
+
+
+
+void
+QrshServer::execute ( Message const & message )
+{
+ // First, gather all the symbolic names we know.
+ getNames();
+
+ // Now make a copy of the command, that I can alter.
+ string command ( message.getData() );
+
+
+ // Replace each occurrence of every abstract name with its pid.
+ char pid_str[100];
+ map<char *, int>::iterator it;
+ for ( it = abstract_name_map.begin(); it != abstract_name_map.end(); ++ it )
+ {
+ sprintf ( pid_str, "%d", it->second );
+ string target ( it->first ),
+ replacement ( pid_str );
+ string_replaceAll ( command, target, replacement );
+ }
+
+
+ char const * truncated_command = skipWord(skipWord(command.c_str()));
+
+ if ( truncated_command )
+ system ( truncated_command );
+}
+
+
+
+
+
+void
+QrshServer::get ( Message const & request_message )
+{
+ char * file_content;
+
+ /*
+ * Get the contents of the requested file.
+ */
+ char file_or_process_name[1000];
+ sscanf ( request_message.getData().c_str(), "%*s%*s%s", file_or_process_name );
+
+ if ( isProcessName ( file_or_process_name ) )
+ {
+ stringstream desired_file_name;
+ desired_file_name << data_dir.str()
+ << '/'
+ << file_or_process_name
+ << '/';
+ char requested_output_stream[1000];
+ if(1 != sscanf ( request_message.getData().c_str(),
+ "%*s%*s%*s%s",
+ requested_output_stream
+ )
+ )
+ {
+ fprintf ( stderr,
+ "QrshServer::get error: Can't read requested data file name from this message: |%s|\n",
+ request_message.getData().c_str()
+ );
+ return;
+ }
+ desired_file_name << requested_output_stream;
+ file_content = file2str ( desired_file_name.str().c_str() );
+ }
+ else
+ {
+ file_content = file2str ( file_or_process_name );
+ }
+
+ stringstream reply_data ;
+ reply_data << "get_response "
+ << file_content;
+ /*
+ * Send a response-message to the server who is waiting.
+ */
+ send ( reply_data.str() );
+}
+
+
+
+
+
+
+void
+QrshServer::exited ( Message const & message )
+{
+ int exit_code = -1;
+
+ // First, gather all the symbolic names we know.
+ getNames();
+
+ // Now make a copy of the command, that I can alter.
+ string edited_command ( message.getData() );
+
+ // Replace each occurrence of every abstract name with its pid.
+ char pid_str[100];
+ map<char *, int>::iterator it;
+ for ( it = abstract_name_map.begin(); it != abstract_name_map.end(); ++ it )
+ {
+ sprintf ( pid_str, "%d", it->second );
+ string target ( it->first ),
+ replacement ( pid_str );
+ string_replaceAll ( edited_command, target, replacement );
+ }
+
+ // Skip the service name. That is not used by the child.
+ char const * truncated_command = skipWord(edited_command.c_str());
+
+ if ( truncated_command )
+ {
+ stringstream ss;
+ ss << qrsh_run_path
+ << ' '
+ << data_dir.str()
+ << ' '
+ << truncated_command;
+
+ int child_pid;
+ if ( ! (child_pid = fork() ) )
+ {
+ // This is the child.
+
+ char ** argv = getArgs ( ss.str().c_str() );
+ execv ( qrsh_run_path.c_str(), argv );
+
+ perror ( "qrsh_server: execv error: " );
+ exit ( 1 );
+ }
+ else
+ {
+ // This is the parent.
+ pid_t awaited_pid;
+ while ( 0 == (awaited_pid = waitpid ( child_pid, & exit_code, WNOHANG)) )
+ {
+ fprintf ( stderr, "qrsh_server info: parent: waiting for child...\n" );
+ sleep(1);
+ }
+
+ if ( -1 == awaited_pid )
+ {
+ fprintf ( stderr, "qrsh_server error awaiting child!\n" );
+ exit ( 1 );
+ }
+
+ exit_code >>= 8;
+
+ stringstream data;
+ data << "wait_response "
+ << exit_code;
+
+ send ( data.str() );
+ }
+ }
+}
+
+
+
+
+void
+QrshServer::wait ( Message const & message )
+{
+ bool pre_existing = false;
+ if ( 3 == string_countWords ( message.getData().c_str() ) )
+ {
+ // The first word is the name of this service.
+ // The second word is "exec_wait".
+ // The third word is the symbolic name of the command to wait for.
+ // The fact that there are exactly three words means that this
+ // must be a command that has already been named and started --
+ // we just need to find its pid and wait on it.
+ pre_existing = true;
+ }
+
+
+ int exit_code = -1;
+
+ // First, gather all the symbolic names we know.
+ getNames();
+
+ // Now make a copy of the command, that I can alter.
+ string edited_command ( message.getData() );
+
+ // Replace each occurrence of every abstract name with its pid.
+ char pid_str[100];
+ map<char *, int>::iterator it;
+ for ( it = abstract_name_map.begin(); it != abstract_name_map.end(); ++ it )
+ {
+ sprintf ( pid_str, "%d", it->second );
+ string target ( it->first ),
+ replacement ( pid_str );
+ string_replaceAll ( edited_command, target, replacement );
+ }
+
+ // Skip the service name. That is not used by the child.
+ char const * truncated_command = skipWord(edited_command.c_str());
+
+ if ( truncated_command )
+ {
+ stringstream ss;
+ ss << qrsh_run_path
+ << ' '
+ << data_dir.str()
+ << ' '
+ << truncated_command;
+
+ int child_pid;
+ if ( ! (child_pid = fork() ) )
+ {
+ // This is the child.
+
+ char ** argv = getArgs ( ss.str().c_str() );
+ execv ( qrsh_run_path.c_str(), argv );
+
+ perror ( "qrsh_server: execv error: " );
+ exit ( 1 );
+ }
+ else
+ {
+ // This is the parent.
+ pid_t awaited_pid;
+ while ( 0 == (awaited_pid = waitpid ( child_pid, & exit_code, WNOHANG)) )
+ {
+ fprintf ( stderr, "qrsh_server info: parent: waiting for child...\n" );
+ sleep(1);
+ }
+
+ if ( -1 == awaited_pid )
+ {
+ fprintf ( stderr, "qrsh_server error awaiting child!\n" );
+ exit ( 1 );
+ }
+ }
+
+ exit_code >>= 8;
+
+ stringstream data;
+ data << "wait_response "
+ << exit_code;
+
+ send ( data.str() );
+ }
+}
+
+
+
+
+
+char const *
+QrshServer::skipWord ( char const * s )
+{
+ if(! (s && *s) )
+ return 0;
+
+ // skip past initial white space
+ while ( isspace(*s) )
+ {
+ ++ s;
+ if(! *s)
+ return 0;
+ }
+
+ // skip past first word
+ while ( ! isspace(*s) )
+ {
+ ++ s;
+ if(! *s)
+ return 0;
+ }
+
+ return s;
+}
+
+
+
+
+
+char **
+QrshServer::getArgs ( char const * str )
+{
+ char const * s = str;
+
+ char ** argv = 0;
+ vector<int> start_positions,
+ lengths;
+
+ int pos = 0;
+ int arg_len = 0;
+
+ int n_args = 0;
+ while ( 1 )
+ {
+ // advance over whitespace.
+ while ( isspace ( *s ) )
+ {
+ ++ s; ++ pos;
+ if(! *s)
+ {
+ goto done;
+ }
+ }
+
+ ++ n_args;
+ start_positions.push_back ( pos );
+ arg_len = 0;
+
+ // advance over non-whitespace.
+ while ( ! isspace ( *s ) )
+ {
+ ++ s; ++ pos; ++ arg_len;
+ if(! *s)
+ {
+ lengths.push_back ( arg_len );
+ arg_len = 0;
+ goto done;
+ }
+ }
+
+ lengths.push_back ( arg_len );
+ arg_len = 0;
+ }
+
+ done:
+
+ if ( arg_len > 0 )
+ lengths.push_back ( arg_len );
+
+ // Alloc the array.
+ argv = (char **) malloc ( sizeof(char *) * ( n_args + 1 ) );
+    argv[n_args] = 0;   // null-terminate the array.
+
+ for ( int i = 0; i < n_args; ++ i )
+ {
+ argv[i] = ( char *) malloc ( lengths[i] + 1 );
+ strncpy ( argv[i],
+ str + start_positions[i],
+ lengths[i]
+ );
+ argv[i][lengths[i]] = 0;
+ }
+
+ return argv;
+}
+
+
+
+void
+QrshServer::runCommand ( Message const & message )
+{
+ char const * s = message.getData().c_str();
+
+ /*
+ * Skip the first word, which is this server's name.
+ */
+ while ( isspace(*s) ) // go to start of first word.
+ ++ s;
+
+ while ( ! isspace(*s) ) // go to end of first word.
+ ++ s;
+
+ while ( isspace(*s) ) // go to start of second word.
+ ++ s;
+
+ char command_name[1000];
+ sscanf ( s, "%s", command_name );
+
+ if ( ! strcmp ( "get", command_name ) )
+ {
+ get ( message );
+ }
+ else
+ if ( ! strcmp ( "exited", command_name ) )
+ {
+ exited ( message );
+ }
+ else
+ if ( ! strcmp ( "exec_wait", command_name ) )
+ {
+ wait ( message );
+ }
+ else
+ if ( ! strcmp ( "exec", command_name ) )
+ {
+ execute ( message );
+ }
+ else
+ if ( ! strcmp ( "start", command_name ) )
+ {
+ start ( );
+ }
+ else
+ if ( ! strcmp ( "alias", command_name ) )
+ {
+ addAlias ( message );
+ }
+ else
+ if ( ! strcmp ( "sayName", command_name ) )
+ {
+ sayName ( );
+ }
+ else
+ {
+ /*
+ * If the command is not any of the "special" commands
+ * above, then it's a "normal" command.
+ * That means we run it with a child process instance of
+ * qrsh_run, which will save all its data in the qrsh dir.
+ */
+ stringstream ss;
+ ss << qrsh_run_path
+ << ' '
+ << data_dir.str()
+ << ' '
+ << s;
+
+ if ( ! fork() )
+ {
+ char ** argv = getArgs ( ss.str().c_str() );
+ execv ( qrsh_run_path.c_str(), argv );
+ perror ( "qrsh_server: execv error: " );
+ }
+ }
+}
+
+
+
+void
+QrshServer::received ( Message & message )
+{
+ if ( myMessage ( message ) )
+ runCommand ( message );
+}
+
+
+
+}} // namespace qpid::tests
+
+using namespace qpid::tests;
+
+/*
+ * fixme mick Mon Aug 3 10:29:26 EDT 2009
+ * argv[1] server name
+ * argv[2] qrsh exe path
+ * argv[3] host
+ * argv[4] port
+ */
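+/*
+ * Example invocation (server name, qrsh_run path, host and port are
+ * illustrative):
+ *
+ *   qrsh_server mrg23 /path/to/qrsh_run 127.0.0.1 5813
+ */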
+int
+main ( int /*argc*/, char** argv )
+{
+ const char* host = argv[3];
+ int port = atoi(argv[4]);
+ Connection connection;
+ Message msg;
+
+ srand ( getpid() );
+
+ try
+ {
+ connection.open ( host, port );
+ Session session = connection.newSession();
+
+
+ // Declare queues.
+ string myQueue = session.getId().getName();
+ session.queueDeclare ( arg::queue=myQueue,
+ arg::exclusive=true,
+ arg::autoDelete=true);
+
+ session.exchangeBind ( arg::exchange="amq.fanout",
+ arg::queue=myQueue,
+ arg::bindingKey="my-key");
+
+ // Create a server and subscribe it to my queue.
+ SubscriptionManager subscriptions ( session );
+ QrshServer server ( subscriptions,
+ argv[1], // server name
+ argv[2], // qrsh exe path
+ host,
+ port
+ );
+ subscriptions.subscribe ( server, myQueue );
+
+ // Receive messages until the subscription is cancelled
+ // by QrshServer::received()
+ subscriptions.run();
+
+ connection.close();
+ }
+ catch(const exception& error)
+ {
+ cout << error.what() << endl;
+ return 1;
+ }
+
+ return 0;
+}
+
+
+
+
diff --git a/cpp/src/tests/qrsh_utils/10_all b/cpp/src/tests/qrsh_utils/10_all
new file mode 100755
index 0000000000..7b486ea672
--- /dev/null
+++ b/cpp/src/tests/qrsh_utils/10_all
@@ -0,0 +1,30 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#! /bin/bash
+
+echo "Asking all servers to say their names... "
+qrsh 127.0.0.1 5813 \
+ all sayName
+
+
+
+
diff --git a/cpp/src/tests/qrsh_utils/1_remote_run b/cpp/src/tests/qrsh_utils/1_remote_run
new file mode 100755
index 0000000000..5b9b307bba
--- /dev/null
+++ b/cpp/src/tests/qrsh_utils/1_remote_run
@@ -0,0 +1,26 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#! /bin/bash
+
+
+./qrsh 127.0.0.1 5813 \
+ mrg23 command_1 /home/mick/redhat/qrsh/qrsh_run/my_command foo bar baz
diff --git a/cpp/src/tests/qrsh_utils/2_forever b/cpp/src/tests/qrsh_utils/2_forever
new file mode 100755
index 0000000000..5528b0e4d8
--- /dev/null
+++ b/cpp/src/tests/qrsh_utils/2_forever
@@ -0,0 +1,26 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#! /bin/bash
+
+
+./qrsh 127.0.0.1 5813 \
+ mrg23 command_2 /home/mick/redhat/qrsh/qrsh_run/forever foo bar baz
diff --git a/cpp/src/tests/qrsh_utils/3_kill_it b/cpp/src/tests/qrsh_utils/3_kill_it
new file mode 100755
index 0000000000..afc7a03c9d
--- /dev/null
+++ b/cpp/src/tests/qrsh_utils/3_kill_it
@@ -0,0 +1,27 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#! /bin/bash
+
+echo "Killing command 2... "
+./qrsh 127.0.0.1 5813 \
+ mrg23 exec kill -9 command_2
+
diff --git a/cpp/src/tests/qrsh_utils/4_wait_for_it b/cpp/src/tests/qrsh_utils/4_wait_for_it
new file mode 100755
index 0000000000..a4dc0da1ce
--- /dev/null
+++ b/cpp/src/tests/qrsh_utils/4_wait_for_it
@@ -0,0 +1,26 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#! /bin/bash
+
+./qrsh 127.0.0.1 5813 \
+ mrg23 exec_wait /home/mick/redhat/qrsh/qrsh_run/my_command foo bar baz
+echo "my_command returned an exit code of $?"
diff --git a/cpp/src/tests/qrsh_utils/5_exited b/cpp/src/tests/qrsh_utils/5_exited
new file mode 100755
index 0000000000..4fec1dcc79
--- /dev/null
+++ b/cpp/src/tests/qrsh_utils/5_exited
@@ -0,0 +1,64 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#! /bin/bash
+
+path=/home/mick/redhat/qrsh/qrsh_run
+
+echo "Running command_3 ..."
+./qrsh 127.0.0.1 5813 \
+ mrg23 command_3 $path/my_command foo bar baz
+
+echo "Now I do some other stuff..."
+sleep 1
+echo "And then some more stuff..."
+sleep 1
+echo "and so on..."
+sleep 1
+
+echo "Now I'm waiting for command_3 ..."
+./qrsh 127.0.0.1 5813 \
+ mrg23 exited command_3
+echo "has command_3 exited: $? ."
+sleep 5
+
+./qrsh 127.0.0.1 5813 \
+ mrg23 exited command_3
+echo "has command_3 exited: $? ."
+sleep 5
+
+./qrsh 127.0.0.1 5813 \
+ mrg23 exited command_3
+echo "has command_3 exited: $? ."
+sleep 5
+
+./qrsh 127.0.0.1 5813 \
+ mrg23 exited command_3
+echo "has command_3 exited: $? ."
+sleep 5
+
+./qrsh 127.0.0.1 5813 \
+ mrg23 exited command_3
+echo "has command_3 exited: $? ."
+sleep 5
+
+
+
diff --git a/cpp/src/tests/qrsh_utils/6_get b/cpp/src/tests/qrsh_utils/6_get
new file mode 100755
index 0000000000..4b35ca98e6
--- /dev/null
+++ b/cpp/src/tests/qrsh_utils/6_get
@@ -0,0 +1,29 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#! /bin/bash
+
+echo "getting /tmp/foo ..."
+./qrsh 127.0.0.1 5813 \
+ mrg23 get /tmp/foo
+
+
+
diff --git a/cpp/src/tests/qrsh_utils/7_get_output b/cpp/src/tests/qrsh_utils/7_get_output
new file mode 100755
index 0000000000..59911089ec
--- /dev/null
+++ b/cpp/src/tests/qrsh_utils/7_get_output
@@ -0,0 +1,44 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#! /bin/bash
+
+echo "Run a command..."
+./qrsh 127.0.0.1 5813 \
+ mrg23 command_4 /home/mick/redhat/qrsh/qrsh_run/my_command foo bar baz
+
+echo "Wait for a while..."
+sleep 20
+
+echo "Get stderr output:"
+echo "------------- begin stderr ---------------"
+./qrsh 127.0.0.1 5813 \
+ mrg23 get command_4 stderr
+echo "------------- end stderr ---------------"
+echo " "
+echo " "
+echo " "
+echo "Get stdout output:"
+echo "------------- begin stdout ---------------"
+./qrsh 127.0.0.1 5813 \
+ mrg23 get command_4 stdout
+echo "------------- end stdout ---------------"
+
diff --git a/cpp/src/tests/qrsh_utils/8_any b/cpp/src/tests/qrsh_utils/8_any
new file mode 100755
index 0000000000..2a922ea0e0
--- /dev/null
+++ b/cpp/src/tests/qrsh_utils/8_any
@@ -0,0 +1,43 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#! /bin/bash
+
+echo "asking any server to say his name ..."
+./qrsh 127.0.0.1 5813 \
+ any sayName
+sleep 1
+echo "asking any server to say his name ..."
+./qrsh 127.0.0.1 5813 \
+ any sayName
+sleep 1
+echo "asking any server to say his name ..."
+./qrsh 127.0.0.1 5813 \
+ any sayName
+sleep 1
+echo "asking any server to say his name ..."
+./qrsh 127.0.0.1 5813 \
+ any sayName
+sleep 1
+
+
+
+
diff --git a/cpp/src/tests/qrsh_utils/9_alias b/cpp/src/tests/qrsh_utils/9_alias
new file mode 100755
index 0000000000..a4cfdfdf9a
--- /dev/null
+++ b/cpp/src/tests/qrsh_utils/9_alias
@@ -0,0 +1,38 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#! /bin/bash
+
+# Make a group of two of the servers, using "alias",
+# and send the group a command.
+
+qrsh 127.0.0.1 5813 \
+ mrg22 alias group_1
+qrsh 127.0.0.1 5813 \
+ mrg23 alias group_1
+
+echo "Asking group_1 to say their names... "
+qrsh 127.0.0.1 5813 \
+ group_1 sayName
+
+
+
+
diff --git a/cpp/src/tests/qrsh_utils/qrsh_example_command.cpp b/cpp/src/tests/qrsh_utils/qrsh_example_command.cpp
new file mode 100644
index 0000000000..386e2f73f0
--- /dev/null
+++ b/cpp/src/tests/qrsh_utils/qrsh_example_command.cpp
@@ -0,0 +1,52 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+
+
+
+int main ( int argc, char ** argv )
+{
+ fprintf ( stderr, "Hello, I am the Example Child!\n");
+  fprintf ( stderr, "my %d arguments are:\n", argc - 1 );
+ fprintf ( stdout, "And hello to stdout, too!\n");
+
+ int i;
+ for ( i = 1; i < argc; ++ i )
+ {
+ fprintf ( stderr, "arg %d: |%s|\n", i, argv[i] );
+ }
+
+ for ( i = 0; i < 15; ++ i )
+ {
+ fprintf ( stderr, "child sleeping...\n" );
+ sleep ( 1 );
+ }
+
+ fprintf ( stderr, "child exiting with code 13.\n" );
+
+ return 13;
+}
+
+
+
+
diff --git a/cpp/src/tests/qrsh_utils/qrsh_forever.cpp b/cpp/src/tests/qrsh_utils/qrsh_forever.cpp
new file mode 100644
index 0000000000..191a9bca11
--- /dev/null
+++ b/cpp/src/tests/qrsh_utils/qrsh_forever.cpp
@@ -0,0 +1,50 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+
+#include <stdio.h>
+#include <unistd.h>
+
+
+
+int main ( int argc, char ** argv )
+{
+ fprintf ( stderr, "Hello, I am the Forever Example Child!\n");
+ fprintf ( stderr, "my %d arguments are:\n", argc - 1 );
+
+ int i;
+ for ( i = 1; i < argc; ++ i )
+ fprintf ( stderr, "arg %d: |%s|\n", i, argv[i] );
+
+ for ( i = 0; i >= 0; ++ i )
+ {
+ fprintf ( stderr, "child sleeping forever %d ...\n" , i);
+ sleep ( 1 );
+ }
+
+ fprintf ( stderr, "child exiting with code 12.\n" );
+
+ return 12;
+}
+
+
+
+
diff --git a/cpp/src/tests/qrsh_utils/qsh_doc.txt b/cpp/src/tests/qrsh_utils/qsh_doc.txt
new file mode 100644
index 0000000000..ad5990b38b
--- /dev/null
+++ b/cpp/src/tests/qrsh_utils/qsh_doc.txt
@@ -0,0 +1,309 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+##############################################
+ qrsh: a Qpid-based remote shell utility
+
+ Last updated: 3 Aug 09 Mick Goulish
+##############################################
+
+
+
+=============================
+Overview
+=============================
+
+ You're writing a multi-box test, and you want to write a
+ shell script in which you start processes on other boxes
+ and kill them (or send arbitrary signals to them).
+
+  But ssh doesn't let you signal those processes, and bash isn't
+  the greatest language in the world for the data structures you
+  need (such as associating PIDs with box names and executable
+  names).
+
+  Qrsh is a utility implemented on top of Qpid that you can use
+  from within your bash script, or from any other scripting
+  language. With it, you can:
+
+     1. Run any executable on any box in your cluster.
+
+     2. Forget about PIDs and box names. You associate
+        your own abstract names with the executable instances,
+        and then use those names in the rest of your script,
+        e.g. "broker_1", "sender_3", etc.
+
+     3. Launch an executable and wait until it returns, and
+        get its exit code.
+
+     4. Launch your executable and do other stuff, then come
+        back later and see if it has exited.
+
+     5. Get whatever it sent to stdout or stderr.
+
+     6. Get the contents of any other file.
+
+     7. Send a command to all your boxes at once.
+
+     8. Send a command to a randomly selected box.
+
+     9. Define groups of boxes, and send a command simultaneously
+        to all boxes in a given group.
+
+
+
+
+=============================
+Using It
+=============================
+
+ 1. You need to run a Qpid broker.
+
+ 2. You start a Qpid client ( which is called a qrsh_server )
+ on all the boxes you care about. And you give them all
+ names like "mrg13", "mrg14" etc. The names can be anything
+ you want, but I've always used one qrsh_server per box,
+     and given it the box name. ( However, you can run two on
+     one box; they won't collide. )
+
+  3. After you start all servers, send a "start" command to any
+     one of them (see the example start script below).
+
+ 4. The qrsh_servers use the fanout exchange to talk to each
+ other.
+
+ 5. In your script, you run an executable called "qrsh". It knows
+ how to talk to the servers, do what you want, and retrieve
+ the data you want.
+
+
+   Example start script (this starts 4 servers on the same box):
+ -------------------------------------------------------------
+
+ echo "Starting server mrg22 ..."
+ ./qrsh_server mrg22 ./qrsh_run 127.0.0.1 5813 &
+
+ echo "Starting server mrg23 ..."
+ ./qrsh_server mrg23 ./qrsh_run 127.0.0.1 5813 &
+
+ echo "Starting server mrg24 ..."
+ ./qrsh_server mrg24 ./qrsh_run 127.0.0.1 5813 &
+
+ echo "Starting server mrg25 ..."
+ ./qrsh_server mrg25 ./qrsh_run 127.0.0.1 5813 &
+
+ echo "Issuing start command..."
+ sleep 2
+ ./qrsh 127.0.0.1 5813 mrg22 start
+ sleep 1
+
+ echo "Ready."
+
+ # end of script.
+
+
+
+
+
+
+=============================
+Qrsh Syntax
+=============================
+
+ qrsh host port server_name command_name arg*
+
+
+ "host" and "port" specify the Qpid server to connect to.
+
+ "server_name" can be anything you want. I always use the name
+ of the box that the server is running on.
+
+ "command_name" is the name that you choose to assign to
+ the process you are running. Each process that you decide
+ to name must have a unique name within this script.
+
+     Or it can be a reserved command name, which qrsh
+     interprets in a special way.
+
+ Reserved command names are:
+
+ exec
+ exec_wait
+ exited
+ get
+
+ "exec" means "interpret the rest of the command line as a
+   command to be executed by the designated server."
+
+  "exec_wait" means the same as "exec", but waits for the command
+   to terminate and returns its exit code.
+
+ "exited" -- you provide 1 arg, which is an abstract
+ process name. qrsh returns 1 if that process has exited,
+ else 0.
+
+ "get" -- you provide one arg which is a path. qrsh returns
+ (by printing to stdout) the contents of that file.
+
+ "arg*" is zero or more arguments. They are interpreted
+ differently depending on whether you are using one of
+ the above reserved command names, or making up your own
+ abstract name for a command.
+
+
+
+
+=============================
+Examples
+=============================
+
+ 1. Run a process on a remote box.
+
+ qrsh mrg23 command_1 /usr/sbin/whatever foo bar baz
+
+ Returns immediately.
+
+
+
+ 2. Kill a process that you started earlier:
+
+ qrsh mrg23 exec kill -9 command_1
+
+ After the word "exec" put any command line you want.
+ The server you're sending this to will replace all abstract
+ names in the command with process IDs. ( In this example,
+ just the word "command_1" will be replaced. ) Then it will
+ execute the command.
+
+
+
+ 3. Execute a command, and wait for it to finish
+
+ qrsh mrg23 exec_wait command_name args
+
+
+
+  4. Check on whether a command you issued earlier has exited.
+
+ ./qrsh mrg23 exited command_3
+
+ Returns 1 if it has exited, else 0.
+
+
+
+ 5. Get the contents of a file from the remote system:
+
+ ./qrsh mrg23 get /tmp/foo
+
+ Prints the contents to stdout.
+
+
+
+ 6. Send a command to all servers at once:
+
+     # This example causes them all to print their names to stderr.
+ ./qrsh all sayName
+
+
+ 7. Define a group of servers and send a command to that group.
+
+ #! /bin/bash
+
+ # Make a group of two of the servers, using "alias",
+ # and send the group a command.
+
+ qrsh 127.0.0.1 5813 \
+ mrg22 alias group_1
+
+ qrsh 127.0.0.1 5813 \
+ mrg23 alias group_1
+
+ echo "Asking group_1 to say their names... "
+ qrsh 127.0.0.1 5813 \
+ group_1 sayName
+
+ # end of script.
+
+
+
+
+ 8. Execute a command and get its stdout and stderr contents.
+
+ #! /bin/bash
+
+ echo "Run a command..."
+ ./qrsh 127.0.0.1 5813 \
+ mrg23 command_4 my_command foo bar baz
+
+ echo "Wait for a while..."
+ sleep 10
+
+ echo "Get stderr output:"
+ echo "------------- begin stderr ---------------"
+ ./qrsh 127.0.0.1 5813 \
+ mrg23 get command_4 stderr
+ echo "------------- end stderr ---------------"
+ echo " "
+
+ echo " "
+ echo "Get stdout output:"
+ echo "------------- begin stdout ---------------"
+ ./qrsh 127.0.0.1 5813 \
+ mrg23 get command_4 stdout
+ echo "------------- end stdout ---------------"
+
+ # end of script.
+
+
+
+
+ 9. Send a command to one of your servers, selected
+ at random.
+
+ #! /bin/bash
+
+ # I do it multiple times here, so I can see
+ # that it really is selecting randomly.
+
+ echo "asking any server to say his name ..."
+ ./qrsh 127.0.0.1 5813 \
+ any sayName
+ sleep 1
+
+ echo "asking any server to say his name ..."
+ ./qrsh 127.0.0.1 5813 \
+ any sayName
+ sleep 1
+
+ echo "asking any server to say his name ..."
+ ./qrsh 127.0.0.1 5813 \
+ any sayName
+ sleep 1
+
+ echo "asking any server to say his name ..."
+ ./qrsh 127.0.0.1 5813 \
+ any sayName
+
+ # end of script.
+
+
+
+
diff --git a/cpp/src/tests/queue_flow_limit_tests.py b/cpp/src/tests/queue_flow_limit_tests.py
deleted file mode 100644
index dec7cfb3af..0000000000
--- a/cpp/src/tests/queue_flow_limit_tests.py
+++ /dev/null
@@ -1,371 +0,0 @@
-#!/usr/bin/env python
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-import sys
-from qpid.testlib import TestBase010
-from qpid import datatypes, messaging
-from qpid.messaging import Message, Empty
-from threading import Thread, Lock
-from logging import getLogger
-from time import sleep, time
-from os import environ, popen
-
-class QueueFlowLimitTests(TestBase010):
-
- def __getattr__(self, name):
- if name == "assertGreater":
- return lambda a, b: self.failUnless(a > b)
- else:
- raise AttributeError
-
- def _create_queue(self, name,
- stop_count=None, resume_count=None,
- stop_size=None, resume_size=None,
- max_size=None, max_count=None):
- """ Create a queue with the given flow settings via the queue.declare
- command.
- """
- args={}
- if (stop_count is not None):
- args["qpid.flow_stop_count"] = stop_count;
- if (resume_count is not None):
- args["qpid.flow_resume_count"] = resume_count;
- if (stop_size is not None):
- args["qpid.flow_stop_size"] = stop_size;
- if (resume_size is not None):
- args["qpid.flow_resume_size"] = resume_size;
- if (max_size is not None):
- args["qpid.max_size"] = max_size;
- if (max_count is not None):
- args["qpid.max_count"] = max_count;
-
-
- self.session.queue_declare(queue=name, arguments=args)
-
- qs = self.qmf.getObjects(_class="queue")
- for i in qs:
- if i.name == name:
- # verify flow settings
- if (stop_count is not None):
- self.assertEqual(i.arguments.get("qpid.flow_stop_count"), stop_count)
- if (resume_count is not None):
- self.assertEqual(i.arguments.get("qpid.flow_resume_count"), resume_count)
- if (stop_size is not None):
- self.assertEqual(i.arguments.get("qpid.flow_stop_size"), stop_size)
- if (resume_size is not None):
- self.assertEqual(i.arguments.get("qpid.flow_resume_size"), resume_size)
- if (max_size is not None):
- self.assertEqual(i.arguments.get("qpid.max_size"), max_size)
- if (max_count is not None):
- self.assertEqual(i.arguments.get("qpid.max_count"), max_count)
- self.failIf(i.flowStopped)
- return i.getObjectId()
- self.fail("Unable to create queue '%s'" % name)
- return None
-
-
- def _delete_queue(self, name):
- """ Delete a named queue
- """
- self.session.queue_delete(queue=name)
-
-
- def _start_qpid_send(self, queue, count, content="X", capacity=100):
- """ Use the qpid-send client to generate traffic to a queue.
- """
- command = "qpid-send" + \
- " -b" + " %s:%s" % (self.broker.host, self.broker.port) \
- + " -a " + str(queue) \
- + " --messages " + str(count) \
- + " --content-string " + str(content) \
- + " --capacity " + str(capacity)
- return popen(command)
-
- def _start_qpid_receive(self, queue, count, timeout=5):
- """ Use the qpid-receive client to consume from a queue.
- Note well: prints one line of text to stdout for each consumed msg.
- """
- command = "qpid-receive" + \
- " -b " + "%s:%s" % (self.broker.host, self.broker.port) \
- + " -a " + str(queue) \
- + " --messages " + str(count) \
- + " --timeout " + str(timeout) \
- + " --print-content yes"
- return popen(command)
-
- def test_qpid_config_cmd(self):
- """ Test the qpid-config command's ability to configure a queue's flow
- control thresholds.
- """
- tool = environ.get("QPID_CONFIG_EXEC")
- if tool:
- command = tool + \
- " --broker-addr=%s:%s " % (self.broker.host, self.broker.port) \
- + "add queue test01 --flow-stop-count=999" \
- + " --flow-resume-count=55 --flow-stop-size=5000000" \
- + " --flow-resume-size=100000"
- cmd = popen(command)
- rc = cmd.close()
- self.assertEqual(rc, None)
-
- # now verify the settings
- self.startQmf();
- qs = self.qmf.getObjects(_class="queue")
- for i in qs:
- if i.name == "test01":
- self.assertEqual(i.arguments.get("qpid.flow_stop_count"), 999)
- self.assertEqual(i.arguments.get("qpid.flow_resume_count"), 55)
- self.assertEqual(i.arguments.get("qpid.flow_stop_size"), 5000000)
- self.assertEqual(i.arguments.get("qpid.flow_resume_size"), 100000)
- self.failIf(i.flowStopped)
- break;
- self.assertEqual(i.name, "test01")
- self._delete_queue("test01")
-
-
- def test_flow_count(self):
- """ Create a queue with count-based flow limit. Spawn several
- producers which will exceed the limit. Verify limit exceeded. Consume
- all messages. Verify flow control released.
- """
- self.startQmf();
- oid = self._create_queue("test-q", stop_count=373, resume_count=229)
- self.assertEqual(self.qmf.getObjects(_objectId=oid)[0].flowStoppedCount, 0)
-
- sndr1 = self._start_qpid_send("test-q", count=1213, content="XXX", capacity=50);
- sndr2 = self._start_qpid_send("test-q", count=797, content="Y", capacity=13);
- sndr3 = self._start_qpid_send("test-q", count=331, content="ZZZZZ", capacity=149);
- totalMsgs = 1213 + 797 + 331
-
- # wait until flow control is active
- deadline = time() + 10
- while (not self.qmf.getObjects(_objectId=oid)[0].flowStopped) and \
- time() < deadline:
- pass
- self.failUnless(self.qmf.getObjects(_objectId=oid)[0].flowStopped)
- depth = self.qmf.getObjects(_objectId=oid)[0].msgDepth
- self.assertGreater(depth, 373)
-
- # now wait until the enqueues stop happening - ensure that
- # not all msgs have been sent (senders are blocked)
- sleep(1)
- newDepth = self.qmf.getObjects(_objectId=oid)[0].msgDepth
- while depth != newDepth:
- depth = newDepth;
- sleep(1)
- newDepth = self.qmf.getObjects(_objectId=oid)[0].msgDepth
- self.assertGreater(totalMsgs, depth)
-
- # drain the queue
- rcvr = self._start_qpid_receive("test-q",
- count=totalMsgs)
- count = 0;
- x = rcvr.readline() # prints a line for each received msg
- while x:
- count += 1;
- x = rcvr.readline()
-
- sndr1.close();
- sndr2.close();
- sndr3.close();
- rcvr.close();
-
- self.assertEqual(count, totalMsgs)
- self.failIf(self.qmf.getObjects(_objectId=oid)[0].flowStopped)
- self.failUnless(self.qmf.getObjects(_objectId=oid)[0].flowStoppedCount)
-
- self._delete_queue("test-q")
-
-
- def test_flow_size(self):
- """ Create a queue with size-based flow limit. Spawn several
- producers which will exceed the limit. Verify limit exceeded. Consume
- all messages. Verify flow control released.
- """
- self.startQmf();
- oid = self._create_queue("test-q", stop_size=351133, resume_size=251143)
-
- sndr1 = self._start_qpid_send("test-q", count=1699, content="X"*439, capacity=53);
- sndr2 = self._start_qpid_send("test-q", count=1129, content="Y"*631, capacity=13);
- sndr3 = self._start_qpid_send("test-q", count=881, content="Z"*823, capacity=149);
- totalMsgs = 1699 + 1129 + 881
-
- # wait until flow control is active
- deadline = time() + 10
- while (not self.qmf.getObjects(_objectId=oid)[0].flowStopped) and \
- time() < deadline:
- pass
- self.failUnless(self.qmf.getObjects(_objectId=oid)[0].flowStopped)
- self.assertGreater(self.qmf.getObjects(_objectId=oid)[0].byteDepth, 351133)
-
- # now wait until the enqueues stop happening - ensure that
- # not all msgs have been sent (senders are blocked)
- depth = self.qmf.getObjects(_objectId=oid)[0].msgDepth
- sleep(1)
- newDepth = self.qmf.getObjects(_objectId=oid)[0].msgDepth
- while depth != newDepth:
- depth = newDepth;
- sleep(1)
- newDepth = self.qmf.getObjects(_objectId=oid)[0].msgDepth
- self.assertGreater(totalMsgs, depth)
-
- # drain the queue
- rcvr = self._start_qpid_receive("test-q",
- count=totalMsgs)
- count = 0;
- x = rcvr.readline() # prints a line for each received msg
- while x:
- count += 1;
- x = rcvr.readline()
-
- sndr1.close();
- sndr2.close();
- sndr3.close();
- rcvr.close();
-
- self.assertEqual(count, totalMsgs)
- self.failIf(self.qmf.getObjects(_objectId=oid)[0].flowStopped)
-
- self._delete_queue("test-q")
-
-
- def verify_limit(self, testq):
- """ run a limit check against the testq object
- """
-
- testq.mgmt = self.qmf.getObjects(_objectId=testq.oid)[0]
-
- # fill up the queue, waiting until flow control is active
- sndr1 = self._start_qpid_send(testq.mgmt.name, count=testq.sendCount, content=testq.content)
- deadline = time() + 10
- while (not testq.mgmt.flowStopped) and time() < deadline:
- testq.mgmt.update()
-
- self.failUnless(testq.verifyStopped())
-
- # now consume enough messages to drop below the flow resume point, and
- # verify flow control is released.
- rcvr = self._start_qpid_receive(testq.mgmt.name, count=testq.consumeCount)
- rcvr.readlines() # prints a line for each received msg
- rcvr.close();
-
- # we should now be below the resume threshold
- self.failUnless(testq.verifyResumed())
-
- self._delete_queue(testq.mgmt.name)
- sndr1.close();
-
-
- def test_default_flow_count(self):
- """ Create a queue with count-based size limit, and verify the computed
- thresholds using the broker's default ratios.
- """
- class TestQ:
- def __init__(self, oid):
- # Use the broker-wide default flow thresholds of 80%/70% (see
- # run_queue_flow_limit_tests) to base the thresholds off the
- # queue's max_count configuration parameter
- # max_count == 1000 -> stop == 800, resume == 700
- self.oid = oid
- self.sendCount = 1000
- self.consumeCount = 301 # (send - resume) + 1 to reenable flow
- self.content = "X"
- def verifyStopped(self):
- self.mgmt.update()
- return self.mgmt.flowStopped and (self.mgmt.msgDepth > 800)
- def verifyResumed(self):
- self.mgmt.update()
- return (not self.mgmt.flowStopped) and (self.mgmt.msgDepth < 700)
-
- self.startQmf();
- oid = self._create_queue("test-X", max_count=1000)
- self.verify_limit(TestQ(oid))
-
-
- def test_default_flow_size(self):
- """ Create a queue with byte-based size limit, and verify the computed
- thresholds using the broker's default ratios.
- """
- class TestQ:
- def __init__(self, oid):
- # Use the broker-wide default flow thresholds of 80%/70% (see
- # run_queue_flow_limit_tests) to base the thresholds off the
- # queue's max_count configuration parameter
- # max_size == 10000 -> stop == 8000 bytes, resume == 7000 bytes
- self.oid = oid
- self.sendCount = 2000
- self.consumeCount = 601 # (send - resume) + 1 to reenable flow
- self.content = "XXXXX" # 5 bytes per message sent.
- def verifyStopped(self):
- self.mgmt.update()
- return self.mgmt.flowStopped and (self.mgmt.byteDepth > 8000)
- def verifyResumed(self):
- self.mgmt.update()
- return (not self.mgmt.flowStopped) and (self.mgmt.byteDepth < 7000)
-
- self.startQmf();
- oid = self._create_queue("test-Y", max_size=10000)
- self.verify_limit(TestQ(oid))
-
-
- def test_blocked_queue_delete(self):
- """ Verify that blocked senders are unblocked when a queue that is flow
- controlled is deleted.
- """
-
- class BlockedSender(Thread):
- def __init__(self, tester, queue, count, capacity=10):
- self.tester = tester
- self.queue = queue
- self.count = count
- self.capacity = capacity
- Thread.__init__(self)
- self.done = False
- self.start()
- def run(self):
- # spawn qpid-send
- p = self.tester._start_qpid_send(self.queue,
- self.count,
- self.capacity)
- p.close() # waits for qpid-send to complete
- self.done = True
-
- self.startQmf();
- oid = self._create_queue("kill-q", stop_size=10, resume_size=2)
- q = self.qmf.getObjects(_objectId=oid)[0]
- self.failIf(q.flowStopped)
-
- sender = BlockedSender(self, "kill-q", count=100)
- # wait for flow control
- deadline = time() + 10
- while (not q.flowStopped) and time() < deadline:
- q.update()
-
- self.failUnless(q.flowStopped)
- self.failIf(sender.done) # sender blocked
-
- self._delete_queue("kill-q")
- sender.join(5)
- self.failIf(sender.isAlive())
- self.failUnless(sender.done)
-
-
-
-
diff --git a/cpp/src/tests/replication_test b/cpp/src/tests/replication_test
index 8c37568875..691fd20b0c 100755
--- a/cpp/src/tests/replication_test
+++ b/cpp/src/tests/replication_test
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one
diff --git a/cpp/src/tests/run_acl_tests b/cpp/src/tests/run_acl_tests
index 41f41e20e1..aff13408ed 100755
--- a/cpp/src/tests/run_acl_tests
+++ b/cpp/src/tests/run_acl_tests
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one
diff --git a/cpp/src/tests/run_cli_tests b/cpp/src/tests/run_cli_tests
index ec5c71b646..3f1388b9f5 100755
--- a/cpp/src/tests/run_cli_tests
+++ b/cpp/src/tests/run_cli_tests
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one
@@ -70,8 +70,7 @@ stop_brokers() {
if test -d ${PYTHON_DIR} ; then
start_brokers
echo "Running CLI tests using brokers on ports $LOCAL_PORT $REMOTE_PORT"
- PYTHON_TESTS=${PYTHON_TESTS:-$*}
- $QPID_PYTHON_TEST -m cli_tests -b localhost:$LOCAL_PORT -Dremote-port=$REMOTE_PORT -Dcli-dir=$CLI_DIR $targs $PYTHON_TESTS $@
+ $QPID_PYTHON_TEST -m cli_tests -b localhost:$LOCAL_PORT -Dremote-port=$REMOTE_PORT -Dcli-dir=$CLI_DIR $targs $@
RETCODE=$?
stop_brokers
if test x$RETCODE != x0; then
diff --git a/cpp/src/tests/run_federation_sys_tests b/cpp/src/tests/run_federation_sys_tests
deleted file mode 100755
index f5f772d72e..0000000000
--- a/cpp/src/tests/run_federation_sys_tests
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Run the federation system tests.
-
-source ./test_env.sh
-
-MODULENAME=federation_sys
-
-# Test for clustering
-ps -u root | grep 'aisexec\|corosync' > /dev/null
-if (( $? == 0 )); then
- CLUSTERING_ENABLED=1
-else
- echo "WARNING: No clustering detected; tests using it will be ignored."
-fi
-
-# Test for long test
-if [[ "$1" == "LONG_TEST" ]]; then
- USE_LONG_TEST=1
- shift # get rid of this param so it is not treated as a test name
-fi
-
-trap stop_brokers INT TERM QUIT
-
-SKIPTESTS="-i federation_sys.E_* -i federation_sys.F_* -i federation_sys.G_* -i federation_sys.H_*"
-if [ -z ${USE_LONG_TEST} ]; then
- SKIPTESTS="-i federation_sys.A_Long* -i federation_sys.B_Long* ${SKIPTESTS}"
-fi
-echo "WARNING: Tests using persistence will be ignored."
-if [ -z ${CLUSTERING_ENABLED} ]; then
- SKIPTESTS="${SKIPTESTS} -i federation_sys.C_* -i federation_sys.D_*"
-elif [ -z ${USE_LONG_TEST} ]; then
- SKIPTESTS="${SKIPTESTS} -i federation_sys.C_Long* -i federation_sys.D_Long*"
-fi
-
-start_brokers() {
- start_broker() {
- ${QPIDD_EXEC} --daemon --port 0 --auth no --no-data-dir $1 > qpidd.port
- PORT=`cat qpidd.port`
- eval "$2=${PORT}"
- }
- start_broker "" LOCAL_PORT
- start_broker "" REMOTE_PORT
- if [ -n "${CLUSTERING_ENABLED}" ]; then
- start_broker "--load-module ${CLUSTER_LIB} --cluster-name test-cluster-1" CLUSTER_C1_1
- start_broker "--load-module ${CLUSTER_LIB} --cluster-name test-cluster-1" CLUSTER_C1_2
- start_broker "--load-module ${CLUSTER_LIB} --cluster-name test-cluster-2" CLUSTER_C2_1
- start_broker "--load-module ${CLUSTER_LIB} --cluster-name test-cluster-2" CLUSTER_C2_2
- fi
- rm qpidd.port
-}
-
-stop_brokers() {
- ${QPIDD_EXEC} -q --port ${LOCAL_PORT}
- ${QPIDD_EXEC} -q --port ${REMOTE_PORT}
- if [ -n "${CLUSTERING_ENABLED}" ]; then
- ${QPID_CLUSTER_EXEC} --all-stop --force localhost:${CLUSTER_C1_1}
- ${QPID_CLUSTER_EXEC} --all-stop --force localhost:${CLUSTER_C2_1}
- fi
-}
-
-if test -d ${PYTHON_DIR} ; then
- start_brokers
- if [ -z ${CLUSTERING_ENABLED} ]; then
- echo "Running federation tests using brokers on local port ${LOCAL_PORT}, remote port ${REMOTE_PORT} (NOTE: clustering is DISABLED)"
- else
- echo "Running federation tests using brokers on local port ${LOCAL_PORT}, remote port ${REMOTE_PORT}, local cluster nodes ${CLUSTER_C1_1} ${CLUSTER_C1_2}, remote cluster nodes ${CLUSTER_C2_1} ${CLUSTER_C2_2}"
- fi
- if [ -z ${USE_LONG_TEST} ]; then
- echo "NOTE: To run a full set of federation system tests, use \"make check-long\". To test with persistence, run the store version of this script."
- fi
- ${QPID_PYTHON_TEST} -m ${MODULENAME} ${SKIPTESTS} -b localhost:${REMOTE_PORT} -Dlocal-port=${LOCAL_PORT} -Dremote-port=${REMOTE_PORT} -Dlocal-cluster-ports="${CLUSTER_C1_1} ${CLUSTER_C1_2}" -Dremote-cluster-ports="${CLUSTER_C2_1} ${CLUSTER_C2_2}" $@
- RETCODE=$?
- stop_brokers
- if test x${RETCODE} != x0; then
- echo "FAIL federation tests"; exit 1;
- fi
-fi
diff --git a/cpp/src/tests/run_federation_tests b/cpp/src/tests/run_federation_tests
index 14af4807ba..4be27a2e85 100755
--- a/cpp/src/tests/run_federation_tests
+++ b/cpp/src/tests/run_federation_tests
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one
@@ -55,7 +55,7 @@ stop_brokers() {
if test -d ${PYTHON_DIR} ; then
start_brokers
echo "Running federation tests using brokers on ports $LOCAL_PORT $REMOTE_PORT $REMOTE_B1 $REMOTE_B2"
- $QPID_PYTHON_TEST -m federation "$SKIPTESTS" -b localhost:$LOCAL_PORT -Dremote-port=$REMOTE_PORT -Dextra-brokers="$REMOTE_B1 $REMOTE_B2" $@
+ $QPID_PYTHON_TEST -m federation $SKIPTESTS -b localhost:$LOCAL_PORT -Dremote-port=$REMOTE_PORT -Dextra-brokers="$REMOTE_B1 $REMOTE_B2" $@
RETCODE=$?
stop_brokers
if test x$RETCODE != x0; then
diff --git a/cpp/src/tests/run_header_test b/cpp/src/tests/run_header_test
index 34008132cc..07658343e7 100755
--- a/cpp/src/tests/run_header_test
+++ b/cpp/src/tests/run_header_test
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one
diff --git a/cpp/src/tests/run_long_federation_sys_tests b/cpp/src/tests/run_long_federation_sys_tests
deleted file mode 100644
index 69dc08d11c..0000000000
--- a/cpp/src/tests/run_long_federation_sys_tests
+++ /dev/null
@@ -1,24 +0,0 @@
-#! /bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Run the federation system tests (long version).
-
-./run_federation_sys_tests LONG_TEST $@
diff --git a/cpp/src/tests/run_msg_group_tests b/cpp/src/tests/run_msg_group_tests
deleted file mode 100755
index 8423022521..0000000000
--- a/cpp/src/tests/run_msg_group_tests
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#script to run a sequence of message group queue tests via make
-
-#setup path to find qpid-config and msg_group_test progs
-source ./test_env.sh
-
-export PATH=$PWD:$srcdir:$PYTHON_COMMANDS:$PATH
-
-#set port to connect to via env var
-test -s qpidd.port && QPID_PORT=`cat qpidd.port`
-
-#trap cleanup INT TERM QUIT
-
-QUEUE_NAME="group-queue"
-GROUP_KEY="My-Group-Id"
-
-BROKER_URL="${QPID_BROKER:-localhost}:${QPID_PORT:-5672}"
-
-run_test() {
- $@
-}
-
-##set -x
-
-declare -i i=0
-declare -a tests
-tests=("qpid-config -a $BROKER_URL add queue $QUEUE_NAME --group-header=${GROUP_KEY} --shared-groups"
- "msg_group_test -b $BROKER_URL -a $QUEUE_NAME --group-key $GROUP_KEY --messages 103 --group-size 13 --receivers 2 --senders 3 --capacity 3 --ack-frequency 7 --randomize-group-size --interleave 3"
- "msg_group_test -b $BROKER_URL -a $QUEUE_NAME --group-key $GROUP_KEY --messages 103 --group-size 13 --receivers 2 --senders 3 --capacity 7 --ack-frequency 7 --randomize-group-size"
- "qpid-config -a $BROKER_URL add queue ${QUEUE_NAME}-two --group-header=${GROUP_KEY} --shared-groups"
- "msg_group_test -b $BROKER_URL -a $QUEUE_NAME --group-key $GROUP_KEY --messages 103 --group-size 13 --receivers 2 --senders 3 --capacity 7 --ack-frequency 3 --randomize-group-size"
- "msg_group_test -b $BROKER_URL -a ${QUEUE_NAME}-two --group-key $GROUP_KEY --messages 103 --group-size 13 --receivers 2 --senders 3 --capacity 3 --ack-frequency 7 --randomize-group-size --interleave 5"
- "msg_group_test -b $BROKER_URL -a $QUEUE_NAME --group-key $GROUP_KEY --messages 59 --group-size 5 --receivers 2 --senders 3 --capacity 1 --ack-frequency 3 --randomize-group-size"
- "qpid-config -a $BROKER_URL del queue ${QUEUE_NAME}-two --force"
- "msg_group_test -b $BROKER_URL -a $QUEUE_NAME --group-key $GROUP_KEY --messages 59 --group-size 3 --receivers 2 --senders 3 --capacity 1 --ack-frequency 1 --randomize-group-size"
- "msg_group_test -b $BROKER_URL -a $QUEUE_NAME --group-key $GROUP_KEY --messages 211 --group-size 13 --receivers 2 --senders 3 --capacity 47 --ack-frequency 79 --interleave 53"
- "qpid-config -a $BROKER_URL del queue $QUEUE_NAME --force")
-
-while [ -n "${tests[i]}" ]; do
- run_test ${tests[i]}
- RETCODE=$?
- if test x$RETCODE != x0; then
- echo "FAILED message group test. Failed command: \"${tests[i]}\"";
- exit 1;
- fi
- i+=1
-done
diff --git a/cpp/src/tests/run_msg_group_tests_soak b/cpp/src/tests/run_msg_group_tests_soak
deleted file mode 100755
index 5231f74755..0000000000
--- a/cpp/src/tests/run_msg_group_tests_soak
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#script to run a sequence of long-running message group tests via make
-
-#setup path to find qpid-config and msg_group_test test progs
-source ./test_env.sh
-
-export PATH=$PWD:$srcdir:$PYTHON_COMMANDS:$PATH
-
-#set port to connect to via env var
-test -s qpidd.port && QPID_PORT=`cat qpidd.port`
-
-#trap cleanup INT TERM QUIT
-
-QUEUE_NAME="group-queue"
-GROUP_KEY="My-Group-Id"
-
-BROKER_URL="${QPID_BROKER:-localhost}:${QPID_PORT:-5672}"
-
-run_test() {
- $@
-}
-
-##set -x
-
-declare -i i=0
-declare -a tests
-tests=("qpid-config -a $BROKER_URL add queue $QUEUE_NAME --group-header=${GROUP_KEY} --shared-groups"
- "msg_group_test -b $BROKER_URL -a $QUEUE_NAME --group-key $GROUP_KEY --messages 10007 --receivers 3 --senders 5 --group-size 211 --randomize-group-size --capacity 47 --ack-frequency 97"
- "msg_group_test -b $BROKER_URL -a $QUEUE_NAME --group-key $GROUP_KEY --messages 10007 --receivers 3 --senders 5 --group-size 211 --randomize-group-size --capacity 79 --ack-frequency 79"
- "msg_group_test -b $BROKER_URL -a $QUEUE_NAME --group-key $GROUP_KEY --messages 10007 --receivers 3 --senders 5 --group-size 211 --randomize-group-size --capacity 97 --ack-frequency 47"
- "qpid-config -a $BROKER_URL del queue $QUEUE_NAME --force")
-
-while [ -n "${tests[i]}" ]; do
- run_test ${tests[i]}
- RETCODE=$?
- if test x$RETCODE != x0; then
- echo "FAILED message group test. Failed command: \"${tests[i]}\"";
- exit 1;
- fi
- i+=1
-done
diff --git a/cpp/src/tests/run_queue_flow_limit_tests b/cpp/src/tests/run_queue_flow_limit_tests
deleted file mode 100755
index f921cf5e7e..0000000000
--- a/cpp/src/tests/run_queue_flow_limit_tests
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/bin/sh
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Run tests against Queue producer flow control.
-
-source ./test_env.sh
-test -d $PYTHON_DIR || { echo "Skipping queue flow control tests, no python dir."; exit 0; }
-
-LOG_FILE=queue_flow_limit_test.log
-PORT=""
-
-trap stop_broker INT TERM QUIT
-
-error() {
- echo $*
- exit 1;
-}
-
-start_broker() {
- # Note: if you change the DEFAULT_THRESHOLDS, you will need to update queue_flow_limit_tests.py
- DEFAULT_THRESHOLDS="--default-flow-stop-threshold=80 --default-flow-resume-threshold=70"
- rm -rf $LOG_FILE
- PORT=$($QPIDD_EXEC $DEFAULT_THRESHOLDS --auth=no --no-module-dir --daemon --port=0 -t --log-to-file $LOG_FILE) || error "Could not start broker"
-}
-
-stop_broker() {
- test -n "$PORT" && $QPIDD_EXEC --no-module-dir --quit --port $PORT
-}
-
-start_broker
-echo "Running Queue flow limit tests using broker on port $PORT"
-$QPID_PYTHON_TEST -m queue_flow_limit_tests $SKIPTESTS -b localhost:$PORT $@
-RETCODE=$?
-stop_broker
-if test x$RETCODE != x0; then
- echo "FAIL queue flow limit tests"; exit 1;
-fi
-rm -rf $LOG_FILE
-
diff --git a/cpp/src/tests/run_store_tests.ps1 b/cpp/src/tests/run_store_tests.ps1
index b2f0b1ccd8..76b46737f0 100644
--- a/cpp/src/tests/run_store_tests.ps1
+++ b/cpp/src/tests/run_store_tests.ps1
@@ -111,7 +111,7 @@ Invoke-Expression "$prog --quit --port $env:QPID_PORT" | Write-Output
# Test 2... store.py starts/stops/restarts its own brokers
$tests = "*"
-$env:PYTHONPATH="$PYTHON_DIR;$QMF_LIB;$srcdir"
+$env:PYTHONPATH="$PYTHON_DIR;$srcdir"
$env:QPIDD_EXEC="$prog"
$env:STORE_LIB="$store_dir\store$suffix.dll"
if ($test_store -eq "MSSQL") {
diff --git a/cpp/src/tests/run_test b/cpp/src/tests/run_test
index 6ec1fd892b..4b227621bc 100755
--- a/cpp/src/tests/run_test
+++ b/cpp/src/tests/run_test
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
diff --git a/cpp/src/tests/sasl.mk b/cpp/src/tests/sasl.mk
index 69b24c3f8a..5b8419f604 100644
--- a/cpp/src/tests/sasl.mk
+++ b/cpp/src/tests/sasl.mk
@@ -30,21 +30,9 @@ check_PROGRAMS+=sasl_version
sasl_version_SOURCES=sasl_version.cpp
sasl_version_LDADD=$(lib_client)
-TESTS += run_cluster_authentication_test sasl_fed sasl_fed_ex_dynamic sasl_fed_ex_link sasl_fed_ex_queue sasl_fed_ex_route sasl_fed_ex_route_cluster sasl_fed_ex_link_cluster sasl_fed_ex_queue_cluster sasl_fed_ex_dynamic_cluster sasl_no_dir
+TESTS += run_cluster_authentication_test sasl_fed sasl_fed_ex
LONG_TESTS += run_cluster_authentication_soak
-EXTRA_DIST += run_cluster_authentication_test \
- sasl_fed \
- sasl_fed_ex \
- run_cluster_authentication_soak \
- sasl_fed_ex_dynamic \
- sasl_fed_ex_link \
- sasl_fed_ex_queue \
- sasl_fed_ex_route \
- sasl_fed_ex_dynamic_cluster \
- sasl_fed_ex_link_cluster \
- sasl_fed_ex_queue_cluster \
- sasl_fed_ex_route_cluster \
- sasl_no_dir
+EXTRA_DIST += run_cluster_authentication_test sasl_fed sasl_fed_ex run_cluster_authentication_soak
endif # HAVE_SASL
diff --git a/cpp/src/tests/sasl_fed b/cpp/src/tests/sasl_fed
index 884c44177c..9845a20838 100755
--- a/cpp/src/tests/sasl_fed
+++ b/cpp/src/tests/sasl_fed
@@ -123,7 +123,7 @@ n_messages=100
#--------------------------------------------------
#echo " Sending 100 messages to $broker_1_port "
#--------------------------------------------------
-$builddir/datagen --count $n_messages | $SENDER_EXEC --mechanism DIGEST-MD5 --username zag --password zag --exchange $EXCHANGE_NAME --routing-key $ROUTING_KEY --port $broker_1_port
+$builddir/datagen --count $n_messages | $SENDER_EXEC --username zag --password zag --exchange $EXCHANGE_NAME --routing-key $ROUTING_KEY --port $broker_1_port
sleep 5
diff --git a/cpp/src/tests/sasl_fed_ex b/cpp/src/tests/sasl_fed_ex
index 716a806874..0740650d6c 100755
--- a/cpp/src/tests/sasl_fed_ex
+++ b/cpp/src/tests/sasl_fed_ex
@@ -19,52 +19,22 @@
# under the License.
#
+
#===============================================================================
-# These tests create federated links between two brokers using SASL security.
+# This test creates a federated link between two brokers using SASL security.
# The SASL mechanism used is EXTERNAL, which is satisfied by SSL
# transport-layer security.
#===============================================================================
source ./test_env.sh
-script_name=`basename $0`
-
-if [ $# -lt 1 ] || [ $# -gt 2 ]
-then
- echo
- # These are the four different ways of creating links ( or routes+links )
- # that the qpid-route command provides.
- echo "Usage: ${script_name} dynamic|link|queue|route [cluster]"
- echo
- exit 1
-fi
-
-# Has the user told us to do clustering ? -----------
-clustering_flag=
-if [ $# -eq "2" ] && [ "$2" == "cluster" ]; then
- clustering_flag=true
-fi
-
-qpid_route_method=$1
-
-# Debugging print. --------------------------
-debug=
-function print {
- if [ "$debug" ]; then
- echo "${script_name}: $1"
- fi
-}
-
-print "=========== start sasl_fed_ex $* ============"
-
-
# This minimum value corresponds to sasl version 2.1.22
minimum_sasl_version=131350
sasl_version=`$QPID_TEST_EXEC_DIR/sasl_version`
-# This test is necessary because this sasl version is the first one that permits
+# This test is necessary because this sasl version is the first one that permits
# redirection of the sasl config file path.
if [ "$sasl_version" -lt "$minimum_sasl_version" ]; then
echo "sasl_fed: must have sasl version 2.1.22 or greater. ( Integer value: $minimum_sasl_version ) Version is: $sasl_version"
@@ -90,7 +60,6 @@ create_certs() {
delete_certs() {
if [[ -e ${CERT_DIR} ]] ; then
- print "removing cert dir ${CERT_DIR}"
rm -rf ${CERT_DIR}
fi
}
@@ -103,40 +72,22 @@ if [[ !(-x $CERTUTIL) ]] ; then
fi
delete_certs
-create_certs 2> /dev/null
-if [ ! $? ]; then
- error "Could not create test certificate"
- exit 1
-fi
+create_certs || error "Could not create test certificate"
-sasl_config_dir=$builddir/sasl_config
-tmp_root=${builddir}/sasl_fed_ex_temp
-print "results dir is ${tmp_root}"
-rm -rf ${tmp_root}
+sasl_config_file=$builddir/sasl_config
+
+my_random_number=$RANDOM
+tmp_root=/tmp/sasl_fed_$my_random_number
mkdir -p $tmp_root
SRC_SSL_PORT=6667
DST_SSL_PORT=6666
-SRC_SSL_PORT_2=6668
-DST_SSL_PORT_2=6669
-
SRC_TCP_PORT=5801
DST_TCP_PORT=5807
-SRC_TCP_PORT_2=5802
-DST_TCP_PORT_2=5803
-
-CLUSTER_NAME_SUFFIX=`hostname | tr '.' ' ' | awk '{print $1}'`
-CLUSTER_1_NAME=sasl_fed_ex_cluster_1_${CLUSTER_NAME_SUFFIX}
-CLUSTER_2_NAME=sasl_fed_ex_cluster_2_${CLUSTER_NAME_SUFFIX}
-
-print "CLUSTER_1_NAME == ${CLUSTER_1_NAME}"
-print "CLUSTER_2_NAME == ${CLUSTER_2_NAME}"
-
-SSL_LIB=${moduledir}/ssl.so
-CLUSTER_LIB=${moduledir}/cluster.so
+SSL_LIB=../.libs/ssl.so
export QPID_SSL_CERT_NAME=${TEST_HOSTNAME}
@@ -165,112 +116,52 @@ export QPID_SSL_CERT_NAME=${TEST_HOSTNAME}
# 5. DST pulls messages off the temp queue on SRC to itself.
#
-COMMON_BROKER_OPTIONS=" \
- --ssl-sasl-no-dict \
- --sasl-config=$sasl_config_dir \
- --ssl-require-client-authentication \
- --auth yes \
- --ssl-cert-db $CERT_DIR \
- --ssl-cert-password-file $CERT_PW_FILE \
- --ssl-cert-name $TEST_HOSTNAME \
- --no-data-dir \
- --no-module-dir \
- --load-module ${SSL_LIB} \
- --mgmt-enable=yes \
- --log-enable info+ \
- --log-source yes \
- --daemon "
-
-
-function start_brokers {
- if [ $1 ]; then
- # clustered ----------------------------------------
- print "Starting SRC cluster"
-
- print " src broker 1"
- $QPIDD_EXEC \
- --port=${SRC_TCP_PORT} \
- --ssl-port ${SRC_SSL_PORT} \
- ${COMMON_BROKER_OPTIONS} \
- --load-module ${CLUSTER_LIB} \
- --cluster-name ${CLUSTER_1_NAME} \
- --log-to-file $tmp_root/qpidd_src.log 2> /dev/null
-
- broker_ports[0]=${SRC_TCP_PORT}
-
- print " src broker 2"
- $QPIDD_EXEC \
- --port=${SRC_TCP_PORT_2} \
- --ssl-port ${SRC_SSL_PORT_2} \
- ${COMMON_BROKER_OPTIONS} \
- --load-module ${CLUSTER_LIB} \
- --cluster-name ${CLUSTER_1_NAME} \
- --log-to-file $tmp_root/qpidd_src_2.log 2> /dev/null
-
- broker_ports[1]=${SRC_TCP_PORT_2}
-
-
- print "Starting DST cluster"
-
- print " dst broker 1"
- $QPIDD_EXEC \
- --port=${DST_TCP_PORT} \
- --ssl-port ${DST_SSL_PORT} \
- ${COMMON_BROKER_OPTIONS} \
- --load-module ${CLUSTER_LIB} \
- --cluster-name ${CLUSTER_2_NAME} \
- --log-to-file $tmp_root/qpidd_dst.log 2> /dev/null
-
- broker_ports[2]=${DST_TCP_PORT}
-
- print " dst broker 2"
- $QPIDD_EXEC \
- --port=${DST_TCP_PORT_2} \
- --ssl-port ${DST_SSL_PORT_2} \
- ${COMMON_BROKER_OPTIONS} \
- --load-module ${CLUSTER_LIB} \
- --cluster-name ${CLUSTER_2_NAME} \
- --log-to-file $tmp_root/qpidd_dst_2.log 2> /dev/null
-
- broker_ports[3]=${DST_TCP_PORT_2}
-
- else
- # vanilla brokers --------------------------------
- print "Starting SRC broker"
- $QPIDD_EXEC \
- --port=${SRC_TCP_PORT} \
- --ssl-port ${SRC_SSL_PORT} \
- ${COMMON_BROKER_OPTIONS} \
- --log-to-file $tmp_root/qpidd_src.log 2> /dev/null
-
- broker_ports[0]=${SRC_TCP_PORT}
-
- print "Starting DST broker"
- $QPIDD_EXEC \
- --port=${DST_TCP_PORT} \
- --ssl-port ${DST_SSL_PORT} \
- ${COMMON_BROKER_OPTIONS} \
- --log-to-file $tmp_root/qpidd_dst.log 2> /dev/null
-
- broker_ports[1]=${DST_TCP_PORT}
- fi
-}
-
-function halt_brokers {
- n_brokers=${#broker_ports[@]}
- print "Halting ${n_brokers} brokers."
- for i in $(seq 0 $((${n_brokers} - 1)))
- do
- halt_port=${broker_ports[$i]}
- print "Halting broker $i on port ${halt_port}"
- $QPIDD_EXEC --port ${halt_port} --quit
- done
-
-}
-
-start_brokers $clustering_flag
+#echo "-----------------------"
+#echo "Starting SRC broker"
+#echo "-----------------------"
+$QPIDD_EXEC \
+ --port=${SRC_TCP_PORT} \
+ --ssl-port ${SRC_SSL_PORT} \
+ --ssl-sasl-no-dict \
+ --sasl-config=$sasl_config_file \
+ --ssl-require-client-authentication \
+ --auth yes \
+ --ssl-cert-db $CERT_DIR \
+ --ssl-cert-password-file $CERT_PW_FILE \
+ --ssl-cert-name $TEST_HOSTNAME \
+ --no-data-dir \
+ --no-module-dir \
+ --load-module ${SSL_LIB} \
+ --mgmt-enable=yes \
+ --log-enable info+ \
+ --log-source yes \
+ --daemon \
+ --log-to-file $tmp_root/qpidd_src.log 2> /dev/null
+
+
+#echo "-----------------------"
+#echo "Starting DST broker"
+#echo "-----------------------"
+$QPIDD_EXEC \
+ --port=${DST_TCP_PORT} \
+ --ssl-port ${DST_SSL_PORT} \
+ --ssl-cert-db $CERT_DIR \
+ --ssl-cert-password-file $CERT_PW_FILE \
+ --ssl-cert-name $TEST_HOSTNAME \
+ --ssl-sasl-no-dict \
+ --ssl-require-client-authentication \
+ --sasl-config=$sasl_config_file \
+ --no-data-dir \
+ --no-module-dir \
+ --load-module ${SSL_LIB} \
+ --mgmt-enable=yes \
+ --log-enable info+ \
+ --log-source yes \
+ --daemon \
+ $COMMON_BROKER_OPTIONS \
+ --log-to-file $tmp_root/qpidd_dst.log 2> /dev/null
# I am not randomizing these names, because this test creates its own brokers.
@@ -279,83 +170,76 @@ ROUTING_KEY=sasl_fed_queue
EXCHANGE_NAME=sasl_fedex
-print "add exchanges"
+#echo "-----------------------"
+#echo "add exchanges"
+#echo "-----------------------"
$QPID_CONFIG_EXEC -a localhost:${SRC_TCP_PORT} add exchange direct $EXCHANGE_NAME
$QPID_CONFIG_EXEC -a localhost:${DST_TCP_PORT} add exchange direct $EXCHANGE_NAME
-print "add queues"
+#echo "-----------------------"
+#echo "add queues"
+#echo "-----------------------"
$QPID_CONFIG_EXEC -a localhost:${SRC_TCP_PORT} add queue $QUEUE_NAME
$QPID_CONFIG_EXEC -a localhost:${DST_TCP_PORT} add queue $QUEUE_NAME
-print "create bindings"
+#echo "-----------------------"
+#echo "create bindings"
+#echo "-----------------------"
$QPID_CONFIG_EXEC -a localhost:${SRC_TCP_PORT} bind $EXCHANGE_NAME $QUEUE_NAME $ROUTING_KEY
$QPID_CONFIG_EXEC -a localhost:${DST_TCP_PORT} bind $EXCHANGE_NAME $QUEUE_NAME $ROUTING_KEY
-#
+#echo "-----------------------"
+#echo "qpid-route route add"
+#echo "-----------------------"
# NOTE: The SRC broker *must* be referred to as $TEST_HOSTNAME, and not as "localhost".
# It must be referred to by the exact string given as the Common Name (CN) in the cert,
# which was created in the function create_certs, above.
+$QPID_ROUTE_EXEC route add localhost:${DST_TCP_PORT} $TEST_HOSTNAME:${SRC_SSL_PORT} -t ssl $EXCHANGE_NAME $ROUTING_KEY "" "" EXTERNAL
+#echo "-----------------------"
+#echo "view the route :"
+#echo "-----------------------"
+#$PYTHON_COMMANDS/qpid-route route list localhost:${DST_TCP_PORT}
+# I don't know how to avoid this sleep yet. It has to come after route-creation.
+sleep 5
-#----------------------------------------------------------------
-# Use qpid-route to create the link, or the link+route, depending
-# on which of its several methods was requested.
-#----------------------------------------------------------------
-if [ ${qpid_route_method} == "dynamic" ]; then
- print "dynamic add"
- $QPID_ROUTE_EXEC -t ssl dynamic add localhost:${DST_TCP_PORT} $TEST_HOSTNAME:${SRC_SSL_PORT} $EXCHANGE_NAME "" "" EXTERNAL
-elif [ ${qpid_route_method} == "link" ]; then
- print "link add"
- $QPID_ROUTE_EXEC -t ssl link add localhost:${DST_TCP_PORT} $TEST_HOSTNAME:${SRC_SSL_PORT} EXTERNAL
-elif [ ${qpid_route_method} == "queue" ]; then
- print "queue add"
- $QPID_ROUTE_EXEC -t ssl queue add localhost:${DST_TCP_PORT} $TEST_HOSTNAME:${SRC_SSL_PORT} $EXCHANGE_NAME $ROUTING_KEY EXTERNAL
-elif [ ${qpid_route_method} == "route" ]; then
- print "route add"
- $QPID_ROUTE_EXEC -t ssl route add localhost:${DST_TCP_PORT} $TEST_HOSTNAME:${SRC_SSL_PORT} $EXCHANGE_NAME $ROUTING_KEY "" "" EXTERNAL
-else
- echo "unknown method: |${qpid_route_method}|"
- echo " choices are: dynamic|link|queue|route "
- halt_brokers
- exit 1
-fi
+n_messages=100
+./datagen --count ${n_messages} | ./sender --broker localhost --port ${SRC_TCP_PORT} --exchange ${EXCHANGE_NAME} --routing-key ${ROUTING_KEY} --mechanism ANONYMOUS
-# I don't know how to avoid this sleep yet. It has to come after route-creation
-# to avoid false negatives.
-sleep 5
-# This should work the same whether or not we are running a clustered test.
-# In the case of clustered tests, the status is not printed by qpid_route.
-# So in either case, I will look only at the transport field, which should be "ssl".
-print "check the link"
-link_status=$($QPID_ROUTE_EXEC link list localhost:${DST_TCP_PORT} | tail -1 | awk '{print $3}')
+#echo "-----------------------"
+#echo "Examine DST Broker"
+#echo "-----------------------"
+dst_message_count=`qpid-stat -q localhost:${DST_TCP_PORT} | grep sasl_fed_queue | awk '{print $2}'`
-halt_brokers
-sleep 1
+#echo "-----------------------"
+#echo "Asking brokers to quit."
+#echo "-----------------------"
+$QPIDD_EXEC --port ${SRC_TCP_PORT} --quit
+$QPIDD_EXEC --port ${DST_TCP_PORT} --quit
-if [ ! ${link_status} ]; then
- print "link_status is empty"
- print "result: fail"
- exit 2
-fi
-if [ ${link_status} == "ssl" ]; then
- print "result: good"
- # Only remove the tmp_root on success, to permit debugging.
- print "Removing temporary directory $tmp_root"
- rm -rf $tmp_root
+#echo "-----------------------"
+#echo "Removing temporary directory $tmp_root"
+#echo "-----------------------"
+rm -rf $tmp_root
+
+if [ "$dst_message_count" -eq "$n_messages" ]; then
+ #echo "good: |$dst_message_count| == |$n_messages|"
exit 0
+else
+ #echo "not ideal: |$dst_message_count| != |$n_messages|"
+ exit 1
fi
-print "link_status has a bad value: ${link_status}"
-print "result: fail"
-exit 3
+
+
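The magic number 131350 in the version check above is cyrus-sasl's packed version encoding, (major << 16) | (minor << 8) | step, which for 2.1.22 gives 131072 + 256 + 22 = 131350. The same arithmetic as a one-line shell sketch:

    echo $(( (2 << 16) | (1 << 8) | 22 ))   # 131350, the minimum_sasl_version checked above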
diff --git a/cpp/src/tests/sasl_fed_ex_dynamic b/cpp/src/tests/sasl_fed_ex_dynamic
deleted file mode 100755
index c20b8d69a0..0000000000
--- a/cpp/src/tests/sasl_fed_ex_dynamic
+++ /dev/null
@@ -1,27 +0,0 @@
-#! /bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-
-source ./test_env.sh
-
-${srcdir}/sasl_fed_ex dynamic
-
-
diff --git a/cpp/src/tests/sasl_fed_ex_dynamic_cluster b/cpp/src/tests/sasl_fed_ex_dynamic_cluster
deleted file mode 100755
index b0cceccecb..0000000000
--- a/cpp/src/tests/sasl_fed_ex_dynamic_cluster
+++ /dev/null
@@ -1,28 +0,0 @@
-#! /bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-
-source ./test_env.sh
-source $srcdir/ais_check
-
-with_ais_group ${srcdir}/sasl_fed_ex dynamic cluster
-
-
diff --git a/cpp/src/tests/sasl_fed_ex_link b/cpp/src/tests/sasl_fed_ex_link
deleted file mode 100755
index 7b232d4874..0000000000
--- a/cpp/src/tests/sasl_fed_ex_link
+++ /dev/null
@@ -1,27 +0,0 @@
-#! /bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-
-source ./test_env.sh
-
-${srcdir}/sasl_fed_ex link
-
-
diff --git a/cpp/src/tests/sasl_fed_ex_link_cluster b/cpp/src/tests/sasl_fed_ex_link_cluster
deleted file mode 100755
index 4139300b12..0000000000
--- a/cpp/src/tests/sasl_fed_ex_link_cluster
+++ /dev/null
@@ -1,28 +0,0 @@
-#! /bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-
-source ./test_env.sh
-source $srcdir/ais_check
-
-with_ais_group ${srcdir}/sasl_fed_ex link cluster
-
-
diff --git a/cpp/src/tests/sasl_fed_ex_queue b/cpp/src/tests/sasl_fed_ex_queue
deleted file mode 100755
index be0c10cf63..0000000000
--- a/cpp/src/tests/sasl_fed_ex_queue
+++ /dev/null
@@ -1,27 +0,0 @@
-#! /bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-
-source ./test_env.sh
-
-${srcdir}/sasl_fed_ex queue
-
-
diff --git a/cpp/src/tests/sasl_fed_ex_queue_cluster b/cpp/src/tests/sasl_fed_ex_queue_cluster
deleted file mode 100755
index f251420e08..0000000000
--- a/cpp/src/tests/sasl_fed_ex_queue_cluster
+++ /dev/null
@@ -1,28 +0,0 @@
-#! /bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-
-source ./test_env.sh
-source ${srcdir}/ais_check
-
-with_ais_group ${srcdir}/sasl_fed_ex queue cluster
-
-
diff --git a/cpp/src/tests/sasl_fed_ex_route b/cpp/src/tests/sasl_fed_ex_route
deleted file mode 100755
index dd5c4f3cac..0000000000
--- a/cpp/src/tests/sasl_fed_ex_route
+++ /dev/null
@@ -1,27 +0,0 @@
-#! /bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-
-source ./test_env.sh
-
-${srcdir}/sasl_fed_ex route
-
-
diff --git a/cpp/src/tests/sasl_fed_ex_route_cluster b/cpp/src/tests/sasl_fed_ex_route_cluster
deleted file mode 100755
index a5d1542def..0000000000
--- a/cpp/src/tests/sasl_fed_ex_route_cluster
+++ /dev/null
@@ -1,28 +0,0 @@
-#! /bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-
-source ./test_env.sh
-source ${srcdir}/ais_check
-
-with_ais_group ${srcdir}/sasl_fed_ex route cluster
-
-
diff --git a/cpp/src/tests/sasl_no_dir b/cpp/src/tests/sasl_no_dir
deleted file mode 100755
index 15a36014bb..0000000000
--- a/cpp/src/tests/sasl_no_dir
+++ /dev/null
@@ -1,218 +0,0 @@
-#! /bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-source ./test_env.sh
-
-script_name=`basename $0`
-
-# This minimum value corresponds to sasl version 2.1.22
-minimum_sasl_version=131350
-
-sasl_version=`$QPID_TEST_EXEC_DIR/sasl_version`
-
-# This test is necessary because this sasl version is the first one that permits
-# redirection of the sasl config file path.
-if [ "$sasl_version" -lt "$minimum_sasl_version" ]; then
- echo "sasl_fed: must have sasl version 2.1.22 or greater. ( Integer value: $minimum_sasl_version ) Version is: $sasl_version"
- exit 0
-fi
-
-
-sasl_config_dir=$builddir/sasl_config
-
-
-# Debugging print. --------------------------
-debug=
-function print {
- if [ "$debug" ]; then
- echo "${script_name}: $1"
- fi
-}
-
-
-my_random_number=$RANDOM
-tmp_root=/tmp/sasl_fed_$my_random_number
-mkdir -p $tmp_root
-
-
-LOG_FILE=$tmp_root/qpidd.log
-
-# If you want to see this test fail, just comment out this 'mv' command.
-print "Moving sasl configuration dir."
-mv ${sasl_config_dir} ${sasl_config_dir}-
-
-
-#--------------------------------------------------
-print " Starting broker"
-#--------------------------------------------------
-$QPIDD_EXEC \
- -p 0 \
- --no-data-dir \
- --auth=yes \
- --mgmt-enable=yes \
- --log-enable info+ \
- --log-source yes \
- --log-to-file ${LOG_FILE} \
- --sasl-config=$sasl_config_dir \
- -d 2> /dev/null 1> $tmp_root/broker_port
-
-
-
-# If it works right, the output will look something like this: ( two lines long )
-# Daemon startup failed: SASL: sasl_set_path failed: no such directory: /home/mick/trunk/qpid/cpp/src/tests/sasl_config (qpid/broker/SaslAuthenticator.cpp:112)
-# 2011-10-13 14:07:00 critical qpidd.cpp:83: Unexpected error: Daemon startup failed: SASL: sasl_set_path failed: no such directory: /home/mick/trunk/qpid/cpp/src/tests/sasl_config (qpid/broker/SaslAuthenticator.cpp:112)
-
-result=`cat ${LOG_FILE} | grep "sasl_set_path failed: no such directory" | wc -l `
-
-#--------------------------------------------------
-print "Restore the Sasl config dir to its original place."
-#--------------------------------------------------
-mv ${sasl_config_dir}- ${sasl_config_dir}
-
-if [ "2" -eq ${result} ]; then
- print "result: success"
- rm -rf $tmp_root
- exit 0
-fi
-
-
-# If this test fails, the broker is still alive.
-# Kill it.
-broker_port=`cat $tmp_root/broker_port`
-#--------------------------------------------------
-print "Asking broker to quit."
-#--------------------------------------------------
-$QPIDD_EXEC --port $broker_port --quit
-
-rm -rf $tmp_root
-
-print "result: fail"
-exit 1
-
-#! /bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-source ./test_env.sh
-
-script_name=`basename $0`
-
-# This minimum value corresponds to sasl version 2.1.22
-minimum_sasl_version=131350
-
-sasl_version=`$QPID_TEST_EXEC_DIR/sasl_version`
-
-# This test is necessary because this sasl version is the first one that permits
-# redirection of the sasl config file path.
-if [ "$sasl_version" -lt "$minimum_sasl_version" ]; then
- echo "sasl_fed: must have sasl version 2.1.22 or greater. ( Integer value: $minimum_sasl_version ) Version is: $sasl_version"
- exit 0
-fi
-
-
-sasl_config_dir=$builddir/sasl_config
-
-
-# Debugging print. --------------------------
-debug=
-function print {
- if [ "$debug" ]; then
- echo "${script_name}: $1"
- fi
-}
-
-
-my_random_number=$RANDOM
-tmp_root=/tmp/sasl_fed_$my_random_number
-mkdir -p $tmp_root
-
-
-LOG_FILE=$tmp_root/qpidd.log
-
-# If you want to see this test fail, just comment out this 'mv' command.
-print "Moving sasl configuration dir."
-mv ${sasl_config_dir} ${sasl_config_dir}-
-
-
-#--------------------------------------------------
-print " Starting broker"
-#--------------------------------------------------
-$QPIDD_EXEC \
- -p 0 \
- --no-data-dir \
- --auth=yes \
- --mgmt-enable=yes \
- --log-enable info+ \
- --log-source yes \
- --log-to-file ${LOG_FILE} \
- --sasl-config=$sasl_config_dir \
- -d 2> /dev/null 1> $tmp_root/broker_port
-
-
-
-# If it works right, the output will look something like this: ( two lines long )
-# Daemon startup failed: SASL: sasl_set_path failed: no such directory: /home/mick/trunk/qpid/cpp/src/tests/sasl_config (qpid/broker/SaslAuthenticator.cpp:112)
-# 2011-10-13 14:07:00 critical qpidd.cpp:83: Unexpected error: Daemon startup failed: SASL: sasl_set_path failed: no such directory: /home/mick/trunk/qpid/cpp/src/tests/sasl_config (qpid/broker/SaslAuthenticator.cpp:112)
-
-result=`cat ${LOG_FILE} | grep "sasl_set_path failed: no such directory" | wc -l `
-
-#--------------------------------------------------
-print "Restore the Sasl config dir to its original place."
-#--------------------------------------------------
-mv ${sasl_config_dir}- ${sasl_config_dir}
-
-if [ "2" -eq ${result} ]; then
- print "result: success"
- rm -rf $tmp_root
- exit 0
-fi
-
-
-# If this test fails, the broker is still alive.
-# Kill it.
-broker_port=`cat $tmp_root/broker_port`
-#--------------------------------------------------
-print "Asking broker to quit."
-#--------------------------------------------------
-$QPIDD_EXEC --port $broker_port --quit
-
-rm -rf $tmp_root
-
-print "result: fail"
-exit 1
-
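For reference, the deleted sasl_no_dir test decided pass or fail by counting how often the startup error appears in the broker log; it expects the message twice, once from the daemon's startup failure and once in the critical log entry. An equivalent sketch of that check:

    result=$(grep -c "sasl_set_path failed: no such directory" "$LOG_FILE")
    if [ "$result" -eq 2 ]; then echo "result: success"; else echo "result: fail"; fi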
diff --git a/cpp/src/tests/sasl_test_setup.sh b/cpp/src/tests/sasl_test_setup.sh
index 3e69c0f02b..6395ba6ec3 100755
--- a/cpp/src/tests/sasl_test_setup.sh
+++ b/cpp/src/tests/sasl_test_setup.sh
@@ -30,7 +30,6 @@ pwcheck_method: auxprop
auxprop_plugin: sasldb
sasldb_path: $PWD/sasl_config/qpidd.sasldb
sql_select: dummy select
-mech_list: ANONYMOUS PLAIN DIGEST-MD5 EXTERNAL
EOF
# Populate temporary sasl db.
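Once sasl_test_setup.sh has populated the temporary sasldb, its entries can be inspected with the standard cyrus-sasl tooling. A sketch, assuming sasldblistusers2 is installed and the path matches the sasldb_path written above:

    sasldblistusers2 -f $PWD/sasl_config/qpidd.sasldb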
diff --git a/cpp/src/tests/sender.cpp b/cpp/src/tests/sender.cpp
index 063b5e87dc..9850e851da 100644
--- a/cpp/src/tests/sender.cpp
+++ b/cpp/src/tests/sender.cpp
@@ -120,7 +120,7 @@ void Sender::execute(AsyncSession& session, bool isRetry)
string data;
while (getline(std::cin, data)) {
message.setData(data);
- //message.getHeaders().setInt("SN", ++sent);
+ message.getHeaders().setInt("SN", ++sent);
string matchKey;
if (lvqMatchValues && getline(lvqMatchValues, matchKey)) {
message.getHeaders().setString(QueueOptions::strLVQMatchProperty, matchKey);
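With the SN header restored, every line the sender reads from stdin becomes one message carrying an incrementing integer header named SN. The test scripts drive it by piping datagen into the sender, as sasl_fed does above; a sketch, with $broker_port standing in for whichever port the test uses:

    $builddir/datagen --count 100 | $SENDER_EXEC --port $broker_port \
        --exchange $EXCHANGE_NAME --routing-key $ROUTING_KEY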
diff --git a/cpp/src/tests/ssl_test b/cpp/src/tests/ssl_test
index 2a56c0b80e..04584f169d 100755
--- a/cpp/src/tests/ssl_test
+++ b/cpp/src/tests/ssl_test
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one
@@ -47,13 +47,9 @@ delete_certs() {
fi
}
-COMMON_OPTS="--daemon --no-data-dir --no-module-dir --config $CONFIG --load-module $SSL_LIB --ssl-cert-db $CERT_DIR --ssl-cert-password-file $CERT_PW_FILE --ssl-cert-name $TEST_HOSTNAME --require-encryption"
+COMMON_OPTS="--daemon --no-data-dir --no-module-dir --auth no --config $CONFIG --load-module $SSL_LIB --ssl-cert-db $CERT_DIR --ssl-cert-password-file $CERT_PW_FILE --ssl-cert-name $TEST_HOSTNAME --require-encryption"
start_broker() { # $1 = extra opts
- ../qpidd --transport ssl --port 0 --ssl-port 0 $COMMON_OPTS --auth no $1;
-}
-
-start_authenticating_broker() {
- ../qpidd --transport ssl --port 0 --ssl-port 0 $COMMON_OPTS --ssl-sasl-no-dict --ssl-require-client-authentication --auth yes;
+ ../qpidd --transport ssl --port 0 --ssl-port 0 $COMMON_OPTS $1;
}
stop_brokers() {
@@ -97,7 +93,7 @@ test "$MSG" = "hello" || { echo "receive failed '$MSG' != 'hello'"; exit 1; }
#### Client Authentication tests
-PORT2=`start_authenticating_broker` || error "Could not start broker"
+PORT2=`start_broker --ssl-require-client-authentication` || error "Could not start broker"
echo "Running SSL client authentication test on port $PORT2"
URL=amqp:ssl:$TEST_HOSTNAME:$PORT2
@@ -125,7 +121,7 @@ pick_port() {
echo $PICK
}
ssl_cluster_broker() { # $1 = port
- ../qpidd $COMMON_OPTS --auth no --load-module $CLUSTER_LIB --cluster-name ssl_test.$HOSTNAME.$$ --cluster-url amqp:ssl:$TEST_HOSTNAME:$1 --port 0 --ssl-port $1 --transport ssl > /dev/null
+ ../qpidd $COMMON_OPTS --load-module $CLUSTER_LIB --cluster-name ssl_test.$HOSTNAME.$$ --cluster-url amqp:ssl:$TEST_HOSTNAME:$1 --port 0 --ssl-port $1 --transport ssl > /dev/null
# Wait for broker to be ready
qpid-ping -Pssl -b $TEST_HOSTNAME -qp $1 || { echo "Cannot connect to broker on $1"; exit 1; }
echo "Running SSL cluster broker on port $1"
diff --git a/cpp/src/tests/windows/DisableWin32ErrorWindows.cpp b/cpp/src/tests/windows/DisableWin32ErrorWindows.cpp
index 024f20b147..a0b665db73 100644
--- a/cpp/src/tests/windows/DisableWin32ErrorWindows.cpp
+++ b/cpp/src/tests/windows/DisableWin32ErrorWindows.cpp
@@ -26,9 +26,7 @@
// include this file with the executable being built. If the default
// behaviors are desired, don't include this file in the build.
-#if defined(_MSC_VER)
#include <crtdbg.h>
-#endif
#include <windows.h>
#include <iostream>
@@ -55,14 +53,12 @@ static redirect_errors_to_stderr block;
redirect_errors_to_stderr::redirect_errors_to_stderr()
{
-#if defined(_MSC_VER)
_CrtSetReportMode (_CRT_WARN, _CRTDBG_MODE_FILE);
_CrtSetReportFile (_CRT_WARN, _CRTDBG_FILE_STDERR);
_CrtSetReportMode (_CRT_ERROR, _CRTDBG_MODE_FILE);
_CrtSetReportFile (_CRT_ERROR, _CRTDBG_FILE_STDERR);
_CrtSetReportMode (_CRT_ASSERT, _CRTDBG_MODE_FILE);
_CrtSetReportFile (_CRT_ASSERT, _CRTDBG_FILE_STDERR);
-#endif
// Prevent the system from displaying the critical-error-handler
// and can't-open-file message boxes.