summaryrefslogtreecommitdiff
path: root/qpid/cpp/src
diff options
context:
space:
mode:
authorAlan Conway <aconway@apache.org>2012-07-13 18:33:32 +0000
committerAlan Conway <aconway@apache.org>2012-07-13 18:33:32 +0000
commit9170505cdd578ca48679d1245da6a4c1acefc2b0 (patch)
tree7653301168277d0fcce5e1940e9eaeaa5681814b /qpid/cpp/src
parent2e20fb9010a9aa2d3f81cc69540fbe0c3771a394 (diff)
downloadqpid-python-9170505cdd578ca48679d1245da6a4c1acefc2b0.tar.gz
QPID-4136: HA sporadic failures in ha_tests
Caused by a test bug: the default timeout was too short. Fixed the test to set a high timeout and increased the default timeout. git-svn-id: https://svn.apache.org/repos/asf/qpid/trunk@1361323 13f79535-47bb-0310-9956-ffa450edef68
Diffstat (limited to 'qpid/cpp/src')
-rw-r--r--qpid/cpp/src/qpid/ha/Settings.h2
-rwxr-xr-xqpid/cpp/src/tests/ha_tests.py6
2 files changed, 6 insertions, 2 deletions
diff --git a/qpid/cpp/src/qpid/ha/Settings.h b/qpid/cpp/src/qpid/ha/Settings.h
index 1a612aee66..37235b5c79 100644
--- a/qpid/cpp/src/qpid/ha/Settings.h
+++ b/qpid/cpp/src/qpid/ha/Settings.h
@@ -34,7 +34,7 @@ namespace ha {
class Settings
{
public:
- Settings() : cluster(false), replicateDefault(NONE), backupTimeout(2)
+ Settings() : cluster(false), replicateDefault(NONE), backupTimeout(5)
{}
bool cluster; // True if we are a cluster member.
diff --git a/qpid/cpp/src/tests/ha_tests.py b/qpid/cpp/src/tests/ha_tests.py
index 01dac2664d..f900a841d5 100755
--- a/qpid/cpp/src/tests/ha_tests.py
+++ b/qpid/cpp/src/tests/ha_tests.py
@@ -857,7 +857,8 @@ class RecoveryTests(BrokerTest):
"""Verify that the broker holds queues without sufficient backup,
i.e. does not complete messages sent to those queues."""
- cluster = HaCluster(self, 4);
+ # We don't want backups to time out for this test, set long timeout.
+ cluster = HaCluster(self, 4, args=["--ha-backup-timeout=100000"]);
# Wait for the primary to be ready
cluster[0].wait_status("active")
# Create a queue before the failure.
@@ -877,6 +878,7 @@ class RecoveryTests(BrokerTest):
# Create a queue after the failure
s2 = cluster.connect(3).session().sender("q2;{create:always}")
+
# Verify that messages sent are not completed
for i in xrange(100,200): s1.send(str(i), sync=False); s2.send(str(i), sync=False)
assertSyncTimeout(s1)
@@ -886,6 +888,7 @@ class RecoveryTests(BrokerTest):
# Verify we can receive even if sending is on hold:
cluster[3].assert_browse("q1", [str(i) for i in range(100)+range(100,200)])
+
# Restart backups, verify queues are released only when both backups are up
cluster.restart(1)
assertSyncTimeout(s1)
@@ -895,6 +898,7 @@ class RecoveryTests(BrokerTest):
self.assertEqual(cluster[3].ha_status(), "recovering")
cluster.restart(2)
+ # Verify everything is up to date and active
def settled(sender): sender.sync(); return sender.unsettled() == 0;
assert retry(lambda: settled(s1)), "Unsetttled=%s"%(s1.unsettled())
assert retry(lambda: settled(s2)), "Unsetttled=%s"%(s2.unsettled())