path: root/qpid/cpp/src/tests/ha_tests.py
author     Alan Conway <aconway@apache.org>  2012-11-14 16:04:04 +0000
committer  Alan Conway <aconway@apache.org>  2012-11-14 16:04:04 +0000
commit     3b9fdc8e68cb42e6ebfa75f3dc756fd54369f735 (patch)
tree       128d1ac54182753c4d8f2d49ce38a5d88bea7909 /qpid/cpp/src/tests/ha_tests.py
parent     bce4ad2c993a34d240b1166ab6321bc14b78c612 (diff)
download   qpid-python-3b9fdc8e68cb42e6ebfa75f3dc756fd54369f735.tar.gz
QPID-4428: HA add UUID tag to avoid using an out of date queue/exchange.
Imagine a cluster with primary A and backups B and C. A queue Q is created on A and replicated to B and C. Now A dies and B takes over as primary. Before C can connect to B, a client destroys Q and creates a new queue with the same name. When C connects it sees Q and incorrectly assumes it is the same Q that it has already replicated, so C ends up with an inconsistent replica of Q.

The fix is to tag queues and exchanges with a UUID so a backup can tell that a queue is not the same as the one it has already replicated, even if the names are the same. The same applies to exchanges.

Also:
- Minor improvements to printing UUIDs in a FieldTable.
- Fixed comparison of void Variants; added operator !=.

git-svn-id: https://svn.apache.org/repos/asf/qpid/trunk@1409241 13f79535-47bb-0310-9956-ffa450edef68
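To make the idea behind the UUID tag concrete, here is a minimal Python sketch of the check a backup could perform when it reconnects and the primary advertises a queue. This is an illustration only, not the broker's C++ implementation; the ReplicatedQueue and Backup classes and the on_primary_queue method are hypothetical names invented for the sketch.

    import uuid

    class ReplicatedQueue(object):
        """Stand-in for a backup's local replica of a queue."""
        def __init__(self, name, tag):
            self.name = name
            self.tag = tag          # identity UUID assigned when the queue was created

    class Backup(object):
        """Stand-in for a backup broker's replication bookkeeping."""
        def __init__(self):
            self.replicas = {}      # queue name -> ReplicatedQueue

        def on_primary_queue(self, name, primary_tag):
            """Called when the primary advertises a queue to a (re)connecting backup."""
            local = self.replicas.get(name)
            if local is not None and local.tag != primary_tag:
                # Same name, different identity: the queue was destroyed and
                # re-created while we were disconnected, so drop the stale replica.
                del self.replicas[name]
                local = None
            if local is None:
                # Replicate from scratch, adopting the primary's identity tag.
                self.replicas[name] = ReplicatedQueue(name, primary_tag)

For example, calling on_primary_queue("qq", tag1) and later on_primary_queue("qq", tag2) with a different tag2 rebuilds the replica instead of silently reusing the old one; with name-only comparison the second call would be a no-op and the backup would keep stale contents.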
Diffstat (limited to 'qpid/cpp/src/tests/ha_tests.py')
-rwxr-xr-x  qpid/cpp/src/tests/ha_tests.py  36
1 file changed, 36 insertions, 0 deletions
diff --git a/qpid/cpp/src/tests/ha_tests.py b/qpid/cpp/src/tests/ha_tests.py
index 60c68730f8..d25e68b29c 100755
--- a/qpid/cpp/src/tests/ha_tests.py
+++ b/qpid/cpp/src/tests/ha_tests.py
@@ -270,6 +270,7 @@ class ReplicationTests(HaBrokerTest):
def test_qpid_config_replication(self):
"""Set up replication via qpid-config"""
brokers = HaCluster(self,2)
+ brokers[0].wait_status("active")
brokers[0].config_declare("q","all")
brokers[0].connect().session().sender("q").send("foo")
brokers[1].assert_browse_backup("q", ["foo"])
@@ -830,6 +831,41 @@ acl deny all all
verify_qmf_events("q2")
finally: l.restore()
+ def test_missed_recreate(self):
+ """If a queue or exchange is destroyed and one with the same name re-created
+ while a backup is disconnected, the backup should also delete/recreate
+ the object when it re-connects"""
+ cluster = HaCluster(self, 3)
+ sn = cluster[0].connect().session()
+ # Create a queue with messages
+ s = sn.sender("qq;{create:always}")
+ msgs = [str(i) for i in xrange(3)]
+ for m in msgs: s.send(m)
+ cluster[1].assert_browse_backup("qq", msgs)
+ cluster[2].assert_browse_backup("qq", msgs)
+ # Set up an exchange with a binding.
+ sn.sender("xx;{create:always,node:{type:topic}}")
+ sn.sender("xxq;{create:always,node:{x-bindings:[{exchange:'xx',queue:'xxq',key:xxq}]}}")
+ cluster[1].wait_address("xx")
+ self.assertEqual(cluster[1].agent().getExchange("xx").values["bindingCount"], 1)
+ cluster[2].wait_address("xx")
+ self.assertEqual(cluster[2].agent().getExchange("xx").values["bindingCount"], 1)
+
+ # Simulate the race by re-creating the objects before promoting the new primary
+ cluster.kill(0, False)
+ sn = cluster[1].connect_admin().session()
+ sn.sender("qq;{delete:always}").close()
+ s = sn.sender("qq;{create:always}")
+ s.send("foo")
+ sn.sender("xx;{delete:always}").close()
+ sn.sender("xx;{create:always,node:{type:topic}}")
+ cluster[1].promote()
+ cluster[1].wait_status("active")
+ # Verify we are not still using the old objects on cluster[2]
+ cluster[2].assert_browse_backup("qq", ["foo"])
+ cluster[2].wait_address("xx")
+ self.assertEqual(cluster[2].agent().getExchange("xx").values["bindingCount"], 0)
+
def fairshare(msgs, limit, levels):
"""
Generator to return prioritised messages in expected order for a given fairshare limit