summaryrefslogtreecommitdiff
path: root/RC9/qpid/python
diff options
context:
space:
mode:
authorRafael H. Schloming <rhs@apache.org>2009-01-13 18:11:43 +0000
committerRafael H. Schloming <rhs@apache.org>2009-01-13 18:11:43 +0000
commit7e34266b9a23f4536415bfbc3f161b84615b6550 (patch)
tree484008cf2d413f58b5e4ab80b373303c66200888 /RC9/qpid/python
parent4612263ea692f00a4bd810438bdaf9bc88022091 (diff)
downloadqpid-python-M4.tar.gz
Tag M4 RC9M4
git-svn-id: https://svn.apache.org/repos/asf/qpid/tags/M4@734202 13f79535-47bb-0310-9956-ffa450edef68
Diffstat (limited to 'RC9/qpid/python')
-rwxr-xr-xRC9/qpid/python/LICENSE.txt203
-rw-r--r--RC9/qpid/python/NOTICE.txt20
-rw-r--r--RC9/qpid/python/README.txt56
-rw-r--r--RC9/qpid/python/RELEASE_NOTES17
-rwxr-xr-xRC9/qpid/python/amqp-doc80
-rwxr-xr-xRC9/qpid/python/commands/qpid-config392
-rwxr-xr-xRC9/qpid/python/commands/qpid-printevents74
-rwxr-xr-xRC9/qpid/python/commands/qpid-queue-stats144
-rwxr-xr-xRC9/qpid/python/commands/qpid-route514
-rwxr-xr-xRC9/qpid/python/commands/qpid-tool195
-rw-r--r--RC9/qpid/python/cpp_failing_0-10.txt0
-rw-r--r--RC9/qpid/python/cpp_failing_0-8.txt0
-rw-r--r--RC9/qpid/python/cpp_failing_0-9.txt4
-rw-r--r--RC9/qpid/python/doc/test-requirements.txt29
-rwxr-xr-xRC9/qpid/python/examples/direct/declare_queues.py76
-rwxr-xr-xRC9/qpid/python/examples/direct/direct_consumer.py94
-rwxr-xr-xRC9/qpid/python/examples/direct/direct_producer.py73
-rwxr-xr-xRC9/qpid/python/examples/direct/listener.py109
-rw-r--r--RC9/qpid/python/examples/direct/verify22
-rw-r--r--RC9/qpid/python/examples/direct/verify.in14
-rwxr-xr-xRC9/qpid/python/examples/fanout/fanout_consumer.py99
-rwxr-xr-xRC9/qpid/python/examples/fanout/fanout_producer.py72
-rwxr-xr-xRC9/qpid/python/examples/fanout/listener.py117
-rw-r--r--RC9/qpid/python/examples/fanout/verify24
-rw-r--r--RC9/qpid/python/examples/fanout/verify.in27
-rwxr-xr-xRC9/qpid/python/examples/pubsub/topic_publisher.py92
-rwxr-xr-xRC9/qpid/python/examples/pubsub/topic_subscriber.py154
-rw-r--r--RC9/qpid/python/examples/pubsub/verify23
-rw-r--r--RC9/qpid/python/examples/pubsub/verify.in55
-rwxr-xr-xRC9/qpid/python/examples/request-response/client.py131
-rwxr-xr-xRC9/qpid/python/examples/request-response/server.py110
-rw-r--r--RC9/qpid/python/examples/request-response/verify24
-rw-r--r--RC9/qpid/python/examples/request-response/verify.in14
-rwxr-xr-xRC9/qpid/python/examples/xml-exchange/declare_queues.py90
-rwxr-xr-xRC9/qpid/python/examples/xml-exchange/listener.py105
-rw-r--r--RC9/qpid/python/examples/xml-exchange/verify22
-rw-r--r--RC9/qpid/python/examples/xml-exchange/verify.in15
-rwxr-xr-xRC9/qpid/python/examples/xml-exchange/xml_consumer.py96
-rwxr-xr-xRC9/qpid/python/examples/xml-exchange/xml_producer.py92
-rwxr-xr-xRC9/qpid/python/hello-world64
-rw-r--r--RC9/qpid/python/java_failing_0-8.txt2
-rw-r--r--RC9/qpid/python/java_failing_0-9.txt18
-rw-r--r--RC9/qpid/python/mllib/__init__.py65
-rw-r--r--RC9/qpid/python/mllib/dom.py295
-rw-r--r--RC9/qpid/python/mllib/parsers.py139
-rw-r--r--RC9/qpid/python/mllib/transforms.py164
-rw-r--r--RC9/qpid/python/models/fedsim/__init__.py19
-rw-r--r--RC9/qpid/python/models/fedsim/fedsim.py434
-rw-r--r--RC9/qpid/python/models/fedsim/testBig.py88
-rw-r--r--RC9/qpid/python/models/fedsim/testRing.py48
-rw-r--r--RC9/qpid/python/models/fedsim/testStar.py65
-rw-r--r--RC9/qpid/python/models/fedsim/testStarAdd.py56
-rwxr-xr-xRC9/qpid/python/pal2py274
-rwxr-xr-xRC9/qpid/python/perftest113
-rw-r--r--RC9/qpid/python/qmf/__init__.py18
-rw-r--r--RC9/qpid/python/qmf/console.py1625
-rw-r--r--RC9/qpid/python/qpid/__init__.py84
-rw-r--r--RC9/qpid/python/qpid/assembler.py118
-rw-r--r--RC9/qpid/python/qpid/client.py225
-rw-r--r--RC9/qpid/python/qpid/codec.py590
-rw-r--r--RC9/qpid/python/qpid/codec010.py301
-rw-r--r--RC9/qpid/python/qpid/compat.py28
-rw-r--r--RC9/qpid/python/qpid/connection.py218
-rw-r--r--RC9/qpid/python/qpid/connection08.py493
-rw-r--r--RC9/qpid/python/qpid/content.py58
-rw-r--r--RC9/qpid/python/qpid/datatypes.py349
-rw-r--r--RC9/qpid/python/qpid/delegate.py53
-rw-r--r--RC9/qpid/python/qpid/delegates.py162
-rw-r--r--RC9/qpid/python/qpid/disp.py79
-rw-r--r--RC9/qpid/python/qpid/exceptions.py21
-rw-r--r--RC9/qpid/python/qpid/framer.py159
-rw-r--r--RC9/qpid/python/qpid/invoker.py48
-rw-r--r--RC9/qpid/python/qpid/log.py28
-rw-r--r--RC9/qpid/python/qpid/management.py913
-rw-r--r--RC9/qpid/python/qpid/managementdata.py753
-rw-r--r--RC9/qpid/python/qpid/message.py74
-rw-r--r--RC9/qpid/python/qpid/packer.py36
-rw-r--r--RC9/qpid/python/qpid/peer.py465
-rw-r--r--RC9/qpid/python/qpid/queue.py86
-rw-r--r--RC9/qpid/python/qpid/reference.py117
-rw-r--r--RC9/qpid/python/qpid/session.py379
-rw-r--r--RC9/qpid/python/qpid/spec.py59
-rw-r--r--RC9/qpid/python/qpid/spec010.py693
-rw-r--r--RC9/qpid/python/qpid/spec08.py504
-rw-r--r--RC9/qpid/python/qpid/testlib.py392
-rw-r--r--RC9/qpid/python/qpid/util.py117
-rw-r--r--RC9/qpid/python/qpid_config.py23
-rwxr-xr-xRC9/qpid/python/rule2test108
-rwxr-xr-xRC9/qpid/python/run-tests35
-rwxr-xr-xRC9/qpid/python/server80
-rwxr-xr-xRC9/qpid/python/server01072
-rw-r--r--RC9/qpid/python/setup.py25
-rw-r--r--RC9/qpid/python/tests/__init__.py30
-rw-r--r--RC9/qpid/python/tests/assembler.py78
-rw-r--r--RC9/qpid/python/tests/codec.py607
-rw-r--r--RC9/qpid/python/tests/codec010.py120
-rw-r--r--RC9/qpid/python/tests/connection.py215
-rw-r--r--RC9/qpid/python/tests/datatypes.py257
-rw-r--r--RC9/qpid/python/tests/framer.py95
-rw-r--r--RC9/qpid/python/tests/queue.py71
-rw-r--r--RC9/qpid/python/tests/spec.py74
-rw-r--r--RC9/qpid/python/tests/spec010.py84
-rw-r--r--RC9/qpid/python/tests_0-10/__init__.py30
-rw-r--r--RC9/qpid/python/tests_0-10/alternate_exchange.py150
-rw-r--r--RC9/qpid/python/tests_0-10/broker.py93
-rw-r--r--RC9/qpid/python/tests_0-10/dtx.py775
-rw-r--r--RC9/qpid/python/tests_0-10/example.py95
-rw-r--r--RC9/qpid/python/tests_0-10/exchange.py416
-rw-r--r--RC9/qpid/python/tests_0-10/management.py240
-rw-r--r--RC9/qpid/python/tests_0-10/message.py847
-rw-r--r--RC9/qpid/python/tests_0-10/persistence.py67
-rw-r--r--RC9/qpid/python/tests_0-10/query.py235
-rw-r--r--RC9/qpid/python/tests_0-10/queue.py366
-rw-r--r--RC9/qpid/python/tests_0-10/tx.py265
-rw-r--r--RC9/qpid/python/tests_0-8/__init__.py20
-rw-r--r--RC9/qpid/python/tests_0-8/basic.py395
-rw-r--r--RC9/qpid/python/tests_0-8/broker.py104
-rw-r--r--RC9/qpid/python/tests_0-8/example.py94
-rw-r--r--RC9/qpid/python/tests_0-8/exchange.py327
-rw-r--r--RC9/qpid/python/tests_0-8/queue.py255
-rw-r--r--RC9/qpid/python/tests_0-8/testlib.py66
-rw-r--r--RC9/qpid/python/tests_0-8/tx.py209
-rw-r--r--RC9/qpid/python/tests_0-9/__init__.py20
-rw-r--r--RC9/qpid/python/tests_0-9/basic.py396
-rw-r--r--RC9/qpid/python/tests_0-9/broker.py133
-rw-r--r--RC9/qpid/python/tests_0-9/dtx.py587
-rw-r--r--RC9/qpid/python/tests_0-9/example.py94
-rw-r--r--RC9/qpid/python/tests_0-9/exchange.py327
-rw-r--r--RC9/qpid/python/tests_0-9/execution.py29
-rw-r--r--RC9/qpid/python/tests_0-9/message.py657
-rw-r--r--RC9/qpid/python/tests_0-9/query.py224
-rw-r--r--RC9/qpid/python/tests_0-9/queue.py340
-rw-r--r--RC9/qpid/python/tests_0-9/testlib.py66
-rw-r--r--RC9/qpid/python/tests_0-9/tx.py188
134 files changed, 24651 insertions, 0 deletions
diff --git a/RC9/qpid/python/LICENSE.txt b/RC9/qpid/python/LICENSE.txt
new file mode 100755
index 0000000000..6b0b1270ff
--- /dev/null
+++ b/RC9/qpid/python/LICENSE.txt
@@ -0,0 +1,203 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/RC9/qpid/python/NOTICE.txt b/RC9/qpid/python/NOTICE.txt
new file mode 100644
index 0000000000..32ccdb70c4
--- /dev/null
+++ b/RC9/qpid/python/NOTICE.txt
@@ -0,0 +1,20 @@
+=========================================================================
+== NOTICE file corresponding to the section 4 d of ==
+== the Apache License, Version 2.0, ==
+== in this case for the Apache Qpid distribution. ==
+=========================================================================
+
+This product includes software developed by the Apache Software Foundation
+(http://www.apache.org/).
+
+Please read the LICENSE.txt file present in the root directory of this
+distribution.
+
+
+Aside from contributions to the Apache Qpid project, this software also
+includes (binary only):
+
+ - None at this time
+
+
+
diff --git a/RC9/qpid/python/README.txt b/RC9/qpid/python/README.txt
new file mode 100644
index 0000000000..bae9f6ab0b
--- /dev/null
+++ b/RC9/qpid/python/README.txt
@@ -0,0 +1,56 @@
+= INSTALLATION =
+
+Extract the release archive into a directory of your choice and set
+your PYTHONPATH accordingly:
+
+ tar -xzf qpid-python-<version>.tar.gz -C <install-prefix>
+ export PYTHONPATH=<install-prefix>/qpid-<version>/python
+
+= GETTING STARTED =
+
+The python client includes a simple hello-world example that publishes
+and consumes a message:
+
+ cp <install-prefix>/qpid-<version>/python/hello-world .
+ ./hello-world
+
+= EXAMPLES =
+
+More comprehensive examples can be found here:
+
+ cd <install-prefix>/qpid-<version>/python/examples
+
+= RUNNING THE TESTS =
+
+The "tests" directory contains a collection of unit tests for the
+python client. The "tests_0-10", "tests_0-9", and "tests_0-8"
+directories contain protocol level conformance tests for AMQP brokers
+of the specified version.
+
+Simplest way to run the tests:
+
+ 1. Run a broker on the default port
+
+ 2. ./run-tests -s <version>
+
+Where <version> is one of "0-8", "0-9", or "0-10-errata".
+
+See the run-tests usage for additional options:
+
+ ./run-tests -h
+
+== Expected failures ==
+
+Certain tests are expected to fail due to incomplete functionality or
+unresolved interop issues. To skip expected failures for the C++ or
+Java brokers:
+
+ ./run-tests -I <file-name>
+
+Where <file-name> is one of the following files:
+
+ * cpp_failing_0-10.txt
+ * cpp_failing_0-9.txt
+ * cpp_failing_0-8.txt
+ * java_failing_0-9.txt
+ * java_failing_0-8.txt
diff --git a/RC9/qpid/python/RELEASE_NOTES b/RC9/qpid/python/RELEASE_NOTES
new file mode 100644
index 0000000000..c0903df38e
--- /dev/null
+++ b/RC9/qpid/python/RELEASE_NOTES
@@ -0,0 +1,17 @@
+Apache Python M4 Release Notes
+------------------------------
+
+The Qpid M4 release of the python client contains support for both the
+0-8 and 0-10 versions of the AMQP specification as well as support for the
+non-WIP portion of the 0-9 specification. You can access these
+specifications from:
+
+http://jira.amqp.org/confluence/display/AMQP/Download
+
+For full details of Qpid capabilities, as they currently stand, see our
+project page at:
+
+http://cwiki.apache.org/confluence/display/qpid/Index
+
+The README file provided contains some details on installing and using
+the python client that is included with this distribution.
diff --git a/RC9/qpid/python/amqp-doc b/RC9/qpid/python/amqp-doc
new file mode 100755
index 0000000000..1f5910f942
--- /dev/null
+++ b/RC9/qpid/python/amqp-doc
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import sys, re
+from qpid.spec import load, pythonize
+from getopt import gnu_getopt as getopt, GetoptError
+from fnmatch import fnmatchcase as fnmatch
+
+def die(msg):
+ print >> sys.stderr, msg
+ sys.exit(1)
+
+def usage(msg = ""):
+ return ("""%s
+
+Usage %s [<options>] [<pattern_1> ... <pattern_n>]
+
+Options:
+ -e, --regexp use regex instead of glob when matching
+ -s, --spec <url> location of amqp.xml
+""" % (msg, sys.argv[0])).strip()
+
+try:
+ opts, args = getopt(sys.argv[1:], "s:ea:", ["regexp", "spec=", "additional="])
+except GetoptError, e:
+ die(str(e))
+
+regexp = False
+spec = "../specs/amqp.0-9.xml"
+errata = []
+for k, v in opts:
+ if k == "-e" or k == "--regexp": regexp = True
+ if k == "-s" or k == "--spec": spec = v
+ if k == "-a" or k == "--additional": errata.append(v)
+
+if regexp:
+ def match(pattern, value):
+ try:
+ return re.match(pattern, value)
+ except Exception, e:
+ die("error: '%s': %s" % (pattern, e))
+else:
+ def match(pattern, value):
+ return fnmatch(value, pattern)
+
+spec = load(spec, *errata)
+methods = {}
+patterns = args
+for pattern in patterns:
+ for c in spec.classes:
+ for m in c.methods:
+ name = pythonize("%s_%s" % (c.name, m.name))
+ if match(pattern, name):
+ methods[name] = m.define_method(name)
+
+if patterns:
+ if methods:
+ AMQP = type("AMQP[%s]" % ", ".join(patterns), (), methods)
+ else:
+ die("no matches")
+else:
+ AMQP = spec.define_class("AMQP")
+
+help(AMQP)
diff --git a/RC9/qpid/python/commands/qpid-config b/RC9/qpid/python/commands/qpid-config
new file mode 100755
index 0000000000..ff3c7db46e
--- /dev/null
+++ b/RC9/qpid/python/commands/qpid-config
@@ -0,0 +1,392 @@
+#!/usr/bin/env python
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import os
+import getopt
+import sys
+import locale
+from qmf.console import Session
+
+_recursive = False
+_host = "localhost"
+_durable = False
+_clusterDurable = False
+_fileCount = 8
+_fileSize = 24
+_maxQueueSize = None
+_maxQueueCount = None
+_policyType = None
+_lvq = False
+_msgSequence = False
+_ive = False
+
+FILECOUNT = "qpid.file_count"
+FILESIZE = "qpid.file_size"
+MAX_QUEUE_SIZE = "qpid.max_size"
+MAX_QUEUE_COUNT = "qpid.max_count"
+POLICY_TYPE = "qpid.policy_type"
+CLUSTER_DURABLE = "qpid.persist_last_node"
+LVQ = "qpid.last_value_queue"
+MSG_SEQUENCE = "qpid.msg_sequence"
+IVE = "qpid.ive"
+
+def Usage ():
+ print "Usage: qpid-config [OPTIONS]"
+ print " qpid-config [OPTIONS] exchanges [filter-string]"
+ print " qpid-config [OPTIONS] queues [filter-string]"
+ print " qpid-config [OPTIONS] add exchange <type> <name> [AddExchangeOptions]"
+ print " qpid-config [OPTIONS] del exchange <name>"
+ print " qpid-config [OPTIONS] add queue <name> [AddQueueOptions]"
+ print " qpid-config [OPTIONS] del queue <name>"
+ print " qpid-config [OPTIONS] bind <exchange-name> <queue-name> [binding-key]"
+ print " qpid-config [OPTIONS] unbind <exchange-name> <queue-name> [binding-key]"
+ print
+ print "Options:"
+ print " -b [ --bindings ] Show bindings in queue or exchange list"
+ print " -a [ --broker-addr ] Address (localhost) Address of qpidd broker"
+ print " broker-addr is in the form: [username/password@] hostname | ip-address [:<port>]"
+ print " ex: localhost, 10.1.1.7:10000, broker-host:10000, guest/guest@localhost"
+ print
+ print "Add Queue Options:"
+ print " --durable Queue is durable"
+ print " --cluster-durable Queue becomes durable if there is only one functioning cluster node"
+ print " --file-count N (8) Number of files in queue's persistence journal"
+ print " --file-size N (24) File size in pages (64Kib/page)"
+ print " --max-queue-size N Maximum in-memory queue size as bytes"
+ print " --max-queue-count N Maximum in-memory queue size as a number of messages"
+ print " --policy-type TYPE Action taken when queue limit is reached (reject, flow_to_disk, ring, ring_strict)"
+ print " --last-value-queue Enable LVQ behavior on the queue"
+ print
+ print "Add Exchange Options:"
+ print " --durable Exchange is durable"
+ print " --sequence Exchange will insert a 'qpid.msg_sequence' field in the message header"
+ print " with a value that increments for each message forwarded."
+ print " --ive Exchange will behave as an 'initial-value-exchange', keeping a reference"
+ print " to the last message forwarded and enqueuing that message to newly bound"
+ print " queues."
+ print
+ sys.exit (1)
+
+class BrokerManager:
+ def __init__ (self):
+ self.brokerName = None
+ self.qmf = None
+ self.broker = None
+
+ def SetBroker (self, brokerUrl):
+ self.url = brokerUrl
+ self.qmf = Session()
+ self.broker = self.qmf.addBroker(brokerUrl)
+ agents = self.qmf.getAgents()
+ for a in agents:
+ if a.getAgentBank() == 0:
+ self.brokerAgent = a
+
+ def Disconnect(self):
+ if self.broker:
+ self.qmf.delBroker(self.broker)
+
+ def Overview (self):
+ exchanges = self.qmf.getObjects(_class="exchange", _agent=self.brokerAgent)
+ queues = self.qmf.getObjects(_class="queue", _agent=self.brokerAgent)
+ print "Total Exchanges: %d" % len (exchanges)
+ etype = {}
+ for ex in exchanges:
+ if ex.type not in etype:
+ etype[ex.type] = 1
+ else:
+ etype[ex.type] = etype[ex.type] + 1
+ for typ in etype:
+ print "%15s: %d" % (typ, etype[typ])
+
+ print
+ print " Total Queues: %d" % len (queues)
+ _durable = 0
+ for queue in queues:
+ if queue.durable:
+ _durable = _durable + 1
+ print " durable: %d" % _durable
+ print " non-durable: %d" % (len (queues) - _durable)
+
+ def ExchangeList (self, filter):
+ exchanges = self.qmf.getObjects(_class="exchange", _agent=self.brokerAgent)
+ caption1 = "Type "
+ caption2 = "Exchange Name"
+ maxNameLen = len(caption2)
+ for ex in exchanges:
+ if self.match(ex.name, filter):
+ if len(ex.name) > maxNameLen: maxNameLen = len(ex.name)
+ print "%s%-*s Attributes" % (caption1, maxNameLen, caption2)
+ line = ""
+ for i in range(((maxNameLen + len(caption1)) / 5) + 5):
+ line += "====="
+ print line
+
+ for ex in exchanges:
+ if self.match (ex.name, filter):
+ print "%-10s%-*s " % (ex.type, maxNameLen, ex.name),
+ args = ex.arguments
+ if ex.durable: print "--durable",
+ if MSG_SEQUENCE in args and args[MSG_SEQUENCE] == 1: print "--sequence",
+ if IVE in args and args[IVE] == 1: print "--ive",
+ print
+
+ def ExchangeListRecurse (self, filter):
+ exchanges = self.qmf.getObjects(_class="exchange", _agent=self.brokerAgent)
+ bindings = self.qmf.getObjects(_class="binding", _agent=self.brokerAgent)
+ queues = self.qmf.getObjects(_class="queue", _agent=self.brokerAgent)
+ for ex in exchanges:
+ if self.match (ex.name, filter):
+ print "Exchange '%s' (%s)" % (ex.name, ex.type)
+ for bind in bindings:
+ if bind.exchangeRef == ex.getObjectId():
+ qname = "<unknown>"
+ queue = self.findById (queues, bind.queueRef)
+ if queue != None:
+ qname = queue.name
+ print " bind [%s] => %s" % (bind.bindingKey, qname)
+
+
+ def QueueList (self, filter):
+ queues = self.qmf.getObjects(_class="queue", _agent=self.brokerAgent)
+
+ caption = "Queue Name"
+ maxNameLen = len(caption)
+ for q in queues:
+ if self.match (q.name, filter):
+ if len(q.name) > maxNameLen: maxNameLen = len(q.name)
+ print "%-*s Attributes" % (maxNameLen, caption)
+ line = ""
+ for i in range((maxNameLen / 5) + 5):
+ line += "====="
+ print line
+
+ for q in queues:
+ if self.match (q.name, filter):
+ print "%-*s " % (maxNameLen, q.name),
+ args = q.arguments
+ if q.durable: print "--durable",
+ if CLUSTER_DURABLE in args and args[CLUSTER_DURABLE] == 1: print "--cluster-durable",
+ if q.autoDelete: print "auto-del",
+ if q.exclusive: print "excl",
+ if FILESIZE in args: print "--file-size=%d" % args[FILESIZE],
+ if FILECOUNT in args: print "--file-count=%d" % args[FILECOUNT],
+ if MAX_QUEUE_SIZE in args: print "--max-queue-size=%d" % args[MAX_QUEUE_SIZE],
+ if MAX_QUEUE_COUNT in args: print "--max-queue-count=%d" % args[MAX_QUEUE_COUNT],
+ if POLICY_TYPE in args: print "--policy-type=%s" % args[POLICY_TYPE],
+ if LVQ in args and args[LVQ] == 1: print "--last-value-queue",
+ print
+
+ def QueueListRecurse (self, filter):
+ exchanges = self.qmf.getObjects(_class="exchange", _agent=self.brokerAgent)
+ bindings = self.qmf.getObjects(_class="binding", _agent=self.brokerAgent)
+ queues = self.qmf.getObjects(_class="queue", _agent=self.brokerAgent)
+ for queue in queues:
+ if self.match (queue.name, filter):
+ print "Queue '%s'" % queue.name
+ for bind in bindings:
+ if bind.queueRef == queue.getObjectId():
+ ename = "<unknown>"
+ ex = self.findById (exchanges, bind.exchangeRef)
+ if ex != None:
+ ename = ex.name
+ if ename == "":
+ ename = "''"
+ print " bind [%s] => %s" % (bind.bindingKey, ename)
+
+ def AddExchange (self, args):
+ if len (args) < 2:
+ Usage ()
+ etype = args[0]
+ ename = args[1]
+ declArgs = {}
+ if _msgSequence:
+ declArgs[MSG_SEQUENCE] = 1
+ if _ive:
+ declArgs[IVE] = 1
+ self.broker.getAmqpSession().exchange_declare (exchange=ename, type=etype, durable=_durable, arguments=declArgs)
+
+ def DelExchange (self, args):
+ if len (args) < 1:
+ Usage ()
+ ename = args[0]
+ self.broker.getAmqpSession().exchange_delete (exchange=ename)
+
+ def AddQueue (self, args):
+ if len (args) < 1:
+ Usage ()
+ qname = args[0]
+ declArgs = {}
+ if _durable:
+ declArgs[FILECOUNT] = _fileCount
+ declArgs[FILESIZE] = _fileSize
+
+ if _maxQueueSize:
+ declArgs[MAX_QUEUE_SIZE] = _maxQueueSize
+ if _maxQueueCount:
+ declArgs[MAX_QUEUE_COUNT] = _maxQueueCount
+ if _policyType:
+ declArgs[POLICY_TYPE] = _policyType
+ if _clusterDurable:
+ declArgs[CLUSTER_DURABLE] = 1
+ if _lvq:
+ declArgs[LVQ] = 1
+
+ self.broker.getAmqpSession().queue_declare (queue=qname, durable=_durable, arguments=declArgs)
+
+ def DelQueue (self, args):
+ if len (args) < 1:
+ Usage ()
+ qname = args[0]
+ self.broker.getAmqpSession().queue_delete (queue=qname)
+
+ def Bind (self, args):
+ if len (args) < 2:
+ Usage ()
+ ename = args[0]
+ qname = args[1]
+ key = ""
+ if len (args) > 2:
+ key = args[2]
+ self.broker.getAmqpSession().exchange_bind (queue=qname, exchange=ename, binding_key=key)
+
+ def Unbind (self, args):
+ if len (args) < 2:
+ Usage ()
+ ename = args[0]
+ qname = args[1]
+ key = ""
+ if len (args) > 2:
+ key = args[2]
+ self.broker.getAmqpSession().exchange_unbind (queue=qname, exchange=ename, binding_key=key)
+
+ def findById (self, items, id):
+ for item in items:
+ if item.getObjectId() == id:
+ return item
+ return None
+
+ def match (self, name, filter):
+ if filter == "":
+ return True
+ if name.find (filter) == -1:
+ return False
+ return True
+
+def YN (bool):
+ if bool:
+ return 'Y'
+ return 'N'
+
+
+##
+## Main Program
+##
+
+try:
+ longOpts = ("durable", "cluster-durable", "bindings", "broker-addr=", "file-count=",
+ "file-size=", "max-queue-size=", "max-queue-count=", "policy-type=",
+ "last-value-queue", "sequence", "ive")
+ (optlist, encArgs) = getopt.gnu_getopt (sys.argv[1:], "a:b", longOpts)
+except:
+ Usage ()
+
+try:
+ encoding = locale.getpreferredencoding()
+ cargs = [a.decode(encoding) for a in encArgs]
+except:
+ cargs = encArgs
+
+for opt in optlist:
+ if opt[0] == "-b" or opt[0] == "--bindings":
+ _recursive = True
+ if opt[0] == "-a" or opt[0] == "--broker-addr":
+ _host = opt[1]
+ if opt[0] == "--durable":
+ _durable = True
+ if opt[0] == "--cluster-durable":
+ _clusterDurable = True
+ if opt[0] == "--file-count":
+ _fileCount = int (opt[1])
+ if opt[0] == "--file-size":
+ _fileSize = int (opt[1])
+ if opt[0] == "--max-queue-size":
+ _maxQueueSize = int (opt[1])
+ if opt[0] == "--max-queue-count":
+ _maxQueueCount = int (opt[1])
+ if opt[0] == "--policy-type":
+ _policyType = opt[1]
+ if opt[0] == "--last-value-queue":
+ _lvq = True
+ if opt[0] == "--sequence":
+ _msgSequence = True
+ if opt[0] == "--ive":
+ _ive = True
+
+nargs = len (cargs)
+bm = BrokerManager ()
+
+try:
+ bm.SetBroker(_host)
+ if nargs == 0:
+ bm.Overview ()
+ else:
+ cmd = cargs[0]
+ modifier = ""
+ if nargs > 1:
+ modifier = cargs[1]
+ if cmd == "exchanges":
+ if _recursive:
+ bm.ExchangeListRecurse (modifier)
+ else:
+ bm.ExchangeList (modifier)
+ elif cmd == "queues":
+ if _recursive:
+ bm.QueueListRecurse (modifier)
+ else:
+ bm.QueueList (modifier)
+ elif cmd == "add":
+ if modifier == "exchange":
+ bm.AddExchange (cargs[2:])
+ elif modifier == "queue":
+ bm.AddQueue (cargs[2:])
+ else:
+ Usage ()
+ elif cmd == "del":
+ if modifier == "exchange":
+ bm.DelExchange (cargs[2:])
+ elif modifier == "queue":
+ bm.DelQueue (cargs[2:])
+ else:
+ Usage ()
+ elif cmd == "bind":
+ bm.Bind (cargs[1:])
+ elif cmd == "unbind":
+ bm.Unbind (cargs[1:])
+ else:
+ Usage ()
+except KeyboardInterrupt:
+ print
+except Exception,e:
+ print "Failed:", e.args
+ sys.exit(1)
+
+bm.Disconnect()
diff --git a/RC9/qpid/python/commands/qpid-printevents b/RC9/qpid/python/commands/qpid-printevents
new file mode 100755
index 0000000000..0c1b618a1f
--- /dev/null
+++ b/RC9/qpid/python/commands/qpid-printevents
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import os
+import optparse
+import sys
+import socket
+from time import time, strftime, gmtime, sleep
+from qmf.console import Console, Session
+
+class EventConsole(Console):
+ def event(self, broker, event):
+ print event
+
+ def brokerConnected(self, broker):
+ print strftime("%c", gmtime(time())), "NOTIC qpid-printevents:brokerConnected broker=%s" % broker.getUrl()
+
+ def brokerDisconnected(self, broker):
+ print strftime("%c", gmtime(time())), "NOTIC qpid-printevents:brokerDisconnected broker=%s" % broker.getUrl()
+
+
+##
+## Main Program
+##
+def main():
+ _usage = "%prog [options] [broker-addr]..."
+ _description = \
+"""Collect and print events from one or more Qpid message brokers. If no broker-addr is
+supplied, %prog will connect to 'localhost:5672'.
+broker-addr is of the form: [username/password@] hostname | ip-address [:<port>]
+ex: localhost, 10.1.1.7:10000, broker-host:10000, guest/guest@localhost
+"""
+ p = optparse.OptionParser(usage=_usage, description=_description)
+
+ options, arguments = p.parse_args()
+ if len(arguments) == 0:
+ arguments.append("localhost")
+
+ console = EventConsole()
+ session = Session(console, rcvObjects=False, rcvHeartbeats=False, manageConnections=True)
+ brokers = []
+ for host in arguments:
+ brokers.append(session.addBroker(host))
+
+ try:
+ while (True):
+ sleep(10)
+ except KeyboardInterrupt:
+ for broker in brokers:
+ session.delBroker(broker)
+ print
+ sys.exit(0)
+
+if __name__ == '__main__':
+ main()
+
diff --git a/RC9/qpid/python/commands/qpid-queue-stats b/RC9/qpid/python/commands/qpid-queue-stats
new file mode 100755
index 0000000000..356a1d2d8d
--- /dev/null
+++ b/RC9/qpid/python/commands/qpid-queue-stats
@@ -0,0 +1,144 @@
+#!/usr/bin/env python
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import os
+import optparse
+import sys
+import re
+import socket
+import qpid
+from threading import Condition
+from qmf.console import Session, Console
+from qpid.peer import Closed
+from qpid.connection import Connection, ConnectionFailed
+from time import sleep
+
+class BrokerManager(Console):
+ def __init__(self, host):
+ self.url = host
+ self.objects = {}
+ self.filter = None
+ self.session = Session(self, rcvEvents=False, rcvHeartbeats=False,
+ userBindings=True, manageConnections=True)
+ self.broker = self.session.addBroker(self.url)
+ self.firstError = True
+
+ def setFilter(self,filter):
+ self.filter = filter
+
+ def brokerConnected(self, broker):
+ if not self.firstError:
+ print "*** Broker connected"
+ self.firstError = False
+
+ def brokerDisconnected(self, broker):
+ print "*** Broker connection lost - %s, retrying..." % broker.getError()
+ self.firstError = False
+ self.objects.clear()
+
+ def objectProps(self, broker, record):
+ className = record.getClassKey().getClassName()
+ if className != "queue":
+ return
+
+ id = record.getObjectId().__repr__()
+ if id not in self.objects:
+ self.objects[id] = (record.name, None, None)
+
+ def objectStats(self, broker, record):
+ className = record.getClassKey().getClassName()
+ if className != "queue":
+ return
+
+ id = record.getObjectId().__repr__()
+ if id not in self.objects:
+ return
+
+ (name, first, last) = self.objects[id]
+ if first == None:
+ self.objects[id] = (name, record, None)
+ return
+
+ if len(self.filter) > 0 :
+ match = False
+
+ for x in self.filter:
+ if x.match(name):
+ match = True
+ break
+ if match == False:
+ return
+
+ if last == None:
+ lastSample = first
+ else:
+ lastSample = last
+
+ self.objects[id] = (name, first, record)
+
+ deltaTime = float (record.getTimestamps()[0] - lastSample.getTimestamps()[0])
+ if deltaTime < 1000000000.0:
+ return
+ enqueueRate = float (record.msgTotalEnqueues - lastSample.msgTotalEnqueues) / \
+ (deltaTime / 1000000000.0)
+ dequeueRate = float (record.msgTotalDequeues - lastSample.msgTotalDequeues) / \
+ (deltaTime / 1000000000.0)
+ print "%-41s%10.2f%11d%13.2f%13.2f" % \
+ (name, deltaTime / 1000000000, record.msgDepth, enqueueRate, dequeueRate)
+
+
+ def Display (self):
+ self.session.bindClass("org.apache.qpid.broker", "queue")
+ print "Queue Name Sec Depth Enq Rate Deq Rate"
+ print "========================================================================================"
+ try:
+ while True:
+ sleep (1)
+ if self.firstError and self.broker.getError():
+ self.firstError = False
+ print "*** Error: %s, retrying..." % self.broker.getError()
+ except KeyboardInterrupt:
+ print
+ self.session.delBroker(self.broker)
+
+##
+## Main Program
+##
+def main():
+ p = optparse.OptionParser()
+ p.add_option('--broker-address','-a', default='localhost' , help='broker-addr is in the form: [username/password@] hostname | ip-address [:<port>] \n ex: localhost, 10.1.1.7:10000, broker-host:10000, guest/guest@localhost')
+ p.add_option('--filter','-f' ,default=None ,help='a list of comma separated queue names (regex are accepted) to show')
+
+ options, arguments = p.parse_args()
+
+ host = options.broker_address
+ filter = []
+ if options.filter != None:
+ for s in options.filter.split(","):
+ filter.append(re.compile(s))
+
+ bm = BrokerManager(host)
+ bm.setFilter(filter)
+ bm.Display()
+
+if __name__ == '__main__':
+ main()
+
diff --git a/RC9/qpid/python/commands/qpid-route b/RC9/qpid/python/commands/qpid-route
new file mode 100755
index 0000000000..e0e655683a
--- /dev/null
+++ b/RC9/qpid/python/commands/qpid-route
@@ -0,0 +1,514 @@
+#!/usr/bin/env python
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import getopt
+import sys
+import socket
+import os
+import locale
+from qmf.console import Session, BrokerURL
+
+def Usage():
+ print "Usage: qpid-route [OPTIONS] dynamic add <dest-broker> <src-broker> <exchange> [tag] [exclude-list]"
+ print " qpid-route [OPTIONS] dynamic del <dest-broker> <src-broker> <exchange>"
+ print
+ print " qpid-route [OPTIONS] route add <dest-broker> <src-broker> <exchange> <routing-key> [tag] [exclude-list]"
+ print " qpid-route [OPTIONS] route del <dest-broker> <src-broker> <exchange> <routing-key>"
+ print " qpid-route [OPTIONS] queue add <dest-broker> <src-broker> <exchange> <queue>"
+ print " qpid-route [OPTIONS] queue del <dest-broker> <src-broker> <exchange> <queue>"
+ print " qpid-route [OPTIONS] route list [<dest-broker>]"
+ print " qpid-route [OPTIONS] route flush [<dest-broker>]"
+ print " qpid-route [OPTIONS] route map [<broker>]"
+ print
+ print " qpid-route [OPTIONS] link add <dest-broker> <src-broker>"
+ print " qpid-route [OPTIONS] link del <dest-broker> <src-broker>"
+ print " qpid-route [OPTIONS] link list [<dest-broker>]"
+ print
+ print "Options:"
+ print " -v [ --verbose ] Verbose output"
+ print " -q [ --quiet ] Quiet output, don't print duplicate warnings"
+ print " -d [ --durable ] Added configuration shall be durable"
+ print " -e [ --del-empty-link ] Delete link after deleting last route on the link"
+ print " -s [ --src-local ] Make connection to source broker (push route)"
+ print " -t <transport> [ --transport <transport>]"
+ print " Specify transport to use for links, defaults to tcp"
+ print
+ print " dest-broker and src-broker are in the form: [username/password@] hostname | ip-address [:<port>]"
+ print " ex: localhost, 10.1.1.7:10000, broker-host:10000, guest/guest@localhost"
+ print
+ sys.exit(1)
+
+_verbose = False
+_quiet = False
+_durable = False
+_dellink = False
+_srclocal = False
+_transport = "tcp"
+
+class RouteManager:
+ def __init__(self, localBroker):
+ self.local = BrokerURL(localBroker)
+ self.remote = None
+ self.qmf = Session()
+ self.broker = self.qmf.addBroker(localBroker)
+
+ def disconnect(self):
+ self.qmf.delBroker(self.broker)
+
+ def getLink(self):
+ links = self.qmf.getObjects(_class="link")
+ for link in links:
+ if self.remote.match(link.host, link.port):
+ return link
+ return None
+
+ def addLink(self, remoteBroker):
+ self.remote = BrokerURL(remoteBroker)
+ if self.local.match(self.remote.host, self.remote.port):
+ raise Exception("Linking broker to itself is not permitted")
+
+ brokers = self.qmf.getObjects(_class="broker")
+ broker = brokers[0]
+ link = self.getLink()
+ if link == None:
+ if self.remote.authName == "anonymous":
+ mech = "ANONYMOUS"
+ else:
+ mech = "PLAIN"
+ res = broker.connect(self.remote.host, self.remote.port, _durable,
+ mech, self.remote.authName, self.remote.authPass,
+ _transport)
+ if _verbose:
+ print "Connect method returned:", res.status, res.text
+
+ def delLink(self, remoteBroker):
+ self.remote = BrokerURL(remoteBroker)
+ brokers = self.qmf.getObjects(_class="broker")
+ broker = brokers[0]
+ link = self.getLink()
+ if link == None:
+ raise Exception("Link not found")
+
+ res = link.close()
+ if _verbose:
+ print "Close method returned:", res.status, res.text
+
+ def listLinks(self):
+ links = self.qmf.getObjects(_class="link")
+ if len(links) == 0:
+ print "No Links Found"
+ else:
+ print
+ print "Host Port Transport Durable State Last Error"
+ print "============================================================================="
+ for link in links:
+ print "%-16s%-8d%-13s%c %-18s%s" % \
+ (link.host, link.port, link.transport, YN(link.durable), link.state, link.lastError)
+
+ def mapRoutes(self):
+ qmf = self.qmf
+ print
+ print "Finding Linked Brokers:"
+
+ brokerList = {}
+ brokerList[self.local.name()] = self.broker
+ print " %s... Ok" % self.local
+
+ added = True
+ while added:
+ added = False
+ links = qmf.getObjects(_class="link")
+ for link in links:
+ url = BrokerURL("%s:%d" % (link.host, link.port))
+ if url.name() not in brokerList:
+ print " %s..." % url.name(),
+ try:
+ b = qmf.addBroker("%s:%d" % (link.host, link.port))
+ brokerList[url.name()] = b
+ added = True
+ print "Ok"
+ except Exception, e:
+ print e
+
+ print
+ print "Dynamic Routes:"
+ bridges = qmf.getObjects(_class="bridge", dynamic=True)
+ fedExchanges = []
+ for bridge in bridges:
+ if bridge.src not in fedExchanges:
+ fedExchanges.append(bridge.src)
+ if len(fedExchanges) == 0:
+ print " none found"
+ print
+
+ for ex in fedExchanges:
+ print " Exchange %s:" % ex
+ pairs = []
+ for bridge in bridges:
+ if bridge.src == ex:
+ link = bridge._linkRef_
+ fromUrl = "%s:%s" % (link.host, link.port)
+ toUrl = bridge.getBroker().getUrl()
+ found = False
+ for pair in pairs:
+ if pair.matches(fromUrl, toUrl):
+ found = True
+ if not found:
+ pairs.append(RoutePair(fromUrl, toUrl))
+ for pair in pairs:
+ print " %s" % pair
+ print
+
+ print "Static Routes:"
+ bridges = qmf.getObjects(_class="bridge", dynamic=False)
+ if len(bridges) == 0:
+ print " none found"
+ print
+
+ for bridge in bridges:
+ link = bridge._linkRef_
+ fromUrl = "%s:%s" % (link.host, link.port)
+ toUrl = bridge.getBroker().getUrl()
+ leftType = "ex"
+ rightType = "ex"
+ if bridge.srcIsLocal:
+ arrow = "=>"
+ left = bridge.src
+ right = bridge.dest
+ if bridge.srcIsQueue:
+ leftType = "queue"
+ else:
+ arrow = "<="
+ left = bridge.dest
+ right = bridge.src
+ if bridge.srcIsQueue:
+ rightType = "queue"
+
+ if bridge.srcIsQueue:
+ print " %s(%s=%s) %s %s(%s=%s)" % \
+ (toUrl, leftType, left, arrow, fromUrl, rightType, right)
+ else:
+ print " %s(%s=%s) %s %s(%s=%s) key=%s" % \
+ (toUrl, leftType, left, arrow, fromUrl, rightType, right, bridge.key)
+ print
+
+ for broker in brokerList:
+ if broker != self.local.name():
+ qmf.delBroker(brokerList[broker])
+
+
+ def addRoute(self, remoteBroker, exchange, routingKey, tag, excludes, dynamic=False):
+ if dynamic and _srclocal:
+ raise Exception("--src-local is not permitted on dynamic routes")
+
+ self.addLink(remoteBroker)
+ link = self.getLink()
+ if link == None:
+ raise Exception("Link failed to create")
+
+ bridges = self.qmf.getObjects(_class="bridge")
+ for bridge in bridges:
+ if bridge.linkRef == link.getObjectId() and \
+ bridge.dest == exchange and bridge.key == routingKey and not bridge.srcIsQueue:
+ if not _quiet:
+ raise Exception("Duplicate Route - ignoring: %s(%s)" % (exchange, routingKey))
+ sys.exit(0)
+
+ if _verbose:
+ print "Creating inter-broker binding..."
+ res = link.bridge(_durable, exchange, exchange, routingKey, tag, excludes, False, _srclocal, dynamic)
+ if res.status != 0:
+ raise Exception(res.text)
+ if _verbose:
+ print "Bridge method returned:", res.status, res.text
+
+ def addQueueRoute(self, remoteBroker, exchange, queue):
+ self.addLink(remoteBroker)
+ link = self.getLink()
+ if link == None:
+ raise Exception("Link failed to create")
+
+ bridges = self.qmf.getObjects(_class="bridge")
+ for bridge in bridges:
+ if bridge.linkRef == link.getObjectId() and \
+ bridge.dest == exchange and bridge.src == queue and bridge.srcIsQueue:
+ if not _quiet:
+ raise Exception("Duplicate Route - ignoring: %s(%s)" % (exchange, queue))
+ sys.exit(0)
+
+ if _verbose:
+ print "Creating inter-broker binding..."
+ res = link.bridge(_durable, queue, exchange, "", "", "", True, _srclocal, False)
+ if res.status != 0:
+ raise Exception(res.text)
+ if _verbose:
+ print "Bridge method returned:", res.status, res.text
+
+ def delQueueRoute(self, remoteBroker, exchange, queue):
+ self.remote = BrokerURL(remoteBroker)
+ link = self.getLink()
+ if link == None:
+ if not _quiet:
+ raise Exception("No link found from %s to %s" % (self.remote.name(), self.local.name()))
+ sys.exit(0)
+
+ bridges = self.qmf.getObjects(_class="bridge")
+ for bridge in bridges:
+ if bridge.linkRef == link.getObjectId() and \
+ bridge.dest == exchange and bridge.src == queue and bridge.srcIsQueue:
+ if _verbose:
+ print "Closing bridge..."
+ res = bridge.close()
+ if res.status != 0:
+ raise Exception("Error closing bridge: %d - %s" % (res.status, res.text))
+ if len(bridges) == 1 and _dellink:
+ link = self.getLink()
+ if link == None:
+ sys.exit(0)
+ if _verbose:
+ print "Last bridge on link, closing link..."
+ res = link.close()
+ if res.status != 0:
+ raise Exception("Error closing link: %d - %s" % (res.status, res.text))
+ sys.exit(0)
+ if not _quiet:
+ raise Exception("Route not found")
+
+ def delRoute(self, remoteBroker, exchange, routingKey, dynamic=False):
+ self.remote = BrokerURL(remoteBroker)
+ link = self.getLink()
+ if link == None:
+ if not _quiet:
+ raise Exception("No link found from %s to %s" % (self.remote.name(), self.local.name()))
+ sys.exit(0)
+
+ bridges = self.qmf.getObjects(_class="bridge")
+ for bridge in bridges:
+ if bridge.linkRef == link.getObjectId() and bridge.dest == exchange and bridge.key == routingKey \
+ and bridge.dynamic == dynamic:
+ if _verbose:
+ print "Closing bridge..."
+ res = bridge.close()
+ if res.status != 0:
+ raise Exception("Error closing bridge: %d - %s" % (res.status, res.text))
+ if len(bridges) == 1 and _dellink:
+ link = self.getLink()
+ if link == None:
+ sys.exit(0)
+ if _verbose:
+ print "Last bridge on link, closing link..."
+ res = link.close()
+ if res.status != 0:
+ raise Exception("Error closing link: %d - %s" % (res.status, res.text))
+ sys.exit(0)
+ if not _quiet:
+ raise Exception("Route not found")
+
+ def listRoutes(self):
+ links = self.qmf.getObjects(_class="link")
+ bridges = self.qmf.getObjects(_class="bridge")
+
+ for bridge in bridges:
+ myLink = None
+ for link in links:
+ if bridge.linkRef == link.getObjectId():
+ myLink = link
+ break
+ if myLink != None:
+ if bridge.dynamic:
+ keyText = "<dynamic>"
+ else:
+ keyText = bridge.key
+ print "%s %s:%d %s %s" % (self.local.name(), myLink.host, myLink.port, bridge.dest, keyText)
+
+ def clearAllRoutes(self):
+ links = self.qmf.getObjects(_class="link")
+ bridges = self.qmf.getObjects(_class="bridge")
+
+ for bridge in bridges:
+ if _verbose:
+ myLink = None
+ for link in links:
+ if bridge.linkRef == link.getObjectId():
+ myLink = link
+ break
+ if myLink != None:
+ print "Deleting Bridge: %s:%d %s %s... " % (myLink.host, myLink.port, bridge.dest, bridge.key),
+ res = bridge.close()
+ if res.status != 0:
+ print "Error: %d - %s" % (res.status, res.text)
+ elif _verbose:
+ print "Ok"
+
+ if _dellink:
+ links = self.qmf.getObjects(_class="link")
+ for link in links:
+ if _verbose:
+ print "Deleting Link: %s:%d... " % (link.host, link.port),
+ res = link.close()
+ if res.status != 0:
+ print "Error: %d - %s" % (res.status, res.text)
+ elif _verbose:
+ print "Ok"
+
+class RoutePair:
+ def __init__(self, fromUrl, toUrl):
+ self.fromUrl = fromUrl
+ self.toUrl = toUrl
+ self.bidir = False
+
+ def __repr__(self):
+ if self.bidir:
+ delimit = "<=>"
+ else:
+ delimit = " =>"
+ return "%s %s %s" % (self.fromUrl, delimit, self.toUrl)
+
+ def matches(self, fromUrl, toUrl):
+ if fromUrl == self.fromUrl and toUrl == self.toUrl:
+ return True
+ if toUrl == self.fromUrl and fromUrl == self.toUrl:
+ self.bidir = True
+ return True
+ return False
+
+
+def YN(val):
+ if val == 1:
+ return 'Y'
+ return 'N'
+
+##
+## Main Program
+##
+
+try:
+ longOpts = ("verbose", "quiet", "durable", "del-empty-link", "src-local", "transport=")
+ (optlist, encArgs) = getopt.gnu_getopt(sys.argv[1:], "vqdest:", longOpts)
+except:
+ Usage()
+
+try:
+ encoding = locale.getpreferredencoding()
+ cargs = [a.decode(encoding) for a in encArgs]
+except:
+ cargs = encArgs
+
+for opt in optlist:
+ if opt[0] == "-v" or opt[0] == "--verbose":
+ _verbose = True
+ if opt[0] == "-q" or opt[0] == "--quiet":
+ _quiet = True
+ if opt[0] == "-d" or opt[0] == "--durable":
+ _durable = True
+ if opt[0] == "-e" or opt[0] == "--del-empty-link":
+ _dellink = True
+ if opt[0] == "-s" or opt[0] == "--src-local":
+ _srclocal = True
+ if opt[0] == "-t" or opt[0] == "--transport":
+ _transport = opt[1]
+
+nargs = len(cargs)
+if nargs < 2:
+ Usage()
+if nargs == 2:
+ localBroker = "localhost"
+else:
+ if _srclocal:
+ localBroker = cargs[3]
+ remoteBroker = cargs[2]
+ else:
+ localBroker = cargs[2]
+ if nargs > 3:
+ remoteBroker = cargs[3]
+
+group = cargs[0]
+cmd = cargs[1]
+
+try:
+ rm = RouteManager(localBroker)
+ if group == "link":
+ if cmd == "add":
+ if nargs != 4:
+ Usage()
+ rm.addLink(remoteBroker)
+ elif cmd == "del":
+ if nargs != 4:
+ Usage()
+ rm.delLink(remoteBroker)
+ elif cmd == "list":
+ rm.listLinks()
+
+ elif group == "dynamic":
+ if cmd == "add":
+ if nargs < 5 or nargs > 7:
+ Usage()
+
+ tag = ""
+ excludes = ""
+ if nargs > 5: tag = cargs[5]
+ if nargs > 6: excludes = cargs[6]
+ rm.addRoute(remoteBroker, cargs[4], "", tag, excludes, dynamic=True)
+ elif cmd == "del":
+ if nargs != 5:
+ Usage()
+ else:
+ rm.delRoute(remoteBroker, cargs[4], "", dynamic=True)
+
+ elif group == "route":
+ if cmd == "add":
+ if nargs < 6 or nargs > 8:
+ Usage()
+
+ tag = ""
+ excludes = ""
+ if nargs > 6: tag = cargs[6]
+ if nargs > 7: excludes = cargs[7]
+ rm.addRoute(remoteBroker, cargs[4], cargs[5], tag, excludes, dynamic=False)
+ elif cmd == "del":
+ if nargs != 6:
+ Usage()
+ rm.delRoute(remoteBroker, cargs[4], cargs[5], dynamic=False)
+ elif cmd == "map":
+ rm.mapRoutes()
+ else:
+ if cmd == "list":
+ rm.listRoutes()
+ elif cmd == "flush":
+ rm.clearAllRoutes()
+ else:
+ Usage()
+
+ elif group == "queue":
+ if nargs != 6:
+ Usage()
+ if cmd == "add":
+ rm.addQueueRoute(remoteBroker, exchange=cargs[4], queue=cargs[5])
+ elif cmd == "del":
+ rm.delQueueRoute(remoteBroker, exchange=cargs[4], queue=cargs[5])
+ else:
+ Usage()
+
+except Exception,e:
+ print "Failed:", e.args
+ sys.exit(1)
+
+rm.disconnect()
diff --git a/RC9/qpid/python/commands/qpid-tool b/RC9/qpid/python/commands/qpid-tool
new file mode 100755
index 0000000000..14308f69fb
--- /dev/null
+++ b/RC9/qpid/python/commands/qpid-tool
@@ -0,0 +1,195 @@
+#!/usr/bin/env python
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import os
+import getopt
+import sys
+import socket
+from cmd import Cmd
+from qpid.connection import ConnectionFailed
+from qpid.managementdata import ManagementData
+from shlex import split
+from qpid.disp import Display
+from qpid.peer import Closed
+
+class Mcli (Cmd):
+ """ Management Command Interpreter """
+
+ def __init__ (self, dataObject, dispObject):
+ Cmd.__init__ (self)
+ self.dataObject = dataObject
+ self.dispObject = dispObject
+ self.dataObject.setCli (self)
+ self.prompt = "qpid: "
+
+ def emptyline (self):
+ pass
+
+ def setPromptMessage (self, p):
+ if p == None:
+ self.prompt = "qpid: "
+ else:
+ self.prompt = "qpid[%s]: " % p
+
+ def do_help (self, data):
+ print "Management Tool for QPID"
+ print
+ print "Commands:"
+ print " list - Print summary of existing objects by class"
+ print " list <className> - Print list of objects of the specified class"
+ print " list <className> active - Print list of non-deleted objects of the specified class"
+ print " show <className> - Print contents of all objects of specified class"
+ print " show <className> active - Print contents of all non-deleted objects of specified class"
+ print " show <list-of-IDs> - Print contents of one or more objects (infer className)"
+ print " show <className> <list-of-IDs> - Print contents of one or more objects"
+ print " list is space-separated, ranges may be specified (i.e. 1004-1010)"
+ print " call <ID> <methodName> [<args>] - Invoke a method on an object"
+ print " schema - Print summary of object classes seen on the target"
+ print " schema <className> - Print details of an object class"
+ print " set time-format short - Select short timestamp format (default)"
+ print " set time-format long - Select long timestamp format"
+ print " id [<ID>] - Display translations of display object ids"
+ print " quit or ^D - Exit the program"
+ print
+
+ def complete_set (self, text, line, begidx, endidx):
+ """ Command completion for the 'set' command """
+ tokens = split (line)
+ if len (tokens) < 2:
+ return ["time-format "]
+ elif tokens[1] == "time-format":
+ if len (tokens) == 2:
+ return ["long", "short"]
+ elif len (tokens) == 3:
+ if "long".find (text) == 0:
+ return ["long"]
+ elif "short".find (text) == 0:
+ return ["short"]
+ elif "time-format".find (text) == 0:
+ return ["time-format "]
+ return []
+
+ def do_set (self, data):
+ tokens = split (data)
+ try:
+ if tokens[0] == "time-format":
+ self.dispObject.do_setTimeFormat (tokens[1])
+ except:
+ pass
+
+ def do_id (self, data):
+ self.dataObject.do_id(data)
+
+ def complete_schema (self, text, line, begidx, endidx):
+ tokens = split (line)
+ if len (tokens) > 2:
+ return []
+ return self.dataObject.classCompletions (text)
+
+ def do_schema (self, data):
+ self.dataObject.do_schema (data)
+
+ def complete_list (self, text, line, begidx, endidx):
+ tokens = split (line)
+ if len (tokens) > 2:
+ return []
+ return self.dataObject.classCompletions (text)
+
+ def do_list (self, data):
+ self.dataObject.do_list (data)
+
+ def do_show (self, data):
+ self.dataObject.do_show (data)
+
+ def do_call (self, data):
+ try:
+ self.dataObject.do_call (data)
+ except ValueError, e:
+ print "ValueError:", e
+
+ def do_EOF (self, data):
+ print "quit"
+ try:
+ self.dataObject.do_exit ()
+ except:
+ pass
+ return True
+
+ def do_quit (self, data):
+ try:
+ self.dataObject.do_exit ()
+ except:
+ pass
+ return True
+
+ def postcmd (self, stop, line):
+ return stop
+
+ def postloop (self):
+ print "Exiting..."
+ self.dataObject.close ()
+
+def Usage ():
+ print "Usage: qpid-tool [[<username>/<password>@]<target-host>[:<tcp-port>]]"
+ print
+ sys.exit (1)
+
+#=========================================================
+# Main Program
+#=========================================================
+
+# Get host name and port if specified on the command line
+cargs = sys.argv[1:]
+_host = "localhost"
+
+if len (cargs) > 0:
+ _host = cargs[0]
+
+if _host[0] == '-':
+ Usage()
+
+disp = Display ()
+
+# Attempt to make a connection to the target broker
+try:
+ data = ManagementData (disp, _host)
+except socket.error, e:
+ print "Socket Error (%s):" % _host, e[1]
+ sys.exit (1)
+except IOError, e:
+ print "IOError: %d - %s: %s" % (e.errno, e.strerror, e.filename)
+ sys.exit (1)
+except ConnectionFailed, e:
+ print "Connect Failed %d - %s" % (e[0], e[1])
+ sys.exit(1)
+except Exception, e:
+ if str(e).find ("Exchange not found") != -1:
+ print "Management not enabled on broker: Use '-m yes' option on broker startup."
+ sys.exit(1)
+
+# Instantiate the CLI interpreter and launch it.
+cli = Mcli (data, disp)
+print ("Management Tool for QPID")
+try:
+ cli.cmdloop ()
+except Closed, e:
+ print "Connection to Broker Lost:", e
+ sys.exit (1)
diff --git a/RC9/qpid/python/cpp_failing_0-10.txt b/RC9/qpid/python/cpp_failing_0-10.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/RC9/qpid/python/cpp_failing_0-10.txt
diff --git a/RC9/qpid/python/cpp_failing_0-8.txt b/RC9/qpid/python/cpp_failing_0-8.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/RC9/qpid/python/cpp_failing_0-8.txt
diff --git a/RC9/qpid/python/cpp_failing_0-9.txt b/RC9/qpid/python/cpp_failing_0-9.txt
new file mode 100644
index 0000000000..06c31080fb
--- /dev/null
+++ b/RC9/qpid/python/cpp_failing_0-9.txt
@@ -0,0 +1,4 @@
+tests_0-9.message.MessageTests.test_checkpoint
+tests_0-9.message.MessageTests.test_reject
+tests_0-9.basic.BasicTests.test_get
+
diff --git a/RC9/qpid/python/doc/test-requirements.txt b/RC9/qpid/python/doc/test-requirements.txt
new file mode 100644
index 0000000000..5089b49dbe
--- /dev/null
+++ b/RC9/qpid/python/doc/test-requirements.txt
@@ -0,0 +1,29 @@
+###############################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+###############################################################################
+
+ * start and stop server, possibly in different configurations, should
+ at least be able to specify host and port
+
+ * initiate multiple connections/server
+
+ * initiate multiple channels/connection
+
+ * enable positive and negative tests for any protocol interaction
+
+ * test harness must be as robust as possible to spec changes
diff --git a/RC9/qpid/python/examples/direct/declare_queues.py b/RC9/qpid/python/examples/direct/declare_queues.py
new file mode 100755
index 0000000000..13818ee9d7
--- /dev/null
+++ b/RC9/qpid/python/examples/direct/declare_queues.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+"""
+ declare_queues.py
+
+ Creates and binds a queue on an AMQP direct exchange.
+
+ All messages using the routing key "routing_key" are
+ sent to the queue named "message_queue".
+"""
+
+# Common includes
+
+import qpid
+import sys
+import os
+from qpid.util import connect
+from qpid.connection import Connection
+from qpid.datatypes import Message, RangedSet, uuid4
+from qpid.queue import Empty
+
+#----- Initialization -----------------------------------
+
+# Set parameters for login
+
+host="127.0.0.1"
+port=5672
+user="guest"
+password="guest"
+
+# If an alternate host or port has been specified, use that instead
+# (this is used in our unit tests)
+if len(sys.argv) > 1 :
+ host=sys.argv[1]
+if len(sys.argv) > 2 :
+ port=int(sys.argv[2])
+
+# Create a connection.
+socket = connect(host, port)
+connection = Connection (sock=socket, username=user, password=password)
+connection.start()
+session = connection.session(str(uuid4()))
+
+#----- Create a queue -------------------------------------
+
+# queue_declare() creates an AMQP queue, which is held
+# on the broker. Published messages are sent to the AMQP queue,
+# from which messages are delivered to consumers.
+#
+# exchange_bind() determines which messages are routed to a queue.
+# Route all messages with the binding key "routing_key" to
+# the AMQP queue named "message_queue".
+
+session.queue_declare(queue="message_queue")
+session.exchange_bind(exchange="amq.direct", queue="message_queue", binding_key="routing_key")
+
+#----- Cleanup ---------------------------------------------
+
+session.close(timeout=10)
diff --git a/RC9/qpid/python/examples/direct/direct_consumer.py b/RC9/qpid/python/examples/direct/direct_consumer.py
new file mode 100755
index 0000000000..b07e53c5c7
--- /dev/null
+++ b/RC9/qpid/python/examples/direct/direct_consumer.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+"""
+ direct_consumer.py
+
+ This AMQP client reads messages from a message
+ queue named "message_queue".
+"""
+
+import qpid
+import sys
+import os
+from random import randint
+from qpid.util import connect
+from qpid.connection import Connection
+from qpid.datatypes import Message, RangedSet, uuid4
+from qpid.queue import Empty
+
+
+#----- Initialization --------------------------------------
+
+# Set parameters for login
+
+host="127.0.0.1"
+port=5672
+user="guest"
+password="guest"
+
+# If an alternate host or port has been specified, use that instead
+# (this is used in our unit tests)
+if len(sys.argv) > 1 :
+ host=sys.argv[1]
+if len(sys.argv) > 2 :
+ port=int(sys.argv[2])
+
+# Create a connection.
+socket = connect(host, port)
+connection = Connection (sock=socket, username=user, password=password)
+connection.start()
+session = connection.session(str(uuid4()))
+
+#----- Read from queue --------------------------------------------
+
+# Now let's create a local client queue and tell it to read
+# incoming messages.
+
+# The consumer tag identifies the client-side queue.
+
+local_queue_name = "local_queue"
+queue = session.incoming(local_queue_name)
+
+# Call message_subscribe() to tell the broker to deliver messages
+# from the AMQP queue to this local client queue. The broker will
+# start delivering messages as soon as credit is allocated using
+# queue.start().
+
+session.message_subscribe(queue="message_queue", destination=local_queue_name)
+queue.start()
+
+# Initialize 'final' and 'content', variables used to identify the last message.
+
+final = "That's all, folks!" # In a message body, signals the last message
+content = "" # Content of the last message read
+
+# Loop until the sentinel message is seen. queue.get(timeout=10) blocks
+# for up to 10 seconds and raises qpid.queue.Empty if nothing arrives in
+# time (topic_subscriber.py shows explicit handling of that case); here
+# the producer is expected to be running, so it is left unhandled.
+message = None
+while content != final:
+ message = queue.get(timeout=10)
+ content = message.body
+ # Acknowledge the message so the broker can release it.
+ session.message_accept(RangedSet(message.id))
+ print content
+
+#----- Cleanup ------------------------------------------------
+
+# Clean up before exiting so there are no open threads.
+#
+
+session.close(timeout=10)
diff --git a/RC9/qpid/python/examples/direct/direct_producer.py b/RC9/qpid/python/examples/direct/direct_producer.py
new file mode 100755
index 0000000000..fcbb4675e4
--- /dev/null
+++ b/RC9/qpid/python/examples/direct/direct_producer.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+"""
+ direct_producer.py
+
+ Publishes messages to an AMQP direct exchange, using
+ the routing key "routing_key"
+"""
+
+import qpid
+import sys
+import os
+from qpid.util import connect
+from qpid.connection import Connection
+from qpid.datatypes import Message
+from qpid.datatypes import uuid4
+from qpid.queue import Empty
+
+
+#----- Initialization -----------------------------------
+
+# Set parameters for login
+
+host="127.0.0.1"
+port=5672
+user="guest"
+password="guest"
+
+# If an alternate host or port has been specified, use that instead
+# (this is used in our unit tests)
+if len(sys.argv) > 1 :
+ host=sys.argv[1]
+if len(sys.argv) > 2 :
+ port=int(sys.argv[2])
+
+# Create a connection.
+socket = connect(host, port)
+connection = Connection (sock=socket, username=user, password=password)
+connection.start()
+session = connection.session(str(uuid4()))
+
+#----- Publish some messages ------------------------------
+
+# Create some messages and put them on the broker.
+# All messages carry the routing key "routing_key"; declare_queues.py
+# binds that key on amq.direct to the queue "message_queue", which is
+# therefore where these messages end up.
+props = session.delivery_properties(routing_key="routing_key")
+
+for i in range(10):
+ session.message_transfer(destination="amq.direct", message=Message(props,"message " + str(i)))
+
+# Send a final sentinel message that tells consumers to stop reading.
+session.message_transfer(destination="amq.direct", message=Message(props,"That's all, folks!"))
+
+#----- Cleanup --------------------------------------------
+
+# Clean up before exiting so there are no open threads.
+
+session.close(timeout=10)
diff --git a/RC9/qpid/python/examples/direct/listener.py b/RC9/qpid/python/examples/direct/listener.py
new file mode 100755
index 0000000000..9d06bd3929
--- /dev/null
+++ b/RC9/qpid/python/examples/direct/listener.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+"""
+ listener.py
+
+ This AMQP client reads messages from a message
+ queue named "message_queue". It is implemented
+ as a message listener.
+"""
+
+# Common includes
+
+import qpid
+import sys
+import os
+from qpid.util import connect
+from qpid.connection import Connection
+from qpid.datatypes import Message, RangedSet, uuid4
+from qpid.queue import Empty
+
+# Includes specific to this example
+
+from time import sleep
+
+
+#----- Message Receive Handler -----------------------------
+
+# Receiver is the message listener: Handler() is invoked for each message
+# delivered to the local queue (presumably on the queue's listener thread;
+# the main thread below just polls isFinal() and sleeps -- TODO confirm).
+# It acknowledges and prints each message and records arrival of the
+# "That's all, folks!" sentinel.
+class Receiver:
+ def __init__ (self):
+ self.finalReceived = False
+
+ def isFinal (self):
+ return self.finalReceived
+
+ def Handler (self, message):
+ content = message.body
+ session.message_accept(RangedSet(message.id))
+ print content
+ if content == "That's all, folks!":
+ self.finalReceived = True
+
+#----- Initialization --------------------------------------
+
+# Set parameters for login
+
+host="127.0.0.1"
+port=5672
+user="guest"
+password="guest"
+
+# If an alternate host or port has been specified, use that instead
+# (this is used in our unit tests)
+if len(sys.argv) > 1 :
+ host=sys.argv[1]
+if len(sys.argv) > 2 :
+ port=int(sys.argv[2])
+
+# Create a connection.
+socket = connect(host, port)
+connection = Connection (sock=socket, username=user, password=password)
+connection.start()
+session = connection.session(str(uuid4()))
+
+#----- Read from queue --------------------------------------------
+
+# Now let's create a local client queue and tell it to read
+# incoming messages.
+
+# The local_queue_name identifies the client-side queue.
+
+local_queue_name = "local_queue"
+queue = session.incoming(local_queue_name)
+
+# Call message_subscribe() to tell the broker to deliver messages
+# from the AMQP queue to this local client queue. The broker will
+# start delivering messages once credit is allocated by queue.start().
+
+session.message_subscribe(queue="message_queue", destination=local_queue_name)
+queue.start()
+
+receiver = Receiver()
+queue.listen (receiver.Handler)
+
+# Poll until the listener has seen the sentinel message.
+while not receiver.isFinal() :
+ sleep (1)
+
+
+#----- Cleanup ------------------------------------------------
+
+# Clean up before exiting so there are no open threads.
+#
+
+session.close(timeout=10)
diff --git a/RC9/qpid/python/examples/direct/verify b/RC9/qpid/python/examples/direct/verify
new file mode 100644
index 0000000000..92f87bf827
--- /dev/null
+++ b/RC9/qpid/python/examples/direct/verify
@@ -0,0 +1,22 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# See https://svn.apache.org/repos/asf/incubator/qpid/trunk/qpid/bin/verify
+clients ./declare_queues.py ./direct_producer.py ./direct_consumer.py
+outputs ./declare_queues.py.out ./direct_producer.py.out ./direct_consumer.py.out
diff --git a/RC9/qpid/python/examples/direct/verify.in b/RC9/qpid/python/examples/direct/verify.in
new file mode 100644
index 0000000000..5e691619d9
--- /dev/null
+++ b/RC9/qpid/python/examples/direct/verify.in
@@ -0,0 +1,14 @@
+==== declare_queues.py.out
+==== direct_producer.py.out
+==== direct_consumer.py.out
+message 0
+message 1
+message 2
+message 3
+message 4
+message 5
+message 6
+message 7
+message 8
+message 9
+That's all, folks!
diff --git a/RC9/qpid/python/examples/fanout/fanout_consumer.py b/RC9/qpid/python/examples/fanout/fanout_consumer.py
new file mode 100755
index 0000000000..0452baa8da
--- /dev/null
+++ b/RC9/qpid/python/examples/fanout/fanout_consumer.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+"""
+ fanout_consumer.py
+
+ This AMQP client declares a private, uniquely named queue,
+ binds it to the amq.fanout exchange, and reads the messages
+ that the fanout exchange copies to it.
+"""
+import qpid
+import sys
+import os
+from qpid.util import connect
+from qpid.connection import Connection
+from qpid.datatypes import Message, RangedSet, uuid4
+from qpid.queue import Empty
+
+#----- Initialization --------------------------------------
+
+
+# Set parameters for login
+
+host="127.0.0.1"
+port=5672
+user="guest"
+password="guest"
+
+# If an alternate host or port has been specified, use that instead
+# (this is used in our unit tests)
+if len(sys.argv) > 1 :
+ host=sys.argv[1]
+if len(sys.argv) > 2 :
+ port=int(sys.argv[2])
+
+# Create a connection.
+socket = connect(host, port)
+connection = Connection (sock=socket, username=user, password=password)
+connection.start()
+session = connection.session(str(uuid4()))
+
+
+#----- Main Body -------------------------------------------
+
+# Create a server-side queue and route messages to it.
+# The server-side queue must have a unique name. Use the
+# session id for that.
+server_queue_name = session.name
+session.queue_declare(queue=server_queue_name)
+session.exchange_bind(queue=server_queue_name, exchange="amq.fanout")
+
+# Create a local queue to receive messages from the server-side
+# queue.
+local_queue_name = "local_queue"
+local_queue = session.incoming(local_queue_name)
+
+# Call message_subscribe() to tell the server to deliver messages
+# from the AMQP queue to this local client queue.
+
+session.message_subscribe(queue=server_queue_name, destination=local_queue_name)
+local_queue.start()
+
+# The verify harness backgrounds this script and waits for the word
+# "Subscribed" on stdout before starting the producer -- hence the
+# explicit flush.
+print "Subscribed to queue " + server_queue_name
+sys.stdout.flush()
+
+# Initialize 'final' and 'content', variables used to identify the last message.
+final = "That's all, folks!" # In a message body, signals the last message
+content = "" # Content of the last message read
+
+# Read the messages - acknowledge each one
+message = None
+while content != final:
+ message = local_queue.get(timeout=10)
+ content = message.body
+ session.message_accept(RangedSet(message.id))
+ print content
+
+
+#----- Cleanup ------------------------------------------------
+
+# Clean up before exiting so there are no open threads.
+#
+
+session.close(timeout=10)
diff --git a/RC9/qpid/python/examples/fanout/fanout_producer.py b/RC9/qpid/python/examples/fanout/fanout_producer.py
new file mode 100755
index 0000000000..c4df252c70
--- /dev/null
+++ b/RC9/qpid/python/examples/fanout/fanout_producer.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+"""
+ fanout_producer.py
+
+ Publishes messages to the amq.fanout exchange, which copies
+ each message to every queue bound to it.
+"""
+import qpid
+import sys
+import os
+from qpid.util import connect
+from qpid.connection import Connection
+from qpid.datatypes import Message, uuid4
+from qpid.queue import Empty
+
+#----- Initialization -----------------------------------
+
+# Set parameters for login
+
+host="127.0.0.1"
+port=5672
+user="guest"
+password="guest"
+
+# If an alternate host or port has been specified, use that instead
+# (this is used in our unit tests)
+if len(sys.argv) > 1 :
+ host=sys.argv[1]
+if len(sys.argv) > 2 :
+ port=int(sys.argv[2])
+
+# Create a connection.
+socket = connect(host, port)
+connection = Connection (sock=socket, username=user, password=password)
+connection.start()
+session = connection.session(str(uuid4()))
+
+
+#----- Publish some messages ------------------------------
+
+# Create some messages and put them on the broker.
+
+# Note: a routing key is set here, but a fanout exchange delivers to all
+# bound queues regardless of it -- fanout_consumer.py binds its queue
+# without any binding key.
+delivery_properties = session.delivery_properties(routing_key="routing_key")
+
+for i in range(10):
+ session.message_transfer(destination="amq.fanout", message=Message(delivery_properties,"message " + str(i)))
+
+# Send a final sentinel message that tells consumers to stop reading.
+session.message_transfer(destination="amq.fanout", message=Message(delivery_properties, "That's all, folks!"))
+
+#----- Cleanup --------------------------------------------
+
+# Clean up before exiting so there are no open threads.
+
+session.close(timeout=10)
diff --git a/RC9/qpid/python/examples/fanout/listener.py b/RC9/qpid/python/examples/fanout/listener.py
new file mode 100755
index 0000000000..29db402e9d
--- /dev/null
+++ b/RC9/qpid/python/examples/fanout/listener.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+"""
+ listener.py
+
+ This AMQP client reads messages from a message
+ queue named "message_queue".
+"""
+
+import qpid
+import sys
+import os
+from qpid.util import connect
+from qpid.connection import Connection
+from qpid.datatypes import Message, RangedSet, uuid4
+from qpid.queue import Empty
+
+#
+
+from time import sleep
+
+#----- Message Receive Handler -----------------------------
+class Receiver:
+ def __init__ (self):
+ self.finalReceived = False
+
+ def isFinal (self):
+ return self.finalReceived
+
+ def Handler (self, message):
+ content = message.body
+ session.message_accept(RangedSet(message.id))
+ print content
+ if content == "That's all, folks!":
+ self.finalReceived = True
+
+
+#----- Initialization --------------------------------------
+
+# Set parameters for login
+
+host="127.0.0.1"
+port=5672
+user="guest"
+password="guest"
+
+# If an alternate host or port has been specified, use that instead
+# (this is used in our unit tests)
+if len(sys.argv) > 1 :
+ host=sys.argv[1]
+if len(sys.argv) > 2 :
+ port=int(sys.argv[2])
+
+# Create a connection.
+socket = connect(host, port)
+connection = Connection (sock=socket, username=user, password=password)
+connection.start()
+session = connection.session(str(uuid4()))
+
+#----- Read from queue --------------------------------------------
+
+# Create a server-side queue and route messages to it.
+# The server-side queue must have a unique name. Use the
+# session id for that.
+
+server_queue_name = session.name
+session.queue_declare(queue=server_queue_name)
+session.exchange_bind(queue=server_queue_name, exchange="amq.fanout")
+
+# Create a local queue to receive messages from the server-side
+# queue.
+local_queue_name = "local_queue"
+local_queue = session.incoming(local_queue_name)
+
+
+# The local queue name identifies the client-side queue.
+
+local_queue_name = "local_queue"
+local_queue = session.incoming(local_queue_name)
+
+# Call message_subscribe() to tell the broker to deliver messages
+# from the AMQP queue to this local client queue. The broker will
+# start delivering messages as soon as local_queue.start() is called.
+
+session.message_subscribe(queue=server_queue_name, destination=local_queue_name)
+local_queue.start()
+
+receiver = Receiver ()
+local_queue.listen (receiver.Handler)
+
+while not receiver.isFinal ():
+ sleep (1)
+
+
+#----- Cleanup ------------------------------------------------
+
+# Clean up before exiting so there are no open threads.
+#
+
+session.close()
diff --git a/RC9/qpid/python/examples/fanout/verify b/RC9/qpid/python/examples/fanout/verify
new file mode 100644
index 0000000000..9e5c364bfa
--- /dev/null
+++ b/RC9/qpid/python/examples/fanout/verify
@@ -0,0 +1,24 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# See https://svn.apache.org/repos/asf/incubator/qpid/trunk/qpid/bin/verify
+background "Subscribed" ./fanout_consumer.py
+background "Subscribed" ./fanout_consumer.py
+clients ./fanout_producer.py
+outputs ./fanout_producer.py.out "./fanout_consumer.py.out | remove_uuid" "./fanout_consumer.pyX.out | remove_uuid"
diff --git a/RC9/qpid/python/examples/fanout/verify.in b/RC9/qpid/python/examples/fanout/verify.in
new file mode 100644
index 0000000000..d4b8670de9
--- /dev/null
+++ b/RC9/qpid/python/examples/fanout/verify.in
@@ -0,0 +1,27 @@
+==== fanout_producer.py.out
+==== fanout_consumer.py.out | remove_uuid
+Subscribed to queue
+message 0
+message 1
+message 2
+message 3
+message 4
+message 5
+message 6
+message 7
+message 8
+message 9
+That's all, folks!
+==== fanout_consumer.pyX.out | remove_uuid
+Subscribed to queue
+message 0
+message 1
+message 2
+message 3
+message 4
+message 5
+message 6
+message 7
+message 8
+message 9
+That's all, folks!
diff --git a/RC9/qpid/python/examples/pubsub/topic_publisher.py b/RC9/qpid/python/examples/pubsub/topic_publisher.py
new file mode 100755
index 0000000000..b50d5fa8ca
--- /dev/null
+++ b/RC9/qpid/python/examples/pubsub/topic_publisher.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+"""
+ topic_publisher.py
+
+ This is a simple AMQP publisher application that uses a
+ Topic exchange. The publisher specifies the routing key
+ and the exchange for each message.
+"""
+
+import qpid
+import sys
+import os
+from qpid.util import connect
+from qpid.connection import Connection
+from qpid.datatypes import Message, RangedSet, uuid4
+from qpid.queue import Empty
+
+#----- Functions ----------------------------------------
+
+def send_msg(routing_key):
+ props = session.delivery_properties(routing_key=routing_key)
+ for i in range(5):
+ session.message_transfer(destination="amq.topic", message=Message(props,routing_key + " " + str(i)))
+
+#----- Initialization -----------------------------------
+
+# Set parameters for login
+
+host="127.0.0.1"
+port=5672
+user="guest"
+password="guest"
+
+# If an alternate host or port has been specified, use that instead
+# (this is used in our unit tests)
+if len(sys.argv) > 1 :
+ host=sys.argv[1]
+if len(sys.argv) > 2 :
+ port=int(sys.argv[2])
+
+# Create a connection.
+socket = connect(host, port)
+connection = Connection (sock=socket)
+connection.start()
+session = connection.session(str(uuid4()))
+
+#----- Publish some messages ------------------------------
+
+# Create some messages and put them on the broker. Use the
+# topic exchange. The routing keys are "usa.news", "usa.weather",
+# "europe.news", and "europe.weather".
+
+# usa.news
+send_msg("usa.news")
+
+# usa.weather
+send_msg("usa.weather")
+
+# europe.news
+send_msg("europe.news")
+
+# europe.weather
+send_msg("europe.weather")
+
+# Signal termination
+props = session.delivery_properties(routing_key="control")
+session.message_transfer(destination="amq.topic", message=Message(props,"That's all, folks!"))
+
+
+#----- Cleanup --------------------------------------------
+
+# Clean up before exiting so there are no open threads.
+
+session.close(timeout=10)
diff --git a/RC9/qpid/python/examples/pubsub/topic_subscriber.py b/RC9/qpid/python/examples/pubsub/topic_subscriber.py
new file mode 100755
index 0000000000..489c7cbb19
--- /dev/null
+++ b/RC9/qpid/python/examples/pubsub/topic_subscriber.py
@@ -0,0 +1,154 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+"""
+ topic_subscriber.py
+
+ This subscriber creates private queues and binds them
+ to the topics 'usa.#', 'europe.#', '#.news', and '#.weather'.
+"""
+
+import qpid
+import sys
+import os
+from qpid.util import connect
+from qpid.connection import Connection
+from qpid.datatypes import Message, RangedSet, uuid4
+from qpid.queue import Empty
+
+#----- Functions -------------------------------------------
+
+# Drain the given local queue: acknowledge and print each message until
+# either the "That's all, folks!" sentinel arrives or queue.get() times
+# out after 10 seconds and raises Empty.
+def dump_queue(queue):
+
+ content = "" # Content of the last message read
+ final = "That's all, folks!" # In a message body, signals the last message
+ message = 0 # Placeholder; replaced by the first message received
+
+ while content != final:
+ try:
+ message = queue.get(timeout=10)
+ content = message.body
+ session.message_accept(RangedSet(message.id))
+ print content
+ except Empty:
+ print "No more messages!"
+ return
+
+
+
+# Create a local queue named local_queue_name, subscribe it to the
+# server-side queue server_queue_name, and start message delivery.
+# NOTE(review): the "Subscribing ..." message below has a stray trailing
+# quote in place of the opening one; verify.in expects exactly this
+# output, so the string is deliberately left unchanged.
+def subscribe_queue(server_queue_name, local_queue_name):
+
+ print "Subscribing local queue '" + local_queue_name + "' to " + server_queue_name + "'"
+
+ queue = session.incoming(local_queue_name)
+
+ session.message_subscribe(queue=server_queue_name, destination=local_queue_name)
+ queue.start()
+
+ return queue
+
+#----- Initialization --------------------------------------
+
+# Set parameters for login
+
+host="127.0.0.1"
+port=5672
+user="guest"
+password="guest"
+
+# If an alternate host or port has been specified, use that instead
+# (this is used in our unit tests)
+if len(sys.argv) > 1 :
+ host=sys.argv[1]
+if len(sys.argv) > 2 :
+ port=int(sys.argv[2])
+
+# Create a connection.
+socket = connect(host, port)
+connection = Connection (sock=socket, username=user, password=password)
+connection.start()
+session = connection.session(str(uuid4()))
+
+#----- Main Body -- ----------------------------------------
+
+# declare queues on the server
+
+# The session name is appended so each subscriber instance gets its own
+# uniquely named queues.
+news = "news-" + session.name
+weather = "weather-" + session.name
+usa = "usa-" + session.name
+europe = "europe-" + session.name
+
+# exclusive=True: the queues belong to this session alone (presumably
+# removed when the session ends -- TODO confirm broker behaviour).
+session.queue_declare(queue=news, exclusive=True)
+session.queue_declare(queue=weather, exclusive=True)
+session.queue_declare(queue=usa, exclusive=True)
+session.queue_declare(queue=europe, exclusive=True)
+
+# Routing keys may be "usa.news", "usa.weather", "europe.news", or "europe.weather".
+
+# The '#' symbol matches one component of a multipart name, e.g. "#.news" matches
+# "europe.news" or "usa.news".
+
+session.exchange_bind(exchange="amq.topic", queue=news, binding_key="#.news")
+session.exchange_bind(exchange="amq.topic", queue=weather, binding_key="#.weather")
+session.exchange_bind(exchange="amq.topic", queue=usa, binding_key="usa.#")
+session.exchange_bind(exchange="amq.topic", queue=europe, binding_key="europe.#")
+
+# Bind each queue to the control queue so we know when to stop:
+# topic_publisher.py sends its final "That's all, folks!" sentinel
+# under the routing key "control".
+
+session.exchange_bind(exchange="amq.topic", queue=news, binding_key="control")
+session.exchange_bind(exchange="amq.topic", queue=weather, binding_key="control")
+session.exchange_bind(exchange="amq.topic", queue=usa, binding_key="control")
+session.exchange_bind(exchange="amq.topic", queue=europe, binding_key="control")
+
+# Remind the user to start the topic producer (the verify harness also
+# waits for this line before launching the publisher)
+
+print "Queues created - please start the topic producer"
+sys.stdout.flush()
+
+# Subscribe local queues to server queues
+
+local_news = "local_news"
+local_weather = "local_weather"
+local_usa = "local_usa"
+local_europe = "local_europe"
+
+local_news_queue = subscribe_queue(news, local_news)
+local_weather_queue = subscribe_queue(weather, local_weather)
+local_usa_queue = subscribe_queue(usa, local_usa)
+local_europe_queue = subscribe_queue(europe, local_europe)
+
+# Call dump_queue to print messages from each queue
+
+print "Messages on 'news' queue:"
+dump_queue(local_news_queue)
+
+print "Messages on 'weather' queue:"
+dump_queue(local_weather_queue)
+
+print "Messages on 'usa' queue:"
+dump_queue(local_usa_queue)
+
+print "Messages on 'europe' queue:"
+dump_queue(local_europe_queue)
+
+#----- Cleanup ------------------------------------------------
+
+# Clean up before exiting so there are no open threads.
+
+session.close(timeout=10)
diff --git a/RC9/qpid/python/examples/pubsub/verify b/RC9/qpid/python/examples/pubsub/verify
new file mode 100644
index 0000000000..cf1bade62e
--- /dev/null
+++ b/RC9/qpid/python/examples/pubsub/verify
@@ -0,0 +1,23 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# See https://svn.apache.org/repos/asf/incubator/qpid/trunk/qpid/bin/verify
+background "Queues created" ./topic_subscriber.py
+clients ./topic_publisher.py
+outputs ./topic_publisher.py.out "topic_subscriber.py.out | remove_uuid | sort"
diff --git a/RC9/qpid/python/examples/pubsub/verify.in b/RC9/qpid/python/examples/pubsub/verify.in
new file mode 100644
index 0000000000..1b74acd832
--- /dev/null
+++ b/RC9/qpid/python/examples/pubsub/verify.in
@@ -0,0 +1,55 @@
+==== topic_publisher.py.out
+==== topic_subscriber.py.out | remove_uuid | sort
+europe.news 0
+europe.news 0
+europe.news 1
+europe.news 1
+europe.news 2
+europe.news 2
+europe.news 3
+europe.news 3
+europe.news 4
+europe.news 4
+europe.weather 0
+europe.weather 0
+europe.weather 1
+europe.weather 1
+europe.weather 2
+europe.weather 2
+europe.weather 3
+europe.weather 3
+europe.weather 4
+europe.weather 4
+Messages on 'europe' queue:
+Messages on 'news' queue:
+Messages on 'usa' queue:
+Messages on 'weather' queue:
+Queues created - please start the topic producer
+Subscribing local queue 'local_europe' to europe-'
+Subscribing local queue 'local_news' to news-'
+Subscribing local queue 'local_usa' to usa-'
+Subscribing local queue 'local_weather' to weather-'
+That's all, folks!
+That's all, folks!
+That's all, folks!
+That's all, folks!
+usa.news 0
+usa.news 0
+usa.news 1
+usa.news 1
+usa.news 2
+usa.news 2
+usa.news 3
+usa.news 3
+usa.news 4
+usa.news 4
+usa.weather 0
+usa.weather 0
+usa.weather 1
+usa.weather 1
+usa.weather 2
+usa.weather 2
+usa.weather 3
+usa.weather 3
+usa.weather 4
+usa.weather 4
diff --git a/RC9/qpid/python/examples/request-response/client.py b/RC9/qpid/python/examples/request-response/client.py
new file mode 100755
index 0000000000..b29fcf3ea7
--- /dev/null
+++ b/RC9/qpid/python/examples/request-response/client.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+"""
+ client.py
+
+ Client for a client/server example
+
+"""
+
+import qpid
+import sys
+import os
+from qpid.util import connect
+from qpid.connection import Connection
+from qpid.datatypes import Message, RangedSet, uuid4
+from qpid.queue import Empty
+
+#----- Functions -------------------------------------------
+
+def dump_queue(queue_name):
+
+ print "Messages on queue: " + queue_name
+
+ message = 0
+
+ while True:
+ try:
+ message = queue.get(timeout=10)
+ content = message.body
+ session.message_accept(RangedSet(message.id))
+ print "Response: " + content
+ except Empty:
+ print "No more messages!"
+ break
+ except:
+ print "Unexpected exception!"
+ break
+
+
#----- Initialization --------------------------------------

# Default broker coordinates and credentials; the unit tests may
# override host and port on the command line.

host = "127.0.0.1"
port = 5672
user = "guest"
password = "guest"

argv = sys.argv
if len(argv) > 1:
  host = argv[1]
if len(argv) > 2:
  port = int(argv[2])

# Open a TCP connection to the broker, authenticate, and start a
# session named with a fresh UUID.

socket = connect(host, port)
connection = Connection(sock=socket, username=user, password=password)
connection.start()
session = connection.session(str(uuid4()))
+
+
#----- Main Body -------------------------------------------

# Create a response queue for the server to send responses to. Use the
# same string as the name of the queue and the name of the routing
# key.

reply_to = "reply_to:" + session.name
session.queue_declare(queue=reply_to, exclusive=True)
session.exchange_bind(exchange="amq.direct", queue=reply_to, binding_key=reply_to)

# Create a local queue and subscribe it to the response queue

local_queue_name = "local_queue"
queue = session.incoming(local_queue_name)

# Call message_subscribe() to tell the broker to deliver messages from
# the server's reply_to queue to our local client queue. The server
# will start delivering messages as soon as message credit is
# available.

session.message_subscribe(queue=reply_to, destination=local_queue_name)
queue.start()

# Send some messages to the server's request queue

lines = ["Twas brillig, and the slithy toves",
         "Did gyre and gimble in the wabe.",
         "All mimsy were the borogroves,",
         "And the mome raths outgrabe."]

# We will use the same reply_to and routing key for each message, so
# the server's responses all come back to the queue declared above.

message_properties = session.message_properties()
message_properties.reply_to = session.reply_to("amq.direct", reply_to)
delivery_properties = session.delivery_properties(routing_key="request")

for line in lines:
  print "Request: " + line
  session.message_transfer(destination="amq.direct", message=Message(message_properties, delivery_properties, line))

# Now see what messages the server sent to our reply_to queue

dump_queue(reply_to)


#----- Cleanup ------------------------------------------------

# Clean up before exiting so there are no open threads.

session.close(timeout=10)
diff --git a/RC9/qpid/python/examples/request-response/server.py b/RC9/qpid/python/examples/request-response/server.py
new file mode 100755
index 0000000000..a80c4541e4
--- /dev/null
+++ b/RC9/qpid/python/examples/request-response/server.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+"""
+ server.py
+
+ Server for a client/server example
+"""
+
+import qpid
+import sys
+import os
+from qpid.util import connect
+from qpid.connection import Connection
+from qpid.datatypes import Message, RangedSet, uuid4
+from qpid.queue import Empty
+
+#----- Functions -------------------------------------------
def respond(session, request):
    """Send the upper-cased body of *request* back to its reply-to queue.

    The response is routed via the exchange and routing key carried in
    the request's reply_to message property; a request without that
    property cannot be answered and raises an Exception.
    """
    message_properties = request.get("message_properties")
    reply_to = message_properties.reply_to
    # `is None` (identity) rather than `== None` for the missing case.
    if reply_to is None:
      raise Exception("This message is missing the 'reply_to' property, which is required")

    props = session.delivery_properties(routing_key=reply_to["routing_key"])
    session.message_transfer(destination=reply_to["exchange"], message=Message(props,request.body.upper()))
+
#----- Initialization --------------------------------------

# Default broker coordinates and credentials; the unit tests may
# override host and port on the command line.

host = "127.0.0.1"
port = 5672
user = "guest"
password = "guest"

argv = sys.argv
if len(argv) > 1:
  host = argv[1]
if len(argv) > 2:
  port = int(argv[2])

# Open a TCP connection to the broker, authenticate, and start a
# session named with a fresh UUID.

socket = connect(host, port)
connection = Connection(sock=socket, username=user, password=password)
connection.start()
session = connection.session(str(uuid4()))
+
+#----- Main Body -- ----------------------------------------
+
+# Create a request queue and subscribe to it
+
+session.queue_declare(queue="request", exclusive=True)
+session.exchange_bind(exchange="amq.direct", queue="request", binding_key="request")
+
+local_queue_name = "local_queue"
+
+session.message_subscribe(queue="request", destination=local_queue_name)
+
+queue = session.incoming(local_queue_name)
+queue.start()
+
+# Remind the user to start the client program
+
+print "Request server running - run your client now."
+print "(Times out after 100 seconds ...)"
+sys.stdout.flush()
+
+# Respond to each request
+
+# If we get a message, send it back to the user (as indicated in the
+# ReplyTo property)
+
+while True:
+ try:
+ request = queue.get(timeout=100)
+ respond(session, request)
+ session.message_accept(RangedSet(request.id))
+ except Empty:
+ print "No more messages!"
+ break;
+
+
+#----- Cleanup ------------------------------------------------
+
+# Clean up before exiting so there are no open threads.
+
+session.close(timeout=10)
diff --git a/RC9/qpid/python/examples/request-response/verify b/RC9/qpid/python/examples/request-response/verify
new file mode 100644
index 0000000000..3c058febb2
--- /dev/null
+++ b/RC9/qpid/python/examples/request-response/verify
@@ -0,0 +1,24 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# See https://svn.apache.org/repos/asf/incubator/qpid/trunk/qpid/bin/verify
+background "Request server running" ./server.py
+clients ./client.py
+kill %% # Must kill the server.
+outputs "./client.py.out | remove_uuid" " server.py.out | remove_uuid"
diff --git a/RC9/qpid/python/examples/request-response/verify.in b/RC9/qpid/python/examples/request-response/verify.in
new file mode 100644
index 0000000000..4c31128975
--- /dev/null
+++ b/RC9/qpid/python/examples/request-response/verify.in
@@ -0,0 +1,14 @@
+==== client.py.out | remove_uuid
+Request: Twas brillig, and the slithy toves
+Request: Did gyre and gimble in the wabe.
+Request: All mimsy were the borogroves,
+Request: And the mome raths outgrabe.
+Messages on queue: reply_to:
+Response: TWAS BRILLIG, AND THE SLITHY TOVES
+Response: DID GYRE AND GIMBLE IN THE WABE.
+Response: ALL MIMSY WERE THE BOROGROVES,
+Response: AND THE MOME RATHS OUTGRABE.
+No more messages!
+==== server.py.out | remove_uuid
+Request server running - run your client now.
+(Times out after 100 seconds ...)
diff --git a/RC9/qpid/python/examples/xml-exchange/declare_queues.py b/RC9/qpid/python/examples/xml-exchange/declare_queues.py
new file mode 100755
index 0000000000..ca40af5dc5
--- /dev/null
+++ b/RC9/qpid/python/examples/xml-exchange/declare_queues.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+"""
+ declare_queues.py
+
+ Creates and binds a queue on an AMQP direct exchange.
+
+ All messages using the routing key "routing_key" are
+ sent to the queue named "message_queue".
+"""
+
+import qpid
+import sys
+import os
+from qpid.util import connect
+from qpid.connection import Connection
+from qpid.datatypes import Message, RangedSet, uuid4
+from qpid.queue import Empty
+
#----- Initialization -----------------------------------

# Default broker coordinates and credentials; the unit tests may
# override host and port on the command line.

host = "127.0.0.1"
port = 5672
user = "guest"
password = "guest"

argv = sys.argv
if len(argv) > 1:
  host = argv[1]
if len(argv) > 2:
  port = int(argv[2])

# Open a TCP connection to the broker, authenticate, and start a
# session named with a fresh UUID.

socket = connect(host, port)
connection = Connection(sock=socket, username=user, password=password)
connection.start()
session = connection.session(str(uuid4()))
+
#----- Create a queue -------------------------------------

# exchange_declare() creates an exchange of type "xml", which routes
# on message *content* rather than on the routing key alone.
#
# The xquery in the binding arguments is evaluated against each
# message published with binding key "weather"; only messages for
# which it evaluates to true are delivered to "message_queue".

session.exchange_declare(exchange="xml", type="xml")
session.queue_declare(queue="message_queue")

binding = {}
binding["xquery"] = """
   let $w := ./weather
   return $w/station = 'Raleigh-Durham International Airport (KRDU)'
      and $w/temperature_f > 50
      and $w/temperature_f - $w/dewpoint > 5
      and $w/wind_speed_mph > 7
      and $w/wind_speed_mph < 20 """


session.exchange_bind(exchange="xml", queue="message_queue", binding_key="weather", arguments=binding)


#----- Cleanup ---------------------------------------------

session.close()
+
+
diff --git a/RC9/qpid/python/examples/xml-exchange/listener.py b/RC9/qpid/python/examples/xml-exchange/listener.py
new file mode 100755
index 0000000000..a56f5d6018
--- /dev/null
+++ b/RC9/qpid/python/examples/xml-exchange/listener.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+"""
+ listener.py
+
+ This AMQP client reads messages from a message
+ queue named "message_queue". It is implemented
+ as a message listener.
+"""
+
+
+import qpid
+import sys
+import os
+from qpid.util import connect
+from qpid.connection import Connection
+from qpid.datatypes import Message, RangedSet, uuid4
+from qpid.queue import Empty
+
+#
+
+from time import sleep
+
+
+#----- Message Receive Handler -----------------------------
class Receiver:
  """Callback holder passed to local_queue.listen().

  NOTE(review): finalReceived is initialized but never set to True by
  Handler, so isFinal() always returns False -- confirm whether the
  final-message tracking was meant to be wired up.
  """
  def __init__ (self):
    self.finalReceived = False

  def isFinal (self):
    # Reports whether the final message has been seen (currently never True).
    return self.finalReceived

  def Handler (self, message):
    # Print each message body and acknowledge it on the module-level session.
    content = message.body
    session.message_accept(RangedSet(message.id))
    print content
+
#----- Initialization --------------------------------------

# Default broker coordinates and credentials; the unit tests may
# override host and port on the command line.

host = "127.0.0.1"
port = 5672
user = "guest"
password = "guest"

argv = sys.argv
if len(argv) > 1:
  host = argv[1]
if len(argv) > 2:
  port = int(argv[2])

# Open a TCP connection to the broker, authenticate, and start a
# session named with a fresh UUID.

socket = connect(host, port)
connection = Connection(sock=socket, username=user, password=password)
connection.start()
session = connection.session(str(uuid4()))
+
#----- Read from queue --------------------------------------------

# Create a local client queue to receive incoming messages.

# The consumer tag identifies the client-side queue.

local_queue_name = "local_queue"
local_queue = session.incoming(local_queue_name)

# Call message_subscribe() to tell the broker to deliver messages
# from the AMQP queue to this local client queue. The broker will
# start delivering messages as soon as local_queue.start() is called.

session.message_subscribe(queue="message_queue", destination=local_queue_name)
local_queue.start()

# Deliver messages asynchronously to the Receiver callback instead of
# polling, then give the listener 10 seconds to process traffic.

receiver = Receiver ()
local_queue.listen (receiver.Handler)

sleep (10)


#----- Cleanup ------------------------------------------------

# Close the session so no threads are left running.
#

session.close()
diff --git a/RC9/qpid/python/examples/xml-exchange/verify b/RC9/qpid/python/examples/xml-exchange/verify
new file mode 100644
index 0000000000..a93a32dc90
--- /dev/null
+++ b/RC9/qpid/python/examples/xml-exchange/verify
@@ -0,0 +1,22 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# See https://svn.apache.org/repos/asf/incubator/qpid/trunk/qpid/bin/verify
+clients ./declare_queues.py ./xml_producer.py ./xml_consumer.py
+outputs ./declare_queues.py.out ./xml_producer.py.out ./xml_consumer.py.out
diff --git a/RC9/qpid/python/examples/xml-exchange/verify.in b/RC9/qpid/python/examples/xml-exchange/verify.in
new file mode 100644
index 0000000000..e5b9909408
--- /dev/null
+++ b/RC9/qpid/python/examples/xml-exchange/verify.in
@@ -0,0 +1,15 @@
+==== declare_queues.py.out
+==== xml_producer.py.out
+<weather><station>Raleigh-Durham International Airport (KRDU)</station><wind_speed_mph>0</wind_speed_mph><temperature_f>30</temperature_f><dewpoint>35</dewpoint></weather>
+<weather><station>New Bern, Craven County Regional Airport (KEWN)</station><wind_speed_mph>2</wind_speed_mph><temperature_f>40</temperature_f><dewpoint>40</dewpoint></weather>
+<weather><station>Boone, Watauga County Hospital Heliport (KTNB)</station><wind_speed_mph>5</wind_speed_mph><temperature_f>50</temperature_f><dewpoint>45</dewpoint></weather>
+<weather><station>Hatteras, Mitchell Field (KHSE)</station><wind_speed_mph>10</wind_speed_mph><temperature_f>60</temperature_f><dewpoint>50</dewpoint></weather>
+<weather><station>Raleigh-Durham International Airport (KRDU)</station><wind_speed_mph>16</wind_speed_mph><temperature_f>70</temperature_f><dewpoint>35</dewpoint></weather>
+<weather><station>New Bern, Craven County Regional Airport (KEWN)</station><wind_speed_mph>22</wind_speed_mph><temperature_f>80</temperature_f><dewpoint>40</dewpoint></weather>
+<weather><station>Boone, Watauga County Hospital Heliport (KTNB)</station><wind_speed_mph>28</wind_speed_mph><temperature_f>90</temperature_f><dewpoint>45</dewpoint></weather>
+<weather><station>Hatteras, Mitchell Field (KHSE)</station><wind_speed_mph>35</wind_speed_mph><temperature_f>100</temperature_f><dewpoint>50</dewpoint></weather>
+<weather><station>Raleigh-Durham International Airport (KRDU)</station><wind_speed_mph>42</wind_speed_mph><temperature_f>30</temperature_f><dewpoint>35</dewpoint></weather>
+<weather><station>New Bern, Craven County Regional Airport (KEWN)</station><wind_speed_mph>51</wind_speed_mph><temperature_f>40</temperature_f><dewpoint>40</dewpoint></weather>
+==== xml_consumer.py.out
+<weather><station>Raleigh-Durham International Airport (KRDU)</station><wind_speed_mph>16</wind_speed_mph><temperature_f>70</temperature_f><dewpoint>35</dewpoint></weather>
+No more messages!
diff --git a/RC9/qpid/python/examples/xml-exchange/xml_consumer.py b/RC9/qpid/python/examples/xml-exchange/xml_consumer.py
new file mode 100755
index 0000000000..cd89110b05
--- /dev/null
+++ b/RC9/qpid/python/examples/xml-exchange/xml_consumer.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+"""
+ direct_consumer.py
+
+ This AMQP client reads messages from a message
+ queue named "message_queue".
+"""
+
+import qpid
+import sys
+import os
+from random import randint
+from qpid.util import connect
+from qpid.connection import Connection
+from qpid.datatypes import Message, RangedSet, uuid4
+from qpid.queue import Empty
+
+
#----- Initialization --------------------------------------

# Default broker coordinates and credentials; the unit tests may
# override host and port on the command line.

host = "127.0.0.1"
port = 5672
user = "guest"
password = "guest"

argv = sys.argv
if len(argv) > 1:
  host = argv[1]
if len(argv) > 2:
  port = int(argv[2])

# Open a TCP connection to the broker, authenticate, and start a
# session named with a fresh UUID.

socket = connect(host, port)
connection = Connection(sock=socket, username=user, password=password)
connection.start()
session = connection.session(str(uuid4()))
+
+
#----- Read from queue --------------------------------------------

# Create a local client queue to receive incoming messages.

# The consumer tag identifies the client-side queue.

local_queue_name = "local_queue"
local_queue = session.incoming(local_queue_name)

# Call message_subscribe() to tell the broker to deliver messages
# from the AMQP queue to this local client queue. The broker will
# start delivering messages as soon as local_queue.start() is called.

session.message_subscribe(queue="message_queue", destination=local_queue_name)
local_queue.start()

# Drain the queue: acknowledge and print each message until no new
# message arrives within the 10 second timeout.

message = None
while True:
  try:
    message = local_queue.get(timeout=10)
    session.message_accept(RangedSet(message.id))
    content = message.body
    print content
  except Empty:
    print "No more messages!"
    break


#----- Cleanup ------------------------------------------------

# Close the session so no threads are left running.
#

session.close()
diff --git a/RC9/qpid/python/examples/xml-exchange/xml_producer.py b/RC9/qpid/python/examples/xml-exchange/xml_producer.py
new file mode 100755
index 0000000000..fa97cab4e1
--- /dev/null
+++ b/RC9/qpid/python/examples/xml-exchange/xml_producer.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+"""
+ xml_producer.py
+
+ Publishes messages to an XML exchange, using
+ the routing key "weather"
+"""
+
+
+import qpid
+import sys
+import os
+from qpid.util import connect
+from qpid.connection import Connection
+from qpid.datatypes import Message, RangedSet, uuid4
+from qpid.queue import Empty
+
#----- Functions ----------------------------------------

# Sample data used to generate rotating weather reports.

station = ("Raleigh-Durham International Airport (KRDU)",
           "New Bern, Craven County Regional Airport (KEWN)",
           "Boone, Watauga County Hospital Heliport (KTNB)",
           "Hatteras, Mitchell Field (KHSE)")
wind_speed_mph = ( 0, 2, 5, 10, 16, 22, 28, 35, 42, 51, 61, 70, 80 )
temperature_f = ( 30, 40, 50, 60, 70, 80, 90, 100 )
dewpoint = ( 35, 40, 45, 50 )

def pick_one(list, i):
  # Cycle through the sequence, rendering the chosen element as a string.
  return str(list[i % len(list)])

def report(i):
  # Assemble one XML weather report from the i-th rotating samples.
  parts = ["<weather>",
           "<station>", pick_one(station, i), "</station>",
           "<wind_speed_mph>", pick_one(wind_speed_mph, i), "</wind_speed_mph>",
           "<temperature_f>", pick_one(temperature_f, i), "</temperature_f>",
           "<dewpoint>", pick_one(dewpoint, i), "</dewpoint>",
           "</weather>"]
  return "".join(parts)
+
+
#----- Initialization -----------------------------------

# Default broker coordinates and credentials; the unit tests may
# override host and port on the command line.

host = "127.0.0.1"
port = 5672
user = "guest"
password = "guest"

argv = sys.argv
if len(argv) > 1:
  host = argv[1]
if len(argv) > 2:
  port = int(argv[2])

# Open a TCP connection to the broker, authenticate, and start a
# session named with a fresh UUID.

socket = connect(host, port)
connection = Connection(sock=socket, username=user, password=password)
connection.start()
session = connection.session(str(uuid4()))
+
+#----- Publish some messages ------------------------------
+
+# Create some messages and put them on the broker.
+
+props = session.delivery_properties(routing_key="weather")
+
+for i in range(10):
+ print report(i)
+ session.message_transfer(destination="xml", message=Message(props, report(i)))
+
+
+#----- Cleanup --------------------------------------------
+
+# Clean up before exiting so there are no open threads.
+
+session.close()
diff --git a/RC9/qpid/python/hello-world b/RC9/qpid/python/hello-world
new file mode 100755
index 0000000000..efee84059c
--- /dev/null
+++ b/RC9/qpid/python/hello-world
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import sys
+from qpid.connection import Connection
+from qpid.util import connect
+from qpid.datatypes import uuid4, Message
+
# Broker coordinates; optionally overridden by argv[1] (host) and
# argv[2] (port).
broker = "127.0.0.1"
port = 5672

if len(sys.argv) > 1: broker = sys.argv[1]
if len(sys.argv) > 2: port = int(sys.argv[2])

# More than two arguments is a usage error.
if len(sys.argv) > 3:
  print >> sys.stderr, "usage: hello-world [ <broker> [ <port> ] ]"
  sys.exit(1)

# connect to the server and start a session named by a fresh UUID
conn = Connection(connect(broker, port))
conn.start()
ssn = conn.session(str(uuid4()))

# create a queue
ssn.queue_declare("test-queue")

# publish a message routed to the queue we just declared
dp = ssn.delivery_properties(routing_key="test-queue")
mp = ssn.message_properties(content_type="text/plain")
msg = Message(dp, mp, "Hello World!")
ssn.message_transfer(message=msg)

# subscribe to the queue; accept_mode "none" means this client need
# not send explicit message_accept acknowledgements
ssn.message_subscribe(destination="messages", queue="test-queue",
                      accept_mode=ssn.accept_mode.none)
incoming = ssn.incoming("messages")

# start incoming message flow
incoming.start()

# grab a message from the queue (waits up to 10 seconds)

print incoming.get(timeout=10)

# cancel the subscription and close the session and connection
ssn.message_cancel(destination="messages")
ssn.close()
conn.close()
diff --git a/RC9/qpid/python/java_failing_0-8.txt b/RC9/qpid/python/java_failing_0-8.txt
new file mode 100644
index 0000000000..c13b40a42c
--- /dev/null
+++ b/RC9/qpid/python/java_failing_0-8.txt
@@ -0,0 +1,2 @@
+tests_0-8.exchange.RecommendedTypesRuleTests.testTopic
+tests_0-8.exchange.RequiredInstancesRuleTests.testAmqTopic
diff --git a/RC9/qpid/python/java_failing_0-9.txt b/RC9/qpid/python/java_failing_0-9.txt
new file mode 100644
index 0000000000..7252d0f496
--- /dev/null
+++ b/RC9/qpid/python/java_failing_0-9.txt
@@ -0,0 +1,18 @@
+tests.basic.BasicTests.test_qos_prefetch_count
+tests.basic.BasicTests.test_ack
+tests.basic.BasicTests.test_cancel
+tests.basic.BasicTests.test_consume_exclusive
+tests.basic.BasicTests.test_consume_no_local
+tests.basic.BasicTests.test_consume_queue_errors
+tests.basic.BasicTests.test_consume_unique_consumers
+tests.basic.BasicTests.test_get
+tests.basic.BasicTests.test_qos_prefetch_size
+tests.basic.BasicTests.test_recover_requeue
+
+tests.exchange.RecommendedTypesRuleTests.testTopic
+tests.exchange.RequiredInstancesRuleTests.testAmqTopic
+
+tests.message.MessageTests.test_checkpoint
+tests.message.MessageTests.test_reject
+
+tests.broker.BrokerTests.test_ping_pong
diff --git a/RC9/qpid/python/mllib/__init__.py b/RC9/qpid/python/mllib/__init__.py
new file mode 100644
index 0000000000..39e9363614
--- /dev/null
+++ b/RC9/qpid/python/mllib/__init__.py
@@ -0,0 +1,65 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+This module provides document parsing and transformation utilities for
+both SGML and XML.
+"""
+
+import os, dom, transforms, parsers, sys
+import xml.sax, types
+from cStringIO import StringIO
+
def transform(node, *args):
  """Apply each transform in *args to the tree in turn.

  Bare (old-style) classes are instantiated before use; the result of
  each dispatch feeds the next transform.
  """
  result = node
  for t in args:
    if isinstance(t, types.ClassType):
      t = t()
    result = result.dispatch(t)
  return result
+
def sgml_parse(source):
  """Parse SGML from a string or file-like object into a document tree.

  Strings are wrapped in a StringIO; the source name (or "<string>")
  is attached to every node for error reporting.
  """
  # Bug fix: fname was previously unbound (NameError on the line()
  # call below) when source was a nameless file-like object.
  fname = "<unknown>"
  if isinstance(source, basestring):
    source = StringIO(source)
    fname = "<string>"
  elif hasattr(source, "name"):
    fname = source.name
  p = parsers.SGMLParser()
  num = 1
  for line in source:
    p.feed(line)
    p.parser.line(fname, num, None)
    num += 1
  p.close()
  return p.parser.tree
+
def xml_parse(filename):
  """Parse the XML file at *filename* into a document tree."""
  source = filename
  if sys.version_info[:2] == (2, 3):
    # XXX: python 2.3's SAX needs an explicit file:// URL
    source = "file://%s" % os.path.abspath(filename)
  handler = parsers.XMLParser()
  xml.sax.parse(source, handler)
  return handler.parser.tree
+
def sexp(node):
  """Render *node* as an s-expression string."""
  writer = transforms.Sexp()
  node.dispatch(writer)
  return writer.out
diff --git a/RC9/qpid/python/mllib/dom.py b/RC9/qpid/python/mllib/dom.py
new file mode 100644
index 0000000000..df2b88322a
--- /dev/null
+++ b/RC9/qpid/python/mllib/dom.py
@@ -0,0 +1,295 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+Simple DOM for both SGML and XML documents.
+"""
+
+from __future__ import division
+from __future__ import generators
+from __future__ import nested_scopes
+
+import transforms
+
class Container:
    """Mixin holding an ordered list of child components."""

    def __init__(self):
        self.children = []

    def add(self, child):
        """Append *child*, making this container its parent."""
        child.parent = self
        self.children.append(child)

    def extend(self, children):
        """Append every element of *children*, reparenting each one."""
        for child in children:
            self.add(child)
+
class Component:
    """Mixin giving a node a parent pointer and source-location fields."""

    def __init__(self):
        self.parent = None

    def index(self):
        """Return this component's position among its siblings (0 if detached)."""
        if not self.parent:
            return 0
        return self.parent.children.index(self)

    def _line(self, file, line, column):
        # Record where in the source document this component appeared.
        self.file = file
        self.line = line
        self.column = column
+
class DispatchError(Exception):
    """Raised when no dispatch handler exists for a node.

    scope -- the dispatching node (or its type chain description)
    f     -- the visitor object that lacked a handler
    """

    def __init__(self, scope, f):
        # Fixed typo ("attribtue") and actually pass the message to
        # Exception.__init__ -- previously it was computed and dropped,
        # so str(err) was empty.
        msg = "no such attribute: %r has no handler for %r" % (f, scope)
        Exception.__init__(self, msg)
        self.scope = scope
        self.f = f
+
class Dispatcher:
    """Mixin implementing dispatch over the node "type" name chain.

    Subclasses declare two class attributes: ``type`` (the dispatch name,
    a string) and ``base`` (the next class in the chain, or None).  A
    visitor handles a node by defining a method named after the most
    specific type it cares about (e.g. ``tag``, ``leaf``, ``node``).
    """

    def is_type(self, type):
        # Walk the type chain looking for a matching type name.
        cls = self
        while cls != None:
            if cls.type == type:
                return True
            cls = cls.base
        return False

    def dispatch(self, f, attrs = ""):
        """Invoke on *f* the handler for the most specific type in the chain.

        attrs -- pre-formatted attribute names already tried (used by
        Tag.dispatch to mention its do_<name> attempt in the error).
        Raises AttributeError listing every handler name that was tried.
        """
        cls = self
        while cls != None:
            if hasattr(f, cls.type):
                return getattr(f, cls.type)(self)
            else:
                cls = cls.base

        # No handler found: build a readable, comma-separated list of the
        # handler names that were attempted, ending with "or '<base>'".
        cls = self
        while cls != None:
            if attrs:
                sep = ", "
                if cls.base == None:
                    sep += "or "
            else:
                sep = ""
            attrs += "%s'%s'" % (sep, cls.type)
            cls = cls.base

        raise AttributeError("'%s' object has no attribute %s" %
                             (f.__class__.__name__, attrs))
+
class Node(Container, Component, Dispatcher):
    """A document node: holds children, knows its parent, and can be
    queried with the path mini-language via __getitem__."""

    type = "node"
    base = None

    def __init__(self):
        Container.__init__(self)
        Component.__init__(self)
        # Query rooted at this node; used by __getitem__.
        self.query = Query([self])

    def __getitem__(self, name):
        """Return the first result of querying *name*, or None."""
        for match in self.query[name]:
            return match

    def text(self):
        """Return the textual content of this subtree."""
        return self.dispatch(transforms.Text())

    def tag(self, name, *attrs, **kwargs):
        """Create, append, and return a child Tag called *name*."""
        child = Tag(name, *attrs, **kwargs)
        self.add(child)
        return child

    def data(self, s):
        """Append and return a Data child holding *s*."""
        child = Data(s)
        self.add(child)
        return child

    def entity(self, s):
        """Append and return an Entity child referencing *s*."""
        child = Entity(s)
        self.add(child)
        return child
+
class Tree(Node):
    """Root container of a parsed document; dispatch type "tree"."""

    type = "tree"
    base = Node
+
class Tag(Node):
    """An element node with a name and ordered (name, value) attributes."""

    type = "tag"
    base = Node

    def __init__(self, _name, *attrs, **kwargs):
        # _name is underscored so "name" can also appear as an attribute
        # keyword without clashing.
        Node.__init__(self)
        self.name = _name
        self.attrs = list(attrs)
        self.attrs.extend(kwargs.items())
        # True when the parser implicitly closed this tag (no end tag).
        self.singleton = False

    def get_attr(self, name):
        """Return the value of the first attribute called *name*, or None."""
        for k, v in self.attrs:
            if name == k:
                return v

    def dispatch(self, f):
        # Prefer a tag-specific handler (f.do_<tagname>); otherwise fall
        # back to the generic type-chain dispatch, mentioning the do_
        # attribute we tried in any resulting error message.
        try:
            attr = "do_" + self.name
            method = getattr(f, attr)
        except AttributeError:
            return Dispatcher.dispatch(self, f, "'%s'" % attr)
        return method(self)
+
class Leaf(Component, Dispatcher):
    """A childless node carrying a string payload in .data."""

    type = "leaf"
    base = None

    def __init__(self, data):
        assert isinstance(data, basestring)
        # Initialize Component so an unattached leaf still has .parent
        # (previously index() raised AttributeError on a leaf that was
        # never add()ed to a container).
        Component.__init__(self)
        self.data = data
+
class Data(Leaf):
    """Raw character data."""
    type = "data"
    base = Leaf

class Entity(Leaf):
    """A named entity reference; data is the entity name."""
    type = "entity"
    base = Leaf

class Character(Leaf):
    """A numeric character reference; data is the reference text."""
    type = "character"
    base = Leaf

class Comment(Leaf):
    """A comment; data is the comment text."""
    type = "comment"
    base = Leaf
+
+###################
+## Query Classes ##
+###########################################################################
+
class Adder:
    """Mixin giving query views the ``+`` operator (lazy concatenation)."""

    def __add__(self, other):
        return Sum(self, other)

class Sum(Adder):
    """Lazy concatenation of two iterables."""

    def __init__(self, left, right):
        self.left = left
        self.right = right

    def __iter__(self):
        # Exhaust the left operand, then the right.
        for operand in (self.left, self.right):
            for item in operand:
                yield item
+
class View(Adder):
    """Base class for lazy views over an iterable *source*."""

    def __init__(self, source):
        self.source = source
+
class Filter(View):
    """Lazily yield the items of *source* for which *predicate* is true."""

    def __init__(self, predicate, source):
        View.__init__(self, source)
        self.predicate = predicate

    def __iter__(self):
        keep = self.predicate
        for item in self.source:
            if keep(item):
                yield item
+
class Flatten(View):
    """Depth-first view that expands Tree nodes into their children,
    yielding every other node as-is.

    NOTE(review): only Tree instances are descended into -- Tags are
    yielded without expanding their children; confirm this is the
    intended search depth for Query's tag-name lookups.
    """

    def __iter__(self):
        # Explicit stack of iterators instead of recursion.
        sources = [iter(self.source)]
        while sources:
            try:
                nd = sources[-1].next()
                if isinstance(nd, Tree):
                    sources.append(iter(nd.children))
                else:
                    yield nd
            except StopIteration:
                sources.pop()
+
class Children(View):
    """Lazily yield every child of every node in *source*, in order."""

    def __iter__(self):
        for node in self.source:
            for child in node.children:
                yield child
+
class Attributes(View):
    """Lazily yield every (name, value) attribute pair of nodes in *source*."""

    def __iter__(self):
        for node in self.source:
            for pair in node.attrs:
                yield pair
+
class Values(View):
    """Yield the value of each (name, value) pair in *source*."""

    def __iter__(self):
        for _name, value in self.source:
            yield value
+
def flatten_path(path):
    """Yield the primitive steps (strings or callables) making up *path*.

    A string is split on "/", a callable is yielded as-is, and any other
    iterable is flattened recursively.
    """
    if isinstance(path, basestring):
        for step in path.split("/"):
            yield step
    elif callable(path):
        yield path
    else:
        for element in path:
            for step in flatten_path(element):
                yield step
+
class Query(View):
    """A lazy node query supporting a small path language.

    Path steps (see flatten_path) are applied left to right:
      "@name"  -- values of attributes called name (searches attrs)
      "#type"  -- children whose dispatch type chain contains type
      "name"   -- descendant Tags called name (children, Trees flattened)
      callable -- arbitrary predicate over the current result set
    """

    def __iter__(self):
        for nd in self.source:
            yield nd

    def __getitem__(self, path):
        """Return a view selecting *path* relative to this query."""
        query = self.source
        for p in flatten_path(path):
            if callable(p):
                select = Query
                pred = p
                source = query
            elif isinstance(p, basestring):
                # The lambdas bind the loop-dependent value as a default
                # argument (n=..., t=...) to avoid late-binding bugs.
                if p[0] == "@":
                    select = Values
                    pred = lambda x, n=p[1:]: x[0] == n
                    source = Attributes(query)
                elif p[0] == "#":
                    select = Query
                    pred = lambda x, t=p[1:]: x.is_type(t)
                    source = Children(query)
                else:
                    select = Query
                    pred = lambda x, n=p: isinstance(x, Tag) and x.name == n
                    source = Flatten(Children(query))
            else:
                raise ValueError(p)
            query = select(Filter(pred, source))

        return query
diff --git a/RC9/qpid/python/mllib/parsers.py b/RC9/qpid/python/mllib/parsers.py
new file mode 100644
index 0000000000..3e7cc10dc2
--- /dev/null
+++ b/RC9/qpid/python/mllib/parsers.py
@@ -0,0 +1,139 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+Parsers for SGML and XML to dom.
+"""
+
+import sgmllib, xml.sax.handler
+from dom import *
+
class Parser:
    """Event-driven tree builder shared by the SGML and XML front ends.

    Keeps a cursor (self.node) into the tree under construction and a
    list (self.nodes) of freshly created nodes awaiting source-position
    information from line().
    """

    def __init__(self):
        self.tree = Tree()
        self.node = self.tree
        self.nodes = []

    def line(self, id, lineno, colno):
        # Stamp every node created since the last call with this position.
        while self.nodes:
            n = self.nodes.pop()
            n._line(id, lineno, colno)

    def add(self, node):
        # Attach under the cursor and queue for position stamping.
        self.node.add(node)
        self.nodes.append(node)

    def start(self, name, attrs):
        # Open an element; subsequent content nests under it.
        tag = Tag(name, *attrs)
        self.add(tag)
        self.node = tag

    def end(self, name):
        # Close the current element, repairing unclosed children first.
        self.balance(name)
        self.node = self.node.parent

    def data(self, data):
        # Coalesce adjacent character data into one Data node.
        children = self.node.children
        if children and isinstance(children[-1], Data):
            children[-1].data += data
        else:
            self.add(Data(data))

    def comment(self, comment):
        self.add(Comment(comment))

    def entity(self, ref):
        self.add(Entity(ref))

    def character(self, ref):
        self.add(Character(ref))

    def balance(self, name = None):
        # Pop implicitly-closed elements until *name* (or the root) is
        # the cursor: each popped element gives its children to its
        # parent and is marked as a singleton (no explicit end tag).
        while self.node != self.tree and name != self.node.name:
            self.node.parent.extend(self.node.children)
            del self.node.children[:]
            self.node.singleton = True
            self.node = self.node.parent
+
+
class SGMLParser(sgmllib.SGMLParser):
    """sgmllib front end forwarding every parse event to a Parser."""

    def __init__(self, entitydefs = None):
        sgmllib.SGMLParser.__init__(self)
        # Default to no entity definitions so entity references reach
        # unknown_entityref instead of being substituted.
        # NOTE(review): assumption about sgmllib's entitydefs handling --
        # confirm against the sgmllib documentation.
        if entitydefs == None:
            self.entitydefs = {}
        else:
            self.entitydefs = entitydefs
        self.parser = Parser()

    def unknown_starttag(self, name, attrs):
        self.parser.start(name, attrs)

    def handle_data(self, data):
        self.parser.data(data)

    def handle_comment(self, comment):
        self.parser.comment(comment)

    def unknown_entityref(self, ref):
        self.parser.entity(ref)

    def unknown_charref(self, ref):
        self.parser.character(ref)

    def unknown_endtag(self, name):
        self.parser.end(name)

    def close(self):
        sgmllib.SGMLParser.close(self)
        # Repair any still-open elements; the cursor must end at the root.
        self.parser.balance()
        assert self.parser.node == self.parser.tree
+
class XMLParser(xml.sax.handler.ContentHandler):
    """SAX content handler that builds a dom tree via Parser."""

    def __init__(self):
        self.parser = Parser()
        self.locator = None

    def line(self):
        """Stamp pending nodes with the current document position."""
        if self.locator == None:
            return
        loc = self.locator
        self.parser.line(loc.getSystemId(),
                         loc.getLineNumber(),
                         loc.getColumnNumber())

    def setDocumentLocator(self, locator):
        self.locator = locator

    def startElement(self, name, attrs):
        self.parser.start(name, attrs.items())
        self.line()

    def endElement(self, name):
        self.parser.end(name)
        self.line()

    def characters(self, content):
        self.parser.data(content)
        self.line()

    def skippedEntity(self, name):
        self.parser.entity(name)
        self.line()
+
diff --git a/RC9/qpid/python/mllib/transforms.py b/RC9/qpid/python/mllib/transforms.py
new file mode 100644
index 0000000000..69d99125e3
--- /dev/null
+++ b/RC9/qpid/python/mllib/transforms.py
@@ -0,0 +1,164 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+Useful transforms for dom objects.
+"""
+
+import dom
+from cStringIO import StringIO
+
class Visitor:
    """Base visitor: walks the whole tree, doing nothing at leaves."""

    def descend(self, node):
        # Re-dispatch this visitor to each child, in document order.
        for child in node.children:
            child.dispatch(self)

    def node(self, node):
        self.descend(node)

    def leaf(self, leaf):
        pass
+
class Identity:
    """Visitor producing a deep copy of the tree it is dispatched over."""

    def descend(self, node):
        # Copy each child by re-dispatching this visitor to it.
        return [child.dispatch(self) for child in node.children]

    def default(self, tag):
        copy = dom.Tag(tag.name, *tag.attrs)
        copy.extend(self.descend(tag))
        return copy

    def tree(self, tree):
        copy = dom.Tree()
        copy.extend(self.descend(tree))
        return copy

    def tag(self, tag):
        return self.default(tag)

    def leaf(self, leaf):
        # Leaves copy by re-wrapping their data in the same class.
        return leaf.__class__(leaf.data)
+
class Sexp(Identity):
    """Transform rendering a tree as an indented s-expression string.

    The rendered text accumulates in self.out; self.level tracks the
    current indent column and self.stack the open group markers.
    """

    def __init__(self):
        self.stack = []   # open group markers, for computing outdents
        self.level = 0    # current indent column
        self.out = ""     # accumulated output

    def open(self, s):
        # Begin a group "(s" and indent subsequent lines past it.
        self.out += "(%s" % s
        self.level += len(s) + 1
        self.stack.append(s)

    def line(self, s = ""):
        # Start a new line at the current indent; trailing whitespace on
        # the previous line is trimmed first.
        self.out = self.out.rstrip()
        self.out += "\n" + " "*self.level + s

    def close(self):
        # Close the innermost group and restore the previous indent.
        s = self.stack.pop()
        self.level -= len(s) + 1
        self.out = self.out.rstrip()
        self.out += ")"

    def tree(self, tree):
        self.open("+ ")
        for child in tree.children:
            self.line(); child.dispatch(self)
        self.close()

    def tag(self, tag):
        self.open("Node(%s) " % tag.name)
        for child in tag.children:
            self.line(); child.dispatch(self)
        self.close()

    def leaf(self, leaf):
        self.line("%s(%s)" % (leaf.__class__.__name__, leaf.data))
+
class Output:
    """Visitor serializing a dom tree back to markup text."""

    def descend(self, node):
        buf = StringIO()
        for child in node.children:
            buf.write(child.dispatch(self))
        return buf.getvalue()

    def default(self, tag):
        buf = StringIO()
        buf.write("<%s" % tag.name)
        for name, value in tag.attrs:
            buf.write(' %s="%s"' % (name, value))
        buf.write(">")
        buf.write(self.descend(tag))
        # Singleton tags were implicitly closed; emit no end tag.
        if not tag.singleton:
            buf.write("</%s>" % tag.name)
        return buf.getvalue()

    def tree(self, tree):
        return self.descend(tree)

    def tag(self, tag):
        return self.default(tag)

    def data(self, leaf):
        return leaf.data

    def entity(self, leaf):
        return "&%s;" % leaf.data

    def character(self, leaf):
        raise Exception("TODO")

    def comment(self, leaf):
        return "<!-- %s -->" % leaf.data
+
class Empty(Output):
    """Serializer that drops all markup and leaf content, producing the
    empty string everywhere (tags still descend into their children)."""

    def tag(self, tag):
        return self.descend(tag)

    def data(self, node):
        return ""

    def entity(self, node):
        return ""

    def character(self, node):
        return ""

    def comment(self, node):
        return ""
+
class Text(Empty):
    """Serializer yielding only the textual content of a tree."""

    def data(self, node):
        return node.data

    def entity(self, node):
        return "&%s;" % node.data

    def character(self, node):
        # XXX: is this right?
        return "&#%s;" % node.data
diff --git a/RC9/qpid/python/models/fedsim/__init__.py b/RC9/qpid/python/models/fedsim/__init__.py
new file mode 100644
index 0000000000..63a3f41f28
--- /dev/null
+++ b/RC9/qpid/python/models/fedsim/__init__.py
@@ -0,0 +1,19 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
diff --git a/RC9/qpid/python/models/fedsim/fedsim.py b/RC9/qpid/python/models/fedsim/fedsim.py
new file mode 100644
index 0000000000..edb6c4c8ed
--- /dev/null
+++ b/RC9/qpid/python/models/fedsim/fedsim.py
@@ -0,0 +1,434 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
class Sim:
    """Top-level federation simulation: owns brokers and clients and
    keeps an error/warning tally for the test scripts."""

    def __init__(self):
        self.brokers = {}
        self.clients = {}
        self.errors = 0
        self.warnings = 0

    def error(self, text):
        # Record and report a correctness failure.
        self.errors += 1
        print "###### Error:", text

    def warning(self, text):
        self.warnings += 1
        print "###### Warning:", text

    def end(self):
        # Print the final error/warning tally.
        print "========================"
        print "Errors: %d, Warnings: %d" % (self.errors, self.warnings)
        print "========================"

    def dumpState(self):
        # Dump every broker's exchange bindings with their origin lists.
        print "============================"
        print "===== Federation State ====="
        print "============================"
        for broker in self.brokers:
            for exchange in self.brokers[broker].exchanges:
                print "Exchange %s.%s" % (broker, exchange)
                for key in self.brokers[broker].exchanges[exchange].keys:
                    print "  Key %s" % key
                    for queue in self.brokers[broker].exchanges[exchange].keys[key]:
                        print "    Queue %s origins=%s" % \
                            (queue.name, self.brokers[broker].exchanges[exchange].keys[key][queue].originList)

    def addBroker(self, name):
        """Create, register, and return a broker called *name*."""
        if name in self.brokers:
            raise Exception("Broker of same name already exists")
        broker = Broker(self, name)
        self.brokers[name] = broker
        return broker

    def addClient(self, name, broker):
        """Create, register, and return a client attached to *broker*."""
        if name in self.clients:
            raise Exception("Client of same name already exists")
        client = Client(self, name, broker)
        self.clients[name] = client
        return client

    def link(self, left, right, bidir=True):
        """Federate two brokers with dynamic amq.direct bridges."""
        print "====== link %s to %s, bidir=%s" % (left.tag, right.tag, bidir)
        l1 = left.createLink(right)
        l1.bridge("amq.direct")
        if bidir:
            l2 = right.createLink(left)
            l2.bridge("amq.direct")

    def bind(self, client, key):
        print "====== bind Client(%s): k=%s" % (client.name, key)
        client.bind(key)

    def unbind(self, client, key):
        print "====== unbind Client(%s): k=%s" % (client.name, key)
        client.unbind(key)

    def sendMessage(self, key, broker, body="Message Body"):
        """Publish on *broker* and verify each bound client receives it."""
        print "====== sendMessage: broker=%s k=%s" % (broker.tag, key)
        msg = Message(key, body)
        exchange = broker.exchanges["amq.direct"]
        # Arm every bound client's expectation before routing.
        for client in self.clients:
            self.clients[client].expect(key);
        exchange.receive(key, msg, True)
        for client in self.clients:
            self.clients[client].checkReception()
+
+
class Destination:
    """Anything that can receive a routed message; default drops it."""
    def receive(self, key, msg, fromUser=False):
        pass
+
+
+class Client(Destination):
+ def __init__(self, sim, name, broker):
+ self.sim = sim
+ self.name = name
+ self.broker = broker
+ self.broker.connect(self)
+ self.queue = self.broker.declare_queue(name)
+ self.subscription = self.broker.subscribe(self, name)
+ self.expected = None
+ self.boundKeys = []
+
+ def bind(self, key):
+ self.boundKeys.append(key)
+ self.broker.bind("amq.direct", self.name, key)
+
+ def unbind(self, key):
+ self.boundKeys.remove(key)
+ self.broker.unbind("amq.direct", self.name, key)
+
+ def receive(self, key, msg, fromUser=False):
+ print "Client(%s) received [%s]: %s" % (self.name, key, msg.body)
+ if self.expected == key:
+ self.expected = None
+ else:
+ self.sim.error("Client(%s) received unexpected message with key [%s]" % \
+ (self.name, self.expected))
+
+ def expect(self, key):
+ if key in self.boundKeys:
+ self.expected = key
+
+ def checkReception(self):
+ if self.expected:
+ self.sim.error("Client(%s) never received message with key [%s]" % \
+ (self.name, self.expected))
+
class Broker(Client):
    """Simulated broker: owns exchanges, queues, and links to peers.

    NOTE(review): inherits Client (so a broker can act as a message
    destination) but never runs Client.__init__ -- confirm intended.
    """

    def __init__(self, sim, tag):
        self.sim = sim
        self.tag = tag
        self.connections = {}
        self.exchanges = {}
        self.queues = {}
        self.subscriptions = {}
        self.links = {}
        self.directExchange = self.declare_exchange("amq.direct")

    def connect(self, client):
        if client in self.connections:
            raise Exception("Client already connected")
        self.connections[client] = Connection(client)

    def declare_queue(self, name, tag=None, exclude=None):
        """Create queue *name* and return it (previously returned None,
        so Client.__init__ stored None in self.queue)."""
        if name in self.queues:
            raise Exception("Queue already exists")
        queue = Queue(self, name, tag, exclude)
        self.queues[name] = queue
        return queue

    def subscribe(self, dest, queueName):
        """Attach *dest* as the consumer of *queueName*; returns the
        Queue as the subscription handle (previously returned None)."""
        if queueName not in self.queues:
            raise Exception("Queue does not exist")
        queue = self.queues[queueName]
        queue.setDest(dest)
        return queue

    def declare_exchange(self, name):
        """Create the exchange *name*, or return the existing one.

        Previously an already-declared exchange caused a bare return
        (None); returning the existing exchange makes this idempotent.
        """
        if name in self.exchanges:
            return self.exchanges[name]
        exchange = Exchange(self, name)
        self.exchanges[name] = exchange
        return exchange

    def bind(self, exchangeName, queueName, key, tagList=[], fedOp=None, origin=None):
        """Bind *queueName* to *key* on *exchangeName*, forwarding the
        federation arguments to the exchange.

        NOTE(review): the mutable default tagList=[] is never mutated
        here or in Exchange.bind, so it is kept for interface stability.
        """
        if exchangeName not in self.exchanges:
            raise Exception("Exchange not found")
        if queueName not in self.queues:
            raise Exception("Queue not found")
        exchange = self.exchanges[exchangeName]
        queue = self.queues[queueName]
        exchange.bind(queue, key, tagList, fedOp, origin)

    def unbind(self, exchangeName, queueName, key):
        """Remove the binding of *queueName* for *key* on *exchangeName*."""
        if exchangeName not in self.exchanges:
            raise Exception("Exchange not found")
        if queueName not in self.queues:
            raise Exception("Queue not found")
        exchange = self.exchanges[exchangeName]
        queue = self.queues[queueName]
        exchange.unbind(queue, key)

    def createLink(self, other):
        """Create, register, and return a unidirectional Link to *other*."""
        if other in self.links:
            raise Exception("Peer broker already linked")
        link = Link(self, other)
        self.links[other] = link
        return link
+
+
class Connection:
    """Record of a client (or link) attached to a broker."""
    def __init__(self, client):
        self.client = client
+
+
class Exchange(Destination):
    """Direct exchange with federation-aware binding propagation.

    self.keys maps binding-key -> {Queue: Binding}; each Binding tracks
    the federation origins (broker tags) that requested it.  An empty
    origin list marks a locally-created binding.
    """

    def __init__(self, broker, name):
        self.broker = broker
        self.sim = broker.sim
        self.name = name
        self.keys = {}
        self.bridges = []

    def bind(self, queue, key, tagList, fedOp, origin):
        """Apply a (possibly federated) bind/unbind/reorigin/hello op.

        tagList -- broker tags the operation has already visited, used
                   to stop propagation loops.
        origin  -- tag of the broker that created the binding, or
                   None/"" for a purely local bind.
        """
        if not fedOp: fedOp = "bind"
        print "Exchange(%s.%s) bind q=%s, k=%s, tags=%s, op=%s, origin=%s" % \
            (self.broker.tag, self.name, queue.name, key, tagList, fedOp, origin),

        # Drop operations that already passed through this broker.
        if self.broker.tag in tagList:
            print "(tag ignored)"
            return

        # bind/unbind operate on the per-key queue map.
        if fedOp == "bind" or fedOp == "unbind":
            if key not in self.keys:
                self.keys[key] = {}
            queueMap = self.keys[key]

        if fedOp == "bind":
            ##
            ## Add local or federation binding case
            ##
            if queue in queueMap:
                if origin and origin in queueMap[queue].originList:
                    print "(dup ignored)"
                elif origin:
                    queueMap[queue].originList.append(origin)
                    print "(origin added)"
            else:
                binding = Binding(origin)
                queueMap[queue] = binding
                print "(binding added)"

        elif fedOp == "unbind":
            ##
            ## Delete federation binding case
            ##
            if queue in queueMap:
                binding = queueMap[queue]
                if origin and origin in binding.originList:
                    binding.originList.remove(origin)
                    # Drop the queue (and key, if now empty) once the
                    # last origin is gone.
                    if len(binding.originList) == 0:
                        queueMap.pop(queue)
                        if len(queueMap) == 0:
                            self.keys.pop(key)
                        print "(last origin del)"
                    else:
                        print "(removed origin)"
                else:
                    print "(origin not found)"
            else:
                print "(queue not found)"

        elif fedOp == "reorigin":
            print "(ok)"
            self.reorigin()

        elif fedOp == "hello":
            print "(ok)"

        else:
            raise Exception("Unknown fed-opcode '%s'" % fedOp)

        # Propagate the operation over every dynamic bridge, with this
        # broker's tag appended so it is not reflected back to us.
        newTagList = []
        newTagList.append(self.broker.tag)
        for tag in tagList:
            newTagList.append(tag)
        if origin:
            propOrigin = origin
        else:
            propOrigin = self.broker.tag

        for bridge in self.bridges:
            if bridge.isDynamic():
                bridge.propagate(key, newTagList, fedOp, propOrigin)

    def reorigin(self):
        # Re-advertise every key with a locally-originated binding over
        # all dynamic bridges (used when a new bridge joins).
        myTag = []
        myTag.append(self.broker.tag)
        for key in self.keys:
            queueMap = self.keys[key]
            found = False
            for queue in queueMap:
                binding = queueMap[queue]
                if binding.isLocal():
                    found = True
            if found:
                for bridge in self.bridges:
                    if bridge.isDynamic():
                        bridge.propagate(key, myTag, "bind", self.broker.tag)

    def unbind(self, queue, key):
        """Remove *queue*'s binding for *key*; propagate an unbind when
        no locally-originated binding for *key* remains."""
        print "Exchange(%s.%s) unbind q=%s, k=%s" % (self.broker.tag, self.name, queue.name, key),
        if key not in self.keys:
            print "(key not known)"
            return
        queueMap = self.keys[key]
        if queue not in queueMap:
            print "(queue not bound)"
            return
        queueMap.pop(queue)
        if len(queueMap) == 0:
            self.keys.pop(key)
            print "(ok, remove bound-key)"
        else:
            print "(ok)"

        # Count the remaining local (origin-less) bindings for this key.
        count = 0
        for queue in queueMap:
            if len(queueMap[queue].originList) == 0:
                count += 1

        if count == 0:
            myTag = []
            myTag.append(self.broker.tag)
            for bridge in self.bridges:
                if bridge.isDynamic():
                    bridge.propagate(key, myTag, "unbind", self.broker.tag)

    def receive(self, key, msg, fromUser=False):
        # Route to every queue bound to key.  Unroutable federated
        # messages are a warning; user-published unroutables are allowed
        # (the test scripts deliberately publish to unbound keys).
        sent = False
        if key in self.keys:
            queueMap = self.keys[key]
            for queue in queueMap:
                if queue.enqueue(msg):
                    sent = True
        if not sent and not fromUser:
            self.sim.warning("Exchange(%s.%s) received unroutable message: k=%s" % \
                             (self.broker.tag, self.name, key))

    def addDynamicBridge(self, bridge):
        # Register a bridge, ask the pre-existing bridges' peers to
        # re-send their origins, then advertise our local bindings.
        if bridge in self.bridges:
            raise Exception("Dynamic bridge already added to exchange")
        self.bridges.append(bridge)

        for b in self.bridges:
            if b != bridge:
                b.sendReorigin()
        self.reorigin()
+
class Queue:
    """Simulated queue delivering to a single destination.

    tag     -- broker tag stamped onto traversing messages
    exclude -- broker tag whose messages are dropped
    (both used by bridge queues; appears to prevent federation loops --
    see Bridge.__init__)
    """

    def __init__(self, broker, name, tag=None, exclude=None):
        self.broker = broker
        self.name = name
        self.tag = tag
        self.exclude = exclude
        self.dest = None

    def setDest(self, dest):
        self.dest = dest

    def enqueue(self, msg):
        """Deliver *msg* to the destination; returns False when dropped."""
        print "Queue(%s.%s) rcvd k=%s, tags=%s" % (self.broker.tag, self.name, msg.key, msg.tags),
        if self.dest == None:
            print "(dropped, no dest)"
            return False
        if self.exclude and msg.tagFound(self.exclude):
            print "(dropped, tag)"
            return False
        if self.tag:
            msg.appendTag(self.tag)
        print "(ok)"
        self.dest.receive(msg.key, msg)
        return True
+
+
class Binding:
    """Tracks the federation origins that requested a binding.

    An empty origin list marks a binding created locally.
    """

    def __init__(self, origin):
        self.originList = []
        if origin:
            self.originList = [origin]

    def isLocal(self):
        return not self.originList
+
+
class Link:
    """Unidirectional federation link from broker *local* to *remote*."""

    def __init__(self, local, remote):
        self.local = local
        self.remote = remote
        self.remote.connect(self)
        self.bridges = []

    def bridge(self, exchangeName):
        """Create, record, and return a bridge for *exchangeName*.

        Previously the Bridge was created but neither stored in
        self.bridges (which was initialized and never populated) nor
        returned to the caller.
        """
        bridge = Bridge(self, exchangeName)
        self.bridges.append(bridge)
        return bridge
+
+
class Bridge:
    """Dynamic bridge pulling *exchangeName* traffic from the remote
    broker to the local one.

    Declares a queue on the remote broker tagged with the remote's tag
    and excluding messages that already carry the local tag (see
    Queue.enqueue), then subscribes the local exchange to it.
    """

    def __init__(self, link, exchangeName):
        self.link = link
        self.exchangeName = exchangeName
        if self.exchangeName not in link.local.exchanges:
            raise Exception("Exchange not found")
        self.exchange = link.local.exchanges[self.exchangeName]
        self.queueName = "bridge." + link.local.tag
        self.link.remote.declare_queue(self.queueName, self.link.remote.tag, self.link.local.tag)
        self.link.remote.subscribe(self.exchange, self.queueName)
        self.exchange.addDynamicBridge(self)

    def isDynamic(self):
        # Every simulated bridge is dynamic.
        return True

    def localTag(self):
        return self.link.local.tag

    def remoteTag(self):
        return self.link.remote.tag

    def propagate(self, key, tagList, fedOp, origin):
        # Forward a binding operation to the remote broker unless it has
        # already seen it (its tag is in tagList).
        if self.link.remote.tag not in tagList:
            self.link.remote.bind(self.exchangeName, self.queueName, key, tagList, fedOp, origin)

    def sendReorigin(self):
        # Ask the remote broker to re-advertise its binding origins.
        myTag = []
        myTag.append(self.link.local.tag)
        self.link.remote.bind(self.exchangeName, self.queueName, "", myTag, "reorigin", "")
+
+
class Message:
    """A routed message carrying its key, body, and traversal tags."""

    def __init__(self, key, body):
        self.key = key
        self.body = body
        self.tags = []

    def appendTag(self, tag):
        """Record *tag* once; duplicates are ignored."""
        if tag not in self.tags:
            self.tags.append(tag)

    def tagFound(self, tag):
        """Return True when *tag* has been recorded on this message."""
        return tag in self.tags
+
+
diff --git a/RC9/qpid/python/models/fedsim/testBig.py b/RC9/qpid/python/models/fedsim/testBig.py
new file mode 100644
index 0000000000..416a086983
--- /dev/null
+++ b/RC9/qpid/python/models/fedsim/testBig.py
@@ -0,0 +1,88 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
# Scenario: two federated clusters -- B1/B3/B4/B5 starred on hub B2, and
# B7/B8 linked through B6 -- joined later via the B5-B6 link.  Exercises
# binds, a late link, sends of keys "A" and "B" from every broker, and
# unbinds, finishing with a state dump and the error/warning tally.

from fedsim import Sim

sim = Sim()
b1 = sim.addBroker("B1")
b2 = sim.addBroker("B2")
b3 = sim.addBroker("B3")
b4 = sim.addBroker("B4")
b5 = sim.addBroker("B5")
b6 = sim.addBroker("B6")
b7 = sim.addBroker("B7")
b8 = sim.addBroker("B8")

c1 = sim.addClient("C1", b1)
c3 = sim.addClient("C3", b3)
c4 = sim.addClient("C4", b4)
c5 = sim.addClient("C5", b5)
c8 = sim.addClient("C8", b8)

# Star the first cluster on B2; chain the second through B6.
sim.link(b1, b2)
sim.link(b3, b2)
sim.link(b4, b2)
sim.link(b5, b2)

sim.link(b6, b7)
sim.link(b6, b8)

sim.bind(c1, "A")
sim.bind(c3, "B")
sim.bind(c8, "A")

# Join the two clusters after bindings already exist.
sim.link(b5, b6)

sim.bind(c4, "A")

sim.sendMessage("A", b1)
sim.sendMessage("A", b2)
sim.sendMessage("A", b3)
sim.sendMessage("A", b4)
sim.sendMessage("A", b5)
sim.sendMessage("A", b6)
sim.sendMessage("A", b7)
sim.sendMessage("A", b8)

sim.sendMessage("B", b1)
sim.sendMessage("B", b2)
sim.sendMessage("B", b3)
sim.sendMessage("B", b4)
sim.sendMessage("B", b5)
sim.sendMessage("B", b6)
sim.sendMessage("B", b7)
sim.sendMessage("B", b8)

sim.unbind(c1, "A")

sim.sendMessage("A", b1)
sim.sendMessage("A", b2)
sim.sendMessage("A", b3)
sim.sendMessage("A", b4)
sim.sendMessage("A", b5)
sim.sendMessage("A", b6)
sim.sendMessage("A", b7)
sim.sendMessage("A", b8)

sim.unbind(c4, "A")
sim.unbind(c3, "B")
sim.unbind(c8, "A")

sim.dumpState()
sim.end()
diff --git a/RC9/qpid/python/models/fedsim/testRing.py b/RC9/qpid/python/models/fedsim/testRing.py
new file mode 100644
index 0000000000..c883b54993
--- /dev/null
+++ b/RC9/qpid/python/models/fedsim/testRing.py
@@ -0,0 +1,48 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
# Scenario: three brokers federated in a unidirectional ring
# (B1 -> B2 -> B3 -> B1), exercising binding propagation and loop
# prevention for key "A" before and after an unbind.

from fedsim import Sim

sim = Sim()
b1 = sim.addBroker("B1")
b2 = sim.addBroker("B2")
b3 = sim.addBroker("B3")

sim.link(b1, b2, False)
sim.link(b2, b3, False)
sim.link(b3, b1, False)

c1 = sim.addClient("C1", b1)
c2 = sim.addClient("C2", b2)
c3 = sim.addClient("C3", b3)

sim.bind(c1, "A")
sim.bind(c2, "A")

sim.sendMessage("A", b1)
sim.sendMessage("A", b2)
sim.sendMessage("A", b3)

sim.unbind(c2, "A")

sim.sendMessage("A", b1)
sim.sendMessage("A", b2)
sim.sendMessage("A", b3)

sim.end()
diff --git a/RC9/qpid/python/models/fedsim/testStar.py b/RC9/qpid/python/models/fedsim/testStar.py
new file mode 100644
index 0000000000..e6b801446f
--- /dev/null
+++ b/RC9/qpid/python/models/fedsim/testStar.py
@@ -0,0 +1,65 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
# Scenario: three spoke brokers bidirectionally linked to a central hub
# BC; sends key "A" from every broker across each bind/unbind state of
# clients C1 and C2.

from fedsim import Sim

sim = Sim()
b1 = sim.addBroker("B1")
b2 = sim.addBroker("B2")
b3 = sim.addBroker("B3")
bc = sim.addBroker("BC")

sim.link(b1, bc)
sim.link(b2, bc)
sim.link(b3, bc)

c1 = sim.addClient("C1", b1)
c2 = sim.addClient("C2", b2)
c3 = sim.addClient("C3", b3)
cc = sim.addClient("CC", bc)

sim.bind(c1, "A")

sim.sendMessage("A", b1)
sim.sendMessage("A", b2)
sim.sendMessage("A", b3)
sim.sendMessage("A", bc)

sim.bind(c2, "A")

sim.sendMessage("A", b1)
sim.sendMessage("A", b2)
sim.sendMessage("A", b3)
sim.sendMessage("A", bc)

sim.unbind(c1, "A")

sim.sendMessage("A", b1)
sim.sendMessage("A", b2)
sim.sendMessage("A", b3)
sim.sendMessage("A", bc)

sim.unbind(c2, "A")

sim.sendMessage("A", b1)
sim.sendMessage("A", b2)
sim.sendMessage("A", b3)
sim.sendMessage("A", bc)

sim.end()
diff --git a/RC9/qpid/python/models/fedsim/testStarAdd.py b/RC9/qpid/python/models/fedsim/testStarAdd.py
new file mode 100644
index 0000000000..e0eb44952a
--- /dev/null
+++ b/RC9/qpid/python/models/fedsim/testStarAdd.py
@@ -0,0 +1,56 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from fedsim import Sim
+
+sim = Sim()
+b1 = sim.addBroker("B1")
+b2 = sim.addBroker("B2")
+b3 = sim.addBroker("B3")
+bc = sim.addBroker("BC")
+
+sim.link(b1, bc)
+sim.link(b2, bc)
+
+c1 = sim.addClient("C1", b1)
+c2 = sim.addClient("C2", b2)
+c3 = sim.addClient("C3", b3)
+cc = sim.addClient("CC", bc)
+
+sim.bind(c1, "A")
+
+sim.sendMessage("A", b1)
+sim.sendMessage("A", b2)
+sim.sendMessage("A", bc)
+
+sim.bind(c2, "A")
+
+sim.sendMessage("A", b1)
+sim.sendMessage("A", b2)
+sim.sendMessage("A", bc)
+
+sim.bind(c3, "A")
+sim.link(b3, bc)
+
+sim.sendMessage("A", b1)
+sim.sendMessage("A", b2)
+sim.sendMessage("A", bc)
+
+sim.end()
+
diff --git a/RC9/qpid/python/pal2py b/RC9/qpid/python/pal2py
new file mode 100755
index 0000000000..544151bf76
--- /dev/null
+++ b/RC9/qpid/python/pal2py
@@ -0,0 +1,274 @@
+#!/usr/bin/env python
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import sys, os, xml
+
+from qpid.spec import load, pythonize
+from textwrap import TextWrapper
+from xml.sax.handler import ContentHandler
+
+class Block:
+
+ def __init__(self, children):
+ self.children = children
+
+ def emit(self, out):
+ for child in self.children:
+ if not hasattr(child, "emit"):
+ raise ValueError(child)
+ child.emit(out)
+
+ if not self.children:
+ out.line("pass")
+
+class If:
+
+ def __init__(self, expr, cons, alt = None):
+ self.expr = expr
+ self.cons = cons
+ self.alt = alt
+
+ def emit(self, out):
+ out.line("if ")
+ self.expr.emit(out)
+ out.write(":")
+ out.level += 1
+ self.cons.emit(out)
+ out.level -= 1
+ if self.alt:
+ out.line("else:")
+ out.level += 1
+ self.alt.emit(out)
+ out.level -= 1
+
+class Stmt:
+
+ def __init__(self, code):
+ self.code = code
+
+ def emit(self, out):
+ out.line(self.code)
+
+class Expr:
+
+ def __init__(self, code):
+ self.code = code
+
+ def emit(self, out):
+ out.write(self.code)
+
+class Abort:
+
+ def __init__(self, expr):
+ self.expr = expr
+
+ def emit(self, out):
+ out.line("assert False, ")
+ self.expr.emit(out)
+
+WRAPPER = TextWrapper()
+
+def wrap(text):
+ return WRAPPER.wrap(" ".join(text.split()))
+
+class Doc:
+
+ def __init__(self, text):
+ self.text = text
+
+ def emit(self, out):
+ out.line('"""')
+ for line in wrap(self.text):
+ out.line(line)
+ out.line('"""')
+
+class Frame:
+
+ def __init__(self, attrs):
+ self.attrs = attrs
+ self.children = []
+ self.text = None
+
+ def __getattr__(self, attr):
+ return self.attrs[attr]
+
+def isunicode(s):
+ if isinstance(s, str):
+ return False
+ for ch in s:
+ if ord(ch) > 127:
+ return True
+ return False
+
+def string_literal(s):
+ if s == None:
+ return None
+ if isunicode(s):
+ return "%r" % s
+ else:
+ return "%r" % str(s)
+
+TRUTH = {
+ "1": True,
+ "0": False,
+ "true": True,
+ "false": False
+ }
+
+LITERAL = {
+ "shortstr": string_literal,
+ "longstr": string_literal,
+ "bit": lambda s: TRUTH[s.lower()],
+ "longlong": lambda s: "%r" % long(s)
+ }
+
+def literal(s, field):
+ return LITERAL[field.type](s)
+
+def palexpr(s, field):
+ if s.startswith("$"):
+ return "msg.%s" % s[1:]
+ else:
+ return literal(s, field)
+
+class Translator(ContentHandler):
+
+ def __init__(self, spec):
+ self.spec = spec
+ self.stack = []
+ self.content = None
+ self.root = Frame(None)
+ self.push(self.root)
+
+ def emit(self, out):
+ blk = Block(self.root.children)
+ blk.emit(out)
+ out.write("\n")
+
+ def peek(self):
+ return self.stack[-1]
+
+ def pop(self):
+ return self.stack.pop()
+
+ def push(self, frame):
+ self.stack.append(frame)
+
+ def startElement(self, name, attrs):
+ self.push(Frame(attrs))
+
+ def endElement(self, name):
+ frame = self.pop()
+ if hasattr(self, name):
+ child = getattr(self, name)(frame)
+ else:
+ child = self.handle(name, frame)
+
+ if child:
+ self.peek().children.append(child)
+
+ def characters(self, text):
+ frame = self.peek()
+ if frame.text:
+ frame.text += text
+ else:
+ frame.text = text
+
+ def handle(self, name, frame):
+ for klass in self.spec.classes:
+ pyklass = pythonize(klass.name)
+ if name.startswith(pyklass):
+ name = name[len(pyklass) + 1:]
+ break
+ else:
+ raise ValueError("unknown class: %s" % name)
+
+ for method in klass.methods:
+ pymethod = pythonize(method.name)
+ if name == pymethod:
+ break
+ else:
+ raise ValueError("unknown method: %s" % name)
+
+ args = ["%s = %s" % (key, palexpr(val, method.fields.bypyname[key]))
+ for key, val in frame.attrs.items()]
+ if method.content and self.content:
+ args.append("content = %r" % string_literal(self.content))
+ code = "ssn.%s_%s(%s)" % (pyklass, pymethod, ", ".join(args))
+ if pymethod == "consume":
+ code = "consumer_tag = %s.consumer_tag" % code
+ return Stmt(code)
+
+ def pal(self, frame):
+ return Block([Doc(frame.text)] + frame.children)
+
+ def include(self, frame):
+ base, ext = os.path.splitext(frame.filename)
+ return Stmt("from %s import *" % base)
+
+ def session(self, frame):
+ return Block([Stmt("cli = open()"), Stmt("ssn = cli.channel(0)"),
+ Stmt("ssn.channel_open()")] + frame.children)
+
+ def empty(self, frame):
+ return If(Expr("msg == None"), Block(frame.children))
+
+ def abort(self, frame):
+ return Abort(Expr(string_literal(frame.text)))
+
+ def wait(self, frame):
+ return Stmt("msg = ssn.queue(consumer_tag).get(timeout=%r)" %
+ (int(frame.timeout)/1000))
+
+ def basic_arrived(self, frame):
+ if frame.children:
+ return If(Expr("msg != None"), Block(frame.children))
+
+ def basic_content(self, frame):
+ self.content = frame.text
+
+class Emitter:
+
+ def __init__(self, out):
+ self.out = out
+ self.level = 0
+
+ def write(self, code):
+ self.out.write(code)
+
+ def line(self, code):
+ self.write("\n%s%s" % (" "*self.level, code))
+
+ def flush(self):
+ self.out.flush()
+
+ def close(self):
+ self.out.close()
+
+
+for f in sys.argv[2:]:
+ base, ext = os.path.splitext(f)
+ spec = load(sys.argv[1])
+ t = Translator(spec)
+ xml.sax.parse(f, t)
+# out = Emitter(open("%s.py" % base))
+ out = Emitter(sys.stdout)
+ t.emit(out)
+ out.close()
diff --git a/RC9/qpid/python/perftest b/RC9/qpid/python/perftest
new file mode 100755
index 0000000000..f4d3c95e96
--- /dev/null
+++ b/RC9/qpid/python/perftest
@@ -0,0 +1,113 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+def publisher(n):
+ import qpid
+ import sys
+ from qpid.client import Client
+ from qpid.content import Content
+ if len(sys.argv) >= 3:
+ n = int(sys.argv[2])
+ client = Client("127.0.0.1", 5672)
+ client.start({"LOGIN": "guest", "PASSWORD": "guest"})
+ channel = client.channel(1)
+ channel.session_open()
+ message = Content("message")
+ message["routing_key"] = "message_queue"
+ print "producing ", n, " messages"
+ for i in range(n):
+ channel.message_transfer(destination="amq.direct", content=message)
+
+ print "producing final message"
+ message = Content("That's done")
+ message["routing_key"] = "message_queue"
+ channel.message_transfer(destination="amq.direct", content=message)
+
+ print "consuming sync message"
+ consumer = "consumer"
+ queue = client.queue(consumer)
+ channel.message_subscribe(queue="sync_queue", destination=consumer)
+ channel.message_flow(consumer, 0, 0xFFFFFFFF)
+ channel.message_flow(consumer, 1, 0xFFFFFFFF)
+ queue.get(block = True)
+ print "done"
+ channel.session_close()
+
+def consumer():
+ import sys
+ import qpid
+ from qpid.client import Client
+ from qpid.content import Content
+ client = Client("127.0.0.1", 5672)
+ client.start({"LOGIN": "guest", "PASSWORD": "guest"})
+ channel = client.channel(1)
+ channel.session_open()
+ consumer = "consumer"
+ queue = client.queue(consumer)
+ channel.message_subscribe(queue="message_queue", destination=consumer)
+ channel.message_flow(consumer, 0, 0xFFFFFFFF)
+ channel.message_flow(consumer, 1, 0xFFFFFFFF)
+ final = "That's done"
+ content = ""
+ message = None
+ print "getting messages"
+ while content != final:
+ message = queue.get(block = True)
+ content = message.content.body
+ message.complete(cumulative=True)
+
+ print "consumed all messages"
+ message = Content("message")
+ message["routing_key"] = "sync_queue"
+ channel.message_transfer(destination="amq.direct", content=message)
+ print "done"
+ channel.session_close()
+
+if __name__=='__main__':
+ import sys
+ import qpid
+ from timeit import Timer
+ from qpid.client import Client
+ from qpid.content import Content
+ client = Client("127.0.0.1", 5672)
+ client.start({"LOGIN": "guest", "PASSWORD": "guest"})
+ channel = client.channel(1)
+ channel.session_open()
+ channel.queue_declare(queue="message_queue")
+ channel.queue_bind(exchange="amq.direct", queue="message_queue", routing_key="message_queue")
+ channel.queue_declare(queue="sync_queue")
+ channel.queue_bind(exchange="amq.direct", queue="sync_queue", routing_key="sync_queue")
+ channel.session_close()
+
+ numMess = 100
+ if len(sys.argv) >= 3:
+ numMess = int(sys.argv[2])
+ if len(sys.argv) == 1:
+ print "error: please specify prod or cons"
+ elif sys.argv[1] == 'prod':
+ tprod = Timer("publisher(100)", "from __main__ import publisher")
+ tp = tprod.timeit(1)
+ print "produced and consumed" , numMess + 2 ,"messages in: ", tp
+ elif sys.argv[1] == 'cons':
+ tcons = Timer("consumer()", "from __main__ import consumer")
+ tc = tcons.timeit(1)
+ print "consumed " , numMess ," in: ", tc
+ else:
+ print "please specify prod or cons"
diff --git a/RC9/qpid/python/qmf/__init__.py b/RC9/qpid/python/qmf/__init__.py
new file mode 100644
index 0000000000..31d5a2ef58
--- /dev/null
+++ b/RC9/qpid/python/qmf/__init__.py
@@ -0,0 +1,18 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
diff --git a/RC9/qpid/python/qmf/console.py b/RC9/qpid/python/qmf/console.py
new file mode 100644
index 0000000000..0009726fe7
--- /dev/null
+++ b/RC9/qpid/python/qmf/console.py
@@ -0,0 +1,1625 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+""" Console API for Qpid Management Framework """
+
+import os
+import qpid
+import struct
+import socket
+import re
+from qpid.peer import Closed
+from qpid.connection import Connection, ConnectionFailed
+from qpid.datatypes import UUID, uuid4, Message, RangedSet
+from qpid.util import connect, ssl, URL
+from qpid.codec010 import StringCodec as Codec
+from threading import Lock, Condition, Thread
+from time import time, strftime, gmtime
+from cStringIO import StringIO
+
+class Console:
+ """ To access the asynchronous operations, a class must be derived from
+ Console with overrides of any combination of the available methods. """
+
+ def brokerConnected(self, broker):
+ """ Invoked when a connection is established to a broker """
+ pass
+
+ def brokerDisconnected(self, broker):
+ """ Invoked when the connection to a broker is lost """
+ pass
+
+ def newPackage(self, name):
+ """ Invoked when a QMF package is discovered. """
+ pass
+
+ def newClass(self, kind, classKey):
+ """ Invoked when a new class is discovered. Session.getSchema can be
+ used to obtain details about the class."""
+ pass
+
+ def newAgent(self, agent):
+ """ Invoked when a QMF agent is discovered. """
+ pass
+
+ def delAgent(self, agent):
+    """ Invoked when a QMF agent disconnects. """
+ pass
+
+ def objectProps(self, broker, record):
+ """ Invoked when an object is updated. """
+ pass
+
+ def objectStats(self, broker, record):
+ """ Invoked when an object is updated. """
+ pass
+
+ def event(self, broker, event):
+ """ Invoked when an event is raised. """
+ pass
+
+ def heartbeat(self, agent, timestamp):
+ """ """
+ pass
+
+ def brokerInfo(self, broker):
+ """ """
+ pass
+
+ def methodResponse(self, broker, seq, response):
+ """ """
+ pass
+
+class BrokerURL(URL):
+ def __init__(self, text):
+ URL.__init__(self, text)
+ socket.gethostbyname(self.host)
+ if self.port is None:
+ if self.scheme == URL.AMQPS:
+ self.port = 5671
+ else:
+ self.port = 5672
+ self.authName = self.user or "guest"
+ self.authPass = self.password or "guest"
+ self.authMech = "PLAIN"
+
+ def name(self):
+ return self.host + ":" + str(self.port)
+
+ def match(self, host, port):
+ return socket.gethostbyname(self.host) == socket.gethostbyname(host) and self.port == port
+
+class Session:
+ """
+ An instance of the Session class represents a console session running
+ against one or more QMF brokers. A single instance of Session is needed
+ to interact with the management framework as a console.
+ """
+ _CONTEXT_SYNC = 1
+ _CONTEXT_STARTUP = 2
+ _CONTEXT_MULTIGET = 3
+
+ GET_WAIT_TIME = 60
+
+ def __init__(self, console=None, rcvObjects=True, rcvEvents=True, rcvHeartbeats=True,
+ manageConnections=False, userBindings=False):
+ """
+ Initialize a session. If the console argument is provided, the
+ more advanced asynchronous features are available. If console is
+ defaulted, the session will operate in a simpler, synchronous manner.
+
+ The rcvObjects, rcvEvents, and rcvHeartbeats arguments are meaningful only if 'console'
+ is provided. They control whether object updates, events, and agent-heartbeats are
+ subscribed to. If the console is not interested in receiving one or more of the above,
+    setting the argument to False will reduce the bandwidth used by the API.
+
+ If manageConnections is set to True, the Session object will manage connections to
+ the brokers. This means that if a broker is unreachable, it will retry until a connection
+ can be established. If a connection is lost, the Session will attempt to reconnect.
+
+    If manageConnections is set to False, the user is responsible for handling failures. In
+ this case, an unreachable broker will cause addBroker to raise an exception.
+
+ If userBindings is set to False (the default) and rcvObjects is True, the console will
+ receive data for all object classes. If userBindings is set to True, the user must select
+ which classes the console shall receive by invoking the bindPackage or bindClass methods.
+    This allows the console to be configured to receive only information that is relevant to
+    a particular application. If rcvObjects is False, userBindings has no meaning.
+ """
+ self.console = console
+ self.brokers = []
+ self.packages = {}
+ self.seqMgr = SequenceManager()
+ self.cv = Condition()
+ self.syncSequenceList = []
+ self.getResult = []
+ self.getSelect = []
+ self.error = None
+ self.rcvObjects = rcvObjects
+ self.rcvEvents = rcvEvents
+ self.rcvHeartbeats = rcvHeartbeats
+ self.userBindings = userBindings
+ if self.console == None:
+ self.rcvObjects = False
+ self.rcvEvents = False
+ self.rcvHeartbeats = False
+ self.bindingKeyList = self._bindingKeys()
+ self.manageConnections = manageConnections
+
+ if self.userBindings and not self.rcvObjects:
+ raise Exception("userBindings can't be set unless rcvObjects is set and a console is provided")
+
+ def __repr__(self):
+ return "QMF Console Session Manager (brokers: %d)" % len(self.brokers)
+
+ def addBroker(self, target="localhost"):
+ """ Connect to a Qpid broker. Returns an object of type Broker. """
+ url = BrokerURL(target)
+ broker = Broker(self, url.host, url.port, url.authMech, url.authName, url.authPass,
+ ssl = url.scheme == URL.AMQPS)
+
+ self.brokers.append(broker)
+ if not self.manageConnections:
+ self.getObjects(broker=broker, _class="agent")
+ return broker
+
+ def delBroker(self, broker):
+ """ Disconnect from a broker. The 'broker' argument is the object
+ returned from the addBroker call """
+ broker._shutdown()
+ self.brokers.remove(broker)
+ del broker
+
+ def getPackages(self):
+ """ Get the list of known QMF packages """
+ for broker in self.brokers:
+ broker._waitForStable()
+ list = []
+ for package in self.packages:
+ list.append(package)
+ return list
+
+ def getClasses(self, packageName):
+ """ Get the list of known classes within a QMF package """
+ for broker in self.brokers:
+ broker._waitForStable()
+ list = []
+ if packageName in self.packages:
+ for pkey in self.packages[packageName]:
+ list.append(self.packages[packageName][pkey].getKey())
+ return list
+
+ def getSchema(self, classKey):
+ """ Get the schema for a QMF class """
+ for broker in self.brokers:
+ broker._waitForStable()
+ pname = classKey.getPackageName()
+ pkey = classKey.getPackageKey()
+ if pname in self.packages:
+ if pkey in self.packages[pname]:
+ return self.packages[pname][pkey]
+
+ def bindPackage(self, packageName):
+ """ Request object updates for all table classes within a package. """
+ if not self.userBindings or not self.rcvObjects:
+ raise Exception("userBindings option not set for Session")
+ key = "console.obj.*.*.%s.#" % packageName
+ self.bindingKeyList.append(key)
+ for broker in self.brokers:
+ if broker.isConnected():
+ broker.amqpSession.exchange_bind(exchange="qpid.management", queue=broker.topicName,
+ binding_key=key)
+
+ def bindClass(self, pname, cname):
+ """ Request object updates for a particular table class by package and class name. """
+ if not self.userBindings or not self.rcvObjects:
+ raise Exception("userBindings option not set for Session")
+ key = "console.obj.*.*.%s.%s.#" % (pname, cname)
+ self.bindingKeyList.append(key)
+ for broker in self.brokers:
+ if broker.isConnected():
+ broker.amqpSession.exchange_bind(exchange="qpid.management", queue=broker.topicName,
+ binding_key=key)
+
+ def bindClassKey(self, classKey):
+ """ Request object updates for a particular table class by class key. """
+ pname = classKey.getPackageName()
+ cname = classKey.getClassName()
+ self.bindClass(pname, cname)
+
+ def getAgents(self, broker=None):
+ """ Get a list of currently known agents """
+ brokerList = []
+ if broker == None:
+ for b in self.brokers:
+ brokerList.append(b)
+ else:
+ brokerList.append(broker)
+
+ for b in brokerList:
+ b._waitForStable()
+ agentList = []
+ for b in brokerList:
+ for a in b.getAgents():
+ agentList.append(a)
+ return agentList
+
+ def getObjects(self, **kwargs):
+ """ Get a list of objects from QMF agents.
+ All arguments are passed by name(keyword).
+
+ The class for queried objects may be specified in one of the following ways:
+
+ _schema = <schema> - supply a schema object returned from getSchema.
+ _key = <key> - supply a classKey from the list returned by getClasses.
+ _class = <name> - supply a class name as a string. If the class name exists
+ in multiple packages, a _package argument may also be supplied.
+ _objectId = <id> - get the object referenced by the object-id
+
+ If objects should be obtained from only one agent, use the following argument.
+ Otherwise, the query will go to all agents.
+
+ _agent = <agent> - supply an agent from the list returned by getAgents.
+
+ If the get query is to be restricted to one broker (as opposed to all connected brokers),
+ add the following argument:
+
+ _broker = <broker> - supply a broker as returned by addBroker.
+
+ If additional arguments are supplied, they are used as property selectors. For example,
+ if the argument name="test" is supplied, only objects whose "name" property is "test"
+ will be returned in the result.
+ """
+ if "_broker" in kwargs:
+ brokerList = []
+ brokerList.append(kwargs["_broker"])
+ else:
+ brokerList = self.brokers
+ for broker in brokerList:
+ broker._waitForStable()
+
+ agentList = []
+ if "_agent" in kwargs:
+ agent = kwargs["_agent"]
+ if agent.broker not in brokerList:
+ raise Exception("Supplied agent is not accessible through the supplied broker")
+ if agent.broker.isConnected():
+ agentList.append(agent)
+ else:
+ if "_objectId" in kwargs:
+ oid = kwargs["_objectId"]
+ for broker in brokerList:
+ for agent in broker.getAgents():
+ if agent.getBrokerBank() == oid.getBrokerBank() and agent.getAgentBank() == oid.getAgentBank():
+ agentList.append(agent)
+ else:
+ for broker in brokerList:
+ for agent in broker.getAgents():
+ if agent.broker.isConnected():
+ agentList.append(agent)
+
+ if len(agentList) == 0:
+ return []
+
+ pname = None
+ cname = None
+ hash = None
+ classKey = None
+ if "_schema" in kwargs: classKey = kwargs["_schema"].getKey()
+ elif "_key" in kwargs: classKey = kwargs["_key"]
+ elif "_class" in kwargs:
+ cname = kwargs["_class"]
+ if "_package" in kwargs:
+ pname = kwargs["_package"]
+ if cname == None and classKey == None and "_objectId" not in kwargs:
+ raise Exception("No class supplied, use '_schema', '_key', '_class', or '_objectId' argument")
+
+ map = {}
+ self.getSelect = []
+ if "_objectId" in kwargs:
+ map["_objectid"] = kwargs["_objectId"].__repr__()
+ else:
+ if cname == None:
+ cname = classKey.getClassName()
+ pname = classKey.getPackageName()
+ hash = classKey.getHash()
+ map["_class"] = cname
+ if pname != None: map["_package"] = pname
+ if hash != None: map["_hash"] = hash
+ for item in kwargs:
+ if item[0] != '_':
+ self.getSelect.append((item, kwargs[item]))
+
+ self.getResult = []
+ for agent in agentList:
+ broker = agent.broker
+ sendCodec = Codec(broker.conn.spec)
+ try:
+ self.cv.acquire()
+ seq = self.seqMgr._reserve(self._CONTEXT_MULTIGET)
+ self.syncSequenceList.append(seq)
+ finally:
+ self.cv.release()
+ broker._setHeader(sendCodec, 'G', seq)
+ sendCodec.write_map(map)
+ smsg = broker._message(sendCodec.encoded, "agent.%d.%d" % (agent.brokerBank, agent.agentBank))
+ broker._send(smsg)
+
+ starttime = time()
+ timeout = False
+ try:
+ self.cv.acquire()
+ while len(self.syncSequenceList) > 0 and self.error == None:
+ self.cv.wait(self.GET_WAIT_TIME)
+ if time() - starttime > self.GET_WAIT_TIME:
+ for pendingSeq in self.syncSequenceList:
+ self.seqMgr._release(pendingSeq)
+ self.syncSequenceList = []
+ timeout = True
+ finally:
+ self.cv.release()
+
+ if self.error:
+ errorText = self.error
+ self.error = None
+ raise Exception(errorText)
+
+ if len(self.getResult) == 0 and timeout:
+ raise RuntimeError("No agent responded within timeout period")
+ return self.getResult
+
+ def setEventFilter(self, **kwargs):
+ """ """
+ pass
+
+ def _bindingKeys(self):
+ keyList = []
+ keyList.append("schema.#")
+ if self.rcvObjects and self.rcvEvents and self.rcvHeartbeats and not self.userBindings:
+ keyList.append("console.#")
+ else:
+ if self.rcvObjects and not self.userBindings:
+ keyList.append("console.obj.#")
+ else:
+ keyList.append("console.obj.*.*.org.apache.qpid.broker.agent")
+ if self.rcvEvents:
+ keyList.append("console.event.#")
+ if self.rcvHeartbeats:
+ keyList.append("console.heartbeat.#")
+ return keyList
+
+ def _handleBrokerConnect(self, broker):
+ if self.console:
+ self.console.brokerConnected(broker)
+
+ def _handleBrokerDisconnect(self, broker):
+ if self.console:
+ self.console.brokerDisconnected(broker)
+
+ def _handleBrokerResp(self, broker, codec, seq):
+ broker.brokerId = UUID(codec.read_uuid())
+ if self.console != None:
+ self.console.brokerInfo(broker)
+
+ # Send a package request
+ # (effectively inc and dec outstanding by not doing anything)
+ sendCodec = Codec(broker.conn.spec)
+ seq = self.seqMgr._reserve(self._CONTEXT_STARTUP)
+ broker._setHeader(sendCodec, 'P', seq)
+ smsg = broker._message(sendCodec.encoded)
+ broker._send(smsg)
+
+ def _handlePackageInd(self, broker, codec, seq):
+ pname = str(codec.read_str8())
+ notify = False
+ try:
+ self.cv.acquire()
+ if pname not in self.packages:
+ self.packages[pname] = {}
+ notify = True
+ finally:
+ self.cv.release()
+ if notify and self.console != None:
+ self.console.newPackage(pname)
+
+ # Send a class request
+ broker._incOutstanding()
+ sendCodec = Codec(broker.conn.spec)
+ seq = self.seqMgr._reserve(self._CONTEXT_STARTUP)
+ broker._setHeader(sendCodec, 'Q', seq)
+ sendCodec.write_str8(pname)
+ smsg = broker._message(sendCodec.encoded)
+ broker._send(smsg)
+
+ def _handleCommandComplete(self, broker, codec, seq):
+ code = codec.read_uint32()
+ text = codec.read_str8()
+ context = self.seqMgr._release(seq)
+ if context == self._CONTEXT_STARTUP:
+ broker._decOutstanding()
+ elif context == self._CONTEXT_SYNC and seq == broker.syncSequence:
+ try:
+ broker.cv.acquire()
+ broker.syncInFlight = False
+ broker.cv.notify()
+ finally:
+ broker.cv.release()
+ elif context == self._CONTEXT_MULTIGET and seq in self.syncSequenceList:
+ try:
+ self.cv.acquire()
+ self.syncSequenceList.remove(seq)
+ if len(self.syncSequenceList) == 0:
+ self.cv.notify()
+ finally:
+ self.cv.release()
+
+ def _handleClassInd(self, broker, codec, seq):
+ kind = codec.read_uint8()
+ classKey = ClassKey(codec)
+ unknown = False
+
+ try:
+ self.cv.acquire()
+ if classKey.getPackageName() in self.packages:
+ if classKey.getPackageKey() not in self.packages[classKey.getPackageName()]:
+ unknown = True
+ finally:
+ self.cv.release()
+
+ if unknown:
+ # Send a schema request for the unknown class
+ broker._incOutstanding()
+ sendCodec = Codec(broker.conn.spec)
+ seq = self.seqMgr._reserve(self._CONTEXT_STARTUP)
+ broker._setHeader(sendCodec, 'S', seq)
+ classKey.encode(sendCodec)
+ smsg = broker._message(sendCodec.encoded)
+ broker._send(smsg)
+
+ def _handleMethodResp(self, broker, codec, seq):
+ code = codec.read_uint32()
+ text = codec.read_str16()
+ outArgs = {}
+ method, synchronous = self.seqMgr._release(seq)
+ if code == 0:
+ for arg in method.arguments:
+ if arg.dir.find("O") != -1:
+ outArgs[arg.name] = self._decodeValue(codec, arg.type)
+ result = MethodResult(code, text, outArgs)
+ if synchronous:
+ try:
+ broker.cv.acquire()
+ broker.syncResult = result
+ broker.syncInFlight = False
+ broker.cv.notify()
+ finally:
+ broker.cv.release()
+ else:
+ if self.console:
+ self.console.methodResponse(broker, seq, result)
+
+ def _handleHeartbeatInd(self, broker, codec, seq, msg):
+ brokerBank = 1
+ agentBank = 0
+ dp = msg.get("delivery_properties")
+ if dp:
+ key = dp["routing_key"]
+ keyElements = key.split(".")
+ if len(keyElements) == 4:
+ brokerBank = int(keyElements[2])
+ agentBank = int(keyElements[3])
+
+ agent = broker.getAgent(brokerBank, agentBank)
+ timestamp = codec.read_uint64()
+ if self.console != None and agent != None:
+ self.console.heartbeat(agent, timestamp)
+
+ def _handleEventInd(self, broker, codec, seq):
+ if self.console != None:
+ event = Event(self, broker, codec)
+ self.console.event(broker, event)
+
+ def _handleSchemaResp(self, broker, codec, seq):
+ kind = codec.read_uint8()
+ classKey = ClassKey(codec)
+ _class = SchemaClass(kind, classKey, codec)
+ try:
+ self.cv.acquire()
+ self.packages[classKey.getPackageName()][classKey.getPackageKey()] = _class
+ finally:
+ self.cv.release()
+
+ self.seqMgr._release(seq)
+ broker._decOutstanding()
+ if self.console != None:
+ self.console.newClass(kind, classKey)
+
+ def _handleContentInd(self, broker, codec, seq, prop=False, stat=False):
+ classKey = ClassKey(codec)
+ try:
+ self.cv.acquire()
+ pname = classKey.getPackageName()
+ if pname not in self.packages:
+ return
+ pkey = classKey.getPackageKey()
+ if pkey not in self.packages[pname]:
+ return
+ schema = self.packages[pname][pkey]
+ finally:
+ self.cv.release()
+
+ object = Object(self, broker, schema, codec, prop, stat)
+ if pname == "org.apache.qpid.broker" and classKey.getClassName() == "agent" and prop:
+ broker._updateAgent(object)
+
+ try:
+ self.cv.acquire()
+ if seq in self.syncSequenceList:
+ if object.getTimestamps()[2] == 0 and self._selectMatch(object):
+ self.getResult.append(object)
+ return
+ finally:
+ self.cv.release()
+
+ if self.console and self.rcvObjects:
+ if prop:
+ self.console.objectProps(broker, object)
+ if stat:
+ self.console.objectStats(broker, object)
+
+ def _handleError(self, error):
+ self.error = error
+ try:
+ self.cv.acquire()
+ self.syncSequenceList = []
+ self.cv.notify()
+ finally:
+ self.cv.release()
+
+ def _selectMatch(self, object):
+ """ Check the object against self.getSelect to check for a match """
+ for key, value in self.getSelect:
+ for prop, propval in object.getProperties():
+ if key == prop.name and value != propval:
+ return False
+ return True
+
+ def _decodeValue(self, codec, typecode):
+ """ Decode, from the codec, a value based on its typecode. """
+ # Typecode 5 is absent here — presumably reserved/unused in this QMF
+ # protocol revision; confirm against the management specification.
+ if typecode == 1: data = codec.read_uint8() # U8
+ elif typecode == 2: data = codec.read_uint16() # U16
+ elif typecode == 3: data = codec.read_uint32() # U32
+ elif typecode == 4: data = codec.read_uint64() # U64
+ elif typecode == 6: data = codec.read_str8() # SSTR
+ elif typecode == 7: data = codec.read_str16() # LSTR
+ elif typecode == 8: data = codec.read_int64() # ABSTIME
+ elif typecode == 9: data = codec.read_uint64() # DELTATIME
+ elif typecode == 10: data = ObjectId(codec) # REF
+ elif typecode == 11: data = codec.read_uint8() != 0 # BOOL
+ elif typecode == 12: data = codec.read_float() # FLOAT
+ elif typecode == 13: data = codec.read_double() # DOUBLE
+ elif typecode == 14: data = UUID(codec.read_uuid()) # UUID
+ elif typecode == 15: data = codec.read_map() # FTABLE
+ elif typecode == 16: data = codec.read_int8() # S8
+ elif typecode == 17: data = codec.read_int16() # S16
+ elif typecode == 18: data = codec.read_int32() # S32
+ elif typecode == 19: data = codec.read_int64() # S64
+ else:
+ raise ValueError("Invalid type code: %d" % typecode)
+ return data
+
+ def _encodeValue(self, codec, value, typecode):
+ """ Encode, into the codec, a value based on its typecode. """
+ # Mirror image of _decodeValue; BOOL is wire-encoded as a uint8.
+ if typecode == 1: codec.write_uint8 (int(value)) # U8
+ elif typecode == 2: codec.write_uint16 (int(value)) # U16
+ elif typecode == 3: codec.write_uint32 (long(value)) # U32
+ elif typecode == 4: codec.write_uint64 (long(value)) # U64
+ elif typecode == 6: codec.write_str8 (value) # SSTR
+ elif typecode == 7: codec.write_str16 (value) # LSTR
+ elif typecode == 8: codec.write_int64 (long(value)) # ABSTIME
+ elif typecode == 9: codec.write_uint64 (long(value)) # DELTATIME
+ elif typecode == 10: value.encode (codec) # REF
+ elif typecode == 11: codec.write_uint8 (int(value)) # BOOL
+ elif typecode == 12: codec.write_float (float(value)) # FLOAT
+ elif typecode == 13: codec.write_double (float(value)) # DOUBLE
+ elif typecode == 14: codec.write_uuid (value.bytes) # UUID
+ elif typecode == 15: codec.write_map (value) # FTABLE
+ elif typecode == 16: codec.write_int8 (int(value)) # S8
+ elif typecode == 17: codec.write_int16 (int(value)) # S16
+ elif typecode == 18: codec.write_int32 (int(value)) # S32
+ elif typecode == 19: codec.write_int64 (int(value)) # S64
+ else:
+ raise ValueError ("Invalid type code: %d" % typecode)
+
+ def _displayValue(self, value, typecode):
+ """ Render a decoded value as a unicode string for display purposes. """
+ if typecode == 1: return unicode(value)
+ elif typecode == 2: return unicode(value)
+ elif typecode == 3: return unicode(value)
+ elif typecode == 4: return unicode(value)
+ elif typecode == 6: return value
+ elif typecode == 7: return value
+ # ABSTIME values are nanoseconds since the epoch (hence / 1000000000).
+ elif typecode == 8: return unicode(strftime("%c", gmtime(value / 1000000000)))
+ elif typecode == 9: return unicode(value)
+ elif typecode == 10: return unicode(value.__repr__())
+ elif typecode == 11:
+ if value: return u"T"
+ else: return u"F"
+ elif typecode == 12: return unicode(value)
+ elif typecode == 13: return unicode(value)
+ elif typecode == 14: return unicode(value.__repr__())
+ elif typecode == 15: return unicode(value.__repr__())
+ elif typecode == 16: return unicode(value)
+ elif typecode == 17: return unicode(value)
+ elif typecode == 18: return unicode(value)
+ elif typecode == 19: return unicode(value)
+ else:
+ raise ValueError ("Invalid type code: %d" % typecode)
+
+ def _sendMethodRequest(self, broker, schemaKey, objectId, name, argList):
+ """ This function can be used to send a method request to an object given only the
+ broker, schemaKey, and objectId. This is an uncommon usage pattern as methods are
+ normally invoked on the object itself.
+
+ Returns the reserved sequence number, or None if the schema defines no
+ method with the given name.
+ """
+ schema = self.getSchema(schemaKey)
+ for method in schema.getMethods():
+ if name == method.name:
+ aIdx = 0
+ sendCodec = Codec(broker.conn.spec)
+ seq = self.seqMgr._reserve((method, False))
+ # 'M' opcode = method request; the object id, class key and
+ # method name precede the encoded input arguments.
+ broker._setHeader(sendCodec, 'M', seq)
+ objectId.encode(sendCodec)
+ schemaKey.encode(sendCodec)
+ sendCodec.write_str8(name)
+
+ # Only arguments with direction "I" (input) are sent on the wire.
+ count = 0
+ for arg in method.arguments:
+ if arg.dir.find("I") != -1:
+ count += 1
+ if count != len(argList):
+ raise Exception("Incorrect number of arguments: expected %d, got %d" % (count, len(argList)))
+
+ for arg in method.arguments:
+ if arg.dir.find("I") != -1:
+ self._encodeValue(sendCodec, argList[aIdx], arg.type)
+ aIdx += 1
+ smsg = broker._message(sendCodec.encoded, "agent.%d.%d" %
+ (objectId.getBrokerBank(), objectId.getAgentBank()))
+ broker._send(smsg)
+ return seq
+ return None
+
class Package:
    """A named QMF schema package (a namespace for schema classes)."""

    def __init__(self, name):
        self.name = name
+
class ClassKey:
    """ A ClassKey uniquely identifies a class from the schema. """

    def __init__(self, constructor):
        if type(constructor) == str:
            # Parse the "package:class(hhhhhhhh-hhhhhhhh-hhhhhhhh-hhhhhhhh)"
            # form produced by __repr__ below.
            try:
                self.pname, remainder = constructor.split(":")
                self.cname, hashText = remainder.split("(")
                words = [int(piece, 16) for piece in hashText.strip(")").split("-")]
                self.hash = struct.pack("!LLLL", words[0], words[1], words[2], words[3])
            except:
                raise Exception("Invalid ClassKey format")
        else:
            # Otherwise decode the three wire fields from a codec.
            codec = constructor
            self.pname = str(codec.read_str8())
            self.cname = str(codec.read_str8())
            self.hash = codec.read_bin128()

    def encode(self, codec):
        """ Write the package name, class name and schema hash to the codec. """
        codec.write_str8(self.pname)
        codec.write_str8(self.cname)
        codec.write_bin128(self.hash)

    def getPackageName(self):
        return self.pname

    def getClassName(self):
        return self.cname

    def getHash(self):
        """ Return the raw 16-byte schema hash. """
        return self.hash

    def getHashString(self):
        """ Return the hash formatted as four dash-separated hex words. """
        return "%08x-%08x-%08x-%08x" % struct.unpack("!LLLL", self.hash)

    def getPackageKey(self):
        """ Return the (class name, hash) pair used to index within a package. """
        return (self.cname, self.hash)

    def __repr__(self):
        return "%s:%s(%s)" % (self.pname, self.cname, self.getHashString())
+
+class SchemaClass:
+ """ Schema for one class: either a table of properties/statistics/methods
+ or an event argument list, decoded in order from the codec. """
+ CLASS_KIND_TABLE = 1
+ CLASS_KIND_EVENT = 2
+
+ def __init__(self, kind, key, codec):
+ self.kind = kind
+ self.classKey = key
+ self.properties = []
+ self.statistics = []
+ self.methods = []
+ self.arguments = []
+
+ # Decode order is fixed by the wire format: counts first, then the
+ # property, statistic and method definitions in that order.
+ if self.kind == self.CLASS_KIND_TABLE:
+ propCount = codec.read_uint16()
+ statCount = codec.read_uint16()
+ methodCount = codec.read_uint16()
+ for idx in range(propCount):
+ self.properties.append(SchemaProperty(codec))
+ for idx in range(statCount):
+ self.statistics.append(SchemaStatistic(codec))
+ for idx in range(methodCount):
+ self.methods.append(SchemaMethod(codec))
+
+ elif self.kind == self.CLASS_KIND_EVENT:
+ argCount = codec.read_uint16()
+ for idx in range(argCount):
+ self.arguments.append(SchemaArgument(codec, methodArg=False))
+
+ def __repr__(self):
+ if self.kind == self.CLASS_KIND_TABLE:
+ kindStr = "Table"
+ elif self.kind == self.CLASS_KIND_EVENT:
+ kindStr = "Event"
+ else:
+ kindStr = "Unsupported"
+ result = "%s Class: %s " % (kindStr, self.classKey.__repr__())
+ return result
+
+ def getKey(self):
+ """ Return the class-key for this class. """
+ return self.classKey
+
+ def getProperties(self):
+ """ Return the list of properties for the class. """
+ return self.properties
+
+ def getStatistics(self):
+ """ Return the list of statistics for the class. """
+ return self.statistics
+
+ def getMethods(self):
+ """ Return the list of methods for the class. """
+ return self.methods
+
+ def getArguments(self):
+ """ Return the list of events for the class. """
+ return self.arguments
+
+class SchemaProperty:
+ """ Schema definition of a single object property, decoded from a codec map.
+ Optional descriptors (unit, min, max, maxlen, desc) default to None. """
+ def __init__(self, codec):
+ map = codec.read_map()
+ self.name = str(map["name"])
+ self.type = map["type"]
+ self.access = str(map["access"])
+ # index/optional arrive as integer flags; normalize to bool.
+ self.index = map["index"] != 0
+ self.optional = map["optional"] != 0
+ self.unit = None
+ self.min = None
+ self.max = None
+ self.maxlen = None
+ self.desc = None
+
+ for key, value in map.items():
+ if key == "unit" : self.unit = value
+ elif key == "min" : self.min = value
+ elif key == "max" : self.max = value
+ elif key == "maxlen" : self.maxlen = value
+ elif key == "desc" : self.desc = value
+
+ def __repr__(self):
+ return self.name
+
+class SchemaStatistic:
+ """ Schema definition of a single object statistic, decoded from a codec map.
+ Optional descriptors (unit, desc) default to None. """
+ def __init__(self, codec):
+ map = codec.read_map()
+ self.name = str(map["name"])
+ self.type = map["type"]
+ self.unit = None
+ self.desc = None
+
+ for key, value in map.items():
+ if key == "unit" : self.unit = value
+ elif key == "desc" : self.desc = value
+
+ def __repr__(self):
+ return self.name
+
+class SchemaMethod:
+ """ Schema definition of a method: a name, optional description and an
+ ordered list of SchemaArgument (each with an I/O direction). """
+ def __init__(self, codec):
+ map = codec.read_map()
+ self.name = str(map["name"])
+ argCount = map["argCount"]
+ if "desc" in map:
+ self.desc = map["desc"]
+ else:
+ self.desc = None
+ self.arguments = []
+
+ for idx in range(argCount):
+ self.arguments.append(SchemaArgument(codec, methodArg=True))
+
+ def __repr__(self):
+ # Render only the input ("I" direction) arguments in the signature.
+ result = self.name + "("
+ first = True
+ for arg in self.arguments:
+ if arg.dir.find("I") != -1:
+ if first:
+ first = False
+ else:
+ result += ", "
+ result += arg.name
+ result += ")"
+ return result
+
+class SchemaArgument:
+ """ Schema definition of a method or event argument, decoded from a codec map.
+ The 'dir' direction string (containing "I" and/or "O") is only present
+ for method arguments; event arguments carry no direction. """
+ def __init__(self, codec, methodArg):
+ map = codec.read_map()
+ self.name = str(map["name"])
+ self.type = map["type"]
+ if methodArg:
+ self.dir = str(map["dir"]).upper()
+ self.unit = None
+ self.min = None
+ self.max = None
+ self.maxlen = None
+ self.desc = None
+ self.default = None
+
+ for key, value in map.items():
+ if key == "unit" : self.unit = value
+ elif key == "min" : self.min = value
+ elif key == "max" : self.max = value
+ elif key == "maxlen" : self.maxlen = value
+ elif key == "desc" : self.desc = value
+ elif key == "default" : self.default = value
+
class ObjectId:
    """ Object that represents QMF object identifiers.

    The id is two 64-bit words: 'first' packs flags, sequence number,
    broker bank and agent bank; 'second' is the object number.
    """
    def __init__(self, codec, first=0, second=0):
        """ Decode from the codec when one is given; otherwise build from
        the raw first/second words. """
        if codec:
            self.first = codec.read_uint64()
            self.second = codec.read_uint64()
        else:
            self.first = first
            self.second = second

    def __cmp__(self, other):
        # Python 2 ordering: anything that is not an ObjectId sorts lower.
        if other == None or not isinstance(other, ObjectId):
            return 1
        if self.first < other.first:
            return -1
        if self.first > other.first:
            return 1
        if self.second < other.second:
            return -1
        if self.second > other.second:
            return 1
        return 0

    def __repr__(self):
        return "%d-%d-%d-%d-%d" % (self.getFlags(), self.getSequence(),
                                   self.getBrokerBank(), self.getAgentBank(), self.getObject())

    def index(self):
        """ Return the (first, second) tuple used as a dictionary key. """
        return (self.first, self.second)

    def getFlags(self):
        return (self.first & 0xF000000000000000) >> 60

    def getSequence(self):
        return (self.first & 0x0FFF000000000000) >> 48

    def getBrokerBank(self):
        return (self.first & 0x0000FFFFF0000000) >> 28

    def getAgentBank(self):
        return self.first & 0x000000000FFFFFFF

    def getObject(self):
        return self.second

    def isDurable(self):
        """ A zero sequence number marks a durable (persistent) id. """
        return self.getSequence() == 0

    def encode(self, codec):
        codec.write_uint64(self.first)
        codec.write_uint64(self.second)

    def __hash__(self):
        return (self.first, self.second).__hash__()

    def __eq__(self, other):
        # Fixed: the original compared a tuple against the ObjectId instance
        # itself, so two equal ObjectIds never compared equal via '=='.
        # Compare field tuples; a raw (first, second) tuple still compares
        # equal, preserving the original tuple-comparison behavior.
        if isinstance(other, ObjectId):
            return (self.first, self.second) == (other.first, other.second)
        return (self.first, self.second) == other

    def __ne__(self, other):
        # Keep != consistent with the corrected __eq__.
        return not self.__eq__(other)
+
class Object(object):
    """ An instance of a QMF managed object, decoded against its schema.

    Property and statistic values are read from the codec at construction
    time; methods described by the schema are exposed as callable
    attributes via __getattr__.
    """
    def __init__(self, session, broker, schema, codec, prop, stat):
        """ Decode an object from the codec; prop/stat select which value
        sections are present in the encoded content. """
        self._session = session
        self._broker = broker
        self._schema = schema
        # Timestamps are 64-bit values: current, create, delete.
        self._currentTime = codec.read_uint64()
        self._createTime = codec.read_uint64()
        self._deleteTime = codec.read_uint64()
        self._objectId = ObjectId(codec)
        self._properties = []
        self._statistics = []
        if prop:
            notPresent = self._parsePresenceMasks(codec, schema)
            for property in schema.getProperties():
                if property.name in notPresent:
                    # Optional property omitted from this update.
                    self._properties.append((property, None))
                else:
                    self._properties.append((property, self._session._decodeValue(codec, property.type)))
        if stat:
            for statistic in schema.getStatistics():
                self._statistics.append((statistic, self._session._decodeValue(codec, statistic.type)))

    def getBroker(self):
        """ Return the broker from which this object was sent """
        return self._broker

    def getObjectId(self):
        """ Return the object identifier for this object """
        return self._objectId

    def getClassKey(self):
        """ Return the class-key that references the schema describing this object. """
        return self._schema.getKey()

    def getSchema(self):
        """ Return the schema that describes this object. """
        return self._schema

    def getMethods(self):
        """ Return a list of methods available for this object. """
        return self._schema.getMethods()

    def getTimestamps(self):
        """ Return the current, creation, and deletion times for this object. """
        return self._currentTime, self._createTime, self._deleteTime

    def getIndex(self):
        """ Return a string describing this object's primary key. """
        result = u""
        for property, value in self._properties:
            if property.index:
                if result != u"":
                    result += u":"
                try:
                    valstr = unicode(self._session._displayValue(value, property.type))
                except:
                    valstr = u"<undecodable>"
                result += valstr
        return result

    def getProperties(self):
        """ Return the list of (SchemaProperty, value) pairs. """
        return self._properties

    def getStatistics(self):
        """ Return the list of (SchemaStatistic, value) pairs. """
        return self._statistics

    def mergeUpdate(self, newer):
        """ Replace properties and/or statistics with a newly received update """
        if self._objectId != newer._objectId:
            raise Exception("Objects with different object-ids")
        # Fixed: the original assigned to self.properties/self.statistics,
        # creating new attributes and silently discarding the update.
        if len(newer.getProperties()) > 0:
            self._properties = newer.getProperties()
        if len(newer.getStatistics()) > 0:
            self._statistics = newer.getStatistics()

    def __repr__(self):
        key = self.getClassKey()
        return key.getPackageName() + ":" + key.getClassName() +\
            "[" + self.getObjectId().__repr__() + "] " + self.getIndex().encode("utf8")

    def __getattr__(self, name):
        # Resolution order: schema methods, then properties (including
        # reference dereference via _name_), then statistics.
        for method in self._schema.getMethods():
            if name == method.name:
                return lambda *args, **kwargs : self._invoke(name, args, kwargs)
        for property, value in self._properties:
            if name == property.name:
                return value
            if name == "_" + property.name + "_" and property.type == 10:  # Dereference references
                deref = self._session.getObjects(_objectId=value, _broker=self._broker)
                if len(deref) != 1:
                    return None
                else:
                    return deref[0]
        for statistic, value in self._statistics:
            if name == statistic.name:
                return value
        raise Exception("Type Object has no attribute '%s'" % name)

    def _sendMethodRequest(self, name, args, kwargs, synchronous=False):
        """ Encode and send a method request; returns the reserved sequence
        number, or None if the schema has no such method. """
        for method in self._schema.getMethods():
            if name == method.name:
                aIdx = 0
                sendCodec = Codec(self._broker.conn.spec)
                seq = self._session.seqMgr._reserve((method, synchronous))
                self._broker._setHeader(sendCodec, 'M', seq)
                self._objectId.encode(sendCodec)
                self._schema.getKey().encode(sendCodec)
                sendCodec.write_str8(name)

                # Only arguments with direction "I" (input) are sent.
                count = 0
                for arg in method.arguments:
                    if arg.dir.find("I") != -1:
                        count += 1
                if count != len(args):
                    raise Exception("Incorrect number of arguments: expected %d, got %d" % (count, len(args)))

                for arg in method.arguments:
                    if arg.dir.find("I") != -1:
                        self._session._encodeValue(sendCodec, args[aIdx], arg.type)
                        aIdx += 1
                smsg = self._broker._message(sendCodec.encoded, "agent.%d.%d" %
                                             (self._objectId.getBrokerBank(), self._objectId.getAgentBank()))
                if synchronous:
                    try:
                        self._broker.cv.acquire()
                        self._broker.syncInFlight = True
                    finally:
                        self._broker.cv.release()
                self._broker._send(smsg)
                return seq
        return None

    def _invoke(self, name, args, kwargs):
        """ Synchronously invoke a schema method and return the broker's result. """
        # Fixed: capture the sequence number (needed to release the pending
        # entry on timeout; the original referenced an undefined 'seq'), and
        # test against None -- sequence number 0 is valid and was previously
        # treated as failure.
        seq = self._sendMethodRequest(name, args, kwargs, True)
        if seq is not None:
            try:
                self._broker.cv.acquire()
                starttime = time()
                while self._broker.syncInFlight and self._broker.error == None:
                    self._broker.cv.wait(self._broker.SYNC_TIME)
                    if time() - starttime > self._broker.SYNC_TIME:
                        self._session.seqMgr._release(seq)
                        raise RuntimeError("Timed out waiting for method to respond")
            finally:
                self._broker.cv.release()
            if self._broker.error != None:
                errorText = self._broker.error
                self._broker.error = None
                raise Exception(errorText)
            return self._broker.syncResult
        raise Exception("Invalid Method (software defect) [%s]" % name)

    def _parsePresenceMasks(self, codec, schema):
        """ Read the presence bitmasks for optional properties and return the
        names of properties absent from this update. """
        excludeList = []
        bit = 0
        for property in schema.getProperties():
            if property.optional:
                if bit == 0:
                    # Start a new 8-bit mask; each optional property consumes one bit.
                    mask = codec.read_uint8()
                    bit = 1
                if (mask & bit) == 0:
                    excludeList.append(property.name)
                bit *= 2
                if bit == 256:
                    bit = 0
        return excludeList
+
class MethodResult(object):
    """ Outcome of a QMF method call: a status code, status text, and a
    dictionary of output arguments exposed as attributes. """

    def __init__(self, status, text, outArgs):
        self.status = status
        self.text = text
        self.outArgs = outArgs

    def __getattr__(self, name):
        # Output arguments read like attributes; unknown names yield None,
        # matching the original implicit-return behavior.
        return self.outArgs.get(name)

    def __repr__(self):
        return "%s (%d) - %s" % (self.text, self.status, self.outArgs)
+
+class ManagedConnection(Thread):
+ """ Thread class for managing a connection. """
+ # Reconnect back-off: delay doubles from DELAY_MIN up to DELAY_MAX seconds.
+ DELAY_MIN = 1
+ DELAY_MAX = 128
+ DELAY_FACTOR = 2
+
+ def __init__(self, broker):
+ Thread.__init__(self)
+ self.broker = broker
+ self.cv = Condition()
+ self.canceled = False
+
+ def stop(self):
+ """ Tell this thread to stop running and return. """
+ try:
+ self.cv.acquire()
+ self.canceled = True
+ self.cv.notify()
+ finally:
+ self.cv.release()
+
+ def disconnected(self):
+ """ Notify the thread that the connection was lost. """
+ try:
+ self.cv.acquire()
+ self.cv.notify()
+ finally:
+ self.cv.release()
+
+ def run(self):
+ """ Main body of the running thread. """
+ delay = self.DELAY_MIN
+ while True:
+ try:
+ self.broker._tryToConnect()
+ try:
+ self.cv.acquire()
+ # Connected: sleep until canceled or the connection drops,
+ # then reset the back-off delay.
+ while (not self.canceled) and self.broker.connected:
+ self.cv.wait()
+ if self.canceled:
+ return
+ delay = self.DELAY_MIN
+ finally:
+ self.cv.release()
+ # Connection attempt failed: back off exponentially up to DELAY_MAX.
+ except socket.error:
+ if delay < self.DELAY_MAX:
+ delay *= self.DELAY_FACTOR
+ except SessionDetached:
+ if delay < self.DELAY_MAX:
+ delay *= self.DELAY_FACTOR
+ except Closed:
+ if delay < self.DELAY_MAX:
+ delay *= self.DELAY_FACTOR
+
+ try:
+ self.cv.acquire()
+ self.cv.wait(delay)
+ if self.canceled:
+ return
+ finally:
+ self.cv.release()
+
+class Broker:
+ """ This object represents a connection (or potential connection) to a QMF broker. """
+ SYNC_TIME = 60
+
+ def __init__(self, session, host, port, authMech, authUser, authPass, ssl=False):
+ # NOTE(review): authMech is accepted but never used below — confirm intended.
+ self.session = session
+ self.host = host
+ self.port = port
+ self.ssl = ssl
+ self.authUser = authUser
+ self.authPass = authPass
+ self.cv = Condition()
+ self.error = None
+ self.brokerId = None
+ self.connected = False
+ self.amqpSessionId = "%s.%d" % (os.uname()[1], os.getpid())
+ # Managed mode reconnects in a background thread; otherwise connect
+ # inline (and let connection errors propagate to the caller).
+ if self.session.manageConnections:
+ self.thread = ManagedConnection(self)
+ self.thread.start()
+ else:
+ self.thread = None
+ self._tryToConnect()
+
+ def isConnected(self):
+ """ Return True if there is an active connection to the broker. """
+ return self.connected
+
+ def getError(self):
+ """ Return the last error message seen while trying to connect to the broker. """
+ return self.error
+
+ def getBrokerId(self):
+ """ Get broker's unique identifier (UUID) """
+ return self.brokerId
+
+ def getBrokerBank(self):
+ """ Return the broker-bank value. This is the value that the broker assigns to
+ objects within its control. This value appears as a field in the ObjectId
+ of objects created by agents controlled by this broker. """
+ return 1
+
+ def getAgent(self, brokerBank, agentBank):
+ """ Return the agent object associated with a particular broker and agent bank value."""
+ bankKey = (brokerBank, agentBank)
+ if bankKey in self.agents:
+ return self.agents[bankKey]
+ return None
+
+ def getSessionId(self):
+ """ Get the identifier of the AMQP session to the broker """
+ return self.amqpSessionId
+
+ def getAgents(self):
+ """ Get the list of agents reachable via this broker """
+ return self.agents.values()
+
+ def getAmqpSession(self):
+ """ Get the AMQP session object for this connected broker. """
+ return self.amqpSession
+
+ def getUrl(self):
+ """ Return the broker address as "host:port". """
+ return "%s:%d" % (self.host, self.port)
+
+ def getFullUrl(self, noAuthIfGuestDefault=True):
+ """ Return an amqp:// (or amqps://) URL, omitting credentials when they
+ are empty or the guest/guest default (unless noAuthIfGuestDefault=False). """
+ ssl = ""
+ if self.ssl:
+ ssl = "s"
+ auth = "%s/%s@" % (self.authUser, self.authPass)
+ if self.authUser == "" or \
+ (noAuthIfGuestDefault and self.authUser == "guest" and self.authPass == "guest"):
+ auth = ""
+ return "amqp%s://%s%s:%d" % (ssl, auth, self.host, self.port or 5672)
+
+ def __repr__(self):
+ if self.connected:
+ return "Broker connected at: %s" % self.getUrl()
+ else:
+ return "Disconnected Broker"
+
+ def _tryToConnect(self):
+ """ Establish the AMQP connection, set up the reply and topic queues,
+ and send the initial broker request ('B' opcode). Raises on failure
+ after recording the error text in self.error. """
+ try:
+ self.agents = {}
+ # Agent (1, 0) is the broker's own embedded agent.
+ self.agents[(1,0)] = Agent(self, 0, "BrokerAgent")
+ self.topicBound = False
+ self.syncInFlight = False
+ self.syncRequest = 0
+ self.syncResult = None
+ self.reqsOutstanding = 1
+
+ sock = connect(self.host, self.port)
+ if self.ssl:
+ sock = ssl(sock)
+ self.conn = Connection(sock, username=self.authUser, password=self.authPass)
+ self.conn.start()
+ # Private reply queue, bound on amq.direct, for direct responses.
+ self.replyName = "reply-%s" % self.amqpSessionId
+ self.amqpSession = self.conn.session(self.amqpSessionId)
+ self.amqpSession.auto_sync = True
+ self.amqpSession.queue_declare(queue=self.replyName, exclusive=True, auto_delete=True)
+ self.amqpSession.exchange_bind(exchange="amq.direct",
+ queue=self.replyName, binding_key=self.replyName)
+ self.amqpSession.message_subscribe(queue=self.replyName, destination="rdest",
+ accept_mode=self.amqpSession.accept_mode.none,
+ acquire_mode=self.amqpSession.acquire_mode.pre_acquired)
+ self.amqpSession.incoming("rdest").listen(self._replyCb, self._exceptionCb)
+ self.amqpSession.message_set_flow_mode(destination="rdest", flow_mode=1)
+ self.amqpSession.message_flow(destination="rdest", unit=0, value=0xFFFFFFFF)
+ self.amqpSession.message_flow(destination="rdest", unit=1, value=0xFFFFFFFF)
+
+ # Private topic queue for broadcast indications; its bindings are
+ # added later in _decOutstanding once the schema sync is complete.
+ self.topicName = "topic-%s" % self.amqpSessionId
+ self.amqpSession.queue_declare(queue=self.topicName, exclusive=True, auto_delete=True)
+ self.amqpSession.message_subscribe(queue=self.topicName, destination="tdest",
+ accept_mode=self.amqpSession.accept_mode.none,
+ acquire_mode=self.amqpSession.acquire_mode.pre_acquired)
+ self.amqpSession.incoming("tdest").listen(self._replyCb)
+ self.amqpSession.message_set_flow_mode(destination="tdest", flow_mode=1)
+ self.amqpSession.message_flow(destination="tdest", unit=0, value=0xFFFFFFFF)
+ self.amqpSession.message_flow(destination="tdest", unit=1, value=0xFFFFFFFF)
+
+ self.connected = True
+ self.session._handleBrokerConnect(self)
+
+ codec = Codec(self.conn.spec)
+ self._setHeader(codec, 'B')
+ msg = self._message(codec.encoded)
+ self._send(msg)
+
+ except socket.error, e:
+ self.error = "Socket Error %s - %s" % (e[0], e[1])
+ raise
+ except Closed, e:
+ self.error = "Connect Failed %d - %s" % (e[0], e[1])
+ raise
+ except ConnectionFailed, e:
+ self.error = "Connect Failed %d - %s" % (e[0], e[1])
+ raise
+
+ def _updateAgent(self, obj):
+ """ Add or remove an agent from the registry based on an agent-object
+ update; a non-zero delete-time means the agent has gone away. """
+ bankKey = (obj.brokerBank, obj.agentBank)
+ if obj._deleteTime == 0:
+ if bankKey not in self.agents:
+ agent = Agent(self, obj.agentBank, obj.label)
+ self.agents[bankKey] = agent
+ if self.session.console != None:
+ self.session.console.newAgent(agent)
+ else:
+ agent = self.agents.pop(bankKey, None)
+ if agent != None and self.session.console != None:
+ self.session.console.delAgent(agent)
+
+ def _setHeader(self, codec, opcode, seq=0):
+ """ Compose the header of a management message. """
+ # Magic prefix "AM2" identifies QMF protocol version 2 framing.
+ codec.write_uint8(ord('A'))
+ codec.write_uint8(ord('M'))
+ codec.write_uint8(ord('2'))
+ codec.write_uint8(ord(opcode))
+ codec.write_uint32(seq)
+
+ def _checkHeader(self, codec):
+ """ Check the header of a management message and extract the opcode and class. """
+ # Returns (None, None) for non-QMF messages or truncated input.
+ try:
+ octet = chr(codec.read_uint8())
+ if octet != 'A':
+ return None, None
+ octet = chr(codec.read_uint8())
+ if octet != 'M':
+ return None, None
+ octet = chr(codec.read_uint8())
+ if octet != '2':
+ return None, None
+ opcode = chr(codec.read_uint8())
+ seq = codec.read_uint32()
+ return opcode, seq
+ except:
+ return None, None
+
+ def _message (self, body, routing_key="broker"):
+ """ Wrap a body in an AMQP message addressed back to our reply queue. """
+ dp = self.amqpSession.delivery_properties()
+ dp.routing_key = routing_key
+ mp = self.amqpSession.message_properties()
+ mp.content_type = "x-application/qmf"
+ mp.reply_to = self.amqpSession.reply_to("amq.direct", self.replyName)
+ return Message(dp, mp, body)
+
+ def _send(self, msg, dest="qpid.management"):
+ self.amqpSession.message_transfer(destination=dest, message=msg)
+
+ def _shutdown(self):
+ """ Stop the management thread (if any) and tear down the connection. """
+ if self.thread:
+ self.thread.stop()
+ self.thread.join()
+ if self.connected:
+ self.amqpSession.incoming("rdest").stop()
+ if self.session.console != None:
+ self.amqpSession.incoming("tdest").stop()
+ self.amqpSession.close()
+ self.conn.close()
+ self.connected = False
+
+ def _waitForStable(self):
+ """ Block until all outstanding schema/broker requests have completed,
+ or raise after SYNC_TIME seconds. """
+ try:
+ self.cv.acquire()
+ if not self.connected:
+ return
+ if self.reqsOutstanding == 0:
+ return
+ self.syncInFlight = True
+ starttime = time()
+ while self.reqsOutstanding != 0:
+ self.cv.wait(self.SYNC_TIME)
+ if time() - starttime > self.SYNC_TIME:
+ raise RuntimeError("Timed out waiting for broker to synchronize")
+ finally:
+ self.cv.release()
+
+ def _incOutstanding(self):
+ try:
+ self.cv.acquire()
+ self.reqsOutstanding += 1
+ finally:
+ self.cv.release()
+
+ def _decOutstanding(self):
+ """ Complete one outstanding request; when the count reaches zero, bind
+ the topic queue (once) and wake any _waitForStable() waiter. """
+ try:
+ self.cv.acquire()
+ self.reqsOutstanding -= 1
+ if self.reqsOutstanding == 0 and not self.topicBound:
+ self.topicBound = True
+ for key in self.session.bindingKeyList:
+ self.amqpSession.exchange_bind(exchange="qpid.management",
+ queue=self.topicName, binding_key=key)
+ if self.reqsOutstanding == 0 and self.syncInFlight:
+ self.syncInFlight = False
+ self.cv.notify()
+ finally:
+ self.cv.release()
+
+ def _replyCb(self, msg):
+ """ Dispatch every QMF event in the message body to the session by opcode. """
+ codec = Codec(self.conn.spec, msg.body)
+ while True:
+ opcode, seq = self._checkHeader(codec)
+ if opcode == None: return
+ if opcode == 'b': self.session._handleBrokerResp (self, codec, seq)
+ elif opcode == 'p': self.session._handlePackageInd (self, codec, seq)
+ elif opcode == 'z': self.session._handleCommandComplete (self, codec, seq)
+ elif opcode == 'q': self.session._handleClassInd (self, codec, seq)
+ elif opcode == 'm': self.session._handleMethodResp (self, codec, seq)
+ elif opcode == 'h': self.session._handleHeartbeatInd (self, codec, seq, msg)
+ elif opcode == 'e': self.session._handleEventInd (self, codec, seq)
+ elif opcode == 's': self.session._handleSchemaResp (self, codec, seq)
+ elif opcode == 'c': self.session._handleContentInd (self, codec, seq, prop=True)
+ elif opcode == 'i': self.session._handleContentInd (self, codec, seq, stat=True)
+ elif opcode == 'g': self.session._handleContentInd (self, codec, seq, prop=True, stat=True)
+
+ def _exceptionCb(self, data):
+ """ Connection-level failure: record the error, wake sync waiters, and
+ notify the session and the managing thread. """
+ self.connected = False
+ self.error = data
+ try:
+ self.cv.acquire()
+ if self.syncInFlight:
+ self.cv.notify()
+ finally:
+ self.cv.release()
+ self.session._handleError(self.error)
+ self.session._handleBrokerDisconnect(self)
+ if self.thread:
+ self.thread.disconnected()
+
class Agent:
    """ A QMF agent, addressed by its (broker bank, agent bank) pair and
    reachable through a particular broker. """

    def __init__(self, broker, agentBank, label):
        self.broker = broker
        self.brokerBank = broker.getBrokerBank()
        self.agentBank = agentBank
        self.label = label

    def getBroker(self):
        """ Return the broker this agent is reachable through. """
        return self.broker

    def getBrokerBank(self):
        """ Return the broker-bank component of the agent's address. """
        return self.brokerBank

    def getAgentBank(self):
        """ Return the agent-bank component of the agent's address. """
        return self.agentBank

    def __repr__(self):
        return "Agent at bank %d.%d (%s)" % (self.brokerBank, self.agentBank, self.label)
+
+class Event:
+ """ A raised QMF event, decoded against its schema when the schema for the
+ event's class is already known to the session. """
+ def __init__(self, session, broker, codec):
+ self.session = session
+ self.broker = broker
+ self.classKey = ClassKey(codec)
+ # Timestamp is nanoseconds since the epoch (see __repr__'s / 1000000000).
+ self.timestamp = codec.read_int64()
+ self.severity = codec.read_uint8()
+ self.schema = None
+ pname = self.classKey.getPackageName()
+ pkey = self.classKey.getPackageKey()
+ # NOTE(review): when the schema is unknown, 'arguments' appears to be
+ # left unset and getArguments() would raise — confirm callers guard on
+ # getSchema() first.
+ if pname in session.packages:
+ if pkey in session.packages[pname]:
+ self.schema = session.packages[pname][pkey]
+ self.arguments = {}
+ for arg in self.schema.arguments:
+ self.arguments[arg.name] = session._decodeValue(codec, arg.type)
+
+ def __repr__(self):
+ if self.schema == None:
+ return "<uninterpretable>"
+ out = strftime("%c", gmtime(self.timestamp / 1000000000))
+ out += " " + self._sevName() + " " + self.classKey.getPackageName() + ":" + self.classKey.getClassName()
+ out += " broker=" + self.broker.getUrl()
+ for arg in self.schema.arguments:
+ disp = self.session._displayValue(self.arguments[arg.name], arg.type).encode("utf8")
+ if " " in disp:
+ disp = "\"" + disp + "\""
+ out += " " + arg.name + "=" + disp
+ return out
+
+ def _sevName(self):
+ """ Map the numeric severity to a fixed-width display label. """
+ if self.severity == 0 : return "EMER "
+ if self.severity == 1 : return "ALERT"
+ if self.severity == 2 : return "CRIT "
+ if self.severity == 3 : return "ERROR"
+ if self.severity == 4 : return "WARN "
+ if self.severity == 5 : return "NOTIC"
+ if self.severity == 6 : return "INFO "
+ if self.severity == 7 : return "DEBUG"
+ return "INV-%d" % self.severity
+
+ def getClassKey(self):
+ return self.classKey
+
+ def getArguments(self):
+ return self.arguments
+
+ def getTimestamp(self):
+ return self.timestamp
+
+ def getName(self):
+ # NOTE(review): self.name is never assigned anywhere in this class, so
+ # this raises AttributeError — possibly meant classKey.getClassName().
+ return self.name
+
+ def getSchema(self):
+ return self.schema
+
class SequenceManager:
    """ Manage sequence numbers for asynchronous method calls """

    def __init__(self):
        self.lock = Lock()
        self.sequence = 0   # next sequence number to hand out
        self.pending = {}   # seq -> caller-supplied data

    def _reserve(self, data):
        """ Reserve a unique sequence number """
        self.lock.acquire()
        try:
            seq = self.sequence
            self.sequence += 1
            self.pending[seq] = data
        finally:
            self.lock.release()
        return seq

    def _release(self, seq):
        """ Release a reserved sequence number """
        # Return the data stored at reservation time, or None if the
        # sequence was unknown or already released.
        self.lock.acquire()
        try:
            return self.pending.pop(seq, None)
        finally:
            self.lock.release()
+
+
+class DebugConsole(Console):
+ """ Trivial Console implementation that prints every callback it receives;
+ useful for tracing session activity during development. """
+ def brokerConnected(self, broker):
+ print "brokerConnected:", broker
+
+ def brokerDisconnected(self, broker):
+ print "brokerDisconnected:", broker
+
+ def newPackage(self, name):
+ print "newPackage:", name
+
+ def newClass(self, kind, classKey):
+ print "newClass:", kind, classKey
+
+ def newAgent(self, agent):
+ print "newAgent:", agent
+
+ def delAgent(self, agent):
+ print "delAgent:", agent
+
+ def objectProps(self, broker, record):
+ print "objectProps:", record
+
+ def objectStats(self, broker, record):
+ print "objectStats:", record
+
+ def event(self, broker, event):
+ print "event:", event
+
+ def heartbeat(self, agent, timestamp):
+ print "heartbeat:", agent
+
+ def brokerInfo(self, broker):
+ print "brokerInfo:", broker
+
diff --git a/RC9/qpid/python/qpid/__init__.py b/RC9/qpid/python/qpid/__init__.py
new file mode 100644
index 0000000000..ff9cc04df8
--- /dev/null
+++ b/RC9/qpid/python/qpid/__init__.py
@@ -0,0 +1,84 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import spec, codec, connection, content, peer, delegate, client
+
+class Struct:
+ """ Generic struct whose legal fields are defined by a spec 'type';
+ values are accessible both as attributes and as items. """
+
+ def __init__(self, type, *args, **kwargs):
+ # Assign through __dict__ directly to avoid recursing into the
+ # overridden __setattr__ before 'type'/'_values' exist.
+ self.__dict__["type"] = type
+ self.__dict__["_values"] = {}
+
+ if len(args) > len(self.type.fields):
+ raise TypeError("too many args")
+
+ # Positional args map onto the fields in declaration order.
+ for a, f in zip(args, self.type.fields):
+ self.set(f.name, a)
+
+ for k, a in kwargs.items():
+ self.set(k, a)
+
+ def _check(self, attr):
+ """ Return the field descriptor for attr or raise AttributeError. """
+ field = self.type.fields.byname.get(attr)
+ if field == None:
+ raise AttributeError(attr)
+ return field
+
+ def exists(self, attr):
+ """ True if the type defines a field with this name. """
+ return self.type.fields.byname.has_key(attr)
+
+ def has(self, attr):
+ """ True if a value has been set for this (valid) field. """
+ self._check(attr)
+ return self._values.has_key(attr)
+
+ def set(self, attr, value):
+ self._check(attr)
+ self._values[attr] = value
+
+ def get(self, attr):
+ # Unset fields fall back to the field's declared default.
+ field = self._check(attr)
+ return self._values.get(attr, field.default())
+
+ def clear(self, attr):
+ self._check(attr)
+ del self._values[attr]
+
+ def __setattr__(self, attr, value):
+ self.set(attr, value)
+
+ def __getattr__(self, attr):
+ return self.get(attr)
+
+ def __delattr__(self, attr):
+ self.clear(attr)
+
+ def __setitem__(self, attr, value):
+ self.set(attr, value)
+
+ def __getitem__(self, attr):
+ return self.get(attr)
+
+ def __delitem__(self, attr):
+ self.clear(attr)
+
+ def __str__(self):
+ return "%s %s" % (self.type, self._values)
+
+ def __repr__(self):
+ return str(self)
diff --git a/RC9/qpid/python/qpid/assembler.py b/RC9/qpid/python/qpid/assembler.py
new file mode 100644
index 0000000000..92bb0aa0f8
--- /dev/null
+++ b/RC9/qpid/python/qpid/assembler.py
@@ -0,0 +1,118 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from codec010 import StringCodec
+from framer import *
+from logging import getLogger
+
+log = getLogger("qpid.io.seg")
+
class Segment:
  """One protocol segment: the (re)assembled payload of one or more
  frames plus the framing metadata needed to route and decode it."""

  def __init__(self, first, last, type, track, channel, payload):
    # id and offset are assigned later by higher layers
    self.id = None
    self.offset = None
    self.first = first
    self.last = last
    self.type = type
    self.track = track
    self.channel = channel
    self.payload = payload

  def decode(self, spec):
    """Dispatch to decode_<name> according to this segment's type code."""
    choice = spec["segment_type"].choices[self.type]
    decoder = getattr(self, "decode_%s" % choice.name)
    return decoder(spec)

  def decode_control(self, spec):
    codec = StringCodec(spec, self.payload)
    return codec.read_control()

  def decode_command(self, spec):
    codec = StringCodec(spec, self.payload)
    hdr, cmd = codec.read_command()
    cmd.id = self.id
    return hdr, cmd

  def decode_header(self, spec):
    # a header segment is a concatenation of size-prefixed structs
    codec = StringCodec(spec, self.payload)
    structs = []
    while len(codec.encoded) > 0:
      structs.append(codec.read_struct32())
    return structs

  def decode_body(self, spec):
    return self.payload

  def __str__(self):
    return "%s%s %s %s %s %r" % (int(self.first), int(self.last), self.type,
                                 self.track, self.channel, self.payload)

  def __repr__(self):
    return str(self)
+
class Assembler(Framer):
  """Framer layer that reassembles incoming frames into segments and
  fragments outgoing segments into frames of bounded payload size."""

  def __init__(self, sock, max_payload = Frame.MAX_PAYLOAD):
    Framer.__init__(self, sock)
    self.max_payload = max_payload
    # (channel, track) -> partially assembled Segment
    self.fragments = {}

  def read_segment(self):
    """Read frames until one segment is complete and return it."""
    while True:
      frame = self.read_frame()

      key = (frame.channel, frame.track)
      if key in self.fragments:
        segment = self.fragments[key]
      else:
        segment = Segment(frame.isFirstSegment(), frame.isLastSegment(),
                          frame.type, frame.track, frame.channel, "")
        self.fragments[key] = segment

      segment.payload += frame.payload

      if not frame.isLastFrame():
        continue
      del self.fragments[key]
      log.debug("RECV %s", segment)
      return segment

  def write_segment(self, segment):
    """Write segment as one or more frames of at most max_payload bytes."""
    data = segment.payload

    sent = False
    # always emit at least one frame, even for an empty payload
    while not sent or data:
      chunk = data[:self.max_payload]
      data = data[self.max_payload:]

      flags = 0
      if not sent:
        flags |= FIRST_FRM
      if not data:
        flags |= LAST_FRM
      if segment.first:
        flags |= FIRST_SEG
      if segment.last:
        flags |= LAST_SEG

      self.write_frame(Frame(flags, segment.type, segment.track,
                             segment.channel, chunk))
      sent = True

    log.debug("SENT %s", segment)
diff --git a/RC9/qpid/python/qpid/client.py b/RC9/qpid/python/qpid/client.py
new file mode 100644
index 0000000000..4605710de8
--- /dev/null
+++ b/RC9/qpid/python/qpid/client.py
@@ -0,0 +1,225 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+An AMQP client implementation that uses a custom delegate for
+interacting with the server.
+"""
+
+import os, threading
+from peer import Peer, Channel, Closed
+from delegate import Delegate
+from connection08 import Connection, Frame, connect
+from spec import load
+from queue import Queue
+from reference import ReferenceId, References
+
+
class Client:
  """Simple blocking AMQP client.

  Connects to host:port, drives the startup handshake through
  ClientDelegate, and hands out channels/sessions for subsequent use.
  """

  def __init__(self, host, port, spec = None, vhost = None):
    self.host = host
    self.port = port
    if spec:
      self.spec = spec
    else:
      try:
        name = os.environ["AMQP_SPEC"]
      except KeyError:
        raise EnvironmentError("environment variable AMQP_SPEC must be set")
      self.spec = load(name)
    self.structs = StructFactory(self.spec)
    self.sessions = {}

    self.mechanism = None
    self.response = None
    self.locale = None

    self.vhost = vhost
    if self.vhost is None:
      self.vhost = "/"

    # per-destination incoming message queues, created lazily
    self.queues = {}
    self.lock = threading.Lock()

    self.closed = False
    self.reason = None
    self.started = threading.Event()

  def wait(self):
    """Block until connection negotiation finishes; raise Closed on failure."""
    self.started.wait()
    if self.closed:
      raise Closed(self.reason)

  def queue(self, key):
    """Return the incoming-message queue for key, creating it on first use."""
    self.lock.acquire()
    try:
      try:
        q = self.queues[key]
      except KeyError:
        q = Queue(0)
        self.queues[key] = q
    finally:
      self.lock.release()
    return q

  def start(self, response, mechanism="AMQPLAIN", locale="en_US", tune_params=None):
    """Open the socket and perform the AMQP connection handshake."""
    self.mechanism = mechanism
    self.response = response
    self.locale = locale
    self.tune_params = tune_params

    self.socket = connect(self.host, self.port)
    self.conn = Connection(self.socket, self.spec)
    self.peer = Peer(self.conn, ClientDelegate(self), Session)

    self.conn.init()
    self.peer.start()
    self.wait()
    self.channel(0).connection_open(self.vhost)

  def channel(self, id):
    """Return the channel/session with the given id and register it."""
    self.lock.acquire()
    try:
      ssn = self.peer.channel(id)
      ssn.client = self
      self.sessions[id] = ssn
    finally:
      self.lock.release()
    return ssn

  def session(self):
    """Allocate a session on the lowest unused channel id.

    Raises RuntimeError when every channel id is in use.
    """
    self.lock.acquire()
    try:
      id = None
      for i in range(1, 64*1024):
        # bug fix: this used to test has_key(id) -- i.e. always None --
        # so every call picked channel 1; test the candidate i instead
        if i not in self.sessions:
          id = i
          break
    finally:
      self.lock.release()
    if id is None:
      raise RuntimeError("out of channels")
    else:
      return self.channel(id)

  def close(self):
    """Close the underlying socket."""
    self.socket.close()
+
class ClientDelegate(Delegate):
  """Delegate driving the client side of the connection handshake and
  routing asynchronous broker traffic back to the owning Client."""

  def __init__(self, client):
    Delegate.__init__(self)
    self.client = client

  def connection_start(self, ch, msg):
    # reply with the credentials supplied to Client.start()
    msg.start_ok(mechanism=self.client.mechanism,
                 response=self.client.response,
                 locale=self.client.locale)

  def connection_tune(self, ch, msg):
    if self.client.tune_params:
      # TODO: just override the params, i.e. don't require them
      # all to be included in tune_params
      msg.tune_ok(**self.client.tune_params)
    else:
      # echo the broker's proposed tuning values back unchanged
      msg.tune_ok(*msg.frame.args)
    # handshake is far enough along for Client.wait() to return
    self.client.started.set()

  def message_transfer(self, ch, msg):
    # route to the per-destination queue managed by Client.queue()
    self.client.queue(msg.destination).put(msg)

  def message_open(self, ch, msg):
    ch.references.open(msg.reference)

  def message_close(self, ch, msg):
    ch.references.close(msg.reference)

  def message_append(self, ch, msg):
    ch.references.get(msg.reference).append(msg.bytes)

  def message_acquired(self, ch, msg):
    ch.control_queue.put(msg)

  def basic_deliver(self, ch, msg):
    # 0-8/0-9 style delivery, keyed by consumer tag
    self.client.queue(msg.consumer_tag).put(msg)

  def channel_pong(self, ch, msg):
    msg.ok()

  def channel_close(self, ch, msg):
    ch.closed(msg)

  def session_ack(self, ch, msg):
    pass

  def session_closed(self, ch, msg):
    ch.closed(msg)

  def connection_close(self, ch, msg):
    self.client.peer.closed(msg)

  def execution_complete(self, ch, msg):
    ch.completion.complete(msg.cumulative_execution_mark)

  def execution_result(self, ch, msg):
    future = ch.futures[msg.command_id]
    future.put_response(ch, msg.data)

  def closed(self, reason):
    # record the failure and release anyone blocked in Client.wait()
    self.client.closed = True
    self.client.reason = reason
    self.client.started.set()
+
class StructFactory:
  """Attribute-style factory for spec-defined structs.

  Accessing factory.<domain> yields a callable that builds the
  corresponding struct via spec.struct(); callables are cached per name.
  """

  def __init__(self, spec):
    self.spec = spec
    self.factories = {}

  def __getattr__(self, name):
    # dict.has_key is deprecated in 2.x and removed in 3.x; use "in"
    if name in self.factories:
      return self.factories[name]
    elif name in self.spec.domains.byname:
      f = lambda *args, **kwargs: self.struct(name, *args, **kwargs)
      self.factories[name] = f
      return f
    else:
      raise AttributeError(name)

  def struct(self, name, *args, **kwargs):
    return self.spec.struct(name, *args, **kwargs)
+
class Session(Channel):
  """Channel specialization used by Client: tracks message references
  and unregisters itself from the owning Client when closed."""

  def __init__(self, *args):
    Channel.__init__(self, *args)
    self.references = References()
    # assigned by Client.channel() after construction
    self.client = None

  def open(self):
    self.session_open()

  def close(self):
    self.session_close()
    # drop this session from the client's registry under its lock
    self.client.lock.acquire()
    try:
      del self.client.sessions[self.id]
    finally:
      self.client.lock.release()
diff --git a/RC9/qpid/python/qpid/codec.py b/RC9/qpid/python/qpid/codec.py
new file mode 100644
index 0000000000..8026b209dc
--- /dev/null
+++ b/RC9/qpid/python/qpid/codec.py
@@ -0,0 +1,590 @@
+#!/usr/bin/env python
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+Utility code to translate between python objects and AMQP encoded data
+fields.
+
+The unit test for this module is located in tests/codec.py
+"""
+
+import re, qpid, spec08
+from cStringIO import StringIO
+from struct import *
+from reference import ReferenceId
+
class EOF(Exception):
  """Raised by Codec.read when the underlying stream is exhausted."""
  pass
+
# Map verbose field-table-type constant names (derived from the spec)
# onto the codec's canonical encode/decode method suffixes.
TYPE_ALIASES = {
  "long_string": "longstr",
  "unsigned_int": "long"
  }
+
class Codec:

  """
  Handles encoding/decoding of AMQP primitives to/from a file-like
  stream, including packed bit fields, field tables and structs.
  """

  def __init__(self, stream, spec):
    """
    initializing the stream/fields used
    """
    self.stream = stream
    self.spec = spec
    self.nwrote = 0
    self.nread = 0
    self.incoming_bits = []
    self.outgoing_bits = []

    # typecode <-> encoding-name maps used for field tables
    self.types = {}
    self.codes = {}
    # python class -> encoding name used when writing tables
    self.encodings = {
      basestring: "longstr",
      int: "long",
      long: "long",
      None.__class__:"void",
      list: "sequence",
      tuple: "sequence",
      dict: "table"
      }

    for constant in self.spec.constants:
      if constant.klass == "field-table-type":
        type = constant.name.replace("field_table_", "")
        self.typecode(constant.id, TYPE_ALIASES.get(type, type))

    # older specs declare no field-table-type constants; fall back to
    # the classic 'S' (long string) and 'I' (long) codes
    if not self.types:
      self.typecode(ord('S'), "longstr")
      self.typecode(ord('I'), "long")

  def typecode(self, code, type):
    """Register a bidirectional typecode <-> encoding-name mapping."""
    self.types[code] = type
    self.codes[type] = code

  def resolve(self, klass):
    """Return the encoding name for klass, searching base classes."""
    if klass in self.encodings:
      return self.encodings[klass]
    for base in klass.__bases__:
      result = self.resolve(base)
      if result is not None:
        return result

  def read(self, n):
    """
    reads in 'n' bytes from the stream. Can raise EOF exception
    """
    self.clearbits()
    data = self.stream.read(n)
    if n > 0 and len(data) == 0:
      raise EOF()
    self.nread += len(data)
    return data

  def write(self, s):
    """
    writes data 's' to the stream
    """
    self.flushbits()
    self.stream.write(s)
    self.nwrote += len(s)

  def flush(self):
    """
    flushes the bits and data present in the stream
    """
    self.flushbits()
    self.stream.flush()

  def flushbits(self):
    """
    flushes the bits(compressed into octets) onto the stream
    """
    if len(self.outgoing_bits) > 0:
      bytes = []
      index = 0
      for b in self.outgoing_bits:
        if index == 0: bytes.append(0)
        if b: bytes[-1] |= 1 << index
        index = (index + 1) % 8
      # clear before encode_octet: encode_octet re-enters flushbits
      del self.outgoing_bits[:]
      for byte in bytes:
        self.encode_octet(byte)

  def clearbits(self):
    if self.incoming_bits:
      self.incoming_bits = []

  def pack(self, fmt, *args):
    """
    packs the data 'args' as per the format 'fmt' and writes it to the stream
    """
    self.write(pack(fmt, *args))

  def unpack(self, fmt):
    """
    reads data from the stream and unpacks it as per the format 'fmt'
    """
    size = calcsize(fmt)
    data = self.read(size)
    values = unpack(fmt, data)
    if len(values) == 1:
      return values[0]
    else:
      return values

  def encode(self, type, value):
    """
    calls the appropriate encode function e.g. encode_octet, encode_short etc.
    """
    if isinstance(type, spec08.Struct):
      self.encode_struct(type, value)
    else:
      getattr(self, "encode_" + type)(value)

  def decode(self, type):
    """
    calls the appropriate decode function e.g. decode_octet, decode_short etc.
    """
    if isinstance(type, spec08.Struct):
      return self.decode_struct(type)
    else:
      return getattr(self, "decode_" + type)()

  def encode_bit(self, o):
    """
    encodes a bit
    """
    if o:
      self.outgoing_bits.append(True)
    else:
      self.outgoing_bits.append(False)

  def decode_bit(self):
    """
    decodes a bit
    """
    if len(self.incoming_bits) == 0:
      bits = self.decode_octet()
      for i in range(8):
        self.incoming_bits.append(bits >> i & 1 != 0)
    return self.incoming_bits.pop(0)

  def encode_octet(self, o):
    """
    encodes octet (8 bits) data 'o' in network byte order
    """

    # octet's valid range is [0,255]
    if (o < 0 or o > 255):
      raise ValueError('Valid range of octet is [0,255]')

    self.pack("!B", int(o))

  def decode_octet(self):
    """
    decodes a octet (8 bits) encoded in network byte order
    """
    return self.unpack("!B")

  def encode_short(self, o):
    """
    encodes short (16 bits) data 'o' in network byte order
    """

    # short int's valid range is [0,65535]
    if (o < 0 or o > 65535):
      raise ValueError('Valid range of short int is [0,65535]: %s' % o)

    self.pack("!H", int(o))

  def decode_short(self):
    """
    decodes a short (16 bits) in network byte order
    """
    return self.unpack("!H")

  def encode_long(self, o):
    """
    encodes long (32 bits) data 'o' in network byte order
    """

    # we need to check both bounds because on 64 bit platforms
    # struct.pack won't raise an error if o is too large
    if (o < 0 or o > 4294967295):
      raise ValueError('Valid range of long int is [0,4294967295]')

    self.pack("!L", int(o))

  def decode_long(self):
    """
    decodes a long (32 bits) in network byte order
    """
    return self.unpack("!L")

  def encode_signed_long(self, o):
    self.pack("!q", o)

  def decode_signed_long(self):
    return self.unpack("!q")

  def encode_signed_int(self, o):
    self.pack("!l", o)

  def decode_signed_int(self):
    return self.unpack("!l")

  def encode_longlong(self, o):
    """
    encodes long long (64 bits) data 'o' in network byte order
    """
    self.pack("!Q", o)

  def decode_longlong(self):
    """
    decodes a long long (64 bits) in network byte order
    """
    return self.unpack("!Q")

  def encode_float(self, o):
    self.pack("!f", o)

  def decode_float(self):
    return self.unpack("!f")

  def encode_double(self, o):
    self.pack("!d", o)

  def decode_double(self):
    return self.unpack("!d")

  def encode_bin128(self, b):
    for idx in range (0,16):
      self.pack("!B", ord (b[idx]))

  def decode_bin128(self):
    result = ""
    for idx in range (0,16):
      result = result + chr (self.unpack("!B"))
    return result

  def encode_raw(self, len, b):
    for idx in range (0,len):
      self.pack("!B", b[idx])

  def decode_raw(self, len):
    result = ""
    for idx in range (0,len):
      result = result + chr (self.unpack("!B"))
    return result

  def enc_str(self, fmt, s):
    """
    encodes a string 's' in network byte order as per format 'fmt'
    """
    size = len(s)
    self.pack(fmt, size)
    self.write(s)

  def dec_str(self, fmt):
    """
    decodes a string in network byte order as per format 'fmt'
    """
    size = self.unpack(fmt)
    return self.read(size)

  def encode_shortstr(self, s):
    """
    encodes a short string 's' in network byte order
    """

    # short strings are limited to 255 octets
    if len(s) > 255:
      raise ValueError('Short strings are limited to 255 octets')

    self.enc_str("!B", s)

  def decode_shortstr(self):
    """
    decodes a short string in network byte order
    """
    return self.dec_str("!B")

  def encode_longstr(self, s):
    """
    encodes a long string 's' in network byte order
    """
    if isinstance(s, dict):
      self.encode_table(s)
    else:
      self.enc_str("!L", s)

  def decode_longstr(self):
    """
    decodes a long string 's' in network byte order
    """
    return self.dec_str("!L")

  def encode_table(self, tbl):
    """
    encodes a table data structure in network byte order
    """
    enc = StringIO()
    codec = Codec(enc, self.spec)
    if tbl:
      for key, value in tbl.items():
        if self.spec.major == 8 and self.spec.minor == 0 and len(key) > 128:
          raise ValueError("field table key too long: '%s'" % key)
        type = self.resolve(value.__class__)
        if type is None:
          # was '"no encoding for: " + value.__class__', which raised
          # TypeError instead of the intended ValueError
          raise ValueError("no encoding for: %s" % value.__class__)
        codec.encode_shortstr(key)
        codec.encode_octet(self.codes[type])
        codec.encode(type, value)
    s = enc.getvalue()
    self.encode_long(len(s))
    self.write(s)

  def decode_table(self):
    """
    decodes a table data structure in network byte order
    """
    size = self.decode_long()
    start = self.nread
    result = {}
    while self.nread - start < size:
      key = self.decode_shortstr()
      code = self.decode_octet()
      if code in self.types:
        value = self.decode(self.types[code])
      else:
        # unknown typecode: skip over the raw bytes per the 0-10
        # width/fixed rules
        w = width(code)
        if fixed(code):
          value = self.read(w)
        else:
          value = self.read(self.dec_num(w))
      result[key] = value
    return result

  def encode_timestamp(self, t):
    """
    encodes a timestamp data structure in network byte order
    """
    self.encode_longlong(t)

  def decode_timestamp(self):
    """
    decodes a timestamp data structure in network byte order
    """
    return self.decode_longlong()

  def encode_content(self, s):
    """
    encodes a content data structure in network byte order

    content can be passed as a string in which case it is assumed to
    be inline data, or as an instance of ReferenceId indicating it is
    a reference id
    """
    if isinstance(s, ReferenceId):
      self.encode_octet(1)
      self.encode_longstr(s.id)
    else:
      self.encode_octet(0)
      self.encode_longstr(s)

  def decode_content(self):
    """
    decodes a content data structure in network byte order

    return a string for inline data and a ReferenceId instance for
    references
    """
    type = self.decode_octet()
    if type == 0:
      return self.decode_longstr()
    else:
      return ReferenceId(self.decode_longstr())

  # new domains for 0-10:

  def encode_rfc1982_long(self, s):
    self.encode_long(s)

  def decode_rfc1982_long(self):
    return self.decode_long()

  def encode_rfc1982_long_set(self, s):
    self.encode_short(len(s) * 4)
    for i in s:
      self.encode_long(i)

  def decode_rfc1982_long_set(self):
    # // keeps this an int even under true division
    count = self.decode_short() // 4
    values = []
    for i in range(0, count):
      values.append(self.decode_long())
    return values

  def encode_uuid(self, s):
    self.pack("16s", s)

  def decode_uuid(self):
    return self.unpack("16s")

  def enc_num(self, width, n):
    """Encode an unsigned number of the given byte width (1, 2 or 4)."""
    if width == 1:
      self.encode_octet(n)
    elif width == 2:
      self.encode_short(n)
    elif width == 4:
      # was "width == 3", which wrote 4 bytes for width 3 and raised
      # for width 4; mirrors dec_num where 4 maps to decode_long
      self.encode_long(n)
    else:
      raise ValueError("invalid width: %s" % width)

  def dec_num(self, width):
    """Decode an unsigned number of the given byte width (1, 2 or 4)."""
    if width == 1:
      return self.decode_octet()
    elif width == 2:
      return self.decode_short()
    elif width == 4:
      return self.decode_long()
    else:
      raise ValueError("invalid width: %s" % width)

  def encode_struct(self, type, s):
    if type.size:
      enc = StringIO()
      codec = Codec(enc, self.spec)
      codec.encode_struct_body(type, s)
      codec.flush()
      body = enc.getvalue()
      self.enc_num(type.size, len(body))
      self.write(body)
    else:
      self.encode_struct_body(type, s)

  def decode_struct(self, type):
    # NOTE(review): when type.size is 0 this returns None rather than
    # decoding an unsized struct body (asymmetric with encode_struct);
    # confirm against callers before changing
    if type.size:
      size = self.dec_num(type.size)
      if size == 0:
        return None
      return self.decode_struct_body(type)

  def encode_struct_body(self, type, s):
    # each field is preceded by a presence bit; pack rounds the bit
    # block up to whole octets, the remainder being reserved flags
    reserved = 8*type.pack - len(type.fields)
    assert reserved >= 0

    for f in type.fields:
      if s is None:
        self.encode_bit(False)
      elif f.type == "bit":
        self.encode_bit(s.get(f.name))
      else:
        self.encode_bit(s.has(f.name))

    for i in range(reserved):
      self.encode_bit(False)

    for f in type.fields:
      if f.type != "bit" and s is not None and s.has(f.name):
        self.encode(f.type, s.get(f.name))

    self.flush()

  def decode_struct_body(self, type):
    reserved = 8*type.pack - len(type.fields)
    assert reserved >= 0

    s = qpid.Struct(type)

    for f in type.fields:
      if f.type == "bit":
        s.set(f.name, self.decode_bit())
      elif self.decode_bit():
        # field present; placeholder replaced in the value pass below
        s.set(f.name, None)

    for i in range(reserved):
      if self.decode_bit():
        raise ValueError("expecting reserved flag")

    for f in type.fields:
      if f.type != "bit" and s.has(f.name):
        s.set(f.name, self.decode(f.type))

    self.clearbits()

    return s

  def encode_long_struct(self, s):
    enc = StringIO()
    codec = Codec(enc, self.spec)
    type = s.type
    codec.encode_short(type.type)
    codec.encode_struct_body(type, s)
    self.encode_longstr(enc.getvalue())

  def decode_long_struct(self):
    codec = Codec(StringIO(self.decode_longstr()), self.spec)
    type = self.spec.structs[codec.decode_short()]
    return codec.decode_struct_body(type)

  def decode_array(self):
    size = self.decode_long()
    code = self.decode_octet()
    count = self.decode_long()
    result = []
    for i in range(0, count):
      if code in self.types:
        value = self.decode(self.types[code])
      else:
        w = width(code)
        if fixed(code):
          value = self.read(w)
        else:
          value = self.read(self.dec_num(w))
      result.append(value)
    return result
+
def fixed(code):
  """Return True when the typecode denotes a fixed-width value.

  Variable-width typecodes occupy the 0b10xxxxxx quadrant; every
  other quadrant is fixed width.
  """
  quadrant = code >> 6
  return quadrant != 2
+
def width(code):
  """Return the byte width implied by a typecode.

  Decimal codes (>= 192) have selected fixed widths; variable-width
  codes (128-191) yield the width of their size prefix; anything else
  is a plain fixed-width code.  Raises ValueError for invalid codes.
  """
  if code >= 192:
    # decimal
    decsel = (code >> 4) & 3
    if decsel == 0:
      return 5
    if decsel == 1:
      return 9
    if decsel == 3:
      return 0
    raise ValueError(code)
  if code >= 128:
    # variable width: size prefix is 2**lenlen bytes
    lenlen = (code >> 4) & 3
    if lenlen == 3:
      raise ValueError(code)
    return 2 ** lenlen
  # fixed width
  return (code >> 4) & 7
diff --git a/RC9/qpid/python/qpid/codec010.py b/RC9/qpid/python/qpid/codec010.py
new file mode 100644
index 0000000000..f34025ef17
--- /dev/null
+++ b/RC9/qpid/python/qpid/codec010.py
@@ -0,0 +1,301 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import datetime
+from packer import Packer
+from datatypes import serial, timestamp, RangedSet, Struct
+
# Raised by the map/array/list encoders when a value has no registered
# 0-10 encoding (see write_map below).
class CodecException(Exception): pass
+
class Codec(Packer):
  """AMQP 0-10 primitive codec layered on Packer's read/write/pack.

  Each 0-10 type has a read_<type>/write_<type> pair; compound types
  (map, array, list, structs) are encoded through a nested StringCodec
  so their size prefix can be computed.
  """

  def __init__(self, spec):
    self.spec = spec

  def write_void(self, v):
    assert v is None
  def read_void(self):
    return None

  def write_bit(self, b):
    # a bit is encoded by its presence; writing a false bit is an error
    if not b: raise ValueError(b)
  def read_bit(self):
    return True

  def read_uint8(self):
    return self.unpack("!B")
  def write_uint8(self, n):
    return self.pack("!B", n)

  def read_int8(self):
    return self.unpack("!b")
  def write_int8(self, n):
    self.pack("!b", n)

  def read_char(self):
    return self.unpack("!c")
  def write_char(self, c):
    self.pack("!c", c)

  def read_boolean(self):
    return self.read_uint8() != 0
  def write_boolean(self, b):
    if b: n = 1
    else: n = 0
    self.write_uint8(n)


  def read_uint16(self):
    return self.unpack("!H")
  def write_uint16(self, n):
    self.pack("!H", n)

  def read_int16(self):
    return self.unpack("!h")
  def write_int16(self, n):
    self.pack("!h", n)


  def read_uint32(self):
    return self.unpack("!L")
  def write_uint32(self, n):
    self.pack("!L", n)

  def read_int32(self):
    return self.unpack("!l")
  def write_int32(self, n):
    self.pack("!l", n)

  def read_float(self):
    return self.unpack("!f")
  def write_float(self, f):
    self.pack("!f", f)

  def read_sequence_no(self):
    return serial(self.read_uint32())
  def write_sequence_no(self, n):
    self.write_uint32(n.value)


  def read_uint64(self):
    return self.unpack("!Q")
  def write_uint64(self, n):
    self.pack("!Q", n)

  def read_int64(self):
    return self.unpack("!q")
  def write_int64(self, n):
    self.pack("!q", n)

  def read_datetime(self):
    return timestamp(self.read_uint64())
  def write_datetime(self, t):
    if isinstance(t, datetime.datetime):
      t = timestamp(t)
    self.write_uint64(t)

  def read_double(self):
    return self.unpack("!d")
  def write_double(self, d):
    self.pack("!d", d)

  def read_vbin8(self):
    return self.read(self.read_uint8())
  def write_vbin8(self, b):
    self.write_uint8(len(b))
    self.write(b)

  def read_str8(self):
    return self.read_vbin8().decode("utf8")
  def write_str8(self, s):
    self.write_vbin8(s.encode("utf8"))

  def read_str16(self):
    return self.read_vbin16().decode("utf8")
  def write_str16(self, s):
    self.write_vbin16(s.encode("utf8"))


  def read_vbin16(self):
    return self.read(self.read_uint16())
  def write_vbin16(self, b):
    self.write_uint16(len(b))
    self.write(b)

  def read_sequence_set(self):
    # each range is two sequence-nos, i.e. 8 bytes on the wire
    result = RangedSet()
    size = self.read_uint16()
    nranges = size // 8
    while nranges > 0:
      lower = self.read_sequence_no()
      upper = self.read_sequence_no()
      result.add(lower, upper)
      nranges -= 1
    return result
  def write_sequence_set(self, ss):
    size = 8*len(ss.ranges)
    self.write_uint16(size)
    for rng in ss.ranges:
      self.write_sequence_no(rng.lower)
      self.write_sequence_no(rng.upper)

  def read_vbin32(self):
    return self.read(self.read_uint32())
  def write_vbin32(self, b):
    self.write_uint32(len(b))
    self.write(b)

  def write_map(self, m):
    sc = StringCodec(self.spec)
    if m is not None:
      sc.write_uint32(len(m))
      for k, v in m.items():
        type = self.spec.encoding(v.__class__)
        if type is None:
          raise CodecException("no encoding for %s" % v.__class__)
        sc.write_str8(k)
        sc.write_uint8(type.code)
        type.encode(sc, v)
    self.write_vbin32(sc.encoded)
  def read_map(self):
    sc = StringCodec(self.spec, self.read_vbin32())
    # an empty payload distinguishes a null map from an empty one
    if not sc.encoded:
      return None
    count = sc.read_uint32()
    result = {}
    while sc.encoded:
      k = sc.read_str8()
      code = sc.read_uint8()
      type = self.spec.types[code]
      v = type.decode(sc)
      result[k] = v
    return result

  def write_array(self, a):
    sc = StringCodec(self.spec)
    if a is not None:
      # arrays are homogeneous: the first element fixes the type
      if len(a) > 0:
        type = self.spec.encoding(a[0].__class__)
      else:
        type = self.spec.encoding(None.__class__)
      sc.write_uint8(type.code)
      sc.write_uint32(len(a))
      for o in a:
        type.encode(sc, o)
    self.write_vbin32(sc.encoded)
  def read_array(self):
    sc = StringCodec(self.spec, self.read_vbin32())
    if not sc.encoded:
      return None
    type = self.spec.types[sc.read_uint8()]
    count = sc.read_uint32()
    result = []
    while count > 0:
      result.append(type.decode(sc))
      count -= 1
    return result

  def write_list(self, l):
    sc = StringCodec(self.spec)
    if l is not None:
      sc.write_uint32(len(l))
      # unlike arrays, lists carry a typecode per element
      for o in l:
        type = self.spec.encoding(o.__class__)
        sc.write_uint8(type.code)
        type.encode(sc, o)
    self.write_vbin32(sc.encoded)
  def read_list(self):
    sc = StringCodec(self.spec, self.read_vbin32())
    if not sc.encoded:
      return None
    count = sc.read_uint32()
    result = []
    while count > 0:
      type = self.spec.types[sc.read_uint8()]
      result.append(type.decode(sc))
      count -= 1
    return result

  def read_struct32(self):
    size = self.read_uint32()
    code = self.read_uint16()
    type = self.spec.structs[code]
    fields = type.decode_fields(self)
    return Struct(type, **fields)
  def write_struct32(self, value):
    sc = StringCodec(self.spec)
    sc.write_uint16(value._type.code)
    value._type.encode_fields(sc, value)
    self.write_vbin32(sc.encoded)

  def read_control(self):
    cntrl = self.spec.controls[self.read_uint16()]
    return Struct(cntrl, **cntrl.decode_fields(self))
  def write_control(self, ctrl):
    type = ctrl._type
    self.write_uint16(type.code)
    type.encode_fields(self, ctrl)

  def read_command(self):
    type = self.spec.commands[self.read_uint16()]
    hdr = self.spec["session.header"].decode(self)
    cmd = Struct(type, **type.decode_fields(self))
    return hdr, cmd
  def write_command(self, hdr, cmd):
    self.write_uint16(cmd._type.code)
    hdr._type.encode(self, hdr)
    cmd._type.encode_fields(self, cmd)

  def read_size(self, width):
    # width 0 means no size prefix; returns None in that case
    if width > 0:
      attr = "read_uint%d" % (width*8)
      return getattr(self, attr)()

  def write_size(self, width, n):
    if width > 0:
      attr = "write_uint%d" % (width*8)
      getattr(self, attr)(n)

  def read_uuid(self):
    return self.unpack("16s")

  def write_uuid(self, s):
    self.pack("16s", s)

  def read_bin128(self):
    return self.unpack("16s")

  def write_bin128(self, b):
    self.pack("16s", b)
+
+
+
class StringCodec(Codec):
  """Codec variant backed by an in-memory string buffer instead of a
  socket: writes append to .encoded, reads consume from its front."""

  def __init__(self, spec, encoded = ""):
    Codec.__init__(self, spec)
    self.encoded = encoded

  def write(self, s):
    self.encoded = self.encoded + s

  def read(self, n):
    head, tail = self.encoded[:n], self.encoded[n:]
    self.encoded = tail
    return head
diff --git a/RC9/qpid/python/qpid/compat.py b/RC9/qpid/python/qpid/compat.py
new file mode 100644
index 0000000000..26f60fb8aa
--- /dev/null
+++ b/RC9/qpid/python/qpid/compat.py
@@ -0,0 +1,28 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
# Compatibility shims for older Python 2.x interpreters.

# The builtin set type appeared in Python 2.4; fall back to the
# sets module's Set class when it is missing.
try:
  set = set
except NameError:
  from sets import Set as set

# socket.SHUT_RDWR is not defined on older versions; fall back to
# its numeric value.
try:
  from socket import SHUT_RDWR
except ImportError:
  SHUT_RDWR = 2
diff --git a/RC9/qpid/python/qpid/connection.py b/RC9/qpid/python/qpid/connection.py
new file mode 100644
index 0000000000..4c9c02822a
--- /dev/null
+++ b/RC9/qpid/python/qpid/connection.py
@@ -0,0 +1,218 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import datatypes, session, socket
+from threading import Thread, Condition, RLock
+from util import wait, notify
+from assembler import Assembler, Segment
+from codec010 import StringCodec
+from session import Session
+from invoker import Invoker
+from spec010 import Control, Command, load
+from spec import default
+from exceptions import *
+from logging import getLogger
+import delegates
+
+# Raised when attaching a session to a channel already bound to a
+# different session.
+class ChannelBusy(Exception): pass
+
+# Raised when every channel id up to channel_max is already in use.
+class ChannelsBusy(Exception): pass
+
+# Raised when the named session is already attached to another channel
+# and force was not requested.
+class SessionBusy(Exception): pass
+
+# Raised by Connection.start() when the peer closes the connection
+# before the open handshake completes.
+class ConnectionFailed(Exception): pass
+
+def client(*args, **kwargs):
+  """Default delegate factory: client side of the connection handshake."""
+  return delegates.Client(*args, **kwargs)
+
+def server(*args, **kwargs):
+  """Delegate factory: server side of the connection handshake."""
+  return delegates.Server(*args, **kwargs)
+
+class SSLWrapper:
+
+  """Adapts an SSL object's read/write interface to the recv/send
+  interface expected of a plain socket."""
+
+  def __init__(self, ssl):
+    self.ssl = ssl
+
+  def recv(self, n):
+    return self.ssl.read(n)
+
+  def send(self, s):
+    return self.ssl.write(s)
+
+def sslwrap(sock):
+  # Wrap SSL sockets so they expose recv/send; plain sockets pass through
+  # unchanged. (socket.SSLType is the pre-`ssl`-module SSL object type.)
+  if isinstance(sock, socket.SSLType):
+    return SSLWrapper(sock)
+  else:
+    return sock
+
+class Connection(Assembler):
+
+  """An AMQP 0-10 connection: multiplexes named Sessions over numbered
+  channels on one socket and runs a daemon reader thread that feeds
+  incoming segments to the delegate."""
+
+  def __init__(self, sock, spec=None, delegate=client, **args):
+    Assembler.__init__(self, sslwrap(sock))
+    if spec == None:
+      spec = load(default())
+    self.spec = spec
+    self.track = self.spec["track"]
+
+    # lock guards the attached/sessions maps below.
+    self.lock = RLock()
+    # channel id -> Session currently bound to that channel
+    self.attached = {}
+    # session name -> Session
+    self.sessions = {}
+
+    # condition/opened/failed drive the open and close handshakes;
+    # close_code records (reply-code, reply-text) from connection.close.
+    self.condition = Condition()
+    self.opened = False
+    self.failed = False
+    self.close_code = (None, "connection aborted")
+
+    # Daemonized so a lingering reader never blocks interpreter exit.
+    self.thread = Thread(target=self.run)
+    self.thread.setDaemon(True)
+
+    self.channel_max = 65535
+
+    self.delegate = delegate(self, **args)
+
+  def attach(self, name, ch, delegate, force=False):
+    """Bind the named session to channel ch, creating the session on
+    first use; with force=True an existing channel binding is stolen.
+    Raises ChannelBusy or SessionBusy on conflicting bindings."""
+    self.lock.acquire()
+    try:
+      ssn = self.attached.get(ch.id)
+      if ssn is not None:
+        if ssn.name != name:
+          raise ChannelBusy(ch, ssn)
+      else:
+        ssn = self.sessions.get(name)
+        if ssn is None:
+          ssn = Session(name, self.spec, delegate=delegate)
+          self.sessions[name] = ssn
+        elif ssn.channel is not None:
+          if force:
+            del self.attached[ssn.channel.id]
+            ssn.channel = None
+          else:
+            raise SessionBusy(ssn)
+        self.attached[ch.id] = ssn
+        ssn.channel = ch
+      ch.session = ssn
+      return ssn
+    finally:
+      self.lock.release()
+
+  def detach(self, name, ch):
+    """Unbind the named session from channel ch and mark it closed.
+    Returns the session, or None if it was not registered."""
+    self.lock.acquire()
+    try:
+      self.attached.pop(ch.id, None)
+      ssn = self.sessions.pop(name, None)
+      if ssn is not None:
+        ssn.channel = None
+        ssn.closed()
+        return ssn
+    finally:
+      self.lock.release()
+
+  def __channel(self):
+    # Pick the lowest unused channel id. XXX: ch 0?
+    for i in xrange(self.channel_max):
+      if not self.attached.has_key(i):
+        return i
+    else:
+      # for/else: only reached when every id is taken (no early return).
+      raise ChannelsBusy()
+
+  def session(self, name, timeout=None, delegate=session.client):
+    """Attach a new session on a free channel and wait (up to timeout)
+    for the peer to confirm; raises Timeout on expiry."""
+    self.lock.acquire()
+    try:
+      ch = Channel(self, self.__channel())
+      ssn = self.attach(name, ch, delegate)
+      ssn.channel.session_attach(name)
+      if wait(ssn.condition, lambda: ssn.channel is not None, timeout):
+        return ssn
+      else:
+        self.detach(name, ch)
+        raise Timeout()
+    finally:
+      self.lock.release()
+
+  def detach_all(self):
+    # Tear down every attached session, propagating a non-200 close
+    # code to each session's exception list first.
+    self.lock.acquire()
+    try:
+      for ssn in self.attached.values():
+        if self.close_code[0] != 200:
+          ssn.exceptions.append(self.close_code)
+        self.detach(ssn.name, ssn.channel)
+    finally:
+      self.lock.release()
+
+  def start(self, timeout=None):
+    """Run the open handshake; raises Timeout if it does not finish in
+    time and ConnectionFailed if the peer refuses the connection."""
+    self.delegate.start()
+    self.thread.start()
+    if not wait(self.condition, lambda: self.opened or self.failed, timeout):
+      raise Timeout()
+    if self.failed:
+      raise ConnectionFailed(*self.close_code)
+
+  def run(self):
+    # Reader loop (runs on self.thread).
+    # XXX: we don't really have a good way to exit this loop without
+    # getting the other end to kill the socket
+    while True:
+      try:
+        seg = self.read_segment()
+      except Closed:
+        self.detach_all()
+        break
+      self.delegate.received(seg)
+
+  def close(self, timeout=None):
+    """Send connection.close (code 200) on channel 0 and wait for the
+    close handshake plus reader-thread exit; raises Timeout on expiry."""
+    if not self.opened: return
+    Channel(self, 0).connection_close(200)
+    if not wait(self.condition, lambda: not self.opened, timeout):
+      raise Timeout()
+    self.thread.join(timeout=timeout)
+
+  def __str__(self):
+    # self.sock is presumably set by Assembler — TODO confirm.
+    return "%s:%s" % self.sock.getsockname()
+
+  def __repr__(self):
+    return str(self)
+
+log = getLogger("qpid.io.ctl")
+
+class Channel(Invoker):
+
+  """A numbered channel on a Connection.  Via Invoker, attribute access
+  like ch.session_attach(...) resolves to a protocol Control and sends
+  it as a complete segment."""
+
+  def __init__(self, connection, id):
+    self.connection = connection
+    self.id = id
+    # The Session bound to this channel, if any (set by Connection.attach).
+    self.session = None
+
+  def resolve_method(self, name):
+    # Only Control instructions may be invoked on a channel.
+    inst = self.connection.spec.instructions.get(name)
+    if inst is not None and isinstance(inst, Control):
+      return self.METHOD, inst
+    else:
+      return self.ERROR, None
+
+  def invoke(self, type, args, kwargs):
+    # Encode the control into its own first-and-last segment and write
+    # it out on this channel.
+    ctl = type.new(args, kwargs)
+    sc = StringCodec(self.connection.spec)
+    sc.write_control(ctl)
+    self.connection.write_segment(Segment(True, True, type.segment_type,
+                                          type.track, self.id, sc.encoded))
+    log.debug("SENT %s", ctl)
+
+  def __str__(self):
+    return "%s[%s]" % (self.connection, self.id)
+
+  def __repr__(self):
+    return str(self)
diff --git a/RC9/qpid/python/qpid/connection08.py b/RC9/qpid/python/qpid/connection08.py
new file mode 100644
index 0000000000..be94a792cb
--- /dev/null
+++ b/RC9/qpid/python/qpid/connection08.py
@@ -0,0 +1,493 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+A Connection class containing socket code that uses the spec metadata
+to read and write Frame objects. This could be used by a client,
+server, or even a proxy implementation.
+"""
+
+import socket, codec, logging, qpid
+from cStringIO import StringIO
+from spec import load
+from codec import EOF
+from compat import SHUT_RDWR
+
+class SockIO:
+
+  """File-like wrapper over a socket providing write/read/flush/close
+  for the 0-8/0-9 codec."""
+
+  def __init__(self, sock):
+    self.sock = sock
+
+  def write(self, buf):
+# print "OUT: %r" % buf
+    self.sock.sendall(buf)
+
+  def read(self, n):
+    # Read until n bytes are collected, EOF, or a socket error.
+    # NOTE(review): socket.error is swallowed, so callers may silently
+    # receive fewer than n bytes on error as well as on EOF.
+    data = ""
+    while len(data) < n:
+      try:
+        s = self.sock.recv(n - len(data))
+      except socket.error:
+        break
+      if len(s) == 0:
+        break
+# print "IN: %r" % s
+      data += s
+    return data
+
+  def flush(self):
+    # Writes go straight to the socket; nothing is buffered here.
+    pass
+
+  def close(self):
+    # Shut down both directions before closing the descriptor.
+    self.sock.shutdown(SHUT_RDWR)
+    self.sock.close()
+
+def connect(host, port):
+  """Open a blocking TCP connection to host:port, wrapped in SockIO."""
+  sock = socket.socket()
+  sock.connect((host, port))
+  sock.setblocking(1)
+  return SockIO(sock)
+
+def listen(host, port, predicate = lambda: True):
+  """Generator: accept connections on host:port, yielding a SockIO per
+  client for as long as predicate() stays true (checked before each
+  accept)."""
+  sock = socket.socket()
+  sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+  sock.bind((host, port))
+  sock.listen(5)
+  while predicate():
+    s, a = sock.accept()
+    yield SockIO(s)
+
+class Connection:
+
+  """Version-agnostic frame reader/writer.  read/write are bound at
+  construction to the read_X_Y/write_X_Y pair matching the spec's
+  major.minor (8-0, 0-9, 0-10, or the 99-0 development alias)."""
+
+  def __init__(self, io, spec):
+    self.codec = codec.Codec(io, spec)
+    self.spec = spec
+    self.FRAME_END = self.spec.constants.byname["frame_end"].id
+    self.write = getattr(self, "write_%s_%s" % (self.spec.major, self.spec.minor))
+    self.read = getattr(self, "read_%s_%s" % (self.spec.major, self.spec.minor))
+
+  def flush(self):
+    self.codec.flush()
+
+  # Protocol header: "AMQP" followed by four version/class octets.
+  INIT="!4s4B"
+
+  def init(self):
+    # Send the protocol initiation header.
+    self.codec.pack(Connection.INIT, "AMQP", 1, 1, self.spec.major,
+                    self.spec.minor)
+
+  def tini(self):
+    # Consume (and discard) the peer's protocol initiation header.
+    self.codec.unpack(Connection.INIT)
+
+  def write_8_0(self, frame):
+    # Legacy framing: type octet, channel, length-prefixed body, end octet.
+    c = self.codec
+    c.encode_octet(self.spec.constants.byname[frame.type].id)
+    c.encode_short(frame.channel)
+    body = StringIO()
+    enc = codec.Codec(body, self.spec)
+    frame.encode(enc)
+    enc.flush()
+    c.encode_longstr(body.getvalue())
+    c.encode_octet(self.FRAME_END)
+
+  def read_8_0(self):
+    c = self.codec
+    type = self.spec.constants.byid[c.decode_octet()].name
+    channel = c.decode_short()
+    body = c.decode_longstr()
+    dec = codec.Codec(StringIO(body), self.spec)
+    frame = Frame.DECODERS[type].decode(self.spec, dec, len(body))
+    frame.channel = channel
+    end = c.decode_octet()
+    if end != self.FRAME_END:
+      # Collect the garbage up to the next end marker for the error text.
+      garbage = ""
+      while end != self.FRAME_END:
+        garbage += chr(end)
+        end = c.decode_octet()
+      # NOTE(review): raising a string is a TypeError on Python >= 2.6;
+      # this should raise a proper exception instance.
+      raise "frame error: expected %r, got %r" % (self.FRAME_END, garbage)
+    return frame
+
+  def write_0_9(self, frame):
+    # 0-9 framing is wire-identical to 8-0 here.
+    self.write_8_0(frame)
+
+  def read_0_9(self):
+    return self.read_8_0()
+
+  def write_0_10(self, frame):
+    # 0-10 framing: flags, type, size, subchannel, channel, then body.
+    c = self.codec
+    flags = 0
+    if frame.bof: flags |= 0x08
+    if frame.eof: flags |= 0x04
+    if frame.bos: flags |= 0x02
+    if frame.eos: flags |= 0x01
+
+    c.encode_octet(flags) # TODO: currently fixed at ver=0, B=E=b=e=1
+    c.encode_octet(self.spec.constants.byname[frame.type].id)
+    body = StringIO()
+    enc = codec.Codec(body, self.spec)
+    frame.encode(enc)
+    enc.flush()
+    frame_size = len(body.getvalue()) + 12 # TODO: Magic number (frame header size)
+    c.encode_short(frame_size)
+    c.encode_octet(0) # Reserved
+    c.encode_octet(frame.subchannel & 0x0f)
+    c.encode_short(frame.channel)
+    c.encode_long(0) # Reserved
+    c.write(body.getvalue())
+    c.encode_octet(self.FRAME_END)
+
+  def read_0_10(self):
+    c = self.codec
+    flags = c.decode_octet() # TODO: currently ignoring flags
+    framing_version = (flags & 0xc0) >> 6
+    if framing_version != 0:
+      # NOTE(review): string raise — TypeError on Python >= 2.6.
+      raise "frame error: unknown framing version"
+    type = self.spec.constants.byid[c.decode_octet()].name
+    frame_size = c.decode_short()
+    if frame_size < 12: # TODO: Magic number (frame header size)
+      raise "frame error: frame size too small"
+    reserved1 = c.decode_octet()
+    field = c.decode_octet()
+    subchannel = field & 0x0f
+    channel = c.decode_short()
+    reserved2 = c.decode_long() # TODO: reserved maybe need to ensure 0
+    if (flags & 0x30) != 0 or reserved1 != 0 or (field & 0xf0) != 0:
+      raise "frame error: reserved bits not all zero"
+    body_size = frame_size - 12 # TODO: Magic number (frame header size)
+    body = c.read(body_size)
+    dec = codec.Codec(StringIO(body), self.spec)
+    try:
+      frame = Frame.DECODERS[type].decode(self.spec, dec, len(body))
+    except EOF:
+      raise "truncated frame body: %r" % body
+    frame.channel = channel
+    frame.subchannel = subchannel
+    end = c.decode_octet()
+    if end != self.FRAME_END:
+      garbage = ""
+      while end != self.FRAME_END:
+        garbage += chr(end)
+        end = c.decode_octet()
+      raise "frame error: expected %r, got %r" % (self.FRAME_END, garbage)
+    return frame
+
+  def write_99_0(self, frame):
+    # 99-0 is the development alias for the 0-10 wire format.
+    self.write_0_10(frame)
+
+  def read_99_0(self):
+    return self.read_0_10()
+
+class Frame:
+
+  """Base class for wire frames.  The metaclass enforces that each
+  subclass defines encode/decode/type, registers concrete subclasses in
+  DECODERS keyed by their type constant, and wraps __init__ so common
+  keyword args (channel, subchannel) are split off before the subclass
+  constructor runs."""
+
+  # frame type name -> Frame subclass, populated by the metaclass.
+  DECODERS = {}
+
+  class __metaclass__(type):
+
+    def __new__(cls, name, bases, dict):
+      for attr in ("encode", "decode", "type"):
+        if not dict.has_key(attr):
+          raise TypeError("%s must define %s" % (name, attr))
+      dict["decode"] = staticmethod(dict["decode"])
+      if dict.has_key("__init__"):
+        __init__ = dict["__init__"]
+        # Wrap the subclass constructor: Frame.init consumes the shared
+        # channel/subchannel kwargs, then the original __init__ runs.
+        def init(self, *args, **kwargs):
+          args = list(args)
+          self.init(args, kwargs)
+          __init__(self, *args, **kwargs)
+        dict["__init__"] = init
+      t = type.__new__(cls, name, bases, dict)
+      if t.type != None:
+        Frame.DECODERS[t.type] = t
+      return t
+
+  # None marks this base class as abstract (not registered in DECODERS).
+  type = None
+
+  def init(self, args, kwargs):
+    # Defaults for the frame-boundary flags; subclasses adjust them.
+    self.channel = kwargs.pop("channel", 0)
+    self.subchannel = kwargs.pop("subchannel", 0)
+    self.bos = True
+    self.eos = True
+    self.bof = True
+    self.eof = True
+
+  # `abstract` is deliberately undefined: calling these raises NameError,
+  # a crude abstract-method marker.
+  def encode(self, enc): abstract
+
+  def decode(spec, dec, size): abstract
+
+class Method(Frame):
+
+  """A method frame: a spec method plus its positional argument values."""
+
+  type = "frame_method"
+
+  def __init__(self, method, args):
+    if len(args) != len(method.fields):
+      argspec = ["%s: %s" % (f.name, f.type)
+                 for f in method.fields]
+      raise TypeError("%s.%s expecting (%s), got %s" %
+                      (method.klass.name, method.name, ", ".join(argspec),
+                       args))
+    self.method = method
+    self.method_type = method
+    self.args = args
+    # A content-bearing method is followed by header/body frames, so it
+    # does not end the frame sequence.
+    self.eof = not method.content
+
+  def encode(self, c):
+    # 0-10/99-0 shrank class and method ids from shorts to octets.
+    version = (c.spec.major, c.spec.minor)
+    if version == (0, 10) or version == (99, 0):
+      c.encode_octet(self.method.klass.id)
+      c.encode_octet(self.method.id)
+    else:
+      c.encode_short(self.method.klass.id)
+      c.encode_short(self.method.id)
+    for field, arg in zip(self.method.fields, self.args):
+      c.encode(field.type, arg)
+
+  def decode(spec, c, size):
+    version = (c.spec.major, c.spec.minor)
+    if version == (0, 10) or version == (99, 0):
+      klass = spec.classes.byid[c.decode_octet()]
+      meth = klass.methods.byid[c.decode_octet()]
+    else:
+      klass = spec.classes.byid[c.decode_short()]
+      meth = klass.methods.byid[c.decode_short()]
+    args = tuple([c.decode(f.type) for f in meth.fields])
+    return Method(meth, args)
+
+  def __str__(self):
+    return "[%s] %s %s" % (self.channel, self.method,
+                           ", ".join([str(a) for a in self.args]))
+
+class Request(Frame):
+
+  """A request frame (0-9 era): wraps a Method with a request id and the
+  highest response id seen so far (response_mark)."""
+
+  type = "frame_request"
+
+  def __init__(self, id, response_mark, method):
+    self.id = id
+    self.response_mark = response_mark
+    self.method = method
+    # Mirror the wrapped method so callers can treat either uniformly.
+    self.method_type = method.method_type
+    self.args = method.args
+
+  def encode(self, enc):
+    enc.encode_longlong(self.id)
+    enc.encode_longlong(self.response_mark)
+    # reserved
+    enc.encode_long(0)
+    self.method.encode(enc)
+
+  def decode(spec, dec, size):
+    id = dec.decode_longlong()
+    mark = dec.decode_longlong()
+    # reserved
+    dec.decode_long()
+    # 20 = two longlongs + one reserved long consumed above.
+    method = Method.decode(spec, dec, size - 20)
+    return Request(id, mark, method)
+
+  def __str__(self):
+    return "[%s] Request(%s) %s" % (self.channel, self.id, self.method)
+
+class Response(Frame):
+
+  """A response frame (0-9 era): a Method answering a request, with
+  batch_offset allowing one response to cover a range of requests."""
+
+  type = "frame_response"
+
+  def __init__(self, id, request_id, batch_offset, method):
+    self.id = id
+    self.request_id = request_id
+    self.batch_offset = batch_offset
+    self.method = method
+    # Mirror the wrapped method so callers can treat either uniformly.
+    self.method_type = method.method_type
+    self.args = method.args
+
+  def encode(self, enc):
+    enc.encode_longlong(self.id)
+    enc.encode_longlong(self.request_id)
+    enc.encode_long(self.batch_offset)
+    self.method.encode(enc)
+
+  def decode(spec, dec, size):
+    id = dec.decode_longlong()
+    request_id = dec.decode_longlong()
+    batch_offset = dec.decode_long()
+    # 20 = two longlongs + one long consumed above.
+    method = Method.decode(spec, dec, size - 20)
+    return Response(id, request_id, batch_offset, method)
+
+  def __str__(self):
+    return "[%s] Response(%s,%s,%s) %s" % (self.channel, self.id, self.request_id, self.batch_offset, self.method)
+
+def uses_struct_encoding(spec):
+  # 0-10 (and its 99-0 development alias) encode headers as structs
+  # instead of the legacy class-id/property-flags layout.
+  return (spec.major == 0 and spec.minor == 10) or (spec.major == 99 and spec.minor == 0)
+
+class Header(Frame):
+
+  """A content header frame: message properties plus the body size.
+  Supports both the legacy (class id + property flags) and the 0-10
+  struct-based encodings, selected by uses_struct_encoding()."""
+
+  type = "frame_header"
+
+  def __init__(self, klass, weight, size, properties):
+    self.klass = klass
+    self.weight = weight
+    self.size = size
+    self.properties = properties
+    # A zero-size header is also the last frame of the message.
+    self.eof = size == 0
+    self.bof = False
+
+  def __getitem__(self, name):
+    return self.properties[name]
+
+  def __setitem__(self, name, value):
+    self.properties[name] = value
+
+  def __delitem__(self, name):
+    del self.properties[name]
+
+  def encode(self, c):
+    if uses_struct_encoding(c.spec):
+      self.encode_structs(c)
+    else:
+      self.encode_legacy(c)
+
+  def encode_structs(self, c):
+    # 0-10 encoding: distribute the flat property dict across the
+    # delivery-properties and message-properties structs.
+    # XXX
+    structs = [qpid.Struct(c.spec.domains.byname["delivery_properties"].type),
+               qpid.Struct(c.spec.domains.byname["message_properties"].type)]
+
+    # XXX
+    props = self.properties.copy()
+    for k in self.properties:
+      for s in structs:
+        if s.exists(k):
+          s.set(k, props.pop(k))
+    if props:
+      # Anything left over matched neither struct.
+      raise TypeError("no such property: %s" % (", ".join(props)))
+
+    # message properties store the content-length now, and weight is
+    # deprecated
+    if self.size != None:
+      structs[1].content_length = self.size
+
+    for s in structs:
+      c.encode_long_struct(s)
+
+  def encode_legacy(self, c):
+    c.encode_short(self.klass.id)
+    c.encode_short(self.weight)
+    c.encode_longlong(self.size)
+
+    # property flags: one bit per property, 15 bits per short, with the
+    # low bit of each short indicating whether another short follows.
+    nprops = len(self.klass.fields)
+    flags = 0
+    for i in range(nprops):
+      f = self.klass.fields.items[i]
+      flags <<= 1
+      if self.properties.get(f.name) != None:
+        flags |= 1
+      # the last bit indicates more flags
+      if i > 0 and (i % 15) == 0:
+        flags <<= 1
+        if nprops > (i + 1):
+          flags |= 1
+        c.encode_short(flags)
+        flags = 0
+    # Left-justify the final partial group into its 16-bit short.
+    flags <<= ((16 - (nprops % 15)) % 16)
+    c.encode_short(flags)
+
+    # properties
+    for f in self.klass.fields:
+      v = self.properties.get(f.name)
+      if v != None:
+        c.encode(f.type, v)
+
+  def decode(spec, c, size):
+    if uses_struct_encoding(spec):
+      return Header.decode_structs(spec, c, size)
+    else:
+      return Header.decode_legacy(spec, c, size)
+
+  def decode_structs(spec, c, size):
+    # Read consecutive long structs until `size` bytes are consumed.
+    structs = []
+    start = c.nread
+    while c.nread - start < size:
+      structs.append(c.decode_long_struct())
+
+    # Flatten the structs back into a single property dict; pull
+    # content_length out as the body size.
+    # XXX
+    props = {}
+    length = None
+    for s in structs:
+      for f in s.type.fields:
+        if s.has(f.name):
+          props[f.name] = s.get(f.name)
+          if f.name == "content_length":
+            length = s.get(f.name)
+    return Header(None, 0, length, props)
+
+  decode_structs = staticmethod(decode_structs)
+
+  def decode_legacy(spec, c, size):
+    klass = spec.classes.byid[c.decode_short()]
+    weight = c.decode_short()
+    size = c.decode_longlong()
+
+    # property flags
+    bits = []
+    while True:
+      flags = c.decode_short()
+      for i in range(15, 0, -1):
+        if flags >> i & 0x1 != 0:
+          bits.append(True)
+        else:
+          bits.append(False)
+      # Low bit set means another flags short follows.
+      if flags & 0x1 == 0:
+        break
+
+    # properties
+    properties = {}
+    for b, f in zip(bits, klass.fields):
+      if b:
+        # Note: decode returns a unicode u'' string but only
+        # plain '' strings can be used as keywords so we need to
+        # stringify the names.
+        properties[str(f.name)] = c.decode(f.type)
+    return Header(klass, weight, size, properties)
+
+  decode_legacy = staticmethod(decode_legacy)
+
+  def __str__(self):
+    return "%s %s %s %s" % (self.klass, self.weight, self.size,
+                            self.properties)
+
+class Body(Frame):
+
+  """A content body frame: an opaque chunk of message payload."""
+
+  type = "frame_body"
+
+  def __init__(self, content):
+    self.content = content
+    self.eof = True
+    self.bof = False
+
+  def encode(self, enc):
+    enc.write(self.content)
+
+  def decode(spec, dec, size):
+    return Body(dec.read(size))
+
+  def __str__(self):
+    return "Body(%r)" % self.content
+
+# TODO:
+# OOB_METHOD = "frame_oob_method"
+# OOB_HEADER = "frame_oob_header"
+# OOB_BODY = "frame_oob_body"
+# TRACE = "frame_trace"
+# HEARTBEAT = "frame_heartbeat"
diff --git a/RC9/qpid/python/qpid/content.py b/RC9/qpid/python/qpid/content.py
new file mode 100644
index 0000000000..9391f4f1a8
--- /dev/null
+++ b/RC9/qpid/python/qpid/content.py
@@ -0,0 +1,58 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+A simple python representation for AMQP content.
+"""
+
+def default(val, defval):
+  """Return val unless it is None, in which case return defval.
+  Used to avoid mutable default arguments in Content below."""
+  if val == None:
+    return defval
+  else:
+    return val
+
+class Content:
+
+  """AMQP message content: a body string, nested child Content objects
+  (for multi-part content), and a property dict exposed via item access."""
+
+  def __init__(self, body = "", children = None, properties = None):
+    self.body = body
+    # default() guards against sharing one mutable default across instances.
+    self.children = default(children, [])
+    self.properties = default(properties, {})
+
+  def size(self):
+    # Byte length of this body only; children are not included.
+    return len(self.body)
+
+  def weight(self):
+    # "weight" in the legacy header sense: the number of children.
+    return len(self.children)
+
+  def __getitem__(self, name):
+    return self.properties[name]
+
+  def __setitem__(self, name, value):
+    self.properties[name] = value
+
+  def __delitem__(self, name):
+    del self.properties[name]
+
+  def __str__(self):
+    if self.children:
+      return "%s [%s] %s" % (self.properties,
+                             ", ".join(map(str, self.children)),
+                             self.body)
+    else:
+      return "%s %s" % (self.properties, self.body)
diff --git a/RC9/qpid/python/qpid/datatypes.py b/RC9/qpid/python/qpid/datatypes.py
new file mode 100644
index 0000000000..eb1f86b0b0
--- /dev/null
+++ b/RC9/qpid/python/qpid/datatypes.py
@@ -0,0 +1,349 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import threading, struct, datetime, time
+
+class Struct:
+
+  """A spec-driven record: one attribute per field of the given struct
+  type, settable positionally or by keyword, defaulting via the field's
+  default() when omitted.  Mimics Python call semantics (duplicate and
+  unexpected argument errors included)."""
+
+  def __init__(self, _type, *args, **kwargs):
+    if len(args) > len(_type.fields):
+      raise TypeError("%s() takes at most %s arguments (%s given)" %
+                      (_type.name, len(_type.fields), len(args)))
+
+    self._type = _type
+
+    idx = 0
+    for field in _type.fields:
+      if idx < len(args):
+        arg = args[idx]
+        # A positional value and a keyword for the same field conflict.
+        if kwargs.has_key(field.name):
+          raise TypeError("%s() got multiple values for keyword argument '%s'" %
+                          (_type.name, field.name))
+      elif kwargs.has_key(field.name):
+        arg = kwargs.pop(field.name)
+      else:
+        arg = field.default()
+      setattr(self, field.name, arg)
+      idx += 1
+
+    # Anything left in kwargs matched no field name.
+    if kwargs:
+      unexpected = kwargs.keys()[0]
+      raise TypeError("%s() got an unexpected keyword argument '%s'" %
+                      (_type.name, unexpected))
+
+  def __getitem__(self, name):
+    return getattr(self, name)
+
+  def __setitem__(self, name, value):
+    # Only existing fields may be assigned; no ad-hoc attributes.
+    if not hasattr(self, name):
+      raise AttributeError("'%s' object has no attribute '%s'" %
+                           (self._type.name, name))
+    setattr(self, name, value)
+
+  def __repr__(self):
+    # Show only fields the type considers present (non-default/encodable).
+    fields = []
+    for f in self._type.fields:
+      v = self[f.name]
+      if f.type.is_present(v):
+        fields.append("%s=%r" % (f.name, v))
+    return "%s(%s)" % (self._type.name, ", ".join(fields))
+
+class Message:
+
+  """A message: optional header Structs plus a body.  Constructed as
+  Message(header1, ..., body) — the last positional argument is always
+  the body."""
+
+  def __init__(self, *args):
+    if args:
+      self.body = args[-1]
+    else:
+      self.body = None
+    if len(args) > 1:
+      self.headers = list(args[:-1])
+    else:
+      self.headers = None
+    # Transfer id, assigned when the message is sent/received.
+    self.id = None
+
+  def has(self, name):
+    return self.get(name) != None
+
+  def get(self, name):
+    """Return the first header whose struct type is named `name`, or None."""
+    if self.headers:
+      for h in self.headers:
+        if h._type.name == name:
+          return h
+    return None
+
+  def set(self, header):
+    """Add `header`, replacing any existing header of the same type."""
+    if self.headers is None:
+      self.headers = []
+    idx = 0
+    while idx < len(self.headers):
+      if self.headers[idx]._type == header._type:
+        self.headers[idx] = header
+        return
+      idx += 1
+    self.headers.append(header)
+
+  def clear(self, name):
+    """Remove the first header whose struct type is named `name`.
+    NOTE(review): raises TypeError if no header was ever set (headers
+    is None) — presumably callers only clear after a set."""
+    idx = 0
+    while idx < len(self.headers):
+      if self.headers[idx]._type.name == name:
+        del self.headers[idx]
+        return
+      idx += 1
+
+  def __repr__(self):
+    args = []
+    if self.headers:
+      args.extend(map(repr, self.headers))
+    if self.body:
+      args.append(repr(self.body))
+    if self.id is not None:
+      args.append("id=%s" % self.id)
+    return "Message(%s)" % ", ".join(args)
+
+def serial(o):
+  """Coerce o to a Serial; an existing Serial passes through unchanged."""
+  if isinstance(o, Serial):
+    return o
+  else:
+    return Serial(o)
+
+class Serial:
+
+  """A 32-bit serial number with wraparound comparison (RFC 1982 style):
+  values are ordered by the shorter way around the 2^32 circle."""
+
+  def __init__(self, value):
+    # Truncate to 32 bits so arithmetic wraps.
+    self.value = value & 0xFFFFFFFF
+
+  def __hash__(self):
+    return hash(self.value)
+
+  def __cmp__(self, other):
+    if other is None:
+      return 1
+
+    other = serial(other)
+
+    # Signed interpretation of the 32-bit difference decides the order.
+    delta = (self.value - other.value) & 0xFFFFFFFF
+    neg = delta & 0x80000000
+    mag = delta & 0x7FFFFFFF
+
+    # NOTE(review): when delta is exactly 0x80000000 (opposite sides of
+    # the circle) mag is 0, so -mag == 0 and the values compare equal.
+    if neg:
+      return -mag
+    else:
+      return mag
+
+  def __add__(self, other):
+    return Serial(self.value + other)
+
+  def __sub__(self, other):
+    return Serial(self.value - other)
+
+  def __repr__(self):
+    return "serial(%s)" % self.value
+
+  def __str__(self):
+    return str(self.value)
+
+class Range:
+
+  """An inclusive range [lower, upper] of Serial values; a single value
+  when upper is omitted."""
+
+  def __init__(self, lower, upper = None):
+    self.lower = serial(lower)
+    if upper is None:
+      self.upper = self.lower
+    else:
+      self.upper = serial(upper)
+
+  def __contains__(self, n):
+    # Inclusive at both ends.
+    return self.lower <= n and n <= self.upper
+
+  def __iter__(self):
+    i = self.lower
+    while i <= self.upper:
+      yield i
+      i += 1
+
+  def touches(self, r):
+    """True if the ranges overlap or are adjacent (mergeable into one)."""
+    # XXX: are we doing more checks than we need?
+    return (self.lower - 1 in r or
+            self.upper + 1 in r or
+            r.lower - 1 in self or
+            r.upper + 1 in self or
+            self.lower in r or
+            self.upper in r or
+            r.lower in self or
+            r.upper in self)
+
+  def span(self, r):
+    """Smallest single range covering both self and r."""
+    return Range(min(self.lower, r.lower), max(self.upper, r.upper))
+
+  def intersect(self, r):
+    """Overlap of self and r, or None if they are disjoint."""
+    lower = max(self.lower, r.lower)
+    upper = min(self.upper, r.upper)
+    if lower > upper:
+      return None
+    else:
+      return Range(lower, upper)
+
+  def __repr__(self):
+    return "%s-%s" % (self.lower, self.upper)
+
+class RangedSet:
+
+  """A set of serial values stored as a sorted list of disjoint Ranges;
+  adding a range merges it with any ranges it touches."""
+
+  def __init__(self, *args):
+    self.ranges = []
+    for n in args:
+      self.add(n)
+
+  def __contains__(self, n):
+    for r in self.ranges:
+      if n in r:
+        return True
+    return False
+
+  def add_range(self, range):
+    # Walk the sorted list: absorb every touching range into `range`,
+    # insert before the first strictly-greater range, else append.
+    idx = 0
+    while idx < len(self.ranges):
+      r = self.ranges[idx]
+      if range.touches(r):
+        del self.ranges[idx]
+        range = range.span(r)
+      elif range.upper < r.lower:
+        self.ranges.insert(idx, range)
+        return
+      else:
+        idx += 1
+    self.ranges.append(range)
+
+  def add(self, lower, upper = None):
+    self.add_range(Range(lower, upper))
+
+  def __iter__(self):
+    # Iterates Ranges, not individual values.
+    return iter(self.ranges)
+
+  def __repr__(self):
+    return str(self.ranges)
+
+class Future:
+  """A one-shot value that another thread will set(); get() blocks until
+  it is set or errored.  `exception` is the type raised to wrap an error."""
+  def __init__(self, initial=None, exception=Exception):
+    self.value = initial
+    self._error = None
+    self._set = threading.Event()
+    self.exception = exception
+
+  def error(self, error):
+    # Record a failure; get() will raise self.exception(error).
+    self._error = error
+    self._set.set()
+
+  def set(self, value):
+    self.value = value
+    self._set.set()
+
+  def get(self, timeout=None):
+    # NOTE(review): a wait timeout is not distinguished from success —
+    # get() then silently returns the initial value; callers needing to
+    # know must check is_set().
+    self._set.wait(timeout)
+    if self._error != None:
+      raise self.exception(self._error)
+    return self.value
+
+  def is_set(self):
+    return self._set.isSet()
+
+# Prefer the stdlib uuid module (Python >= 2.5); otherwise fall back to
+# a hand-rolled random (version 4) UUID generator.
+try:
+  import uuid
+  def random_uuid():
+    # Returns the UUID as 16 raw big-endian bytes.
+    return uuid.uuid4().get_bytes()
+except ImportError:
+  import random
+  def random_uuid():
+    bytes = [random.randint(0, 255) for i in xrange(16)]
+
+    # From RFC4122, the version bits are set to 0100
+    # NOTE(review): RFC 4122 puts the version nibble in octet 6, but
+    # this sets octet 7 — looks like an off-by-one; confirm.
+    bytes[7] &= 0x0F
+    bytes[7] |= 0x40
+
+    # From RFC4122, the top two bits of byte 8 get set to 01
+    bytes[8] &= 0x3F
+    bytes[8] |= 0x80
+    return "".join(map(chr, bytes))
+
+def uuid4():
+  """Return a new random (version 4) UUID object."""
+  return UUID(random_uuid())
+
+class UUID:
+
+  """A UUID held as 16 raw bytes; hashable and comparable to other UUIDs."""
+
+  def __init__(self, bytes):
+    self.bytes = bytes
+
+  def __cmp__(self, other):
+    if isinstance(other, UUID):
+      return cmp(self.bytes, other.bytes)
+    # NOTE(review): NotImplemented is not callable and not an exception;
+    # this raises TypeError when reached.  Probably meant
+    # `return NotImplemented` (or `raise NotImplementedError()`).
+    raise NotImplemented()
+
+  def __str__(self):
+    # Standard 8-4-4-4-12 hex rendering of the 16 bytes.
+    return "%08x-%04x-%04x-%04x-%04x%08x" % struct.unpack("!LHHHHL", self.bytes)
+
+  def __repr__(self):
+    return "UUID(%r)" % str(self)
+
+  def __hash__(self):
+    return self.bytes.__hash__()
+
+class timestamp(float):
+
+  """A POSIX timestamp (seconds since the epoch) as a float subclass.
+  Constructible from nothing (now), a datetime, or a number; timedelta
+  arithmetic round-trips through datetime so calendar math is correct."""
+
+  def __new__(cls, obj=None):
+    if obj is None:
+      obj = time.time()
+    elif isinstance(obj, datetime.datetime):
+      # mktime gives whole seconds (local time); add microseconds back.
+      obj = time.mktime(obj.timetuple()) + 1e-6 * obj.microsecond
+    return super(timestamp, cls).__new__(cls, obj)
+
+  def datetime(self):
+    # Local-time naive datetime for this instant.
+    return datetime.datetime.fromtimestamp(self)
+
+  def __add__(self, other):
+    if isinstance(other, datetime.timedelta):
+      return timestamp(self.datetime() + other)
+    else:
+      return timestamp(float(self) + other)
+
+  def __sub__(self, other):
+    if isinstance(other, datetime.timedelta):
+      return timestamp(self.datetime() - other)
+    else:
+      return timestamp(float(self) - other)
+
+  def __radd__(self, other):
+    if isinstance(other, datetime.timedelta):
+      return timestamp(self.datetime() + other)
+    else:
+      return timestamp(other + float(self))
+
+  def __rsub__(self, other):
+    if isinstance(other, datetime.timedelta):
+      return timestamp(self.datetime() - other)
+    else:
+      return timestamp(other - float(self))
+
+  def __neg__(self):
+    return timestamp(-float(self))
+
+  def __pos__(self):
+    return self
+
+  def __abs__(self):
+    return timestamp(abs(float(self)))
+
+  def __repr__(self):
+    return "timestamp(%r)" % float(self)
diff --git a/RC9/qpid/python/qpid/delegate.py b/RC9/qpid/python/qpid/delegate.py
new file mode 100644
index 0000000000..b447c4aa29
--- /dev/null
+++ b/RC9/qpid/python/qpid/delegate.py
@@ -0,0 +1,53 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+Delegate implementation intended for use with the peer module.
+"""
+
+import threading, inspect, traceback, sys
+from connection08 import Method, Request, Response
+
+def _handler_name(method):
+  # Map a spec method to its handler method name, e.g. "basic_deliver".
+  return "%s_%s" % (method.klass.name, method.name)
+
+class Delegate:
+
+  """Dispatches incoming frames to handler methods named after the
+  spec method (klass_method), caching lookups per method object."""
+
+  def __init__(self):
+    # spec method -> bound handler, filled lazily on first dispatch.
+    self.handlers = {}
+    self.invokers = {}
+
+  def __call__(self, channel, frame):
+    method = frame.method
+
+    try:
+      handler = self.handlers[method]
+    except KeyError:
+      # First time this method is seen: resolve by naming convention.
+      name = _handler_name(method)
+      handler = getattr(self, name)
+      self.handlers[method] = handler
+
+    try:
+      return handler(channel, frame)
+    except:
+      # NOTE(review): swallows all handler exceptions (prints the
+      # traceback and returns None) — deliberate best-effort dispatch.
+      print >> sys.stderr, "Error in handler: %s\n\n%s" % \
+            (_handler_name(method), traceback.format_exc())
+
+  def closed(self, reason):
+    # Default connection-closed hook; subclasses may override.
+    print "Connection closed: %s" % reason
diff --git a/RC9/qpid/python/qpid/delegates.py b/RC9/qpid/python/qpid/delegates.py
new file mode 100644
index 0000000000..bf26553dda
--- /dev/null
+++ b/RC9/qpid/python/qpid/delegates.py
@@ -0,0 +1,162 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import os, connection, session
+from util import notify
+from datatypes import RangedSet
+from logging import getLogger
+
+log = getLogger("qpid.io.ctl")
+
+class Delegate:
+
+ def __init__(self, connection, delegate=session.client):
+ self.connection = connection
+ self.spec = connection.spec
+ self.delegate = delegate
+ self.control = self.spec["track.control"].value
+
+ def received(self, seg):
+ ssn = self.connection.attached.get(seg.channel)
+ if ssn is None:
+ ch = connection.Channel(self.connection, seg.channel)
+ else:
+ ch = ssn.channel
+
+ if seg.track == self.control:
+ ctl = seg.decode(self.spec)
+ log.debug("RECV %s", ctl)
+ attr = ctl._type.qname.replace(".", "_")
+ getattr(self, attr)(ch, ctl)
+ elif ssn is None:
+ ch.session_detached()
+ else:
+ ssn.received(seg)
+
+ def connection_close(self, ch, close):
+ self.connection.close_code = (close.reply_code, close.reply_text)
+ ch.connection_close_ok()
+ self.connection.sock.close()
+ if not self.connection.opened:
+ self.connection.failed = True
+ notify(self.connection.condition)
+
+ def connection_close_ok(self, ch, close_ok):
+ self.connection.opened = False
+ notify(self.connection.condition)
+
+ def session_attach(self, ch, a):
+ try:
+ self.connection.attach(a.name, ch, self.delegate, a.force)
+ ch.session_attached(a.name)
+ except connection.ChannelBusy:
+ ch.session_detached(a.name)
+ except connection.SessionBusy:
+ ch.session_detached(a.name)
+
+ def session_attached(self, ch, a):
+ notify(ch.session.condition)
+
+ def session_detach(self, ch, d):
+ #send back the confirmation of detachment before removing the
+ #channel from the attached set; this avoids needing to hold the
+ #connection lock during the sending of this control and ensures
+ #that if the channel is immediately reused for a new session the
+ #attach request will follow the detached notification.
+ ch.session_detached(d.name)
+ ssn = self.connection.detach(d.name, ch)
+
+ def session_detached(self, ch, d):
+ self.connection.detach(d.name, ch)
+
+ def session_request_timeout(self, ch, rt):
+    ch.session_timeout(rt.timeout)
+
+ def session_command_point(self, ch, cp):
+ ssn = ch.session
+ ssn.receiver.next_id = cp.command_id
+ ssn.receiver.next_offset = cp.command_offset
+
+ def session_completed(self, ch, cmp):
+ ch.session.sender.completed(cmp.commands)
+ if cmp.timely_reply:
+ ch.session_known_completed(cmp.commands)
+ notify(ch.session.condition)
+
+ def session_known_completed(self, ch, kn_cmp):
+ ch.session.receiver.known_completed(kn_cmp.commands)
+
+ def session_flush(self, ch, f):
+ rcv = ch.session.receiver
+ if f.expected:
+      if rcv.next_id is None:
+ exp = None
+ else:
+ exp = RangedSet(rcv.next_id)
+ ch.session_expected(exp)
+ if f.confirmed:
+ ch.session_confirmed(rcv._completed)
+ if f.completed:
+ ch.session_completed(rcv._completed)
+
+class Server(Delegate):
+
+ def start(self):
+ self.connection.read_header()
+ self.connection.write_header(self.spec.major, self.spec.minor)
+ connection.Channel(self.connection, 0).connection_start(mechanisms=["ANONYMOUS"])
+
+ def connection_start_ok(self, ch, start_ok):
+ ch.connection_tune(channel_max=65535)
+
+ def connection_tune_ok(self, ch, tune_ok):
+ pass
+
+ def connection_open(self, ch, open):
+ self.connection.opened = True
+ ch.connection_open_ok()
+ notify(self.connection.condition)
+
+class Client(Delegate):
+
+ PROPERTIES = {"product": "qpid python client",
+ "version": "development",
+ "platform": os.name}
+
+ def __init__(self, connection, username="guest", password="guest", mechanism="PLAIN"):
+ Delegate.__init__(self, connection)
+ self.username = username
+ self.password = password
+ self.mechanism = mechanism
+
+ def start(self):
+ self.connection.write_header(self.spec.major, self.spec.minor)
+ self.connection.read_header()
+
+ def connection_start(self, ch, start):
+ r = "\0%s\0%s" % (self.username, self.password)
+ ch.connection_start_ok(client_properties=Client.PROPERTIES, mechanism=self.mechanism, response=r)
+
+ def connection_tune(self, ch, tune):
+ ch.connection_tune_ok()
+ ch.connection_open()
+
+ def connection_open_ok(self, ch, open_ok):
+ self.connection.opened = True
+ notify(self.connection.condition)
diff --git a/RC9/qpid/python/qpid/disp.py b/RC9/qpid/python/qpid/disp.py
new file mode 100644
index 0000000000..e46cb33c60
--- /dev/null
+++ b/RC9/qpid/python/qpid/disp.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from time import strftime, gmtime
+
+class Display:
+ """ Display formatting for QPID Management CLI """
+
+ def __init__ (self):
+ self.tableSpacing = 2
+ self.tablePrefix = " "
+ self.timestampFormat = "%X"
+
+ def table (self, title, heads, rows):
+ """ Print a formatted table with autosized columns """
+ print title
+ if len (rows) == 0:
+ return
+ colWidth = []
+ col = 0
+ line = self.tablePrefix
+ for head in heads:
+ width = len (head)
+ for row in rows:
+ cellWidth = len (unicode (row[col]))
+ if cellWidth > width:
+ width = cellWidth
+ colWidth.append (width + self.tableSpacing)
+ line = line + head
+ if col < len (heads) - 1:
+ for i in range (colWidth[col] - len (head)):
+ line = line + " "
+ col = col + 1
+ print line
+ line = self.tablePrefix
+ for width in colWidth:
+ for i in range (width):
+ line = line + "="
+ print line
+
+ for row in rows:
+ line = self.tablePrefix
+ col = 0
+ for width in colWidth:
+ line = line + unicode (row[col])
+ if col < len (heads) - 1:
+ for i in range (width - len (unicode (row[col]))):
+ line = line + " "
+ col = col + 1
+ print line
+
+ def do_setTimeFormat (self, fmt):
+ """ Select timestamp format """
+ if fmt == "long":
+ self.timestampFormat = "%c"
+ elif fmt == "short":
+ self.timestampFormat = "%X"
+
+ def timestamp (self, nsec):
+ """ Format a nanosecond-since-the-epoch timestamp for printing """
+ return strftime (self.timestampFormat, gmtime (nsec / 1000000000))
diff --git a/RC9/qpid/python/qpid/exceptions.py b/RC9/qpid/python/qpid/exceptions.py
new file mode 100644
index 0000000000..7eaaf81ed4
--- /dev/null
+++ b/RC9/qpid/python/qpid/exceptions.py
@@ -0,0 +1,21 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+class Closed(Exception): pass
+class Timeout(Exception): pass
diff --git a/RC9/qpid/python/qpid/framer.py b/RC9/qpid/python/qpid/framer.py
new file mode 100644
index 0000000000..f6363b2291
--- /dev/null
+++ b/RC9/qpid/python/qpid/framer.py
@@ -0,0 +1,159 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import struct, socket
+from exceptions import Closed
+from packer import Packer
+from threading import RLock
+from logging import getLogger
+
+raw = getLogger("qpid.io.raw")
+frm = getLogger("qpid.io.frm")
+
+FIRST_SEG = 0x08
+LAST_SEG = 0x04
+FIRST_FRM = 0x02
+LAST_FRM = 0x01
+
+class Frame:
+
+ HEADER = "!2BHxBH4x"
+ MAX_PAYLOAD = 65535 - struct.calcsize(HEADER)
+
+ def __init__(self, flags, type, track, channel, payload):
+ if len(payload) > Frame.MAX_PAYLOAD:
+ raise ValueError("max payload size exceeded: %s" % len(payload))
+ self.flags = flags
+ self.type = type
+ self.track = track
+ self.channel = channel
+ self.payload = payload
+
+ def isFirstSegment(self):
+ return bool(FIRST_SEG & self.flags)
+
+ def isLastSegment(self):
+ return bool(LAST_SEG & self.flags)
+
+ def isFirstFrame(self):
+ return bool(FIRST_FRM & self.flags)
+
+ def isLastFrame(self):
+ return bool(LAST_FRM & self.flags)
+
+ def __str__(self):
+ return "%s%s%s%s %s %s %s %r" % (int(self.isFirstSegment()),
+ int(self.isLastSegment()),
+ int(self.isFirstFrame()),
+ int(self.isLastFrame()),
+ self.type,
+ self.track,
+ self.channel,
+ self.payload)
+
+class FramingError(Exception): pass
+
+class Framer(Packer):
+
+ HEADER="!4s4B"
+
+ def __init__(self, sock):
+ self.sock = sock
+ self.sock_lock = RLock()
+ self._buf = ""
+
+ def aborted(self):
+ return False
+
+ def write(self, buf):
+ self._buf += buf
+
+ def flush(self):
+ self.sock_lock.acquire()
+ try:
+ self._write(self._buf)
+ self._buf = ""
+ frm.debug("FLUSHED")
+ finally:
+ self.sock_lock.release()
+
+ def _write(self, buf):
+ while buf:
+ try:
+ n = self.sock.send(buf)
+ except socket.timeout:
+ if self.aborted():
+ raise Closed()
+ else:
+ continue
+ raw.debug("SENT %r", buf[:n])
+ buf = buf[n:]
+
+ def read(self, n):
+ data = ""
+ while len(data) < n:
+ try:
+ s = self.sock.recv(n - len(data))
+ except socket.timeout:
+ if self.aborted():
+ raise Closed()
+ else:
+ continue
+ except socket.error, e:
+ if data != "":
+ raise e
+ else:
+ raise Closed()
+ if len(s) == 0:
+ raise Closed()
+ data += s
+ raw.debug("RECV %r", s)
+ return data
+
+ def read_header(self):
+ return self.unpack(Framer.HEADER)
+
+ def write_header(self, major, minor):
+ self.sock_lock.acquire()
+ try:
+ self.pack(Framer.HEADER, "AMQP", 1, 1, major, minor)
+ self.flush()
+ finally:
+ self.sock_lock.release()
+
+ def write_frame(self, frame):
+ self.sock_lock.acquire()
+ try:
+ size = len(frame.payload) + struct.calcsize(Frame.HEADER)
+ track = frame.track & 0x0F
+ self.pack(Frame.HEADER, frame.flags, frame.type, size, track, frame.channel)
+ self.write(frame.payload)
+ if frame.isLastSegment() and frame.isLastFrame():
+ self.flush()
+ frm.debug("SENT %s", frame)
+ finally:
+ self.sock_lock.release()
+
+ def read_frame(self):
+ flags, type, size, track, channel = self.unpack(Frame.HEADER)
+ if flags & 0xF0: raise FramingError()
+ payload = self.read(size - struct.calcsize(Frame.HEADER))
+ frame = Frame(flags, type, track, channel, payload)
+ frm.debug("RECV %s", frame)
+ return frame
diff --git a/RC9/qpid/python/qpid/invoker.py b/RC9/qpid/python/qpid/invoker.py
new file mode 100644
index 0000000000..635f3ee769
--- /dev/null
+++ b/RC9/qpid/python/qpid/invoker.py
@@ -0,0 +1,48 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import sys
+
+# TODO: need a better naming for this class now that it does the value
+# stuff
+class Invoker:
+
+ def METHOD(self, name, resolved):
+ method = lambda *args, **kwargs: self.invoke(resolved, args, kwargs)
+ if sys.version_info[:2] > (2, 3):
+ method.__name__ = resolved.pyname
+ method.__doc__ = resolved.pydoc
+ method.__module__ = self.__class__.__module__
+ self.__dict__[name] = method
+ return method
+
+ def VALUE(self, name, resolved):
+ self.__dict__[name] = resolved
+ return resolved
+
+ def ERROR(self, name, resolved):
+ raise AttributeError("%s instance has no attribute '%s'" %
+ (self.__class__.__name__, name))
+
+ def resolve_method(self, name):
+    return self.ERROR, None
+
+ def __getattr__(self, name):
+ disp, resolved = self.resolve_method(name)
+ return disp(name, resolved)
diff --git a/RC9/qpid/python/qpid/log.py b/RC9/qpid/python/qpid/log.py
new file mode 100644
index 0000000000..1fd7d74136
--- /dev/null
+++ b/RC9/qpid/python/qpid/log.py
@@ -0,0 +1,28 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from logging import getLogger, StreamHandler, Formatter
+from logging import DEBUG, INFO, WARN, ERROR, CRITICAL
+
+def enable(name=None, level=WARN, file=None):
+ log = getLogger(name)
+ handler = StreamHandler(file)
+ handler.setFormatter(Formatter("%(asctime)s %(levelname)s %(message)s"))
+ log.addHandler(handler)
+ log.setLevel(level)
diff --git a/RC9/qpid/python/qpid/management.py b/RC9/qpid/python/qpid/management.py
new file mode 100644
index 0000000000..477f3e8f2b
--- /dev/null
+++ b/RC9/qpid/python/qpid/management.py
@@ -0,0 +1,913 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+###############################################################################
+## This file is being obsoleted by qmf/console.py
+###############################################################################
+
+"""
+Management API for Qpid
+"""
+
+import qpid
+import struct
+import socket
+from threading import Thread
+from datatypes import Message, RangedSet
+from time import time
+from cStringIO import StringIO
+from codec010 import StringCodec as Codec
+from threading import Lock, Condition
+
+
+class SequenceManager:
+ """ Manage sequence numbers for asynchronous method calls """
+ def __init__ (self):
+ self.lock = Lock ()
+ self.sequence = 0
+ self.pending = {}
+
+ def reserve (self, data):
+ """ Reserve a unique sequence number """
+ self.lock.acquire ()
+ result = self.sequence
+ self.sequence = self.sequence + 1
+ self.pending[result] = data
+ self.lock.release ()
+ return result
+
+ def release (self, seq):
+ """ Release a reserved sequence number """
+ data = None
+ self.lock.acquire ()
+ if seq in self.pending:
+ data = self.pending[seq]
+ del self.pending[seq]
+ self.lock.release ()
+ return data
+
+
+class mgmtObject (object):
+ """ Generic object that holds the contents of a management object with its
+ attributes set as object attributes. """
+
+ def __init__ (self, classKey, timestamps, row):
+ self.classKey = classKey
+ self.timestamps = timestamps
+ for cell in row:
+ setattr (self, cell[0], cell[1])
+
+class objectId(object):
+ """ Object that represents QMF object identifiers """
+
+ def __init__(self, codec, first=0, second=0):
+ if codec:
+ self.first = codec.read_uint64()
+ self.second = codec.read_uint64()
+ else:
+ self.first = first
+ self.second = second
+
+ def __cmp__(self, other):
+    if other is None:
+ return 1
+ if self.first < other.first:
+ return -1
+ if self.first > other.first:
+ return 1
+ if self.second < other.second:
+ return -1
+ if self.second > other.second:
+ return 1
+ return 0
+
+
+ def index(self):
+ return (self.first, self.second)
+
+ def getFlags(self):
+ return (self.first & 0xF000000000000000) >> 60
+
+ def getSequence(self):
+ return (self.first & 0x0FFF000000000000) >> 48
+
+ def getBroker(self):
+ return (self.first & 0x0000FFFFF0000000) >> 28
+
+ def getBank(self):
+ return self.first & 0x000000000FFFFFFF
+
+ def getObject(self):
+ return self.second
+
+ def isDurable(self):
+ return self.getSequence() == 0
+
+ def encode(self, codec):
+ codec.write_uint64(self.first)
+ codec.write_uint64(self.second)
+
+
+class methodResult:
+ """ Object that contains the result of a method call """
+
+ def __init__ (self, status, sText, args):
+ self.status = status
+ self.statusText = sText
+ for arg in args:
+ setattr (self, arg, args[arg])
+
+class brokerInfo:
+ """ Object that contains information about a broker and the session to it """
+
+ def __init__ (self, brokerId, sessionId):
+ self.brokerId = brokerId
+ self.sessionId = sessionId
+
+class managementChannel:
+ """ This class represents a connection to an AMQP broker. """
+
+ def __init__ (self, ssn, topicCb, replyCb, exceptionCb, cbContext, _detlife=0):
+ """ Given a channel on an established AMQP broker connection, this method
+ opens a session and performs all of the declarations and bindings needed
+ to participate in the management protocol. """
+ self.enabled = True
+ self.ssn = ssn
+ self.sessionId = ssn.name
+ self.topicName = "mgmt-%s" % self.sessionId
+ self.replyName = "repl-%s" % self.sessionId
+ self.qpidChannel = ssn
+ self.tcb = topicCb
+ self.rcb = replyCb
+ self.ecb = exceptionCb
+ self.context = cbContext
+ self.reqsOutstanding = 0
+ self.brokerInfo = None
+
+ ssn.auto_sync = False
+ ssn.queue_declare (queue=self.topicName, exclusive=True, auto_delete=True)
+ ssn.queue_declare (queue=self.replyName, exclusive=True, auto_delete=True)
+
+ ssn.exchange_bind (exchange="amq.direct",
+ queue=self.replyName, binding_key=self.replyName)
+ ssn.message_subscribe (queue=self.topicName, destination="tdest",
+ accept_mode=ssn.accept_mode.none,
+ acquire_mode=ssn.acquire_mode.pre_acquired)
+ ssn.message_subscribe (queue=self.replyName, destination="rdest",
+ accept_mode=ssn.accept_mode.none,
+ acquire_mode=ssn.acquire_mode.pre_acquired)
+
+ ssn.incoming ("tdest").listen (self.topicCb, self.exceptionCb)
+ ssn.incoming ("rdest").listen (self.replyCb)
+
+ ssn.message_set_flow_mode (destination="tdest", flow_mode=1)
+ ssn.message_flow (destination="tdest", unit=0, value=0xFFFFFFFF)
+ ssn.message_flow (destination="tdest", unit=1, value=0xFFFFFFFF)
+
+ ssn.message_set_flow_mode (destination="rdest", flow_mode=1)
+ ssn.message_flow (destination="rdest", unit=0, value=0xFFFFFFFF)
+ ssn.message_flow (destination="rdest", unit=1, value=0xFFFFFFFF)
+
+ def setBrokerInfo (self, data):
+ self.brokerInfo = data
+
+ def shutdown (self):
+ self.enabled = False
+ self.ssn.incoming("tdest").stop()
+ self.ssn.incoming("rdest").stop()
+
+ def topicCb (self, msg):
+ """ Receive messages via the topic queue on this channel. """
+ if self.enabled:
+ self.tcb (self, msg)
+
+ def replyCb (self, msg):
+ """ Receive messages via the reply queue on this channel. """
+ if self.enabled:
+ self.rcb (self, msg)
+
+ def exceptionCb (self, data):
+ if self.ecb != None:
+ self.ecb (self, data)
+
+ def send (self, exchange, msg):
+ if self.enabled:
+ self.qpidChannel.message_transfer (destination=exchange, message=msg)
+
+ def message (self, body, routing_key="broker"):
+ dp = self.qpidChannel.delivery_properties()
+ dp.routing_key = routing_key
+ mp = self.qpidChannel.message_properties()
+ mp.content_type = "application/octet-stream"
+ mp.reply_to = self.qpidChannel.reply_to("amq.direct", self.replyName)
+ return Message(dp, mp, body)
+
+
+class managementClient:
+ """ This class provides an API for access to management data on the AMQP
+ network. It implements the management protocol and manages the management
+ schemas as advertised by the various management agents in the network. """
+
+ CTRL_BROKER_INFO = 1
+ CTRL_SCHEMA_LOADED = 2
+ CTRL_USER = 3
+ CTRL_HEARTBEAT = 4
+
+ SYNC_TIME = 10.0
+
+ #========================================================
+ # User API - interacts with the class's user
+ #========================================================
+ def __init__ (self, amqpSpec, ctrlCb=None, configCb=None, instCb=None, methodCb=None, closeCb=None):
+ self.spec = amqpSpec
+ self.ctrlCb = ctrlCb
+ self.configCb = configCb
+ self.instCb = instCb
+ self.methodCb = methodCb
+ self.closeCb = closeCb
+ self.schemaCb = None
+ self.eventCb = None
+ self.channels = []
+ self.seqMgr = SequenceManager ()
+ self.schema = {}
+ self.packages = {}
+ self.cv = Condition ()
+ self.syncInFlight = False
+ self.syncSequence = 0
+ self.syncResult = None
+
+ def schemaListener (self, schemaCb):
+ """ Optionally register a callback to receive details of the schema of
+ managed objects in the network. """
+ self.schemaCb = schemaCb
+
+ def eventListener (self, eventCb):
+ """ Optionally register a callback to receive events from managed objects
+ in the network. """
+ self.eventCb = eventCb
+
+ def addChannel (self, channel, cbContext=None):
+ """ Register a new channel. """
+ mch = managementChannel (channel, self.topicCb, self.replyCb, self.exceptCb, cbContext)
+
+ self.channels.append (mch)
+ self.incOutstanding (mch)
+ codec = Codec (self.spec)
+ self.setHeader (codec, ord ('B'))
+ msg = mch.message(codec.encoded)
+ mch.send ("qpid.management", msg)
+ return mch
+
+ def removeChannel (self, mch):
+ """ Remove a previously added channel from management. """
+ mch.shutdown ()
+ self.channels.remove (mch)
+
+ def callMethod (self, channel, userSequence, objId, className, methodName, args=None):
+ """ Invoke a method on a managed object. """
+ self.method (channel, userSequence, objId, className, methodName, args)
+
+ def getObjects (self, channel, userSequence, className, bank=0):
+ """ Request immediate content from broker """
+ codec = Codec (self.spec)
+ self.setHeader (codec, ord ('G'), userSequence)
+ ft = {}
+ ft["_class"] = className
+ codec.write_map (ft)
+ msg = channel.message(codec.encoded, routing_key="agent.1.%d" % bank)
+ channel.send ("qpid.management", msg)
+
+ def syncWaitForStable (self, channel):
+ """ Synchronous (blocking) call to wait for schema stability on a channel """
+ self.cv.acquire ()
+ if channel.reqsOutstanding == 0:
+ self.cv.release ()
+ return channel.brokerInfo
+
+ self.syncInFlight = True
+ starttime = time ()
+ while channel.reqsOutstanding != 0:
+ self.cv.wait (self.SYNC_TIME)
+ if time () - starttime > self.SYNC_TIME:
+ self.cv.release ()
+ raise RuntimeError ("Timed out waiting for response on channel")
+ self.cv.release ()
+ return channel.brokerInfo
+
+ def syncCallMethod (self, channel, objId, className, methodName, args=None):
+ """ Synchronous (blocking) method call """
+ self.cv.acquire ()
+ self.syncInFlight = True
+ self.syncResult = None
+ self.syncSequence = self.seqMgr.reserve ("sync")
+ self.cv.release ()
+ self.callMethod (channel, self.syncSequence, objId, className, methodName, args)
+ self.cv.acquire ()
+ starttime = time ()
+ while self.syncInFlight:
+ self.cv.wait (self.SYNC_TIME)
+ if time () - starttime > self.SYNC_TIME:
+ self.cv.release ()
+ raise RuntimeError ("Timed out waiting for response on channel")
+ result = self.syncResult
+ self.cv.release ()
+ return result
+
+ def syncGetObjects (self, channel, className, bank=0):
+ """ Synchronous (blocking) get call """
+ self.cv.acquire ()
+ self.syncInFlight = True
+ self.syncResult = []
+ self.syncSequence = self.seqMgr.reserve ("sync")
+ self.cv.release ()
+ self.getObjects (channel, self.syncSequence, className, bank)
+ self.cv.acquire ()
+ starttime = time ()
+ while self.syncInFlight:
+ self.cv.wait (self.SYNC_TIME)
+ if time () - starttime > self.SYNC_TIME:
+ self.cv.release ()
+ raise RuntimeError ("Timed out waiting for response on channel")
+ result = self.syncResult
+ self.cv.release ()
+ return result
+
+ #========================================================
+ # Channel API - interacts with registered channel objects
+ #========================================================
+ def topicCb (self, ch, msg):
+ """ Receive messages via the topic queue of a particular channel. """
+ codec = Codec (self.spec, msg.body)
+ while True:
+ hdr = self.checkHeader (codec)
+      if hdr is None:
+ return
+
+ if hdr[0] == 'p':
+ self.handlePackageInd (ch, codec)
+ elif hdr[0] == 'q':
+ self.handleClassInd (ch, codec)
+ elif hdr[0] == 'h':
+ self.handleHeartbeat (ch, codec)
+ elif hdr[0] == 'e':
+ self.handleEvent (ch, codec)
+ else:
+ self.parse (ch, codec, hdr[0], hdr[1])
+
+ def replyCb (self, ch, msg):
+ """ Receive messages via the reply queue of a particular channel. """
+ codec = Codec (self.spec, msg.body)
+ hdr = self.checkHeader (codec)
+    if hdr is None:
+ return
+
+ if hdr[0] == 'm':
+ self.handleMethodReply (ch, codec, hdr[1])
+ elif hdr[0] == 'z':
+ self.handleCommandComplete (ch, codec, hdr[1])
+ elif hdr[0] == 'b':
+ self.handleBrokerResponse (ch, codec)
+ elif hdr[0] == 'p':
+ self.handlePackageInd (ch, codec)
+ elif hdr[0] == 'q':
+ self.handleClassInd (ch, codec)
+ else:
+ self.parse (ch, codec, hdr[0], hdr[1])
+
+ def exceptCb (self, ch, data):
+ if self.closeCb != None:
+ self.closeCb (ch.context, data)
+
+ #========================================================
+ # Internal Functions
+ #========================================================
+ def setHeader (self, codec, opcode, seq = 0):
+ """ Compose the header of a management message. """
+ codec.write_uint8 (ord ('A'))
+ codec.write_uint8 (ord ('M'))
+ codec.write_uint8 (ord ('2'))
+ codec.write_uint8 (opcode)
+ codec.write_uint32 (seq)
+
+ def checkHeader (self, codec):
+ """ Check the header of a management message and extract the opcode and class. """
+ try:
+ octet = chr (codec.read_uint8 ())
+ if octet != 'A':
+ return None
+ octet = chr (codec.read_uint8 ())
+ if octet != 'M':
+ return None
+ octet = chr (codec.read_uint8 ())
+ if octet != '2':
+ return None
+ opcode = chr (codec.read_uint8 ())
+ seq = codec.read_uint32 ()
+ return (opcode, seq)
+ except:
+ return None
+
+ def encodeValue (self, codec, value, typecode):
+ """ Encode, into the codec, a value based on its typecode. """
+ if typecode == 1:
+ codec.write_uint8 (int (value))
+ elif typecode == 2:
+ codec.write_uint16 (int (value))
+ elif typecode == 3:
+ codec.write_uint32 (long (value))
+ elif typecode == 4:
+ codec.write_uint64 (long (value))
+ elif typecode == 5:
+ codec.write_uint8 (int (value))
+ elif typecode == 6:
+ codec.write_str8 (value)
+ elif typecode == 7:
+ codec.write_str16 (value)
+ elif typecode == 8: # ABSTIME
+ codec.write_uint64 (long (value))
+ elif typecode == 9: # DELTATIME
+ codec.write_uint64 (long (value))
+ elif typecode == 10: # REF
+ value.encode(codec)
+ elif typecode == 11: # BOOL
+ codec.write_uint8 (int (value))
+ elif typecode == 12: # FLOAT
+ codec.write_float (float (value))
+ elif typecode == 13: # DOUBLE
+ codec.write_double (float (value))
+ elif typecode == 14: # UUID
+ codec.write_uuid (value)
+ elif typecode == 15: # FTABLE
+ codec.write_map (value)
+ elif typecode == 16:
+ codec.write_int8 (int(value))
+ elif typecode == 17:
+ codec.write_int16 (int(value))
+ elif typecode == 18:
+ codec.write_int32 (int(value))
+ elif typecode == 19:
+ codec.write_int64 (int(value))
+ else:
+ raise ValueError ("Invalid type code: %d" % typecode)
+
+ def decodeValue (self, codec, typecode):
+ """ Decode, from the codec, a value based on its typecode. """
+ if typecode == 1:
+ data = codec.read_uint8 ()
+ elif typecode == 2:
+ data = codec.read_uint16 ()
+ elif typecode == 3:
+ data = codec.read_uint32 ()
+ elif typecode == 4:
+ data = codec.read_uint64 ()
+ elif typecode == 5:
+ data = codec.read_uint8 ()
+ elif typecode == 6:
+ data = codec.read_str8 ()
+ elif typecode == 7:
+ data = codec.read_str16 ()
+ elif typecode == 8: # ABSTIME
+ data = codec.read_uint64 ()
+ elif typecode == 9: # DELTATIME
+ data = codec.read_uint64 ()
+ elif typecode == 10: # REF
+ data = objectId(codec)
+ elif typecode == 11: # BOOL
+ data = codec.read_uint8 ()
+ elif typecode == 12: # FLOAT
+ data = codec.read_float ()
+ elif typecode == 13: # DOUBLE
+ data = codec.read_double ()
+ elif typecode == 14: # UUID
+ data = codec.read_uuid ()
+ elif typecode == 15: # FTABLE
+ data = codec.read_map ()
+ elif typecode == 16:
+ data = codec.read_int8 ()
+ elif typecode == 17:
+ data = codec.read_int16 ()
+ elif typecode == 18:
+ data = codec.read_int32 ()
+ elif typecode == 19:
+ data = codec.read_int64 ()
+ else:
+ raise ValueError ("Invalid type code: %d" % typecode)
+ return data
+
+ def incOutstanding (self, ch):
+ self.cv.acquire ()
+ ch.reqsOutstanding = ch.reqsOutstanding + 1
+ self.cv.release ()
+
def decOutstanding (self, ch):
    """ Decrement the channel's outstanding schema-request count.  When it
    reaches zero: wake any synchronous waiter, report schema-loaded to the
    control callback, and bind the topic queue for ongoing console and
    schema traffic. """
    self.cv.acquire ()
    ch.reqsOutstanding = ch.reqsOutstanding - 1
    if ch.reqsOutstanding == 0 and self.syncInFlight:
        self.syncInFlight = False
        self.cv.notify ()
    self.cv.release ()

    # Callbacks and session binds are done outside the lock.
    # NOTE(review): reqsOutstanding is re-read here after the lock is
    # released -- a concurrent increment could change the outcome; confirm
    # this is only ever driven from a single receiver thread.
    if ch.reqsOutstanding == 0:
        if self.ctrlCb != None:
            self.ctrlCb (ch.context, self.CTRL_SCHEMA_LOADED, None)
        ch.ssn.exchange_bind (exchange="qpid.management",
                              queue=ch.topicName, binding_key="console.#")
        ch.ssn.exchange_bind (exchange="qpid.management",
                              queue=ch.topicName, binding_key="schema.#")
+
+
def handleMethodReply (self, ch, codec, sequence):
    """ Decode a method-reply message: look up the pending request by its
    sequence number, decode any output arguments per the schema, and
    deliver the result either to the synchronous waiter or to the
    registered method callback. """
    status = codec.read_uint32 ()
    sText = codec.read_str16 ()

    data = self.seqMgr.release (sequence)
    if data == None:
        # No pending request for this sequence; drop the reply.
        return

    (userSequence, classId, methodName) = data
    args = {}
    context = self.seqMgr.release (userSequence)

    if status == 0:
        # Success: decode output arguments using the method's schema.
        schemaClass = self.schema[classId]
        ms = schemaClass['M']            # method-name -> (description, arg-list)
        arglist = None
        for mname in ms:
            (mdesc, margs) = ms[mname]
            if mname == methodName:
                arglist = margs
        if arglist == None:
            return

        for arg in arglist:
            if arg[2].find("O") != -1:   # output-direction arguments only
                args[arg[0]] = self.decodeValue (codec, arg[1])

    if context == "sync" and userSequence == self.syncSequence:
        # A synchronous caller is blocked on cv; hand over the result.
        self.cv.acquire ()
        self.syncInFlight = False
        self.syncResult = methodResult (status, sText, args)
        self.cv.notify ()
        self.cv.release ()
    elif self.methodCb != None:
        self.methodCb (ch.context, userSequence, status, sText, args)
+
def handleCommandComplete (self, ch, codec, seq):
    """ Handle a command-complete control message: retire an outstanding
    schema-discovery request, wake a synchronous waiter, or forward the
    completion to the control callback, depending on the stored context. """
    code = codec.read_uint32 ()
    text = codec.read_str8 ()
    data = (seq, code, text)
    context = self.seqMgr.release (seq)
    if context == "outstanding":
        self.decOutstanding (ch)
    elif context == "sync" and seq == self.syncSequence:
        self.cv.acquire ()
        self.syncInFlight = False
        self.cv.notify ()
        self.cv.release ()
    elif self.ctrlCb != None:
        self.ctrlCb (ch.context, self.CTRL_USER, data)
+
def handleBrokerResponse (self, ch, codec):
    """ Record the broker's identity from a broker-response message,
    notify the control callback, and begin schema discovery by sending a
    package request ('P' opcode). """
    uuid = codec.read_uuid ()
    ch.brokerInfo = brokerInfo (uuid, ch.sessionId)
    if self.ctrlCb != None:
        self.ctrlCb (ch.context, self.CTRL_BROKER_INFO, ch.brokerInfo)

    # Send a package request
    sendCodec = Codec (self.spec)
    seq = self.seqMgr.reserve ("outstanding")
    self.setHeader (sendCodec, ord ('P'), seq)
    smsg = ch.message(sendCodec.encoded)
    ch.send ("qpid.management", smsg)
+
def handlePackageInd (self, ch, codec):
    """ Handle a package indication: remember the package name and request
    the list of classes it contains ('Q' opcode). """
    pname = codec.read_str8 ()
    if pname not in self.packages:
        self.packages[pname] = {}

    # Send a class request
    sendCodec = Codec (self.spec)
    seq = self.seqMgr.reserve ("outstanding")
    self.setHeader (sendCodec, ord ('Q'), seq)
    self.incOutstanding (ch)
    sendCodec.write_str8 (pname)
    smsg = ch.message(sendCodec.encoded)
    ch.send ("qpid.management", smsg)
+
def handleClassInd (self, ch, codec):
    """ Handle a class indication: if the (class, hash) pair is not yet
    known for its package, request the full schema ('S' opcode). """
    kind = codec.read_uint8()
    if kind != 1: # This API doesn't handle new-style events
        return
    pname = codec.read_str8()
    cname = codec.read_str8()
    hash = codec.read_bin128()
    if pname not in self.packages:
        # Class for a package we never saw announced; ignore it.
        return

    if (cname, hash) not in self.packages[pname]:
        # Send a schema request
        sendCodec = Codec (self.spec)
        seq = self.seqMgr.reserve ("outstanding")
        self.setHeader (sendCodec, ord ('S'), seq)
        self.incOutstanding (ch)
        sendCodec.write_str8 (pname)
        sendCodec.write_str8 (cname)
        sendCodec.write_bin128 (hash)
        smsg = ch.message(sendCodec.encoded)
        ch.send ("qpid.management", smsg)
+
def handleHeartbeat (self, ch, codec):
    """ Forward a broker heartbeat timestamp to the control callback,
    if one is registered. """
    timestamp = codec.read_uint64()
    if self.ctrlCb is not None:
        self.ctrlCb (ch.context, self.CTRL_HEARTBEAT, timestamp)
+
def handleEvent (self, ch, codec):
    """ Decode an event message and deliver (classKey, objId, event-name,
    argument rows) to the event callback, if one is registered. """
    if self.eventCb == None:
        return
    timestamp = codec.read_uint64()
    objId = objectId(codec)
    packageName = codec.read_str8()
    className = codec.read_str8()
    hash = codec.read_bin128()
    name = codec.read_str8()
    classKey = (packageName, className, hash)
    if classKey not in self.schema:
        # Can't decode arguments without the schema; drop the event.
        return;
    schemaClass = self.schema[classKey]
    row = []
    es = schemaClass['E']    # event-name -> (description, arg-list)
    arglist = None
    for ename in es:
        (edesc, eargs) = es[ename]
        if ename == name:
            arglist = eargs
    if arglist == None:
        return
    for arg in arglist:
        row.append((arg[0], self.decodeValue(codec, arg[1])))
    self.eventCb(ch.context, classKey, objId, name, row)
+
def parseSchema (self, ch, codec):
    """ Parse a received schema-description message.

    Builds property ("configs"), statistic ("insts") and method
    descriptors from the field tables on the wire, stores them in
    self.schema keyed by (package, class, hash), and notifies the schema
    callback. """
    self.decOutstanding (ch)
    kind = codec.read_uint8()
    if kind != 1: # This API doesn't handle new-style events
        return
    packageName = codec.read_str8 ()
    className = codec.read_str8 ()
    hash = codec.read_bin128 ()
    configCount = codec.read_uint16 ()
    instCount = codec.read_uint16 ()
    methodCount = codec.read_uint16 ()

    # Ignore schemas for unknown packages or already-seen classes.
    if packageName not in self.packages:
        return
    if (className, hash) in self.packages[packageName]:
        return

    classKey = (packageName, className, hash)
    if classKey in self.schema:
        return

    configs = []
    insts = []
    methods = {}

    # Both element lists carry an implicit leading "id" element
    # (typecode 4 = uint64).
    configs.append (("id", 4, "", "", 1, 1, None, None, None, None, None))
    insts.append (("id", 4, None, None))

    # Property descriptors:
    # (name, type, unit, desc, access, index, min, max, maxlen, optional)
    for idx in range (configCount):
        ft = codec.read_map ()
        name = str (ft["name"])
        type = ft["type"]
        access = ft["access"]
        index = ft["index"]
        optional = ft["optional"]
        unit = None
        min = None
        max = None
        maxlen = None
        desc = None

        for key, value in ft.items ():
            if key == "unit":
                unit = str (value)
            elif key == "min":
                min = value
            elif key == "max":
                max = value
            elif key == "maxlen":
                maxlen = value
            elif key == "desc":
                desc = str (value)

        config = (name, type, unit, desc, access, index, min, max, maxlen, optional)
        configs.append (config)

    # Statistic descriptors: (name, type, unit, desc)
    for idx in range (instCount):
        ft = codec.read_map ()
        name = str (ft["name"])
        type = ft["type"]
        unit = None
        desc = None

        for key, value in ft.items ():
            if key == "unit":
                unit = str (value)
            elif key == "desc":
                desc = str (value)

        inst = (name, type, unit, desc)
        insts.append (inst)

    # Method descriptors: name -> (desc, [argument tuples]) where each
    # argument is (name, type, dir, unit, desc, min, max, maxlen, default).
    for idx in range (methodCount):
        ft = codec.read_map ()
        mname = str (ft["name"])
        argCount = ft["argCount"]
        if "desc" in ft:
            mdesc = str (ft["desc"])
        else:
            mdesc = None

        args = []
        for aidx in range (argCount):
            ft = codec.read_map ()
            name = str (ft["name"])
            type = ft["type"]
            dir = str (ft["dir"].upper ())
            unit = None
            min = None
            max = None
            maxlen = None
            desc = None
            default = None

            for key, value in ft.items ():
                if key == "unit":
                    unit = str (value)
                elif key == "min":
                    min = value
                elif key == "max":
                    max = value
                elif key == "maxlen":
                    maxlen = value
                elif key == "desc":
                    desc = str (value)
                elif key == "default":
                    default = str (value)

            arg = (name, type, dir, unit, desc, min, max, maxlen, default)
            args.append (arg)
        methods[mname] = (mdesc, args)

    schemaClass = {}
    schemaClass['C'] = configs
    schemaClass['I'] = insts
    schemaClass['M'] = methods
    self.schema[classKey] = schemaClass

    if self.schemaCb != None:
        self.schemaCb (ch.context, classKey, configs, insts, methods, {})
+
def parsePresenceMasks(self, codec, schemaClass):
    """ Generate a list of not-present properties """
    # Optional properties are described by presence bitmasks on the wire,
    # eight flags per byte.  Mask bytes are read lazily: a new byte is
    # consumed only when an optional property is seen and the previous
    # byte's eight bits are exhausted (bit wraps from 256 back to 0).
    excludeList = []
    bit = 0
    for element in schemaClass['C'][1:]:   # skip the implicit "id" element
        if element[9] == 1:                # element is marked optional
            if bit == 0:
                mask = codec.read_uint8()
                bit = 1
            if (mask & bit) == 0:
                excludeList.append(element[0])
            bit = bit * 2
            if bit == 256:
                bit = 0
    return excludeList
+
def parseContent (self, ch, cls, codec, seq=0):
    """ Parse a received content message.

    cls selects what the message carries: 'C' (properties), 'I'
    (statistics) or 'B' (both -- used for get-query replies, where seq
    may match a synchronous request). """
    # Skip early when no callback would consume the data.
    if (cls == 'C' or (cls == 'B' and seq == 0)) and self.configCb == None:
        return
    if cls == 'I' and self.instCb == None:
        return

    packageName = codec.read_str8 ()
    className = codec.read_str8 ()
    hash = codec.read_bin128 ()
    classKey = (packageName, className, hash)

    if classKey not in self.schema:
        return

    row = []
    timestamps = []

    timestamps.append (codec.read_uint64 ()) # Current Time
    timestamps.append (codec.read_uint64 ()) # Create Time
    timestamps.append (codec.read_uint64 ()) # Delete Time
    objId = objectId(codec)
    schemaClass = self.schema[classKey]
    if cls == 'C' or cls == 'B':
        # Optional properties absent from this message.
        notPresent = self.parsePresenceMasks(codec, schemaClass)

    if cls == 'C' or cls == 'B':
        row.append(("id", objId))
        for element in schemaClass['C'][1:]:
            tc = element[1]
            name = element[0]
            if name in notPresent:
                row.append((name, None))     # optional property not sent
            else:
                data = self.decodeValue(codec, tc)
                row.append((name, data))

    if cls == 'I' or cls == 'B':
        if cls == 'I':
            row.append(("id", objId))
        for element in schemaClass['I'][1:]:
            tc = element[1]
            name = element[0]
            data = self.decodeValue (codec, tc)
            row.append ((name, data))

    if cls == 'C' or (cls == 'B' and seq != self.syncSequence):
        self.configCb (ch.context, classKey, row, timestamps)
    elif cls == 'B' and seq == self.syncSequence:
        if timestamps[2] == 0:               # object not yet deleted
            obj = mgmtObject (classKey, timestamps, row)
            self.syncResult.append (obj)
    elif cls == 'I':
        self.instCb (ch.context, classKey, row, timestamps)
+
def parse (self, ch, codec, opcode, seq):
    """ Parse a message received from the topic queue, dispatching on its
    opcode: 's' schema, 'c' properties, 'i' statistics, 'g' get-reply. """
    if opcode == 's':
        self.parseSchema (ch, codec)
        return
    if opcode == 'g':
        self.parseContent (ch, 'B', codec, seq)
        return
    contentKind = {'c': 'C', 'i': 'I'}
    if opcode not in contentKind:
        raise ValueError ("Unknown opcode: %c" % opcode)
    self.parseContent (ch, contentKind[opcode], codec)
+
def method (self, channel, userSequence, objId, classId, methodName, args):
    """ Invoke a method on an object.

    Encodes the method header, the target object id, the class key and
    the input arguments (schema-validated) and sends the request to the
    target agent's direct address.  Raises ValueError for an unknown
    class, unknown method, or a missing non-defaulted argument. """
    codec = Codec (self.spec)
    sequence = self.seqMgr.reserve ((userSequence, classId, methodName))
    self.setHeader (codec, ord ('M'), sequence)
    objId.encode(codec)
    codec.write_str8 (classId[0])
    codec.write_str8 (classId[1])
    codec.write_bin128 (classId[2])
    codec.write_str8 (methodName)
    bank = "%d.%d" % (objId.getBroker(), objId.getBank())

    # Encode args according to schema
    if classId not in self.schema:
        self.seqMgr.release (sequence)
        raise ValueError ("Unknown class name: %s" % classId)

    schemaClass = self.schema[classId]
    ms = schemaClass['M']       # method-name -> (description, arg-list)
    arglist = None
    for mname in ms:
        (mdesc, margs) = ms[mname]
        if mname == methodName:
            arglist = margs
    if arglist == None:
        self.seqMgr.release (sequence)
        raise ValueError ("Unknown method name: %s" % methodName)

    for arg in arglist:
        if arg[2].find("I") != -1:   # input-direction arguments only
            value = arg[8] # default
            if arg[0] in args:
                value = args[arg[0]]
            if value == None:
                self.seqMgr.release (sequence)
                raise ValueError ("Missing non-defaulted argument: %s" % arg[0])
            self.encodeValue (codec, value, arg[1])

    packageName = classId[0]
    className = classId[1]
    msg = channel.message(codec.encoded, "agent." + bank)
    channel.send ("qpid.management", msg)
diff --git a/RC9/qpid/python/qpid/managementdata.py b/RC9/qpid/python/qpid/managementdata.py
new file mode 100644
index 0000000000..46c746c0f9
--- /dev/null
+++ b/RC9/qpid/python/qpid/managementdata.py
@@ -0,0 +1,753 @@
+#!/usr/bin/env python
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+###############################################################################
+## This file is being obsoleted by qmf/console.py
+###############################################################################
+
+import qpid
+import re
+import socket
+import struct
+import os
+import locale
+from qpid.management import managementChannel, managementClient
+from threading import Lock
+from disp import Display
+from shlex import split
+from qpid.connection import Connection
+from qpid.util import connect
+
class Broker:
    """ Parsed broker locator of the form [user[/password]@]host[:port].

    The host is resolved to an address at construction time; user and
    password default to "guest" and the port to 5672. """

    def __init__ (self, text):
        rex = re.compile(r"""
        # [ <user> [ / <password> ] @] <host> [ :<port> ]
        ^ (?: ([^/]*) (?: / ([^@]*) )? @)? ([^:]+) (?: :([0-9]+))?$""", re.X)
        match = rex.match(text)
        if not match: raise ValueError("'%s' is not a valid broker url" % (text))
        user, password, host, port = match.groups()

        self.host = socket.gethostbyname (host)
        self.port = int(port) if port else 5672
        self.username = user or "guest"
        self.password = password or "guest"

    def name (self):
        """ Return the broker address as "host:port". """
        return "%s:%d" % (self.host, self.port)
+
class ManagementData:
    """ In-memory model of management data received from a broker, plus the
    command handlers used by the interactive management CLI. """

    #
    # Data Structure:
    #
    # Please note that this data structure holds only the most recent
    # configuration and instrumentation data for each object.  It does
    # not hold the detailed historical data that is sent from the broker.
    # The only historical data it keeps are the high and low watermarks
    # for hi-lo statistics.
    #
    #    tables        :== {class-key}
    #                        {<obj-id>}
    #                          (timestamp, config-record, inst-record)
    #    class-key     :== (<package-name>, <class-name>, <class-hash>)
    #    timestamp     :== (<last-interval-time>, <create-time>, <delete-time>)
    #    config-record :== [element]
    #    inst-record   :== [element]
    #    element       :== (<element-name>, <element-value>)
    #
+
def registerObjId (self, objId):
    """ Assign the next small display id to objId, unless its index is
    already registered. """
    key = objId.index ()
    if key in self.idBackMap:
        return
    self.idBackMap[key] = self.nextId
    self.idMap[self.nextId] = objId
    self.nextId += 1
+
def displayObjId (self, objIdIndex):
    """ Map a raw object-id index to its small display id (0 if unknown). """
    return self.idBackMap.get (objIdIndex, 0)
+
def rawObjId (self, displayId):
    """ Map a small display id back to the raw object id (None if unknown). """
    return self.idMap.get (displayId)
+
def displayClassName (self, cls):
    """ Render a class key as "package:class", with a ".rev" suffix when
    the schema revision is non-zero. """
    pname, cname, unused = cls
    rev = self.schema[cls][4]
    if rev == 0:
        return "%s:%s" % (pname, cname)
    return "%s:%s.%d" % (pname, cname, rev)
+
def dataHandler (self, context, className, list, timestamps):
    """ Callback for configuration and instrumentation data updates.

    context is 0 for property (config) updates and 1 for statistic
    (inst) updates; className is actually the class-key tuple.  For
    statistics, hi/lo watermark values are carried forward across
    updates. """
    self.lock.acquire ()
    try:
        # If this class has not been seen before, create an empty dictionary to
        # hold objects of this class
        if className not in self.tables:
            self.tables[className] = {}

        # Register the ID so a more friendly presentation can be displayed
        objId = list[0][1]
        oidx = objId.index()
        self.registerObjId (objId)

        # If this object hasn't been seen before, create a new object record with
        # the timestamps and empty lists for configuration and instrumentation data.
        if oidx not in self.tables[className]:
            self.tables[className][oidx] = (timestamps, [], [])

        (unused, oldConf, oldInst) = self.tables[className][oidx]

        # For config updates, simply replace old config list with the new one.
        if context == 0: #config
            self.tables[className][oidx] = (timestamps, list, oldInst)

        # For instrumentation updates, carry the minimum and maximum values for
        # "hi-lo" stats forward.
        elif context == 1: #inst
            if len (oldInst) == 0:
                newInst = list
            else:
                newInst = []
                for idx in range (len (list)):
                    (key, value) = list[idx]
                    # "...High" stats keep the max, "...Low" keep the min.
                    if key.find ("High") == len (key) - 4:
                        if oldInst[idx][1] > value:
                            value = oldInst[idx][1]
                    if key.find ("Low") == len (key) - 3:
                        if oldInst[idx][1] < value:
                            value = oldInst[idx][1]
                    newInst.append ((key, value))
            self.tables[className][oidx] = (timestamps, oldConf, newInst)

    finally:
        self.lock.release ()
+
def ctrlHandler (self, context, op, data):
    """ Control-event callback from the management client; broker-info and
    heartbeat events are currently ignored. """
    if op == self.mclient.CTRL_BROKER_INFO:
        pass
    elif op == self.mclient.CTRL_HEARTBEAT:
        pass
+
def configHandler (self, context, className, list, timestamps):
    """ Property-update callback: record as a config update (context 0). """
    self.dataHandler (0, className, list, timestamps);
+
def instHandler (self, context, className, list, timestamps):
    """ Statistic-update callback: record as an inst update (context 1). """
    self.dataHandler (1, className, list, timestamps);
+
def methodReply (self, broker, sequence, status, sText, args):
    """ Callback for method-reply messages """
    self.lock.acquire ()
    try:
        # Print the result and retire the pending-method entry for this
        # sequence number.
        line = "Call Result: " + self.methodsPending[sequence] + \
               "  " + str (status) + " (" + sText + ")"
        print line, args
        del self.methodsPending[sequence]
    finally:
        self.lock.release ()
+
def closeHandler (self, context, reason):
    """ Connection-closed callback: report the loss once and update the
    CLI prompt, if a CLI is attached. """
    if self.operational:
        print "Connection to broker lost:", reason
        self.operational = False
        if self.cli != None:
            self.cli.setPromptMessage ("Broker Disconnected")
+
def schemaHandler (self, context, classKey, configs, insts, methods, events):
    """ Callback for schema updates.  A new class is stored with a
    revision number equal to the count of previously-seen schemas for
    the same (package, class) pair; known keys are left untouched. """
    if classKey in self.schema:
        return
    schemaRev = sum (1 for key in self.schema
                     if classKey[0] == key[0] and classKey[1] == key[1])
    self.schema[classKey] = (configs, insts, methods, events, schemaRev)
+
def setCli (self, cliobj):
    """ Attach the interactive CLI object (used for prompt updates). """
    self.cli = cliobj
+
def __init__ (self, disp, host, username="guest", password="guest"):
    """ Connect to the broker named by 'host' (a [user[/pass]@]host[:port]
    locator) and start a management client over the connection.

    NOTE(review): the 'username'/'password' parameters are not used --
    credentials come from the parsed broker URL; confirm intent. """
    self.lock = Lock ()
    self.tables = {}           # class-key -> {obj-index -> (ts, conf, inst)}
    self.schema = {}           # class-key -> (configs, insts, methods, events, rev)
    self.bootSequence = 0
    self.operational = False
    self.disp = disp
    self.cli = None
    self.lastUnit = None
    self.methodSeq = 1
    self.methodsPending = {}   # method sequence -> method name
    self.sessionId = "%s.%d" % (os.uname()[1], os.getpid())

    self.broker = Broker (host)
    self.conn = Connection (connect (self.broker.host, self.broker.port),
                            username=self.broker.username, password=self.broker.password)
    self.spec = self.conn.spec
    self.conn.start ()

    self.mclient = managementClient (self.spec, self.ctrlHandler, self.configHandler,
                                     self.instHandler, self.methodReply, self.closeHandler)
    self.mclient.schemaListener (self.schemaHandler)
    self.mch = self.mclient.addChannel (self.conn.session(self.sessionId))
    self.operational = True
    self.idMap = {}            # display id -> raw object id
    self.idBackMap = {}        # object-id index -> display id
    self.nextId = 101          # first display id handed out
+
def close (self):
    """ Shutdown hook -- no explicit teardown is performed here. """
    pass
+
def refName (self, oid):
    """ Render an object reference as its display id, or "NULL" when the
    reference is absent. """
    if oid is None:
        return "NULL"
    return str (self.displayObjId (oid.index ()))
+
def valueDisplay (self, classKey, key, value):
    """ Render a single property/statistic value for display, using the
    class schema to find the element's typecode and unit. """
    if value == None:
        return "<NULL>"
    for kind in range (2):   # 0: properties, 1: statistics
        schema = self.schema[classKey][kind]
        for item in schema:
            if item[0] == key:
                typecode = item[1]
                unit = item[2]
                # Numeric types: append the unit (pluralized) the first
                # time it changes; repeats of the same unit are elided.
                # NOTE(review): typecode 5 (bool) is formatted as a plain
                # number here while 11 gets True/False -- confirm intent.
                if (typecode >= 1 and typecode <= 5) or typecode == 12 or typecode == 13 or \
                   (typecode >= 16 and typecode <= 19):
                    if unit == None or unit == self.lastUnit:
                        return str (value)
                    else:
                        self.lastUnit = unit
                        suffix = ""
                        if value != 1:
                            suffix = "s"
                        return str (value) + " " + unit + suffix
                elif typecode == 6 or typecode == 7: # strings
                    return value
                elif typecode == 8:    # absolute time; 0 means "never"
                    if value == 0:
                        return "--"
                    return self.disp.timestamp (value)
                elif typecode == 9:    # delta time
                    return str (value)
                elif typecode == 10:   # object reference
                    return self.refName (value)
                elif typecode == 11:   # boolean
                    if value == 0:
                        return "False"
                    else:
                        return "True"
                elif typecode == 14:   # uuid, rendered in canonical form
                    return "%08x-%04x-%04x-%04x-%04x%08x" % struct.unpack ("!LHHHHL", value)
                elif typecode == 15:   # field table
                    return str (value)
    return "*type-error*"
+
def getObjIndex (self, classKey, config):
    """ Concatenate the values from index columns to form a unique object name """
    result = ""
    schemaConfig = self.schema[classKey][0]
    for item in schemaConfig:
        # item[5] is the "index" flag; the implicit "id" column is skipped.
        if item[5] == 1 and item[0] != "id":
            if result != "":
                result = result + "."
            for key,val in config:
                if key == item[0]:
                    result = result + self.valueDisplay (classKey, key, val)
    return result
+
def getClassKey (self, className):
    """ Resolve a user-entered class name to a class key.

    Accepts "class", "class.rev", "package:class" and
    "package:class.rev"; returns the matching key from self.schema or
    None. """
    delimPos = className.find(":")
    if delimPos == -1:
        # No package qualifier: match on class name and revision only.
        schemaRev = 0
        delim = className.find(".")
        if delim != -1:
            schemaRev = int(className[delim + 1:])
            name = className[0:delim]
        else:
            name = className
        for key in self.schema:
            if key[1] == name and self.schema[key][4] == schemaRev:
                return key
    else:
        # Package-qualified name, with optional ".rev" suffix.
        package = className[0:delimPos]
        name = className[delimPos + 1:]
        schemaRev = 0
        delim = name.find(".")
        if delim != -1:
            schemaRev = int(name[delim + 1:])
            name = name[0:delim]
        for key in self.schema:
            if key[0] == package and key[1] == name:
                if self.schema[key][4] == schemaRev:
                    return key
    return None
+
def classCompletions (self, prefix):
    """ Provide a list of candidate class names for command completion.

    self.tables is keyed by class-key tuples, not strings, so each key is
    rendered to its display name ("package:class[.rev]") before prefix
    matching.  (The previous version called str.find on the tuple itself,
    which raised AttributeError.) """
    self.lock.acquire ()
    complist = []
    try:
        for key in self.tables:
            name = self.displayClassName (key)
            if name.find (prefix) == 0:
                complist.append (name)
    finally:
        self.lock.release ()
    return complist
+
def typeName (self, typecode):
    """ Convert type-codes to printable strings """
    names = {1:  "uint8",        2:  "uint16",
             3:  "uint32",       4:  "uint64",
             5:  "bool",         6:  "short-string",
             7:  "long-string",  8:  "abs-time",
             9:  "delta-time",   10: "reference",
             11: "boolean",      12: "float",
             13: "double",       14: "uuid",
             15: "field-table",  16: "int8",
             17: "int16",        18: "int32",
             19: "int64"}
    if typecode not in names:
        raise ValueError ("Invalid type code: %d" % typecode)
    return names[typecode]
+
def accessName (self, code):
    """ Convert element access codes to printable strings """
    names = {1: "ReadCreate", 2: "ReadWrite", 3: "ReadOnly"}
    if code not in names:
        raise ValueError ("Invalid access code: %d" % code)
    return names[code]
+
def notNone (self, text):
    """ Map None to the empty string; pass any other value through. """
    if text is None:
        return ""
    return text
+
def isOid (self, id):
    """ True when the token looks like an object-id spec: only digits and
    '-' (a single id or an id range). """
    return all (ch.isdigit () or ch == '-' for ch in str (id))
+
def listOfIds (self, classKey, tokens):
    """ Generate a tuple of object ids for a classname based on command tokens. """
    # Tokens may be: empty/"all" (every object), "active" (objects with
    # no delete-time), or explicit display-ids and "low-high" ranges.
    list = []
    if len(tokens) == 0 or tokens[0] == "all":
        for id in self.tables[classKey]:
            list.append (self.displayObjId (id))

    elif tokens[0] == "active":
        for id in self.tables[classKey]:
            if self.tables[classKey][id][0][2] == 0:   # delete-time == 0
                list.append (self.displayObjId (id))

    else:
        for token in tokens:
            if self.isOid (token):
                if token.find ("-") != -1:
                    # Range: keep only ids that belong to this class.
                    ids = token.split("-", 2)
                    for id in range (int (ids[0]), int (ids[1]) + 1):
                        if self.getClassForId (self.rawObjId (long (id))) == classKey:
                            list.append (id)
                else:
                    list.append (int(token))

    list.sort ()
    result = ()
    for item in list:
        result = result + (item,)
    return result
+
def listClasses (self):
    """ Generate a display of the list of classes """
    self.lock.acquire ()
    try:
        rows = []
        sorted = self.tables.keys ()
        sorted.sort ()
        for name in sorted:
            # Count active vs. deleted objects (delete-time > 0).
            active  = 0
            deleted = 0
            for record in self.tables[name]:
                isdel = False
                ts = self.tables[name][record][0]
                if ts[2] > 0:
                    isdel = True
                if isdel:
                    deleted = deleted + 1
                else:
                    active = active + 1
            rows.append ((self.displayClassName (name), active, deleted))
        if len (rows) != 0:
            self.disp.table ("Management Object Types:",
                             ("ObjectType", "Active", "Deleted"), rows)
        else:
            print "Waiting for next periodic update"
    finally:
        self.lock.release ()
+
def listObjects (self, tokens):
    """ Generate a display of a list of objects in a class """
    if len(tokens) == 0:
        print "Error - No class name provided"
        return

    self.lock.acquire ()
    try:
        classKey = self.getClassKey (tokens[0])
        if classKey == None:
            print ("Object type %s not known" % tokens[0])
        else:
            rows = []
            if classKey in self.tables:
                # Remaining tokens select which objects to list.
                ids = self.listOfIds(classKey, tokens[1:])
                for objId in ids:
                    (ts, config, inst) = self.tables[classKey][self.rawObjId(objId).index()]
                    createTime  = self.disp.timestamp (ts[1])
                    destroyTime = "-"
                    if ts[2] > 0:
                        destroyTime = self.disp.timestamp (ts[2])
                    objIndex = self.getObjIndex (classKey, config)
                    row = (objId, createTime, destroyTime, objIndex)
                    rows.append (row)
                self.disp.table ("Objects of type %s" % self.displayClassName(classKey),
                                 ("ID", "Created", "Destroyed", "Index"),
                                 rows)
    finally:
        self.lock.release ()
+
def showObjects (self, tokens):
    """ Generate a display of object data for a particular class """
    self.lock.acquire ()
    try:
        self.lastUnit = None
        if self.isOid (tokens[0]):
            # First token is a display id (or id range) -- derive the
            # class from it; all tokens then select ids.
            if tokens[0].find ("-") != -1:
                rootId = int (tokens[0][0:tokens[0].find ("-")])
            else:
                rootId = int (tokens[0])

            classKey = self.getClassForId (self.rawObjId (rootId))
            remaining = tokens
            if classKey == None:
                print "Id not known: %d" % int (tokens[0])
                raise ValueError ()
        else:
            # First token is a class name; remaining tokens select ids.
            classKey = self.getClassKey (tokens[0])
            remaining = tokens[1:]
            if classKey not in self.tables:
                print "Class not known: %s" % tokens[0]
                raise ValueError ()

        userIds = self.listOfIds (classKey, remaining)
        if len (userIds) == 0:
            print "No object IDs supplied"
            raise ValueError ()

        # Keep only ids that really belong to the class; convert to raw ids.
        ids = []
        for id in userIds:
            if self.getClassForId (self.rawObjId (long (id))) == classKey:
                ids.append (self.rawObjId (long (id)))

        # One row per property, one column per selected object; the newest
        # sample timestamp across the objects is shown in the caption.
        rows = []
        timestamp = None
        config = self.tables[classKey][ids[0].index()][1]
        for eIdx in range (len (config)):
            key = config[eIdx][0]
            if key != "id":
                row = ("property", key)
                for id in ids:
                    if timestamp == None or \
                       timestamp < self.tables[classKey][id.index()][0][0]:
                        timestamp = self.tables[classKey][id.index()][0][0]
                    (key, value) = self.tables[classKey][id.index()][1][eIdx]
                    row = row + (self.valueDisplay (classKey, key, value),)
                rows.append (row)

        inst = self.tables[classKey][ids[0].index()][2]
        for eIdx in range (len (inst)):
            key = inst[eIdx][0]
            if key != "id":
                row = ("statistic", key)
                for id in ids:
                    (key, value) = self.tables[classKey][id.index()][2][eIdx]
                    row = row + (self.valueDisplay (classKey, key, value),)
                rows.append (row)

        titleRow = ("Type", "Element")
        for id in ids:
            titleRow = titleRow + (self.refName(id),)
        caption = "Object of type %s:" % self.displayClassName(classKey)
        if timestamp != None:
            caption = caption + " (last sample time: " + self.disp.timestamp (timestamp) + ")"
        self.disp.table (caption, titleRow, rows)

    except:
        # NOTE(review): bare except is used as flow control for the
        # ValueError raises above, but it also silently swallows any
        # unexpected error.  The lock release below is not in a finally
        # clause -- confirm no path can skip it.
        pass
    self.lock.release ()
+
def schemaSummary (self):
    """ Generate a display of the list of classes in the schema """
    self.lock.acquire ()
    try:
        rows = []
        sorted = self.schema.keys ()
        sorted.sort ()
        for classKey in sorted:
            # tuple is (configs, insts, methods, events, revision).
            tuple = self.schema[classKey]
            row = (self.displayClassName(classKey), len (tuple[0]), len (tuple[1]),
                   len (tuple[2]))
            rows.append (row)
        self.disp.table ("Classes in Schema:",
                         ("Class", "Properties", "Statistics", "Methods"),
                         rows)
    finally:
        self.lock.release ()
+
def schemaTable (self, className):
    """ Generate a display of details of the schema of a particular class """
    self.lock.acquire ()
    try:
        classKey = self.getClassKey (className)
        if classKey == None:
            print ("Class name %s not known" % className)
            raise ValueError ()

        # Property rows: name, type, unit, access, notes, description.
        rows = []
        schemaRev = self.schema[classKey][4]   # NOTE(review): unused
        for config in self.schema[classKey][0]:
            name = config[0]
            if name != "id":
                typename = self.typeName(config[1])
                unit = self.notNone (config[2])
                desc = self.notNone (config[3])
                access = self.accessName (config[4])
                extra = ""
                if config[5] == 1:
                    extra += "index "
                if config[6] != None:
                    extra += "Min: " + str(config[6]) + " "
                if config[7] != None:
                    extra += "Max: " + str(config[7]) + " "
                if config[8] != None:
                    extra += "MaxLen: " + str(config[8]) + " "
                if config[9] == 1:
                    extra += "optional "
                rows.append ((name, typename, unit, access, extra, desc))

        # Statistic rows share the same table, with no access/notes.
        for config in self.schema[classKey][1]:
            name = config[0]
            if name != "id":
                typename = self.typeName(config[1])
                unit = self.notNone (config[2])
                desc = self.notNone (config[3])
                rows.append ((name, typename, unit, "", "", desc))

        titles = ("Element", "Type", "Unit", "Access", "Notes", "Description")
        self.disp.table ("Schema for class '%s':" % self.displayClassName(classKey), titles, rows)

        # One additional table per method, listing its arguments.
        for mname in self.schema[classKey][2]:
            (mdesc, args) = self.schema[classKey][2][mname]
            caption = "\nMethod '%s' %s" % (mname, self.notNone (mdesc))
            rows = []
            for arg in args:
                name = arg[0]
                typename = self.typeName (arg[1])
                dir = arg[2]
                unit = self.notNone (arg[3])
                desc = self.notNone (arg[4])
                extra = ""
                if arg[5] != None:
                    extra = extra + "Min: " + str (arg[5])
                if arg[6] != None:
                    extra = extra + "Max: " + str (arg[6])
                if arg[7] != None:
                    extra = extra + "MaxLen: " + str (arg[7])
                if arg[8] != None:
                    extra = extra + "Default: " + str (arg[8])
                rows.append ((name, typename, dir, unit, extra, desc))
            titles = ("Argument", "Type", "Direction", "Unit", "Notes", "Description")
            self.disp.table (caption, titles, rows)

    except Exception,e:
        # NOTE(review): swallows all errors (flow control for the raise
        # above); lock release below is not in a finally clause.
        pass
    self.lock.release ()
+
def getClassForId (self, objId):
    """ Given an object ID, return the class key for the referenced object """
    idx = objId.index ()
    for classKey in self.tables:
        if idx in self.tables[classKey]:
            return classKey
    return None
+
def callMethod (self, userOid, methodName, args):
    """ Validate and dispatch a method call on the object with display id
    userOid.  Argument count is checked against the schema's input
    ("I"-direction) arguments; the actual send happens outside the lock. """
    self.lock.acquire ()
    methodOk = True
    try:
        classKey = self.getClassForId (self.rawObjId (userOid))
        if classKey == None:
            raise ValueError ()

        if methodName not in self.schema[classKey][2]:
            print "Method '%s' not valid for class '%s'" % (methodName, self.displayClassName(classKey))
            raise ValueError ()

        # Count the schema's input arguments and check the user supplied
        # exactly that many.
        schemaMethod = self.schema[classKey][2][methodName]
        count = 0
        for arg in range(len(schemaMethod[1])):
            if schemaMethod[1][arg][2].find("I") != -1:
                count += 1
        if len (args) != count:
            print "Wrong number of method args: Need %d, Got %d" % (count, len (args))
            raise ValueError ()

        # Map positional user args onto the schema's input-argument names.
        namedArgs = {}
        idx = 0
        for arg in range(len(schemaMethod[1])):
            if schemaMethod[1][arg][2].find("I") != -1:
                namedArgs[schemaMethod[1][arg][0]] = args[idx]
                idx += 1

        self.methodSeq = self.methodSeq + 1
        self.methodsPending[self.methodSeq] = methodName
    except Exception, e:
        # Validation failed (messages already printed above); suppress.
        methodOk = False
    self.lock.release ()
    if methodOk:
#      try:
        self.mclient.callMethod (self.mch, self.methodSeq, self.rawObjId (userOid), classKey,
                                 methodName, namedArgs)
#      except ValueError, e:
#        print "Error invoking method:", e
+
def makeIdRow (self, displayId):
    """ Build one display row translating a display id to its raw
    object-id fields; returns None when the display id is unknown. """
    if displayId in self.idMap:
        objId = self.idMap[displayId]
    else:
        return None
    if objId.getFlags() == 0:
        flags = ""
    else:
        flags = str(objId.getFlags())
    seq = objId.getSequence()
    # A zero boot-sequence marks a durable (persistent) object id.
    if seq == 0:
        seqText = "<durable>"
    else:
        seqText = str(seq)
    return (displayId, flags, seqText, objId.getBroker(), objId.getBank(), hex(objId.getObject()))
+
def listIds (self, select):
    """ Display the display-id translation table: all entries when
    select is 0, otherwise just the selected display id. """
    rows = []
    if select == 0:
        sorted = self.idMap.keys()
        sorted.sort()
        for displayId in sorted:
            row = self.makeIdRow (displayId)
            rows.append(row)
    else:
        row = self.makeIdRow (select)
        if row == None:
            print "Display Id %d not known" % select
            return
        rows.append(row)
    self.disp.table("Translation of Display IDs:",
                    ("DisplayID", "Flags", "BootSequence", "Broker", "Bank", "Object"),
                    rows)
+
def do_list (self, data):
    """ CLI 'list' handler: with no arguments, list the known classes;
    otherwise list the objects of the named class. """
    tokens = data.split ()
    if tokens:
        self.listObjects (tokens)
    else:
        self.listClasses ()
+
def do_show (self, data):
    """ CLI 'show' handler: display property/statistic values for the
    selected objects. """
    tokens = data.split ()
    self.showObjects (tokens)
+
def do_schema (self, data):
    """ CLI 'schema' handler: summary of all classes, or details for the
    named class. """
    if data == "":
        self.schemaSummary ()
    else:
        self.schemaTable (data)
+
+ def do_call (self, data):
+ encTokens = data.split ()
+ try:
+ tokens = [a.decode(locale.getpreferredencoding()) for a in encArgs]
+ except:
+ tokens = encTokens
+ if len (tokens) < 2:
+ print "Not enough arguments supplied"
+ return
+
+ displayId = long (tokens[0])
+ methodName = tokens[1]
+ args = tokens[2:]
+ self.callMethod (displayId, methodName, args)
+
def do_id (self, data):
    """ CLI 'id' handler: list all display-id translations, or just the
    one given as an argument. """
    select = int (data) if data != "" else 0
    self.listIds (select)
+
def do_exit (self):
    """ CLI 'exit' handler: detach the management channel from the client. """
    self.mclient.removeChannel (self.mch)
diff --git a/RC9/qpid/python/qpid/message.py b/RC9/qpid/python/qpid/message.py
new file mode 100644
index 0000000000..eb3ef5c03c
--- /dev/null
+++ b/RC9/qpid/python/qpid/message.py
@@ -0,0 +1,74 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from connection08 import Method, Request
+from sets import Set
+
class Message:
  """A received AMQP method frame plus its (optional) content.

  Provides sequence-style access to the method's arguments, attribute
  access to named fields, and dynamically generated responder callables
  for each response listed in the method's spec.
  """

  def __init__(self, channel, frame, content = None):
    self.channel = channel
    self.frame = frame
    self.method = frame.method_type
    self.content = content
    if self.method.is_l4_command():
      # L4 commands are tracked for completion; allocate the next id.
      self.command_id = self.channel.incoming_completion.sequence.next()
      #print "allocated: ", self.command_id, "to ", self.method.klass.name, "_", self.method.name

  def __len__(self):
    return len(self.frame.args)

  def _idx(self, idx):
    """Normalize a possibly-negative index; raise IndexError when out of range."""
    if idx < 0: idx += len(self)
    # idx == len(self) is one past the last valid position, so it must be
    # rejected too (the original test used '>' and let it through).
    if idx < 0 or idx >= len(self):
      raise IndexError(idx)
    return idx

  def __getitem__(self, idx):
    return self.frame.args[idx]

  def __getattr__(self, attr):
    """Resolve field names to argument values, and response names to
    callables that send the corresponding response frame."""
    fields = self.method.fields.byname
    if fields.has_key(attr):
      f = fields[attr]
      result = self[self.method.fields.index(f)]
    else:
      for r in self.method.responses:
        if attr == r.name:
          def respond(*args, **kwargs):
            batch=0
            if kwargs.has_key("batchoffset"):
              batch=kwargs.pop("batchoffset")
            self.channel.respond(Method(r, r.arguments(*args, **kwargs)), batch, self.frame)
          result = respond
          break
      else:
        raise AttributeError(attr)
    return result

  STR = "%s %s content = %s"
  REPR = STR.replace("%s", "%r")

  def __str__(self):
    return Message.STR % (self.method, self.frame.args, self.content)

  def __repr__(self):
    return Message.REPR % (self.method, self.frame.args, self.content)

  def complete(self, cumulative=True):
    """Mark this command complete on the channel's incoming tracker."""
    self.channel.incoming_completion.complete(mark=self.command_id, cumulative=cumulative)
diff --git a/RC9/qpid/python/qpid/packer.py b/RC9/qpid/python/qpid/packer.py
new file mode 100644
index 0000000000..22c16918dc
--- /dev/null
+++ b/RC9/qpid/python/qpid/packer.py
@@ -0,0 +1,36 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import struct
+
class Packer:
  """Mixin layering struct-based pack/unpack over raw read/write.

  Subclasses must supply read(n) and write(s); the bodies below are
  deliberate placeholders that raise NameError('abstract') if invoked.
  """

  def read(self, n): abstract

  def write(self, s): abstract

  def unpack(self, fmt):
    """Read exactly the bytes fmt requires and decode them.

    A single decoded value is returned bare rather than as a 1-tuple.
    """
    needed = struct.calcsize(fmt)
    values = struct.unpack(fmt, self.read(needed))
    if len(values) == 1:
      return values[0]
    return values

  def pack(self, fmt, *args):
    """Encode args per fmt and write the resulting bytes."""
    self.write(struct.pack(fmt, *args))
diff --git a/RC9/qpid/python/qpid/peer.py b/RC9/qpid/python/qpid/peer.py
new file mode 100644
index 0000000000..648f32ceef
--- /dev/null
+++ b/RC9/qpid/python/qpid/peer.py
@@ -0,0 +1,465 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+This module contains a skeletal peer implementation useful for
+implementing an AMQP server, client, or proxy. The peer implementation
+sorts incoming frames to their intended channels, and dispatches
+incoming method frames to a delegate.
+"""
+
+import thread, threading, traceback, socket, sys, logging
+from connection08 import EOF, Method, Header, Body, Request, Response
+from message import Message
+from queue import Queue, Closed as QueueClosed
+from content import Content
+from cStringIO import StringIO
+from time import time
+
class Sequence:
  """Thread-safe monotonically advancing counter."""

  def __init__(self, start, step = 1):
    # we should keep start for wrap around
    self._next = start
    self.step = step
    self.lock = thread.allocate_lock()

  def next(self):
    """Atomically return the current value and advance by step."""
    self.lock.acquire()
    try:
      current = self._next
      self._next = current + self.step
      return current
    finally:
      self.lock.release()
+
class Peer:
  """Runs the I/O threads for one connection.

  Incoming frames are sorted onto per-id Channel objects by the reader
  thread; complete method frames (plus any content) are handed to the
  delegate on the worker thread; the writer thread drains 'outgoing'.
  """

  def __init__(self, conn, delegate, channel_factory=None):
    self.conn = conn
    self.delegate = delegate
    self.outgoing = Queue(0)
    self.work = Queue(0)
    self.channels = {}
    self.lock = thread.allocate_lock()
    if channel_factory:
      self.channel_factory = channel_factory
    else:
      self.channel_factory = Channel

  def channel(self, id):
    """Return the Channel for id, creating it on first use."""
    self.lock.acquire()
    try:
      try:
        ch = self.channels[id]
      except KeyError:
        ch = self.channel_factory(id, self.outgoing, self.conn.spec)
        self.channels[id] = ch
    finally:
      self.lock.release()
    return ch

  def start(self):
    """Spawn the writer, reader and worker threads."""
    thread.start_new_thread(self.writer, ())
    thread.start_new_thread(self.reader, ())
    thread.start_new_thread(self.worker, ())

  def fatal(self, message=None):
    """Call when an unexpected exception occurs that will kill a thread."""
    if message: print >> sys.stderr, message
    self.closed("Fatal error: %s\n%s" % (message or "", traceback.format_exc()))

  def reader(self):
    # Reads frames off the socket and routes them to their channel;
    # EOF closes the work queue so the worker thread winds down.
    try:
      while True:
        try:
          frame = self.conn.read()
        except EOF, e:
          self.work.close()
          break
        ch = self.channel(frame.channel)
        ch.receive(frame, self.work)
    except:
      self.fatal()

  def closed(self, reason):
    # We must close the delegate first because closing channels
    # may wake up waiting threads and we don't want them to see
    # the delegate as open.
    self.delegate.closed(reason)
    for ch in self.channels.values():
      ch.closed(reason)

  def writer(self):
    # Drains 'outgoing' onto the connection, flushing after each write.
    try:
      while True:
        try:
          message = self.outgoing.get()
          self.conn.write(message)
        except socket.error, e:
          self.closed(e)
          break
        self.conn.flush()
    except:
      self.fatal()

  def worker(self):
    # Pops a channel queue off 'work', assembles the method frame plus
    # any content, and dispatches the resulting Message to the delegate.
    try:
      while True:
        queue = self.work.get()
        frame = queue.get()
        channel = self.channel(frame.channel)
        if frame.method_type.content:
          content = read_content(queue)
        else:
          content = None

        self.delegate(channel, Message(channel, frame, content))
    except QueueClosed:
      self.closed("worker closed")
    except:
      self.fatal()
+
class Requester:
  """Issues Request frames and routes their Responses back to listeners."""

  def __init__(self, writer):
    self.write = writer
    self.sequence = Sequence(1)
    self.mark = 0
    # maps request_id -> listener callback awaiting the response
    self.outstanding = {}

  def request(self, method, listener, content = None):
    """Send method as a new Request, remembering listener for the reply."""
    frame = Request(self.sequence.next(), self.mark, method)
    self.outstanding[frame.id] = listener
    self.write(frame, content)

  def receive(self, channel, frame):
    """Dispatch a Response frame to (and forget) its registered listener."""
    listener = self.outstanding.pop(frame.request_id)
    listener(channel, frame)
+
class Responder:
  """Issues Response frames (or bare methods when no Request framing)."""

  def __init__(self, writer):
    self.write = writer
    self.sequence = Sequence(1)

  def respond(self, method, batch, request):
    if isinstance(request, Method):
      # no request/response framing in play: just send the method
      self.write(method)
      return
    # allow batching from the frame at either end
    if batch < 0:
      frame = Response(self.sequence.next(), request.id + batch, -batch, method)
    else:
      frame = Response(self.sequence.next(), request.id, batch, method)
    self.write(frame)
+
# Raised when an operation is attempted on a channel that has been closed.
class Closed(Exception): pass
+
class Channel:
  """One AMQP channel multiplexed over a connection.

  Queues frames written by the peer threads, tracks request/response
  and execution-layer completion state, and dynamically exposes spec
  methods as attributes (see __getattr__).
  """

  def __init__(self, id, outgoing, spec):
    self.id = id
    self.outgoing = outgoing
    self.spec = spec
    self.incoming = Queue(0)
    self.responses = Queue(0)
    self.queue = None
    self._closed = False
    self.reason = None

    self.requester = Requester(self.write)
    self.responder = Responder(self.write)

    self.completion = OutgoingCompletion()
    self.incoming_completion = IncomingCompletion(self)
    self.futures = {}
    self.control_queue = Queue(0)  # used for incoming methods that apps may want to handle themselves

    self.invoker = self.invoke_method
    # execution layer only exists for 0-10 and the 99-0 development spec
    self.use_execution_layer = (spec.major == 0 and spec.minor == 10) or (spec.major == 99 and spec.minor == 0)
    self.synchronous = True

  def closed(self, reason):
    """Mark the channel closed and wake everything blocked on it."""
    if self._closed:
      return
    self._closed = True
    self.reason = reason
    self.incoming.close()
    self.responses.close()
    self.completion.close()
    self.incoming_completion.reset()
    for f in self.futures.values():
      f.put_response(self, reason)

  def write(self, frame, content = None):
    """Queue a frame (and any content) for the writer thread."""
    if self._closed:
      raise Closed(self.reason)
    frame.channel = self.id
    self.outgoing.put(frame)
    # methods declared to carry content always get one, even if empty
    if (isinstance(frame, (Method, Request))
        and content == None
        and frame.method_type.content):
      content = Content()
    if content != None:
      self.write_content(frame.method_type.klass, content)

  def write_content(self, klass, content):
    """Write the header/child/body frames for one content tree."""
    header = Header(klass, content.weight(), content.size(), content.properties)
    self.write(header)
    for child in content.children:
      self.write_content(klass, child)
    # should split up if content.body exceeds max frame size
    if content.body:
      self.write(Body(content.body))

  def receive(self, frame, work):
    """Called on the reader thread: route a frame to the right queue.

    Non-response methods/requests are announced on 'work' so the worker
    thread picks them up; responses go to the 'responses' queue.
    """
    if isinstance(frame, Method):
      if frame.method.response:
        self.queue = self.responses
      else:
        self.queue = self.incoming
        work.put(self.incoming)
    elif isinstance(frame, Request):
      self.queue = self.incoming
      work.put(self.incoming)
    elif isinstance(frame, Response):
      self.requester.receive(self, frame)
      if frame.method_type.content:
        self.queue = self.responses
      return
    self.queue.put(frame)

  def queue_response(self, channel, frame):
    channel.responses.put(frame.method)

  def request(self, method, listener, content = None):
    self.requester.request(method, listener, content)

  def respond(self, method, batch, request):
    self.responder.respond(method, batch, request)

  def invoke(self, type, args, kwargs):
    """Entry point for spec-generated methods: build and send the frame."""
    # open/close reset completion counters for the new channel lifetime
    if (type.klass.name in ["channel", "session"]) and (type.name in ["close", "open", "closed"]):
      self.completion.reset()
      self.incoming_completion.reset()
    self.completion.next_command(type)

    content = kwargs.pop("content", None)
    frame = Method(type, type.arguments(*args, **kwargs))
    return self.invoker(frame, content)

  # used for 0-9
  def invoke_reliable(self, frame, content = None):
    """Invoke via request/response framing (0-9 dialect)."""
    if not self.synchronous:
      future = Future()
      self.request(frame, future.put_response, content)
      if not frame.method.responses: return None
      else: return future

    self.request(frame, self.queue_response, content)
    if not frame.method.responses:
      if self.use_execution_layer and frame.method_type.is_l4_command():
        self.execution_sync()
        self.completion.wait()
        if self._closed:
          raise Closed(self.reason)
      return None
    try:
      resp = self.responses.get()
      if resp.method_type.content:
        return Message(self, resp, read_content(self.responses))
      else:
        return Message(self, resp)
    except QueueClosed, e:
      if self._closed:
        raise Closed(self.reason)
      else:
        raise e

  # used for 0-8 and 0-10
  def invoke_method(self, frame, content = None):
    """Invoke via plain method frames, waiting per the method's contract."""
    if frame.method.result:
      # the result arrives asynchronously via execution-result
      cmd_id = self.completion.command_id
      future = Future()
      self.futures[cmd_id] = future

    self.write(frame, content)

    try:
      # here we depend on all nowait fields being named nowait
      f = frame.method.fields.byname["nowait"]
      nowait = frame.args[frame.method.fields.index(f)]
    except KeyError:
      nowait = False

    try:
      if not nowait and frame.method.responses:
        # wait for the declared response method
        resp = self.responses.get()
        if resp.method.content:
          content = read_content(self.responses)
        else:
          content = None
        if resp.method in frame.method.responses:
          return Message(self, resp, content)
        else:
          raise ValueError(resp)
      elif frame.method.result:
        if self.synchronous:
          fr = future.get_response(timeout=10)
          if self._closed:
            raise Closed(self.reason)
          return fr
        else:
          return future
      elif self.synchronous and not frame.method.response \
           and self.use_execution_layer and frame.method.is_l4_command():
        # no explicit response: rely on execution-layer completion
        self.execution_sync()
        completed = self.completion.wait(timeout=10)
        if self._closed:
          raise Closed(self.reason)
        if not completed:
          self.closed("Timed-out waiting for completion of %s" % frame)
    except QueueClosed, e:
      if self._closed:
        raise Closed(self.reason)
      else:
        raise e

  def __getattr__(self, name):
    # lazily generate and cache a wrapper for each spec-defined method
    type = self.spec.method(name)
    if type == None: raise AttributeError(name)
    method = lambda *args, **kwargs: self.invoke(type, args, kwargs)
    self.__dict__[name] = method
    return method
+
def read_content(queue):
  """Assemble a Content object from header/body frames waiting on queue.

  Reads one Header frame, recursively reads header.weight child contents,
  then concatenates Body frame payloads until a frame marked eof.
  """
  header = queue.get()
  children = [read_content(queue) for i in range(header.weight)]
  buf = StringIO()
  frame = header
  while not frame.eof:
    frame = queue.get()
    buf.write(frame.content)
  return Content(buf.getvalue(), children, header.properties.copy())
+
class Future:
  """One-shot container for an asynchronous response."""

  def __init__(self):
    self.completed = threading.Event()

  def put_response(self, channel, response):
    """Store the response and wake any waiter."""
    self.response = response
    self.completed.set()

  def get_response(self, timeout=None):
    """Block up to timeout for the response; None if it never arrived."""
    self.completed.wait(timeout)
    if not self.completed.is_set():
      return None
    return self.response

  def is_complete(self):
    return self.completed.is_set()
+
class OutgoingCompletion:
  """
  Manages completion of outgoing commands i.e. command sent by this peer
  """

  def __init__(self):
    self.condition = threading.Condition()

    #todo, implement proper wraparound
    self.sequence = Sequence(0) #issues ids for outgoing commands
    self.command_id = -1 #last issued id
    self.mark = -1 #commands up to this mark are known to be complete
    self._closed = False

  def next_command(self, method):
    #the following test is a hack until the track/sub-channel is available
    if method.is_l4_command():
      self.command_id = self.sequence.next()

  def reset(self):
    self.sequence = Sequence(0) #reset counter

  def close(self):
    """Unblock all waiters permanently; used when the channel closes."""
    self.reset()
    self.condition.acquire()
    try:
      self._closed = True
      self.condition.notifyAll()
    finally:
      self.condition.release()

  def complete(self, mark):
    """Record that commands up to mark are complete and wake waiters."""
    self.condition.acquire()
    try:
      self.mark = mark
      #print "set mark to %s [%s] " % (self.mark, self)
      self.condition.notifyAll()
    finally:
      self.condition.release()

  def wait(self, point_of_interest=-1, timeout=None):
    """Block until the given command id (default: the last issued) is
    complete, the channel closes, or timeout elapses.  Returns True if
    the point of interest did complete."""
    if point_of_interest == -1: point_of_interest = self.command_id
    start_time = time()
    remaining = timeout
    self.condition.acquire()
    try:
      while not self._closed and point_of_interest > self.mark:
        #print "waiting for %s, mark = %s [%s]" % (point_of_interest, self.mark, self)
        self.condition.wait(remaining)
        # recompute the remaining budget after each (possibly spurious) wakeup
        if not self._closed and point_of_interest > self.mark and timeout:
          if (start_time + timeout) < time(): break
          else: remaining = timeout - (time() - start_time)
    finally:
      self.condition.release()
    return point_of_interest <= self.mark
+
class IncomingCompletion:
  """
  Manages completion of incoming commands i.e. command received by this peer
  """

  def __init__(self, channel):
    self.sequence = Sequence(0) # issues ids for incoming commands
    self.mark = -1 # id of last command whose completion was sent to the other peer
    self.channel = channel

  def reset(self):
    self.sequence = Sequence(0) # restart the counter (wraparound is still a todo)

  def complete(self, mark, cumulative=True):
    """Tell the other peer that commands up to mark (cumulative) or just
    the single command mark (ranged) are complete."""
    if not cumulative:
      #TODO: record and manage the ranges properly
      explicit = [mark, mark]
      if (self.mark == -1):#hack until wraparound is implemented
        self.channel.execution_complete(cumulative_execution_mark=0xFFFFFFFF, ranged_execution_set=explicit)
      else:
        self.channel.execution_complete(cumulative_execution_mark=self.mark, ranged_execution_set=explicit)
      return
    if mark > self.mark:
      self.mark = mark
      self.channel.execution_complete(cumulative_execution_mark=self.mark)
diff --git a/RC9/qpid/python/qpid/queue.py b/RC9/qpid/python/qpid/queue.py
new file mode 100644
index 0000000000..c9f4d1d1d0
--- /dev/null
+++ b/RC9/qpid/python/qpid/queue.py
@@ -0,0 +1,86 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+This module augments the standard python multithreaded Queue
+implementation to add a close() method so that threads blocking on the
+content of a queue can be notified if the queue is no longer in use.
+"""
+
+from Queue import Queue as BaseQueue, Empty, Full
+from threading import Thread
+from exceptions import Closed
+
+class Queue(BaseQueue):
+
+ END = object()
+ STOP = object()
+
+ def __init__(self, *args, **kwargs):
+ BaseQueue.__init__(self, *args, **kwargs)
+ self.error = None
+ self.listener = None
+ self.exc_listener = None
+ self.thread = None
+
+ def close(self, error = None):
+ self.error = error
+ self.put(Queue.END)
+ if self.thread is not None:
+ self.thread.join()
+ self.thread = None
+
+ def get(self, block = True, timeout = None):
+ result = BaseQueue.get(self, block, timeout)
+ if result == Queue.END:
+ # this guarantees that any other waiting threads or any future
+ # calls to get will also result in a Closed exception
+ self.put(Queue.END)
+ raise Closed(self.error)
+ else:
+ return result
+
+ def listen(self, listener, exc_listener = None):
+ if listener is None and exc_listener is not None:
+ raise ValueError("cannot set exception listener without setting listener")
+
+ if listener is None:
+ if self.thread is not None:
+ self.put(Queue.STOP)
+ self.thread.join()
+ self.thread = None
+
+ self.listener = listener
+ self.exc_listener = exc_listener
+
+ if listener is not None and self.thread is None:
+ self.thread = Thread(target = self.run)
+ self.thread.setDaemon(True)
+ self.thread.start()
+
+ def run(self):
+ while True:
+ try:
+ o = self.get()
+ if o == Queue.STOP: break
+ self.listener(o)
+ except Closed, e:
+ if self.exc_listener is not None:
+ self.exc_listener(e)
+ break
diff --git a/RC9/qpid/python/qpid/reference.py b/RC9/qpid/python/qpid/reference.py
new file mode 100644
index 0000000000..48ecb67656
--- /dev/null
+++ b/RC9/qpid/python/qpid/reference.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+Support for amqp 'reference' content (as opposed to inline content)
+"""
+
+import threading
+from queue import Queue, Closed
+
# Raised when operating on a reference id that has not been opened.
class NotOpened(Exception): pass

# Raised when opening a reference id that is already open.
class AlreadyOpened(Exception): pass
+
+"""
+A representation of a reference id; can be passed wherever amqp
+content is required in place of inline data
+"""
class ReferenceId:
  """Wraps a reference id so it can stand in for inline content wherever
  amqp content is required."""

  def __init__(self, id):
    self.id = id
+
+"""
+Holds content received through 'reference api'. Instances of this
+class will be placed in the consumers queue on receiving a transfer
+(assuming the reference has been opened). Data can be retrieved in
+chunks (as append calls are received) or in full (after reference has
been closed, signalling that the data is complete).
+"""
+
+class Reference:
+
+ def __init__(self, id):
+ self.id = id
+ self.chunks = Queue(0)
+
+ def close(self):
+ self.chunks.close()
+
+ def append(self, bytes):
+ self.chunks.put(bytes)
+
+ def get_chunk(self):
+ return self.chunks.get()
+
+ def get_complete(self):
+ data = ""
+ for chunk in self:
+ data += chunk
+ return data
+
+ def next(self):
+ try:
+ return self.get_chunk()
+ except Closed, e:
+ raise StopIteration
+
+ def __iter__(self):
+ return self
+
+"""
+Manages a set of opened references. New references can be opened and
+existing references can be retrieved or closed.
+"""
class References:
  """Registry of currently open references, keyed by id."""

  def __init__(self):
    self.map = {}
    self.lock = threading.Lock()

  def get(self, id):
    """Return the open reference for id; raises NotOpened when absent."""
    self.lock.acquire()
    try:
      if id not in self.map:
        raise NotOpened()
      return self.map[id]
    finally:
      self.lock.release()

  def open(self, id):
    """Create a new reference for id; raises AlreadyOpened on duplicates."""
    self.lock.acquire()
    try:
      if id in self.map: raise AlreadyOpened()
      self.map[id] = Reference(id)
    finally:
      self.lock.release()

  def close(self, id):
    """Close the reference for id and drop it from the registry."""
    self.get(id).close()
    self.lock.acquire()
    try:
      del self.map[id]
    finally:
      self.lock.release()
+
diff --git a/RC9/qpid/python/qpid/session.py b/RC9/qpid/python/qpid/session.py
new file mode 100644
index 0000000000..4a7ecbc28a
--- /dev/null
+++ b/RC9/qpid/python/qpid/session.py
@@ -0,0 +1,379 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from threading import Condition, RLock, Lock, currentThread
+from invoker import Invoker
+from datatypes import RangedSet, Struct, Future
+from codec010 import StringCodec
+from assembler import Segment
+from queue import Queue
+from datatypes import Message, serial
+from util import wait, notify
+from exceptions import *
+from logging import getLogger
+
+log = getLogger("qpid.io.cmd")
+msg = getLogger("qpid.io.msg")
+
# Root of all session-level errors.
class SessionException(Exception): pass
# Raised when invoking a command on a session that is closing/closed.
class SessionClosed(SessionException): pass
# Raised when invoking a command on a session with no attached channel.
class SessionDetached(SessionException): pass
+
# Default delegate factory for the client side of a session.
def client(*args):
  return Client(*args)

# Delegate factory for the server side of a session.
def server(*args):
  return Server(*args)
+
+INCOMPLETE = object()
+
class Session(Invoker):
  """An AMQP 0-10 session.

  Encodes spec-generated commands into segments on the attached channel
  (see do_invoke), reassembles incoming segments into commands for the
  delegate (see received/dispatch), and tracks command completion and
  execution results.
  """

  def __init__(self, name, spec, auto_sync=True, timeout=10, delegate=client):
    self.name = name
    self.spec = spec
    self.auto_sync = auto_sync
    self.timeout = timeout
    self.channel = None
    self.invoke_lock = Lock()
    self._closing = False
    self._closed = False

    self.condition = Condition()

    # send_id: a session.command-point must precede the first segment
    self.send_id = True
    self.receiver = Receiver(self)
    self.sender = Sender(self)

    self.lock = RLock()
    self._incoming = {}      # destination -> Incoming queue
    self.results = {}        # command id -> Future awaiting execution-result
    self.exceptions = []     # execution exceptions reported by the peer

    self.assembly = None     # segments of the command currently being received

    self.delegate = delegate(self)

  def incoming(self, destination):
    """Return (creating on demand) the Incoming queue for destination."""
    self.lock.acquire()
    try:
      queue = self._incoming.get(destination)
      if queue == None:
        queue = Incoming(self, destination)
        self._incoming[destination] = queue
      return queue
    finally:
      self.lock.release()

  def error(self):
    """Return the accumulated exception(s): None, one, or a tuple."""
    exc = self.exceptions[:]
    if len(exc) == 0:
      return None
    elif len(exc) == 1:
      return exc[0]
    else:
      return tuple(exc)

  def sync(self, timeout=None):
    """Block until everything sent so far is complete (or errored)."""
    ch = self.channel
    # waiting on the connection's own thread would never wake up
    if ch is not None and currentThread() == ch.connection.thread:
      raise SessionException("deadlock detected")
    if not self.auto_sync:
      self.execution_sync(sync=True)
    last = self.sender.next_id - 1
    if not wait(self.condition, lambda:
                  last in self.sender._completed or self.exceptions,
                timeout):
      raise Timeout()
    if self.exceptions:
      raise SessionException(self.error())

  def close(self, timeout=None):
    """Detach the session and wait for the close to take effect."""
    self.invoke_lock.acquire()
    try:
      self._closing = True
      self.channel.session_detach(self.name)
    finally:
      self.invoke_lock.release()
    if not wait(self.condition, lambda: self._closed, timeout):
      raise Timeout()

  def closed(self):
    """Fail outstanding results, close incoming queues, mark closed."""
    self.lock.acquire()
    try:
      if self._closed: return

      error = self.error()
      for id in self.results:
        f = self.results[id]
        f.error(error)
      self.results.clear()

      for q in self._incoming.values():
        q.close(error)

      self._closed = True
      notify(self.condition)
    finally:
      self.lock.release()

  def resolve_method(self, name):
    """Invoker hook: map an attribute name to a command, struct or enum."""
    cmd = self.spec.instructions.get(name)
    if cmd is not None and cmd.track == self.spec["track.command"].value:
      return self.METHOD, cmd
    else:
      # XXX
      for st in self.spec.structs.values():
        if st.name == name:
          return self.METHOD, st
      if self.spec.structs_by_name.has_key(name):
        return self.METHOD, self.spec.structs_by_name[name]
      if self.spec.enums.has_key(name):
        return self.VALUE, self.spec.enums[name]
    return self.ERROR, None

  def invoke(self, type, args, kwargs):
    # XXX
    if not hasattr(type, "track"):
      # structs are simply constructed, not sent
      return type.new(args, kwargs)

    self.invoke_lock.acquire()
    try:
      return self.do_invoke(type, args, kwargs)
    finally:
      self.invoke_lock.release()

  def do_invoke(self, type, args, kwargs):
    """Encode and send one command (with optional message payload)."""
    if self._closing:
      raise SessionClosed()

    if self.channel == None:
      raise SessionDetached()

    # a trailing positional arg or 'message' keyword carries the payload
    if type.segments:
      if len(args) == len(type.fields) + 1:
        message = args[-1]
        args = args[:-1]
      else:
        message = kwargs.pop("message", None)
    else:
      message = None

    hdr = Struct(self.spec["session.header"])
    hdr.sync = self.auto_sync or kwargs.pop("sync", False)

    cmd = type.new(args, kwargs)
    sc = StringCodec(self.spec)
    sc.write_command(hdr, cmd)

    seg = Segment(True, (message == None or
                         (message.headers == None and message.body == None)),
                  type.segment_type, type.track, self.channel.id, sc.encoded)

    if type.result:
      # register the Future before sending so the result can't race us
      result = Future(exception=SessionException)
      self.results[self.sender.next_id] = result

    self.send(seg)

    log.debug("SENT %s %s %s", seg.id, hdr, cmd)

    if message != None:
      if message.headers != None:
        sc = StringCodec(self.spec)
        for st in message.headers:
          sc.write_struct32(st)
        seg = Segment(False, message.body == None, self.spec["segment_type.header"].value,
                      type.track, self.channel.id, sc.encoded)
        self.send(seg)
      if message.body != None:
        seg = Segment(False, True, self.spec["segment_type.body"].value,
                      type.track, self.channel.id, message.body)
        self.send(seg)
      msg.debug("SENT %s", message)

    if type.result:
      if self.auto_sync:
        return result.get(self.timeout)
      else:
        return result
    elif self.auto_sync:
      self.sync(self.timeout)

  def received(self, seg):
    """Accumulate incoming segments; dispatch when the command is whole."""
    self.receiver.received(seg)
    if seg.first:
      assert self.assembly == None
      self.assembly = []
    self.assembly.append(seg)
    if seg.last:
      self.dispatch(self.assembly)
      self.assembly = None

  def dispatch(self, assembly):
    """Decode a complete command and hand it to the delegate."""
    segments = assembly[:]

    hdr, cmd = assembly.pop(0).decode(self.spec)
    log.debug("RECV %s %s %s", cmd.id, hdr, cmd)

    args = []

    # decode each declared payload segment, None for the absent ones
    for st in cmd._type.segments:
      if assembly:
        seg = assembly[0]
        if seg.type == st.segment_type:
          args.append(seg.decode(self.spec))
          assembly.pop(0)
          continue
      args.append(None)

    assert len(assembly) == 0

    attr = cmd._type.qname.replace(".", "_")
    result = getattr(self.delegate, attr)(cmd, *args)

    if cmd._type.result:
      self.execution_result(cmd.id, result)

    # a delegate returning INCOMPLETE defers completion to the app
    if result is not INCOMPLETE:
      for seg in segments:
        self.receiver.completed(seg)
      # XXX: don't forget to obey sync for manual completion as well
      if hdr.sync:
        self.channel.session_completed(self.receiver._completed)

  def send(self, seg):
    self.sender.send(seg)

  def __str__(self):
    return '<Session: %s, %s>' % (self.name, self.channel)

  def __repr__(self):
    return str(self)
+
class Receiver:
  """Assigns ids/offsets to incoming segments and tracks completion."""

  def __init__(self, session):
    self.session = session
    self.next_id = None
    self.next_offset = None
    self._completed = RangedSet()

  def received(self, seg):
    """Stamp seg with the expected id/offset and advance the position."""
    if self.next_id == None or self.next_offset == None:
      raise Exception("todo")
    seg.id = self.next_id
    seg.offset = self.next_offset
    if not seg.last:
      self.next_offset += len(seg.payload)
    else:
      self.next_id += 1
      self.next_offset = 0

  def completed(self, seg):
    """Record completion of the command seg belongs to."""
    if seg.id == None:
      raise ValueError("cannot complete unidentified segment")
    if seg.last:
      self._completed.add(seg.id)

  def known_completed(self, commands):
    """Forget completed ranges the other peer already knows about."""
    remaining = RangedSet()
    for c in self._completed.ranges:
      for kc in commands.ranges:
        if c.lower in kc and c.upper in kc:
          break
      else:
        remaining.add_range(c)
    self._completed = remaining
+
class Sender:
  """Stamps outgoing segments with ids/offsets and retains them until
  the peer confirms their completion."""

  def __init__(self, session):
    self.session = session
    self.next_id = serial(0)
    self.next_offset = 0
    self.segments = []
    self._completed = RangedSet()

  def send(self, seg):
    """Number seg, retain it, and write it to the connection."""
    seg.id = self.next_id
    seg.offset = self.next_offset
    if not seg.last:
      self.next_offset += len(seg.payload)
    else:
      self.next_id += 1
      self.next_offset = 0
    self.segments.append(seg)
    if self.session.send_id:
      # first segment after attach: tell the peer where numbering starts
      self.session.send_id = False
      self.session.channel.session_command_point(seg.id, seg.offset)
    self.session.channel.connection.write_segment(seg)

  def completed(self, commands):
    """Drop retained segments covered by commands and record the ranges."""
    self.segments[:] = [seg for seg in self.segments if seg.id not in commands]
    for rng in commands.ranges:
      self._completed.add(rng.lower, rng.upper)
+
class Incoming(Queue):
  """Queue of messages arriving for one destination, with flow control."""

  def __init__(self, session, destination):
    Queue.__init__(self)
    self.session = session
    self.destination = destination

  def start(self):
    # Switch to credit-based flow and grant effectively unlimited credit
    # for every credit unit the spec defines.
    self.session.message_set_flow_mode(self.destination, self.session.flow_mode.credit)
    for unit in self.session.credit_unit.values():
      self.session.message_flow(self.destination, unit, 0xFFFFFFFFL)

  def stop(self):
    # Cancel the subscription and detach any installed listener.
    self.session.message_cancel(self.destination)
    self.listen(None)
+
class Delegate:
  """Base dispatch target for incoming session commands."""

  def __init__(self, session):
    self.session = session

  #XXX: do something with incoming accepts
  def message_accept(self, ma): None

  def execution_result(self, er):
    # Resolve the Future registered when the command was invoked.
    future = self.session.results.pop(er.command_id)
    future.set(er.value)

  def execution_exception(self, ex):
    # Collected exceptions surface via Session.sync()/error().
    self.session.exceptions.append(ex)
+
class Client(Delegate):
  """Client-side delegate: routes transfers onto per-destination queues."""

  def message_transfer(self, cmd, headers, body):
    m = Message(body)
    m.headers = headers
    m.id = cmd.id
    messages = self.session.incoming(cmd.destination)
    messages.put(m)
    msg.debug("RECV %s", m)
    # INCOMPLETE defers command completion until the app accepts it.
    return INCOMPLETE
diff --git a/RC9/qpid/python/qpid/spec.py b/RC9/qpid/python/qpid/spec.py
new file mode 100644
index 0000000000..e6d914044c
--- /dev/null
+++ b/RC9/qpid/python/qpid/spec.py
@@ -0,0 +1,59 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+This module loads protocol metadata into python objects. It provides
+access to spec metadata via a python object model, and can also
+dynamically create python methods, classes, and modules based on the
+spec metadata. All the generated methods have proper signatures and
+doc strings based on the spec metadata so the python help system can
+be used to browse the spec documentation. The generated methods all
+dispatch to the self.invoke(meth, args) callback of the containing
+class so that the generated code can be reused in a variety of
+situations.
+"""
+
+import os, mllib, spec08, spec010
+
def default():
  """Return the path of the default AMQP spec file.

  The AMQP_SPEC environment variable takes precedence; otherwise the
  amqp_spec value from a qpid_config module on the PYTHONPATH is used.
  Raises Exception when neither source is available.
  """
  spec = os.environ.get("AMQP_SPEC")
  if spec is not None:
    return spec
  try:
    from qpid_config import amqp_spec
    return amqp_spec
  except ImportError:
    raise Exception("unable to locate the amqp specification, please set "
                    "the AMQP_SPEC environment variable or supply "
                    "qpid_config.py on the PYTHONPATH")
+
def load(specfile, *errata):
  """Load specfile (plus errata files), dispatching on the spec version.

  0-10 specs go to spec010.load, everything else to spec08.load.
  Raises IOError for the first path that does not exist.
  """
  for path in (specfile,) + errata:
    if not os.path.exists(path):
      raise IOError("No such file or directory: '%s'" % path)

  doc = mllib.xml_parse(specfile)
  if doc["amqp/@major"] == "0" and doc["amqp/@minor"] == "10":
    return spec010.load(specfile, *errata)
  return spec08.load(specfile, *errata)
diff --git a/RC9/qpid/python/qpid/spec010.py b/RC9/qpid/python/qpid/spec010.py
new file mode 100644
index 0000000000..cbc85a5e8b
--- /dev/null
+++ b/RC9/qpid/python/qpid/spec010.py
@@ -0,0 +1,693 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import os, cPickle, datatypes, datetime
+from codec010 import StringCodec
+from util import mtime, fill
+
class Node:
  """Base class for spec tree nodes.

  Tracks ordered children, a name index (filled in by Named.register),
  docs, and rules, and drives the two-phase register/resolve walk.
  """

  def __init__(self, children):
    self.children = children
    self.named = {}   # name -> child, populated during register()
    self.docs = []
    self.rules = []

  def register(self):
    # phase one: each child attaches itself to this node
    for child in self.children:
      child.register(self)

  def resolve(self):
    # phase two: children resolve symbolic references
    for child in self.children:
      child.resolve()

  def __getitem__(self, name):
    # dotted lookup, e.g. spec["track.command"]; only the first dot splits
    node = self.named
    for part in name.split(".", 1):
      node = node[part]
    return node

  def __iter__(self):
    return iter(self.children)
+
class Anonymous:
  """A grouping construct with no identity of its own.

  Its children register and resolve directly against the enclosing node.
  """

  def __init__(self, children):
    self.children = children

  def register(self, node):
    # children attach to the *parent* node, not to this wrapper
    for child in self.children:
      child.register(node)

  def resolve(self):
    for child in self.children:
      child.resolve()
+
class Named:
  """Mixin for nodes with a name.

  On register() the node inherits spec/class context from its parent,
  indexes itself in the parent's named map, and builds its dotted
  qualified name.
  """

  def __init__(self, name):
    self.name = name
    self.qname = None  # set by register()

  def register(self, node):
    self.spec = node.spec
    self.klass = node.klass
    node.named[self.name] = self
    if node.qname:
      self.qname = "%s.%s" % (node.qname, self.name)
    else:
      # parent is the root: our name is already fully qualified
      self.qname = self.name

  def __str__(self):
    return self.qname

  def __repr__(self):
    return self.qname
+
class Lookup:
  """Mixin providing class-scoped-then-spec-scoped name resolution."""

  def lookup(self, name):
    """Resolve name in the enclosing class first, then in the spec."""
    value = None
    if self.klass:
      try:
        value = self.klass[name]
      except KeyError:
        pass
    # 'is None' rather than truthiness: a legitimately falsy entry found
    # in the class scope must not be shadowed by the spec-level one
    if value is None:
      value = self.spec[name]
    return value
+
class Coded:
  """Mixin for nodes carrying a numeric wire code (may be None)."""

  def __init__(self, code):
    self.code = code
+
class Constant(Named, Node):
  """A named constant; appended to the parent's constants list."""

  def __init__(self, name, value, children):
    Named.__init__(self, name)
    Node.__init__(self, children)
    self.value = value

  def register(self, node):
    Named.register(self, node)
    node.constants.append(self)
    Node.register(self)  # register children (docs etc.)
+
class Type(Named, Node):
  """Base class for spec types (primitives, domains, composites)."""

  def __init__(self, name, children):
    Named.__init__(self, name)
    Node.__init__(self, children)

  def is_present(self, value):
    # a field of this type is considered present iff its value is non-None
    return value != None

  def register(self, node):
    Named.register(self, node)
    Node.register(self)
+
class Primitive(Coded, Type):
  """A primitive wire type, encoded/decoded via codec read_/write_ methods."""

  def __init__(self, name, code, fixed, variable, children):
    Coded.__init__(self, code)
    Type.__init__(self, name, children)
    self.fixed = fixed        # fixed width in bytes; 0 for bit-like types
    self.variable = variable  # presumably the size-prefix width -- confirm against codec010

  def register(self, node):
    Type.register(self, node)
    if self.code is not None:
      self.spec.types[self.code] = self

  def is_present(self, value):
    if self.fixed == 0:
      # zero-width types (e.g. bit): presence is the truthiness of the value
      return value
    else:
      return Type.is_present(self, value)

  def encode(self, codec, value):
    # dispatch by name, e.g. codec.write_uint16(value)
    getattr(codec, "write_%s" % self.name)(value)

  def decode(self, codec):
    return getattr(codec, "read_%s" % self.name)()
+
class Domain(Type, Lookup):
  """A named alias for another type, optionally with enumerated choices."""

  def __init__(self, name, type, children):
    Type.__init__(self, name, children)
    self.type = type    # a type *name* until resolve() replaces it with the Type
    self.choices = {}   # value -> Choice, filled in by Choice.register

  def resolve(self):
    self.type = self.lookup(self.type)
    Node.resolve(self)

  def encode(self, codec, value):
    # delegate to the underlying type
    self.type.encode(codec, value)

  def decode(self, codec):
    return self.type.decode(codec)
+
class Enum:
  """Runtime enumeration for a domain.

  Choice.register sets one attribute per choice and extends the parallel
  _names/_values tuples.
  """

  def __init__(self, name):
    self.name = name
    self._names = ()
    self._values = ()

  def values(self):
    return self._values

  def __repr__(self):
    listing = ", ".join(self._names)
    return "%s(%s)" % (self.name, listing)
+
class Choice(Named, Node):
  """One enumerated value of a Domain; also mirrored into the spec-level
  Enum object for its domain."""

  def __init__(self, name, value, children):
    Named.__init__(self, name)
    Node.__init__(self, children)
    self.value = value

  def register(self, node):
    Named.register(self, node)
    node.choices[self.value] = self
    Node.register(self)
    # find-or-create the Enum for the enclosing domain (node.name)
    try:
      enum = node.spec.enums[node.name]
    except KeyError:
      enum = Enum(node.name)
      node.spec.enums[node.name] = enum
    setattr(enum, self.name, self.value)
    enum._names += (self.name,)
    enum._values += (self.value,)
+
class Composite(Type, Coded):
  """A structured type with a packed optional-field bitmap; structs,
  commands, and controls all share this encoding."""

  def __init__(self, name, label, code, size, pack, children):
    Coded.__init__(self, code)
    Type.__init__(self, name, children)
    self.label = label
    self.fields = []   # Field children in declaration order
    self.size = size   # size-prefix width, passed to codec read_size/write_size
    self.pack = pack   # number of flag bytes in the presence bitmap

  def new(self, args, kwargs):
    # construct a runtime Struct value of this type
    return datatypes.Struct(self, *args, **kwargs)

  def decode(self, codec):
    codec.read_size(self.size)
    if self.code is not None:
      code = codec.read_uint16()
      assert self.code == code
    return datatypes.Struct(self, **self.decode_fields(codec))

  def decode_fields(self, codec):
    # read the presence bitmap: 'pack' bytes, least significant byte first
    flags = 0
    for i in range(self.pack):
      flags |= (codec.read_uint8() << 8*i)

    result = {}

    # decode present fields in order; absent fields become None
    for i in range(len(self.fields)):
      f = self.fields[i]
      if flags & (0x1 << i):
        result[f.name] = f.type.decode(codec)
      else:
        result[f.name] = None
    return result

  def encode(self, codec, value):
    # encode into a scratch codec first so the total size can be prefixed
    sc = StringCodec(self.spec)
    if self.code is not None:
      sc.write_uint16(self.code)
    self.encode_fields(sc, value)
    codec.write_size(self.size, len(sc.encoded))
    codec.write(sc.encoded)

  def encode_fields(self, codec, values):
    # first pass: compute the presence bitmap
    flags = 0
    for i in range(len(self.fields)):
      f = self.fields[i]
      if f.type.is_present(values[f.name]):
        flags |= (0x1 << i)
    for i in range(self.pack):
      codec.write_uint8((flags >> 8*i) & 0xFF)
    # second pass: encode only the present fields
    for i in range(len(self.fields)):
      f = self.fields[i]
      if flags & (0x1 << i):
        f.type.encode(codec, values[f.name])

  def docstring(self):
    # build help text from the node's label/docs plus each field's docs
    docs = []
    if self.label:
      docs.append(self.label)
    docs += [d.text for d in self.docs]
    s = "\n\n".join([fill(t, 2) for t in docs])
    for f in self.fields:
      fdocs = []
      if f.label:
        fdocs.append(f.label)
      else:
        fdocs.append("")
      fdocs += [d.text for d in f.docs]
      s += "\n\n" + "\n\n".join([fill(fdocs[0], 4, f.name)] +
                                [fill(t, 4) for t in fdocs[1:]])
    return s
+
+
class Field(Named, Node, Lookup):
  """A field of a composite type; its type name is resolved lazily."""

  def __init__(self, name, label, type, children):
    Named.__init__(self, name)
    Node.__init__(self, children)
    self.label = label
    self.type = type        # a type *name* until resolve() runs
    self.exceptions = []    # filled in by Exception.register

  def default(self):
    return None

  def register(self, node):
    Named.register(self, node)
    node.fields.append(self)
    Node.register(self)

  def resolve(self):
    self.type = self.lookup(self.type)
    Node.resolve(self)

  def __str__(self):
    return "%s: %s" % (self.qname, self.type.qname)
+
class Struct(Composite):
  """A free-standing composite; indexed spec-wide by code and by name."""

  def register(self, node):
    Composite.register(self, node)
    if self.code is not None:
      self.spec.structs[self.code] = self
    self.spec.structs_by_name[self.name] = self
    # pyname/pydoc drive the generated python API for this struct
    self.pyname = self.name
    self.pydoc = self.docstring()

  def __str__(self):
    fields = ",\n  ".join(["%s: %s" % (f.name, f.type.qname)
                           for f in self.fields])
    return "%s {\n  %s\n}" % (self.qname, fields)
+
class Segment:
  """Mixin for nodes that occupy a frame segment (instructions, header,
  body); segment_type is filled in by the concrete register()."""

  def __init__(self):
    self.segment_type = None

  def register(self, node):
    self.spec = node.spec
    self.klass = node.klass
    node.segments.append(self)
    # NOTE(review): unbound call -- only valid because every concrete
    # Segment subclass also inherits Node; confirm no bare Segment is
    # ever registered
    Node.register(self)
+
class Instruction(Composite, Segment):
  """Base for commands and controls: a coded composite (size=0, pack=2)
  that also occupies a frame segment."""

  def __init__(self, name, label, code, children):
    Composite.__init__(self, name, label, code, 0, 2, children)
    Segment.__init__(self)
    self.track = None     # set by Command/Control register()
    self.handlers = []    # handler roles, appended by Implement.register

  def __str__(self):
    return "%s(%s)" % (self.qname, ", ".join(["%s: %s" % (f.name, f.type.qname)
                                              for f in self.fields]))

  def register(self, node):
    Composite.register(self, node)
    # e.g. "session.attach" -> "session_attach" for the generated API
    self.pyname = self.qname.replace(".", "_")
    self.pydoc = self.docstring()
    self.spec.instructions[self.pyname] = self
+
class Control(Instruction):
  """A control instruction (session/connection layer)."""

  # Parameter order fixed to (name, label, code) to match Instruction and
  # Command.  The previous signature named them (name, code, label) while
  # its only caller (Loader.do_control) passed (name, label, code)
  # positionally, so the old names were misleading but the runtime
  # behavior was identical; positional callers are unaffected.
  def __init__(self, name, label, code, children):
    Instruction.__init__(self, name, label, code, children)
    self.response = None

  def register(self, node):
    Instruction.register(self, node)
    node.controls.append(self)
    self.spec.controls[self.code] = self
    self.segment_type = self.spec["segment_type.control"].value
    self.track = self.spec["track.control"].value
+
class Command(Instruction):
  """A command instruction, optionally carrying result/exception/segment
  metadata."""

  def __init__(self, name, label, code, children):
    Instruction.__init__(self, name, label, code, children)
    self.result = None      # set by Result.register
    self.exceptions = []    # appended by Exception.register
    self.segments = []      # appended by Segment.register (header/body)

  def register(self, node):
    Instruction.register(self, node)
    node.commands.append(self)
    self.spec.commands[self.code] = self
    self.segment_type = self.spec["segment_type.command"].value
    self.track = self.spec["track.command"].value
+
class Header(Segment, Node):
  """The header segment of a command; holds Entry children."""

  def __init__(self, children):
    Segment.__init__(self)
    Node.__init__(self, children)
    self.entries = []   # appended by Entry.register

  def register(self, node):
    # NOTE(review): Segment.register already calls Node.register(self),
    # and so does the explicit call below -- children appear to be
    # registered twice (e.g. entries duplicated); confirm intended
    Segment.register(self, node)
    self.segment_type = self.spec["segment_type.header"].value
    Node.register(self)
+
class Entry(Lookup):
  """One struct reference inside a header segment."""

  def __init__(self, type):
    self.type = type    # a type *name* until resolve() runs

  def register(self, node):
    self.spec = node.spec
    self.klass = node.klass
    node.entries.append(self)

  def resolve(self):
    self.type = self.lookup(self.type)
+
class Body(Segment, Node):
  """The (opaque) body segment of a command."""

  def __init__(self, children):
    Segment.__init__(self)
    Node.__init__(self, children)

  def register(self, node):
    # NOTE(review): Segment.register also calls Node.register(self); the
    # second call below re-registers children -- confirm intended
    Segment.register(self, node)
    self.segment_type = self.spec["segment_type.body"].value
    Node.register(self)

  def resolve(self): pass
+
class Class(Named, Coded, Node):
  """An AMQP class grouping controls and commands."""

  def __init__(self, name, code, children):
    Named.__init__(self, name)
    Coded.__init__(self, code)
    Node.__init__(self, children)
    self.controls = []
    self.commands = []

  def register(self, node):
    Named.register(self, node)
    # a class is its own lookup scope for descendants
    self.klass = self
    node.classes.append(self)
    Node.register(self)
+
class Doc:
  """A piece of documentation text attached to a spec node."""

  def __init__(self, type, title, text):
    self.type = type
    self.title = title
    self.text = text

  def register(self, node):
    node.docs.append(self)

  def resolve(self):
    # documentation references nothing, so there is nothing to resolve
    pass
+
class Role(Named, Node):
  """A named protocol role (e.g. client/server)."""

  def __init__(self, name, children):
    Named.__init__(self, name)
    Node.__init__(self, children)

  def register(self, node):
    Named.register(self, node)
    Node.register(self)
+
class Rule(Named, Node):
  """A conformance rule; appended to the parent's rules list."""

  def __init__(self, name, children):
    Named.__init__(self, name)
    Node.__init__(self, children)

  def register(self, node):
    Named.register(self, node)
    node.rules.append(self)
    Node.register(self)
+
# NOTE(review): shadows the builtin Exception within this module
class Exception(Named, Node):
  """A declared failure mode of a command/field, with its error code."""

  def __init__(self, name, error_code, children):
    Named.__init__(self, name)
    Node.__init__(self, children)
    self.error_code = error_code

  def register(self, node):
    Named.register(self, node)
    node.exceptions.append(self)
    Node.register(self)
+
class Spec(Node):
  """Root of the 0-10 spec tree; indexes types, instructions, structs,
  and enums spec-wide."""

  # python type -> spec encoding name, consulted by encoding() below
  ENCODINGS = {
    basestring: "vbin16",
    int: "int64",
    long: "int64",
    float: "float",
    None.__class__: "void",
    list: "list",
    tuple: "list",
    dict: "map",
    datatypes.timestamp: "datetime",
    datetime.datetime: "datetime"
    }

  def __init__(self, major, minor, port, children):
    Node.__init__(self, children)
    self.major = major
    self.minor = minor
    self.port = port
    self.constants = []
    self.classes = []
    self.types = {}             # code -> Primitive
    self.qname = None           # root has no qualified name
    self.spec = self
    self.klass = None
    self.instructions = {}      # pyname -> Instruction
    self.controls = {}          # code -> Control
    self.commands = {}          # code -> Command
    self.structs = {}           # code -> Struct
    self.structs_by_name = {}
    self.enums = {}             # domain name -> Enum

  def encoding(self, klass):
    """Return the spec type used to encode python class klass, searching
    base classes depth-first; returns None when no mapping exists."""
    if Spec.ENCODINGS.has_key(klass):
      return self.named[Spec.ENCODINGS[klass]]
    for base in klass.__bases__:
      result = self.encoding(base)
      if result != None:
        return result
+
class Implement:
  """Declares which handler role must implement the enclosing instruction."""

  def __init__(self, handle):
    self.handle = handle

  def register(self, node):
    node.handlers.append(self.handle)

  def resolve(self):
    # nothing to resolve
    pass
+
class Response(Node):
  """A declared response to a control.

  NOTE(review): register() only registers children; the response itself
  is not indexed anywhere -- confirm this is intentional.
  """

  def __init__(self, name, children):
    Node.__init__(self, children)
    self.name = name

  def register(self, node):
    Node.register(self)
+
class Result(Node, Lookup):
  """The result type of a command; attaches itself as node.result."""

  def __init__(self, type, children):
    self.type = type    # a type *name* until resolve() runs
    Node.__init__(self, children)

  def register(self, node):
    node.result = self
    # borrow identity/scope from the enclosing command
    self.qname = node.qname
    self.klass = node.klass
    self.spec = node.spec
    Node.register(self)

  def resolve(self):
    self.type = self.lookup(self.type)
    Node.resolve(self)
+
+import mllib
+
def num(s):
  """Parse s as an integer (base inferred from prefix, e.g. 0x); return
  None when s is empty or None."""
  if not s:
    return None
  return int(s, 0)
+
REPLACE = {" ": "_", "-": "_"}
KEYWORDS = {"global": "global_",
            "return": "return_"}

def id(name):
  """Turn a spec name into a safe python identifier.

  Spaces and dashes become underscores; python keywords gain a trailing
  underscore.  (Shadows the 'id' builtin; name kept for existing callers.)
  """
  ident = str(name)
  for old, sub in REPLACE.items():
    ident = ident.replace(old, sub)
  return KEYWORDS.get(ident, ident)
+
class Loader:
  """Builds the Spec object tree from mllib XML nodes via dispatch():
  each do_<tag> method constructs the node for one element type."""

  def __init__(self):
    # class code of the <class> currently being loaded; merged into
    # instruction/struct codes by code() below
    self.class_code = 0

  def code(self, nd):
    c = num(nd["@code"])
    if c is None:
      return None
    else:
      # spec-wide code = (class code << 8) | local code
      return c | (self.class_code << 8)

  def list(self, q):
    result = []
    for nd in q:
      result.append(nd.dispatch(self))
    return result

  def children(self, n):
    # dispatch every element child of n
    return self.list(n.query["#tag"])

  def data(self, d):
    return d.data

  def do_amqp(self, a):
    return Spec(num(a["@major"]), num(a["@minor"]), num(a["@port"]),
                self.children(a))

  def do_type(self, t):
    return Primitive(id(t["@name"]), self.code(t), num(t["@fixed-width"]),
                     num(t["@variable-width"]), self.children(t))

  def do_constant(self, c):
    return Constant(id(c["@name"]), num(c["@value"]), self.children(c))

  def do_domain(self, d):
    return Domain(id(d["@name"]), id(d["@type"]), self.children(d))

  def do_enum(self, e):
    return Anonymous(self.children(e))

  def do_choice(self, c):
    return Choice(id(c["@name"]), num(c["@value"]), self.children(c))

  def do_class(self, c):
    code = num(c["@code"])
    # class code scopes the codes of everything loaded inside
    self.class_code = code
    children = self.children(c)
    # result structs are declared inline under commands but belong to the class
    children += self.list(c.query["command/result/struct"])
    self.class_code = 0
    return Class(id(c["@name"]), code, children)

  def do_doc(self, doc):
    # concatenate text/child fragments (reduce is the py2 builtin)
    text = reduce(lambda x, y: x + y, self.list(doc.children))
    return Doc(doc["@type"], doc["@title"], text)

  def do_xref(self, x):
    return x["@ref"]

  def do_role(self, r):
    return Role(id(r["@name"]), self.children(r))

  def do_control(self, c):
    return Control(id(c["@name"]), c["@label"], self.code(c), self.children(c))

  def do_rule(self, r):
    return Rule(id(r["@name"]), self.children(r))

  def do_implement(self, i):
    return Implement(id(i["@handle"]))

  def do_response(self, r):
    return Response(id(r["@name"]), self.children(r))

  def do_field(self, f):
    return Field(id(f["@name"]), f["@label"], id(f["@type"]), self.children(f))

  def do_struct(self, s):
    return Struct(id(s["@name"]), s["@label"], self.code(s), num(s["@size"]),
                  num(s["@pack"]), self.children(s))

  def do_command(self, c):
    return Command(id(c["@name"]), c["@label"], self.code(c), self.children(c))

  def do_segments(self, s):
    return Anonymous(self.children(s))

  def do_header(self, h):
    return Header(self.children(h))

  def do_entry(self, e):
    return Entry(id(e["@type"]))

  def do_body(self, b):
    return Body(self.children(b))

  def do_result(self, r):
    # a result refers either to a named type or an inline struct
    type = r["@type"]
    if not type:
      type = r["struct/@name"]
    return Result(id(type), self.list(r.query["#tag", lambda x: x.name != "struct"]))

  def do_exception(self, e):
    return Exception(id(e["@name"]), id(e["@error-code"]), self.children(e))
+
def load(xml):
  """Load a 0-10 spec from xml, caching the built tree in xml + '.pcl'.

  The cache is used only when it is newer than both this module and the
  xml file (the original ignored the xml's mtime, so edits to the spec
  never invalidated the cache).  Cache write failures are ignored.
  """
  fname = xml + ".pcl"

  if (os.path.exists(fname) and mtime(fname) > mtime(__file__)
      and mtime(fname) > mtime(xml)):
    # pickle data is binary; open in "rb"/"wb" so it survives on
    # platforms that translate text-mode newlines
    f = open(fname, "rb")
    try:
      s = cPickle.load(f)
    finally:
      f.close()
  else:
    doc = mllib.xml_parse(xml)
    s = doc["amqp"].dispatch(Loader())
    s.register()
    s.resolve()

    # best effort: a read-only location just skips the cache
    try:
      f = open(fname, "wb")
    except IOError:
      f = None

    if f:
      try:
        cPickle.dump(s, f)
      finally:
        f.close()

  return s
diff --git a/RC9/qpid/python/qpid/spec08.py b/RC9/qpid/python/qpid/spec08.py
new file mode 100644
index 0000000000..a0047e7107
--- /dev/null
+++ b/RC9/qpid/python/qpid/spec08.py
@@ -0,0 +1,504 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+This module loads protocol metadata into python objects. It provides
+access to spec metadata via a python object model, and can also
+dynamically create python methods, classes, and modules based on the
+spec metadata. All the generated methods have proper signatures and
+doc strings based on the spec metadata so the python help system can
+be used to browse the spec documentation. The generated methods all
+dispatch to the self.invoke(meth, args) callback of the containing
+class so that the generated code can be reused in a variety of
+situations.
+"""
+
+import re, new, mllib, qpid
+from util import fill
+
class SpecContainer:
  """An ordered collection of spec items indexed by name and id.

  Items must expose 'name' and 'id' attributes; an item added with id
  None is assigned the next sequential id.
  """

  def __init__(self):
    self.items = []
    self.byname = {}
    self.byid = {}
    self.indexes = {}   # item -> position in items

  def add(self, item):
    """Append item and index it; raises ValueError on a duplicate name
    or id.  ('in' replaces the deprecated dict.has_key; identity test
    for None replaces '== None'.)"""
    if item.name in self.byname:
      raise ValueError("duplicate name: %s" % item)
    if item.id is None:
      item.id = len(self)
    elif item.id in self.byid:
      raise ValueError("duplicate id: %s" % item)
    self.indexes[item] = len(self.items)
    self.items.append(item)
    self.byname[item.name] = item
    self.byid[item.id] = item

  def index(self, item):
    """Return the insertion position of item; ValueError if absent."""
    try:
      return self.indexes[item]
    except KeyError:
      raise ValueError(item)

  def __iter__(self):
    return iter(self.items)

  def __len__(self):
    return len(self.items)
+
class Metadata:
  """Base class for 0-8/0-9 spec objects.

  PRINT lists the attribute names shown by __str__/__repr__.
  """

  PRINT = []

  def __init__(self):
    pass

  def __str__(self):
    shown = ["%s=%s" % (attr, getattr(self, attr)) for attr in self.PRINT]
    return "%s(%s)" % (self.__class__.__name__, ", ".join(shown))

  def __repr__(self):
    return str(self)
+
class Spec(Metadata):
  """Root of a 0-8/0-9 spec; can generate python modules/classes whose
  methods dispatch to self.invoke()."""

  PRINT=["major", "minor", "file"]

  def __init__(self, major, minor, file):
    Metadata.__init__(self)
    self.major = major
    self.minor = minor
    self.file = file
    self.constants = SpecContainer()
    self.domains = SpecContainer()
    self.classes = SpecContainer()
    # methods indexed by classname_methname
    self.methods = {}
    # structs by type code
    self.structs = {}

  def post_load(self):
    # build the generated python module/class once everything is loaded
    self.module = self.define_module("amqp%s%s" % (self.major, self.minor))
    self.klass = self.define_class("Amqp%s%s" % (self.major, self.minor))

  def method(self, name):
    """Look up a method by 'classname_methname', caching the result."""
    if not self.methods.has_key(name):
      for cls in self.classes:
        clen = len(cls.name)
        # NOTE(review): name[clen] raises IndexError when name equals a
        # class name exactly -- confirm callers never pass that
        if name.startswith(cls.name) and name[clen] == "_":
          end = name[clen + 1:]
          if cls.methods.byname.has_key(end):
            self.methods[name] = cls.methods.byname[end]
    return self.methods.get(name)

  def parse_method(self, name):
    """Parse 'class.method' and return the Method; ValueError otherwise."""
    parts = re.split(r"\s*\.\s*", name)
    if len(parts) != 2:
      raise ValueError(name)
    klass, meth = parts
    return self.classes.byname[klass].methods.byname[meth]

  def struct(self, name, *args, **kwargs):
    # construct a runtime struct value for the named domain
    type = self.domains.byname[name].type
    return qpid.Struct(type, *args, **kwargs)

  def define_module(self, name, doc = None):
    """Create a python module containing one generated class per spec class."""
    module = new.module(name, doc)
    module.__file__ = self.file
    for c in self.classes:
      cls = c.define_class(c.name)
      cls.__module__ = module.__name__
      setattr(module, c.name, cls)
    return module

  def define_class(self, name):
    """Create one flat class with classname_methname methods for every
    method of every spec class."""
    methods = {}
    for c in self.classes:
      for m in c.methods:
        meth = m.klass.name + "_" + m.name
        methods[meth] = m.define_method(meth)
    return type(name, (), methods)
+
class Constant(Metadata):
  """A named constant from the 0-8/0-9 spec."""

  PRINT=["name", "id"]

  def __init__(self, spec, name, id, klass, docs):
    Metadata.__init__(self)
    self.spec = spec
    self.name = name
    self.id = id          # the constant's value
    self.klass = klass    # the @class attribute, if any
    self.docs = docs
+
class Domain(Metadata):
  """A 0-8/0-9 domain: a named alias for a type (or inline struct)."""

  PRINT=["name", "type"]

  def __init__(self, spec, name, type, description, docs):
    Metadata.__init__(self)
    self.spec = spec
    self.id = None        # assigned by SpecContainer.add
    self.name = name
    self.type = type
    self.description = description
    self.docs = docs
+
class Struct(Metadata):
  """A 0-8/0-9 struct: size/pack widths plus an ordered field container."""

  PRINT=["size", "type", "pack"]

  def __init__(self, size, type, pack):
    Metadata.__init__(self)
    self.size = size    # width of the size prefix, in bytes
    self.type = type    # numeric type code (may be None)
    self.pack = pack    # width of the packed-flags prefix, in bytes
    self.fields = SpecContainer()
+
class Class(Metadata):
  """A 0-8/0-9 AMQP class: fields plus a container of methods."""

  PRINT=["name", "id"]

  def __init__(self, spec, name, id, handler, docs):
    Metadata.__init__(self)
    self.spec = spec
    self.name = name
    self.id = id
    self.handler = handler
    self.fields = SpecContainer()
    self.methods = SpecContainer()
    self.docs = docs

  def define_class(self, name):
    """Create a python class with one generated method per spec method."""
    methods = {}
    for m in self.methods:
      methods[m.name] = m.define_method(m.name)
    return type(name, (), methods)
+
class Method(Metadata):
  """A 0-8/0-9 method: argument handling, docstring assembly, and
  exec-based generation of a python method that forwards to invoke()."""

  PRINT=["name", "id"]

  def __init__(self, klass, name, id, content, responses, result, synchronous,
               description, docs):
    Metadata.__init__(self)
    self.klass = klass
    self.name = name
    self.id = id
    self.content = content          # whether the method carries content
    self.responses = responses      # names, resolved to Methods in load()
    self.result = result
    self.synchronous = synchronous
    self.fields = SpecContainer()
    self.description = description
    self.docs = docs
    self.response = False           # set True if another method responds with us

  def is_l4_command(self):
    # anything outside the transport/session classes is application-level
    return self.klass.name not in ["execution", "channel", "connection", "session"]

  def arguments(self, *args, **kwargs):
    """Normalize positional/keyword args into a full positional tuple,
    filling unset fields with their type defaults; raises TypeError on
    excess, duplicate, or unknown arguments."""
    nargs = len(args) + len(kwargs)
    maxargs = len(self.fields)
    if nargs > maxargs:
      self._type_error("takes at most %s arguments (%s) given", maxargs, nargs)
    result = []
    for f in self.fields:
      idx = self.fields.index(f)
      if idx < len(args):
        result.append(args[idx])
      elif kwargs.has_key(f.name):
        result.append(kwargs.pop(f.name))
      else:
        result.append(Method.DEFAULTS[f.type])
    # anything left in kwargs is either a duplicate or unknown
    for key, value in kwargs.items():
      if self.fields.byname.has_key(key):
        self._type_error("got multiple values for keyword argument '%s'", key)
      else:
        self._type_error("got an unexpected keyword argument '%s'", key)
    return tuple(result)

  def _type_error(self, msg, *args):
    raise TypeError("%s %s" % (self.name, msg % args))

  def docstring(self):
    """Assemble help text from the method's docs, field docs, and the
    list of valid responses."""
    s = "\n\n".join([fill(d, 2) for d in [self.description] + self.docs])
    for f in self.fields:
      if f.docs:
        s += "\n\n" + "\n\n".join([fill(f.docs[0], 4, f.name)] +
                                  [fill(d, 4) for d in f.docs[1:]])
    if self.responses:
      s += "\n\nValid responses: "
      for r in self.responses:
        s += r.name + " "
    return s

  # key under which the Method object is planted in the generated
  # function's globals
  METHOD = "__method__"
  # per-wire-type default argument values
  DEFAULTS = {"bit": False,
             "shortstr": "",
             "longstr": "",
             "table": {},
             "array": [],
             "octet": 0,
             "short": 0,
             "long": 0,
             "longlong": 0,
             "timestamp": 0,
             "content": None,
             "uuid": "",
             "rfc1982_long": 0,
             "rfc1982_long_set": [],
             "long_struct": None}

  def define_method(self, name):
    """Generate (via exec) a python method with a real signature and
    docstring that forwards its arguments to self.invoke(method, args)."""
    g = {Method.METHOD: self}
    l = {}
    args = [(f.name, Method.DEFAULTS[f.type]) for f in self.fields]
    methargs = args[:]
    if self.content:
      args += [("content", None)]
    code = "def %s(self, %s):\n" % \
           (name, ", ".join(["%s = %r" % a for a in args]))
    # %r embeds the docstring as a literal in the generated source
    code += "  %r\n" % self.docstring()
    argnames = ", ".join([a[0] for a in methargs])
    code += "  return self.invoke(%s" % Method.METHOD
    if argnames:
      code += ", (%s,)" % argnames
    else:
      code += ", ()"
    if self.content:
      code += ", content"
    code += ")"
    exec code in g, l
    return l[name]
+
class Field(Metadata):
  """A 0-8/0-9 field of a class, method, or struct."""

  PRINT=["name", "id", "type"]

  def __init__(self, name, id, type, domain, description, docs):
    Metadata.__init__(self)
    self.name = name
    self.id = id
    self.type = type        # resolved base type (or a Struct)
    self.domain = domain    # the Domain this was declared through, if any
    self.description = description
    self.docs = docs

  def default(self):
    if isinstance(self.type, Struct):
      return None
    else:
      return Method.DEFAULTS[self.type]
+
# size/pack attribute value -> width in bytes
WIDTHS = {
  "octet": 1,
  "short": 2,
  "long": 4
  }

def width(st, default=None):
  """Map a size/pack attribute string to its byte width.

  Missing/none/empty values yield the supplied default; unknown names
  raise KeyError.
  """
  if st in (None, "none", ""):
    return default
  return WIDTHS[st]
+
def get_result(nd, spec):
  """Return the result type of a method node: a named domain, an inline
  struct (registered in spec.structs), or None if there is no result."""
  result = nd["result"]
  if not result: return None
  name = result["@domain"]
  if name != None: return spec.domains.byname[name]
  # inline struct: its spec-wide code is class index * 256 + local type
  st_nd = result["struct"]
  st = Struct(width(st_nd["@size"]), int(result.parent.parent["@index"])*256 +
              int(st_nd["@type"]), width(st_nd["@pack"], 2))
  spec.structs[st.type] = st
  load_fields(st_nd, st.fields, spec.domains.byname)
  return st
+
def get_desc(nd):
  """Return the node's @label, falling back to its text content,
  stripped of surrounding whitespace; may be None."""
  label = nd["@label"] or nd.text()
  if label:
    label = label.strip()
  return label
+
def get_docs(nd):
  """Collect the text of each <doc> child of nd, in document order."""
  docs = []
  for child in nd.query["doc"]:
    docs.append(child.text())
  return docs
+
def load_fields(nd, l, domains):
  """Load the <field> children of nd into SpecContainer l, resolving
  each field's domain chain down to its base type."""
  for f_nd in nd.query["field"]:
    # a field names either a domain or a raw type
    type = f_nd["@domain"]
    if type == None:
      type = f_nd["@type"]
    type = pythonize(type)
    domain = None
    # follow domain aliases until we reach a self-referential base type
    while domains.has_key(type) and domains[type].type != type:
      domain = domains[type]
      type = domain.type
    l.add(Field(pythonize(f_nd["@name"]), f_nd.index(), type, domain,
                get_desc(f_nd), get_docs(f_nd)))
+
def load(specfile, *errata):
  """Parse specfile (plus errata overlays) into a Spec object.

  Errata documents are merged in order: constants/classes/methods already
  present are reused, new ones are added.
  """
  doc = mllib.xml_parse(specfile)
  spec_root = doc["amqp"]
  spec = Spec(int(spec_root["@major"]), int(spec_root["@minor"]), specfile)

  for root in [spec_root] + map(lambda x: mllib.xml_parse(x)["amqp"], errata):
    # constants
    for nd in root.query["constant"]:
      val = nd["@value"]
      if val.startswith("0x"): val = int(val, 16)
      else: val = int(val)
      const = Constant(spec, pythonize(nd["@name"]), val, nd["@class"],
                       get_docs(nd))
      try:
        spec.constants.add(const)
      except ValueError, e:
        # duplicate constants from errata are silently ignored
        pass
        #print "Warning:", e

    # domains are typedefs
    structs = []
    for nd in root.query["domain"]:
      type = nd["@type"]
      if type == None:
        # typeless domain: an inline struct definition
        st_nd = nd["struct"]
        code = st_nd["@type"]
        if code not in (None, "", "none"):
          code = int(code)
        type = Struct(width(st_nd["@size"]), code, width(st_nd["@pack"], 2))
        if type.type != None:
          spec.structs[type.type] = type
        # field loading deferred until all domains are known
        structs.append((type, st_nd))
      else:
        type = pythonize(type)
      domain = Domain(spec, pythonize(nd["@name"]), type, get_desc(nd),
                      get_docs(nd))
      spec.domains.add(domain)

    # structs
    for st, st_nd in structs:
      load_fields(st_nd, st.fields, spec.domains.byname)

    # classes
    for c_nd in root.query["class"]:
      cname = pythonize(c_nd["@name"])
      if spec.classes.byname.has_key(cname):
        klass = spec.classes.byname[cname]
      else:
        klass = Class(spec, cname, int(c_nd["@index"]), c_nd["@handler"],
                      get_docs(c_nd))
        spec.classes.add(klass)

      added_methods = []
      load_fields(c_nd, klass.fields, spec.domains.byname)
      for m_nd in c_nd.query["method"]:
        mname = pythonize(m_nd["@name"])
        if klass.methods.byname.has_key(mname):
          meth = klass.methods.byname[mname]
        else:
          meth = Method(klass, mname,
                        int(m_nd["@index"]),
                        m_nd["@content"] == "1",
                        [pythonize(nd["@name"]) for nd in m_nd.query["response"]],
                        get_result(m_nd, spec),
                        m_nd["@synchronous"] == "1",
                        get_desc(m_nd),
                        get_docs(m_nd))
          klass.methods.add(meth)
          added_methods.append(meth)
        load_fields(m_nd, meth.fields, spec.domains.byname)
      # resolve the responses
      for m in added_methods:
        m.responses = [klass.methods.byname[r] for r in m.responses]
        for resp in m.responses:
          resp.response = True

  # build the generated module/class now the tree is complete
  spec.post_load()
  return spec
+
REPLACE = {" ": "_", "-": "_"}
KEYWORDS = {"global": "global_",
            "return": "return_"}

def pythonize(name):
  """Turn a spec name into a safe python identifier: spaces/dashes
  become underscores and python keywords gain a trailing underscore."""
  ident = str(name)
  for old, sub in REPLACE.items():
    ident = ident.replace(old, sub)
  return KEYWORDS.get(ident, ident)
+
class Rule(Metadata):
  """A conformance rule extracted from the spec document.

  NOTE(review): does not call Metadata.__init__ -- harmless since that
  __init__ is a no-op, but inconsistent with the other subclasses.
  """

  PRINT = ["text", "implement", "tests"]

  def __init__(self, text, implement, tests, path):
    self.text = text
    self.implement = implement
    self.tests = tests    # names of tests covering this rule
    self.path = path      # location of the rule within the document
+
def find_rules(node, rules):
  """Recursively collect Rule objects from node into the rules list.

  Rules appear either as <rule> elements or as <doc name="rule"> elements
  (the latter carrying an optional @test attribute).
  """
  if node.name == "rule":
    rules.append(Rule(node.text, node.get("@implement"),
                      [ch.text for ch in node if ch.name == "test"],
                      node.path()))
  if node.name == "doc" and node.get("@name") == "rule":
    tests = []
    if node.has("@test"):
      tests.append(node["@test"])
    rules.append(Rule(node.text, None, tests, node.path()))
  for child in node:
    find_rules(child, rules)
+
def load_rules(specfile):
  """Parse specfile and collect all conformance rules from it.

  Fixed NameError: the original called xmlutil.parse, but this module
  never imports xmlutil -- it imports mllib.  NOTE(review): confirm that
  mllib nodes provide the .get/.has/.path API used by find_rules.
  """
  rules = []
  find_rules(mllib.xml_parse(specfile), rules)
  return rules
+
def test_summary():
  """Print (python 2 print statement) an HTML table summarizing every
  rule in the spec along with its covering tests."""
  template = """
  <html><head><title>AMQP Tests</title></head>
  <body>
  <table width="80%%" align="center">
  %s
  </table>
  </body>
  </html>
  """
  rows = []
  for rule in load_rules("amqp.org/specs/amqp7.xml"):
    if rule.tests:
      tests = ", ".join(rule.tests)
    else:
      tests = "&nbsp;"
    # one highlighted header row per rule, then its text, then a spacer
    rows.append('<tr bgcolor="#EEEEEE"><td><b>Path:</b> %s</td>'
                '<td><b>Implement:</b> %s</td>'
                '<td><b>Tests:</b> %s</td></tr>' %
                (rule.path[len("/root/amqp"):], rule.implement, tests))
    rows.append('<tr><td colspan="3">%s</td></tr>' % rule.text)
    rows.append('<tr><td colspan="3">&nbsp;</td></tr>')

  print template % "\n".join(rows)
diff --git a/RC9/qpid/python/qpid/testlib.py b/RC9/qpid/python/qpid/testlib.py
new file mode 100644
index 0000000000..31f52169ae
--- /dev/null
+++ b/RC9/qpid/python/qpid/testlib.py
@@ -0,0 +1,392 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+#
+# Support library for qpid python tests.
+#
+
+import sys, re, unittest, os, random, logging, traceback
+import qpid.client, qpid.spec, qmf.console
+import Queue
+from fnmatch import fnmatch
+from getopt import getopt, GetoptError
+from qpid.content import Content
+from qpid.message import Message
+
+#0-10 support
+from qpid.connection import Connection
+from qpid.spec010 import load
+from qpid.util import connect, ssl, URL
+
def findmodules(root):
    """Find potential python modules under directory root.

    Returns dotted module names (root-relative) for every *.py file
    except __init__.py, skipping SVN metadata directories.
    """
    found = []
    for dirpath, subdirs, files in os.walk(root):
        # BUG FIX: the old check re.match(r'\.svn$', dirpath) anchored at
        # the start of the whole path, so it only matched a directory
        # literally named ".svn" at the top level -- nested .svn
        # directories (e.g. "tests/.svn") leaked into the results.
        # Testing the path components catches .svn at any depth.
        if ".svn" in dirpath.split(os.sep):
            continue
        modpath = dirpath.replace(os.sep, '.')
        for f in files:
            match = re.match(r'(.+)\.py$', f)
            if match and f != '__init__.py':
                found.append('.'.join([modpath, match.group(1)]))
    return found
+
def default(value, default):
    """Return value, or default when value is None."""
    return default if value is None else value
+
class TestRunner:

    # Default location of the AMQP spec XML files; overridden by -F/--spec-folder.
    SPEC_FOLDER = "../specs"

    # NOTE(review): this string follows SPEC_FOLDER, so it is a bare
    # expression statement rather than the class docstring.
    """Runs unit tests.

    Parses command line arguments, provides utility functions for tests,
    runs the selected test suite.
    """

    def _die(self, message = None):
        # Print an optional error, then usage, and exit with failure status.
        if message: print message
        print """
run-tests [options] [test*]
The name of a test is package.module.ClassName.testMethod
Options:
  -?/-h/--help : this message
  -s/--spec <spec.xml> : URL of AMQP XML specification or one of these abbreviations:
    0-8 - use the default 0-8 specification.
    0-9 - use the default 0-9 specification.
    0-10-errata - use the 0-10 specification with qpid errata.
  -e/--errata <errata.xml> : file containing amqp XML errata
  -b/--broker [amqps://][<user>[/<password>]@]<host>[:<port>] : broker to connect to
  -v/--verbose : verbose - lists tests as they are run.
  -d/--debug : enable debug logging.
  -i/--ignore <test> : ignore the named test.
  -I/--ignore-file : file containing patterns to ignore.
  -S/--skip-self-test : skips the client self tests in the 'tests folder'
  -F/--spec-folder : folder that contains the specs to be loaded
  """
        sys.exit(1)

    def setBroker(self, broker):
        # Parse the broker URL; default credentials are guest/guest and the
        # port defaults per scheme (amqps -> 5671, otherwise 5672).
        try:
            self.url = URL(broker)
        except ValueError:
            self._die("'%s' is not a valid broker" % (broker))
        self.user = default(self.url.user, "guest")
        self.password = default(self.url.password, "guest")
        self.host = self.url.host
        if self.url.scheme == URL.AMQPS:
            self.ssl = True
            default_port = 5671
        else:
            self.ssl = False
            default_port = 5672
        self.port = default(self.url.port, default_port)

    def ignoreFile(self, filename):
        # Append every (stripped) line of filename to the ignore patterns.
        f = file(filename)
        for line in f.readlines(): self.ignore.append(line.strip())
        f.close()

    def use08spec(self):
        "True if we are running with the old 0-8 spec."
        # NB: AMQP 0-8 identifies itself as 8-0 for historical reasons.
        return self.spec.major==8 and self.spec.minor==0

    def use09spec(self):
        "True if we are running with the 0-9 (non-wip) spec."
        return self.spec.major==0 and self.spec.minor==9

    def _parseargs(self, args):
        """Parse command line options and load the selected spec."""
        # Defaults
        self.setBroker("localhost")
        self.verbose = 1
        self.ignore = []
        self.specfile = "0-8"
        self.errata = []
        self.skip_self_test = False

        try:
            opts, self.tests = getopt(args, "s:e:b:h?dvSi:I:F:",
                                      ["help", "spec", "errata=", "broker=",
                                       "verbose", "skip-self-test", "ignore",
                                       "ignore-file", "spec-folder"])
        except GetoptError, e:
            self._die(str(e))
        for opt, value in opts:
            if opt in ("-?", "-h", "--help"): self._die()
            if opt in ("-s", "--spec"): self.specfile = value
            if opt in ("-e", "--errata"): self.errata.append(value)
            if opt in ("-b", "--broker"): self.setBroker(value)
            if opt in ("-v", "--verbose"): self.verbose = 2
            if opt in ("-d", "--debug"): logging.basicConfig(level=logging.DEBUG)
            if opt in ("-i", "--ignore"): self.ignore.append(value)
            if opt in ("-I", "--ignore-file"): self.ignoreFile(value)
            # NOTE(review): -F mutates the class attribute so get_spec_file
            # sees the new folder regardless of instance.
            if opt in ("-F", "--spec-folder"): TestRunner.SPEC_FOLDER = value

        # Abbreviations for default settings.  0-10 variants use the new
        # spec010 loader; older specs go through qpid.spec.load below.
        if (self.specfile == "0-10"):
            self.spec = load(self.get_spec_file("amqp.0-10.xml"))
        elif (self.specfile == "0-10-errata"):
            self.spec = load(self.get_spec_file("amqp.0-10-qpid-errata.xml"))
        else:
            if (self.specfile == "0-8"):
                self.specfile = self.get_spec_file("amqp.0-8.xml")
            elif (self.specfile == "0-9"):
                self.specfile = self.get_spec_file("amqp.0-9.xml")
                self.errata.append(self.get_spec_file("amqp-errata.0-9.xml"))

            if (self.specfile == None):
                self._die("No XML specification provided")
            print "Using specification from:", self.specfile

            self.spec = qpid.spec.load(self.specfile, *self.errata)

        # With no explicit tests, pick the suites appropriate to the spec.
        if len(self.tests) == 0:
            if not self.skip_self_test:
                self.tests=findmodules("tests")
            if self.use08spec() or self.use09spec():
                self.tests+=findmodules("tests_0-8")
            elif (self.spec.major == 99 and self.spec.minor == 0):
                self.tests+=findmodules("tests_0-10_preview")
            elif (self.spec.major == 0 and self.spec.minor == 10):
                self.tests+=findmodules("tests_0-10")

    def testSuite(self):
        """Build the unittest suite, honouring the ignore patterns."""
        # Suite subclass that silently drops any test whose id() matches
        # one of the fnmatch-style ignore patterns.
        class IgnoringTestSuite(unittest.TestSuite):
            def addTest(self, test):
                if isinstance(test, unittest.TestCase):
                    for pattern in testrunner.ignore:
                        if fnmatch(test.id(), pattern):
                            return
                unittest.TestSuite.addTest(self, test)

        # Use our IgnoringTestSuite in the test loader.
        unittest.TestLoader.suiteClass = IgnoringTestSuite
        return unittest.defaultTestLoader.loadTestsFromNames(self.tests)

    def run(self, args=sys.argv[1:]):
        """Parse args, run the suite, and return True on success."""
        self._parseargs(args)
        runner = unittest.TextTestRunner(descriptions=False,
                                         verbosity=self.verbose)
        result = runner.run(self.testSuite())

        if (self.ignore):
            print "======================================="
            print "NOTE: the following tests were ignored:"
            for t in self.ignore: print t
            print "======================================="

        return result.wasSuccessful()

    def connect(self, host=None, port=None, spec=None, user=None, password=None, tune_params=None):
        """Connect to the broker, returns a qpid.client.Client"""
        host = host or self.host
        port = port or self.port
        spec = spec or self.spec
        user = user or self.user
        password = password or self.password
        client = qpid.client.Client(host, port, spec)
        # 0-8 uses a LOGIN/PASSWORD table; later specs use SASL PLAIN.
        if self.use08spec():
            client.start({"LOGIN": user, "PASSWORD": password}, tune_params=tune_params)
        else:
            client.start("\x00" + user + "\x00" + password, mechanism="PLAIN", tune_params=tune_params)
        return client

    def get_spec_file(self, fname):
        # Resolve fname inside the (possibly overridden) spec folder.
        return TestRunner.SPEC_FOLDER + os.sep + fname
+
# Global instance for tests to call connect.  Shared, mutable module state:
# run() configures it from the command line before any test executes.
testrunner = TestRunner()
+
+
class TestBase(unittest.TestCase):
    """Base class for Qpid test cases.

    self.client is automatically connected with channel 1 open before
    the test methods are run.

    Deletes queues and exchanges after. Tests call
    self.queue_declare(channel, ...) and self.exchange_declare(channel,
    ...) which are wrappers for the Channel functions that note
    resources to clean up later.
    """

    def setUp(self):
        self.queues = []     # (channel, queue name) pairs deleted in tearDown
        self.exchanges = []  # (channel, exchange name) pairs deleted in tearDown
        self.client = self.connect()
        self.channel = self.client.channel(1)
        self.version = (self.client.spec.major, self.client.spec.minor)
        # 0-8 (reported as 8-0) and 0-9 open a channel; later specs a session.
        if self.version == (8, 0) or self.version == (0, 9):
            self.channel.channel_open()
        else:
            self.channel.session_open()

    def tearDown(self):
        # Best-effort cleanup of resources noted by the declare wrappers.
        try:
            for ch, q in self.queues:
                ch.queue_delete(queue=q)
            for ch, ex in self.exchanges:
                ch.exchange_delete(exchange=ex)
        except:
            print "Error on tearDown:"
            # NOTE(review): traceback.print_exc() returns None, so this
            # statement also prints "None" after the traceback.
            print traceback.print_exc()

        if not self.client.closed:
            self.client.channel(0).connection_close(reply_code=200)
        else:
            self.client.close()

    def connect(self, *args, **keys):
        """Create a new connection, return the Client object"""
        return testrunner.connect(*args, **keys)

    def queue_declare(self, channel=None, *args, **keys):
        # Declare a queue and remember it for deletion in tearDown.
        # NOTE(review): requires keys["queue"] to be passed by keyword.
        channel = channel or self.channel
        reply = channel.queue_declare(*args, **keys)
        self.queues.append((channel, keys["queue"]))
        return reply

    def exchange_declare(self, channel=None, ticket=0, exchange='',
                         type='', passive=False, durable=False,
                         auto_delete=False,
                         arguments={}):
        # Declare an exchange and remember it for deletion in tearDown.
        # NOTE(review): the mutable default for arguments is only safe
        # because it is never modified here.
        channel = channel or self.channel
        reply = channel.exchange_declare(ticket=ticket, exchange=exchange, type=type, passive=passive,durable=durable, auto_delete=auto_delete, arguments=arguments)
        self.exchanges.append((channel,exchange))
        return reply

    def uniqueString(self):
        """Generate a unique string, unique for this TestBase instance"""
        # NOTE(review): the counter is initialised but never incremented,
        # so every call returns "Test Message 1" -- confirm intent.
        if not "uniqueCounter" in dir(self): self.uniqueCounter = 1;
        return "Test Message " + str(self.uniqueCounter)

    def consume(self, queueName):
        """Consume from named queue returns the Queue object."""
        if testrunner.use08spec() or testrunner.use09spec():
            reply = self.channel.basic_consume(queue=queueName, no_ack=True)
            return self.client.queue(reply.consumer_tag)
        else:
            # 0-10 path: subscribe with a fresh tag and open message (unit 0)
            # and byte (unit 1) credit effectively without limit.
            if not "uniqueTag" in dir(self): self.uniqueTag = 1
            else: self.uniqueTag += 1
            consumer_tag = "tag" + str(self.uniqueTag)
            self.channel.message_subscribe(queue=queueName, destination=consumer_tag)
            self.channel.message_flow(destination=consumer_tag, unit=0, value=0xFFFFFFFF)
            self.channel.message_flow(destination=consumer_tag, unit=1, value=0xFFFFFFFF)
            return self.client.queue(consumer_tag)

    def subscribe(self, channel=None, **keys):
        # Subscribe and grant effectively unlimited message and byte credit.
        channel = channel or self.channel
        consumer_tag = keys["destination"]
        channel.message_subscribe(**keys)
        channel.message_flow(destination=consumer_tag, unit=0, value=0xFFFFFFFF)
        channel.message_flow(destination=consumer_tag, unit=1, value=0xFFFFFFFF)

    def assertEmpty(self, queue):
        """Assert that the queue is empty"""
        try:
            queue.get(timeout=1)
            self.fail("Queue is not empty.")
        except Queue.Empty: None # Ignore

    def assertPublishGet(self, queue, exchange="", routing_key="", properties=None):
        """
        Publish to exchange and assert queue.get() returns the same message.
        """
        body = self.uniqueString()
        if testrunner.use08spec() or testrunner.use09spec():
            self.channel.basic_publish(
                exchange=exchange,
                content=Content(body, properties=properties),
                routing_key=routing_key)
        else:
            # 0-10 carries routing_key and headers inside content properties.
            self.channel.message_transfer(
                destination=exchange,
                content=Content(body, properties={'application_headers':properties,'routing_key':routing_key}))
        msg = queue.get(timeout=1)
        if testrunner.use08spec() or testrunner.use09spec():
            self.assertEqual(body, msg.content.body)
            if (properties):
                self.assertEqual(properties, msg.content.properties)
        else:
            self.assertEqual(body, msg.content.body)
            if (properties):
                self.assertEqual(properties, msg.content['application_headers'])

    def assertPublishConsume(self, queue="", exchange="", routing_key="", properties=None):
        """
        Publish a message and consume it, assert it comes back intact.
        Return the Queue object used to consume.
        """
        self.assertPublishGet(self.consume(queue), exchange, routing_key, properties)

    def assertChannelException(self, expectedCode, message):
        # Pre-0-10 specs report errors via channel.close; 0-10 via session.closed.
        if self.version == (8, 0) or self.version == (0, 9):
            if not isinstance(message, Message): self.fail("expected channel_close method, got %s" % (message))
            self.assertEqual("channel", message.method.klass.name)
            self.assertEqual("close", message.method.name)
        else:
            if not isinstance(message, Message): self.fail("expected session_closed method, got %s" % (message))
            self.assertEqual("session", message.method.klass.name)
            self.assertEqual("closed", message.method.name)
        self.assertEqual(expectedCode, message.reply_code)


    def assertConnectionException(self, expectedCode, message):
        if not isinstance(message, Message): self.fail("expected connection_close method, got %s" % (message))
        self.assertEqual("connection", message.method.klass.name)
        self.assertEqual("close", message.method.name)
        self.assertEqual(expectedCode, message.reply_code)
+
class TestBase010(unittest.TestCase):
    """
    Base class for Qpid test cases using the final 0-10 spec.

    Opens a connection and a session named "test-session" before each
    test; startQmf() optionally attaches a QMF management console.
    """

    def setUp(self):
        self.conn = self.connect()
        self.session = self.conn.session("test-session", timeout=10)
        self.qmf = None  # set by startQmf() when management is needed

    def startQmf(self):
        # Attach a QMF console session to the same broker for management tests.
        self.qmf = qmf.console.Session()
        self.qmf_broker = self.qmf.addBroker(str(testrunner.url))

    def connect(self, host=None, port=None):
        """Open a 0-10 Connection to the test broker (SSL for amqps URLs)."""
        sock = connect(host or testrunner.host, port or testrunner.port)
        if testrunner.url.scheme == URL.AMQPS:
            sock = ssl(sock)
        conn = Connection(sock, testrunner.spec, username=testrunner.user,
                          password=testrunner.password)
        conn.start(timeout=10)
        return conn

    def tearDown(self):
        # Skip the orderly session close if the session already failed.
        if not self.session.error(): self.session.close(timeout=10)
        self.conn.close(timeout=10)
        if self.qmf:
            self.qmf.delBroker(self.qmf_broker)

    def subscribe(self, session=None, **keys):
        # Subscribe and grant effectively unlimited message (unit 0) and
        # byte (unit 1) credit.
        session = session or self.session
        consumer_tag = keys["destination"]
        session.message_subscribe(**keys)
        session.message_flow(destination=consumer_tag, unit=0, value=0xFFFFFFFF)
        session.message_flow(destination=consumer_tag, unit=1, value=0xFFFFFFFF)
diff --git a/RC9/qpid/python/qpid/util.py b/RC9/qpid/python/qpid/util.py
new file mode 100644
index 0000000000..bb7f5090df
--- /dev/null
+++ b/RC9/qpid/python/qpid/util.py
@@ -0,0 +1,117 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import os, socket, time, textwrap, re
+
# Alias used by callers to wrap a connected socket with SSL.
# NOTE(review): socket.ssl is python-2-only (removed in python 3, the ssl
# module replaces it) -- confirm the supported interpreter before changing.
ssl = socket.ssl
+
def connect(host, port):
    """Return a blocking TCP socket connected to (host, port)."""
    sock = socket.socket()
    sock.connect((host, port))
    sock.setblocking(True)
    # XXX: we could use this on read, but we'd have to put write in a
    # loop as well
    # sock.settimeout(1)
    return sock
+
def listen(host, port, predicate = lambda: True, bound = lambda: None):
    """Generator: bind to (host, port) and yield accepted client sockets.

    bound() is invoked once the socket is listening; predicate() is
    consulted before each accept and ends the loop when it goes false.
    """
    server = socket.socket()
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind((host, port))
    server.listen(5)
    bound()
    while predicate():
        client, address = server.accept()
        yield client
+
def mtime(filename):
    """Return filename's last-modification time (seconds since the epoch)."""
    return os.path.getmtime(filename)
+
def wait(condition, predicate, timeout=None):
    """Block on condition until predicate() is true.

    With no timeout, waits indefinitely and returns True.  With a
    timeout in seconds, returns False if predicate() is still false
    once the timeout has elapsed, True otherwise.
    """
    condition.acquire()
    try:
        start = time.time()
        elapsed = 0
        while not predicate():
            if timeout is None:
                condition.wait()
            elif elapsed < timeout:
                condition.wait(timeout - elapsed)
            else:
                return False
            elapsed = time.time() - start
        return True
    finally:
        condition.release()
+
def notify(condition, action=lambda: None):
    """Run action() while holding condition, then wake all waiters."""
    condition.acquire()
    try:
        action()
        condition.notifyAll()
    finally:
        condition.release()
+
def fill(text, indent, heading = None):
    """Normalise whitespace in text and wrap it indented by indent spaces.

    If heading is given it is placed two columns to the left of the text,
    joined with " -- " (or returned alone when text is empty).
    """
    sub = indent * " "
    if not heading:
        init = sub
    else:
        if not text:
            return (indent - 2) * " " + heading
        init = (indent - 2) * " " + heading + " -- "
    wrapper = textwrap.TextWrapper(initial_indent = init, subsequent_indent = sub)
    return wrapper.fill(" ".join(text.split()))
+
class URL:
    """Parsed broker address: [scheme://][user[/password]@]host[:port]."""

    RE = re.compile(r"""
    # [ <scheme>:// ] [ <user> [ / <password> ] @] <host> [ :<port> ]
    ^ (?: ([^:/@]+)://)? (?: ([^:/@]+) (?: / ([^:/@]+) )? @)? ([^@:/]+) (?: :([0-9]+))?$
""", re.X)

    AMQPS = "amqps"
    AMQP = "amqp"

    def __init__(self, s):
        """Parse s into scheme/user/password/host/port.

        Raises ValueError if s does not match the URL grammar above.
        """
        match = URL.RE.match(s)
        if match is None:
            raise ValueError(s)
        self.scheme, self.user, self.password, self.host, port = match.groups()
        if port is None:
            self.port = None
        else:
            self.port = int(port)

    def __repr__(self):
        return "URL(%r)" % str(self)

    def __str__(self):
        # Reassemble the canonical string form from the parsed pieces.
        parts = []
        if self.scheme:
            parts.append("%s://" % self.scheme)
        if self.user:
            parts.append(self.user)
            if self.password:
                parts.append("/%s" % self.password)
            parts.append("@")
        parts.append(self.host)
        if self.port:
            parts.append(":%s" % self.port)
        return "".join(parts)
diff --git a/RC9/qpid/python/qpid_config.py b/RC9/qpid/python/qpid_config.py
new file mode 100644
index 0000000000..8f987e9962
--- /dev/null
+++ b/RC9/qpid/python/qpid_config.py
@@ -0,0 +1,23 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import os
+
# Root of the qpid tree: two directory levels above this file.
qpid_home = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Default AMQP 0-10 specification (with qpid errata) shipped under specs/.
amqp_spec = os.path.join(qpid_home, "specs", "amqp.0-10-qpid-errata.xml")
diff --git a/RC9/qpid/python/rule2test b/RC9/qpid/python/rule2test
new file mode 100755
index 0000000000..10f151366e
--- /dev/null
+++ b/RC9/qpid/python/rule2test
@@ -0,0 +1,108 @@
+#!/usr/bin/env python
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+#
+# Convert rules to tests
+#
+import sys, re, os.path
+from getopt import getopt, GetoptError
+from string import capitalize
+from xml import dom
+from xml.dom.minidom import parse
+
def camelcase(s):
    """Convert 'string like this' to 'StringLikeThis'"""
    # \W+ instead of \W* : a pattern that can match the empty string splits
    # the input between every character on python >= 3.7 (re.split no
    # longer skips zero-width matches), while \W+ behaves identically on
    # python 2 and 3.  str.capitalize also replaces the python-2-only
    # string.capitalize function with the same semantics.
    return "".join([w.capitalize() for w in re.split(r"\W+", s)])
+
def uncapitalize(s):
    """Return s with its first character lower-cased ('' stays '')."""
    # s[:1] instead of s[0] so the empty string no longer raises IndexError.
    return s[:1].lower() + s[1:]
+
def ancestors(node):
    """Return iterator of ancestors from top-level element to node.

    The chain stops before any node whose parentNode is unset, so the
    document node itself is excluded.
    """
    chain = []
    current = node
    while current and current.parentNode:
        chain.append(current)
        current = current.parentNode
    return reversed(chain)
+
def tagAndName(element):
    """Return the element's CamelCased name attribute (when present)
    followed by its CamelCased tag name."""
    nameAttr = element.getAttribute("name")
    if nameAttr:
        return camelcase(nameAttr) + camelcase(element.tagName)
    return camelcase(element.tagName)
+
def nodeText(n):
    """Recursively collect text from all text nodes under n"""
    if n.nodeType == dom.Node.TEXT_NODE:
        return n.data
    if n.childNodes:
        # Concatenate the text of every child, depth first.
        return "".join([nodeText(child) for child in n.childNodes])
    return ""
+
def cleanup(docString, level=8):
    """Strip per-line indentation from docString and re-indent by level spaces."""
    text = re.sub("\n[ \t]*", "\n", docString.strip())
    text = re.sub("\n\n\n", "\n\n", text)
    pad = level * " "
    return pad + text.replace("\n", "\n" + pad)
+
def printTest(test, docstring):
    """Print a skeleton TestBase subclass named test with the given docstring."""
    print "class %s(TestBase):" % test
    print '    """'
    print docstring
    print '    """'
    print
    print
+
def printTests(doc, module):
    """Print a test class skeleton for every rule of the named AMQP class.

    Walks each <rule> element in doc whose second ancestor carries the
    requested class name (lower-cased) and emits one class per rule.
    Returns None -- the old docstring wrongly claimed a dict return, and
    the unused 'tests' accumulator has been removed.
    """
    rules = doc.getElementsByTagName("rule")
    for r in rules:
        path = list(ancestors(r))
        if module == path[1].getAttribute("name").lower():
            test = "".join(map(tagAndName, path[2:])) + "Tests"
            docstring = cleanup(nodeText(r), 4)
            printTest(test, docstring)
+
def usage(message=None):
    """Print an optional error plus usage text to stderr; return exit status 1."""
    if message: print >>sys.stderr, message
    print >>sys.stderr, """
rule2test [options] <amqpclass>

Print test classes for each rule for the amqpclass in amqp.xml.

Options:
  -?/-h/--help : this message
  -s/--spec <spec.xml> : file containing amqp XML spec
"""
    return 1
+
def main(argv):
    """Parse options, load the spec XML, and print tests for argv's class.

    Returns a process exit status (0 on success, 1 via usage() on error).
    """
    try: opts, args = getopt(argv[1:], "h?s:", ["help", "spec="])
    except GetoptError, e: return usage(e)
    spec = "../specs/amqp.xml" # Default
    for opt, val in opts:
        if (opt in ("-h", "-?", "--help")): return usage()
        if (opt in ("-s", "--spec")): spec = val
    doc = parse(spec)
    if len(args) == 0: return usage()
    printTests(doc, args[0])
    return 0
+
# Script entry point: the exit status comes from main().
if (__name__ == "__main__"): sys.exit(main(sys.argv))
diff --git a/RC9/qpid/python/run-tests b/RC9/qpid/python/run-tests
new file mode 100755
index 0000000000..84b76ebfc1
--- /dev/null
+++ b/RC9/qpid/python/run-tests
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import sys, logging
+from qpid.testlib import testrunner
+from qpid.log import enable, WARN, DEBUG
+
# -vv enables qpid debug logging; otherwise only warnings are shown.
# (getopt later treats "-vv" as two -v flags, so it is safe to leave in argv.)
if "-vv" in sys.argv:
    level = DEBUG
else:
    level = WARN

enable("qpid", level)

# Run the suite; a failing run exits non-zero for the build scripts.
if not testrunner.run(): sys.exit(1)
+
+
+
diff --git a/RC9/qpid/python/server b/RC9/qpid/python/server
new file mode 100755
index 0000000000..56edd38490
--- /dev/null
+++ b/RC9/qpid/python/server
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import qpid
+from qpid.connection import Connection, listen
+from qpid.delegate import Delegate
+from qpid.peer import Peer
+from qpid import Struct
+
+class Server(Delegate):
+
+ def __init__(self):
+ Delegate.__init__(self)
+ self.queues = {}
+ self.bindings = {}
+
+ def connection_open(self, ch, msg):
+ msg.open_ok()
+
+ def session_open(self, ch, msg):
+ print "session open on channel %s" % ch.id
+ msg.attached()
+
+ def execution_flush(self, ch, msg):
+ pass
+
+ def queue_declare(self, ch, msg):
+ self.queues[msg.queue] = []
+ print "queue declared: %s" % msg.queue
+ msg.complete()
+
+ def queue_bind(self, ch, msg):
+ if self.bindings.has_key(msg.exchange):
+ queues = self.bindings[msg.exchange]
+ else:
+ queues = set()
+ self.bindings[msg.exchange] = queues
+ queues.add((msg.routing_key, msg.queue))
+ msg.complete()
+
+ def queue_query(self, ch, msg):
+ st = Struct(msg.method.result)
+ ch.execution_result(msg.command_id, st)
+ msg.complete()
+
+ def message_subscribe(self, ch, msg):
+ print msg
+ msg.complete()
+
+ def message_transfer(self, ch, msg):
+ print msg.content
+ msg.complete()
+
+
# NOTE(review): relies on "import qpid" exposing qpid.spec -- confirm the
# package imports the spec submodule implicitly.
spec = qpid.spec.load("../specs/amqp.0-10-preview.xml")

# Accept connections forever, driving the AMQP handshake for each one.
for io in listen("0.0.0.0", 5672):
    c = Connection(io, spec)
    p = Peer(c, Server())
    c.tini()
    p.start()
    ch = p.channel(0)
    ch.connection_start()
    ch.connection_tune()
diff --git a/RC9/qpid/python/server010 b/RC9/qpid/python/server010
new file mode 100755
index 0000000000..8dfcd7a585
--- /dev/null
+++ b/RC9/qpid/python/server010
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from qpid import delegates
+from qpid.connection010 import Connection
+from qpid.util import connect, listen
+from qpid.spec010 import load
+from qpid.session import Client
+from qpid.datatypes import Message
+from qpid.log import enable, DEBUG, WARN
+
+import sys
+
+if "-v" in sys.argv:
+ level = DEBUG
+else:
+ level = WARN
+
+enable("qpid", level)
+
+spec = load("../specs/amqp.0-10.xml")
+
class Server:
    """Delegate factory wiring new connections and sessions to handlers."""

    def connection(self, connection):
        # Each accepted connection gets a server-side delegate that creates
        # per-session delegates via self.session.
        return delegates.Server(connection, self.session)

    def session(self, session):
        # Commands are handled asynchronously; replies are sent explicitly.
        session.auto_sync = False
        return SessionDelegate(session)
+
class SessionDelegate(Client):
    """Per-session command handlers: acknowledge declares, echo transfers."""

    def __init__(self, session):
        self.session = session

    def queue_declare(self, qd):
        print "Queue %s declared..." % qd.queue

    def queue_query(self, qq):
        # Build the queue-query result struct naming the queried queue.
        return qq._type.result.type.new((qq.queue,), {})

    def message_transfer(self, cmd, headers, body):
        # Echo the message straight back to the destination it came from.
        m = Message(body)
        m.headers = headers
        self.session.message_transfer(cmd.destination, cmd.accept_mode, cmd.acquire_mode, m)

    def message_accept(self, messages):
        print "ACCEPT %s" % messages
+
server = Server()

# Serve on the spec's standard port until killed; every accepted socket
# gets a Connection driven by the Server delegate factory.
for s in listen("0.0.0.0", spec.port):
    conn = Connection(s, spec, server.connection)
    conn.start(5)
diff --git a/RC9/qpid/python/setup.py b/RC9/qpid/python/setup.py
new file mode 100644
index 0000000000..a49fa6ca51
--- /dev/null
+++ b/RC9/qpid/python/setup.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from distutils.core import setup
+
# Minimal distutils packaging for the python client: installs the qpid
# package plus the amqp-doc script.
setup(name="qpid", version="0.1", packages=["qpid"], scripts=["amqp-doc"],
      url="http://incubator.apache.org/qpid",
      license="Apache Software License",
      description="Python language client implementation for Apache Qpid")
diff --git a/RC9/qpid/python/tests/__init__.py b/RC9/qpid/python/tests/__init__.py
new file mode 100644
index 0000000000..8ad514fc2f
--- /dev/null
+++ b/RC9/qpid/python/tests/__init__.py
@@ -0,0 +1,30 @@
+# Do not delete - marks this directory as a python package.
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from codec import *
+from queue import *
+from spec import *
+from framer import *
+from assembler import *
+from datatypes import *
+from connection import *
+from spec010 import *
+from codec010 import *
diff --git a/RC9/qpid/python/tests/assembler.py b/RC9/qpid/python/tests/assembler.py
new file mode 100644
index 0000000000..f4e37084b6
--- /dev/null
+++ b/RC9/qpid/python/tests/assembler.py
@@ -0,0 +1,78 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from threading import *
+from unittest import TestCase
+from qpid.util import connect, listen
+from qpid.assembler import *
+
+PORT = 1234
+
class AssemblerTest(TestCase):
    """Round-trip test for the frame Assembler against a local echo server."""

    def setUp(self):
        # Start a daemon thread that echoes the protocol header and every
        # segment back to the client until self.running goes false.
        started = Event()
        self.running = True

        def run():
            # NOTE(review): this local 'running' is never used -- the loop
            # predicate reads self.running instead.
            running = True
            for s in listen("0.0.0.0", PORT, lambda: self.running, lambda: started.set()):
                asm = Assembler(s)
                try:
                    # Echo back the client's proposed protocol version.
                    asm.write_header(*asm.read_header()[-2:])
                    while True:
                        seg = asm.read_segment()
                        asm.write_segment(seg)
                except Closed:
                    pass

        self.server = Thread(target=run)
        self.server.setDaemon(True)
        self.server.start()

        started.wait(3)
        assert started.isSet()

    def tearDown(self):
        # NOTE(review): the listener only rechecks the predicate after the
        # next accept, so join() relies on the test's connection having
        # already woken it -- confirm there is no hang on failure paths.
        self.running = False
        self.server.join()

    def test(self):
        # max_payload=1 forces the writer to fragment each segment; the
        # reader must reassemble the fragments into the original segments.
        asm = Assembler(connect("0.0.0.0", PORT), max_payload = 1)
        asm.write_header(0, 10)
        asm.write_segment(Segment(True, False, 1, 2, 3, "TEST"))
        asm.write_segment(Segment(False, True, 1, 2, 3, "ING"))

        assert asm.read_header() == ("AMQP", 1, 1, 0, 10)

        seg = asm.read_segment()
        assert seg.first == True
        assert seg.last == False
        assert seg.type == 1
        assert seg.track == 2
        assert seg.channel == 3
        assert seg.payload == "TEST"

        seg = asm.read_segment()
        assert seg.first == False
        assert seg.last == True
        assert seg.type == 1
        assert seg.track == 2
        assert seg.channel == 3
        assert seg.payload == "ING"
diff --git a/RC9/qpid/python/tests/codec.py b/RC9/qpid/python/tests/codec.py
new file mode 100644
index 0000000000..4bd3675af8
--- /dev/null
+++ b/RC9/qpid/python/tests/codec.py
@@ -0,0 +1,607 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import unittest
+from qpid.codec import Codec
+from qpid.spec import load
+from cStringIO import StringIO
+from qpid.reference import ReferenceId
+from qpid.testlib import testrunner
+
+__doc__ = """
+
+ This is a unit test script for qpid/codec.py
+
+ It can be run standalone or as part of the existing test framework.
+
+ To run standalone:
+ -------------------
+
+ Place in the qpid/python/tests/ directory and type...
+
+ python codec.py
+
+  A brief output will be printed on screen. The verbose output will be placed in a file called
+ codec_unit_test_output.txt. [TODO: make this filename configurable]
+
+ To run as part of the existing test framework:
+ -----------------------------------------------
+
+ python run-tests tests.codec
+
+ Change History:
+ -----------------
+ Jimmy John 05/19/2007 Initial draft
+ Jimmy John 05/22/2007 Implemented comments by Rafael Schloming
+
+
+"""
+
SPEC = None  # module-level cache for the parsed AMQP 0-8 spec

def spec():
    """Load and memoize the AMQP 0-8 spec (parsing the XML is slow)."""
    global SPEC
    if SPEC == None:
        SPEC = load(testrunner.get_spec_file("amqp.0-8.xml"))
    return SPEC
+
+# --------------------------------------
+# --------------------------------------
class BaseDataTypes(unittest.TestCase):

    """
    Base class containing common helper functions for the codec tests
    """

    def setUp(self):
        """
        standard setUp for unittest (refer unittest documentation for details)
        """
        # each test gets a fresh codec writing to an in-memory stream
        self.codec = Codec(StringIO(), spec())

    def tearDown(self):
        """
        standard tearDown for unittest (refer unittest documentation for details)
        """
        self.codec.stream.flush()
        self.codec.stream.close()

    def callFunc(self, functionName, *args):
        """
        helper function - given a codec method name and arguments, calls the
        method with the first argument and returns the contents of the
        (encoded) output stream
        """
        getattr(self.codec, functionName)(args[0])
        return self.codec.stream.getvalue()

    def readFunc(self, functionName, *args):
        """
        helper function - replaces the codec's stream with an input stream
        holding the supplied bytes and decodes one value from it
        """
        self.codec.stream = StringIO(args[0])
        return getattr(self.codec, functionName)()
+
+
+# ----------------------------------------
+# ----------------------------------------
class IntegerTestCase(BaseDataTypes):

    """
    Handles octet, short, long, long long (unsigned integer encodings)
    """

    def __init__(self, *args):
        """
        sets constants for use in tests
        """

        BaseDataTypes.__init__(self, *args)
        # the value 2 and its expected big-endian encoding at each width
        self.const_integer = 2
        self.const_integer_octet_encoded = '\x02'
        self.const_integer_short_encoded = '\x00\x02'
        self.const_integer_long_encoded = '\x00\x00\x00\x02'
        self.const_integer_long_long_encoded = '\x00\x00\x00\x00\x00\x00\x00\x02'

    # -------------------------- #
    #  Unsigned Octet - 8 bits   #
    # -------------------------- #

    def test_unsigned_octet(self):
        """
        ubyte format requires 0<=number<=255
        """
        self.failUnlessEqual(self.callFunc('encode_octet', self.const_integer), self.const_integer_octet_encoded, 'octect encoding FAILED...')

    def test_octet_out_of_upper_range(self):
        """
        testing for input above acceptable range
        """
        self.failUnlessRaises(Exception, self.codec.encode_octet, 256)

    def test_uoctet_out_of_lower_range(self):
        """
        testing for input below acceptable range
        """
        self.failUnlessRaises(Exception, self.codec.encode_octet, -1)

    def test_uoctet_with_fraction(self):
        """
        the fractional part should be ignored...
        """
        self.failUnlessEqual(self.callFunc('encode_octet', 2.5), self.const_integer_octet_encoded, 'octect encoding FAILED with fractions...')

    def test_unsigned_octet_decode(self):
        """
        octet decoding
        """
        self.failUnlessEqual(self.readFunc('decode_octet', self.const_integer_octet_encoded), self.const_integer, 'octect decoding FAILED...')

    # ----------------------------------- #
    #  Unsigned Short Integers - 16 bits  #
    # ----------------------------------- #

    def test_ushort_int(self):
        """
        testing unsigned short integer
        """
        self.failUnlessEqual(self.callFunc('encode_short', self.const_integer), self.const_integer_short_encoded, 'short encoding FAILED...')

    def test_ushort_int_out_of_upper_range(self):
        """
        testing for input above acceptable range
        """
        self.failUnlessRaises(Exception, self.codec.encode_short, 65536)

    def test_ushort_int_out_of_lower_range(self):
        """
        testing for input below acceptable range
        """
        self.failUnlessRaises(Exception, self.codec.encode_short, -1)

    def test_ushort_int_with_fraction(self):
        """
        the fractional part should be ignored...
        """
        self.failUnlessEqual(self.callFunc('encode_short', 2.5), self.const_integer_short_encoded, 'short encoding FAILED with fractions...')

    def test_ushort_int_decode(self):
        """
        unsigned short decoding
        """
        self.failUnlessEqual(self.readFunc('decode_short', self.const_integer_short_encoded), self.const_integer, 'unsigned short decoding FAILED...')

    # ---------------------------------- #
    #  Unsigned Long Integers - 32 bits  #
    # ---------------------------------- #

    def test_ulong_int(self):
        """
        testing unsigned long integer
        """
        self.failUnlessEqual(self.callFunc('encode_long', self.const_integer), self.const_integer_long_encoded, 'long encoding FAILED...')

    def test_ulong_int_out_of_upper_range(self):
        """
        testing for input above acceptable range
        """
        self.failUnlessRaises(Exception, self.codec.encode_long, 4294967296)

    def test_ulong_int_out_of_lower_range(self):
        """
        testing for input below acceptable range
        """
        self.failUnlessRaises(Exception, self.codec.encode_long, -1)

    def test_ulong_int_with_fraction(self):
        """
        the fractional part should be ignored...
        """
        self.failUnlessEqual(self.callFunc('encode_long', 2.5), self.const_integer_long_encoded, 'long encoding FAILED with fractions...')

    def test_ulong_int_decode(self):
        """
        unsigned long decoding
        """
        self.failUnlessEqual(self.readFunc('decode_long', self.const_integer_long_encoded), self.const_integer, 'unsigned long decoding FAILED...')

    # --------------------------------------- #
    #  Unsigned Long Long Integers - 64 bits  #
    # --------------------------------------- #

    def test_ulong_long_int(self):
        """
        testing unsigned long long integer
        """
        self.failUnlessEqual(self.callFunc('encode_longlong', self.const_integer), self.const_integer_long_long_encoded, 'long long encoding FAILED...')

    def test_ulong_long_int_out_of_upper_range(self):
        """
        testing for input above acceptable range
        """
        self.failUnlessRaises(Exception, self.codec.encode_longlong, 18446744073709551616)

    def test_ulong_long_int_out_of_lower_range(self):
        """
        testing for input below acceptable range
        """
        self.failUnlessRaises(Exception, self.codec.encode_longlong, -1)

    def test_ulong_long_int_with_fraction(self):
        """
        the fractional part should be ignored...
        """
        self.failUnlessEqual(self.callFunc('encode_longlong', 2.5), self.const_integer_long_long_encoded, 'long long encoding FAILED with fractions...')

    def test_ulong_long_int_decode(self):
        """
        unsigned long long decoding
        """
        self.failUnlessEqual(self.readFunc('decode_longlong', self.const_integer_long_long_encoded), self.const_integer, 'unsigned long long decoding FAILED...')
+
+# -----------------------------------
+# -----------------------------------
class BitTestCase(BaseDataTypes):

    """
    Handles bits (packed least-significant-bit first into octets)
    """

    def callFunc(self, functionName, *args):
        """
        helper function - encodes every argument, then flushes so a
        partially filled bit octet is written out before reading the stream
        """
        for ele in args:
            getattr(self.codec, functionName)(ele)

        self.codec.flush()
        return self.codec.stream.getvalue()

    def test_bit1(self):
        """
        sends in 11
        """
        self.failUnlessEqual(self.callFunc('encode_bit', 1, 1), '\x03', '11 bit encoding FAILED...')

    def test_bit2(self):
        """
        sends in 10011
        """
        self.failUnlessEqual(self.callFunc('encode_bit', 1, 1, 0, 0, 1), '\x13', '10011 bit encoding FAILED...')

    def test_bit3(self):
        """
        sends in 1110100111 [10 bits (right to left), should be compressed into two octets]
        """
        self.failUnlessEqual(self.callFunc('encode_bit', 1,1,1,0,0,1,0,1,1,1), '\xa7\x03', '1110100111(right to left) bit encoding FAILED...')

    def test_bit_decode_1(self):
        """
        decode bit 1
        """
        self.failUnlessEqual(self.readFunc('decode_bit', '\x01'), 1, 'decode bit 1 FAILED...')

    def test_bit_decode_0(self):
        """
        decode bit 0
        """
        self.failUnlessEqual(self.readFunc('decode_bit', '\x00'), 0, 'decode bit 0 FAILED...')
+
+# -----------------------------------
+# -----------------------------------
class StringTestCase(BaseDataTypes):

    """
    Handles short strings, long strings
    """

    # ------------------------------------------------------------- #
    # Short Strings - 8 bit length followed by zero or more octets  #
    # ------------------------------------------------------------- #

    def test_short_string_zero_length(self):
        """
        0 length short string
        """
        self.failUnlessEqual(self.callFunc('encode_shortstr', ''), '\x00', '0 length short string encoding FAILED...')

    def test_short_string_positive_length(self):
        """
        positive length short string
        """
        self.failUnlessEqual(self.callFunc('encode_shortstr', 'hello world'), '\x0bhello world', 'positive length short string encoding FAILED...')

    def test_short_string_out_of_upper_range(self):
        """
        string length > 255 must be rejected (only one length octet)
        """
        self.failUnlessRaises(Exception, self.codec.encode_shortstr, 'x'*256)

    def test_short_string_decode(self):
        """
        short string decode
        """
        self.failUnlessEqual(self.readFunc('decode_shortstr', '\x0bhello world'), 'hello world', 'short string decode FAILED...')

    # ------------------------------------------------------------- #
    # Long Strings - 32 bit length followed by zero or more octets  #
    # ------------------------------------------------------------- #

    def test_long_string_zero_length(self):
        """
        0 length long string
        """
        self.failUnlessEqual(self.callFunc('encode_longstr', ''), '\x00\x00\x00\x00', '0 length long string encoding FAILED...')

    def test_long_string_positive_length(self):
        """
        positive length long string
        """
        self.failUnlessEqual(self.callFunc('encode_longstr', 'hello world'), '\x00\x00\x00\x0bhello world', 'positive length long string encoding FAILED...')

    def test_long_string_decode(self):
        """
        long string decode
        """
        self.failUnlessEqual(self.readFunc('decode_longstr', '\x00\x00\x00\x0bhello world'), 'hello world', 'long string decode FAILED...')
+
+
+# --------------------------------------
+# --------------------------------------
class TimestampTestCase(BaseDataTypes):

    """
    No need of any test cases here as timestamps are implemented as
    long long, which is tested by IntegerTestCase above
    """
    pass
+
+# ---------------------------------------
+# ---------------------------------------
class FieldTableTestCase(BaseDataTypes):

    """
    Handles Field Tables

    Only S/I type messages seem to be implemented currently
    """

    def __init__(self, *args):
        """
        sets constants for use in tests
        """

        BaseDataTypes.__init__(self, *args)
        # two-entry table and its expected encoding; NOTE(review): the
        # expected byte order relies on this dict's iteration order
        self.const_field_table_dummy_dict = {'$key1':'value1','$key2':'value2'}
        self.const_field_table_dummy_dict_encoded = '\x00\x00\x00\x22\x05$key2S\x00\x00\x00\x06value2\x05$key1S\x00\x00\x00\x06value1'

    def test_field_table_name_value_pair(self):
        """
        valid name value pair
        """
        self.failUnlessEqual(self.callFunc('encode_table', {'$key1':'value1'}), '\x00\x00\x00\x11\x05$key1S\x00\x00\x00\x06value1', 'valid name value pair encoding FAILED...')

    def test_field_table_multiple_name_value_pair(self):
        """
        multiple name value pair
        """
        self.failUnlessEqual(self.callFunc('encode_table', self.const_field_table_dummy_dict), self.const_field_table_dummy_dict_encoded, 'multiple name value pair encoding FAILED...')

    def test_field_table_decode(self):
        """
        field table decode
        """
        self.failUnlessEqual(self.readFunc('decode_table', self.const_field_table_dummy_dict_encoded), self.const_field_table_dummy_dict, 'field table decode FAILED...')
+
+
+# ------------------------------------
+# ------------------------------------
class ContentTestCase(BaseDataTypes):

    """
    Handles Content data types (inline bodies and reference ids)
    """

    def test_content_inline(self):
        """
        inline content: 0x00 flag then a long string body
        """
        self.failUnlessEqual(self.callFunc('encode_content', 'hello inline message'), '\x00\x00\x00\x00\x14hello inline message', 'inline content encoding FAILED...')

    def test_content_reference(self):
        """
        reference content: 0x01 flag then a long string reference id
        """
        self.failUnlessEqual(self.callFunc('encode_content', ReferenceId('dummyId')), '\x01\x00\x00\x00\x07dummyId', 'reference content encoding FAILED...')

    def test_content_inline_decode(self):
        """
        inline content decode
        """
        self.failUnlessEqual(self.readFunc('decode_content', '\x00\x00\x00\x00\x14hello inline message'), 'hello inline message', 'inline content decode FAILED...')

    def test_content_reference_decode(self):
        """
        reference content decode (yields a ReferenceId, so compare .id)
        """
        self.failUnlessEqual(self.readFunc('decode_content', '\x01\x00\x00\x00\x07dummyId').id, 'dummyId', 'reference content decode FAILED...')
+
+# ------------------------ #
+# Pre - existing test code #
+# ------------------------ #
+
+# ---------------------
def test(type, value):
    """
    old test function cut/copy/paste from qpid/codec.py

    Encodes the value(s) as the given spec type with a fresh codec,
    decodes them back and raises AssertionError if the round trip is
    not lossless.
    """
    if isinstance(value, (list, tuple)):
        values = value
    else:
        values = [value]
    stream = StringIO()
    codec = Codec(stream, spec())
    for v in values:
        codec.encode(type, v)
    codec.flush()
    enc = stream.getvalue()
    # rewind and decode everything back from the same stream
    stream.reset()
    dup = []
    for i in xrange(len(values)):
        dup.append(codec.decode(type))
    if values != dup:
        raise AssertionError("%r --> %r --> %r" % (values, enc, dup))
+
+# -----------------------
def dotest(type, value):
    """
    old test function cut/copy/paste from qpid/codec.py
    """
    # forward straight through to test(); the tuple pack/unpack dance
    # of the original was a no-op
    test(type, value)
+
+# -------------
def oldtests():
    """
    old test function cut/copy/paste from qpid/codec.py

    Round-trips bits, tables, every integer width and both string
    encodings through the codec.
    """
    # bit patterns repeated 0-9 times, converted to lists of booleans
    for value in ("1", "0", "110", "011", "11001", "10101", "10011"):
        for i in range(10):
            dotest("bit", map(lambda x: x == "1", value*i))

    for value in ({}, {"asdf": "fdsa", "fdsa": 1, "three": 3}, {"one": 1}):
        dotest("table", value)

    # every single-octet value at every integer width
    for type in ("octet", "short", "long", "longlong"):
        for value in range(0, 256):
            dotest(type, value)

    for type in ("shortstr", "longstr"):
        for value in ("", "a", "asdf"):
            dotest(type, value)
+
+# -----------------------------------------
class oldTests(unittest.TestCase):

    """
    class to handle pre-existing test cases (wraps oldtests() so the
    loader picks them up)
    """

    def test_oldtestcases(self):
        """
        call the old tests
        """
        return oldtests()
+
+# ---------------------------
+# ---------------------------
if __name__ == '__main__':

    codec_test_suite = unittest.TestSuite()

    # adding all the test suites...
    codec_test_suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(IntegerTestCase))
    codec_test_suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(BitTestCase))
    codec_test_suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(StringTestCase))
    codec_test_suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TimestampTestCase))
    codec_test_suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(FieldTableTestCase))
    codec_test_suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(ContentTestCase))

    # loading pre-existing test case from qpid/codec.py
    codec_test_suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(oldTests))

    # verbose runner output is captured in a buffer; only a summary is
    # printed to stdout, the full text is written to a file below
    run_output_stream = StringIO()
    test_runner = unittest.TextTestRunner(run_output_stream, '', '')
    test_result = test_runner.run(codec_test_suite)

    print '\n%d test run...' % (test_result.testsRun)

    if test_result.wasSuccessful():
        print '\nAll tests successful\n'

    if test_result.failures:
        print '\n----------'
        print '%d FAILURES:' % (len(test_result.failures))
        print '----------\n'
        for failure in test_result.failures:
            print str(failure[0]) + ' ... FAIL'

    if test_result.errors:
        print '\n---------'
        print '%d ERRORS:' % (len(test_result.errors))
        print '---------\n'

        for error in test_result.errors:
            print str(error[0]) + ' ... ERROR'

    # full verbose output for later inspection
    f = open('codec_unit_test_output.txt', 'w')
    f.write(str(run_output_stream.getvalue()))
    f.close()
diff --git a/RC9/qpid/python/tests/codec010.py b/RC9/qpid/python/tests/codec010.py
new file mode 100644
index 0000000000..1912eac591
--- /dev/null
+++ b/RC9/qpid/python/tests/codec010.py
@@ -0,0 +1,120 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import time
+
+from unittest import TestCase
+from qpid.spec010 import load
+from qpid.codec010 import StringCodec
+from qpid.testlib import testrunner
+from qpid.datatypes import timestamp
+
class CodecTest(TestCase):
    """Round-trip tests for the 0-10 typed codec: maps, lists, arrays,
    integers and datetimes."""

    def setUp(self):
        self.spec = load(testrunner.get_spec_file("amqp.0-10.xml"))

    def check(self, type, value, compare=True):
        """Encode value as the named spec type, decode it back, assert
        the round trip is lossless (unless compare=False) and return
        the decoded value for further checks."""
        t = self.spec[type]
        sc = StringCodec(self.spec)
        t.encode(sc, value)
        decoded = t.decode(sc)
        if compare:
            assert decoded == value, "%s, %s" % (decoded, value)
        return decoded

    def testMapString(self):
        self.check("map", {"string": "this is a test"})

    def testMapInt(self):
        self.check("map", {"int": 3})

    def testMapLong(self):
        self.check("map", {"long": 2**32})
        self.check("map", {"long": 1 << 34})
        self.check("map", {"long": -(1 << 34)})

    def testMapTimestamp(self):
        decoded = self.check("map", {"timestamp": timestamp(0)})
        assert isinstance(decoded["timestamp"], timestamp)

    def testMapDatetime(self):
        # datetimes are encoded as timestamps, so the decoded value has
        # a different type than the input and is compared manually
        decoded = self.check("map", {"datetime": timestamp(0).datetime()}, compare=False)
        assert isinstance(decoded["datetime"], timestamp)
        assert decoded["datetime"] == 0.0

    def testMapNoneValue(self):
        # Renamed from testMapNone: the original name was shadowed by
        # the later method of the same name, so this case never ran.
        self.check("map", {"none": None})

    def testMapNested(self):
        self.check("map", {"map": {"string": "nested test"}})

    def testMapList(self):
        self.check("map", {"list": [1, "two", 3.0, -4]})

    def testMapAll(self):
        decoded = self.check("map", {"string": "this is a test",
                                     "int": 3,
                                     "long": 2**32,
                                     "timestamp": timestamp(0),
                                     "none": None,
                                     "map": {"string": "nested map"},
                                     "list": [1, "two", 3.0, -4]})
        assert isinstance(decoded["timestamp"], timestamp)

    def testMapEmpty(self):
        self.check("map", {})

    def testMapNone(self):
        self.check("map", None)

    def testList(self):
        self.check("list", [1, "two", 3.0, -4])

    def testListEmpty(self):
        self.check("list", [])

    def testListNone(self):
        self.check("list", None)

    def testArrayInt(self):
        self.check("array", [1, 2, 3, 4])

    def testArrayString(self):
        self.check("array", ["one", "two", "three", "four"])

    def testArrayEmpty(self):
        self.check("array", [])

    def testArrayNone(self):
        self.check("array", None)

    def testInt16(self):
        self.check("int16", 3)
        self.check("int16", -3)

    def testInt64(self):
        self.check("int64", 3)
        self.check("int64", -3)
        self.check("int64", 1<<34)
        self.check("int64", -(1<<34))

    def testDatetime(self):
        self.check("datetime", timestamp(0))
        self.check("datetime", timestamp(long(time.time())))
diff --git a/RC9/qpid/python/tests/connection.py b/RC9/qpid/python/tests/connection.py
new file mode 100644
index 0000000000..512fa62189
--- /dev/null
+++ b/RC9/qpid/python/tests/connection.py
@@ -0,0 +1,215 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from threading import *
+from unittest import TestCase
+from qpid.util import connect, listen
+from qpid.connection import *
+from qpid.datatypes import Message
+from qpid.testlib import testrunner
+from qpid.delegates import Server
+from qpid.queue import Queue
+from qpid.spec010 import load
+from qpid.session import Delegate
+
+PORT = 1234
+
class TestServer:
    """
    Server-side delegate factory used by ConnectionTest; non-echo
    transfers are collected on a queue for the test to inspect.
    """

    def __init__(self, queue):
        self.queue = queue

    def connection(self, connection):
        # delegate for each accepted connection
        return Server(connection, delegate=self.session)

    def session(self, session):
        # disable auto_sync so the server side can respond asynchronously
        session.auto_sync = False
        return TestSession(session, self.queue)
+
class TestSession(Delegate):
    """
    Server-side session delegate: echoes transfers addressed to "echo",
    drops the socket for "abort", and queues everything else.
    """

    def __init__(self, session, queue):
        self.session = session
        self.queue = queue

    def execution_sync(self, es):
        # sync points need no server-side action in these tests
        pass

    def queue_query(self, qq):
        # answer with a result struct naming the queried queue
        return qq._type.result.type.new((qq.queue,), {})

    def message_transfer(self, cmd, headers, body):
        if cmd.destination == "echo":
            m = Message(body)
            m.headers = headers
            self.session.message_transfer(cmd.destination, cmd.accept_mode,
                                          cmd.acquire_mode, m)
        elif cmd.destination == "abort":
            # simulate an abrupt connection loss
            self.session.channel.connection.sock.close()
        else:
            self.queue.put((cmd, headers, body))
+
class ConnectionTest(TestCase):
    """
    Tests the 0-10 Connection/Session layer against an in-process
    TestServer running on a background thread.
    """

    def setUp(self):
        self.spec = load(testrunner.get_spec_file("amqp.0-10.xml"))
        self.queue = Queue()
        self.running = True
        started = Event()

        def run():
            ts = TestServer(self.queue)
            # lambda: self.running lets tearDown stop the accept loop;
            # started.set() signals that the listener is ready
            for s in listen("0.0.0.0", PORT, lambda: self.running, lambda: started.set()):
                conn = Connection(s, self.spec, ts.connection)
                try:
                    conn.start(5)
                except Closed:
                    pass

        self.server = Thread(target=run)
        self.server.setDaemon(True)
        self.server.start()

        started.wait(3)
        assert started.isSet()

    def tearDown(self):
        self.running = False
        # open and close one last connection so the accept loop wakes
        # up and notices self.running is now False
        connect("0.0.0.0", PORT).close()
        self.server.join(3)

    def connect(self):
        """Return a fresh client Connection to the test server."""
        return Connection(connect("0.0.0.0", PORT), self.spec)

    def test(self):
        c = self.connect()
        c.start(10)

        ssn1 = c.session("test1", timeout=10)
        ssn2 = c.session("test2", timeout=10)

        # both sessions registered and attached to distinct channels
        assert ssn1 == c.sessions["test1"]
        assert ssn2 == c.sessions["test2"]
        assert ssn1.channel != None
        assert ssn2.channel != None
        assert ssn1 in c.attached.values()
        assert ssn2 in c.attached.values()

        ssn1.close(5)

        # closing one session detaches/deregisters only that session
        assert ssn1.channel == None
        assert ssn1 not in c.attached.values()
        assert ssn2 in c.sessions.values()

        ssn2.close(5)

        assert ssn2.channel == None
        assert ssn2 not in c.attached.values()
        assert ssn2 not in c.sessions.values()

        ssn = c.session("session", timeout=10)

        assert ssn.channel != None
        assert ssn in c.sessions.values()

        destinations = ("one", "two", "three")

        for d in destinations:
            ssn.message_transfer(d)

        # the server queues every non-echo transfer for inspection
        for d in destinations:
            cmd, header, body = self.queue.get(10)
            assert cmd.destination == d
            assert header == None
            assert body == None

        msg = Message("this is a test")
        ssn.message_transfer("four", message=msg)
        cmd, header, body = self.queue.get(10)
        assert cmd.destination == "four"
        assert header == None
        assert body == msg.body

        qq = ssn.queue_query("asdf")
        assert qq.queue == "asdf"
        c.close(5)

    def testCloseGet(self):
        # messages already echoed must remain retrievable after the
        # server aborts the connection; a further get raises Closed
        c = self.connect()
        c.start(10)
        ssn = c.session("test", timeout=10)
        echos = ssn.incoming("echo")

        for i in range(10):
            ssn.message_transfer("echo", message=Message("test%d" % i))

        # don't wait for a reply to the transfer that kills the socket
        ssn.auto_sync=False
        ssn.message_transfer("abort")

        for i in range(10):
            m = echos.get(timeout=10)
            assert m.body == "test%d" % i

        try:
            m = echos.get(timeout=10)
            assert False
        except Closed, e:
            pass

    def testCloseListen(self):
        # like testCloseGet, but via a listener; the abort must be
        # reported to the exception listener exactly once
        c = self.connect()
        c.start(10)
        ssn = c.session("test", timeout=10)
        echos = ssn.incoming("echo")

        messages = []
        exceptions = []
        condition = Condition()
        def listener(m): messages.append(m)
        def exc_listener(e):
            exceptions.append(e)
            condition.acquire()
            condition.notify()
            condition.release()

        echos.listen(listener, exc_listener)

        for i in range(10):
            ssn.message_transfer("echo", message=Message("test%d" % i))

        ssn.auto_sync=False
        ssn.message_transfer("abort")

        # wait until the exception listener reports the close
        condition.acquire()
        condition.wait(10)
        condition.release()

        for i in range(10):
            m = messages.pop(0)
            assert m.body == "test%d" % i

        assert len(exceptions) == 1

    def testSync(self):
        c = self.connect()
        c.start(10)
        s = c.session("test")
        s.auto_sync = False
        s.message_transfer("echo", message=Message("test"))
        # explicit sync must complete once the echo has been processed
        s.sync(10)
diff --git a/RC9/qpid/python/tests/datatypes.py b/RC9/qpid/python/tests/datatypes.py
new file mode 100644
index 0000000000..4b9e1bcc78
--- /dev/null
+++ b/RC9/qpid/python/tests/datatypes.py
@@ -0,0 +1,257 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from unittest import TestCase
+from qpid.testlib import testrunner
+from qpid.spec010 import load
+from qpid.datatypes import *
+
class SerialTest(TestCase):
    """
    Tests 32-bit serial number arithmetic (wraparound comparison,
    presumably per RFC 1982 -- confirm against qpid.datatypes.serial).
    """

    def test(self):
        for s in (serial(0), serial(0x8FFFFFFF), serial(0xFFFFFFFF)):
            assert s + 1 > s
            assert s - 1 < s
            assert s < s + 1
            assert s > s - 1

        # increment wraps around to zero
        assert serial(0xFFFFFFFF) + 1 == serial(0)

        # in serial ordering 0xFFFFFFFF precedes 0
        assert min(serial(0xFFFFFFFF), serial(0x0)) == serial(0xFFFFFFFF)
        assert max(serial(0xFFFFFFFF), serial(0x0)) == serial(0x0)

    def testIncr(self):
        s = serial(0)
        s += 1
        assert s == serial(1)

    def testIn(self):
        l = [serial(1), serial(2), serial(3), serial(4)]
        assert serial(1) in l
        # 0xFFFFFFFF + 2 wraps around to 1
        assert serial(0xFFFFFFFF + 2) in l
        # plain ints compare equal to serials
        assert 4 in l

    def testNone(self):
        assert serial(0) != None

    def testHash(self):
        # serials must hash like their plain integer value
        d = {}
        d[serial(0)] = "zero"
        assert d[0] == "zero"
+
class RangedSetTest(TestCase):
    """Tests RangedSet: its ranges must stay sorted, disjoint and
    coalesced as values are added."""

    def check(self, ranges):
        """Assert the RangedSet invariants: range posts appear in
        ascending order and no two adjacent ranges are mergeable."""
        posts = []
        for rng in ranges:  # renamed from 'range' (shadowed the builtin)
            posts.append(rng.lower)
            posts.append(rng.upper)

        ordered = posts[:]  # renamed from 'sorted' (shadowed the builtin)
        ordered.sort()

        assert posts == ordered

        idx = 1
        while idx + 1 < len(posts):
            # consecutive posts would mean two ranges that should have
            # been coalesced into one
            assert posts[idx] + 1 != posts[idx+1]
            idx += 2

    def test(self):
        rs = RangedSet()

        self.check(rs.ranges)

        rs.add(1)

        assert 1 in rs
        assert 2 not in rs
        assert 0 not in rs
        self.check(rs.ranges)

        rs.add(2)

        assert 0 not in rs
        assert 1 in rs
        assert 2 in rs
        assert 3 not in rs
        self.check(rs.ranges)

        rs.add(0)

        assert -1 not in rs
        assert 0 in rs
        assert 1 in rs
        assert 2 in rs
        assert 3 not in rs
        self.check(rs.ranges)

        rs.add(37)

        assert -1 not in rs
        assert 0 in rs
        assert 1 in rs
        assert 2 in rs
        assert 3 not in rs
        assert 36 not in rs
        assert 37 in rs
        assert 38 not in rs
        self.check(rs.ranges)

        rs.add(-1)
        self.check(rs.ranges)

        rs.add(-3)
        self.check(rs.ranges)

        rs.add(1, 20)
        assert 21 not in rs
        assert 20 in rs
        self.check(rs.ranges)

    def testAddSelf(self):
        # adding an identical span twice must not duplicate the range
        a = RangedSet()
        a.add(0, 8)
        self.check(a.ranges)
        a.add(0, 8)
        self.check(a.ranges)
        assert len(a.ranges) == 1
        rng = a.ranges[0]  # renamed from 'range' (shadowed the builtin)
        assert rng.lower == 0
        assert rng.upper == 8
+
class RangeTest(TestCase):
    """
    Tests Range.intersect for overlapping, disjoint and nested ranges.
    """

    def testIntersect1(self):
        # partial overlap; intersect is symmetric
        a = Range(0, 10)
        b = Range(9, 20)
        i1 = a.intersect(b)
        i2 = b.intersect(a)
        assert i1.upper == 10
        assert i2.upper == 10
        assert i1.lower == 9
        assert i2.lower == 9

    def testIntersect2(self):
        # disjoint ranges have no intersection
        a = Range(0, 10)
        b = Range(11, 20)
        assert a.intersect(b) == None
        assert b.intersect(a) == None

    def testIntersect3(self):
        # b fully contained within a
        a = Range(0, 10)
        b = Range(3, 5)
        i1 = a.intersect(b)
        i2 = b.intersect(a)
        assert i1.upper == 5
        assert i2.upper == 5
        assert i1.lower == 3
        assert i2.lower == 3
+
class UUIDTest(TestCase):
    """
    Sanity check that uuid4 produces distinct values.
    """

    def test(self):
        # this test is kind of lame, but it does exercise the basic
        # functionality of the class
        u = uuid4()
        for i in xrange(1024):
            assert u != uuid4()
+
class MessageTest(TestCase):
    """
    Tests the has/get/set/clear accessors for a Message's property
    structs (message/delivery/fragment properties).
    """

    def setUp(self):
        self.spec = load(testrunner.get_spec_file("amqp.0-10-qpid-errata.xml"))
        # one struct of each property type for reuse across tests
        self.mp = Struct(self.spec["message.message_properties"])
        self.dp = Struct(self.spec["message.delivery_properties"])
        self.fp = Struct(self.spec["message.fragment_properties"])

    def testHas(self):
        m = Message(self.mp, self.dp, self.fp, "body")
        assert m.has("message_properties")
        assert m.has("delivery_properties")
        assert m.has("fragment_properties")

    def testGet(self):
        m = Message(self.mp, self.dp, self.fp, "body")
        assert m.get("message_properties") == self.mp
        assert m.get("delivery_properties") == self.dp
        assert m.get("fragment_properties") == self.fp

    def testSet(self):
        # set() adds a property struct that was absent at construction
        m = Message(self.mp, self.dp, "body")
        assert m.get("fragment_properties") is None
        m.set(self.fp)
        assert m.get("fragment_properties") == self.fp

    def testSetOnEmpty(self):
        m = Message("body")
        assert m.get("delivery_properties") is None
        m.set(self.dp)
        assert m.get("delivery_properties") == self.dp

    def testSetReplace(self):
        # set() with a struct of an existing type replaces the old one
        m = Message(self.mp, self.dp, self.fp, "body")
        dp = Struct(self.spec["message.delivery_properties"])
        assert m.get("delivery_properties") == self.dp
        assert m.get("delivery_properties") != dp
        m.set(dp)
        assert m.get("delivery_properties") != self.dp
        assert m.get("delivery_properties") == dp

    def testClear(self):
        # clear() removes only the named property struct
        m = Message(self.mp, self.dp, self.fp, "body")
        assert m.get("message_properties") == self.mp
        assert m.get("delivery_properties") == self.dp
        assert m.get("fragment_properties") == self.fp
        m.clear("fragment_properties")
        assert m.get("fragment_properties") is None
        assert m.get("message_properties") == self.mp
        assert m.get("delivery_properties") == self.dp
+
class TimestampTest(TestCase):
    """
    Tests that arithmetic on timestamp values preserves the timestamp
    type and that datetime conversion round-trips.
    """

    def check(self, expected, *values):
        # every arithmetic result must still be a timestamp, not a float
        for v in values:
            assert isinstance(v, timestamp)
            assert v == expected
            assert v == timestamp(expected)

    def testAdd(self):
        self.check(4.0,
                   timestamp(2.0) + 2.0,
                   2.0 + timestamp(2.0))

    def testSub(self):
        self.check(2.0,
                   timestamp(4.0) - 2.0,
                   4.0 - timestamp(2.0))

    def testNeg(self):
        self.check(-4.0, -timestamp(4.0))

    def testPos(self):
        self.check(+4.0, +timestamp(4.0))

    def testAbs(self):
        self.check(4.0, abs(timestamp(-4.0)))

    def testConversion(self):
        # datetime round trip preserves the instant
        dt = timestamp(0).datetime()
        t = timestamp(dt)
        assert t == 0
diff --git a/RC9/qpid/python/tests/framer.py b/RC9/qpid/python/tests/framer.py
new file mode 100644
index 0000000000..e99166721c
--- /dev/null
+++ b/RC9/qpid/python/tests/framer.py
@@ -0,0 +1,95 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from threading import *
+from unittest import TestCase
+from qpid.util import connect, listen
+from qpid.framer import *
+
+PORT = 1234
+
+class FramerTest(TestCase):
+    """Round-trip test for qpid.framer: setUp spawns a daemon thread that
+    echoes the protocol header and every frame back to the client, and the
+    test asserts on what it reads back."""
+
+    def setUp(self):
+        # Start an echo server on PORT.  'started' lets us block until the
+        # listening socket is actually open before any test tries to connect.
+        self.running = True
+        started = Event()
+        def run():
+            for s in listen("0.0.0.0", PORT, lambda: self.running, lambda: started.set()):
+                conn = Framer(s)
+                try:
+                    # Echo the last two header fields (presumably the
+                    # major/minor version pair -- confirm against
+                    # Framer.read_header), then echo frames until the peer
+                    # disconnects and read raises Closed.
+                    conn.write_header(*conn.read_header()[-2:])
+                    while True:
+                        frame = conn.read_frame()
+                        conn.write_frame(frame)
+                        conn.flush()
+                except Closed:
+                    pass
+
+        self.server = Thread(target=run)
+        self.server.setDaemon(True)
+        self.server.start()
+
+        # Bounded wait: if startup failed (e.g. PORT 1234 already in use)
+        # the isSet() assertion fails instead of hanging forever.
+        started.wait(3)
+        assert started.isSet()
+
+    def tearDown(self):
+        # Flip the predicate passed to listen() so the accept loop exits,
+        # then give the server thread a bounded time to finish.
+        self.running = False
+        self.server.join(3)
+
+    def test(self):
+        c = Framer(connect("0.0.0.0", PORT))
+
+        # Header handshake: we send version (0, 10) and expect the echoed
+        # full header tuple back.
+        c.write_header(0, 10)
+        assert c.read_header() == ("AMQP", 1, 1, 0, 10)
+
+        # A four-segment message: first frame flagged FIRST_FRM, last
+        # flagged LAST_FRM, middle frames unflagged.
+        c.write_frame(Frame(FIRST_FRM, 1, 2, 3, "THIS"))
+        c.write_frame(Frame(0, 1, 2, 3, "IS"))
+        c.write_frame(Frame(0, 1, 2, 3, "A"))
+        c.write_frame(Frame(LAST_FRM, 1, 2, 3, "TEST"))
+        c.flush()
+
+        # Each echoed frame must preserve flags, type, track, channel and
+        # payload exactly.
+        f = c.read_frame()
+        assert f.flags & FIRST_FRM
+        assert not (f.flags & LAST_FRM)
+        assert f.type == 1
+        assert f.track == 2
+        assert f.channel == 3
+        assert f.payload == "THIS"
+
+        f = c.read_frame()
+        assert f.flags == 0
+        assert f.type == 1
+        assert f.track == 2
+        assert f.channel == 3
+        assert f.payload == "IS"
+
+        f = c.read_frame()
+        assert f.flags == 0
+        assert f.type == 1
+        assert f.track == 2
+        assert f.channel == 3
+        assert f.payload == "A"
+
+        f = c.read_frame()
+        assert f.flags & LAST_FRM
+        assert not (f.flags & FIRST_FRM)
+        assert f.type == 1
+        assert f.track == 2
+        assert f.channel == 3
+        assert f.payload == "TEST"
diff --git a/RC9/qpid/python/tests/queue.py b/RC9/qpid/python/tests/queue.py
new file mode 100644
index 0000000000..e12354eb43
--- /dev/null
+++ b/RC9/qpid/python/tests/queue.py
@@ -0,0 +1,71 @@
+# Do not delete - marks this directory as a python package.
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import threading, time
+from unittest import TestCase
+from qpid.queue import Queue, Empty, Closed
+
+
+class QueueTest (TestCase):
+    """Tests for the extensions qpid.queue.Queue layers on top of the
+    standard-library queue: listener callbacks and close() semantics."""
+
+    # The qpid queue class just provides some simple extensions to
+    # python's standard queue data structure, so we don't need to test
+    # all the queue functionality.
+
+    def test_listen(self):
+        # A registered listener must receive every put(); listen(None)
+        # unregisters it and restores plain get() behaviour; re-registering
+        # re-enables callback delivery.
+        values = []
+        heard = threading.Event()
+        def listener(x):
+            values.append(x)
+            heard.set()
+
+        q = Queue(0)
+        q.listen(listener)
+        heard.clear()
+        q.put(1)
+        heard.wait()
+        assert values[-1] == 1
+        heard.clear()
+        q.put(2)
+        heard.wait()
+        assert values[-1] == 2
+
+        q.listen(None)
+        q.put(3)
+        # NOTE(review): the 3 here is passed positionally (presumably the
+        # 'block' flag of Queue.get, not a timeout) -- confirm against
+        # qpid.queue.Queue.get's signature.
+        assert q.get(3) == 3
+        q.listen(listener)
+
+        heard.clear()
+        q.put(4)
+        heard.wait()
+        assert values[-1] == 4
+
+    def test_close(self):
+        # Items queued before close() are still delivered in order; once
+        # drained, every further get() must raise Closed (checked repeatedly
+        # to ensure the closed state is sticky).
+        q = Queue(0)
+        q.put(1); q.put(2); q.put(3); q.close()
+        assert q.get() == 1
+        assert q.get() == 2
+        assert q.get() == 3
+        for i in range(10):
+            try:
+                q.get()
+                raise AssertionError("expected Closed")
+            except Closed:
+                pass
diff --git a/RC9/qpid/python/tests/spec.py b/RC9/qpid/python/tests/spec.py
new file mode 100644
index 0000000000..d5ea1d682a
--- /dev/null
+++ b/RC9/qpid/python/tests/spec.py
@@ -0,0 +1,74 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from unittest import TestCase
+from qpid.spec import load
+from qpid.testlib import testrunner
+
+class SpecTest(TestCase):
+    """Sanity checks that qpid.spec.load parses the various AMQP XML
+    specifications and exposes methods, fields, domains and results with
+    the expected structure."""
+
+    def check_load(self, *urls):
+        # Load one or more spec files (later files act as errata overlays)
+        # and probe a few well-known definitions.
+        spec = load(*map(testrunner.get_spec_file, urls))
+        qdecl = spec.method("queue_declare")
+        assert qdecl != None
+        assert not qdecl.content
+
+        queue = qdecl.fields.byname["queue"]
+        assert queue != None
+        assert queue.domain.name == "queue_name"
+        assert queue.type == "shortstr"
+
+        qdecl_ok = spec.method("queue_declare_ok")
+
+        # 0-8 is actually 8-0
+        if (spec.major == 8 and spec.minor == 0 or
+            spec.major == 0 and spec.minor == 9):
+            # Pre-0-10 specs model queue_declare_ok as an explicit response
+            # method linked from queue_declare.
+            assert qdecl_ok != None
+
+            assert len(qdecl.responses) == 1
+            assert qdecl_ok in qdecl.responses
+
+            publish = spec.method("basic_publish")
+            assert publish != None
+            assert publish.content
+
+        if (spec.major == 0 and spec.minor == 10):
+            # 0-10 drops the _ok response method and instead uses typed
+            # results (e.g. queue_query below) and struct domains.
+            assert qdecl_ok == None
+            reply_to = spec.domains.byname["reply_to"]
+            assert reply_to.type.size == 2
+            assert reply_to.type.pack == 2
+            assert len(reply_to.type.fields) == 2
+
+            qq = spec.method("queue_query")
+            assert qq != None
+            assert qq.result.size == 4
+            assert qq.result.type != None
+            args = qq.result.fields.byname["arguments"]
+            assert args.type == "table"
+
+    def test_load_0_8(self):
+        self.check_load("amqp.0-8.xml")
+
+    def test_load_0_9(self):
+        self.check_load("amqp.0-9.xml")
+
+    def test_load_0_9_errata(self):
+        self.check_load("amqp.0-9.xml", "amqp-errata.0-9.xml")
+
+    def test_load_0_10(self):
+        self.check_load("amqp.0-10-preview.xml")
diff --git a/RC9/qpid/python/tests/spec010.py b/RC9/qpid/python/tests/spec010.py
new file mode 100644
index 0000000000..df9cb9590a
--- /dev/null
+++ b/RC9/qpid/python/tests/spec010.py
@@ -0,0 +1,84 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import os, tempfile, shutil, stat
+from unittest import TestCase
+from qpid.spec010 import load
+from qpid.codec010 import Codec, StringCodec
+from qpid.testlib import testrunner
+from qpid.datatypes import Struct
+
+class SpecTest(TestCase):
+
+ def setUp(self):
+ self.spec = load(testrunner.get_spec_file("amqp.0-10-qpid-errata.xml"))
+
+ def testSessionHeader(self):
+ hdr = self.spec["session.header"]
+ sc = StringCodec(self.spec)
+ hdr.encode(sc, Struct(hdr, sync=True))
+ assert sc.encoded == "\x01\x01"
+
+ sc = StringCodec(self.spec)
+ hdr.encode(sc, Struct(hdr, sync=False))
+ assert sc.encoded == "\x01\x00"
+
+ def encdec(self, type, value):
+ sc = StringCodec(self.spec)
+ type.encode(sc, value)
+ decoded = type.decode(sc)
+ return decoded
+
+ def testMessageProperties(self):
+ mp = self.spec["message.message_properties"]
+ rt = self.spec["message.reply_to"]
+
+ props = Struct(mp, content_length=3735928559L,
+ reply_to=Struct(rt, exchange="the exchange name",
+ routing_key="the routing key"))
+ dec = self.encdec(mp, props)
+ assert props.content_length == dec.content_length
+ assert props.reply_to.exchange == dec.reply_to.exchange
+ assert props.reply_to.routing_key == dec.reply_to.routing_key
+
+ def testMessageSubscribe(self):
+ ms = self.spec["message.subscribe"]
+ cmd = Struct(ms, exclusive=True, destination="this is a test")
+ dec = self.encdec(self.spec["message.subscribe"], cmd)
+ assert cmd.exclusive == dec.exclusive
+ assert cmd.destination == dec.destination
+
+ def testXid(self):
+ xid = self.spec["dtx.xid"]
+ sc = StringCodec(self.spec)
+ st = Struct(xid, format=0, global_id="gid", branch_id="bid")
+ xid.encode(sc, st)
+ assert sc.encoded == '\x00\x00\x00\x10\x06\x04\x07\x00\x00\x00\x00\x00\x03gid\x03bid'
+ assert xid.decode(sc).__dict__ == st.__dict__
+
+ def testLoadReadOnly(self):
+ spec = "amqp.0-10-qpid-errata.xml"
+ f = testrunner.get_spec_file(spec)
+ dest = tempfile.mkdtemp()
+ shutil.copy(f, dest)
+ shutil.copy(os.path.join(os.path.dirname(f), "amqp.0-10.dtd"), dest)
+ os.chmod(dest, stat.S_IRUSR | stat.S_IXUSR)
+ fname = os.path.join(dest, spec)
+ load(fname)
+ assert not os.path.exists("%s.pcl" % fname)
diff --git a/RC9/qpid/python/tests_0-10/__init__.py b/RC9/qpid/python/tests_0-10/__init__.py
new file mode 100644
index 0000000000..1fd7f72357
--- /dev/null
+++ b/RC9/qpid/python/tests_0-10/__init__.py
@@ -0,0 +1,30 @@
+# Do not delete - marks this directory as a python package.
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from alternate_exchange import *
+from broker import *
+from dtx import *
+from example import *
+from exchange import *
+from message import *
+from query import *
+from queue import *
+from tx import *
diff --git a/RC9/qpid/python/tests_0-10/alternate_exchange.py b/RC9/qpid/python/tests_0-10/alternate_exchange.py
new file mode 100644
index 0000000000..aac8a5e15b
--- /dev/null
+++ b/RC9/qpid/python/tests_0-10/alternate_exchange.py
@@ -0,0 +1,150 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import traceback
+from qpid.queue import Empty
+from qpid.datatypes import Message
+from qpid.testlib import TestBase010
+from qpid.session import SessionException
+
+class AlternateExchangeTests(TestBase010):
+ """
+ Tests for the new mechanism for message returns introduced in 0-10
+ and available in 0-9 for preview
+ """
+
+ def test_unroutable(self):
+ """
+ Test that unroutable messages are delivered to the alternate-exchange if specified
+ """
+ session = self.session
+ #create an exchange with an alternate defined
+ session.exchange_declare(exchange="secondary", type="fanout")
+ session.exchange_declare(exchange="primary", type="direct", alternate_exchange="secondary")
+
+ #declare, bind (to the alternate exchange) and consume from a queue for 'returned' messages
+ session.queue_declare(queue="returns", exclusive=True, auto_delete=True)
+ session.exchange_bind(queue="returns", exchange="secondary")
+ session.message_subscribe(destination="a", queue="returns")
+ session.message_flow(destination="a", unit=session.credit_unit.message, value=0xFFFFFFFF)
+ session.message_flow(destination="a", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ returned = session.incoming("a")
+
+ #declare, bind (to the primary exchange) and consume from a queue for 'processed' messages
+ session.queue_declare(queue="processed", exclusive=True, auto_delete=True)
+ session.exchange_bind(queue="processed", exchange="primary", binding_key="my-key")
+ session.message_subscribe(destination="b", queue="processed")
+ session.message_flow(destination="b", unit=session.credit_unit.message, value=0xFFFFFFFF)
+ session.message_flow(destination="b", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ processed = session.incoming("b")
+
+ #publish to the primary exchange
+ #...one message that makes it to the 'processed' queue:
+ dp=self.session.delivery_properties(routing_key="my-key")
+ session.message_transfer(destination="primary", message=Message(dp, "Good"))
+ #...and one that does not:
+ dp=self.session.delivery_properties(routing_key="unused-key")
+ session.message_transfer(destination="primary", message=Message(dp, "Bad"))
+
+ #delete the exchanges
+ session.exchange_delete(exchange="primary")
+ session.exchange_delete(exchange="secondary")
+
+ #verify behaviour
+ self.assertEqual("Good", processed.get(timeout=1).body)
+ self.assertEqual("Bad", returned.get(timeout=1).body)
+ self.assertEmpty(processed)
+ self.assertEmpty(returned)
+
+ def test_queue_delete(self):
+ """
+ Test that messages in a queue being deleted are delivered to the alternate-exchange if specified
+ """
+ session = self.session
+ #set up a 'dead letter queue':
+ session.exchange_declare(exchange="dlq", type="fanout")
+ session.queue_declare(queue="deleted", exclusive=True, auto_delete=True)
+ session.exchange_bind(exchange="dlq", queue="deleted")
+ session.message_subscribe(destination="dlq", queue="deleted")
+ session.message_flow(destination="dlq", unit=session.credit_unit.message, value=0xFFFFFFFF)
+ session.message_flow(destination="dlq", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ dlq = session.incoming("dlq")
+
+ #create a queue using the dlq as its alternate exchange:
+ session.queue_declare(queue="delete-me", alternate_exchange="dlq")
+ #send it some messages:
+ dp=self.session.delivery_properties(routing_key="delete-me")
+ session.message_transfer(message=Message(dp, "One"))
+ session.message_transfer(message=Message(dp, "Two"))
+ session.message_transfer(message=Message(dp, "Three"))
+ #delete it:
+ session.queue_delete(queue="delete-me")
+ #delete the dlq exchange:
+ session.exchange_delete(exchange="dlq")
+
+ #check the messages were delivered to the dlq:
+ self.assertEqual("One", dlq.get(timeout=1).body)
+ self.assertEqual("Two", dlq.get(timeout=1).body)
+ self.assertEqual("Three", dlq.get(timeout=1).body)
+ self.assertEmpty(dlq)
+
+ def test_delete_while_used_by_queue(self):
+ """
+ Ensure an exchange still in use as an alternate-exchange for a
+ queue can't be deleted
+ """
+ session = self.session
+ session.exchange_declare(exchange="alternate", type="fanout")
+
+ session2 = self.conn.session("alternate", 2)
+ session2.queue_declare(queue="q", alternate_exchange="alternate")
+ try:
+ session2.exchange_delete(exchange="alternate")
+ self.fail("Expected deletion of in-use alternate-exchange to fail")
+ except SessionException, e:
+ session = self.session
+ session.queue_delete(queue="q")
+ session.exchange_delete(exchange="alternate")
+ self.assertEquals(530, e.args[0].error_code)
+
+
+ def test_delete_while_used_by_exchange(self):
+ """
+ Ensure an exchange still in use as an alternate-exchange for
+ another exchange can't be deleted
+ """
+ session = self.session
+ session.exchange_declare(exchange="alternate", type="fanout")
+
+ session = self.conn.session("alternate", 2)
+ session.exchange_declare(exchange="e", type="fanout", alternate_exchange="alternate")
+ try:
+ session.exchange_delete(exchange="alternate")
+ self.fail("Expected deletion of in-use alternate-exchange to fail")
+ except SessionException, e:
+ session = self.session
+ session.exchange_delete(exchange="e")
+ session.exchange_delete(exchange="alternate")
+ self.assertEquals(530, e.args[0].error_code)
+
+
+ def assertEmpty(self, queue):
+ try:
+ msg = queue.get(timeout=1)
+ self.fail("Queue not empty: " + msg)
+ except Empty: None
diff --git a/RC9/qpid/python/tests_0-10/broker.py b/RC9/qpid/python/tests_0-10/broker.py
new file mode 100644
index 0000000000..d4aa57765c
--- /dev/null
+++ b/RC9/qpid/python/tests_0-10/broker.py
@@ -0,0 +1,93 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Closed
+from qpid.queue import Empty
+from qpid.testlib import TestBase010
+from qpid.datatypes import Message, RangedSet
+
+class BrokerTests(TestBase010):
+    """Tests for basic Broker functionality"""
+
+    def test_ack_and_no_ack(self):
+        """
+        First, this test tries to receive a message with a no-ack
+        consumer. Second, this test tries to explicitly receive and
+        acknowledge a message with an acknowledging consumer.
+        """
+        session = self.session
+        session.queue_declare(queue = "myqueue", exclusive=True, auto_delete=True)
+
+        # No ack consumer
+        ctag = "tag1"
+        session.message_subscribe(queue = "myqueue", destination = ctag)
+        session.message_flow(destination=ctag, unit=session.credit_unit.message, value=0xFFFFFFFF)
+        session.message_flow(destination=ctag, unit=session.credit_unit.byte, value=0xFFFFFFFF)
+        body = "test no-ack"
+        session.message_transfer(message=Message(session.delivery_properties(routing_key="myqueue"), body))
+        msg = session.incoming(ctag).get(timeout = 5)
+        self.assert_(msg.body == body)
+
+        # Acknowledging consumer (accept_mode=1 presumably means explicit
+        # accept is required -- confirm against the 0-10 message class)
+        session.queue_declare(queue = "otherqueue", exclusive=True, auto_delete=True)
+        ctag = "tag2"
+        session.message_subscribe(queue = "otherqueue", destination = ctag, accept_mode = 1)
+        session.message_flow(destination=ctag, unit=session.credit_unit.message, value=0xFFFFFFFF)
+        session.message_flow(destination=ctag, unit=session.credit_unit.byte, value=0xFFFFFFFF)
+        body = "test ack"
+        session.message_transfer(message=Message(session.delivery_properties(routing_key="otherqueue"), body))
+        msg = session.incoming(ctag).get(timeout = 5)
+        # explicitly accept the delivery by its id
+        session.message_accept(RangedSet(msg.id))
+        self.assert_(msg.body == body)
+
+    def test_simple_delivery_immediate(self):
+        """
+        Test simple message delivery where consume is issued before publish
+        """
+        session = self.session
+        session.queue_declare(queue="test-queue", exclusive=True, auto_delete=True)
+        session.exchange_bind(queue="test-queue", exchange="amq.fanout")
+        consumer_tag = "tag1"
+        session.message_subscribe(queue="test-queue", destination=consumer_tag)
+        session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFF, destination = consumer_tag)
+        session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = consumer_tag)
+        queue = session.incoming(consumer_tag)
+
+        body = "Immediate Delivery"
+        # positional form: destination "amq.fanout", remaining positional
+        # args left as None, then the message
+        session.message_transfer("amq.fanout", None, None, Message(body))
+        msg = queue.get(timeout=5)
+        self.assert_(msg.body == body)
+
+    def test_simple_delivery_queued(self):
+        """
+        Test basic message delivery where publish is issued before consume
+        (i.e. requires queueing of the message)
+        """
+        session = self.session
+        session.queue_declare(queue="test-queue", exclusive=True, auto_delete=True)
+        session.exchange_bind(queue="test-queue", exchange="amq.fanout")
+        body = "Queued Delivery"
+        session.message_transfer("amq.fanout", None, None, Message(body))
+
+        consumer_tag = "tag1"
+        session.message_subscribe(queue="test-queue", destination=consumer_tag)
+        session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFF, destination = consumer_tag)
+        session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = consumer_tag)
+        queue = session.incoming(consumer_tag)
+        msg = queue.get(timeout=5)
+        self.assert_(msg.body == body)
diff --git a/RC9/qpid/python/tests_0-10/dtx.py b/RC9/qpid/python/tests_0-10/dtx.py
new file mode 100644
index 0000000000..25c2defd3b
--- /dev/null
+++ b/RC9/qpid/python/tests_0-10/dtx.py
@@ -0,0 +1,775 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.datatypes import Message, RangedSet
+from qpid.session import SessionException
+from qpid.testlib import TestBase010
+from qpid.compat import set
+from struct import pack, unpack
+from time import sleep
+
+class DtxTests(TestBase010):
+ """
+ Tests for the amqp dtx related classes.
+
+ Tests of the form test_simple_xxx test the basic transactional
+ behaviour. The approach here is to 'swap' a message from one queue
+ to another by consuming and re-publishing in the same
+ transaction. That transaction is then completed in different ways
+ and the appropriate result verified.
+
+ The other tests enforce more specific rules and behaviour on a
+ per-method or per-field basis.
+ """
+
+ XA_RBROLLBACK = 1
+ XA_RBTIMEOUT = 2
+ XA_OK = 0
+ tx_counter = 0
+
+    def reset_channel(self):
+        """Close and recreate self.session so no unacked messages are held
+        when the test inspects queue depths."""
+        self.session.close()
+        self.session = self.conn.session("dtx-session", 1)
+
+    def test_simple_commit(self):
+        """
+        Test basic one-phase commit behaviour.
+        """
+        guard = self.keepQueuesAlive(["queue-a", "queue-b"])
+        session = self.session
+        tx = self.xid("my-xid")
+        # per the class docstring, txswap consumes from queue-a and
+        # re-publishes to queue-b inside transaction tx (helper defined
+        # elsewhere in this class)
+        self.txswap(tx, "commit")
+
+        #neither queue should have any messages accessible
+        self.assertMessageCount(0, "queue-a")
+        self.assertMessageCount(0, "queue-b")
+
+        #commit
+        self.assertEqual(self.XA_OK, session.dtx_commit(xid=tx, one_phase=True).status)
+
+        #should close and reopen session to ensure no unacked messages are held
+        self.reset_channel()
+
+        #check result
+        self.assertMessageCount(0, "queue-a")
+        self.assertMessageCount(1, "queue-b")
+        self.assertMessageId("commit", "queue-b")
+
+    def test_simple_prepare_commit(self):
+        """
+        Test basic two-phase commit behaviour.
+        """
+        guard = self.keepQueuesAlive(["queue-a", "queue-b"])
+        session = self.session
+        tx = self.xid("my-xid")
+        self.txswap(tx, "prepare-commit")
+
+        #prepare (phase one of two-phase commit)
+        self.assertEqual(self.XA_OK, session.dtx_prepare(xid=tx).status)
+
+        #neither queue should have any messages accessible
+        self.assertMessageCount(0, "queue-a")
+        self.assertMessageCount(0, "queue-b")
+
+        #commit with one_phase=False, i.e. completing the prepared txn
+        self.assertEqual(self.XA_OK, session.dtx_commit(xid=tx, one_phase=False).status)
+
+        self.reset_channel()
+
+        #check result: the swapped message ended up on queue-b
+        self.assertMessageCount(0, "queue-a")
+        self.assertMessageCount(1, "queue-b")
+        self.assertMessageId("prepare-commit", "queue-b")
+
+
+    def test_simple_rollback(self):
+        """
+        Test basic rollback behaviour.
+        """
+        guard = self.keepQueuesAlive(["queue-a", "queue-b"])
+        session = self.session
+        tx = self.xid("my-xid")
+        self.txswap(tx, "rollback")
+
+        #neither queue should have any messages accessible
+        self.assertMessageCount(0, "queue-a")
+        self.assertMessageCount(0, "queue-b")
+
+        #rollback
+        self.assertEqual(self.XA_OK, session.dtx_rollback(xid=tx).status)
+
+        self.reset_channel()
+
+        #check result: the message must be back on the original queue-a
+        self.assertMessageCount(1, "queue-a")
+        self.assertMessageCount(0, "queue-b")
+        self.assertMessageId("rollback", "queue-a")
+
+    def test_simple_prepare_rollback(self):
+        """
+        Test basic rollback behaviour after the transaction has been prepared.
+        """
+        guard = self.keepQueuesAlive(["queue-a", "queue-b"])
+        session = self.session
+        tx = self.xid("my-xid")
+        self.txswap(tx, "prepare-rollback")
+
+        #prepare
+        self.assertEqual(self.XA_OK, session.dtx_prepare(xid=tx).status)
+
+        #neither queue should have any messages accessible
+        self.assertMessageCount(0, "queue-a")
+        self.assertMessageCount(0, "queue-b")
+
+        #rollback: even a prepared transaction must be fully undone
+        self.assertEqual(self.XA_OK, session.dtx_rollback(xid=tx).status)
+
+        self.reset_channel()
+
+        #check result: the message is back on queue-a
+        self.assertMessageCount(1, "queue-a")
+        self.assertMessageCount(0, "queue-b")
+        self.assertMessageId("prepare-rollback", "queue-a")
+
+    def test_select_required(self):
+        """
+        check that an error is flagged if select is not issued before
+        start or end
+        """
+        session = self.session
+        tx = self.xid("dummy")
+        try:
+            session.dtx_start(xid=tx)
+
+            #if we get here we have failed, but need to do some cleanup:
+            session.dtx_end(xid=tx)
+            session.dtx_rollback(xid=tx)
+            self.fail("Session not selected for use with dtx, expected exception!")
+        except SessionException, e:
+            # 503 -- presumably the 0-10 command-invalid/illegal-state code;
+            # confirm against the AMQP 0-10 error-code table
+            self.assertEquals(503, e.args[0].error_code)
+
+    def test_start_already_known(self):
+        """
+        Verify that an attempt to start an association with a
+        transaction that is already known is not allowed (unless the
+        join flag is set).
+        """
+        #create two sessions on different connection & select them for use with dtx:
+        session1 = self.session
+        session1.dtx_select()
+
+        other = self.connect()
+        session2 = other.session("other", 0)
+        session2.dtx_select()
+
+        #create a xid
+        tx = self.xid("dummy")
+        #start work on one session under that xid:
+        session1.dtx_start(xid=tx)
+        #then start on the other without the join set
+        failed = False
+        try:
+            session2.dtx_start(xid=tx)
+        except SessionException, e:
+            failed = True
+            error = e
+
+        #cleanup: only end session2's association if the start unexpectedly
+        #succeeded; session1's association must always be ended/rolled back
+        if not failed:
+            session2.dtx_end(xid=tx)
+            other.close()
+        session1.dtx_end(xid=tx)
+        session1.dtx_rollback(xid=tx)
+
+        #verification is deferred until after cleanup so the broker is left
+        #in a clean state either way
+        if failed: self.assertEquals(530, error.args[0].error_code)
+        else: self.fail("Xid already known, expected exception!")
+
+    def test_forget_xid_on_completion(self):
+        """
+        Verify that a xid is 'forgotten' - and can therefore be used
+        again - once it is completed.
+        """
+        #do some transactional work & complete the transaction
+        self.test_simple_commit()
+        # session has been reset by test_simple_commit, so reselect for dtx
+        self.session.dtx_select()
+
+        #start association for the same xid as the previously completed txn;
+        #this must succeed if the broker forgot the completed xid
+        tx = self.xid("my-xid")
+        self.session.dtx_start(xid=tx)
+        self.session.dtx_end(xid=tx)
+        self.session.dtx_rollback(xid=tx)
+
+    def test_start_join_and_resume(self):
+        """
+        Ensure the correct error is signalled when both the join and
+        resume flags are set on starting an association between a
+        session and a transaction.
+        """
+        session = self.session
+        session.dtx_select()
+        tx = self.xid("dummy")
+        try:
+            session.dtx_start(xid=tx, join=True, resume=True)
+            #failed, but need some cleanup:
+            session.dtx_end(xid=tx)
+            session.dtx_rollback(xid=tx)
+            self.fail("Join and resume both set, expected exception!")
+        except SessionException, e:
+            self.assertEquals(503, e.args[0].error_code)
+
+    def test_start_join(self):
+        """
+        Verify 'join' behaviour, where a session is associated with a
+        transaction that is already associated with another session.
+        """
+        guard = self.keepQueuesAlive(["one", "two"])
+        #create two sessions & select them for use with dtx:
+        session1 = self.session
+        session1.dtx_select()
+
+        session2 = self.conn.session("second", 2)
+        session2.dtx_select()
+
+        #setup: seed each queue with one message
+        session1.queue_declare(queue="one", auto_delete=True)
+        session1.queue_declare(queue="two", auto_delete=True)
+        session1.message_transfer(self.createMessage(session1, "one", "a", "DtxMessage"))
+        session1.message_transfer(self.createMessage(session1, "two", "b", "DtxMessage"))
+
+        #create a xid
+        tx = self.xid("dummy")
+        #start work on one session under that xid:
+        session1.dtx_start(xid=tx)
+        #then start on the other with the join flag set
+        session2.dtx_start(xid=tx, join=True)
+
+        #do work through each session, both contributing to the same txn
+        self.swap(session1, "one", "two")#swap 'a' from 'one' to 'two'
+        self.swap(session2, "two", "one")#swap 'b' from 'two' to 'one'
+
+        #mark end on both sessions
+        session1.dtx_end(xid=tx)
+        session2.dtx_end(xid=tx)
+
+        #commit once for the joined transaction and check both swaps applied
+        session1.dtx_commit(xid=tx, one_phase=True)
+        self.assertMessageCount(1, "one")
+        self.assertMessageCount(1, "two")
+        self.assertMessageId("a", "two")
+        self.assertMessageId("b", "one")
+
+
+    def test_suspend_resume(self):
+        """
+        Test suspension and resumption of an association
+        """
+        session = self.session
+        session.dtx_select()
+
+        #setup: seed each queue with one message
+        session.queue_declare(queue="one", exclusive=True, auto_delete=True)
+        session.queue_declare(queue="two", exclusive=True, auto_delete=True)
+        session.message_transfer(self.createMessage(session, "one", "a", "DtxMessage"))
+        session.message_transfer(self.createMessage(session, "two", "b", "DtxMessage"))
+
+        tx = self.xid("dummy")
+
+        session.dtx_start(xid=tx)
+        self.swap(session, "one", "two")#swap 'a' from 'one' to 'two'
+        #suspend rather than end the association
+        session.dtx_end(xid=tx, suspend=True)
+
+        #resume and continue work under the same xid
+        session.dtx_start(xid=tx, resume=True)
+        self.swap(session, "two", "one")#swap 'b' from 'two' to 'one'
+        session.dtx_end(xid=tx)
+
+        #commit and check both swaps took effect
+        session.dtx_commit(xid=tx, one_phase=True)
+        self.assertMessageCount(1, "one")
+        self.assertMessageCount(1, "two")
+        self.assertMessageId("a", "two")
+        self.assertMessageId("b", "one")
+
+    def test_suspend_start_end_resume(self):
+        """
+        Test suspension and resumption of an association with work
+        done on another transaction when the first transaction is
+        suspended
+        """
+        # NOTE(review): this body is identical to test_suspend_resume and
+        # never starts a second transaction while the first is suspended,
+        # so it does not exercise what the docstring describes -- confirm
+        # intent and either add the interleaved transaction or drop the test.
+        session = self.session
+        session.dtx_select()
+
+        #setup
+        session.queue_declare(queue="one", exclusive=True, auto_delete=True)
+        session.queue_declare(queue="two", exclusive=True, auto_delete=True)
+        session.message_transfer(self.createMessage(session, "one", "a", "DtxMessage"))
+        session.message_transfer(self.createMessage(session, "two", "b", "DtxMessage"))
+
+        tx = self.xid("dummy")
+
+        session.dtx_start(xid=tx)
+        self.swap(session, "one", "two")#swap 'a' from 'one' to 'two'
+        session.dtx_end(xid=tx, suspend=True)
+
+        session.dtx_start(xid=tx, resume=True)
+        self.swap(session, "two", "one")#swap 'b' from 'two' to 'one'
+        session.dtx_end(xid=tx)
+
+        #commit and check
+        session.dtx_commit(xid=tx, one_phase=True)
+        self.assertMessageCount(1, "one")
+        self.assertMessageCount(1, "two")
+        self.assertMessageId("a", "two")
+        self.assertMessageId("b", "one")
+
+    def test_end_suspend_and_fail(self):
+        """
+        Verify that the correct error is signalled if the suspend and
+        fail flag are both set when disassociating a transaction from
+        the session
+        """
+        session = self.session
+        session.dtx_select()
+        tx = self.xid("suspend_and_fail")
+        session.dtx_start(xid=tx)
+        try:
+            #suspend and fail are mutually exclusive; broker should reject
+            session.dtx_end(xid=tx, suspend=True, fail=True)
+            self.fail("Suspend and fail both set, expected exception!")
+        except SessionException, e:
+            self.assertEquals(503, e.args[0].error_code)
+
+        #cleanup: the failed session is no longer usable, so roll the
+        #transaction back from a fresh connection/session
+        other = self.connect()
+        session = other.session("cleanup", 1)
+        session.dtx_rollback(xid=tx)
+        session.close()
+        other.close()
+
+
+    def test_end_unknown_xid(self):
+        """
+        Verifies that the correct exception is thrown when an attempt
+        is made to end the association for a xid not previously
+        associated with the session
+        """
+        session = self.session
+        session.dtx_select()
+        tx = self.xid("unknown-xid")
+        try:
+            #tx was never started on this session, so end must fail
+            session.dtx_end(xid=tx)
+            self.fail("Attempted to end association with unknown xid, expected exception!")
+        except SessionException, e:
+            self.assertEquals(409, e.args[0].error_code)
+
+    def test_end(self):
+        """
+        Verify that the association is terminated by end and subsequent
+        operations are non-transactional
+        """
+        #a guard subscription keeps the auto-delete queue alive after
+        #the 'alternate' session is closed below
+        guard = self.keepQueuesAlive(["tx-queue"])
+        session = self.conn.session("alternate", 1)
+        session.queue_declare(queue="tx-queue", exclusive=True, auto_delete=True)
+
+        #publish a message under a transaction
+        session.dtx_select()
+        tx = self.xid("dummy")
+        session.dtx_start(xid=tx)
+        session.message_transfer(self.createMessage(session, "tx-queue", "one", "DtxMessage"))
+        session.dtx_end(xid=tx)
+
+        #now that association with txn is ended, publish another message
+        session.message_transfer(self.createMessage(session, "tx-queue", "two", "DtxMessage"))
+
+        #check the second message is available, but not the first
+        #(the first remains invisible until the transaction commits)
+        self.assertMessageCount(1, "tx-queue")
+        self.subscribe(session, queue="tx-queue", destination="results")
+        msg = session.incoming("results").get(timeout=1)
+        self.assertEqual("two", self.getMessageProperty(msg, 'correlation_id'))
+        session.message_cancel(destination="results")
+        #ack the message then close the session
+        session.message_accept(RangedSet(msg.id))
+        session.close()
+
+        session = self.session
+        #commit the transaction and check that the first message (and
+        #only the first message) is then delivered
+        session.dtx_commit(xid=tx, one_phase=True)
+        self.assertMessageCount(1, "tx-queue")
+        self.assertMessageId("one", "tx-queue")
+
+    def test_invalid_commit_one_phase_true(self):
+        """
+        Test that a commit with one_phase = True is rejected if the
+        transaction in question has already been prepared.
+        """
+        other = self.connect()
+        tester = other.session("tester", 1)
+        tester.queue_declare(queue="dummy", exclusive=True, auto_delete=True)
+        tester.dtx_select()
+        tx = self.xid("dummy")
+        tester.dtx_start(xid=tx)
+        tester.message_transfer(self.createMessage(tester, "dummy", "dummy", "whatever"))
+        tester.dtx_end(xid=tx)
+        tester.dtx_prepare(xid=tx)
+        failed = False
+        try:
+            #one_phase=True after prepare is invalid; expect 409
+            tester.dtx_commit(xid=tx, one_phase=True)
+        except SessionException, e:
+            failed = True
+            error = e
+
+        if failed:
+            #tester session died with the exception; clean up the
+            #prepared transaction from the main session
+            self.session.dtx_rollback(xid=tx)
+            self.assertEquals(409, error.args[0].error_code)
+        else:
+            tester.close()
+            other.close()
+            self.fail("Invalid use of one_phase=True, expected exception!")
+
+    def test_invalid_commit_one_phase_false(self):
+        """
+        Test that a commit with one_phase = False is rejected if the
+        transaction in question has not yet been prepared.
+        """
+        other = self.connect()
+        tester = other.session("tester", 1)
+        tester.queue_declare(queue="dummy", exclusive=True, auto_delete=True)
+        tester.dtx_select()
+        tx = self.xid("dummy")
+        tester.dtx_start(xid=tx)
+        tester.message_transfer(self.createMessage(tester, "dummy", "dummy", "whatever"))
+        tester.dtx_end(xid=tx)
+        failed = False
+        try:
+            #two-phase commit without a prior prepare is invalid; expect 409
+            tester.dtx_commit(xid=tx, one_phase=False)
+        except SessionException, e:
+            failed = True
+            error = e
+
+        if failed:
+            #tester session died with the exception; clean up from the main session
+            self.session.dtx_rollback(xid=tx)
+            self.assertEquals(409, error.args[0].error_code)
+        else:
+            tester.close()
+            other.close()
+            self.fail("Invalid use of one_phase=False, expected exception!")
+
+    def test_invalid_commit_not_ended(self):
+        """
+        Test that a commit fails if the xid is still associated with a session.
+        """
+        other = self.connect()
+        tester = other.session("tester", 1)
+        self.session.queue_declare(queue="dummy", exclusive=True, auto_delete=True)
+        self.session.dtx_select()
+        tx = self.xid("dummy")
+        self.session.dtx_start(xid=tx)
+        #NOTE(review): message properties are built on the tester session but
+        #the transfer happens on self.session - presumably intended to be
+        #self.session throughout; verify
+        self.session.message_transfer(self.createMessage(tester, "dummy", "dummy", "whatever"))
+
+        failed = False
+        try:
+            #tx has not been ended on self.session, so commit must fail
+            tester.dtx_commit(xid=tx, one_phase=False)
+        except SessionException, e:
+            failed = True
+            error = e
+
+        if failed:
+            self.session.dtx_end(xid=tx)
+            self.session.dtx_rollback(xid=tx)
+            self.assertEquals(409, error.args[0].error_code)
+        else:
+            tester.close()
+            other.close()
+            self.fail("Commit should fail as xid is still associated!")
+
+    def test_invalid_rollback_not_ended(self):
+        """
+        Test that a rollback fails if the xid is still associated with a session.
+        """
+        other = self.connect()
+        tester = other.session("tester", 1)
+        self.session.queue_declare(queue="dummy", exclusive=True, auto_delete=True)
+        self.session.dtx_select()
+        tx = self.xid("dummy")
+        self.session.dtx_start(xid=tx)
+        self.session.message_transfer(self.createMessage(tester, "dummy", "dummy", "whatever"))
+
+        failed = False
+        try:
+            #tx has not been ended on self.session, so rollback must fail
+            tester.dtx_rollback(xid=tx)
+        except SessionException, e:
+            failed = True
+            error = e
+
+        if failed:
+            self.session.dtx_end(xid=tx)
+            self.session.dtx_rollback(xid=tx)
+            self.assertEquals(409, error.args[0].error_code)
+        else:
+            tester.close()
+            other.close()
+            self.fail("Rollback should fail as xid is still associated!")
+
+
+ def test_invalid_prepare_not_ended(self):
+ """
+ Test that a prepare fails if the xid is still associated with a session.
+ """
+ other = self.connect()
+ tester = other.session("tester", 1)
+ self.session.queue_declare(queue="dummy", exclusive=True, auto_delete=True)
+ self.session.dtx_select()
+ tx = self.xid("dummy")
+ self.session.dtx_start(xid=tx)
+ self.session.message_transfer(self.createMessage(tester, "dummy", "dummy", "whatever"))
+
+ failed = False
+ try:
+ tester.dtx_prepare(xid=tx)
+ except SessionException, e:
+ failed = True
+ error = e
+
+ if failed:
+ self.session.dtx_end(xid=tx)
+ self.session.dtx_rollback(xid=tx)
+ self.assertEquals(409, error.args[0].error_code)
+ else:
+ tester.close()
+ other.close()
+ self.fail("Rollback should fail as xid is still associated!")
+
+    def test_implicit_end(self):
+        """
+        Test that an association is implicitly ended when the session
+        is closed (whether by exception or explicit client request)
+        and the transaction in question is marked as rollback only.
+        """
+        session1 = self.session
+        session2 = self.conn.session("other", 2)
+
+        #setup:
+        session2.queue_declare(queue="dummy", exclusive=True, auto_delete=True)
+        session2.message_transfer(self.createMessage(session2, "dummy", "a", "whatever"))
+        tx = self.xid("dummy")
+
+        #do transactional work (both consume and publish) then close
+        #session2 without ending the association
+        session2.dtx_select()
+        session2.dtx_start(xid=tx)
+        session2.message_subscribe(queue="dummy", destination="dummy")
+        session2.message_flow(destination="dummy", unit=session2.credit_unit.message, value=1)
+        session2.message_flow(destination="dummy", unit=session2.credit_unit.byte, value=0xFFFFFFFF)
+        msg = session2.incoming("dummy").get(timeout=1)
+        session2.message_accept(RangedSet(msg.id))
+        session2.message_cancel(destination="dummy")
+        session2.message_transfer(self.createMessage(session2, "dummy", "b", "whatever"))
+        session2.close()
+
+        #the implicitly ended transaction must be rollback-only
+        self.assertEqual(self.XA_RBROLLBACK, session1.dtx_prepare(xid=tx).status)
+        session1.dtx_rollback(xid=tx)
+
+    def test_get_timeout(self):
+        """
+        Check that get-timeout returns the correct value, (and that a
+        transaction with a timeout can complete normally)
+        """
+        session = self.session
+        tx = self.xid("dummy")
+
+        session.dtx_select()
+        session.dtx_start(xid=tx)
+        #default timeout is zero (i.e. no timeout)
+        self.assertEqual(0, session.dtx_get_timeout(xid=tx).timeout)
+        session.dtx_set_timeout(xid=tx, timeout=60)
+        self.assertEqual(60, session.dtx_get_timeout(xid=tx).timeout)
+        #transaction should still be completable before the timeout expires
+        self.assertEqual(self.XA_OK, session.dtx_end(xid=tx).status)
+        self.assertEqual(self.XA_OK, session.dtx_rollback(xid=tx).status)
+
+    def test_set_timeout(self):
+        """
+        Test the timeout of a transaction results in the expected
+        behaviour
+        """
+
+        #guard subscriptions keep the auto-delete queues alive across sessions
+        guard = self.keepQueuesAlive(["queue-a", "queue-b"])
+        #open new session to allow self.session to be used in checking the queue
+        session = self.conn.session("worker", 1)
+        #setup:
+        tx = self.xid("dummy")
+        session.queue_declare(queue="queue-a", auto_delete=True)
+        session.queue_declare(queue="queue-b", auto_delete=True)
+        session.message_transfer(self.createMessage(session, "queue-a", "timeout", "DtxMessage"))
+
+        session.dtx_select()
+        session.dtx_start(xid=tx)
+        self.swap(session, "queue-a", "queue-b")
+        #set a 2s timeout then sleep past it so the broker expires the txn
+        session.dtx_set_timeout(xid=tx, timeout=2)
+        sleep(3)
+        #check that the work has been rolled back already
+        self.assertMessageCount(1, "queue-a")
+        self.assertMessageCount(0, "queue-b")
+        self.assertMessageId("timeout", "queue-a")
+        #check the correct codes are returned when we try to complete the txn
+        self.assertEqual(self.XA_RBTIMEOUT, session.dtx_end(xid=tx).status)
+        self.assertEqual(self.XA_RBTIMEOUT, session.dtx_rollback(xid=tx).status)
+
+
+
+    def test_recover(self):
+        """
+        Test basic recover behaviour
+        """
+        session = self.session
+
+        session.dtx_select()
+        session.queue_declare(queue="dummy", exclusive=True, auto_delete=True)
+
+        #run nine transactions (tx1..tx9); prepare an arbitrary subset
+        #and roll the rest back immediately
+        prepared = []
+        for i in range(1, 10):
+            tx = self.xid("tx%s" % (i))
+            session.dtx_start(xid=tx)
+            session.message_transfer(self.createMessage(session, "dummy", "message%s" % (i), "message%s" % (i)))
+            session.dtx_end(xid=tx)
+            if i in [2, 5, 6, 8]:
+                session.dtx_prepare(xid=tx)
+                prepared.append(tx)
+            else:
+                session.dtx_rollback(xid=tx)
+
+        #recover should report exactly the in-doubt (prepared) transactions
+        xids = session.dtx_recover().in_doubt
+
+        #rollback the prepared transactions returned by recover
+        for x in xids:
+            session.dtx_rollback(xid=x)
+
+        #validate against the expected list of prepared transactions
+        actual = set([x.global_id for x in xids]) #TODO: come up with nicer way to test these
+        expected = set([x.global_id for x in prepared])
+        intersection = actual.intersection(expected)
+
+        if intersection != expected:
+            missing = expected.difference(actual)
+            extra = actual.difference(expected)
+            self.fail("Recovered xids not as expected. missing: %s; extra: %s" % (missing, extra))
+
+ def test_bad_resume(self):
+ """
+ Test that a resume on a session not selected for use with dtx fails
+ """
+ session = self.session
+ try:
+ session.dtx_start(resume=True)
+ except SessionException, e:
+ self.assertEquals(503, e.args[0].error_code)
+
+ def test_prepare_unknown(self):
+ session = self.session
+ try:
+ session.dtx_prepare(xid=self.xid("unknown"))
+ except SessionException, e:
+ self.assertEquals(404, e.args[0].error_code)
+
+ def test_commit_unknown(self):
+ session = self.session
+ try:
+ session.dtx_commit(xid=self.xid("unknown"))
+ except SessionException, e:
+ self.assertEquals(404, e.args[0].error_code)
+
+ def test_rollback_unknown(self):
+ session = self.session
+ try:
+ session.dtx_rollback(xid=self.xid("unknown"))
+ except SessionException, e:
+ self.assertEquals(404, e.args[0].error_code)
+
+ def test_get_timeout_unknown(self):
+ session = self.session
+ try:
+ session.dtx_get_timeout(xid=self.xid("unknown"))
+ except SessionException, e:
+ self.assertEquals(404, e.args[0].error_code)
+
+    def xid(self, txid):
+        """
+        Build an xid with the given global id and a branch qualifier
+        made unique by a class-level counter (so repeated tests using
+        the same global id get distinct branches).
+        """
+        DtxTests.tx_counter += 1
+        branchqual = "v%s" % DtxTests.tx_counter
+        return self.session.xid(format=0, global_id=txid, branch_id=branchqual)
+
+    def txswap(self, tx, id):
+        """
+        Declare queue-a and queue-b, publish a message with the given
+        correlation id to queue-a, then swap it to queue-b under the
+        given xid, leaving the transaction ended but not yet committed
+        or rolled back.  NOTE: parameter 'id' shadows the builtin.
+        """
+        session = self.session
+        #declare two queues:
+        session.queue_declare(queue="queue-a", auto_delete=True)
+        session.queue_declare(queue="queue-b", auto_delete=True)
+
+        #put message with specified id on one queue:
+        dp=session.delivery_properties(routing_key="queue-a")
+        mp=session.message_properties(correlation_id=id)
+        session.message_transfer(message=Message(dp, mp, "DtxMessage"))
+
+        #start the transaction:
+        session.dtx_select()
+        self.assertEqual(self.XA_OK, self.session.dtx_start(xid=tx).status)
+
+        #'swap' the message from one queue to the other, under that transaction:
+        self.swap(self.session, "queue-a", "queue-b")
+
+        #mark the end of the transactional work:
+        self.assertEqual(self.XA_OK, self.session.dtx_end(xid=tx).status)
+
+    def swap(self, session, src, dest):
+        """
+        Move one message from queue src to queue dest on the given
+        session; if the session has an active dtx association the
+        dequeue (accept) and enqueue both become part of that txn.
+        """
+        #consume from src:
+        session.message_subscribe(destination="temp-swap", queue=src)
+        session.message_flow(destination="temp-swap", unit=session.credit_unit.message, value=1)
+        session.message_flow(destination="temp-swap", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+        msg = session.incoming("temp-swap").get(timeout=1)
+        session.message_cancel(destination="temp-swap")
+        session.message_accept(RangedSet(msg.id))
+        #todo: also complete at this point?
+
+        #re-publish to dest, preserving the correlation id:
+        dp=session.delivery_properties(routing_key=dest)
+        mp=session.message_properties(correlation_id=self.getMessageProperty(msg, 'correlation_id'))
+        session.message_transfer(message=Message(dp, mp, msg.body))
+
+    def assertMessageCount(self, expected, queue):
+        """Assert that the named queue holds exactly the expected number of messages."""
+        self.assertEqual(expected, self.session.queue_query(queue=queue).message_count)
+
+    def assertMessageId(self, expected, queue):
+        """
+        Assert that the next message on the named queue has the expected
+        correlation id (consumes one message, then cancels the subscription).
+        """
+        self.session.message_subscribe(queue=queue, destination="results")
+        self.session.message_flow(destination="results", unit=self.session.credit_unit.message, value=1)
+        self.session.message_flow(destination="results", unit=self.session.credit_unit.byte, value=0xFFFFFFFF)
+        self.assertEqual(expected, self.getMessageProperty(self.session.incoming("results").get(timeout=1), 'correlation_id'))
+        self.session.message_cancel(destination="results")
+
+    def getMessageProperty(self, msg, prop):
+        """
+        Return the first attribute named prop found on any of the
+        message's header segments, or None if no header defines it.
+        """
+        for h in msg.headers:
+            if hasattr(h, prop): return getattr(h, prop)
+        return None
+
+    def keepQueuesAlive(self, names):
+        """
+        Open a dedicated session that declares and subscribes to each
+        named auto-delete queue, so the queues survive when other
+        sessions using them are closed.  Returns the guard session.
+        """
+        session = self.conn.session("nasty", 99)
+        for n in names:
+            session.queue_declare(queue=n, auto_delete=True)
+            session.message_subscribe(destination=n, queue=n)
+        return session
+
+ def createMessage(self, session, key, id, body):
+ dp=session.delivery_properties(routing_key=key)
+ mp=session.message_properties(correlation_id=id)
+ session.message_transfer(message=Message(dp, mp, body))
diff --git a/RC9/qpid/python/tests_0-10/example.py b/RC9/qpid/python/tests_0-10/example.py
new file mode 100644
index 0000000000..83d208192b
--- /dev/null
+++ b/RC9/qpid/python/tests_0-10/example.py
@@ -0,0 +1,95 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from qpid.datatypes import Message, RangedSet
+from qpid.testlib import TestBase010
+
+class ExampleTest (TestBase010):
+    """
+    An example Qpid test, illustrating the unittest framework and the
+    python Qpid client. The test class must inherit TestBase. The
+    test code uses the Qpid client to interact with a qpid broker and
+    verify it behaves as expected.
+    """
+
+    def test_example(self):
+        """
+        An example test. Note that test functions must start with 'test_'
+        to be recognized by the test framework.
+        """
+
+        # By inheriting TestBase, self.client is automatically connected
+        # and self.session is automatically opened as session(1)
+        # Other session methods mimic the protocol.
+        session = self.session
+
+        # Now we can send regular commands. If you want to see what the method
+        # arguments mean or what other commands are available, you can use the
+        # python builtin help() method. For example:
+        #help(session)
+        #help(session.exchange_declare)
+
+        # If you want browse the available protocol methods without being
+        # connected to a live server you can use the amqp-doc utility:
+        #
+        #   Usage amqp-doc [<options>] <spec> [<pattern_1> ... <pattern_n>]
+        #
+        #   Options:
+        #       -e, --regexp    use regex instead of glob when matching
+
+        # Now that we know what commands are available we can use them to
+        # interact with the server.
+
+        # Here we use ordinal arguments.
+        session.exchange_declare("test", "direct")
+
+        # Here we use keyword arguments.
+        session.queue_declare(queue="test-queue", exclusive=True, auto_delete=True)
+        session.exchange_bind(queue="test-queue", exchange="test", binding_key="key")
+
+        # Call Session.subscribe to register as a consumer.
+        # All the protocol methods return a message object. The message object
+        # has fields corresponding to the reply method fields, plus a content
+        # field that is filled if the reply includes content. In this case the
+        # interesting field is the consumer_tag.
+        session.message_subscribe(queue="test-queue", destination="consumer_tag")
+        session.message_flow(destination="consumer_tag", unit=session.credit_unit.message, value=0xFFFFFFFF)
+        session.message_flow(destination="consumer_tag", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+
+        # We can use the session.incoming(...) method to access the messages
+        # delivered for our consumer_tag.
+        queue = session.incoming("consumer_tag")
+
+        # Now lets publish a message and see if our consumer gets it. To do
+        # this we need to import the Message class.
+        delivery_properties = session.delivery_properties(routing_key="key")
+        sent = Message(delivery_properties, "Hello World!")
+        session.message_transfer(destination="test", message=sent)
+
+        # Now we'll wait for the message to arrive. We can use the timeout
+        # argument in case the server hangs. By default queue.get() will wait
+        # until a message arrives or the connection to the server dies.
+        msg = queue.get(timeout=10)
+
+        # And check that we got the right response with assertEqual
+        self.assertEqual(sent.body, msg.body)
+
+        # Now acknowledge the message.
+        session.message_accept(RangedSet(msg.id))
+
diff --git a/RC9/qpid/python/tests_0-10/exchange.py b/RC9/qpid/python/tests_0-10/exchange.py
new file mode 100644
index 0000000000..4b5dc78143
--- /dev/null
+++ b/RC9/qpid/python/tests_0-10/exchange.py
@@ -0,0 +1,416 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+Tests for exchange behaviour.
+
+Test classes ending in 'RuleTests' are derived from rules in amqp.xml.
+"""
+
+import Queue, logging, traceback
+from qpid.testlib import TestBase010
+from qpid.datatypes import Message
+from qpid.client import Closed
+from qpid.session import SessionException
+
+
+class TestHelper(TestBase010):
+ def setUp(self):
+ TestBase010.setUp(self)
+ self.queues = []
+ self.exchanges = []
+
+ def tearDown(self):
+ try:
+ for ssn, q in self.queues:
+ ssn.queue_delete(queue=q)
+ for ssn, ex in self.exchanges:
+ ssn.exchange_delete(exchange=ex)
+ except:
+ print "Error on tearDown:"
+ print traceback.print_exc()
+ TestBase010.tearDown(self)
+
+ def createMessage(self, key="", body=""):
+ return Message(self.session.delivery_properties(routing_key=key), body)
+
+ def getApplicationHeaders(self, msg):
+ for h in msg.headers:
+ if hasattr(h, 'application_headers'): return getattr(h, 'application_headers')
+ return None
+
+ def assertPublishGet(self, queue, exchange="", routing_key="", properties=None):
+ """
+ Publish to exchange and assert queue.get() returns the same message.
+ """
+ body = self.uniqueString()
+ dp=self.session.delivery_properties(routing_key=routing_key)
+ mp=self.session.message_properties(application_headers=properties)
+ self.session.message_transfer(destination=exchange, message=Message(dp, mp, body))
+ msg = queue.get(timeout=1)
+ self.assertEqual(body, msg.body)
+ if (properties):
+ self.assertEqual(properties, self.getApplicationHeaders(msg))
+
+ def assertPublishConsume(self, queue="", exchange="", routing_key="", properties=None):
+ """
+ Publish a message and consume it, assert it comes back intact.
+ Return the Queue object used to consume.
+ """
+ self.assertPublishGet(self.consume(queue), exchange, routing_key, properties)
+
+ def assertEmpty(self, queue):
+ """Assert that the queue is empty"""
+ try:
+ queue.get(timeout=1)
+ self.fail("Queue is not empty.")
+ except Queue.Empty: None # Ignore
+
+ def queue_declare(self, session=None, *args, **keys):
+ session = session or self.session
+ reply = session.queue_declare(*args, **keys)
+ self.queues.append((session, keys["queue"]))
+ return reply
+
+ def exchange_declare(self, session=None, ticket=0, exchange='',
+ type='', passive=False, durable=False,
+ auto_delete=False,
+ arguments={}):
+ session = session or self.session
+ reply = session.exchange_declare(exchange=exchange, type=type, passive=passive,durable=durable, auto_delete=auto_delete, arguments=arguments)
+ self.exchanges.append((session,exchange))
+ return reply
+
+ def uniqueString(self):
+ """Generate a unique string, unique for this TestBase instance"""
+ if not "uniqueCounter" in dir(self): self.uniqueCounter = 1;
+ return "Test Message " + str(self.uniqueCounter)
+
+ def consume(self, queueName):
+ """Consume from named queue returns the Queue object."""
+ if not "uniqueTag" in dir(self): self.uniqueTag = 1
+ else: self.uniqueTag += 1
+ consumer_tag = "tag" + str(self.uniqueTag)
+ self.session.message_subscribe(queue=queueName, destination=consumer_tag)
+ self.session.message_flow(destination=consumer_tag, unit=self.session.credit_unit.message, value=0xFFFFFFFF)
+ self.session.message_flow(destination=consumer_tag, unit=self.session.credit_unit.byte, value=0xFFFFFFFF)
+ return self.session.incoming(consumer_tag)
+
+
+class StandardExchangeVerifier:
+    """Verifies standard exchange behavior.
+
+    Used as base class for classes that test standard exchanges."""
+
+    def verifyDirectExchange(self, ex):
+        """Verify that ex behaves like a direct exchange."""
+        self.queue_declare(queue="q")
+        self.session.exchange_bind(queue="q", exchange=ex, binding_key="k")
+        self.assertPublishConsume(exchange=ex, queue="q", routing_key="k")
+        try:
+            #a non-matching key must not be delivered
+            self.assertPublishConsume(exchange=ex, queue="q", routing_key="kk")
+            self.fail("Expected Empty exception")
+        except Queue.Empty: None # Expected
+
+    def verifyFanOutExchange(self, ex):
+        """Verify that ex behaves like a fanout exchange."""
+        self.queue_declare(queue="q")
+        self.session.exchange_bind(queue="q", exchange=ex)
+        self.queue_declare(queue="p")
+        self.session.exchange_bind(queue="p", exchange=ex)
+        #a single publish must reach every bound queue
+        for qname in ["q", "p"]: self.assertPublishGet(self.consume(qname), ex)
+
+    def verifyTopicExchange(self, ex):
+        """Verify that ex behaves like a topic exchange"""
+        self.queue_declare(queue="a")
+        self.session.exchange_bind(queue="a", exchange=ex, binding_key="a.#.b.*")
+        q = self.consume("a")
+        self.assertPublishGet(q, ex, "a.b.x")
+        self.assertPublishGet(q, ex, "a.x.b.x")
+        self.assertPublishGet(q, ex, "a.x.x.b.x")
+        # Shouldn't match
+        self.session.message_transfer(destination=ex, message=self.createMessage("a.b"))
+        self.session.message_transfer(destination=ex, message=self.createMessage("a.b.x.y"))
+        self.session.message_transfer(destination=ex, message=self.createMessage("x.a.b.x"))
+        # NOTE(review): "a.b" is published twice - presumably one of these
+        # was meant to be a different non-matching key
+        self.session.message_transfer(destination=ex, message=self.createMessage("a.b"))
+        self.assert_(q.empty())
+
+    def verifyHeadersExchange(self, ex):
+        """Verify that ex is a headers exchange"""
+        self.queue_declare(queue="q")
+        self.session.exchange_bind(queue="q", exchange=ex, arguments={ "x-match":"all", "name":"fred" , "age":3} )
+        q = self.consume("q")
+        headers = {"name":"fred", "age":3}
+        self.assertPublishGet(q, exchange=ex, properties=headers)
+        self.session.message_transfer(destination=ex) # No headers, won't deliver
+        self.assertEmpty(q);
+
+
+class RecommendedTypesRuleTests(TestHelper, StandardExchangeVerifier):
+ """
+ The server SHOULD implement these standard exchange types: topic, headers.
+
+ Client attempts to declare an exchange with each of these standard types.
+ """
+
+ def testDirect(self):
+ """Declare and test a direct exchange"""
+ self.exchange_declare(0, exchange="d", type="direct")
+ self.verifyDirectExchange("d")
+
+ def testFanout(self):
+ """Declare and test a fanout exchange"""
+ self.exchange_declare(0, exchange="f", type="fanout")
+ self.verifyFanOutExchange("f")
+
+ def testTopic(self):
+ """Declare and test a topic exchange"""
+ self.exchange_declare(0, exchange="t", type="topic")
+ self.verifyTopicExchange("t")
+
+ def testHeaders(self):
+ """Declare and test a headers exchange"""
+ self.exchange_declare(0, exchange="h", type="headers")
+ self.verifyHeadersExchange("h")
+
+
+class RequiredInstancesRuleTests(TestHelper, StandardExchangeVerifier):
+    """
+    The server MUST, in each virtual host, pre-declare an exchange instance
+    for each standard exchange type that it implements, where the name of the
+    exchange instance is amq. followed by the exchange type name.
+
+    Client creates a temporary queue and attempts to bind to each required
+    exchange instance (amq.fanout, amq.direct, and amq.topic, amq.match if
+    those types are defined).
+    """
+    #each test exercises one pre-declared exchange via the shared verifiers
+    def testAmqDirect(self): self.verifyDirectExchange("amq.direct")
+
+    def testAmqFanOut(self): self.verifyFanOutExchange("amq.fanout")
+
+    def testAmqTopic(self): self.verifyTopicExchange("amq.topic")
+
+    def testAmqMatch(self): self.verifyHeadersExchange("amq.match")
+
+class DefaultExchangeRuleTests(TestHelper, StandardExchangeVerifier):
+    """
+    The server MUST predeclare a direct exchange to act as the default exchange
+    for content Publish methods and for default queue bindings.
+
+    Client checks that the default exchange is active by specifying a queue
+    binding with no exchange name, and publishing a message with a suitable
+    routing key but without specifying the exchange name, then ensuring that
+    the message arrives in the queue correctly.
+    """
+    def testDefaultExchange(self):
+        # Test automatic binding by queue name.
+        self.queue_declare(queue="d")
+        self.assertPublishConsume(queue="d", routing_key="d")
+        # Test explicit bind to default queue (empty exchange name)
+        self.verifyDirectExchange("")
+
+
+# TODO aconway 2006-09-27: Fill in empty tests:
+
+class DefaultAccessRuleTests(TestHelper):
+ """
+ The server MUST NOT allow clients to access the default exchange except
+ by specifying an empty exchange name in the Queue.Bind and content Publish
+ methods.
+ """
+
+class ExtensionsRuleTests(TestHelper):
+ """
+ The server MAY implement other exchange types as wanted.
+ """
+
+
+class DeclareMethodMinimumRuleTests(TestHelper):
+ """
+ The server SHOULD support a minimum of 16 exchanges per virtual host and
+ ideally, impose no limit except as defined by available resources.
+
+ The client creates as many exchanges as it can until the server reports
+    an error; the number of exchanges successfully created must be at least
+ sixteen.
+ """
+
+
+class DeclareMethodTicketFieldValidityRuleTests(TestHelper):
+ """
+ The client MUST provide a valid access ticket giving "active" access to
+ the realm in which the exchange exists or will be created, or "passive"
+ access if the if-exists flag is set.
+
+ Client creates access ticket with wrong access rights and attempts to use
+ in this method.
+ """
+
+
+class DeclareMethodExchangeFieldReservedRuleTests(TestHelper):
+ """
+ Exchange names starting with "amq." are reserved for predeclared and
+ standardised exchanges. The client MUST NOT attempt to create an exchange
+ starting with "amq.".
+
+
+ """
+
+
+class DeclareMethodTypeFieldTypedRuleTests(TestHelper):
+ """
+ Exchanges cannot be redeclared with different types. The client MUST not
+ attempt to redeclare an existing exchange with a different type than used
+ in the original Exchange.Declare method.
+
+
+ """
+
+
+class DeclareMethodTypeFieldSupportRuleTests(TestHelper):
+ """
+ The client MUST NOT attempt to create an exchange with a type that the
+ server does not support.
+
+
+ """
+
+
+class DeclareMethodPassiveFieldNotFoundRuleTests(TestHelper):
+    """
+    If set, and the exchange does not already exist, the server MUST raise a
+    channel exception with reply code 404 (not found).
+    """
+    def test(self):
+        try:
+            #passive declare must not create the exchange, only check it exists
+            self.session.exchange_declare(exchange="humpty_dumpty", passive=True)
+            self.fail("Expected 404 for passive declaration of unknown exchange.")
+        except SessionException, e:
+            self.assertEquals(404, e.args[0].error_code)
+
+
+class DeclareMethodDurableFieldSupportRuleTests(TestHelper):
+ """
+ The server MUST support both durable and transient exchanges.
+
+
+ """
+
+
+class DeclareMethodDurableFieldStickyRuleTests(TestHelper):
+ """
+ The server MUST ignore the durable field if the exchange already exists.
+
+
+ """
+
+
+class DeclareMethodAutoDeleteFieldStickyRuleTests(TestHelper):
+ """
+ The server MUST ignore the auto-delete field if the exchange already
+ exists.
+
+
+ """
+
+
+class DeleteMethodTicketFieldValidityRuleTests(TestHelper):
+ """
+ The client MUST provide a valid access ticket giving "active" access
+ rights to the exchange's access realm.
+
+ Client creates access ticket with wrong access rights and attempts to use
+ in this method.
+ """
+
+
+class DeleteMethodExchangeFieldExistsRuleTests(TestHelper):
+ """
+ The client MUST NOT attempt to delete an exchange that does not exist.
+ """
+
+
+class HeadersExchangeTests(TestHelper):
+    """
+    Tests for headers exchange functionality.
+    """
+    def setUp(self):
+        TestHelper.setUp(self)
+        self.queue_declare(queue="q")
+        self.q = self.consume("q")
+
+    def myAssertPublishGet(self, headers):
+        """Publish with the given headers to amq.match and expect delivery to q."""
+        self.assertPublishGet(self.q, exchange="amq.match", properties=headers)
+
+    def myBasicPublish(self, headers):
+        """Publish with the given headers to amq.match without expecting delivery."""
+        mp=self.session.message_properties(application_headers=headers)
+        self.session.message_transfer(destination="amq.match", message=Message(mp, "foobar"))
+
+    def testMatchAll(self):
+        #x-match=all: every listed header must be present with the same value
+        self.session.exchange_bind(queue="q", exchange="amq.match", arguments={ 'x-match':'all', "name":"fred", "age":3})
+        self.myAssertPublishGet({"name":"fred", "age":3})
+        self.myAssertPublishGet({"name":"fred", "age":3, "extra":"ignoreme"})
+
+        # None of these should match
+        self.myBasicPublish({})
+        self.myBasicPublish({"name":"barney"})
+        self.myBasicPublish({"name":10})
+        self.myBasicPublish({"name":"fred", "age":2})
+        self.assertEmpty(self.q)
+
+    def testMatchAny(self):
+        #x-match=any: at least one listed header must match
+        self.session.exchange_bind(queue="q", exchange="amq.match", arguments={ 'x-match':'any', "name":"fred", "age":3})
+        self.myAssertPublishGet({"name":"fred"})
+        self.myAssertPublishGet({"name":"fred", "ignoreme":10})
+        self.myAssertPublishGet({"ignoreme":10, "age":3})
+
+        # Wont match
+        self.myBasicPublish({})
+        self.myBasicPublish({"irrelevant":0})
+        self.assertEmpty(self.q)
+
+
+class MiscellaneousErrorsTests(TestHelper):
+    """
+    Test some miscellaneous error conditions
+    """
+    def testTypeNotKnown(self):
+        try:
+            self.session.exchange_declare(exchange="test_type_not_known_exchange", type="invalid_type")
+            self.fail("Expected 503 for declaration of unknown exchange type.")
+        except SessionException, e:
+            self.assertEquals(503, e.args[0].error_code)
+
+    def testDifferentDeclaredType(self):
+        self.exchange_declare(exchange="test_different_declared_type_exchange", type="direct")
+        try:
+            #redeclare with a different type from a second session; the first
+            #session would be rendered unusable by the expected exception
+            session = self.conn.session("alternate", 2)
+            session.exchange_declare(exchange="test_different_declared_type_exchange", type="topic")
+            self.fail("Expected 530 for redeclaration of exchange with different type.")
+        except SessionException, e:
+            self.assertEquals(530, e.args[0].error_code)
+
+class ExchangeTests(TestHelper):
+    def testHeadersBindNoMatchArg(self):
+        """A headers-exchange bind without the x-match argument must be rejected."""
+        self.session.queue_declare(queue="q", exclusive=True, auto_delete=True)
+        try:
+            self.session.exchange_bind(queue="q", exchange="amq.match", arguments={"name":"fred" , "age":3} )
+            self.fail("Expected failure for missing x-match arg.")
+        except SessionException, e:
+            self.assertEquals(541, e.args[0].error_code)
diff --git a/RC9/qpid/python/tests_0-10/management.py b/RC9/qpid/python/tests_0-10/management.py
new file mode 100644
index 0000000000..0632d85da4
--- /dev/null
+++ b/RC9/qpid/python/tests_0-10/management.py
@@ -0,0 +1,240 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from qpid.datatypes import Message, RangedSet
+from qpid.testlib import TestBase010
+from qpid.management import managementChannel, managementClient
+
+class ManagementTest (TestBase010):
+    """
+    Tests for the management hooks: object queries, method invocation
+    (echo, queueMoveMessages, purge) and session identification, via both
+    the old managementClient API and the newer QMF console API.
+    """
+
+    def test_broker_connectivity_oldAPI (self):
+        """
+        Call the "echo" method on the broker to verify it is alive and talking.
+        Uses the older managementClient/managementChannel API.
+        """
+        session = self.session
+
+        mc = managementClient (session.spec)
+        mch = mc.addChannel (session)
+
+        # Wait for the management channel to settle before querying objects.
+        mc.syncWaitForStable (mch)
+        brokers = mc.syncGetObjects (mch, "broker")
+        self.assertEqual (len (brokers), 1)
+        broker = brokers[0]
+        args = {}
+        body = "Echo Message Body"
+        args["body"] = body
+
+        # Each echo reply must round-trip both the sequence number and the
+        # body unchanged.
+        for seq in range (1, 5):
+            args["sequence"] = seq
+            res = mc.syncCallMethod (mch, broker.id, broker.classKey, "echo", args)
+            self.assertEqual (res.status, 0)
+            self.assertEqual (res.statusText, "OK")
+            self.assertEqual (res.sequence, seq)
+            self.assertEqual (res.body, body)
+        mc.removeChannel (mch)
+
+    def test_broker_connectivity (self):
+        """
+        Call the "echo" method on the broker to verify it is alive and talking.
+        Same check as test_broker_connectivity_oldAPI but via the QMF console.
+        """
+        session = self.session
+        self.startQmf()
+
+        brokers = self.qmf.getObjects(_class="broker")
+        self.assertEqual (len(brokers), 1)
+        broker = brokers[0]
+
+        body = "Echo Message Body"
+        for seq in range (1, 10):
+            res = broker.echo(seq, body)
+            self.assertEqual (res.status, 0)
+            self.assertEqual (res.text, "OK")
+            self.assertEqual (res.sequence, seq)
+            self.assertEqual (res.body, body)
+
+    def test_get_objects(self):
+        """Verify QMF package, schema-class and object queries."""
+        self.startQmf()
+
+        # get the package list, verify that the qpid broker package is there
+        packages = self.qmf.getPackages()
+        assert 'org.apache.qpid.broker' in packages
+
+        # get the schema class keys for the broker, verify the broker table and link-down event
+        keys = self.qmf.getClasses('org.apache.qpid.broker')
+        broker = None
+        linkDown = None
+        for key in keys:
+            if key.getClassName() == "broker": broker = key
+            if key.getClassName() == "brokerLinkDown" : linkDown = key
+        assert broker
+        assert linkDown
+
+        # querying by class name and by class key must each yield the one broker
+        brokerObjs = self.qmf.getObjects(_class="broker")
+        assert len(brokerObjs) == 1
+        brokerObjs = self.qmf.getObjects(_key=broker)
+        assert len(brokerObjs) == 1
+
+    def test_self_session_id (self):
+        """The QMF broker session must appear among the broker's session objects."""
+        self.startQmf()
+        sessionId = self.qmf_broker.getSessionId()
+        brokerSessions = self.qmf.getObjects(_class="session")
+
+        found = False
+        for bs in brokerSessions:
+            if bs.name == sessionId:
+                found = True
+        self.assertEqual (found, True)
+
+    def test_standard_exchanges (self):
+        """Each standard exchange must exist and report its expected type."""
+        self.startQmf()
+
+        exchanges = self.qmf.getObjects(_class="exchange")
+        exchange = self.findExchange (exchanges, "")
+        self.assertEqual (exchange.type, "direct")
+        exchange = self.findExchange (exchanges, "amq.direct")
+        self.assertEqual (exchange.type, "direct")
+        exchange = self.findExchange (exchanges, "amq.topic")
+        self.assertEqual (exchange.type, "topic")
+        exchange = self.findExchange (exchanges, "amq.fanout")
+        self.assertEqual (exchange.type, "fanout")
+        exchange = self.findExchange (exchanges, "amq.match")
+        self.assertEqual (exchange.type, "headers")
+        exchange = self.findExchange (exchanges, "qpid.management")
+        self.assertEqual (exchange.type, "topic")
+
+    def findExchange (self, exchanges, name):
+        # Helper: return the exchange object with the given name, or None.
+        for exchange in exchanges:
+            if exchange.name == name:
+                return exchange
+        return None
+
+    def test_move_queued_messages(self):
+        """
+        Test ability to move messages from the head of one queue to another.
+        Need to test moving all and N messages.
+        """
+        # NOTE: the bare strings below are no-op statements used as step labels.
+        self.startQmf()
+        session = self.session
+        "Set up source queue"
+        session.queue_declare(queue="src-queue", exclusive=True, auto_delete=True)
+        session.exchange_bind(queue="src-queue", exchange="amq.direct", binding_key="routing_key")
+
+        twenty = range(1,21)
+        props = session.delivery_properties(routing_key="routing_key")
+        for count in twenty:
+            body = "Move Message %d" % count
+            src_msg = Message(props, body)
+            session.message_transfer(destination="amq.direct", message=src_msg)
+
+        "Set up destination queue"
+        session.queue_declare(queue="dest-queue", exclusive=True, auto_delete=True)
+        session.exchange_bind(queue="dest-queue", exchange="amq.direct")
+
+        queues = self.qmf.getObjects(_class="queue")
+
+        "Move 10 messages from src-queue to dest-queue"
+        result = self.qmf.getObjects(_class="broker")[0].queueMoveMessages("src-queue", "dest-queue", 10)
+        self.assertEqual (result.status, 0)
+
+        # Re-query the queue objects to pick up the updated depths.
+        sq = self.qmf.getObjects(_class="queue", name="src-queue")[0]
+        dq = self.qmf.getObjects(_class="queue", name="dest-queue")[0]
+
+        self.assertEqual (sq.msgDepth,10)
+        self.assertEqual (dq.msgDepth,10)
+
+        "Move all remaining messages to destination"
+        # qty=0 means "move everything".
+        result = self.qmf.getObjects(_class="broker")[0].queueMoveMessages("src-queue", "dest-queue", 0)
+        self.assertEqual (result.status,0)
+
+        sq = self.qmf.getObjects(_class="queue", name="src-queue")[0]
+        dq = self.qmf.getObjects(_class="queue", name="dest-queue")[0]
+
+        self.assertEqual (sq.msgDepth,0)
+        self.assertEqual (dq.msgDepth,20)
+
+        "Use a bad source queue name"
+        # Unknown queues yield status 4 rather than raising.
+        result = self.qmf.getObjects(_class="broker")[0].queueMoveMessages("bad-src-queue", "dest-queue", 0)
+        self.assertEqual (result.status,4)
+
+        "Use a bad destination queue name"
+        result = self.qmf.getObjects(_class="broker")[0].queueMoveMessages("src-queue", "bad-dest-queue", 0)
+        self.assertEqual (result.status,4)
+
+        " Use a large qty (40) to move from dest-queue back to "
+        " src-queue- should move all "
+        result = self.qmf.getObjects(_class="broker")[0].queueMoveMessages("dest-queue", "src-queue", 40)
+        self.assertEqual (result.status,0)
+
+        sq = self.qmf.getObjects(_class="queue", name="src-queue")[0]
+        dq = self.qmf.getObjects(_class="queue", name="dest-queue")[0]
+
+        self.assertEqual (sq.msgDepth,20)
+        self.assertEqual (dq.msgDepth,0)
+
+        "Consume the messages of the queue and check they are all there in order"
+        session.message_subscribe(queue="src-queue", destination="tag")
+        session.message_flow(destination="tag", unit=session.credit_unit.message, value=0xFFFFFFFF)
+        session.message_flow(destination="tag", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+        queue = session.incoming("tag")
+        for count in twenty:
+            consumed_msg = queue.get(timeout=1)
+            body = "Move Message %d" % count
+            self.assertEqual(body, consumed_msg.body)
+
+    def test_purge_queue(self):
+        """
+        Test ability to purge messages from the head of a queue.
+        Need to test purging all, 1 (top message) and N messages.
+        """
+        # NOTE: the bare strings below are no-op statements used as step labels.
+        self.startQmf()
+        session = self.session
+        "Set up purge queue"
+        session.queue_declare(queue="purge-queue", exclusive=True, auto_delete=True)
+        session.exchange_bind(queue="purge-queue", exchange="amq.direct", binding_key="routing_key")
+
+        twenty = range(1,21)
+        props = session.delivery_properties(routing_key="routing_key")
+        for count in twenty:
+            body = "Purge Message %d" % count
+            msg = Message(props, body)
+            session.message_transfer(destination="amq.direct", message=msg)
+
+        pq = self.qmf.getObjects(_class="queue", name="purge-queue")[0]
+
+        "Purge top message from purge-queue"
+        result = pq.purge(1)
+        self.assertEqual (result.status, 0)
+        # Re-query the queue object to pick up the updated depth.
+        pq = self.qmf.getObjects(_class="queue", name="purge-queue")[0]
+        self.assertEqual (pq.msgDepth,19)
+
+        "Purge top 9 messages from purge-queue"
+        result = pq.purge(9)
+        self.assertEqual (result.status, 0)
+        pq = self.qmf.getObjects(_class="queue", name="purge-queue")[0]
+        self.assertEqual (pq.msgDepth,10)
+
+        "Purge all messages from purge-queue"
+        # qty=0 means "purge everything".
+        result = pq.purge(0)
+        self.assertEqual (result.status, 0)
+        pq = self.qmf.getObjects(_class="queue", name="purge-queue")[0]
+        self.assertEqual (pq.msgDepth,0)
+
diff --git a/RC9/qpid/python/tests_0-10/message.py b/RC9/qpid/python/tests_0-10/message.py
new file mode 100644
index 0000000000..cbcef5602f
--- /dev/null
+++ b/RC9/qpid/python/tests_0-10/message.py
@@ -0,0 +1,847 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.testlib import TestBase010
+from qpid.datatypes import Message, RangedSet
+from qpid.session import SessionException
+
+from qpid.content import Content
+from time import sleep
+
+class MessageTests(TestBase010):
+ """Tests for 'methods' on the amqp message 'class'"""
+
+    def test_no_local(self):
+        """
+        NOTE: this is a test of a QPID specific feature
+
+        Test that the qpid specific no_local arg is honoured: a queue
+        declared with arguments={'no-local':'true'} must not deliver
+        messages published on the same connection (any session of it),
+        only messages from other connections.
+        """
+        session = self.session
+        #setup, declare two queues one of which excludes delivery of locally sent messages
+        session.queue_declare(queue="test-queue-1a", exclusive=True, auto_delete=True)
+        session.queue_declare(queue="test-queue-1b", exclusive=True, auto_delete=True, arguments={'no-local':'true'})
+        #establish two consumers
+        self.subscribe(destination="local_included", queue="test-queue-1a")
+        self.subscribe(destination="local_excluded", queue="test-queue-1b")
+
+        #send a message from this session to each queue
+        session.message_transfer(message=Message(session.delivery_properties(routing_key="test-queue-1a"), "deliver-me"))
+        session.message_transfer(message=Message(session.delivery_properties(routing_key="test-queue-1b"), "dont-deliver-me"))
+
+        #send a message from another session on the same connection to each queue
+        session2 = self.conn.session("my-local-session")
+        session2.message_transfer(message=Message(session2.delivery_properties(routing_key="test-queue-1a"), "deliver-me-as-well"))
+        session2.message_transfer(message=Message(session2.delivery_properties(routing_key="test-queue-1b"), "dont-deliver-me-either"))
+
+        #send a message from a session on another connection to each queue
+        for q in ["test-queue-1a", "test-queue-1b"]:
+            session.exchange_bind(queue=q, exchange="amq.fanout", binding_key="my-key")
+        other = self.connect()
+        session3 = other.session("my-other-session")
+        session3.message_transfer(destination="amq.fanout", message=Message("i-am-not-local"))
+        other.close()
+
+        #check the queues of the two consumers: the plain queue gets all three
+        #messages, the no-local queue only the one from the other connection
+        excluded = session.incoming("local_excluded")
+        included = session.incoming("local_included")
+        for b in ["deliver-me", "deliver-me-as-well", "i-am-not-local"]:
+            msg = included.get(timeout=1)
+            self.assertEqual(b, msg.body)
+        msg = excluded.get(timeout=1)
+        self.assertEqual("i-am-not-local", msg.body)
+        try:
+            excluded.get(timeout=1)
+            self.fail("Received locally published message though no_local=true")
+        except Empty: None
+
+    def test_no_local_awkward(self):
+
+        """
+        NOTE: this is a test of a QPID specific feature
+
+        Check that messages which will be excluded through no-local
+        processing will not block subsequent deliveries
+        """
+
+        session = self.session
+        #setup:
+        session.queue_declare(queue="test-queue", exclusive=True, auto_delete=True, arguments={'no-local':'true'})
+        #establish consumer which excludes delivery of locally sent messages
+        self.subscribe(destination="local_excluded", queue="test-queue")
+
+        #send a 'local' message (published on the consumer's own connection,
+        #so it must be skipped, not merely deferred)
+        session.message_transfer(message=Message(session.delivery_properties(routing_key="test-queue"), "local"))
+
+        #send a non local message
+        other = self.connect()
+        session2 = other.session("my-session", 1)
+        session2.message_transfer(message=Message(session2.delivery_properties(routing_key="test-queue"), "foreign"))
+        session2.close()
+        other.close()
+
+        #check that the second message only is delivered
+        excluded = session.incoming("local_excluded")
+        msg = excluded.get(timeout=1)
+        self.assertEqual("foreign", msg.body)
+        try:
+            excluded.get(timeout=1)
+            self.fail("Received extra message")
+        except Empty: None
+        #check queue is empty: the skipped local message must also be gone
+        self.assertEqual(0, session.queue_query(queue="test-queue").message_count)
+
+    def test_no_local_exclusive_subscribe(self):
+        """
+        NOTE: this is a test of a QPID specific feature
+
+        Test that the no_local processing works on queues not declared
+        as exclusive, but with an exclusive subscription
+        """
+        session = self.session
+
+        #setup, declare two queues one of which excludes delivery of
+        #locally sent messages but is not declared as exclusive
+        session.queue_declare(queue="test-queue-1a", exclusive=True, auto_delete=True)
+        session.queue_declare(queue="test-queue-1b", auto_delete=True, arguments={'no-local':'true'})
+        #establish two consumers; the no-local queue gets an exclusive subscription
+        self.subscribe(destination="local_included", queue="test-queue-1a")
+        self.subscribe(destination="local_excluded", queue="test-queue-1b", exclusive=True)
+
+        #send a message from the same session to each queue
+        session.message_transfer(message=Message(session.delivery_properties(routing_key="test-queue-1a"), "deliver-me"))
+        session.message_transfer(message=Message(session.delivery_properties(routing_key="test-queue-1b"), "dont-deliver-me"))
+
+        #send a message from another session on the same connection to each queue
+        session2 = self.conn.session("my-session")
+        session2.message_transfer(message=Message(session2.delivery_properties(routing_key="test-queue-1a"), "deliver-me-as-well"))
+        session2.message_transfer(message=Message(session2.delivery_properties(routing_key="test-queue-1b"), "dont-deliver-me-either"))
+
+        #send a message from a session on another connection to each queue
+        for q in ["test-queue-1a", "test-queue-1b"]:
+            session.exchange_bind(queue=q, exchange="amq.fanout", binding_key="my-key")
+        other = self.connect()
+        session3 = other.session("my-other-session")
+        session3.message_transfer(destination="amq.fanout", message=Message("i-am-not-local"))
+        other.close()
+
+        #check the queues of the two consumers: only the message from the
+        #other connection reaches the no-local queue's subscriber
+        excluded = session.incoming("local_excluded")
+        included = session.incoming("local_included")
+        for b in ["deliver-me", "deliver-me-as-well", "i-am-not-local"]:
+            msg = included.get(timeout=1)
+            self.assertEqual(b, msg.body)
+        msg = excluded.get(timeout=1)
+        self.assertEqual("i-am-not-local", msg.body)
+        try:
+            excluded.get(timeout=1)
+            self.fail("Received locally published message though no_local=true")
+        except Empty: None
+
+
+    def test_consume_exclusive(self):
+        """
+        Test an exclusive consumer prevents other consumer being created
+        (expects error code 405).
+        """
+        session = self.session
+        session.queue_declare(queue="test-queue-2", exclusive=True, auto_delete=True)
+        session.message_subscribe(destination="first", queue="test-queue-2", exclusive=True)
+        try:
+            session.message_subscribe(destination="second", queue="test-queue-2")
+            self.fail("Expected consume request to fail due to previous exclusive consumer")
+        except SessionException, e:
+            self.assertEquals(405, e.args[0].error_code)
+
+    def test_consume_exclusive2(self):
+        """
+        Check that an exclusive consumer cannot be created if a consumer already exists:
+        (mirror of test_consume_exclusive; expects error code 405)
+        """
+        session = self.session
+        session.queue_declare(queue="test-queue-2", exclusive=True, auto_delete=True)
+        session.message_subscribe(destination="first", queue="test-queue-2")
+        try:
+            session.message_subscribe(destination="second", queue="test-queue-2", exclusive=True)
+            self.fail("Expected exclusive consume request to fail due to previous consumer")
+        except SessionException, e:
+            self.assertEquals(405, e.args[0].error_code)
+
+    def test_consume_queue_not_found(self):
+        """
+        Test error conditions associated with the queue field of the consume method:
+        subscribing to a non-existent queue must fail with error code 404.
+        """
+        session = self.session
+        try:
+            #queue specified but doesn't exist:
+            session.message_subscribe(queue="invalid-queue", destination="a")
+            self.fail("Expected failure when consuming from non-existent queue")
+        except SessionException, e:
+            self.assertEquals(404, e.args[0].error_code)
+
+    def test_consume_queue_not_specified(self):
+        # Subscribing without naming a queue (and with none previously
+        # declared for the channel) must fail with error code 531.
+        session = self.session
+        try:
+            #queue not specified and none previously declared for channel:
+            session.message_subscribe(destination="a")
+            self.fail("Expected failure when consuming from unspecified queue")
+        except SessionException, e:
+            self.assertEquals(531, e.args[0].error_code)
+
+    def test_consume_unique_consumers(self):
+        """
+        Ensure unique consumer tags are enforced: reusing a destination
+        tag must fail with error code 530.
+        """
+        session = self.session
+        #setup, declare a queue:
+        session.queue_declare(queue="test-queue-3", exclusive=True, auto_delete=True)
+
+        #check that attempts to use duplicate tags are detected and prevented:
+        session.message_subscribe(destination="first", queue="test-queue-3")
+        try:
+            session.message_subscribe(destination="first", queue="test-queue-3")
+            self.fail("Expected consume request to fail due to non-unique tag")
+        except SessionException, e:
+            self.assertEquals(530, e.args[0].error_code)
+
+    def test_cancel(self):
+        """
+        Test compliance of the basic.cancel method: after message_cancel
+        no further messages are delivered to the destination, and
+        cancelling unknown destinations is a no-op.
+        """
+        session = self.session
+        #setup, declare a queue:
+        session.queue_declare(queue="test-queue-4", exclusive=True, auto_delete=True)
+        session.message_transfer(message=Message(session.delivery_properties(routing_key="test-queue-4"), "One"))
+
+        session.message_subscribe(destination="my-consumer", queue="test-queue-4")
+        myqueue = session.incoming("my-consumer")
+        session.message_flow(destination="my-consumer", unit=session.credit_unit.message, value=0xFFFFFFFF)
+        session.message_flow(destination="my-consumer", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+
+        #should flush here
+
+        #cancel should stop messages being delivered
+        session.message_cancel(destination="my-consumer")
+        session.message_transfer(message=Message(session.delivery_properties(routing_key="test-queue-4"), "Two"))
+        msg = myqueue.get(timeout=1)
+        self.assertEqual("One", msg.body)
+        try:
+            msg = myqueue.get(timeout=1)
+            self.fail("Got message after cancellation: " + msg)
+        except Empty: None
+
+        #cancellation of non-existent consumers should be handled without error
+        session.message_cancel(destination="my-consumer")
+        session.message_cancel(destination="this-never-existed")
+
+
+    def test_ack(self):
+        """
+        Test basic ack/recover behaviour: accepted messages are gone for
+        good, unaccepted ones are redelivered to another subscriber after
+        the original session closes.
+        """
+        # Use a separate session so it can be closed without disturbing the
+        # main test session.
+        session = self.conn.session("alternate-session", timeout=10)
+        session.queue_declare(queue="test-ack-queue", auto_delete=True)
+
+        session.message_subscribe(queue = "test-ack-queue", destination = "consumer")
+        session.message_flow(destination="consumer", unit=session.credit_unit.message, value=0xFFFFFFFF)
+        session.message_flow(destination="consumer", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+        queue = session.incoming("consumer")
+
+        delivery_properties = session.delivery_properties(routing_key="test-ack-queue")
+        for i in ["One", "Two", "Three", "Four", "Five"]:
+            session.message_transfer(message=Message(delivery_properties, i))
+
+        msg1 = queue.get(timeout=1)
+        msg2 = queue.get(timeout=1)
+        msg3 = queue.get(timeout=1)
+        msg4 = queue.get(timeout=1)
+        msg5 = queue.get(timeout=1)
+
+        self.assertEqual("One", msg1.body)
+        self.assertEqual("Two", msg2.body)
+        self.assertEqual("Three", msg3.body)
+        self.assertEqual("Four", msg4.body)
+        self.assertEqual("Five", msg5.body)
+
+        session.message_accept(RangedSet(msg1.id, msg2.id, msg4.id))#One, Two and Four
+
+        #subscribe from second session here to ensure queue is not
+        #auto-deleted when alternate session closes (no need to ack on these):
+        self.session.message_subscribe(queue = "test-ack-queue", destination = "checker", accept_mode=1)
+
+        #now close the session, and see that the unacked messages are
+        #then redelivered to another subscriber:
+        session.close(timeout=10)
+
+        session = self.session
+        session.message_flow(destination="checker", unit=session.credit_unit.message, value=0xFFFFFFFF)
+        session.message_flow(destination="checker", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+        queue = session.incoming("checker")
+
+        # Only the unaccepted messages (Three and Five) should come back.
+        msg3b = queue.get(timeout=1)
+        msg5b = queue.get(timeout=1)
+
+        self.assertEqual("Three", msg3b.body)
+        self.assertEqual("Five", msg5b.body)
+
+        try:
+            extra = queue.get(timeout=1)
+            self.fail("Got unexpected message: " + extra.body)
+        except Empty: None
+
+    def test_reject(self):
+        """A rejected message must be rerouted to the queue's alternate
+        exchange (amq.fanout here, bound to queue r)."""
+        session = self.session
+        session.queue_declare(queue = "q", exclusive=True, auto_delete=True, alternate_exchange="amq.fanout")
+        session.queue_declare(queue = "r", exclusive=True, auto_delete=True)
+        session.exchange_bind(queue = "r", exchange = "amq.fanout")
+
+        session.message_subscribe(queue = "q", destination = "consumer")
+        session.message_flow(destination="consumer", unit=session.credit_unit.message, value=0xFFFFFFFF)
+        session.message_flow(destination="consumer", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+        session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "blah, blah"))
+        msg = session.incoming("consumer").get(timeout = 1)
+        self.assertEquals(msg.body, "blah, blah")
+        session.message_reject(RangedSet(msg.id))
+
+        # The rejected message should now be available via the alternate route.
+        session.message_subscribe(queue = "r", destination = "checker")
+        session.message_flow(destination="checker", unit=session.credit_unit.message, value=0xFFFFFFFF)
+        session.message_flow(destination="checker", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+        msg = session.incoming("checker").get(timeout = 1)
+        self.assertEquals(msg.body, "blah, blah")
+
+    def test_credit_flow_messages(self):
+        """
+        Test basic credit based flow control with unit = message
+        """
+        #declare an exclusive queue
+        session = self.session
+        session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+        #create consumer (for now that defaults to infinite credit)
+        session.message_subscribe(queue = "q", destination = "c")
+        # flow_mode 0: credit mode — presumably matches session.flow_mode.credit; TODO confirm
+        session.message_set_flow_mode(flow_mode = 0, destination = "c")
+        #send batch of messages to queue
+        for i in range(1, 11):
+            session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "Message %d" % i))
+
+        #set message credit to finite amount (less than enough for all messages)
+        session.message_flow(unit = session.credit_unit.message, value = 5, destination = "c")
+        #set infinite byte credit
+        session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "c")
+        #check that expected number were received
+        q = session.incoming("c")
+        for i in range(1, 6):
+            self.assertDataEquals(session, q.get(timeout = 1), "Message %d" % i)
+        self.assertEmpty(q)
+
+        #increase credit again (one message at a time) and check more are received
+        for i in range(6, 11):
+            session.message_flow(unit = session.credit_unit.message, value = 1, destination = "c")
+            self.assertDataEquals(session, q.get(timeout = 1), "Message %d" % i)
+        self.assertEmpty(q)
+
+    def test_credit_flow_bytes(self):
+        """
+        Test basic credit based flow control with unit = bytes
+        """
+        #declare an exclusive queue
+        session = self.session
+        session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+        #create consumer (for now that defaults to infinite credit)
+        session.message_subscribe(queue = "q", destination = "c")
+        session.message_set_flow_mode(flow_mode = 0, destination = "c")
+        #send batch of messages to queue
+        for i in range(10):
+            session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "abcdefgh"))
+
+        #each message is currently interpreted as requiring msg_size bytes of credit
+        # (8-byte body plus framing overhead as charged by this broker build)
+        msg_size = 19
+
+        #set byte credit to finite amount (less than enough for all messages)
+        session.message_flow(unit = session.credit_unit.byte, value = msg_size*5, destination = "c")
+        #set infinite message credit
+        session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFF, destination = "c")
+        #check that expected number were received
+        q = session.incoming("c")
+        for i in range(5):
+            self.assertDataEquals(session, q.get(timeout = 1), "abcdefgh")
+        self.assertEmpty(q)
+
+        #increase credit again (one message's worth at a time) and check more are received
+        for i in range(5):
+            session.message_flow(unit = session.credit_unit.byte, value = msg_size, destination = "c")
+            self.assertDataEquals(session, q.get(timeout = 1), "abcdefgh")
+        self.assertEmpty(q)
+
+
+    def test_window_flow_messages(self):
+        """
+        Test basic window based flow control with unit = message:
+        credit is only replenished as deliveries are completed.
+        """
+        #declare an exclusive queue
+        session = self.session
+        session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+        #create consumer (for now that defaults to infinite credit)
+        session.message_subscribe(queue = "q", destination = "c")
+        # flow_mode 1: window mode — presumably matches session.flow_mode.window; TODO confirm
+        session.message_set_flow_mode(flow_mode = 1, destination = "c")
+        #send batch of messages to queue
+        for i in range(1, 11):
+            session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "Message %d" % i))
+
+        #set message credit to finite amount (less than enough for all messages)
+        session.message_flow(unit = session.credit_unit.message, value = 5, destination = "c")
+        #set infinite byte credit
+        session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "c")
+        #check that expected number were received
+        q = session.incoming("c")
+        for i in range(1, 6):
+            msg = q.get(timeout = 1)
+            session.receiver._completed.add(msg.id)#TODO: this may be done automatically
+            self.assertDataEquals(session, msg, "Message %d" % i)
+        self.assertEmpty(q)
+
+        #acknowledge messages and check more are received
+        #TODO: there may be a nicer way of doing this
+        session.channel.session_completed(session.receiver._completed)
+
+        for i in range(6, 11):
+            self.assertDataEquals(session, q.get(timeout = 1), "Message %d" % i)
+        self.assertEmpty(q)
+
+
+    def test_window_flow_bytes(self):
+        """
+        Test basic window based flow control with unit = bytes:
+        byte credit is only replenished as deliveries are completed.
+        """
+        #declare an exclusive queue
+        session = self.session
+        session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+        #create consumer (for now that defaults to infinite credit)
+        session.message_subscribe(queue = "q", destination = "c")
+        session.message_set_flow_mode(flow_mode = 1, destination = "c")
+        #send batch of messages to queue
+        for i in range(10):
+            session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "abcdefgh"))
+
+        #each message is currently interpreted as requiring msg_size bytes of credit
+        # (8-byte body plus framing overhead as charged by this broker build)
+        msg_size = 19
+
+        #set byte credit to finite amount (less than enough for all messages)
+        session.message_flow(unit = session.credit_unit.byte, value = msg_size*5, destination = "c")
+        #set infinite message credit
+        session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFF, destination = "c")
+        #check that expected number were received
+        q = session.incoming("c")
+        msgs = []
+        for i in range(5):
+            msg = q.get(timeout = 1)
+            msgs.append(msg)
+            self.assertDataEquals(session, msg, "abcdefgh")
+        self.assertEmpty(q)
+
+        #ack each message individually and check more are received
+        for i in range(5):
+            msg = msgs.pop()
+            #TODO: there may be a nicer way of doing this
+            session.receiver._completed.add(msg.id)
+            session.channel.session_completed(session.receiver._completed)
+            self.assertDataEquals(session, q.get(timeout = 1), "abcdefgh")
+        self.assertEmpty(q)
+
+    def test_subscribe_not_acquired(self):
+        """
+        Test the not-acquired modes works as expected for a simple case:
+        with acquire_mode=1 (not-acquired) two subscribers each see every
+        message and nothing is removed from the queue.
+        """
+        session = self.session
+        session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+        for i in range(1, 6):
+            session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "Message %s" % i))
+
+        session.message_subscribe(queue = "q", destination = "a", acquire_mode = 1)
+        session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFF, destination = "a")
+        session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "a")
+        session.message_subscribe(queue = "q", destination = "b", acquire_mode = 1)
+        session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFF, destination = "b")
+        session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "b")
+
+        # Second half sent after both subscriptions exist; both halves must
+        # still reach both subscribers.
+        for i in range(6, 11):
+            session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "Message %s" % i))
+
+        #both subscribers should see all messages
+        qA = session.incoming("a")
+        qB = session.incoming("b")
+        for i in range(1, 11):
+            for q in [qA, qB]:
+                msg = q.get(timeout = 1)
+                self.assertEquals("Message %s" % i, msg.body)
+                #TODO: tidy up completion
+                session.receiver._completed.add(msg.id)
+
+        #TODO: tidy up completion
+        session.channel.session_completed(session.receiver._completed)
+        #messages should still be on the queue:
+        self.assertEquals(10, session.queue_query(queue = "q").message_count)
+
+    def test_acquire_with_no_accept_and_credit_flow(self):
+        """
+        Test that messages received unacquired, with accept not
+        required in windowing mode can be acquired.
+        """
+        session = self.session
+        session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+
+        session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "acquire me"))
+
+        # acquire_mode=1 (not-acquired) and accept_mode=1 (no accept required)
+        session.message_subscribe(queue = "q", destination = "a", acquire_mode = 1, accept_mode = 1)
+        session.message_set_flow_mode(flow_mode = session.flow_mode.credit, destination = "a")
+        session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFF, destination = "a")
+        session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "a")
+        msg = session.incoming("a").get(timeout = 1)
+        self.assertEquals("acquire me", msg.body)
+        #message should still be on the queue:
+        self.assertEquals(1, session.queue_query(queue = "q").message_count)
+
+        transfers = RangedSet(msg.id)
+        response = session.message_acquire(transfers)
+        #check that we get notification (i.e. message_acquired)
+        self.assert_(msg.id in response.transfers)
+        #message should have been removed from the queue:
+        self.assertEquals(0, session.queue_query(queue = "q").message_count)
+
+    def test_acquire(self):
+        """
+        Test explicit acquire function: a message delivered in
+        not-acquired mode stays on the queue until message_acquire.
+        """
+        session = self.session
+        session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+
+        session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "acquire me"))
+
+        session.message_subscribe(queue = "q", destination = "a", acquire_mode = 1)
+        session.message_flow(destination="a", unit=session.credit_unit.message, value=0xFFFFFFFF)
+        session.message_flow(destination="a", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+        msg = session.incoming("a").get(timeout = 1)
+        self.assertEquals("acquire me", msg.body)
+        #message should still be on the queue:
+        self.assertEquals(1, session.queue_query(queue = "q").message_count)
+
+        transfers = RangedSet(msg.id)
+        response = session.message_acquire(transfers)
+        #check that we get notification (i.e. message_acquired)
+        self.assert_(msg.id in response.transfers)
+        #message should have been removed from the queue:
+        self.assertEquals(0, session.queue_query(queue = "q").message_count)
+        session.message_accept(transfers)
+
+
+    def test_release(self):
+        """
+        Test explicit release function: a released message is returned to
+        the queue rather than being consumed.
+        """
+        session = self.session
+        session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+
+        session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "release me"))
+
+        session.message_subscribe(queue = "q", destination = "a")
+        session.message_flow(destination="a", unit=session.credit_unit.message, value=0xFFFFFFFF)
+        session.message_flow(destination="a", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+        msg = session.incoming("a").get(timeout = 1)
+        self.assertEquals("release me", msg.body)
+        # Cancel first so the released message is not simply redelivered to us.
+        session.message_cancel(destination = "a")
+        session.message_release(RangedSet(msg.id))
+
+        #message should not have been removed from the queue:
+        self.assertEquals(1, session.queue_query(queue = "q").message_count)
+
+ def test_release_ordering(self):
+ """
+ Test order of released messages is as expected
+ """
+ session = self.session
+ session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+ for i in range (1, 11):
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "released message %s" % (i)))
+
+ session.message_subscribe(queue = "q", destination = "a")
+ session.message_flow(unit = session.credit_unit.message, value = 10, destination = "a")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "a")
+ queue = session.incoming("a")
+ first = queue.get(timeout = 1)
+ for i in range(2, 10):
+ msg = queue.get(timeout = 1)
+ self.assertEquals("released message %s" % (i), msg.body)
+
+ last = queue.get(timeout = 1)
+ self.assertEmpty(queue)
+ released = RangedSet()
+ released.add(first.id, last.id)
+ session.message_release(released)
+
+ #TODO: may want to clean this up...
+ session.receiver._completed.add(first.id, last.id)
+ session.channel.session_completed(session.receiver._completed)
+
+ for i in range(1, 11):
+ self.assertEquals("released message %s" % (i), queue.get(timeout = 1).body)
+
+ def test_ranged_ack(self):
+ """
+ Test acking of messages ranges
+ """
+ session = self.conn.session("alternate-session", timeout=10)
+
+ session.queue_declare(queue = "q", auto_delete=True)
+ delivery_properties = session.delivery_properties(routing_key="q")
+ for i in range (1, 11):
+ session.message_transfer(message=Message(delivery_properties, "message %s" % (i)))
+
+ session.message_subscribe(queue = "q", destination = "a")
+ session.message_flow(unit = session.credit_unit.message, value = 10, destination = "a")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "a")
+ queue = session.incoming("a")
+ ids = []
+ for i in range (1, 11):
+ msg = queue.get(timeout = 1)
+ self.assertEquals("message %s" % (i), msg.body)
+ ids.append(msg.id)
+
+ self.assertEmpty(queue)
+
+ #ack all but the fourth message (command id 2)
+ accepted = RangedSet()
+ accepted.add(ids[0], ids[2])
+ accepted.add(ids[4], ids[9])
+ session.message_accept(accepted)
+
+ #subscribe from second session here to ensure queue is not
+ #auto-deleted when alternate session closes (no need to ack on these):
+ self.session.message_subscribe(queue = "q", destination = "checker")
+
+ #now close the session, and see that the unacked messages are
+ #then redelivered to another subscriber:
+ session.close(timeout=10)
+
+ session = self.session
+ session.message_flow(destination="checker", unit=session.credit_unit.message, value=0xFFFFFFFF)
+ session.message_flow(destination="checker", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ queue = session.incoming("checker")
+
+ self.assertEquals("message 4", queue.get(timeout = 1).body)
+ self.assertEmpty(queue)
+
+ def test_subscribe_not_acquired_2(self):
+ session = self.session
+
+ #publish some messages
+ session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+ for i in range(1, 11):
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "message-%d" % (i)))
+
+ #consume some of them
+ session.message_subscribe(queue = "q", destination = "a")
+ session.message_set_flow_mode(flow_mode = 0, destination = "a")
+ session.message_flow(unit = session.credit_unit.message, value = 5, destination = "a")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "a")
+
+ queue = session.incoming("a")
+ for i in range(1, 6):
+ msg = queue.get(timeout = 1)
+ self.assertEquals("message-%d" % (i), msg.body)
+ #complete and accept
+ session.message_accept(RangedSet(msg.id))
+ #TODO: tidy up completion
+ session.receiver._completed.add(msg.id)
+ session.channel.session_completed(session.receiver._completed)
+ self.assertEmpty(queue)
+
+ #now create a not-acquired subscriber
+ session.message_subscribe(queue = "q", destination = "b", acquire_mode=1)
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "b")
+
+ #check it gets those not consumed
+ queue = session.incoming("b")
+ session.message_flow(unit = session.credit_unit.message, value = 1, destination = "b")
+ for i in range(6, 11):
+ msg = queue.get(timeout = 1)
+ self.assertEquals("message-%d" % (i), msg.body)
+ session.message_release(RangedSet(msg.id))
+ #TODO: tidy up completion
+ session.receiver._completed.add(msg.id)
+ session.channel.session_completed(session.receiver._completed)
+ session.message_flow(unit = session.credit_unit.message, value = 1, destination = "b")
+ self.assertEmpty(queue)
+
+ #check all 'browsed' messages are still on the queue
+ self.assertEqual(5, session.queue_query(queue="q").message_count)
+
+ def test_subscribe_not_acquired_3(self):
+ session = self.session
+
+ #publish some messages
+ session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+ for i in range(1, 11):
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "message-%d" % (i)))
+
+ #create a not-acquired subscriber
+ session.message_subscribe(queue = "q", destination = "a", acquire_mode=1)
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "a")
+ session.message_flow(unit = session.credit_unit.message, value = 10, destination = "a")
+
+ #browse through messages
+ queue = session.incoming("a")
+ for i in range(1, 11):
+ msg = queue.get(timeout = 1)
+ self.assertEquals("message-%d" % (i), msg.body)
+ if (i % 2):
+ #try to acquire every second message
+ response = session.message_acquire(RangedSet(msg.id))
+ #check that acquire succeeds
+ self.assert_(msg.id in response.transfers)
+ session.message_accept(RangedSet(msg.id))
+ else:
+ session.message_release(RangedSet(msg.id))
+ session.receiver._completed.add(msg.id)
+ session.channel.session_completed(session.receiver._completed)
+ self.assertEmpty(queue)
+
+ #create a second not-acquired subscriber
+ session.message_subscribe(queue = "q", destination = "b", acquire_mode=1)
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "b")
+ session.message_flow(unit = session.credit_unit.message, value = 1, destination = "b")
+ #check it gets those not consumed
+ queue = session.incoming("b")
+ for i in [2,4,6,8,10]:
+ msg = queue.get(timeout = 1)
+ self.assertEquals("message-%d" % (i), msg.body)
+ session.message_release(RangedSet(msg.id))
+ session.receiver._completed.add(msg.id)
+ session.channel.session_completed(session.receiver._completed)
+ session.message_flow(unit = session.credit_unit.message, value = 1, destination = "b")
+ self.assertEmpty(queue)
+
+ #check all 'browsed' messages are still on the queue
+ self.assertEqual(5, session.queue_query(queue="q").message_count)
+
+ def test_release_unacquired(self):
+ session = self.session
+
+ #create queue
+ session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+
+ #send message
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "my-message"))
+
+ #create two 'browsers'
+ session.message_subscribe(queue = "q", destination = "a", acquire_mode=1)
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "a")
+ session.message_flow(unit = session.credit_unit.message, value = 10, destination = "a")
+ queueA = session.incoming("a")
+
+ session.message_subscribe(queue = "q", destination = "b", acquire_mode=1)
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "b")
+ session.message_flow(unit = session.credit_unit.message, value = 10, destination = "b")
+ queueB = session.incoming("b")
+
+ #have each browser release the message
+ msgA = queueA.get(timeout = 1)
+ session.message_release(RangedSet(msgA.id))
+
+ msgB = queueB.get(timeout = 1)
+ session.message_release(RangedSet(msgB.id))
+
+ #cancel browsers
+ session.message_cancel(destination = "a")
+ session.message_cancel(destination = "b")
+
+ #create consumer
+ session.message_subscribe(queue = "q", destination = "c")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "c")
+ session.message_flow(unit = session.credit_unit.message, value = 10, destination = "c")
+ queueC = session.incoming("c")
+ #consume the message then ack it
+ msgC = queueC.get(timeout = 1)
+ session.message_accept(RangedSet(msgC.id))
+ #ensure there are no other messages
+ self.assertEmpty(queueC)
+
+ def test_empty_body(self):
+ session = self.session
+ session.queue_declare(queue="xyz", exclusive=True, auto_delete=True)
+ props = session.delivery_properties(routing_key="xyz")
+ session.message_transfer(message=Message(props, ""))
+
+ consumer_tag = "tag1"
+ session.message_subscribe(queue="xyz", destination=consumer_tag)
+ session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFF, destination = consumer_tag)
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = consumer_tag)
+ queue = session.incoming(consumer_tag)
+ msg = queue.get(timeout=1)
+ self.assertEquals("", msg.body)
+ session.message_accept(RangedSet(msg.id))
+
+ def test_incoming_start(self):
+ q = "test_incoming_start"
+ session = self.session
+
+ session.queue_declare(queue=q, exclusive=True, auto_delete=True)
+ session.message_subscribe(queue=q, destination="msgs")
+ messages = session.incoming("msgs")
+ assert messages.destination == "msgs"
+
+ dp = session.delivery_properties(routing_key=q)
+ session.message_transfer(message=Message(dp, "test"))
+
+ messages.start()
+ msg = messages.get()
+ assert msg.body == "test"
+
+ def test_ttl(self):
+ q = "test_ttl"
+ session = self.session
+
+ session.queue_declare(queue=q, exclusive=True, auto_delete=True)
+
+ dp = session.delivery_properties(routing_key=q, ttl=500)#expire in half a second
+ session.message_transfer(message=Message(dp, "first"))
+
+ dp = session.delivery_properties(routing_key=q, ttl=300000)#expire in fives minutes
+ session.message_transfer(message=Message(dp, "second"))
+
+ d = "msgs"
+ session.message_subscribe(queue=q, destination=d)
+ messages = session.incoming(d)
+ sleep(1)
+ session.message_flow(unit = session.credit_unit.message, value=2, destination=d)
+ session.message_flow(unit = session.credit_unit.byte, value=0xFFFFFFFF, destination=d)
+ assert messages.get(timeout=1).body == "second"
+ self.assertEmpty(messages)
+
+
+ def assertDataEquals(self, session, msg, expected):
+ self.assertEquals(expected, msg.body)
+
+ def assertEmpty(self, queue):
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Queue not empty, contains: " + extra.body)
+ except Empty: None
+
+class SizelessContent(Content):
+
+ def size(self):
+ return None
diff --git a/RC9/qpid/python/tests_0-10/persistence.py b/RC9/qpid/python/tests_0-10/persistence.py
new file mode 100644
index 0000000000..815ad1f3dc
--- /dev/null
+++ b/RC9/qpid/python/tests_0-10/persistence.py
@@ -0,0 +1,67 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.datatypes import Message, RangedSet
+from qpid.testlib import testrunner, TestBase010
+
+class PersistenceTests(TestBase010):
+ def test_delete_queue_after_publish(self):
+ session = self.session
+ session.auto_sync = False
+
+ #create queue
+ session.queue_declare(queue = "q", auto_delete=True, durable=True)
+
+ #send message
+ for i in range(1, 10):
+ dp = session.delivery_properties(routing_key="q", delivery_mode=2)
+ session.message_transfer(message=Message(dp, "my-message"))
+
+ session.auto_sync = True
+ #explicitly delete queue
+ session.queue_delete(queue = "q")
+
+ def test_ack_message_from_deleted_queue(self):
+ session = self.session
+ session.auto_sync = False
+
+ #create queue
+ session.queue_declare(queue = "q", auto_delete=True, durable=True)
+
+ #send message
+ dp = session.delivery_properties(routing_key="q", delivery_mode=2)
+ session.message_transfer(message=Message(dp, "my-message"))
+
+ #create consumer
+ session.message_subscribe(queue = "q", destination = "a", accept_mode = 1, acquire_mode=0)
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFF, destination = "a")
+ session.message_flow(unit = session.credit_unit.message, value = 10, destination = "a")
+ queue = session.incoming("a")
+
+ #consume the message, cancel subscription (triggering auto-delete), then ack it
+ msg = queue.get(timeout = 5)
+ session.message_cancel(destination = "a")
+ session.message_accept(RangedSet(msg.id))
+
+ def test_queue_deletion(self):
+ session = self.session
+ session.queue_declare(queue = "durable-subscriber-queue", exclusive=True, durable=True)
+ session.exchange_bind(exchange="amq.topic", queue="durable-subscriber-queue", binding_key="xyz")
+ dp = session.delivery_properties(routing_key="xyz", delivery_mode=2)
+ session.message_transfer(destination="amq.topic", message=Message(dp, "my-message"))
+ session.queue_delete(queue = "durable-subscriber-queue")
diff --git a/RC9/qpid/python/tests_0-10/query.py b/RC9/qpid/python/tests_0-10/query.py
new file mode 100644
index 0000000000..311df84096
--- /dev/null
+++ b/RC9/qpid/python/tests_0-10/query.py
@@ -0,0 +1,235 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.content import Content
+from qpid.testlib import TestBase010
+
+class QueryTests(TestBase010):
+ """Tests for various query methods"""
+
+ def test_queue_query(self):
+ session = self.session
+ session.queue_declare(queue="my-queue", exclusive=True)
+ result = session.queue_query(queue="my-queue")
+ self.assertEqual("my-queue", result.queue)
+
+ def test_queue_query_unknown(self):
+ session = self.session
+ result = session.queue_query(queue="I don't exist")
+ self.assert_(not result.queue)
+
+ def test_exchange_query(self):
+ """
+ Test that the exchange_query method works as expected
+ """
+ session = self.session
+ #check returned type for the standard exchanges
+ self.assertEqual("direct", session.exchange_query(name="amq.direct").type)
+ self.assertEqual("topic", session.exchange_query(name="amq.topic").type)
+ self.assertEqual("fanout", session.exchange_query(name="amq.fanout").type)
+ self.assertEqual("headers", session.exchange_query(name="amq.match").type)
+ self.assertEqual("direct", session.exchange_query(name="").type)
+ #declare an exchange
+ session.exchange_declare(exchange="my-test-exchange", type= "direct", durable=False)
+ #check that the result of a query is as expected
+ response = session.exchange_query(name="my-test-exchange")
+ self.assertEqual("direct", response.type)
+ self.assert_(not response.durable)
+ self.assert_(not response.not_found)
+ #delete the exchange
+ session.exchange_delete(exchange="my-test-exchange")
+ #check that the query now reports not-found
+ self.assert_(session.exchange_query(name="my-test-exchange").not_found)
+
+ def test_exchange_bound_direct(self):
+ """
+ Test that the exchange_bound method works as expected with the direct exchange
+ """
+ self.exchange_bound_with_key("amq.direct")
+
+ def test_exchange_bound_topic(self):
+ """
+ Test that the exchange_bound method works as expected with the direct exchange
+ """
+ self.exchange_bound_with_key("amq.topic")
+
+ def exchange_bound_with_key(self, exchange_name):
+ session = self.session
+ #setup: create two queues
+ session.queue_declare(queue="used-queue", exclusive=True, auto_delete=True)
+ session.queue_declare(queue="unused-queue", exclusive=True, auto_delete=True)
+
+ session.exchange_bind(exchange=exchange_name, queue="used-queue", binding_key="used-key")
+
+ # test detection of any binding to specific queue
+ response = session.exchange_bound(exchange=exchange_name, queue="used-queue")
+ self.assert_(not response.exchange_not_found)
+ self.assert_(not response.queue_not_found)
+ self.assert_(not response.queue_not_matched)
+
+ # test detection of specific binding to any queue
+ response = session.exchange_bound(exchange=exchange_name, binding_key="used-key")
+ self.assert_(not response.exchange_not_found)
+ self.assert_(not response.queue_not_found)
+ self.assert_(not response.key_not_matched)
+
+ # test detection of specific binding to specific queue
+ response = session.exchange_bound(exchange=exchange_name, queue="used-queue", binding_key="used-key")
+ self.assert_(not response.exchange_not_found)
+ self.assert_(not response.queue_not_found)
+ self.assert_(not response.queue_not_matched)
+ self.assert_(not response.key_not_matched)
+
+ # test unmatched queue, unspecified binding
+ response = session.exchange_bound(exchange=exchange_name, queue="unused-queue")
+ self.assert_(not response.exchange_not_found)
+ self.assert_(not response.queue_not_found)
+ self.assertEqual(True, response.queue_not_matched)
+
+ # test unspecified queue, unmatched binding
+ response = session.exchange_bound(exchange=exchange_name, binding_key="unused-key")
+ self.assert_(not response.exchange_not_found)
+ self.assert_(not response.queue_not_found)
+ self.assertEqual(True, response.key_not_matched)
+
+ # test matched queue, unmatched binding
+ response = session.exchange_bound(exchange=exchange_name, queue="used-queue", binding_key="unused-key")
+ self.assert_(not response.exchange_not_found)
+ self.assert_(not response.queue_not_found)
+ self.assert_(not response.queue_not_matched)
+ self.assertEqual(True, response.key_not_matched)
+
+ # test unmatched queue, matched binding
+ response = session.exchange_bound(exchange=exchange_name, queue="unused-queue", binding_key="used-key")
+ self.assert_(not response.exchange_not_found)
+ self.assert_(not response.queue_not_found)
+ self.assertEqual(True, response.queue_not_matched)
+ self.assert_(not response.key_not_matched)
+
+ # test unmatched queue, unmatched binding
+ response = session.exchange_bound(exchange=exchange_name, queue="unused-queue", binding_key="unused-key")
+ self.assert_(not response.exchange_not_found)
+ self.assert_(not response.queue_not_found)
+ self.assertEqual(True, response.queue_not_matched)
+ self.assertEqual(True, response.key_not_matched)
+
+ #test exchange not found
+ self.assertEqual(True, session.exchange_bound(exchange="unknown-exchange").exchange_not_found)
+
+ #test queue not found
+ self.assertEqual(True, session.exchange_bound(exchange=exchange_name, queue="unknown-queue").queue_not_found)
+
+
+ def test_exchange_bound_fanout(self):
+ """
+ Test that the exchange_bound method works as expected with fanout exchange
+ """
+ session = self.session
+ #setup
+ session.queue_declare(queue="used-queue", exclusive=True, auto_delete=True)
+ session.queue_declare(queue="unused-queue", exclusive=True, auto_delete=True)
+ session.exchange_bind(exchange="amq.fanout", queue="used-queue")
+
+ # test detection of any binding to specific queue
+ response = session.exchange_bound(exchange="amq.fanout", queue="used-queue")
+ self.assert_(not response.exchange_not_found)
+ self.assert_(not response.queue_not_found)
+ self.assert_(not response.queue_not_matched)
+
+ # test unmatched queue, unspecified binding
+ response = session.exchange_bound(exchange="amq.fanout", queue="unused-queue")
+ self.assert_(not response.exchange_not_found)
+ self.assert_(not response.queue_not_found)
+ self.assertEqual(True, response.queue_not_matched)
+
+ #test exchange not found
+ self.assertEqual(True, session.exchange_bound(exchange="unknown-exchange").exchange_not_found)
+
+ #test queue not found
+ self.assertEqual(True, session.exchange_bound(exchange="amq.fanout", queue="unknown-queue").queue_not_found)
+
+ def test_exchange_bound_header(self):
+ """
+ Test that the exchange_bound method works as expected with headers exchanges
+ """
+ session = self.session
+ #setup
+ session.queue_declare(queue="used-queue", exclusive=True, auto_delete=True)
+ session.queue_declare(queue="unused-queue", exclusive=True, auto_delete=True)
+ session.exchange_bind(exchange="amq.match", queue="used-queue", arguments={"x-match":"all", "a":"A"} )
+
+ # test detection of any binding to specific queue
+ response = session.exchange_bound(exchange="amq.match", queue="used-queue")
+ self.assert_(not response.exchange_not_found)
+ self.assert_(not response.queue_not_found)
+ self.assert_(not response.queue_not_matched)
+
+ # test detection of specific binding to any queue
+ response = session.exchange_bound(exchange="amq.match", arguments={"x-match":"all", "a":"A"})
+ self.assert_(not response.exchange_not_found)
+ self.assert_(not response.queue_not_found)
+ self.assert_(not response.args_not_matched)
+
+ # test detection of specific binding to specific queue
+ response = session.exchange_bound(exchange="amq.match", queue="used-queue", arguments={"x-match":"all", "a":"A"})
+ self.assert_(not response.exchange_not_found)
+ self.assert_(not response.queue_not_found)
+ self.assert_(not response.queue_not_matched)
+ self.assert_(not response.args_not_matched)
+
+ # test unmatched queue, unspecified binding
+ response = session.exchange_bound(exchange="amq.match", queue="unused-queue")
+ self.assert_(not response.exchange_not_found)
+ self.assert_(not response.queue_not_found)
+ self.assertEqual(True, response.queue_not_matched)
+
+ # test unspecified queue, unmatched binding
+ response = session.exchange_bound(exchange="amq.match", arguments={"x-match":"all", "b":"B"})
+ self.assert_(not response.exchange_not_found)
+ self.assert_(not response.queue_not_found)
+ self.assertEqual(True, response.args_not_matched)
+
+ # test matched queue, unmatched binding
+ response = session.exchange_bound(exchange="amq.match", queue="used-queue", arguments={"x-match":"all", "b":"B"})
+ self.assert_(not response.exchange_not_found)
+ self.assert_(not response.queue_not_found)
+ self.assert_(not response.queue_not_matched)
+ self.assertEqual(True, response.args_not_matched)
+
+ # test unmatched queue, matched binding
+ response = session.exchange_bound(exchange="amq.match", queue="unused-queue", arguments={"x-match":"all", "a":"A"})
+ self.assert_(not response.exchange_not_found)
+ self.assert_(not response.queue_not_found)
+ self.assertEqual(True, response.queue_not_matched)
+ self.assert_(not response.args_not_matched)
+
+ # test unmatched queue, unmatched binding
+ response = session.exchange_bound(exchange="amq.match", queue="unused-queue", arguments={"x-match":"all", "b":"B"})
+ self.assert_(not response.exchange_not_found)
+ self.assert_(not response.queue_not_found)
+ self.assertEqual(True, response.queue_not_matched)
+ self.assertEqual(True, response.args_not_matched)
+
+ #test exchange not found
+ self.assertEqual(True, session.exchange_bound(exchange="unknown-exchange").exchange_not_found)
+
+ #test queue not found
+ self.assertEqual(True, session.exchange_bound(exchange="amq.match", queue="unknown-queue").queue_not_found)
+
diff --git a/RC9/qpid/python/tests_0-10/queue.py b/RC9/qpid/python/tests_0-10/queue.py
new file mode 100644
index 0000000000..05e18081fa
--- /dev/null
+++ b/RC9/qpid/python/tests_0-10/queue.py
@@ -0,0 +1,366 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.testlib import TestBase010
+from qpid.datatypes import Message
+from qpid.session import SessionException
+
+class QueueTests(TestBase010):
+ """Tests for 'methods' on the amqp queue 'class'"""
+
+ def test_purge(self):
+ """
+ Test that the purge method removes messages from the queue
+ """
+ session = self.session
+ #setup, declare a queue and add some messages to it:
+ session.queue_declare(queue="test-queue", exclusive=True, auto_delete=True)
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="test-queue"), "one"))
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="test-queue"), "two"))
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="test-queue"), "three"))
+
+ #check that the queue now reports 3 messages:
+ session.queue_declare(queue="test-queue")
+ reply = session.queue_query(queue="test-queue")
+ self.assertEqual(3, reply.message_count)
+
+ #now do the purge, then test that three messages are purged and the count drops to 0
+ session.queue_purge(queue="test-queue");
+ reply = session.queue_query(queue="test-queue")
+ self.assertEqual(0, reply.message_count)
+
+ #send a further message and consume it, ensuring that the other messages are really gone
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="test-queue"), "four"))
+ session.message_subscribe(queue="test-queue", destination="tag")
+ session.message_flow(destination="tag", unit=session.credit_unit.message, value=0xFFFFFFFF)
+ session.message_flow(destination="tag", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ queue = session.incoming("tag")
+ msg = queue.get(timeout=1)
+ self.assertEqual("four", msg.body)
+
+ def test_purge_queue_exists(self):
+ """
+ Test that the correct exception is thrown if no queue exists
+ for the name specified in purge
+ """
+ session = self.session
+ try:
+ #queue specified but doesn't exist:
+ session.queue_purge(queue="invalid-queue")
+ self.fail("Expected failure when purging non-existent queue")
+ except SessionException, e:
+ self.assertEquals(404, e.args[0].error_code) #not-found
+
+ def test_purge_empty_name(self):
+ """
+ Test that the correct exception is thrown if no queue name
+ is specified for purge
+ """
+ session = self.session
+ try:
+ #queue not specified and none previously declared for channel:
+ session.queue_purge()
+ self.fail("Expected failure when purging unspecified queue")
+ except SessionException, e:
+ self.assertEquals(531, e.args[0].error_code) #illegal-argument
+
+ def test_declare_exclusive(self):
+ """
+ Test that the exclusive field is honoured in queue.declare
+ """
+ # TestBase.setUp has already opened session(1)
+ s1 = self.session
+ # Here we open a second separate connection:
+ s2 = self.conn.session("other")
+
+ #declare an exclusive queue:
+ s1.queue_declare(queue="exclusive-queue", exclusive=True, auto_delete=True)
+ try:
+ #other connection should not be allowed to declare this:
+ s2.queue_declare(queue="exclusive-queue", exclusive=True, auto_delete=True)
+ self.fail("Expected second exclusive queue_declare to raise a channel exception")
+ except SessionException, e:
+ self.assertEquals(405, e.args[0].error_code)
+
+ s3 = self.conn.session("subscriber")
+ try:
+ #other connection should not be allowed to subscribe to this:
+ s3.message_subscribe(queue="exclusive-queue")
+ self.fail("Expected message_subscribe on an exclusive queue to raise a channel exception")
+ except SessionException, e:
+ self.assertEquals(405, e.args[0].error_code)
+
+ s4 = self.conn.session("deleter")
+ try:
+ #other connection should not be allowed to delete this:
+ s4.queue_delete(queue="exclusive-queue")
+ self.fail("Expected queue_delete on an exclusive queue to raise a channel exception")
+ except SessionException, e:
+ self.assertEquals(405, e.args[0].error_code)
+
+
+ def test_declare_passive(self):
+ """
+ Test that the passive field is honoured in queue.declare
+ """
+ session = self.session
+ #declare an exclusive queue:
+ session.queue_declare(queue="passive-queue-1", exclusive=True, auto_delete=True)
+ session.queue_declare(queue="passive-queue-1", passive=True)
+ try:
+ #passive declaration of a non-existent queue should fail:
+ session.queue_declare(queue="passive-queue-2", passive=True)
+ self.fail("Expected passive declaration of non-existant queue to raise a channel exception")
+ except SessionException, e:
+ self.assertEquals(404, e.args[0].error_code) #not-found
+
+
+ def test_bind(self):
+ """
+ Test various permutations of the queue.bind method
+ """
+ session = self.session
+ session.queue_declare(queue="queue-1", exclusive=True, auto_delete=True)
+
+ #straightforward case, both exchange & queue exist so no errors expected:
+ session.exchange_bind(queue="queue-1", exchange="amq.direct", binding_key="key1")
+
+ #use the queue name where the routing key is not specified:
+ session.exchange_bind(queue="queue-1", exchange="amq.direct")
+
+ #try and bind to non-existent exchange
+ try:
+ session.exchange_bind(queue="queue-1", exchange="an-invalid-exchange", binding_key="key1")
+ self.fail("Expected bind to non-existant exchange to fail")
+ except SessionException, e:
+ self.assertEquals(404, e.args[0].error_code)
+
+
+ def test_bind_queue_existence(self):
+ session = self.session
+ #try and bind non-existent queue:
+ try:
+ session.exchange_bind(queue="queue-2", exchange="amq.direct", binding_key="key1")
+ self.fail("Expected bind of non-existant queue to fail")
+ except SessionException, e:
+ self.assertEquals(404, e.args[0].error_code)
+
+ def test_unbind_direct(self):
+ self.unbind_test(exchange="amq.direct", routing_key="key")
+
+ def test_unbind_topic(self):
+ self.unbind_test(exchange="amq.topic", routing_key="key")
+
+ def test_unbind_fanout(self):
+ self.unbind_test(exchange="amq.fanout")
+
+ def test_unbind_headers(self):
+ self.unbind_test(exchange="amq.match", args={ "x-match":"all", "a":"b"}, headers={"a":"b"})
+
+ def unbind_test(self, exchange, routing_key="", args=None, headers=None):
+ #bind two queues and consume from them
+ session = self.session
+
+ session.queue_declare(queue="queue-1", exclusive=True, auto_delete=True)
+ session.queue_declare(queue="queue-2", exclusive=True, auto_delete=True)
+
+ session.message_subscribe(queue="queue-1", destination="queue-1")
+ session.message_flow(destination="queue-1", unit=session.credit_unit.message, value=0xFFFFFFFF)
+ session.message_flow(destination="queue-1", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ session.message_subscribe(queue="queue-2", destination="queue-2")
+ session.message_flow(destination="queue-2", unit=session.credit_unit.message, value=0xFFFFFFFF)
+ session.message_flow(destination="queue-2", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+
+ queue1 = session.incoming("queue-1")
+ queue2 = session.incoming("queue-2")
+
+ session.exchange_bind(exchange=exchange, queue="queue-1", binding_key=routing_key, arguments=args)
+ session.exchange_bind(exchange=exchange, queue="queue-2", binding_key=routing_key, arguments=args)
+
+ dp = session.delivery_properties(routing_key=routing_key)
+ if (headers):
+ mp = session.message_properties(application_headers=headers)
+ msg1 = Message(dp, mp, "one")
+ msg2 = Message(dp, mp, "two")
+ else:
+ msg1 = Message(dp, "one")
+ msg2 = Message(dp, "two")
+
+ #send a message that will match both bindings
+ session.message_transfer(destination=exchange, message=msg1)
+
+ #unbind first queue
+ session.exchange_unbind(exchange=exchange, queue="queue-1", binding_key=routing_key)
+
+ #send another message
+ session.message_transfer(destination=exchange, message=msg2)
+
+ #check one queue has both messages and the other has only one
+ self.assertEquals("one", queue1.get(timeout=1).body)
+ try:
+ msg = queue1.get(timeout=1)
+ self.fail("Got extra message: %s" % msg.body)
+ except Empty: pass
+
+ self.assertEquals("one", queue2.get(timeout=1).body)
+ self.assertEquals("two", queue2.get(timeout=1).body)
+ try:
+ msg = queue2.get(timeout=1)
+ self.fail("Got extra message: " + msg)
+ except Empty: pass
+
+
+ def test_delete_simple(self):
+ """
+ Test core queue deletion behaviour
+ """
+ session = self.session
+
+ #straight-forward case:
+ session.queue_declare(queue="delete-me")
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="delete-me"), "a"))
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="delete-me"), "b"))
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="delete-me"), "c"))
+ session.queue_delete(queue="delete-me")
+ #check that it has gone by declaring passively
+ try:
+ session.queue_declare(queue="delete-me", passive=True)
+ self.fail("Queue has not been deleted")
+ except SessionException, e:
+ self.assertEquals(404, e.args[0].error_code)
+
+ def test_delete_queue_exists(self):
+ """
+ Test core queue deletion behaviour
+ """
+ #check attempted deletion of non-existent queue is handled correctly:
+ session = self.session
+ try:
+ session.queue_delete(queue="i-dont-exist", if_empty=True)
+ self.fail("Expected delete of non-existant queue to fail")
+ except SessionException, e:
+ self.assertEquals(404, e.args[0].error_code)
+
+
+
+ def test_delete_ifempty(self):
+ """
+ Test that if_empty field of queue_delete is honoured
+ """
+ session = self.session
+
+ #create a queue and add a message to it (use default binding):
+ session.queue_declare(queue="delete-me-2")
+ session.queue_declare(queue="delete-me-2", passive=True)
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="delete-me-2"), "message"))
+
+ #try to delete, but only if empty:
+ try:
+ session.queue_delete(queue="delete-me-2", if_empty=True)
+ self.fail("Expected delete if_empty to fail for non-empty queue")
+ except SessionException, e:
+ self.assertEquals(406, e.args[0].error_code)
+
+ #need new session now:
+ session = self.conn.session("replacement", 2)
+
+ #empty queue:
+ session.message_subscribe(destination="consumer_tag", queue="delete-me-2")
+ session.message_flow(destination="consumer_tag", unit=session.credit_unit.message, value=0xFFFFFFFF)
+ session.message_flow(destination="consumer_tag", unit=session.credit_unit.byte, value=0xFFFFFFFF)
+ queue = session.incoming("consumer_tag")
+ msg = queue.get(timeout=1)
+ self.assertEqual("message", msg.body)
+ session.message_cancel(destination="consumer_tag")
+
+ #retry deletion on empty queue:
+ session.queue_delete(queue="delete-me-2", if_empty=True)
+
+ #check that it has gone by declaring passively:
+ try:
+ session.queue_declare(queue="delete-me-2", passive=True)
+ self.fail("Queue has not been deleted")
+ except SessionException, e:
+ self.assertEquals(404, e.args[0].error_code)
+
+ def test_delete_ifunused(self):
+ """
+ Test that if_unused field of queue_delete is honoured
+ """
+ session = self.session
+
+ #create a queue and register a consumer:
+ session.queue_declare(queue="delete-me-3")
+ session.queue_declare(queue="delete-me-3", passive=True)
+ session.message_subscribe(destination="consumer_tag", queue="delete-me-3")
+
+ #need new session now:
+ session2 = self.conn.session("replacement", 2)
+
+ #try to delete, but only if empty:
+ try:
+ session2.queue_delete(queue="delete-me-3", if_unused=True)
+ self.fail("Expected delete if_unused to fail for queue with existing consumer")
+ except SessionException, e:
+ self.assertEquals(406, e.args[0].error_code)
+
+ session.message_cancel(destination="consumer_tag")
+ session.queue_delete(queue="delete-me-3", if_unused=True)
+ #check that it has gone by declaring passively:
+ try:
+ session.queue_declare(queue="delete-me-3", passive=True)
+ self.fail("Queue has not been deleted")
+ except SessionException, e:
+ self.assertEquals(404, e.args[0].error_code)
+
+
+ def test_autodelete_shared(self):
+ """
+ Test auto-deletion (of non-exclusive queues)
+ """
+ session = self.session
+ session2 =self.conn.session("other", 1)
+
+ session.queue_declare(queue="auto-delete-me", auto_delete=True)
+
+ #consume from both sessions
+ tag = "my-tag"
+ session.message_subscribe(queue="auto-delete-me", destination=tag)
+ session2.message_subscribe(queue="auto-delete-me", destination=tag)
+
+ #implicit cancel
+ session2.close()
+
+ #check it is still there
+ session.queue_declare(queue="auto-delete-me", passive=True)
+
+ #explicit cancel => queue is now unused again:
+ session.message_cancel(destination=tag)
+
+ #NOTE: this assumes there is no timeout in use
+
+ #check that it has gone by declaring it passively
+ try:
+ session.queue_declare(queue="auto-delete-me", passive=True)
+ self.fail("Expected queue to have been deleted")
+ except SessionException, e:
+ self.assertEquals(404, e.args[0].error_code)
+
+
diff --git a/RC9/qpid/python/tests_0-10/tx.py b/RC9/qpid/python/tests_0-10/tx.py
new file mode 100644
index 0000000000..da162d54ec
--- /dev/null
+++ b/RC9/qpid/python/tests_0-10/tx.py
@@ -0,0 +1,265 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.datatypes import Message, RangedSet
+from qpid.testlib import testrunner, TestBase010
+
+class TxTests(TestBase010):
+ """
+ Tests for 'methods' on the amqp tx 'class'
+ """
+
+ def test_commit(self):
+ """
+ Test that committed publishes are delivered and committed acks are not re-delivered
+ """
+ session = self.session
+
+ #declare queues and create subscribers in the checking session
+ #to ensure that the queues are not auto-deleted too early:
+ self.declare_queues(["tx-commit-a", "tx-commit-b", "tx-commit-c"])
+ session.message_subscribe(queue="tx-commit-a", destination="qa")
+ session.message_subscribe(queue="tx-commit-b", destination="qb")
+ session.message_subscribe(queue="tx-commit-c", destination="qc")
+
+ #use a separate session for actual work
+ session2 = self.conn.session("worker", 2)
+ self.perform_txn_work(session2, "tx-commit-a", "tx-commit-b", "tx-commit-c")
+ session2.tx_commit()
+ session2.close()
+
+ session.tx_select()
+
+ self.enable_flow("qa")
+ queue_a = session.incoming("qa")
+
+ self.enable_flow("qb")
+ queue_b = session.incoming("qb")
+
+ self.enable_flow("qc")
+ queue_c = session.incoming("qc")
+
+ #check results
+ for i in range(1, 5):
+ msg = queue_c.get(timeout=1)
+ self.assertEqual("TxMessage %d" % i, msg.body)
+ session.message_accept(RangedSet(msg.id))
+
+ msg = queue_b.get(timeout=1)
+ self.assertEqual("TxMessage 6", msg.body)
+ session.message_accept(RangedSet(msg.id))
+
+ msg = queue_a.get(timeout=1)
+ self.assertEqual("TxMessage 7", msg.body)
+ session.message_accept(RangedSet(msg.id))
+
+ for q in [queue_a, queue_b, queue_c]:
+ try:
+ extra = q.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.body)
+ except Empty: None
+
+ #cleanup
+ session.tx_commit()
+
+ def test_auto_rollback(self):
+ """
+ Test that a session closed with an open transaction is effectively rolled back
+ """
+ session = self.session
+ self.declare_queues(["tx-autorollback-a", "tx-autorollback-b", "tx-autorollback-c"])
+ session.message_subscribe(queue="tx-autorollback-a", destination="qa")
+ session.message_subscribe(queue="tx-autorollback-b", destination="qb")
+ session.message_subscribe(queue="tx-autorollback-c", destination="qc")
+
+ session2 = self.conn.session("worker", 2)
+ queue_a, queue_b, queue_c, ignore = self.perform_txn_work(session2, "tx-autorollback-a", "tx-autorollback-b", "tx-autorollback-c")
+
+ for q in [queue_a, queue_b, queue_c]:
+ try:
+ extra = q.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.body)
+ except Empty: None
+
+ session2.close()
+
+ session.tx_select()
+
+ self.enable_flow("qa")
+ queue_a = session.incoming("qa")
+
+ self.enable_flow("qb")
+ queue_b = session.incoming("qb")
+
+ self.enable_flow("qc")
+ queue_c = session.incoming("qc")
+
+ #check results
+ for i in range(1, 5):
+ msg = queue_a.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.body)
+ session.message_accept(RangedSet(msg.id))
+
+ msg = queue_b.get(timeout=1)
+ self.assertEqual("Message 6", msg.body)
+ session.message_accept(RangedSet(msg.id))
+
+ msg = queue_c.get(timeout=1)
+ self.assertEqual("Message 7", msg.body)
+ session.message_accept(RangedSet(msg.id))
+
+ for q in [queue_a, queue_b, queue_c]:
+ try:
+ extra = q.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.body)
+ except Empty: None
+
+ #cleanup
+ session.tx_commit()
+
+ def test_rollback(self):
+ """
+ Test that rolled back publishes are not delivered and rolled back acks are re-delivered
+ """
+ session = self.session
+ queue_a, queue_b, queue_c, consumed = self.perform_txn_work(session, "tx-rollback-a", "tx-rollback-b", "tx-rollback-c")
+
+ for q in [queue_a, queue_b, queue_c]:
+ try:
+ extra = q.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.body)
+ except Empty: None
+
+ session.tx_rollback()
+
+ #need to release messages to get them redelivered now:
+ session.message_release(consumed)
+
+ #check results
+ for i in range(1, 5):
+ msg = queue_a.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.body)
+ session.message_accept(RangedSet(msg.id))
+
+ msg = queue_b.get(timeout=1)
+ self.assertEqual("Message 6", msg.body)
+ session.message_accept(RangedSet(msg.id))
+
+ msg = queue_c.get(timeout=1)
+ self.assertEqual("Message 7", msg.body)
+ session.message_accept(RangedSet(msg.id))
+
+ for q in [queue_a, queue_b, queue_c]:
+ try:
+ extra = q.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.body)
+ except Empty: None
+
+ #cleanup
+ session.tx_commit()
+
+ def perform_txn_work(self, session, name_a, name_b, name_c):
+ """
+ Utility method that does some setup and some work under a transaction. Used for testing both
+ commit and rollback
+ """
+ #setup:
+ self.declare_queues([name_a, name_b, name_c])
+
+ key = "my_key_" + name_b
+ topic = "my_topic_" + name_c
+
+ session.exchange_bind(queue=name_b, exchange="amq.direct", binding_key=key)
+ session.exchange_bind(queue=name_c, exchange="amq.topic", binding_key=topic)
+
+ dp = session.delivery_properties(routing_key=name_a)
+ for i in range(1, 5):
+ mp = session.message_properties(message_id="msg%d" % i)
+ session.message_transfer(message=Message(dp, mp, "Message %d" % i))
+
+ dp = session.delivery_properties(routing_key=key)
+ mp = session.message_properties(message_id="msg6")
+ session.message_transfer(destination="amq.direct", message=Message(dp, mp, "Message 6"))
+
+ dp = session.delivery_properties(routing_key=topic)
+ mp = session.message_properties(message_id="msg7")
+ session.message_transfer(destination="amq.topic", message=Message(dp, mp, "Message 7"))
+
+ session.tx_select()
+
+ #consume and ack messages
+ acked = RangedSet()
+ self.subscribe(session, queue=name_a, destination="sub_a")
+ queue_a = session.incoming("sub_a")
+ for i in range(1, 5):
+ msg = queue_a.get(timeout=1)
+ acked.add(msg.id)
+ self.assertEqual("Message %d" % i, msg.body)
+
+ self.subscribe(session, queue=name_b, destination="sub_b")
+ queue_b = session.incoming("sub_b")
+ msg = queue_b.get(timeout=1)
+ self.assertEqual("Message 6", msg.body)
+ acked.add(msg.id)
+
+ sub_c = self.subscribe(session, queue=name_c, destination="sub_c")
+ queue_c = session.incoming("sub_c")
+ msg = queue_c.get(timeout=1)
+ self.assertEqual("Message 7", msg.body)
+ acked.add(msg.id)
+
+ session.message_accept(acked)
+
+ dp = session.delivery_properties(routing_key=topic)
+ #publish messages
+ for i in range(1, 5):
+ mp = session.message_properties(message_id="tx-msg%d" % i)
+ session.message_transfer(destination="amq.topic", message=Message(dp, mp, "TxMessage %d" % i))
+
+ dp = session.delivery_properties(routing_key=key)
+ mp = session.message_properties(message_id="tx-msg6")
+ session.message_transfer(destination="amq.direct", message=Message(dp, mp, "TxMessage 6"))
+
+ dp = session.delivery_properties(routing_key=name_a)
+ mp = session.message_properties(message_id="tx-msg7")
+ session.message_transfer(message=Message(dp, mp, "TxMessage 7"))
+ return queue_a, queue_b, queue_c, acked
+
+ def declare_queues(self, names, session=None):
+ session = session or self.session
+ for n in names:
+ session.queue_declare(queue=n, auto_delete=True)
+
+ def subscribe(self, session=None, **keys):
+ session = session or self.session
+ consumer_tag = keys["destination"]
+ session.message_subscribe(**keys)
+ session.message_flow(destination=consumer_tag, unit=session.credit_unit.message, value=0xFFFFFFFF)
+ session.message_flow(destination=consumer_tag, unit=session.credit_unit.byte, value=0xFFFFFFFF)
+
+ def enable_flow(self, tag, session=None):
+ session = session or self.session
+ session.message_flow(destination=tag, unit=session.credit_unit.message, value=0xFFFFFFFF)
+ session.message_flow(destination=tag, unit=session.credit_unit.byte, value=0xFFFFFFFF)
+
+ def complete(self, session, msg):
+ session.receiver._completed.add(msg.id)#TODO: this may be done automatically
+ session.channel.session_completed(session.receiver._completed)
+
diff --git a/RC9/qpid/python/tests_0-8/__init__.py b/RC9/qpid/python/tests_0-8/__init__.py
new file mode 100644
index 0000000000..9a09d2d04f
--- /dev/null
+++ b/RC9/qpid/python/tests_0-8/__init__.py
@@ -0,0 +1,20 @@
+# Do not delete - marks this directory as a python package.
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
diff --git a/RC9/qpid/python/tests_0-8/basic.py b/RC9/qpid/python/tests_0-8/basic.py
new file mode 100644
index 0000000000..95ca0d7287
--- /dev/null
+++ b/RC9/qpid/python/tests_0-8/basic.py
@@ -0,0 +1,395 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.content import Content
+from qpid.testlib import testrunner, TestBase
+
+class BasicTests(TestBase):
+ """Tests for 'methods' on the amqp basic 'class'"""
+
+ def test_consume_no_local(self):
+ """
+ Test that the no_local flag is honoured in the consume method
+ """
+ channel = self.channel
+ #setup, declare two queues:
+ channel.queue_declare(queue="test-queue-1a", exclusive=True)
+ channel.queue_declare(queue="test-queue-1b", exclusive=True)
+ #establish two consumers one of which excludes delivery of locally sent messages
+ channel.basic_consume(consumer_tag="local_included", queue="test-queue-1a")
+ channel.basic_consume(consumer_tag="local_excluded", queue="test-queue-1b", no_local=True)
+
+ #send a message
+ channel.basic_publish(routing_key="test-queue-1a", content=Content("consume_no_local"))
+ channel.basic_publish(routing_key="test-queue-1b", content=Content("consume_no_local"))
+
+ #check the queues of the two consumers
+ excluded = self.client.queue("local_excluded")
+ included = self.client.queue("local_included")
+ msg = included.get(timeout=1)
+ self.assertEqual("consume_no_local", msg.content.body)
+ try:
+ excluded.get(timeout=1)
+ self.fail("Received locally published message though no_local=true")
+ except Empty: None
+
+
+ def test_consume_exclusive(self):
+ """
+ Test that the exclusive flag is honoured in the consume method
+ """
+ channel = self.channel
+ #setup, declare a queue:
+ channel.queue_declare(queue="test-queue-2", exclusive=True)
+
+ #check that an exclusive consumer prevents other consumer being created:
+ channel.basic_consume(consumer_tag="first", queue="test-queue-2", exclusive=True)
+ try:
+ channel.basic_consume(consumer_tag="second", queue="test-queue-2")
+ self.fail("Expected consume request to fail due to previous exclusive consumer")
+ except Closed, e:
+ self.assertChannelException(403, e.args[0])
+
+ #open new channel and cleanup last consumer:
+ channel = self.client.channel(2)
+ channel.channel_open()
+
+ #check that an exclusive consumer cannot be created if a consumer already exists:
+ channel.basic_consume(consumer_tag="first", queue="test-queue-2")
+ try:
+ channel.basic_consume(consumer_tag="second", queue="test-queue-2", exclusive=True)
+ self.fail("Expected exclusive consume request to fail due to previous consumer")
+ except Closed, e:
+ self.assertChannelException(403, e.args[0])
+
+ def test_consume_queue_errors(self):
+ """
+ Test error conditions associated with the queue field of the consume method:
+ """
+ channel = self.channel
+ try:
+ #queue specified but doesn't exist:
+ channel.basic_consume(queue="invalid-queue")
+ self.fail("Expected failure when consuming from non-existent queue")
+ except Closed, e:
+ self.assertChannelException(404, e.args[0])
+
+ channel = self.client.channel(2)
+ channel.channel_open()
+ try:
+ #queue not specified and none previously declared for channel:
+ channel.basic_consume(queue="")
+ self.fail("Expected failure when consuming from unspecified queue")
+ except Closed, e:
+ self.assertConnectionException(530, e.args[0])
+
+ def test_consume_unique_consumers(self):
+ """
+ Ensure unique consumer tags are enforced
+ """
+ channel = self.channel
+ #setup, declare a queue:
+ channel.queue_declare(queue="test-queue-3", exclusive=True)
+
+ #check that attempts to use duplicate tags are detected and prevented:
+ channel.basic_consume(consumer_tag="first", queue="test-queue-3")
+ try:
+ channel.basic_consume(consumer_tag="first", queue="test-queue-3")
+ self.fail("Expected consume request to fail due to non-unique tag")
+ except Closed, e:
+ self.assertConnectionException(530, e.args[0])
+
+ def test_cancel(self):
+ """
+ Test compliance of the basic.cancel method
+ """
+ channel = self.channel
+ #setup, declare a queue:
+ channel.queue_declare(queue="test-queue-4", exclusive=True)
+ channel.basic_consume(consumer_tag="my-consumer", queue="test-queue-4")
+ channel.basic_publish(routing_key="test-queue-4", content=Content("One"))
+
+ myqueue = self.client.queue("my-consumer")
+ msg = myqueue.get(timeout=1)
+ self.assertEqual("One", msg.content.body)
+
+ #cancel should stop messages being delivered
+ channel.basic_cancel(consumer_tag="my-consumer")
+ channel.basic_publish(routing_key="test-queue-4", content=Content("Two"))
+ try:
+ msg = myqueue.get(timeout=1)
+ self.fail("Got message after cancellation: " + msg)
+ except Empty: None
+
+ #cancellation of non-existent consumers should be handled without error
+ channel.basic_cancel(consumer_tag="my-consumer")
+ channel.basic_cancel(consumer_tag="this-never-existed")
+
+
+ def test_ack(self):
+ """
+ Test basic ack/recover behaviour
+ """
+ channel = self.channel
+ channel.queue_declare(queue="test-ack-queue", exclusive=True)
+
+ reply = channel.basic_consume(queue="test-ack-queue", no_ack=False)
+ queue = self.client.queue(reply.consumer_tag)
+
+ channel.basic_publish(routing_key="test-ack-queue", content=Content("One"))
+ channel.basic_publish(routing_key="test-ack-queue", content=Content("Two"))
+ channel.basic_publish(routing_key="test-ack-queue", content=Content("Three"))
+ channel.basic_publish(routing_key="test-ack-queue", content=Content("Four"))
+ channel.basic_publish(routing_key="test-ack-queue", content=Content("Five"))
+
+ msg1 = queue.get(timeout=1)
+ msg2 = queue.get(timeout=1)
+ msg3 = queue.get(timeout=1)
+ msg4 = queue.get(timeout=1)
+ msg5 = queue.get(timeout=1)
+
+ self.assertEqual("One", msg1.content.body)
+ self.assertEqual("Two", msg2.content.body)
+ self.assertEqual("Three", msg3.content.body)
+ self.assertEqual("Four", msg4.content.body)
+ self.assertEqual("Five", msg5.content.body)
+
+ channel.basic_ack(delivery_tag=msg2.delivery_tag, multiple=True) #One & Two
+ channel.basic_ack(delivery_tag=msg4.delivery_tag, multiple=False) #Four
+
+ channel.basic_recover(requeue=False)
+
+ msg3b = queue.get(timeout=1)
+ msg5b = queue.get(timeout=1)
+
+ self.assertEqual("Three", msg3b.content.body)
+ self.assertEqual("Five", msg5b.content.body)
+
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.content.body)
+ except Empty: None
+
+ def test_recover_requeue(self):
+ """
+ Test requeueing on recovery
+ """
+ channel = self.channel
+ channel.queue_declare(queue="test-requeue", exclusive=True)
+
+ subscription = channel.basic_consume(queue="test-requeue", no_ack=False)
+ queue = self.client.queue(subscription.consumer_tag)
+
+ channel.basic_publish(routing_key="test-requeue", content=Content("One"))
+ channel.basic_publish(routing_key="test-requeue", content=Content("Two"))
+ channel.basic_publish(routing_key="test-requeue", content=Content("Three"))
+ channel.basic_publish(routing_key="test-requeue", content=Content("Four"))
+ channel.basic_publish(routing_key="test-requeue", content=Content("Five"))
+
+ msg1 = queue.get(timeout=1)
+ msg2 = queue.get(timeout=1)
+ msg3 = queue.get(timeout=1)
+ msg4 = queue.get(timeout=1)
+ msg5 = queue.get(timeout=1)
+
+ self.assertEqual("One", msg1.content.body)
+ self.assertEqual("Two", msg2.content.body)
+ self.assertEqual("Three", msg3.content.body)
+ self.assertEqual("Four", msg4.content.body)
+ self.assertEqual("Five", msg5.content.body)
+
+ channel.basic_ack(delivery_tag=msg2.delivery_tag, multiple=True) #One & Two
+ channel.basic_ack(delivery_tag=msg4.delivery_tag, multiple=False) #Four
+
+ channel.basic_cancel(consumer_tag=subscription.consumer_tag)
+ subscription2 = channel.basic_consume(queue="test-requeue")
+ queue2 = self.client.queue(subscription2.consumer_tag)
+
+ channel.basic_recover(requeue=True)
+
+ msg3b = queue2.get(timeout=1)
+ msg5b = queue2.get(timeout=1)
+
+ self.assertEqual("Three", msg3b.content.body)
+ self.assertEqual("Five", msg5b.content.body)
+
+ self.assertEqual(True, msg3b.redelivered)
+ self.assertEqual(True, msg5b.redelivered)
+
+ try:
+ extra = queue2.get(timeout=1)
+ self.fail("Got unexpected message in second queue: " + extra.content.body)
+ except Empty: None
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected message in original queue: " + extra.content.body)
+ except Empty: None
+
+
+ def test_qos_prefetch_count(self):
+ """
+ Test that the prefetch count specified is honoured
+ """
+ #setup: declare queue and subscribe
+ channel = self.channel
+ channel.queue_declare(queue="test-prefetch-count", exclusive=True)
+ subscription = channel.basic_consume(queue="test-prefetch-count", no_ack=False)
+ queue = self.client.queue(subscription.consumer_tag)
+
+ #set prefetch to 5:
+ channel.basic_qos(prefetch_count=5)
+
+ #publish 10 messages:
+ for i in range(1, 11):
+ channel.basic_publish(routing_key="test-prefetch-count", content=Content("Message %d" % i))
+
+ #only 5 messages should have been delivered:
+ for i in range(1, 6):
+ msg = queue.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.content.body)
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected 6th message in original queue: " + extra.content.body)
+ except Empty: None
+
+ #ack messages and check that the next set arrive ok:
+ channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
+
+ for i in range(6, 11):
+ msg = queue.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.content.body)
+
+ channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
+
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected 11th message in original queue: " + extra.content.body)
+ except Empty: None
+
+
+
+ def test_qos_prefetch_size(self):
+ """
+ Test that the prefetch size specified is honoured
+ """
+ #setup: declare queue and subscribe
+ channel = self.channel
+ channel.queue_declare(queue="test-prefetch-size", exclusive=True)
+ subscription = channel.basic_consume(queue="test-prefetch-size", no_ack=False)
+ queue = self.client.queue(subscription.consumer_tag)
+
+ #set prefetch to 50 bytes (each message is 9 or 10 bytes):
+ channel.basic_qos(prefetch_size=50)
+
+ #publish 10 messages:
+ for i in range(1, 11):
+ channel.basic_publish(routing_key="test-prefetch-size", content=Content("Message %d" % i))
+
+ #only 5 messages should have been delivered (i.e. 45 bytes worth):
+ for i in range(1, 6):
+ msg = queue.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.content.body)
+
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected 6th message in original queue: " + extra.content.body)
+ except Empty: None
+
+ #ack messages and check that the next set arrive ok:
+ channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
+
+ for i in range(6, 11):
+ msg = queue.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.content.body)
+
+ channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
+
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected 11th message in original queue: " + extra.content.body)
+ except Empty: None
+
+ #make sure that a single oversized message still gets delivered
+ large = "abcdefghijklmnopqrstuvwxyz"
+ large = large + "-" + large;
+ channel.basic_publish(routing_key="test-prefetch-size", content=Content(large))
+ msg = queue.get(timeout=1)
+ self.assertEqual(large, msg.content.body)
+
+ def test_get(self):
+ """
+ Test basic_get method
+ """
+ channel = self.channel
+ channel.queue_declare(queue="test-get", exclusive=True)
+
+ #publish some messages (no_ack=True)
+ for i in range(1, 11):
+ channel.basic_publish(routing_key="test-get", content=Content("Message %d" % i))
+
+ #use basic_get to read back the messages, and check that we get an empty at the end
+ for i in range(1, 11):
+ reply = channel.basic_get(no_ack=True)
+ self.assertEqual(reply.method.klass.name, "basic")
+ self.assertEqual(reply.method.name, "get_ok")
+ self.assertEqual("Message %d" % i, reply.content.body)
+
+ reply = channel.basic_get(no_ack=True)
+ self.assertEqual(reply.method.klass.name, "basic")
+ self.assertEqual(reply.method.name, "get_empty")
+
+ #repeat for no_ack=False
+ for i in range(11, 21):
+ channel.basic_publish(routing_key="test-get", content=Content("Message %d" % i))
+
+ for i in range(11, 21):
+ reply = channel.basic_get(no_ack=False)
+ self.assertEqual(reply.method.klass.name, "basic")
+ self.assertEqual(reply.method.name, "get_ok")
+ self.assertEqual("Message %d" % i, reply.content.body)
+ if(i == 13):
+ channel.basic_ack(delivery_tag=reply.delivery_tag, multiple=True)
+ if(i in [15, 17, 19]):
+ channel.basic_ack(delivery_tag=reply.delivery_tag)
+
+ reply = channel.basic_get(no_ack=True)
+ self.assertEqual(reply.method.klass.name, "basic")
+ self.assertEqual(reply.method.name, "get_empty")
+
+ #recover(requeue=True)
+ channel.basic_recover(requeue=True)
+
+ #get the unacked messages again (14, 16, 18, 20)
+ for i in [14, 16, 18, 20]:
+ reply = channel.basic_get(no_ack=False)
+ self.assertEqual(reply.method.klass.name, "basic")
+ self.assertEqual(reply.method.name, "get_ok")
+ self.assertEqual("Message %d" % i, reply.content.body)
+ channel.basic_ack(delivery_tag=reply.delivery_tag)
+
+ reply = channel.basic_get(no_ack=True)
+ self.assertEqual(reply.method.klass.name, "basic")
+ self.assertEqual(reply.method.name, "get_empty")
+
+ channel.basic_recover(requeue=True)
+
+ reply = channel.basic_get(no_ack=True)
+ self.assertEqual(reply.method.klass.name, "basic")
+ self.assertEqual(reply.method.name, "get_empty")
diff --git a/RC9/qpid/python/tests_0-8/broker.py b/RC9/qpid/python/tests_0-8/broker.py
new file mode 100644
index 0000000000..d9ac69c5e3
--- /dev/null
+++ b/RC9/qpid/python/tests_0-8/broker.py
@@ -0,0 +1,104 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Closed
+from qpid.queue import Empty
+from qpid.content import Content
+from qpid.testlib import testrunner, TestBase
+
+class BrokerTests(TestBase):
+ """Tests for basic Broker functionality"""
+
+ def test_amqp_basic_13(self):
+ """
+ First, this test tries to receive a message with a no-ack
+ consumer. Second, this test tries to explicitely receive and
+ acknowledge a message with an acknowledging consumer.
+ """
+ ch = self.channel
+ self.queue_declare(ch, queue = "myqueue")
+
+ # No ack consumer
+ ctag = ch.basic_consume(queue = "myqueue", no_ack = True).consumer_tag
+ body = "test no-ack"
+ ch.basic_publish(routing_key = "myqueue", content = Content(body))
+ msg = self.client.queue(ctag).get(timeout = 5)
+ self.assert_(msg.content.body == body)
+
+ # Acknowleding consumer
+ self.queue_declare(ch, queue = "otherqueue")
+ ctag = ch.basic_consume(queue = "otherqueue", no_ack = False).consumer_tag
+ body = "test ack"
+ ch.basic_publish(routing_key = "otherqueue", content = Content(body))
+ msg = self.client.queue(ctag).get(timeout = 5)
+ ch.basic_ack(delivery_tag = msg.delivery_tag)
+ self.assert_(msg.content.body == body)
+
+ def test_basic_delivery_immediate(self):
+ """
+ Test basic message delivery where consume is issued before publish
+ """
+ channel = self.channel
+ self.exchange_declare(channel, exchange="test-exchange", type="direct")
+ self.queue_declare(channel, queue="test-queue")
+ channel.queue_bind(queue="test-queue", exchange="test-exchange", routing_key="key")
+ reply = channel.basic_consume(queue="test-queue", no_ack=True)
+ queue = self.client.queue(reply.consumer_tag)
+
+ body = "Immediate Delivery"
+ channel.basic_publish(exchange="test-exchange", routing_key="key", content=Content(body), immediate=True)
+ msg = queue.get(timeout=5)
+ self.assert_(msg.content.body == body)
+
+ # TODO: Ensure we fail if immediate=True and there's no consumer.
+
+
+ def test_basic_delivery_queued(self):
+ """
+ Test basic message delivery where publish is issued before consume
+ (i.e. requires queueing of the message)
+ """
+ channel = self.channel
+ self.exchange_declare(channel, exchange="test-exchange", type="direct")
+ self.queue_declare(channel, queue="test-queue")
+ channel.queue_bind(queue="test-queue", exchange="test-exchange", routing_key="key")
+ body = "Queued Delivery"
+ channel.basic_publish(exchange="test-exchange", routing_key="key", content=Content(body))
+ reply = channel.basic_consume(queue="test-queue", no_ack=True)
+ queue = self.client.queue(reply.consumer_tag)
+ msg = queue.get(timeout=5)
+ self.assert_(msg.content.body == body)
+
+ def test_invalid_channel(self):
+ channel = self.client.channel(200)
+ try:
+ channel.queue_declare(exclusive=True)
+ self.fail("Expected error on queue_declare for invalid channel")
+ except Closed, e:
+ self.assertConnectionException(504, e.args[0])
+
+ def test_closed_channel(self):
+ channel = self.client.channel(200)
+ channel.channel_open()
+ channel.channel_close()
+ try:
+ channel.queue_declare(exclusive=True)
+ self.fail("Expected error on queue_declare for closed channel")
+ except Closed, e:
+ self.assertConnectionException(504, e.args[0])
+
diff --git a/RC9/qpid/python/tests_0-8/example.py b/RC9/qpid/python/tests_0-8/example.py
new file mode 100644
index 0000000000..a1949ccb9f
--- /dev/null
+++ b/RC9/qpid/python/tests_0-8/example.py
@@ -0,0 +1,94 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from qpid.content import Content
+from qpid.testlib import testrunner, TestBase
+
+class ExampleTest (TestBase):
+ """
+ An example Qpid test, illustrating the unittest frameowkr and the
+ python Qpid client. The test class must inherit TestCase. The
+ test code uses the Qpid client to interact with a qpid broker and
+ verify it behaves as expected.
+ """
+
+ def test_example(self):
+ """
+ An example test. Note that test functions must start with 'test_'
+ to be recognized by the test framework.
+ """
+
+ # By inheriting TestBase, self.client is automatically connected
+ # and self.channel is automatically opened as channel(1)
+ # Other channel methods mimic the protocol.
+ channel = self.channel
+
+ # Now we can send regular commands. If you want to see what the method
+ # arguments mean or what other commands are available, you can use the
+ # python builtin help() method. For example:
+ #help(chan)
+ #help(chan.exchange_declare)
+
+ # If you want browse the available protocol methods without being
+ # connected to a live server you can use the amqp-doc utility:
+ #
+ # Usage amqp-doc [<options>] <spec> [<pattern_1> ... <pattern_n>]
+ #
+ # Options:
+ # -e, --regexp use regex instead of glob when matching
+
+ # Now that we know what commands are available we can use them to
+ # interact with the server.
+
+ # Here we use ordinal arguments.
+ self.exchange_declare(channel, 0, "test", "direct")
+
+ # Here we use keyword arguments.
+ self.queue_declare(channel, queue="test-queue")
+ channel.queue_bind(queue="test-queue", exchange="test", routing_key="key")
+
+ # Call Channel.basic_consume to register as a consumer.
+ # All the protocol methods return a message object. The message object
+ # has fields corresponding to the reply method fields, plus a content
+ # field that is filled if the reply includes content. In this case the
+ # interesting field is the consumer_tag.
+ reply = channel.basic_consume(queue="test-queue")
+
+ # We can use the Client.queue(...) method to access the queue
+ # corresponding to our consumer_tag.
+ queue = self.client.queue(reply.consumer_tag)
+
+ # Now lets publish a message and see if our consumer gets it. To do
+ # this we need to import the Content class.
+ body = "Hello World!"
+ channel.basic_publish(exchange="test",
+ routing_key="key",
+ content=Content(body))
+
+ # Now we'll wait for the message to arrive. We can use the timeout
+ # argument in case the server hangs. By default queue.get() will wait
+ # until a message arrives or the connection to the server dies.
+ msg = queue.get(timeout=10)
+
+ # And check that we got the right response with assertEqual
+ self.assertEqual(body, msg.content.body)
+
+ # Now acknowledge the message.
+ channel.basic_ack(msg.delivery_tag, True)
+
diff --git a/RC9/qpid/python/tests_0-8/exchange.py b/RC9/qpid/python/tests_0-8/exchange.py
new file mode 100644
index 0000000000..56d6fa82e4
--- /dev/null
+++ b/RC9/qpid/python/tests_0-8/exchange.py
@@ -0,0 +1,327 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+Tests for exchange behaviour.
+
+Test classes ending in 'RuleTests' are derived from rules in amqp.xml.
+"""
+
+import Queue, logging
+from qpid.testlib import TestBase
+from qpid.content import Content
+from qpid.client import Closed
+
+
+class StandardExchangeVerifier:
+ """Verifies standard exchange behavior.
+
+ Used as base class for classes that test standard exchanges."""
+
+ def verifyDirectExchange(self, ex):
+ """Verify that ex behaves like a direct exchange."""
+ self.queue_declare(queue="q")
+ self.channel.queue_bind(queue="q", exchange=ex, routing_key="k")
+ self.assertPublishConsume(exchange=ex, queue="q", routing_key="k")
+ try:
+ self.assertPublishConsume(exchange=ex, queue="q", routing_key="kk")
+ self.fail("Expected Empty exception")
+ except Queue.Empty: None # Expected
+
+ def verifyFanOutExchange(self, ex):
+ """Verify that ex behaves like a fanout exchange."""
+ self.queue_declare(queue="q")
+ self.channel.queue_bind(queue="q", exchange=ex)
+ self.queue_declare(queue="p")
+ self.channel.queue_bind(queue="p", exchange=ex)
+ for qname in ["q", "p"]: self.assertPublishGet(self.consume(qname), ex)
+
+ def verifyTopicExchange(self, ex):
+ """Verify that ex behaves like a topic exchange"""
+ self.queue_declare(queue="a")
+ self.channel.queue_bind(queue="a", exchange=ex, routing_key="a.#.b.*")
+ q = self.consume("a")
+ self.assertPublishGet(q, ex, "a.b.x")
+ self.assertPublishGet(q, ex, "a.x.b.x")
+ self.assertPublishGet(q, ex, "a.x.x.b.x")
+ # Shouldn't match
+ self.channel.basic_publish(exchange=ex, routing_key="a.b")
+ self.channel.basic_publish(exchange=ex, routing_key="a.b.x.y")
+ self.channel.basic_publish(exchange=ex, routing_key="x.a.b.x")
+ self.channel.basic_publish(exchange=ex, routing_key="a.b")
+ self.assert_(q.empty())
+
+ def verifyHeadersExchange(self, ex):
+ """Verify that ex is a headers exchange"""
+ self.queue_declare(queue="q")
+ self.channel.queue_bind(queue="q", exchange=ex, arguments={ "x-match":"all", "name":"fred" , "age":3} )
+ q = self.consume("q")
+ headers = {"name":"fred", "age":3}
+ self.assertPublishGet(q, exchange=ex, properties={'headers':headers})
+ self.channel.basic_publish(exchange=ex) # No headers, won't deliver
+ self.assertEmpty(q);
+
+
+class RecommendedTypesRuleTests(TestBase, StandardExchangeVerifier):
+ """
+ The server SHOULD implement these standard exchange types: topic, headers.
+
+ Client attempts to declare an exchange with each of these standard types.
+ """
+
+ def testDirect(self):
+ """Declare and test a direct exchange"""
+ self.exchange_declare(0, exchange="d", type="direct")
+ self.verifyDirectExchange("d")
+
+ def testFanout(self):
+ """Declare and test a fanout exchange"""
+ self.exchange_declare(0, exchange="f", type="fanout")
+ self.verifyFanOutExchange("f")
+
+ def testTopic(self):
+ """Declare and test a topic exchange"""
+ self.exchange_declare(0, exchange="t", type="topic")
+ self.verifyTopicExchange("t")
+
+ def testHeaders(self):
+ """Declare and test a headers exchange"""
+ self.exchange_declare(0, exchange="h", type="headers")
+ self.verifyHeadersExchange("h")
+
+
+class RequiredInstancesRuleTests(TestBase, StandardExchangeVerifier):
+ """
+ The server MUST, in each virtual host, pre-declare an exchange instance
+ for each standard exchange type that it implements, where the name of the
+ exchange instance is amq. followed by the exchange type name.
+
+ Client creates a temporary queue and attempts to bind to each required
+ exchange instance (amq.fanout, amq.direct, and amq.topic, amq.match if
+ those types are defined).
+ """
+ def testAmqDirect(self): self.verifyDirectExchange("amq.direct")
+
+ def testAmqFanOut(self): self.verifyFanOutExchange("amq.fanout")
+
+ def testAmqTopic(self): self.verifyTopicExchange("amq.topic")
+
+ def testAmqMatch(self): self.verifyHeadersExchange("amq.match")
+
+class DefaultExchangeRuleTests(TestBase, StandardExchangeVerifier):
+ """
+ The server MUST predeclare a direct exchange to act as the default exchange
+ for content Publish methods and for default queue bindings.
+
+ Client checks that the default exchange is active by specifying a queue
+ binding with no exchange name, and publishing a message with a suitable
+ routing key but without specifying the exchange name, then ensuring that
+ the message arrives in the queue correctly.
+ """
+ def testDefaultExchange(self):
+ # Test automatic binding by queue name.
+ self.queue_declare(queue="d")
+ self.assertPublishConsume(queue="d", routing_key="d")
+ # Test explicit bind to default queue
+ self.verifyDirectExchange("")
+
+
+# TODO aconway 2006-09-27: Fill in empty tests:
+
+class DefaultAccessRuleTests(TestBase):
+ """
+ The server MUST NOT allow clients to access the default exchange except
+ by specifying an empty exchange name in the Queue.Bind and content Publish
+ methods.
+ """
+
+class ExtensionsRuleTests(TestBase):
+ """
+ The server MAY implement other exchange types as wanted.
+ """
+
+
+class DeclareMethodMinimumRuleTests(TestBase):
+ """
+ The server SHOULD support a minimum of 16 exchanges per virtual host and
+ ideally, impose no limit except as defined by available resources.
+
+ The client creates as many exchanges as it can until the server reports
+ an error; the number of exchanges successfuly created must be at least
+ sixteen.
+ """
+
+
+class DeclareMethodTicketFieldValidityRuleTests(TestBase):
+ """
+ The client MUST provide a valid access ticket giving "active" access to
+ the realm in which the exchange exists or will be created, or "passive"
+ access if the if-exists flag is set.
+
+ Client creates access ticket with wrong access rights and attempts to use
+ in this method.
+ """
+
+
+class DeclareMethodExchangeFieldReservedRuleTests(TestBase):
+ """
+ Exchange names starting with "amq." are reserved for predeclared and
+ standardised exchanges. The client MUST NOT attempt to create an exchange
+ starting with "amq.".
+
+
+ """
+
+
+class DeclareMethodTypeFieldTypedRuleTests(TestBase):
+ """
+ Exchanges cannot be redeclared with different types. The client MUST not
+ attempt to redeclare an existing exchange with a different type than used
+ in the original Exchange.Declare method.
+
+
+ """
+
+
+class DeclareMethodTypeFieldSupportRuleTests(TestBase):
+ """
+ The client MUST NOT attempt to create an exchange with a type that the
+ server does not support.
+
+
+ """
+
+
+class DeclareMethodPassiveFieldNotFoundRuleTests(TestBase):
+ """
+ If set, and the exchange does not already exist, the server MUST raise a
+ channel exception with reply code 404 (not found).
+ """
+ def test(self):
+ try:
+ self.channel.exchange_declare(exchange="humpty_dumpty", passive=True)
+ self.fail("Expected 404 for passive declaration of unknown exchange.")
+ except Closed, e:
+ self.assertChannelException(404, e.args[0])
+
+
+class DeclareMethodDurableFieldSupportRuleTests(TestBase):
+ """
+ The server MUST support both durable and transient exchanges.
+
+
+ """
+
+
+class DeclareMethodDurableFieldStickyRuleTests(TestBase):
+ """
+ The server MUST ignore the durable field if the exchange already exists.
+
+
+ """
+
+
+class DeclareMethodAutoDeleteFieldStickyRuleTests(TestBase):
+ """
+ The server MUST ignore the auto-delete field if the exchange already
+ exists.
+
+
+ """
+
+
+class DeleteMethodTicketFieldValidityRuleTests(TestBase):
+ """
+ The client MUST provide a valid access ticket giving "active" access
+ rights to the exchange's access realm.
+
+ Client creates access ticket with wrong access rights and attempts to use
+ in this method.
+ """
+
+
+class DeleteMethodExchangeFieldExistsRuleTests(TestBase):
+ """
+ The client MUST NOT attempt to delete an exchange that does not exist.
+ """
+
+
+class HeadersExchangeTests(TestBase):
+ """
+ Tests for headers exchange functionality.
+ """
+ def setUp(self):
+ TestBase.setUp(self)
+ self.queue_declare(queue="q")
+ self.q = self.consume("q")
+
+ def myAssertPublishGet(self, headers):
+ self.assertPublishGet(self.q, exchange="amq.match", properties={'headers':headers})
+
+ def myBasicPublish(self, headers):
+ self.channel.basic_publish(exchange="amq.match", content=Content("foobar", properties={'headers':headers}))
+
+ def testMatchAll(self):
+ self.channel.queue_bind(queue="q", exchange="amq.match", arguments={ 'x-match':'all', "name":"fred", "age":3})
+ self.myAssertPublishGet({"name":"fred", "age":3})
+ self.myAssertPublishGet({"name":"fred", "age":3, "extra":"ignoreme"})
+
+ # None of these should match
+ self.myBasicPublish({})
+ self.myBasicPublish({"name":"barney"})
+ self.myBasicPublish({"name":10})
+ self.myBasicPublish({"name":"fred", "age":2})
+ self.assertEmpty(self.q)
+
+ def testMatchAny(self):
+ self.channel.queue_bind(queue="q", exchange="amq.match", arguments={ 'x-match':'any', "name":"fred", "age":3})
+ self.myAssertPublishGet({"name":"fred"})
+ self.myAssertPublishGet({"name":"fred", "ignoreme":10})
+ self.myAssertPublishGet({"ignoreme":10, "age":3})
+
+ # Wont match
+ self.myBasicPublish({})
+ self.myBasicPublish({"irrelevant":0})
+ self.assertEmpty(self.q)
+
+
+class MiscellaneousErrorsTests(TestBase):
+ """
+ Test some miscellaneous error conditions
+ """
+ def testTypeNotKnown(self):
+ try:
+ self.channel.exchange_declare(exchange="test_type_not_known_exchange", type="invalid_type")
+ self.fail("Expected 503 for declaration of unknown exchange type.")
+ except Closed, e:
+ self.assertConnectionException(503, e.args[0])
+
+ def testDifferentDeclaredType(self):
+ self.channel.exchange_declare(exchange="test_different_declared_type_exchange", type="direct")
+ try:
+ self.channel.exchange_declare(exchange="test_different_declared_type_exchange", type="topic")
+ self.fail("Expected 530 for redeclaration of exchange with different type.")
+ except Closed, e:
+ self.assertConnectionException(530, e.args[0])
+ #cleanup
+ other = self.connect()
+ c2 = other.channel(1)
+ c2.channel_open()
+ c2.exchange_delete(exchange="test_different_declared_type_exchange")
+
diff --git a/RC9/qpid/python/tests_0-8/queue.py b/RC9/qpid/python/tests_0-8/queue.py
new file mode 100644
index 0000000000..60ac4c3dfb
--- /dev/null
+++ b/RC9/qpid/python/tests_0-8/queue.py
@@ -0,0 +1,255 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.content import Content
+from qpid.testlib import testrunner, TestBase
+
+class QueueTests(TestBase):
+ """Tests for 'methods' on the amqp queue 'class'"""
+
+ def test_purge(self):
+ """
+ Test that the purge method removes messages from the queue
+ """
+ channel = self.channel
+ #setup, declare a queue and add some messages to it:
+ channel.exchange_declare(exchange="test-exchange", type="direct")
+ channel.queue_declare(queue="test-queue", exclusive=True)
+ channel.queue_bind(queue="test-queue", exchange="test-exchange", routing_key="key")
+ channel.basic_publish(exchange="test-exchange", routing_key="key", content=Content("one"))
+ channel.basic_publish(exchange="test-exchange", routing_key="key", content=Content("two"))
+ channel.basic_publish(exchange="test-exchange", routing_key="key", content=Content("three"))
+
+ #check that the queue now reports 3 messages:
+ reply = channel.queue_declare(queue="test-queue")
+ self.assertEqual(3, reply.message_count)
+
+ #now do the purge, then test that three messages are purged and the count drops to 0
+ reply = channel.queue_purge(queue="test-queue");
+ self.assertEqual(3, reply.message_count)
+ reply = channel.queue_declare(queue="test-queue")
+ self.assertEqual(0, reply.message_count)
+
+ #send a further message and consume it, ensuring that the other messages are really gone
+ channel.basic_publish(exchange="test-exchange", routing_key="key", content=Content("four"))
+ reply = channel.basic_consume(queue="test-queue", no_ack=True)
+ queue = self.client.queue(reply.consumer_tag)
+ msg = queue.get(timeout=1)
+ self.assertEqual("four", msg.content.body)
+
+ #check error conditions (use new channels):
+ channel = self.client.channel(2)
+ channel.channel_open()
+ try:
+ #queue specified but doesn't exist:
+ channel.queue_purge(queue="invalid-queue")
+ self.fail("Expected failure when purging non-existent queue")
+ except Closed, e:
+ self.assertChannelException(404, e.args[0])
+
+ channel = self.client.channel(3)
+ channel.channel_open()
+ try:
+ #queue not specified and none previously declared for channel:
+ channel.queue_purge()
+ self.fail("Expected failure when purging unspecified queue")
+ except Closed, e:
+ self.assertConnectionException(530, e.args[0])
+
+ #cleanup
+ other = self.connect()
+ channel = other.channel(1)
+ channel.channel_open()
+ channel.exchange_delete(exchange="test-exchange")
+
+ def test_declare_exclusive(self):
+ """
+ Test that the exclusive field is honoured in queue.declare
+ """
+ # TestBase.setUp has already opened channel(1)
+ c1 = self.channel
+ # Here we open a second separate connection:
+ other = self.connect()
+ c2 = other.channel(1)
+ c2.channel_open()
+
+ #declare an exclusive queue:
+ c1.queue_declare(queue="exclusive-queue", exclusive="True")
+ try:
+ #other connection should not be allowed to declare this:
+ c2.queue_declare(queue="exclusive-queue", exclusive="True")
+ self.fail("Expected second exclusive queue_declare to raise a channel exception")
+ except Closed, e:
+ self.assertChannelException(405, e.args[0])
+
+
+ def test_declare_passive(self):
+ """
+ Test that the passive field is honoured in queue.declare
+ """
+ channel = self.channel
+ #declare an exclusive queue:
+ channel.queue_declare(queue="passive-queue-1", exclusive="True")
+ channel.queue_declare(queue="passive-queue-1", passive="True")
+ try:
+ #other connection should not be allowed to declare this:
+ channel.queue_declare(queue="passive-queue-2", passive="True")
+ self.fail("Expected passive declaration of non-existant queue to raise a channel exception")
+ except Closed, e:
+ self.assertChannelException(404, e.args[0])
+
+
+ def test_bind(self):
+ """
+ Test various permutations of the queue.bind method
+ """
+ channel = self.channel
+ channel.queue_declare(queue="queue-1", exclusive="True")
+
+ #straightforward case, both exchange & queue exist so no errors expected:
+ channel.queue_bind(queue="queue-1", exchange="amq.direct", routing_key="key1")
+
+ #bind the default queue for the channel (i.e. last one declared):
+ channel.queue_bind(exchange="amq.direct", routing_key="key2")
+
+ #use the queue name where neither routing key nor queue are specified:
+ channel.queue_bind(exchange="amq.direct")
+
+ #try and bind to non-existant exchange
+ try:
+ channel.queue_bind(queue="queue-1", exchange="an-invalid-exchange", routing_key="key1")
+ self.fail("Expected bind to non-existant exchange to fail")
+ except Closed, e:
+ self.assertChannelException(404, e.args[0])
+
+ #need to reopen a channel:
+ channel = self.client.channel(2)
+ channel.channel_open()
+
+ #try and bind non-existant queue:
+ try:
+ channel.queue_bind(queue="queue-2", exchange="amq.direct", routing_key="key1")
+ self.fail("Expected bind of non-existant queue to fail")
+ except Closed, e:
+ self.assertChannelException(404, e.args[0])
+
+
+ def test_delete_simple(self):
+ """
+ Test basic queue deletion
+ """
+ channel = self.channel
+
+ #straight-forward case:
+ channel.queue_declare(queue="delete-me")
+ channel.basic_publish(routing_key="delete-me", content=Content("a"))
+ channel.basic_publish(routing_key="delete-me", content=Content("b"))
+ channel.basic_publish(routing_key="delete-me", content=Content("c"))
+ reply = channel.queue_delete(queue="delete-me")
+ self.assertEqual(3, reply.message_count)
+ #check that it has gone be declaring passively
+ try:
+ channel.queue_declare(queue="delete-me", passive="True")
+ self.fail("Queue has not been deleted")
+ except Closed, e:
+ self.assertChannelException(404, e.args[0])
+
+ #check attempted deletion of non-existant queue is handled correctly:
+ channel = self.client.channel(2)
+ channel.channel_open()
+ try:
+ channel.queue_delete(queue="i-dont-exist", if_empty="True")
+ self.fail("Expected delete of non-existant queue to fail")
+ except Closed, e:
+ self.assertChannelException(404, e.args[0])
+
+
+
+ def test_delete_ifempty(self):
+ """
+ Test that if_empty field of queue_delete is honoured
+ """
+ channel = self.channel
+
+ #create a queue and add a message to it (use default binding):
+ channel.queue_declare(queue="delete-me-2")
+ channel.queue_declare(queue="delete-me-2", passive="True")
+ channel.basic_publish(routing_key="delete-me-2", content=Content("message"))
+
+ #try to delete, but only if empty:
+ try:
+ channel.queue_delete(queue="delete-me-2", if_empty="True")
+ self.fail("Expected delete if_empty to fail for non-empty queue")
+ except Closed, e:
+ self.assertChannelException(406, e.args[0])
+
+ #need new channel now:
+ channel = self.client.channel(2)
+ channel.channel_open()
+
+ #empty queue:
+ reply = channel.basic_consume(queue="delete-me-2", no_ack=True)
+ queue = self.client.queue(reply.consumer_tag)
+ msg = queue.get(timeout=1)
+ self.assertEqual("message", msg.content.body)
+ channel.basic_cancel(consumer_tag=reply.consumer_tag)
+
+ #retry deletion on empty queue:
+ channel.queue_delete(queue="delete-me-2", if_empty="True")
+
+ #check that it has gone by declaring passively:
+ try:
+ channel.queue_declare(queue="delete-me-2", passive="True")
+ self.fail("Queue has not been deleted")
+ except Closed, e:
+ self.assertChannelException(404, e.args[0])
+
+ def test_delete_ifunused(self):
+ """
+ Test that if_unused field of queue_delete is honoured
+ """
+ channel = self.channel
+
+ #create a queue and register a consumer:
+ channel.queue_declare(queue="delete-me-3")
+ channel.queue_declare(queue="delete-me-3", passive="True")
+ reply = channel.basic_consume(queue="delete-me-3", no_ack=True)
+
+ #need new channel now:
+ channel2 = self.client.channel(2)
+ channel2.channel_open()
+ #try to delete, but only if empty:
+ try:
+ channel2.queue_delete(queue="delete-me-3", if_unused="True")
+ self.fail("Expected delete if_unused to fail for queue with existing consumer")
+ except Closed, e:
+ self.assertChannelException(406, e.args[0])
+
+
+ channel.basic_cancel(consumer_tag=reply.consumer_tag)
+ channel.queue_delete(queue="delete-me-3", if_unused="True")
+ #check that it has gone by declaring passively:
+ try:
+ channel.queue_declare(queue="delete-me-3", passive="True")
+ self.fail("Queue has not been deleted")
+ except Closed, e:
+ self.assertChannelException(404, e.args[0])
+
+
diff --git a/RC9/qpid/python/tests_0-8/testlib.py b/RC9/qpid/python/tests_0-8/testlib.py
new file mode 100644
index 0000000000..cab07cc4ac
--- /dev/null
+++ b/RC9/qpid/python/tests_0-8/testlib.py
@@ -0,0 +1,66 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+#
+# Tests for the testlib itself.
+#
+
+from qpid.content import Content
+from qpid.testlib import testrunner, TestBase
+from Queue import Empty
+
+import sys
+from traceback import *
+
+def mytrace(frame, event, arg):
+ print_stack(frame);
+ print "===="
+ return mytrace
+
+class TestBaseTest(TestBase):
+ """Verify TestBase functions work as expected"""
+
+ def testAssertEmptyPass(self):
+ """Test assert empty works"""
+ self.queue_declare(queue="empty")
+ q = self.consume("empty")
+ self.assertEmpty(q)
+ try:
+ q.get(timeout=1)
+ self.fail("Queue is not empty.")
+ except Empty: None # Ignore
+
+ def testAssertEmptyFail(self):
+ self.queue_declare(queue="full")
+ q = self.consume("full")
+ self.channel.basic_publish(routing_key="full")
+ try:
+ self.assertEmpty(q);
+ self.fail("assertEmpty did not assert on non-empty queue")
+ except AssertionError: None # Ignore
+
+ def testMessageProperties(self):
+ """Verify properties are passed with message"""
+ props={"headers":{"x":1, "y":2}}
+ self.queue_declare(queue="q")
+ q = self.consume("q")
+ self.assertPublishGet(q, routing_key="q", properties=props)
+
+
+
diff --git a/RC9/qpid/python/tests_0-8/tx.py b/RC9/qpid/python/tests_0-8/tx.py
new file mode 100644
index 0000000000..054fb8d8b7
--- /dev/null
+++ b/RC9/qpid/python/tests_0-8/tx.py
@@ -0,0 +1,209 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.content import Content
+from qpid.testlib import testrunner, TestBase
+
+class TxTests(TestBase):
+ """
+ Tests for 'methods' on the amqp tx 'class'
+ """
+
+ def test_commit(self):
+ """
+ Test that commited publishes are delivered and commited acks are not re-delivered
+ """
+ channel = self.channel
+ queue_a, queue_b, queue_c = self.perform_txn_work(channel, "tx-commit-a", "tx-commit-b", "tx-commit-c")
+ channel.tx_commit()
+
+ #check results
+ for i in range(1, 5):
+ msg = queue_c.get(timeout=1)
+ self.assertEqual("TxMessage %d" % i, msg.content.body)
+
+ msg = queue_b.get(timeout=1)
+ self.assertEqual("TxMessage 6", msg.content.body)
+
+ msg = queue_a.get(timeout=1)
+ self.assertEqual("TxMessage 7", msg.content.body)
+
+ for q in [queue_a, queue_b, queue_c]:
+ try:
+ extra = q.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.content.body)
+ except Empty: None
+
+ #cleanup
+ channel.basic_ack(delivery_tag=0, multiple=True)
+ channel.tx_commit()
+
+ def test_auto_rollback(self):
+ """
+ Test that a channel closed with an open transaction is effectively rolled back
+ """
+ channel = self.channel
+ queue_a, queue_b, queue_c = self.perform_txn_work(channel, "tx-autorollback-a", "tx-autorollback-b", "tx-autorollback-c")
+
+ for q in [queue_a, queue_b, queue_c]:
+ try:
+ extra = q.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.content.body)
+ except Empty: None
+
+ channel.tx_rollback()
+
+ #check results
+ for i in range(1, 5):
+ msg = queue_a.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.content.body)
+
+ msg = queue_b.get(timeout=1)
+ self.assertEqual("Message 6", msg.content.body)
+
+ msg = queue_c.get(timeout=1)
+ self.assertEqual("Message 7", msg.content.body)
+
+ for q in [queue_a, queue_b, queue_c]:
+ try:
+ extra = q.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.content.body)
+ except Empty: None
+
+ #cleanup
+ channel.basic_ack(delivery_tag=0, multiple=True)
+ channel.tx_commit()
+
+ def test_rollback(self):
+ """
+ Test that rolled back publishes are not delivered and rolled back acks are re-delivered
+ """
+ channel = self.channel
+ queue_a, queue_b, queue_c = self.perform_txn_work(channel, "tx-rollback-a", "tx-rollback-b", "tx-rollback-c")
+
+ for q in [queue_a, queue_b, queue_c]:
+ try:
+ extra = q.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.content.body)
+ except Empty: None
+
+ channel.tx_rollback()
+
+ #check results
+ for i in range(1, 5):
+ msg = queue_a.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.content.body)
+
+ msg = queue_b.get(timeout=1)
+ self.assertEqual("Message 6", msg.content.body)
+
+ msg = queue_c.get(timeout=1)
+ self.assertEqual("Message 7", msg.content.body)
+
+ for q in [queue_a, queue_b, queue_c]:
+ try:
+ extra = q.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.content.body)
+ except Empty: None
+
+ #cleanup
+ channel.basic_ack(delivery_tag=0, multiple=True)
+ channel.tx_commit()
+
+ def perform_txn_work(self, channel, name_a, name_b, name_c):
+ """
+ Utility method that does some setup and some work under a transaction. Used for testing both
+ commit and rollback
+ """
+ #setup:
+ channel.queue_declare(queue=name_a, exclusive=True)
+ channel.queue_declare(queue=name_b, exclusive=True)
+ channel.queue_declare(queue=name_c, exclusive=True)
+
+ key = "my_key_" + name_b
+ topic = "my_topic_" + name_c
+
+ channel.queue_bind(queue=name_b, exchange="amq.direct", routing_key=key)
+ channel.queue_bind(queue=name_c, exchange="amq.topic", routing_key=topic)
+
+ for i in range(1, 5):
+ channel.basic_publish(routing_key=name_a, content=Content("Message %d" % i))
+
+ channel.basic_publish(routing_key=key, exchange="amq.direct", content=Content("Message 6"))
+ channel.basic_publish(routing_key=topic, exchange="amq.topic", content=Content("Message 7"))
+
+ channel.tx_select()
+
+ #consume and ack messages
+ sub_a = channel.basic_consume(queue=name_a, no_ack=False)
+ queue_a = self.client.queue(sub_a.consumer_tag)
+ for i in range(1, 5):
+ msg = queue_a.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.content.body)
+ channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
+
+ sub_b = channel.basic_consume(queue=name_b, no_ack=False)
+ queue_b = self.client.queue(sub_b.consumer_tag)
+ msg = queue_b.get(timeout=1)
+ self.assertEqual("Message 6", msg.content.body)
+ channel.basic_ack(delivery_tag=msg.delivery_tag)
+
+ sub_c = channel.basic_consume(queue=name_c, no_ack=False)
+ queue_c = self.client.queue(sub_c.consumer_tag)
+ msg = queue_c.get(timeout=1)
+ self.assertEqual("Message 7", msg.content.body)
+ channel.basic_ack(delivery_tag=msg.delivery_tag)
+
+ #publish messages
+ for i in range(1, 5):
+ channel.basic_publish(routing_key=topic, exchange="amq.topic", content=Content("TxMessage %d" % i))
+
+ channel.basic_publish(routing_key=key, exchange="amq.direct", content=Content("TxMessage 6"))
+ channel.basic_publish(routing_key=name_a, content=Content("TxMessage 7"))
+
+ return queue_a, queue_b, queue_c
+
+ def test_commit_overlapping_acks(self):
+ """
+ Test that logically 'overlapping' acks do not cause errors on commit
+ """
+ channel = self.channel
+ channel.queue_declare(queue="commit-overlapping", exclusive=True)
+ for i in range(1, 10):
+ channel.basic_publish(routing_key="commit-overlapping", content=Content("Message %d" % i))
+
+
+ channel.tx_select()
+
+ sub = channel.basic_consume(queue="commit-overlapping", no_ack=False)
+ queue = self.client.queue(sub.consumer_tag)
+ for i in range(1, 10):
+ msg = queue.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.content.body)
+ if i in [3, 6, 10]:
+ channel.basic_ack(delivery_tag=msg.delivery_tag)
+
+ channel.tx_commit()
+
+ #check all have been acked:
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.content.body)
+ except Empty: None
diff --git a/RC9/qpid/python/tests_0-9/__init__.py b/RC9/qpid/python/tests_0-9/__init__.py
new file mode 100644
index 0000000000..9a09d2d04f
--- /dev/null
+++ b/RC9/qpid/python/tests_0-9/__init__.py
@@ -0,0 +1,20 @@
+# Do not delete - marks this directory as a python package.
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
diff --git a/RC9/qpid/python/tests_0-9/basic.py b/RC9/qpid/python/tests_0-9/basic.py
new file mode 100644
index 0000000000..607ba26343
--- /dev/null
+++ b/RC9/qpid/python/tests_0-9/basic.py
@@ -0,0 +1,396 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.content import Content
+from qpid.testlib import testrunner, TestBase
+
+class BasicTests(TestBase):
+ """Tests for 'methods' on the amqp basic 'class'"""
+
+ def test_consume_no_local(self):
+ """
+ Test that the no_local flag is honoured in the consume method
+ """
+ channel = self.channel
+ #setup, declare two queues:
+ channel.queue_declare(queue="test-queue-1a", exclusive=True)
+ channel.queue_declare(queue="test-queue-1b", exclusive=True)
+ #establish two consumers one of which excludes delivery of locally sent messages
+ channel.basic_consume(consumer_tag="local_included", queue="test-queue-1a")
+ channel.basic_consume(consumer_tag="local_excluded", queue="test-queue-1b", no_local=True)
+
+ #send a message
+ channel.basic_publish(routing_key="test-queue-1a", content=Content("consume_no_local"))
+ channel.basic_publish(routing_key="test-queue-1b", content=Content("consume_no_local"))
+
+ #check the queues of the two consumers
+ excluded = self.client.queue("local_excluded")
+ included = self.client.queue("local_included")
+ msg = included.get(timeout=1)
+ self.assertEqual("consume_no_local", msg.content.body)
+ try:
+ excluded.get(timeout=1)
+ self.fail("Received locally published message though no_local=true")
+ except Empty: None
+
+
+ def test_consume_exclusive(self):
+ """
+ Test that the exclusive flag is honoured in the consume method
+ """
+ channel = self.channel
+ #setup, declare a queue:
+ channel.queue_declare(queue="test-queue-2", exclusive=True)
+
+ #check that an exclusive consumer prevents other consumer being created:
+ channel.basic_consume(consumer_tag="first", queue="test-queue-2", exclusive=True)
+ try:
+ channel.basic_consume(consumer_tag="second", queue="test-queue-2")
+ self.fail("Expected consume request to fail due to previous exclusive consumer")
+ except Closed, e:
+ self.assertChannelException(403, e.args[0])
+
+ #open new channel and cleanup last consumer:
+ channel = self.client.channel(2)
+ channel.channel_open()
+
+ #check that an exclusive consumer cannot be created if a consumer already exists:
+ channel.basic_consume(consumer_tag="first", queue="test-queue-2")
+ try:
+ channel.basic_consume(consumer_tag="second", queue="test-queue-2", exclusive=True)
+ self.fail("Expected exclusive consume request to fail due to previous consumer")
+ except Closed, e:
+ self.assertChannelException(403, e.args[0])
+
+ def test_consume_queue_errors(self):
+ """
+ Test error conditions associated with the queue field of the consume method:
+ """
+ channel = self.channel
+ try:
+ #queue specified but doesn't exist:
+ channel.basic_consume(queue="invalid-queue")
+ self.fail("Expected failure when consuming from non-existent queue")
+ except Closed, e:
+ self.assertChannelException(404, e.args[0])
+
+ channel = self.client.channel(2)
+ channel.channel_open()
+ try:
+ #queue not specified and none previously declared for channel:
+ channel.basic_consume(queue="")
+ self.fail("Expected failure when consuming from unspecified queue")
+ except Closed, e:
+ self.assertConnectionException(530, e.args[0])
+
+ def test_consume_unique_consumers(self):
+ """
+ Ensure unique consumer tags are enforced
+ """
+ channel = self.channel
+ #setup, declare a queue:
+ channel.queue_declare(queue="test-queue-3", exclusive=True)
+
+ #check that attempts to use duplicate tags are detected and prevented:
+ channel.basic_consume(consumer_tag="first", queue="test-queue-3")
+ try:
+ channel.basic_consume(consumer_tag="first", queue="test-queue-3")
+ self.fail("Expected consume request to fail due to non-unique tag")
+ except Closed, e:
+ self.assertConnectionException(530, e.args[0])
+
+ def test_cancel(self):
+ """
+ Test compliance of the basic.cancel method
+ """
+ channel = self.channel
+ #setup, declare a queue:
+ channel.queue_declare(queue="test-queue-4", exclusive=True)
+ channel.basic_consume(consumer_tag="my-consumer", queue="test-queue-4")
+ channel.basic_publish(routing_key="test-queue-4", content=Content("One"))
+
+ myqueue = self.client.queue("my-consumer")
+ msg = myqueue.get(timeout=1)
+ self.assertEqual("One", msg.content.body)
+
+ #cancel should stop messages being delivered
+ channel.basic_cancel(consumer_tag="my-consumer")
+ channel.basic_publish(routing_key="test-queue-4", content=Content("Two"))
+ try:
+ msg = myqueue.get(timeout=1)
+ self.fail("Got message after cancellation: " + msg)
+ except Empty: None
+
+ #cancellation of non-existant consumers should be handled without error
+ channel.basic_cancel(consumer_tag="my-consumer")
+ channel.basic_cancel(consumer_tag="this-never-existed")
+
+
+ def test_ack(self):
+ """
+ Test basic ack/recover behaviour
+ """
+ channel = self.channel
+ channel.queue_declare(queue="test-ack-queue", exclusive=True)
+
+ reply = channel.basic_consume(queue="test-ack-queue", no_ack=False)
+ queue = self.client.queue(reply.consumer_tag)
+
+ channel.basic_publish(routing_key="test-ack-queue", content=Content("One"))
+ channel.basic_publish(routing_key="test-ack-queue", content=Content("Two"))
+ channel.basic_publish(routing_key="test-ack-queue", content=Content("Three"))
+ channel.basic_publish(routing_key="test-ack-queue", content=Content("Four"))
+ channel.basic_publish(routing_key="test-ack-queue", content=Content("Five"))
+
+ msg1 = queue.get(timeout=1)
+ msg2 = queue.get(timeout=1)
+ msg3 = queue.get(timeout=1)
+ msg4 = queue.get(timeout=1)
+ msg5 = queue.get(timeout=1)
+
+ self.assertEqual("One", msg1.content.body)
+ self.assertEqual("Two", msg2.content.body)
+ self.assertEqual("Three", msg3.content.body)
+ self.assertEqual("Four", msg4.content.body)
+ self.assertEqual("Five", msg5.content.body)
+
+ channel.basic_ack(delivery_tag=msg2.delivery_tag, multiple=True) #One & Two
+ channel.basic_ack(delivery_tag=msg4.delivery_tag, multiple=False) #Four
+
+ channel.basic_recover(requeue=False)
+
+ msg3b = queue.get(timeout=1)
+ msg5b = queue.get(timeout=1)
+
+ self.assertEqual("Three", msg3b.content.body)
+ self.assertEqual("Five", msg5b.content.body)
+
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.content.body)
+ except Empty: None
+
+ def test_recover_requeue(self):
+ """
+ Test requeing on recovery
+ """
+ channel = self.channel
+ channel.queue_declare(queue="test-requeue", exclusive=True)
+
+ subscription = channel.basic_consume(queue="test-requeue", no_ack=False)
+ queue = self.client.queue(subscription.consumer_tag)
+
+ channel.basic_publish(routing_key="test-requeue", content=Content("One"))
+ channel.basic_publish(routing_key="test-requeue", content=Content("Two"))
+ channel.basic_publish(routing_key="test-requeue", content=Content("Three"))
+ channel.basic_publish(routing_key="test-requeue", content=Content("Four"))
+ channel.basic_publish(routing_key="test-requeue", content=Content("Five"))
+
+ msg1 = queue.get(timeout=1)
+ msg2 = queue.get(timeout=1)
+ msg3 = queue.get(timeout=1)
+ msg4 = queue.get(timeout=1)
+ msg5 = queue.get(timeout=1)
+
+ self.assertEqual("One", msg1.content.body)
+ self.assertEqual("Two", msg2.content.body)
+ self.assertEqual("Three", msg3.content.body)
+ self.assertEqual("Four", msg4.content.body)
+ self.assertEqual("Five", msg5.content.body)
+
+ channel.basic_ack(delivery_tag=msg2.delivery_tag, multiple=True) #One & Two
+ channel.basic_ack(delivery_tag=msg4.delivery_tag, multiple=False) #Four
+
+ channel.basic_cancel(consumer_tag=subscription.consumer_tag)
+
+ channel.basic_recover(requeue=True)
+
+ subscription2 = channel.basic_consume(queue="test-requeue")
+ queue2 = self.client.queue(subscription2.consumer_tag)
+
+ msg3b = queue2.get(timeout=1)
+ msg5b = queue2.get(timeout=1)
+
+ self.assertEqual("Three", msg3b.content.body)
+ self.assertEqual("Five", msg5b.content.body)
+
+ self.assertEqual(True, msg3b.redelivered)
+ self.assertEqual(True, msg5b.redelivered)
+
+ try:
+ extra = queue2.get(timeout=1)
+ self.fail("Got unexpected message in second queue: " + extra.content.body)
+ except Empty: None
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected message in original queue: " + extra.content.body)
+ except Empty: None
+
+
+ def test_qos_prefetch_count(self):
+ """
+ Test that the prefetch count specified is honoured
+ """
+ #setup: declare queue and subscribe
+ channel = self.channel
+ channel.queue_declare(queue="test-prefetch-count", exclusive=True)
+ subscription = channel.basic_consume(queue="test-prefetch-count", no_ack=False)
+ queue = self.client.queue(subscription.consumer_tag)
+
+ #set prefetch to 5:
+ channel.basic_qos(prefetch_count=5)
+
+ #publish 10 messages:
+ for i in range(1, 11):
+ channel.basic_publish(routing_key="test-prefetch-count", content=Content("Message %d" % i))
+
+ #only 5 messages should have been delivered:
+ for i in range(1, 6):
+ msg = queue.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.content.body)
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected 6th message in original queue: " + extra.content.body)
+ except Empty: None
+
+ #ack messages and check that the next set arrive ok:
+ channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
+
+ for i in range(6, 11):
+ msg = queue.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.content.body)
+
+ channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
+
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected 11th message in original queue: " + extra.content.body)
+ except Empty: None
+
+
+
+ def test_qos_prefetch_size(self):
+ """
+ Test that the prefetch size specified is honoured
+ """
+ #setup: declare queue and subscribe
+ channel = self.channel
+ channel.queue_declare(queue="test-prefetch-size", exclusive=True)
+ subscription = channel.basic_consume(queue="test-prefetch-size", no_ack=False)
+ queue = self.client.queue(subscription.consumer_tag)
+
+ #set prefetch to 50 bytes (each message is 9 or 10 bytes):
+ channel.basic_qos(prefetch_size=50)
+
+ #publish 10 messages:
+ for i in range(1, 11):
+ channel.basic_publish(routing_key="test-prefetch-size", content=Content("Message %d" % i))
+
+ #only 5 messages should have been delivered (i.e. 45 bytes worth):
+ for i in range(1, 6):
+ msg = queue.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.content.body)
+
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected 6th message in original queue: " + extra.content.body)
+ except Empty: None
+
+ #ack messages and check that the next set arrive ok:
+ channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
+
+ for i in range(6, 11):
+ msg = queue.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.content.body)
+
+ channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
+
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected 11th message in original queue: " + extra.content.body)
+ except Empty: None
+
+ #make sure that a single oversized message still gets delivered
+ large = "abcdefghijklmnopqrstuvwxyz"
+ large = large + "-" + large;
+ channel.basic_publish(routing_key="test-prefetch-size", content=Content(large))
+ msg = queue.get(timeout=1)
+ self.assertEqual(large, msg.content.body)
+
+ def test_get(self):
+ """
+ Test basic_get method
+ """
+ channel = self.channel
+ channel.queue_declare(queue="test-get", exclusive=True)
+
+ #publish some messages (no_ack=True)
+ for i in range(1, 11):
+ channel.basic_publish(routing_key="test-get", content=Content("Message %d" % i))
+
+ #use basic_get to read back the messages, and check that we get an empty at the end
+ for i in range(1, 11):
+ reply = channel.basic_get(no_ack=True)
+ self.assertEqual(reply.method.klass.name, "basic")
+ self.assertEqual(reply.method.name, "get_ok")
+ self.assertEqual("Message %d" % i, reply.content.body)
+
+ reply = channel.basic_get(no_ack=True)
+ self.assertEqual(reply.method.klass.name, "basic")
+ self.assertEqual(reply.method.name, "get_empty")
+
+ #repeat for no_ack=False
+ for i in range(11, 21):
+ channel.basic_publish(routing_key="test-get", content=Content("Message %d" % i))
+
+ for i in range(11, 21):
+ reply = channel.basic_get(no_ack=False)
+ self.assertEqual(reply.method.klass.name, "basic")
+ self.assertEqual(reply.method.name, "get_ok")
+ self.assertEqual("Message %d" % i, reply.content.body)
+ if(i == 13):
+ channel.basic_ack(delivery_tag=reply.delivery_tag, multiple=True)
+ if(i in [15, 17, 19]):
+ channel.basic_ack(delivery_tag=reply.delivery_tag)
+
+ reply = channel.basic_get(no_ack=True)
+ self.assertEqual(reply.method.klass.name, "basic")
+ self.assertEqual(reply.method.name, "get_empty")
+
+ #recover(requeue=True)
+ channel.basic_recover(requeue=True)
+
+ #get the unacked messages again (14, 16, 18, 20)
+ for i in [14, 16, 18, 20]:
+ reply = channel.basic_get(no_ack=False)
+ self.assertEqual(reply.method.klass.name, "basic")
+ self.assertEqual(reply.method.name, "get_ok")
+ self.assertEqual("Message %d" % i, reply.content.body)
+ channel.basic_ack(delivery_tag=reply.delivery_tag)
+
+ reply = channel.basic_get(no_ack=True)
+ self.assertEqual(reply.method.klass.name, "basic")
+ self.assertEqual(reply.method.name, "get_empty")
+
+ channel.basic_recover(requeue=True)
+
+ reply = channel.basic_get(no_ack=True)
+ self.assertEqual(reply.method.klass.name, "basic")
+ self.assertEqual(reply.method.name, "get_empty")
diff --git a/RC9/qpid/python/tests_0-9/broker.py b/RC9/qpid/python/tests_0-9/broker.py
new file mode 100644
index 0000000000..03b4132d3e
--- /dev/null
+++ b/RC9/qpid/python/tests_0-9/broker.py
@@ -0,0 +1,133 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Closed
+from qpid.queue import Empty
+from qpid.content import Content
+from qpid.testlib import testrunner, TestBase
+
+class BrokerTests(TestBase):
+ """Tests for basic Broker functionality"""
+
+ def test_ack_and_no_ack(self):
+ """
+ First, this test tries to receive a message with a no-ack
+ consumer. Second, this test tries to explicitly receive and
+ acknowledge a message with an acknowledging consumer.
+ """
+ ch = self.channel
+ self.queue_declare(ch, queue = "myqueue")
+
+ # No ack consumer
+ ctag = "tag1"
+ ch.message_consume(queue = "myqueue", destination = ctag, no_ack = True)
+ body = "test no-ack"
+ ch.message_transfer(routing_key = "myqueue", body = body)
+ msg = self.client.queue(ctag).get(timeout = 5)
+ self.assert_(msg.body == body)
+
+ # Acknowledging consumer
+ self.queue_declare(ch, queue = "otherqueue")
+ ctag = "tag2"
+ ch.message_consume(queue = "otherqueue", destination = ctag, no_ack = False)
+ body = "test ack"
+ ch.message_transfer(routing_key = "otherqueue", body = body)
+ msg = self.client.queue(ctag).get(timeout = 5)
+ msg.ok()
+ self.assert_(msg.body == body)
+
+ def test_simple_delivery_immediate(self):
+ """
+ Test simple message delivery where consume is issued before publish
+ """
+ channel = self.channel
+ self.exchange_declare(channel, exchange="test-exchange", type="direct")
+ self.queue_declare(channel, queue="test-queue")
+ channel.queue_bind(queue="test-queue", exchange="test-exchange", routing_key="key")
+ consumer_tag = "tag1"
+ channel.message_consume(queue="test-queue", destination=consumer_tag, no_ack=True)
+ queue = self.client.queue(consumer_tag)
+
+ body = "Immediate Delivery"
+ channel.message_transfer(destination="test-exchange", routing_key="key", body=body, immediate=True)
+ msg = queue.get(timeout=5)
+ self.assert_(msg.body == body)
+
+ # TODO: Ensure we fail if immediate=True and there's no consumer.
+
+
+ def test_simple_delivery_queued(self):
+ """
+ Test basic message delivery where publish is issued before consume
+ (i.e. requires queueing of the message)
+ """
+ channel = self.channel
+ self.exchange_declare(channel, exchange="test-exchange", type="direct")
+ self.queue_declare(channel, queue="test-queue")
+ channel.queue_bind(queue="test-queue", exchange="test-exchange", routing_key="key")
+ body = "Queued Delivery"
+ channel.message_transfer(destination="test-exchange", routing_key="key", body=body)
+
+ consumer_tag = "tag1"
+ channel.message_consume(queue="test-queue", destination=consumer_tag, no_ack=True)
+ queue = self.client.queue(consumer_tag)
+ msg = queue.get(timeout=5)
+ self.assert_(msg.body == body)
+
+ def test_invalid_channel(self):
+ channel = self.client.channel(200)
+ try:
+ channel.queue_declare(exclusive=True)
+ self.fail("Expected error on queue_declare for invalid channel")
+ except Closed, e:
+ self.assertConnectionException(504, e.args[0])
+
+ def test_closed_channel(self):
+ channel = self.client.channel(200)
+ channel.channel_open()
+ channel.channel_close()
+ try:
+ channel.queue_declare(exclusive=True)
+ self.fail("Expected error on queue_declare for closed channel")
+ except Closed, e:
+ if isinstance(e.args[0], str): self.fail(e)
+ self.assertConnectionException(504, e.args[0])
+
+ def test_ping_pong(self):
+ channel = self.channel
+ reply = channel.channel_ping()
+ self.assertEqual(reply.method.klass.name, "channel")
+ self.assertEqual(reply.method.name, "ok")
+ #todo: provide a way to get notified of incoming pongs...
+
+ def test_channel_flow(self):
+ channel = self.channel
+ channel.queue_declare(queue="flow_test_queue", exclusive=True)
+ channel.message_consume(destination="my-tag", queue="flow_test_queue")
+ incoming = self.client.queue("my-tag")
+
+ channel.channel_flow(active=False)
+ channel.message_transfer(routing_key="flow_test_queue", body="abcdefghijklmnopqrstuvwxyz")
+ try:
+ incoming.get(timeout=1)
+ self.fail("Received message when flow turned off.")
+ except Empty: None
+
+ channel.channel_flow(active=True)
+ msg = incoming.get(timeout=1)
+ self.assertEqual("abcdefghijklmnopqrstuvwxyz", msg.body)
diff --git a/RC9/qpid/python/tests_0-9/dtx.py b/RC9/qpid/python/tests_0-9/dtx.py
new file mode 100644
index 0000000000..bc268f4129
--- /dev/null
+++ b/RC9/qpid/python/tests_0-9/dtx.py
@@ -0,0 +1,587 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.content import Content
+from qpid.testlib import testrunner, TestBase
+from struct import pack, unpack
+from time import sleep
+
+class DtxTests(TestBase):
+ """
+ Tests for the amqp dtx related classes.
+
+ Tests of the form test_simple_xxx test the basic transactional
+ behaviour. The approach here is to 'swap' a message from one queue
+ to another by consuming and re-publishing in the same
+ transaction. That transaction is then completed in different ways
+ and the appropriate result verified.
+
+ The other tests enforce more specific rules and behaviour on a
+ per-method or per-field basis.
+ """
+
+ XA_RBROLLBACK = 1
+ XA_RBTIMEOUT = 2
+ XA_OK = 8
+
+ def test_simple_commit(self):
+ """
+ Test basic one-phase commit behaviour.
+ """
+ channel = self.channel
+ tx = self.xid("my-xid")
+ self.txswap(tx, "commit")
+
+ #neither queue should have any messages accessible
+ self.assertMessageCount(0, "queue-a")
+ self.assertMessageCount(0, "queue-b")
+
+ #commit
+ self.assertEqual(self.XA_OK, channel.dtx_coordination_commit(xid=tx, one_phase=True).flags)
+
+ #check result
+ self.assertMessageCount(0, "queue-a")
+ self.assertMessageCount(1, "queue-b")
+ self.assertMessageId("commit", "queue-b")
+
+ def test_simple_prepare_commit(self):
+ """
+ Test basic two-phase commit behaviour.
+ """
+ channel = self.channel
+ tx = self.xid("my-xid")
+ self.txswap(tx, "prepare-commit")
+
+ #prepare
+ self.assertEqual(self.XA_OK, channel.dtx_coordination_prepare(xid=tx).flags)
+
+ #neither queue should have any messages accessible
+ self.assertMessageCount(0, "queue-a")
+ self.assertMessageCount(0, "queue-b")
+
+ #commit
+ self.assertEqual(self.XA_OK, channel.dtx_coordination_commit(xid=tx, one_phase=False).flags)
+
+ #check result
+ self.assertMessageCount(0, "queue-a")
+ self.assertMessageCount(1, "queue-b")
+ self.assertMessageId("prepare-commit", "queue-b")
+
+
+ def test_simple_rollback(self):
+ """
+ Test basic rollback behaviour.
+ """
+ channel = self.channel
+ tx = self.xid("my-xid")
+ self.txswap(tx, "rollback")
+
+ #neither queue should have any messages accessible
+ self.assertMessageCount(0, "queue-a")
+ self.assertMessageCount(0, "queue-b")
+
+ #rollback
+ self.assertEqual(self.XA_OK, channel.dtx_coordination_rollback(xid=tx).flags)
+
+ #check result
+ self.assertMessageCount(1, "queue-a")
+ self.assertMessageCount(0, "queue-b")
+ self.assertMessageId("rollback", "queue-a")
+
+ def test_simple_prepare_rollback(self):
+ """
+ Test basic rollback behaviour after the transaction has been prepared.
+ """
+ channel = self.channel
+ tx = self.xid("my-xid")
+ self.txswap(tx, "prepare-rollback")
+
+ #prepare
+ self.assertEqual(self.XA_OK, channel.dtx_coordination_prepare(xid=tx).flags)
+
+ #neither queue should have any messages accessible
+ self.assertMessageCount(0, "queue-a")
+ self.assertMessageCount(0, "queue-b")
+
+ #rollback
+ self.assertEqual(self.XA_OK, channel.dtx_coordination_rollback(xid=tx).flags)
+
+ #check result
+ self.assertMessageCount(1, "queue-a")
+ self.assertMessageCount(0, "queue-b")
+ self.assertMessageId("prepare-rollback", "queue-a")
+
+ def test_select_required(self):
+ """
+ check that an error is flagged if select is not issued before
+ start or end
+ """
+ channel = self.channel
+ tx = self.xid("dummy")
+ try:
+ channel.dtx_demarcation_start(xid=tx)
+
+ #if we get here we have failed, but need to do some cleanup:
+ channel.dtx_demarcation_end(xid=tx)
+ channel.dtx_coordination_rollback(xid=tx)
+ self.fail("Channel not selected for use with dtx, expected exception!")
+ except Closed, e:
+ self.assertConnectionException(503, e.args[0])
+
+ def test_start_already_known(self):
+ """
+ Verify that an attempt to start an association with a
+ transaction that is already known is not allowed (unless the
+ join flag is set).
+ """
+ #create two channels on different connection & select them for use with dtx:
+ channel1 = self.channel
+ channel1.dtx_demarcation_select()
+
+ other = self.connect()
+ channel2 = other.channel(1)
+ channel2.channel_open()
+ channel2.dtx_demarcation_select()
+
+ #create a xid
+ tx = self.xid("dummy")
+ #start work on one channel under that xid:
+ channel1.dtx_demarcation_start(xid=tx)
+ #then start on the other without the join set
+ failed = False
+ try:
+ channel2.dtx_demarcation_start(xid=tx)
+ except Closed, e:
+ failed = True
+ error = e
+
+ #cleanup:
+ if not failed:
+ channel2.dtx_demarcation_end(xid=tx)
+ other.close()
+ channel1.dtx_demarcation_end(xid=tx)
+ channel1.dtx_coordination_rollback(xid=tx)
+
+ #verification:
+ if failed: self.assertConnectionException(503, e.args[0])
+ else: self.fail("Xid already known, expected exception!")
+
+ def test_forget_xid_on_completion(self):
+ """
+ Verify that a xid is 'forgotten' - and can therefore be used
+ again - once it is completed.
+ """
+ channel = self.channel
+ #do some transactional work & complete the transaction
+ self.test_simple_commit()
+
+ #start association for the same xid as the previously completed txn
+ tx = self.xid("my-xid")
+ channel.dtx_demarcation_start(xid=tx)
+ channel.dtx_demarcation_end(xid=tx)
+ channel.dtx_coordination_rollback(xid=tx)
+
+ def test_start_join_and_resume(self):
+ """
+ Ensure the correct error is signalled when both the join and
+ resume flags are set on starting an association between a
+        channel and a transaction.
+ """
+ channel = self.channel
+ channel.dtx_demarcation_select()
+ tx = self.xid("dummy")
+ try:
+ channel.dtx_demarcation_start(xid=tx, join=True, resume=True)
+ #failed, but need some cleanup:
+ channel.dtx_demarcation_end(xid=tx)
+ channel.dtx_coordination_rollback(xid=tx)
+ self.fail("Join and resume both set, expected exception!")
+ except Closed, e:
+ self.assertConnectionException(503, e.args[0])
+
+ def test_start_join(self):
+ """
+ Verify 'join' behaviour, where a channel is associated with a
+ transaction that is already associated with another channel.
+ """
+ #create two channels & select them for use with dtx:
+ channel1 = self.channel
+ channel1.dtx_demarcation_select()
+
+ channel2 = self.client.channel(2)
+ channel2.channel_open()
+ channel2.dtx_demarcation_select()
+
+ #setup
+ channel1.queue_declare(queue="one", exclusive=True)
+ channel1.queue_declare(queue="two", exclusive=True)
+ channel1.message_transfer(routing_key="one", message_id="a", body="DtxMessage")
+ channel1.message_transfer(routing_key="two", message_id="b", body="DtxMessage")
+
+ #create a xid
+ tx = self.xid("dummy")
+ #start work on one channel under that xid:
+ channel1.dtx_demarcation_start(xid=tx)
+ #then start on the other with the join flag set
+ channel2.dtx_demarcation_start(xid=tx, join=True)
+
+ #do work through each channel
+ self.swap(channel1, "one", "two")#swap 'a' from 'one' to 'two'
+ self.swap(channel2, "two", "one")#swap 'b' from 'two' to 'one'
+
+ #mark end on both channels
+ channel1.dtx_demarcation_end(xid=tx)
+ channel2.dtx_demarcation_end(xid=tx)
+
+ #commit and check
+ channel1.dtx_coordination_commit(xid=tx, one_phase=True)
+ self.assertMessageCount(1, "one")
+ self.assertMessageCount(1, "two")
+ self.assertMessageId("a", "two")
+ self.assertMessageId("b", "one")
+
+
+ def test_suspend_resume(self):
+ """
+ Test suspension and resumption of an association
+ """
+ channel = self.channel
+ channel.dtx_demarcation_select()
+
+ #setup
+ channel.queue_declare(queue="one", exclusive=True)
+ channel.queue_declare(queue="two", exclusive=True)
+ channel.message_transfer(routing_key="one", message_id="a", body="DtxMessage")
+ channel.message_transfer(routing_key="two", message_id="b", body="DtxMessage")
+
+ tx = self.xid("dummy")
+
+ channel.dtx_demarcation_start(xid=tx)
+ self.swap(channel, "one", "two")#swap 'a' from 'one' to 'two'
+ channel.dtx_demarcation_end(xid=tx, suspend=True)
+
+ channel.dtx_demarcation_start(xid=tx, resume=True)
+ self.swap(channel, "two", "one")#swap 'b' from 'two' to 'one'
+ channel.dtx_demarcation_end(xid=tx)
+
+ #commit and check
+ channel.dtx_coordination_commit(xid=tx, one_phase=True)
+ self.assertMessageCount(1, "one")
+ self.assertMessageCount(1, "two")
+ self.assertMessageId("a", "two")
+ self.assertMessageId("b", "one")
+
+ def test_end_suspend_and_fail(self):
+ """
+ Verify that the correct error is signalled if the suspend and
+ fail flag are both set when disassociating a transaction from
+ the channel
+ """
+ channel = self.channel
+ channel.dtx_demarcation_select()
+ tx = self.xid("suspend_and_fail")
+ channel.dtx_demarcation_start(xid=tx)
+ try:
+ channel.dtx_demarcation_end(xid=tx, suspend=True, fail=True)
+ self.fail("Suspend and fail both set, expected exception!")
+ except Closed, e:
+ self.assertConnectionException(503, e.args[0])
+
+ #cleanup
+ other = self.connect()
+ channel = other.channel(1)
+ channel.channel_open()
+ channel.dtx_coordination_rollback(xid=tx)
+ channel.channel_close()
+ other.close()
+
+
+ def test_end_unknown_xid(self):
+ """
+ Verifies that the correct exception is thrown when an attempt
+ is made to end the association for a xid not previously
+ associated with the channel
+ """
+ channel = self.channel
+ channel.dtx_demarcation_select()
+ tx = self.xid("unknown-xid")
+ try:
+ channel.dtx_demarcation_end(xid=tx)
+ self.fail("Attempted to end association with unknown xid, expected exception!")
+ except Closed, e:
+ #FYI: this is currently *not* the exception specified, but I think the spec is wrong! Confirming...
+ self.assertConnectionException(503, e.args[0])
+
+ def test_end(self):
+ """
+ Verify that the association is terminated by end and subsequent
+ operations are non-transactional
+ """
+ channel = self.client.channel(2)
+ channel.channel_open()
+ channel.queue_declare(queue="tx-queue", exclusive=True)
+
+ #publish a message under a transaction
+ channel.dtx_demarcation_select()
+ tx = self.xid("dummy")
+ channel.dtx_demarcation_start(xid=tx)
+ channel.message_transfer(routing_key="tx-queue", message_id="one", body="DtxMessage")
+ channel.dtx_demarcation_end(xid=tx)
+
+ #now that association with txn is ended, publish another message
+ channel.message_transfer(routing_key="tx-queue", message_id="two", body="DtxMessage")
+
+ #check the second message is available, but not the first
+ self.assertMessageCount(1, "tx-queue")
+ channel.message_consume(queue="tx-queue", destination="results", no_ack=False)
+ msg = self.client.queue("results").get(timeout=1)
+ self.assertEqual("two", msg.message_id)
+ channel.message_cancel(destination="results")
+ #ack the message then close the channel
+ msg.ok()
+ channel.channel_close()
+
+ channel = self.channel
+ #commit the transaction and check that the first message (and
+ #only the first message) is then delivered
+ channel.dtx_coordination_commit(xid=tx, one_phase=True)
+ self.assertMessageCount(1, "tx-queue")
+ self.assertMessageId("one", "tx-queue")
+
+ def test_invalid_commit_one_phase_true(self):
+ """
+ Test that a commit with one_phase = True is rejected if the
+ transaction in question has already been prepared.
+ """
+ other = self.connect()
+ tester = other.channel(1)
+ tester.channel_open()
+ tester.queue_declare(queue="dummy", exclusive=True)
+ tester.dtx_demarcation_select()
+ tx = self.xid("dummy")
+ tester.dtx_demarcation_start(xid=tx)
+ tester.message_transfer(routing_key="dummy", body="whatever")
+ tester.dtx_demarcation_end(xid=tx)
+ tester.dtx_coordination_prepare(xid=tx)
+ failed = False
+ try:
+ tester.dtx_coordination_commit(xid=tx, one_phase=True)
+ except Closed, e:
+ failed = True
+ error = e
+
+ if failed:
+ self.channel.dtx_coordination_rollback(xid=tx)
+ self.assertConnectionException(503, e.args[0])
+ else:
+ tester.channel_close()
+ other.close()
+ self.fail("Invalid use of one_phase=True, expected exception!")
+
+ def test_invalid_commit_one_phase_false(self):
+ """
+ Test that a commit with one_phase = False is rejected if the
+ transaction in question has not yet been prepared.
+ """
+        """
+        Test that a commit with one_phase = False is rejected if the
+        transaction in question has not yet been prepared.
+        """
+ other = self.connect()
+ tester = other.channel(1)
+ tester.channel_open()
+ tester.queue_declare(queue="dummy", exclusive=True)
+ tester.dtx_demarcation_select()
+ tx = self.xid("dummy")
+ tester.dtx_demarcation_start(xid=tx)
+ tester.message_transfer(routing_key="dummy", body="whatever")
+ tester.dtx_demarcation_end(xid=tx)
+ failed = False
+ try:
+ tester.dtx_coordination_commit(xid=tx, one_phase=False)
+ except Closed, e:
+ failed = True
+ error = e
+
+ if failed:
+ self.channel.dtx_coordination_rollback(xid=tx)
+ self.assertConnectionException(503, e.args[0])
+ else:
+ tester.channel_close()
+ other.close()
+ self.fail("Invalid use of one_phase=False, expected exception!")
+
+ def test_implicit_end(self):
+ """
+ Test that an association is implicitly ended when the channel
+ is closed (whether by exception or explicit client request)
+ and the transaction in question is marked as rollback only.
+ """
+ channel1 = self.channel
+ channel2 = self.client.channel(2)
+ channel2.channel_open()
+
+ #setup:
+ channel2.queue_declare(queue="dummy", exclusive=True)
+ channel2.message_transfer(routing_key="dummy", body="whatever")
+ tx = self.xid("dummy")
+
+ channel2.dtx_demarcation_select()
+ channel2.dtx_demarcation_start(xid=tx)
+ channel2.message_get(queue="dummy", destination="dummy")
+ self.client.queue("dummy").get(timeout=1).ok()
+ channel2.message_transfer(routing_key="dummy", body="whatever")
+ channel2.channel_close()
+
+ self.assertEqual(self.XA_RBROLLBACK, channel1.dtx_coordination_prepare(xid=tx).flags)
+ channel1.dtx_coordination_rollback(xid=tx)
+
+ def test_get_timeout(self):
+ """
+ Check that get-timeout returns the correct value, (and that a
+ transaction with a timeout can complete normally)
+ """
+ channel = self.channel
+ tx = self.xid("dummy")
+
+ channel.dtx_demarcation_select()
+ channel.dtx_demarcation_start(xid=tx)
+ self.assertEqual(0, channel.dtx_coordination_get_timeout(xid=tx).timeout)
+ channel.dtx_coordination_set_timeout(xid=tx, timeout=60)
+ self.assertEqual(60, channel.dtx_coordination_get_timeout(xid=tx).timeout)
+ self.assertEqual(self.XA_OK, channel.dtx_demarcation_end(xid=tx).flags)
+ self.assertEqual(self.XA_OK, channel.dtx_coordination_rollback(xid=tx).flags)
+
+ def test_set_timeout(self):
+ """
+ Test the timeout of a transaction results in the expected
+ behaviour
+ """
+        #open new channel to allow self.channel to be used in checking the queue
+ channel = self.client.channel(2)
+ channel.channel_open()
+ #setup:
+ tx = self.xid("dummy")
+ channel.queue_declare(queue="queue-a", exclusive=True)
+ channel.queue_declare(queue="queue-b", exclusive=True)
+ channel.message_transfer(routing_key="queue-a", message_id="timeout", body="DtxMessage")
+
+ channel.dtx_demarcation_select()
+ channel.dtx_demarcation_start(xid=tx)
+ self.swap(channel, "queue-a", "queue-b")
+ channel.dtx_coordination_set_timeout(xid=tx, timeout=2)
+ sleep(3)
+ #check that the work has been rolled back already
+ self.assertMessageCount(1, "queue-a")
+ self.assertMessageCount(0, "queue-b")
+ self.assertMessageId("timeout", "queue-a")
+ #check the correct codes are returned when we try to complete the txn
+ self.assertEqual(self.XA_RBTIMEOUT, channel.dtx_demarcation_end(xid=tx).flags)
+ self.assertEqual(self.XA_RBTIMEOUT, channel.dtx_coordination_rollback(xid=tx).flags)
+
+
+
+ def test_recover(self):
+ """
+ Test basic recover behaviour
+ """
+ channel = self.channel
+
+ channel.dtx_demarcation_select()
+ channel.queue_declare(queue="dummy", exclusive=True)
+
+ prepared = []
+ for i in range(1, 10):
+ tx = self.xid("tx%s" % (i))
+ channel.dtx_demarcation_start(xid=tx)
+ channel.message_transfer(routing_key="dummy", body="message%s" % (i))
+ channel.dtx_demarcation_end(xid=tx)
+ if i in [2, 5, 6, 8]:
+ channel.dtx_coordination_prepare(xid=tx)
+ prepared.append(tx)
+ else:
+ channel.dtx_coordination_rollback(xid=tx)
+
+ indoubt = channel.dtx_coordination_recover().xids
+ #convert indoubt table to a list of xids (note: this will change for 0-10)
+ data = indoubt["xids"]
+ xids = []
+ pos = 0
+ while pos < len(data):
+ size = unpack("!B", data[pos])[0]
+ start = pos + 1
+ end = start + size
+ xid = data[start:end]
+ xids.append(xid)
+ pos = end
+
+ #rollback the prepared transactions returned by recover
+ for x in xids:
+ channel.dtx_coordination_rollback(xid=x)
+
+ #validate against the expected list of prepared transactions
+ actual = set(xids)
+ expected = set(prepared)
+ intersection = actual.intersection(expected)
+
+ if intersection != expected:
+ missing = expected.difference(actual)
+ extra = actual.difference(expected)
+ for x in missing:
+ channel.dtx_coordination_rollback(xid=x)
+ self.fail("Recovered xids not as expected. missing: %s; extra: %s" % (missing, extra))
+
+ def xid(self, txid, branchqual = ''):
+ return pack('LBB', 0, len(txid), len(branchqual)) + txid + branchqual
+
+ def txswap(self, tx, id):
+ channel = self.channel
+ #declare two queues:
+ channel.queue_declare(queue="queue-a", exclusive=True)
+ channel.queue_declare(queue="queue-b", exclusive=True)
+ #put message with specified id on one queue:
+ channel.message_transfer(routing_key="queue-a", message_id=id, body="DtxMessage")
+
+ #start the transaction:
+ channel.dtx_demarcation_select()
+ self.assertEqual(self.XA_OK, self.channel.dtx_demarcation_start(xid=tx).flags)
+
+ #'swap' the message from one queue to the other, under that transaction:
+ self.swap(self.channel, "queue-a", "queue-b")
+
+ #mark the end of the transactional work:
+ self.assertEqual(self.XA_OK, self.channel.dtx_demarcation_end(xid=tx).flags)
+
+ def swap(self, channel, src, dest):
+ #consume from src:
+ channel.message_get(destination="temp-swap", queue=src)
+ msg = self.client.queue("temp-swap").get(timeout=1)
+ msg.ok();
+
+ #re-publish to dest
+ channel.message_transfer(routing_key=dest, message_id=msg.message_id, body=msg.body)
+
+ def assertMessageCount(self, expected, queue):
+ self.assertEqual(expected, self.channel.queue_declare(queue=queue, passive=True).message_count)
+
+ def assertMessageId(self, expected, queue):
+ self.channel.message_consume(queue=queue, destination="results", no_ack=True)
+ self.assertEqual(expected, self.client.queue("results").get(timeout=1).message_id)
+ self.channel.message_cancel(destination="results")
diff --git a/RC9/qpid/python/tests_0-9/example.py b/RC9/qpid/python/tests_0-9/example.py
new file mode 100644
index 0000000000..7ab4cc7d0a
--- /dev/null
+++ b/RC9/qpid/python/tests_0-9/example.py
@@ -0,0 +1,94 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from qpid.content import Content
+from qpid.testlib import testrunner, TestBase
+
+class ExampleTest (TestBase):
+ """
+    An example Qpid test, illustrating the unittest framework and the
+ python Qpid client. The test class must inherit TestCase. The
+ test code uses the Qpid client to interact with a qpid broker and
+ verify it behaves as expected.
+ """
+
+ def test_example(self):
+ """
+ An example test. Note that test functions must start with 'test_'
+ to be recognized by the test framework.
+ """
+
+ # By inheriting TestBase, self.client is automatically connected
+ # and self.channel is automatically opened as channel(1)
+ # Other channel methods mimic the protocol.
+ channel = self.channel
+
+ # Now we can send regular commands. If you want to see what the method
+ # arguments mean or what other commands are available, you can use the
+ # python builtin help() method. For example:
+ #help(chan)
+ #help(chan.exchange_declare)
+
+ # If you want browse the available protocol methods without being
+ # connected to a live server you can use the amqp-doc utility:
+ #
+ # Usage amqp-doc [<options>] <spec> [<pattern_1> ... <pattern_n>]
+ #
+ # Options:
+ # -e, --regexp use regex instead of glob when matching
+
+ # Now that we know what commands are available we can use them to
+ # interact with the server.
+
+ # Here we use ordinal arguments.
+ self.exchange_declare(channel, 0, "test", "direct")
+
+ # Here we use keyword arguments.
+ self.queue_declare(channel, queue="test-queue")
+ channel.queue_bind(queue="test-queue", exchange="test", routing_key="key")
+
+ # Call Channel.basic_consume to register as a consumer.
+ # All the protocol methods return a message object. The message object
+ # has fields corresponding to the reply method fields, plus a content
+ # field that is filled if the reply includes content. In this case the
+ # interesting field is the consumer_tag.
+ channel.message_consume(queue="test-queue", destination="consumer_tag")
+
+ # We can use the Client.queue(...) method to access the queue
+ # corresponding to our consumer_tag.
+ queue = self.client.queue("consumer_tag")
+
+ # Now lets publish a message and see if our consumer gets it. To do
+ # this we need to import the Content class.
+ body = "Hello World!"
+ channel.message_transfer(destination="test",
+ routing_key="key",
+ body = body)
+
+ # Now we'll wait for the message to arrive. We can use the timeout
+ # argument in case the server hangs. By default queue.get() will wait
+ # until a message arrives or the connection to the server dies.
+ msg = queue.get(timeout=10)
+
+ # And check that we got the right response with assertEqual
+ self.assertEqual(body, msg.body)
+
+ # Now acknowledge the message.
+ msg.ok()
+
diff --git a/RC9/qpid/python/tests_0-9/exchange.py b/RC9/qpid/python/tests_0-9/exchange.py
new file mode 100644
index 0000000000..3a47ffff8c
--- /dev/null
+++ b/RC9/qpid/python/tests_0-9/exchange.py
@@ -0,0 +1,327 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+Tests for exchange behaviour.
+
+Test classes ending in 'RuleTests' are derived from rules in amqp.xml.
+"""
+
+import Queue, logging
+from qpid.testlib import TestBase
+from qpid.content import Content
+from qpid.client import Closed
+
+
+class StandardExchangeVerifier:
+ """Verifies standard exchange behavior.
+
+ Used as base class for classes that test standard exchanges."""
+
+ def verifyDirectExchange(self, ex):
+ """Verify that ex behaves like a direct exchange."""
+ self.queue_declare(queue="q")
+ self.channel.queue_bind(queue="q", exchange=ex, routing_key="k")
+ self.assertPublishConsume(exchange=ex, queue="q", routing_key="k")
+ try:
+ self.assertPublishConsume(exchange=ex, queue="q", routing_key="kk")
+ self.fail("Expected Empty exception")
+ except Queue.Empty: None # Expected
+
+ def verifyFanOutExchange(self, ex):
+ """Verify that ex behaves like a fanout exchange."""
+ self.queue_declare(queue="q")
+ self.channel.queue_bind(queue="q", exchange=ex)
+ self.queue_declare(queue="p")
+ self.channel.queue_bind(queue="p", exchange=ex)
+ for qname in ["q", "p"]: self.assertPublishGet(self.consume(qname), ex)
+
+ def verifyTopicExchange(self, ex):
+ """Verify that ex behaves like a topic exchange"""
+ self.queue_declare(queue="a")
+ self.channel.queue_bind(queue="a", exchange=ex, routing_key="a.#.b.*")
+ q = self.consume("a")
+ self.assertPublishGet(q, ex, "a.b.x")
+ self.assertPublishGet(q, ex, "a.x.b.x")
+ self.assertPublishGet(q, ex, "a.x.x.b.x")
+ # Shouldn't match
+ self.channel.message_transfer(destination=ex, routing_key="a.b", body="")
+ self.channel.message_transfer(destination=ex, routing_key="a.b.x.y", body="")
+ self.channel.message_transfer(destination=ex, routing_key="x.a.b.x", body="")
+ self.channel.message_transfer(destination=ex, routing_key="a.b", body="")
+ self.assert_(q.empty())
+
+ def verifyHeadersExchange(self, ex):
+ """Verify that ex is a headers exchange"""
+ self.queue_declare(queue="q")
+ self.channel.queue_bind(queue="q", exchange=ex, arguments={ "x-match":"all", "name":"fred" , "age":3} )
+ q = self.consume("q")
+ headers = {"name":"fred", "age":3}
+ self.assertPublishGet(q, exchange=ex, properties=headers)
+ self.channel.message_transfer(destination=ex, body="") # No headers, won't deliver
+ self.assertEmpty(q);
+
+
+class RecommendedTypesRuleTests(TestBase, StandardExchangeVerifier):
+ """
+ The server SHOULD implement these standard exchange types: topic, headers.
+
+ Client attempts to declare an exchange with each of these standard types.
+ """
+
+ def testDirect(self):
+ """Declare and test a direct exchange"""
+ self.exchange_declare(0, exchange="d", type="direct")
+ self.verifyDirectExchange("d")
+
+ def testFanout(self):
+ """Declare and test a fanout exchange"""
+ self.exchange_declare(0, exchange="f", type="fanout")
+ self.verifyFanOutExchange("f")
+
+ def testTopic(self):
+ """Declare and test a topic exchange"""
+ self.exchange_declare(0, exchange="t", type="topic")
+ self.verifyTopicExchange("t")
+
+ def testHeaders(self):
+ """Declare and test a headers exchange"""
+ self.exchange_declare(0, exchange="h", type="headers")
+ self.verifyHeadersExchange("h")
+
+
+class RequiredInstancesRuleTests(TestBase, StandardExchangeVerifier):
+ """
+ The server MUST, in each virtual host, pre-declare an exchange instance
+ for each standard exchange type that it implements, where the name of the
+ exchange instance is amq. followed by the exchange type name.
+
+ Client creates a temporary queue and attempts to bind to each required
+ exchange instance (amq.fanout, amq.direct, and amq.topic, amq.match if
+ those types are defined).
+ """
+ def testAmqDirect(self): self.verifyDirectExchange("amq.direct")
+
+ def testAmqFanOut(self): self.verifyFanOutExchange("amq.fanout")
+
+ def testAmqTopic(self): self.verifyTopicExchange("amq.topic")
+
+ def testAmqMatch(self): self.verifyHeadersExchange("amq.match")
+
+class DefaultExchangeRuleTests(TestBase, StandardExchangeVerifier):
+ """
+ The server MUST predeclare a direct exchange to act as the default exchange
+ for content Publish methods and for default queue bindings.
+
+ Client checks that the default exchange is active by specifying a queue
+ binding with no exchange name, and publishing a message with a suitable
+ routing key but without specifying the exchange name, then ensuring that
+ the message arrives in the queue correctly.
+ """
+ def testDefaultExchange(self):
+ # Test automatic binding by queue name.
+ self.queue_declare(queue="d")
+ self.assertPublishConsume(queue="d", routing_key="d")
+ # Test explicit bind to default queue
+ self.verifyDirectExchange("")
+
+
+# TODO aconway 2006-09-27: Fill in empty tests:
+
+class DefaultAccessRuleTests(TestBase):
+ """
+ The server MUST NOT allow clients to access the default exchange except
+ by specifying an empty exchange name in the Queue.Bind and content Publish
+ methods.
+ """
+
+class ExtensionsRuleTests(TestBase):
+ """
+ The server MAY implement other exchange types as wanted.
+ """
+
+
+class DeclareMethodMinimumRuleTests(TestBase):
+ """
+ The server SHOULD support a minimum of 16 exchanges per virtual host and
+ ideally, impose no limit except as defined by available resources.
+
+ The client creates as many exchanges as it can until the server reports
+    an error; the number of exchanges successfully created must be at least
+ sixteen.
+ """
+
+
+class DeclareMethodTicketFieldValidityRuleTests(TestBase):
+ """
+ The client MUST provide a valid access ticket giving "active" access to
+ the realm in which the exchange exists or will be created, or "passive"
+ access if the if-exists flag is set.
+
+ Client creates access ticket with wrong access rights and attempts to use
+ in this method.
+ """
+
+
+class DeclareMethodExchangeFieldReservedRuleTests(TestBase):
+ """
+ Exchange names starting with "amq." are reserved for predeclared and
+ standardised exchanges. The client MUST NOT attempt to create an exchange
+ starting with "amq.".
+
+
+ """
+
+
+class DeclareMethodTypeFieldTypedRuleTests(TestBase):
+ """
+ Exchanges cannot be redeclared with different types. The client MUST not
+ attempt to redeclare an existing exchange with a different type than used
+ in the original Exchange.Declare method.
+
+
+ """
+
+
+class DeclareMethodTypeFieldSupportRuleTests(TestBase):
+ """
+ The client MUST NOT attempt to create an exchange with a type that the
+ server does not support.
+
+
+ """
+
+
+class DeclareMethodPassiveFieldNotFoundRuleTests(TestBase):
+ """
+ If set, and the exchange does not already exist, the server MUST raise a
+ channel exception with reply code 404 (not found).
+ """
+ def test(self):
+ try:
+ self.channel.exchange_declare(exchange="humpty_dumpty", passive=True)
+ self.fail("Expected 404 for passive declaration of unknown exchange.")
+ except Closed, e:
+ self.assertChannelException(404, e.args[0])
+
+
+class DeclareMethodDurableFieldSupportRuleTests(TestBase):
+ """
+ The server MUST support both durable and transient exchanges.
+
+
+ """
+
+
+class DeclareMethodDurableFieldStickyRuleTests(TestBase):
+ """
+ The server MUST ignore the durable field if the exchange already exists.
+
+
+ """
+
+
+class DeclareMethodAutoDeleteFieldStickyRuleTests(TestBase):
+ """
+ The server MUST ignore the auto-delete field if the exchange already
+ exists.
+
+
+ """
+
+
+class DeleteMethodTicketFieldValidityRuleTests(TestBase):
+ """
+ The client MUST provide a valid access ticket giving "active" access
+ rights to the exchange's access realm.
+
+ Client creates access ticket with wrong access rights and attempts to use
+ in this method.
+ """
+
+
+class DeleteMethodExchangeFieldExistsRuleTests(TestBase):
+ """
+ The client MUST NOT attempt to delete an exchange that does not exist.
+ """
+
+
+class HeadersExchangeTests(TestBase):
+ """
+ Tests for headers exchange functionality.
+ """
+ def setUp(self):
+ TestBase.setUp(self)
+ self.queue_declare(queue="q")
+ self.q = self.consume("q")
+
+ def myAssertPublishGet(self, headers):
+ self.assertPublishGet(self.q, exchange="amq.match", properties=headers)
+
+ def myBasicPublish(self, headers):
+ self.channel.message_transfer(destination="amq.match", body="foobar", application_headers=headers)
+
+ def testMatchAll(self):
+ self.channel.queue_bind(queue="q", exchange="amq.match", arguments={ 'x-match':'all', "name":"fred", "age":3})
+ self.myAssertPublishGet({"name":"fred", "age":3})
+ self.myAssertPublishGet({"name":"fred", "age":3, "extra":"ignoreme"})
+
+ # None of these should match
+ self.myBasicPublish({})
+ self.myBasicPublish({"name":"barney"})
+ self.myBasicPublish({"name":10})
+ self.myBasicPublish({"name":"fred", "age":2})
+ self.assertEmpty(self.q)
+
+ def testMatchAny(self):
+ self.channel.queue_bind(queue="q", exchange="amq.match", arguments={ 'x-match':'any', "name":"fred", "age":3})
+ self.myAssertPublishGet({"name":"fred"})
+ self.myAssertPublishGet({"name":"fred", "ignoreme":10})
+ self.myAssertPublishGet({"ignoreme":10, "age":3})
+
+        # Won't match
+ self.myBasicPublish({})
+ self.myBasicPublish({"irrelevant":0})
+ self.assertEmpty(self.q)
+
+
+class MiscellaneousErrorsTests(TestBase):
+ """
+ Test some miscellaneous error conditions
+ """
+ def testTypeNotKnown(self):
+ try:
+ self.channel.exchange_declare(exchange="test_type_not_known_exchange", type="invalid_type")
+ self.fail("Expected 503 for declaration of unknown exchange type.")
+ except Closed, e:
+ self.assertConnectionException(503, e.args[0])
+
+ def testDifferentDeclaredType(self):
+ self.channel.exchange_declare(exchange="test_different_declared_type_exchange", type="direct")
+ try:
+ self.channel.exchange_declare(exchange="test_different_declared_type_exchange", type="topic")
+ self.fail("Expected 530 for redeclaration of exchange with different type.")
+ except Closed, e:
+ self.assertConnectionException(530, e.args[0])
+ #cleanup
+ other = self.connect()
+ c2 = other.channel(1)
+ c2.channel_open()
+ c2.exchange_delete(exchange="test_different_declared_type_exchange")
+
diff --git a/RC9/qpid/python/tests_0-9/execution.py b/RC9/qpid/python/tests_0-9/execution.py
new file mode 100644
index 0000000000..f2facfe42b
--- /dev/null
+++ b/RC9/qpid/python/tests_0-9/execution.py
@@ -0,0 +1,29 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from qpid.content import Content
+from qpid.testlib import testrunner, TestBase
+
+class ExecutionTests (TestBase):
+ def test_flush(self):
+ channel = self.channel
+ for i in [1, 2, 3]:
+ channel.basic_publish()
+ channel.execution_flush()
+ assert(channel.completion.wait(channel.completion.command_id, timeout=1))
diff --git a/RC9/qpid/python/tests_0-9/message.py b/RC9/qpid/python/tests_0-9/message.py
new file mode 100644
index 0000000000..b25016e680
--- /dev/null
+++ b/RC9/qpid/python/tests_0-9/message.py
@@ -0,0 +1,657 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.content import Content
+from qpid.testlib import testrunner, TestBase
+from qpid.reference import Reference, ReferenceId
+
+class MessageTests(TestBase):
+ """Tests for 'methods' on the amqp message 'class'"""
+
+ def test_consume_no_local(self):
+ """
+ Test that the no_local flag is honoured in the consume method
+ """
+ channel = self.channel
+ #setup, declare two queues:
+ channel.queue_declare(queue="test-queue-1a", exclusive=True)
+ channel.queue_declare(queue="test-queue-1b", exclusive=True)
+ #establish two consumers one of which excludes delivery of locally sent messages
+ channel.message_consume(destination="local_included", queue="test-queue-1a")
+ channel.message_consume(destination="local_excluded", queue="test-queue-1b", no_local=True)
+
+ #send a message
+ channel.message_transfer(routing_key="test-queue-1a", body="consume_no_local")
+ channel.message_transfer(routing_key="test-queue-1b", body="consume_no_local")
+
+ #check the queues of the two consumers
+ excluded = self.client.queue("local_excluded")
+ included = self.client.queue("local_included")
+ msg = included.get(timeout=1)
+ self.assertEqual("consume_no_local", msg.body)
+ try:
+ excluded.get(timeout=1)
+ self.fail("Received locally published message though no_local=true")
+ except Empty: None
+
+
+ def test_consume_exclusive(self):
+ """
+ Test that the exclusive flag is honoured in the consume method
+ """
+ channel = self.channel
+ #setup, declare a queue:
+ channel.queue_declare(queue="test-queue-2", exclusive=True)
+
+ #check that an exclusive consumer prevents other consumer being created:
+ channel.message_consume(destination="first", queue="test-queue-2", exclusive=True)
+ try:
+ channel.message_consume(destination="second", queue="test-queue-2")
+ self.fail("Expected consume request to fail due to previous exclusive consumer")
+ except Closed, e:
+ self.assertChannelException(403, e.args[0])
+
+ #open new channel and cleanup last consumer:
+ channel = self.client.channel(2)
+ channel.channel_open()
+
+ #check that an exclusive consumer cannot be created if a consumer already exists:
+ channel.message_consume(destination="first", queue="test-queue-2")
+ try:
+ channel.message_consume(destination="second", queue="test-queue-2", exclusive=True)
+ self.fail("Expected exclusive consume request to fail due to previous consumer")
+ except Closed, e:
+ self.assertChannelException(403, e.args[0])
+
+ def test_consume_queue_errors(self):
+ """
+ Test error conditions associated with the queue field of the consume method:
+ """
+ channel = self.channel
+ try:
+ #queue specified but doesn't exist:
+ channel.message_consume(queue="invalid-queue")
+ self.fail("Expected failure when consuming from non-existent queue")
+ except Closed, e:
+ self.assertChannelException(404, e.args[0])
+
+ channel = self.client.channel(2)
+ channel.channel_open()
+ try:
+ #queue not specified and none previously declared for channel:
+ channel.message_consume(queue="")
+ self.fail("Expected failure when consuming from unspecified queue")
+ except Closed, e:
+ self.assertConnectionException(530, e.args[0])
+
+ def test_consume_unique_consumers(self):
+ """
+ Ensure unique consumer tags are enforced
+ """
+ channel = self.channel
+ #setup, declare a queue:
+ channel.queue_declare(queue="test-queue-3", exclusive=True)
+
+ #check that attempts to use duplicate tags are detected and prevented:
+ channel.message_consume(destination="first", queue="test-queue-3")
+ try:
+ channel.message_consume(destination="first", queue="test-queue-3")
+ self.fail("Expected consume request to fail due to non-unique tag")
+ except Closed, e:
+ self.assertConnectionException(530, e.args[0])
+
+ def test_cancel(self):
+ """
+ Test compliance of the basic.cancel method
+ """
+ channel = self.channel
+ #setup, declare a queue:
+ channel.queue_declare(queue="test-queue-4", exclusive=True)
+ channel.message_consume(destination="my-consumer", queue="test-queue-4")
+ channel.message_transfer(routing_key="test-queue-4", body="One")
+
+ #cancel should stop messages being delivered
+ channel.message_cancel(destination="my-consumer")
+ channel.message_transfer(routing_key="test-queue-4", body="Two")
+ myqueue = self.client.queue("my-consumer")
+ msg = myqueue.get(timeout=1)
+ self.assertEqual("One", msg.body)
+ try:
+ msg = myqueue.get(timeout=1)
+ self.fail("Got message after cancellation: " + msg)
+ except Empty: None
+
+ #cancellation of non-existent consumers should be handled without error
+ channel.message_cancel(destination="my-consumer")
+ channel.message_cancel(destination="this-never-existed")
+
+
+ def test_ack(self):
+ """
+ Test basic ack/recover behaviour
+ """
+ channel = self.channel
+ channel.queue_declare(queue="test-ack-queue", exclusive=True)
+
+ channel.message_consume(queue="test-ack-queue", destination="consumer_tag", no_ack=False)
+ queue = self.client.queue("consumer_tag")
+
+ channel.message_transfer(routing_key="test-ack-queue", body="One")
+ channel.message_transfer(routing_key="test-ack-queue", body="Two")
+ channel.message_transfer(routing_key="test-ack-queue", body="Three")
+ channel.message_transfer(routing_key="test-ack-queue", body="Four")
+ channel.message_transfer(routing_key="test-ack-queue", body="Five")
+
+ msg1 = queue.get(timeout=1)
+ msg2 = queue.get(timeout=1)
+ msg3 = queue.get(timeout=1)
+ msg4 = queue.get(timeout=1)
+ msg5 = queue.get(timeout=1)
+
+ self.assertEqual("One", msg1.body)
+ self.assertEqual("Two", msg2.body)
+ self.assertEqual("Three", msg3.body)
+ self.assertEqual("Four", msg4.body)
+ self.assertEqual("Five", msg5.body)
+
+ msg1.ok(batchoffset=1)#One and Two
+ msg4.ok()
+
+ channel.message_recover(requeue=False)
+
+ msg3b = queue.get(timeout=1)
+ msg5b = queue.get(timeout=1)
+
+ self.assertEqual("Three", msg3b.body)
+ self.assertEqual("Five", msg5b.body)
+
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.body)
+ except Empty: None
+
+ def test_recover_requeue(self):
+ """
+ Test requeuing on recovery
+ """
+ channel = self.channel
+ channel.queue_declare(queue="test-requeue", exclusive=True)
+
+ channel.message_consume(queue="test-requeue", destination="consumer_tag", no_ack=False)
+ queue = self.client.queue("consumer_tag")
+
+ channel.message_transfer(routing_key="test-requeue", body="One")
+ channel.message_transfer(routing_key="test-requeue", body="Two")
+ channel.message_transfer(routing_key="test-requeue", body="Three")
+ channel.message_transfer(routing_key="test-requeue", body="Four")
+ channel.message_transfer(routing_key="test-requeue", body="Five")
+
+ msg1 = queue.get(timeout=1)
+ msg2 = queue.get(timeout=1)
+ msg3 = queue.get(timeout=1)
+ msg4 = queue.get(timeout=1)
+ msg5 = queue.get(timeout=1)
+
+ self.assertEqual("One", msg1.body)
+ self.assertEqual("Two", msg2.body)
+ self.assertEqual("Three", msg3.body)
+ self.assertEqual("Four", msg4.body)
+ self.assertEqual("Five", msg5.body)
+
+ msg1.ok(batchoffset=1) #One and Two
+ msg4.ok() #Four
+
+ channel.message_cancel(destination="consumer_tag")
+
+ #publish a new message
+ channel.message_transfer(routing_key="test-requeue", body="Six")
+ #requeue unacked messages (Three and Five)
+ channel.message_recover(requeue=True)
+
+ channel.message_consume(queue="test-requeue", destination="consumer_tag")
+ queue2 = self.client.queue("consumer_tag")
+
+ msg3b = queue2.get(timeout=1)
+ msg5b = queue2.get(timeout=1)
+
+ self.assertEqual("Three", msg3b.body)
+ self.assertEqual("Five", msg5b.body)
+
+ self.assertEqual(True, msg3b.redelivered)
+ self.assertEqual(True, msg5b.redelivered)
+
+ self.assertEqual("Six", queue2.get(timeout=1).body)
+
+ try:
+ extra = queue2.get(timeout=1)
+ self.fail("Got unexpected message in second queue: " + extra.body)
+ except Empty: None
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected message in original queue: " + extra.body)
+ except Empty: None
+
+
+ def test_qos_prefetch_count(self):
+ """
+ Test that the prefetch count specified is honoured
+ """
+ #setup: declare queue and subscribe
+ channel = self.channel
+ channel.queue_declare(queue="test-prefetch-count", exclusive=True)
+ subscription = channel.message_consume(queue="test-prefetch-count", destination="consumer_tag", no_ack=False)
+ queue = self.client.queue("consumer_tag")
+
+ #set prefetch to 5:
+ channel.message_qos(prefetch_count=5)
+
+ #publish 10 messages:
+ for i in range(1, 11):
+ channel.message_transfer(routing_key="test-prefetch-count", body="Message %d" % i)
+
+ #only 5 messages should have been delivered:
+ for i in range(1, 6):
+ msg = queue.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.body)
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected 6th message in original queue: " + extra.body)
+ except Empty: None
+
+ #ack messages and check that the next set arrive ok:
+ #todo: once batching is implemented, send a single response for all messages
+ msg.ok(batchoffset=-4)#1-5
+
+ for i in range(6, 11):
+ msg = queue.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.body)
+
+ msg.ok(batchoffset=-4)#6-10
+
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected 11th message in original queue: " + extra.body)
+ except Empty: None
+
+
+
+ def test_qos_prefetch_size(self):
+ """
+ Test that the prefetch size specified is honoured
+ """
+ #setup: declare queue and subscribe
+ channel = self.channel
+ channel.queue_declare(queue="test-prefetch-size", exclusive=True)
+ subscription = channel.message_consume(queue="test-prefetch-size", destination="consumer_tag", no_ack=False)
+ queue = self.client.queue("consumer_tag")
+
+ #set prefetch to 50 bytes (each message is 9 or 10 bytes):
+ channel.message_qos(prefetch_size=50)
+
+ #publish 10 messages:
+ for i in range(1, 11):
+ channel.message_transfer(routing_key="test-prefetch-size", body="Message %d" % i)
+
+ #only 5 messages should have been delivered (i.e. 45 bytes worth):
+ for i in range(1, 6):
+ msg = queue.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.body)
+
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected 6th message in original queue: " + extra.body)
+ except Empty: None
+
+ #ack messages and check that the next set arrive ok:
+ msg.ok(batchoffset=-4)#1-5
+
+ for i in range(6, 11):
+ msg = queue.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.body)
+
+ msg.ok(batchoffset=-4)#6-10
+
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected 11th message in original queue: " + extra.body)
+ except Empty: None
+
+ #make sure that a single oversized message still gets delivered
+ large = "abcdefghijklmnopqrstuvwxyz"
+ large = large + "-" + large;
+ channel.message_transfer(routing_key="test-prefetch-size", body=large)
+ msg = queue.get(timeout=1)
+ self.assertEqual(large, msg.body)
+
+ def test_get(self):
+ """
+ Test message_get method
+ """
+ channel = self.channel
+ channel.queue_declare(queue="test-get", exclusive=True)
+
+ #publish some messages (no_ack=True)
+ for i in range(1, 11):
+ channel.message_transfer(routing_key="test-get", body="Message %d" % i)
+
+ #use message_get to read back the messages, and check that we get an empty at the end
+ for i in range(1, 11):
+ tag = "queue %d" % i
+ reply = channel.message_get(no_ack=True, queue="test-get", destination=tag)
+ self.assertEqual(reply.method.klass.name, "message")
+ self.assertEqual(reply.method.name, "ok")
+ self.assertEqual("Message %d" % i, self.client.queue(tag).get(timeout=1).body)
+
+ reply = channel.message_get(no_ack=True, queue="test-get")
+ self.assertEqual(reply.method.klass.name, "message")
+ self.assertEqual(reply.method.name, "empty")
+
+ #repeat for no_ack=False
+ for i in range(11, 21):
+ channel.message_transfer(routing_key="test-get", body="Message %d" % i)
+
+ for i in range(11, 21):
+ tag = "queue %d" % i
+ reply = channel.message_get(no_ack=False, queue="test-get", destination=tag)
+ self.assertEqual(reply.method.klass.name, "message")
+ self.assertEqual(reply.method.name, "ok")
+ msg = self.client.queue(tag).get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.body)
+
+ if (i==13):
+ msg.ok(batchoffset=-2)#11, 12 & 13
+ if(i in [15, 17, 19]):
+ msg.ok()
+
+ reply = channel.message_get(no_ack=True, queue="test-get")
+ self.assertEqual(reply.method.klass.name, "message")
+ self.assertEqual(reply.method.name, "empty")
+
+ #recover(requeue=True)
+ channel.message_recover(requeue=True)
+
+ #get the unacked messages again (14, 16, 18, 20)
+ for i in [14, 16, 18, 20]:
+ tag = "queue %d" % i
+ reply = channel.message_get(no_ack=False, queue="test-get", destination=tag)
+ self.assertEqual(reply.method.klass.name, "message")
+ self.assertEqual(reply.method.name, "ok")
+ msg = self.client.queue(tag).get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.body)
+ msg.ok()
+ #channel.message_ack(delivery_tag=reply.delivery_tag)
+
+ reply = channel.message_get(no_ack=True, queue="test-get")
+ self.assertEqual(reply.method.klass.name, "message")
+ self.assertEqual(reply.method.name, "empty")
+
+ channel.message_recover(requeue=True)
+
+ reply = channel.message_get(no_ack=True, queue="test-get")
+ self.assertEqual(reply.method.klass.name, "message")
+ self.assertEqual(reply.method.name, "empty")
+
+ def test_reference_simple(self):
+ """
+ Test basic ability to handle references
+ """
+ channel = self.channel
+ channel.queue_declare(queue="ref_queue", exclusive=True)
+ channel.message_consume(queue="ref_queue", destination="c1")
+ queue = self.client.queue("c1")
+
+ refId = "myref"
+ channel.message_open(reference=refId)
+ channel.message_append(reference=refId, bytes="abcd")
+ channel.synchronous = False
+ ack = channel.message_transfer(routing_key="ref_queue", body=ReferenceId(refId))
+ channel.synchronous = True
+
+ channel.message_append(reference=refId, bytes="efgh")
+ channel.message_append(reference=refId, bytes="ijkl")
+ channel.message_close(reference=refId)
+
+ #first, wait for the ok for the transfer
+ ack.get_response(timeout=1)
+
+ self.assertDataEquals(channel, queue.get(timeout=1), "abcdefghijkl")
+
+
+ def test_reference_large(self):
+ """
+ Test basic ability to handle references whose content exceeds max frame size
+ """
+ channel = self.channel
+ self.queue_declare(queue="ref_queue")
+
+ #generate a big data string (> max frame size of consumer):
+ data = "0123456789"
+ for i in range(0, 10):
+ data += data
+ #send it inline
+ channel.synchronous = False
+ ack = channel.message_transfer(routing_key="ref_queue", body=data)
+ channel.synchronous = True
+ #first, wait for the ok for the transfer
+ ack.get_response(timeout=1)
+
+ #create a new connection for consumer, with specific max frame size (< data)
+ other = self.connect(tune_params={"channel_max":10, "frame_max":5120, "heartbeat":0})
+ ch2 = other.channel(1)
+ ch2.channel_open()
+ ch2.message_consume(queue="ref_queue", destination="c1")
+ queue = other.queue("c1")
+
+ msg = queue.get(timeout=1)
+ self.assertTrue(isinstance(msg.body, ReferenceId))
+ self.assertTrue(msg.reference)
+ self.assertEquals(data, msg.reference.get_complete())
+
+ def test_reference_completion(self):
+ """
+ Test that reference transfers are not deemed complete until
+ closed (therefore are not acked or routed until that point)
+ """
+ channel = self.channel
+ channel.queue_declare(queue="ref_queue", exclusive=True)
+ channel.message_consume(queue="ref_queue", destination="c1")
+ queue = self.client.queue("c1")
+
+ refId = "myref"
+ channel.message_open(reference=refId)
+ channel.message_append(reference=refId, bytes="abcd")
+ channel.synchronous = False
+ ack = channel.message_transfer(routing_key="ref_queue", body=ReferenceId(refId))
+ channel.synchronous = True
+
+ try:
+ msg = queue.get(timeout=1)
+ self.fail("Got unexpected message on queue: " + msg)
+ except Empty: None
+
+ self.assertTrue(not ack.is_complete())
+
+ channel.message_close(reference=refId)
+
+ #first, wait for the ok for the transfer
+ ack.get_response(timeout=1)
+
+ self.assertDataEquals(channel, queue.get(timeout=1), "abcd")
+
+ def test_reference_multi_transfer(self):
+ """
+ Test that multiple transfer requests for the same reference are
+ correctly handled.
+ """
+ channel = self.channel
+ #declare and consume from two queues
+ channel.queue_declare(queue="q-one", exclusive=True)
+ channel.queue_declare(queue="q-two", exclusive=True)
+ channel.message_consume(queue="q-one", destination="q-one")
+ channel.message_consume(queue="q-two", destination="q-two")
+ queue1 = self.client.queue("q-one")
+ queue2 = self.client.queue("q-two")
+
+ #transfer a single ref to both queues (in separate commands)
+ channel.message_open(reference="my-ref")
+ channel.synchronous = False
+ ack1 = channel.message_transfer(routing_key="q-one", body=ReferenceId("my-ref"))
+ channel.message_append(reference="my-ref", bytes="my data")
+ ack2 = channel.message_transfer(routing_key="q-two", body=ReferenceId("my-ref"))
+ channel.synchronous = True
+ channel.message_close(reference="my-ref")
+
+ #check that both queues have the message
+ self.assertDataEquals(channel, queue1.get(timeout=1), "my data")
+ self.assertDataEquals(channel, queue2.get(timeout=1), "my data")
+ self.assertEmpty(queue1)
+ self.assertEmpty(queue2)
+
+ #transfer a single ref to the same queue twice (in separate commands)
+ channel.message_open(reference="my-ref")
+ channel.synchronous = False
+ ack1 = channel.message_transfer(routing_key="q-one", message_id="abc", body=ReferenceId("my-ref"))
+ channel.message_append(reference="my-ref", bytes="second message")
+ ack2 = channel.message_transfer(routing_key="q-one", message_id="xyz", body=ReferenceId("my-ref"))
+ channel.synchronous = True
+ channel.message_close(reference="my-ref")
+
+ msg1 = queue1.get(timeout=1)
+ msg2 = queue1.get(timeout=1)
+ #order is undefined
+ if msg1.message_id == "abc":
+ self.assertEquals(msg2.message_id, "xyz")
+ else:
+ self.assertEquals(msg1.message_id, "xyz")
+ self.assertEquals(msg2.message_id, "abc")
+
+ #would be legal for the incoming messages to be transferred
+ #inline or by reference in any combination
+
+ if isinstance(msg1.body, ReferenceId):
+ self.assertEquals("second message", msg1.reference.get_complete())
+ if isinstance(msg2.body, ReferenceId):
+ if msg1.body != msg2.body:
+ self.assertEquals("second message", msg2.reference.get_complete())
+ #else ok, as same ref as msg1
+ else:
+ self.assertEquals("second message", msg1.body)
+ if isinstance(msg2.body, ReferenceId):
+ self.assertEquals("second message", msg2.reference.get_complete())
+ else:
+ self.assertEquals("second message", msg2.body)
+
+ self.assertEmpty(queue1)
+
+ def test_reference_unopened_on_append_error(self):
+ channel = self.channel
+ try:
+ channel.message_append(reference="unopened")
+ except Closed, e:
+ self.assertConnectionException(503, e.args[0])
+
+ def test_reference_unopened_on_close_error(self):
+ channel = self.channel
+ try:
+ channel.message_close(reference="unopened")
+ except Closed, e:
+ self.assertConnectionException(503, e.args[0])
+
+ def test_reference_unopened_on_transfer_error(self):
+ channel = self.channel
+ try:
+ channel.message_transfer(body=ReferenceId("unopened"))
+ except Closed, e:
+ self.assertConnectionException(503, e.args[0])
+
+ def test_reference_already_opened_error(self):
+ channel = self.channel
+ channel.message_open(reference="a")
+ try:
+ channel.message_open(reference="a")
+ except Closed, e:
+ self.assertConnectionException(503, e.args[0])
+
+ def test_empty_reference(self):
+ channel = self.channel
+ channel.queue_declare(queue="ref_queue", exclusive=True)
+ channel.message_consume(queue="ref_queue", destination="c1")
+ queue = self.client.queue("c1")
+
+ refId = "myref"
+ channel.message_open(reference=refId)
+ channel.synchronous = False
+ ack = channel.message_transfer(routing_key="ref_queue", message_id="empty-msg", body=ReferenceId(refId))
+ channel.synchronous = True
+ channel.message_close(reference=refId)
+
+ #first, wait for the ok for the transfer
+ ack.get_response(timeout=1)
+
+ msg = queue.get(timeout=1)
+ self.assertEquals(msg.message_id, "empty-msg")
+ self.assertDataEquals(channel, msg, "")
+
+ def test_reject(self):
+ channel = self.channel
+ channel.queue_declare(queue = "q", exclusive=True)
+
+ channel.message_consume(queue = "q", destination = "consumer")
+ channel.message_transfer(routing_key = "q", body="blah, blah")
+ msg = self.client.queue("consumer").get(timeout = 1)
+ self.assertEquals(msg.body, "blah, blah")
+ channel.message_cancel(destination = "consumer")
+ msg.reject()
+
+ channel.message_consume(queue = "q", destination = "checker")
+ msg = self.client.queue("checker").get(timeout = 1)
+ self.assertEquals(msg.body, "blah, blah")
+
+ def test_checkpoint(self):
+ channel = self.channel
+ channel.queue_declare(queue = "q", exclusive=True)
+
+ channel.message_open(reference="my-ref")
+ channel.message_append(reference="my-ref", bytes="abcdefgh")
+ channel.message_append(reference="my-ref", bytes="ijklmnop")
+ channel.message_checkpoint(reference="my-ref", identifier="my-checkpoint")
+ channel.channel_close()
+
+ channel = self.client.channel(2)
+ channel.channel_open()
+ channel.message_consume(queue = "q", destination = "consumer")
+ offset = channel.message_resume(reference="my-ref", identifier="my-checkpoint").value
+ self.assertTrue(offset<=16)
+ channel.message_append(reference="my-ref", bytes="qrstuvwxyz")
+ channel.synchronous = False
+ channel.message_transfer(routing_key="q-one", message_id="abcd", body=ReferenceId("my-ref"))
+ channel.synchronous = True
+ channel.message_close(reference="my-ref")
+
+ self.assertDataEquals(channel, self.client.queue("consumer").get(timeout = 1), "abcdefghijklmnopqrstuvwxyz")
+ self.assertEmpty(self.client.queue("consumer"))
+
+
+ def assertDataEquals(self, channel, msg, expected):
+ if isinstance(msg.body, ReferenceId):
+ data = msg.reference.get_complete()
+ else:
+ data = msg.body
+ self.assertEquals(expected, data)
diff --git a/RC9/qpid/python/tests_0-9/query.py b/RC9/qpid/python/tests_0-9/query.py
new file mode 100644
index 0000000000..c2e08c003c
--- /dev/null
+++ b/RC9/qpid/python/tests_0-9/query.py
@@ -0,0 +1,224 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.content import Content
+from qpid.testlib import testrunner, TestBase
+
+class QueryTests(TestBase):
+ """Tests for various query methods introduced in 0-10 and available in 0-9 for preview"""
+
+ def test_exchange_query(self):
+ """
+ Test that the exchange_query method works as expected
+ """
+ channel = self.channel
+ #check returned type for the standard exchanges
+ self.assertEqual("direct", channel.exchange_query(name="amq.direct").type)
+ self.assertEqual("topic", channel.exchange_query(name="amq.topic").type)
+ self.assertEqual("fanout", channel.exchange_query(name="amq.fanout").type)
+ self.assertEqual("headers", channel.exchange_query(name="amq.match").type)
+ self.assertEqual("direct", channel.exchange_query(name="").type)
+ #declare an exchange
+ channel.exchange_declare(exchange="my-test-exchange", type= "direct", durable=False)
+ #check that the result of a query is as expected
+ response = channel.exchange_query(name="my-test-exchange")
+ self.assertEqual("direct", response.type)
+ self.assertEqual(False, response.durable)
+ self.assertEqual(False, response.not_found)
+ #delete the exchange
+ channel.exchange_delete(exchange="my-test-exchange")
+ #check that the query now reports not-found
+ self.assertEqual(True, channel.exchange_query(name="my-test-exchange").not_found)
+
+ def test_binding_query_direct(self):
+ """
+ Test that the binding_query method works as expected with the direct exchange
+ """
+ self.binding_query_with_key("amq.direct")
+
+ def test_binding_query_topic(self):
+ """
+ Test that the binding_query method works as expected with the topic exchange
+ """
+ self.binding_query_with_key("amq.topic")
+
+ def binding_query_with_key(self, exchange_name):
+ channel = self.channel
+ #setup: create two queues
+ channel.queue_declare(queue="used-queue", exclusive=True)
+ channel.queue_declare(queue="unused-queue", exclusive=True)
+
+ channel.queue_bind(exchange=exchange_name, queue="used-queue", routing_key="used-key")
+
+ # test detection of any binding to specific queue
+ response = channel.binding_query(exchange=exchange_name, queue="used-queue")
+ self.assertEqual(False, response.exchange_not_found)
+ self.assertEqual(False, response.queue_not_found)
+ self.assertEqual(False, response.queue_not_matched)
+
+ # test detection of specific binding to any queue
+ response = channel.binding_query(exchange=exchange_name, routing_key="used-key")
+ self.assertEqual(False, response.exchange_not_found)
+ self.assertEqual(False, response.queue_not_found)
+ self.assertEqual(False, response.key_not_matched)
+
+ # test detection of specific binding to specific queue
+ response = channel.binding_query(exchange=exchange_name, queue="used-queue", routing_key="used-key")
+ self.assertEqual(False, response.exchange_not_found)
+ self.assertEqual(False, response.queue_not_found)
+ self.assertEqual(False, response.queue_not_matched)
+ self.assertEqual(False, response.key_not_matched)
+
+ # test unmatched queue, unspecified binding
+ response = channel.binding_query(exchange=exchange_name, queue="unused-queue")
+ self.assertEqual(False, response.exchange_not_found)
+ self.assertEqual(False, response.queue_not_found)
+ self.assertEqual(True, response.queue_not_matched)
+
+ # test unspecified queue, unmatched binding
+ response = channel.binding_query(exchange=exchange_name, routing_key="unused-key")
+ self.assertEqual(False, response.exchange_not_found)
+ self.assertEqual(False, response.queue_not_found)
+ self.assertEqual(True, response.key_not_matched)
+
+ # test matched queue, unmatched binding
+ response = channel.binding_query(exchange=exchange_name, queue="used-queue", routing_key="unused-key")
+ self.assertEqual(False, response.exchange_not_found)
+ self.assertEqual(False, response.queue_not_found)
+ self.assertEqual(False, response.queue_not_matched)
+ self.assertEqual(True, response.key_not_matched)
+
+ # test unmatched queue, matched binding
+ response = channel.binding_query(exchange=exchange_name, queue="unused-queue", routing_key="used-key")
+ self.assertEqual(False, response.exchange_not_found)
+ self.assertEqual(False, response.queue_not_found)
+ self.assertEqual(True, response.queue_not_matched)
+ self.assertEqual(False, response.key_not_matched)
+
+ # test unmatched queue, unmatched binding
+ response = channel.binding_query(exchange=exchange_name, queue="unused-queue", routing_key="unused-key")
+ self.assertEqual(False, response.exchange_not_found)
+ self.assertEqual(False, response.queue_not_found)
+ self.assertEqual(True, response.queue_not_matched)
+ self.assertEqual(True, response.key_not_matched)
+
+ #test exchange not found
+ self.assertEqual(True, channel.binding_query(exchange="unknown-exchange").exchange_not_found)
+
+ #test queue not found
+ self.assertEqual(True, channel.binding_query(exchange=exchange_name, queue="unknown-queue").queue_not_found)
+
+
+ def test_binding_query_fanout(self):
+ """
+ Test that the binding_query method works as expected with fanout exchange
+ """
+ channel = self.channel
+ #setup
+ channel.queue_declare(queue="used-queue", exclusive=True)
+ channel.queue_declare(queue="unused-queue", exclusive=True)
+ channel.queue_bind(exchange="amq.fanout", queue="used-queue")
+
+ # test detection of any binding to specific queue
+ response = channel.binding_query(exchange="amq.fanout", queue="used-queue")
+ self.assertEqual(False, response.exchange_not_found)
+ self.assertEqual(False, response.queue_not_found)
+ self.assertEqual(False, response.queue_not_matched)
+
+ # test unmatched queue, unspecified binding
+ response = channel.binding_query(exchange="amq.fanout", queue="unused-queue")
+ self.assertEqual(False, response.exchange_not_found)
+ self.assertEqual(False, response.queue_not_found)
+ self.assertEqual(True, response.queue_not_matched)
+
+ #test exchange not found
+ self.assertEqual(True, channel.binding_query(exchange="unknown-exchange").exchange_not_found)
+
+ #test queue not found
+ self.assertEqual(True, channel.binding_query(exchange="amq.fanout", queue="unknown-queue").queue_not_found)
+
+ def test_binding_query_header(self):
+ """
+ Test that the binding_query method works as expected with headers exchanges
+ """
+ channel = self.channel
+ #setup
+ channel.queue_declare(queue="used-queue", exclusive=True)
+ channel.queue_declare(queue="unused-queue", exclusive=True)
+ channel.queue_bind(exchange="amq.match", queue="used-queue", arguments={"x-match":"all", "a":"A"} )
+
+ # test detection of any binding to specific queue
+ response = channel.binding_query(exchange="amq.match", queue="used-queue")
+ self.assertEqual(False, response.exchange_not_found)
+ self.assertEqual(False, response.queue_not_found)
+ self.assertEqual(False, response.queue_not_matched)
+
+ # test detection of specific binding to any queue
+ response = channel.binding_query(exchange="amq.match", arguments={"x-match":"all", "a":"A"})
+ self.assertEqual(False, response.exchange_not_found)
+ self.assertEqual(False, response.queue_not_found)
+ self.assertEqual(False, response.args_not_matched)
+
+ # test detection of specific binding to specific queue
+ response = channel.binding_query(exchange="amq.match", queue="used-queue", arguments={"x-match":"all", "a":"A"})
+ self.assertEqual(False, response.exchange_not_found)
+ self.assertEqual(False, response.queue_not_found)
+ self.assertEqual(False, response.queue_not_matched)
+ self.assertEqual(False, response.args_not_matched)
+
+ # test unmatched queue, unspecified binding
+ response = channel.binding_query(exchange="amq.match", queue="unused-queue")
+ self.assertEqual(False, response.exchange_not_found)
+ self.assertEqual(False, response.queue_not_found)
+ self.assertEqual(True, response.queue_not_matched)
+
+ # test unspecified queue, unmatched binding
+ response = channel.binding_query(exchange="amq.match", arguments={"x-match":"all", "b":"B"})
+ self.assertEqual(False, response.exchange_not_found)
+ self.assertEqual(False, response.queue_not_found)
+ self.assertEqual(True, response.args_not_matched)
+
+ # test matched queue, unmatched binding
+ response = channel.binding_query(exchange="amq.match", queue="used-queue", arguments={"x-match":"all", "b":"B"})
+ self.assertEqual(False, response.exchange_not_found)
+ self.assertEqual(False, response.queue_not_found)
+ self.assertEqual(False, response.queue_not_matched)
+ self.assertEqual(True, response.args_not_matched)
+
+ # test unmatched queue, matched binding
+ response = channel.binding_query(exchange="amq.match", queue="unused-queue", arguments={"x-match":"all", "a":"A"})
+ self.assertEqual(False, response.exchange_not_found)
+ self.assertEqual(False, response.queue_not_found)
+ self.assertEqual(True, response.queue_not_matched)
+ self.assertEqual(False, response.args_not_matched)
+
+ # test unmatched queue, unmatched binding
+ response = channel.binding_query(exchange="amq.match", queue="unused-queue", arguments={"x-match":"all", "b":"B"})
+ self.assertEqual(False, response.exchange_not_found)
+ self.assertEqual(False, response.queue_not_found)
+ self.assertEqual(True, response.queue_not_matched)
+ self.assertEqual(True, response.args_not_matched)
+
+ #test exchange not found
+ self.assertEqual(True, channel.binding_query(exchange="unknown-exchange").exchange_not_found)
+
+ #test queue not found
+ self.assertEqual(True, channel.binding_query(exchange="amq.match", queue="unknown-queue").queue_not_found)
+
diff --git a/RC9/qpid/python/tests_0-9/queue.py b/RC9/qpid/python/tests_0-9/queue.py
new file mode 100644
index 0000000000..e7fe0b3ed4
--- /dev/null
+++ b/RC9/qpid/python/tests_0-9/queue.py
@@ -0,0 +1,340 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.content import Content
+from qpid.testlib import testrunner, TestBase
+
+class QueueTests(TestBase):
+ """
+ Tests for 'methods' on the amqp queue 'class' (0-9): purge, declare
+ (exclusive/passive variants), bind, unbind and delete.
+ TestBase.setUp supplies an open connection (self.client) and an open
+ channel (self.channel). Each expected failure closes the channel it
+ occurs on, so tests open fresh channels after provoking errors.
+ """
+
+ def test_purge(self):
+ """
+ Test that the purge method removes messages from the queue
+ """
+ channel = self.channel
+ #setup, declare a queue and add some messages to it:
+ channel.exchange_declare(exchange="test-exchange", type="direct")
+ channel.queue_declare(queue="test-queue", exclusive=True)
+ channel.queue_bind(queue="test-queue", exchange="test-exchange", routing_key="key")
+ channel.message_transfer(destination="test-exchange", routing_key="key", body="one")
+ channel.message_transfer(destination="test-exchange", routing_key="key", body="two")
+ channel.message_transfer(destination="test-exchange", routing_key="key", body="three")
+
+ #check that the queue now reports 3 messages:
+ reply = channel.queue_declare(queue="test-queue")
+ self.assertEqual(3, reply.message_count)
+
+ #now do the purge, then test that three messages are purged and the count drops to 0
+ reply = channel.queue_purge(queue="test-queue");
+ self.assertEqual(3, reply.message_count)
+ reply = channel.queue_declare(queue="test-queue")
+ self.assertEqual(0, reply.message_count)
+
+ #send a further message and consume it, ensuring that the other messages are really gone
+ channel.message_transfer(destination="test-exchange", routing_key="key", body="four")
+ channel.message_consume(queue="test-queue", destination="tag", no_ack=True)
+ queue = self.client.queue("tag")
+ msg = queue.get(timeout=1)
+ self.assertEqual("four", msg.body)
+
+ #check error conditions (use new channels, as each failure closes its channel):
+ channel = self.client.channel(2)
+ channel.channel_open()
+ try:
+ #queue specified but doesn't exist: expect channel exception 404 (not-found)
+ channel.queue_purge(queue="invalid-queue")
+ self.fail("Expected failure when purging non-existent queue")
+ except Closed, e:
+ self.assertChannelException(404, e.args[0])
+
+ channel = self.client.channel(3)
+ channel.channel_open()
+ try:
+ #queue not specified and none previously declared for channel:
+ #expect connection exception 530 (not-allowed)
+ channel.queue_purge()
+ self.fail("Expected failure when purging unspecified queue")
+ except Closed, e:
+ self.assertConnectionException(530, e.args[0])
+
+ #cleanup (channels above are closed by the errors, so use a new connection)
+ other = self.connect()
+ channel = other.channel(1)
+ channel.channel_open()
+ channel.exchange_delete(exchange="test-exchange")
+
+ def test_declare_exclusive(self):
+ """
+ Test that the exclusive field is honoured in queue.declare
+ """
+ # TestBase.setUp has already opened channel(1)
+ c1 = self.channel
+ # Here we open a second separate connection:
+ other = self.connect()
+ c2 = other.channel(1)
+ c2.channel_open()
+
+ #declare an exclusive queue:
+ # NOTE(review): exclusive (and passive below) is passed as the string
+ # "True" rather than a boolean throughout this file; any non-empty
+ # string is truthy, but confirm the client encodes it as a bit field.
+ c1.queue_declare(queue="exclusive-queue", exclusive="True")
+ try:
+ #other connection should not be allowed to declare this:
+ c2.queue_declare(queue="exclusive-queue", exclusive="True")
+ self.fail("Expected second exclusive queue_declare to raise a channel exception")
+ except Closed, e:
+ #405 = resource-locked
+ self.assertChannelException(405, e.args[0])
+
+
+ def test_declare_passive(self):
+ """
+ Test that the passive field is honoured in queue.declare
+ """
+ channel = self.channel
+ #declare an exclusive queue, then check a passive re-declare succeeds:
+ channel.queue_declare(queue="passive-queue-1", exclusive="True")
+ channel.queue_declare(queue="passive-queue-1", passive="True")
+ try:
+ #passive declare of a queue that does not exist should fail with 404:
+ channel.queue_declare(queue="passive-queue-2", passive="True")
+ self.fail("Expected passive declaration of non-existant queue to raise a channel exception")
+ except Closed, e:
+ self.assertChannelException(404, e.args[0])
+
+
+ def test_bind(self):
+ """
+ Test various permutations of the queue.bind method
+ """
+ channel = self.channel
+ channel.queue_declare(queue="queue-1", exclusive="True")
+
+ #straightforward case, both exchange & queue exist so no errors expected:
+ channel.queue_bind(queue="queue-1", exchange="amq.direct", routing_key="key1")
+
+ #bind the default queue for the channel (i.e. last one declared):
+ channel.queue_bind(exchange="amq.direct", routing_key="key2")
+
+ #use the queue name where neither routing key nor queue are specified:
+ channel.queue_bind(exchange="amq.direct")
+
+ #try and bind to non-existent exchange
+ try:
+ channel.queue_bind(queue="queue-1", exchange="an-invalid-exchange", routing_key="key1")
+ self.fail("Expected bind to non-existant exchange to fail")
+ except Closed, e:
+ self.assertChannelException(404, e.args[0])
+
+ #need to reopen a channel (the 404 above closed the previous one):
+ channel = self.client.channel(2)
+ channel.channel_open()
+
+ #try and bind non-existent queue:
+ try:
+ channel.queue_bind(queue="queue-2", exchange="amq.direct", routing_key="key1")
+ self.fail("Expected bind of non-existant queue to fail")
+ except Closed, e:
+ self.assertChannelException(404, e.args[0])
+
+ def test_unbind_direct(self):
+ self.unbind_test(exchange="amq.direct", routing_key="key")
+
+ def test_unbind_topic(self):
+ self.unbind_test(exchange="amq.topic", routing_key="key")
+
+ def test_unbind_fanout(self):
+ self.unbind_test(exchange="amq.fanout")
+
+ def test_unbind_headers(self):
+ self.unbind_test(exchange="amq.match", args={ "x-match":"all", "a":"b"}, headers={"a":"b"})
+
+ def unbind_test(self, exchange, routing_key="", args=None, headers={}):
+ # Generic unbind check: bind two queues to the given exchange, publish
+ # (both receive), unbind queue-1, publish again and verify that only
+ # the still-bound queue-2 receives the second message.
+ # NOTE(review): headers={} is a mutable default argument; harmless here
+ # since it is never mutated, but worth confirming.
+ #bind two queues and consume from them
+ channel = self.channel
+
+ channel.queue_declare(queue="queue-1", exclusive="True")
+ channel.queue_declare(queue="queue-2", exclusive="True")
+
+ channel.message_consume(queue="queue-1", destination="queue-1", no_ack=True)
+ channel.message_consume(queue="queue-2", destination="queue-2", no_ack=True)
+
+ queue1 = self.client.queue("queue-1")
+ queue2 = self.client.queue("queue-2")
+
+ channel.queue_bind(exchange=exchange, queue="queue-1", routing_key=routing_key, arguments=args)
+ channel.queue_bind(exchange=exchange, queue="queue-2", routing_key=routing_key, arguments=args)
+
+ #send a message that will match both bindings
+ channel.message_transfer(destination=exchange, routing_key=routing_key, application_headers=headers, body="one")
+
+ #unbind first queue
+ channel.queue_unbind(exchange=exchange, queue="queue-1", routing_key=routing_key, arguments=args)
+
+ #send another message
+ channel.message_transfer(destination=exchange, routing_key=routing_key, application_headers=headers, body="two")
+
+ #check one queue has both messages and the other has only one
+ self.assertEquals("one", queue1.get(timeout=1).body)
+ try:
+ msg = queue1.get(timeout=1)
+ self.fail("Got extra message: %s" % msg.body)
+ except Empty: pass
+
+ self.assertEquals("one", queue2.get(timeout=1).body)
+ self.assertEquals("two", queue2.get(timeout=1).body)
+ try:
+ msg = queue2.get(timeout=1)
+ # NOTE(review): if this branch were ever reached, "str + message
+ # object" would raise TypeError — the queue1 branch above formats
+ # msg.body instead; this line should probably do the same.
+ self.fail("Got extra message: " + msg)
+ except Empty: pass
+
+
+ def test_delete_simple(self):
+ """
+ Test core queue deletion behaviour
+ """
+ channel = self.channel
+
+ #straight-forward case:
+ #(no destination => default exchange, which routes on the queue name)
+ channel.queue_declare(queue="delete-me")
+ channel.message_transfer(routing_key="delete-me", body="a")
+ channel.message_transfer(routing_key="delete-me", body="b")
+ channel.message_transfer(routing_key="delete-me", body="c")
+ reply = channel.queue_delete(queue="delete-me")
+ self.assertEqual(3, reply.message_count)
+ #check that it has gone by declaring passively
+ try:
+ channel.queue_declare(queue="delete-me", passive="True")
+ self.fail("Queue has not been deleted")
+ except Closed, e:
+ self.assertChannelException(404, e.args[0])
+
+ #check attempted deletion of non-existent queue is handled correctly:
+ channel = self.client.channel(2)
+ channel.channel_open()
+ try:
+ channel.queue_delete(queue="i-dont-exist", if_empty="True")
+ self.fail("Expected delete of non-existant queue to fail")
+ except Closed, e:
+ self.assertChannelException(404, e.args[0])
+
+
+
+ def test_delete_ifempty(self):
+ """
+ Test that if_empty field of queue_delete is honoured
+ """
+ channel = self.channel
+
+ #create a queue and add a message to it (use default binding):
+ channel.queue_declare(queue="delete-me-2")
+ channel.queue_declare(queue="delete-me-2", passive="True")
+ channel.message_transfer(routing_key="delete-me-2", body="message")
+
+ #try to delete, but only if empty; 406 = precondition-failed:
+ try:
+ channel.queue_delete(queue="delete-me-2", if_empty="True")
+ self.fail("Expected delete if_empty to fail for non-empty queue")
+ except Closed, e:
+ self.assertChannelException(406, e.args[0])
+
+ #need new channel now (the 406 above closed the previous one):
+ channel = self.client.channel(2)
+ channel.channel_open()
+
+ #empty queue:
+ channel.message_consume(destination="consumer_tag", queue="delete-me-2", no_ack=True)
+ queue = self.client.queue("consumer_tag")
+ msg = queue.get(timeout=1)
+ self.assertEqual("message", msg.body)
+ channel.message_cancel(destination="consumer_tag")
+
+ #retry deletion on empty queue:
+ channel.queue_delete(queue="delete-me-2", if_empty="True")
+
+ #check that it has gone by declaring passively:
+ try:
+ channel.queue_declare(queue="delete-me-2", passive="True")
+ self.fail("Queue has not been deleted")
+ except Closed, e:
+ self.assertChannelException(404, e.args[0])
+
+ def test_delete_ifunused(self):
+ """
+ Test that if_unused field of queue_delete is honoured
+ """
+ channel = self.channel
+
+ #create a queue and register a consumer:
+ channel.queue_declare(queue="delete-me-3")
+ channel.queue_declare(queue="delete-me-3", passive="True")
+ channel.message_consume(destination="consumer_tag", queue="delete-me-3", no_ack=True)
+
+ #need new channel now:
+ channel2 = self.client.channel(2)
+ channel2.channel_open()
+ #try to delete, but only if unused; 406 = precondition-failed:
+ try:
+ channel2.queue_delete(queue="delete-me-3", if_unused="True")
+ self.fail("Expected delete if_unused to fail for queue with existing consumer")
+ except Closed, e:
+ self.assertChannelException(406, e.args[0])
+
+
+ channel.message_cancel(destination="consumer_tag")
+ channel.queue_delete(queue="delete-me-3", if_unused="True")
+ #check that it has gone by declaring passively:
+ try:
+ channel.queue_declare(queue="delete-me-3", passive="True")
+ self.fail("Queue has not been deleted")
+ except Closed, e:
+ self.assertChannelException(404, e.args[0])
+
+
+ def test_autodelete_shared(self):
+ """
+ Test auto-deletion (of non-exclusive queues)
+ """
+ channel = self.channel
+ other = self.connect()
+ channel2 = other.channel(1)
+ channel2.channel_open()
+
+ channel.queue_declare(queue="auto-delete-me", auto_delete=True)
+
+ #consume from both channels
+ # NOTE(review): uses basic_consume/basic_cancel here while the rest of
+ # this file uses message_consume/message_cancel — confirm intentional.
+ reply = channel.basic_consume(queue="auto-delete-me", no_ack=True)
+ channel2.basic_consume(queue="auto-delete-me", no_ack=True)
+
+ #implicit cancel
+ channel2.channel_close()
+
+ #check it is still there (one consumer remains, so no auto-delete yet)
+ channel.queue_declare(queue="auto-delete-me", passive=True)
+
+ #explicit cancel => queue is now unused again:
+ channel.basic_cancel(consumer_tag=reply.consumer_tag)
+
+ #NOTE: this assumes there is no timeout in use
+
+ #check that it has gone by declaring passively
+ try:
+ channel.queue_declare(queue="auto-delete-me", passive=True)
+ self.fail("Expected queue to have been deleted")
+ except Closed, e:
+ self.assertChannelException(404, e.args[0])
+
+
diff --git a/RC9/qpid/python/tests_0-9/testlib.py b/RC9/qpid/python/tests_0-9/testlib.py
new file mode 100644
index 0000000000..f345fbbd80
--- /dev/null
+++ b/RC9/qpid/python/tests_0-9/testlib.py
@@ -0,0 +1,66 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+#
+# Tests for the testlib itself.
+#
+
+from qpid.content import Content
+from qpid.testlib import testrunner, TestBase
+from Queue import Empty
+
+import sys
+from traceback import *
+
+# Debugging aid: a sys.settrace hook that prints the current stack on every
+# trace event and returns itself so tracing continues. Not installed by the
+# tests below; enable manually with sys.settrace(mytrace).
+def mytrace(frame, event, arg):
+ print_stack(frame);
+ print "===="
+ return mytrace
+
+class TestBaseTest(TestBase):
+ """Verify TestBase functions work as expected"""
+
+ def testAssertEmptyPass(self):
+ """Test assert empty works"""
+ self.queue_declare(queue="empty")
+ q = self.consume("empty")
+ #queue has no messages, so assertEmpty must pass silently...
+ self.assertEmpty(q)
+ try:
+ #...and a direct get must time out with Empty
+ q.get(timeout=1)
+ self.fail("Queue is not empty.")
+ except Empty: None # Ignore
+
+ def testAssertEmptyFail(self):
+ """Test that assertEmpty raises when the queue holds a message"""
+ self.queue_declare(queue="full")
+ q = self.consume("full")
+ #an empty body is still a message, so the queue is non-empty
+ self.channel.message_transfer(routing_key="full", body="")
+ try:
+ self.assertEmpty(q);
+ self.fail("assertEmpty did not assert on non-empty queue")
+ except AssertionError: None # Ignore
+
+ def testMessageProperties(self):
+ """Verify properties are passed with message"""
+ props={"x":1, "y":2}
+ self.queue_declare(queue="q")
+ q = self.consume("q")
+ #assertPublishGet publishes with the properties then checks the
+ #consumed message carries them back unchanged
+ self.assertPublishGet(q, routing_key="q", properties=props)
+
+
+
diff --git a/RC9/qpid/python/tests_0-9/tx.py b/RC9/qpid/python/tests_0-9/tx.py
new file mode 100644
index 0000000000..0f6b4f5cd1
--- /dev/null
+++ b/RC9/qpid/python/tests_0-9/tx.py
@@ -0,0 +1,188 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.content import Content
+from qpid.testlib import testrunner, TestBase
+
+class TxTests(TestBase):
+ """
+ Tests for 'methods' on the amqp tx 'class'.
+ Each test uses perform_txn_work to publish seven messages outside a
+ transaction, then (inside the transaction) ack them and publish seven
+ 'TxMessage' replacements; commit/rollback behaviour is then asserted.
+ """
+
+ def test_commit(self):
+ """
+ Test that committed publishes are delivered and committed acks are not re-delivered
+ """
+ channel = self.channel
+ queue_a, queue_b, queue_c = self.perform_txn_work(channel, "tx-commit-a", "tx-commit-b", "tx-commit-c")
+ channel.tx_commit()
+
+ #check results: the TxMessages published under the txn are now delivered
+ for i in range(1, 5):
+ msg = queue_c.get(timeout=1)
+ self.assertEqual("TxMessage %d" % i, msg.body)
+ msg.ok()
+
+ msg = queue_b.get(timeout=1)
+ self.assertEqual("TxMessage 6", msg.body)
+ msg.ok()
+
+ msg = queue_a.get(timeout=1)
+ self.assertEqual("TxMessage 7", msg.body)
+ msg.ok()
+
+ #the committed acks mean the original messages are not re-delivered
+ for q in [queue_a, queue_b, queue_c]:
+ try:
+ extra = q.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.body)
+ except Empty: None
+
+ #cleanup
+ channel.tx_commit()
+
+ def test_auto_rollback(self):
+ """
+ Test that a channel closed with an open transaction is effectively rolled back
+ """
+ # NOTE(review): despite the docstring, this test calls tx_rollback()
+ # explicitly rather than closing the channel — confirm which behaviour
+ # it is meant to exercise (it is currently identical to test_rollback).
+ channel = self.channel
+ queue_a, queue_b, queue_c = self.perform_txn_work(channel, "tx-autorollback-a", "tx-autorollback-b", "tx-autorollback-c")
+
+ #nothing from the uncommitted txn should have been delivered yet
+ for q in [queue_a, queue_b, queue_c]:
+ try:
+ extra = q.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.body)
+ except Empty: None
+
+ channel.tx_rollback()
+
+ #check results: rolled-back acks mean the original messages are re-delivered
+ for i in range(1, 5):
+ msg = queue_a.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.body)
+ msg.ok()
+
+ msg = queue_b.get(timeout=1)
+ self.assertEqual("Message 6", msg.body)
+ msg.ok()
+
+ msg = queue_c.get(timeout=1)
+ self.assertEqual("Message 7", msg.body)
+ msg.ok()
+
+ #rolled-back publishes must not appear
+ for q in [queue_a, queue_b, queue_c]:
+ try:
+ extra = q.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.body)
+ except Empty: None
+
+ #cleanup
+ channel.tx_commit()
+
+ def test_rollback(self):
+ """
+ Test that rolled back publishes are not delivered and rolled back acks are re-delivered
+ """
+ channel = self.channel
+ queue_a, queue_b, queue_c = self.perform_txn_work(channel, "tx-rollback-a", "tx-rollback-b", "tx-rollback-c")
+
+ #nothing from the uncommitted txn should have been delivered yet
+ for q in [queue_a, queue_b, queue_c]:
+ try:
+ extra = q.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.body)
+ except Empty: None
+
+ channel.tx_rollback()
+
+ #check results: rolled-back acks mean the original messages are re-delivered
+ for i in range(1, 5):
+ msg = queue_a.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.body)
+ msg.ok()
+
+ msg = queue_b.get(timeout=1)
+ self.assertEqual("Message 6", msg.body)
+ msg.ok()
+
+ msg = queue_c.get(timeout=1)
+ self.assertEqual("Message 7", msg.body)
+ msg.ok()
+
+ #rolled-back publishes must not appear
+ for q in [queue_a, queue_b, queue_c]:
+ try:
+ extra = q.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.body)
+ except Empty: None
+
+ #cleanup
+ channel.tx_commit()
+
+ def perform_txn_work(self, channel, name_a, name_b, name_c):
+ """
+ Utility method that does some setup and some work under a transaction. Used for testing both
+ commit and rollback
+ """
+ #setup: three exclusive queues; a uses the default binding (queue name),
+ #b is bound to amq.direct, c to amq.topic
+ channel.queue_declare(queue=name_a, exclusive=True)
+ channel.queue_declare(queue=name_b, exclusive=True)
+ channel.queue_declare(queue=name_c, exclusive=True)
+
+ key = "my_key_" + name_b
+ topic = "my_topic_" + name_c
+
+ channel.queue_bind(queue=name_b, exchange="amq.direct", routing_key=key)
+ channel.queue_bind(queue=name_c, exchange="amq.topic", routing_key=topic)
+
+ #publish seven messages outside the transaction (delivered immediately)
+ # NOTE(review): the numbering has no "Message 5" — range(1, 5) yields
+ # 1..4, then 6 and 7 follow; confirm this is intended.
+ for i in range(1, 5):
+ channel.message_transfer(routing_key=name_a, body="Message %d" % i)
+
+ channel.message_transfer(routing_key=key, destination="amq.direct", body="Message 6")
+ channel.message_transfer(routing_key=topic, destination="amq.topic", body="Message 7")
+
+ #everything below happens inside the transaction
+ channel.tx_select()
+
+ #consume and ack messages
+ channel.message_consume(queue=name_a, destination="sub_a", no_ack=False)
+ queue_a = self.client.queue("sub_a")
+ for i in range(1, 5):
+ msg = queue_a.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.body)
+
+ # presumably acks this message together with the preceding three (i.e.
+ # messages 1-4 as a batch) — TODO confirm the client's ok(batchoffset)
+ # semantics
+ msg.ok(batchoffset=-3)
+
+ channel.message_consume(queue=name_b, destination="sub_b", no_ack=False)
+ queue_b = self.client.queue("sub_b")
+ msg = queue_b.get(timeout=1)
+ self.assertEqual("Message 6", msg.body)
+ msg.ok()
+
+ # NOTE(review): sub_c (the consume reply) is never used afterwards
+ sub_c = channel.message_consume(queue=name_c, destination="sub_c", no_ack=False)
+ queue_c = self.client.queue("sub_c")
+ msg = queue_c.get(timeout=1)
+ self.assertEqual("Message 7", msg.body)
+ msg.ok()
+
+ #publish messages under the txn (not delivered until commit)
+ for i in range(1, 5):
+ channel.message_transfer(routing_key=topic, destination="amq.topic", body="TxMessage %d" % i)
+
+ channel.message_transfer(routing_key=key, destination="amq.direct", body="TxMessage 6")
+ channel.message_transfer(routing_key=name_a, body="TxMessage 7")
+
+ return queue_a, queue_b, queue_c