author      Alex Rudyy <orudyy@apache.org>  2013-01-25 18:20:39 +0000
committer   Alex Rudyy <orudyy@apache.org>  2013-01-25 18:20:39 +0000
commit      2e6141970e4d4b74abbe415707e7b2f15d9b3078 (patch)
tree        d53c9c625d1dc7698b0a695ee34717df210177b3
parent      e77004f92ea7b77dfcddb95652d409ba3e75a272 (diff)
download    qpid-python-2e6141970e4d4b74abbe415707e7b2f15d9b3078.tar.gz
merge from trunk up to revision 1438053
git-svn-id: https://svn.apache.org/repos/asf/qpid/branches/java-broker-config-qpid-4390@1438629 13f79535-47bb-0310-9956-ffa450edef68
-rwxr-xr-xqpid/bin/release.sh1
-rw-r--r--qpid/cpp/CMakeLists.txt3
-rw-r--r--qpid/cpp/INSTALL80
-rw-r--r--qpid/cpp/Makefile.am2
-rw-r--r--qpid/cpp/bindings/qmf/Makefile.am1
-rw-r--r--qpid/cpp/bindings/qmf2/Makefile.am1
-rw-r--r--qpid/cpp/bindings/qpid/Makefile.am5
-rw-r--r--qpid/cpp/bindings/qpid/perl/ChangeLog5
-rw-r--r--qpid/cpp/bindings/qpid/perl/qpid.pm134
-rw-r--r--qpid/cpp/bindings/qpid/perl/t/Address.t102
-rw-r--r--qpid/cpp/bindings/qpid/perl/t/Duration.t124
-rw-r--r--qpid/cpp/bindings/qpid/perl/t/Message.t268
-rw-r--r--qpid/cpp/bindings/qpid/perl/t/utils.pm (renamed from qpid/cpp/examples/messaging/extra_dist/Makefile)26
-rw-r--r--qpid/cpp/bindings/qpid/perl/test/test-null-inside-map.pl59
-rw-r--r--qpid/cpp/bindings/qpid/ruby/lib/qpid_messaging/duration.rb16
-rw-r--r--qpid/cpp/bindings/qpid/ruby/spec/qpid_messaging/duration_spec.rb27
-rw-r--r--qpid/cpp/configure.ac1
-rw-r--r--qpid/cpp/examples/messaging/CMakeLists.txt53
-rw-r--r--qpid/cpp/examples/messaging/Makefile.am2
-rw-r--r--qpid/cpp/examples/messaging/extra_dist/CMakeLists.txt62
-rw-r--r--qpid/cpp/include/qpid/Url.h7
-rw-r--r--qpid/cpp/include/qpid/qpid.i31
-rw-r--r--qpid/cpp/include/qpid/sys/SystemInfo.h6
-rw-r--r--qpid/cpp/src/CMakeLists.txt50
-rw-r--r--qpid/cpp/src/Makefile.am5
-rw-r--r--qpid/cpp/src/finddb.cmake74
-rwxr-xr-xqpid/cpp/src/generate.sh67
-rw-r--r--qpid/cpp/src/legacystore.cmake157
-rw-r--r--qpid/cpp/src/qpid/Modules.cpp49
-rw-r--r--qpid/cpp/src/qpid/Modules.h2
-rw-r--r--qpid/cpp/src/qpid/Options.cpp53
-rw-r--r--qpid/cpp/src/qpid/Url.cpp13
-rw-r--r--qpid/cpp/src/qpid/UrlArray.h6
-rw-r--r--qpid/cpp/src/qpid/acl/AclConnectionCounter.cpp92
-rw-r--r--qpid/cpp/src/qpid/agent/ManagementAgentImpl.h4
-rw-r--r--qpid/cpp/src/qpid/amqp/descriptors.h4
-rw-r--r--qpid/cpp/src/qpid/amqp_0_10/Connection.cpp4
-rw-r--r--qpid/cpp/src/qpid/amqp_0_10/Connection.h4
-rw-r--r--qpid/cpp/src/qpid/broker/Bridge.cpp2
-rw-r--r--qpid/cpp/src/qpid/broker/Broker.cpp121
-rw-r--r--qpid/cpp/src/qpid/broker/Broker.h77
-rw-r--r--qpid/cpp/src/qpid/broker/Connection.cpp49
-rw-r--r--qpid/cpp/src/qpid/broker/Connection.h30
-rw-r--r--qpid/cpp/src/qpid/broker/ConnectionHandler.cpp4
-rw-r--r--qpid/cpp/src/qpid/broker/ConnectionState.cpp38
-rw-r--r--qpid/cpp/src/qpid/broker/ConnectionState.h32
-rw-r--r--qpid/cpp/src/qpid/broker/ConsumerFactory.h7
-rw-r--r--qpid/cpp/src/qpid/broker/DtxManager.cpp41
-rw-r--r--qpid/cpp/src/qpid/broker/DtxManager.h18
-rw-r--r--qpid/cpp/src/qpid/broker/DtxWorkRecord.cpp23
-rw-r--r--qpid/cpp/src/qpid/broker/DtxWorkRecord.h13
-rw-r--r--qpid/cpp/src/qpid/broker/Exchange.cpp3
-rw-r--r--qpid/cpp/src/qpid/broker/ExchangeRegistry.cpp5
-rw-r--r--qpid/cpp/src/qpid/broker/Link.cpp212
-rw-r--r--qpid/cpp/src/qpid/broker/Link.h10
-rw-r--r--qpid/cpp/src/qpid/broker/LinkRegistry.cpp78
-rw-r--r--qpid/cpp/src/qpid/broker/LinkRegistry.h15
-rw-r--r--qpid/cpp/src/qpid/broker/MessageGroupManager.cpp110
-rw-r--r--qpid/cpp/src/qpid/broker/MessageGroupManager.h16
-rw-r--r--qpid/cpp/src/qpid/broker/MessageStore.h14
-rw-r--r--qpid/cpp/src/qpid/broker/MessageStoreModule.cpp5
-rw-r--r--qpid/cpp/src/qpid/broker/MessageStoreModule.h1
-rw-r--r--qpid/cpp/src/qpid/broker/Messages.h7
-rw-r--r--qpid/cpp/src/qpid/broker/NullMessageStore.cpp2
-rw-r--r--qpid/cpp/src/qpid/broker/NullMessageStore.h1
-rw-r--r--qpid/cpp/src/qpid/broker/Queue.cpp16
-rw-r--r--qpid/cpp/src/qpid/broker/Queue.h11
-rw-r--r--qpid/cpp/src/qpid/broker/QueueCleaner.cpp33
-rw-r--r--qpid/cpp/src/qpid/broker/QueueCleaner.h20
-rw-r--r--qpid/cpp/src/qpid/broker/QueueFlowLimit.cpp82
-rw-r--r--qpid/cpp/src/qpid/broker/QueueFlowLimit.h9
-rw-r--r--qpid/cpp/src/qpid/broker/QueueRegistry.cpp4
-rw-r--r--qpid/cpp/src/qpid/broker/SaslAuthenticator.cpp11
-rw-r--r--qpid/cpp/src/qpid/broker/SecureConnectionFactory.cpp6
-rw-r--r--qpid/cpp/src/qpid/broker/SemanticState.cpp60
-rw-r--r--qpid/cpp/src/qpid/broker/SemanticState.h205
-rw-r--r--qpid/cpp/src/qpid/broker/SessionAdapter.cpp3
-rw-r--r--qpid/cpp/src/qpid/broker/SessionHandler.cpp12
-rw-r--r--qpid/cpp/src/qpid/broker/SessionHandler.h12
-rw-r--r--qpid/cpp/src/qpid/broker/SessionState.cpp12
-rw-r--r--qpid/cpp/src/qpid/broker/SessionState.h17
-rw-r--r--qpid/cpp/src/qpid/broker/StatefulQueueObserver.h63
-rw-r--r--qpid/cpp/src/qpid/broker/TxAccept.h3
-rw-r--r--qpid/cpp/src/qpid/broker/windows/SaslAuthenticator.cpp1
-rw-r--r--qpid/cpp/src/qpid/broker/windows/SslProtocolFactory.cpp746
-rw-r--r--qpid/cpp/src/qpid/client/LoadPlugins.cpp2
-rw-r--r--qpid/cpp/src/qpid/client/SessionImpl.cpp108
-rw-r--r--qpid/cpp/src/qpid/client/SessionImpl.h19
-rw-r--r--qpid/cpp/src/qpid/client/SslConnector.cpp22
-rw-r--r--qpid/cpp/src/qpid/framing/FrameSet.h8
-rw-r--r--qpid/cpp/src/qpid/ha/Backup.cpp100
-rw-r--r--qpid/cpp/src/qpid/ha/Backup.h20
-rw-r--r--qpid/cpp/src/qpid/ha/BrokerInfo.cpp5
-rw-r--r--qpid/cpp/src/qpid/ha/BrokerInfo.h3
-rw-r--r--qpid/cpp/src/qpid/ha/BrokerReplicator.cpp59
-rw-r--r--qpid/cpp/src/qpid/ha/BrokerReplicator.h2
-rw-r--r--qpid/cpp/src/qpid/ha/HaBroker.cpp231
-rw-r--r--qpid/cpp/src/qpid/ha/HaBroker.h56
-rw-r--r--qpid/cpp/src/qpid/ha/HaPlugin.cpp2
-rw-r--r--qpid/cpp/src/qpid/ha/Membership.cpp110
-rw-r--r--qpid/cpp/src/qpid/ha/Membership.h45
-rw-r--r--qpid/cpp/src/qpid/ha/Primary.cpp35
-rw-r--r--qpid/cpp/src/qpid/ha/Primary.h14
-rw-r--r--qpid/cpp/src/qpid/ha/QueueGuard.cpp2
-rw-r--r--qpid/cpp/src/qpid/ha/QueueReplicator.cpp4
-rw-r--r--qpid/cpp/src/qpid/ha/RemoteBackup.cpp16
-rw-r--r--qpid/cpp/src/qpid/ha/RemoteBackup.h9
-rw-r--r--qpid/cpp/src/qpid/ha/ReplicatingSubscription.h2
-rw-r--r--qpid/cpp/src/qpid/ha/Role.h55
-rw-r--r--qpid/cpp/src/qpid/ha/StandAlone.h (renamed from qpid/gentools/templ.cpp/model/AMQP_HighestVersion.h.tmpl)43
-rw-r--r--qpid/cpp/src/qpid/ha/StatusCheck.h9
-rw-r--r--qpid/cpp/src/qpid/legacystore/BindingDbt.cpp50
-rw-r--r--qpid/cpp/src/qpid/legacystore/BindingDbt.h56
-rw-r--r--qpid/cpp/src/qpid/legacystore/BufferValue.cpp56
-rw-r--r--qpid/cpp/src/qpid/legacystore/BufferValue.h46
-rw-r--r--qpid/cpp/src/qpid/legacystore/Cursor.h50
-rw-r--r--qpid/cpp/src/qpid/legacystore/DataTokenImpl.cpp28
-rw-r--r--qpid/cpp/src/qpid/legacystore/DataTokenImpl.h47
-rw-r--r--qpid/cpp/src/qpid/legacystore/IdDbt.cpp42
-rw-r--r--qpid/cpp/src/qpid/legacystore/IdDbt.h42
-rw-r--r--qpid/cpp/src/qpid/legacystore/IdSequence.cpp40
-rw-r--r--qpid/cpp/src/qpid/legacystore/IdSequence.h44
-rw-r--r--qpid/cpp/src/qpid/legacystore/JournalImpl.cpp633
-rw-r--r--qpid/cpp/src/qpid/legacystore/JournalImpl.h265
-rw-r--r--qpid/cpp/src/qpid/legacystore/MessageStoreImpl.cpp1732
-rw-r--r--qpid/cpp/src/qpid/legacystore/MessageStoreImpl.h380
-rw-r--r--qpid/cpp/src/qpid/legacystore/PreparedTransaction.cpp81
-rw-r--r--qpid/cpp/src/qpid/legacystore/PreparedTransaction.h74
-rw-r--r--qpid/cpp/src/qpid/legacystore/StoreException.h56
-rw-r--r--qpid/cpp/src/qpid/legacystore/StorePlugin.cpp81
-rw-r--r--qpid/cpp/src/qpid/legacystore/TxnCtxt.cpp184
-rw-r--r--qpid/cpp/src/qpid/legacystore/TxnCtxt.h117
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/aio.cpp41
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/aio.h153
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/aio_callback.h (renamed from qpid/gentools/templ.cpp/model/AMQP_ClientProxy.cpp.tmpl)55
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/cvar.cpp33
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/cvar.h87
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/data_tok.cpp194
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/data_tok.h172
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/deq_hdr.h141
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/deq_rec.cpp459
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/deq_rec.h103
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/enq_hdr.h165
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/enq_map.cpp183
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/enq_map.h127
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/enq_rec.cpp638
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/enq_rec.h116
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/enums.h108
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/fcntl.cpp375
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/fcntl.h156
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/file_hdr.h211
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/jcfg.h91
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/jcntl.cpp984
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/jcntl.h722
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/jdir.cpp463
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/jdir.h379
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/jerrno.cpp253
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/jerrno.h173
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/jexception.cpp183
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/jexception.h142
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/jinf.cpp540
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/jinf.h133
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/jrec.cpp119
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/jrec.h183
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/lp_map.cpp82
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/lp_map.h83
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/lpmgr.cpp226
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/lpmgr.h303
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/pmgr.cpp215
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/pmgr.h142
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/rcvdat.h181
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/rec_hdr.h143
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/rec_tail.h98
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/rfc.cpp82
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/rfc.h193
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/rmgr.cpp698
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/rmgr.h114
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/rrfc.cpp125
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/rrfc.h179
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/slock.cpp33
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/slock.h85
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/smutex.cpp33
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/smutex.h64
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/time_ns.cpp (renamed from qpid/gentools/templ.cpp/model/AMQP_ServerProxy.cpp.tmpl)60
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/time_ns.h105
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/txn_hdr.h125
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/txn_map.cpp256
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/txn_map.h159
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/txn_rec.cpp447
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/txn_rec.h101
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/wmgr.cpp1051
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/wmgr.h147
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/wrfc.cpp162
-rw-r--r--qpid/cpp/src/qpid/legacystore/jrnl/wrfc.h154
-rw-r--r--qpid/cpp/src/qpid/legacystore/management-schema.xml99
-rw-r--r--qpid/cpp/src/qpid/management/ManagementAgent.cpp264
-rw-r--r--qpid/cpp/src/qpid/management/ManagementAgent.h54
-rw-r--r--qpid/cpp/src/qpid/store/MessageStorePlugin.cpp6
-rw-r--r--qpid/cpp/src/qpid/store/MessageStorePlugin.h26
-rw-r--r--qpid/cpp/src/qpid/store/StorageProvider.h14
-rw-r--r--qpid/cpp/src/qpid/store/ms-clfs/MSSqlClfsProvider.cpp20
-rw-r--r--qpid/cpp/src/qpid/store/ms-sql/MSSqlProvider.cpp19
-rw-r--r--qpid/cpp/src/qpid/sys/AsynchIOHandler.cpp4
-rw-r--r--qpid/cpp/src/qpid/sys/AsynchIOHandler.h4
-rwxr-xr-xqpid/cpp/src/qpid/sys/FileSysDir.h9
-rw-r--r--qpid/cpp/src/qpid/sys/OutputControl.h8
-rw-r--r--qpid/cpp/src/qpid/sys/ProtocolFactory.h1
-rw-r--r--qpid/cpp/src/qpid/sys/RdmaIOPlugin.cpp6
-rw-r--r--qpid/cpp/src/qpid/sys/SslPlugin.cpp34
-rw-r--r--qpid/cpp/src/qpid/sys/TCPIOPlugin.cpp33
-rw-r--r--qpid/cpp/src/qpid/sys/Timer.cpp15
-rw-r--r--qpid/cpp/src/qpid/sys/Timer.h6
-rw-r--r--qpid/cpp/src/qpid/sys/posix/BSDSocket.cpp5
-rwxr-xr-xqpid/cpp/src/qpid/sys/posix/FileSysDir.cpp26
-rwxr-xr-xqpid/cpp/src/qpid/sys/posix/SystemInfo.cpp45
-rwxr-xr-xqpid/cpp/src/qpid/sys/solaris/SystemInfo.cpp25
-rw-r--r--qpid/cpp/src/qpid/sys/ssl/util.cpp19
-rw-r--r--qpid/cpp/src/qpid/sys/windows/FileSysDir.cpp35
-rwxr-xr-xqpid/cpp/src/qpid/sys/windows/SystemInfo.cpp29
-rw-r--r--qpid/cpp/src/qpid/types/Variant.cpp21
-rw-r--r--qpid/cpp/src/qpidd.cpp8
-rw-r--r--qpid/cpp/src/ssl.mk3
-rw-r--r--qpid/cpp/src/tests/.valgrind.supp23
-rw-r--r--qpid/cpp/src/tests/BrokerMgmtAgent.cpp356
-rw-r--r--qpid/cpp/src/tests/CMakeLists.txt8
-rw-r--r--qpid/cpp/src/tests/ClusterFailover.cpp115
-rw-r--r--qpid/cpp/src/tests/ClusterFixture.cpp160
-rw-r--r--qpid/cpp/src/tests/ClusterFixture.h115
-rw-r--r--qpid/cpp/src/tests/ForkedBroker.cpp157
-rw-r--r--qpid/cpp/src/tests/ForkedBroker.h82
-rw-r--r--qpid/cpp/src/tests/InitialStatusMap.cpp239
-rw-r--r--qpid/cpp/src/tests/Makefile.am11
-rw-r--r--qpid/cpp/src/tests/MessageUtils.h5
-rw-r--r--qpid/cpp/src/tests/PartialFailure.cpp291
-rw-r--r--qpid/cpp/src/tests/QueueTest.cpp1
-rw-r--r--qpid/cpp/src/tests/StoreStatus.cpp117
-rw-r--r--qpid/cpp/src/tests/Variant.cpp10
-rwxr-xr-xqpid/cpp/src/tests/benchmark95
-rw-r--r--qpid/cpp/src/tests/brokertest.py92
-rw-r--r--qpid/cpp/src/tests/cluster_authentication_soak.cpp310
-rwxr-xr-xqpid/cpp/src/tests/cluster_failover19
-rwxr-xr-xqpid/cpp/src/tests/cluster_python_tests28
-rw-r--r--qpid/cpp/src/tests/cluster_python_tests_failing.txt4
-rwxr-xr-xqpid/cpp/src/tests/cluster_read_credit29
-rw-r--r--qpid/cpp/src/tests/cluster_test.cpp1231
-rwxr-xr-xqpid/cpp/src/tests/cluster_test_logs.py123
-rw-r--r--qpid/cpp/src/tests/cluster_test_scripts/README.txt20
-rwxr-xr-xqpid/cpp/src/tests/cluster_test_scripts/cluster_check37
-rwxr-xr-xqpid/cpp/src/tests/cluster_test_scripts/cluster_start56
-rwxr-xr-xqpid/cpp/src/tests/cluster_test_scripts/cluster_stop38
-rwxr-xr-xqpid/cpp/src/tests/cluster_test_scripts/config_example.sh44
-rwxr-xr-xqpid/cpp/src/tests/cluster_test_scripts/perftest54
-rw-r--r--qpid/cpp/src/tests/cluster_tests.fail3
-rwxr-xr-xqpid/cpp/src/tests/cluster_tests.py1834
-rw-r--r--qpid/cpp/src/tests/failover_soak.cpp827
-rwxr-xr-xqpid/cpp/src/tests/federated_cluster_test153
-rwxr-xr-xqpid/cpp/src/tests/federation_sys.py1251
-rwxr-xr-xqpid/cpp/src/tests/ha_tests.py48
-rwxr-xr-xqpid/cpp/src/tests/ipv6_test40
-rw-r--r--qpid/cpp/src/tests/legacystore/.valgrind.supp35
-rw-r--r--qpid/cpp/src/tests/legacystore/.valgrindrc7
-rw-r--r--qpid/cpp/src/tests/legacystore/CMakeLists.txt117
-rw-r--r--qpid/cpp/src/tests/legacystore/MessageUtils.h105
-rw-r--r--qpid/cpp/src/tests/legacystore/OrderingTest.cpp168
-rw-r--r--qpid/cpp/src/tests/legacystore/SimpleTest.cpp497
-rw-r--r--qpid/cpp/src/tests/legacystore/TestFramework.cpp30
-rw-r--r--qpid/cpp/src/tests/legacystore/TestFramework.h37
-rw-r--r--qpid/cpp/src/tests/legacystore/TransactionalTest.cpp351
-rw-r--r--qpid/cpp/src/tests/legacystore/TwoPhaseCommitTest.cpp675
-rw-r--r--[-rwxr-xr-x]qpid/cpp/src/tests/legacystore/clean.sh (renamed from qpid/cpp/src/tests/stop_cluster)23
-rw-r--r--qpid/cpp/src/tests/legacystore/persistence.py574
-rw-r--r--[-rwxr-xr-x]qpid/cpp/src/tests/legacystore/run_long_python_tests (renamed from qpid/cpp/src/tests/federated_cluster_test_with_node_failure)8
-rw-r--r--qpid/cpp/src/tests/legacystore/run_python_tests64
-rw-r--r--[-rwxr-xr-x]qpid/cpp/src/tests/legacystore/run_short_python_tests (renamed from qpid/cpp/src/tests/run_long_cluster_tests)9
-rw-r--r--qpid/cpp/src/tests/legacystore/run_test69
-rw-r--r--[-rwxr-xr-x]qpid/cpp/src/tests/legacystore/start_broker (renamed from qpid/cpp/src/tests/run_cluster_test)14
-rw-r--r--[-rwxr-xr-x]qpid/cpp/src/tests/legacystore/stop_broker (renamed from qpid/cpp/src/tests/cpg_check.sh.in)36
-rw-r--r--qpid/cpp/src/tests/legacystore/system_test.sh51
-rw-r--r--qpid/cpp/src/tests/legacystore/tests_env.sh260
-rw-r--r--qpid/cpp/src/tests/legacystore/unit_test.cpp28
-rw-r--r--qpid/cpp/src/tests/legacystore/unit_test.h69
-rwxr-xr-xqpid/cpp/src/tests/long_cluster_tests.py38
-rwxr-xr-xqpid/cpp/src/tests/qpid-test-cluster109
-rwxr-xr-xqpid/cpp/src/tests/restart_cluster38
-rwxr-xr-xqpid/cpp/src/tests/run_cluster_authentication_soak27
-rwxr-xr-xqpid/cpp/src/tests/run_cluster_authentication_test27
-rwxr-xr-xqpid/cpp/src/tests/run_cluster_tests39
-rwxr-xr-xqpid/cpp/src/tests/run_failover_soak39
-rwxr-xr-xqpid/cpp/src/tests/run_federation_sys_tests28
-rwxr-xr-xqpid/cpp/src/tests/sasl_fed_ex92
-rwxr-xr-xqpid/cpp/src/tests/sasl_fed_ex_dynamic_cluster30
-rwxr-xr-xqpid/cpp/src/tests/sasl_fed_ex_link_cluster29
-rwxr-xr-xqpid/cpp/src/tests/sasl_fed_ex_queue_cluster29
-rwxr-xr-xqpid/cpp/src/tests/sasl_fed_ex_route_cluster29
-rwxr-xr-xqpid/cpp/src/tests/ssl_test26
-rwxr-xr-xqpid/cpp/src/tests/start_cluster43
-rwxr-xr-xqpid/cpp/src/tests/start_cluster_hosts70
-rw-r--r--qpid/cpp/src/tests/test_env.sh.in2
-rw-r--r--qpid/cpp/src/tests/test_store.cpp1
-rw-r--r--qpid/cpp/src/tests/testlib.py766
-rwxr-xr-xqpid/cpp/src/tests/verify_cluster_objects107
-rw-r--r--qpid/cpp/src/versions.cmake2
-rw-r--r--qpid/cpp/src/windows/QpiddBroker.cpp1
-rw-r--r--qpid/cpp/xml/cluster.xml339
-rw-r--r--qpid/doc/book/src/cpp-broker/AMQP-Messaging-Broker-CPP-Book.xml1
-rw-r--r--qpid/doc/book/src/cpp-broker/Active-Active-Cluster.xml561
-rw-r--r--qpid/doc/book/src/cpp-broker/Active-Passive-Cluster.xml38
-rw-r--r--qpid/doc/book/src/java-broker/Java-Broker-Runtime-Handling-Undeliverable-Messages.xml169
-rw-r--r--qpid/doc/book/src/java-broker/Java-Broker-Runtime.xml1
-rw-r--r--qpid/extras/nexus/CMakeLists.txt94
-rw-r--r--qpid/extras/nexus/include/qpid/nexus/alloc.h70
-rw-r--r--qpid/extras/nexus/include/qpid/nexus/container.h122
-rw-r--r--qpid/extras/nexus/include/qpid/nexus/ctools.h146
-rw-r--r--qpid/extras/nexus/include/qpid/nexus/hash.h37
-rw-r--r--qpid/extras/nexus/include/qpid/nexus/iterator.h114
-rw-r--r--qpid/extras/nexus/include/qpid/nexus/log.h31
-rw-r--r--qpid/extras/nexus/include/qpid/nexus/message.h162
-rw-r--r--qpid/extras/nexus/include/qpid/nexus/server.h403
-rw-r--r--qpid/extras/nexus/include/qpid/nexus/threading.h45
-rw-r--r--qpid/extras/nexus/include/qpid/nexus/timer.h86
-rw-r--r--qpid/extras/nexus/include/qpid/nexus/user_fd.h121
-rw-r--r--qpid/extras/nexus/site/css/style.css280
-rw-r--r--qpid/extras/nexus/site/images/gwarch.diabin0 -> 1370 bytes
-rw-r--r--qpid/extras/nexus/site/images/gwarch.pngbin0 -> 7941 bytes
-rw-r--r--qpid/extras/nexus/site/includes/footer.include7
-rw-r--r--qpid/extras/nexus/site/includes/header.include6
-rw-r--r--qpid/extras/nexus/site/includes/menu.include71
-rwxr-xr-xqpid/extras/nexus/site/index.html98
-rw-r--r--qpid/extras/nexus/src/alloc.c202
-rw-r--r--qpid/extras/nexus/src/alloc_private.h26
-rw-r--r--qpid/extras/nexus/src/auth.c75
-rw-r--r--qpid/extras/nexus/src/auth.h27
-rw-r--r--qpid/extras/nexus/src/container.c620
-rw-r--r--qpid/extras/nexus/src/hash.c223
-rw-r--r--qpid/extras/nexus/src/iterator.c268
-rw-r--r--qpid/extras/nexus/src/log.c56
-rw-r--r--qpid/extras/nexus/src/message.c1164
-rw-r--r--qpid/extras/nexus/src/posix/threading.c126
-rw-r--r--qpid/extras/nexus/src/server.c903
-rw-r--r--qpid/extras/nexus/src/server_private.h95
-rw-r--r--qpid/extras/nexus/src/timer.c240
-rw-r--r--qpid/extras/nexus/src/timer_private.h51
-rw-r--r--qpid/extras/nexus/src/work_queue.c132
-rw-r--r--qpid/extras/nexus/src/work_queue.h33
-rw-r--r--qpid/extras/nexus/tests/CMakeLists.txt34
-rw-r--r--qpid/extras/nexus/tests/alloc_test.c86
-rw-r--r--qpid/extras/nexus/tests/message_test.c131
-rw-r--r--qpid/extras/nexus/tests/run_tests.c (renamed from qpid/gentools/templ.java/model/version/AmqpConstantsClass.vm)29
-rw-r--r--qpid/extras/nexus/tests/server_test.c195
-rw-r--r--qpid/extras/nexus/tests/test_case.h36
-rw-r--r--qpid/extras/nexus/tests/timer_test.c386
-rw-r--r--qpid/extras/nexus/tests/tool_test.c159
-rw-r--r--qpid/gentools/LICENSE202
-rw-r--r--qpid/gentools/NOTICE2
-rw-r--r--qpid/gentools/README.txt61
-rwxr-xr-xqpid/gentools/build37
-rw-r--r--qpid/gentools/build.xml43
-rw-r--r--qpid/gentools/lib/LICENSE0
-rw-r--r--qpid/gentools/lib/NOTICE0
-rw-r--r--qpid/gentools/lib/README.txt0
-rw-r--r--qpid/gentools/lib/velocity-1.4.jarbin361173 -> 0 bytes
-rw-r--r--qpid/gentools/lib/velocity-dep-1.4.jarbin517761 -> 0 bytes
-rw-r--r--qpid/gentools/templ.cpp/method/MethodBodyClass.h.tmpl112
-rw-r--r--qpid/gentools/templ.cpp/model/AMQP_ClientOperations.h.tmpl82
-rw-r--r--qpid/gentools/templ.cpp/model/AMQP_ClientProxy.h.tmpl75
-rw-r--r--qpid/gentools/templ.cpp/model/AMQP_Constants.h.tmpl34
-rw-r--r--qpid/gentools/templ.cpp/model/AMQP_MethodVersionMap.cpp.tmpl62
-rw-r--r--qpid/gentools/templ.cpp/model/AMQP_MethodVersionMap.h.tmpl57
-rw-r--r--qpid/gentools/templ.cpp/model/AMQP_ServerOperations.h.tmpl83
-rw-r--r--qpid/gentools/templ.cpp/model/AMQP_ServerProxy.h.tmpl74
-rw-r--r--qpid/gentools/templ.java/PropertyContentHeaderClass.tmpl208
-rw-r--r--qpid/gentools/templ.java/method/version/MethodBodyClass.vm190
-rw-r--r--qpid/gentools/templ.java/model/ProtocolVersionListClass.vm154
-rw-r--r--qpid/gentools/templ.java/model/version/MethodRegistryClass.vm145
-rw-r--r--qpid/gentools/xml-src/amqp-0.10.test.xml4241
-rw-r--r--qpid/gentools/xml-src/amqp-0.8.test.xml3959
-rw-r--r--qpid/gentools/xml-src/amqp-0.9.test.xml4282
-rw-r--r--qpid/gentools/xml-src/cluster-0.9.test.xml59
-rw-r--r--qpid/java/bdbstore/src/main/java/org/apache/qpid/server/store/berkeleydb/AbstractBDBMessageStore.java16
-rw-r--r--qpid/java/bdbstore/src/main/java/org/apache/qpid/server/store/berkeleydb/CommitThreadWrapper.java31
-rw-r--r--qpid/java/broker-plugins/management-jmx/src/main/java/org/apache/qpid/server/jmx/mbeans/QueueMBean.java5
-rw-r--r--qpid/java/broker-plugins/management-jmx/src/main/java/org/apache/qpid/server/jmx/mbeans/VirtualHostManagerMBean.java5
-rw-r--r--qpid/java/broker/build.xml2
-rw-r--r--qpid/java/broker/etc/log4j.xml6
-rw-r--r--qpid/java/broker/src/main/java/org/apache/qpid/server/AMQChannel.java96
-rw-r--r--qpid/java/broker/src/main/java/org/apache/qpid/server/TransactionTimeoutHelper.java69
-rw-r--r--qpid/java/broker/src/main/java/org/apache/qpid/server/output/ProtocolOutputConverterImpl.java117
-rw-r--r--qpid/java/broker/src/main/java/org/apache/qpid/server/protocol/AMQProtocolEngine.java17
-rw-r--r--qpid/java/broker/src/main/java/org/apache/qpid/server/protocol/v1_0/ExchangeDestination.java2
-rwxr-xr-xqpid/java/broker/src/main/java/org/apache/qpid/server/queue/InboundMessageAdapter.java2
-rw-r--r--qpid/java/broker/src/main/java/org/apache/qpid/server/queue/QueueEntryImpl.java2
-rw-r--r--qpid/java/broker/src/main/java/org/apache/qpid/server/queue/SimpleAMQQueue.java2
-rw-r--r--qpid/java/broker/src/main/java/org/apache/qpid/server/transport/ServerSession.java56
-rwxr-xr-xqpid/java/broker/src/main/java/org/apache/qpid/server/txn/AsyncAutoCommitTransaction.java9
-rwxr-xr-xqpid/java/broker/src/main/java/org/apache/qpid/server/txn/AutoCommitTransaction.java11
-rw-r--r--qpid/java/broker/src/main/java/org/apache/qpid/server/txn/DistributedTransaction.java20
-rwxr-xr-xqpid/java/broker/src/main/java/org/apache/qpid/server/txn/LocalTransaction.java75
-rwxr-xr-xqpid/java/broker/src/main/java/org/apache/qpid/server/txn/ServerTransaction.java11
-rw-r--r--qpid/java/broker/src/test/java/org/apache/qpid/server/TransactionTimeoutHelperTest.java206
-rw-r--r--qpid/java/broker/src/test/java/org/apache/qpid/server/queue/InboundMessageAdapterTest.java97
-rw-r--r--qpid/java/broker/src/test/java/org/apache/qpid/server/queue/SimpleAMQQueueTest.java2
-rw-r--r--qpid/java/broker/src/test/java/org/apache/qpid/server/store/MessageStoreTest.java2
-rw-r--r--qpid/java/broker/src/test/java/org/apache/qpid/server/txn/AsyncAutoCommitTransactionTest.java2
-rw-r--r--qpid/java/broker/src/test/java/org/apache/qpid/server/txn/AutoCommitTransactionTest.java8
-rw-r--r--qpid/java/broker/src/test/java/org/apache/qpid/server/txn/LocalTransactionTest.java127
-rw-r--r--qpid/java/build.deps2
-rw-r--r--qpid/java/client/src/main/java/org/apache/qpid/client/AMQSession_0_10.java4
-rw-r--r--qpid/java/client/src/main/java/org/apache/qpid/client/BasicMessageConsumer_0_10.java1
-rw-r--r--qpid/java/client/src/main/java/org/apache/qpid/client/security/DynamicSaslRegistrar.java64
-rw-r--r--qpid/java/client/src/main/java/org/apache/qpid/client/security/JCAProvider.java10
-rw-r--r--qpid/java/client/src/main/java/org/apache/qpid/client/state/AMQStateManager.java7
-rw-r--r--qpid/java/client/src/test/java/org/apache/qpid/client/security/DynamicSaslRegistrarTest.java140
-rw-r--r--qpid/java/common.xml2
-rw-r--r--qpid/java/common/build.xml21
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpClass.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/AmqpClass.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpClassMap.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/AmqpClassMap.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpConstant.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/AmqpConstant.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpConstantSet.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/AmqpConstantSet.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpDomain.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/AmqpDomain.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpDomainMap.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/AmqpDomainMap.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpDomainVersionMap.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/AmqpDomainVersionMap.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpField.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/AmqpField.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpFieldMap.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/AmqpFieldMap.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpFlagMap.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/AmqpFlagMap.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpMethod.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/AmqpMethod.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpMethodMap.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/AmqpMethodMap.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpModel.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/AmqpModel.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpOrdinalFieldMap.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/AmqpOrdinalFieldMap.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpOrdinalVersionMap.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/AmqpOrdinalVersionMap.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpOverloadedParameterMap.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/AmqpOverloadedParameterMap.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpParseException.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/AmqpParseException.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpTemplateException.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/AmqpTemplateException.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpTypeMappingException.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/AmqpTypeMappingException.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpVersion.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/AmqpVersion.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpVersionSet.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/AmqpVersionSet.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/BitFieldGenerateMethod.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/BitFieldGenerateMethod.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/CommandGenerateMethod.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/CommandGenerateMethod.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/ConsolidatedField.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/ConsolidatedField.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/CppGenerator.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/CppGenerator.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/DotnetGenerator.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/DotnetGenerator.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/GenerateMethod.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/GenerateMethod.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/Generator.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/Generator.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/JavaGenerator.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/JavaGenerator.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/LanguageConverter.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/LanguageConverter.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/Main.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/Main.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/MangledGenerateMethod.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/MangledGenerateMethod.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/NodeAware.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/NodeAware.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/Printable.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/Printable.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/SingleVersionClass.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/SingleVersionClass.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/SingleVersionField.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/SingleVersionField.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/SingleVersionMethod.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/SingleVersionMethod.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/SingleVersionModel.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/SingleVersionModel.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/TargetDirectoryException.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/TargetDirectoryException.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/Utils.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/Utils.java)0
-rw-r--r--qpid/java/common/gentools/src/org/apache/qpid/gentools/VersionConsistencyCheck.java (renamed from qpid/gentools/src/org/apache/qpid/gentools/VersionConsistencyCheck.java)0
-rw-r--r--qpid/java/common/protocol-version.xml70
-rw-r--r--qpid/java/common/src/main/java/org/apache/qpid/transport/Session.java7
-rw-r--r--qpid/java/common/src/test/java/org/apache/qpid/test/utils/QpidTestCase.java3
-rw-r--r--qpid/java/ivy.retrieve.xml2
-rw-r--r--qpid/java/jca/README-JBOSS-EAP6.txt183
-rw-r--r--qpid/java/jca/README-JBOSS.txt26
-rw-r--r--qpid/java/jca/build.xml37
-rw-r--r--qpid/java/jca/rar/src/main/resources/META-INF/jboss-ra.xml (renamed from qpid/java/jca/src/main/resources/META-INF/jboss-ra.xml)0
-rwxr-xr-xqpid/java/jca/rar/src/main/resources/META-INF/ra.xml (renamed from qpid/java/jca/src/main/resources/META-INF/ra.xml)0
-rw-r--r--qpid/java/jca/src/main/java/org/apache/qpid/ra/admin/QpidConnectionFactoryProxy.java73
-rw-r--r--qpid/java/module.xml49
-rw-r--r--qpid/java/perftests/etc/chartdefs/1030-BatchSize-Equal.chartdef (renamed from qpid/java/perftests/etc/chartdefs/1030-BatchSize.chartdef)13
-rw-r--r--qpid/java/perftests/etc/chartdefs/1031-BatchSize-Unequal.chartdef53
-rw-r--r--qpid/java/perftests/etc/testdefs/BatchSize.js102
-rw-r--r--qpid/java/perftests/etc/testdefs/BatchSize.json84
-rw-r--r--qpid/java/perftests/etc/testdefs/BatchSizeConsumerVaries.js102
-rw-r--r--qpid/java/perftests/etc/testdefs/BatchSizeProducerVaries.js102
-rw-r--r--qpid/java/perftests/example/brokerconfig/log4j.xml2
-rw-r--r--qpid/java/perftests/src/main/java/org/apache/qpid/disttest/client/ConsumerParticipant.java10
-rw-r--r--qpid/java/perftests/src/main/java/org/apache/qpid/disttest/jms/QpidQueueCreator.java28
-rw-r--r--qpid/java/perftests/visualisation-jfc/src/main/java/org/apache/qpid/disttest/charting/ChartingUtil.java7
-rw-r--r--qpid/java/perftests/visualisation-jfc/src/main/java/org/apache/qpid/disttest/charting/writer/ChartWriter.java8
-rw-r--r--qpid/java/perftests/visualisation-jfc/src/test/java/org/apache/qpid/disttest/charting/writer/ChartWriterTest.java4
-rw-r--r--qpid/java/systests/src/main/java/org/apache/qpid/ra/admin/QpidConnectionFactoryProxyTest.java120
-rw-r--r--qpid/java/systests/src/main/java/org/apache/qpid/server/BrokerStartupTest.java8
-rw-r--r--qpid/java/systests/src/main/java/org/apache/qpid/server/logging/AbstractTestLogging.java2
-rw-r--r--qpid/java/systests/src/main/java/org/apache/qpid/server/logging/BrokerLoggingTest.java7
-rw-r--r--qpid/java/systests/src/main/java/org/apache/qpid/systest/management/jmx/LoggingManagementTest.java9
-rw-r--r--qpid/java/systests/src/main/java/org/apache/qpid/systest/management/jmx/QueueManagementTest.java54
-rw-r--r--qpid/java/systests/src/main/java/org/apache/qpid/test/client/destination/AddressBasedDestinationTest.java8
-rw-r--r--qpid/java/systests/src/main/java/org/apache/qpid/test/unit/client/MaxDeliveryCountTest.java6
-rw-r--r--qpid/java/systests/src/main/java/org/apache/qpid/test/unit/client/channelclose/ChannelCloseOkTest.java236
-rw-r--r--qpid/java/systests/src/main/java/org/apache/qpid/test/unit/client/channelclose/ChannelCloseTest.java399
-rw-r--r--qpid/java/systests/src/main/java/org/apache/qpid/test/unit/transacted/TransactionTimeoutTest.java2
-rw-r--r--qpid/java/systests/src/main/java/org/apache/qpid/test/unit/transacted/TransactionTimeoutTestCase.java19
-rw-r--r--qpid/java/systests/src/main/java/org/apache/qpid/test/utils/BrokerCommandHelper.java52
-rw-r--r--qpid/java/systests/src/main/java/org/apache/qpid/test/utils/BrokerCommandHelperTest.java50
-rwxr-xr-x[-rw-r--r--]qpid/java/systests/src/main/java/org/apache/qpid/test/utils/QpidBrokerTestCase.java135
-rw-r--r--qpid/java/systests/src/main/java/org/apache/qpid/util/LogMonitor.java4
-rwxr-xr-xqpid/java/test-profiles/CPPExcludes2
-rwxr-xr-xqpid/java/test-profiles/Java010Excludes3
-rw-r--r--qpid/java/test-profiles/testprofile.defaults7
-rw-r--r--qpid/packaging/windows/qpidc.wxs16
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_10/management.py24
-rwxr-xr-xqpid/tools/src/py/qpid-stat2
500 files changed, 33513 insertions(+), 28663 deletions(-)
diff --git a/qpid/bin/release.sh b/qpid/bin/release.sh
index 9189cd3cf3..ca8f8b6c09 100755
--- a/qpid/bin/release.sh
+++ b/qpid/bin/release.sh
@@ -198,6 +198,7 @@ if [ "PERL" == "$PERL" ]; then
qpid-${VER}/cpp/bindings/qpid/perl/*pm \
qpid-${VER}/cpp/bindings/qpid/perl/LICENSE \
qpid-${VER}/cpp/bindings/qpid/perl/Makefile.PL \
+ qpid-${VER}/cpp/bindings/qpid/perl/t/*.t \
qpid-${VER}/perl-qpid-${VER}
mkdir qpid-${VER}/perl-qpid-${VER}/examples
cp qpid-${VER}/cpp/bindings/qpid/examples/perl/* \
diff --git a/qpid/cpp/CMakeLists.txt b/qpid/cpp/CMakeLists.txt
index 6f506a5b0e..350b430fdb 100644
--- a/qpid/cpp/CMakeLists.txt
+++ b/qpid/cpp/CMakeLists.txt
@@ -68,8 +68,7 @@ set (QPIDD_CONF_FILE ${QPIDD_CONF_PATH} CACHE STRING
"Name of the Qpid broker configuration file")
install(FILES LICENSE NOTICE DESTINATION ${QPID_INSTALL_DOCDIR})
-install(FILES xml/cluster.xml
- bindings/swig_perl_typemaps.i
+install(FILES bindings/swig_perl_typemaps.i
bindings/swig_python_typemaps.i
bindings/swig_ruby_typemaps.i
DESTINATION ${QPID_INSTALL_DATADIR})
diff --git a/qpid/cpp/INSTALL b/qpid/cpp/INSTALL
index dbd41c7cc1..c503f8de19 100644
--- a/qpid/cpp/INSTALL
+++ b/qpid/cpp/INSTALL
@@ -10,7 +10,6 @@ Table of Contents
2.2. How to Install
2.2.1. Using Package Management Tools
2.2.2. From Source
- a. openais
b. boost
c. autotools
2.3. Important Environment Variable Settings
@@ -25,7 +24,7 @@ Table of Contents
1. Introduction
===============
-Note that the daemon and client API can be installed separately.
+Note that the daemon and client API can be installed separately.
This document describes how to build the Qpid/C++ broker and client, either
from a checkout of the source or from a source distribution, on Linux/UNIX.
@@ -55,13 +54,9 @@ a source distribution:
to get 1.32 working in the svn tree though that is only recommended as
a last resort.
-Optional cluster functionality requires ONE of:
- * openais <http://openais.org> (0.80.3)
- * corosync <http://corosync.org> (1.0.0.rc1)
-
- Optional XML exchange requires:
- * xqilla <http://xqilla.sourceforge.net/HomePage> (2.0.0)
- * xerces-c <http://xerces.apache.org/xerces-c/> (2.7.0)
+Optional XML exchange requires:
+ * xqilla <http://xqilla.sourceforge.net/HomePage> (2.0.0)
+ * xerces-c <http://xerces.apache.org/xerces-c/> (2.7.0)
Optional SSL support requires:
* nss <http://www.mozilla.org/projects/security/pki/nss/>
@@ -95,8 +90,8 @@ the following must also be installed:
* ruby-devel
* python-devel
* swig <http://www.swig.org> (1.3.35)
-
-UUID problems:
+
+UUID problems:
In some earlier Linux releases (such as Fedora 11), the uuid/uuid.h
file is located in the e2fsprogs-devel package instead of
libuuid-devel. If you are using an older Linux release and run into a
@@ -114,13 +109,6 @@ package management tool. For example on Fedora:
# yum install boost-devel libuuid-devel pkgconfig gcc-c++ make autoconf automake ruby libtool help2man doxygen graphviz
-The optional clustering packages changed name in Fedora 10. On Fedora 9 or earlier:
- # yum install openais-devel cman-devel
-On Fedora 10 or later
- # yum install corosync-devel cmanlib-devel
-On Fedora 12 they changed again:
- # yum install corosynclib-devel clusterlib-devel
-
For SASL and SSL, include
# yum install cyrus-sasl-devel nss-devel nspr-devel
@@ -147,55 +135,9 @@ It is recommended that you create a directory to install them to, for example,
# ./configure --prefix=~/qpid-tools
# make install
-The exceptions are openais and boost.
-
-a. openais
-==========
-
-If ais is shipped with you platform and you have 0.80.3-x or later, skip
-builing ais
-
-To build ais: Unpack the source distribution and do:
- # make
- # sudo make install DESTDIR=
- # sudo ldconfig
-
-This will install in the standard places (/usr/lib, /usr/include etc.)
-
-Configuring ais:
-
-Edit /etc/ais/openais.conf and modify the "bindnetaddr" setting
-to your hosts IP address. Do not use 127.0.0.1.
+The exception is boost.
-Make sure the UDP port set for mcastport in openais.conf (5405 by
-default) is not blocked by your firewall. Disable the firewall or
-configure it to allow this port for UDP.
-
-Finally start the ais daemon (must be done as root):
- # sudo /sbin/aisexec
-
-Note that to run the AIS tests your primary group must be "ais". You
-can change your primary group with the usermod command or set it
-temporarily with the newgrp command.
-
-Troubleshooting tips:
-
-If aisexec goes into a loop printing "entering GATHER state", verify
-your firewall is allowing UDP traffic on the mcastport set in
-openais.conf.
-
-If aisexec reports "got nodejoin message 127.0.0.1" verify the
-bindnetaddr in openais.conf is an active local IP address. ifconfig
-will list local addresses.
-
-When aisexec is working correctly, the start-up log messages will end
-with "entering OPERATIONAL state." and "got nodejoin message <ip
-address>" where <ip address> is the local IP address specified for
-bindnetaddr in openais.conf.
-
-For further info on openais http://openais.org/
-
-b. boost
+boost
========
1. Unpack boost-jam.
2. Add bjam in the unpacked directory to your path.
@@ -358,8 +300,8 @@ To try it out "make doxygen" then open doxygen/html/index.html.
==================
When building, get the following on configure
configure: error: Package requirements (apr-1 >= 1.2.2) were not met:
-
+
No package 'apr-1' found
-
-The following has not been set
+
+The following has not been set
export PKG_CONFIG_PATH=$HOME/qpid-tools/lib/pkgconfig:/usr/lib/pkgconfig
diff --git a/qpid/cpp/Makefile.am b/qpid/cpp/Makefile.am
index 0bb2e7500a..374f09ebaf 100644
--- a/qpid/cpp/Makefile.am
+++ b/qpid/cpp/Makefile.am
@@ -24,7 +24,7 @@ ACLOCAL_AMFLAGS = -I m4
EXTRA_DIST = \
LICENSE NOTICE README.txt SSL RELEASE_NOTES DESIGN \
- xml/cluster.xml INSTALL-WINDOWS CMakeLists.txt BuildInstallSettings.cmake \
+ INSTALL-WINDOWS CMakeLists.txt BuildInstallSettings.cmake \
packaging/NSIS QPID_VERSION.txt bindings/CMakeLists.txt \
bindings/swig_python_typemaps.i bindings/swig_ruby_typemaps.i bindings/swig_perl_typemaps.i \
include/qpid/qpid.i include/qmf/qmfengine.i include/qmf/qmf2.i
diff --git a/qpid/cpp/bindings/qmf/Makefile.am b/qpid/cpp/bindings/qmf/Makefile.am
index eebb4b94de..ee4ff1d3c1 100644
--- a/qpid/cpp/bindings/qmf/Makefile.am
+++ b/qpid/cpp/bindings/qmf/Makefile.am
@@ -19,7 +19,6 @@
if HAVE_SWIG
-EXTRA_DIST = qmfengine.i
SUBDIRS = tests
if HAVE_RUBY_DEVEL
diff --git a/qpid/cpp/bindings/qmf2/Makefile.am b/qpid/cpp/bindings/qmf2/Makefile.am
index 52b1bbd457..0f50e757a2 100644
--- a/qpid/cpp/bindings/qmf2/Makefile.am
+++ b/qpid/cpp/bindings/qmf2/Makefile.am
@@ -19,7 +19,6 @@
if HAVE_SWIG
-EXTRA_DIST = qmf2.i
SUBDIRS = examples/cpp
if HAVE_RUBY_DEVEL
diff --git a/qpid/cpp/bindings/qpid/Makefile.am b/qpid/cpp/bindings/qpid/Makefile.am
index 77eba6a524..aace6f2d95 100644
--- a/qpid/cpp/bindings/qpid/Makefile.am
+++ b/qpid/cpp/bindings/qpid/Makefile.am
@@ -21,8 +21,6 @@ SUBDIRS = dotnet
if HAVE_SWIG
-EXTRA_DIST = CMakeLists.txt qpid.i
-
if HAVE_RUBY_DEVEL
SUBDIRS += ruby
endif
@@ -35,7 +33,7 @@ if HAVE_PERL_DEVEL
INCLUDES = -I$(top_srcdir)/include -I$(top_srcdir)/bindings -I$(top_builddir)/include -I$(top_srcdir)/src -I$(top_builddir)/src -I$(PERL_INC)
-EXTRA_DIST += perl/perl.i perl/CMakeLists.txt
+EXTRA_DIST = perl/perl.i perl/CMakeLists.txt
BUILT_SOURCES = perl/cqpid_perl.cpp
SWIG_FLAGS = -w362,401
@@ -72,7 +70,6 @@ maintainer-clean-local:
$(PERL) maintainer-clean ; \
cd ..
-DISTCLEANFILES = perl/Makefile.PL
CLEANFILES = perl/cqpid_perl.cpp perl/Makefile.old perl/cqpid_perl.pm
endif
diff --git a/qpid/cpp/bindings/qpid/perl/ChangeLog b/qpid/cpp/bindings/qpid/perl/ChangeLog
new file mode 100644
index 0000000000..4de5631217
--- /dev/null
+++ b/qpid/cpp/bindings/qpid/perl/ChangeLog
@@ -0,0 +1,5 @@
+Version 0.22 (TBA):
+ * QPID-4466: qpid::messaging::Duration now supports multiplication
+ * QPID-4416: Messages with embedded nulls won't break on getContentPtr
+ * QPID-4505: Provides unit tests for Address, Duration and Message
+
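
A minimal usage sketch of the QPID-4466 Duration arithmetic listed in the ChangeLog above (an illustrative script, not part of the patch; it assumes the patched qpid Perl bindings are built and loadable):

#!/usr/bin/env perl
use strict;
use warnings;
use qpid;

# Durations can now be scaled by a non-negative factor; the result is a
# new qpid::messaging::Duration instance.
my $five_minutes = qpid::messaging::Duration::MINUTE * 5;
print $five_minutes->get_milliseconds(), "\n";    # 300000

# The == and != overloads compare the underlying millisecond values and
# never treat a non-Duration operand as equal.
print "scaled correctly\n"
    if $five_minutes == qpid::messaging::Duration::MINUTE * 5;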
diff --git a/qpid/cpp/bindings/qpid/perl/qpid.pm b/qpid/cpp/bindings/qpid/perl/qpid.pm
index a0f8ef7aa2..9edac9ca0b 100644
--- a/qpid/cpp/bindings/qpid/perl/qpid.pm
+++ b/qpid/cpp/bindings/qpid/perl/qpid.pm
@@ -41,7 +41,8 @@ sub decode_map {
package qpid::messaging::Address;
use overload (
- 'bool' => \&boolify,
+ 'bool' => \& boolify,
+ '""' => \& stringify,
);
sub boolify {
@@ -51,6 +52,19 @@ sub boolify {
return length($impl->getName());
}
+sub stringify {
+ my ($self) = @_;
+ my $impl = $self->{_impl};
+
+ return $self->str();
+}
+
+sub str {
+ my ($self) = @_;
+
+ return $self->get_implementation()->str();
+}
+
sub new {
my ($class) = @_;
my ($self) = {};
@@ -115,6 +129,9 @@ sub get_subject {
sub set_options {
my ($self) = @_;
my $impl = $self->{_impl};
+ my $options = $_[1];
+
+ die "Options cannot be null" if !defined($options);
$impl->setOptions($_[1]);
}
@@ -129,8 +146,11 @@ sub get_options {
sub set_type {
my ($self) = @_;
my $impl = $self->{_impl};
+ my $type = $_[1];
- $impl->setType($_[1]);
+ die "Type must be defined" if !defined($type);
+
+ $impl->setType($type);
}
sub get_type {
@@ -144,10 +164,54 @@ sub get_type {
package qpid::messaging::Duration;
+use overload (
+ "*" => \&multiply,
+ "==" => \&equalify,
+ "!=" => \&unequalify,
+ );
+
+sub multiply {
+ my ($self) = @_;
+ my $factor = $_[1];
+
+ die "Factor must be non-negative values" if !defined($factor) || ($factor < 0);
+
+ my $duration = $self->{_impl} * $factor;
+
+ return new qpid::messaging::Duration($duration);
+}
+
+sub equalify {
+ my ($self) = @_;
+ my $that = $_[1];
+
+ return 0 if !defined($that) || !UNIVERSAL::isa($that, 'qpid::messaging::Duration');;
+
+ return ($self->get_milliseconds() == $that->get_milliseconds()) ? 1 : 0;
+}
+
+sub unequalify {
+ my ($self) = @_;
+ my $that = $_[1];
+
+ return 1 if !defined($that) || !UNIVERSAL::isa($that, 'qpid::messaging::Duration');;
+
+ return ($self->get_milliseconds() != $that->get_milliseconds()) ? 1 : 0;
+}
+
sub new {
my ($class) = @_;
+ my $duration = $_[1];
+
+ die "Duration time period must be defined" if !defined($duration);
+
+ if (!UNIVERSAL::isa($duration, 'cqpid_perl::Duration')) {
+ die "Duration must be non-negative" if $duration < 0;
+ $duration = new cqpid_perl::Duration($duration);
+ }
+
my ($self) = {
- _impl => new cqpid_perl::Duration($_[1]),
+ _impl => $duration,
};
bless $self, $class;
@@ -169,7 +233,7 @@ sub get_implementation {
# TODO: Need a better way to define FOREVER
use constant {
- FOREVER => new qpid::messaging::Duration(10000),
+ FOREVER => new qpid::messaging::Duration(1000000),
IMMEDIATE => new qpid::messaging::Duration(0),
SECOND => new qpid::messaging::Duration(1000),
MINUTE => new qpid::messaging::Duration(60000),
@@ -207,8 +271,15 @@ sub get_implementation {
sub set_reply_to {
my ($self) = @_;
my $impl = $self->{_impl};
+ my $address = $_[1];
- $impl->setReplyTo($_[1]->get_implementation());
+ # if the address was a string, then wrap it
+ # in a qpid::messaging::Address instance
+ if (!UNIVERSAL::isa($address, 'qpid::messaging::Address')) {
+ $address = new qpid::messaging::Address($_[1]);
+ }
+
+ $impl->setReplyTo($address->get_implementation());
}
sub get_reply_to {
@@ -250,8 +321,11 @@ sub get_content_type {
sub set_message_id {
my ($self) = @_;
my $impl = $self->{_impl};
+ my $id = $_[1];
+
+ die "message id must be defined" if !defined($id);
- $impl->setMessageId($_[1]);
+ $impl->setMessageId($id);
}
sub get_message_id {
@@ -292,8 +366,14 @@ sub get_correlation_id {
sub set_priority {
my ($self) = @_;
my $impl = $self->{_impl};
+ my $priority = $_[1];
+
+ die "Priority must be provided" if !defined($priority);
- $impl->setPriority($_[1]);
+ $priority = int($priority);
+ die "Priority must be non-negative" if $priority < 0;
+
+ $impl->setPriority($priority);
}
sub get_priority {
@@ -306,22 +386,39 @@ sub get_priority {
sub set_ttl {
my ($self) = @_;
my $impl = $self->{_impl};
+ my $duration = $_[1];
+
+ die "Duration must be provided" if !defined($duration);
+ if (!UNIVERSAL::isa($duration, 'qpid::messaging::Duration')) {
+ $duration = int($duration);
+
+ if ($duration < 0) {
+ $duration = qpid::messaging::Duration::FOREVER;
+ } elsif ($duration == 0) {
+ $duration = qpid::messaging::Duration::IMMEDIATE;
+ } else {
+ $duration = new qpid::messaging::Duration(int($duration));
+ }
+ }
- $impl->setTtl($_[1]);
+ $impl->setTtl($duration->get_implementation());
}
sub get_ttl {
my ($self) = @_;
my $impl = $self->{_impl};
- return $impl->getTtl;
+ return new qpid::messaging::Duration($impl->getTtl);
}
sub set_durable {
my ($self) = @_;
my $impl = $self->{_impl};
+ my $durable = $_[1];
- $impl->setDurable($_[1]);
+ die "Durable must be specified" if !defined($durable);
+
+ $impl->setDurable($durable);
}
sub get_durable {
@@ -334,8 +431,11 @@ sub get_durable {
sub set_redelivered {
my ($self) = @_;
my $impl = $self->{_impl};
+ my $redelivered = $_[1];
+
+ die "Redelivered must be specified" if !defined($redelivered);
- $impl->setRedelivered($_[1]);
+ $impl->setRedelivered($redelivered);
}
sub get_redelivered {
@@ -345,13 +445,13 @@ sub get_redelivered {
return $impl->getRedelivered;
}
-sub get_property {
+sub set_property {
my ($self) = @_;
- my $key = $_[1];
-
my $impl = $self->{_impl};
+ my $key = $_[1];
+ my $value = $_[2];
- return $impl->getPropert($key);
+ $impl->setProperty($key, $value);
}
sub get_properties {
@@ -363,9 +463,11 @@ sub get_properties {
sub set_content {
my ($self) = @_;
- my $content = $_[1] || "";
+ my $content = $_[1];
my $impl = $self->{_impl};
+ die "Content must be provided" if !defined($content);
+
$impl->setContent($content);
}
diff --git a/qpid/cpp/bindings/qpid/perl/t/Address.t b/qpid/cpp/bindings/qpid/perl/t/Address.t
new file mode 100644
index 0000000000..4e74f8cad2
--- /dev/null
+++ b/qpid/cpp/bindings/qpid/perl/t/Address.t
@@ -0,0 +1,102 @@
+#!/usr/bin/env perl -w
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+use Test::More qw(no_plan);
+use Test::Exception;
+
+require 'utils.pm';
+
+# verify that qpid is available
+BEGIN { use_ok( 'qpid' ); }
+require_ok ('qpid' );
+
+# construction
+# address cannot be null
+dies_ok (sub {new qpid::messaging::Address(undef);},
+ "Address cannot be null");
+
+# can use an address
+my $address = new qpid::messaging::Address("0.0.0.0");
+ok ($address, "Can be created with an arbitrary address");
+
+# name
+# name cannot be null
+dies_ok (sub {$address->set_name(undef);},
+ "Name cannot be null");
+
+# name can be an empty string
+$address->set_name("");
+ok ($address->get_name() eq "",
+ "Name can be empty");
+
+# name can be an arbitrary string
+my $name = random_string(25);
+$address->set_name($name);
+ok ($address->get_name() eq $name,
+ "Name can be an arbitrary string");
+
+# subject
+# cannot be null
+dies_ok (sub {$address->set_subject(undef);},
+ "Subject cannot be null");
+
+# can be an empty string
+$address->set_subject("");
+ok ($address->get_subject() eq "",
+ "Subject can be empty");
+
+# can be an arbitrary string
+my $subject = random_string(64);
+$address->set_subject($subject);
+ok ($address->get_subject() eq $subject,
+ "Subject can be an arbitrary string");
+
+# options
+# options cannot be null
+dies_ok (sub {$address->set_options(undef);},
+ "Options cannot be null");
+
+# options can be an empty hash
+$address->set_options({});
+ok (eq_hash($address->get_options(), {}),
+ "Options can be an empty hash");
+
+# options cannot be arbitrary values
+my %options = ("create", "always", "delete", "always");
+$address->set_options(\%options);
+ok (eq_hash($address->get_options(), \%options),
+ "Options can be arbitrary keys");
+
+# type
+# cannot be null
+dies_ok (sub {$address->set_type(undef);},
+ "Type cannot be null");
+
+# can be an empty string
+$address->set_type("");
+ok ($address->get_type() eq "",
+ "Type can be an empty string");
+
+# can be an arbitrary string
+my $type = random_string(16);
+$address->set_type($type);
+ok ($address->get_type() eq $type,
+ "Type can be an arbitrary type");
+
diff --git a/qpid/cpp/bindings/qpid/perl/t/Duration.t b/qpid/cpp/bindings/qpid/perl/t/Duration.t
new file mode 100644
index 0000000000..6975e8006f
--- /dev/null
+++ b/qpid/cpp/bindings/qpid/perl/t/Duration.t
@@ -0,0 +1,124 @@
+#!/usr/bin/env perl -w
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+use Test::More qw(no_plan);
+use Test::Exception;
+
+require 'utils.pm';
+
+# verify that qpid is available
+BEGIN { use_ok( 'qpid' ); }
+require_ok ('qpid' );
+
+# milliseconds
+# duration cannot be null
+{
+ dies_ok (sub {new qpid::messaging::Duration(undef);},
+ "Durations cannot have null time periods");
+}
+
+# duration cannot be negative
+{
+ my $period = 0 - (int(rand(65535)) + 1);
+ dies_ok(sub {new qpid::messaging::Duration($period);},
+ "Duration times cannot be negative");
+}
+
+# duration can be an arbitrary value
+{
+ my $period = int(rand(65535));
+ my $duration = new qpid::messaging::Duration($period);
+ ok ($duration->get_milliseconds() == $period,
+ "Milliseconds are properly stored and fetched");
+}
+
+# multiplier
+# cannot multiply by null
+dies_ok(sub {qpid::messaging::Duration::FOREVER * undef;},
+ "Cannot multiply a duration times a null");
+
+# cannot multiply by a negative
+dies_ok (sub {qpid::messaging::Duration::MINUTE * -2;},
+ "Duration cannot be multiplied by a negative");
+
+# multiply by zero returns a zero time period
+{
+ my $result = qpid::messaging::Duration::MINUTE * 0;
+
+ ok ($result->get_milliseconds() == 0,
+ "Multiplying duration by 0 returns a 0 duration");
+}
+
+# multiply by arbitrary values works
+{
+ my $factor = int(1 + rand(100));
+ my $result = qpid::messaging::Duration::MINUTE * $factor;
+ ok ($result->get_milliseconds() == 60000 * $factor,
+ "Multiplying by a factor returns a new Duration with that period");
+}
+
+# equality
+# always fails with null
+ok (!(qpid::messaging::Duration::MINUTE == undef),
+ "Duration is never equal to null");
+
+# never equal to a non-duration class
+ok (!(qpid::messaging::Duration::MINUTE == random_string(12)),
+ "Duration is never equal to a non-Duration");
+
+# works with self
+ok (qpid::messaging::Duration::MINUTE == qpid::messaging::Duration::MINUTE,
+ "Duration is always equal to itself");
+
+# fails with non-equal instance
+ok (!(qpid::messaging::Duration::MINUTE == qpid::messaging::Duration::SECOND),
+ "Duration non-equality works");
+
+# works with equal instance
+{
+ my $result = qpid::messaging::Duration::MINUTE * 0;
+ ok ($result == qpid::messaging::Duration::IMMEDIATE,
+ "Equality comparison works correctly");
+}
+
+# non-equality
+# always not equal to null
+ok (qpid::messaging::Duration::MINUTE != undef,
+ "Always unequal to null");
+
+# always not equal to a non-duration class
+ok (qpid::messaging::Duration::MINUTE != random_string(64),
+ "Always unequal to a non-duration class");
+
+# not unequal to itself
+ok (!(qpid::messaging::Duration::MINUTE != qpid::messaging::Duration::MINUTE),
+ "Never unequal to itself");
+
+# not unequal to an equal instance
+{
+ my $duration = qpid::messaging::Duration::MINUTE * 1;
+ ok (!(qpid::messaging::Duration::MINUTE != $duration),
+ "Never unequal to an equal instance");
+}
+
+# works with unequal instances
+ok (qpid::messaging::Duration::MINUTE != qpid::messaging::Duration::FOREVER,
+ "Always unequal to a non-equal instance");
+
diff --git a/qpid/cpp/bindings/qpid/perl/t/Message.t b/qpid/cpp/bindings/qpid/perl/t/Message.t
new file mode 100644
index 0000000000..142e1719b3
--- /dev/null
+++ b/qpid/cpp/bindings/qpid/perl/t/Message.t
@@ -0,0 +1,268 @@
+#!/usr/bin/env perl -w
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+use Test::More qw(no_plan);
+use Test::Exception;
+
+require 'utils.pm';
+
+# verify that qpid is available
+BEGIN { use_ok( 'qpid' ); }
+require_ok ('qpid' );
+
+# Create a new message
+my $message = new qpid::messaging::Message();
+isa_ok($message, 'qpid::messaging::Message');
+
+# reply to
+# rejects a null address
+dies_ok (sub {$message->set_reply_to(undef);},
+ "Reply to cannot be null.");
+
+# can handle a string address
+$message->set_reply_to("test");
+ok ($message->get_reply_to()->str() eq "test",
+ "Reply to can be set");
+
+# subject
+# cannot have a null subject
+dies_ok (sub {$message->set_subject(undef);},
+ "Subject cannot be null");
+
+# can have an empty subject
+$message->set_subject("");
+ok ($message->get_subject() eq "",
+ "Subject can be empty");
+
+# can have a subject
+my $subject = random_string(16);
+$message->set_subject($subject);
+ok ($message->get_subject() eq $subject,
+ "Subject can be set.");
+
+# content type
+# cannot have a null content type
+dies_ok (sub {$message->set_content_type(undef);},
+ "Content type must be defined.");
+
+# can have an empty content type
+$message->set_content_type("");
+ok ($message->get_content_type() eq "",
+ "Content type can be empty");
+
+# can have an arbitrary content type
+my $content_type = random_string(10);
+$message->set_content_type($content_type);
+ok ($message->get_content_type() eq $content_type,
+ "Content type can be arbitrary");
+
+# can be for a map
+$content_type = "amqp/map";
+$message->set_content_type($content_type);
+ok ($message->get_content_type() eq $content_type,
+ "Content type can be for a map");
+
+# message id
+# cannot be null
+dies_ok (sub {$message->set_message_id(undef);},
+ "Message id cannot be null");
+
+# can be an empty string
+$message->set_message_id("");
+ok ($message->get_message_id() eq "",
+ "Message id can be empty");
+
+# can be an arbitrary string
+my $id = random_string(32);
+$message->set_message_id($id);
+ok ($message->get_message_id() eq $id,
+ "Message id can be an arbitrary string");
+
+# can be a UUID
+$id = generate_uuid();
+$message->set_message_id($id);
+ok ($message->get_message_id() eq $id,
+ "Message id can be a valid UUID");
+
+# user id
+# cannot be null
+dies_ok (sub {$message->set_user_id(undef);},
+ "User id cannot be null");
+
+# can be an empty string
+my $user_id = "";
+$message->set_user_id($user_id);
+ok ($message->get_user_id() eq $user_id,
+ "User id can be empty");
+
+# can be an arbitrary string
+$user_id = random_string(65);
+$message->set_user_id($user_id);
+ok ($message->get_user_id() eq $user_id,
+ "User id can be an arbitrary string");
+
+# correlation id
+# cannot be null
+dies_ok (sub {$message->set_correlation_id(undef);},
+ "Correlation id cannot be null");
+
+# can be empty
+my $correlation_id = "";
+$message->set_correlation_id($correlation_id);
+ok ($message->get_correlation_id() eq $correlation_id,
+ "Correlation id can be an empty string");
+
+# can be an arbitrary string
+$correlation_id = random_string(32);
+$message->set_correlation_id($correlation_id);
+ok ($message->get_correlation_id() eq $correlation_id,
+ "Correlation id can be an arbitrary string");
+
+# priority
+# cannot be null
+dies_ok (sub {$message->set_priority(undef);},
+ "Priority cannot be null");
+
+# cannot be negative
+my $priority = 0 - (rand(2**8) + 1);
+dies_ok (sub {$message->set_priority($priority);},
+ "Priority cannot be negative");
+
+# can be 0
+$message->set_priority(0);
+ok ($message->get_priority() == 0,
+ "Priority can be zero");
+
+# can be an arbitrary value
+$priority = int(rand(2**8) + 1);
+$message->set_priority($priority);
+ok ($message->get_priority() == $priority,
+ "Priority can be any positive value");
+
+# ttl
+# cannot be null
+dies_ok (sub {$message->set_ttl(undef);},
+ "TTL cannot be null");
+
+# can be a duration
+$message->set_ttl(qpid::messaging::Duration::FOREVER);
+ok ($message->get_ttl()->get_milliseconds() == qpid::messaging::Duration::FOREVER->get_milliseconds(),
+ "TTL can be a Duration");
+
+# if numeric, is converted to a duration
+my $duration = rand(65535);
+$message->set_ttl($duration);
+ok ($message->get_ttl()->get_milliseconds() == int($duration),
+ "TTL can be any arbitrary duration");
+
+# if 0 it's converted to IMMEDIATE
+$message->set_ttl(0);
+ok ($message->get_ttl()->get_milliseconds() == qpid::messaging::Duration::IMMEDIATE->get_milliseconds(),
+ "TTL of 0 is converted to IMMEDIATE");
+
+# if negative it's converted to FOREVER
+$message->set_ttl(0 - (rand(65535) + 1));
+ok ($message->get_ttl()->get_milliseconds() == qpid::messaging::Duration::FOREVER->get_milliseconds(),
+ "TTL of <0 is converted to FOREVER");
+
+# durable
+# cannot be null
+dies_ok (sub {$message->set_durable(undef);},
+ "Durable cannot be null");
+
+# can be set to true
+$message->set_durable(1);
+ok ($message->get_durable(),
+ "Durable can be true");
+
+# can be set to false
+$message->set_durable(0);
+ok (!$message->get_durable(),
+ "Durable can be false");
+
+# redelivered
+# redelivered cannot be null
+dies_ok (sub {$message->set_redelivered(undef);},
+ "Redelivered cannot be null");
+
+# can be set to true
+$message->set_redelivered(1);
+ok ($message->get_redelivered(),
+ "Redelivered can be true");
+
+# can be set to false
+$message->set_redelivered(0);
+ok (!$message->get_redelivered(),
+ "Redelivered can be false");
+
+# properties
+# can retrieve all properties
+my $properties = $message->get_properties();
+ok (UNIVERSAL::isa($properties, 'HASH'),
+ "Returns the properties as a hash map");
+
+# property
+# setting a property using a null key fails
+dies_ok (sub {$message->set_property(undef, "bar");},
+ "Property cannot have a null key");
+
+# setting a property with a null value succeeds
+my $key = random_string(16);
+$message->set_property($key, undef);
+ok (!$message->get_properties()->{$key},
+ "Properties can have null values");
+
+# setting a property succeeds
+my $value = random_string(255);
+$message->set_property($key, $value);
+ok ($message->get_properties()->{$key} eq $value,
+ "Messages can have arbitrary property values");
+
+# content
+# cannot be null
+dies_ok (sub {$message->set_content(undef);},
+ "Content cannot be null");
+
+# can be an empty string
+$message->set_content("");
+ok ($message->get_content() eq "",
+ "Content can be an empty string");
+
+# can be an arbitrary string
+my $content = random_string(255);
+$message->set_content($content);
+ok ($message->get_content() eq $content,
+ "Content can be an arbitrary string");
+
+# Embedded nulls should be handled properly
+$content = { id => 1234, name => "With\x00null" };
+qpid::messaging::encode($content, $message);
+my $map = qpid::messaging::decode_map($message);
+
+ok ($map->{name} eq "With\x00null",
+ "Nulls embedded in map values work.");
+
+# content size
+# content size is correct
+my $content_size = int(rand(256));
+$content = random_string($content_size);
+$message->set_content($content);
+ok ($message->get_content_size() == $content_size,
+ "Content size is correct");
diff --git a/qpid/cpp/examples/messaging/extra_dist/Makefile b/qpid/cpp/bindings/qpid/perl/t/utils.pm
index 8dd7f658f7..db8093d324 100644
--- a/qpid/cpp/examples/messaging/extra_dist/Makefile
+++ b/qpid/cpp/bindings/qpid/perl/t/utils.pm
@@ -1,4 +1,3 @@
-#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
@@ -16,15 +15,24 @@
# specific language governing permissions and limitations
# under the License.
#
-CXX=g++
-CXXFLAGS=-g -O2
-LDFLAGS=-lqpidmessaging
-all: drain spout client server map_sender map_receiver hello_world
+use Digest::MD5;
+
+sub random_string
+{
+ my $len=$_[0];
+ my @chars=('a'..'z','A'..'Z','0'..'9','_');
+ my $result;
-drain: drain.o OptionParser.o
+ foreach (1..$len) {
+ $result .= $chars[rand @chars];
+ }
+ return $result;
+}
-spout: spout.o OptionParser.o
+sub generate_uuid
+{
+ return Digest::MD5::md5_base64( rand );
+}
-clean:
- rm -f drain spout client server map_sender map_receiver hello_world *.o
+1;
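
The two helpers above supply the test data used throughout the .t files; a quick sketch of what they produce (the printed values vary per run):

    use strict;
    use warnings;

    require 'utils.pm';

    my $name = random_string(16);    # 16 characters drawn from [a-zA-Z0-9_]
    my $id   = generate_uuid();      # a base64 MD5 digest standing in for a UUID
    print "$name\n$id\n";
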
diff --git a/qpid/cpp/bindings/qpid/perl/test/test-null-inside-map.pl b/qpid/cpp/bindings/qpid/perl/test/test-null-inside-map.pl
deleted file mode 100644
index 2c1e698abb..0000000000
--- a/qpid/cpp/bindings/qpid/perl/test/test-null-inside-map.pl
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/perl
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-use strict;
-use warnings;
-use Data::Dumper;
-
-use cqpid_perl;
-
-my $broker = ( @ARGV > 0 ) ? $ARGV[0] : "localhost:5672";
-my $address = ( @ARGV > 1 ) ? $ARGV[0] : "amq.match";
-my $connectionOptions = ( @ARGV > 2 ) ? $ARGV[1] : "";
-
-my $in_address = "amq.match; {link:{x-bindings:[{exchange: 'amq.match', arguments:{'x-match': 'all', 'header2' : 'value2'}}]}}";
-
-my $connection = new cqpid_perl::Connection($broker, $connectionOptions);
-
-eval {
- $connection->open();
- my $session = $connection->createSession();
-
- my $receiver = $session->createReceiver($in_address);
- my $sender = $session->createSender($address);
-
- my $hash = { id => 1234, name => "Blah\x00Blah" };
- my $outmsg = new cqpid_perl::Message("Hello\x00World");
- cqpid_perl::encode($hash, $outmsg);
- $outmsg->setProperty("header2", "value2");
- $sender->send($outmsg);
-
- my $message = $receiver->fetch($cqpid_perl::Duration::SECOND);
-
- print Dumper($message->getProperties());
-
- print $message->getContent() . "\n";
- my $outmap = cqpid_perl::decodeMap($message);
- print Dumper($outmap);
- $session->acknowledge();
-
- $connection->close();
-};
-
-die $@ if ($@);
diff --git a/qpid/cpp/bindings/qpid/ruby/lib/qpid_messaging/duration.rb b/qpid/cpp/bindings/qpid/ruby/lib/qpid_messaging/duration.rb
index aca8fab8ae..1ff7857935 100644
--- a/qpid/cpp/bindings/qpid/ruby/lib/qpid_messaging/duration.rb
+++ b/qpid/cpp/bindings/qpid/ruby/lib/qpid_messaging/duration.rb
@@ -71,6 +71,22 @@ module Qpid
@duration_impl.getMilliseconds
end
+ # Returns a new Duration with a period of time that is a multiple
+ # of the original Duration.
+ #
+ # Raises exceptions on a negative factor. Returns
+ # Qpid::Messaging::Duration::IMMEDIATE when the factor is 0.
+ #
+ # ==== Examples
+ #
+ # twominutes = Qpid::Messaging::Duration::MINUTE * 2
+ #
+ def *(factor)
+ raise TypeError.new "Factors must be non-zero positive values" if factor < 0
+ return Qpid::Messaging::Duration::IMMEDIATE if factor.zero?
+ Qpid::Messaging::Duration.new((self.milliseconds * factor).floor)
+ end
+
def self.add_item(key, value) # :nodoc:
@hash ||= {}
@hash[key] = Duration.new value
diff --git a/qpid/cpp/bindings/qpid/ruby/spec/qpid_messaging/duration_spec.rb b/qpid/cpp/bindings/qpid/ruby/spec/qpid_messaging/duration_spec.rb
index 4980b6ffe7..202332d232 100644
--- a/qpid/cpp/bindings/qpid/ruby/spec/qpid_messaging/duration_spec.rb
+++ b/qpid/cpp/bindings/qpid/ruby/spec/qpid_messaging/duration_spec.rb
@@ -49,6 +49,33 @@ module Qpid
milliseconds.should == 1000
end
+ it "raises an error when multiplied by a negative" do
+ expect {
+ twomin = Qpid::Messaging::Duration::MINUTE * -2
+ }.to raise_error
+ end
+
+ it "returns IMMEDIATE if the factor is zero" do
+ result = Qpid::Messaging::Duration::MINUTE * 0
+ result.should be(Qpid::Messaging::Duration::IMMEDIATE)
+ end
+
+ it "fractional factors return a reduced duration" do
+ factor = rand # rand() without an argument returns a float in [0,1)
+ first = Qpid::Messaging::Duration::MINUTE
+ second = first * factor
+
+ second.milliseconds.should == ((first.milliseconds * factor).floor)
+ end
+
+ it "can return a multiple of its duration" do
+ factor = rand(10).floor
+ first = Qpid::Messaging::Duration.new(rand(10).floor * 10000)
+ second = first * factor
+
+ second.milliseconds.should == first.milliseconds * factor
+ end
+
end
end
diff --git a/qpid/cpp/configure.ac b/qpid/cpp/configure.ac
index b8243fcd16..f3681073fd 100644
--- a/qpid/cpp/configure.ac
+++ b/qpid/cpp/configure.ac
@@ -539,7 +539,6 @@ AC_CONFIG_FILES([
managementgen/Makefile
etc/Makefile
src/Makefile
- src/tests/cpg_check.sh
src/tests/Makefile
src/tests/test_env.sh
src/tests/install_env.sh
diff --git a/qpid/cpp/examples/messaging/CMakeLists.txt b/qpid/cpp/examples/messaging/CMakeLists.txt
index 03ed2daaad..b7ecfb034e 100644
--- a/qpid/cpp/examples/messaging/CMakeLists.txt
+++ b/qpid/cpp/examples/messaging/CMakeLists.txt
@@ -17,47 +17,18 @@
# under the License.
#
-# drain and spout have explicit Boost.program_options usage in them, so be
-# sure that lib is linked in.
-
-macro(add_messaging_example example)
- add_executable(${example} ${example}.cpp OptionParser.cpp)
- set_target_properties(${example} PROPERTIES OUTPUT_NAME ${example})
- target_link_libraries(${example} qpidmessaging ${_boost_libs_needed})
- # For installs, don't install the built example; that would be pointless.
- # Install the things a user needs to build the example on-site.
- install (FILES ${CMAKE_CURRENT_SOURCE_DIR}/${example}.cpp ${CMAKE_CURRENT_SOURCE_DIR}/OptionParser.h ${CMAKE_CURRENT_SOURCE_DIR}/OptionParser.cpp
- DESTINATION ${QPID_INSTALL_EXAMPLESDIR}/messaging
- COMPONENT ${QPID_COMPONENT_EXAMPLES})
- if (MSVC)
- install (FILES ${CMAKE_CURRENT_SOURCE_DIR}/messaging_${example}.vcproj
- DESTINATION ${QPID_INSTALL_EXAMPLESDIR}/messaging
- COMPONENT ${QPID_COMPONENT_EXAMPLES})
- endif (MSVC)
-
-endmacro(add_messaging_example)
-
-add_messaging_example(drain)
-add_messaging_example(spout)
-
-add_messaging_example(map_receiver)
-add_messaging_example(map_sender)
-
-add_messaging_example(client)
-add_messaging_example(server)
-
-# These don't need Boost or OptionParser
-add_executable(hello_world hello_world.cpp)
-set_target_properties(hello_world PROPERTIES OUTPUT_NAME hello_world)
-target_link_libraries(hello_world qpidmessaging)
-install (FILES ${CMAKE_CURRENT_SOURCE_DIR}/hello_world.cpp
- DESTINATION ${QPID_INSTALL_EXAMPLESDIR}/messaging
- COMPONENT ${QPID_COMPONENT_EXAMPLES})
-
-add_executable(hello_xml hello_xml.cpp)
-set_target_properties(hello_xml PROPERTIES OUTPUT_NAME hello_xml)
-target_link_libraries(hello_xml qpidmessaging)
-install (FILES ${CMAKE_CURRENT_SOURCE_DIR}/hello_xml.cpp
+install (FILES
+ ${CMAKE_CURRENT_SOURCE_DIR}/extra_dist/CMakeLists.txt
+ ${CMAKE_CURRENT_SOURCE_DIR}/OptionParser.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/OptionParser.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/hello_world.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/hello_xml.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/drain.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/spout.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/map_receiver.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/map_sender.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/client.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/server.cpp
DESTINATION ${QPID_INSTALL_EXAMPLESDIR}/messaging
COMPONENT ${QPID_COMPONENT_EXAMPLES})
diff --git a/qpid/cpp/examples/messaging/Makefile.am b/qpid/cpp/examples/messaging/Makefile.am
index f11ca20c71..5fefb37f8a 100644
--- a/qpid/cpp/examples/messaging/Makefile.am
+++ b/qpid/cpp/examples/messaging/Makefile.am
@@ -62,7 +62,7 @@ examples_DATA= \
server.cpp \
map_sender.cpp \
map_receiver.cpp \
- extra_dist/Makefile
+ extra_dist/CMakeLists.txt
EXTRA_DIST= \
$(examples_DATA) \
diff --git a/qpid/cpp/examples/messaging/extra_dist/CMakeLists.txt b/qpid/cpp/examples/messaging/extra_dist/CMakeLists.txt
new file mode 100644
index 0000000000..88df55337c
--- /dev/null
+++ b/qpid/cpp/examples/messaging/extra_dist/CMakeLists.txt
@@ -0,0 +1,62 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+cmake_minimum_required (VERSION 2.6)
+
+project (Examples)
+
+# drain and spout have explicit Boost.program_options usage in them, so be
+# sure that lib is linked in.
+
+macro(add_messaging_example example)
+ add_executable(${example} ${example}.cpp OptionParser.cpp)
+ set_target_properties(${example} PROPERTIES OUTPUT_NAME ${example})
+ target_link_libraries(${example} qpidtypes qpidmessaging ${_boost_libs_needed})
+ # For installs, don't install the built example; that would be pointless.
+ # Install the things a user needs to build the example on-site.
+ install (FILES ${CMAKE_CURRENT_SOURCE_DIR}/${example}.cpp
+ ${CMAKE_CURRENT_SOURCE_DIR}/OptionParser.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/OptionParser.cpp
+ DESTINATION ${QPID_INSTALL_EXAMPLESDIR}/messaging
+ COMPONENT ${QPID_COMPONENT_EXAMPLES})
+ if (MSVC)
+ install (FILES ${CMAKE_CURRENT_SOURCE_DIR}/messaging_${example}.vcproj
+ DESTINATION ${QPID_INSTALL_EXAMPLESDIR}/messaging
+ COMPONENT ${QPID_COMPONENT_EXAMPLES})
+ endif (MSVC)
+
+endmacro(add_messaging_example)
+
+add_messaging_example(drain)
+add_messaging_example(spout)
+
+add_messaging_example(map_receiver)
+add_messaging_example(map_sender)
+
+add_messaging_example(client)
+add_messaging_example(server)
+
+# These don't need Boost or OptionParser
+add_executable(hello_world hello_world.cpp)
+set_target_properties(hello_world PROPERTIES OUTPUT_NAME hello_world)
+target_link_libraries(hello_world qpidmessaging)
+
+add_executable(hello_xml hello_xml.cpp)
+set_target_properties(hello_xml PROPERTIES OUTPUT_NAME hello_xml)
+target_link_libraries(hello_xml qpidmessaging)
diff --git a/qpid/cpp/include/qpid/Url.h b/qpid/cpp/include/qpid/Url.h
index b3ff9576e2..f9ed87c24b 100644
--- a/qpid/cpp/include/qpid/Url.h
+++ b/qpid/cpp/include/qpid/Url.h
@@ -32,13 +32,6 @@ namespace qpid {
/** An AMQP URL contains a list of addresses */
struct Url : public std::vector<Address> {
- /** Url with the hostname as returned by gethostname(2) */
- QPID_COMMON_EXTERN static Url getHostNameUrl(uint16_t port);
-
- /** Url with local IP address(es), may be more than one address
- * on a multi-homed host. */
- QPID_COMMON_EXTERN static Url getIpAddressesUrl(uint16_t port);
-
struct Invalid : public Exception { QPID_COMMON_EXTERN Invalid(const std::string& s); };
/** Convert to string form. */
diff --git a/qpid/cpp/include/qpid/qpid.i b/qpid/cpp/include/qpid/qpid.i
index 352bafa3c8..28a9064ebb 100644
--- a/qpid/cpp/include/qpid/qpid.i
+++ b/qpid/cpp/include/qpid/qpid.i
@@ -17,8 +17,33 @@
* under the License.
*/
+/*
+ * Need some magic to wrap getContentPtr, otherwise it could return char *
+ * containing NULL, which would be incorrectly interpreted as end of string
+ */
+%extend qpid::messaging::Message
+{
+ mystr getContentPtr()
+ {
+ mystr s;
+ s.ptr = self->getContentPtr();
+ s.len = self->getContentSize();
+ return s;
+ }
+}
+%ignore qpid::messaging::Message::getContentPtr;
+%typemap(out,fragment="SWIG_FromCharPtrAndSize") (mystr) {
+ %append_output(SWIG_FromCharPtrAndSize($1.ptr, $1.len));
+}
+
%{
+struct mystr
+{
+ size_t len;
+ const char *ptr;
+};
+
#include <qpid/messaging/exceptions.h>
#include <qpid/messaging/Address.h>
#include <qpid/messaging/Connection.h>
@@ -68,3 +93,9 @@ qpid::types::Variant::List& decodeList(const qpid::messaging::Message&);
%};
+%extend qpid::messaging::Duration {
+ qpid::messaging::Duration __mul__(uint64_t multiplier) {
+ return qpid::messaging::Duration(self->getMilliseconds() * multiplier);
+ }
+};
+
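
The practical effect of the getContentPtr wrapper above is visible from the script bindings: content containing embedded NUL bytes is returned with its full length instead of being truncated at the first NUL. A minimal Perl sketch, mirroring the round-trip check in t/Message.t:

    use qpid;

    my $message = new qpid::messaging::Message();
    qpid::messaging::encode({name => "With\x00null"}, $message);

    my $map = qpid::messaging::decode_map($message);
    print length($map->{name}), "\n";    # 9, the embedded NUL is preserved
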
diff --git a/qpid/cpp/include/qpid/sys/SystemInfo.h b/qpid/cpp/include/qpid/sys/SystemInfo.h
index ec0e653881..1b5720a5f0 100644
--- a/qpid/cpp/include/qpid/sys/SystemInfo.h
+++ b/qpid/cpp/include/qpid/sys/SystemInfo.h
@@ -47,12 +47,6 @@ QPID_COMMON_EXTERN long concurrency();
QPID_COMMON_EXTERN bool getLocalHostname (Address &address);
/**
- * Get the (possibly multiple) local IP addresses of this host
- * using the specified port.
- */
-QPID_COMMON_EXTERN void getLocalIpAddresses (uint16_t port, std::vector<Address> &addrList);
-
-/**
* Get the names of all the network interfaces connected to
* this host.
* @param names Receives the list of interface names
diff --git a/qpid/cpp/src/CMakeLists.txt b/qpid/cpp/src/CMakeLists.txt
index 731451754f..d0a5aece64 100644
--- a/qpid/cpp/src/CMakeLists.txt
+++ b/qpid/cpp/src/CMakeLists.txt
@@ -208,6 +208,7 @@ execute_process(COMMAND ${RUBY_EXECUTABLE} -I ${rgen_dir} ${rgen_dir}/generate $
set(mgmt_specs ${AMQP_SPEC_DIR}/management-schema.xml
${CMAKE_CURRENT_SOURCE_DIR}/qpid/acl/management-schema.xml
${CMAKE_CURRENT_SOURCE_DIR}/qpid/ha/management-schema.xml
+ ${CMAKE_CURRENT_SOURCE_DIR}/qpid/legacystore/management-schema.xml
)
set(mgen_dir ${qpid-cpp_SOURCE_DIR}/managementgen)
set(regen_mgmt OFF)
@@ -305,9 +306,9 @@ set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${WARNING_FLAGS}")
# Expand a bit from the basic Find_Boost; be specific about what's needed.
# Boost.system is sometimes needed; it's handled separately, below.
if (CMAKE_SYSTEM_NAME STREQUAL Windows)
- set (Boost_components filesystem program_options date_time thread unit_test_framework regex)
+ set (Boost_components program_options date_time thread unit_test_framework regex)
else (CMAKE_SYSTEM_NAME STREQUAL Windows)
- set (Boost_components filesystem program_options unit_test_framework)
+ set (Boost_components program_options unit_test_framework)
endif (CMAKE_SYSTEM_NAME STREQUAL Windows)
# Visual Studio 2010 requires boost 1.45 or better.
@@ -315,7 +316,10 @@ endif (CMAKE_SYSTEM_NAME STREQUAL Windows)
# where Boost 1.45 is supported, or we can just accept some versions using
# the Additional_versions variable.
if (NOT DEFINED Boost_ADDITIONAL_VERSIONS)
- set (Boost_ADDITIONAL_VERSIONS "1.45" "1.45.0" "1.46" "1.46.0" "1.47" "1.47.0")
+ set (Boost_ADDITIONAL_VERSIONS
+ "1.45" "1.45.0" "1.46" "1.46.0" "1.47" "1.47.0"
+ "1.48" "1.48.0" "1.49" "1.49.0" "1.50" "1.50.0"
+ "1.51" "1.51.0" "1.52" "1.52.0")
endif (NOT DEFINED Boost_ADDITIONAL_VERSIONS)
find_package(Boost 1.33 REQUIRED COMPONENTS ${Boost_components})
@@ -340,10 +344,6 @@ if (NOT Boost_PROGRAM_OPTIONS_LIBRARY)
set(Boost_PROGRAM_OPTIONS_LIBRARY boost_program_options)
endif (NOT Boost_PROGRAM_OPTIONS_LIBRARY)
-if (NOT Boost_FILESYSTEM_LIBRARY)
- set(Boost_FILESYSTEM_LIBRARY boost_filesystem)
-endif (NOT Boost_FILESYSTEM_LIBRARY)
-
if (NOT Boost_UNIT_TEST_FRAMEWORK_LIBRARY)
set(Boost_UNIT_TEST_FRAMEWORK_LIBRARY boost_unit_test_framework)
endif (NOT Boost_UNIT_TEST_FRAMEWORK_LIBRARY)
@@ -369,7 +369,6 @@ option(QPID_LINK_BOOST_DYNAMIC "Link with dynamic Boost libs (OFF to link static
if (MSVC)
install (PROGRAMS
${Boost_DATE_TIME_LIBRARY_DEBUG} ${Boost_DATE_TIME_LIBRARY_RELEASE}
- ${Boost_FILESYSTEM_LIBRARY_DEBUG} ${Boost_FILESYSTEM_LIBRARY_RELEASE}
${Boost_PROGRAM_OPTIONS_LIBRARY_DEBUG} ${Boost_PROGRAM_OPTIONS_LIBRARY_RELEASE}
${Boost_REGEX_LIBRARY_DEBUG} ${Boost_REGEX_LIBRARY_RELEASE}
${Boost_THREAD_LIBRARY_DEBUG} ${Boost_THREAD_LIBRARY_RELEASE}
@@ -390,10 +389,6 @@ if (MSVC)
string (REPLACE .lib .dll
_boost_date_time_release ${Boost_DATE_TIME_LIBRARY_RELEASE})
string (REPLACE .lib .dll
- _boost_filesystem_debug ${Boost_FILESYSTEM_LIBRARY_DEBUG})
- string (REPLACE .lib .dll
- _boost_filesystem_release ${Boost_FILESYSTEM_LIBRARY_RELEASE})
- string (REPLACE .lib .dll
_boost_program_options_debug ${Boost_PROGRAM_OPTIONS_LIBRARY_DEBUG})
string (REPLACE .lib .dll
_boost_program_options_release ${Boost_PROGRAM_OPTIONS_LIBRARY_RELEASE})
@@ -416,7 +411,6 @@ if (MSVC)
endif (NOT Boost_VERSION LESS 103500)
install (PROGRAMS
${_boost_date_time_debug} ${_boost_date_time_release}
- ${_boost_filesystem_debug} ${_boost_filesystem_release}
${_boost_program_options_debug} ${_boost_program_options_release}
${_boost_regex_debug} ${_boost_regex_release}
${_boost_system_debug} ${_boost_system_release}
@@ -465,7 +459,6 @@ if (MSVC)
set(Boost_DATE_TIME_LIBRARY "")
set(Boost_THREAD_LIBRARY "")
set(Boost_PROGRAM_OPTIONS_LIBRARY "")
- set(Boost_FILESYSTEM_LIBRARY "")
set(Boost_UNIT_TEST_FRAMEWORK_LIBRARY "")
set(Boost_REGEX_LIBRARY "")
include_directories( ${CMAKE_CURRENT_SOURCE_DIR}/windows/resources )
@@ -767,7 +760,7 @@ if (CMAKE_SYSTEM_NAME STREQUAL Windows)
)
set (qpidcommon_platform_LIBS
- ${Boost_THREAD_LIBRARY} ${windows_ssl_libs} ${Boost_PROGRAM_OPTIONS_LIBRARY} ${Boost_DATE_TIME_LIBRARY} ${Boost_FILESYSTEM_LIBRARY} ${Boost_SYSTEM_LIBRARY} ws2_32 )
+ ${Boost_THREAD_LIBRARY} ${windows_ssl_libs} ${Boost_PROGRAM_OPTIONS_LIBRARY} ${Boost_DATE_TIME_LIBRARY} ${Boost_SYSTEM_LIBRARY} ws2_32 )
set (qpidbroker_platform_SOURCES
qpid/broker/windows/BrokerDefaults.cpp
qpid/broker/windows/SaslAuthenticator.cpp
@@ -871,7 +864,6 @@ else (CMAKE_SYSTEM_NAME STREQUAL Windows)
)
set (qpidcommon_platform_LIBS
${Boost_PROGRAM_OPTIONS_LIBRARY}
- ${Boost_FILESYSTEM_LIBRARY}
${CMAKE_DL_LIBS}
)
@@ -1069,16 +1061,9 @@ install_pdb (qpidclient ${QPID_COMPONENT_CLIENT})
set (qpidmessaging_SOURCES_hidden
qpid/messaging/AddressParser.h
qpid/messaging/ConnectionImpl.h
- qpid/messaging/ConnectionOptions.h
- qpid/messaging/ConnectionOptions.cpp
- qpid/messaging/MessageImpl.h
- qpid/messaging/MessageImpl.cpp
- qpid/messaging/ProtocolRegistry.cpp
qpid/messaging/ReceiverImpl.h
qpid/messaging/SessionImpl.h
qpid/messaging/SenderImpl.h
- qpid/messaging/amqp/EncodedMessage.h
- qpid/messaging/amqp/EncodedMessage.cpp
qpid/client/amqp0_10/AcceptTracker.h
qpid/client/amqp0_10/AcceptTracker.cpp
qpid/client/amqp0_10/AddressResolution.h
@@ -1116,6 +1101,14 @@ set (qpidmessaging_SOURCES
qpid/messaging/Receiver.cpp
qpid/messaging/Session.cpp
qpid/messaging/Sender.cpp
+ # Functions from the following are not in the public interface, but are used by the AMQP 1.0 client module
+ qpid/messaging/ConnectionOptions.h
+ qpid/messaging/ConnectionOptions.cpp
+ qpid/messaging/MessageImpl.h
+ qpid/messaging/MessageImpl.cpp
+ qpid/messaging/ProtocolRegistry.cpp
+ qpid/messaging/amqp/EncodedMessage.h
+ qpid/messaging/amqp/EncodedMessage.cpp
)
add_msvc_version (qpidmessaging library dll)
@@ -1175,6 +1168,7 @@ set (qpidbroker_SOURCES
qpid/broker/Bridge.cpp
qpid/broker/Connection.cpp
qpid/broker/ConnectionHandler.cpp
+ qpid/broker/ConnectionState.cpp
qpid/broker/DeliverableMessage.cpp
qpid/broker/DeliveryRecord.cpp
qpid/broker/DirectExchange.cpp
@@ -1252,8 +1246,7 @@ set (qpidd_SOURCES
)
add_msvc_version (qpidd application exe)
add_executable (qpidd ${qpidd_SOURCES})
-target_link_libraries (qpidd qpidbroker qpidcommon ${Boost_PROGRAM_OPTIONS_LIBRARY}
- ${Boost_FILESYSTEM_LIBRARY})
+target_link_libraries (qpidd qpidbroker qpidcommon ${Boost_PROGRAM_OPTIONS_LIBRARY})
set_target_properties (qpidd PROPERTIES COMPILE_DEFINITIONS _IN_QPID_BROKER)
install (TARGETS qpidd RUNTIME
DESTINATION ${QPID_INSTALL_SBINDIR}
@@ -1476,13 +1469,16 @@ install (FILES ${qmfconsole_HEADERS}
install_pdb (qmfconsole ${QPID_COMPONENT_QMF})
+#
+# Legacy store
+#
+include (legacystore.cmake)
+
# This is only really needed until all the trunk builds (Linux, UNIX, Windows)
# are all on cmake only. This is because cmake builds always have a config.h
# file whereas older builds only have config.h on autoconf-generated builds.
add_definitions(-DHAVE_CONFIG_H)
-add_definitions(-DBOOST_FILESYSTEM_VERSION=2)
-
# Now create the config file from all the info learned above.
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/config.h.cmake
${CMAKE_CURRENT_BINARY_DIR}/config.h)
diff --git a/qpid/cpp/src/Makefile.am b/qpid/cpp/src/Makefile.am
index cdddd22c41..8dca041df3 100644
--- a/qpid/cpp/src/Makefile.am
+++ b/qpid/cpp/src/Makefile.am
@@ -138,7 +138,6 @@ qpidtest_SCRIPTS =
tmoduleexecdir = $(libdir)/qpid/tests
tmoduleexec_LTLIBRARIES=
-AM_CXXFLAGS += -DBOOST_FILESYSTEM_VERSION=2
BROKER_CXXFLAGS = -D_IN_QPID_BROKER
## Automake macros to build libraries and executables.
@@ -299,6 +298,7 @@ EXTRA_DIST +=\
amqp.cmake \
rdma.cmake \
ssl.cmake \
+ legacystore.cmake \
managementgen.cmake \
rubygen.cmake \
versions.cmake \
@@ -338,7 +338,6 @@ EXTRA_DIST +=\
libqpidcommon_la_LIBADD = \
libqpidtypes.la \
-lboost_program_options \
- -lboost_filesystem \
-luuid \
-lpthread \
$(LIB_DLOPEN) \
@@ -582,6 +581,7 @@ libqpidbroker_la_SOURCES = \
qpid/broker/Connection.h \
qpid/broker/ConnectionHandler.cpp \
qpid/broker/ConnectionHandler.h \
+ qpid/broker/ConnectionState.cpp \
qpid/broker/ConnectionState.h \
qpid/broker/ConnectionToken.h \
qpid/broker/Consumer.h \
@@ -727,7 +727,6 @@ libqpidbroker_la_SOURCES = \
qpid/broker/SessionState.h \
qpid/broker/SignalHandler.cpp \
qpid/broker/SignalHandler.h \
- qpid/broker/StatefulQueueObserver.h \
qpid/broker/System.cpp \
qpid/broker/System.h \
qpid/broker/ThresholdAlerts.cpp \
diff --git a/qpid/cpp/src/finddb.cmake b/qpid/cpp/src/finddb.cmake
new file mode 100644
index 0000000000..fad827cffe
--- /dev/null
+++ b/qpid/cpp/src/finddb.cmake
@@ -0,0 +1,74 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+if(UNIX)
+# - Find BerkeleyDB
+# Find the BerkeleyDB includes and library
+# This module defines
+# DB_INCLUDE_DIR, where to find db.h, etc.
+# DB_LIBRARIES, the libraries needed to use BerkeleyDB.
+# DB_FOUND, If false, do not try to use BerkeleyDB.
+# also defined, but not for general use are
+# DB_LIBRARY, where to find the BerkeleyDB library.
+
+FIND_PATH(DB_INCLUDE_DIR db.h
+ /usr/local/include/db4
+ /usr/local/include
+ /usr/include/db4
+ /usr/include
+)
+
+SET(DB_NAMES ${DB_NAMES} db_cxx)
+FIND_LIBRARY(DB_LIBRARY
+ NAMES ${DB_NAMES}
+ PATHS /usr/lib /usr/local/lib
+)
+
+IF (DB_LIBRARY AND DB_INCLUDE_DIR)
+ SET(DB_LIBRARIES ${DB_LIBRARY})
+ SET(DB_FOUND "YES")
+ELSE (DB_LIBRARY AND DB_INCLUDE_DIR)
+ UNSET( DB_FOUND )
+ENDIF (DB_LIBRARY AND DB_INCLUDE_DIR)
+
+
+IF (DB_FOUND)
+ IF (NOT DB_FIND_QUIETLY)
+ MESSAGE(STATUS "Found BerkeleyDB: ${DB_LIBRARIES}")
+ ENDIF (NOT DB_FIND_QUIETLY)
+ELSE (DB_FOUND)
+ IF (DB_FIND_REQUIRED)
+ MESSAGE(FATAL_ERROR "Could not find BerkeleyDB library")
+ ENDIF (DB_FIND_REQUIRED)
+ENDIF (DB_FOUND)
+
+# Deprecated declarations.
+SET (NATIVE_DB_INCLUDE_PATH ${DB_INCLUDE_DIR} )
+GET_FILENAME_COMPONENT (NATIVE_DB_LIB_PATH ${DB_LIBRARY} PATH)
+
+MARK_AS_ADVANCED(
+ DB_LIBRARY
+ DB_INCLUDE_DIR
+)
+
+else(UNIX)
+ MESSAGE(STATUS "BerkeleyDB is ignored on non-Unix platforms")
+ UNSET( DB_FOUND )
+endif(UNIX) \ No newline at end of file
diff --git a/qpid/cpp/src/generate.sh b/qpid/cpp/src/generate.sh
deleted file mode 100755
index 581a45ff7f..0000000000
--- a/qpid/cpp/src/generate.sh
+++ /dev/null
@@ -1,67 +0,0 @@
-# !/bin/sh
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Generate code from AMQP specification.
-# specs and gentools_dir are set by Makefile
-#
-set -e
-
-test -z "$JAVA" && JAVA=java ;
-test -z "$JAVAC" && JAVAC=javac ;
-
-srcdir=`dirname $0`
-checkspecs() {
- for s in $specs; do test -f $s || return 1; done
- return 0
-}
-
-# Can we generate code?
-if { test -d $gentools_dir && checkspecs &&
- which $JAVA && which $JAVAC; } > /dev/null;
-then
- echo "Generating code."
- mkdir -p gen/qpid/framing
- ( cd $gentools_dir/src && $JAVAC `find -name '*.java' -print` ; )
- $JAVA -cp $gentools_dir/src org.apache.qpid.gentools.Main \
- -c -o gen/qpid/framing -t $gentools_dir/templ.cpp $specs
- GENERATED=yes
-fi
-
-# Print a Makefile variable assignment.
-make_assign() {
- echo -n "$1 = "; shift
- prefix=$1; shift
- for f in $*; do echo "\\" ; echo -n " $prefix$f "; done
- echo
-}
-
-# Generate a Makefile fragment
-(
- make_assign "generated_cpp" "" `find gen -name '*.cpp' -print`
- make_assign "generated_h" "" `find gen -name '*.h' -print`
- if test x$GENERATED = xyes; then
- make_assign "generator" "" $specs \
- `find ../gentools \( -name '*.java' -o -name '*.tmpl' \) -print`
- fi
-) > generate.mk-t
-mv generate.mk-t $srcdir/generate.mk
-
-
-
diff --git a/qpid/cpp/src/legacystore.cmake b/qpid/cpp/src/legacystore.cmake
new file mode 100644
index 0000000000..10a166f50c
--- /dev/null
+++ b/qpid/cpp/src/legacystore.cmake
@@ -0,0 +1,157 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+# Legacy store library CMake fragment, to be included in CMakeLists.txt
+#
+
+if (DEFINED legacystore_force)
+ set (legacystore_default ${legacystore_force})
+else (DEFINED legacystore_force)
+ set (legacystore_default OFF)
+ if (UNIX)
+ #
+ # Find required BerkeleyDB
+ #
+ include (finddb.cmake)
+ if (DB_FOUND)
+ #
+ # find libaio
+ #
+ CHECK_LIBRARY_EXISTS (aio io_queue_init "" HAVE_AIO)
+ CHECK_INCLUDE_FILES (libaio.h HAVE_AIO_H)
+ if (HAVE_AIO AND HAVE_AIO_H)
+ #
+ # find libuuid
+ #
+ CHECK_LIBRARY_EXISTS (uuid uuid_compare "" HAVE_UUID)
+ CHECK_INCLUDE_FILES(uuid/uuid.h HAVE_UUID_H)
+ IF (HAVE_UUID AND HAVE_UUID_H)
+ #
+ # allow legacystore to be built
+ #
+ set (legacystore_default ON)
+ ENDIF (HAVE_UUID AND HAVE_UUID_H)
+ endif (HAVE_AIO AND HAVE_AIO_H)
+ endif (DB_FOUND)
+ endif (UNIX)
+endif (DEFINED legacystore_force)
+
+option(BUILD_LEGACYSTORE "Build legacystore persistent store" ${legacystore_default})
+
+if (BUILD_LEGACYSTORE)
+ if (NOT UNIX)
+ message(FATAL_ERROR "Legacystore produced only on Unix platforms")
+ endif (NOT UNIX)
+ if (NOT DB_FOUND)
+ message(FATAL_ERROR "Legacystore requires BerkeleyDB which is absent.")
+ endif (NOT DB_FOUND)
+ if (NOT HAVE_AIO)
+ message(FATAL_ERROR "Legacystore requires libaio which is absent.")
+ endif (NOT HAVE_AIO)
+ if (NOT HAVE_AIO_H)
+ message(FATAL_ERROR "Legacystore requires libaio.h which is absent.")
+ endif (NOT HAVE_AIO_H)
+ if (NOT HAVE_UUID)
+ message(FATAL_ERROR "Legacystore requires uuid which is absent.")
+ endif (NOT HAVE_UUID)
+ if (NOT HAVE_UUID_H)
+ message(FATAL_ERROR "Legacystore requires uuid.h which is absent.")
+ endif (NOT HAVE_UUID_H)
+
+ # Journal source files
+ set (legacy_jrnl_SOURCES
+ qpid/legacystore/jrnl/aio.cpp
+ qpid/legacystore/jrnl/cvar.cpp
+ qpid/legacystore/jrnl/data_tok.cpp
+ qpid/legacystore/jrnl/deq_rec.cpp
+ qpid/legacystore/jrnl/enq_map.cpp
+ qpid/legacystore/jrnl/enq_rec.cpp
+ qpid/legacystore/jrnl/fcntl.cpp
+ qpid/legacystore/jrnl/jcntl.cpp
+ qpid/legacystore/jrnl/jdir.cpp
+ qpid/legacystore/jrnl/jerrno.cpp
+ qpid/legacystore/jrnl/jexception.cpp
+ qpid/legacystore/jrnl/jinf.cpp
+ qpid/legacystore/jrnl/jrec.cpp
+ qpid/legacystore/jrnl/lp_map.cpp
+ qpid/legacystore/jrnl/lpmgr.cpp
+ qpid/legacystore/jrnl/pmgr.cpp
+ qpid/legacystore/jrnl/rmgr.cpp
+ qpid/legacystore/jrnl/rfc.cpp
+ qpid/legacystore/jrnl/rrfc.cpp
+ qpid/legacystore/jrnl/slock.cpp
+ qpid/legacystore/jrnl/smutex.cpp
+ qpid/legacystore/jrnl/time_ns.cpp
+ qpid/legacystore/jrnl/txn_map.cpp
+ qpid/legacystore/jrnl/txn_rec.cpp
+ qpid/legacystore/jrnl/wmgr.cpp
+ qpid/legacystore/jrnl/wrfc.cpp
+ )
+
+ # legacyStore source files
+ set (legacy_store_SOURCES
+ qpid/legacystore/StorePlugin.cpp
+ qpid/legacystore/BindingDbt.cpp
+ qpid/legacystore/BufferValue.cpp
+ qpid/legacystore/DataTokenImpl.cpp
+ qpid/legacystore/IdDbt.cpp
+ qpid/legacystore/IdSequence.cpp
+ qpid/legacystore/JournalImpl.cpp
+ qpid/legacystore/MessageStoreImpl.cpp
+ qpid/legacystore/PreparedTransaction.cpp
+ qpid/legacystore/TxnCtxt.cpp
+ )
+
+ # legacyStore include directories
+ get_property(dirs DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY INCLUDE_DIRECTORIES)
+ set (legacy_include_DIRECTORIES
+ ${dirs}
+ ${CMAKE_CURRENT_SOURCE_DIR}/qpid/legacystore
+ )
+
+ if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/db-inc.h)
+ message(STATUS "Including BDB from ${DB_INCLUDE_DIR}/db_cxx.h")
+ file(WRITE
+ ${CMAKE_CURRENT_BINARY_DIR}/db-inc.h
+ "#include <${DB_INCLUDE_DIR}/db_cxx.h>")
+ endif()
+
+ add_library (legacystore SHARED
+ ${legacy_jrnl_SOURCES}
+ ${legacy_store_SOURCES}
+ ${legacy_qmf_SOURCES}
+ )
+
+ set_target_properties (legacystore PROPERTIES
+ PREFIX ""
+ COMPILE_DEFINITIONS _IN_QPID_BROKER
+ OUTPUT_NAME legacystore
+ SOVERSION ${legacystore_version}
+ INCLUDE_DIRECTORIES "${legacy_include_DIRECTORIES}"
+ )
+
+ target_link_libraries (legacystore
+ aio
+ uuid
+ qpidcommon qpidtypes qpidbroker
+ ${DB_LIBRARY}
+ )
+else (BUILD_LEGACYSTORE)
+ message(STATUS "Legacystore is excluded from build.")
+endif (BUILD_LEGACYSTORE)
diff --git a/qpid/cpp/src/qpid/Modules.cpp b/qpid/cpp/src/qpid/Modules.cpp
index 727e05d212..049ededaa7 100644
--- a/qpid/cpp/src/qpid/Modules.cpp
+++ b/qpid/cpp/src/qpid/Modules.cpp
@@ -24,11 +24,7 @@
#include "qpid/Exception.h"
#include "qpid/log/Statement.h"
#include "qpid/sys/Shlib.h"
-
-#include <boost/filesystem/operations.hpp>
-#include <boost/filesystem/path.hpp>
-
-namespace fs=boost::filesystem;
+#include "qpid/sys/FileSysDir.h"
namespace {
@@ -43,7 +39,7 @@ inline std::string& suffix() {
}
bool isShlibName(const std::string& name) {
- return name.find (suffix()) == name.length() - suffix().length();
+ return name.substr(name.size()-suffix().size()) == suffix();
}
}
@@ -59,39 +55,40 @@ ModuleOptions::ModuleOptions(const std::string& defaultModuleDir)
("no-module-dir", optValue(noLoad), "Don't load modules from module directory");
}
-void tryShlib(const char* libname_, bool noThrow) {
- std::string libname(libname_);
- if (!isShlibName(libname)) libname += suffix();
+void tryShlib(const std::string& libname) {
+ sys::Shlib shlib( isShlibName(libname) ? libname : (libname + suffix()));
+}
+
+namespace {
+
+void tryOnlyShlib(const std::string& libname) throw() {
try {
- sys::Shlib shlib(libname);
+ if (isShlibName(libname)) sys::Shlib shlib( libname );
}
catch (const std::exception& /*e*/) {
- if (!noThrow)
- throw;
}
}
+}
+
void loadModuleDir (std::string dirname, bool isDefault)
{
- fs::path dirPath (dirname, fs::native);
- if (!fs::exists (dirPath))
+ sys::FileSysDir dirPath (dirname);
+
+ bool exists;
+ try
{
- if (isDefault)
- return;
- throw Exception ("Directory not found: " + dirname);
+ exists = dirPath.exists();
+ } catch (Exception& e) {
+ throw Exception ("Invalid value for module-dir: " + e.getMessage());
}
- if (!fs::is_directory(dirPath))
- {
- throw Exception ("Invalid value for module-dir: " + dirname + " is not a directory");
+ if (!exists) {
+ if (isDefault) return;
+ throw Exception ("Directory not found: " + dirname);
}
- fs::directory_iterator endItr;
- for (fs::directory_iterator itr (dirPath); itr != endItr; ++itr)
- {
- if (!fs::is_directory(*itr) && isShlibName(itr->string()))
- tryShlib (itr->string().data(), true);
- }
+ dirPath.forEachFile(&tryOnlyShlib);
}
} // namespace qpid
diff --git a/qpid/cpp/src/qpid/Modules.h b/qpid/cpp/src/qpid/Modules.h
index 159dd156c1..9fb91d60eb 100644
--- a/qpid/cpp/src/qpid/Modules.h
+++ b/qpid/cpp/src/qpid/Modules.h
@@ -36,7 +36,7 @@ struct ModuleOptions : public qpid::Options {
QPID_COMMON_EXTERN ModuleOptions(const std::string& defaultModuleDir);
};
-QPID_COMMON_EXTERN void tryShlib(const char* libname, bool noThrow);
+QPID_COMMON_EXTERN void tryShlib(const std::string& libname);
QPID_COMMON_EXTERN void loadModuleDir (std::string dirname, bool isDefault);
} // namespace qpid
diff --git a/qpid/cpp/src/qpid/Options.cpp b/qpid/cpp/src/qpid/Options.cpp
index b96fc59dda..c0e955e2b3 100644
--- a/qpid/cpp/src/qpid/Options.cpp
+++ b/qpid/cpp/src/qpid/Options.cpp
@@ -74,14 +74,28 @@ struct EnvOptMapper {
}
- string configFileLine (string& line) {
+ void badArg ( string& line ) {
+ ostringstream msg;
+ msg << "Bad argument: |" << line << "|\n";
+ throw Exception(msg.str());
+ }
- if ( isComment ( line ) )
- return string();
- size_t pos = line.find ('=');
- if (pos == string::npos)
+ string configFileLine (string& line, bool allowUnknowns=true) {
+
+ if ( isComment ( line ) ) {
return string();
+ }
+
+ size_t pos = line.find ('=');
+ if (pos == string::npos) {
+ if ( allowUnknowns ) {
+ return string();
+ }
+ else {
+ badArg ( line );
+ }
+ }
string key = line.substr (0, pos);
#if (BOOST_VERSION >= 103300)
typedef const std::vector< boost::shared_ptr<po::option_description> > OptDescs;
@@ -89,16 +103,31 @@ struct EnvOptMapper {
find_if(opts.options().begin(), opts.options().end(), boost::bind(matchCase, key, _1));
if (i != opts.options().end())
return string (line) + "\n";
- else
- return string();
+ else {
+ if ( allowUnknowns ) {
+ return string();
+ }
+ else {
+ badArg ( line );
+ }
+ }
#else
- // Use 'count' to see if this option exists. Using 'find' will SEGV or hang
- // if the option has not been defined yet.
+ // Use 'count' to see if this option exists. Using 'find' will
+ // SEGV or hang if the option has not been defined yet.
if ( opts.count(key.c_str()) > 0 )
return string ( line ) + "\n";
- else
- return string ( );
+ else {
+ if ( allowUnknowns ) {
+ return string ( );
+ }
+ else {
+ badArg ( line );
+ }
+ }
#endif
+ // Control will not arrive here, but the compiler thinks it could.
+ // The calls to badArg() above always throw.
+ return string();
}
const Options& opts;
@@ -160,7 +189,7 @@ void Options::parse(int argc, char const* const* argv, const std::string& config
while (!conf.eof()) {
string line;
getline (conf, line);
- filtered << mapper.configFileLine (line);
+ filtered << mapper.configFileLine (line, allowUnknown);
}
po::store(po::parse_config_file(filtered, *this), vm);
diff --git a/qpid/cpp/src/qpid/Url.cpp b/qpid/cpp/src/qpid/Url.cpp
index 840f46e928..21de32aaa3 100644
--- a/qpid/cpp/src/qpid/Url.cpp
+++ b/qpid/cpp/src/qpid/Url.cpp
@@ -64,19 +64,6 @@ class ProtocolTags {
Url::Invalid::Invalid(const string& s) : Exception(s) {}
-Url Url::getHostNameUrl(uint16_t port) {
- Address address("tcp", std::string(), port);
- if (!sys::SystemInfo::getLocalHostname(address))
- throw Url::Invalid(QPID_MSG("Cannot get host name: " << qpid::sys::strError(errno)));
- return Url(address);
-}
-
-Url Url::getIpAddressesUrl(uint16_t port) {
- Url url;
- sys::SystemInfo::getLocalIpAddresses(port, url);
- return url;
-}
-
string Url::str() const {
if (cache.empty() && !this->empty()) {
ostringstream os;
diff --git a/qpid/cpp/src/qpid/UrlArray.h b/qpid/cpp/src/qpid/UrlArray.h
index ce9e42f248..f0065f0f0c 100644
--- a/qpid/cpp/src/qpid/UrlArray.h
+++ b/qpid/cpp/src/qpid/UrlArray.h
@@ -1,5 +1,5 @@
-#ifndef QPID_CLUSTER_URLARRAY_H
-#define QPID_CLUSTER_URLARRAY_H
+#ifndef QPID_URLARRAY_H
+#define QPID_URLARRAY_H
/*
*
@@ -33,4 +33,4 @@ QPID_COMMON_EXTERN std::vector<Url> urlArrayToVector(const framing::Array& array
QPID_COMMON_EXTERN framing::Array vectorToUrlArray(const std::vector<Url>& urls);
} // namespace qpid
-#endif /* !QPID_CLUSTER_URLARRAY_H */
+#endif /* !QPID_URLARRAY_H */
diff --git a/qpid/cpp/src/qpid/acl/AclConnectionCounter.cpp b/qpid/cpp/src/qpid/acl/AclConnectionCounter.cpp
index 052fa3c222..195d8bee28 100644
--- a/qpid/cpp/src/qpid/acl/AclConnectionCounter.cpp
+++ b/qpid/cpp/src/qpid/acl/AclConnectionCounter.cpp
@@ -226,74 +226,44 @@ bool ConnectionCounter::approveConnection(const broker::Connection& connection)
bool okTotal = true;
if (totalLimit > 0) {
okTotal = totalCurrentConnections <= totalLimit;
- if (!connection.isShadow()) {
- QPID_LOG(trace, "ACL ConnectionApprover totalLimit=" << totalLimit
- << " curValue=" << totalCurrentConnections
- << " result=" << (okTotal ? "allow" : "deny"));
- }
+ QPID_LOG(trace, "ACL ConnectionApprover totalLimit=" << totalLimit
+ << " curValue=" << totalCurrentConnections
+ << " result=" << (okTotal ? "allow" : "deny"));
}
// Approve by IP host connections
- bool okByIP = limitApproveLH(connectByHostMap, hostName, hostLimit, !connection.isShadow());
+ bool okByIP = limitApproveLH(connectByHostMap, hostName, hostLimit, true);
// Count and Approve the connection by the user
- bool okByUser = countConnectionLH(connectByNameMap, userName, nameLimit, !connection.isShadow());
-
- if (!connection.isShadow()) {
- // Emit separate log for each disapproval
- if (!okTotal) {
- QPID_LOG(error, "Client max total connection count limit of " << totalLimit
- << " exceeded by '"
- << connection.getMgmtId() << "', user: '"
- << userName << "'. Connection refused");
- }
- if (!okByIP) {
- QPID_LOG(error, "Client max per-host connection count limit of "
- << hostLimit << " exceeded by '"
- << connection.getMgmtId() << "', user: '"
- << userName << "'. Connection refused.");
- }
- if (!okByUser) {
- QPID_LOG(error, "Client max per-user connection count limit of "
- << nameLimit << " exceeded by '"
- << connection.getMgmtId() << "', user: '"
- << userName << "'. Connection refused.");
- }
-
- // Count/Event once for each disapproval
- bool result = okTotal && okByIP && okByUser;
- if (!result) {
- acl.reportConnectLimit(userName, hostName);
- }
+ bool okByUser = countConnectionLH(connectByNameMap, userName, nameLimit, true);
+
+ // Emit separate log for each disapproval
+ if (!okTotal) {
+ QPID_LOG(error, "Client max total connection count limit of " << totalLimit
+ << " exceeded by '"
+ << connection.getMgmtId() << "', user: '"
+ << userName << "'. Connection refused");
+ }
+ if (!okByIP) {
+ QPID_LOG(error, "Client max per-host connection count limit of "
+ << hostLimit << " exceeded by '"
+ << connection.getMgmtId() << "', user: '"
+ << userName << "'. Connection refused.");
+ }
+ if (!okByUser) {
+ QPID_LOG(error, "Client max per-user connection count limit of "
+ << nameLimit << " exceeded by '"
+ << connection.getMgmtId() << "', user: '"
+ << userName << "'. Connection refused.");
+ }
- return result;
- } else {
- // Always allow shadow connections
- if (!okTotal) {
- QPID_LOG(warning, "Client max total connection count limit of " << totalLimit
- << " exceeded by '"
- << connection.getMgmtId() << "', user: '"
- << userName << "' but still within tolerance. Cluster connection allowed");
- }
- if (!okByIP) {
- QPID_LOG(warning, "Client max per-host connection count limit of "
- << hostLimit << " exceeded by '"
- << connection.getMgmtId() << "', user: '"
- << userName << "' but still within tolerance. Cluster connection allowed");
- }
- if (!okByUser) {
- QPID_LOG(warning, "Client max per-user connection count limit of "
- << nameLimit << " exceeded by '"
- << connection.getMgmtId() << "', user: '"
- << userName << "' but still within tolerance. Cluster connection allowed");
- }
- if (okTotal && okByIP && okByUser) {
- QPID_LOG(debug, "Cluster client connection: '"
- << connection.getMgmtId() << "', user '"
- << userName << "' allowed");
- }
- return true;
+ // Count/Event once for each disapproval
+ bool result = okTotal && okByIP && okByUser;
+ if (!result) {
+ acl.reportConnectLimit(userName, hostName);
}
+
+ return result;
}
//
diff --git a/qpid/cpp/src/qpid/agent/ManagementAgentImpl.h b/qpid/cpp/src/qpid/agent/ManagementAgentImpl.h
index d801989f64..4c97bc89da 100644
--- a/qpid/cpp/src/qpid/agent/ManagementAgentImpl.h
+++ b/qpid/cpp/src/qpid/agent/ManagementAgentImpl.h
@@ -92,10 +92,6 @@ class ManagementAgentImpl : public ManagementAgent, public client::MessageListen
uint16_t getInterval() { return interval; }
void periodicProcessing();
- // these next are here to support the hot-wiring of state between clustered brokers
- uint64_t getNextObjectId(void) { return nextObjectId; }
- void setNextObjectId(uint64_t o) { nextObjectId = o; }
-
uint16_t getBootSequence(void) { return bootSequence; }
void setBootSequence(uint16_t b) { bootSequence = b; }
diff --git a/qpid/cpp/src/qpid/amqp/descriptors.h b/qpid/cpp/src/qpid/amqp/descriptors.h
index b2616ed93a..19a8985433 100644
--- a/qpid/cpp/src/qpid/amqp/descriptors.h
+++ b/qpid/cpp/src/qpid/amqp/descriptors.h
@@ -79,8 +79,8 @@ namespace filters {
const std::string LEGACY_DIRECT_FILTER_SYMBOL("apache.org:legacy-amqp-direct-binding:string");
const std::string LEGACY_TOPIC_FILTER_SYMBOL("apache.org:legacy-amqp-direct-binding:string");
-const uint64_t LEGACY_DIRECT_FILTER_CODE(0x0000468C00000000);
-const uint64_t LEGACY_TOPIC_FILTER_CODE(0x0000468C00000001);
+const uint64_t LEGACY_DIRECT_FILTER_CODE(0x0000468C00000000ULL);
+const uint64_t LEGACY_TOPIC_FILTER_CODE(0x0000468C00000001ULL);
}
}} // namespace qpid::amqp
diff --git a/qpid/cpp/src/qpid/amqp_0_10/Connection.cpp b/qpid/cpp/src/qpid/amqp_0_10/Connection.cpp
index 8cb675202e..15df439c9c 100644
--- a/qpid/cpp/src/qpid/amqp_0_10/Connection.cpp
+++ b/qpid/cpp/src/qpid/amqp_0_10/Connection.cpp
@@ -146,10 +146,6 @@ framing::ProtocolVersion Connection::getVersion() const {
return version;
}
-void Connection::setVersion(const framing::ProtocolVersion& v) {
- version = v;
-}
-
size_t Connection::getBuffered() const {
Mutex::ScopedLock l(frameQueueLock);
return buffered;
diff --git a/qpid/cpp/src/qpid/amqp_0_10/Connection.h b/qpid/cpp/src/qpid/amqp_0_10/Connection.h
index 4a08ee51fd..2ac9edf7a2 100644
--- a/qpid/cpp/src/qpid/amqp_0_10/Connection.h
+++ b/qpid/cpp/src/qpid/amqp_0_10/Connection.h
@@ -72,10 +72,6 @@ class Connection : public sys::ConnectionCodec,
void send(framing::AMQFrame&);
framing::ProtocolVersion getVersion() const;
size_t getBuffered() const;
-
- /** Used by cluster code to set a special version on "update" connections. */
- // FIXME aconway 2009-07-30: find a cleaner mechanism for this.
- void setVersion(const framing::ProtocolVersion&);
};
}} // namespace qpid::amqp_0_10
diff --git a/qpid/cpp/src/qpid/broker/Bridge.cpp b/qpid/cpp/src/qpid/broker/Bridge.cpp
index d7844b50ce..68bdf6d474 100644
--- a/qpid/cpp/src/qpid/broker/Bridge.cpp
+++ b/qpid/cpp/src/qpid/broker/Bridge.cpp
@@ -19,6 +19,8 @@
*
*/
#include "qpid/broker/Bridge.h"
+
+#include "qpid/broker/Broker.h"
#include "qpid/broker/FedOps.h"
#include "qpid/broker/ConnectionState.h"
#include "qpid/broker/Connection.h"
diff --git a/qpid/cpp/src/qpid/broker/Broker.cpp b/qpid/cpp/src/qpid/broker/Broker.cpp
index 094dd63527..b28216f998 100644
--- a/qpid/cpp/src/qpid/broker/Broker.cpp
+++ b/qpid/cpp/src/qpid/broker/Broker.cpp
@@ -20,11 +20,14 @@
*/
#include "qpid/broker/Broker.h"
+
+#include "qpid/broker/AclModule.h"
#include "qpid/broker/ConnectionState.h"
#include "qpid/broker/DirectExchange.h"
#include "qpid/broker/FanOutExchange.h"
#include "qpid/broker/HeadersExchange.h"
#include "qpid/broker/MessageStoreModule.h"
+#include "qpid/broker/NameGenerator.h"
#include "qpid/broker/NullMessageStore.h"
#include "qpid/broker/RecoveryManagerImpl.h"
#include "qpid/broker/SaslAuthenticator.h"
@@ -37,6 +40,7 @@
#include "qpid/broker/MessageGroupManager.h"
#include "qmf/org/apache/qpid/broker/Package.h"
+#include "qmf/org/apache/qpid/broker/ArgsBrokerConnect.h"
#include "qmf/org/apache/qpid/broker/ArgsBrokerCreate.h"
#include "qmf/org/apache/qpid/broker/ArgsBrokerDelete.h"
#include "qmf/org/apache/qpid/broker/ArgsBrokerQuery.h"
@@ -67,6 +71,7 @@
#include "qpid/sys/Dispatcher.h"
#include "qpid/sys/Thread.h"
#include "qpid/sys/Time.h"
+#include "qpid/sys/Timer.h"
#include "qpid/sys/ConnectionInputHandler.h"
#include "qpid/sys/ConnectionInputHandlerFactory.h"
#include "qpid/sys/TimeoutHandler.h"
@@ -102,6 +107,14 @@ namespace _qmf = qmf::org::apache::qpid::broker;
namespace qpid {
namespace broker {
+const std::string empty;
+const std::string amq_direct("amq.direct");
+const std::string amq_topic("amq.topic");
+const std::string amq_fanout("amq.fanout");
+const std::string amq_match("amq.match");
+const std::string qpid_management("qpid.management");
+const std::string knownHostsNone("none");
+
Broker::Options::Options(const std::string& name) :
qpid::Options(name),
noDataDir(0),
@@ -119,6 +132,7 @@ Broker::Options::Options(const std::string& name) :
queueLimit(100*1048576/*100M default limit*/),
tcpNoDelay(false),
requireEncrypted(false),
+ knownHosts(knownHostsNone),
qmf2Support(true),
qmf1Support(true),
queueFlowStopRatio(80),
@@ -177,14 +191,6 @@ Broker::Options::Options(const std::string& name) :
;
}
-const std::string empty;
-const std::string amq_direct("amq.direct");
-const std::string amq_topic("amq.topic");
-const std::string amq_fanout("amq.fanout");
-const std::string amq_match("amq.match");
-const std::string qpid_management("qpid.management");
-const std::string knownHostsNone("none");
-
namespace {
// Arguments to declare a non-replicated exchange.
framing::FieldTable noReplicateArgs() {
@@ -196,6 +202,7 @@ framing::FieldTable noReplicateArgs() {
Broker::Broker(const Broker::Options& conf) :
poller(new Poller),
+ timer(new qpid::sys::Timer),
config(conf),
managementAgent(conf.enableMgmt ? new ManagementAgent(conf.qmf1Support,
conf.qmf2Support)
@@ -207,20 +214,16 @@ Broker::Broker(const Broker::Options& conf) :
exchanges(this),
links(this),
factory(new SecureConnectionFactory(*this)),
- dtxManager(timer),
+ dtxManager(*timer.get()),
sessionManager(
qpid::SessionState::Configuration(
conf.replayFlushLimit*1024, // convert kb to bytes.
conf.replayHardLimit*1024),
*this),
- queueCleaner(queues, &timer),
+ queueCleaner(queues, timer.get()),
recoveryInProgress(false),
- recovery(true),
- inCluster(false),
- clusterUpdatee(false),
expiryPolicy(new ExpiryPolicy),
- getKnownBrokers(boost::bind(&Broker::getKnownBrokersImpl, this)),
- deferDelivery(boost::bind(&Broker::deferDeliveryImpl, this, _1, _2))
+ getKnownBrokers(boost::bind(&Broker::getKnownBrokersImpl, this))
{
try {
if (conf.enableMgmt) {
@@ -289,18 +292,11 @@ Broker::Broker(const Broker::Options& conf) :
exchanges.declare(empty, DirectExchange::typeName, false, noReplicateArgs());
if (store.get() != 0) {
- // The cluster plug-in will setRecovery(false) on all but the first
- // broker to join a cluster.
- if (getRecovery()) {
- RecoveryManagerImpl recoverer(queues, exchanges, links, dtxManager, protocolRegistry);
- recoveryInProgress = true;
- store->recover(recoverer);
- recoveryInProgress = false;
- }
- else {
- QPID_LOG(notice, "Cluster recovery: recovered journal data discarded and journal files pushed down");
- store->truncateInit(true); // save old files in subdir
- }
+ RecoveryManagerImpl recoverer(
+ queues, exchanges, links, dtxManager, protocolRegistry);
+ recoveryInProgress = true;
+ store->recover(recoverer);
+ recoveryInProgress = false;
}
//ensure standard exchanges exist (done after recovery from store)
@@ -356,19 +352,11 @@ Broker::Broker(const Broker::Options& conf) :
}
}
- if (managementAgent.get()) managementAgent->pluginsInitialized();
-
if (conf.queueCleanInterval) {
queueCleaner.start(conf.queueCleanInterval * qpid::sys::TIME_SEC);
}
- //initialize known broker urls (TODO: add support for urls for other transports (SSL, RDMA)):
- if (conf.knownHosts.empty()) {
- boost::shared_ptr<ProtocolFactory> factory = getProtocolFactory(TCP_TRANSPORT);
- if (factory) {
- knownBrokers.push_back ( qpid::Url::getIpAddressesUrl ( factory->getPort() ) );
- }
- } else if (conf.knownHosts != knownHostsNone) {
+ if (!conf.knownHosts.empty() && conf.knownHosts != knownHostsNone) {
knownBrokers.push_back(Url(conf.knownHosts));
}
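// Illustrative sketch (not from the patch): how the known-hosts option now resolves
// with its new default of "none". Only the two cases kept above are handled; the
// helper name and plain-string return are assumptions for illustration only — the
// broker itself stores qpid::Url objects in knownBrokers.
#include <string>
#include <vector>

static std::vector<std::string> resolveKnownBrokers(const std::string& knownHosts) {
    std::vector<std::string> brokers;
    if (!knownHosts.empty() && knownHosts != "none")
        brokers.push_back(knownHosts);   // parsed as a Url by the broker
    return brokers;                      // empty for "" and for "none"
}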
@@ -450,7 +438,7 @@ Broker::~Broker() {
finalize(); // Finalize any plugins.
if (config.auth)
SaslAuthenticator::fini();
- timer.stop();
+ timer->stop();
QPID_LOG(notice, "Shut down");
}
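// Illustrative sketch (stand-in types, not Qpid classes): the ownership pattern
// introduced above. The broker now heap-allocates its single timer, hands raw
// pointers/references to members constructed after it in the initializer list,
// and stops it explicitly in the destructor.
#include <memory>

struct Timer { void stop() {} };                              // stand-in for qpid::sys::Timer
struct QueueCleanerLike { explicit QueueCleanerLike(Timer* t) : timer(t) {} Timer* timer; };

struct BrokerLike {
    std::auto_ptr<Timer> timer;                               // declared before its users
    QueueCleanerLike cleaner;
    BrokerLike() : timer(new Timer), cleaner(timer.get()) {}
    ~BrokerLike() { timer->stop(); }                          // mirrors Broker::~Broker above
};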
@@ -522,7 +510,7 @@ Manageable::status_t Broker::ManagementMethod (uint32_t methodId,
_qmf::ArgsBrokerQueueMoveMessages& moveArgs=
dynamic_cast<_qmf::ArgsBrokerQueueMoveMessages&>(args);
QPID_LOG (debug, "Broker::queueMoveMessages()");
- if (queueMoveMessages(moveArgs.i_srcQueue, moveArgs.i_destQueue, moveArgs.i_qty, moveArgs.i_filter))
+ if (queueMoveMessages(moveArgs.i_srcQueue, moveArgs.i_destQueue, moveArgs.i_qty, moveArgs.i_filter) >= 0)
status = Manageable::STATUS_OK;
else
return Manageable::STATUS_PARAMETER_INVALID;
@@ -1022,39 +1010,29 @@ void Broker::accept() {
}
void Broker::connect(
+ const std::string& name,
const std::string& host, const std::string& port, const std::string& transport,
- boost::function2<void, int, std::string> failed,
- sys::ConnectionCodec::Factory* f)
+ boost::function2<void, int, std::string> failed)
{
boost::shared_ptr<ProtocolFactory> pf = getProtocolFactory(transport);
- if (pf) pf->connect(poller, host, port, f ? f : factory.get(), failed);
+ if (pf) pf->connect(poller, name, host, port, factory.get(), failed);
else throw NoSuchTransportException(QPID_MSG("Unsupported transport type: " << transport));
}
-void Broker::connect(
- const Url& url,
- boost::function2<void, int, std::string> failed,
- sys::ConnectionCodec::Factory* f)
-{
- url.throwIfEmpty();
- const Address& addr=url[0];
- connect(addr.host, boost::lexical_cast<std::string>(addr.port), addr.protocol, failed, f);
-}
-
-uint32_t Broker::queueMoveMessages(
+int32_t Broker::queueMoveMessages(
const std::string& srcQueue,
const std::string& destQueue,
uint32_t qty,
const Variant::Map& filter)
{
- Queue::shared_ptr src_queue = queues.find(srcQueue);
- if (!src_queue)
- return 0;
- Queue::shared_ptr dest_queue = queues.find(destQueue);
- if (!dest_queue)
- return 0;
-
- return src_queue->move(dest_queue, qty, &filter);
+ Queue::shared_ptr src_queue = queues.find(srcQueue);
+ if (!src_queue)
+ return -1;
+ Queue::shared_ptr dest_queue = queues.find(destQueue);
+ if (!dest_queue)
+ return -1;
+
+ return (int32_t) src_queue->move(dest_queue, qty, &filter);
}
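// Illustrative sketch (hedged): the signed return type lets a caller separate
// "queue missing" (-1) from "nothing moved" (0). Assumes a reachable Broker
// reference; only the signature introduced above is relied upon.
#include <stdint.h>
#include <string>
#include "qpid/broker/Broker.h"
#include "qpid/types/Variant.h"

static bool moveAllMessages(qpid::broker::Broker& broker,
                            const std::string& src, const std::string& dst) {
    qpid::types::Variant::Map noFilter;
    int32_t moved = broker.queueMoveMessages(src, dst, 0 /*all*/, noFilter);
    return moved >= 0;   // same check as in Broker::ManagementMethod above
}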
@@ -1069,12 +1047,6 @@ Broker::getKnownBrokersImpl()
bool Broker::deferDeliveryImpl(const std::string&, const Message&)
{ return false; }
-void Broker::setClusterTimer(std::auto_ptr<sys::Timer> t) {
- clusterTimer = t;
- queueCleaner.setTimer(clusterTimer.get());
- dtxManager.setTimer(*clusterTimer.get());
-}
-
const std::string Broker::TCP_TRANSPORT("tcp");
@@ -1131,6 +1103,10 @@ std::pair<boost::shared_ptr<Queue>, bool> Broker::createQueue(
void Broker::deleteQueue(const std::string& name, const std::string& userId,
const std::string& connectionId, QueueFunctor check)
{
+ QPID_LOG_CAT(debug, model, "Deleting queue. name:" << name
+ << " user:" << userId
+ << " rhost:" << connectionId
+ );
if (acl && !acl->authorise(userId,acl::ACT_DELETE,acl::OBJ_QUEUE,name,NULL)) {
throw framing::UnauthorizedAccessException(QPID_MSG("ACL denied queue delete request from " << userId));
}
@@ -1145,11 +1121,6 @@ void Broker::deleteQueue(const std::string& name, const std::string& userId,
} else {
throw framing::NotFoundException(QPID_MSG("Delete failed. No such queue: " << name));
}
- QPID_LOG_CAT(debug, model, "Delete queue. name:" << name
- << " user:" << userId
- << " rhost:" << connectionId
- );
-
}
std::pair<Exchange::shared_ptr, bool> Broker::createExchange(
@@ -1196,6 +1167,9 @@ std::pair<Exchange::shared_ptr, bool> Broker::createExchange(
void Broker::deleteExchange(const std::string& name, const std::string& userId,
const std::string& connectionId)
{
+ QPID_LOG_CAT(debug, model, "Deleting exchange. name:" << name
+ << " user:" << userId
+ << " rhost:" << connectionId);
if (acl) {
if (!acl->authorise(userId,acl::ACT_DELETE,acl::OBJ_EXCHANGE,name,NULL) )
throw framing::UnauthorizedAccessException(QPID_MSG("ACL denied exchange delete request from " << userId));
@@ -1206,13 +1180,10 @@ void Broker::deleteExchange(const std::string& name, const std::string& userId,
}
Exchange::shared_ptr exchange(exchanges.get(name));
if (!exchange) throw framing::NotFoundException(QPID_MSG("Delete failed. No such exchange: " << name));
- if (exchange->inUseAsAlternate()) throw framing::NotAllowedException(QPID_MSG("Exchange in use as alternate-exchange."));
+    if (exchange->inUseAsAlternate()) throw framing::NotAllowedException(QPID_MSG("Cannot delete " << name << ", in use as alternate-exchange."));
if (exchange->isDurable()) store->destroy(*exchange);
if (exchange->getAlternate()) exchange->getAlternate()->decAlternateUsers();
exchanges.destroy(name, connectionId, userId);
- QPID_LOG_CAT(debug, model, "Delete exchange. name:" << name
- << " user:" << userId
- << " rhost:" << connectionId);
}
void Broker::bind(const std::string& queueName,
diff --git a/qpid/cpp/src/qpid/broker/Broker.h b/qpid/cpp/src/qpid/broker/Broker.h
index 0a8f406dbf..5c4c2a279f 100644
--- a/qpid/cpp/src/qpid/broker/Broker.h
+++ b/qpid/cpp/src/qpid/broker/Broker.h
@@ -23,11 +23,11 @@
*/
#include "qpid/broker/BrokerImportExport.h"
-#include "qpid/broker/ConnectionToken.h"
-#include "qpid/broker/DirectExchange.h"
+
+#include "qpid/DataDir.h"
+#include "qpid/Plugin.h"
#include "qpid/broker/DtxManager.h"
#include "qpid/broker/ExchangeRegistry.h"
-#include "qpid/broker/MessageStore.h"
#include "qpid/broker/Protocol.h"
#include "qpid/broker/QueueRegistry.h"
#include "qpid/broker/LinkRegistry.h"
@@ -35,30 +35,16 @@
#include "qpid/broker/QueueCleaner.h"
#include "qpid/broker/Vhost.h"
#include "qpid/broker/System.h"
-#include "qpid/broker/ExpiryPolicy.h"
#include "qpid/broker/ConsumerFactory.h"
#include "qpid/broker/ConnectionObservers.h"
#include "qpid/broker/ConfigurationObservers.h"
-#include "qpid/sys/ConnectionCodec.h"
#include "qpid/management/Manageable.h"
-#include "qpid/management/ManagementAgent.h"
-#include "qmf/org/apache/qpid/broker/Broker.h"
-#include "qmf/org/apache/qpid/broker/ArgsBrokerConnect.h"
-#include "qpid/Options.h"
-#include "qpid/Plugin.h"
-#include "qpid/DataDir.h"
-#include "qpid/framing/FrameHandler.h"
-#include "qpid/framing/OutputHandler.h"
-#include "qpid/framing/ProtocolInitiation.h"
#include "qpid/sys/ConnectionCodec.h"
-#include "qpid/sys/Runnable.h"
-#include "qpid/sys/Timer.h"
-#include "qpid/types/Variant.h"
-#include "qpid/RefCounted.h"
-#include "qpid/broker/AclModule.h"
#include "qpid/sys/Mutex.h"
+#include "qpid/sys/Runnable.h"
#include <boost/intrusive_ptr.hpp>
+
#include <string>
#include <vector>
@@ -67,12 +53,14 @@ namespace qpid {
namespace sys {
class ProtocolFactory;
class Poller;
+class Timer;
}
struct Url;
namespace broker {
+class AclModule;
class ConnectionState;
class ExpiryPolicy;
class Message;
@@ -157,8 +145,7 @@ class Broker : public sys::Runnable, public Plugin::Target,
Manageable::status_t setTimestampConfig(const bool receive,
const ConnectionState* context);
boost::shared_ptr<sys::Poller> poller;
- sys::Timer timer;
- std::auto_ptr<sys::Timer> clusterTimer;
+ std::auto_ptr<sys::Timer> timer;
Options config;
std::auto_ptr<management::ManagementAgent> managementAgent;
ProtocolFactoryMap protocolFactories;
@@ -184,8 +171,6 @@ class Broker : public sys::Runnable, public Plugin::Target,
const Message& msg);
std::string federationTag;
bool recoveryInProgress;
- bool recovery;
- bool inCluster, clusterUpdatee;
boost::intrusive_ptr<ExpiryPolicy> expiryPolicy;
ConsumerFactories consumerFactories;
ProtocolRegistry protocolRegistry;
@@ -248,19 +233,17 @@ class Broker : public sys::Runnable, public Plugin::Target,
QPID_BROKER_EXTERN void accept();
/** Create a connection to another broker. */
- void connect(const std::string& host, const std::string& port,
+ void connect(const std::string& name,
+ const std::string& host, const std::string& port,
const std::string& transport,
- boost::function2<void, int, std::string> failed,
- sys::ConnectionCodec::Factory* =0);
- /** Create a connection to another broker. */
- void connect(const Url& url,
- boost::function2<void, int, std::string> failed,
- sys::ConnectionCodec::Factory* =0);
+ boost::function2<void, int, std::string> failed);
/** Move messages from one queue to another.
A zero quantity means to move all messages
+        Returns -1 if either queue does not exist, otherwise returns
+        the number of messages moved.
*/
- QPID_BROKER_EXTERN uint32_t queueMoveMessages(
+ QPID_BROKER_EXTERN int32_t queueMoveMessages(
const std::string& srcQueue,
const std::string& destQueue,
uint32_t qty,
@@ -272,47 +255,17 @@ class Broker : public sys::Runnable, public Plugin::Target,
/** Expose poller so plugins can register their descriptors. */
QPID_BROKER_EXTERN boost::shared_ptr<sys::Poller> getPoller();
- boost::shared_ptr<sys::ConnectionCodec::Factory> getConnectionFactory() { return factory; }
- void setConnectionFactory(boost::shared_ptr<sys::ConnectionCodec::Factory> f) { factory = f; }
-
/** Timer for local tasks affecting only this broker */
- sys::Timer& getTimer() { return timer; }
-
- /** Timer for tasks that must be synchronized if we are in a cluster */
- sys::Timer& getClusterTimer() { return clusterTimer.get() ? *clusterTimer : timer; }
- QPID_BROKER_EXTERN void setClusterTimer(std::auto_ptr<sys::Timer>);
+ sys::Timer& getTimer() { return *timer; }
boost::function<std::vector<Url> ()> getKnownBrokers;
static QPID_BROKER_EXTERN const std::string TCP_TRANSPORT;
- void setRecovery(bool set) { recovery = set; }
- bool getRecovery() const { return recovery; }
bool inRecovery() const { return recoveryInProgress; }
- /** True of this broker is part of a cluster.
- * Only valid after early initialization of plugins is complete.
- */
- bool isInCluster() const { return inCluster; }
- void setInCluster(bool set) { inCluster = set; }
-
- /** True if this broker is joining a cluster and in the process of
- * receiving a state update.
- */
- bool isClusterUpdatee() const { return clusterUpdatee; }
- void setClusterUpdatee(bool set) { clusterUpdatee = set; }
-
management::ManagementAgent* getManagementAgent() { return managementAgent.get(); }
- /**
- * Never true in a stand-alone broker. In a cluster, return true
- * to defer delivery of messages deliveredg in a cluster-unsafe
- * context.
- *@return true if delivery of a message should be deferred.
- */
- boost::function<bool (const std::string& queue,
- const Message& msg)> deferDelivery;
-
bool isAuthenticating ( ) { return config.auth; }
bool isTimestamping() { return config.timestampRcvMsgs; }
diff --git a/qpid/cpp/src/qpid/broker/Connection.cpp b/qpid/cpp/src/qpid/broker/Connection.cpp
index 3cb30a82e3..f6185d56a4 100644
--- a/qpid/cpp/src/qpid/broker/Connection.cpp
+++ b/qpid/cpp/src/qpid/broker/Connection.cpp
@@ -26,6 +26,7 @@
#include "qpid/broker/Broker.h"
#include "qpid/broker/Queue.h"
#include "qpid/sys/SecuritySettings.h"
+#include "qpid/sys/Timer.h"
#include "qpid/log/Statement.h"
#include "qpid/ptr_map.h"
@@ -84,15 +85,10 @@ Connection::Connection(ConnectionOutputHandler* out_,
std::string& mgmtId_,
const qpid::sys::SecuritySettings& external,
bool link_,
- uint64_t objectId_,
- bool shadow_,
- bool delayManagement,
- bool authenticated_
+ uint64_t objectId_
) :
ConnectionState(out_, broker_),
securitySettings(external),
- shadow(shadow_),
- authenticated(authenticated_),
adapter(*this, link_),
link(link_),
mgmtClosing(false),
@@ -106,11 +102,6 @@ Connection::Connection(ConnectionOutputHandler* out_,
{
outboundTracker.wrap(out);
broker.getConnectionObservers().connection(*this);
- // In a cluster, allow adding the management object to be delayed.
- if (!delayManagement) addManagementObject();
-}
-
-void Connection::addManagementObject() {
assert(agent == 0);
assert(mgmtObject == 0);
Manageable* parent = broker.GetVhostObject();
@@ -119,7 +110,6 @@ void Connection::addManagementObject() {
if (agent != 0) {
// TODO set last bool true if system connection
mgmtObject = _qmf::Connection::shared_ptr(new _qmf::Connection(agent, this, parent, mgmtId, !link, false, "AMQP 0-10"));
- mgmtObject->set_shadow(shadow);
agent->addObject(mgmtObject, objectId);
}
ConnectionState::setUrl(mgmtId);
@@ -136,8 +126,6 @@ void Connection::requestIOProcessing(boost::function0<void> callback)
Connection::~Connection()
{
if (mgmtObject != 0) {
- // In a cluster, Connections destroyed during shutdown are in
- // a cluster-unsafe context. Don't raise an event in that case.
if (!link)
agent->raiseEvent(_qmf::EventClientDisconnect(mgmtId, ConnectionState::getUserId(), mgmtObject->get_remoteProperties()));
QPID_LOG_CAT(debug, model, "Delete connection. user:" << ConnectionState::getUserId()
@@ -185,7 +173,6 @@ bool isMessage(const AMQMethodBody* method)
void Connection::recordFromServer(const framing::AMQFrame& frame)
{
- // Don't record management stats in cluster-unsafe contexts
if (mgmtObject != 0)
{
qmf::org::apache::qpid::broker::Connection::PerThreadStats *cStats = mgmtObject->getStatistics();
@@ -200,7 +187,6 @@ void Connection::recordFromServer(const framing::AMQFrame& frame)
void Connection::recordFromClient(const framing::AMQFrame& frame)
{
- // Don't record management stats in cluster-unsafe contexts
if (mgmtObject != 0)
{
qmf::org::apache::qpid::broker::Connection::PerThreadStats *cStats = mgmtObject->getStatistics();
@@ -277,20 +263,6 @@ void Connection::notifyConnectionForced(const string& text)
void Connection::setUserId(const string& userId)
{
ConnectionState::setUserId(userId);
- // In a cluster, the cluster code will raise the connect event
- // when the connection is replicated to the cluster.
- if (!broker.isInCluster()) raiseConnectEvent();
-}
-
-void Connection::raiseConnectEvent() {
- if (mgmtObject != 0) {
- mgmtObject->set_authIdentity(userId);
- agent->raiseEvent(_qmf::EventClientConnect(mgmtId, userId, mgmtObject->get_remoteProperties()));
- }
-
- QPID_LOG_CAT(debug, model, "Create connection. user:" << userId
- << " rhost:" << mgmtId );
-
}
void Connection::setUserProxyAuth(bool b)
@@ -317,19 +289,6 @@ void Connection::close(connection::CloseCode code, const string& text)
getOutput().close();
}
-// Send a close to the client but keep the channels. Used by cluster.
-void Connection::sendClose() {
- if (heartbeatTimer)
- heartbeatTimer->cancel();
- if (timeoutTimer)
- timeoutTimer->cancel();
- if (linkHeartbeatTimer) {
- linkHeartbeatTimer->cancel();
- }
- adapter.close(connection::CLOSE_CODE_NORMAL, "OK");
- getOutput().close();
-}
-
void Connection::idleOut(){}
void Connection::idleIn(){}
@@ -354,8 +313,6 @@ void Connection::closed(){ // Physically closed, suspend open sessions.
void Connection::doIoCallbacks() {
if (!isOpen()) return; // Don't process IO callbacks until we are open.
ScopedLock<Mutex> l(ioCallbackLock);
- // Although IO callbacks execute in the connection thread context, they are
- // not cluster safe because they are queued for execution in non-IO threads.
while (!ioCallbacks.empty()) {
boost::function0<void> cb = ioCallbacks.front();
ioCallbacks.pop();
@@ -488,7 +445,7 @@ void Connection::abort()
void Connection::setHeartbeatInterval(uint16_t heartbeat)
{
setHeartbeat(heartbeat);
- if (heartbeat > 0 && !isShadow()) {
+ if (heartbeat > 0) {
if (!heartbeatTimer) {
heartbeatTimer = new ConnectionHeartbeatTask(heartbeat, timer, *this);
timer.add(heartbeatTimer);
diff --git a/qpid/cpp/src/qpid/broker/Connection.h b/qpid/cpp/src/qpid/broker/Connection.h
index 2f25b0e3f9..27b0019e0d 100644
--- a/qpid/cpp/src/qpid/broker/Connection.h
+++ b/qpid/cpp/src/qpid/broker/Connection.h
@@ -55,6 +55,9 @@
#include <algorithm>
namespace qpid {
+namespace sys {
+class TimerTask;
+}
namespace broker {
class Broker;
@@ -83,10 +86,7 @@ class Connection : public sys::ConnectionInputHandler,
const std::string& mgmtId,
const qpid::sys::SecuritySettings&,
bool isLink = false,
- uint64_t objectId = 0,
- bool shadow=false,
- bool delayManagement = false,
- bool authenticated=true);
+ uint64_t objectId = 0);
~Connection ();
@@ -130,7 +130,6 @@ class Connection : public sys::ConnectionInputHandler,
void notifyConnectionForced(const std::string& text);
void setUserId(const std::string& uid);
- void raiseConnectEvent();
// credentials for connected client
const std::string& getUserId() const { return ConnectionState::getUserId(); }
@@ -144,27 +143,14 @@ class Connection : public sys::ConnectionInputHandler,
void setHeartbeatInterval(uint16_t heartbeat);
void sendHeartbeat();
void restartTimeout();
-
+
template <class F> void eachSessionHandler(F f) {
for (ChannelMap::iterator i = channels.begin(); i != channels.end(); ++i)
f(*ptr_map_ptr(i));
}
- void sendClose();
void setSecureConnection(SecureConnection* secured);
- /** True if this is a shadow connection in a cluster. */
- bool isShadow() const { return shadow; }
-
- /** True if this connection is authenticated */
- bool isAuthenticated() const { return authenticated; }
-
- // Used by cluster to update connection status
- sys::AggregateOutput& getOutputTasks() { return outputTasks; }
-
- /** Cluster delays adding management object in the constructor then calls this. */
- void addManagementObject();
-
const qpid::sys::SecuritySettings& getExternalSecuritySettings() const
{
return securitySettings;
@@ -176,9 +162,6 @@ class Connection : public sys::ConnectionInputHandler,
bool isLink() { return link; }
void startLinkHeartbeatTimeoutTask();
- // Used by cluster during catch-up, see cluster::OutputInterceptor
- void doIoCallbacks();
-
void setClientProperties(const framing::FieldTable& cp) { clientProperties = cp; }
const framing::FieldTable& getClientProperties() const { return clientProperties; }
@@ -188,8 +171,6 @@ class Connection : public sys::ConnectionInputHandler,
ChannelMap channels;
qpid::sys::SecuritySettings securitySettings;
- bool shadow;
- bool authenticated;
ConnectionHandler adapter;
const bool link;
bool mgmtClosing;
@@ -228,6 +209,7 @@ class Connection : public sys::ConnectionInputHandler,
OutboundFrameTracker outboundTracker;
void sent(const framing::AMQFrame& f);
+ void doIoCallbacks();
public:
diff --git a/qpid/cpp/src/qpid/broker/ConnectionHandler.cpp b/qpid/cpp/src/qpid/broker/ConnectionHandler.cpp
index a3c278e2e9..0017616472 100644
--- a/qpid/cpp/src/qpid/broker/ConnectionHandler.cpp
+++ b/qpid/cpp/src/qpid/broker/ConnectionHandler.cpp
@@ -20,8 +20,10 @@
*
*/
-#include "qpid/SaslFactory.h"
#include "qpid/broker/ConnectionHandler.h"
+
+#include "qpid/SaslFactory.h"
+#include "qpid/broker/Broker.h"
#include "qpid/broker/Connection.h"
#include "qpid/broker/SecureConnection.h"
#include "qpid/Url.h"
diff --git a/qpid/cpp/src/qpid/broker/ConnectionState.cpp b/qpid/cpp/src/qpid/broker/ConnectionState.cpp
new file mode 100644
index 0000000000..c6a8317c2b
--- /dev/null
+++ b/qpid/cpp/src/qpid/broker/ConnectionState.cpp
@@ -0,0 +1,38 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include "qpid/broker/ConnectionState.h"
+
+#include "qpid/broker/Broker.h"
+
+namespace qpid {
+namespace broker {
+
+void ConnectionState::setUserId(const std::string& uid) {
+ userId = uid;
+ size_t at = userId.find('@');
+ userName = userId.substr(0, at);
+ isDefaultRealm = (
+        at != std::string::npos &&
+ getBroker().getOptions().realm == userId.substr(at+1,userId.size()));
+}
+
+}}
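// Illustrative, self-contained sketch of the parse above (assumption: a plain
// "name@realm" user id; the struct and helper names are illustrative only).
#include <string>

struct ParsedUserId { std::string userName; bool isDefaultRealm; };

static ParsedUserId parseUserId(const std::string& userId, const std::string& brokerRealm) {
    ParsedUserId p;
    std::string::size_type at = userId.find('@');
    p.userName = userId.substr(0, at);                    // whole id when there is no '@'
    p.isDefaultRealm = (at != std::string::npos &&
                        brokerRealm == userId.substr(at + 1));
    return p;
}
// e.g. parseUserId("guest@QPID", "QPID") yields { "guest", true }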
diff --git a/qpid/cpp/src/qpid/broker/ConnectionState.h b/qpid/cpp/src/qpid/broker/ConnectionState.h
index 4dfd86fd8e..a8d6e82210 100644
--- a/qpid/cpp/src/qpid/broker/ConnectionState.h
+++ b/qpid/cpp/src/qpid/broker/ConnectionState.h
@@ -21,18 +21,22 @@
#ifndef _ConnectionState_
#define _ConnectionState_
-#include <vector>
-
+#include "qpid/broker/ConnectionToken.h"
#include "qpid/sys/AggregateOutput.h"
#include "qpid/sys/ConnectionOutputHandlerPtr.h"
#include "qpid/framing/ProtocolVersion.h"
#include "qpid/management/Manageable.h"
#include "qpid/Url.h"
-#include "qpid/broker/Broker.h"
+
+#include <boost/function.hpp>
+#include <vector>
+
namespace qpid {
namespace broker {
+class Broker;
+
class ConnectionState : public ConnectionToken, public management::Manageable
{
protected:
@@ -46,9 +50,8 @@ class ConnectionState : public ConnectionToken, public management::Manageable
framemax(65535),
heartbeat(0),
heartbeatmax(120),
- userProxyAuth(false), // Can proxy msgs with non-matching auth ids when true (used by federation links & clustering)
+ userProxyAuth(false), // Can proxy msgs with non-matching auth ids when true (used by federation links)
federationLink(true),
- clusterOrderOut(0),
isDefaultRealm(false)
{}
@@ -62,14 +65,7 @@ class ConnectionState : public ConnectionToken, public management::Manageable
void setHeartbeat(uint16_t hb) { heartbeat = hb; }
void setHeartbeatMax(uint16_t hbm) { heartbeatmax = hbm; }
- virtual void setUserId(const std::string& uid) {
- userId = uid;
- size_t at = userId.find('@');
- userName = userId.substr(0, at);
- isDefaultRealm = (
- at!= std::string::npos &&
- getBroker().getOptions().realm == userId.substr(at+1,userId.size()));
- }
+ virtual void setUserId(const std::string& uid);
const std::string& getUserId() const { return userId; }
@@ -102,15 +98,6 @@ class ConnectionState : public ConnectionToken, public management::Manageable
framing::ProtocolVersion getVersion() const { return version; }
void setOutputHandler(qpid::sys::ConnectionOutputHandler* o) { out.set(o); }
- /**
- * If the broker is part of a cluster, this is a handler provided
- * by cluster code. It ensures consistent ordering of commands
- * that are sent based on criteria that are not predictably
- * ordered cluster-wide, e.g. a timer firing.
- */
- framing::FrameHandler* getClusterOrderOutput() { return clusterOrderOut; }
- void setClusterOrderOutput(framing::FrameHandler& fh) { clusterOrderOut = &fh; }
-
virtual void requestIOProcessing (boost::function0<void>) = 0;
protected:
@@ -124,7 +111,6 @@ class ConnectionState : public ConnectionToken, public management::Manageable
bool federationLink;
std::string federationPeerTag;
std::vector<Url> knownHosts;
- framing::FrameHandler* clusterOrderOut;
std::string userName;
bool isDefaultRealm;
};
diff --git a/qpid/cpp/src/qpid/broker/ConsumerFactory.h b/qpid/cpp/src/qpid/broker/ConsumerFactory.h
index abd39fb3f8..1c0f2571e2 100644
--- a/qpid/cpp/src/qpid/broker/ConsumerFactory.h
+++ b/qpid/cpp/src/qpid/broker/ConsumerFactory.h
@@ -25,11 +25,14 @@
// TODO aconway 2011-11-25: it's ugly exposing SemanticState::ConsumerImpl in public.
// Refactor to use a more abstract interface.
-#include "qpid/broker/SemanticState.h"
+#include <boost/shared_ptr.hpp>
namespace qpid {
namespace broker {
+class SemanticState;
+class SemanticStateConsumerImpl;
+
/**
 * Base class for consumer factories. Plugins can register a
 * ConsumerFactory via Broker::getConsumerFactories(). Each time a
@@ -41,7 +44,7 @@ class ConsumerFactory
public:
virtual ~ConsumerFactory() {}
- virtual boost::shared_ptr<SemanticState::ConsumerImpl> create(
+ virtual boost::shared_ptr<SemanticStateConsumerImpl> create(
SemanticState* parent,
const std::string& name, boost::shared_ptr<Queue> queue,
bool ack, bool acquire, bool exclusive, const std::string& tag,
diff --git a/qpid/cpp/src/qpid/broker/DtxManager.cpp b/qpid/cpp/src/qpid/broker/DtxManager.cpp
index d482c2c327..5233e07b2b 100644
--- a/qpid/cpp/src/qpid/broker/DtxManager.cpp
+++ b/qpid/cpp/src/qpid/broker/DtxManager.cpp
@@ -27,6 +27,9 @@
#include "qpid/ptr_map.h"
#include <boost/format.hpp>
+#include <boost/bind.hpp>
+#include <boost/function.hpp>
+
#include <iostream>
using boost::intrusive_ptr;
@@ -35,6 +38,30 @@ using qpid::ptr_map_ptr;
using namespace qpid::broker;
using namespace qpid::framing;
+namespace {
+ typedef boost::function0<void> FireFunction;
+ struct DtxCleanup : public qpid::sys::TimerTask
+ {
+ FireFunction fireFunction;
+
+ DtxCleanup(uint32_t timeout, FireFunction f);
+ void fire();
+ };
+
+ DtxCleanup::DtxCleanup(uint32_t _timeout, FireFunction f)
+ : TimerTask(qpid::sys::Duration(_timeout * qpid::sys::TIME_SEC),"DtxCleanup"), fireFunction(f){}
+
+ void DtxCleanup::fire()
+ {
+ try {
+ fireFunction();
+ } catch (qpid::ConnectionException& /*e*/) {
+ //assume it was explicitly cleaned up after a call to prepare, commit or rollback
+ }
+ }
+
+}
+
DtxManager::DtxManager(qpid::sys::Timer& t) : store(0), timer(&t) {}
DtxManager::~DtxManager() {}
@@ -156,19 +183,7 @@ void DtxManager::timedout(const std::string& xid)
} else {
ptr_map_ptr(i)->timedout();
//TODO: do we want to have a timed task to cleanup, or can we rely on an explicit completion?
- //timer.add(intrusive_ptr<TimerTask>(new DtxCleanup(60*30/*30 mins*/, *this, xid)));
- }
-}
-
-DtxManager::DtxCleanup::DtxCleanup(uint32_t _timeout, DtxManager& _mgr, const std::string& _xid)
- : TimerTask(qpid::sys::Duration(_timeout * qpid::sys::TIME_SEC),"DtxCleanup"), mgr(_mgr), xid(_xid) {}
-
-void DtxManager::DtxCleanup::fire()
-{
- try {
- mgr.remove(xid);
- } catch (ConnectionException& /*e*/) {
- //assume it was explicitly cleaned up after a call to prepare, commit or rollback
+ //timer->add(new DtxCleanup(60*30/*30 mins*/, boost::bind(&DtxManager::remove, this, xid)));
}
}
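// Illustrative, self-contained sketch (hedged) of the pattern above: the cleanup
// task now captures an arbitrary boost::function0<void> instead of a DtxManager&
// plus xid, so the timer task no longer depends on DtxManager. The Manager and
// Cleanup names below are stand-ins, not Qpid types.
#include <boost/bind.hpp>
#include <boost/function.hpp>
#include <string>

struct Cleanup {
    boost::function0<void> fireFunction;
    explicit Cleanup(boost::function0<void> f) : fireFunction(f) {}
    void fire() { fireFunction(); }          // a real TimerTask calls this on expiry
};

struct Manager {
    void remove(const std::string& xid) { (void)xid; /* erase the work record */ }
};

static void scheduleCleanupExample(Manager& mgr) {
    Cleanup task(boost::bind(&Manager::remove, &mgr, std::string("xid-1")));
    task.fire();
}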
diff --git a/qpid/cpp/src/qpid/broker/DtxManager.h b/qpid/cpp/src/qpid/broker/DtxManager.h
index 6f03189f66..81175e5dc3 100644
--- a/qpid/cpp/src/qpid/broker/DtxManager.h
+++ b/qpid/cpp/src/qpid/broker/DtxManager.h
@@ -31,20 +31,15 @@
#include "qpid/ptr_map.h"
namespace qpid {
+namespace sys {
+class Timer;
+}
+
namespace broker {
class DtxManager{
typedef boost::ptr_map<std::string, DtxWorkRecord> WorkMap;
- struct DtxCleanup : public sys::TimerTask
- {
- DtxManager& mgr;
- const std::string& xid;
-
- DtxCleanup(uint32_t timeout, DtxManager& mgr, const std::string& xid);
- void fire();
- };
-
WorkMap work;
TransactionalStore* store;
qpid::sys::Mutex lock;
@@ -68,11 +63,6 @@ public:
void setStore(TransactionalStore* store);
void setTimer(sys::Timer& t) { timer = &t; }
- // Used by cluster for replication.
- template<class F> void each(F f) const {
- for (WorkMap::const_iterator i = work.begin(); i != work.end(); ++i)
- f(*ptr_map_ptr(i));
- }
DtxWorkRecord* getWork(const std::string& xid);
bool exists(const std::string& xid);
static std::string convert(const framing::Xid& xid);
diff --git a/qpid/cpp/src/qpid/broker/DtxWorkRecord.cpp b/qpid/cpp/src/qpid/broker/DtxWorkRecord.cpp
index 2c26fec49f..ad02892895 100644
--- a/qpid/cpp/src/qpid/broker/DtxWorkRecord.cpp
+++ b/qpid/cpp/src/qpid/broker/DtxWorkRecord.cpp
@@ -20,7 +20,10 @@
*/
#include "qpid/broker/DtxWorkRecord.h"
#include "qpid/broker/DtxManager.h"
+#include "qpid/broker/DtxTimeout.h"
#include "qpid/framing/reply_exceptions.h"
+#include "qpid/sys/Timer.h"
+
#include <boost/format.hpp>
#include <boost/mem_fn.hpp>
using boost::mem_fn;
@@ -39,6 +42,12 @@ DtxWorkRecord::~DtxWorkRecord()
}
}
+void DtxWorkRecord::setTimeout(boost::intrusive_ptr<DtxTimeout> t)
+{ timeout = t; }
+
+boost::intrusive_ptr<DtxTimeout> DtxWorkRecord::getTimeout()
+{ return timeout; }
+
bool DtxWorkRecord::prepare()
{
Mutex::ScopedLock locker(lock);
@@ -176,17 +185,3 @@ void DtxWorkRecord::timedout()
}
abort();
}
-
-size_t DtxWorkRecord::indexOf(const DtxBuffer::shared_ptr& buf) {
- Work::iterator i = std::find(work.begin(), work.end(), buf);
- if (i == work.end()) throw NotFoundException(
- QPID_MSG("Can't find DTX buffer for xid: " << buf->getXid()));
- return i - work.begin();
-}
-
-DtxBuffer::shared_ptr DtxWorkRecord::operator[](size_t i) const {
- if (i > work.size())
- throw NotFoundException(
- QPID_MSG("Can't find DTX buffer " << i << " for xid: " << xid));
- return work[i];
-}
diff --git a/qpid/cpp/src/qpid/broker/DtxWorkRecord.h b/qpid/cpp/src/qpid/broker/DtxWorkRecord.h
index 331e42fefd..b38af907c5 100644
--- a/qpid/cpp/src/qpid/broker/DtxWorkRecord.h
+++ b/qpid/cpp/src/qpid/broker/DtxWorkRecord.h
@@ -23,7 +23,6 @@
#include "qpid/broker/BrokerImportExport.h"
#include "qpid/broker/DtxBuffer.h"
-#include "qpid/broker/DtxTimeout.h"
#include "qpid/broker/TransactionalStore.h"
#include "qpid/framing/amqp_types.h"
@@ -38,6 +37,8 @@
namespace qpid {
namespace broker {
+struct DtxTimeout;
+
/**
* Represents the work done under a particular distributed transaction
* across potentially multiple channels. Identified by a xid. Allows
@@ -71,19 +72,13 @@ public:
QPID_BROKER_EXTERN void add(DtxBuffer::shared_ptr ops);
void recover(std::auto_ptr<TPCTransactionContext> txn, DtxBuffer::shared_ptr ops);
void timedout();
- void setTimeout(boost::intrusive_ptr<DtxTimeout> t) { timeout = t; }
- boost::intrusive_ptr<DtxTimeout> getTimeout() { return timeout; }
+ void setTimeout(boost::intrusive_ptr<DtxTimeout> t);
+ boost::intrusive_ptr<DtxTimeout> getTimeout();
std::string getXid() const { return xid; }
bool isCompleted() const { return completed; }
bool isRolledback() const { return rolledback; }
bool isPrepared() const { return prepared; }
bool isExpired() const { return expired; }
-
- // Used by cluster update;
- size_t size() const { return work.size(); }
- DtxBuffer::shared_ptr operator[](size_t i) const;
- uint32_t getTimeout() const { return timeout? timeout->timeout : 0; }
- size_t indexOf(const DtxBuffer::shared_ptr&);
};
}} // qpid::broker
diff --git a/qpid/cpp/src/qpid/broker/Exchange.cpp b/qpid/cpp/src/qpid/broker/Exchange.cpp
index 9098c75f0b..f71dbc7351 100644
--- a/qpid/cpp/src/qpid/broker/Exchange.cpp
+++ b/qpid/cpp/src/qpid/broker/Exchange.cpp
@@ -210,8 +210,6 @@ Exchange::Exchange(const string& _name, bool _durable, const qpid::framing::Fiel
ive = _args.get(qpidIVE);
if (ive) {
- if (broker && broker->isInCluster())
- throw framing::NotImplementedException("Cannot use Initial Value Exchanges in a cluster");
QPID_LOG(debug, "Configured exchange " << _name << " with Initial Value");
}
}
@@ -225,6 +223,7 @@ Exchange::~Exchange ()
void Exchange::setAlternate(Exchange::shared_ptr _alternate)
{
alternate = _alternate;
+ alternate->incAlternateUsers();
if (mgmtExchange != 0) {
if (alternate.get() != 0)
mgmtExchange->set_altExchange(alternate->GetManagementObject()->getObjectId());
diff --git a/qpid/cpp/src/qpid/broker/ExchangeRegistry.cpp b/qpid/cpp/src/qpid/broker/ExchangeRegistry.cpp
index bc6a20ff9a..46175fd2e4 100644
--- a/qpid/cpp/src/qpid/broker/ExchangeRegistry.cpp
+++ b/qpid/cpp/src/qpid/broker/ExchangeRegistry.cpp
@@ -79,10 +79,7 @@ pair<Exchange::shared_ptr, bool> ExchangeRegistry::declare(
}
exchanges[name] = exchange;
result = std::pair<Exchange::shared_ptr, bool>(exchange, true);
- if (alternate) {
- exchange->setAlternate(alternate);
- alternate->incAlternateUsers();
- }
+ if (alternate) exchange->setAlternate(alternate);
// Call exchangeCreate inside the lock to ensure correct ordering.
if (broker) broker->getConfigurationObservers().exchangeCreate(exchange);
} else {
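// Illustrative sketch (hedged): setAlternate() now maintains the alternate-user
// count itself, so a caller registers an alternate in a single step; calling
// incAlternateUsers() again afterwards would double-count and could keep the
// alternate exchange from ever being deletable.
#include "qpid/broker/Exchange.h"

static void declareWithAlternate(qpid::broker::Exchange::shared_ptr exchange,
                                 qpid::broker::Exchange::shared_ptr alternate) {
    if (alternate) exchange->setAlternate(alternate);   // increments the count internally
}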
diff --git a/qpid/cpp/src/qpid/broker/Link.cpp b/qpid/cpp/src/qpid/broker/Link.cpp
index 0c18e08cd1..5d01a567b5 100644
--- a/qpid/cpp/src/qpid/broker/Link.cpp
+++ b/qpid/cpp/src/qpid/broker/Link.cpp
@@ -33,6 +33,7 @@
#include "qpid/framing/amqp_types.h"
#include "qpid/broker/AclModule.h"
#include "qpid/broker/Exchange.h"
+#include "qpid/broker/NameGenerator.h"
#include "qpid/UrlArray.h"
namespace qpid {
@@ -147,7 +148,6 @@ Link::Link(const string& _name,
persistenceId(0), broker(_broker), state(0),
visitCount(0),
currentInterval(1),
- closing(false),
reconnectNext(0), // Index of next address for reconnecting in url.
nextFreeChannel(1),
freeChannels(1, framing::CHANNEL_MAX),
@@ -170,12 +170,8 @@ Link::Link(const string& _name,
agent->addObject(mgmtObject, 0, durable);
}
}
- if (links->isPassive()) {
- setStateLH(STATE_PASSIVE);
- } else {
- setStateLH(STATE_WAITING);
- startConnectionLH();
- }
+ setStateLH(STATE_WAITING);
+ startConnectionLH();
broker->getTimer().add(timerTask);
if (failover) {
@@ -209,9 +205,6 @@ void Link::setStateLH (int newState)
state = newState;
- if (hideManagement())
- return;
-
switch (state)
{
case STATE_WAITING : mgmtObject->set_state("Waiting"); break;
@@ -219,7 +212,7 @@ void Link::setStateLH (int newState)
case STATE_OPERATIONAL : mgmtObject->set_state("Operational"); break;
case STATE_FAILED : mgmtObject->set_state("Failed"); break;
case STATE_CLOSED : mgmtObject->set_state("Closed"); break;
- case STATE_PASSIVE : mgmtObject->set_state("Passive"); break;
+ case STATE_CLOSING : mgmtObject->set_state("Closing"); break;
}
}
@@ -230,40 +223,39 @@ void Link::startConnectionLH ()
// Set the state before calling connect. It is possible that connect
// will fail synchronously and call Link::closed before returning.
setStateLH(STATE_CONNECTING);
- broker->connect (host, boost::lexical_cast<std::string>(port), transport,
+ broker->connect (name, host, boost::lexical_cast<std::string>(port), transport,
boost::bind (&Link::closed, this, _1, _2));
QPID_LOG (info, "Inter-broker link connecting to " << host << ":" << port);
} catch(const std::exception& e) {
QPID_LOG(error, "Link connection to " << host << ":" << port << " failed: "
<< e.what());
setStateLH(STATE_WAITING);
- if (!hideManagement())
- mgmtObject->set_lastError (e.what());
+ mgmtObject->set_lastError (e.what());
}
}
void Link::established(Connection* c)
{
- if (state == STATE_PASSIVE) return;
stringstream addr;
addr << host << ":" << port;
QPID_LOG (info, "Inter-broker link established to " << addr.str());
- if (!hideManagement() && agent)
+ if (agent)
agent->raiseEvent(_qmf::EventBrokerLinkUp(addr.str()));
- bool isClosing = false;
+ bool isClosing = true;
{
Mutex::ScopedLock mutex(lock);
- setStateLH(STATE_OPERATIONAL);
- currentInterval = 1;
- visitCount = 0;
- connection = c;
- isClosing = closing;
+ if (state != STATE_CLOSING) {
+ isClosing = false;
+ setStateLH(STATE_OPERATIONAL);
+ currentInterval = 1;
+ visitCount = 0;
+ connection = c;
+ c->requestIOProcessing (boost::bind(&Link::ioThreadProcessing, this));
+ }
}
if (isClosing)
destroy();
- else // Process any IO tasks bridges added before established.
- c->requestIOProcessing (boost::bind(&Link::ioThreadProcessing, this));
}
@@ -288,11 +280,12 @@ class DetachedCallback : public SessionHandler::ErrorListener {
};
}
-void Link::opened() {
+void Link::opened()
+{
Mutex::ScopedLock mutex(lock);
- if (!connection) return;
+ if (!connection || state != STATE_OPERATIONAL) return;
- if (!hideManagement() && connection->GetManagementObject()) {
+ if (connection->GetManagementObject()) {
mgmtObject->set_connectionRef(connection->GetManagementObject()->getObjectId());
}
@@ -347,37 +340,43 @@ void Link::opened() {
}
}
+
+// Called when an outstanding connection attempt fails or an established connection closes (see startConnectionLH)
void Link::closed(int, std::string text)
{
- Mutex::ScopedLock mutex(lock);
QPID_LOG (info, "Inter-broker link disconnected from " << host << ":" << port << " " << text);
- connection = 0;
+ bool isClosing = false;
+ {
+ Mutex::ScopedLock mutex(lock);
+
+ connection = 0;
- if (!hideManagement()) {
mgmtObject->set_connectionRef(qpid::management::ObjectId());
if (state == STATE_OPERATIONAL && agent) {
stringstream addr;
addr << host << ":" << port;
agent->raiseEvent(_qmf::EventBrokerLinkDown(addr.str()));
}
- }
- for (Bridges::iterator i = active.begin(); i != active.end(); i++) {
- (*i)->closed();
- created.push_back(*i);
- }
- active.clear();
+ for (Bridges::iterator i = active.begin(); i != active.end(); i++) {
+ (*i)->closed();
+ created.push_back(*i);
+ }
+ active.clear();
- if (state != STATE_FAILED && state != STATE_PASSIVE)
- {
- setStateLH(STATE_WAITING);
- if (!hideManagement())
+ if (state == STATE_CLOSING) {
+ isClosing = true;
+ } else if (state != STATE_FAILED) {
+ setStateLH(STATE_WAITING);
mgmtObject->set_lastError (text);
+ }
}
+ if (isClosing) destroy();
}
-// Called in connection IO thread, cleans up the connection before destroying Link
+// Cleans up the connection before destroying the Link. Must be called in the connection's
+// IO thread if the connection is active. Note to callers: this may call "delete this"!
void Link::destroy ()
{
Bridges toDelete;
@@ -407,7 +406,9 @@ void Link::destroy ()
for (Bridges::iterator i = toDelete.begin(); i != toDelete.end(); i++)
(*i)->close();
toDelete.clear();
- listener(this); // notify LinkRegistry that this Link has been destroyed
+ // notify LinkRegistry that this Link has been destroyed. Will result in "delete
+ // this" if LinkRegistry is holding the last shared pointer to *this
+ listener(this);
}
void Link::add(Bridge::shared_ptr bridge)
@@ -449,7 +450,7 @@ void Link::ioThreadProcessing()
{
Mutex::ScopedLock mutex(lock);
- if (state != STATE_OPERATIONAL || closing)
+ if (state != STATE_OPERATIONAL)
return;
// check for bridge session errors and recover
@@ -486,9 +487,9 @@ void Link::ioThreadProcessing()
void Link::maintenanceVisit ()
{
Mutex::ScopedLock mutex(lock);
- if (closing) return;
- if (state == STATE_WAITING)
- {
+
+ switch (state) {
+ case STATE_WAITING:
visitCount++;
if (visitCount >= currentInterval)
{
@@ -501,11 +502,17 @@ void Link::maintenanceVisit ()
startConnectionLH();
}
}
+ break;
+
+ case STATE_OPERATIONAL:
+ if ((!active.empty() || !created.empty() || !cancellations.empty()) &&
+ connection && connection->isOpen())
+ connection->requestIOProcessing (boost::bind(&Link::ioThreadProcessing, this));
+ break;
+
+ default: // no-op for all other states
+ break;
}
- else if (state == STATE_OPERATIONAL &&
- (!active.empty() || !created.empty() || !cancellations.empty()) &&
- connection && connection->isOpen())
- connection->requestIOProcessing (boost::bind(&Link::ioThreadProcessing, this));
}
void Link::reconnectLH(const Address& a)
@@ -514,14 +521,13 @@ void Link::reconnectLH(const Address& a)
port = a.port;
transport = a.protocol;
- if (!hideManagement()) {
- stringstream errorString;
- errorString << "Failing over to " << a;
- mgmtObject->set_lastError(errorString.str());
- mgmtObject->set_host(host);
- mgmtObject->set_port(port);
- mgmtObject->set_transport(transport);
- }
+ stringstream errorString;
+ errorString << "Failing over to " << a;
+ mgmtObject->set_lastError(errorString.str());
+ mgmtObject->set_host(host);
+ mgmtObject->set_port(port);
+ mgmtObject->set_transport(transport);
+
startConnectionLH();
}
@@ -538,12 +544,6 @@ bool Link::tryFailoverLH() {
return false;
}
-// Management updates for a link are inconsistent in a cluster, so they are
-// suppressed.
-bool Link::hideManagement() const {
- return !mgmtObject || ( broker && broker->isInCluster());
-}
-
// Allocate channel from link free pool
framing::ChannelId Link::nextChannel()
{
@@ -583,10 +583,17 @@ void Link::returnChannel(framing::ChannelId c)
void Link::notifyConnectionForced(const string text)
{
- Mutex::ScopedLock mutex(lock);
- setStateLH(STATE_FAILED);
- if (!hideManagement())
- mgmtObject->set_lastError(text);
+ bool isClosing = false;
+ {
+ Mutex::ScopedLock mutex(lock);
+ if (state == STATE_CLOSING) {
+ isClosing = true;
+ } else {
+ setStateLH(STATE_FAILED);
+ mgmtObject->set_lastError(text);
+ }
+ }
+ if (isClosing) destroy();
}
void Link::setPersistenceId(uint64_t id) const
@@ -676,14 +683,25 @@ ManagementObject::shared_ptr Link::GetManagementObject(void) const
void Link::close() {
QPID_LOG(debug, "Link::close(), link=" << name );
- Mutex::ScopedLock mutex(lock);
- if (!closing) {
- closing = true;
- if (state != STATE_CONNECTING && connection) {
- //connection can only be closed on the connections own IO processing thread
- connection->requestIOProcessing(boost::bind(&Link::destroy, this));
+ bool destroy_now = false;
+ {
+ Mutex::ScopedLock mutex(lock);
+ if (state != STATE_CLOSING) {
+ int old_state = state;
+ setStateLH(STATE_CLOSING);
+ if (connection) {
+                //connection can only be closed on the connection's own IO processing thread
+ connection->requestIOProcessing(boost::bind(&Link::destroy, this));
+ } else if (old_state == STATE_CONNECTING) {
+ // cannot destroy Link now since a connection request is outstanding.
+ // destroy the link after we get a response (see Link::established,
+ // Link::closed, Link::notifyConnectionForced, etc).
+ } else {
+ destroy_now = true;
+ }
}
}
+ if (destroy_now) destroy();
}
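// Illustrative, self-contained sketch (hedged) of the close() decision above;
// the enum and function are stand-ins, not part of the real Link API.
enum CloseAction {
    DESTROY_ON_IO_THREAD,         // connection exists: destroy() must run on its IO thread
    DESTROY_AFTER_CONNECT_RESULT, // connect outstanding: established()/closed()/notifyConnectionForced() finish it
    DESTROY_NOW                   // nothing outstanding: destroy() immediately
};

static CloseAction closeAction(bool hasConnection, bool connecting) {
    if (hasConnection) return DESTROY_ON_IO_THREAD;
    if (connecting)    return DESTROY_AFTER_CONNECT_RESULT;
    return DESTROY_NOW;
}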
@@ -727,22 +745,6 @@ Manageable::status_t Link::ManagementMethod (uint32_t op, Args& args, string& te
return Manageable::STATUS_UNKNOWN_METHOD;
}
-void Link::setPassive(bool passive)
-{
- Mutex::ScopedLock mutex(lock);
- if (passive) {
- setStateLH(STATE_PASSIVE);
- } else {
- if (state == STATE_PASSIVE) {
- setStateLH(STATE_WAITING);
- } else {
- QPID_LOG(warning, "Ignoring attempt to activate non-passive link "
- << host << ":" << port);
- }
- }
-}
-
-
/** utility to clean up connection resources correctly */
void Link::closeConnection( const std::string& reason)
{
@@ -778,28 +780,6 @@ namespace {
const std::string FAILOVER_INDEX("failover-index");
}
-void Link::getState(framing::FieldTable& state) const
-{
- state.clear();
- Mutex::ScopedLock mutex(lock);
- if (!url.empty()) {
- state.setString(FAILOVER_ADDRESSES, url.str());
- state.setInt(FAILOVER_INDEX, reconnectNext);
- }
-}
-
-void Link::setState(const framing::FieldTable& state)
-{
- Mutex::ScopedLock mutex(lock);
- if (state.isSet(FAILOVER_ADDRESSES)) {
- Url failovers(state.getAsString(FAILOVER_ADDRESSES));
- setUrl(failovers);
- }
- if (state.isSet(FAILOVER_INDEX)) {
- reconnectNext = state.getAsInt(FAILOVER_INDEX);
- }
-}
-
std::string Link::createName(const std::string& transport,
const std::string& host,
uint16_t port)
@@ -810,14 +790,6 @@ std::string Link::createName(const std::string& transport,
return linkName.str();
}
-
-bool Link::pendingConnection(const std::string& _host, uint16_t _port) const
-{
- Mutex::ScopedLock mutex(lock);
- return (isConnecting() && _port == port && _host == host);
-}
-
-
const std::string Link::exchangeTypeName("qpid.LinkExchange");
}} // namespace qpid::broker
diff --git a/qpid/cpp/src/qpid/broker/Link.h b/qpid/cpp/src/qpid/broker/Link.h
index 97511de08f..01ddc68d97 100644
--- a/qpid/cpp/src/qpid/broker/Link.h
+++ b/qpid/cpp/src/qpid/broker/Link.h
@@ -74,7 +74,6 @@ class Link : public PersistableConfig, public management::Manageable {
int state;
uint32_t visitCount;
uint32_t currentInterval;
- bool closing;
Url url; // URL can contain many addresses.
size_t reconnectNext; // Index for next re-connect attempt
@@ -98,7 +97,7 @@ class Link : public PersistableConfig, public management::Manageable {
static const int STATE_OPERATIONAL = 3;
static const int STATE_FAILED = 4;
static const int STATE_CLOSED = 5;
- static const int STATE_PASSIVE = 6;
+ static const int STATE_CLOSING = 6; // Waiting for outstanding connect to complete first
static const uint32_t MAX_INTERVAL = 32;
@@ -107,7 +106,6 @@ class Link : public PersistableConfig, public management::Manageable {
void destroy(); // Cleanup connection before link goes away
void ioThreadProcessing(); // Called on connection's IO thread by request
bool tryFailoverLH(); // Called during maintenance visit
- bool hideManagement() const;
void reconnectLH(const Address&); //called by LinkRegistry
// connection management (called by LinkRegistry)
@@ -116,7 +114,6 @@ class Link : public PersistableConfig, public management::Manageable {
void closed(int, std::string); // Called when connection goes away
void notifyConnectionForced(const std::string text);
void closeConnection(const std::string& reason);
- bool pendingConnection(const std::string& host, uint16_t port) const; // is Link trying to connect to this remote?
friend class LinkRegistry; // to call established, opened, closed
@@ -167,7 +164,6 @@ class Link : public PersistableConfig, public management::Manageable {
std::string getPassword() { return password; }
Broker* getBroker() { return broker; }
- void setPassive(bool p);
bool isConnecting() const { return state == STATE_CONNECTING; }
// PersistableConfig:
@@ -190,10 +186,6 @@ class Link : public PersistableConfig, public management::Manageable {
static const std::string exchangeTypeName;
static boost::shared_ptr<Exchange> linkExchangeFactory(const std::string& name);
- // replicate internal state of this Link for clustering
- void getState(framing::FieldTable& state) const;
- void setState(const framing::FieldTable& state);
-
/** create a name for a link (if none supplied by user config) */
static std::string createName(const std::string& transport,
const std::string& host,
diff --git a/qpid/cpp/src/qpid/broker/LinkRegistry.cpp b/qpid/cpp/src/qpid/broker/LinkRegistry.cpp
index 10f689c976..5af6053943 100644
--- a/qpid/cpp/src/qpid/broker/LinkRegistry.cpp
+++ b/qpid/cpp/src/qpid/broker/LinkRegistry.cpp
@@ -19,8 +19,10 @@
*
*/
#include "qpid/broker/LinkRegistry.h"
-#include "qpid/broker/Link.h"
+
+#include "qpid/broker/Broker.h"
#include "qpid/broker/Connection.h"
+#include "qpid/broker/Link.h"
#include "qpid/log/Statement.h"
#include <iostream>
#include <boost/format.hpp>
@@ -42,7 +44,7 @@ namespace _qmf = qmf::org::apache::qpid::broker;
// factored: The persistence element should be factored separately
LinkRegistry::LinkRegistry () :
broker(0),
- parent(0), store(0), passive(false),
+ parent(0), store(0),
realm("")
{
}
@@ -59,7 +61,7 @@ class LinkRegistryConnectionObserver : public ConnectionObserver {
LinkRegistry::LinkRegistry (Broker* _broker) :
broker(_broker),
- parent(0), store(0), passive(false),
+ parent(0), store(0),
realm(broker->getOptions().realm)
{
broker->getConnectionObservers().add(
@@ -270,38 +272,6 @@ MessageStore* LinkRegistry::getStore() const {
return store;
}
-namespace {
- void extractHostPort(const std::string& connId, std::string *host, uint16_t *port)
- {
- // Extract host and port of remote broker from connection id string.
- //
- // TODO aconway 2011-02-01: centralize code that constructs/parses connection
- // management IDs. Currently sys:: protocol factories and IO plugins construct the
- // IDs and LinkRegistry parses them.
- // KAG: current connection id format assumed:
- // "localhost:port-remotehost:port". In the case of IpV6, the host addresses are
- // contained within brackets "[...]", example:
- // connId="[::1]:36859-[::1]:48603". Liberal use of "asserts" provided to alert us
- // if this assumption changes!
- size_t separator = connId.find('-');
- assert(separator != std::string::npos);
- std::string remote = connId.substr(separator+1, std::string::npos);
- separator = remote.rfind(":");
- assert(separator != std::string::npos);
- *host = remote.substr(0, separator);
- // IPv6 - host is bracketed by "[]", strip them
- if ((*host)[0] == '[' && (*host)[host->length() - 1] == ']') {
- *host = host->substr(1, host->length() - 2);
- }
- try {
- *port = boost::lexical_cast<uint16_t>(remote.substr(separator+1, std::string::npos));
- } catch (const boost::bad_lexical_cast&) {
- QPID_LOG(error, "Invalid format for connection identifier! '" << connId << "'");
- assert(false);
- }
- }
-}
-
/** find the Link that corresponds to the given connection */
Link::shared_ptr LinkRegistry::findLink(const std::string& connId)
{
@@ -321,19 +291,15 @@ void LinkRegistry::notifyConnection(const std::string& key, Connection* c)
// create a mapping from connection id to link
QPID_LOG(debug, "LinkRegistry::notifyConnection(); key=" << key );
std::string host;
- uint16_t port = 0;
- extractHostPort( key, &host, &port );
Link::shared_ptr link;
{
Mutex::ScopedLock locker(lock);
- for (LinkMap::iterator l = pendingLinks.begin(); l != pendingLinks.end(); ++l) {
- if (l->second->pendingConnection(host, port)) {
- link = l->second;
- pendingLinks.erase(l);
- connections[key] = link->getName();
- QPID_LOG(debug, "LinkRegistry:: found pending =" << link->getName());
- break;
- }
+ LinkMap::iterator l = pendingLinks.find(key);
+ if (l != pendingLinks.end()) {
+ link = l->second;
+ pendingLinks.erase(l);
+ connections[key] = link->getName();
+ QPID_LOG(debug, "LinkRegistry:: found pending =" << link->getName());
}
}
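// Illustrative sketch (hedged): because Broker::connect() now carries the link
// name through to the connection, the registry can key pendingLinks by that name
// and drop the old extractHostPort() parsing. The map and value types below are
// simplified stand-ins for LinkMap and Link::shared_ptr.
#include <map>
#include <string>

typedef std::map<std::string, std::string> PendingMap;

static bool claimPendingLink(PendingMap& pending, const std::string& connectionKey,
                             std::string& linkName) {
    PendingMap::iterator i = pending.find(connectionKey);  // direct lookup by name
    if (i == pending.end()) return false;
    linkName = i->second;
    pending.erase(i);
    return true;
}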
@@ -448,26 +414,4 @@ std::string LinkRegistry::getAuthIdentity(const std::string& key)
return link->getUsername();
}
-
-void LinkRegistry::setPassive(bool p)
-{
- Mutex::ScopedLock locker(lock);
- passive = p;
- if (passive) { QPID_LOG(info, "Passivating links"); }
- else { QPID_LOG(info, "Activating links"); }
- for (LinkMap::iterator i = links.begin(); i != links.end(); i++) {
- i->second->setPassive(passive);
- }
-}
-
-void LinkRegistry::eachLink(boost::function<void(boost::shared_ptr<Link>)> f) {
- Mutex::ScopedLock locker(lock);
- for (LinkMap::iterator i = links.begin(); i != links.end(); ++i) f(i->second);
-}
-
-void LinkRegistry::eachBridge(boost::function<void(boost::shared_ptr<Bridge>)> f) {
- Mutex::ScopedLock locker(lock);
- for (BridgeMap::iterator i = bridges.begin(); i != bridges.end(); ++i) f(i->second);
-}
-
}} // namespace qpid::broker
diff --git a/qpid/cpp/src/qpid/broker/LinkRegistry.h b/qpid/cpp/src/qpid/broker/LinkRegistry.h
index 076ab831c9..21e8ddec81 100644
--- a/qpid/cpp/src/qpid/broker/LinkRegistry.h
+++ b/qpid/cpp/src/qpid/broker/LinkRegistry.h
@@ -53,7 +53,6 @@ namespace broker {
Broker* broker;
management::Manageable* parent;
MessageStore* store;
- bool passive;
std::string realm;
boost::shared_ptr<Link> findLink(const std::string& key);
@@ -144,20 +143,6 @@ namespace broker {
QPID_BROKER_EXTERN std::string getPassword (const std::string& key);
QPID_BROKER_EXTERN std::string getHost (const std::string& key);
QPID_BROKER_EXTERN uint16_t getPort (const std::string& key);
-
- /**
- * Called to alter passive state. In passive state the links
- * and bridges managed by a link registry will be recorded and
- * updated but links won't actually establish connections and
- * bridges won't therefore pull or push any messages.
- */
- QPID_BROKER_EXTERN void setPassive(bool);
- QPID_BROKER_EXTERN bool isPassive() { return passive; }
-
- /** Iterate over each link in the registry. Used for cluster updates. */
- QPID_BROKER_EXTERN void eachLink(boost::function<void(boost::shared_ptr<Link>)> f);
- /** Iterate over each bridge in the registry. Used for cluster updates. */
- QPID_BROKER_EXTERN void eachBridge(boost::function<void(boost::shared_ptr< Bridge>)> f);
};
}
}
diff --git a/qpid/cpp/src/qpid/broker/MessageGroupManager.cpp b/qpid/cpp/src/qpid/broker/MessageGroupManager.cpp
index 47e40a4794..c083e4ee0f 100644
--- a/qpid/cpp/src/qpid/broker/MessageGroupManager.cpp
+++ b/qpid/cpp/src/qpid/broker/MessageGroupManager.cpp
@@ -302,19 +302,6 @@ void MessageGroupManager::setDefaults(const std::string& groupId) // static
defaultGroupId = groupId;
}
-/** Cluster replication:
-
- state map format:
-
- { "group-state": [ {"name": <group-name>,
- "owner": <consumer-name>-or-empty,
- "acquired-ct": <acquired count>,
- "positions": [Seqnumbers, ... ]},
- {...}
- ]
- }
-*/
-
namespace {
const std::string GROUP_NAME("name");
const std::string GROUP_OWNER("owner");
@@ -324,100 +311,3 @@ namespace {
const std::string GROUP_STATE("group-state");
}
-
-/** Runs on UPDATER to snapshot current state */
-void MessageGroupManager::getState(qpid::framing::FieldTable& state ) const
-{
- using namespace qpid::framing;
- state.clear();
- framing::Array groupState(TYPE_CODE_MAP);
- for (GroupMap::const_iterator g = messageGroups.begin();
- g != messageGroups.end(); ++g) {
-
- framing::FieldTable group;
- group.setString(GROUP_NAME, g->first);
- group.setString(GROUP_OWNER, g->second.owner);
- group.setInt(GROUP_ACQUIRED_CT, g->second.acquired);
- framing::Array positions(TYPE_CODE_UINT32);
- framing::Array acquiredMsgs(TYPE_CODE_BOOLEAN);
- for (GroupState::MessageFifo::const_iterator p = g->second.members.begin();
- p != g->second.members.end(); ++p) {
- positions.push_back(framing::Array::ValuePtr(new IntegerValue( p->position )));
- acquiredMsgs.push_back(framing::Array::ValuePtr(new BoolValue( p->acquired )));
- }
- group.setArray(GROUP_POSITIONS, positions);
- group.setArray(GROUP_ACQUIRED_MSGS, acquiredMsgs);
- groupState.push_back(framing::Array::ValuePtr(new FieldTableValue(group)));
- }
- state.setArray(GROUP_STATE, groupState);
-
- QPID_LOG(debug, "Queue \"" << qName << "\": replicating message group state, key=" << groupIdHeader);
-}
-
-
-/** called on UPDATEE to set state from snapshot */
-void MessageGroupManager::setState(const qpid::framing::FieldTable& state)
-{
- using namespace qpid::framing;
- messageGroups.clear();
- freeGroups.clear();
- cachedGroup = 0;
-
- framing::Array groupState(TYPE_CODE_MAP);
-
- bool ok = state.getArray(GROUP_STATE, groupState);
- if (!ok) {
- QPID_LOG(error, "Unable to find message group state information for queue \"" <<
- qName << "\": cluster inconsistency error!");
- return;
- }
-
- for (framing::Array::const_iterator g = groupState.begin();
- g != groupState.end(); ++g) {
- framing::FieldTable group;
- ok = framing::getEncodedValue<FieldTable>(*g, group);
- if (!ok) {
- QPID_LOG(error, "Invalid message group state information for queue \"" <<
- qName << "\": table encoding error!");
- return;
- }
- MessageGroupManager::GroupState state;
- if (!group.isSet(GROUP_NAME) || !group.isSet(GROUP_OWNER) || !group.isSet(GROUP_ACQUIRED_CT)) {
- QPID_LOG(error, "Invalid message group state information for queue \"" <<
- qName << "\": fields missing error!");
- return;
- }
- state.group = group.getAsString(GROUP_NAME);
- state.owner = group.getAsString(GROUP_OWNER);
- state.acquired = group.getAsInt(GROUP_ACQUIRED_CT);
- framing::Array positions(TYPE_CODE_UINT32);
- ok = group.getArray(GROUP_POSITIONS, positions);
- if (!ok) {
- QPID_LOG(error, "Invalid message group state information for queue \"" <<
- qName << "\": position encoding error!");
- return;
- }
- framing::Array acquiredMsgs(TYPE_CODE_BOOLEAN);
- ok = group.getArray(GROUP_ACQUIRED_MSGS, acquiredMsgs);
- if (!ok || positions.count() != acquiredMsgs.count()) {
- QPID_LOG(error, "Invalid message group state information for queue \"" <<
- qName << "\": acquired flag encoding error!");
- return;
- }
-
- Array::const_iterator a = acquiredMsgs.begin();
- for (Array::const_iterator p = positions.begin(); p != positions.end(); ++p) {
- GroupState::MessageState mState((*p)->getIntegerValue<uint32_t, 4>());
- mState.acquired = (*a++)->getIntegerValue<bool>();
- state.members.push_back(mState);
- }
-
- messageGroups[state.group] = state;
- if (!state.owned()) {
- assert(state.members.size());
- freeGroups[state.members.front().position] = &messageGroups[state.group];
- }
- }
-
- QPID_LOG(debug, "Queue \"" << qName << "\": message group state replicated, key =" << groupIdHeader)
-}
diff --git a/qpid/cpp/src/qpid/broker/MessageGroupManager.h b/qpid/cpp/src/qpid/broker/MessageGroupManager.h
index fe39e007b5..bf45e776c8 100644
--- a/qpid/cpp/src/qpid/broker/MessageGroupManager.h
+++ b/qpid/cpp/src/qpid/broker/MessageGroupManager.h
@@ -25,11 +25,12 @@
/* for managing message grouping on Queues */
#include "qpid/broker/BrokerImportExport.h"
-#include "qpid/broker/StatefulQueueObserver.h"
+#include "qpid/broker/QueueObserver.h"
#include "qpid/broker/MessageDistributor.h"
#include "qpid/framing/SequenceNumber.h"
#include "qpid/sys/unordered_map.h"
+#include "boost/shared_ptr.hpp"
#include <deque>
namespace qpid {
@@ -39,8 +40,9 @@ class QueueObserver;
struct QueueSettings;
class MessageDistributor;
class Messages;
+class Consumer;
-class MessageGroupManager : public StatefulQueueObserver, public MessageDistributor
+class MessageGroupManager : public QueueObserver, public MessageDistributor
{
static std::string defaultGroupId; // assigned if no group id header present
@@ -101,10 +103,10 @@ class MessageGroupManager : public StatefulQueueObserver, public MessageDistribu
MessageGroupManager(const std::string& header, const std::string& _qName,
Messages& container, unsigned int _timestamp=0 )
- : StatefulQueueObserver(std::string("MessageGroupManager:") + header),
- groupIdHeader( header ), timestamp(_timestamp), messages(container), qName(_qName),
- hits(0), misses(0),
- lastMsg(0), cachedGroup(0) {}
+ : groupIdHeader( header ), timestamp(_timestamp), messages(container),
+ qName(_qName),
+ hits(0), misses(0),
+ lastMsg(0), cachedGroup(0) {}
virtual ~MessageGroupManager();
// QueueObserver iface
@@ -114,8 +116,6 @@ class MessageGroupManager : public StatefulQueueObserver, public MessageDistribu
void dequeued( const Message& qm );
void consumerAdded( const Consumer& ) {};
void consumerRemoved( const Consumer& ) {};
- void getState(qpid::framing::FieldTable& state ) const;
- void setState(const qpid::framing::FieldTable&);
// MessageDistributor iface
bool acquire(const std::string& c, Message& );
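
With the stateful base class gone, MessageGroupManager is a plain QueueObserver: it only reacts to enqueue/dequeue/acquire/requeue callbacks and no longer exposes a replicable snapshot. A minimal sketch of that observer shape, with simplified stand-in types rather than the real qpid/broker/QueueObserver.h interface:

    #include <iostream>
    #include <string>

    struct Message { unsigned position; std::string group; };

    // The shape of a queue observer: purely reactive hooks, no snapshot to ship.
    class QueueObserver {
      public:
        virtual ~QueueObserver() {}
        virtual void enqueued(const Message&) = 0;
        virtual void dequeued(const Message&) = 0;
        virtual void acquired(const Message&) {}
        virtual void requeued(const Message&) {}
    };

    // Rebuilds everything it needs from the event stream alone.
    class GroupDepthCounter : public QueueObserver {
        unsigned depth;
      public:
        GroupDepthCounter() : depth(0) {}
        void enqueued(const Message& m) { ++depth; std::cout << "enq " << m.group << " depth=" << depth << "\n"; }
        void dequeued(const Message& m) { --depth; std::cout << "deq " << m.group << " depth=" << depth << "\n"; }
    };

    int main() {
        GroupDepthCounter observer;
        Message m; m.position = 1; m.group = "blue";
        observer.enqueued(m);
        observer.dequeued(m);
        return 0;
    }
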
diff --git a/qpid/cpp/src/qpid/broker/MessageStore.h b/qpid/cpp/src/qpid/broker/MessageStore.h
index ab0225ef6b..68b5da7852 100644
--- a/qpid/cpp/src/qpid/broker/MessageStore.h
+++ b/qpid/cpp/src/qpid/broker/MessageStore.h
@@ -46,20 +46,6 @@ class MessageStore : public TransactionalStore, public Recoverable {
public:
/**
- * If called after initialization but before recovery, will discard the database
- * content and reinitialize as though it were a new installation. If the parameter
- * saveStoreContent is true, the content of the store will be saved in such a way
- * that the truncate can be reversed. This is used when cluster nodes recover and
- * must get their content from a cluster sync rather than directly from the store.
- *
- * @param saveStoreContent If true, will move content of the store to a backup
- * location where they may be restored later if needed. It is
- * not necessary to save more than one prior version of the
- * store.
- */
- virtual void truncateInit(const bool saveStoreContent = false) = 0;
-
- /**
* Record the existence of a durable queue
*/
virtual void create(PersistableQueue& queue,
diff --git a/qpid/cpp/src/qpid/broker/MessageStoreModule.cpp b/qpid/cpp/src/qpid/broker/MessageStoreModule.cpp
index cd9fd4c933..f19b31fa76 100644
--- a/qpid/cpp/src/qpid/broker/MessageStoreModule.cpp
+++ b/qpid/cpp/src/qpid/broker/MessageStoreModule.cpp
@@ -42,11 +42,6 @@ MessageStoreModule::~MessageStoreModule()
bool MessageStoreModule::init(const Options*) { return true; }
-void MessageStoreModule::truncateInit(const bool pushDownStoreFiles)
-{
- TRANSFER_EXCEPTION(store->truncateInit(pushDownStoreFiles));
-}
-
void MessageStoreModule::create(PersistableQueue& queue, const FieldTable& args)
{
TRANSFER_EXCEPTION(store->create(queue, args));
diff --git a/qpid/cpp/src/qpid/broker/MessageStoreModule.h b/qpid/cpp/src/qpid/broker/MessageStoreModule.h
index 56b5a3c1ae..82308db84c 100644
--- a/qpid/cpp/src/qpid/broker/MessageStoreModule.h
+++ b/qpid/cpp/src/qpid/broker/MessageStoreModule.h
@@ -41,7 +41,6 @@ class MessageStoreModule : public MessageStore
MessageStoreModule(boost::shared_ptr<MessageStore>& store);
bool init(const Options* options);
- void truncateInit(const bool pushDownStoreFiles = false);
std::auto_ptr<TransactionContext> begin();
std::auto_ptr<TPCTransactionContext> begin(const std::string& xid);
void prepare(TPCTransactionContext& txn);
diff --git a/qpid/cpp/src/qpid/broker/Messages.h b/qpid/cpp/src/qpid/broker/Messages.h
index a94ac7e0bf..cd846a4973 100644
--- a/qpid/cpp/src/qpid/broker/Messages.h
+++ b/qpid/cpp/src/qpid/broker/Messages.h
@@ -91,13 +91,6 @@ class Messages
virtual Message* find(const QueueCursor&) = 0;
/**
- * Add an already acquired message to the queue.
- * Used by a cluster updatee to replicate acquired messages from the updater.
- * Only need be implemented by subclasses that keep track of
- * acquired messages.
- */
- //virtual void updateAcquired(const QueuedMessage&) { }
- /**
* Apply, the functor to each message held
*/
virtual void foreach(Functor) = 0;
diff --git a/qpid/cpp/src/qpid/broker/NullMessageStore.cpp b/qpid/cpp/src/qpid/broker/NullMessageStore.cpp
index 43f600eaf1..800731d304 100644
--- a/qpid/cpp/src/qpid/broker/NullMessageStore.cpp
+++ b/qpid/cpp/src/qpid/broker/NullMessageStore.cpp
@@ -52,8 +52,6 @@ NullMessageStore::NullMessageStore() : nextPersistenceId(1) {}
bool NullMessageStore::init(const Options* /*options*/) {return true;}
-void NullMessageStore::truncateInit(const bool /*pushDownStoreFiles*/) {}
-
void NullMessageStore::create(PersistableQueue& queue, const framing::FieldTable& /*args*/)
{
queue.setPersistenceId(nextPersistenceId++);
diff --git a/qpid/cpp/src/qpid/broker/NullMessageStore.h b/qpid/cpp/src/qpid/broker/NullMessageStore.h
index c6f402662e..7afc0a47a8 100644
--- a/qpid/cpp/src/qpid/broker/NullMessageStore.h
+++ b/qpid/cpp/src/qpid/broker/NullMessageStore.h
@@ -44,7 +44,6 @@ class QPID_BROKER_CLASS_EXTERN NullMessageStore : public MessageStore
QPID_BROKER_EXTERN NullMessageStore();
QPID_BROKER_EXTERN virtual bool init(const Options* options);
- QPID_BROKER_EXTERN virtual void truncateInit(const bool pushDownStoreFiles = false);
QPID_BROKER_EXTERN virtual std::auto_ptr<TransactionContext> begin();
QPID_BROKER_EXTERN virtual std::auto_ptr<TPCTransactionContext> begin(const std::string& xid);
QPID_BROKER_EXTERN virtual void prepare(TPCTransactionContext& txn);
diff --git a/qpid/cpp/src/qpid/broker/Queue.cpp b/qpid/cpp/src/qpid/broker/Queue.cpp
index 8af61bb49a..9a0e4a96f4 100644
--- a/qpid/cpp/src/qpid/broker/Queue.cpp
+++ b/qpid/cpp/src/qpid/broker/Queue.cpp
@@ -45,6 +45,7 @@
#include "qpid/framing/FieldValue.h"
#include "qpid/sys/Monitor.h"
#include "qpid/sys/Time.h"
+#include "qpid/sys/Timer.h"
#include "qpid/types/Variant.h"
#include "qmf/org/apache/qpid/broker/ArgsQueuePurge.h"
#include "qmf/org/apache/qpid/broker/ArgsQueueReroute.h"
@@ -237,9 +238,6 @@ void Queue::deliver(Message msg, TxBuffer* txn){
//'link' for whatever protocol is used; that would let protocol
//specific stuff be kept out the queue
- // Check for deferred delivery in a cluster.
- if (broker && broker->deferDelivery(name, msg))
- return;
if (broker::amqp_0_10::MessageTransfer::isImmediateDeliveryRequired(msg) && getConsumerCount() == 0) {
if (alternateExchange) {
DeliverableMessage deliverable(msg, 0);
@@ -1152,6 +1150,7 @@ Queue::shared_ptr Queue::restore( QueueRegistry& queues, Buffer& buffer )
void Queue::setAlternateExchange(boost::shared_ptr<Exchange> exchange)
{
alternateExchange = exchange;
+ alternateExchange->incAlternateUsers();
if (mgmtObject) {
if (exchange.get() != 0)
mgmtObject->set_altExchange(exchange->GetManagementObject()->getObjectId());
@@ -1201,7 +1200,7 @@ void Queue::tryAutoDelete(Broker& broker, Queue::shared_ptr queue, const std::st
if (queue->settings.autoDeleteDelay && queue->canAutoDelete()) {
AbsTime time(now(), Duration(queue->settings.autoDeleteDelay * TIME_SEC));
queue->autoDeleteTask = boost::intrusive_ptr<qpid::sys::TimerTask>(new AutoDeleteTask(broker, queue, connectionId, userId, time));
- broker.getClusterTimer().add(queue->autoDeleteTask);
+ broker.getTimer().add(queue->autoDeleteTask);
QPID_LOG(debug, "Timed auto-delete for " << queue->getName() << " initiated");
} else {
tryAutoDeleteImpl(broker, queue, connectionId, userId);
@@ -1431,15 +1430,6 @@ void Queue::observeEnqueue(const Message& m, const Mutex::ScopedLock&)
mgntEnqStats(m, mgmtObject, brokerMgmtObject);
}
-// Note: accessing listeners outside of lock is dangerous. Caller must ensure the queue's
-// state is not changed while listeners is referenced.
-QueueListeners& Queue::getListeners() { return listeners; }
-
-// Note: accessing messages outside of lock is dangerous. Caller must ensure the queue's
-// state is not changed while messages is referenced.
-Messages& Queue::getMessages() { return *messages; }
-const Messages& Queue::getMessages() const { return *messages; }
-
bool Queue::checkNotDeleted(const Consumer::shared_ptr& c)
{
if (deleted && !c->hideDeletedError())
diff --git a/qpid/cpp/src/qpid/broker/Queue.h b/qpid/cpp/src/qpid/broker/Queue.h
index bf1103902e..ef4d956826 100644
--- a/qpid/cpp/src/qpid/broker/Queue.h
+++ b/qpid/cpp/src/qpid/broker/Queue.h
@@ -38,7 +38,6 @@
#include "qpid/framing/SequenceNumber.h"
#include "qpid/sys/AtomicValue.h"
#include "qpid/sys/Monitor.h"
-#include "qpid/sys/Timer.h"
#include "qpid/management/Manageable.h"
#include "qmf/org/apache/qpid/broker/Queue.h"
#include "qmf/org/apache/qpid/broker/Broker.h"
@@ -56,6 +55,9 @@
#include <algorithm>
namespace qpid {
+namespace sys {
+class TimerTask;
+}
namespace broker {
class Broker;
class Exchange;
@@ -370,7 +372,7 @@ class Queue : public boost::enable_shared_from_this<Queue>,
*
* The _caller_ must ensure that any messages after pos have been dequeued.
*
- * Used by HA/cluster code for queue replication.
+ * Used by HA code for queue replication.
*/
QPID_BROKER_EXTERN void setPosition(framing::SequenceNumber pos);
@@ -402,11 +404,6 @@ class Queue : public boost::enable_shared_from_this<Queue>,
*/
QPID_BROKER_EXTERN void recoveryComplete(ExchangeRegistry& exchanges);
- // For cluster update
- QPID_BROKER_EXTERN QueueListeners& getListeners();
- QPID_BROKER_EXTERN Messages& getMessages();
- QPID_BROKER_EXTERN const Messages& getMessages() const;
-
/**
* Reserve space in policy for an enqueued message that
* has been recovered in the prepared state (dtx only)
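
Queue.h above stops including qpid/sys/Timer.h and instead forward-declares sys::TimerTask, moving the include into Queue.cpp; holding the task through a smart pointer is what makes the forward declaration sufficient in the header. A simplified, self-contained sketch of that technique (TimerTask and Queue here are stand-ins, not the real classes):

    #include <iostream>
    #include <boost/intrusive_ptr.hpp>

    // ---- what the header would contain -------------------------------------
    namespace sys { class TimerTask; }               // forward declaration only

    class Queue {
        boost::intrusive_ptr<sys::TimerTask> autoDeleteTask;
      public:
        Queue();
        ~Queue();                    // defined where TimerTask is complete
        void schedule();
    };

    // ---- what the .cpp would contain ----------------------------------------
    namespace sys {
    class TimerTask {
        int refs;
      public:
        TimerTask() : refs(0) {}
        virtual ~TimerTask() {}
        virtual void fire() { std::cout << "fired" << std::endl; }
        friend void intrusive_ptr_add_ref(TimerTask* t) { ++t->refs; }
        friend void intrusive_ptr_release(TimerTask* t) { if (--t->refs == 0) delete t; }
    };
    }

    Queue::Queue() {}
    Queue::~Queue() {}
    void Queue::schedule() { autoDeleteTask = new sys::TimerTask(); autoDeleteTask->fire(); }

    int main() {
        Queue q;
        q.schedule();
        return 0;
    }
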
diff --git a/qpid/cpp/src/qpid/broker/QueueCleaner.cpp b/qpid/cpp/src/qpid/broker/QueueCleaner.cpp
index 838bc28be8..8d9e3f43dd 100644
--- a/qpid/cpp/src/qpid/broker/QueueCleaner.cpp
+++ b/qpid/cpp/src/qpid/broker/QueueCleaner.cpp
@@ -18,15 +18,36 @@
* under the License.
*
*/
-#include "qpid/broker/Queue.h"
#include "qpid/broker/QueueCleaner.h"
#include "qpid/broker/Broker.h"
+#include "qpid/broker/Queue.h"
+#include "qpid/sys/Timer.h"
+
+#include <boost/function.hpp>
#include <boost/bind.hpp>
namespace qpid {
namespace broker {
+namespace {
+ typedef boost::function0<void> FireFunction;
+ class Task : public sys::TimerTask
+ {
+ public:
+ Task(FireFunction f, sys::Duration duration);
+ void fire();
+ private:
+ FireFunction fireFunction;
+ };
+
+ Task::Task(FireFunction f, qpid::sys::Duration d) : sys::TimerTask(d,"QueueCleaner"), fireFunction(f) {}
+
+ void Task::fire()
+ {
+ fireFunction();
+ }
+}
QueueCleaner::QueueCleaner(QueueRegistry& q, sys::Timer* t) : queues(q), timer(t) {}
QueueCleaner::~QueueCleaner()
@@ -37,7 +58,7 @@ QueueCleaner::~QueueCleaner()
void QueueCleaner::start(qpid::sys::Duration p)
{
period = p;
- task = new Task(*this, p);
+ task = new Task(boost::bind(&QueueCleaner::fired, this), p);
timer->add(task);
}
@@ -45,14 +66,6 @@ void QueueCleaner::setTimer(qpid::sys::Timer* timer) {
this->timer = timer;
}
-
-QueueCleaner::Task::Task(QueueCleaner& p, qpid::sys::Duration d) : sys::TimerTask(d,"QueueCleaner"), parent(p) {}
-
-void QueueCleaner::Task::fire()
-{
- parent.fired();
-}
-
namespace {
struct CollectQueues
{
diff --git a/qpid/cpp/src/qpid/broker/QueueCleaner.h b/qpid/cpp/src/qpid/broker/QueueCleaner.h
index ffebfe3e1b..896af1dcd5 100644
--- a/qpid/cpp/src/qpid/broker/QueueCleaner.h
+++ b/qpid/cpp/src/qpid/broker/QueueCleaner.h
@@ -23,9 +23,17 @@
*/
#include "qpid/broker/BrokerImportExport.h"
-#include "qpid/sys/Timer.h"
+#include "qpid/sys/Time.h"
+
+#include <boost/intrusive_ptr.hpp>
namespace qpid {
+
+namespace sys {
+ class Timer;
+ class TimerTask;
+}
+
namespace broker {
class QueueRegistry;
@@ -39,16 +47,8 @@ class QueueCleaner
QPID_BROKER_EXTERN ~QueueCleaner();
QPID_BROKER_EXTERN void start(sys::Duration period);
QPID_BROKER_EXTERN void setTimer(sys::Timer* timer);
- private:
- class Task : public sys::TimerTask
- {
- public:
- Task(QueueCleaner& parent, sys::Duration duration);
- void fire();
- private:
- QueueCleaner& parent;
- };
+ private:
boost::intrusive_ptr<sys::TimerTask> task;
QueueRegistry& queues;
sys::Timer* timer;
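
The QueueCleaner change above replaces a nested Task class that held a QueueCleaner& with a file-local task that fires an arbitrary boost::function0<void>, so the timer machinery no longer needs to know about QueueCleaner at all. A self-contained sketch of that callback pattern, with a simplified TimerTask stand-in instead of qpid::sys::TimerTask:

    #include <iostream>
    #include <string>
    #include <boost/bind.hpp>
    #include <boost/function.hpp>

    class TimerTask {
        std::string name;
      public:
        TimerTask(const std::string& n) : name(n) {}
        virtual ~TimerTask() {}
        virtual void fire() = 0;
        const std::string& getName() const { return name; }
    };

    typedef boost::function0<void> FireFunction;

    // Generic task: fires whatever callable it was given.
    class FunctionTask : public TimerTask {
        FireFunction fireFunction;
      public:
        FunctionTask(FireFunction f, const std::string& n) : TimerTask(n), fireFunction(f) {}
        void fire() { fireFunction(); }
    };

    class QueueCleaner {
      public:
        void fired() { std::cout << "purging expired messages" << std::endl; }
    };

    int main() {
        QueueCleaner cleaner;
        // The task no longer needs to know the QueueCleaner type at all.
        FunctionTask task(boost::bind(&QueueCleaner::fired, &cleaner), "QueueCleaner");
        task.fire();   // a real Timer would call this when the period elapses
        return 0;
    }
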
diff --git a/qpid/cpp/src/qpid/broker/QueueFlowLimit.cpp b/qpid/cpp/src/qpid/broker/QueueFlowLimit.cpp
index 944cc7e838..9b2e31c925 100644
--- a/qpid/cpp/src/qpid/broker/QueueFlowLimit.cpp
+++ b/qpid/cpp/src/qpid/broker/QueueFlowLimit.cpp
@@ -65,7 +65,7 @@ namespace {
QueueFlowLimit::QueueFlowLimit(Queue *_queue,
uint32_t _flowStopCount, uint32_t _flowResumeCount,
uint64_t _flowStopSize, uint64_t _flowResumeSize)
- : StatefulQueueObserver(std::string("QueueFlowLimit")), queue(_queue), queueName("<unknown>"),
+ : queue(_queue), queueName("<unknown>"),
flowStopCount(_flowStopCount), flowResumeCount(_flowResumeCount),
flowStopSize(_flowStopSize), flowResumeSize(_flowResumeSize),
flowStopped(false), count(0), size(0), broker(0)
@@ -129,11 +129,6 @@ void QueueFlowLimit::enqueued(const Message& msg)
}
if (flowStopped || !index.empty()) {
- // ignore flow control if we are populating the queue due to cluster replication:
- if (broker && broker->isClusterUpdatee()) {
- QPID_LOG(trace, "Queue \"" << queueName << "\": ignoring flow control for msg pos=" << msg.getSequence());
- return;
- }
QPID_LOG(trace, "Queue \"" << queueName << "\": setting flow control for msg pos=" << msg.getSequence());
msg.getPersistentContext()->getIngressCompletion().startCompleter(); // don't complete until flow resumes
bool unique;
@@ -296,79 +291,8 @@ QueueFlowLimit *QueueFlowLimit::createLimit(Queue *queue, const QueueSettings& s
return 0;
}
-/* Cluster replication */
-
-namespace {
- /** pack a set of sequence number ranges into a framing::Array */
- void buildSeqRangeArray(qpid::framing::Array *seqs,
- const qpid::framing::SequenceNumber& first,
- const qpid::framing::SequenceNumber& last)
- {
- seqs->push_back(qpid::framing::Array::ValuePtr(new Unsigned32Value(first)));
- seqs->push_back(qpid::framing::Array::ValuePtr(new Unsigned32Value(last)));
- }
-}
-
-/** Runs on UPDATER to snapshot current state */
-void QueueFlowLimit::getState(qpid::framing::FieldTable& state ) const
-{
- sys::Mutex::ScopedLock l(indexLock);
- state.clear();
-
- framing::SequenceSet ss;
- if (!index.empty()) {
- /* replicate the set of messages pending flow control */
- for (std::map<framing::SequenceNumber, Message >::const_iterator itr = index.begin();
- itr != index.end(); ++itr) {
- ss.add(itr->first);
- }
- framing::Array seqs(TYPE_CODE_UINT32);
- typedef boost::function<void(framing::SequenceNumber, framing::SequenceNumber)> arrayBuilder;
- ss.for_each((arrayBuilder)boost::bind(&buildSeqRangeArray, &seqs, _1, _2));
- state.setArray("pendingMsgSeqs", seqs);
- }
- QPID_LOG(debug, "Queue \"" << queueName << "\": flow limit replicating pending msgs, range=" << ss);
-}
-
-
-/** called on UPDATEE to set state from snapshot */
-void QueueFlowLimit::setState(const qpid::framing::FieldTable& state)
-{
- sys::Mutex::ScopedLock l(indexLock);
- index.clear();
-
- framing::SequenceSet fcmsg;
- framing::Array seqArray(TYPE_CODE_UINT32);
- if (state.getArray("pendingMsgSeqs", seqArray)) {
- assert((seqArray.count() & 0x01) == 0); // must be even since they are sequence ranges
- framing::Array::const_iterator i = seqArray.begin();
- while (i != seqArray.end()) {
- framing::SequenceNumber first((*i)->getIntegerValue<uint32_t, 4>());
- ++i;
- framing::SequenceNumber last((*i)->getIntegerValue<uint32_t, 4>());
- ++i;
- fcmsg.add(first, last);
- for (SequenceNumber seq = first; seq <= last; ++seq) {
- Message msg;
- queue->find(seq, msg); // fyi: may not be found if msg is acquired & unacked
- bool unique;
- unique = index.insert(std::pair<framing::SequenceNumber, Message >(seq, msg)).second;
- // Like this to avoid tripping up unused variable warning when NDEBUG set
- if (!unique) assert(unique);
- }
- }
- }
-
- flowStopped = index.size() != 0;
- if (queueMgmtObj) {
- queueMgmtObj->set_flowStopped(isFlowControlActive());
- }
- QPID_LOG(debug, "Queue \"" << queueName << "\": flow limit replicated the pending msgs, range=" << fcmsg)
-}
-
-
namespace qpid {
- namespace broker {
+namespace broker {
std::ostream& operator<<(std::ostream& out, const QueueFlowLimit& f)
{
@@ -377,6 +301,6 @@ std::ostream& operator<<(std::ostream& out, const QueueFlowLimit& f)
return out;
}
- }
+}
}
diff --git a/qpid/cpp/src/qpid/broker/QueueFlowLimit.h b/qpid/cpp/src/qpid/broker/QueueFlowLimit.h
index 0e83457efa..b9aa09ec3a 100644
--- a/qpid/cpp/src/qpid/broker/QueueFlowLimit.h
+++ b/qpid/cpp/src/qpid/broker/QueueFlowLimit.h
@@ -26,7 +26,7 @@
#include <iostream>
#include <memory>
#include "qpid/broker/BrokerImportExport.h"
-#include "qpid/broker/StatefulQueueObserver.h"
+#include "qpid/broker/QueueObserver.h"
#include "qpid/framing/FieldTable.h"
#include "qpid/framing/SequenceNumber.h"
#include "qpid/sys/AtomicValue.h"
@@ -40,6 +40,7 @@ namespace broker {
class Broker;
class Queue;
+class Message;
struct QueueSettings;
/**
@@ -49,7 +50,7 @@ struct QueueSettings;
* passing _either_ level may turn flow control ON, but _both_ must be
* below level before flow control will be turned OFF.
*/
- class QueueFlowLimit : public StatefulQueueObserver
+ class QueueFlowLimit : public QueueObserver
{
static uint64_t defaultMaxSize;
static uint defaultFlowStopRatio;
@@ -84,10 +85,6 @@ struct QueueSettings;
QPID_BROKER_EXTERN void acquired(const Message&) {};
QPID_BROKER_EXTERN void requeued(const Message&) {};
- /** for clustering: */
- QPID_BROKER_EXTERN void getState(qpid::framing::FieldTable&) const;
- QPID_BROKER_EXTERN void setState(const qpid::framing::FieldTable&);
-
uint32_t getFlowStopCount() const { return flowStopCount; }
uint32_t getFlowResumeCount() const { return flowResumeCount; }
uint64_t getFlowStopSize() const { return flowStopSize; }
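
QueueFlowLimit keeps the two-threshold behaviour described in the class comment above: crossing the stop level turns flow control on, and it only turns off again once the queue drains below the lower resume level. A simplified single-metric sketch of that hysteresis (the real class tracks both message count and byte size, and blocks producers via ingress completions):

    #include <cassert>
    #include <stdint.h>

    class FlowLimit {
        uint32_t flowStopCount, flowResumeCount;
        uint32_t count;
        bool flowStopped;
      public:
        FlowLimit(uint32_t stop, uint32_t resume)
            : flowStopCount(stop), flowResumeCount(resume), count(0), flowStopped(false) {}

        void enqueued() {
            ++count;
            if (!flowStopped && flowStopCount && count > flowStopCount)
                flowStopped = true;               // producer flow is now blocked
        }
        void dequeued() {
            if (count) --count;
            if (flowStopped && count < flowResumeCount)
                flowStopped = false;              // enough drained, resume producers
        }
        bool isFlowControlActive() const { return flowStopped; }
    };

    int main() {
        FlowLimit limit(10, 5);
        for (int i = 0; i < 11; ++i) limit.enqueued();
        assert(limit.isFlowControlActive());      // stop threshold crossed
        for (int i = 0; i < 3; ++i) limit.dequeued();
        assert(limit.isFlowControlActive());      // count == 8, still above resume level
        for (int i = 0; i < 4; ++i) limit.dequeued();
        assert(!limit.isFlowControlActive());     // count == 4 < 5, flow resumes
        return 0;
    }
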
diff --git a/qpid/cpp/src/qpid/broker/QueueRegistry.cpp b/qpid/cpp/src/qpid/broker/QueueRegistry.cpp
index b59eb530f0..576d0f198b 100644
--- a/qpid/cpp/src/qpid/broker/QueueRegistry.cpp
+++ b/qpid/cpp/src/qpid/broker/QueueRegistry.cpp
@@ -60,10 +60,8 @@ QueueRegistry::declare(const string& name, const QueueSettings& settings,
if (i == queues.end()) {
Queue::shared_ptr queue = create(name, settings);
//Move this to factory also?
- if (alternate) {
+ if (alternate)
queue->setAlternateExchange(alternate);//need to do this *before* create
- alternate->incAlternateUsers();
- }
if (!recovering) {
//create persistent record if required
queue->create();
diff --git a/qpid/cpp/src/qpid/broker/SaslAuthenticator.cpp b/qpid/cpp/src/qpid/broker/SaslAuthenticator.cpp
index a02882d05c..8211657e04 100644
--- a/qpid/cpp/src/qpid/broker/SaslAuthenticator.cpp
+++ b/qpid/cpp/src/qpid/broker/SaslAuthenticator.cpp
@@ -24,6 +24,7 @@
#endif
#include "qpid/broker/AclModule.h"
+#include "qpid/broker/Broker.h"
#include "qpid/broker/Connection.h"
#include "qpid/log/Statement.h"
#include "qpid/framing/reply_exceptions.h"
@@ -169,14 +170,8 @@ void SaslAuthenticator::fini(void)
std::auto_ptr<SaslAuthenticator> SaslAuthenticator::createAuthenticator(Connection& c )
{
if (c.getBroker().getOptions().auth) {
- // The cluster creates non-authenticated connections for internal shadow connections
- // that are never connected to an external client.
- if ( !c.isAuthenticated() )
- return std::auto_ptr<SaslAuthenticator>(
- new NullAuthenticator(c, c.getBroker().getOptions().requireEncrypted));
- else
- return std::auto_ptr<SaslAuthenticator>(
- new CyrusAuthenticator(c, c.getBroker().getOptions().requireEncrypted));
+ return std::auto_ptr<SaslAuthenticator>(
+ new CyrusAuthenticator(c, c.getBroker().getOptions().requireEncrypted));
} else {
QPID_LOG(debug, "SASL: No Authentication Performed");
return std::auto_ptr<SaslAuthenticator>(new NullAuthenticator(c, c.getBroker().getOptions().requireEncrypted));
diff --git a/qpid/cpp/src/qpid/broker/SecureConnectionFactory.cpp b/qpid/cpp/src/qpid/broker/SecureConnectionFactory.cpp
index e5657fd93e..7bc2c94d1c 100644
--- a/qpid/cpp/src/qpid/broker/SecureConnectionFactory.cpp
+++ b/qpid/cpp/src/qpid/broker/SecureConnectionFactory.cpp
@@ -19,12 +19,14 @@
*
*/
#include "qpid/broker/SecureConnectionFactory.h"
-#include "qpid/framing/ProtocolVersion.h"
+
#include "qpid/amqp_0_10/Connection.h"
+#include "qpid/broker/Broker.h"
#include "qpid/broker/Connection.h"
#include "qpid/broker/SecureConnection.h"
-#include "qpid/sys/SecuritySettings.h"
+#include "qpid/framing/ProtocolVersion.h"
#include "qpid/log/Statement.h"
+#include "qpid/sys/SecuritySettings.h"
namespace qpid {
namespace broker {
diff --git a/qpid/cpp/src/qpid/broker/SemanticState.cpp b/qpid/cpp/src/qpid/broker/SemanticState.cpp
index 0965381fcd..f97d37e893 100644
--- a/qpid/cpp/src/qpid/broker/SemanticState.cpp
+++ b/qpid/cpp/src/qpid/broker/SemanticState.cpp
@@ -20,6 +20,8 @@
*/
#include "qpid/broker/SessionState.h"
+
+#include "qpid/broker/Broker.h"
#include "qpid/broker/Connection.h"
#include "qpid/broker/DeliverableMessage.h"
#include "qpid/broker/DtxAck.h"
@@ -283,7 +285,7 @@ void SemanticState::record(const DeliveryRecord& delivery)
const std::string QPID_SYNC_FREQUENCY("qpid.sync_frequency");
-SemanticState::ConsumerImpl::ConsumerImpl(SemanticState* _parent,
+SemanticStateConsumerImpl::SemanticStateConsumerImpl(SemanticState* _parent,
const string& _name,
Queue::shared_ptr _queue,
bool ack,
@@ -326,12 +328,12 @@ Consumer(_name, type),
}
}
-ManagementObject::shared_ptr SemanticState::ConsumerImpl::GetManagementObject (void) const
+ManagementObject::shared_ptr SemanticStateConsumerImpl::GetManagementObject (void) const
{
return mgmtObject;
}
-Manageable::status_t SemanticState::ConsumerImpl::ManagementMethod (uint32_t methodId, Args&, string&)
+Manageable::status_t SemanticStateConsumerImpl::ManagementMethod (uint32_t methodId, Args&, string&)
{
Manageable::status_t status = Manageable::STATUS_UNKNOWN_METHOD;
@@ -341,16 +343,16 @@ Manageable::status_t SemanticState::ConsumerImpl::ManagementMethod (uint32_t met
}
-OwnershipToken* SemanticState::ConsumerImpl::getSession()
+OwnershipToken* SemanticStateConsumerImpl::getSession()
{
return &(parent->session);
}
-bool SemanticState::ConsumerImpl::deliver(const QueueCursor& cursor, const Message& msg)
+bool SemanticStateConsumerImpl::deliver(const QueueCursor& cursor, const Message& msg)
{
return deliver(cursor, msg, shared_from_this());
}
-bool SemanticState::ConsumerImpl::deliver(const QueueCursor& cursor, const Message& msg, boost::shared_ptr<Consumer> consumer)
+bool SemanticStateConsumerImpl::deliver(const QueueCursor& cursor, const Message& msg, boost::shared_ptr<Consumer> consumer)
{
allocateCredit(msg);
boost::intrusive_ptr<const amqp_0_10::MessageTransfer> transfer = protocols.translate(msg);
@@ -375,12 +377,12 @@ bool SemanticState::ConsumerImpl::deliver(const QueueCursor& cursor, const Messa
return true;
}
-bool SemanticState::ConsumerImpl::filter(const Message&)
+bool SemanticStateConsumerImpl::filter(const Message&)
{
return true;
}
-bool SemanticState::ConsumerImpl::accept(const Message& msg)
+bool SemanticStateConsumerImpl::accept(const Message& msg)
{
// TODO aconway 2009-06-08: if we have byte & message credit but
// checkCredit fails because the message is to big, we should
@@ -393,8 +395,8 @@ bool SemanticState::ConsumerImpl::accept(const Message& msg)
namespace {
struct ConsumerName {
- const SemanticState::ConsumerImpl& consumer;
- ConsumerName(const SemanticState::ConsumerImpl& ci) : consumer(ci) {}
+ const SemanticStateConsumerImpl& consumer;
+ ConsumerName(const SemanticStateConsumerImpl& ci) : consumer(ci) {}
};
ostream& operator<<(ostream& o, const ConsumerName& pc) {
@@ -403,7 +405,7 @@ ostream& operator<<(ostream& o, const ConsumerName& pc) {
}
}
-void SemanticState::ConsumerImpl::allocateCredit(const Message& msg)
+void SemanticStateConsumerImpl::allocateCredit(const Message& msg)
{
Credit original = credit;
boost::intrusive_ptr<const amqp_0_10::MessageTransfer> transfer = protocols.translate(msg);
@@ -413,7 +415,7 @@ void SemanticState::ConsumerImpl::allocateCredit(const Message& msg)
}
-bool SemanticState::ConsumerImpl::checkCredit(const Message& msg)
+bool SemanticStateConsumerImpl::checkCredit(const Message& msg)
{
boost::intrusive_ptr<const amqp_0_10::MessageTransfer> transfer = protocols.translate(msg);
bool enoughCredit = credit.check(1, transfer->getRequiredCredit());
@@ -423,7 +425,7 @@ bool SemanticState::ConsumerImpl::checkCredit(const Message& msg)
return enoughCredit;
}
-SemanticState::ConsumerImpl::~ConsumerImpl()
+SemanticStateConsumerImpl::~SemanticStateConsumerImpl()
{
if (mgmtObject != 0)
mgmtObject->resourceDestroy ();
@@ -496,7 +498,7 @@ void SemanticState::requestDispatch()
i->second->requestDispatch();
}
-void SemanticState::ConsumerImpl::requestDispatch()
+void SemanticStateConsumerImpl::requestDispatch()
{
if (blocked) {
parent->session.getConnection().outputTasks.addOutputTask(this);
@@ -514,7 +516,7 @@ bool SemanticState::complete(DeliveryRecord& delivery)
return delivery.isRedundant();
}
-void SemanticState::ConsumerImpl::complete(DeliveryRecord& delivery)
+void SemanticStateConsumerImpl::complete(DeliveryRecord& delivery)
{
if (!delivery.isComplete()) {
delivery.complete();
@@ -539,7 +541,7 @@ SessionContext& SemanticState::getSession() { return session; }
const SessionContext& SemanticState::getSession() const { return session; }
-const SemanticState::ConsumerImpl::shared_ptr SemanticState::find(const std::string& destination) const
+const SemanticStateConsumerImpl::shared_ptr SemanticState::find(const std::string& destination) const
{
ConsumerImpl::shared_ptr consumer;
if (!find(destination, consumer)) {
@@ -596,7 +598,7 @@ void SemanticState::stop(const std::string& destination)
find(destination)->stop();
}
-void SemanticState::ConsumerImpl::setWindowMode()
+void SemanticStateConsumerImpl::setWindowMode()
{
credit.setWindowMode(true);
if (mgmtObject){
@@ -604,7 +606,7 @@ void SemanticState::ConsumerImpl::setWindowMode()
}
}
-void SemanticState::ConsumerImpl::setCreditMode()
+void SemanticStateConsumerImpl::setCreditMode()
{
credit.setWindowMode(false);
if (mgmtObject){
@@ -612,17 +614,17 @@ void SemanticState::ConsumerImpl::setCreditMode()
}
}
-void SemanticState::ConsumerImpl::addByteCredit(uint32_t value)
+void SemanticStateConsumerImpl::addByteCredit(uint32_t value)
{
credit.addByteCredit(value);
}
-void SemanticState::ConsumerImpl::addMessageCredit(uint32_t value)
+void SemanticStateConsumerImpl::addMessageCredit(uint32_t value)
{
credit.addMessageCredit(value);
}
-bool SemanticState::ConsumerImpl::haveCredit()
+bool SemanticStateConsumerImpl::haveCredit()
{
if (credit) {
return true;
@@ -632,19 +634,19 @@ bool SemanticState::ConsumerImpl::haveCredit()
}
}
-bool SemanticState::ConsumerImpl::doDispatch()
+bool SemanticStateConsumerImpl::doDispatch()
{
return queue->dispatch(shared_from_this());
}
-void SemanticState::ConsumerImpl::flush()
+void SemanticStateConsumerImpl::flush()
{
while(haveCredit() && doDispatch())
;
credit.cancel();
}
-void SemanticState::ConsumerImpl::stop()
+void SemanticStateConsumerImpl::stop()
{
credit.cancel();
}
@@ -699,7 +701,7 @@ void SemanticState::reject(DeliveryId first, DeliveryId last)
getSession().setUnackedCount(unacked.size());
}
-bool SemanticState::ConsumerImpl::doOutput()
+bool SemanticStateConsumerImpl::doOutput()
{
try {
return haveCredit() && doDispatch();
@@ -708,24 +710,24 @@ bool SemanticState::ConsumerImpl::doOutput()
}
}
-void SemanticState::ConsumerImpl::enableNotify()
+void SemanticStateConsumerImpl::enableNotify()
{
Mutex::ScopedLock l(lock);
notifyEnabled = true;
}
-void SemanticState::ConsumerImpl::disableNotify()
+void SemanticStateConsumerImpl::disableNotify()
{
Mutex::ScopedLock l(lock);
notifyEnabled = false;
}
-bool SemanticState::ConsumerImpl::isNotifyEnabled() const {
+bool SemanticStateConsumerImpl::isNotifyEnabled() const {
Mutex::ScopedLock l(lock);
return notifyEnabled;
}
-void SemanticState::ConsumerImpl::notify()
+void SemanticStateConsumerImpl::notify()
{
Mutex::ScopedLock l(lock);
if (notifyEnabled) {
diff --git a/qpid/cpp/src/qpid/broker/SemanticState.h b/qpid/cpp/src/qpid/broker/SemanticState.h
index f873c5c656..24ab30bf00 100644
--- a/qpid/cpp/src/qpid/broker/SemanticState.h
+++ b/qpid/cpp/src/qpid/broker/SemanticState.h
@@ -76,105 +76,16 @@ class SessionState;
* called when a client's socket is ready to write data.
*
*/
+class SemanticStateConsumerImpl;
class SemanticState : private boost::noncopyable {
- public:
- class ConsumerImpl : public Consumer, public sys::OutputTask,
- public boost::enable_shared_from_this<ConsumerImpl>,
- public management::Manageable
- {
- protected:
- mutable qpid::sys::Mutex lock;
- SemanticState* const parent;
- private:
- const boost::shared_ptr<Queue> queue;
- const bool ackExpected;
- const bool acquire;
- bool blocked;
- bool exclusive;
- std::string resumeId;
- const std::string tag; // <destination> from AMQP 0-10 Message.subscribe command
- uint64_t resumeTtl;
- framing::FieldTable arguments;
- Credit credit;
- bool notifyEnabled;
- const int syncFrequency;
- int deliveryCount;
- qmf::org::apache::qpid::broker::Subscription::shared_ptr mgmtObject;
- ProtocolRegistry& protocols;
-
- bool checkCredit(const Message& msg);
- void allocateCredit(const Message& msg);
- bool haveCredit();
-
- protected:
- QPID_BROKER_EXTERN virtual bool doDispatch();
- size_t unacked() { return parent->unacked.size(); }
- QPID_BROKER_EXTERN bool deliver(const QueueCursor&, const Message&, boost::shared_ptr<Consumer>);
-
- public:
- typedef boost::shared_ptr<ConsumerImpl> shared_ptr;
-
- QPID_BROKER_EXTERN ConsumerImpl(SemanticState* parent,
- const std::string& name, boost::shared_ptr<Queue> queue,
- bool ack, SubscriptionType type, bool exclusive,
- const std::string& tag, const std::string& resumeId,
- uint64_t resumeTtl, const framing::FieldTable& arguments);
- QPID_BROKER_EXTERN ~ConsumerImpl();
- QPID_BROKER_EXTERN OwnershipToken* getSession();
- QPID_BROKER_EXTERN bool deliver(const QueueCursor&, const Message&);
- QPID_BROKER_EXTERN bool filter(const Message&);
- QPID_BROKER_EXTERN bool accept(const Message&);
- QPID_BROKER_EXTERN void cancel() {}
-
- QPID_BROKER_EXTERN void disableNotify();
- QPID_BROKER_EXTERN void enableNotify();
- QPID_BROKER_EXTERN void notify();
- QPID_BROKER_EXTERN bool isNotifyEnabled() const;
-
- QPID_BROKER_EXTERN void requestDispatch();
-
- QPID_BROKER_EXTERN void setWindowMode();
- QPID_BROKER_EXTERN void setCreditMode();
- QPID_BROKER_EXTERN void addByteCredit(uint32_t value);
- QPID_BROKER_EXTERN void addMessageCredit(uint32_t value);
- QPID_BROKER_EXTERN void flush();
- QPID_BROKER_EXTERN void stop();
- QPID_BROKER_EXTERN void complete(DeliveryRecord&);
- boost::shared_ptr<Queue> getQueue() const { return queue; }
- bool isBlocked() const { return blocked; }
- bool setBlocked(bool set) { std::swap(set, blocked); return set; }
-
- QPID_BROKER_EXTERN bool doOutput();
-
- Credit& getCredit() { return credit; }
- const Credit& getCredit() const { return credit; }
- bool isAckExpected() const { return ackExpected; }
- bool isAcquire() const { return acquire; }
- bool isExclusive() const { return exclusive; }
- std::string getResumeId() const { return resumeId; };
- const std::string& getTag() const { return tag; }
- uint64_t getResumeTtl() const { return resumeTtl; }
- uint32_t getDeliveryCount() const { return deliveryCount; }
- void setDeliveryCount(uint32_t _deliveryCount) { deliveryCount = _deliveryCount; }
- const framing::FieldTable& getArguments() const { return arguments; }
-
- SemanticState& getParent() { return *parent; }
- const SemanticState& getParent() const { return *parent; }
-
- void acknowledged(const DeliveryRecord&) {}
-
- // manageable entry points
- QPID_BROKER_EXTERN management::ManagementObject::shared_ptr
- GetManagementObject(void) const;
-
- QPID_BROKER_EXTERN management::Manageable::status_t
- ManagementMethod(uint32_t methodId, management::Args& args, std::string& text);
- };
+ friend class SemanticStateConsumerImpl;
+ public:
+ typedef SemanticStateConsumerImpl ConsumerImpl;
typedef std::map<std::string, DtxBuffer::shared_ptr> DtxBufferMap;
private:
- typedef std::map<std::string, ConsumerImpl::shared_ptr> ConsumerImplMap;
+ typedef std::map<std::string, boost::shared_ptr<ConsumerImpl> > ConsumerImplMap;
typedef boost::tuple<std::string, std::string, std::string, std::string> Binding;
typedef std::set<Binding> Bindings;
@@ -201,8 +112,8 @@ class SemanticState : private boost::noncopyable {
bool complete(DeliveryRecord&);
AckRange findRange(DeliveryId first, DeliveryId last);
void requestDispatch();
- void cancel(ConsumerImpl::shared_ptr);
- void disable(ConsumerImpl::shared_ptr);
+ void cancel(boost::shared_ptr<ConsumerImpl>);
+ void disable(boost::shared_ptr<ConsumerImpl>);
void unbindSessionBindings();
public:
@@ -213,8 +124,8 @@ class SemanticState : private boost::noncopyable {
SessionContext& getSession();
const SessionContext& getSession() const;
- const ConsumerImpl::shared_ptr find(const std::string& destination) const;
- bool find(const std::string& destination, ConsumerImpl::shared_ptr&) const;
+ const boost::shared_ptr<ConsumerImpl> find(const std::string& destination) const;
+ bool find(const std::string& destination, boost::shared_ptr<ConsumerImpl>&) const;
/**
* Get named queue, never returns 0.
@@ -264,11 +175,6 @@ class SemanticState : private boost::noncopyable {
void detached();
void closed();
- // Used by cluster to re-create sessions
- template <class F> void eachConsumer(F f) {
- for(ConsumerImplMap::iterator i = consumers.begin(); i != consumers.end(); ++i)
- f(i->second);
- }
DeliveryRecords& getUnacked() { return unacked; }
framing::SequenceSet getAccumulatedAck() const { return accumulatedAck; }
TxBuffer::shared_ptr getTxBuffer() const { return txBuffer; }
@@ -285,6 +191,99 @@ class SemanticState : private boost::noncopyable {
const std::string& routingKey);
};
+class SemanticStateConsumerImpl : public Consumer, public sys::OutputTask,
+ public boost::enable_shared_from_this<SemanticStateConsumerImpl>,
+ public management::Manageable
+{
+ protected:
+ mutable qpid::sys::Mutex lock;
+ SemanticState* const parent;
+ private:
+ const boost::shared_ptr<Queue> queue;
+ const bool ackExpected;
+ const bool acquire;
+ bool blocked;
+ bool exclusive;
+ std::string resumeId;
+ const std::string tag; // <destination> from AMQP 0-10 Message.subscribe command
+ uint64_t resumeTtl;
+ framing::FieldTable arguments;
+ Credit credit;
+ bool notifyEnabled;
+ const int syncFrequency;
+ int deliveryCount;
+ qmf::org::apache::qpid::broker::Subscription::shared_ptr mgmtObject;
+ ProtocolRegistry& protocols;
+
+ bool checkCredit(const Message& msg);
+ void allocateCredit(const Message& msg);
+ bool haveCredit();
+
+ protected:
+ QPID_BROKER_EXTERN virtual bool doDispatch();
+ size_t unacked() { return parent->unacked.size(); }
+ QPID_BROKER_EXTERN bool deliver(const QueueCursor&, const Message&, boost::shared_ptr<Consumer>);
+
+ public:
+ typedef boost::shared_ptr<SemanticStateConsumerImpl> shared_ptr;
+
+ QPID_BROKER_EXTERN SemanticStateConsumerImpl(SemanticState* parent,
+ const std::string& name, boost::shared_ptr<Queue> queue,
+ bool ack, SubscriptionType type, bool exclusive,
+ const std::string& tag, const std::string& resumeId,
+ uint64_t resumeTtl, const framing::FieldTable& arguments);
+ QPID_BROKER_EXTERN ~SemanticStateConsumerImpl();
+ QPID_BROKER_EXTERN OwnershipToken* getSession();
+ QPID_BROKER_EXTERN bool deliver(const QueueCursor&, const Message&);
+ QPID_BROKER_EXTERN bool filter(const Message&);
+ QPID_BROKER_EXTERN bool accept(const Message&);
+ QPID_BROKER_EXTERN void cancel() {}
+
+ QPID_BROKER_EXTERN void disableNotify();
+ QPID_BROKER_EXTERN void enableNotify();
+ QPID_BROKER_EXTERN void notify();
+ QPID_BROKER_EXTERN bool isNotifyEnabled() const;
+
+ QPID_BROKER_EXTERN void requestDispatch();
+
+ QPID_BROKER_EXTERN void setWindowMode();
+ QPID_BROKER_EXTERN void setCreditMode();
+ QPID_BROKER_EXTERN void addByteCredit(uint32_t value);
+ QPID_BROKER_EXTERN void addMessageCredit(uint32_t value);
+ QPID_BROKER_EXTERN void flush();
+ QPID_BROKER_EXTERN void stop();
+ QPID_BROKER_EXTERN void complete(DeliveryRecord&);
+ boost::shared_ptr<Queue> getQueue() const { return queue; }
+ bool isBlocked() const { return blocked; }
+ bool setBlocked(bool set) { std::swap(set, blocked); return set; }
+
+ QPID_BROKER_EXTERN bool doOutput();
+
+ Credit& getCredit() { return credit; }
+ const Credit& getCredit() const { return credit; }
+ bool isAckExpected() const { return ackExpected; }
+ bool isAcquire() const { return acquire; }
+ bool isExclusive() const { return exclusive; }
+ std::string getResumeId() const { return resumeId; };
+ const std::string& getTag() const { return tag; }
+ uint64_t getResumeTtl() const { return resumeTtl; }
+ uint32_t getDeliveryCount() const { return deliveryCount; }
+ void setDeliveryCount(uint32_t _deliveryCount) { deliveryCount = _deliveryCount; }
+ const framing::FieldTable& getArguments() const { return arguments; }
+
+ SemanticState& getParent() { return *parent; }
+ const SemanticState& getParent() const { return *parent; }
+
+ void acknowledged(const DeliveryRecord&) {}
+
+ // manageable entry points
+ QPID_BROKER_EXTERN management::ManagementObject::shared_ptr
+ GetManagementObject(void) const;
+
+ QPID_BROKER_EXTERN management::Manageable::status_t
+ ManagementMethod(uint32_t methodId, management::Args& args, std::string& text);
+};
+
}} // namespace qpid::broker
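
The SemanticState change above hoists the nested ConsumerImpl to a namespace-scope SemanticStateConsumerImpl, keeps a friend declaration so it can still reach the parent's private state, and preserves the old spelling through a typedef so existing SemanticState::ConsumerImpl references keep compiling. A minimal sketch of that hoist-with-compatibility-alias pattern, using illustrative names:

    #include <iostream>

    class SessionConsumer;                      // hoisted class, forward declared

    class Session {
        friend class SessionConsumer;           // hoisted class keeps access to private state
      public:
        typedef SessionConsumer ConsumerImpl;   // compatibility alias for existing callers
        Session() : unacked(3) {}
      private:
        int unacked;
    };

    class SessionConsumer {
        Session* const parent;
      public:
        SessionConsumer(Session* p) : parent(p) {}
        int parentUnacked() const { return parent->unacked; }   // allowed via friendship
    };

    int main() {
        Session session;
        Session::ConsumerImpl consumer(&session);   // old nested-style name still works
        std::cout << "unacked=" << consumer.parentUnacked() << std::endl;
        return 0;
    }
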
diff --git a/qpid/cpp/src/qpid/broker/SessionAdapter.cpp b/qpid/cpp/src/qpid/broker/SessionAdapter.cpp
index b679aebbfa..fd3cf9400f 100644
--- a/qpid/cpp/src/qpid/broker/SessionAdapter.cpp
+++ b/qpid/cpp/src/qpid/broker/SessionAdapter.cpp
@@ -16,7 +16,10 @@
*
*/
#include "qpid/broker/SessionAdapter.h"
+
+#include "qpid/broker/Broker.h"
#include "qpid/broker/Connection.h"
+#include "qpid/broker/DtxTimeout.h"
#include "qpid/broker/Queue.h"
#include "qpid/Exception.h"
#include "qpid/framing/reply_exceptions.h"
diff --git a/qpid/cpp/src/qpid/broker/SessionHandler.cpp b/qpid/cpp/src/qpid/broker/SessionHandler.cpp
index 9888d12be2..a6b008647f 100644
--- a/qpid/cpp/src/qpid/broker/SessionHandler.cpp
+++ b/qpid/cpp/src/qpid/broker/SessionHandler.cpp
@@ -19,8 +19,9 @@
*/
#include "qpid/broker/SessionHandler.h"
-#include "qpid/broker/SessionState.h"
+#include "qpid/broker/Broker.h"
#include "qpid/broker/Connection.h"
+#include "qpid/broker/SessionState.h"
#include "qpid/log/Statement.h"
#include <boost/bind.hpp>
@@ -34,9 +35,7 @@ using namespace qpid::sys;
SessionHandler::SessionHandler(Connection& c, ChannelId ch)
: qpid::amqp_0_10::SessionHandler(&c.getOutput(), ch),
connection(c),
- proxy(out),
- clusterOrderProxy(c.getClusterOrderOutput() ?
- new SetChannelProxy(ch, c.getClusterOrderOutput()) : 0)
+ proxy(out)
{}
SessionHandler::~SessionHandler() {}
@@ -110,10 +109,7 @@ void SessionHandler::attachAs(const std::string& name)
{
SessionId id(connection.getUserId(), name);
SessionState::Configuration config = connection.broker.getSessionManager().getSessionConfig();
- // Delay creating management object till attached(). In a cluster,
- // only the active link broker calls attachAs but all brokers
- // receive the subsequent attached() call.
- session.reset(new SessionState(connection.getBroker(), *this, id, config, true));
+ session.reset(new SessionState(connection.getBroker(), *this, id, config));
sendAttach(false);
}
diff --git a/qpid/cpp/src/qpid/broker/SessionHandler.h b/qpid/cpp/src/qpid/broker/SessionHandler.h
index 21c736fa37..d42b7838bb 100644
--- a/qpid/cpp/src/qpid/broker/SessionHandler.h
+++ b/qpid/cpp/src/qpid/broker/SessionHandler.h
@@ -71,17 +71,6 @@ class SessionHandler : public qpid::amqp_0_10::SessionHandler {
framing::AMQP_ClientProxy& getProxy() { return proxy; }
const framing::AMQP_ClientProxy& getProxy() const { return proxy; }
- /**
- * If commands are sent based on the local time (e.g. in timers), they don't have
- * a well-defined ordering across cluster nodes.
- * This proxy is for sending such commands. In a clustered broker it will take steps
- * to synchronize command order across the cluster. In a stand-alone broker
- * it is just a synonym for getProxy()
- */
- framing::AMQP_ClientProxy& getClusterOrderProxy() {
- return clusterOrderProxy.get() ? *clusterOrderProxy : proxy;
- }
-
virtual void handleDetach();
void attached(const std::string& name);//used by 'pushing' inter-broker bridges
void attachAs(const std::string& name);//used by 'pulling' inter-broker bridges
@@ -108,7 +97,6 @@ class SessionHandler : public qpid::amqp_0_10::SessionHandler {
Connection& connection;
framing::AMQP_ClientProxy proxy;
std::auto_ptr<SessionState> session;
- std::auto_ptr<SetChannelProxy> clusterOrderProxy;
boost::shared_ptr<ErrorListener> errorListener;
};
diff --git a/qpid/cpp/src/qpid/broker/SessionState.cpp b/qpid/cpp/src/qpid/broker/SessionState.cpp
index a6494bc362..0b5b705688 100644
--- a/qpid/cpp/src/qpid/broker/SessionState.cpp
+++ b/qpid/cpp/src/qpid/broker/SessionState.cpp
@@ -53,14 +53,14 @@ namespace _qmf = qmf::org::apache::qpid::broker;
SessionState::SessionState(
Broker& b, SessionHandler& h, const SessionId& id,
- const SessionState::Configuration& config, bool delayManagement)
+ const SessionState::Configuration& config)
: qpid::SessionState(id, config),
broker(b), handler(&h),
semanticState(*this),
adapter(semanticState),
asyncCommandCompleter(new AsyncCommandCompleter(this))
{
- if (!delayManagement) addManagementObject();
+ addManagementObject();
attach(h);
}
@@ -333,15 +333,9 @@ void SessionState::readyToSend() {
Broker& SessionState::getBroker() { return broker; }
// Session resume is not fully implemented so it is useless to set a
-// non-0 timeout. Moreover it creates problems in a cluster because
-// dead sessions are kept and interfere with failover.
+// non-0 timeout.
void SessionState::setTimeout(uint32_t) { }
-framing::AMQP_ClientProxy& SessionState::getClusterOrderProxy() {
- return handler->getClusterOrderProxy();
-}
-
-
// Current received command is an execution.sync command.
// Complete this command only when all preceding commands have completed.
// (called via the invoker() in handleCommand() above)
diff --git a/qpid/cpp/src/qpid/broker/SessionState.h b/qpid/cpp/src/qpid/broker/SessionState.h
index ae28df8026..39954bb3ee 100644
--- a/qpid/cpp/src/qpid/broker/SessionState.h
+++ b/qpid/cpp/src/qpid/broker/SessionState.h
@@ -41,6 +41,7 @@
#include <boost/scoped_ptr.hpp>
#include <boost/intrusive_ptr.hpp>
+#include <queue>
#include <set>
#include <vector>
#include <ostream>
@@ -73,7 +74,7 @@ class SessionState : public qpid::SessionState,
{
public:
SessionState(Broker&, SessionHandler&, const SessionId&,
- const SessionState::Configuration&, bool delayManagement=false);
+ const SessionState::Configuration&);
~SessionState();
bool isAttached() const { return handler; }
@@ -116,11 +117,6 @@ class SessionState : public qpid::SessionState,
void readyToSend();
- // Used by cluster to create replica sessions.
- SemanticState& getSemanticState() { return semanticState; }
- boost::intrusive_ptr<qpid::broker::amqp_0_10::MessageTransfer> getMessageInProgress() { return msgBuilder.getMessage(); }
- SessionAdapter& getSessionAdapter() { return adapter; }
-
const SessionId& getSessionId() const { return getId(); }
// Used by ExecutionHandler sync command processing. Notifies
@@ -153,15 +149,6 @@ class SessionState : public qpid::SessionState,
void sendAcceptAndCompletion();
- /**
- * If commands are sent based on the local time (e.g. in timers), they don't have
- * a well-defined ordering across cluster nodes.
- * This proxy is for sending such commands. In a clustered broker it will take steps
- * to synchronize command order across the cluster. In a stand-alone broker
- * it is just a synonym for getProxy()
- */
- framing::AMQP_ClientProxy& getClusterOrderProxy();
-
Broker& broker;
SessionHandler* handler;
sys::AbsTime expiry; // Used by SessionManager.
diff --git a/qpid/cpp/src/qpid/broker/StatefulQueueObserver.h b/qpid/cpp/src/qpid/broker/StatefulQueueObserver.h
deleted file mode 100644
index c682d460b7..0000000000
--- a/qpid/cpp/src/qpid/broker/StatefulQueueObserver.h
+++ /dev/null
@@ -1,63 +0,0 @@
-#ifndef QPID_BROKER_STATEFULQUEUEOBSERVER_H
-#define QPID_BROKER_STATEFULQUEUEOBSERVER_H
-
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-#include "qpid/broker/QueueObserver.h"
-#include "qpid/framing/FieldTable.h"
-
-namespace qpid {
-namespace broker {
-
-/**
- * Specialized type of QueueObserver that maintains internal state that has to
- * be replicated across clustered brokers.
- */
-class StatefulQueueObserver : public QueueObserver
-{
- public:
- StatefulQueueObserver(std::string _id) : id(_id) {}
- virtual ~StatefulQueueObserver() {}
-
- /** This identifier must uniquely identify this particular observer amoung
- * all observers on a queue. For cluster replication, this id will be used
- * to identify the peer queue observer for synchronization across
- * brokers.
- */
- const std::string& getId() const { return id; }
-
- /** This method should return the observer's internal state as an opaque
- * map.
- */
- virtual void getState(qpid::framing::FieldTable& state ) const = 0;
-
- /** The input map represents the internal state of the peer observer that
- * this observer should synchonize to.
- */
- virtual void setState(const qpid::framing::FieldTable&) = 0;
-
-
- private:
- std::string id;
-};
-}} // namespace qpid::broker
-
-#endif /*!QPID_BROKER_STATEFULQUEUEOBSERVER_H*/
diff --git a/qpid/cpp/src/qpid/broker/TxAccept.h b/qpid/cpp/src/qpid/broker/TxAccept.h
index a59e69a85f..daf192285a 100644
--- a/qpid/cpp/src/qpid/broker/TxAccept.h
+++ b/qpid/cpp/src/qpid/broker/TxAccept.h
@@ -71,9 +71,6 @@ namespace qpid {
virtual void commit() throw();
virtual void rollback() throw();
virtual ~TxAccept(){}
-
- // Used by cluster replication.
- const framing::SequenceSet& getAcked() const { return acked; }
};
}
}
diff --git a/qpid/cpp/src/qpid/broker/windows/SaslAuthenticator.cpp b/qpid/cpp/src/qpid/broker/windows/SaslAuthenticator.cpp
index 40e74be018..c04d037a6e 100644
--- a/qpid/cpp/src/qpid/broker/windows/SaslAuthenticator.cpp
+++ b/qpid/cpp/src/qpid/broker/windows/SaslAuthenticator.cpp
@@ -23,6 +23,7 @@
// accessing authentication mechanisms, analogous to Cyrus SASL.
#include "qpid/broker/Connection.h"
+#include "qpid/broker/Broker.h"
#include "qpid/log/Statement.h"
#include "qpid/framing/reply_exceptions.h"
#include "qpid/framing/FieldValue.h"
diff --git a/qpid/cpp/src/qpid/broker/windows/SslProtocolFactory.cpp b/qpid/cpp/src/qpid/broker/windows/SslProtocolFactory.cpp
index 66fcfdd8b4..a07afe45ae 100644
--- a/qpid/cpp/src/qpid/broker/windows/SslProtocolFactory.cpp
+++ b/qpid/cpp/src/qpid/broker/windows/SslProtocolFactory.cpp
@@ -1,367 +1,379 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-#include "qpid/sys/ProtocolFactory.h"
-
-#include "qpid/Plugin.h"
-#include "qpid/broker/Broker.h"
-#include "qpid/log/Statement.h"
-#include "qpid/sys/AsynchIOHandler.h"
-#include "qpid/sys/ConnectionCodec.h"
-#include "qpid/sys/Socket.h"
-#include "qpid/sys/SocketAddress.h"
-#include "qpid/sys/SystemInfo.h"
-#include "qpid/sys/windows/SslAsynchIO.h"
-
-#include <boost/bind.hpp>
-#include <boost/ptr_container/ptr_vector.hpp>
-#include <memory>
-
-// security.h needs to see this to distinguish from kernel use.
-#define SECURITY_WIN32
-#include <security.h>
-#include <Schnlsp.h>
-#undef SECURITY_WIN32
-
-
-namespace qpid {
-namespace sys {
-
-class Timer;
-
-namespace windows {
-
-struct SslServerOptions : qpid::Options
-{
- std::string certStore;
- std::string certStoreLocation;
- std::string certName;
- uint16_t port;
- bool clientAuth;
-
- SslServerOptions() : qpid::Options("SSL Options"),
- certStore("My"),
- certStoreLocation("CurrentUser"),
- certName("localhost"),
- port(5671),
- clientAuth(false)
- {
- qpid::Address me;
- if (qpid::sys::SystemInfo::getLocalHostname(me))
- certName = me.host;
-
- addOptions()
- ("ssl-cert-store", optValue(certStore, "NAME"), "Local store name from which to obtain certificate")
- ("ssl-cert-store-location", optValue(certStoreLocation, "NAME"),
- "Local store name location for certificates ( CurrentUser | LocalMachine | CurrentService )")
- ("ssl-cert-name", optValue(certName, "NAME"), "Name of the certificate to use")
- ("ssl-port", optValue(port, "PORT"), "Port on which to listen for SSL connections")
- ("ssl-require-client-authentication", optValue(clientAuth),
- "Forces clients to authenticate in order to establish an SSL connection");
- }
-};
-
-class SslProtocolFactory : public qpid::sys::ProtocolFactory {
- boost::ptr_vector<Socket> listeners;
- boost::ptr_vector<AsynchAcceptor> acceptors;
- Timer& brokerTimer;
- uint32_t maxNegotiateTime;
- uint16_t listeningPort;
- const bool tcpNoDelay;
- std::string brokerHost;
- const bool clientAuthSelected;
- std::auto_ptr<qpid::sys::AsynchAcceptor> acceptor;
- ConnectFailedCallback connectFailedCallback;
- CredHandle credHandle;
-
- public:
- SslProtocolFactory(const qpid::broker::Broker::Options& opts, const SslServerOptions&, Timer& timer);
- ~SslProtocolFactory();
- void accept(sys::Poller::shared_ptr, sys::ConnectionCodec::Factory*);
- void connect(sys::Poller::shared_ptr, const std::string& host, const std::string& port,
- sys::ConnectionCodec::Factory*,
- ConnectFailedCallback failed);
-
- uint16_t getPort() const;
-
- private:
- void connectFailed(const qpid::sys::Socket&,
- int err,
- const std::string& msg);
- void established(sys::Poller::shared_ptr,
- const qpid::sys::Socket&,
- sys::ConnectionCodec::Factory*,
- bool isClient);
-};
-
-// Static instance to initialise plugin
-static struct SslPlugin : public Plugin {
- SslServerOptions options;
-
- Options* getOptions() { return &options; }
-
- void earlyInitialize(Target&) {
- }
-
- void initialize(Target& target) {
- broker::Broker* broker = dynamic_cast<broker::Broker*>(&target);
- // Only provide to a Broker
- if (broker) {
- try {
- const broker::Broker::Options& opts = broker->getOptions();
- ProtocolFactory::shared_ptr protocol(new SslProtocolFactory(opts, options, broker->getTimer()));
- QPID_LOG(notice, "Listening for SSL connections on TCP port " << protocol->getPort());
- broker->registerProtocolFactory("ssl", protocol);
- } catch (const std::exception& e) {
- QPID_LOG(error, "Failed to initialise SSL listener: " << e.what());
- }
- }
- }
-} sslPlugin;
-
-namespace {
- // Expand list of Interfaces and addresses to a list of addresses
- std::vector<std::string> expandInterfaces(const std::vector<std::string>& interfaces) {
- std::vector<std::string> addresses;
- // If there are no specific interfaces listed use a single "" to listen on every interface
- if (interfaces.empty()) {
- addresses.push_back("");
- return addresses;
- }
- for (unsigned i = 0; i < interfaces.size(); ++i) {
- const std::string& interface = interfaces[i];
- if (!(SystemInfo::getInterfaceAddresses(interface, addresses))) {
- // We don't have an interface of that name -
- // Check for IPv6 ('[' ']') brackets and remove them
- // then pass to be looked up directly
- if (interface[0]=='[' && interface[interface.size()-1]==']') {
- addresses.push_back(interface.substr(1, interface.size()-2));
- } else {
- addresses.push_back(interface);
- }
- }
- }
- return addresses;
- }
-}
-
-SslProtocolFactory::SslProtocolFactory(const qpid::broker::Broker::Options& opts, const SslServerOptions& options, Timer& timer)
- : brokerTimer(timer),
- maxNegotiateTime(opts.maxNegotiateTime),
- tcpNoDelay(opts.tcpNoDelay),
- clientAuthSelected(options.clientAuth) {
-
- // Make sure that certificate store is good before listening to sockets
- // to avoid having open and listening sockets when there is no cert store
- SecInvalidateHandle(&credHandle);
-
- // Get the certificate for this server.
- DWORD flags = 0;
- std::string certStoreLocation = options.certStoreLocation;
- std::transform(certStoreLocation.begin(), certStoreLocation.end(), certStoreLocation.begin(), ::tolower);
- if (certStoreLocation == "currentuser") {
- flags = CERT_SYSTEM_STORE_CURRENT_USER;
- } else if (certStoreLocation == "localmachine") {
- flags = CERT_SYSTEM_STORE_LOCAL_MACHINE;
- } else if (certStoreLocation == "currentservice") {
- flags = CERT_SYSTEM_STORE_CURRENT_SERVICE;
- } else {
- QPID_LOG(error, "Unrecognised SSL certificate store location: " << options.certStoreLocation
- << " - Using default location");
- }
- HCERTSTORE certStoreHandle;
- certStoreHandle = ::CertOpenStore(CERT_STORE_PROV_SYSTEM_A,
- X509_ASN_ENCODING,
- 0,
- flags |
- CERT_STORE_READONLY_FLAG,
- options.certStore.c_str());
- if (!certStoreHandle)
- throw qpid::Exception(QPID_MSG("Opening store " << options.certStore << " " << qpid::sys::strError(GetLastError())));
-
- PCCERT_CONTEXT certContext;
- certContext = ::CertFindCertificateInStore(certStoreHandle,
- X509_ASN_ENCODING,
- 0,
- CERT_FIND_SUBJECT_STR_A,
- options.certName.c_str(),
- NULL);
- if (certContext == NULL) {
- int err = ::GetLastError();
- ::CertCloseStore(certStoreHandle, 0);
- throw qpid::Exception(QPID_MSG("Locating certificate " << options.certName << " in store " << options.certStore << " " << qpid::sys::strError(GetLastError())));
- throw QPID_WINDOWS_ERROR(err);
- }
-
- SCHANNEL_CRED cred;
- memset(&cred, 0, sizeof(cred));
- cred.dwVersion = SCHANNEL_CRED_VERSION;
- cred.cCreds = 1;
- cred.paCred = &certContext;
- SECURITY_STATUS status = ::AcquireCredentialsHandle(NULL,
- UNISP_NAME,
- SECPKG_CRED_INBOUND,
- NULL,
- &cred,
- NULL,
- NULL,
- &credHandle,
- NULL);
- if (status != SEC_E_OK)
- throw QPID_WINDOWS_ERROR(status);
- ::CertFreeCertificateContext(certContext);
- ::CertCloseStore(certStoreHandle, 0);
-
- std::vector<std::string> addresses = expandInterfaces(opts.listenInterfaces);
- if (addresses.empty()) {
- // We specified some interfaces, but couldn't find addresses for them
- QPID_LOG(warning, "TCP/TCP6: No specified network interfaces found: Not Listening");
- listeningPort = 0;
- }
-
- for (unsigned i = 0; i<addresses.size(); ++i) {
- QPID_LOG(debug, "Using interface: " << addresses[i]);
- SocketAddress sa(addresses[i], boost::lexical_cast<std::string>(options.port));
-
-
- // We must have at least one resolved address
- QPID_LOG(info, "SSL Listening to: " << sa.asString())
- Socket* s = createSocket();
- listeningPort = s->listen(sa, opts.connectionBacklog);
- listeners.push_back(s);
-
- // Try any other resolved addresses
- while (sa.nextAddress()) {
- QPID_LOG(info, "SSL Listening to: " << sa.asString())
- Socket* s = createSocket();
- s->listen(sa, opts.connectionBacklog);
- listeners.push_back(s);
- }
- }
-}
-
-SslProtocolFactory::~SslProtocolFactory() {
- ::FreeCredentialsHandle(&credHandle);
-}
-
-void SslProtocolFactory::connectFailed(const qpid::sys::Socket&,
- int err,
- const std::string& msg) {
- if (connectFailedCallback)
- connectFailedCallback(err, msg);
-}
-
-void SslProtocolFactory::established(sys::Poller::shared_ptr poller,
- const qpid::sys::Socket& s,
- sys::ConnectionCodec::Factory* f,
- bool isClient) {
- sys::AsynchIOHandler* async = new sys::AsynchIOHandler(s.getFullAddress(), f, false);
-
- if (tcpNoDelay) {
- s.setTcpNoDelay();
- QPID_LOG(info,
- "Set TCP_NODELAY on connection to " << s.getPeerAddress());
- }
-
- SslAsynchIO *aio;
- if (isClient) {
- async->setClient();
- aio =
- new qpid::sys::windows::ClientSslAsynchIO(brokerHost,
- s,
- credHandle,
- boost::bind(&AsynchIOHandler::readbuff, async, _1, _2),
- boost::bind(&AsynchIOHandler::eof, async, _1),
- boost::bind(&AsynchIOHandler::disconnect, async, _1),
- boost::bind(&AsynchIOHandler::closedSocket, async, _1, _2),
- boost::bind(&AsynchIOHandler::nobuffs, async, _1),
- boost::bind(&AsynchIOHandler::idle, async, _1));
- }
- else {
- aio =
- new qpid::sys::windows::ServerSslAsynchIO(clientAuthSelected,
- s,
- credHandle,
- boost::bind(&AsynchIOHandler::readbuff, async, _1, _2),
- boost::bind(&AsynchIOHandler::eof, async, _1),
- boost::bind(&AsynchIOHandler::disconnect, async, _1),
- boost::bind(&AsynchIOHandler::closedSocket, async, _1, _2),
- boost::bind(&AsynchIOHandler::nobuffs, async, _1),
- boost::bind(&AsynchIOHandler::idle, async, _1));
- }
-
- async->init(aio, brokerTimer, maxNegotiateTime);
- aio->start(poller);
-}
-
-uint16_t SslProtocolFactory::getPort() const {
- return listeningPort; // Immutable no need for lock.
-}
-
-void SslProtocolFactory::accept(sys::Poller::shared_ptr poller,
- sys::ConnectionCodec::Factory* fact) {
- for (unsigned i = 0; i<listeners.size(); ++i) {
- acceptors.push_back(
- AsynchAcceptor::create(listeners[i],
- boost::bind(&SslProtocolFactory::established, this, poller, _1, fact, false)));
- acceptors[i].start(poller);
- }
-}
-
-void SslProtocolFactory::connect(sys::Poller::shared_ptr poller,
- const std::string& host,
- const std::string& port,
- sys::ConnectionCodec::Factory* fact,
- ConnectFailedCallback failed)
-{
- SCHANNEL_CRED cred;
- memset(&cred, 0, sizeof(cred));
- cred.dwVersion = SCHANNEL_CRED_VERSION;
- SECURITY_STATUS status = ::AcquireCredentialsHandle(NULL,
- UNISP_NAME,
- SECPKG_CRED_OUTBOUND,
- NULL,
- &cred,
- NULL,
- NULL,
- &credHandle,
- NULL);
- if (status != SEC_E_OK)
- throw QPID_WINDOWS_ERROR(status);
-
- brokerHost = host;
- // Note that the following logic does not cause a memory leak.
- // The allocated Socket is freed either by the AsynchConnector
- // upon connection failure or by the AsynchIO upon connection
- // shutdown. The allocated AsynchConnector frees itself when it
- // is no longer needed.
- qpid::sys::Socket* socket = createSocket();
- connectFailedCallback = failed;
- AsynchConnector::create(*socket,
- host,
- port,
- boost::bind(&SslProtocolFactory::established,
- this, poller, _1, fact, true),
- boost::bind(&SslProtocolFactory::connectFailed,
- this, _1, _2, _3));
-}
-
-}}} // namespace qpid::sys::windows
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include "qpid/sys/ProtocolFactory.h"
+
+#include "qpid/Plugin.h"
+#include "qpid/broker/Broker.h"
+#include "qpid/log/Statement.h"
+#include "qpid/sys/AsynchIOHandler.h"
+#include "qpid/sys/ConnectionCodec.h"
+#include "qpid/sys/Socket.h"
+#include "qpid/sys/SocketAddress.h"
+#include "qpid/sys/SystemInfo.h"
+#include "qpid/sys/windows/SslAsynchIO.h"
+
+#include <boost/bind.hpp>
+#include <boost/ptr_container/ptr_vector.hpp>
+#include <memory>
+
+// security.h needs to see this to distinguish from kernel use.
+#define SECURITY_WIN32
+#include <security.h>
+#include <Schnlsp.h>
+#undef SECURITY_WIN32
+
+
+namespace qpid {
+namespace sys {
+
+class Timer;
+
+namespace windows {
+
+struct SslServerOptions : qpid::Options
+{
+ std::string certStore;
+ std::string certStoreLocation;
+ std::string certName;
+ uint16_t port;
+ bool clientAuth;
+
+ SslServerOptions() : qpid::Options("SSL Options"),
+ certStore("My"),
+ certStoreLocation("CurrentUser"),
+ certName("localhost"),
+ port(5671),
+ clientAuth(false)
+ {
+ qpid::Address me;
+ if (qpid::sys::SystemInfo::getLocalHostname(me))
+ certName = me.host;
+
+ addOptions()
+ ("ssl-cert-store", optValue(certStore, "NAME"), "Local store name from which to obtain certificate")
+ ("ssl-cert-store-location", optValue(certStoreLocation, "NAME"),
+ "Local store name location for certificates ( CurrentUser | LocalMachine | CurrentService )")
+ ("ssl-cert-name", optValue(certName, "NAME"), "Name of the certificate to use")
+ ("ssl-port", optValue(port, "PORT"), "Port on which to listen for SSL connections")
+ ("ssl-require-client-authentication", optValue(clientAuth),
+ "Forces clients to authenticate in order to establish an SSL connection");
+ }
+};
+
+class SslProtocolFactory : public qpid::sys::ProtocolFactory {
+ boost::ptr_vector<Socket> listeners;
+ boost::ptr_vector<AsynchAcceptor> acceptors;
+ Timer& brokerTimer;
+ uint32_t maxNegotiateTime;
+ uint16_t listeningPort;
+ const bool tcpNoDelay;
+ std::string brokerHost;
+ const bool clientAuthSelected;
+ std::auto_ptr<qpid::sys::AsynchAcceptor> acceptor;
+ ConnectFailedCallback connectFailedCallback;
+ CredHandle credHandle;
+
+ public:
+ SslProtocolFactory(const qpid::broker::Broker::Options& opts, const SslServerOptions&, Timer& timer);
+ ~SslProtocolFactory();
+ void accept(sys::Poller::shared_ptr, sys::ConnectionCodec::Factory*);
+ void connect(sys::Poller::shared_ptr, const std::string& name, const std::string& host, const std::string& port,
+ sys::ConnectionCodec::Factory*,
+ ConnectFailedCallback failed);
+
+ uint16_t getPort() const;
+
+ private:
+ void connectFailed(const qpid::sys::Socket&,
+ int err,
+ const std::string& msg);
+ void establishedIncoming(sys::Poller::shared_ptr, const qpid::sys::Socket&, sys::ConnectionCodec::Factory*);
+ void establishedOutgoing(sys::Poller::shared_ptr, const qpid::sys::Socket&, sys::ConnectionCodec::Factory*, std::string& );
+ void establishedCommon(sys::Poller::shared_ptr, sys::AsynchIOHandler*, sys::AsynchIO*, const qpid::sys::Socket&);
+};
+
+// Static instance to initialise plugin
+static struct SslPlugin : public Plugin {
+ SslServerOptions options;
+
+ Options* getOptions() { return &options; }
+
+ void earlyInitialize(Target&) {
+ }
+
+ void initialize(Target& target) {
+ broker::Broker* broker = dynamic_cast<broker::Broker*>(&target);
+ // Only provide to a Broker
+ if (broker) {
+ try {
+ const broker::Broker::Options& opts = broker->getOptions();
+ ProtocolFactory::shared_ptr protocol(new SslProtocolFactory(opts, options, broker->getTimer()));
+ QPID_LOG(notice, "Listening for SSL connections on TCP port " << protocol->getPort());
+ broker->registerProtocolFactory("ssl", protocol);
+ } catch (const std::exception& e) {
+ QPID_LOG(error, "Failed to initialise SSL listener: " << e.what());
+ }
+ }
+ }
+} sslPlugin;
+
+namespace {
+ // Expand list of Interfaces and addresses to a list of addresses
+ std::vector<std::string> expandInterfaces(const std::vector<std::string>& interfaces) {
+ std::vector<std::string> addresses;
+ // If there are no specific interfaces listed use a single "" to listen on every interface
+ if (interfaces.empty()) {
+ addresses.push_back("");
+ return addresses;
+ }
+ for (unsigned i = 0; i < interfaces.size(); ++i) {
+ const std::string& interface = interfaces[i];
+ if (!(SystemInfo::getInterfaceAddresses(interface, addresses))) {
+ // We don't have an interface of that name -
+ // Check for IPv6 ('[' ']') brackets and remove them
+ // then pass to be looked up directly
+ if (interface[0]=='[' && interface[interface.size()-1]==']') {
+ addresses.push_back(interface.substr(1, interface.size()-2));
+ } else {
+ addresses.push_back(interface);
+ }
+ }
+ }
+ return addresses;
+ }
+}
+
+SslProtocolFactory::SslProtocolFactory(const qpid::broker::Broker::Options& opts, const SslServerOptions& options, Timer& timer)
+ : brokerTimer(timer),
+ maxNegotiateTime(opts.maxNegotiateTime),
+ tcpNoDelay(opts.tcpNoDelay),
+ clientAuthSelected(options.clientAuth) {
+
+ // Make sure that certificate store is good before listening to sockets
+ // to avoid having open and listening sockets when there is no cert store
+ SecInvalidateHandle(&credHandle);
+
+ // Get the certificate for this server.
+ DWORD flags = 0;
+ std::string certStoreLocation = options.certStoreLocation;
+ std::transform(certStoreLocation.begin(), certStoreLocation.end(), certStoreLocation.begin(), ::tolower);
+ if (certStoreLocation == "currentuser") {
+ flags = CERT_SYSTEM_STORE_CURRENT_USER;
+ } else if (certStoreLocation == "localmachine") {
+ flags = CERT_SYSTEM_STORE_LOCAL_MACHINE;
+ } else if (certStoreLocation == "currentservice") {
+ flags = CERT_SYSTEM_STORE_CURRENT_SERVICE;
+ } else {
+ QPID_LOG(error, "Unrecognised SSL certificate store location: " << options.certStoreLocation
+ << " - Using default location");
+ }
+ HCERTSTORE certStoreHandle;
+ certStoreHandle = ::CertOpenStore(CERT_STORE_PROV_SYSTEM_A,
+ X509_ASN_ENCODING,
+ 0,
+ flags |
+ CERT_STORE_READONLY_FLAG,
+ options.certStore.c_str());
+ if (!certStoreHandle)
+ throw qpid::Exception(QPID_MSG("Opening store " << options.certStore << " " << qpid::sys::strError(GetLastError())));
+
+ PCCERT_CONTEXT certContext;
+ certContext = ::CertFindCertificateInStore(certStoreHandle,
+ X509_ASN_ENCODING,
+ 0,
+ CERT_FIND_SUBJECT_STR_A,
+ options.certName.c_str(),
+ NULL);
+ if (certContext == NULL) {
+ int err = ::GetLastError();
+ ::CertCloseStore(certStoreHandle, 0);
+ // Report the error captured before CertCloseStore(), which may overwrite the thread's last-error value.
+ throw qpid::Exception(QPID_MSG("Locating certificate " << options.certName << " in store " << options.certStore << " " << qpid::sys::strError(err)));
+ }
+
+ SCHANNEL_CRED cred;
+ memset(&cred, 0, sizeof(cred));
+ cred.dwVersion = SCHANNEL_CRED_VERSION;
+ cred.cCreds = 1;
+ cred.paCred = &certContext;
+ SECURITY_STATUS status = ::AcquireCredentialsHandle(NULL,
+ UNISP_NAME,
+ SECPKG_CRED_INBOUND,
+ NULL,
+ &cred,
+ NULL,
+ NULL,
+ &credHandle,
+ NULL);
+ if (status != SEC_E_OK)
+ throw QPID_WINDOWS_ERROR(status);
+ ::CertFreeCertificateContext(certContext);
+ ::CertCloseStore(certStoreHandle, 0);
+
+ std::vector<std::string> addresses = expandInterfaces(opts.listenInterfaces);
+ if (addresses.empty()) {
+ // We specified some interfaces, but couldn't find addresses for them
+ QPID_LOG(warning, "TCP/TCP6: No specified network interfaces found: Not Listening");
+ listeningPort = 0;
+ }
+
+ for (unsigned i = 0; i<addresses.size(); ++i) {
+ QPID_LOG(debug, "Using interface: " << addresses[i]);
+ SocketAddress sa(addresses[i], boost::lexical_cast<std::string>(options.port));
+
+
+ // We must have at least one resolved address
+ QPID_LOG(info, "SSL Listening to: " << sa.asString())
+ Socket* s = createSocket();
+ listeningPort = s->listen(sa, opts.connectionBacklog);
+ listeners.push_back(s);
+
+ // Try any other resolved addresses
+ while (sa.nextAddress()) {
+ QPID_LOG(info, "SSL Listening to: " << sa.asString())
+ Socket* s = createSocket();
+ s->listen(sa, opts.connectionBacklog);
+ listeners.push_back(s);
+ }
+ }
+}
+
+SslProtocolFactory::~SslProtocolFactory() {
+ ::FreeCredentialsHandle(&credHandle);
+}
+
+void SslProtocolFactory::connectFailed(const qpid::sys::Socket&,
+ int err,
+ const std::string& msg) {
+ if (connectFailedCallback)
+ connectFailedCallback(err, msg);
+}
+
+void SslProtocolFactory::establishedIncoming(sys::Poller::shared_ptr poller,
+ const qpid::sys::Socket& s,
+ sys::ConnectionCodec::Factory* f) {
+ sys::AsynchIOHandler* async = new sys::AsynchIOHandler(s.getFullAddress(), f, false, false);
+
+ sys::AsynchIO *aio =
+ new qpid::sys::windows::ServerSslAsynchIO(
+ clientAuthSelected,
+ s,
+ credHandle,
+ boost::bind(&AsynchIOHandler::readbuff, async, _1, _2),
+ boost::bind(&AsynchIOHandler::eof, async, _1),
+ boost::bind(&AsynchIOHandler::disconnect, async, _1),
+ boost::bind(&AsynchIOHandler::closedSocket, async, _1, _2),
+ boost::bind(&AsynchIOHandler::nobuffs, async, _1),
+ boost::bind(&AsynchIOHandler::idle, async, _1));
+
+ establishedCommon(poller, async, aio, s);
+}
+
+void SslProtocolFactory::establishedOutgoing(sys::Poller::shared_ptr poller,
+ const qpid::sys::Socket& s,
+ sys::ConnectionCodec::Factory* f,
+ std::string& name) {
+ sys::AsynchIOHandler* async = new sys::AsynchIOHandler(name, f, true, false);
+
+ sys::AsynchIO *aio =
+ new qpid::sys::windows::ClientSslAsynchIO(
+ brokerHost,
+ s,
+ credHandle,
+ boost::bind(&AsynchIOHandler::readbuff, async, _1, _2),
+ boost::bind(&AsynchIOHandler::eof, async, _1),
+ boost::bind(&AsynchIOHandler::disconnect, async, _1),
+ boost::bind(&AsynchIOHandler::closedSocket, async, _1, _2),
+ boost::bind(&AsynchIOHandler::nobuffs, async, _1),
+ boost::bind(&AsynchIOHandler::idle, async, _1));
+
+ establishedCommon(poller, async, aio, s);
+}
+
+void SslProtocolFactory::establishedCommon(sys::Poller::shared_ptr poller,
+ sys::AsynchIOHandler* async,
+ sys::AsynchIO* aio,
+ const qpid::sys::Socket& s) {
+ if (tcpNoDelay) {
+ s.setTcpNoDelay();
+ QPID_LOG(info,
+ "Set TCP_NODELAY on connection to " << s.getPeerAddress());
+ }
+
+ async->init(aio, brokerTimer, maxNegotiateTime);
+ aio->start(poller);
+}
+
+uint16_t SslProtocolFactory::getPort() const {
+ return listeningPort; // Immutable no need for lock.
+}
+
+void SslProtocolFactory::accept(sys::Poller::shared_ptr poller,
+ sys::ConnectionCodec::Factory* fact) {
+ for (unsigned i = 0; i<listeners.size(); ++i) {
+ acceptors.push_back(
+ AsynchAcceptor::create(listeners[i],
+ boost::bind(&SslProtocolFactory::establishedIncoming, this, poller, _1, fact)));
+ acceptors[i].start(poller);
+ }
+}
+
+void SslProtocolFactory::connect(sys::Poller::shared_ptr poller,
+ const std::string& name,
+ const std::string& host,
+ const std::string& port,
+ sys::ConnectionCodec::Factory* fact,
+ ConnectFailedCallback failed)
+{
+ SCHANNEL_CRED cred;
+ memset(&cred, 0, sizeof(cred));
+ cred.dwVersion = SCHANNEL_CRED_VERSION;
+ SECURITY_STATUS status = ::AcquireCredentialsHandle(NULL,
+ UNISP_NAME,
+ SECPKG_CRED_OUTBOUND,
+ NULL,
+ &cred,
+ NULL,
+ NULL,
+ &credHandle,
+ NULL);
+ if (status != SEC_E_OK)
+ throw QPID_WINDOWS_ERROR(status);
+
+ brokerHost = host;
+ // Note that the following logic does not cause a memory leak.
+ // The allocated Socket is freed either by the AsynchConnector
+ // upon connection failure or by the AsynchIO upon connection
+ // shutdown. The allocated AsynchConnector frees itself when it
+ // is no longer needed.
+ qpid::sys::Socket* socket = createSocket();
+ connectFailedCallback = failed;
+ AsynchConnector::create(*socket,
+ host,
+ port,
+ boost::bind(&SslProtocolFactory::establishedOutgoing,
+ this, poller, _1, fact, name),
+ boost::bind(&SslProtocolFactory::connectFailed,
+ this, _1, _2, _3));
+}
+
+}}} // namespace qpid::sys::windows
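As an aside, the interface-expansion rule used by the anonymous-namespace helper above can be shown with a small standalone sketch. This is not the qpid code: lookupInterface is a hypothetical stand-in for qpid::sys::SystemInfo::getInterfaceAddresses(), and the sketch only illustrates how unknown interface names fall through to literal addresses, with IPv6 brackets stripped.

// Standalone illustration only; compile with any C++ compiler.
#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-in: always reports "no interface of that name".
static bool lookupInterface(const std::string&, std::vector<std::string>&) {
    return false;
}

static std::vector<std::string> expandInterfaces(const std::vector<std::string>& interfaces) {
    std::vector<std::string> addresses;
    if (interfaces.empty()) {              // empty list => listen on every interface
        addresses.push_back("");
        return addresses;
    }
    for (unsigned i = 0; i < interfaces.size(); ++i) {
        const std::string& name = interfaces[i];
        if (!lookupInterface(name, addresses)) {
            // Not a known interface name: strip IPv6 brackets, otherwise pass through as a literal address.
            if (name.size() > 1 && name[0] == '[' && name[name.size()-1] == ']')
                addresses.push_back(name.substr(1, name.size()-2));
            else
                addresses.push_back(name);
        }
    }
    return addresses;
}

int main() {
    std::vector<std::string> in;
    in.push_back("[::1]");
    in.push_back("192.168.1.10");
    std::vector<std::string> out = expandInterfaces(in);
    for (unsigned i = 0; i < out.size(); ++i)
        std::cout << out[i] << std::endl;  // prints "::1" then "192.168.1.10"
    return 0;
}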
diff --git a/qpid/cpp/src/qpid/client/LoadPlugins.cpp b/qpid/cpp/src/qpid/client/LoadPlugins.cpp
index d76e1d458e..c5d8924014 100644
--- a/qpid/cpp/src/qpid/client/LoadPlugins.cpp
+++ b/qpid/cpp/src/qpid/client/LoadPlugins.cpp
@@ -48,7 +48,7 @@ struct LoadtimeInitialise {
for (vector<string>::iterator iter = moduleOptions.load.begin();
iter != moduleOptions.load.end();
iter++)
- qpid::tryShlib (iter->data(), false);
+ qpid::tryShlib (*iter);
if (!moduleOptions.noLoad) {
bool isDefault = defaultPath == moduleOptions.loadDir;
diff --git a/qpid/cpp/src/qpid/client/SessionImpl.cpp b/qpid/cpp/src/qpid/client/SessionImpl.cpp
index 91e728d5ae..01e614e041 100644
--- a/qpid/cpp/src/qpid/client/SessionImpl.cpp
+++ b/qpid/cpp/src/qpid/client/SessionImpl.cpp
@@ -7,9 +7,9 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
- *
+ *
* http://www.apache.org/licenses/LICENSE-2.0
- *
+ *
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -61,9 +61,7 @@ SessionImpl::SessionImpl(const std::string& name, boost::shared_ptr<ConnectionIm
ioHandler(*this),
proxy(ioHandler),
nextIn(0),
- nextOut(0),
- doClearDeliveryPropertiesExchange(true),
- autoDetach(true)
+ nextOut(0)
{
channel.next = connection.get();
}
@@ -72,12 +70,10 @@ SessionImpl::~SessionImpl() {
{
Lock l(state);
if (state != DETACHED && state != DETACHING) {
- if (autoDetach) {
- QPID_LOG(warning, "Session was not closed cleanly: " << id);
- // Inform broker but don't wait for detached as that deadlocks.
- // The detached will be ignored as the channel will be invalid.
- try { detach(); } catch (...) {} // ignore errors.
- }
+ QPID_LOG(warning, "Session was not closed cleanly: " << id);
+ // Inform broker but don't wait for detached as that deadlocks.
+ // The detached will be ignored as the channel will be invalid.
+ try { detach(); } catch (...) {} // ignore errors.
setState(DETACHED);
handleClosed();
state.waitWaiters();
@@ -136,10 +132,10 @@ void SessionImpl::resume(boost::shared_ptr<ConnectionImpl>) // user thread
void SessionImpl::suspend() //user thread
{
Lock l(state);
- detach();
+ detach();
}
-void SessionImpl::detach() //call with lock held
+void SessionImpl::detach() //call with lock held
{
if (state == ATTACHED) {
setState(DETACHING);
@@ -149,8 +145,8 @@ void SessionImpl::detach() //call with lock held
uint16_t SessionImpl::getChannel() const // user thread
-{
- return channel;
+{
+ return channel;
}
void SessionImpl::setChannel(uint16_t c) // user thread
@@ -182,7 +178,7 @@ void SessionImpl::waitForCompletionImpl(const SequenceNumber& id) //call with lo
bool SessionImpl::isComplete(const SequenceNumber& id)
{
- Lock l(state);
+ Lock l(state);
return !incompleteOut.contains(id);
}
@@ -219,7 +215,7 @@ framing::SequenceNumber SessionImpl::getCompleteUpTo()
return --firstIncomplete;
}
-struct MarkCompleted
+struct MarkCompleted
{
const SequenceNumber& id;
SequenceSet& completedIn;
@@ -230,7 +226,7 @@ struct MarkCompleted
{
if (id >= end) {
completedIn.add(start, end);
- } else if (id >= start) {
+ } else if (id >= start) {
completedIn.add(start, id);
}
}
@@ -244,13 +240,13 @@ void SessionImpl::markCompleted(const SequenceSet& ids, bool notifyPeer)
completedIn.add(ids);
if (notifyPeer) {
sendCompletion();
- }
+ }
}
void SessionImpl::markCompleted(const SequenceNumber& id, bool cumulative, bool notifyPeer)
{
Lock l(state);
- if (cumulative) {
+ if (cumulative) {
//everything in incompleteIn less than or equal to id is now complete
MarkCompleted f(id, completedIn);
incompleteIn.for_each(f);
@@ -260,11 +256,11 @@ void SessionImpl::markCompleted(const SequenceNumber& id, bool cumulative, bool
incompleteIn.remove(completedIn);
} else if (incompleteIn.contains(id)) {
incompleteIn.remove(id);
- completedIn.add(id);
+ completedIn.add(id);
}
if (notifyPeer) {
sendCompletion();
- }
+ }
}
void SessionImpl::setException(const sys::ExceptionHolder& ex) {
@@ -310,42 +306,24 @@ namespace {
struct SendContentFn {
FrameHandler& handler;
void operator()(const AMQFrame& f) {
- if (!f.getMethod())
+ if (!f.getMethod())
handler(const_cast<AMQFrame&>(f));
}
SendContentFn(FrameHandler& h) : handler(h) {}
};
-// Adaptor to make FrameSet look like MethodContent; used in cluster update client
-struct MethodContentAdaptor : MethodContent
-{
- AMQHeaderBody header;
- const std::string content;
-
- MethodContentAdaptor(const FrameSet& f) : header(*f.getHeaders()), content(f.getContent()) {}
-
- const AMQHeaderBody& getHeader() const
- {
- return header;
- }
- const std::string& getData() const
- {
- return content;
- }
-};
-
}
-
-Future SessionImpl::send(const AMQBody& command, const FrameSet& content, bool reframe) {
+
+Future SessionImpl::send(const AMQBody& command, const FrameSet& content) {
Acquire a(sendLock);
SequenceNumber id = nextOut++;
{
Lock l(state);
- checkOpen();
+ checkOpen();
incompleteOut.add(id);
}
Future f(id);
- if (command.getMethod()->resultExpected()) {
+ if (command.getMethod()->resultExpected()) {
Lock l(state);
//result listener must be set before the command is sent
f.setFutureResult(results.listenForResult(id));
@@ -353,14 +331,8 @@ Future SessionImpl::send(const AMQBody& command, const FrameSet& content, bool r
AMQFrame frame(command);
frame.setEof(false);
handleOut(frame);
-
- if (reframe) {
- MethodContentAdaptor c(content);
- sendContent(c);
- } else {
- SendContentFn send(out);
- content.map(send);
- }
+ SendContentFn send(out);
+ content.map(send);
return f;
}
@@ -375,11 +347,11 @@ Future SessionImpl::sendCommand(const AMQBody& command, const MethodContent* con
SequenceNumber id = nextOut++;
{
Lock l(state);
- checkOpen();
+ checkOpen();
incompleteOut.add(id);
}
Future f(id);
- if (command.getMethod()->resultExpected()) {
+ if (command.getMethod()->resultExpected()) {
Lock l(state);
//result listener must be set before the command is sent
f.setFutureResult(results.listenForResult(id));
@@ -399,23 +371,13 @@ void SessionImpl::sendContent(const MethodContent& content)
{
AMQFrame header(content.getHeader());
- // doClearDeliveryPropertiesExchange is set by cluster update client so
- // it can send messages with delivery-properties.exchange set.
- //
- if (doClearDeliveryPropertiesExchange) {
- // Normal client is not allowed to set the delivery-properties.exchange
- // so clear it here.
- AMQHeaderBody* headerp = static_cast<AMQHeaderBody*>(header.getBody());
- if (headerp && headerp->get<DeliveryProperties>())
- headerp->get<DeliveryProperties>(true)->clearExchangeFlag();
- }
header.setFirstSegment(false);
uint64_t data_length = content.getData().length();
if(data_length > 0){
header.setLastSegment(false);
- handleOut(header);
+ handleOut(header);
/*Note: end of frame marker included in overhead but not in size*/
- const uint32_t frag_size = maxFrameSize - AMQFrame::frameOverhead();
+ const uint32_t frag_size = maxFrameSize - AMQFrame::frameOverhead();
if(data_length < frag_size){
AMQFrame frame((AMQContentBody(content.getData())));
@@ -442,7 +404,7 @@ void SessionImpl::sendContent(const MethodContent& content)
}
}
} else {
- handleOut(header);
+ handleOut(header);
}
}
@@ -462,7 +424,7 @@ bool isContentFrame(AMQFrame& frame)
{
AMQBody* body = frame.getBody();
uint8_t type = body->type();
- return type == HEADER_BODY || type == CONTENT_BODY || isMessageMethod(body);
+ return type == HEADER_BODY || type == CONTENT_BODY || isMessageMethod(body);
}
void SessionImpl::handleIn(AMQFrame& frame) // network thread
@@ -585,7 +547,7 @@ void SessionImpl::timeout(uint32_t t)
void SessionImpl::commandPoint(const framing::SequenceNumber& id, uint64_t offset)
{
if (offset) throw NotImplementedException("Non-zero byte offset not yet supported for command-point");
-
+
Lock l(state);
nextIn = id;
}
@@ -677,10 +639,10 @@ void SessionImpl::exception(uint16_t errorCode,
{
Lock l(state);
setExceptionLH(createSessionException(errorCode, description));
- QPID_LOG(warning, "Exception received from broker: " << exceptionHolder.what()
+ QPID_LOG(warning, "Exception received from broker: " << exceptionHolder.what()
<< " [caused by " << commandId << " " << classCode << ":" << commandCode << "]");
- if (detachedLifetime)
+ if (detachedLifetime)
setTimeout(0);
}
@@ -748,6 +710,4 @@ boost::shared_ptr<ConnectionImpl> SessionImpl::getConnection()
return connection;
}
-void SessionImpl::disableAutoDetach() { autoDetach = false; }
-
}}
diff --git a/qpid/cpp/src/qpid/client/SessionImpl.h b/qpid/cpp/src/qpid/client/SessionImpl.h
index 4f9213a00a..e6ea8e6b90 100644
--- a/qpid/cpp/src/qpid/client/SessionImpl.h
+++ b/qpid/cpp/src/qpid/client/SessionImpl.h
@@ -87,15 +87,7 @@ public:
Future send(const framing::AMQBody& command);
Future send(const framing::AMQBody& command, const framing::MethodContent& content);
- /**
- * This method takes the content as a FrameSet; if reframe=false,
- * the caller is resposnible for ensuring that the header and
- * content frames in that set are correct for this connection
- * (right flags, right fragmentation etc). If reframe=true, then
- * the header and content from the frameset will be copied and
- * reframed correctly for the connection.
- */
- QPID_CLIENT_EXTERN Future send(const framing::AMQBody& command, const framing::FrameSet& content, bool reframe=false);
+ QPID_CLIENT_EXTERN Future send(const framing::AMQBody& command, const framing::FrameSet& content);
void sendRawFrame(framing::AMQFrame& frame);
Demux& getDemux();
@@ -125,11 +117,6 @@ public:
*/
boost::shared_ptr<ConnectionImpl> getConnection();
- void setDoClearDeliveryPropertiesExchange(bool b=true) { doClearDeliveryPropertiesExchange = b; }
-
- /** Suppress sending detach in destructor. Used by cluster to build session state */
- void disableAutoDetach();
-
private:
enum State {
INACTIVE,
@@ -225,10 +212,6 @@ private:
SessionState sessionState;
- bool doClearDeliveryPropertiesExchange;
-
- bool autoDetach;
-
friend class client::SessionHandler;
};
diff --git a/qpid/cpp/src/qpid/client/SslConnector.cpp b/qpid/cpp/src/qpid/client/SslConnector.cpp
index 3a146592e6..11707eb3f7 100644
--- a/qpid/cpp/src/qpid/client/SslConnector.cpp
+++ b/qpid/cpp/src/qpid/client/SslConnector.cpp
@@ -90,9 +90,11 @@ class SslConnector : public Connector
void connect(const std::string& host, const std::string& port);
void connected(const sys::Socket&);
void connectFailed(const std::string& msg);
+
void close();
void send(framing::AMQFrame& frame);
- void abort() {} // TODO: Need to fix for heartbeat timeouts to work
+ void abort();
+ void connectAborted();
void setInputHandler(framing::InputHandler* handler);
void setShutdownHandler(sys::ShutdownHandler* handler);
@@ -224,6 +226,24 @@ void SslConnector::socketClosed(AsynchIO&, const Socket&) {
shutdownHandler->shutdown();
}
+void SslConnector::connectAborted() {
+ connector->stop();
+ connectFailed("Connection timedout");
+}
+
+void SslConnector::abort() {
+ // Can't abort a closed connection
+ if (!closed) {
+ if (aio) {
+ // Established connection
+ aio->requestCallback(boost::bind(&SslConnector::eof, this, _1));
+ } else if (connector) {
+ // We're still connecting
+ connector->requestCallback(boost::bind(&SslConnector::connectAborted, this));
+ }
+ }
+}
+
void SslConnector::setInputHandler(InputHandler* handler){
input = handler;
}
diff --git a/qpid/cpp/src/qpid/framing/FrameSet.h b/qpid/cpp/src/qpid/framing/FrameSet.h
index 3b9f60950b..9640abb7ac 100644
--- a/qpid/cpp/src/qpid/framing/FrameSet.h
+++ b/qpid/cpp/src/qpid/framing/FrameSet.h
@@ -1,3 +1,5 @@
+#ifndef QPID_FRAMING_FRAMESET_H
+#define QPID_FRAMING_FRAMESET_H
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
@@ -18,6 +20,7 @@
* under the License.
*
*/
+
#include <string>
#include "qpid/InlineVector.h"
#include "qpid/framing/amqp_framing.h"
@@ -25,9 +28,6 @@
#include "qpid/framing/SequenceNumber.h"
#include "qpid/CommonImportExport.h"
-#ifndef _FrameSet_
-#define _FrameSet_
-
namespace qpid {
namespace framing {
@@ -117,4 +117,4 @@ public:
}
-#endif
+#endif /*!QPID_FRAMING_FRAMESET_H*/
diff --git a/qpid/cpp/src/qpid/ha/Backup.cpp b/qpid/cpp/src/qpid/ha/Backup.cpp
index 3024656daa..2aabf6342b 100644
--- a/qpid/cpp/src/qpid/ha/Backup.cpp
+++ b/qpid/cpp/src/qpid/ha/Backup.cpp
@@ -20,9 +20,12 @@
*/
#include "Backup.h"
#include "BrokerReplicator.h"
+#include "ConnectionObserver.h"
#include "HaBroker.h"
+#include "Primary.h"
#include "ReplicatingSubscription.h"
#include "Settings.h"
+#include "StatusCheck.h"
#include "qpid/Url.h"
#include "qpid/amqp_0_10/Codecs.h"
#include "qpid/broker/Bridge.h"
@@ -44,28 +47,38 @@ using namespace framing;
using namespace broker;
using types::Variant;
using std::string;
+using sys::Mutex;
Backup::Backup(HaBroker& hb, const Settings& s) :
- logPrefix("Backup: "), haBroker(hb), broker(hb.getBroker()), settings(s)
+ logPrefix("Backup: "), membership(hb.getMembership()), stopped(false),
+ haBroker(hb), broker(hb.getBroker()), settings(s),
+ statusCheck(
+ new StatusCheck(
+ logPrefix, broker.getLinkHearbeatInterval(), hb.getBrokerInfo()))
{
- // Empty brokerUrl means delay initialization until seBrokertUrl() is called.
- if (!s.brokerUrl.empty()) initialize(Url(s.brokerUrl));
+ // Set link properties to tag outgoing links.
+ framing::FieldTable linkProperties = broker.getLinkClientProperties();
+ linkProperties.setTable(
+ ConnectionObserver::BACKUP_TAG, hb.getBrokerInfo().asFieldTable());
+ broker.setLinkClientProperties(linkProperties);
}
-void Backup::initialize(const Url& brokers) {
- if (brokers.empty()) throw Url::Invalid("HA broker URL is empty");
- QPID_LOG(info, logPrefix << "Connecting to cluster, broker URL: " << brokers);
- string protocol = brokers[0].protocol.empty() ? "tcp" : brokers[0].protocol;
- types::Uuid uuid(true);
- // Declare the link
- std::pair<Link::shared_ptr, bool> result = broker.getLinks().declare(
- broker::QPID_NAME_PREFIX + string("ha.link.") + uuid.str(),
- brokers[0].host, brokers[0].port, protocol,
- false, // durable
- settings.mechanism, settings.username, settings.password,
- false); // no amq.failover - don't want to use client URL.
- {
- sys::Mutex::ScopedLock l(lock);
+void Backup::setBrokerUrl(const Url& brokers) {
+ if (brokers.empty()) return;
+ Mutex::ScopedLock l(lock);
+ if (stopped) return;
+ if (haBroker.getStatus() == JOINING) statusCheck->setUrl(brokers);
+ if (!link) { // Not yet initialized
+ QPID_LOG(info, logPrefix << "Connecting to cluster, broker URL: " << brokers);
+ string protocol = brokers[0].protocol.empty() ? "tcp" : brokers[0].protocol;
+ types::Uuid uuid(true);
+ std::pair<Link::shared_ptr, bool> result;
+ result = broker.getLinks().declare(
+ broker::QPID_NAME_PREFIX + string("ha.link.") + uuid.str(),
+ brokers[0].host, brokers[0].port, protocol,
+ false, // durable
+ settings.mechanism, settings.username, settings.password,
+ false); // no amq.failover - don't want to use client URL.
link = result.first;
replicator.reset(new BrokerReplicator(haBroker, link));
replicator->initialize();
@@ -74,8 +87,9 @@ void Backup::initialize(const Url& brokers) {
link->setUrl(brokers); // Outside the lock, once set link doesn't change.
}
-Backup::~Backup() {
- QPID_LOG(debug, logPrefix << "Backup shutting down.");
+void Backup::stop(Mutex::ScopedLock&) {
+ if (stopped) return;
+ QPID_LOG(debug, logPrefix << "Leaving backup role.");
if (link) link->close();
if (replicator.get()) {
broker.getExchanges().destroy(replicator->getName());
@@ -84,31 +98,45 @@ Backup::~Backup() {
}
}
-// Called via management.
-void Backup::setBrokerUrl(const Url& url) {
- // Ignore empty URLs seen during start-up for some tests.
- if (url.empty()) return;
- bool linkSet = false;
+Role* Backup::recover(Mutex::ScopedLock&) {
+ BrokerInfo::Set backups;
{
- sys::Mutex::ScopedLock l(lock);
- linkSet = link;
+ Mutex::ScopedLock l(lock);
+ if (stopped) return 0;
+ stop(l); // Stop backup activity before starting primary.
+ QPID_LOG(notice, "Promoting to primary: " << haBroker.getBrokerInfo());
+ // Reset membership before allowing backups to connect.
+ backups = membership.otherBackups();
+ membership.clear();
+ return new Primary(haBroker, backups);
}
- if (linkSet)
- link->setUrl(url); // Outside lock, once set link doesn't change
- else
- initialize(url); // Deferred initialization
}
-void Backup::setStatus(BrokerStatus status) {
- switch (status) {
- case READY:
- QPID_LOG(notice, logPrefix << "Ready to become primary.");
+Role* Backup::promote() {
+ Mutex::ScopedLock l(lock);
+ if (stopped) return 0;
+ switch (haBroker.getStatus()) {
+ case JOINING:
+ if (statusCheck->canPromote()) return recover(l);
+ else {
+ QPID_LOG(error,
+ logPrefix << "Joining active cluster, cannot be promoted.");
+ throw Exception("Joining active cluster, cannot be promoted.");
+ }
break;
case CATCHUP:
- QPID_LOG(notice, logPrefix << "Catching up on primary, cannot be promoted.");
+ QPID_LOG(error, logPrefix << "Still catching up, cannot be promoted.");
+ throw Exception("Still catching up, cannot be promoted.");
+ break;
+ case READY: return recover(l); break;
default:
- assert(0);
+ assert(0); // Not a valid state for the Backup role.
}
}
+Backup::~Backup() {
+ Mutex::ScopedLock l(lock);
+ stop(l);
+}
+
}} // namespace qpid::ha
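Note the "lock witness" idiom used in Backup above: private helpers such as stop(Mutex::ScopedLock&) and recover(Mutex::ScopedLock&) take a ScopedLock reference purely to document, and require, that the caller already holds the object's lock. A minimal sketch of the idiom, with illustrative class names rather than the real qpid types:

// Standalone illustration only; the ScopedLock here is a stand-in for qpid::sys::Mutex::ScopedLock.
#include <iostream>

namespace sketch {

struct Mutex {
    struct ScopedLock {
        explicit ScopedLock(Mutex&) {}   // the real version would lock in the ctor, unlock in the dtor
    };
};

class Worker {
  public:
    void stopPublic() {
        Mutex::ScopedLock l(lock);       // take the lock once, at the public entry point
        stop(l);                         // helpers can only be called with the lock witness in hand
    }
  private:
    void stop(Mutex::ScopedLock&) {      // cannot be called without a ScopedLock in scope
        std::cout << "stopped under lock" << std::endl;
    }
    Mutex lock;
};

} // namespace sketch

int main() {
    sketch::Worker w;
    w.stopPublic();
    return 0;
}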
diff --git a/qpid/cpp/src/qpid/ha/Backup.h b/qpid/cpp/src/qpid/ha/Backup.h
index 4f2d5babde..4943ca5e2e 100644
--- a/qpid/cpp/src/qpid/ha/Backup.h
+++ b/qpid/cpp/src/qpid/ha/Backup.h
@@ -22,6 +22,7 @@
*
*/
+#include "Role.h"
#include "Settings.h"
#include "qpid/Url.h"
#include "qpid/sys/Mutex.h"
@@ -38,30 +39,41 @@ namespace ha {
class Settings;
class BrokerReplicator;
class HaBroker;
+class StatusCheck;
+class Membership;
/**
- * State associated with a backup broker. Manages connections to primary.
+ * Backup role: Manages connections to primary, replicates management events and queue contents.
*
* THREAD SAFE
*/
-class Backup
+class Backup : public Role
{
public:
Backup(HaBroker&, const Settings&);
~Backup();
+
+ std::string getLogPrefix() const { return logPrefix; }
+
void setBrokerUrl(const Url&);
- void setStatus(BrokerStatus);
+
+ Role* promote();
private:
- void initialize(const Url&);
+ void stop(sys::Mutex::ScopedLock&);
+ Role* recover(sys::Mutex::ScopedLock&);
+
std::string logPrefix;
+ Membership& membership;
sys::Mutex lock;
+ bool stopped;
HaBroker& haBroker;
broker::Broker& broker;
Settings settings;
boost::shared_ptr<broker::Link> link;
boost::shared_ptr<BrokerReplicator> replicator;
+ std::auto_ptr<StatusCheck> statusCheck;
};
}} // namespace qpid::ha
diff --git a/qpid/cpp/src/qpid/ha/BrokerInfo.cpp b/qpid/cpp/src/qpid/ha/BrokerInfo.cpp
index 5a8dfa512a..a0c4af88ca 100644
--- a/qpid/cpp/src/qpid/ha/BrokerInfo.cpp
+++ b/qpid/cpp/src/qpid/ha/BrokerInfo.cpp
@@ -45,8 +45,9 @@ using framing::FieldTable;
BrokerInfo::BrokerInfo() : port(0), status(JOINING) {}
-BrokerInfo::BrokerInfo(const std::string& host, uint16_t port_, const types::Uuid& id) :
- hostName(host), port(port_), systemId(id), status(JOINING)
+BrokerInfo::BrokerInfo(const types::Uuid& id, BrokerStatus s,
+ const std::string& host, uint16_t port_) :
+ hostName(host), port(port_), systemId(id), status(s)
{
updateLogId();
}
diff --git a/qpid/cpp/src/qpid/ha/BrokerInfo.h b/qpid/cpp/src/qpid/ha/BrokerInfo.h
index 5131e23be2..6142e03f98 100644
--- a/qpid/cpp/src/qpid/ha/BrokerInfo.h
+++ b/qpid/cpp/src/qpid/ha/BrokerInfo.h
@@ -44,7 +44,8 @@ class BrokerInfo
typedef std::map<types::Uuid, BrokerInfo> Map;
BrokerInfo();
- BrokerInfo(const std::string& host, uint16_t port_, const types::Uuid& id);
+ BrokerInfo(const types::Uuid& id, BrokerStatus,
+ const std::string& host=std::string(), uint16_t port=0);
BrokerInfo(const framing::FieldTable& ft) { assign(ft); }
BrokerInfo(const types::Variant::Map& m) { assign(m); }
diff --git a/qpid/cpp/src/qpid/ha/BrokerReplicator.cpp b/qpid/cpp/src/qpid/ha/BrokerReplicator.cpp
index 8f3eb3bf90..37c2a2d6b4 100644
--- a/qpid/cpp/src/qpid/ha/BrokerReplicator.cpp
+++ b/qpid/cpp/src/qpid/ha/BrokerReplicator.cpp
@@ -227,7 +227,9 @@ class BrokerReplicator::UpdateTracker {
typedef std::set<std::string> Names;
typedef boost::function<void (const std::string&)> CleanFn;
- UpdateTracker(CleanFn f, const ReplicationTest& rt) : cleanFn(f), repTest(rt) {}
+ UpdateTracker(const std::string& type_, // "queue" or "exchange"
+ CleanFn f, const ReplicationTest& rt)
+ : type(type_), cleanFn(f), repTest(rt) {}
/** Destructor cleans up remaining initial queues. */
~UpdateTracker() {
@@ -264,6 +266,12 @@ class BrokerReplicator::UpdateTracker {
}
private:
+ void clean(const std::string& name) {
+ QPID_LOG(info, "Backup updated, deleting " << type << " " << name);
+ cleanFn(name);
+ }
+
+ std::string type;
Names initial, events;
CleanFn cleanFn;
ReplicationTest repTest;
@@ -353,13 +361,15 @@ void BrokerReplicator::initializeBridge(Bridge& bridge, SessionHandler& sessionH
initialized = true;
exchangeTracker.reset(
- new UpdateTracker(boost::bind(&BrokerReplicator::deleteExchange, this, _1),
+ new UpdateTracker("exchange",
+ boost::bind(&BrokerReplicator::deleteExchange, this, _1),
replicationTest));
exchanges.eachExchange(
boost::bind(&UpdateTracker::addExchange, exchangeTracker.get(), _1));
queueTracker.reset(
- new UpdateTracker(boost::bind(&BrokerReplicator::deleteQueue, this, _1, true),
+ new UpdateTracker("queue",
+ boost::bind(&BrokerReplicator::deleteQueue, this, _1, true),
replicationTest));
queues.eachQueue(boost::bind(&UpdateTracker::addQueue, queueTracker.get(), _1));
@@ -394,7 +404,7 @@ void BrokerReplicator::route(Deliverable& msg) {
// We transition from JOINING->CATCHUP on the first message received from the primary.
// Until now we couldn't be sure if we had a good connection to the primary.
if (haBroker.getStatus() == JOINING) {
- haBroker.setStatus(CATCHUP);
+ haBroker.getMembership().setStatus(CATCHUP);
QPID_LOG(notice, logPrefix << "Connected to primary " << primary);
}
Variant::List list;
@@ -439,9 +449,10 @@ void BrokerReplicator::route(Deliverable& msg) {
}
}
} catch (const std::exception& e) {
- QPID_LOG(critical, logPrefix << "Configuration replication failed: " << e.what()
- << ": while handling: " << list);
- haBroker.shutdown();
+ haBroker.shutdown(
+ QPID_MSG(logPrefix << "Configuration replication failed: "
+ << e.what() << ": while handling: " << list));
throw;
}
}
@@ -571,7 +582,7 @@ void BrokerReplicator::doEventUnbind(Variant::Map& values) {
void BrokerReplicator::doEventMembersUpdate(Variant::Map& values) {
Variant::List members = values[MEMBERS].asList();
- haBroker.setMembership(members);
+ setMembership(members);
}
void BrokerReplicator::doEventSubscribe(Variant::Map& values) {
@@ -723,11 +734,12 @@ void BrokerReplicator::doResponseHaBroker(Variant::Map& values) {
if (mine != primary)
throw Exception(QPID_MSG("Replicate default on backup (" << mine
<< ") does not match primary (" << primary << ")"));
- haBroker.setMembership(values[MEMBERS].asList());
+ setMembership(values[MEMBERS].asList());
} catch (const std::exception& e) {
- QPID_LOG(critical, logPrefix << "Invalid HA Broker response: " << e.what()
- << ": " << values);
- haBroker.shutdown();
+ haBroker.shutdown(
+ QPID_MSG(logPrefix << "Invalid HA Broker response: " << e.what()
+ << ": " << values));
+
throw;
}
}
@@ -848,7 +860,7 @@ namespace {
}
void BrokerReplicator::disconnected() {
- QPID_LOG(info, logPrefix << "Disconnected");
+ QPID_LOG(info, logPrefix << "Disconnected from " << primary);
connection = 0;
// Clean up auto-delete queues
vector<boost::shared_ptr<Exchange> > collect;
@@ -859,4 +871,25 @@ void BrokerReplicator::disconnected() {
boost::bind(&BrokerReplicator::autoDeleteCheck, this, _1));
}
+void BrokerReplicator::setMembership(const Variant::List& brokers) {
+ Membership& membership(haBroker.getMembership());
+ membership.assign(brokers);
+ // Check if the primary has signalled a change in my status:
+ // from CATCHUP to READY when we are caught up.
+ // from READY to CATCHUP if we time out during fail-over.
+ BrokerInfo info;
+ if (membership.get(membership.getSelf(), info)) {
+ BrokerStatus oldStatus = haBroker.getStatus();
+ BrokerStatus newStatus = info.getStatus();
+ if (oldStatus == CATCHUP && newStatus == READY) {
+ QPID_LOG(info, logPrefix << logPrefix << "Caught-up and ready");
+ haBroker.getMembership().setStatus(READY);
+ }
+ else if (oldStatus == READY && newStatus == CATCHUP) {
+ QPID_LOG(info, logPrefix << logPrefix << "No longer ready, catching up");
+ haBroker.getMembership().setStatus(CATCHUP);
+ }
+ }
+}
+
}} // namespace broker
diff --git a/qpid/cpp/src/qpid/ha/BrokerReplicator.h b/qpid/cpp/src/qpid/ha/BrokerReplicator.h
index 9134163575..9161227c0f 100644
--- a/qpid/cpp/src/qpid/ha/BrokerReplicator.h
+++ b/qpid/cpp/src/qpid/ha/BrokerReplicator.h
@@ -136,6 +136,8 @@ class BrokerReplicator : public broker::Exchange,
void autoDeleteCheck(boost::shared_ptr<broker::Exchange>);
void disconnected();
+ void setMembership(const types::Variant::List&); // Set membership from list.
+
std::string logPrefix;
std::string userId, remoteHost;
ReplicationTest replicationTest;
diff --git a/qpid/cpp/src/qpid/ha/HaBroker.cpp b/qpid/cpp/src/qpid/ha/HaBroker.cpp
index 8c16a5ea38..c4cb640f97 100644
--- a/qpid/cpp/src/qpid/ha/HaBroker.cpp
+++ b/qpid/cpp/src/qpid/ha/HaBroker.cpp
@@ -26,7 +26,7 @@
#include "QueueReplicator.h"
#include "ReplicatingSubscription.h"
#include "Settings.h"
-#include "StatusCheck.h"
+#include "StandAlone.h"
#include "qpid/amqp_0_10/Codecs.h"
#include "qpid/Exception.h"
#include "qpid/broker/Broker.h"
@@ -42,7 +42,6 @@
#include "qmf/org/apache/qpid/ha/ArgsHaBrokerReplicate.h"
#include "qmf/org/apache/qpid/ha/ArgsHaBrokerSetBrokersUrl.h"
#include "qmf/org/apache/qpid/ha/ArgsHaBrokerSetPublicUrl.h"
-#include "qmf/org/apache/qpid/ha/EventMembersUpdate.h"
#include "qpid/log/Statement.h"
#include <boost/shared_ptr.hpp>
@@ -56,23 +55,23 @@ using types::Variant;
using types::Uuid;
using sys::Mutex;
using boost::shared_ptr;
+using boost::dynamic_pointer_cast;
// Called in Plugin::earlyInitialize
HaBroker::HaBroker(broker::Broker& b, const Settings& s)
- : logPrefix("Broker: "),
- broker(b),
- systemId(broker.getSystem()->getSystemId().data()),
+ : systemId(b.getSystem()->getSystemId().data()),
settings(s),
+ replicationTest(s.replicateDefault.get()),
+ broker(b),
observer(new ConnectionObserver(*this, systemId)),
- status(STANDALONE),
- membership(systemId),
- replicationTest(s.replicateDefault.get())
+ role(new StandAlone),
+ membership(BrokerInfo(systemId, STANDALONE), *this)
{
// If we are joining a cluster we must start excluding clients now,
// otherwise there's a window for a client to connect before we get to
// initialize()
if (settings.cluster) {
- QPID_LOG(debug, logPrefix << "Rejecting client connections.");
+ QPID_LOG(debug, role->getLogPrefix() << "Rejecting client connections.");
shared_ptr<broker::ConnectionObserver> excluder(new BackupConnectionExcluder);
observer->setObserver(excluder, "Backup: ");
broker.getConnectionObservers().add(observer);
@@ -86,13 +85,16 @@ bool isNone(const std::string& x) { return x.empty() || x == NONE; }
// Called in Plugin::initialize
void HaBroker::initialize() {
-
// FIXME aconway 2012-07-19: assumes there's a TCP transport with a meaningful port.
- brokerInfo = BrokerInfo(
- broker.getSystem()->getNodeName(),
- broker.getPort(broker::Broker::TCP_TRANSPORT),
- systemId);
- QPID_LOG(notice, logPrefix << "Initializing: " << brokerInfo);
+ membership.add(
+ BrokerInfo(
+ membership.getSelf(),
+ settings.cluster ? JOINING : membership.getStatus(),
+ broker.getSystem()->getNodeName(),
+ broker.getPort(broker::Broker::TCP_TRANSPORT)
+ )
+ );
+ QPID_LOG(notice, role->getLogPrefix() << "Initializing: " << membership.getInfo());
// Set up the management object.
ManagementAgent* ma = broker.getManagementAgent();
@@ -103,83 +105,34 @@ void HaBroker::initialize() {
mgmtObject->set_replicateDefault(settings.replicateDefault.str());
mgmtObject->set_systemId(systemId);
ma->addObject(mgmtObject);
+ membership.setMgmtObject(mgmtObject);
// Register a factory for replicating subscriptions.
broker.getConsumerFactories().add(
- boost::shared_ptr<ReplicatingSubscription::Factory>(
+ shared_ptr<ReplicatingSubscription::Factory>(
new ReplicatingSubscription::Factory()));
// If we are in a cluster, start as backup in joining state.
if (settings.cluster) {
- status = JOINING;
- backup.reset(new Backup(*this, settings));
+ assert(membership.getStatus() == JOINING);
+ role.reset(new Backup(*this, settings));
broker.getKnownBrokers = boost::bind(&HaBroker::getKnownBrokers, this);
- statusCheck.reset(new StatusCheck(logPrefix, broker.getLinkHearbeatInterval(), brokerInfo));
if (!isNone(settings.publicUrl)) setPublicUrl(Url(settings.publicUrl));
if (!isNone(settings.brokerUrl)) setBrokerUrl(Url(settings.brokerUrl));
}
-
-
- // NOTE: lock is not needed in a constructor, but create one
- // to pass to functions that have a ScopedLock parameter.
- Mutex::ScopedLock l(lock);
- statusChanged(l);
}
HaBroker::~HaBroker() {
- QPID_LOG(notice, logPrefix << "Shut down");
+ QPID_LOG(notice, role->getLogPrefix() << "Shut down");
broker.getConnectionObservers().remove(observer);
}
-// Called from ManagementMethod on promote.
-void HaBroker::recover() {
- boost::shared_ptr<Backup> b;
- {
- Mutex::ScopedLock l(lock);
- // No longer replicating, close link. Note: link must be closed before we
- // setStatus(RECOVERING) as that will remove our broker info from the
- // outgoing link properties so we won't recognize self-connects.
- b = backup;
- backup.reset(); // Reset in lock.
- }
- b.reset(); // Call destructor outside of lock.
- BrokerInfo::Set backups;
- {
- Mutex::ScopedLock l(lock);
- setStatus(RECOVERING, l);
- backups = membership.otherBackups();
- membership.reset(brokerInfo);
- // Drop the lock, new Primary may call back on activate.
- }
- // Outside of lock, may call back on activate()
- primary.reset(new Primary(*this, backups)); // Starts primary-ready check.
-}
-
-// Called back from Primary active check.
-void HaBroker::activate() { setStatus(ACTIVE); }
-
Manageable::status_t HaBroker::ManagementMethod (uint32_t methodId, Args& args, string&) {
switch (methodId) {
case _qmf::HaBroker::METHOD_PROMOTE: {
- switch (getStatus()) {
- case JOINING:
- if (statusCheck->canPromote())
- recover();
- else {
- QPID_LOG(error, logPrefix << "Cluster already active, cannot be promoted");
- throw Exception("Cluster already active, cannot be promoted.");
- }
- break;
- case CATCHUP:
- QPID_LOG(error, logPrefix << "Still catching up, cannot be promoted.");
- throw Exception("Still catching up, cannot be promoted.");
- break;
- case READY: recover(); break;
- case RECOVERING: break;
- case ACTIVE: break;
- case STANDALONE: break;
- }
- break;
+ Role* r = role->promote();
+ if (r) role.reset(r);
+ break;
}
case _qmf::HaBroker::METHOD_SETBROKERSURL: {
setBrokerUrl(Url(dynamic_cast<_qmf::ArgsHaBrokerSetBrokersUrl&>(args).i_url));
@@ -192,10 +145,10 @@ Manageable::status_t HaBroker::ManagementMethod (uint32_t methodId, Args& args,
case _qmf::HaBroker::METHOD_REPLICATE: {
_qmf::ArgsHaBrokerReplicate& bq_args =
dynamic_cast<_qmf::ArgsHaBrokerReplicate&>(args);
- QPID_LOG(debug, logPrefix << "Replicate individual queue "
+ QPID_LOG(debug, role->getLogPrefix() << "Replicate individual queue "
<< bq_args.i_queue << " from " << bq_args.i_broker);
- boost::shared_ptr<broker::Queue> queue = broker.getQueues().get(bq_args.i_queue);
+ shared_ptr<broker::Queue> queue = broker.getQueues().get(bq_args.i_queue);
Url url(bq_args.i_broker);
string protocol = url[0].protocol.empty() ? "tcp" : url[0].protocol;
Uuid uuid(true);
@@ -205,10 +158,10 @@ Manageable::status_t HaBroker::ManagementMethod (uint32_t methodId, Args& args,
false, // durable
settings.mechanism, settings.username, settings.password,
false); // no amq.failover - don't want to use client URL.
- boost::shared_ptr<broker::Link> link = result.first;
+ shared_ptr<broker::Link> link = result.first;
link->setUrl(url);
// Create a queue replicator
- boost::shared_ptr<QueueReplicator> qr(
+ shared_ptr<QueueReplicator> qr(
new QueueReplicator(*this, queue, link));
qr->activate();
broker.getExchanges().registerExchange(qr);
@@ -227,20 +180,17 @@ void HaBroker::setPublicUrl(const Url& url) {
mgmtObject->set_publicUrl(url.str());
knownBrokers.clear();
knownBrokers.push_back(url);
- QPID_LOG(debug, logPrefix << "Setting public URL to: " << url);
+ QPID_LOG(debug, role->getLogPrefix() << "Setting public URL to: " << url);
}
void HaBroker::setBrokerUrl(const Url& url) {
- boost::shared_ptr<Backup> b;
{
Mutex::ScopedLock l(lock);
brokerUrl = url;
mgmtObject->set_brokersUrl(brokerUrl.str());
- QPID_LOG(info, logPrefix << "Brokers URL set to: " << url);
- if (status == JOINING && statusCheck.get()) statusCheck->setUrl(url);
- b = backup;
+ QPID_LOG(info, role->getLogPrefix() << "Brokers URL set to: " << url);
}
- if (b) b->setBrokerUrl(url); // Oustside lock, avoid deadlock
+ role->setBrokerUrl(url); // Outside lock
}
std::vector<Url> HaBroker::getKnownBrokers() const {
@@ -248,123 +198,14 @@ std::vector<Url> HaBroker::getKnownBrokers() const {
return knownBrokers;
}
-void HaBroker::shutdown() {
- QPID_LOG(critical, logPrefix << "Critical error, shutting down.");
+void HaBroker::shutdown(const std::string& message) {
+ QPID_LOG(critical, message);
broker.shutdown();
+ throw Exception(message);
}
BrokerStatus HaBroker::getStatus() const {
- Mutex::ScopedLock l(lock);
- return status;
-}
-
-void HaBroker::setStatus(BrokerStatus newStatus) {
- Mutex::ScopedLock l(lock);
- setStatus(newStatus, l);
-}
-
-namespace {
-bool checkTransition(BrokerStatus from, BrokerStatus to) {
- // Legal state transitions. Initial state is JOINING, ACTIVE is terminal.
- static const BrokerStatus TRANSITIONS[][2] = {
- { JOINING, CATCHUP }, // Connected to primary
- { JOINING, RECOVERING }, // Chosen as initial primary.
- { CATCHUP, READY }, // Caught up all queues, ready to take over.
- { READY, RECOVERING }, // Chosen as new primary
- { READY, CATCHUP }, // Timed out failing over, demoted to catch-up.
- { RECOVERING, ACTIVE } // All expected backups are ready
- };
- static const size_t N = sizeof(TRANSITIONS)/sizeof(TRANSITIONS[0]);
- for (size_t i = 0; i < N; ++i) {
- if (TRANSITIONS[i][0] == from && TRANSITIONS[i][1] == to)
- return true;
- }
- return false;
-}
-} // namespace
-
-void HaBroker::setStatus(BrokerStatus newStatus, Mutex::ScopedLock& l) {
- QPID_LOG(info, logPrefix << "Status change: "
- << printable(status) << " -> " << printable(newStatus));
- bool legal = checkTransition(status, newStatus);
- assert(legal);
- if (!legal) {
- QPID_LOG(critical, logPrefix << "Illegal state transition: "
- << printable(status) << " -> " << printable(newStatus));
- shutdown();
- }
- status = newStatus;
- statusChanged(l);
-}
-
-void HaBroker::statusChanged(Mutex::ScopedLock& l) {
- mgmtObject->set_status(printable(status).str());
- brokerInfo.setStatus(status);
- setLinkProperties(l);
-}
-
-void HaBroker::membershipUpdated(Mutex::ScopedLock&) {
- QPID_LOG(info, logPrefix << "Membership changed: " << membership);
- Variant::List brokers = membership.asList();
- mgmtObject->set_members(brokers);
- broker.getManagementAgent()->raiseEvent(_qmf::EventMembersUpdate(brokers));
-}
-
-void HaBroker::setMembership(const Variant::List& brokers) {
- boost::shared_ptr<Backup> b;
- {
- Mutex::ScopedLock l(lock);
- membership.assign(brokers);
- QPID_LOG(info, logPrefix << "Membership update: " << membership);
- BrokerInfo info;
- // Update my status to what the primary says it is. The primary can toggle
- // status between READY and CATCHUP based on the state of our subscriptions.
- if (membership.get(systemId, info) && status != info.getStatus()) {
- setStatus(info.getStatus(), l);
- b = backup;
- }
- membershipUpdated(l);
- }
- if (b) b->setStatus(status); // Oustside lock, avoid deadlock
-}
-
-void HaBroker::resetMembership(const BrokerInfo& b) {
- Mutex::ScopedLock l(lock);
- membership.reset(b);
- QPID_LOG(debug, logPrefix << "Membership reset to: " << membership);
- membershipUpdated(l);
-}
-
-void HaBroker::addBroker(const BrokerInfo& b) {
- Mutex::ScopedLock l(lock);
- membership.add(b);
- QPID_LOG(debug, logPrefix << "Membership add: " << b);
- membershipUpdated(l);
-}
-
-void HaBroker::removeBroker(const Uuid& id) {
- Mutex::ScopedLock l(lock);
- BrokerInfo info;
- if (membership.get(id, info)) {
- membership.remove(id);
- QPID_LOG(debug, logPrefix << "Membership remove: " << info);
- membershipUpdated(l);
- }
-}
-
-void HaBroker::setLinkProperties(Mutex::ScopedLock&) {
- framing::FieldTable linkProperties = broker.getLinkClientProperties();
- if (isBackup(status)) {
- // If this is a backup then any outgoing links are backup
- // links and need to be tagged.
- linkProperties.setTable(ConnectionObserver::BACKUP_TAG, brokerInfo.asFieldTable());
- }
- else {
- // If this is a primary then any outgoing links are federation links
- // and should not be tagged.
- linkProperties.erase(ConnectionObserver::BACKUP_TAG);
- }
- broker.setLinkClientProperties(linkProperties);
+ return membership.getStatus();
}
}} // namespace qpid::ha
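The HaBroker changes above replace the explicit backup/primary members with a single Role that promote() can swap out ("Role* r = role->promote(); if (r) role.reset(r);"). A minimal sketch of that delegation pattern, using illustrative class names rather than the real qpid::ha types:

// Standalone illustration only; promote() returns 0 to stay in the current role,
// or a replacement Role that the holder installs.
#include <iostream>
#include <string>

struct Role {
    virtual ~Role() {}
    virtual std::string getLogPrefix() const = 0;
    virtual Role* promote() = 0;                  // 0 => no role change
};

struct PrimaryRole : Role {
    std::string getLogPrefix() const { return "Primary: "; }
    Role* promote() { return 0; }                 // already primary, nothing to do
};

struct BackupRole : Role {
    std::string getLogPrefix() const { return "Backup: "; }
    Role* promote() { return new PrimaryRole(); } // hand back the replacement role
};

int main() {
    Role* role = new BackupRole();
    std::cout << role->getLogPrefix() << "active" << std::endl;
    Role* r = role->promote();
    if (r) { delete role; role = r; }             // swap in the new role, as role.reset(r) does in HaBroker
    std::cout << role->getLogPrefix() << "active" << std::endl;
    delete role;
    return 0;
}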
diff --git a/qpid/cpp/src/qpid/ha/HaBroker.h b/qpid/cpp/src/qpid/ha/HaBroker.h
index 76dbf57a0c..7ba023129c 100644
--- a/qpid/cpp/src/qpid/ha/HaBroker.h
+++ b/qpid/cpp/src/qpid/ha/HaBroker.h
@@ -53,12 +53,15 @@ namespace ha {
class Backup;
class ConnectionObserver;
class Primary;
-class StatusCheck;
-
+class Role;
/**
* HA state and actions associated with a HA broker. Holds all the management info.
*
* THREAD SAFE: may be called in arbitrary broker IO or timer threads.
+
+ * NOTE: HaBroker and Role subclasses follow this lock hierarchy:
+ * - HaBroker MUST NOT hold its own lock across calls to Role subclasses.
+ * - Role subclasses MAY hold their locks across calls to HaBroker.
*/
class HaBroker : public management::Manageable
{
@@ -78,60 +81,41 @@ class HaBroker : public management::Manageable
broker::Broker& getBroker() { return broker; }
const Settings& getSettings() const { return settings; }
- /** Shut down the broker. Caller should log a critical error message. */
- void shutdown();
+ /** Shut down the broker because of a critical error. */
+ void shutdown(const std::string& message);
BrokerStatus getStatus() const;
- void setStatus(BrokerStatus);
- void activate();
-
- Backup* getBackup() { return backup.get(); }
ReplicationTest getReplicationTest() const { return replicationTest; }
-
boost::shared_ptr<ConnectionObserver> getObserver() { return observer; }
- const BrokerInfo& getBrokerInfo() const { return brokerInfo; }
-
- void setMembership(const types::Variant::List&); // Set membership from list.
- void resetMembership(const BrokerInfo& b); // Reset to contain just one member.
- void addBroker(const BrokerInfo& b); // Add a broker to the membership.
- void removeBroker(const types::Uuid& id); // Remove a broker from membership.
-
+ BrokerInfo getBrokerInfo() const { return membership.getInfo(); }
+ Membership& getMembership() { return membership; }
types::Uuid getSystemId() const { return systemId; }
private:
+
void setPublicUrl(const Url&);
void setBrokerUrl(const Url&);
void updateClientUrl(sys::Mutex::ScopedLock&);
- bool isPrimary(sys::Mutex::ScopedLock&) { return !backup.get(); }
-
- void setStatus(BrokerStatus, sys::Mutex::ScopedLock&);
- void recover();
- void statusChanged(sys::Mutex::ScopedLock&);
- void setLinkProperties(sys::Mutex::ScopedLock&);
-
std::vector<Url> getKnownBrokers() const;
- void membershipUpdated(sys::Mutex::ScopedLock&);
-
- std::string logPrefix;
- broker::Broker& broker;
- types::Uuid systemId;
+ // Immutable members
+ const types::Uuid systemId;
const Settings settings;
+ // Member variables protected by lock
mutable sys::Mutex lock;
- boost::shared_ptr<ConnectionObserver> observer; // Used by Backup and Primary
- boost::shared_ptr<Backup> backup;
- boost::shared_ptr<Primary> primary;
- qmf::org::apache::qpid::ha::HaBroker::shared_ptr mgmtObject;
Url publicUrl, brokerUrl;
std::vector<Url> knownBrokers;
- BrokerStatus status;
- BrokerInfo brokerInfo;
- Membership membership;
ReplicationTest replicationTest;
- std::auto_ptr<StatusCheck> statusCheck;
+
+ // Independently thread-safe member variables
+ broker::Broker& broker;
+ qmf::org::apache::qpid::ha::HaBroker::shared_ptr mgmtObject;
+ boost::shared_ptr<ConnectionObserver> observer; // Used by Backup and Primary
+ boost::shared_ptr<Role> role;
+ Membership membership;
};
}} // namespace qpid::ha
diff --git a/qpid/cpp/src/qpid/ha/HaPlugin.cpp b/qpid/cpp/src/qpid/ha/HaPlugin.cpp
index 5edb98c135..d26b466847 100644
--- a/qpid/cpp/src/qpid/ha/HaPlugin.cpp
+++ b/qpid/cpp/src/qpid/ha/HaPlugin.cpp
@@ -86,7 +86,7 @@ struct HaPlugin : public Plugin {
void initialize(Plugin::Target& target) {
broker::Broker* broker = dynamic_cast<broker::Broker*>(&target);
- if (broker) haBroker->initialize();
+ if (broker && haBroker.get()) haBroker->initialize();
}
void finalize() {
diff --git a/qpid/cpp/src/qpid/ha/Membership.cpp b/qpid/cpp/src/qpid/ha/Membership.cpp
index 74580f9b1e..d33d57c37f 100644
--- a/qpid/cpp/src/qpid/ha/Membership.cpp
+++ b/qpid/cpp/src/qpid/ha/Membership.cpp
@@ -19,6 +19,12 @@
*
*/
#include "Membership.h"
+#include "HaBroker.h"
+#include "qpid/broker/Broker.h"
+#include "qpid/management/ManagementAgent.h"
+#include "qpid/types/Variant.h"
+#include "qmf/org/apache/qpid/ha/EventMembersUpdate.h"
+#include "qmf/org/apache/qpid/ha/HaBroker.h"
#include <boost/bind.hpp>
#include <iostream>
#include <iterator>
@@ -26,37 +32,57 @@
namespace qpid {
namespace ha {
+namespace _qmf = ::qmf::org::apache::qpid::ha;
-void Membership::reset(const BrokerInfo& b) {
+using sys::Mutex;
+using types::Variant;
+
+Membership::Membership(const BrokerInfo& info, HaBroker& b)
+ : haBroker(b), self(info.getSystemId())
+{
+ brokers[self] = info;
+}
+
+void Membership::clear() {
+ Mutex::ScopedLock l(lock);
+ BrokerInfo me = brokers[self];
brokers.clear();
- brokers[b.getSystemId()] = b;
+ brokers[self] = me;
}
void Membership::add(const BrokerInfo& b) {
+ Mutex::ScopedLock l(lock);
brokers[b.getSystemId()] = b;
+ update(l);
}
void Membership::remove(const types::Uuid& id) {
+ Mutex::ScopedLock l(lock);
BrokerInfo::Map::iterator i = brokers.find(id);
if (i != brokers.end()) {
brokers.erase(i);
- }
+ update(l);
+ }
}
bool Membership::contains(const types::Uuid& id) {
+ Mutex::ScopedLock l(lock);
return brokers.find(id) != brokers.end();
}
void Membership::assign(const types::Variant::List& list) {
+ Mutex::ScopedLock l(lock);
brokers.clear();
for (types::Variant::List::const_iterator i = list.begin(); i != list.end(); ++i) {
BrokerInfo b(i->asMap());
brokers[b.getSystemId()] = b;
}
+ update(l);
}
types::Variant::List Membership::asList() const {
+ Mutex::ScopedLock l(lock);
types::Variant::List list;
for (BrokerInfo::Map::const_iterator i = brokers.begin(); i != brokers.end(); ++i)
list.push_back(i->second.asMap());
@@ -64,6 +90,7 @@ types::Variant::List Membership::asList() const {
}
BrokerInfo::Set Membership::otherBackups() const {
+ Mutex::ScopedLock l(lock);
BrokerInfo::Set result;
for (BrokerInfo::Map::const_iterator i = brokers.begin(); i != brokers.end(); ++i)
if (i->second.getStatus() == READY && i->second.getSystemId() != self)
@@ -71,15 +98,84 @@ BrokerInfo::Set Membership::otherBackups() const {
return result;
}
-bool Membership::get(const types::Uuid& id, BrokerInfo& result) {
- BrokerInfo::Map::iterator i = brokers.find(id);
+bool Membership::get(const types::Uuid& id, BrokerInfo& result) const {
+ Mutex::ScopedLock l(lock);
+ BrokerInfo::Map::const_iterator i = brokers.find(id);
if (i == brokers.end()) return false;
result = i->second;
return true;
}
-std::ostream& operator<<(std::ostream& o, const Membership& members) {
- return o << members.brokers;
+void Membership::update(Mutex::ScopedLock& l) {
+ QPID_LOG(info, "Membership: " << brokers);
+ Variant::List brokers = asList();
+ if (mgmtObject) mgmtObject->set_status(printable(getStatus(l)).str());
+ if (mgmtObject) mgmtObject->set_members(brokers);
+ haBroker.getBroker().getManagementAgent()->raiseEvent(
+ _qmf::EventMembersUpdate(brokers));
+}
+
+void Membership::setMgmtObject(boost::shared_ptr<_qmf::HaBroker> mo) {
+ Mutex::ScopedLock l(lock);
+ mgmtObject = mo;
+ update(l);
+}
+
+
+namespace {
+bool checkTransition(BrokerStatus from, BrokerStatus to) {
+ // Legal state transitions. Initial state is JOINING, ACTIVE is terminal.
+ static const BrokerStatus TRANSITIONS[][2] = {
+ { STANDALONE, JOINING }, // Initialization of backup broker
+ { JOINING, CATCHUP }, // Connected to primary
+ { JOINING, RECOVERING }, // Chosen as initial primary.
+ { CATCHUP, READY }, // Caught up all queues, ready to take over.
+ { READY, RECOVERING }, // Chosen as new primary
+ { READY, CATCHUP }, // Timed out failing over, demoted to catch-up.
+ { RECOVERING, ACTIVE } // All expected backups are ready
+ };
+ static const size_t N = sizeof(TRANSITIONS)/sizeof(TRANSITIONS[0]);
+ for (size_t i = 0; i < N; ++i) {
+ if (TRANSITIONS[i][0] == from && TRANSITIONS[i][1] == to)
+ return true;
+ }
+ return false;
+}
+} // namespace
+
+void Membership::setStatus(BrokerStatus newStatus) {
+ BrokerStatus status = getStatus();
+ QPID_LOG(info, "Status change: "
+ << printable(status) << " -> " << printable(newStatus));
+ bool legal = checkTransition(status, newStatus);
+ if (!legal) {
+ haBroker.shutdown(QPID_MSG("Illegal state transition: " << printable(status)
+ << " -> " << printable(newStatus)));
+ }
+
+ Mutex::ScopedLock l(lock);
+ brokers[self].setStatus(newStatus);
+ if (mgmtObject) mgmtObject->set_status(printable(newStatus).str());
+ update(l);
+}
+
+BrokerStatus Membership::getStatus() const {
+ Mutex::ScopedLock l(lock);
+ return getStatus(l);
+}
+
+BrokerStatus Membership::getStatus(sys::Mutex::ScopedLock&) const {
+ BrokerInfo::Map::const_iterator i = brokers.find(self);
+ assert(i != brokers.end());
+ return i->second.getStatus();
+}
+
+BrokerInfo Membership::getInfo() const {
+ Mutex::ScopedLock l(lock);
+ BrokerInfo::Map::const_iterator i = brokers.find(self);
+ assert(i != brokers.end());
+ return i->second;
}
+// FIXME aconway 2013-01-23: move to .h?
}} // namespace qpid::ha
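checkTransition() above is the whole legality check, and setStatus() turns any violation into a broker shutdown. A sketch of the legal path for a broker that starts in JOINING and is eventually promoted, assuming a Membership reference obtained from HaBroker::getMembership(); the function name is illustrative:

    // Legal path for a broker that joins as backup and is later promoted:
    //   JOINING -> CATCHUP -> READY -> RECOVERING -> ACTIVE
    void exampleLifecycle(qpid::ha::Membership& m) {
        m.setStatus(qpid::ha::CATCHUP);     // connected to primary
        m.setStatus(qpid::ha::READY);       // caught up on all queues
        m.setStatus(qpid::ha::RECOVERING);  // chosen as the new primary
        m.setStatus(qpid::ha::ACTIVE);      // all expected backups are ready
        // Anything outside the TRANSITIONS table, e.g. JOINING -> ACTIVE,
        // makes setStatus() call HaBroker::shutdown() with an
        // "Illegal state transition" message.
    }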
diff --git a/qpid/cpp/src/qpid/ha/Membership.h b/qpid/cpp/src/qpid/ha/Membership.h
index 8406dccd5d..956569fbd8 100644
--- a/qpid/cpp/src/qpid/ha/Membership.h
+++ b/qpid/cpp/src/qpid/ha/Membership.h
@@ -24,45 +24,72 @@
#include "BrokerInfo.h"
#include "types.h"
-#include "qpid/framing/Uuid.h"
#include "qpid/log/Statement.h"
+#include "qpid/sys/Mutex.h"
#include "qpid/types/Variant.h"
#include <boost/function.hpp>
#include <set>
#include <vector>
#include <iosfwd>
+
+namespace qmf { namespace org { namespace apache { namespace qpid { namespace ha {
+class HaBroker;
+}}}}}
+
namespace qpid {
+
+namespace broker {
+class Broker;
+}
+
+namespace types {
+class Uuid;
+}
+
namespace ha {
+class HaBroker;
/**
* Keep track of the brokers in the membership.
- * THREAD UNSAFE: caller must serialize
+ * Send management events on membership changes.
+ * THREAD SAFE
*/
class Membership
{
public:
- Membership(const types::Uuid& self_) : self(self_) {}
+ Membership(const BrokerInfo& info, HaBroker&);
- void reset(const BrokerInfo& b); ///< Reset to contain just one member.
+ void setMgmtObject(boost::shared_ptr<qmf::org::apache::qpid::ha::HaBroker>);
+
+ void clear(); ///< Clear all but self.
void add(const BrokerInfo& b);
void remove(const types::Uuid& id);
bool contains(const types::Uuid& id);
+
/** Return IDs of all READY backups other than self */
BrokerInfo::Set otherBackups() const;
void assign(const types::Variant::List&);
types::Variant::List asList() const;
- bool get(const types::Uuid& id, BrokerInfo& result);
+ bool get(const types::Uuid& id, BrokerInfo& result) const;
+
+ types::Uuid getSelf() const { return self; }
+ BrokerInfo getInfo() const;
+ BrokerStatus getStatus() const;
+ void setStatus(BrokerStatus s);
private:
- types::Uuid self;
+ void update(sys::Mutex::ScopedLock&);
+ BrokerStatus getStatus(sys::Mutex::ScopedLock&) const;
+
+ mutable sys::Mutex lock;
+ HaBroker& haBroker;
+ boost::shared_ptr<qmf::org::apache::qpid::ha::HaBroker> mgmtObject;
+ const types::Uuid self;
BrokerInfo::Map brokers;
- friend std::ostream& operator<<(std::ostream&, const Membership&);
};
-std::ostream& operator<<(std::ostream&, const Membership&);
-
}} // namespace qpid::ha
#endif /*!QPID_HA_MEMBERSHIP_H*/
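Since the lock and the management object now live inside Membership and the accessors return copies, readers in arbitrary connection threads need no external locking. A small illustrative helper, assuming only the declarations above:

    // Safe from any thread: getInfo()/getStatus() copy under Membership's own lock.
    bool isSelfReady(const qpid::ha::Membership& m) {
        qpid::ha::BrokerInfo self = m.getInfo();   // copy of this broker's entry
        return self.getStatus() == qpid::ha::READY;
    }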
diff --git a/qpid/cpp/src/qpid/ha/Primary.cpp b/qpid/cpp/src/qpid/ha/Primary.cpp
index 6d5d68191b..12535399e3 100644
--- a/qpid/cpp/src/qpid/ha/Primary.cpp
+++ b/qpid/cpp/src/qpid/ha/Primary.cpp
@@ -82,8 +82,10 @@ class ExpectedBackupTimerTask : public sys::TimerTask {
Primary* Primary::instance = 0;
Primary::Primary(HaBroker& hb, const BrokerInfo::Set& expect) :
- haBroker(hb), logPrefix("Primary: "), active(false)
+ haBroker(hb), membership(hb.getMembership()),
+ logPrefix("Primary: "), active(false)
{
+ hb.getMembership().setStatus(RECOVERING);
assert(instance == 0);
instance = this; // Let queue replicators find us.
if (expect.empty()) {
@@ -96,7 +98,7 @@ Primary::Primary(HaBroker& hb, const BrokerInfo::Set& expect) :
QPID_LOG(notice, logPrefix << "Promoted to primary. Expected backups: " << expect);
for (BrokerInfo::Set::const_iterator i = expect.begin(); i != expect.end(); ++i) {
boost::shared_ptr<RemoteBackup> backup(
- new RemoteBackup(*i, haBroker.getReplicationTest(), false));
+ new RemoteBackup(*i, haBroker.getReplicationTest(), 0));
backups[i->getSystemId()] = backup;
if (!backup->isReady()) expectedBackups.insert(backup);
backup->setCatchupQueues(hb.getBroker().getQueues(), true); // Create guards
@@ -108,11 +110,18 @@ Primary::Primary(HaBroker& hb, const BrokerInfo::Set& expect) :
hb.getBroker().getTimer().add(timerTask);
}
+
+ // Remove backup tag property from outgoing link properties.
+ framing::FieldTable linkProperties = hb.getBroker().getLinkClientProperties();
+ linkProperties.erase(ConnectionObserver::BACKUP_TAG);
+ hb.getBroker().setLinkClientProperties(linkProperties);
+
configurationObserver.reset(new PrimaryConfigurationObserver(*this));
haBroker.getBroker().getConfigurationObservers().add(configurationObserver);
Mutex::ScopedLock l(lock); // We are now active as a configurationObserver
checkReady(l);
+
// Allow client connections
connectionObserver.reset(new PrimaryConnectionObserver(*this));
haBroker.getObserver()->setObserver(connectionObserver, logPrefix);
@@ -128,7 +137,7 @@ void Primary::checkReady(Mutex::ScopedLock&) {
active = true;
Mutex::ScopedUnlock u(lock); // Don't hold lock across callback
QPID_LOG(notice, logPrefix << "Finished waiting for backups, primary is active.");
- haBroker.activate();
+ membership.setStatus(ACTIVE);
}
}
@@ -136,7 +145,7 @@ void Primary::checkReady(BackupMap::iterator i, Mutex::ScopedLock& l) {
if (i != backups.end() && i->second->reportReady()) {
BrokerInfo info = i->second->getBrokerInfo();
info.setStatus(READY);
- haBroker.addBroker(info);
+ membership.add(info);
if (expectedBackups.erase(i->second)) {
QPID_LOG(info, logPrefix << "Expected backup is ready: " << info);
checkReady(l);
@@ -161,9 +170,10 @@ void Primary::timeoutExpectedBackups() {
expectedBackups.erase(i++);
backups.erase(info.getSystemId());
rb->cancel();
- // Downgrade the broker to CATCHUP
+ // Downgrade the broker's status to CATCHUP
+ // The broker will get this status change when it eventually connects.
info.setStatus(CATCHUP);
- haBroker.addBroker(info);
+ membership.add(info);
}
else ++i;
}
@@ -228,7 +238,7 @@ void Primary::opened(broker::Connection& connection) {
if (i == backups.end()) {
QPID_LOG(info, logPrefix << "New backup connected: " << info);
boost::shared_ptr<RemoteBackup> backup(
- new RemoteBackup(info, haBroker.getReplicationTest(), true));
+ new RemoteBackup(info, haBroker.getReplicationTest(), &connection));
{
// Avoid deadlock with queue registry lock.
Mutex::ScopedUnlock u(lock);
@@ -238,11 +248,11 @@ void Primary::opened(broker::Connection& connection) {
}
else {
QPID_LOG(info, logPrefix << "Known backup connected: " << info);
- i->second->setConnected(true);
+ i->second->setConnection(&connection);
checkReady(i, l);
}
if (info.getStatus() == JOINING) info.setStatus(CATCHUP);
- haBroker.addBroker(info);
+ membership.add(info);
}
else
QPID_LOG(debug, logPrefix << "Accepted client connection "
@@ -259,7 +269,7 @@ void Primary::closed(broker::Connection& connection) {
// Checking isConnected() lets us ignore such spurious closes.
if (i != backups.end() && i->second->isConnected()) {
QPID_LOG(info, logPrefix << "Backup disconnected: " << info);
- haBroker.removeBroker(info.getSystemId());
+ membership.remove(info.getSystemId());
expectedBackups.erase(i->second);
backups.erase(i);
checkReady(l);
@@ -275,4 +285,9 @@ boost::shared_ptr<QueueGuard> Primary::getGuard(const QueuePtr& q, const BrokerI
return i == backups.end() ? boost::shared_ptr<QueueGuard>() : i->second->guard(q);
}
+Role* Primary::promote() {
+ QPID_LOG(info, "Ignoring promotion, already primary: " << haBroker.getBrokerInfo());
+ return 0;
+}
+
}} // namespace qpid::ha
diff --git a/qpid/cpp/src/qpid/ha/Primary.h b/qpid/cpp/src/qpid/ha/Primary.h
index c713115176..3097695817 100644
--- a/qpid/cpp/src/qpid/ha/Primary.h
+++ b/qpid/cpp/src/qpid/ha/Primary.h
@@ -24,6 +24,7 @@
#include "types.h"
#include "BrokerInfo.h"
+#include "Role.h"
#include "qpid/sys/Mutex.h"
#include <boost/shared_ptr.hpp>
#include <boost/intrusive_ptr.hpp>
@@ -48,6 +49,7 @@ class HaBroker;
class ReplicatingSubscription;
class RemoteBackup;
class QueueGuard;
+class Membership;
/**
* State associated with a primary broker:
@@ -56,7 +58,7 @@ class QueueGuard;
*
* THREAD SAFE: called concurrently in arbitrary connection threads.
*/
-class Primary
+class Primary : public Role
{
public:
typedef boost::shared_ptr<broker::Queue> QueuePtr;
@@ -67,6 +69,11 @@ class Primary
Primary(HaBroker& hb, const BrokerInfo::Set& expectedBackups);
~Primary();
+ // Role implementation
+ std::string getLogPrefix() const { return logPrefix; }
+ Role* promote();
+ void setBrokerUrl(const Url&) {}
+
void readyReplica(const ReplicatingSubscription&);
void removeReplica(const std::string& q);
@@ -94,12 +101,13 @@ class Primary
sys::Mutex lock;
HaBroker& haBroker;
+ Membership& membership;
std::string logPrefix;
bool active;
/**
* Set of expected backups that must be ready before we declare ourselves
- * active. These are backups that were known before the primary crashed. As
- * new primary we expect them to re-connect.
+ * active. These are backups that were known and ready before the primary
+ * crashed. As new primary we expect them to re-connect.
*/
BackupSet expectedBackups;
/**
diff --git a/qpid/cpp/src/qpid/ha/QueueGuard.cpp b/qpid/cpp/src/qpid/ha/QueueGuard.cpp
index f3bc4c4417..b2b012766c 100644
--- a/qpid/cpp/src/qpid/ha/QueueGuard.cpp
+++ b/qpid/cpp/src/qpid/ha/QueueGuard.cpp
@@ -66,7 +66,7 @@ QueueGuard::~QueueGuard() { cancel(); }
// NOTE: Called with message lock held.
void QueueGuard::enqueued(const Message& m) {
// Delay completion
- QPID_LOG(trace, logPrefix << "Delayed completion of " << m);
+ QPID_LOG(trace, logPrefix << "Delayed completion of " << m.getSequence());
m.getIngressCompletion()->startCompleter();
{
Mutex::ScopedLock l(lock);
diff --git a/qpid/cpp/src/qpid/ha/QueueReplicator.cpp b/qpid/cpp/src/qpid/ha/QueueReplicator.cpp
index 6b270b41d3..6fe49bc1af 100644
--- a/qpid/cpp/src/qpid/ha/QueueReplicator.cpp
+++ b/qpid/cpp/src/qpid/ha/QueueReplicator.cpp
@@ -260,9 +260,7 @@ void QueueReplicator::route(Deliverable& msg)
// Ignore unknown event keys, may be introduced in later versions.
}
catch (const std::exception& e) {
- QPID_LOG(critical, logPrefix << "Replication failed: " << e.what());
- haBroker.shutdown();
- throw;
+ haBroker.shutdown(QPID_MSG(logPrefix << "Replication failed: " << e.what()));
}
}
diff --git a/qpid/cpp/src/qpid/ha/RemoteBackup.cpp b/qpid/cpp/src/qpid/ha/RemoteBackup.cpp
index b933c71bbb..394ba3041b 100644
--- a/qpid/cpp/src/qpid/ha/RemoteBackup.cpp
+++ b/qpid/cpp/src/qpid/ha/RemoteBackup.cpp
@@ -21,6 +21,7 @@
#include "RemoteBackup.h"
#include "QueueGuard.h"
#include "qpid/broker/Broker.h"
+#include "qpid/broker/Connection.h"
#include "qpid/broker/Queue.h"
#include "qpid/broker/QueueRegistry.h"
#include "qpid/log/Statement.h"
@@ -32,9 +33,10 @@ namespace ha {
using sys::Mutex;
using boost::bind;
-RemoteBackup::RemoteBackup(const BrokerInfo& info, ReplicationTest rt, bool con) :
- logPrefix("Primary: Remote backup "+info.getLogId()+": "),
- brokerInfo(info), replicationTest(rt), connected(con), reportedReady(false)
+RemoteBackup::RemoteBackup(
+ const BrokerInfo& info, ReplicationTest rt, broker::Connection* c
+) : logPrefix("Primary: Remote backup "+info.getLogId()+": "),
+ brokerInfo(info), replicationTest(rt), connection(c), reportedReady(false)
{}
void RemoteBackup::setCatchupQueues(broker::QueueRegistry& queues, bool createGuards)
@@ -46,13 +48,19 @@ void RemoteBackup::setCatchupQueues(broker::QueueRegistry& queues, bool createGu
RemoteBackup::~RemoteBackup() { cancel(); }
void RemoteBackup::cancel() {
+ QPID_LOG(debug, logPrefix << "Cancelled " << (connection? "connected":"disconnected")
+ << " backup: " << brokerInfo);
for (GuardMap::iterator i = guards.begin(); i != guards.end(); ++i)
i->second->cancel();
guards.clear();
+ if (connection) {
+ connection->abort();
+ connection = 0;
+ }
}
bool RemoteBackup::isReady() {
- return connected && catchupQueues.empty();
+ return connection && catchupQueues.empty();
}
void RemoteBackup::catchupQueue(const QueuePtr& q, bool createGuard) {
diff --git a/qpid/cpp/src/qpid/ha/RemoteBackup.h b/qpid/cpp/src/qpid/ha/RemoteBackup.h
index e48ceff3ae..a65d916432 100644
--- a/qpid/cpp/src/qpid/ha/RemoteBackup.h
+++ b/qpid/cpp/src/qpid/ha/RemoteBackup.h
@@ -33,6 +33,7 @@ namespace qpid {
namespace broker {
class Queue;
class QueueRegistry;
+class Connection;
}
namespace ha {
@@ -54,7 +55,7 @@ class RemoteBackup
/** Note: isReady() can be true after construction
*@param connected true if the backup is already connected.
*/
- RemoteBackup(const BrokerInfo& info, ReplicationTest, bool connected);
+ RemoteBackup(const BrokerInfo&, ReplicationTest, broker::Connection*);
~RemoteBackup();
/** Set all queues in the registry as catch-up queues.
@@ -66,8 +67,8 @@ class RemoteBackup
GuardPtr guard(const QueuePtr&);
/** Is the remote backup connected? */
- void setConnected(bool b) { connected=b; }
- bool isConnected() const { return connected; }
+ void setConnection(broker::Connection* c) { connection = c; }
+ bool isConnected() const { return connection; }
/** ReplicatingSubscription associated with queue is ready.
* Note: may set isReady()
@@ -101,7 +102,7 @@ class RemoteBackup
ReplicationTest replicationTest;
GuardMap guards;
QueueSet catchupQueues;
- bool connected;
+ broker::Connection* connection;
bool reportedReady;
};
diff --git a/qpid/cpp/src/qpid/ha/ReplicatingSubscription.h b/qpid/cpp/src/qpid/ha/ReplicatingSubscription.h
index f714e8e01a..c2d35dd5cf 100644
--- a/qpid/cpp/src/qpid/ha/ReplicatingSubscription.h
+++ b/qpid/cpp/src/qpid/ha/ReplicatingSubscription.h
@@ -65,6 +65,8 @@ class QueueGuard;
class ReplicatingSubscription : public broker::SemanticState::ConsumerImpl
{
public:
+ typedef broker::SemanticState::ConsumerImpl ConsumerImpl;
+
struct Factory : public broker::ConsumerFactory {
boost::shared_ptr<broker::SemanticState::ConsumerImpl> create(
broker::SemanticState* parent,
diff --git a/qpid/cpp/src/qpid/ha/Role.h b/qpid/cpp/src/qpid/ha/Role.h
new file mode 100644
index 0000000000..570c65e3e7
--- /dev/null
+++ b/qpid/cpp/src/qpid/ha/Role.h
@@ -0,0 +1,55 @@
+#ifndef QPID_HA_ROLE_H
+#define QPID_HA_ROLE_H
+
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include <string>
+
+namespace qpid {
+class Url;
+
+namespace ha {
+
+/**
+ * A HaBroker has a role, e.g. Primary, Backup, StandAlone.
+ * Role subclasses define the actions of the broker in each role.
+ * The Role interface lets the HaBroker delegate management actions
+ * to the current role.
+ */
+class Role
+{
+ public:
+ /** Log prefix appropriate to the role */
+ virtual std::string getLogPrefix() const = 0;
+
+ /** QMF promote method handler.
+ * @return The new role if promoted, 0 if not. Caller takes ownership.
+ */
+ virtual Role* promote() = 0;
+
+ virtual void setBrokerUrl(const Url& url) = 0;
+
+ private:
+};
+}} // namespace qpid::ha
+
+#endif /*!QPID_HA_ROLE_H*/
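Role.h defines the contract behind the boost::shared_ptr<Role> member added to HaBroker earlier in this patch. A sketch of how a promote request might be dispatched, following the "caller takes ownership" note; the helper name is illustrative:

    // Sketch: dispatching a QMF "promote" request through the current role.
    void examplePromote(boost::shared_ptr<qpid::ha::Role>& role) {
        qpid::ha::Role* newRole = role->promote();  // 0 means "no role change"
        if (newRole) role.reset(newRole);           // caller takes ownership
        // Primary::promote() and StandAlone::promote() both return 0 in this patch.
    }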
diff --git a/qpid/gentools/templ.cpp/model/AMQP_HighestVersion.h.tmpl b/qpid/cpp/src/qpid/ha/StandAlone.h
index 9753b454ba..4bfd1810f2 100644
--- a/qpid/gentools/templ.cpp/model/AMQP_HighestVersion.h.tmpl
+++ b/qpid/cpp/src/qpid/ha/StandAlone.h
@@ -1,4 +1,6 @@
-&{AMQP_HighestVersion.h}
+#ifndef QPID_HA_STANDALONE_H
+#define QPID_HA_STANDALONE_H
+
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
@@ -8,9 +10,9 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
- *
+ *
* http://www.apache.org/licenses/LICENSE-2.0
- *
+ *
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -19,24 +21,25 @@
* under the License.
*
*/
-
-/*
- * This file is auto-generated by ${GENERATOR} - do not modify.
- * Supported AMQP versions:
-%{VLIST} * ${major}-${minor}
- */
-#ifndef qpid_framing_highestProtocolVersion__
-#define qpid_framing_highestProtocolVersion__
-
-#include <ProtocolVersion.h>
-
-
namespace qpid {
-namespace framing {
+class Url;
-static ProtocolVersion highestProtocolVersion(${hv_latest_major}, ${hv_latest_minor});
+namespace ha {
+
+/**
+ * Stand-alone role: acts as a stand-alone broker, no clustering.
+ * HA module is needed to set up replication via QMF methods.
+ */
+class StandAlone : public Role
+{
+ public:
+ std::string getLogPrefix() const { return logPrefix; }
+ Role* promote() { return 0; }
+ void setBrokerUrl(const Url&) {}
-} /* namespace framing */
-} /* namespace qpid */
+ private:
+ std::string logPrefix;
+};
+}} // namespace qpid::ha
-#endif
+#endif /*!QPID_HA_STANDALONE_H*/
diff --git a/qpid/cpp/src/qpid/ha/StatusCheck.h b/qpid/cpp/src/qpid/ha/StatusCheck.h
index 3c62c43a22..997ced4159 100644
--- a/qpid/cpp/src/qpid/ha/StatusCheck.h
+++ b/qpid/cpp/src/qpid/ha/StatusCheck.h
@@ -32,6 +32,11 @@
namespace qpid {
namespace ha {
+// FIXME aconway 2012-12-21: This solution is incomplete. It will only protect
+// against bad promotion if there are READY brokers when this broker starts.
+// It will not help the situation where brokers become READY after this one starts.
+//
+
/**
 * Check whether a JOINING broker can be promoted.
*
@@ -49,8 +54,10 @@ class StatusCheck
~StatusCheck();
void setUrl(const Url&);
bool canPromote();
- void setPromote(bool p);
+
private:
+ void setPromote(bool p);
+
std::string logPrefix;
sys::Mutex lock;
std::vector<sys::Thread> threads;
diff --git a/qpid/cpp/src/qpid/legacystore/BindingDbt.cpp b/qpid/cpp/src/qpid/legacystore/BindingDbt.cpp
new file mode 100644
index 0000000000..a48c156e71
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/BindingDbt.cpp
@@ -0,0 +1,50 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include "qpid/legacystore/BindingDbt.h"
+
+namespace mrg {
+namespace msgstore {
+
+BindingDbt::BindingDbt(const qpid::broker::PersistableExchange& e, const qpid::broker::PersistableQueue& q, const std::string& k, const qpid::framing::FieldTable& a)
+ : data(new char[encodedSize(e, q, k, a)]),
+ buffer(data, encodedSize(e, q, k, a))
+{
+ buffer.putLongLong(q.getPersistenceId());
+ buffer.putShortString(q.getName());
+ buffer.putShortString(k);
+ buffer.put(a);
+
+ set_data(data);
+ set_size(encodedSize(e, q, k, a));
+}
+
+BindingDbt::~BindingDbt()
+{
+ delete [] data;
+}
+
+uint32_t BindingDbt::encodedSize(const qpid::broker::PersistableExchange& /*not used*/, const qpid::broker::PersistableQueue& q, const std::string& k, const qpid::framing::FieldTable& a)
+{
+ return 8 /*queue id*/ + q.getName().size() + 1 + k.size() + 1 + a.encodedSize();
+}
+
+}}
diff --git a/qpid/cpp/src/qpid/legacystore/BindingDbt.h b/qpid/cpp/src/qpid/legacystore/BindingDbt.h
new file mode 100644
index 0000000000..63c7cd144e
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/BindingDbt.h
@@ -0,0 +1,56 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#ifndef QPID_LEGACYSTORE_BINDINGDBT_H
+#define QPID_LEGACYSTORE_BINDINGDBT_H
+
+#include "db-inc.h"
+#include "qpid/broker/PersistableExchange.h"
+#include "qpid/broker/PersistableQueue.h"
+#include "qpid/framing/Buffer.h"
+#include "qpid/framing/FieldTable.h"
+
+namespace mrg{
+namespace msgstore{
+
+class BindingDbt : public Dbt
+{
+ char* data;
+ qpid::framing::Buffer buffer;
+
+ static uint32_t encodedSize(const qpid::broker::PersistableExchange& e,
+ const qpid::broker::PersistableQueue& q,
+ const std::string& k,
+ const qpid::framing::FieldTable& a);
+
+public:
+ BindingDbt(const qpid::broker::PersistableExchange& e,
+ const qpid::broker::PersistableQueue& q,
+ const std::string& k,
+ const qpid::framing::FieldTable& a);
+
+ virtual ~BindingDbt();
+
+};
+
+}}
+
+#endif // ifndef QPID_LEGACYSTORE_BINDINGDBT_H
diff --git a/qpid/cpp/src/qpid/legacystore/BufferValue.cpp b/qpid/cpp/src/qpid/legacystore/BufferValue.cpp
new file mode 100644
index 0000000000..fb2c471cd7
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/BufferValue.cpp
@@ -0,0 +1,56 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include "qpid/legacystore/BufferValue.h"
+
+namespace mrg {
+namespace msgstore {
+
+
+
+BufferValue::BufferValue(u_int32_t size, u_int64_t offset)
+ : data(new char[size]),
+ buffer(data, size)
+{
+ set_data(data);
+ set_size(size);
+ set_flags(DB_DBT_USERMEM | DB_DBT_PARTIAL);
+ set_doff(offset);
+ set_dlen(size);
+ set_ulen(size);
+}
+
+BufferValue::BufferValue(const qpid::broker::Persistable& p)
+ : data(new char[p.encodedSize()]),
+ buffer(data, p.encodedSize())
+{
+ p.encode(buffer);
+
+ set_data(data);
+ set_size(p.encodedSize());
+}
+
+BufferValue::~BufferValue()
+{
+ delete [] data;
+}
+
+}}
diff --git a/qpid/cpp/src/qpid/legacystore/BufferValue.h b/qpid/cpp/src/qpid/legacystore/BufferValue.h
new file mode 100644
index 0000000000..527fbcf577
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/BufferValue.h
@@ -0,0 +1,46 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#ifndef QPID_LEGACYSTORE_BUFFERVALUE_H
+#define QPID_LEGACYSTORE_BUFFERVALUE_H
+
+#include "db-inc.h"
+#include "qpid/broker/Persistable.h"
+#include "qpid/framing/Buffer.h"
+
+namespace mrg{
+namespace msgstore{
+
+class BufferValue : public Dbt
+{
+ char* data;
+
+public:
+ qpid::framing::Buffer buffer;
+
+ BufferValue(u_int32_t size, u_int64_t offset);
+ BufferValue(const qpid::broker::Persistable& p);
+ virtual ~BufferValue();
+};
+
+}}
+
+#endif // ifndef QPID_LEGACYSTORE_BUFFERVALUE_H
diff --git a/qpid/cpp/src/qpid/legacystore/Cursor.h b/qpid/cpp/src/qpid/legacystore/Cursor.h
new file mode 100644
index 0000000000..0c869c29a0
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/Cursor.h
@@ -0,0 +1,50 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#ifndef QPID_LEGACYSTORE_CURSOR_H
+#define QPID_LEGACYSTORE_CURSOR_H
+
+#include <boost/shared_ptr.hpp>
+#include "db-inc.h"
+
+namespace mrg{
+namespace msgstore{
+
+class Cursor
+{
+ Dbc* cursor;
+public:
+ typedef boost::shared_ptr<Db> db_ptr;
+
+ Cursor() : cursor(0) {}
+ virtual ~Cursor() { if(cursor) cursor->close(); }
+
+ void open(db_ptr db, DbTxn* txn, u_int32_t flags = 0) { db->cursor(txn, &cursor, flags); }
+ void close() { if(cursor) cursor->close(); cursor = 0; }
+ Dbc* get() { return cursor; }
+ Dbc* operator->() { return cursor; }
+ bool next(Dbt& key, Dbt& value) { return cursor->get(&key, &value, DB_NEXT) == 0; }
+ bool current(Dbt& key, Dbt& value) { return cursor->get(&key, &value, DB_CURRENT) == 0; }
+};
+
+}}
+
+#endif // ifndef QPID_LEGACYSTORE_CURSOR_H
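Cursor wraps a Berkeley DB Dbc handle so callers get open/next/close plus cleanup in the destructor. A usage sketch, assuming a db_ptr and DbTxn obtained elsewhere; the loop body is illustrative:

    // Iterate all records in a Db using the wrapper above.
    void exampleScan(mrg::msgstore::Cursor::db_ptr db, DbTxn* txn) {
        mrg::msgstore::Cursor c;
        c.open(db, txn);             // creates the underlying Dbc
        Dbt key, value;
        while (c.next(key, value)) {
            // ... decode key/value here (e.g. with IdDbt / BufferValue) ...
        }
        c.close();                   // optional; the destructor also closes
    }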
diff --git a/qpid/cpp/src/qpid/legacystore/DataTokenImpl.cpp b/qpid/cpp/src/qpid/legacystore/DataTokenImpl.cpp
new file mode 100644
index 0000000000..796d4c02f0
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/DataTokenImpl.cpp
@@ -0,0 +1,28 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include "qpid/legacystore/DataTokenImpl.h"
+
+using namespace mrg::msgstore;
+
+DataTokenImpl::DataTokenImpl():data_tok() {}
+
+DataTokenImpl::~DataTokenImpl() {}
diff --git a/qpid/cpp/src/qpid/legacystore/DataTokenImpl.h b/qpid/cpp/src/qpid/legacystore/DataTokenImpl.h
new file mode 100644
index 0000000000..e01d471e1b
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/DataTokenImpl.h
@@ -0,0 +1,47 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#ifndef QPID_LEGACYSTORE_DATATOKENIMPL_H
+#define QPID_LEGACYSTORE_DATATOKENIMPL_H
+
+#include "qpid/legacystore/jrnl/data_tok.h"
+#include "qpid/broker/PersistableMessage.h"
+#include <boost/intrusive_ptr.hpp>
+
+namespace mrg {
+namespace msgstore {
+
+class DataTokenImpl : public journal::data_tok, public qpid::RefCounted
+{
+ private:
+ boost::intrusive_ptr<qpid::broker::PersistableMessage> sourceMsg;
+ public:
+ DataTokenImpl();
+ virtual ~DataTokenImpl();
+
+ inline boost::intrusive_ptr<qpid::broker::PersistableMessage>& getSourceMessage() { return sourceMsg; }
+ inline void setSourceMessage(const boost::intrusive_ptr<qpid::broker::PersistableMessage>& msg) { sourceMsg = msg; }
+};
+
+} // namespace msgstore
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_DATATOKENIMPL_H
diff --git a/qpid/cpp/src/qpid/legacystore/IdDbt.cpp b/qpid/cpp/src/qpid/legacystore/IdDbt.cpp
new file mode 100644
index 0000000000..d9edaf80e6
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/IdDbt.cpp
@@ -0,0 +1,42 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include "qpid/legacystore/IdDbt.h"
+
+using namespace mrg::msgstore;
+
+IdDbt::IdDbt() : id(0)
+{
+ init();
+}
+
+IdDbt::IdDbt(u_int64_t _id) : id(_id)
+{
+ init();
+}
+
+void IdDbt::init()
+{
+ set_data(&id);
+ set_size(sizeof(u_int64_t));
+ set_ulen(sizeof(u_int64_t));
+ set_flags(DB_DBT_USERMEM);
+}
diff --git a/qpid/cpp/src/qpid/legacystore/IdDbt.h b/qpid/cpp/src/qpid/legacystore/IdDbt.h
new file mode 100644
index 0000000000..ecf5922963
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/IdDbt.h
@@ -0,0 +1,42 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#ifndef QPID_LEGACYSTORE_IDDBT_H
+#define QPID_LEGACYSTORE_IDDBT_H
+
+#include "db-inc.h"
+
+namespace mrg{
+namespace msgstore{
+
+class IdDbt : public Dbt
+{
+ void init();
+public:
+ u_int64_t id;
+
+ IdDbt(u_int64_t id);
+ IdDbt();
+};
+
+}}
+
+#endif // ifndef QPID_LEGACYSTORE_IDDBT_H
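IdDbt and BufferValue are the key and value Dbt wrappers the store pairs together. A sketch of a keyed lookup with the plain Berkeley DB get() call, where expectedSize is an assumption about how much of the record the caller wants:

    // Fetch the record stored under a 64-bit persistence id.
    bool exampleFetch(Db& db, DbTxn* txn, u_int64_t persistenceId, u_int32_t expectedSize) {
        mrg::msgstore::IdDbt key(persistenceId);            // DB_DBT_USERMEM key
        mrg::msgstore::BufferValue value(expectedSize, 0);  // partial read from offset 0
        return db.get(txn, &key, &value, 0) == 0;           // 0 == found
    }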
diff --git a/qpid/cpp/src/qpid/legacystore/IdSequence.cpp b/qpid/cpp/src/qpid/legacystore/IdSequence.cpp
new file mode 100644
index 0000000000..975b1107e7
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/IdSequence.cpp
@@ -0,0 +1,40 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include "qpid/legacystore/IdSequence.h"
+
+using namespace mrg::msgstore;
+using qpid::sys::Mutex;
+
+IdSequence::IdSequence() : id(1) {}
+
+u_int64_t IdSequence::next()
+{
+ Mutex::ScopedLock guard(lock);
+ if (!id) id++; // avoid 0 when folding around
+ return id++;
+}
+
+void IdSequence::reset(uint64_t value)
+{
+ //deliberately not threadsafe, used only on recovery
+ id = value;
+}
diff --git a/qpid/cpp/src/qpid/legacystore/IdSequence.h b/qpid/cpp/src/qpid/legacystore/IdSequence.h
new file mode 100644
index 0000000000..11d7ff61ca
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/IdSequence.h
@@ -0,0 +1,44 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#ifndef QPID_LEGACYSTORE_IDSEQUENCE_H
+#define QPID_LEGACYSTORE_IDSEQUENCE_H
+
+#include "qpid/framing/amqp_types.h"
+#include "qpid/sys/Mutex.h"
+#include <sys/types.h>
+
+namespace mrg{
+namespace msgstore{
+
+class IdSequence
+{
+ qpid::sys::Mutex lock;
+ uint64_t id;
+public:
+ IdSequence();
+ uint64_t next();
+ void reset(uint64_t value);
+};
+
+}}
+
+#endif // ifndef QPID_LEGACYSTORE_IDSEQUENCE_H
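IdSequence serializes next() internally, while reset() is explicitly recovery-only. A short sketch of that split; names are illustrative:

    // Typical store usage: seed once during recovery, then draw ids concurrently.
    void exampleIds(mrg::msgstore::IdSequence& seq, uint64_t highestRecoveredId) {
        seq.reset(highestRecoveredId + 1);   // recovery only: not thread safe
        uint64_t a = seq.next();             // thread safe from here on
        uint64_t b = seq.next();             // b == a + 1 (unless wrapped past 0)
        (void)a; (void)b;
    }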
diff --git a/qpid/cpp/src/qpid/legacystore/JournalImpl.cpp b/qpid/cpp/src/qpid/legacystore/JournalImpl.cpp
new file mode 100644
index 0000000000..ba3f2aecae
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/JournalImpl.cpp
@@ -0,0 +1,633 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include "qpid/legacystore/JournalImpl.h"
+
+#include "qpid/legacystore/jrnl/jerrno.h"
+#include "qpid/legacystore/jrnl/jexception.h"
+#include "qpid/log/Statement.h"
+#include "qpid/management/ManagementAgent.h"
+#include "qmf/org/apache/qpid/legacystore/ArgsJournalExpand.h"
+#include "qmf/org/apache/qpid/legacystore/EventCreated.h"
+#include "qmf/org/apache/qpid/legacystore/EventEnqThresholdExceeded.h"
+#include "qmf/org/apache/qpid/legacystore/EventFull.h"
+#include "qmf/org/apache/qpid/legacystore/EventRecovered.h"
+#include "qpid/sys/Monitor.h"
+#include "qpid/sys/Timer.h"
+#include "qpid/legacystore/StoreException.h"
+
+using namespace mrg::msgstore;
+using namespace mrg::journal;
+using qpid::management::ManagementAgent;
+namespace _qmf = qmf::org::apache::qpid::legacystore;
+
+InactivityFireEvent::InactivityFireEvent(JournalImpl* p, const qpid::sys::Duration timeout):
+ qpid::sys::TimerTask(timeout, "JournalInactive:"+p->id()), _parent(p) {}
+
+void InactivityFireEvent::fire() { qpid::sys::Mutex::ScopedLock sl(_ife_lock); if (_parent) _parent->flushFire(); }
+
+GetEventsFireEvent::GetEventsFireEvent(JournalImpl* p, const qpid::sys::Duration timeout):
+ qpid::sys::TimerTask(timeout, "JournalGetEvents:"+p->id()), _parent(p) {}
+
+void GetEventsFireEvent::fire() { qpid::sys::Mutex::ScopedLock sl(_gefe_lock); if (_parent) _parent->getEventsFire(); }
+
+JournalImpl::JournalImpl(qpid::sys::Timer& timer_,
+ const std::string& journalId,
+ const std::string& journalDirectory,
+ const std::string& journalBaseFilename,
+ const qpid::sys::Duration getEventsTimeout,
+ const qpid::sys::Duration flushTimeout,
+ qpid::management::ManagementAgent* a,
+ DeleteCallback onDelete):
+ jcntl(journalId, journalDirectory, journalBaseFilename),
+ timer(timer_),
+ getEventsTimerSetFlag(false),
+ lastReadRid(0),
+ writeActivityFlag(false),
+ flushTriggeredFlag(true),
+ _xidp(0),
+ _datap(0),
+ _dlen(0),
+ _dtok(),
+ _external(false),
+ deleteCallback(onDelete)
+{
+ getEventsFireEventsPtr = new GetEventsFireEvent(this, getEventsTimeout);
+ inactivityFireEventPtr = new InactivityFireEvent(this, flushTimeout);
+ {
+ timer.start();
+ timer.add(inactivityFireEventPtr);
+ }
+
+ initManagement(a);
+
+ log(LOG_NOTICE, "Created");
+ std::ostringstream oss;
+ oss << "Journal directory = \"" << journalDirectory << "\"; Base file name = \"" << journalBaseFilename << "\"";
+ log(LOG_DEBUG, oss.str());
+}
+
+JournalImpl::~JournalImpl()
+{
+ if (deleteCallback) deleteCallback(*this);
+ if (_init_flag && !_stop_flag){
+ try { stop(true); } // NOTE: This will *block* until all outstanding disk aio calls are complete!
+ catch (const jexception& e) { log(LOG_ERROR, e.what()); }
+ }
+ getEventsFireEventsPtr->cancel();
+ inactivityFireEventPtr->cancel();
+ free_read_buffers();
+
+ if (_mgmtObject.get() != 0) {
+ _mgmtObject->resourceDestroy();
+ _mgmtObject.reset();
+ }
+
+ log(LOG_NOTICE, "Destroyed");
+}
+
+void
+JournalImpl::initManagement(qpid::management::ManagementAgent* a)
+{
+ _agent = a;
+ if (_agent != 0)
+ {
+ _mgmtObject = _qmf::Journal::shared_ptr (
+ new _qmf::Journal(_agent, this));
+
+ _mgmtObject->set_name(_jid);
+ _mgmtObject->set_directory(_jdir.dirname());
+ _mgmtObject->set_baseFileName(_base_filename);
+ _mgmtObject->set_readPageSize(JRNL_RMGR_PAGE_SIZE * JRNL_SBLK_SIZE * JRNL_DBLK_SIZE);
+ _mgmtObject->set_readPages(JRNL_RMGR_PAGES);
+
+ // The following will be set on initialize(), but being properties, these must be set to 0 in the meantime
+ _mgmtObject->set_initialFileCount(0);
+ _mgmtObject->set_dataFileSize(0);
+ _mgmtObject->set_currentFileCount(0);
+ _mgmtObject->set_writePageSize(0);
+ _mgmtObject->set_writePages(0);
+
+ _agent->addObject(_mgmtObject, 0, true);
+ }
+}
+
+
+void
+JournalImpl::initialize(const u_int16_t num_jfiles,
+ const bool auto_expand,
+ const u_int16_t ae_max_jfiles,
+ const u_int32_t jfsize_sblks,
+ const u_int16_t wcache_num_pages,
+ const u_int32_t wcache_pgsize_sblks,
+ mrg::journal::aio_callback* const cbp)
+{
+ std::ostringstream oss;
+ oss << "Initialize; num_jfiles=" << num_jfiles << " jfsize_sblks=" << jfsize_sblks;
+ oss << " wcache_pgsize_sblks=" << wcache_pgsize_sblks;
+ oss << " wcache_num_pages=" << wcache_num_pages;
+ log(LOG_DEBUG, oss.str());
+ jcntl::initialize(num_jfiles, auto_expand, ae_max_jfiles, jfsize_sblks, wcache_num_pages, wcache_pgsize_sblks, cbp);
+ log(LOG_DEBUG, "Initialization complete");
+
+ if (_mgmtObject.get() != 0)
+ {
+ _mgmtObject->set_initialFileCount(_lpmgr.num_jfiles());
+ _mgmtObject->set_autoExpand(_lpmgr.is_ae());
+ _mgmtObject->set_currentFileCount(_lpmgr.num_jfiles());
+ _mgmtObject->set_maxFileCount(_lpmgr.ae_max_jfiles());
+ _mgmtObject->set_dataFileSize(_jfsize_sblks * JRNL_SBLK_SIZE * JRNL_DBLK_SIZE);
+ _mgmtObject->set_writePageSize(wcache_pgsize_sblks * JRNL_SBLK_SIZE * JRNL_DBLK_SIZE);
+ _mgmtObject->set_writePages(wcache_num_pages);
+ }
+ if (_agent != 0)
+ _agent->raiseEvent(qmf::org::apache::qpid::legacystore::EventCreated(_jid, _jfsize_sblks * JRNL_SBLK_SIZE * JRNL_DBLK_SIZE, _lpmgr.num_jfiles()),
+ qpid::management::ManagementAgent::SEV_NOTE);
+}
+
+void
+JournalImpl::recover(const u_int16_t num_jfiles,
+ const bool auto_expand,
+ const u_int16_t ae_max_jfiles,
+ const u_int32_t jfsize_sblks,
+ const u_int16_t wcache_num_pages,
+ const u_int32_t wcache_pgsize_sblks,
+ mrg::journal::aio_callback* const cbp,
+ boost::ptr_list<msgstore::PreparedTransaction>* prep_tx_list_ptr,
+ u_int64_t& highest_rid,
+ u_int64_t queue_id)
+{
+ std::ostringstream oss1;
+ oss1 << "Recover; num_jfiles=" << num_jfiles << " jfsize_sblks=" << jfsize_sblks;
+ oss1 << " queue_id = 0x" << std::hex << queue_id << std::dec;
+ oss1 << " wcache_pgsize_sblks=" << wcache_pgsize_sblks;
+ oss1 << " wcache_num_pages=" << wcache_num_pages;
+ log(LOG_DEBUG, oss1.str());
+
+ if (_mgmtObject.get() != 0)
+ {
+ _mgmtObject->set_initialFileCount(_lpmgr.num_jfiles());
+ _mgmtObject->set_autoExpand(_lpmgr.is_ae());
+ _mgmtObject->set_currentFileCount(_lpmgr.num_jfiles());
+ _mgmtObject->set_maxFileCount(_lpmgr.ae_max_jfiles());
+ _mgmtObject->set_dataFileSize(_jfsize_sblks * JRNL_SBLK_SIZE * JRNL_DBLK_SIZE);
+ _mgmtObject->set_writePageSize(wcache_pgsize_sblks * JRNL_SBLK_SIZE * JRNL_DBLK_SIZE);
+ _mgmtObject->set_writePages(wcache_num_pages);
+ }
+
+ if (prep_tx_list_ptr) {
+ // Create list of prepared xids
+ std::vector<std::string> prep_xid_list;
+ for (msgstore::PreparedTransaction::list::iterator i = prep_tx_list_ptr->begin(); i != prep_tx_list_ptr->end(); i++) {
+ prep_xid_list.push_back(i->xid);
+ }
+
+ jcntl::recover(num_jfiles, auto_expand, ae_max_jfiles, jfsize_sblks, wcache_num_pages, wcache_pgsize_sblks,
+ cbp, &prep_xid_list, highest_rid);
+ } else {
+ jcntl::recover(num_jfiles, auto_expand, ae_max_jfiles, jfsize_sblks, wcache_num_pages, wcache_pgsize_sblks,
+ cbp, 0, highest_rid);
+ }
+
+ // Populate PreparedTransaction lists from _tmap
+ if (prep_tx_list_ptr)
+ {
+ for (msgstore::PreparedTransaction::list::iterator i = prep_tx_list_ptr->begin(); i != prep_tx_list_ptr->end(); i++) {
+ txn_data_list tdl = _tmap.get_tdata_list(i->xid); // tdl will be empty if xid not found
+ for (tdl_itr tdl_itr = tdl.begin(); tdl_itr < tdl.end(); tdl_itr++) {
+ if (tdl_itr->_enq_flag) { // enqueue op
+ i->enqueues->add(queue_id, tdl_itr->_rid);
+ } else { // dequeue op
+ i->dequeues->add(queue_id, tdl_itr->_drid);
+ }
+ }
+ }
+ }
+ std::ostringstream oss2;
+ oss2 << "Recover phase 1 complete; highest rid found = 0x" << std::hex << highest_rid;
+ oss2 << std::dec << "; emap.size=" << _emap.size() << "; tmap.size=" << _tmap.size();
+ oss2 << "; journal now read-only.";
+ log(LOG_DEBUG, oss2.str());
+
+ if (_mgmtObject.get() != 0)
+ {
+ _mgmtObject->inc_recordDepth(_emap.size());
+ _mgmtObject->inc_enqueues(_emap.size());
+ _mgmtObject->inc_txn(_tmap.size());
+ _mgmtObject->inc_txnEnqueues(_tmap.enq_cnt());
+ _mgmtObject->inc_txnDequeues(_tmap.deq_cnt());
+ }
+}
+
+void
+JournalImpl::recover_complete()
+{
+ jcntl::recover_complete();
+ log(LOG_DEBUG, "Recover phase 2 complete; journal now writable.");
+ if (_agent != 0)
+ _agent->raiseEvent(qmf::org::apache::qpid::legacystore::EventRecovered(_jid, _jfsize_sblks * JRNL_SBLK_SIZE * JRNL_DBLK_SIZE, _lpmgr.num_jfiles(),
+ _emap.size(), _tmap.size(), _tmap.enq_cnt(), _tmap.deq_cnt()), qpid::management::ManagementAgent::SEV_NOTE);
+}
+
+//#define MAX_AIO_SLEEPS 1000000 // tot: ~10 sec
+//#define AIO_SLEEP_TIME_US 10 // 0.01 ms
+// Return true if content is recovered from store; false if content is external and must be recovered from an external store.
+// Throw exception for all errors.
+bool
+JournalImpl::loadMsgContent(u_int64_t rid, std::string& data, size_t length, size_t offset)
+{
+ qpid::sys::Mutex::ScopedLock sl(_read_lock);
+ if (_dtok.rid() != rid)
+ {
+ // Free any previous msg
+ free_read_buffers();
+
+ // Last read encountered out-of-order rids, check if this rid is in that list
+ bool oooFlag = false;
+ for (std::vector<u_int64_t>::const_iterator i=oooRidList.begin(); i!=oooRidList.end() && !oooFlag; i++) {
+ if (*i == rid) {
+ oooFlag = true;
+ }
+ }
+
+ // TODO: This is a brutal approach - very inefficient and slow. Rather introduce a system of remembering
+ // jumpover points and allow the read to jump back to the first known jumpover point - but this needs
+ // a mechanism in rrfc to accomplish it. Also helpful is a struct containing a journal address - a
+ // combination of lid/offset.
+ // NOTE: The second part of the if stmt (rid < lastReadRid) is required to handle browsing.
+ if (oooFlag || rid < lastReadRid) {
+ _rmgr.invalidate();
+ oooRidList.clear();
+ }
+ _dlen = 0;
+ _dtok.reset();
+ _dtok.set_wstate(DataTokenImpl::ENQ);
+ _dtok.set_rid(0);
+ _external = false;
+ size_t xlen = 0;
+ bool transient = false;
+ bool done = false;
+ bool rid_found = false;
+ while (!done) {
+ iores res = read_data_record(&_datap, _dlen, &_xidp, xlen, transient, _external, &_dtok);
+ switch (res) {
+ case mrg::journal::RHM_IORES_SUCCESS:
+ if (_dtok.rid() != rid) {
+ // Check if this is an out-of-order rid that may impact next read
+ if (_dtok.rid() > rid)
+ oooRidList.push_back(_dtok.rid());
+ free_read_buffers();
+ // Reset data token for next read
+ _dlen = 0;
+ _dtok.reset();
+ _dtok.set_wstate(DataTokenImpl::ENQ);
+ _dtok.set_rid(0);
+ } else {
+ rid_found = _dtok.rid() == rid;
+ lastReadRid = rid;
+ done = true;
+ }
+ break;
+ case mrg::journal::RHM_IORES_PAGE_AIOWAIT:
+ if (get_wr_events(&_aio_cmpl_timeout) == journal::jerrno::AIO_TIMEOUT) {
+ std::stringstream ss;
+ ss << "read_data_record() returned " << mrg::journal::iores_str(res);
+ ss << "; timed out waiting for page to be processed.";
+ throw jexception(mrg::journal::jerrno::JERR__TIMEOUT, ss.str().c_str(), "JournalImpl",
+ "loadMsgContent");
+ }
+ break;
+ default:
+ std::stringstream ss;
+ ss << "read_data_record() returned " << mrg::journal::iores_str(res);
+ throw jexception(mrg::journal::jerrno::JERR__UNEXPRESPONSE, ss.str().c_str(), "JournalImpl",
+ "loadMsgContent");
+ }
+ }
+ if (!rid_found) {
+ std::stringstream ss;
+ ss << "read_data_record() was unable to find rid 0x" << std::hex << rid << std::dec;
+ ss << " (" << rid << "); last rid found was 0x" << std::hex << _dtok.rid() << std::dec;
+ ss << " (" << _dtok.rid() << ")";
+ throw jexception(mrg::journal::jerrno::JERR__RECNFOUND, ss.str().c_str(), "JournalImpl", "loadMsgContent");
+ }
+ }
+
+ if (_external) return false;
+
+ u_int32_t hdr_offs = qpid::framing::Buffer(static_cast<char*>(_datap), sizeof(u_int32_t)).getLong() + sizeof(u_int32_t);
+ if (hdr_offs + offset + length > _dlen) {
+ data.append((const char*)_datap + hdr_offs + offset, _dlen - hdr_offs - offset);
+ } else {
+ data.append((const char*)_datap + hdr_offs + offset, length);
+ }
+ return true;
+}
+
+void
+JournalImpl::enqueue_data_record(const void* const data_buff, const size_t tot_data_len,
+ const size_t this_data_len, data_tok* dtokp, const bool transient)
+{
+ handleIoResult(jcntl::enqueue_data_record(data_buff, tot_data_len, this_data_len, dtokp, transient));
+
+ if (_mgmtObject.get() != 0)
+ {
+ _mgmtObject->inc_enqueues();
+ _mgmtObject->inc_recordDepth();
+ }
+}
+
+void
+JournalImpl::enqueue_extern_data_record(const size_t tot_data_len, data_tok* dtokp,
+ const bool transient)
+{
+ handleIoResult(jcntl::enqueue_extern_data_record(tot_data_len, dtokp, transient));
+
+ if (_mgmtObject.get() != 0)
+ {
+ _mgmtObject->inc_enqueues();
+ _mgmtObject->inc_recordDepth();
+ }
+}
+
+void
+JournalImpl::enqueue_txn_data_record(const void* const data_buff, const size_t tot_data_len,
+ const size_t this_data_len, data_tok* dtokp, const std::string& xid, const bool transient)
+{
+ bool txn_incr = _mgmtObject.get() != 0 ? _tmap.in_map(xid) : false;
+
+ handleIoResult(jcntl::enqueue_txn_data_record(data_buff, tot_data_len, this_data_len, dtokp, xid, transient));
+
+ if (_mgmtObject.get() != 0)
+ {
+ if (!txn_incr) // If this xid was not in _tmap, it will be now...
+ _mgmtObject->inc_txn();
+ _mgmtObject->inc_enqueues();
+ _mgmtObject->inc_txnEnqueues();
+ _mgmtObject->inc_recordDepth();
+ }
+}
+
+void
+JournalImpl::enqueue_extern_txn_data_record(const size_t tot_data_len, data_tok* dtokp,
+ const std::string& xid, const bool transient)
+{
+ bool txn_incr = _mgmtObject.get() != 0 ? _tmap.in_map(xid) : false;
+
+ handleIoResult(jcntl::enqueue_extern_txn_data_record(tot_data_len, dtokp, xid, transient));
+
+ if (_mgmtObject.get() != 0)
+ {
+ if (!txn_incr) // If this xid was not in _tmap, it will be now...
+ _mgmtObject->inc_txn();
+ _mgmtObject->inc_enqueues();
+ _mgmtObject->inc_txnEnqueues();
+ _mgmtObject->inc_recordDepth();
+ }
+}
+
+void
+JournalImpl::dequeue_data_record(data_tok* const dtokp, const bool txn_coml_commit)
+{
+ handleIoResult(jcntl::dequeue_data_record(dtokp, txn_coml_commit));
+
+ if (_mgmtObject.get() != 0)
+ {
+ _mgmtObject->inc_dequeues();
+ _mgmtObject->inc_txnDequeues();
+ _mgmtObject->dec_recordDepth();
+ }
+}
+
+void
+JournalImpl::dequeue_txn_data_record(data_tok* const dtokp, const std::string& xid, const bool txn_coml_commit)
+{
+ bool txn_incr = _mgmtObject.get() != 0 ? _tmap.in_map(xid) : false;
+
+ handleIoResult(jcntl::dequeue_txn_data_record(dtokp, xid, txn_coml_commit));
+
+ if (_mgmtObject.get() != 0)
+ {
+ if (!txn_incr) // If this xid was not in _tmap, it will be now...
+ _mgmtObject->inc_txn();
+ _mgmtObject->inc_dequeues();
+ _mgmtObject->inc_txnDequeues();
+ _mgmtObject->dec_recordDepth();
+ }
+}
+
+void
+JournalImpl::txn_abort(data_tok* const dtokp, const std::string& xid)
+{
+ handleIoResult(jcntl::txn_abort(dtokp, xid));
+
+ if (_mgmtObject.get() != 0)
+ {
+ _mgmtObject->dec_txn();
+ _mgmtObject->inc_txnAborts();
+ }
+}
+
+void
+JournalImpl::txn_commit(data_tok* const dtokp, const std::string& xid)
+{
+ handleIoResult(jcntl::txn_commit(dtokp, xid));
+
+ if (_mgmtObject.get() != 0)
+ {
+ _mgmtObject->dec_txn();
+ _mgmtObject->inc_txnCommits();
+ }
+}
+
+void
+JournalImpl::stop(bool block_till_aio_cmpl)
+{
+ InactivityFireEvent* ifep = dynamic_cast<InactivityFireEvent*>(inactivityFireEventPtr.get());
+ assert(ifep); // dynamic_cast can return null if the cast fails
+ ifep->cancel();
+ jcntl::stop(block_till_aio_cmpl);
+
+ if (_mgmtObject.get() != 0) {
+ _mgmtObject->resourceDestroy();
+ _mgmtObject.reset();
+ }
+}
+
+iores
+JournalImpl::flush(const bool block_till_aio_cmpl)
+{
+ const iores res = jcntl::flush(block_till_aio_cmpl);
+ {
+ qpid::sys::Mutex::ScopedLock sl(_getf_lock);
+ if (_wmgr.get_aio_evt_rem() && !getEventsTimerSetFlag) { setGetEventTimer(); }
+ }
+ return res;
+}
+
+void
+JournalImpl::log(mrg::journal::log_level ll, const std::string& log_stmt) const
+{
+ log(ll, log_stmt.c_str());
+}
+
+void
+JournalImpl::log(mrg::journal::log_level ll, const char* const log_stmt) const
+{
+ switch (ll)
+ {
+ case LOG_TRACE: QPID_LOG(trace, "Journal \"" << _jid << "\": " << log_stmt); break;
+ case LOG_DEBUG: QPID_LOG(debug, "Journal \"" << _jid << "\": " << log_stmt); break;
+ case LOG_INFO: QPID_LOG(info, "Journal \"" << _jid << "\": " << log_stmt); break;
+ case LOG_NOTICE: QPID_LOG(notice, "Journal \"" << _jid << "\": " << log_stmt); break;
+ case LOG_WARN: QPID_LOG(warning, "Journal \"" << _jid << "\": " << log_stmt); break;
+ case LOG_ERROR: QPID_LOG(error, "Journal \"" << _jid << "\": " << log_stmt); break;
+ case LOG_CRITICAL: QPID_LOG(critical, "Journal \"" << _jid << "\": " << log_stmt); break;
+ }
+}
+
+void
+JournalImpl::getEventsFire()
+{
+ qpid::sys::Mutex::ScopedLock sl(_getf_lock);
+ getEventsTimerSetFlag = false;
+ if (_wmgr.get_aio_evt_rem()) { jcntl::get_wr_events(0); }
+ if (_wmgr.get_aio_evt_rem()) { setGetEventTimer(); }
+}
+
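+// flushFire() ties flushing to write inactivity: handleIoResult() sets writeActivityFlag on every
+// journal write, so a timer period that passes with no writes leaves the flag false and triggers a
+// single flush(); flushTriggeredFlag then suppresses repeated flushes until write activity resumes.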
+void
+JournalImpl::flushFire()
+{
+ if (writeActivityFlag) {
+ writeActivityFlag = false;
+ flushTriggeredFlag = false;
+ } else {
+ if (!flushTriggeredFlag) {
+ flush();
+ flushTriggeredFlag = true;
+ }
+ }
+ inactivityFireEventPtr->setupNextFire();
+ {
+ timer.add(inactivityFireEventPtr);
+ }
+}
+
+void
+JournalImpl::wr_aio_cb(std::vector<data_tok*>& dtokl)
+{
+ for (std::vector<data_tok*>::const_iterator i=dtokl.begin(); i!=dtokl.end(); i++)
+ {
+ DataTokenImpl* dtokp = static_cast<DataTokenImpl*>(*i);
+ if (/*!is_stopped() &&*/ dtokp->getSourceMessage())
+ {
+ switch (dtokp->wstate())
+ {
+ case data_tok::ENQ:
+ dtokp->getSourceMessage()->enqueueComplete();
+ break;
+ case data_tok::DEQ:
+/* Don't need to signal until we have a way to ack completion of dequeue in AMQP
+ dtokp->getSourceMessage()->dequeueComplete();
+ if ( dtokp->getSourceMessage()->isDequeueComplete() ) // clear id after last dequeue
+ dtokp->getSourceMessage()->setPersistenceId(0);
+*/
+ break;
+ default: ;
+ }
+ }
+ dtokp->release();
+ }
+}
+
+void
+JournalImpl::rd_aio_cb(std::vector<u_int16_t>& /*pil*/)
+{}
+
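+// Buffer ownership note for free_read_buffers(): a journal read hands back a single allocation; when
+// an XID is present _xidp owns it (with _datap referencing data within the same buffer), so only
+// _xidp is freed. Only when there is no XID does _datap own an allocation of its own. This mirrors
+// the xidbuff/dbuff handling in MessageStoreImpl::recoverMessages().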
+void
+JournalImpl::free_read_buffers()
+{
+ if (_xidp) {
+ ::free(_xidp);
+ _xidp = 0;
+ _datap = 0;
+ } else if (_datap) {
+ ::free(_datap);
+ _datap = 0;
+ }
+}
+
+void
+JournalImpl::handleIoResult(const iores r)
+{
+ writeActivityFlag = true;
+ switch (r)
+ {
+ case mrg::journal::RHM_IORES_SUCCESS:
+ return;
+ case mrg::journal::RHM_IORES_ENQCAPTHRESH:
+ {
+ std::ostringstream oss;
+ oss << "Enqueue capacity threshold exceeded on queue \"" << _jid << "\".";
+ log(LOG_WARN, oss.str());
+ if (_agent != 0)
+ _agent->raiseEvent(qmf::org::apache::qpid::legacystore::EventEnqThresholdExceeded(_jid, "Journal enqueue capacity threshold exceeded"),
+ qpid::management::ManagementAgent::SEV_WARN);
+ THROW_STORE_FULL_EXCEPTION(oss.str());
+ }
+ case mrg::journal::RHM_IORES_FULL:
+ {
+ std::ostringstream oss;
+ oss << "Journal full on queue \"" << _jid << "\".";
+ log(LOG_CRITICAL, oss.str());
+ if (_agent != 0)
+ _agent->raiseEvent(qmf::org::apache::qpid::legacystore::EventFull(_jid, "Journal full"), qpid::management::ManagementAgent::SEV_ERROR);
+ THROW_STORE_FULL_EXCEPTION(oss.str());
+ }
+ default:
+ {
+ std::ostringstream oss;
+ oss << "Unexpected I/O response (" << mrg::journal::iores_str(r) << ") on queue " << _jid << "\".";
+ log(LOG_ERROR, oss.str());
+ THROW_STORE_FULL_EXCEPTION(oss.str());
+ }
+ }
+}
+
+qpid::management::Manageable::status_t JournalImpl::ManagementMethod (uint32_t methodId,
+ qpid::management::Args& /*args*/,
+ std::string& /*text*/)
+{
+ Manageable::status_t status = Manageable::STATUS_UNKNOWN_METHOD;
+
+ switch (methodId)
+ {
+ case _qmf::Journal::METHOD_EXPAND :
+ //_qmf::ArgsJournalExpand& eArgs = (_qmf::ArgsJournalExpand&) args;
+
+ // Implement "expand" using eArgs.i_by (expand-by argument)
+
+ status = Manageable::STATUS_NOT_IMPLEMENTED;
+ break;
+ }
+
+ return status;
+}
diff --git a/qpid/cpp/src/qpid/legacystore/JournalImpl.h b/qpid/cpp/src/qpid/legacystore/JournalImpl.h
new file mode 100644
index 0000000000..7227b2ffd4
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/JournalImpl.h
@@ -0,0 +1,265 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#ifndef QPID_LEGACYSTORE_JOURNALIMPL_H
+#define QPID_LEGACYSTORE_JOURNALIMPL_H
+
+#include <set>
+#include "qpid/legacystore/jrnl/enums.h"
+#include "qpid/legacystore/jrnl/jcntl.h"
+#include "qpid/legacystore/DataTokenImpl.h"
+#include "qpid/legacystore/PreparedTransaction.h"
+#include "qpid/broker/PersistableQueue.h"
+#include "qpid/sys/Timer.h"
+#include "qpid/sys/Time.h"
+#include <boost/ptr_container/ptr_list.hpp>
+#include <boost/intrusive_ptr.hpp>
+#include "qpid/management/Manageable.h"
+#include "qmf/org/apache/qpid/legacystore/Journal.h"
+
+namespace qpid { namespace sys {
+class Timer;
+}}
+
+namespace mrg {
+namespace msgstore {
+
+class JournalImpl;
+
+class InactivityFireEvent : public qpid::sys::TimerTask
+{
+ JournalImpl* _parent;
+ qpid::sys::Mutex _ife_lock;
+
+ public:
+ InactivityFireEvent(JournalImpl* p, const qpid::sys::Duration timeout);
+ virtual ~InactivityFireEvent() {}
+ void fire();
+ inline void cancel() { qpid::sys::Mutex::ScopedLock sl(_ife_lock); _parent = 0; }
+};
+
+class GetEventsFireEvent : public qpid::sys::TimerTask
+{
+ JournalImpl* _parent;
+ qpid::sys::Mutex _gefe_lock;
+
+ public:
+ GetEventsFireEvent(JournalImpl* p, const qpid::sys::Duration timeout);
+ virtual ~GetEventsFireEvent() {}
+ void fire();
+ inline void cancel() { qpid::sys::Mutex::ScopedLock sl(_gefe_lock); _parent = 0; }
+};
+
+class JournalImpl : public qpid::broker::ExternalQueueStore, public mrg::journal::jcntl, public mrg::journal::aio_callback
+{
+ public:
+ typedef boost::function<void (JournalImpl&)> DeleteCallback;
+
+ private:
+// static qpid::sys::Mutex _static_lock;
+// static u_int32_t cnt;
+
+ qpid::sys::Timer& timer;
+ bool getEventsTimerSetFlag;
+ boost::intrusive_ptr<qpid::sys::TimerTask> getEventsFireEventsPtr;
+ qpid::sys::Mutex _getf_lock;
+ qpid::sys::Mutex _read_lock;
+
+ u_int64_t lastReadRid; // rid of last read msg for loadMsgContent() - detects out-of-order read requests
+ std::vector<u_int64_t> oooRidList; // list of out-of-order rids (greater than current rid) encountered during read sequence
+
+ bool writeActivityFlag;
+ bool flushTriggeredFlag;
+ boost::intrusive_ptr<qpid::sys::TimerTask> inactivityFireEventPtr;
+
+ // temp local vars for loadMsgContent below
+ void* _xidp;
+ void* _datap;
+ size_t _dlen;
+ mrg::journal::data_tok _dtok;
+ bool _external;
+
+ qpid::management::ManagementAgent* _agent;
+ qmf::org::apache::qpid::legacystore::Journal::shared_ptr _mgmtObject;
+ DeleteCallback deleteCallback;
+
+ public:
+
+ JournalImpl(qpid::sys::Timer& timer,
+ const std::string& journalId,
+ const std::string& journalDirectory,
+ const std::string& journalBaseFilename,
+ const qpid::sys::Duration getEventsTimeout,
+ const qpid::sys::Duration flushTimeout,
+ qpid::management::ManagementAgent* agent,
+ DeleteCallback deleteCallback=DeleteCallback() );
+
+ virtual ~JournalImpl();
+
+ void initManagement(qpid::management::ManagementAgent* agent);
+
+ void initialize(const u_int16_t num_jfiles,
+ const bool auto_expand,
+ const u_int16_t ae_max_jfiles,
+ const u_int32_t jfsize_sblks,
+ const u_int16_t wcache_num_pages,
+ const u_int32_t wcache_pgsize_sblks,
+ mrg::journal::aio_callback* const cbp);
+
+ inline void initialize(const u_int16_t num_jfiles,
+ const bool auto_expand,
+ const u_int16_t ae_max_jfiles,
+ const u_int32_t jfsize_sblks,
+ const u_int16_t wcache_num_pages,
+ const u_int32_t wcache_pgsize_sblks) {
+ initialize(num_jfiles, auto_expand, ae_max_jfiles, jfsize_sblks, wcache_num_pages, wcache_pgsize_sblks,
+ this);
+ }
+
+ void recover(const u_int16_t num_jfiles,
+ const bool auto_expand,
+ const u_int16_t ae_max_jfiles,
+ const u_int32_t jfsize_sblks,
+ const u_int16_t wcache_num_pages,
+ const u_int32_t wcache_pgsize_sblks,
+ mrg::journal::aio_callback* const cbp,
+ boost::ptr_list<msgstore::PreparedTransaction>* prep_tx_list_ptr,
+ u_int64_t& highest_rid,
+ u_int64_t queue_id);
+
+ inline void recover(const u_int16_t num_jfiles,
+ const bool auto_expand,
+ const u_int16_t ae_max_jfiles,
+ const u_int32_t jfsize_sblks,
+ const u_int16_t wcache_num_pages,
+ const u_int32_t wcache_pgsize_sblks,
+ boost::ptr_list<msgstore::PreparedTransaction>* prep_tx_list_ptr,
+ u_int64_t& highest_rid,
+ u_int64_t queue_id) {
+ recover(num_jfiles, auto_expand, ae_max_jfiles, jfsize_sblks, wcache_num_pages, wcache_pgsize_sblks,
+ this, prep_tx_list_ptr, highest_rid, queue_id);
+ }
+
+ void recover_complete();
+
+ // Temporary fn to read and save last msg read from journal so it can be assigned
+ // in chunks. To be replaced when coding to do this direct from the journal is ready.
+ // Returns true if the record is extern, false if local.
+ bool loadMsgContent(u_int64_t rid, std::string& data, size_t length, size_t offset = 0);
+
+ // Overrides for write inactivity timer
+ void enqueue_data_record(const void* const data_buff, const size_t tot_data_len,
+ const size_t this_data_len, mrg::journal::data_tok* dtokp,
+ const bool transient = false);
+
+ void enqueue_extern_data_record(const size_t tot_data_len, mrg::journal::data_tok* dtokp,
+ const bool transient = false);
+
+ void enqueue_txn_data_record(const void* const data_buff, const size_t tot_data_len,
+ const size_t this_data_len, mrg::journal::data_tok* dtokp, const std::string& xid,
+ const bool transient = false);
+
+ void enqueue_extern_txn_data_record(const size_t tot_data_len, mrg::journal::data_tok* dtokp,
+ const std::string& xid, const bool transient = false);
+
+ void dequeue_data_record(mrg::journal::data_tok* const dtokp, const bool txn_coml_commit = false);
+
+ void dequeue_txn_data_record(mrg::journal::data_tok* const dtokp, const std::string& xid, const bool txn_coml_commit = false);
+
+ void txn_abort(mrg::journal::data_tok* const dtokp, const std::string& xid);
+
+ void txn_commit(mrg::journal::data_tok* const dtokp, const std::string& xid);
+
+ void stop(bool block_till_aio_cmpl = false);
+
+ // Logging
+ void log(mrg::journal::log_level level, const std::string& log_stmt) const;
+ void log(mrg::journal::log_level level, const char* const log_stmt) const;
+
+ // Overrides for get_events timer
+ mrg::journal::iores flush(const bool block_till_aio_cmpl = false);
+
+ // TimerTask callback
+ void getEventsFire();
+ void flushFire();
+
+ // AIO callbacks
+ virtual void wr_aio_cb(std::vector<mrg::journal::data_tok*>& dtokl);
+ virtual void rd_aio_cb(std::vector<u_int16_t>& pil);
+
+ qpid::management::ManagementObject::shared_ptr GetManagementObject (void) const
+ { return _mgmtObject; }
+
+ qpid::management::Manageable::status_t ManagementMethod (uint32_t,
+ qpid::management::Args&,
+ std::string&);
+
+ void resetDeleteCallback() { deleteCallback = DeleteCallback(); }
+
+ private:
+ void free_read_buffers();
+
+ inline void setGetEventTimer()
+ {
+ getEventsFireEventsPtr->setupNextFire();
+ timer.add(getEventsFireEventsPtr);
+ getEventsTimerSetFlag = true;
+ }
+ void handleIoResult(const mrg::journal::iores r);
+
+ // Management instrumentation callbacks overridden from jcntl
+ inline void instr_incr_outstanding_aio_cnt() {
+ if (_mgmtObject.get() != 0) _mgmtObject->inc_outstandingAIOs();
+ }
+ inline void instr_decr_outstanding_aio_cnt() {
+ if (_mgmtObject.get() != 0) _mgmtObject->dec_outstandingAIOs();
+ }
+
+}; // class JournalImpl
+
+class TplJournalImpl : public JournalImpl
+{
+ public:
+ TplJournalImpl(qpid::sys::Timer& timer,
+ const std::string& journalId,
+ const std::string& journalDirectory,
+ const std::string& journalBaseFilename,
+ const qpid::sys::Duration getEventsTimeout,
+ const qpid::sys::Duration flushTimeout,
+ qpid::management::ManagementAgent* agent) :
+ JournalImpl(timer, journalId, journalDirectory, journalBaseFilename, getEventsTimeout, flushTimeout, agent)
+ {}
+
+ virtual ~TplJournalImpl() {}
+
+ // Special version of read_data_record that ignores transactions - needed when reading the TPL
+ inline mrg::journal::iores read_data_record(void** const datapp, std::size_t& dsize,
+ void** const xidpp, std::size_t& xidsize, bool& transient, bool& external,
+ mrg::journal::data_tok* const dtokp) {
+ return JournalImpl::read_data_record(datapp, dsize, xidpp, xidsize, transient, external, dtokp, true);
+ }
+ inline void read_reset() { _rmgr.invalidate(); }
+}; // class TplJournalImpl
+
+} // namespace msgstore
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JOURNALIMPL_H
diff --git a/qpid/cpp/src/qpid/legacystore/MessageStoreImpl.cpp b/qpid/cpp/src/qpid/legacystore/MessageStoreImpl.cpp
new file mode 100644
index 0000000000..69e9f48a17
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/MessageStoreImpl.cpp
@@ -0,0 +1,1732 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include "qpid/legacystore/MessageStoreImpl.h"
+
+#include "qpid/legacystore/BindingDbt.h"
+#include "qpid/legacystore/BufferValue.h"
+#include "qpid/legacystore/IdDbt.h"
+#include "qpid/legacystore/jrnl/txn_map.h"
+#include "qpid/framing/FieldValue.h"
+#include "qpid/log/Statement.h"
+#include "qmf/org/apache/qpid/legacystore/Package.h"
+#include "qpid/legacystore/StoreException.h"
+#include <dirent.h>
+#include <db.h>
+
+#define MAX_AIO_SLEEPS 100000 // tot: ~1 sec
+#define AIO_SLEEP_TIME_US 10 // 0.01 ms
+
+namespace _qmf = qmf::org::apache::qpid::legacystore;
+
+namespace mrg {
+namespace msgstore {
+
+
+const std::string MessageStoreImpl::storeTopLevelDir("rhm"); // Sets the top-level store dir name
+// FIXME aconway 2010-03-09: was 10
+qpid::sys::Duration MessageStoreImpl::defJournalGetEventsTimeout(1 * qpid::sys::TIME_MSEC); // 1ms
+qpid::sys::Duration MessageStoreImpl::defJournalFlushTimeout(500 * qpid::sys::TIME_MSEC); // 0.5s
+qpid::sys::Mutex TxnCtxt::globalSerialiser;
+
+MessageStoreImpl::TplRecoverStruct::TplRecoverStruct(const u_int64_t _rid,
+ const bool _deq_flag,
+ const bool _commit_flag,
+ const bool _tpc_flag) :
+ rid(_rid),
+ deq_flag(_deq_flag),
+ commit_flag(_commit_flag),
+ tpc_flag(_tpc_flag)
+{}
+
+MessageStoreImpl::MessageStoreImpl(qpid::broker::Broker* broker_, const char* envpath) :
+ numJrnlFiles(0),
+ autoJrnlExpand(false),
+ autoJrnlExpandMaxFiles(0),
+ jrnlFsizeSblks(0),
+ truncateFlag(false),
+ wCachePgSizeSblks(0),
+ wCacheNumPages(0),
+ tplNumJrnlFiles(0),
+ tplJrnlFsizeSblks(0),
+ tplWCachePgSizeSblks(0),
+ tplWCacheNumPages(0),
+ highestRid(0),
+ isInit(false),
+ envPath(envpath),
+ broker(broker_),
+ mgmtObject(),
+ agent(0)
+{}
+
+u_int16_t MessageStoreImpl::chkJrnlNumFilesParam(const u_int16_t param, const std::string paramName)
+{
+ u_int16_t p = param;
+ if (p < JRNL_MIN_NUM_FILES) {
+ p = JRNL_MIN_NUM_FILES;
+ QPID_LOG(warning, "parameter " << paramName << " (" << param << ") is below allowable minimum (" << JRNL_MIN_NUM_FILES << "); changing this parameter to minimum value.");
+ } else if (p > JRNL_MAX_NUM_FILES) {
+ p = JRNL_MAX_NUM_FILES;
+ QPID_LOG(warning, "parameter " << paramName << " (" << param << ") is above allowable maximum (" << JRNL_MAX_NUM_FILES << "); changing this parameter to maximum value.");
+ }
+ return p;
+}
+
+u_int32_t MessageStoreImpl::chkJrnlFileSizeParam(const u_int32_t param, const std::string paramName, const u_int32_t wCachePgSizeSblks)
+{
+ u_int32_t p = param;
+ u_int32_t min = JRNL_MIN_FILE_SIZE / JRNL_RMGR_PAGE_SIZE;
+ u_int32_t max = JRNL_MAX_FILE_SIZE / JRNL_RMGR_PAGE_SIZE;
+ if (p < min) {
+ p = min;
+ QPID_LOG(warning, "parameter " << paramName << " (" << param << ") is below allowable minimum (" << min << "); changing this parameter to minimum value.");
+ } else if (p > max) {
+ p = max;
+ QPID_LOG(warning, "parameter " << paramName << " (" << param << ") is above allowable maximum (" << max << "); changing this parameter to maximum value.");
+ }
+ if (wCachePgSizeSblks > p * JRNL_RMGR_PAGE_SIZE) {
+ std::ostringstream oss;
+ oss << "Cannot create store with file size less than write page cache size. [file size = " << p << " (" << (p * JRNL_RMGR_PAGE_SIZE / 2) << " kB); write page cache = " << (wCachePgSizeSblks / 2) << " kB]";
+ THROW_STORE_EXCEPTION(oss.str());
+ }
+ return p;
+}
+
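+// Rounds the requested write-cache page size (KiB) to an allowed power of two between 1 and 128.
+// Examples: a request of 20 falls in the [12,24) band and becomes 16; 100 becomes 128; 0 selects the
+// default page size of JRNL_WMGR_DEF_PAGE_SIZE sblks (32 KiB, assuming the usual jcfg constants of
+// 128-byte dblks and 4-dblk sblks).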
+u_int32_t MessageStoreImpl::chkJrnlWrPageCacheSize(const u_int32_t param, const std::string paramName, const u_int16_t jrnlFsizePgs)
+{
+ u_int32_t p = param;
+ switch (p)
+ {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ if (jrnlFsizePgs == 1) {
+ p = 64;
+ QPID_LOG(warning, "parameter " << paramName << " (" << param << ") cannot set a page size greater than the journal file size; changing this parameter to the journal file size (" << p << ")");
+ }
+ break;
+ default:
+ if (p == 0) {
+ // For zero value, use default
+ p = JRNL_WMGR_DEF_PAGE_SIZE * JRNL_DBLK_SIZE * JRNL_SBLK_SIZE / 1024;
+ QPID_LOG(warning, "parameter " << paramName << " (" << param << ") must be a power of 2 between 1 and 128; changing this parameter to default value (" << p << ")");
+ } else {
+ // For any positive value, use closest value
+ if (p < 6) p = 4;
+ else if (p < 12) p = 8;
+ else if (p < 24) p = 16;
+ else if (p < 48) p = 32;
+ else if (p < 96) p = 64;
+ else p = 128;
+ QPID_LOG(warning, "parameter " << paramName << " (" << param << ") must be a power of 2 between 1 and 128; changing this parameter to closest allowable value (" << p << ")");
+ }
+ }
+ return p;
+}
+
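+// Derives the write-cache page count so the total cache size tracks the 1 MiB default, scaled down
+// for small pages. Worked examples, assuming the usual jcfg constants (128-byte dblks, 4-dblk sblks,
+// so defTotWCacheSize = 2048 sblks = 1 MiB):
+//   4 KiB pages   ->   8 sblks/page -> 2048/8/4  = 64 pages (256 KiB total cache)
+//   16 KiB pages  ->  32 sblks/page -> 2048/32/2 = 32 pages (512 KiB total cache)
+//   128 KiB pages -> 256 sblks/page -> 2048/256  =  8 pages (1 MiB total cache)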
+u_int16_t MessageStoreImpl::getJrnlWrNumPages(const u_int32_t wrPageSizeKib)
+{
+ u_int32_t wrPageSizeSblks = wrPageSizeKib * 1024 / JRNL_DBLK_SIZE / JRNL_SBLK_SIZE; // convert from KiB to number sblks
+ u_int32_t defTotWCacheSize = JRNL_WMGR_DEF_PAGE_SIZE * JRNL_WMGR_DEF_PAGES; // in sblks. Currently 2048 sblks (1 MiB).
+ switch (wrPageSizeKib)
+ {
+ case 1:
+ case 2:
+ case 4:
+ // 256 KiB total cache
+ return defTotWCacheSize / wrPageSizeSblks / 4;
+ case 8:
+ case 16:
+ // 512 KiB total cache
+ return defTotWCacheSize / wrPageSizeSblks / 2;
+ default: // 32, 64, 128
+ // 1 MiB total cache
+ return defTotWCacheSize / wrPageSizeSblks;
+ }
+}
+
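+// Validates the auto-expand option pair: auto-expand is disabled when switched off or when num-jfiles
+// is already at JRNL_MAX_NUM_FILES; an over-large auto-expand-max-jfiles is clamped to the maximum; and
+// a value left at its default while num-jfiles is non-default is bumped to twice num-jfiles (capped at
+// the maximum). Otherwise the supplied value is used as-is.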
+void MessageStoreImpl::chkJrnlAutoExpandOptions(const StoreOptions* opts,
+ bool& autoJrnlExpand,
+ u_int16_t& autoJrnlExpandMaxFiles,
+ const std::string& autoJrnlExpandMaxFilesParamName,
+ const u_int16_t numJrnlFiles,
+ const std::string& numJrnlFilesParamName)
+{
+ if (!opts->autoJrnlExpand) {
+ // auto-expand disabled
+ autoJrnlExpand = false;
+ autoJrnlExpandMaxFiles = 0;
+ return;
+ }
+ u_int16_t p = opts->autoJrnlExpandMaxFiles;
+ if (numJrnlFiles == JRNL_MAX_NUM_FILES) {
+ // num-jfiles at max; disable auto-expand
+ autoJrnlExpand = false;
+ autoJrnlExpandMaxFiles = 0;
+ QPID_LOG(warning, "parameter " << autoJrnlExpandMaxFilesParamName << " (" << p << ") must be higher than parameter "
+ << numJrnlFilesParamName << " (" << numJrnlFiles << ") which is at the maximum allowable value; disabling auto-expand.");
+ return;
+ }
+ if (p > JRNL_MAX_NUM_FILES) {
+ // auto-expand-max-jfiles higher than max allowable, adjust
+ autoJrnlExpand = true;
+ autoJrnlExpandMaxFiles = JRNL_MAX_NUM_FILES;
+ QPID_LOG(warning, "parameter " << autoJrnlExpandMaxFilesParamName << " (" << p << ") is above allowable maximum ("
+ << JRNL_MAX_NUM_FILES << "); changing this parameter to maximum value.");
+ return;
+ }
+ if (p && p == defAutoJrnlExpandMaxFiles && numJrnlFiles != defTplNumJrnlFiles) {
+ // num-jfiles is different from the default AND max-auto-expand-jfiles is still at default
+ // change value of max-auto-expand-jfiles
+ autoJrnlExpand = true;
+ if (2 * numJrnlFiles <= JRNL_MAX_NUM_FILES) {
+ autoJrnlExpandMaxFiles = 2 * numJrnlFiles;
+ QPID_LOG(warning, "parameter " << autoJrnlExpandMaxFilesParamName << " adjusted from its default value ("
+ << defAutoJrnlExpandMaxFiles << ") to twice that of parameter " << numJrnlFilesParamName << " (" << autoJrnlExpandMaxFiles << ").");
+ } else {
+ autoJrnlExpandMaxFiles = JRNL_MAX_NUM_FILES;
+ QPID_LOG(warning, "parameter " << autoJrnlExpandMaxFilesParamName << " adjusted from its default to maximum allowable value ("
+ << JRNL_MAX_NUM_FILES << ") because of the value of " << numJrnlFilesParamName << " (" << numJrnlFiles << ").");
+ }
+ return;
+ }
+ // No adjustments req'd, set values
+ autoJrnlExpand = true;
+ autoJrnlExpandMaxFiles = p;
+}
+
+void MessageStoreImpl::initManagement ()
+{
+ if (broker != 0) {
+ agent = broker->getManagementAgent();
+ if (agent != 0) {
+ _qmf::Package packageInitializer(agent);
+ mgmtObject = _qmf::Store::shared_ptr (
+ new _qmf::Store(agent, this, broker));
+
+ mgmtObject->set_location(storeDir);
+ mgmtObject->set_defaultInitialFileCount(numJrnlFiles);
+ mgmtObject->set_defaultDataFileSize(jrnlFsizeSblks / JRNL_RMGR_PAGE_SIZE);
+ mgmtObject->set_tplIsInitialized(false);
+ mgmtObject->set_tplDirectory(getTplBaseDir());
+ mgmtObject->set_tplWritePageSize(tplWCachePgSizeSblks * JRNL_SBLK_SIZE * JRNL_DBLK_SIZE);
+ mgmtObject->set_tplWritePages(tplWCacheNumPages);
+ mgmtObject->set_tplInitialFileCount(tplNumJrnlFiles);
+ mgmtObject->set_tplDataFileSize(tplJrnlFsizeSblks * JRNL_SBLK_SIZE * JRNL_DBLK_SIZE);
+ mgmtObject->set_tplCurrentFileCount(tplNumJrnlFiles);
+
+ agent->addObject(mgmtObject, 0, true);
+
+ // Initialize all existing queues (ie those recovered before management was initialized)
+ for (JournalListMapItr i=journalList.begin(); i!=journalList.end(); i++) {
+ i->second->initManagement(agent);
+ }
+ }
+ }
+}
+
+bool MessageStoreImpl::init(const qpid::Options* options)
+{
+ // Extract and check options
+ const StoreOptions* opts = static_cast<const StoreOptions*>(options);
+ u_int16_t numJrnlFiles = chkJrnlNumFilesParam(opts->numJrnlFiles, "num-jfiles");
+ u_int32_t jrnlFsizePgs = chkJrnlFileSizeParam(opts->jrnlFsizePgs, "jfile-size-pgs");
+ u_int32_t jrnlWrCachePageSizeKib = chkJrnlWrPageCacheSize(opts->wCachePageSizeKib, "wcache-page-size", jrnlFsizePgs);
+ u_int16_t tplNumJrnlFiles = chkJrnlNumFilesParam(opts->tplNumJrnlFiles, "tpl-num-jfiles");
+ u_int32_t tplJrnlFSizePgs = chkJrnlFileSizeParam(opts->tplJrnlFsizePgs, "tpl-jfile-size-pgs");
+ u_int32_t tplJrnlWrCachePageSizeKib = chkJrnlWrPageCacheSize(opts->tplWCachePageSizeKib, "tpl-wcache-page-size", tplJrnlFSizePgs);
+ bool autoJrnlExpand;
+ u_int16_t autoJrnlExpandMaxFiles;
+ chkJrnlAutoExpandOptions(opts, autoJrnlExpand, autoJrnlExpandMaxFiles, "auto-expand-max-jfiles", numJrnlFiles, "num-jfiles");
+
+ // Pass option values to init(...)
+ return init(opts->storeDir, numJrnlFiles, jrnlFsizePgs, opts->truncateFlag, jrnlWrCachePageSizeKib, tplNumJrnlFiles, tplJrnlFSizePgs, tplJrnlWrCachePageSizeKib, autoJrnlExpand, autoJrnlExpandMaxFiles);
+}
+
+// These params, taken from options, are assumed to be correct and verified
+bool MessageStoreImpl::init(const std::string& dir,
+ u_int16_t jfiles,
+ u_int32_t jfileSizePgs,
+ const bool truncateFlag,
+ u_int32_t wCachePageSizeKib,
+ u_int16_t tplJfiles,
+ u_int32_t tplJfileSizePgs,
+ u_int32_t tplWCachePageSizeKib,
+ bool autoJExpand,
+ u_int16_t autoJExpandMaxFiles)
+{
+ if (isInit) return true;
+
+ // Set geometry members (converting to correct units where req'd)
+ numJrnlFiles = jfiles;
+ jrnlFsizeSblks = jfileSizePgs * JRNL_RMGR_PAGE_SIZE;
+ wCachePgSizeSblks = wCachePageSizeKib * 1024 / JRNL_DBLK_SIZE / JRNL_SBLK_SIZE; // convert from KiB to number sblks
+ wCacheNumPages = getJrnlWrNumPages(wCachePageSizeKib);
+ tplNumJrnlFiles = tplJfiles;
+ tplJrnlFsizeSblks = tplJfileSizePgs * JRNL_RMGR_PAGE_SIZE;
+ tplWCachePgSizeSblks = tplWCachePageSizeKib * 1024 / JRNL_DBLK_SIZE / JRNL_SBLK_SIZE; // convert from KiB to number sblks
+ tplWCacheNumPages = getJrnlWrNumPages(tplWCachePageSizeKib);
+ autoJrnlExpand = autoJExpand;
+ autoJrnlExpandMaxFiles = autoJExpandMaxFiles;
+ if (dir.size()>0) storeDir = dir;
+
+ if (truncateFlag)
+ truncateInit(false);
+ else
+ init();
+
+ QPID_LOG(notice, "Store module initialized; store-dir=" << dir);
+ QPID_LOG(info, "> Default files per journal: " << jfiles);
+// TODO: Uncomment these lines when auto-expand is enabled.
+// QPID_LOG(info, "> Auto-expand " << (autoJrnlExpand ? "enabled" : "disabled"));
+// if (autoJrnlExpand) QPID_LOG(info, "> Max auto-expand journal files: " << autoJrnlExpandMaxFiles);
+ QPID_LOG(info, "> Default journal file size: " << jfileSizePgs << " (wpgs)");
+ QPID_LOG(info, "> Default write cache page size: " << wCachePageSizeKib << " (KiB)");
+ QPID_LOG(info, "> Default number of write cache pages: " << wCacheNumPages);
+ QPID_LOG(info, "> TPL files per journal: " << tplNumJrnlFiles);
+ QPID_LOG(info, "> TPL journal file size: " << tplJfileSizePgs << " (wpgs)");
+ QPID_LOG(info, "> TPL write cache page size: " << tplWCachePageSizeKib << " (KiB)");
+ QPID_LOG(info, "> TPL number of write cache pages: " << tplWCacheNumPages);
+
+ return isInit;
+}
+
+void MessageStoreImpl::init()
+{
+ const int retryMax = 3;
+ int bdbRetryCnt = 0;
+ do {
+ if (bdbRetryCnt++ > 0)
+ {
+ closeDbs();
+ ::usleep(1000000); // 1 sec delay
+ QPID_LOG(error, "Previoius BDB store initialization failed, retrying (" << bdbRetryCnt << " of " << retryMax << ")...");
+ }
+
+ try {
+ journal::jdir::create_dir(getBdbBaseDir());
+
+ dbenv.reset(new DbEnv(0));
+ dbenv->set_errpfx("msgstore");
+ dbenv->set_lg_regionmax(256000); // default = 65000
+ dbenv->open(getBdbBaseDir().c_str(), DB_THREAD | DB_CREATE | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_USE_ENVIRON | DB_RECOVER, 0);
+
+ // Databases are constructed here instead of the constructor so that the DB_RECOVER flag can be used
+ // against the database environment. Recover can only be performed if no databases have been created
+ // against the environment at the time of recovery, as recovery invalidates the environment.
+ queueDb.reset(new Db(dbenv.get(), 0));
+ dbs.push_back(queueDb);
+ configDb.reset(new Db(dbenv.get(), 0));
+ dbs.push_back(configDb);
+ exchangeDb.reset(new Db(dbenv.get(), 0));
+ dbs.push_back(exchangeDb);
+ mappingDb.reset(new Db(dbenv.get(), 0));
+ dbs.push_back(mappingDb);
+ bindingDb.reset(new Db(dbenv.get(), 0));
+ dbs.push_back(bindingDb);
+ generalDb.reset(new Db(dbenv.get(), 0));
+ dbs.push_back(generalDb);
+
+ TxnCtxt txn;
+ txn.begin(dbenv.get(), false);
+ try {
+ open(queueDb, txn.get(), "queues.db", false);
+ open(configDb, txn.get(), "config.db", false);
+ open(exchangeDb, txn.get(), "exchanges.db", false);
+ open(mappingDb, txn.get(), "mappings.db", true);
+ open(bindingDb, txn.get(), "bindings.db", true);
+ open(generalDb, txn.get(), "general.db", false);
+ txn.commit();
+ } catch (...) { txn.abort(); throw; }
+ // NOTE: during normal initialization, agent == 0 because the store is initialized before the management infrastructure.
+ // However during a truncated initialization in a cluster, agent != 0. We always pass 0 as the agent for the
+ // TplStore to keep things consistent in a cluster. See https://bugzilla.redhat.com/show_bug.cgi?id=681026
+ tplStorePtr.reset(new TplJournalImpl(broker->getTimer(), "TplStore", getTplBaseDir(), "tpl", defJournalGetEventsTimeout, defJournalFlushTimeout, 0));
+ isInit = true;
+ } catch (const DbException& e) {
+ if (e.get_errno() == DB_VERSION_MISMATCH)
+ {
+ QPID_LOG(error, "Database environment mismatch: This version of db4 does not match that which created the store database.: " << e.what());
+ THROW_STORE_EXCEPTION_2("Database environment mismatch: This version of db4 does not match that which created the store database. "
+ "(If recovery is not important, delete the contents of the store directory. Otherwise, try upgrading the database using "
+ "db_upgrade or using db_recover - but the db4-utils package must also be installed to use these utilities.)", e);
+ }
+ QPID_LOG(error, "BDB exception occurred while initializing store: " << e.what());
+ if (bdbRetryCnt >= retryMax)
+ THROW_STORE_EXCEPTION_2("BDB exception occurred while initializing store", e);
+ } catch (const StoreException&) {
+ throw;
+ } catch (const journal::jexception& e) {
+ QPID_LOG(error, "Journal Exception occurred while initializing store: " << e);
+ THROW_STORE_EXCEPTION_2("Journal Exception occurred while initializing store", e.what());
+ } catch (...) {
+ QPID_LOG(error, "Unknown exception occurred while initializing store.");
+ throw;
+ }
+ } while (!isInit);
+}
+
+void MessageStoreImpl::finalize()
+{
+ if (tplStorePtr.get() && tplStorePtr->is_ready()) tplStorePtr->stop(true);
+ {
+ qpid::sys::Mutex::ScopedLock sl(journalListLock);
+ for (JournalListMapItr i = journalList.begin(); i != journalList.end(); i++)
+ {
+ JournalImpl* jQueue = i->second;
+ jQueue->resetDeleteCallback();
+ if (jQueue->is_ready()) jQueue->stop(true);
+ }
+ }
+
+ if (mgmtObject.get() != 0) {
+ mgmtObject->resourceDestroy();
+ mgmtObject.reset();
+ }
+}
+
+void MessageStoreImpl::truncateInit(const bool saveStoreContent)
+{
+ if (isInit) {
+ {
+ qpid::sys::Mutex::ScopedLock sl(journalListLock);
+ if (journalList.size()) { // check no queues exist
+ std::ostringstream oss;
+ oss << "truncateInit() called with " << journalList.size() << " queues still in existence";
+ THROW_STORE_EXCEPTION(oss.str());
+ }
+ }
+ closeDbs();
+ dbs.clear();
+ if (tplStorePtr->is_ready()) tplStorePtr->stop(true);
+ dbenv->close(0);
+ isInit = false;
+ }
+ std::ostringstream oss;
+ oss << storeDir << "/" << storeTopLevelDir;
+ if (saveStoreContent) {
+ std::string dir = mrg::journal::jdir::push_down(storeDir, storeTopLevelDir, "cluster");
+ QPID_LOG(notice, "Store directory " << oss.str() << " was pushed down (saved) into directory " << dir << ".");
+ } else {
+ mrg::journal::jdir::delete_dir(oss.str().c_str());
+ QPID_LOG(notice, "Store directory " << oss.str() << " was truncated.");
+ }
+ init();
+}
+
+void MessageStoreImpl::chkTplStoreInit()
+{
+ // Prevent multiple threads from late-initializing the TPL
+ qpid::sys::Mutex::ScopedLock sl(tplInitLock);
+ if (!tplStorePtr->is_ready()) {
+ journal::jdir::create_dir(getTplBaseDir());
+ tplStorePtr->initialize(tplNumJrnlFiles, false, 0, tplJrnlFsizeSblks, tplWCacheNumPages, tplWCachePgSizeSblks);
+ if (mgmtObject.get() != 0) mgmtObject->set_tplIsInitialized(true);
+ }
+}
+
+void MessageStoreImpl::open(db_ptr db,
+ DbTxn* txn,
+ const char* file,
+ bool dupKey)
+{
+ if(dupKey) db->set_flags(DB_DUPSORT);
+ db->open(txn, file, 0, DB_BTREE, DB_CREATE | DB_THREAD, 0);
+}
+
+void MessageStoreImpl::closeDbs()
+{
+ for (std::list<db_ptr >::iterator i = dbs.begin(); i != dbs.end(); i++) {
+ (*i)->close(0);
+ }
+ dbs.clear();
+}
+
+MessageStoreImpl::~MessageStoreImpl()
+{
+ finalize();
+ try {
+ closeDbs();
+ } catch (const DbException& e) {
+ QPID_LOG(error, "Error closing BDB databases: " << e.what());
+ } catch (const journal::jexception& e) {
+ QPID_LOG(error, "Error: " << e.what());
+ } catch (const std::exception& e) {
+ QPID_LOG(error, "Error: " << e.what());
+ } catch (...) {
+ QPID_LOG(error, "Unknown error in MessageStoreImpl::~MessageStoreImpl()");
+ }
+
+ if (mgmtObject.get() != 0) {
+ mgmtObject->resourceDestroy();
+ mgmtObject.reset();
+ }
+}
+
+void MessageStoreImpl::create(qpid::broker::PersistableQueue& queue,
+ const qpid::framing::FieldTable& args)
+{
+ checkInit();
+ if (queue.getPersistenceId()) {
+ THROW_STORE_EXCEPTION("Queue already created: " + queue.getName());
+ }
+ JournalImpl* jQueue = 0;
+ qpid::framing::FieldTable::ValuePtr value;
+
+ u_int16_t localFileCount = numJrnlFiles;
+ bool localAutoExpandFlag = autoJrnlExpand;
+ u_int16_t localAutoExpandMaxFileCount = autoJrnlExpandMaxFiles;
+ u_int32_t localFileSizeSblks = jrnlFsizeSblks;
+
+ value = args.get("qpid.file_count");
+ if (value.get() != 0 && !value->empty() && value->convertsTo<int>())
+ localFileCount = chkJrnlNumFilesParam((u_int16_t) value->get<int>(), "qpid.file_count");
+
+ value = args.get("qpid.file_size");
+ if (value.get() != 0 && !value->empty() && value->convertsTo<int>())
+ localFileSizeSblks = chkJrnlFileSizeParam((u_int32_t) value->get<int>(), "qpid.file_size", wCachePgSizeSblks) * JRNL_RMGR_PAGE_SIZE;
+
+ if (queue.getName().size() == 0)
+ {
+ QPID_LOG(error, "Cannot create store for empty (null) queue name - ignoring and attempting to continue.");
+ return;
+ }
+
+ jQueue = new JournalImpl(broker->getTimer(), queue.getName(), getJrnlDir(queue), std::string("JournalData"),
+ defJournalGetEventsTimeout, defJournalFlushTimeout, agent,
+ boost::bind(&MessageStoreImpl::journalDeleted, this, _1));
+ {
+ qpid::sys::Mutex::ScopedLock sl(journalListLock);
+ journalList[queue.getName()]=jQueue;
+ }
+
+ value = args.get("qpid.auto_expand");
+ if (value.get() != 0 && !value->empty() && value->convertsTo<bool>())
+ localAutoExpandFlag = (bool) value->get<bool>();
+
+ value = args.get("qpid.auto_expand_max_jfiles");
+ if (value.get() != 0 && !value->empty() && value->convertsTo<int>())
+ localAutoExpandMaxFileCount = (u_int16_t) value->get<int>();
+
+ queue.setExternalQueueStore(dynamic_cast<qpid::broker::ExternalQueueStore*>(jQueue));
+ try {
+ // init will create the deque's for the init...
+ jQueue->initialize(localFileCount, localAutoExpandFlag, localAutoExpandMaxFileCount, localFileSizeSblks, wCacheNumPages, wCachePgSizeSblks);
+ } catch (const journal::jexception& e) {
+ THROW_STORE_EXCEPTION(std::string("Queue ") + queue.getName() + ": create() failed: " + e.what());
+ }
+ try {
+ if (!create(queueDb, queueIdSequence, queue)) {
+ THROW_STORE_EXCEPTION("Queue already exists: " + queue.getName());
+ }
+ } catch (const DbException& e) {
+ THROW_STORE_EXCEPTION_2("Error creating queue named " + queue.getName(), e);
+ }
+}
+
+void MessageStoreImpl::destroy(qpid::broker::PersistableQueue& queue)
+{
+ checkInit();
+ destroy(queueDb, queue);
+ deleteBindingsForQueue(queue);
+ qpid::broker::ExternalQueueStore* eqs = queue.getExternalQueueStore();
+ if (eqs) {
+ JournalImpl* jQueue = static_cast<JournalImpl*>(eqs);
+ jQueue->delete_jrnl_files();
+ queue.setExternalQueueStore(0); // will delete the journal if exists
+ {
+ qpid::sys::Mutex::ScopedLock sl(journalListLock);
+ journalList.erase(queue.getName());
+ }
+ }
+}
+
+void MessageStoreImpl::create(const qpid::broker::PersistableExchange& exchange,
+ const qpid::framing::FieldTable& /*args*/)
+{
+ checkInit();
+ if (exchange.getPersistenceId()) {
+ THROW_STORE_EXCEPTION("Exchange already created: " + exchange.getName());
+ }
+ try {
+ if (!create(exchangeDb, exchangeIdSequence, exchange)) {
+ THROW_STORE_EXCEPTION("Exchange already exists: " + exchange.getName());
+ }
+ } catch (const DbException& e) {
+ THROW_STORE_EXCEPTION_2("Error creating exchange named " + exchange.getName(), e);
+ }
+}
+
+void MessageStoreImpl::destroy(const qpid::broker::PersistableExchange& exchange)
+{
+ checkInit();
+ destroy(exchangeDb, exchange);
+ //need to also delete bindings
+ IdDbt key(exchange.getPersistenceId());
+ bindingDb->del(0, &key, DB_AUTO_COMMIT);
+}
+
+void MessageStoreImpl::create(const qpid::broker::PersistableConfig& general)
+{
+ checkInit();
+ if (general.getPersistenceId()) {
+ THROW_STORE_EXCEPTION("General configuration item already created");
+ }
+ try {
+ if (!create(generalDb, generalIdSequence, general)) {
+ THROW_STORE_EXCEPTION("General configuration already exists");
+ }
+ } catch (const DbException& e) {
+ THROW_STORE_EXCEPTION_2("Error creating general configuration", e);
+ }
+}
+
+void MessageStoreImpl::destroy(const qpid::broker::PersistableConfig& general)
+{
+ checkInit();
+ destroy(generalDb, general);
+}
+
+bool MessageStoreImpl::create(db_ptr db,
+ IdSequence& seq,
+ const qpid::broker::Persistable& p)
+{
+ u_int64_t id (seq.next());
+ Dbt key(&id, sizeof(id));
+ BufferValue value (p);
+
+ int status;
+ TxnCtxt txn;
+ txn.begin(dbenv.get(), true);
+ try {
+ status = db->put(txn.get(), &key, &value, DB_NOOVERWRITE);
+ txn.commit();
+ } catch (...) {
+ txn.abort();
+ throw;
+ }
+ if (status == DB_KEYEXIST) {
+ return false;
+ } else {
+ p.setPersistenceId(id);
+ return true;
+ }
+}
+
+void MessageStoreImpl::destroy(db_ptr db, const qpid::broker::Persistable& p)
+{
+ qpid::sys::Mutex::ScopedLock sl(bdbLock);
+ IdDbt key(p.getPersistenceId());
+ db->del(0, &key, DB_AUTO_COMMIT);
+}
+
+
+void MessageStoreImpl::bind(const qpid::broker::PersistableExchange& e,
+ const qpid::broker::PersistableQueue& q,
+ const std::string& k,
+ const qpid::framing::FieldTable& a)
+{
+ checkInit();
+ IdDbt key(e.getPersistenceId());
+ BindingDbt value(e, q, k, a);
+ TxnCtxt txn;
+ txn.begin(dbenv.get(), true);
+ try {
+ put(bindingDb, txn.get(), key, value);
+ txn.commit();
+ } catch (...) {
+ txn.abort();
+ throw;
+ }
+}
+
+void MessageStoreImpl::unbind(const qpid::broker::PersistableExchange& e,
+ const qpid::broker::PersistableQueue& q,
+ const std::string& k,
+ const qpid::framing::FieldTable&)
+{
+ checkInit();
+ deleteBinding(e, q, k);
+}
+
+void MessageStoreImpl::recover(qpid::broker::RecoveryManager& registry)
+{
+ checkInit();
+ txn_list prepared;
+ recoverLockedMappings(prepared);
+
+ queue_index queues;//id->queue
+ exchange_index exchanges;//id->exchange
+ message_index messages;//id->message
+
+ TxnCtxt txn;
+ txn.begin(dbenv.get(), false);
+ try {
+ //read all queues, calls recoversMessages
+ recoverQueues(txn, registry, queues, prepared, messages);
+
+ //recover exchange & bindings:
+ recoverExchanges(txn, registry, exchanges);
+ recoverBindings(txn, exchanges, queues);
+
+ //recover general-purpose configuration
+ recoverGeneral(txn, registry);
+
+ txn.commit();
+ } catch (const DbException& e) {
+ txn.abort();
+ THROW_STORE_EXCEPTION_2("Error on recovery", e);
+ } catch (...) {
+ txn.abort();
+ throw;
+ }
+
+ //recover transactions:
+ for (txn_list::iterator i = prepared.begin(); i != prepared.end(); i++) {
+ const PreparedTransaction pt = *i;
+ if (mgmtObject.get() != 0) {
+ mgmtObject->inc_tplTransactionDepth();
+ mgmtObject->inc_tplTxnPrepares();
+ }
+
+ std::string xid = pt.xid;
+
+ // Restore data token state in TxnCtxt
+ TplRecoverMapCitr citr = tplRecoverMap.find(xid);
+ if (citr == tplRecoverMap.end()) THROW_STORE_EXCEPTION("XID not found in tplRecoverMap");
+
+ // If a record is found that is dequeued but not committed/aborted from tplStore, then a complete() call
+ // was interrupted part way through committing/aborting the impacted queues. Complete this process.
+ bool incomplTplTxnFlag = citr->second.deq_flag;
+
+ if (citr->second.tpc_flag) {
+ // Dtx (2PC) transaction
+ TPCTxnCtxt* tpcc = new TPCTxnCtxt(xid, &messageIdSequence);
+ std::auto_ptr<qpid::broker::TPCTransactionContext> txn(tpcc);
+ tpcc->recoverDtok(citr->second.rid, xid);
+ tpcc->prepare(tplStorePtr.get());
+
+ qpid::broker::RecoverableTransaction::shared_ptr dtx;
+ if (!incomplTplTxnFlag) dtx = registry.recoverTransaction(xid, txn);
+ if (pt.enqueues.get()) {
+ for (LockedMappings::iterator j = pt.enqueues->begin(); j != pt.enqueues->end(); j++) {
+ tpcc->addXidRecord(queues[j->first]->getExternalQueueStore());
+ if (!incomplTplTxnFlag) dtx->enqueue(queues[j->first], messages[j->second]);
+ }
+ }
+ if (pt.dequeues.get()) {
+ for (LockedMappings::iterator j = pt.dequeues->begin(); j != pt.dequeues->end(); j++) {
+ tpcc->addXidRecord(queues[j->first]->getExternalQueueStore());
+ if (!incomplTplTxnFlag) dtx->dequeue(queues[j->first], messages[j->second]);
+ }
+ }
+
+ if (incomplTplTxnFlag) {
+ tpcc->complete(citr->second.commit_flag);
+ }
+ } else {
+ // Local (1PC) transaction
+ boost::shared_ptr<TxnCtxt> opcc(new TxnCtxt(xid, &messageIdSequence));
+ opcc->recoverDtok(citr->second.rid, xid);
+ opcc->prepare(tplStorePtr.get());
+
+ if (pt.enqueues.get()) {
+ for (LockedMappings::iterator j = pt.enqueues->begin(); j != pt.enqueues->end(); j++) {
+ opcc->addXidRecord(queues[j->first]->getExternalQueueStore());
+ }
+ }
+ if (pt.dequeues.get()) {
+ for (LockedMappings::iterator j = pt.dequeues->begin(); j != pt.dequeues->end(); j++) {
+ opcc->addXidRecord(queues[j->first]->getExternalQueueStore());
+ }
+ }
+ if (incomplTplTxnFlag) {
+ opcc->complete(citr->second.commit_flag);
+ } else {
+ completed(*opcc.get(), citr->second.commit_flag);
+ }
+ }
+ }
+ registry.recoveryComplete();
+}
+
+void MessageStoreImpl::recoverQueues(TxnCtxt& txn,
+ qpid::broker::RecoveryManager& registry,
+ queue_index& queue_index,
+ txn_list& prepared,
+ message_index& messages)
+{
+ Cursor queues;
+ queues.open(queueDb, txn.get());
+
+ u_int64_t maxQueueId(1);
+
+ IdDbt key;
+ Dbt value;
+ //read all queues
+ while (queues.next(key, value)) {
+ qpid::framing::Buffer buffer(reinterpret_cast<char*>(value.get_data()), value.get_size());
+ //create a Queue instance
+ qpid::broker::RecoverableQueue::shared_ptr queue = registry.recoverQueue(buffer);
+ //set the persistenceId and update max as required
+ queue->setPersistenceId(key.id);
+
+ const std::string queueName = queue->getName().c_str();
+ JournalImpl* jQueue = 0;
+ if (queueName.size() == 0)
+ {
+ QPID_LOG(error, "Cannot recover empty (null) queue name - ignoring and attempting to continue.");
+ break;
+ }
+ jQueue = new JournalImpl(broker->getTimer(), queueName, getJrnlHashDir(queueName), std::string("JournalData"),
+ defJournalGetEventsTimeout, defJournalFlushTimeout, agent,
+ boost::bind(&MessageStoreImpl::journalDeleted, this, _1));
+ {
+ qpid::sys::Mutex::ScopedLock sl(journalListLock);
+ journalList[queueName] = jQueue;
+ }
+ queue->setExternalQueueStore(dynamic_cast<qpid::broker::ExternalQueueStore*>(jQueue));
+
+ try
+ {
+ long rcnt = 0L; // recovered msg count
+ long idcnt = 0L; // in-doubt msg count
+ u_int64_t thisHighestRid = 0ULL;
+ jQueue->recover(numJrnlFiles, autoJrnlExpand, autoJrnlExpandMaxFiles, jrnlFsizeSblks, wCacheNumPages, wCachePgSizeSblks, &prepared, thisHighestRid, key.id); // start recovery
+ if (highestRid == 0ULL)
+ highestRid = thisHighestRid;
+ else if (thisHighestRid - highestRid < 0x8000000000000000ULL) // RFC 1982 comparison for unsigned 64-bit
+ highestRid = thisHighestRid;
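+            // i.e. serial-number arithmetic: thisHighestRid supersedes highestRid when the unsigned
+            // difference is below 2^63, so after wrap-around a thisHighestRid of 0x10 still beats a
+            // highestRid of 0xFFFFFFFFFFFFFFF0 (0x10 - 0xFFFFFFFFFFFFFFF0 == 0x20 modulo 2^64).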
+ recoverMessages(txn, registry, queue, prepared, messages, rcnt, idcnt);
+ QPID_LOG(info, "Recovered queue \"" << queueName << "\": " << rcnt << " messages recovered; " << idcnt << " messages in-doubt.");
+ jQueue->recover_complete(); // start journal.
+ } catch (const journal::jexception& e) {
+ THROW_STORE_EXCEPTION(std::string("Queue ") + queueName + ": recoverQueues() failed: " + e.what());
+ }
+ //read all messages: done on a per queue basis if using Journal
+
+ queue_index[key.id] = queue;
+ maxQueueId = std::max(key.id, maxQueueId);
+ }
+
+ // NOTE: highestRid is set by both recoverQueues() and recoverTplStore() as
+ // the messageIdSequence is used for both queue journals and the tpl journal.
+ messageIdSequence.reset(highestRid + 1);
+ QPID_LOG(info, "Most recent persistence id found: 0x" << std::hex << highestRid << std::dec);
+
+ queueIdSequence.reset(maxQueueId + 1);
+}
+
+
+void MessageStoreImpl::recoverExchanges(TxnCtxt& txn,
+ qpid::broker::RecoveryManager& registry,
+ exchange_index& index)
+{
+ //TODO: this is a copy&paste from recoverQueues - refactor!
+ Cursor exchanges;
+ exchanges.open(exchangeDb, txn.get());
+
+ u_int64_t maxExchangeId(1);
+ IdDbt key;
+ Dbt value;
+ //read all exchanges
+ while (exchanges.next(key, value)) {
+ qpid::framing::Buffer buffer(reinterpret_cast<char*>(value.get_data()), value.get_size());
+ //create a Exchange instance
+ qpid::broker::RecoverableExchange::shared_ptr exchange = registry.recoverExchange(buffer);
+ if (exchange) {
+ //set the persistenceId and update max as required
+ exchange->setPersistenceId(key.id);
+ index[key.id] = exchange;
+ QPID_LOG(info, "Recovered exchange \"" << exchange->getName() << '"');
+ }
+ maxExchangeId = std::max(key.id, maxExchangeId);
+ }
+ exchangeIdSequence.reset(maxExchangeId + 1);
+}
+
+void MessageStoreImpl::recoverBindings(TxnCtxt& txn,
+ exchange_index& exchanges,
+ queue_index& queues)
+{
+ Cursor bindings;
+ bindings.open(bindingDb, txn.get());
+
+ IdDbt key;
+ Dbt value;
+ while (bindings.next(key, value)) {
+ qpid::framing::Buffer buffer(reinterpret_cast<char*>(value.get_data()), value.get_size());
+ if (buffer.available() < 8) {
+ QPID_LOG(error, "Not enough data for binding: " << buffer.available());
+ THROW_STORE_EXCEPTION("Not enough data for binding");
+ }
+ uint64_t queueId = buffer.getLongLong();
+ std::string queueName;
+ std::string routingkey;
+ qpid::framing::FieldTable args;
+ buffer.getShortString(queueName);
+ buffer.getShortString(routingkey);
+ buffer.get(args);
+ exchange_index::iterator exchange = exchanges.find(key.id);
+ queue_index::iterator queue = queues.find(queueId);
+ if (exchange != exchanges.end() && queue != queues.end()) {
+ //could use the recoverable queue here rather than the name...
+ exchange->second->bind(queueName, routingkey, args);
+ QPID_LOG(info, "Recovered binding exchange=" << exchange->second->getName()
+ << " key=" << routingkey
+ << " queue=" << queueName);
+ } else {
+ //stale binding, delete it
+ QPID_LOG(warning, "Deleting stale binding");
+ bindings->del(0);
+ }
+ }
+}
+
+void MessageStoreImpl::recoverGeneral(TxnCtxt& txn,
+ qpid::broker::RecoveryManager& registry)
+{
+ Cursor items;
+ items.open(generalDb, txn.get());
+
+ u_int64_t maxGeneralId(1);
+ IdDbt key;
+ Dbt value;
+ //read all items
+ while (items.next(key, value)) {
+ qpid::framing::Buffer buffer(reinterpret_cast<char*>(value.get_data()), value.get_size());
+ //create instance
+ qpid::broker::RecoverableConfig::shared_ptr config = registry.recoverConfig(buffer);
+ //set the persistenceId and update max as required
+ config->setPersistenceId(key.id);
+ maxGeneralId = std::max(key.id, maxGeneralId);
+ }
+ generalIdSequence.reset(maxGeneralId + 1);
+}
+
+void MessageStoreImpl::recoverMessages(TxnCtxt& /*txn*/,
+ qpid::broker::RecoveryManager& recovery,
+ qpid::broker::RecoverableQueue::shared_ptr& queue,
+ txn_list& prepared,
+ message_index& messages,
+ long& rcnt,
+ long& idcnt)
+{
+ size_t preambleLength = sizeof(u_int32_t)/*header size*/;
+
+ JournalImpl* jc = static_cast<JournalImpl*>(queue->getExternalQueueStore());
+ DataTokenImpl dtok;
+ size_t readSize = 0;
+ unsigned msg_count = 0;
+
+ // TODO: This optimization to skip reading if there are no enqueued messages to read
+    // breaks the Python system test in phase 6 with "Exception: Cannot write lock file".
+ // Figure out what is breaking.
+ //bool read = jc->get_enq_cnt() > 0;
+ bool read = true;
+
+ void* dbuff = NULL; size_t dbuffSize = 0;
+ void* xidbuff = NULL; size_t xidbuffSize = 0;
+ bool transientFlag = false;
+ bool externalFlag = false;
+
+ dtok.set_wstate(DataTokenImpl::ENQ);
+
+ // Read the message from the Journal.
+ try {
+ unsigned aio_sleep_cnt = 0;
+ while (read) {
+ mrg::journal::iores res = jc->read_data_record(&dbuff, dbuffSize, &xidbuff, xidbuffSize, transientFlag, externalFlag, &dtok);
+ readSize = dtok.dsize();
+
+ switch (res)
+ {
+ case mrg::journal::RHM_IORES_SUCCESS: {
+ msg_count++;
+ qpid::broker::RecoverableMessage::shared_ptr msg;
+ char* data = (char*)dbuff;
+
+ unsigned headerSize;
+ if (externalFlag) {
+ msg = getExternMessage(recovery, dtok.rid(), headerSize); // large message external to jrnl
+ } else {
+ headerSize = qpid::framing::Buffer(data, preambleLength).getLong();
+ qpid::framing::Buffer headerBuff(data + preambleLength, headerSize); // TODO: do we want read size or header size?
+ msg = recovery.recoverMessage(headerBuff);
+ }
+ msg->setPersistenceId(dtok.rid());
+ // At some future point if delivery attempts are stored, then this call would
+ // become optional depending on that information.
+ msg->setRedelivered();
+ // Reset the TTL for the recovered message
+ msg->computeExpiration(broker->getExpiryPolicy());
+
+ u_int32_t contentOffset = headerSize + preambleLength;
+ u_int64_t contentSize = readSize - contentOffset;
+ if (msg->loadContent(contentSize) && !externalFlag) {
+ //now read the content
+ qpid::framing::Buffer contentBuff(data + contentOffset, contentSize);
+ msg->decodeContent(contentBuff);
+ }
+
+ PreparedTransaction::list::iterator i = PreparedTransaction::getLockedPreparedTransaction(prepared, queue->getPersistenceId(), dtok.rid());
+ if (i == prepared.end()) { // not in prepared list
+ rcnt++;
+ queue->recover(msg);
+ } else {
+ u_int64_t rid = dtok.rid();
+ std::string xid(i->xid);
+ TplRecoverMapCitr citr = tplRecoverMap.find(xid);
+ if (citr == tplRecoverMap.end()) THROW_STORE_EXCEPTION("XID not found in tplRecoverMap");
+
+ // deq present in prepared list: this xid is part of incomplete txn commit/abort
+ // or this is a 1PC txn that must be rolled forward
+ if (citr->second.deq_flag || !citr->second.tpc_flag) {
+ if (jc->is_enqueued(rid, true)) {
+ // Enqueue is non-tx, dequeue tx
+ assert(jc->is_locked(rid)); // This record MUST be locked by a txn dequeue
+ if (!citr->second.commit_flag) {
+ rcnt++;
+ queue->recover(msg); // recover message in abort case only
+ }
+ } else {
+ // Enqueue and/or dequeue tx
+ journal::txn_map& tmap = jc->get_txn_map();
+ journal::txn_data_list txnList = tmap.get_tdata_list(xid); // txnList will be empty if xid not found
+ bool enq = false;
+ bool deq = false;
+ for (journal::tdl_itr j = txnList.begin(); j<txnList.end(); j++) {
+ if (j->_enq_flag && j->_rid == rid) enq = true;
+ else if (!j->_enq_flag && j->_drid == rid) deq = true;
+ }
+ if (enq && !deq && citr->second.commit_flag) {
+ rcnt++;
+ queue->recover(msg); // recover txn message in commit case only
+ }
+ }
+ } else {
+ idcnt++;
+ messages[rid] = msg;
+ }
+ }
+
+ dtok.reset();
+ dtok.set_wstate(DataTokenImpl::ENQ);
+
+ if (xidbuff)
+ ::free(xidbuff);
+ else if (dbuff)
+ ::free(dbuff);
+ aio_sleep_cnt = 0;
+ break;
+ }
+ case mrg::journal::RHM_IORES_PAGE_AIOWAIT:
+ if (++aio_sleep_cnt > MAX_AIO_SLEEPS)
+ THROW_STORE_EXCEPTION("Timeout waiting for AIO in MessageStoreImpl::recoverMessages()");
+ ::usleep(AIO_SLEEP_TIME_US);
+ break;
+ case mrg::journal::RHM_IORES_EMPTY:
+ read = false;
+ break; // done with all messages. (add call in jrnl to test that _emap is empty.)
+ default:
+ std::ostringstream oss;
+ oss << "recoverMessages(): Queue: " << queue->getName() << ": Unexpected return from journal read: " << mrg::journal::iores_str(res);
+ THROW_STORE_EXCEPTION(oss.str());
+ } // switch
+ } // while
+ } catch (const journal::jexception& e) {
+ THROW_STORE_EXCEPTION(std::string("Queue ") + queue->getName() + ": recoverMessages() failed: " + e.what());
+ }
+}
+
+qpid::broker::RecoverableMessage::shared_ptr MessageStoreImpl::getExternMessage(qpid::broker::RecoveryManager& /*recovery*/,
+ uint64_t /*messageId*/,
+ unsigned& /*headerSize*/)
+{
+ throw mrg::journal::jexception(mrg::journal::jerrno::JERR__NOTIMPL, "MessageStoreImpl", "getExternMessage");
+}
+
+int MessageStoreImpl::enqueueMessage(TxnCtxt& txn,
+ IdDbt& msgId,
+ qpid::broker::RecoverableMessage::shared_ptr& msg,
+ queue_index& index,
+ txn_list& prepared,
+ message_index& messages)
+{
+ Cursor mappings;
+ mappings.open(mappingDb, txn.get());
+
+ IdDbt value;
+
+ int count(0);
+ for (int status = mappings->get(&msgId, &value, DB_SET); status == 0; status = mappings->get(&msgId, &value, DB_NEXT_DUP)) {
+ if (index.find(value.id) == index.end()) {
+ QPID_LOG(warning, "Recovered message for queue that no longer exists");
+ mappings->del(0);
+ } else {
+ qpid::broker::RecoverableQueue::shared_ptr queue = index[value.id];
+ if (PreparedTransaction::isLocked(prepared, value.id, msgId.id)) {
+ messages[msgId.id] = msg;
+ } else {
+ queue->recover(msg);
+ }
+ count++;
+ }
+ }
+ mappings.close();
+ return count;
+}
+
+void MessageStoreImpl::readTplStore()
+{
+ tplRecoverMap.clear();
+ journal::txn_map& tmap = tplStorePtr->get_txn_map();
+ DataTokenImpl dtok;
+ void* dbuff = NULL; size_t dbuffSize = 0;
+ void* xidbuff = NULL; size_t xidbuffSize = 0;
+ bool transientFlag = false;
+ bool externalFlag = false;
+ bool done = false;
+ try {
+ unsigned aio_sleep_cnt = 0;
+ while (!done) {
+ dtok.reset();
+ dtok.set_wstate(DataTokenImpl::ENQ);
+ mrg::journal::iores res = tplStorePtr->read_data_record(&dbuff, dbuffSize, &xidbuff, xidbuffSize, transientFlag, externalFlag, &dtok);
+ switch (res) {
+ case mrg::journal::RHM_IORES_SUCCESS: {
+ // Every TPL record contains both data and an XID
+ assert(dbuffSize>0);
+ assert(xidbuffSize>0);
+ std::string xid(static_cast<const char*>(xidbuff), xidbuffSize);
+ bool is2PC = *(static_cast<char*>(dbuff)) != 0;
+
+ // Check transaction details; add to recover map
+ journal::txn_data_list txnList = tmap.get_tdata_list(xid); // txnList will be empty if xid not found
+ if (!txnList.empty()) { // xid found in tmap
+ unsigned enqCnt = 0;
+ unsigned deqCnt = 0;
+ u_int64_t rid = 0;
+
+ // Assume commit (roll forward) in cases where only prepare has been called - ie only enqueue record exists.
+ // Note: will apply to both 1PC and 2PC transactions.
+ bool commitFlag = true;
+
+ for (journal::tdl_itr j = txnList.begin(); j<txnList.end(); j++) {
+ if (j->_enq_flag) {
+ rid = j->_rid;
+ enqCnt++;
+ } else {
+ commitFlag = j->_commit_flag;
+ deqCnt++;
+ }
+ }
+ assert(enqCnt == 1);
+ assert(deqCnt <= 1);
+ tplRecoverMap.insert(TplRecoverMapPair(xid, TplRecoverStruct(rid, deqCnt == 1, commitFlag, is2PC)));
+ }
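+                    // How recover() uses these flags: an entry with deq_flag false and tpc_flag true
+                    // (prepare-only 2PC) is surfaced as an in-doubt transaction, while deq_flag true
+                    // marks a commit/abort interrupted mid-complete() that is rolled forward or back
+                    // according to commit_flag; 1PC entries are always resolved locally.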
+
+ ::free(xidbuff);
+ aio_sleep_cnt = 0;
+ break;
+ }
+ case mrg::journal::RHM_IORES_PAGE_AIOWAIT:
+ if (++aio_sleep_cnt > MAX_AIO_SLEEPS)
+ THROW_STORE_EXCEPTION("Timeout waiting for AIO in MessageStoreImpl::recoverTplStore()");
+ ::usleep(AIO_SLEEP_TIME_US);
+ break;
+ case mrg::journal::RHM_IORES_EMPTY:
+ done = true;
+ break; // done with all messages. (add call in jrnl to test that _emap is empty.)
+ default:
+ std::ostringstream oss;
+ oss << "readTplStore(): Unexpected result from journal read: " << mrg::journal::iores_str(res);
+ THROW_STORE_EXCEPTION(oss.str());
+ } // switch
+ }
+ } catch (const journal::jexception& e) {
+ THROW_STORE_EXCEPTION(std::string("TPL readTplStore() failed: ") + e.what());
+ }
+}
+
+void MessageStoreImpl::recoverTplStore()
+{
+ if (journal::jdir::exists(tplStorePtr->jrnl_dir() + tplStorePtr->base_filename() + ".jinf")) {
+ u_int64_t thisHighestRid = 0ULL;
+ tplStorePtr->recover(tplNumJrnlFiles, false, 0, tplJrnlFsizeSblks, tplWCachePgSizeSblks, tplWCacheNumPages, 0, thisHighestRid, 0);
+ if (highestRid == 0ULL)
+ highestRid = thisHighestRid;
+ else if (thisHighestRid - highestRid < 0x8000000000000000ULL) // RFC 1982 comparison for unsigned 64-bit
+ highestRid = thisHighestRid;
+
+ // Load tplRecoverMap by reading the TPL store
+ readTplStore();
+
+ tplStorePtr->recover_complete(); // start journal.
+ }
+}
+
+void MessageStoreImpl::recoverLockedMappings(txn_list& txns)
+{
+ if (!tplStorePtr->is_ready())
+ recoverTplStore();
+
+ // Abort unprepared xids and populate the locked maps
+ for (TplRecoverMapCitr i = tplRecoverMap.begin(); i != tplRecoverMap.end(); i++) {
+ LockedMappings::shared_ptr enq_ptr;
+ enq_ptr.reset(new LockedMappings);
+ LockedMappings::shared_ptr deq_ptr;
+ deq_ptr.reset(new LockedMappings);
+ txns.push_back(new PreparedTransaction(i->first, enq_ptr, deq_ptr));
+ }
+}
+
+void MessageStoreImpl::collectPreparedXids(std::set<std::string>& xids)
+{
+ if (tplStorePtr->is_ready()) {
+ tplStorePtr->read_reset();
+ readTplStore();
+ } else {
+ recoverTplStore();
+ }
+ for (TplRecoverMapCitr i = tplRecoverMap.begin(); i != tplRecoverMap.end(); i++) {
+ // Discard all txns that are to be rolled forward/back and 1PC transactions
+ if (!i->second.deq_flag && i->second.tpc_flag)
+ xids.insert(i->first);
+ }
+}
+
+void MessageStoreImpl::stage(const boost::intrusive_ptr<qpid::broker::PersistableMessage>& /*msg*/)
+{
+ throw mrg::journal::jexception(mrg::journal::jerrno::JERR__NOTIMPL, "MessageStoreImpl", "stage");
+}
+
+void MessageStoreImpl::destroy(qpid::broker::PersistableMessage& /*msg*/)
+{
+ throw mrg::journal::jexception(mrg::journal::jerrno::JERR__NOTIMPL, "MessageStoreImpl", "destroy");
+}
+
+void MessageStoreImpl::appendContent(const boost::intrusive_ptr<const qpid::broker::PersistableMessage>& /*msg*/,
+ const std::string& /*data*/)
+{
+ throw mrg::journal::jexception(mrg::journal::jerrno::JERR__NOTIMPL, "MessageStoreImpl", "appendContent");
+}
+
+void MessageStoreImpl::loadContent(const qpid::broker::PersistableQueue& queue,
+ const boost::intrusive_ptr<const qpid::broker::PersistableMessage>& msg,
+ std::string& data,
+ u_int64_t offset,
+ u_int32_t length)
+{
+ checkInit();
+ u_int64_t messageId (msg->getPersistenceId());
+
+ if (messageId != 0) {
+ try {
+ JournalImpl* jc = static_cast<JournalImpl*>(queue.getExternalQueueStore());
+ if (jc && jc->is_enqueued(messageId) ) {
+ if (!jc->loadMsgContent(messageId, data, length, offset)) {
+ std::ostringstream oss;
+ oss << "Queue " << queue.getName() << ": loadContent() failed: Message " << messageId << " is extern";
+ THROW_STORE_EXCEPTION(oss.str());
+ }
+ } else {
+ std::ostringstream oss;
+ oss << "Queue " << queue.getName() << ": loadContent() failed: Message " << messageId << " not enqueued";
+ THROW_STORE_EXCEPTION(oss.str());
+ }
+ } catch (const journal::jexception& e) {
+ THROW_STORE_EXCEPTION(std::string("Queue ") + queue.getName() + ": loadContent() failed: " + e.what());
+ }
+ } else {
+ THROW_STORE_EXCEPTION("Cannot load content. Message not known to store!");
+ }
+}
+
+void MessageStoreImpl::flush(const qpid::broker::PersistableQueue& queue)
+{
+ if (queue.getExternalQueueStore() == 0) return;
+ checkInit();
+ std::string qn = queue.getName();
+ try {
+ JournalImpl* jc = static_cast<JournalImpl*>(queue.getExternalQueueStore());
+ if (jc) {
+ // TODO: check if this result should be used...
+ /*mrg::journal::iores res =*/ jc->flush();
+ }
+ } catch (const journal::jexception& e) {
+ THROW_STORE_EXCEPTION(std::string("Queue ") + qn + ": flush() failed: " + e.what() );
+ }
+}
+
+void MessageStoreImpl::enqueue(qpid::broker::TransactionContext* ctxt,
+ const boost::intrusive_ptr<qpid::broker::PersistableMessage>& msg,
+ const qpid::broker::PersistableQueue& queue)
+{
+ checkInit();
+ u_int64_t queueId (queue.getPersistenceId());
+ u_int64_t messageId (msg->getPersistenceId());
+ if (queueId == 0) {
+ THROW_STORE_EXCEPTION("Queue not created: " + queue.getName());
+ }
+
+ TxnCtxt implicit;
+ TxnCtxt* txn = 0;
+ if (ctxt) {
+ txn = check(ctxt);
+ } else {
+ txn = &implicit;
+ }
+
+ bool newId = false;
+ if (messageId == 0) {
+ messageId = messageIdSequence.next();
+ msg->setPersistenceId(messageId);
+ newId = true;
+ }
+ store(&queue, txn, msg, newId);
+
+ // add queue* to the txn map..
+ if (ctxt) txn->addXidRecord(queue.getExternalQueueStore());
+}
+
+u_int64_t MessageStoreImpl::msgEncode(std::vector<char>& buff, const boost::intrusive_ptr<qpid::broker::PersistableMessage>& message)
+{
+ u_int32_t headerSize = message->encodedHeaderSize();
+ u_int64_t size = message->encodedSize() + sizeof(u_int32_t);
+ try { buff = std::vector<char>(size); } // 32-bit header-size prefix + headers + content
+ catch (const std::exception& e) {
+ std::ostringstream oss;
+ oss << "Unable to allocate memory for encoding message; requested size: " << size << "; error: " << e.what();
+ THROW_STORE_EXCEPTION(oss.str());
+ }
+ qpid::framing::Buffer buffer(&buff[0],size);
+ buffer.putLong(headerSize);
+ message->encode(buffer);
+ return size;
+}
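A small standalone sketch (not part of this patch) of the record layout msgEncode() above produces: a 32-bit header-size prefix written with qpid::framing::Buffer::putLong(), followed by the message's own encoding. The payload strings here are dummies standing in for the encoded headers and content.

#include <stdint.h>
#include <string>
#include <vector>
#include "qpid/framing/Buffer.h"

int main() {
    const std::string headers = "hdr";   // stands in for the encoded message headers
    const std::string content = "body";  // stands in for the message content
    std::vector<char> rec(sizeof(uint32_t) + headers.size() + content.size());

    qpid::framing::Buffer out(&rec[0], rec.size());
    out.putLong(headers.size());         // 32-bit header-size prefix, as in msgEncode()
    out.putRawData(headers);
    out.putRawData(content);

    qpid::framing::Buffer in(&rec[0], rec.size());
    uint32_t headerSize = in.getLong();  // the read side recovers the prefix first
    return headerSize == headers.size() ? 0 : 1;
}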
+
+void MessageStoreImpl::store(const qpid::broker::PersistableQueue* queue,
+ TxnCtxt* txn,
+ const boost::intrusive_ptr<qpid::broker::PersistableMessage>& message,
+ bool /*newId*/)
+{
+ std::vector<char> buff;
+ u_int64_t size = msgEncode(buff, message);
+
+ try {
+ if (queue) {
+ boost::intrusive_ptr<DataTokenImpl> dtokp(new DataTokenImpl);
+ dtokp->addRef();
+ dtokp->setSourceMessage(message);
+ dtokp->set_external_rid(true);
+ dtokp->set_rid(message->getPersistenceId()); // set the messageID into the Journal header (record-id)
+
+ JournalImpl* jc = static_cast<JournalImpl*>(queue->getExternalQueueStore());
+ if (txn->getXid().empty()) {
+ jc->enqueue_data_record(&buff[0], size, size, dtokp.get(), !message->isPersistent());
+ } else {
+ jc->enqueue_txn_data_record(&buff[0], size, size, dtokp.get(), txn->getXid(), !message->isPersistent());
+ }
+ } else {
+ THROW_STORE_EXCEPTION(std::string("MessageStoreImpl::store() failed: queue NULL."));
+ }
+ } catch (const journal::jexception& e) {
+ THROW_STORE_EXCEPTION(std::string("Queue ") + queue->getName() + ": MessageStoreImpl::store() failed: " +
+ e.what());
+ }
+}
+
+void MessageStoreImpl::dequeue(qpid::broker::TransactionContext* ctxt,
+ const boost::intrusive_ptr<qpid::broker::PersistableMessage>& msg,
+ const qpid::broker::PersistableQueue& queue)
+{
+ checkInit();
+ u_int64_t queueId (queue.getPersistenceId());
+ u_int64_t messageId (msg->getPersistenceId());
+ if (queueId == 0) {
+ THROW_STORE_EXCEPTION("Queue \"" + queue.getName() + "\" has null queue Id (has not been created)");
+ }
+ if (messageId == 0) {
+ THROW_STORE_EXCEPTION("Queue \"" + queue.getName() + "\": Dequeuing message with null persistence Id.");
+ }
+
+ TxnCtxt implicit;
+ TxnCtxt* txn = 0;
+ if (ctxt) {
+ txn = check(ctxt);
+ } else {
+ txn = &implicit;
+ }
+
+ // add queue* to the txn map..
+ if (ctxt) txn->addXidRecord(queue.getExternalQueueStore());
+ async_dequeue(ctxt, msg, queue);
+
+ msg->dequeueComplete();
+}
+
+void MessageStoreImpl::async_dequeue(qpid::broker::TransactionContext* ctxt,
+ const boost::intrusive_ptr<qpid::broker::PersistableMessage>& msg,
+ const qpid::broker::PersistableQueue& queue)
+{
+ boost::intrusive_ptr<DataTokenImpl> ddtokp(new DataTokenImpl);
+ ddtokp->setSourceMessage(msg);
+ ddtokp->set_external_rid(true);
+ ddtokp->set_rid(messageIdSequence.next());
+ ddtokp->set_dequeue_rid(msg->getPersistenceId());
+ ddtokp->set_wstate(DataTokenImpl::ENQ);
+ std::string tid;
+ if (ctxt) {
+ TxnCtxt* txn = check(ctxt);
+ tid = txn->getXid();
+ }
+ // Manually increase the ref count, as raw pointers are used beyond this point
+ ddtokp->addRef();
+ try {
+ JournalImpl* jc = static_cast<JournalImpl*>(queue.getExternalQueueStore());
+ if (tid.empty()) {
+ jc->dequeue_data_record(ddtokp.get());
+ } else {
+ jc->dequeue_txn_data_record(ddtokp.get(), tid);
+ }
+ } catch (const journal::jexception& e) {
+ ddtokp->release();
+ THROW_STORE_EXCEPTION(std::string("Queue ") + queue.getName() + ": async_dequeue() failed: " + e.what());
+ }
+}
+
+u_int32_t MessageStoreImpl::outstandingQueueAIO(const qpid::broker::PersistableQueue& /*queue*/)
+{
+ checkInit();
+ return 0;
+}
+
+void MessageStoreImpl::completed(TxnCtxt& txn,
+ bool commit)
+{
+ try {
+ chkTplStoreInit(); // Late initialize (if needed)
+
+ // Nothing to do if not prepared
+ if (txn.getDtok()->is_enqueued()) {
+ txn.incrDtokRef();
+ DataTokenImpl* dtokp = txn.getDtok();
+ dtokp->set_dequeue_rid(dtokp->rid());
+ dtokp->set_rid(messageIdSequence.next());
+ tplStorePtr->dequeue_txn_data_record(txn.getDtok(), txn.getXid(), commit);
+ }
+ txn.complete(commit);
+ if (mgmtObject.get() != 0) {
+ mgmtObject->dec_tplTransactionDepth();
+ if (commit)
+ mgmtObject->inc_tplTxnCommits();
+ else
+ mgmtObject->inc_tplTxnAborts();
+ }
+ } catch (const std::exception& e) {
+ QPID_LOG(error, "Error completing xid " << txn.getXid() << ": " << e.what());
+ throw;
+ }
+}
+
+std::auto_ptr<qpid::broker::TransactionContext> MessageStoreImpl::begin()
+{
+ checkInit();
+ // pass sequence number for c/a
+ return std::auto_ptr<qpid::broker::TransactionContext>(new TxnCtxt(&messageIdSequence));
+}
+
+std::auto_ptr<qpid::broker::TPCTransactionContext> MessageStoreImpl::begin(const std::string& xid)
+{
+ checkInit();
+ IdSequence* jtx = &messageIdSequence;
+ // pass sequence number for c/a
+ return std::auto_ptr<qpid::broker::TPCTransactionContext>(new TPCTxnCtxt(xid, jtx));
+}
+
+void MessageStoreImpl::prepare(qpid::broker::TPCTransactionContext& ctxt)
+{
+ checkInit();
+ TxnCtxt* txn = dynamic_cast<TxnCtxt*>(&ctxt);
+ if(!txn) throw qpid::broker::InvalidTransactionContextException();
+ localPrepare(txn);
+}
+
+void MessageStoreImpl::localPrepare(TxnCtxt* ctxt)
+{
+ try {
+ chkTplStoreInit(); // Late initialize (if needed)
+
+ // This sync is required to ensure multi-queue atomicity - i.e. all txn data
+ // must hit the disk on *all* queues before the TPL prepare (enq) is written.
+ ctxt->sync();
+
+ ctxt->incrDtokRef();
+ DataTokenImpl* dtokp = ctxt->getDtok();
+ dtokp->set_external_rid(true);
+ dtokp->set_rid(messageIdSequence.next());
+ char tpcFlag = static_cast<char>(ctxt->isTPC());
+ tplStorePtr->enqueue_txn_data_record(&tpcFlag, sizeof(char), sizeof(char), dtokp, ctxt->getXid(), false);
+ ctxt->prepare(tplStorePtr.get());
+ // make sure all the data is written to disk before returning
+ ctxt->sync();
+ if (mgmtObject.get() != 0) {
+ mgmtObject->inc_tplTransactionDepth();
+ mgmtObject->inc_tplTxnPrepares();
+ }
+ } catch (const std::exception& e) {
+ QPID_LOG(error, "Error preparing xid " << ctxt->getXid() << ": " << e.what());
+ throw;
+ }
+}
+
+void MessageStoreImpl::commit(qpid::broker::TransactionContext& ctxt)
+{
+ checkInit();
+ TxnCtxt* txn(check(&ctxt));
+ if (!txn->isTPC()) {
+ if (txn->impactedQueuesEmpty()) return;
+ localPrepare(txn);
+ }
+ completed(*txn, true);
+}
+
+void MessageStoreImpl::abort(qpid::broker::TransactionContext& ctxt)
+{
+ checkInit();
+ TxnCtxt* txn(check(&ctxt));
+ if (!txn->isTPC()) {
+ if (txn->impactedQueuesEmpty()) return;
+ localPrepare(txn);
+ }
+ completed(*txn, false);
+}
+
+TxnCtxt* MessageStoreImpl::check(qpid::broker::TransactionContext* ctxt)
+{
+ TxnCtxt* txn = dynamic_cast<TxnCtxt*>(ctxt);
+ if(!txn) throw qpid::broker::InvalidTransactionContextException();
+ return txn;
+}
+
+void MessageStoreImpl::put(db_ptr db,
+ DbTxn* txn,
+ Dbt& key,
+ Dbt& value)
+{
+ try {
+ int status = db->put(txn, &key, &value, DB_NODUPDATA);
+ if (status == DB_KEYEXIST) {
+ THROW_STORE_EXCEPTION("duplicate data");
+ } else if (status) {
+ THROW_STORE_EXCEPTION(DbEnv::strerror(status));
+ }
+ } catch (const DbException& e) {
+ THROW_STORE_EXCEPTION(e.what());
+ }
+}
+
+void MessageStoreImpl::deleteBindingsForQueue(const qpid::broker::PersistableQueue& queue)
+{
+ TxnCtxt txn;
+ txn.begin(dbenv.get(), true);
+ try {
+ {
+ Cursor bindings;
+ bindings.open(bindingDb, txn.get());
+
+ IdDbt key;
+ Dbt value;
+ while (bindings.next(key, value)) {
+ qpid::framing::Buffer buffer(reinterpret_cast<char*>(value.get_data()), value.get_size());
+ if (buffer.available() < 8) {
+ THROW_STORE_EXCEPTION("Not enough data for binding");
+ }
+ uint64_t queueId = buffer.getLongLong();
+ if (queue.getPersistenceId() == queueId) {
+ bindings->del(0);
+ QPID_LOG(debug, "Deleting binding for " << queue.getName() << " " << key.id << "->" << queueId);
+ }
+ }
+ }
+ txn.commit();
+ } catch (const std::exception& e) {
+ txn.abort();
+ THROW_STORE_EXCEPTION_2("Error deleting bindings", e.what());
+ } catch (...) {
+ txn.abort();
+ throw;
+ }
+ QPID_LOG(debug, "Deleted all bindings for " << queue.getName() << ":" << queue.getPersistenceId());
+}
+
+void MessageStoreImpl::deleteBinding(const qpid::broker::PersistableExchange& exchange,
+ const qpid::broker::PersistableQueue& queue,
+ const std::string& bkey)
+{
+ TxnCtxt txn;
+ txn.begin(dbenv.get(), true);
+ try {
+ {
+ Cursor bindings;
+ bindings.open(bindingDb, txn.get());
+
+ IdDbt key(exchange.getPersistenceId());
+ Dbt value;
+
+ for (int status = bindings->get(&key, &value, DB_SET); status == 0; status = bindings->get(&key, &value, DB_NEXT_DUP)) {
+ qpid::framing::Buffer buffer(reinterpret_cast<char*>(value.get_data()), value.get_size());
+ if (buffer.available() < 8) {
+ THROW_STORE_EXCEPTION("Not enough data for binding");
+ }
+ uint64_t queueId = buffer.getLongLong();
+ if (queue.getPersistenceId() == queueId) {
+ std::string q;
+ std::string k;
+ buffer.getShortString(q);
+ buffer.getShortString(k);
+ if (bkey == k) {
+ bindings->del(0);
+ QPID_LOG(debug, "Deleting binding for " << queue.getName() << " " << key.id << "->" << queueId);
+ }
+ }
+ }
+ }
+ txn.commit();
+ } catch (const std::exception& e) {
+ txn.abort();
+ THROW_STORE_EXCEPTION_2("Error deleting bindings", e.what());
+ } catch (...) {
+ txn.abort();
+ throw;
+ }
+}
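A standalone sketch (not part of this patch) of the binding-record layout implied by the decode logic in deleteBinding() and deleteBindingsForQueue() above: an 8-byte queue persistence id followed by short-string queue name and binding key. The record actually written by bind(), which is not shown in this hunk, may carry further fields such as the binding arguments; names and ids below are hypothetical.

#include <stdint.h>
#include <string>
#include <vector>
#include "qpid/framing/Buffer.h"

int main() {
    std::vector<char> raw(256);
    qpid::framing::Buffer out(&raw[0], raw.size());
    out.putLongLong(42);                 // queue persistence id
    out.putShortString("my-queue");      // queue name (hypothetical)
    out.putShortString("my-key");        // binding key (hypothetical)

    // Decode exactly as deleteBinding() does.
    qpid::framing::Buffer in(&raw[0], raw.size());
    uint64_t queueId = in.getLongLong();
    std::string q, k;
    in.getShortString(q);
    in.getShortString(k);
    return (queueId == 42 && q == "my-queue" && k == "my-key") ? 0 : 1;
}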
+
+std::string MessageStoreImpl::getJrnlBaseDir()
+{
+ std::ostringstream dir;
+ dir << storeDir << "/" << storeTopLevelDir << "/jrnl/" ;
+ return dir.str();
+}
+
+std::string MessageStoreImpl::getBdbBaseDir()
+{
+ std::ostringstream dir;
+ dir << storeDir << "/" << storeTopLevelDir << "/dat/" ;
+ return dir.str();
+}
+
+std::string MessageStoreImpl::getTplBaseDir()
+{
+ std::ostringstream dir;
+ dir << storeDir << "/" << storeTopLevelDir << "/tpl/" ;
+ return dir.str();
+}
+
+std::string MessageStoreImpl::getJrnlDir(const qpid::broker::PersistableQueue& queue) //for example /var/rhm/ + queueDir/
+{
+ return getJrnlHashDir(queue.getName().c_str());
+}
+
+u_int32_t MessageStoreImpl::bHash(const std::string str)
+{
+ // Daniel Bernstein hash fn
+ u_int32_t h = 0;
+ for (std::string::const_iterator i = str.begin(); i < str.end(); i++)
+ h = 33*h + *i;
+ return h;
+}
+
+std::string MessageStoreImpl::getJrnlHashDir(const std::string& queueName) //for example /var/rhm/ + queueDir/
+{
+ std::stringstream dir;
+ dir << getJrnlBaseDir() << std::hex << std::setfill('0') << std::setw(4);
+ dir << (bHash(queueName) % 29); // Use a prime number for better distribution across dirs
+ dir << "/" << queueName << "/";
+ return dir.str();
+}
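A standalone sketch (not part of this patch) of how getJrnlHashDir() above places each queue's journal: the Bernstein hash of the queue name, taken modulo the prime 29, becomes a zero-padded hex subdirectory under the journal base directory. The base path and queue name below are hypothetical.

#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>

// Same arithmetic as MessageStoreImpl::bHash() above (Daniel Bernstein hash).
static unsigned int bernstein(const std::string& s) {
    unsigned int h = 0;
    for (std::string::const_iterator i = s.begin(); i != s.end(); ++i)
        h = 33 * h + *i;
    return h;
}

int main() {
    const std::string base = "/var/lib/qpidd/store/jrnl/";  // hypothetical journal base dir
    const std::string queue = "my-queue";                   // hypothetical queue name
    std::ostringstream dir;
    dir << base << std::hex << std::setfill('0') << std::setw(4)
        << (bernstein(queue) % 29) << "/" << queue << "/";
    std::cout << dir.str() << std::endl;  // prints the per-queue journal directory
    return 0;
}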
+
+std::string MessageStoreImpl::getStoreDir() const { return storeDir; }
+
+void MessageStoreImpl::journalDeleted(JournalImpl& j) {
+ qpid::sys::Mutex::ScopedLock sl(journalListLock);
+ journalList.erase(j.id());
+}
+
+MessageStoreImpl::StoreOptions::StoreOptions(const std::string& name) :
+ qpid::Options(name),
+ numJrnlFiles(defNumJrnlFiles),
+ autoJrnlExpand(defAutoJrnlExpand),
+ autoJrnlExpandMaxFiles(defAutoJrnlExpandMaxFiles),
+ jrnlFsizePgs(defJrnlFileSizePgs),
+ truncateFlag(defTruncateFlag),
+ wCachePageSizeKib(defWCachePageSize),
+ tplNumJrnlFiles(defTplNumJrnlFiles),
+ tplJrnlFsizePgs(defTplJrnlFileSizePgs),
+ tplWCachePageSizeKib(defTplWCachePageSize)
+{
+ std::ostringstream oss1;
+ oss1 << "Default number of files for each journal instance (queue). [Allowable values: " <<
+ JRNL_MIN_NUM_FILES << " - " << JRNL_MAX_NUM_FILES << "]";
+ std::ostringstream oss2;
+ oss2 << "Default size for each journal file in multiples of read pages (1 read page = 64KiB). [Allowable values: " <<
+ JRNL_MIN_FILE_SIZE / JRNL_RMGR_PAGE_SIZE << " - " << JRNL_MAX_FILE_SIZE / JRNL_RMGR_PAGE_SIZE << "]";
+ std::ostringstream oss3;
+ oss3 << "Number of files for transaction prepared list journal instance. [Allowable values: " <<
+ JRNL_MIN_NUM_FILES << " - " << JRNL_MAX_NUM_FILES << "]";
+ std::ostringstream oss4;
+ oss4 << "Size of each transaction prepared list journal file in multiples of read pages (1 read page = 64KiB) [Allowable values: " <<
+ JRNL_MIN_FILE_SIZE / JRNL_RMGR_PAGE_SIZE << " - " << JRNL_MAX_FILE_SIZE / JRNL_RMGR_PAGE_SIZE << "]";
+ addOptions()
+ ("store-dir", qpid::optValue(storeDir, "DIR"),
+ "Store directory location for persistence (instead of using --data-dir value). "
+ "Required if --no-data-dir is also used.")
+ ("num-jfiles", qpid::optValue(numJrnlFiles, "N"), oss1.str().c_str())
+ ("jfile-size-pgs", qpid::optValue(jrnlFsizePgs, "N"), oss2.str().c_str())
+// TODO: Uncomment these lines when auto-expand is enabled.
+// ("auto-expand", qpid::optValue(autoJrnlExpand, "yes|no"),
+// "If yes|true|1, allows journal to auto-expand by adding additional journal files as needed. "
+// "If no|false|0, the number of journal files will remain fixed (num-jfiles).")
+// ("max-auto-expand-jfiles", qpid::optValue(autoJrnlExpandMaxFiles, "N"),
+// "Maximum number of journal files allowed from auto-expanding; must be greater than --num-jfiles parameter.")
+ ("truncate", qpid::optValue(truncateFlag, "yes|no"),
+ "If yes|true|1, will truncate the store (discard any existing records). If no|false|0, will preserve "
+ "the existing store files for recovery.")
+ ("wcache-page-size", qpid::optValue(wCachePageSizeKib, "N"),
+ "Size of the pages in the write page cache in KiB. "
+ "Allowable values - powers of 2: 1, 2, 4, ... , 128. "
+ "Lower values decrease latency at the expense of throughput.")
+ ("tpl-num-jfiles", qpid::optValue(tplNumJrnlFiles, "N"), oss3.str().c_str())
+ ("tpl-jfile-size-pgs", qpid::optValue(tplJrnlFsizePgs, "N"), oss4.str().c_str())
+ ("tpl-wcache-page-size", qpid::optValue(tplWCachePageSizeKib, "N"),
+ "Size of the pages in the transaction prepared list write page cache in KiB. "
+ "Allowable values - powers of 2: 1, 2, 4, ... , 128. "
+ "Lower values decrease latency at the expense of throughput.")
+ ;
+}
+
+}}
diff --git a/qpid/cpp/src/qpid/legacystore/MessageStoreImpl.h b/qpid/cpp/src/qpid/legacystore/MessageStoreImpl.h
new file mode 100644
index 0000000000..68aceedfbb
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/MessageStoreImpl.h
@@ -0,0 +1,380 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#ifndef QPID_LEGACYSTORE_MESSAGESTOREIMPL_H
+#define QPID_LEGACYSTORE_MESSAGESTOREIMPL_H
+
+#include <string>
+
+#include "db-inc.h"
+#include "qpid/legacystore/Cursor.h"
+#include "qpid/legacystore/IdDbt.h"
+#include "qpid/legacystore/IdSequence.h"
+#include "qpid/legacystore/JournalImpl.h"
+#include "qpid/legacystore/jrnl/jcfg.h"
+#include "qpid/legacystore/PreparedTransaction.h"
+#include "qpid/broker/Broker.h"
+#include "qpid/broker/MessageStore.h"
+#include "qpid/management/Manageable.h"
+#include "qmf/org/apache/qpid/legacystore/Store.h"
+#include "qpid/legacystore/TxnCtxt.h"
+
+// Assume DB_VERSION_MAJOR == 4
+#if (DB_VERSION_MINOR == 2)
+#include <errno.h>
+#define DB_BUFFER_SMALL ENOMEM
+#endif
+
+namespace qpid { namespace sys {
+class Timer;
+}}
+
+namespace mrg {
+namespace msgstore {
+
+/**
+ * An implementation of the MessageStore interface based on Berkeley DB
+ */
+class MessageStoreImpl : public qpid::broker::MessageStore, public qpid::management::Manageable
+{
+ public:
+ typedef boost::shared_ptr<Db> db_ptr;
+ typedef boost::shared_ptr<DbEnv> dbEnv_ptr;
+
+ struct StoreOptions : public qpid::Options {
+ StoreOptions(const std::string& name="Store Options");
+ std::string clusterName;
+ std::string storeDir;
+ u_int16_t numJrnlFiles;
+ bool autoJrnlExpand;
+ u_int16_t autoJrnlExpandMaxFiles;
+ u_int32_t jrnlFsizePgs;
+ bool truncateFlag;
+ u_int32_t wCachePageSizeKib;
+ u_int16_t tplNumJrnlFiles;
+ u_int32_t tplJrnlFsizePgs;
+ u_int32_t tplWCachePageSizeKib;
+ };
+
+ protected:
+ typedef std::map<u_int64_t, qpid::broker::RecoverableQueue::shared_ptr> queue_index;
+ typedef std::map<u_int64_t, qpid::broker::RecoverableExchange::shared_ptr> exchange_index;
+ typedef std::map<u_int64_t, qpid::broker::RecoverableMessage::shared_ptr> message_index;
+
+ typedef LockedMappings::map txn_lock_map;
+ typedef boost::ptr_list<PreparedTransaction> txn_list;
+
+ // Structs for Transaction Prepared List (TPL) recovery state
+ struct TplRecoverStruct {
+ u_int64_t rid; // rid of TPL record
+ bool deq_flag;
+ bool commit_flag;
+ bool tpc_flag;
+ TplRecoverStruct(const u_int64_t _rid, const bool _deq_flag, const bool _commit_flag, const bool _tpc_flag);
+ };
+ typedef TplRecoverStruct TplRecover;
+ typedef std::pair<std::string, TplRecover> TplRecoverMapPair;
+ typedef std::map<std::string, TplRecover> TplRecoverMap;
+ typedef TplRecoverMap::const_iterator TplRecoverMapCitr;
+
+ typedef std::map<std::string, JournalImpl*> JournalListMap;
+ typedef JournalListMap::iterator JournalListMapItr;
+
+ // Default store settings
+ static const u_int16_t defNumJrnlFiles = 8;
+ static const u_int32_t defJrnlFileSizePgs = 24;
+ static const bool defTruncateFlag = false;
+ static const u_int32_t defWCachePageSize = JRNL_WMGR_DEF_PAGE_SIZE * JRNL_DBLK_SIZE * JRNL_SBLK_SIZE / 1024;
+ static const u_int16_t defTplNumJrnlFiles = 8;
+ static const u_int32_t defTplJrnlFileSizePgs = 24;
+ static const u_int32_t defTplWCachePageSize = defWCachePageSize / 8;
+ // TODO: set defAutoJrnlExpand to true and defAutoJrnlExpandMaxFiles to 16 when auto-expand comes on-line
+ static const bool defAutoJrnlExpand = false;
+ static const u_int16_t defAutoJrnlExpandMaxFiles = 0;
+
+ static const std::string storeTopLevelDir;
+ static qpid::sys::Duration defJournalGetEventsTimeout;
+ static qpid::sys::Duration defJournalFlushTimeout;
+
+ std::list<db_ptr> dbs;
+ dbEnv_ptr dbenv;
+ db_ptr queueDb;
+ db_ptr configDb;
+ db_ptr exchangeDb;
+ db_ptr mappingDb;
+ db_ptr bindingDb;
+ db_ptr generalDb;
+
+ // Pointer to Transaction Prepared List (TPL) journal instance
+ boost::shared_ptr<TplJournalImpl> tplStorePtr;
+ TplRecoverMap tplRecoverMap;
+ qpid::sys::Mutex tplInitLock;
+ JournalListMap journalList;
+ qpid::sys::Mutex journalListLock;
+ qpid::sys::Mutex bdbLock;
+
+ IdSequence queueIdSequence;
+ IdSequence exchangeIdSequence;
+ IdSequence generalIdSequence;
+ IdSequence messageIdSequence;
+ std::string storeDir;
+ u_int16_t numJrnlFiles;
+ bool autoJrnlExpand;
+ u_int16_t autoJrnlExpandMaxFiles;
+ u_int32_t jrnlFsizeSblks;
+ bool truncateFlag;
+ u_int32_t wCachePgSizeSblks;
+ u_int16_t wCacheNumPages;
+ u_int16_t tplNumJrnlFiles;
+ u_int32_t tplJrnlFsizeSblks;
+ u_int32_t tplWCachePgSizeSblks;
+ u_int16_t tplWCacheNumPages;
+ u_int64_t highestRid;
+ bool isInit;
+ const char* envPath;
+ qpid::broker::Broker* broker;
+
+ qmf::org::apache::qpid::legacystore::Store::shared_ptr mgmtObject;
+ qpid::management::ManagementAgent* agent;
+
+
+ // Parameter validation and calculation
+ static u_int16_t chkJrnlNumFilesParam(const u_int16_t param,
+ const std::string paramName);
+ static u_int32_t chkJrnlFileSizeParam(const u_int32_t param,
+ const std::string paramName,
+ const u_int32_t wCachePgSizeSblks = 0);
+ static u_int32_t chkJrnlWrPageCacheSize(const u_int32_t param,
+ const std::string paramName,
+ const u_int16_t jrnlFsizePgs);
+ static u_int16_t getJrnlWrNumPages(const u_int32_t wrPageSizeKib);
+ void chkJrnlAutoExpandOptions(const MessageStoreImpl::StoreOptions* opts,
+ bool& autoJrnlExpand,
+ u_int16_t& autoJrnlExpandMaxFiles,
+ const std::string& autoJrnlExpandMaxFilesParamName,
+ const u_int16_t numJrnlFiles,
+ const std::string& numJrnlFilesParamName);
+
+ void init();
+
+ void recoverQueues(TxnCtxt& txn,
+ qpid::broker::RecoveryManager& recovery,
+ queue_index& index,
+ txn_list& locked,
+ message_index& messages);
+ void recoverMessages(TxnCtxt& txn,
+ qpid::broker::RecoveryManager& recovery,
+ queue_index& index,
+ txn_list& locked,
+ message_index& prepared);
+ void recoverMessages(TxnCtxt& txn,
+ qpid::broker::RecoveryManager& recovery,
+ qpid::broker::RecoverableQueue::shared_ptr& queue,
+ txn_list& locked,
+ message_index& prepared,
+ long& rcnt,
+ long& idcnt);
+ qpid::broker::RecoverableMessage::shared_ptr getExternMessage(qpid::broker::RecoveryManager& recovery,
+ uint64_t mId,
+ unsigned& headerSize);
+ void recoverExchanges(TxnCtxt& txn,
+ qpid::broker::RecoveryManager& recovery,
+ exchange_index& index);
+ void recoverBindings(TxnCtxt& txn,
+ exchange_index& exchanges,
+ queue_index& queues);
+ void recoverGeneral(TxnCtxt& txn,
+ qpid::broker::RecoveryManager& recovery);
+ int enqueueMessage(TxnCtxt& txn,
+ IdDbt& msgId,
+ qpid::broker::RecoverableMessage::shared_ptr& msg,
+ queue_index& index,
+ txn_list& locked,
+ message_index& prepared);
+ void readTplStore();
+ void recoverTplStore();
+ void recoverLockedMappings(txn_list& txns);
+ TxnCtxt* check(qpid::broker::TransactionContext* ctxt);
+ u_int64_t msgEncode(std::vector<char>& buff, const boost::intrusive_ptr<qpid::broker::PersistableMessage>& message);
+ void store(const qpid::broker::PersistableQueue* queue,
+ TxnCtxt* txn,
+ const boost::intrusive_ptr<qpid::broker::PersistableMessage>& message,
+ bool newId);
+ void async_dequeue(qpid::broker::TransactionContext* ctxt,
+ const boost::intrusive_ptr<qpid::broker::PersistableMessage>& msg,
+ const qpid::broker::PersistableQueue& queue);
+ void destroy(db_ptr db,
+ const qpid::broker::Persistable& p);
+ bool create(db_ptr db,
+ IdSequence& seq,
+ const qpid::broker::Persistable& p);
+ void completed(TxnCtxt& txn,
+ bool commit);
+ void deleteBindingsForQueue(const qpid::broker::PersistableQueue& queue);
+ void deleteBinding(const qpid::broker::PersistableExchange& exchange,
+ const qpid::broker::PersistableQueue& queue,
+ const std::string& key);
+
+ void put(db_ptr db,
+ DbTxn* txn,
+ Dbt& key,
+ Dbt& value);
+ void open(db_ptr db,
+ DbTxn* txn,
+ const char* file,
+ bool dupKey);
+ void closeDbs();
+
+ // journal functions
+ void createJrnlQueue(const qpid::broker::PersistableQueue& queue);
+ u_int32_t bHash(const std::string str);
+ std::string getJrnlDir(const qpid::broker::PersistableQueue& queue); //for example /var/rhm/ + queueDir/
+ std::string getJrnlHashDir(const std::string& queueName);
+ std::string getJrnlBaseDir();
+ std::string getBdbBaseDir();
+ std::string getTplBaseDir();
+ inline void checkInit() {
+ // TODO: change the default dir to ~/.qpidd
+ if (!isInit) { init("/tmp"); isInit = true; }
+ }
+ void chkTplStoreInit();
+
+ // debug aid for printing XIDs that may contain non-printable chars
+ static std::string xid2str(const std::string xid) {
+ std::ostringstream oss;
+ oss << std::hex << std::setfill('0');
+ for (unsigned i=0; i<xid.size(); i++) {
+ if (isprint(xid[i]))
+ oss << xid[i];
+ else
+ oss << "/" << std::setw(2) << (int)((char)xid[i]);
+ }
+ return oss.str();
+ }
+
+ public:
+ typedef boost::shared_ptr<MessageStoreImpl> shared_ptr;
+
+ MessageStoreImpl(qpid::broker::Broker* broker, const char* envpath = 0);
+
+ virtual ~MessageStoreImpl();
+
+ bool init(const qpid::Options* options);
+
+ bool init(const std::string& dir,
+ u_int16_t jfiles = defNumJrnlFiles,
+ u_int32_t jfileSizePgs = defJrnlFileSizePgs,
+ const bool truncateFlag = false,
+ u_int32_t wCachePageSize = defWCachePageSize,
+ u_int16_t tplJfiles = defTplNumJrnlFiles,
+ u_int32_t tplJfileSizePgs = defTplJrnlFileSizePgs,
+ u_int32_t tplWCachePageSize = defTplWCachePageSize,
+ bool autoJExpand = defAutoJrnlExpand,
+ u_int16_t autoJExpandMaxFiles = defAutoJrnlExpandMaxFiles);
+
+ void truncateInit(const bool saveStoreContent = false);
+
+ void initManagement ();
+
+ void finalize();
+
+ void create(qpid::broker::PersistableQueue& queue,
+ const qpid::framing::FieldTable& args);
+
+ void destroy(qpid::broker::PersistableQueue& queue);
+
+ void create(const qpid::broker::PersistableExchange& queue,
+ const qpid::framing::FieldTable& args);
+
+ void destroy(const qpid::broker::PersistableExchange& queue);
+
+ void bind(const qpid::broker::PersistableExchange& exchange,
+ const qpid::broker::PersistableQueue& queue,
+ const std::string& key,
+ const qpid::framing::FieldTable& args);
+
+ void unbind(const qpid::broker::PersistableExchange& exchange,
+ const qpid::broker::PersistableQueue& queue,
+ const std::string& key,
+ const qpid::framing::FieldTable& args);
+
+ void create(const qpid::broker::PersistableConfig& config);
+
+ void destroy(const qpid::broker::PersistableConfig& config);
+
+ void recover(qpid::broker::RecoveryManager& queues);
+
+ void stage(const boost::intrusive_ptr<qpid::broker::PersistableMessage>& msg);
+
+ void destroy(qpid::broker::PersistableMessage& msg);
+
+ void appendContent(const boost::intrusive_ptr<const qpid::broker::PersistableMessage>& msg,
+ const std::string& data);
+
+ void loadContent(const qpid::broker::PersistableQueue& queue,
+ const boost::intrusive_ptr<const qpid::broker::PersistableMessage>& msg,
+ std::string& data,
+ uint64_t offset,
+ uint32_t length);
+
+ void enqueue(qpid::broker::TransactionContext* ctxt,
+ const boost::intrusive_ptr<qpid::broker::PersistableMessage>& msg,
+ const qpid::broker::PersistableQueue& queue);
+
+ void dequeue(qpid::broker::TransactionContext* ctxt,
+ const boost::intrusive_ptr<qpid::broker::PersistableMessage>& msg,
+ const qpid::broker::PersistableQueue& queue);
+
+ void flush(const qpid::broker::PersistableQueue& queue);
+
+ u_int32_t outstandingQueueAIO(const qpid::broker::PersistableQueue& queue);
+
+ void collectPreparedXids(std::set<std::string>& xids);
+
+ std::auto_ptr<qpid::broker::TransactionContext> begin();
+
+ std::auto_ptr<qpid::broker::TPCTransactionContext> begin(const std::string& xid);
+
+ void prepare(qpid::broker::TPCTransactionContext& ctxt);
+
+ void localPrepare(TxnCtxt* ctxt);
+
+ void commit(qpid::broker::TransactionContext& ctxt);
+
+ void abort(qpid::broker::TransactionContext& ctxt);
+
+ qpid::management::ManagementObject::shared_ptr GetManagementObject (void) const
+ { return mgmtObject; }
+
+ inline qpid::management::Manageable::status_t ManagementMethod (u_int32_t, qpid::management::Args&, std::string&)
+ { return qpid::management::Manageable::STATUS_OK; }
+
+ std::string getStoreDir() const;
+
+ private:
+ void journalDeleted(JournalImpl&);
+
+}; // class MessageStoreImpl
+
+} // namespace msgstore
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_MESSAGESTOREIMPL_H
diff --git a/qpid/cpp/src/qpid/legacystore/PreparedTransaction.cpp b/qpid/cpp/src/qpid/legacystore/PreparedTransaction.cpp
new file mode 100644
index 0000000000..50b81e2824
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/PreparedTransaction.cpp
@@ -0,0 +1,81 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include "qpid/legacystore/PreparedTransaction.h"
+#include <algorithm>
+
+using namespace mrg::msgstore;
+using std::string;
+
+void LockedMappings::add(queue_id queue, message_id message)
+{
+ locked.push_back(std::make_pair(queue, message));
+}
+
+bool LockedMappings::isLocked(queue_id queue, message_id message)
+{
+ idpair op( std::make_pair(queue, message) );
+ return find(locked.begin(), locked.end(), op) != locked.end();
+}
+
+void LockedMappings::add(LockedMappings::map& map, std::string& key, queue_id queue, message_id message)
+{
+ LockedMappings::map::iterator i = map.find(key);
+ if (i == map.end()) {
+ LockedMappings::shared_ptr ptr(new LockedMappings());
+ i = map.insert(std::make_pair(key, ptr)).first;
+ }
+ i->second->add(queue, message);
+}
+
+bool PreparedTransaction::isLocked(queue_id queue, message_id message)
+{
+ return (enqueues.get() && enqueues->isLocked(queue, message))
+ || (dequeues.get() && dequeues->isLocked(queue, message));
+}
+
+
+bool PreparedTransaction::isLocked(PreparedTransaction::list& txns, queue_id queue, message_id message)
+{
+ for (PreparedTransaction::list::iterator i = txns.begin(); i != txns.end(); i++) {
+ if (i->isLocked(queue, message)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+PreparedTransaction::list::iterator PreparedTransaction::getLockedPreparedTransaction(PreparedTransaction::list& txns, queue_id queue, message_id message)
+{
+ for (PreparedTransaction::list::iterator i = txns.begin(); i != txns.end(); i++) {
+ if (i->isLocked(queue, message)) {
+ return i;
+ }
+ }
+ return txns.end();
+}
+
+PreparedTransaction::PreparedTransaction(const std::string& _xid,
+ LockedMappings::shared_ptr _enqueues,
+ LockedMappings::shared_ptr _dequeues)
+
+ : xid(_xid), enqueues(_enqueues), dequeues(_dequeues) {}
+
diff --git a/qpid/cpp/src/qpid/legacystore/PreparedTransaction.h b/qpid/cpp/src/qpid/legacystore/PreparedTransaction.h
new file mode 100644
index 0000000000..c5f7b9458a
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/PreparedTransaction.h
@@ -0,0 +1,74 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#ifndef QPID_LEGACYSTORE_PREPAREDTRANSACTION_H
+#define QPID_LEGACYSTORE_PREPAREDTRANSACTION_H
+
+#include <list>
+#include <map>
+#include <set>
+#include <string>
+#include <boost/shared_ptr.hpp>
+#include <boost/ptr_container/ptr_list.hpp>
+
+namespace mrg{
+namespace msgstore{
+
+typedef u_int64_t queue_id;
+typedef u_int64_t message_id;
+
+class LockedMappings
+{
+public:
+ typedef boost::shared_ptr<LockedMappings> shared_ptr;
+ typedef std::map<std::string, shared_ptr> map;
+ typedef std::pair<queue_id, message_id> idpair;
+ typedef std::list<idpair>::iterator iterator;
+
+ void add(queue_id queue, message_id message);
+ bool isLocked(queue_id queue, message_id message);
+ std::size_t size() { return locked.size(); }
+ iterator begin() { return locked.begin(); }
+ iterator end() { return locked.end(); }
+
+ static void add(LockedMappings::map& map, std::string& key, queue_id queue, message_id message);
+
+private:
+ std::list<idpair> locked;
+};
+
+struct PreparedTransaction
+{
+ typedef boost::ptr_list<PreparedTransaction> list;
+
+ const std::string xid;
+ const LockedMappings::shared_ptr enqueues;
+ const LockedMappings::shared_ptr dequeues;
+
+ PreparedTransaction(const std::string& xid, LockedMappings::shared_ptr enqueues, LockedMappings::shared_ptr dequeues);
+ bool isLocked(queue_id queue, message_id message);
+ static bool isLocked(PreparedTransaction::list& txns, queue_id queue, message_id message);
+ static PreparedTransaction::list::iterator getLockedPreparedTransaction(PreparedTransaction::list& txns, queue_id queue, message_id message);
+};
+
+}}
+
+#endif // ifndef QPID_LEGACYSTORE_PREPAREDTRANSACTION_H
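A short standalone sketch (not part of this patch) exercising the LockedMappings and PreparedTransaction helpers declared above; it assumes the legacystore headers are on the include path, and the ids and xid are arbitrary.

#include <cassert>
#include <iostream>
#include "qpid/legacystore/PreparedTransaction.h"

using namespace mrg::msgstore;

int main() {
    LockedMappings::shared_ptr enq(new LockedMappings);
    LockedMappings::shared_ptr deq(new LockedMappings);
    enq->add(1, 42);   // queue id 1, message id 42 locked by the prepared txn

    PreparedTransaction::list txns;
    txns.push_back(new PreparedTransaction("xid-1", enq, deq));

    assert(PreparedTransaction::isLocked(txns, 1, 42));   // found via the static helper
    assert(!PreparedTransaction::isLocked(txns, 1, 43));  // other messages are untouched
    std::cout << "locked mappings: " << enq->size() << std::endl;
    return 0;
}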
diff --git a/qpid/cpp/src/qpid/legacystore/StoreException.h b/qpid/cpp/src/qpid/legacystore/StoreException.h
new file mode 100644
index 0000000000..6624aafd5a
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/StoreException.h
@@ -0,0 +1,56 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#ifndef QPID_LEGACYSTORE_STOREEXCEPTION_H
+#define QPID_LEGACYSTORE_STOREEXCEPTION_H
+
+#include "qpid/legacystore/IdDbt.h"
+#include <boost/format.hpp>
+
+namespace mrg{
+namespace msgstore{
+
+class StoreException : public std::exception
+{
+ std::string text;
+public:
+ StoreException(const std::string& _text) : text(_text) {}
+ StoreException(const std::string& _text, const DbException& cause) : text(_text + ": " + cause.what()) {}
+ virtual ~StoreException() throw() {}
+ virtual const char* what() const throw() { return text.c_str(); }
+};
+
+class StoreFullException : public StoreException
+{
+public:
+ StoreFullException(const std::string& _text) : StoreException(_text) {}
+ StoreFullException(const std::string& _text, const DbException& cause) : StoreException(_text, cause) {}
+ virtual ~StoreFullException() throw() {}
+
+};
+
+#define THROW_STORE_EXCEPTION(MESSAGE) throw StoreException(boost::str(boost::format("%s (%s:%d)") % (MESSAGE) % __FILE__ % __LINE__))
+#define THROW_STORE_EXCEPTION_2(MESSAGE, EXCEPTION) throw StoreException(boost::str(boost::format("%s (%s:%d)") % (MESSAGE) % __FILE__ % __LINE__), EXCEPTION)
+#define THROW_STORE_FULL_EXCEPTION(MESSAGE) throw StoreFullException(boost::str(boost::format("%s (%s:%d)") % (MESSAGE) % __FILE__ % __LINE__))
+
+}}
+
+#endif // ifndef QPID_LEGACYSTORE_STOREEXCEPTION_H
diff --git a/qpid/cpp/src/qpid/legacystore/StorePlugin.cpp b/qpid/cpp/src/qpid/legacystore/StorePlugin.cpp
new file mode 100644
index 0000000000..f9b77ce02c
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/StorePlugin.cpp
@@ -0,0 +1,81 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include "qpid/broker/Broker.h"
+#include "qpid/Plugin.h"
+#include "qpid/Options.h"
+#include "qpid/DataDir.h"
+#include "qpid/log/Statement.h"
+#include "qpid/legacystore/MessageStoreImpl.h"
+
+using mrg::msgstore::MessageStoreImpl;
+
+namespace qpid {
+namespace broker {
+
+using namespace std;
+
+struct StorePlugin : public Plugin {
+
+ MessageStoreImpl::StoreOptions options;
+ boost::shared_ptr<MessageStoreImpl> store;
+
+ Options* getOptions() { return &options; }
+
+ void earlyInitialize (Plugin::Target& target)
+ {
+ Broker* broker = dynamic_cast<Broker*>(&target);
+ if (!broker) return;
+ store.reset(new MessageStoreImpl(broker));
+ DataDir& dataDir = broker->getDataDir ();
+ if (options.storeDir.empty ())
+ {
+ if (!dataDir.isEnabled ())
+ throw Exception ("msgstore: If --data-dir is blank or --no-data-dir is specified, --store-dir must be present.");
+
+ options.storeDir = dataDir.getPath ();
+ }
+ store->init(&options);
+ boost::shared_ptr<qpid::broker::MessageStore> brokerStore(store);
+ broker->setStore(brokerStore);
+ target.addFinalizer(boost::bind(&StorePlugin::finalize, this));
+ }
+
+ void initialize(Plugin::Target& target)
+ {
+ Broker* broker = dynamic_cast<Broker*>(&target);
+ if (!broker) return;
+ if (!store) return;
+ QPID_LOG(info, "Enabling management instrumentation for the store.");
+ store->initManagement();
+ }
+
+ void finalize()
+ {
+ store.reset();
+ }
+
+ const char* id() {return "StorePlugin";}
+};
+
+static StorePlugin instance; // Static initialization.
+
+}} // namespace qpid::broker
diff --git a/qpid/cpp/src/qpid/legacystore/TxnCtxt.cpp b/qpid/cpp/src/qpid/legacystore/TxnCtxt.cpp
new file mode 100644
index 0000000000..1db41f4c70
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/TxnCtxt.cpp
@@ -0,0 +1,184 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include "qpid/legacystore/TxnCtxt.h"
+
+#include <sstream>
+
+#include "qpid/legacystore/jrnl/jexception.h"
+#include "qpid/legacystore/StoreException.h"
+
+namespace mrg {
+namespace msgstore {
+
+void TxnCtxt::completeTxn(bool commit) {
+ sync();
+ for (ipqItr i = impactedQueues.begin(); i != impactedQueues.end(); i++) {
+ commitTxn(static_cast<JournalImpl*>(*i), commit);
+ }
+ impactedQueues.clear();
+ if (preparedXidStorePtr)
+ commitTxn(preparedXidStorePtr, commit);
+}
+
+void TxnCtxt::commitTxn(JournalImpl* jc, bool commit) {
+ if (jc && loggedtx) { /* if using journal */
+ boost::intrusive_ptr<DataTokenImpl> dtokp(new DataTokenImpl);
+ dtokp->addRef();
+ dtokp->set_external_rid(true);
+ dtokp->set_rid(loggedtx->next());
+ try {
+ if (commit) {
+ jc->txn_commit(dtokp.get(), getXid());
+ sync();
+ } else {
+ jc->txn_abort(dtokp.get(), getXid());
+ }
+ } catch (const journal::jexception& e) {
+ THROW_STORE_EXCEPTION(std::string("Error commit") + e.what());
+ }
+ }
+}
+
+// static
+uuid_t TxnCtxt::uuid;
+
+// static
+IdSequence TxnCtxt::uuidSeq;
+
+// static
+bool TxnCtxt::staticInit = TxnCtxt::setUuid();
+
+// static
+bool TxnCtxt::setUuid() {
+ ::uuid_generate(uuid);
+ return true;
+}
+
+TxnCtxt::TxnCtxt(IdSequence* _loggedtx) : loggedtx(_loggedtx), dtokp(new DataTokenImpl), preparedXidStorePtr(0), txn(0) {
+ if (loggedtx) {
+// // Human-readable tid: 53 bytes
+// // uuid_t is a char[16]
+// tid.reserve(53);
+// u_int64_t* u1 = (u_int64_t*)uuid;
+// u_int64_t* u2 = (u_int64_t*)(uuid + sizeof(u_int64_t));
+// std::stringstream s;
+// s << "tid:" << std::hex << std::setfill('0') << std::setw(16) << uuidSeq.next() << ":" << std::setw(16) << *u1 << std::setw(16) << *u2;
+// tid.assign(s.str());
+
+ // Binary tid: 24 bytes
+ tid.reserve(24);
+ u_int64_t c = uuidSeq.next();
+ tid.append((char*)&c, sizeof(c));
+ tid.append((char*)&uuid, sizeof(uuid));
+ }
+}
+
+TxnCtxt::TxnCtxt(std::string _tid, IdSequence* _loggedtx) : loggedtx(_loggedtx), dtokp(new DataTokenImpl), preparedXidStorePtr(0), tid(_tid), txn(0) {}
+
+TxnCtxt::~TxnCtxt() { abort(); }
+
+void TxnCtxt::sync() {
+ if (loggedtx) {
+ try {
+ for (ipqItr i = impactedQueues.begin(); i != impactedQueues.end(); i++)
+ jrnl_flush(static_cast<JournalImpl*>(*i));
+ if (preparedXidStorePtr)
+ jrnl_flush(preparedXidStorePtr);
+ for (ipqItr i = impactedQueues.begin(); i != impactedQueues.end(); i++)
+ jrnl_sync(static_cast<JournalImpl*>(*i), &journal::jcntl::_aio_cmpl_timeout);
+ if (preparedXidStorePtr)
+ jrnl_sync(preparedXidStorePtr, &journal::jcntl::_aio_cmpl_timeout);
+ } catch (const journal::jexception& e) {
+ THROW_STORE_EXCEPTION(std::string("Error during txn sync: ") + e.what());
+ }
+ }
+}
+
+void TxnCtxt::jrnl_flush(JournalImpl* jc) {
+ if (jc && !(jc->is_txn_synced(getXid())))
+ jc->flush();
+}
+
+void TxnCtxt::jrnl_sync(JournalImpl* jc, timespec* timeout) {
+ if (!jc || jc->is_txn_synced(getXid()))
+ return;
+ while (jc->get_wr_aio_evt_rem()) {
+ if (jc->get_wr_events(timeout) == journal::jerrno::AIO_TIMEOUT && timeout)
+ THROW_STORE_EXCEPTION(std::string("Error: timeout waiting for TxnCtxt::jrnl_sync()"));
+ }
+}
+
+void TxnCtxt::begin(DbEnv* env, bool sync) {
+ int err;
+ try { err = env->txn_begin(0, &txn, 0); }
+ catch (const DbException&) { txn = 0; throw; }
+ if (err != 0) {
+ std::ostringstream oss;
+ oss << "Error: Env::txn_begin() returned error code: " << err;
+ THROW_STORE_EXCEPTION(oss.str());
+ }
+ if (sync)
+ globalHolder = AutoScopedLock(new qpid::sys::Mutex::ScopedLock(globalSerialiser));
+}
+
+void TxnCtxt::commit() {
+ if (txn) {
+ txn->commit(0);
+ txn = 0;
+ globalHolder.reset();
+ }
+}
+
+void TxnCtxt::abort(){
+ if (txn) {
+ txn->abort();
+ txn = 0;
+ globalHolder.reset();
+ }
+}
+
+DbTxn* TxnCtxt::get() { return txn; }
+
+bool TxnCtxt::isTPC() { return false; }
+
+const std::string& TxnCtxt::getXid() { return tid; }
+
+void TxnCtxt::addXidRecord(qpid::broker::ExternalQueueStore* queue) { impactedQueues.insert(queue); }
+
+void TxnCtxt::complete(bool commit) { completeTxn(commit); }
+
+bool TxnCtxt::impactedQueuesEmpty() { return impactedQueues.empty(); }
+
+DataTokenImpl* TxnCtxt::getDtok() { return dtokp.get(); }
+
+void TxnCtxt::incrDtokRef() { dtokp->addRef(); }
+
+void TxnCtxt::recoverDtok(const u_int64_t rid, const std::string xid) {
+ dtokp->set_rid(rid);
+ dtokp->set_wstate(DataTokenImpl::ENQ);
+ dtokp->set_xid(xid);
+ dtokp->set_external_rid(true);
+}
+
+TPCTxnCtxt::TPCTxnCtxt(const std::string& _xid, IdSequence* _loggedtx) : TxnCtxt(_loggedtx), xid(_xid) {}
+
+}}
diff --git a/qpid/cpp/src/qpid/legacystore/TxnCtxt.h b/qpid/cpp/src/qpid/legacystore/TxnCtxt.h
new file mode 100644
index 0000000000..77eaa27cd7
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/TxnCtxt.h
@@ -0,0 +1,117 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#ifndef QPID_LEGACYSTORE_TXNCTXT_H
+#define QPID_LEGACYSTORE_TXNCTXT_H
+
+#include "db-inc.h"
+#include <memory>
+#include <set>
+#include <string>
+
+#include "qpid/legacystore/DataTokenImpl.h"
+#include "qpid/legacystore/IdSequence.h"
+#include "qpid/legacystore/JournalImpl.h"
+#include "qpid/broker/PersistableQueue.h"
+#include "qpid/broker/TransactionalStore.h"
+#include "qpid/sys/Mutex.h"
+#include "qpid/sys/uuid.h"
+
+#include <boost/intrusive_ptr.hpp>
+
+namespace mrg {
+namespace msgstore {
+
+class TxnCtxt : public qpid::broker::TransactionContext
+{
+ protected:
+ static qpid::sys::Mutex globalSerialiser;
+
+ static uuid_t uuid;
+ static IdSequence uuidSeq;
+ static bool staticInit;
+ static bool setUuid();
+
+ typedef std::set<qpid::broker::ExternalQueueStore*> ipqdef;
+ typedef ipqdef::iterator ipqItr;
+ typedef std::auto_ptr<qpid::sys::Mutex::ScopedLock> AutoScopedLock;
+
+ ipqdef impactedQueues; // list of Queues used in the txn
+ IdSequence* loggedtx;
+ boost::intrusive_ptr<DataTokenImpl> dtokp;
+ AutoScopedLock globalHolder;
+ JournalImpl* preparedXidStorePtr;
+
+ /**
+ * local txn id, if non XA.
+ */
+ std::string tid;
+ DbTxn* txn;
+
+ virtual void completeTxn(bool commit);
+ void commitTxn(JournalImpl* jc, bool commit);
+ void jrnl_flush(JournalImpl* jc);
+ void jrnl_sync(JournalImpl* jc, timespec* timeout);
+
+ public:
+ TxnCtxt(IdSequence* _loggedtx=NULL);
+ TxnCtxt(std::string _tid, IdSequence* _loggedtx);
+ virtual ~TxnCtxt();
+
+ /**
+ * Call to make sure all the data for this txn is written to safe store
+ *
+ *@throws StoreException if the data could not be synced.
+ */
+ void sync();
+ void begin(DbEnv* env, bool sync = false);
+ void commit();
+ void abort();
+ DbTxn* get();
+ virtual bool isTPC();
+ virtual const std::string& getXid();
+
+ void addXidRecord(qpid::broker::ExternalQueueStore* queue);
+ inline void prepare(JournalImpl* _preparedXidStorePtr) { preparedXidStorePtr = _preparedXidStorePtr; }
+ void complete(bool commit);
+ bool impactedQueuesEmpty();
+ DataTokenImpl* getDtok();
+ void incrDtokRef();
+ void recoverDtok(const u_int64_t rid, const std::string xid);
+};
+
+
+class TPCTxnCtxt : public TxnCtxt, public qpid::broker::TPCTransactionContext
+{
+ protected:
+ const std::string xid;
+
+ public:
+ TPCTxnCtxt(const std::string& _xid, IdSequence* _loggedtx);
+ inline virtual bool isTPC() { return true; }
+ inline virtual const std::string& getXid() { return xid; }
+};
+
+}}
+
+#endif // ifndef QPID_LEGACYSTORE_TXNCTXT_H
+
+
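A standalone sketch (not part of this patch) of the local-transaction pattern TxnCtxt supports against a Berkeley DB environment, mirroring the begin/commit/abort-on-error usage seen in MessageStoreImpl::deleteBindingsForQueue() above. The environment path is hypothetical, its directory must already exist, and the legacystore headers are assumed to be on the include path.

#include "qpid/legacystore/TxnCtxt.h"

int main() {
    DbEnv env(0);
    // Open (or create) a transactional environment; the home directory must already exist.
    env.open("/tmp/bdb-env", DB_CREATE | DB_INIT_TXN | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL, 0);

    mrg::msgstore::TxnCtxt txn;   // no IdSequence: plain BDB txn, no journal involvement
    txn.begin(&env, true);        // true: also take the global serialiser lock
    try {
        // ... Db reads/writes performed with txn.get() as the DbTxn* ...
        txn.commit();
    } catch (...) {
        txn.abort();              // roll back on any failure, as the store code does
        throw;
    }
    env.close(0);
    return 0;
}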
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/aio.cpp b/qpid/cpp/src/qpid/legacystore/jrnl/aio.cpp
new file mode 100644
index 0000000000..ffbddd887e
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/aio.cpp
@@ -0,0 +1,41 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file aio.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::aio (libaio interface
+ * encapsulation). See comments in file aio.h for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#include "qpid/legacystore/jrnl/aio.h"
+
+namespace mrg
+{
+namespace journal
+{
+
+} // namespace journal
+} // namespace mrg
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/aio.h b/qpid/cpp/src/qpid/legacystore/jrnl/aio.h
new file mode 100644
index 0000000000..b1de5f79f7
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/aio.h
@@ -0,0 +1,153 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file aio.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * This file contains an encapsulation of the libaio interface used
+ * by the journal.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_AIO_H
+#define QPID_LEGACYSTORE_JRNL_AIO_H
+
+#include <libaio.h>
+#include <cstring>
+#include <sys/types.h>
+#include <string.h>
+
+namespace mrg
+{
+namespace journal
+{
+
+typedef iocb aio_cb;
+typedef io_event aio_event;
+
+/**
+ * \brief A thin C++ wrapper for the libaio functions used by the journal. Only the functions the journal
+ * needs are included here; this is not a complete libaio interface.
+ */
+class aio
+{
+public:
+ static inline int queue_init(int maxevents, io_context_t* ctxp)
+ {
+ return ::io_queue_init(maxevents, ctxp);
+ }
+
+ static inline int queue_release(io_context_t ctx)
+ {
+ return ::io_queue_release(ctx);
+ }
+
+ static inline int submit(io_context_t ctx, long nr, aio_cb* aios[])
+ {
+ return ::io_submit(ctx, nr, aios);
+ }
+
+ static inline int getevents(io_context_t ctx, long min_nr, long nr, aio_event* events, timespec* const timeout)
+ {
+ return ::io_getevents(ctx, min_nr, nr, events, timeout);
+ }
+
+ /**
+ * \brief Prepares an aio_cb struct for read use. (This is a wrapper for libaio's ::io_prep_pread()
+ * function; unlike prep_pread_2() below, it does not preserve the iocb's data pointer.)
+ *
+ * \param aiocbp Pointer to the aio_cb struct to be prepared.
+ * \param fd File descriptor to be used for read.
+ * \param buf Pointer to buffer in which read data is to be placed.
+ * \param count Number of bytes to read - buffer must be large enough.
+ * \param offset Offset within file from which data will be read.
+ */
+ static inline void prep_pread(aio_cb* aiocbp, int fd, void* buf, std::size_t count, int64_t offset)
+ {
+ ::io_prep_pread(aiocbp, fd, buf, count, offset);
+ }
+
+ /**
+ * \brief Special version of libaio's io_prep_pread() which preserves the value of the data pointer. This allows
+ * iocbs to be initialized with a pointer that can be re-used. This prepares an aio_cb struct for read use.
+ *
+ * \param aiocbp Pointer to the aio_cb struct to be prepared.
+ * \param fd File descriptor to be used for read.
+ * \param buf Pointer to buffer in which read data is to be placed.
+ * \param count Number of bytes to read - buffer must be large enough.
+ * \param offset Offset within file from which data will be read.
+ */
+ static inline void prep_pread_2(aio_cb* aiocbp, int fd, void* buf, std::size_t count, int64_t offset)
+ {
+ std::memset((void*) ((char*) aiocbp + sizeof(void*)), 0, sizeof(aio_cb) - sizeof(void*));
+ aiocbp->aio_fildes = fd;
+ aiocbp->aio_lio_opcode = IO_CMD_PREAD;
+ aiocbp->aio_reqprio = 0;
+ aiocbp->u.c.buf = buf;
+ aiocbp->u.c.nbytes = count;
+ aiocbp->u.c.offset = offset;
+ }
+
+ /**
+ * \brief Prepares an aio_cb struct for write use. (This is a wrapper for libaio's ::io_prep_pwrite()
+ * function; unlike prep_pwrite_2() below, it does not preserve the iocb's data pointer.)
+ *
+ * \param aiocbp Pointer to the aio_cb struct to be prepared.
+ * \param fd File descriptor to be used for write.
+ * \param buf Pointer to buffer in which data to be written is located.
+ * \param count Number of bytes to write.
+ * \param offset Offset within file to which data will be written.
+ */
+ static inline void prep_pwrite(aio_cb* aiocbp, int fd, void* buf, std::size_t count, int64_t offset)
+ {
+ ::io_prep_pwrite(aiocbp, fd, buf, count, offset);
+ }
+
+ /**
+ * \brief Special version of libaio's io_prep_pwrite() which preserves the value of the data pointer. This allows
+ * iocbs to be initialized with a pointer that can be re-used. This function prepares an aio_cb struct for write
+ * use.
+ *
+ * \param aiocbp Pointer to the aio_cb struct to be prepared.
+ * \param fd File descriptor to be used for write.
+ * \param buf Pointer to buffer in which data to be written is located.
+ * \param count Number of bytes to write.
+ * \param offset Offset within file to which data will be written.
+ */
+ static inline void prep_pwrite_2(aio_cb* aiocbp, int fd, void* buf, std::size_t count, int64_t offset)
+ {
+ std::memset((void*) ((char*) aiocbp + sizeof(void*)), 0, sizeof(aio_cb) - sizeof(void*));
+ aiocbp->aio_fildes = fd;
+ aiocbp->aio_lio_opcode = IO_CMD_PWRITE;
+ aiocbp->aio_reqprio = 0;
+ aiocbp->u.c.buf = buf;
+ aiocbp->u.c.nbytes = count;
+ aiocbp->u.c.offset = offset;
+ }
+};
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_AIO_H
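A standalone sketch (not part of this patch) of the typical call sequence for the mrg::journal::aio wrapper above: set up a context, prepare and submit one read, wait for its completion event, then release the context. The file name and buffer size are hypothetical; real journal I/O typically uses O_DIRECT with block-aligned buffers, hence the aligned allocation here.

#include <fcntl.h>
#include <unistd.h>
#include <stdlib.h>
#include <iostream>
#include "qpid/legacystore/jrnl/aio.h"

using mrg::journal::aio;

int main() {
    io_context_t ctx = 0;
    if (aio::queue_init(8, &ctx) != 0) return 1;          // allow up to 8 in-flight events

    int fd = ::open("journal.dat", O_RDONLY);             // hypothetical journal file
    if (fd < 0) { aio::queue_release(ctx); return 1; }

    void* buf = 0;
    if (::posix_memalign(&buf, 4096, 4096) != 0) { ::close(fd); aio::queue_release(ctx); return 1; }

    mrg::journal::aio_cb cb;
    mrg::journal::aio_cb* cbs[1] = { &cb };
    aio::prep_pread(&cb, fd, buf, 4096, 0);               // read the first 4 KiB at offset 0
    if (aio::submit(ctx, 1, cbs) == 1) {
        mrg::journal::aio_event ev;
        aio::getevents(ctx, 1, 1, &ev, 0);                // block until the read completes
        std::cout << "read returned " << ev.res << " bytes" << std::endl;
    }

    ::free(buf);
    ::close(fd);
    aio::queue_release(ctx);
    return 0;
}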
diff --git a/qpid/gentools/templ.cpp/model/AMQP_ClientProxy.cpp.tmpl b/qpid/cpp/src/qpid/legacystore/jrnl/aio_callback.h
index 8cca6e5cec..90249278a5 100644
--- a/qpid/gentools/templ.cpp/model/AMQP_ClientProxy.cpp.tmpl
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/aio_callback.h
@@ -1,4 +1,3 @@
-&{AMQP_ClientProxy.cpp}
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
@@ -8,9 +7,9 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
- *
+ *
* http://www.apache.org/licenses/LICENSE-2.0
- *
+ *
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -20,33 +19,39 @@
*
*/
-/*
- * This file is auto-generated by ${GENERATOR} - do not modify.
- * Supported AMQP versions:
-%{VLIST} * ${major}-${minor}
+/**
+ * \file aio_callback.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * This file contains the definition of the abstract AIO callback
+ * interface (class aio_callback).
+ *
+ * \author Kim van der Riet
*/
-#include <sstream>
-
-#include <AMQP_ClientProxy.h>
-#include <AMQFrame.h>
-%{MLIST} ${cpc_method_body_include}
+#ifndef QPID_LEGACYSTORE_JRNL_AIO_CALLBACK_H
+#define QPID_LEGACYSTORE_JRNL_AIO_CALLBACK_H
-namespace qpid {
-namespace framing {
+#include <vector>
+#include <sys/types.h>
-AMQP_ClientProxy::AMQP_ClientProxy(OutputHandler* out, u_int8_t major, u_int8_t minor) :
-%{CLIST} ${cpc_constructor_initializer}
-
-{}
+namespace mrg
+{
+namespace journal
+{
- // Inner class instance get methods
-
-%{CLIST} ${cpc_inner_class_get_method}
+ class data_tok;
- // Inner class implementation
+ class aio_callback
+ {
+ public:
+ virtual ~aio_callback() {}
+ virtual void wr_aio_cb(std::vector<data_tok*>& dtokl) = 0;
+ virtual void rd_aio_cb(std::vector<u_int16_t>& pil) = 0;
+ };
-%{CLIST} ${cpc_inner_class_impl}
+} // namespace journal
+} // namespace mrg
-} /* namespace framing */
-} /* namespace qpid */
+#endif // ifndef QPID_LEGACYSTORE_JRNL_AIO_CALLBACK_H
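For reference, an illustrative (and hypothetical) implementation of this interface is sketched below; the journal invokes wr_aio_cb() with the data tokens whose write AIO has completed, and rd_aio_cb() with what appears to be a list of page indices, so the bodies here are placeholders only.

#include <cstddef>
#include <vector>
#include "qpid/legacystore/jrnl/aio_callback.h"
#include "qpid/legacystore/jrnl/data_tok.h"

class my_journal_client : public mrg::journal::aio_callback
{
  public:
    // Invoked when write (enqueue/dequeue/txn) AIO operations complete.
    void wr_aio_cb(std::vector<mrg::journal::data_tok*>& dtokl)
    {
        for (std::size_t i = 0; i < dtokl.size(); i++)
        {
            // e.g. inspect dtokl[i]->wstate() and notify the owner of each token
        }
    }

    // Invoked when read AIO operations complete.
    void rd_aio_cb(std::vector<u_int16_t>& pil)
    {
        (void)pil;
    }
};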
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/cvar.cpp b/qpid/cpp/src/qpid/legacystore/jrnl/cvar.cpp
new file mode 100644
index 0000000000..e4010bf91f
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/cvar.cpp
@@ -0,0 +1,33 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file cvar.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::cvar (condition variable). See
+ * comments in file cvar.h for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#include "qpid/legacystore/jrnl/cvar.h"
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/cvar.h b/qpid/cpp/src/qpid/legacystore/jrnl/cvar.h
new file mode 100644
index 0000000000..0498e743a2
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/cvar.h
@@ -0,0 +1,87 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file cvar.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * This file contains a posix condition variable class.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_CVAR_H
+#define QPID_LEGACYSTORE_JRNL_CVAR_H
+
+#include <cstring>
+#include "qpid/legacystore/jrnl/jerrno.h"
+#include "qpid/legacystore/jrnl/jexception.h"
+#include "qpid/legacystore/jrnl/smutex.h"
+#include "qpid/legacystore/jrnl/time_ns.h"
+#include <pthread.h>
+#include <sstream>
+
+namespace mrg
+{
+namespace journal
+{
+
+ // Ultra-simple thread condition variable class
+ class cvar
+ {
+ private:
+ const smutex& _sm;
+ pthread_cond_t _c;
+ public:
+ inline cvar(const smutex& sm) : _sm(sm) { ::pthread_cond_init(&_c, 0); }
+ inline ~cvar() { ::pthread_cond_destroy(&_c); }
+ inline void wait()
+ {
+ PTHREAD_CHK(::pthread_cond_wait(&_c, _sm.get()), "::pthread_cond_wait", "cvar", "wait");
+ }
+ inline void timedwait(timespec& ts)
+ {
+ PTHREAD_CHK(::pthread_cond_timedwait(&_c, _sm.get(), &ts), "::pthread_cond_timedwait", "cvar", "timedwait");
+ }
+ inline bool waitintvl(const long intvl_ns)
+ {
+ time_ns t; t.now(); t+=intvl_ns;
+ int ret = ::pthread_cond_timedwait(&_c, _sm.get(), &t);
+ if (ret == ETIMEDOUT)
+ return true;
+ PTHREAD_CHK(ret, "::pthread_cond_timedwait", "cvar", "waitintvl");
+ return false;
+ }
+ inline void signal()
+ {
+ PTHREAD_CHK(::pthread_cond_signal(&_c), "::pthread_cond_signal", "cvar", "signal");
+ }
+ inline void broadcast()
+ {
+ PTHREAD_CHK(::pthread_cond_broadcast(&_c), "::pthread_cond_broadcast", "cvar", "broadcast");
+ }
+ };
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_CVAR_H
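A hedged sketch of the intended wait/signal pattern (not part of the patch); it assumes smutex wraps a pthread mutex whose get() returns the underlying pthread_mutex_t*, as the calls above imply, and that slock is the matching scoped-lock class used elsewhere in this patch (e.g. in data_tok.cpp).

#include "qpid/legacystore/jrnl/cvar.h"
#include "qpid/legacystore/jrnl/slock.h"
#include "qpid/legacystore/jrnl/smutex.h"

static mrg::journal::smutex m;
static mrg::journal::cvar cv(m);
static bool ready = false;

void waiter()
{
    mrg::journal::slock s(m);   // the mutex must be held before waiting
    while (!ready)              // re-test the predicate: spurious wake-ups are possible
        cv.wait();              // atomically releases m and re-acquires it on wake-up
}

void notifier()
{
    {
        mrg::journal::slock s(m);
        ready = true;
    }
    cv.broadcast();             // wake all waiters; signal() wakes at most one
}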
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/data_tok.cpp b/qpid/cpp/src/qpid/legacystore/jrnl/data_tok.cpp
new file mode 100644
index 0000000000..ce7206d80d
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/data_tok.cpp
@@ -0,0 +1,194 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file data_tok.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::data_tok (data block token).
+ * See comments in file data_tok.h for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#include "qpid/legacystore/jrnl/data_tok.h"
+
+#include <iomanip>
+#include "qpid/legacystore/jrnl/jerrno.h"
+#include "qpid/legacystore/jrnl/jexception.h"
+#include "qpid/legacystore/jrnl/slock.h"
+#include <sstream>
+
+namespace mrg
+{
+namespace journal
+{
+
+// Static members
+
+u_int64_t data_tok::_cnt = 0;
+smutex data_tok::_mutex;
+
+data_tok::data_tok():
+ _wstate(NONE),
+ _rstate(UNREAD),
+ _dsize(0),
+ _dblks_written(0),
+ _dblks_read(0),
+ _pg_cnt(0),
+ _fid(0),
+ _rid(0),
+ _xid(),
+ _dequeue_rid(0),
+ _external_rid(false)
+{
+ slock s(_mutex);
+ _icnt = _cnt++;
+}
+
+data_tok::~data_tok() {}
+
+const char*
+data_tok::wstate_str() const
+{
+ return wstate_str(_wstate);
+}
+
+const char*
+data_tok::wstate_str(write_state wstate)
+{
+ switch (wstate)
+ {
+ case NONE:
+ return "NONE";
+ case ENQ_CACHED:
+ return "ENQ_CACHED";
+ case ENQ_PART:
+ return "ENQ_PART";
+ case ENQ_SUBM:
+ return "ENQ_SUBM";
+ case ENQ:
+ return "ENQ";
+ case DEQ_CACHED:
+ return "DEQ_CACHED";
+ case DEQ_PART:
+ return "DEQ_PART";
+ case DEQ_SUBM:
+ return "DEQ_SUBM";
+ case DEQ:
+ return "DEQ";
+ case ABORT_CACHED:
+ return "ABORT_CACHED";
+ case ABORT_PART:
+ return "ABORT_PART";
+ case ABORT_SUBM:
+ return "ABORT_SUBM";
+ case ABORTED:
+ return "ABORTED";
+ case COMMIT_CACHED:
+ return "COMMIT_CACHED";
+ case COMMIT_PART:
+ return "COMMIT_PART";
+ case COMMIT_SUBM:
+ return "COMMIT_SUBM";
+ case COMMITTED:
+ return "COMMITTED";
+ }
+ // Not using default: forces compiler to ensure all cases are covered.
+ return "<wstate unknown>";
+}
+
+const char*
+data_tok::rstate_str() const
+{
+ return rstate_str(_rstate);
+}
+
+const char*
+data_tok::rstate_str(read_state rstate)
+{
+ switch (rstate)
+ {
+ case UNREAD:
+ return "UNREAD";
+ case READ_PART:
+ return "READ_PART";
+ case SKIP_PART:
+ return "SKIP_PART";
+ case READ:
+ return "READ";
+ // Not using default: forces compiler to ensure all cases are covered.
+ }
+ return "<rstate unknown>";
+}
+
+void
+data_tok::set_rstate(const read_state rstate)
+{
+ if (_wstate != ENQ && rstate != UNREAD)
+ {
+ std::ostringstream oss;
+ oss << "Attempted to change read state to " << rstate_str(rstate);
+ oss << " while write state is not enqueued (wstate ENQ); wstate=" << wstate_str() << ".";
+ throw jexception(jerrno::JERR_DTOK_ILLEGALSTATE, oss.str(), "data_tok",
+ "set_rstate");
+ }
+ _rstate = rstate;
+}
+
+void
+data_tok::reset()
+{
+ _wstate = NONE;
+ _rstate = UNREAD;
+ _dsize = 0;
+ _dblks_written = 0;
+ _dblks_read = 0;
+ _pg_cnt = 0;
+ _fid = 0;
+ _rid = 0;
+ _xid.clear();
+}
+
+// debug aid
+std::string
+data_tok::status_str() const
+{
+ std::ostringstream oss;
+ oss << std::hex << std::setfill('0');
+ oss << "dtok id=0x" << _icnt << "; ws=" << wstate_str() << "; rs=" << rstate_str();
+ oss << "; fid=0x" << _fid << "; rid=0x" << _rid << "; xid=";
+ for (unsigned i=0; i<_xid.size(); i++)
+ {
+ if (isprint(_xid[i]))
+ oss << _xid[i];
+ else
+ oss << "/" << std::setw(2) << (int)((char)_xid[i]);
+ }
+ oss << "; drid=0x" << _dequeue_rid << " extrid=" << (_external_rid?"T":"F");
+ oss << "; ds=0x" << _dsize << "; dw=0x" << _dblks_written << "; dr=0x" << _dblks_read;
+ oss << " pc=0x" << _pg_cnt;
+ return oss.str();
+}
+
+} // namespace journal
+} // namespace mrg
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/data_tok.h b/qpid/cpp/src/qpid/legacystore/jrnl/data_tok.h
new file mode 100644
index 0000000000..e35f069399
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/data_tok.h
@@ -0,0 +1,172 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file data_tok.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::data_tok (data block token).
+ * See class documentation for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_DATA_TOK_H
+#define QPID_LEGACYSTORE_JRNL_DATA_TOK_H
+
+namespace mrg
+{
+namespace journal
+{
+class data_tok;
+}
+}
+
+#include <cassert>
+#include <cstddef>
+#include "qpid/legacystore/jrnl/smutex.h"
+#include <pthread.h>
+#include <string>
+#include <sys/types.h>
+
+namespace mrg
+{
+
+namespace journal
+{
+
+ /**
+ * \class data_tok
+ * \brief Data block token (data_tok) used to track the write state (wstate) of a data block
+ * through the asynchronous I/O process.
+ */
+ class data_tok
+ {
+ public:
+ // TODO: Fix this, separate write state from operation
+ // ie: wstate = NONE, CACHED, PART, SUBM, COMPL
+ // op = ENQUEUE, DEQUEUE, ABORT, COMMIT
+ enum write_state
+ {
+ NONE, ///< Data block not sent to journal
+ ENQ_CACHED, ///< Data block enqueue written to page cache
+ ENQ_PART, ///< Data block part-submitted to AIO, waiting for page buffer to free up
+ ENQ_SUBM, ///< Data block enqueue submitted to AIO
+ ENQ, ///< Data block enqueue AIO write complete (enqueue complete)
+ DEQ_CACHED, ///< Data block dequeue written to page cache
+ DEQ_PART, ///< Data block part-submitted to AIO, waiting for page buffer to free up
+ DEQ_SUBM, ///< Data block dequeue submitted to AIO
+ DEQ, ///< Data block dequeue AIO write complete (dequeue complete)
+ ABORT_CACHED,
+ ABORT_PART,
+ ABORT_SUBM,
+ ABORTED,
+ COMMIT_CACHED,
+ COMMIT_PART,
+ COMMIT_SUBM,
+ COMMITTED
+ };
+
+ enum read_state
+ {
+ UNREAD, ///< Data block not read
+ READ_PART, ///< Data block is part-read; waiting for page buffer to fill
+ SKIP_PART, ///< Prev. dequeued dblock is part-skipped; waiting for page buffer to fill
+ READ ///< Data block is fully read
+ };
+
+ protected:
+ static smutex _mutex;
+ static u_int64_t _cnt;
+ u_int64_t _icnt;
+ write_state _wstate; ///< Enqueued / dequeued state of data
+ read_state _rstate; ///< Read state of data
+ std::size_t _dsize; ///< Data size in bytes
+ u_int32_t _dblks_written; ///< Data blocks written
+ u_int32_t _dblks_read; ///< Data blocks read
+ u_int32_t _pg_cnt; ///< Page counter - incr for each page containing part of data
+ u_int16_t _fid; ///< FID containing header of enqueue record
+ u_int64_t _rid; ///< RID of data set by enqueue operation
+ std::string _xid; ///< XID set by enqueue operation
+ u_int64_t _dequeue_rid; ///< RID of data set by dequeue operation
+ bool _external_rid; ///< Flag to indicate external setting of rid
+
+ public:
+ data_tok();
+ virtual ~data_tok();
+
+ inline u_int64_t id() const { return _icnt; }
+ inline write_state wstate() const { return _wstate; }
+ const char* wstate_str() const;
+ static const char* wstate_str(write_state wstate);
+ inline read_state rstate() const { return _rstate; }
+ const char* rstate_str() const;
+ static const char* rstate_str(read_state rstate);
+ inline bool is_writable() const { return _wstate == NONE || _wstate == ENQ_PART; }
+ inline bool is_enqueued() const { return _wstate == ENQ; }
+ inline bool is_readable() const { return _wstate == ENQ; }
+ inline bool is_read() const { return _rstate == READ; }
+ inline bool is_dequeueable() const { return _wstate == ENQ || _wstate == DEQ_PART; }
+ inline void set_wstate(const write_state wstate) { _wstate = wstate; }
+ void set_rstate(const read_state rstate);
+ inline std::size_t dsize() const { return _dsize; }
+ inline void set_dsize(std::size_t dsize) { _dsize = dsize; }
+
+ inline u_int32_t dblocks_written() const { return _dblks_written; }
+ inline void incr_dblocks_written(u_int32_t dblks_written)
+ { _dblks_written += dblks_written; }
+ inline void set_dblocks_written(u_int32_t dblks_written) { _dblks_written = dblks_written; }
+
+ inline u_int32_t dblocks_read() const { return _dblks_read; }
+ inline void incr_dblocks_read(u_int32_t dblks_read) { _dblks_read += dblks_read; }
+ inline void set_dblocks_read(u_int32_t dblks_read) { _dblks_read = dblks_read; }
+
+ inline u_int32_t pg_cnt() const { return _pg_cnt; }
+ inline u_int32_t incr_pg_cnt() { return ++_pg_cnt; }
+ inline u_int32_t decr_pg_cnt() { assert(_pg_cnt != 0); return --_pg_cnt; }
+
+ inline u_int16_t fid() const { return _fid; }
+ inline void set_fid(const u_int16_t fid) { _fid = fid; }
+ inline u_int64_t rid() const { return _rid; }
+ inline void set_rid(const u_int64_t rid) { _rid = rid; }
+ inline u_int64_t dequeue_rid() const {return _dequeue_rid; }
+ inline void set_dequeue_rid(const u_int64_t rid) { _dequeue_rid = rid; }
+ inline bool external_rid() const { return _external_rid; }
+ inline void set_external_rid(const bool external_rid) { _external_rid = external_rid; }
+
+ inline bool has_xid() const { return !_xid.empty(); }
+ inline const std::string& xid() const { return _xid; }
+ inline void clear_xid() { _xid.clear(); }
+ inline void set_xid(const std::string& xid) { _xid.assign(xid); }
+ inline void set_xid(const void* xidp, const std::size_t xid_len)
+ { _xid.assign((const char*)xidp, xid_len); }
+
+ void reset();
+
+ // debug aid
+ std::string status_str() const;
+ };
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_DATA_TOK_H
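An illustrative walk through a token's write-state life-cycle (not part of the patch); the rid and fid values are placeholders.

#include <cassert>
#include "qpid/legacystore/jrnl/data_tok.h"

void enqueue_then_dequeue_lifecycle()
{
    mrg::journal::data_tok dtok;
    dtok.set_rid(0x0a00);
    dtok.set_fid(3);

    dtok.set_wstate(mrg::journal::data_tok::ENQ_SUBM);  // enqueue handed to AIO
    // ... later, inside the aio_callback::wr_aio_cb() completion callback:
    dtok.set_wstate(mrg::journal::data_tok::ENQ);       // enqueue is now on disk
    assert(dtok.is_enqueued() && dtok.is_readable() && dtok.is_dequeueable());

    dtok.set_wstate(mrg::journal::data_tok::DEQ_SUBM);  // dequeue handed to AIO
    dtok.set_wstate(mrg::journal::data_tok::DEQ);       // dequeue complete
    dtok.reset();                                       // token can now be re-used
}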
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/deq_hdr.h b/qpid/cpp/src/qpid/legacystore/jrnl/deq_hdr.h
new file mode 100644
index 0000000000..ae7081eac1
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/deq_hdr.h
@@ -0,0 +1,141 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file deq_hdr.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::deq_hdr (dequeue record),
+ * used to dequeue a previously enqueued record.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_DEQ_HDR_H
+#define QPID_LEGACYSTORE_JRNL_DEQ_HDR_H
+
+#include <cstddef>
+#include "qpid/legacystore/jrnl/rec_hdr.h"
+
+namespace mrg
+{
+namespace journal
+{
+
+#pragma pack(1)
+
+ /**
+ * \brief Struct for dequeue record.
+ *
+ * Struct for dequeue record. If this record has a non-zero xidsize field (i.e., there is a
+ * valid XID), then this header is followed by the XID of xidsize bytes and a rec_tail. If,
+ * on the other hand, this record has a zero xidsize (i.e., there is no XID), then the rec_tail
+ * is absent.
+ *
+ * Note that this record has its own rid, distinct from the rid of the record it is dequeueing.
+ * The rid field below is the rid of the dequeue record itself; the deq-rid field is the rid of a
+ * previous enqueue record being dequeued by this record.
+ *
+ * Record header info in binary format (32 bytes):
+ * <pre>
+ * 0 7
+ * +---+---+---+---+---+---+---+---+ -+
+ * | magic | v | e | flags | |
+ * +---+---+---+---+---+---+---+---+ | struct hdr
+ * | rid | |
+ * +---+---+---+---+---+---+---+---+ -+
+ * | deq-rid |
+ * +---+---+---+---+---+---+---+---+
+ * | xidsize |
+ * +---+---+---+---+---+---+---+---+
+ * v = file version (If the format or encoding of this file changes, then this
+ * number should be incremented)
+ * e = endian flag, false (0x00) for little endian, true (0x01) for big endian
+ * </pre>
+ *
+ * Note that journal files should be transferable between 32- and 64-bit
+ * hardware of the same endianness, but not between hardware of opposite
+ * endianness without some sort of binary conversion utility. Thus buffering
+ * will be needed for types that change size between 32- and 64-bit compiles.
+ */
+ struct deq_hdr : rec_hdr
+ {
+ u_int64_t _deq_rid; ///< Record ID of dequeued record
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ u_int32_t _filler0; ///< Big-endian filler for 32-bit size_t
+#endif
+ std::size_t _xidsize; ///< XID size
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ u_int32_t _filler0; ///< Little-endian filler for 32-bit size_t
+#endif
+ static const u_int16_t DEQ_HDR_TXNCMPLCOMMIT_MASK = 0x10;
+
+ /**
+ * \brief Default constructor, which sets all values to 0.
+ */
+ inline deq_hdr(): rec_hdr(), _deq_rid(0),
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ _filler0(0),
+#endif
+ _xidsize(0)
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ , _filler0(0)
+#endif
+ {}
+
+ /**
+ * \brief Convenience constructor which initializes values during construction.
+ */
+ inline deq_hdr(const u_int32_t magic, const u_int8_t version, const u_int64_t rid,
+ const u_int64_t deq_rid, const std::size_t xidsize, const bool owi,
+ const bool txn_coml_commit = false):
+ rec_hdr(magic, version, rid, owi), _deq_rid(deq_rid),
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ _filler0(0),
+#endif
+ _xidsize(xidsize)
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ , _filler0(0)
+#endif
+ { set_txn_coml_commit(txn_coml_commit); }
+
+
+ inline bool is_txn_coml_commit() const { return _uflag & DEQ_HDR_TXNCMPLCOMMIT_MASK; }
+
+ inline void set_txn_coml_commit(const bool commit)
+ {
+ _uflag = commit ? _uflag | DEQ_HDR_TXNCMPLCOMMIT_MASK :
+ _uflag & (~DEQ_HDR_TXNCMPLCOMMIT_MASK);
+ }
+
+ /**
+ * \brief Returns the size of the header in bytes.
+ */
+ inline static std::size_t size() { return sizeof(deq_hdr); }
+ };
+
+#pragma pack()
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_DEQ_HDR_H
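A hedged sketch of how the header and its TPL-commit flag are used (not part of the patch). RHM_JDAT_DEQ_MAGIC and RHM_JDAT_VERSION are the magic/version constants used by deq_rec.cpp later in this patch, rec_tail is the common record tail struct, and the include paths and rid values are assumptions for illustration.

#include <cassert>
#include <string>
#include "qpid/legacystore/jrnl/deq_hdr.h"
#include "qpid/legacystore/jrnl/rec_tail.h"

void build_dequeue_header(const std::string& xid)
{
    using mrg::journal::deq_hdr;
    using mrg::journal::rec_tail;

    // Dequeue record 0x11 removes the earlier enqueue with rid 0x10 under a TPL commit.
    deq_hdr dh(RHM_JDAT_DEQ_MAGIC, RHM_JDAT_VERSION,
               /*rid*/ 0x11, /*deq_rid*/ 0x10,
               /*xidsize*/ xid.size(), /*owi*/ true, /*txn_coml_commit*/ true);
    assert(dh.is_txn_coml_commit());

    // With an XID the on-disk record is header + xid + tail; without one, header only.
    std::size_t rec_size = deq_hdr::size()
                         + (dh._xidsize ? dh._xidsize + rec_tail::size() : 0);
    (void)rec_size;
}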
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/deq_rec.cpp b/qpid/cpp/src/qpid/legacystore/jrnl/deq_rec.cpp
new file mode 100644
index 0000000000..4de412c201
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/deq_rec.cpp
@@ -0,0 +1,459 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file deq_rec.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * This file contains the code for the mrg::journal::deq_rec (journal dequeue
+ * record) class. See comments in file deq_rec.h for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#include "jrnl/deq_rec.h"
+
+#include <cassert>
+#include <cerrno>
+#include <cstdlib>
+#include <cstring>
+#include <iomanip>
+#include "qpid/legacystore/jrnl/jerrno.h"
+#include "qpid/legacystore/jrnl/jexception.h"
+#include <sstream>
+
+namespace mrg
+{
+namespace journal
+{
+
+deq_rec::deq_rec():
+ _deq_hdr(RHM_JDAT_DEQ_MAGIC, RHM_JDAT_VERSION, 0, 0, 0, false),
+ _xidp(0),
+ _buff(0),
+ _deq_tail(_deq_hdr)
+{}
+
+deq_rec::deq_rec(const u_int64_t rid, const u_int64_t drid, const void* const xidp,
+ const std::size_t xidlen, const bool owi, const bool txn_coml_commit):
+ _deq_hdr(RHM_JDAT_DEQ_MAGIC, RHM_JDAT_VERSION, rid, drid, xidlen, owi, txn_coml_commit),
+ _xidp(xidp),
+ _buff(0),
+ _deq_tail(_deq_hdr)
+{}
+
+deq_rec::~deq_rec()
+{
+ clean();
+}
+
+void
+deq_rec::reset()
+{
+ _deq_hdr._rid = 0;
+ _deq_hdr.set_owi(false);
+ _deq_hdr.set_txn_coml_commit(false);
+ _deq_hdr._deq_rid = 0;
+ _deq_hdr._xidsize = 0;
+ _deq_tail._rid = 0;
+ _xidp = 0;
+ _buff = 0;
+}
+
+void
+deq_rec::reset(const u_int64_t rid, const u_int64_t drid, const void* const xidp,
+ const std::size_t xidlen, const bool owi, const bool txn_coml_commit)
+{
+ _deq_hdr._rid = rid;
+ _deq_hdr.set_owi(owi);
+ _deq_hdr.set_txn_coml_commit(txn_coml_commit);
+ _deq_hdr._deq_rid = drid;
+ _deq_hdr._xidsize = xidlen;
+ _deq_tail._rid = rid;
+ _xidp = xidp;
+ _buff = 0;
+}
+
+u_int32_t
+deq_rec::encode(void* wptr, u_int32_t rec_offs_dblks, u_int32_t max_size_dblks)
+{
+ assert(wptr != 0);
+ assert(max_size_dblks > 0);
+ if (_xidp == 0)
+ assert(_deq_hdr._xidsize == 0);
+
+ std::size_t rec_offs = rec_offs_dblks * JRNL_DBLK_SIZE;
+ std::size_t rem = max_size_dblks * JRNL_DBLK_SIZE;
+ std::size_t wr_cnt = 0;
+ if (rec_offs_dblks) // Continuation of split dequeue record (over 2 or more pages)
+ {
+ if (size_dblks(rec_size()) - rec_offs_dblks > max_size_dblks) // Further split required
+ {
+ rec_offs -= sizeof(_deq_hdr);
+ std::size_t wsize = _deq_hdr._xidsize > rec_offs ? _deq_hdr._xidsize - rec_offs : 0;
+ std::size_t wsize2 = wsize;
+ if (wsize)
+ {
+ if (wsize > rem)
+ wsize = rem;
+ std::memcpy(wptr, (const char*)_xidp + rec_offs, wsize);
+ wr_cnt += wsize;
+ rem -= wsize;
+ }
+ rec_offs -= _deq_hdr._xidsize - wsize2;
+ if (rem)
+ {
+ wsize = sizeof(_deq_tail) > rec_offs ? sizeof(_deq_tail) - rec_offs : 0;
+ wsize2 = wsize;
+ if (wsize)
+ {
+ if (wsize > rem)
+ wsize = rem;
+ std::memcpy((char*)wptr + wr_cnt, (char*)&_deq_tail + rec_offs, wsize);
+ wr_cnt += wsize;
+ rem -= wsize;
+ }
+ rec_offs -= sizeof(_deq_tail) - wsize2;
+ }
+ assert(rem == 0);
+ assert(rec_offs == 0);
+ }
+ else // No further split required
+ {
+ rec_offs -= sizeof(_deq_hdr);
+ std::size_t wsize = _deq_hdr._xidsize > rec_offs ? _deq_hdr._xidsize - rec_offs : 0;
+ if (wsize)
+ {
+ std::memcpy(wptr, (const char*)_xidp + rec_offs, wsize);
+ wr_cnt += wsize;
+ }
+ rec_offs -= _deq_hdr._xidsize - wsize;
+ wsize = sizeof(_deq_tail) > rec_offs ? sizeof(_deq_tail) - rec_offs : 0;
+ if (wsize)
+ {
+ std::memcpy((char*)wptr + wr_cnt, (char*)&_deq_tail + rec_offs, wsize);
+ wr_cnt += wsize;
+#ifdef RHM_CLEAN
+ std::size_t rec_offs = rec_offs_dblks * JRNL_DBLK_SIZE;
+ std::size_t dblk_rec_size = size_dblks(rec_size() - rec_offs) * JRNL_DBLK_SIZE;
+ std::memset((char*)wptr + wr_cnt, RHM_CLEAN_CHAR, dblk_rec_size - wr_cnt);
+#endif
+ }
+ rec_offs -= sizeof(_deq_tail) - wsize;
+ assert(rec_offs == 0);
+ }
+ }
+ else // Start at beginning of data record
+ {
+ // Assumption: the header will always fit into the first dblk
+ std::memcpy(wptr, (void*)&_deq_hdr, sizeof(_deq_hdr));
+ wr_cnt = sizeof(_deq_hdr);
+ if (size_dblks(rec_size()) > max_size_dblks) // Split required - can only occur with xid
+ {
+ std::size_t wsize;
+ rem -= sizeof(_deq_hdr);
+ if (rem)
+ {
+ wsize = rem >= _deq_hdr._xidsize ? _deq_hdr._xidsize : rem;
+ std::memcpy((char*)wptr + wr_cnt, _xidp, wsize);
+ wr_cnt += wsize;
+ rem -= wsize;
+ }
+ if (rem)
+ {
+ wsize = rem >= sizeof(_deq_tail) ? sizeof(_deq_tail) : rem;
+ std::memcpy((char*)wptr + wr_cnt, (void*)&_deq_tail, wsize);
+ wr_cnt += wsize;
+ rem -= wsize;
+ }
+ assert(rem == 0);
+ }
+ else // No split required
+ {
+ if (_deq_hdr._xidsize)
+ {
+ std::memcpy((char*)wptr + wr_cnt, _xidp, _deq_hdr._xidsize);
+ wr_cnt += _deq_hdr._xidsize;
+ std::memcpy((char*)wptr + wr_cnt, (void*)&_deq_tail, sizeof(_deq_tail));
+ wr_cnt += sizeof(_deq_tail);
+ }
+#ifdef RHM_CLEAN
+ std::size_t dblk_rec_size = size_dblks(rec_size()) * JRNL_DBLK_SIZE;
+ std::memset((char*)wptr + wr_cnt, RHM_CLEAN_CHAR, dblk_rec_size - wr_cnt);
+#endif
+ }
+ }
+ return size_dblks(wr_cnt);
+}
+
+u_int32_t
+deq_rec::decode(rec_hdr& h, void* rptr, u_int32_t rec_offs_dblks, u_int32_t max_size_dblks)
+{
+ assert(rptr != 0);
+ assert(max_size_dblks > 0);
+
+ std::size_t rd_cnt = 0;
+ if (rec_offs_dblks) // Continuation of record on new page
+ {
+ const u_int32_t hdr_xid_dblks = size_dblks(deq_hdr::size() + _deq_hdr._xidsize);
+ const u_int32_t hdr_xid_tail_dblks = size_dblks(deq_hdr::size() + _deq_hdr._xidsize +
+ rec_tail::size());
+ const std::size_t rec_offs = rec_offs_dblks * JRNL_DBLK_SIZE;
+
+ if (hdr_xid_tail_dblks - rec_offs_dblks <= max_size_dblks)
+ {
+ // Remainder of xid fits within this page
+ if (rec_offs - deq_hdr::size() < _deq_hdr._xidsize)
+ {
+ // Part of xid still outstanding, copy remainder of xid and tail
+ const std::size_t xid_offs = rec_offs - deq_hdr::size();
+ const std::size_t xid_rem = _deq_hdr._xidsize - xid_offs;
+ std::memcpy((char*)_buff + xid_offs, rptr, xid_rem);
+ rd_cnt = xid_rem;
+ std::memcpy((void*)&_deq_tail, ((char*)rptr + rd_cnt), sizeof(_deq_tail));
+ chk_tail();
+ rd_cnt += sizeof(_deq_tail);
+ }
+ else
+ {
+ // Tail or part of tail only outstanding, complete tail
+ const std::size_t tail_offs = rec_offs - deq_hdr::size() - _deq_hdr._xidsize;
+ const std::size_t tail_rem = rec_tail::size() - tail_offs;
+ std::memcpy((char*)&_deq_tail + tail_offs, rptr, tail_rem);
+ chk_tail();
+ rd_cnt = tail_rem;
+ }
+ }
+ else if (hdr_xid_dblks - rec_offs_dblks <= max_size_dblks)
+ {
+ // Remainder of xid fits within this page, tail split
+ const std::size_t xid_offs = rec_offs - deq_hdr::size();
+ const std::size_t xid_rem = _deq_hdr._xidsize - xid_offs;
+ std::memcpy((char*)_buff + xid_offs, rptr, xid_rem);
+ rd_cnt += xid_rem;
+ const std::size_t tail_rem = (max_size_dblks * JRNL_DBLK_SIZE) - rd_cnt;
+ if (tail_rem)
+ {
+ std::memcpy((void*)&_deq_tail, ((char*)rptr + xid_rem), tail_rem);
+ rd_cnt += tail_rem;
+ }
+ }
+ else
+ {
+ // Remainder of xid split
+ const std::size_t xid_cp_size = (max_size_dblks * JRNL_DBLK_SIZE);
+ std::memcpy((char*)_buff + rec_offs - deq_hdr::size(), rptr, xid_cp_size);
+ rd_cnt += xid_cp_size;
+ }
+ }
+ else // Start of record
+ {
+ // Get and check header
+ _deq_hdr.hdr_copy(h);
+ rd_cnt = sizeof(rec_hdr);
+ _deq_hdr._deq_rid = *(u_int64_t*)((char*)rptr + rd_cnt);
+ rd_cnt += sizeof(u_int64_t);
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ rd_cnt += sizeof(u_int32_t); // Filler 0
+#endif
+ _deq_hdr._xidsize = *(std::size_t*)((char*)rptr + rd_cnt);
+ rd_cnt = _deq_hdr.size();
+ chk_hdr();
+ if (_deq_hdr._xidsize)
+ {
+ _buff = std::malloc(_deq_hdr._xidsize);
+ MALLOC_CHK(_buff, "_buff", "deq_rec", "decode");
+ const u_int32_t hdr_xid_dblks = size_dblks(deq_hdr::size() + _deq_hdr._xidsize);
+ const u_int32_t hdr_xid_tail_dblks = size_dblks(deq_hdr::size() + _deq_hdr._xidsize +
+ rec_tail::size());
+
+ // Check if the record (header + xid + tail) fits within this page; if so, the
+ // tail can be checked before incurring the expense of copying data into memory.
+ if (hdr_xid_tail_dblks <= max_size_dblks)
+ {
+ // Entire header, xid and tail fits within this page
+ std::memcpy(_buff, (char*)rptr + rd_cnt, _deq_hdr._xidsize);
+ rd_cnt += _deq_hdr._xidsize;
+ std::memcpy((void*)&_deq_tail, (char*)rptr + rd_cnt, sizeof(_deq_tail));
+ rd_cnt += sizeof(_deq_tail);
+ chk_tail();
+ }
+ else if (hdr_xid_dblks <= max_size_dblks)
+ {
+ // Entire header and xid fit within this page, tail split
+ std::memcpy(_buff, (char*)rptr + rd_cnt, _deq_hdr._xidsize);
+ rd_cnt += _deq_hdr._xidsize;
+ const std::size_t tail_rem = (max_size_dblks * JRNL_DBLK_SIZE) - rd_cnt;
+ if (tail_rem)
+ {
+ std::memcpy((void*)&_deq_tail, (char*)rptr + rd_cnt, tail_rem);
+ rd_cnt += tail_rem;
+ }
+ }
+ else
+ {
+ // Header fits within this page, xid split
+ const std::size_t xid_cp_size = (max_size_dblks * JRNL_DBLK_SIZE) - rd_cnt;
+ std::memcpy(_buff, (char*)rptr + rd_cnt, xid_cp_size);
+ rd_cnt += xid_cp_size;
+ }
+ }
+ }
+ return size_dblks(rd_cnt);
+}
+
+bool
+deq_rec::rcv_decode(rec_hdr h, std::ifstream* ifsp, std::size_t& rec_offs)
+{
+ if (rec_offs == 0)
+ {
+ _deq_hdr.hdr_copy(h);
+ ifsp->read((char*)&_deq_hdr._deq_rid, sizeof(u_int64_t));
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ ifsp->ignore(sizeof(u_int32_t)); // _filler0
+#endif
+ ifsp->read((char*)&_deq_hdr._xidsize, sizeof(std::size_t));
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ ifsp->ignore(sizeof(u_int32_t)); // _filler0
+#endif
+ rec_offs = sizeof(_deq_hdr);
+ // Read header, allocate (if req'd) for xid
+ if (_deq_hdr._xidsize)
+ {
+ _buff = std::malloc(_deq_hdr._xidsize);
+ MALLOC_CHK(_buff, "_buff", "deq_rec", "rcv_decode");
+ }
+ }
+ if (rec_offs < sizeof(_deq_hdr) + _deq_hdr._xidsize)
+ {
+ // Read xid (or continue reading xid)
+ std::size_t offs = rec_offs - sizeof(_deq_hdr);
+ ifsp->read((char*)_buff + offs, _deq_hdr._xidsize - offs);
+ std::size_t size_read = ifsp->gcount();
+ rec_offs += size_read;
+ if (size_read < _deq_hdr._xidsize - offs)
+ {
+ assert(ifsp->eof());
+ // As we may have read past eof, turn off fail bit
+ ifsp->clear(ifsp->rdstate()&(~std::ifstream::failbit));
+ assert(!ifsp->fail() && !ifsp->bad());
+ return false;
+ }
+ }
+ if (rec_offs < sizeof(_deq_hdr) +
+ (_deq_hdr._xidsize ? _deq_hdr._xidsize + sizeof(rec_tail) : 0))
+ {
+ // Read tail (or continue reading tail)
+ std::size_t offs = rec_offs - sizeof(_deq_hdr) - _deq_hdr._xidsize;
+ ifsp->read((char*)&_deq_tail + offs, sizeof(rec_tail) - offs);
+ std::size_t size_read = ifsp->gcount();
+ rec_offs += size_read;
+ if (size_read < sizeof(rec_tail) - offs)
+ {
+ assert(ifsp->eof());
+ // As we may have read past eof, turn off fail bit
+ ifsp->clear(ifsp->rdstate()&(~std::ifstream::failbit));
+ assert(!ifsp->fail() && !ifsp->bad());
+ return false;
+ }
+ }
+ ifsp->ignore(rec_size_dblks() * JRNL_DBLK_SIZE - rec_size());
+ if (_deq_hdr._xidsize)
+ chk_tail(); // Throws if tail invalid or record incomplete
+ assert(!ifsp->fail() && !ifsp->bad());
+ return true;
+}
+
+std::size_t
+deq_rec::get_xid(void** const xidpp)
+{
+ if (!_buff)
+ {
+ *xidpp = 0;
+ return 0;
+ }
+ *xidpp = _buff;
+ return _deq_hdr._xidsize;
+}
+
+std::string&
+deq_rec::str(std::string& str) const
+{
+ std::ostringstream oss;
+ oss << "deq_rec: m=" << _deq_hdr._magic;
+ oss << " v=" << (int)_deq_hdr._version;
+ oss << " rid=" << _deq_hdr._rid;
+ oss << " drid=" << _deq_hdr._deq_rid;
+ if (_xidp)
+ oss << " xid=\"" << _xidp << "\"";
+ str.append(oss.str());
+ return str;
+}
+
+std::size_t
+deq_rec::xid_size() const
+{
+ return _deq_hdr._xidsize;
+}
+
+std::size_t
+deq_rec::rec_size() const
+{
+ return deq_hdr::size() + (_deq_hdr._xidsize ? _deq_hdr._xidsize + rec_tail::size() : 0);
+}
+
+void
+deq_rec::chk_hdr() const
+{
+ jrec::chk_hdr(_deq_hdr);
+ if (_deq_hdr._magic != RHM_JDAT_DEQ_MAGIC)
+ {
+ std::ostringstream oss;
+ oss << std::hex << std::setfill('0');
+ oss << "deq magic: rid=0x" << std::setw(16) << _deq_hdr._rid;
+ oss << ": expected=0x" << std::setw(8) << RHM_JDAT_DEQ_MAGIC;
+ oss << " read=0x" << std::setw(2) << (int)_deq_hdr._magic;
+ throw jexception(jerrno::JERR_JREC_BADRECHDR, oss.str(), "deq_rec", "chk_hdr");
+ }
+}
+
+void
+deq_rec::chk_hdr(u_int64_t rid) const
+{
+ chk_hdr();
+ jrec::chk_rid(_deq_hdr, rid);
+}
+
+void
+deq_rec::chk_tail() const
+{
+ jrec::chk_tail(_deq_tail, _deq_hdr);
+}
+
+void
+deq_rec::clean()
+{
+ // clean up allocated memory here
+}
+
+} // namespace journal
+} // namespace mrg
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/deq_rec.h b/qpid/cpp/src/qpid/legacystore/jrnl/deq_rec.h
new file mode 100644
index 0000000000..d870b658da
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/deq_rec.h
@@ -0,0 +1,103 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file deq_rec.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * This file contains the code for the mrg::journal::deq_rec (journal dequeue
+ * record) class. See class documentation for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_DEQ_REC_H
+#define QPID_LEGACYSTORE_JRNL_DEQ_REC_H
+
+namespace mrg
+{
+namespace journal
+{
+class deq_rec;
+}
+}
+
+#include <cstddef>
+#include "qpid/legacystore/jrnl/deq_hdr.h"
+#include "qpid/legacystore/jrnl/jrec.h"
+
+namespace mrg
+{
+namespace journal
+{
+
+ /**
+ * \class deq_rec
+ * \brief Class to handle a single journal dequeue record.
+ */
+ class deq_rec : public jrec
+ {
+ private:
+ deq_hdr _deq_hdr; ///< Dequeue header
+ const void* _xidp; ///< xid pointer for encoding (writing to disk)
+ void* _buff; ///< Pointer to buffer to receive data read from disk
+ rec_tail _deq_tail; ///< Record tail, only encoded if XID is present
+
+ public:
+ // constructor used for read operations, where memory for the xid is allocated during decode
+ deq_rec();
+ // constructor used for write operations, where xid already exists
+ deq_rec(const u_int64_t rid, const u_int64_t drid, const void* const xidp,
+ const std::size_t xidlen, const bool owi, const bool txn_coml_commit);
+ virtual ~deq_rec();
+
+ // Prepare instance for use in reading data from journal
+ void reset();
+ // Prepare instance for use in writing data to journal
+ void reset(const u_int64_t rid, const u_int64_t drid, const void* const xidp,
+ const std::size_t xidlen, const bool owi, const bool txn_coml_commit);
+ u_int32_t encode(void* wptr, u_int32_t rec_offs_dblks, u_int32_t max_size_dblks);
+ u_int32_t decode(rec_hdr& h, void* rptr, u_int32_t rec_offs_dblks,
+ u_int32_t max_size_dblks);
+ // Decode used for recover
+ bool rcv_decode(rec_hdr h, std::ifstream* ifsp, std::size_t& rec_offs);
+
+ inline bool is_txn_coml_commit() const { return _deq_hdr.is_txn_coml_commit(); }
+ inline u_int64_t rid() const { return _deq_hdr._rid; }
+ inline u_int64_t deq_rid() const { return _deq_hdr._deq_rid; }
+ std::size_t get_xid(void** const xidpp);
+ std::string& str(std::string& str) const;
+ inline std::size_t data_size() const { return 0; } // This record never carries data
+ std::size_t xid_size() const;
+ std::size_t rec_size() const;
+
+ private:
+ virtual void chk_hdr() const;
+ virtual void chk_hdr(u_int64_t rid) const;
+ virtual void chk_tail() const;
+ virtual void clean();
+ }; // class deq_rec
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_DEQ_REC_H
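A minimal encode sketch (not part of the patch): JRNL_DBLK_SIZE is the journal data-block size constant referenced by deq_rec.cpp above, rec_size_dblks() is assumed to be inherited from jrec, and the rid values are placeholders.

#include <sys/types.h>
#include <vector>
#include "qpid/legacystore/jrnl/deq_rec.h"

void encode_untransacted_dequeue(std::vector<char>& page)
{
    // rid 0x11 dequeues rid 0x10; there is no XID, so only the header is written.
    mrg::journal::deq_rec dr(/*rid*/ 0x11, /*drid*/ 0x10,
                             /*xidp*/ 0, /*xidlen*/ 0,
                             /*owi*/ true, /*txn_coml_commit*/ false);

    page.resize(dr.rec_size_dblks() * JRNL_DBLK_SIZE);
    u_int32_t dblks = dr.encode(&page[0], 0, dr.rec_size_dblks());
    (void)dblks;   // number of data blocks actually written
}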
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/enq_hdr.h b/qpid/cpp/src/qpid/legacystore/jrnl/enq_hdr.h
new file mode 100644
index 0000000000..0d1e6116be
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/enq_hdr.h
@@ -0,0 +1,165 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file enq_hdr.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::enq_hdr (enqueue header),
+ * used to start an enqueue record in the journal.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_ENQ_HDR_H
+#define QPID_LEGACYSTORE_JRNL_ENQ_HDR_H
+
+#include <cstddef>
+#include "qpid/legacystore/jrnl/rec_hdr.h"
+
+namespace mrg
+{
+namespace journal
+{
+
+#pragma pack(1)
+
+ /**
+ * \brief Struct for enqueue record.
+ *
+ * Struct for enqueue record. In addition to the common data, this header includes both the
+ * xid and data blob sizes.
+ *
+ * This header precedes all enqueue data in journal files.
+ *
+ * Record header info in binary format (32 bytes):
+ * <pre>
+ * 0 7
+ * +---+---+---+---+---+---+---+---+ -+
+ * | magic | v | e | flags | |
+ * +---+---+---+---+---+---+---+---+ | struct hdr
+ * | rid | |
+ * +---+---+---+---+---+---+---+---+ -+
+ * | xidsize |
+ * +---+---+---+---+---+---+---+---+
+ * | dsize |
+ * +---+---+---+---+---+---+---+---+
+ * v = file version (If the format or encoding of this file changes, then this
+ * number should be incremented)
+ * e = endian flag, false (0x00) for little endian, true (0x01) for big endian
+ * </pre>
+ *
+ * Note that journal files should be transferable between 32- and 64-bit
+ * hardware of the same endianness, but not between hardware of opposite
+ * endianness without some sort of binary conversion utility. Thus buffering
+ * will be needed for types that change size between 32- and 64-bit compiles.
+ */
+ struct enq_hdr : rec_hdr
+ {
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ u_int32_t _filler0; ///< Big-endian filler for 32-bit size_t
+#endif
+ std::size_t _xidsize; ///< XID size
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ u_int32_t _filler0; ///< Little-endian filler for 32-bit size_t
+#endif
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ u_int32_t _filler1; ///< Big-endian filler for 32-bit size_t
+#endif
+ std::size_t _dsize; ///< Record data size
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ u_int32_t _filler1; ///< Little-endian filler for 32-bit size_t
+#endif
+ static const u_int16_t ENQ_HDR_TRANSIENT_MASK = 0x10;
+ static const u_int16_t ENQ_HDR_EXTERNAL_MASK = 0x20;
+
+ /**
+ * \brief Default constructor, which sets all values to 0.
+ */
+ inline enq_hdr(): rec_hdr(),
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ _filler0(0),
+#endif
+ _xidsize(0),
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ _filler0(0),
+#endif
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ _filler1(0),
+#endif
+ _dsize(0)
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ , _filler1(0)
+#endif
+ {}
+
+ /**
+ * \brief Convenience constructor which initializes values during construction.
+ */
+ inline enq_hdr(const u_int32_t magic, const u_int8_t version, const u_int64_t rid,
+ const std::size_t xidsize, const std::size_t dsize, const bool owi,
+ const bool transient = false): rec_hdr(magic, version, rid, owi),
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ _filler0(0),
+#endif
+ _xidsize(xidsize),
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ _filler0(0),
+#endif
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ _filler1(0),
+#endif
+ _dsize(dsize)
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ , _filler1(0)
+#endif
+ { set_transient(transient); }
+
+
+ inline bool is_transient() const { return _uflag & ENQ_HDR_TRANSIENT_MASK; }
+
+ inline void set_transient(const bool transient)
+ {
+ _uflag = transient ? _uflag | ENQ_HDR_TRANSIENT_MASK :
+ _uflag & (~ENQ_HDR_TRANSIENT_MASK);
+ }
+
+ inline bool is_external() const { return _uflag & ENQ_HDR_EXTERNAL_MASK; }
+
+ inline void set_external(const bool external)
+ {
+ _uflag = external ? _uflag | ENQ_HDR_EXTERNAL_MASK :
+ _uflag & (~ENQ_HDR_EXTERNAL_MASK);
+ }
+
+ /**
+ * \brief Returns the size of the header in bytes.
+ */
+ inline static std::size_t size() { return sizeof(enq_hdr); }
+ };
+
+#pragma pack()
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_ENQ_HDR_H
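A hedged sketch of the transient/external flag accessors (not part of the patch); RHM_JDAT_ENQ_MAGIC and RHM_JDAT_VERSION come from the journal's configuration headers (they are used by enq_rec.cpp below), and the rid and size values are placeholders.

#include <cassert>
#include <cstddef>
#include "qpid/legacystore/jrnl/enq_hdr.h"

void build_enqueue_header(std::size_t xid_len, std::size_t data_len)
{
    using mrg::journal::enq_hdr;

    enq_hdr eh(RHM_JDAT_ENQ_MAGIC, RHM_JDAT_VERSION,
               /*rid*/ 0x0a01, xid_len, data_len,
               /*owi*/ true, /*transient*/ true);
    assert(eh.is_transient());         // transient records need not survive recovery

    eh.set_external(true);             // message body is stored outside the journal
    assert(eh.is_external());

    eh.set_transient(false);           // the flag helpers only touch their own bit in _uflag
    assert(!eh.is_transient() && eh.is_external());
}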
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/enq_map.cpp b/qpid/cpp/src/qpid/legacystore/jrnl/enq_map.cpp
new file mode 100644
index 0000000000..d024b704a7
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/enq_map.cpp
@@ -0,0 +1,183 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file enq_map.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::enq_map (enqueue map). See
+ * comments in file enq_map.h for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#include "qpid/legacystore/jrnl/enq_map.h"
+
+#include <iomanip>
+#include "qpid/legacystore/jrnl/jerrno.h"
+#include "qpid/legacystore/jrnl/slock.h"
+#include <sstream>
+
+
+namespace mrg
+{
+namespace journal
+{
+
+// static return/error codes
+int16_t enq_map::EMAP_DUP_RID = -3;
+int16_t enq_map::EMAP_LOCKED = -2;
+int16_t enq_map::EMAP_RID_NOT_FOUND = -1;
+int16_t enq_map::EMAP_OK = 0;
+int16_t enq_map::EMAP_FALSE = 0;
+int16_t enq_map::EMAP_TRUE = 1;
+
+enq_map::enq_map():
+ _map(),
+ _pfid_enq_cnt()
+{}
+
+enq_map::~enq_map() {}
+
+void
+enq_map::set_num_jfiles(const u_int16_t num_jfiles)
+{
+ _pfid_enq_cnt.resize(num_jfiles, 0);
+}
+
+
+int16_t
+enq_map::insert_pfid(const u_int64_t rid, const u_int16_t pfid)
+{
+ return insert_pfid(rid, pfid, false);
+}
+
+int16_t
+enq_map::insert_pfid(const u_int64_t rid, const u_int16_t pfid, const bool locked)
+{
+ std::pair<emap_itr, bool> ret;
+ emap_data_struct rec(pfid, locked);
+ {
+ slock s(_mutex);
+ ret = _map.insert(emap_param(rid, rec));
+ }
+ if (ret.second == false)
+ return EMAP_DUP_RID;
+ _pfid_enq_cnt.at(pfid)++;
+ return EMAP_OK;
+}
+
+int16_t
+enq_map::get_pfid(const u_int64_t rid)
+{
+ slock s(_mutex);
+ emap_itr itr = _map.find(rid);
+ if (itr == _map.end()) // not found in map
+ return EMAP_RID_NOT_FOUND;
+ if (itr->second._lock)
+ return EMAP_LOCKED;
+ return itr->second._pfid;
+}
+
+int16_t
+enq_map::get_remove_pfid(const u_int64_t rid, const bool txn_flag)
+{
+ slock s(_mutex);
+ emap_itr itr = _map.find(rid);
+ if (itr == _map.end()) // not found in map
+ return EMAP_RID_NOT_FOUND;
+ if (itr->second._lock && !txn_flag) // locked, but not a commit/abort
+ return EMAP_LOCKED;
+ u_int16_t pfid = itr->second._pfid;
+ _map.erase(itr);
+ _pfid_enq_cnt.at(pfid)--;
+ return pfid;
+}
+
+bool
+enq_map::is_enqueued(const u_int64_t rid, bool ignore_lock)
+{
+ slock s(_mutex);
+ emap_itr itr = _map.find(rid);
+ if (itr == _map.end()) // not found in map
+ return false;
+ if (!ignore_lock && itr->second._lock) // locked
+ return false;
+ return true;
+}
+
+int16_t
+enq_map::lock(const u_int64_t rid)
+{
+ slock s(_mutex);
+ emap_itr itr = _map.find(rid);
+ if (itr == _map.end()) // not found in map
+ return EMAP_RID_NOT_FOUND;
+ itr->second._lock = true;
+ return EMAP_OK;
+}
+
+int16_t
+enq_map::unlock(const u_int64_t rid)
+{
+ slock s(_mutex);
+ emap_itr itr = _map.find(rid);
+ if (itr == _map.end()) // not found in map
+ return EMAP_RID_NOT_FOUND;
+ itr->second._lock = false;
+ return EMAP_OK;
+}
+
+int16_t
+enq_map::is_locked(const u_int64_t rid)
+{
+ slock s(_mutex);
+ emap_itr itr = _map.find(rid);
+ if (itr == _map.end()) // not found in map
+ return EMAP_RID_NOT_FOUND;
+ return itr->second._lock ? EMAP_TRUE : EMAP_FALSE;
+}
+
+void
+enq_map::rid_list(std::vector<u_int64_t>& rv)
+{
+ rv.clear();
+ {
+ slock s(_mutex);
+ for (emap_itr itr = _map.begin(); itr != _map.end(); itr++)
+ rv.push_back(itr->first);
+ }
+}
+
+void
+enq_map::pfid_list(std::vector<u_int16_t>& fv)
+{
+ fv.clear();
+ {
+ slock s(_mutex);
+ for (emap_itr itr = _map.begin(); itr != _map.end(); itr++)
+ fv.push_back(itr->second._pfid);
+ }
+}
+
+} // namespace journal
+} // namespace mrg
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/enq_map.h b/qpid/cpp/src/qpid/legacystore/jrnl/enq_map.h
new file mode 100644
index 0000000000..75404afebe
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/enq_map.h
@@ -0,0 +1,127 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file enq_map.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::enq_map (enqueue map).
+ * See class documentation for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_ENQ_MAP_H
+#define QPID_LEGACYSTORE_JRNL_ENQ_MAP_H
+
+namespace mrg
+{
+namespace journal
+{
+class enq_map;
+}
+}
+
+#include "qpid/legacystore/jrnl/jexception.h"
+#include "qpid/legacystore/jrnl/smutex.h"
+#include <map>
+#include <pthread.h>
+#include <vector>
+
+namespace mrg
+{
+namespace journal
+{
+
+ /**
+ * \class enq_map
+ * \brief Class for storing the physical file id (pfid) and a transaction locked flag for each enqueued
+ * data block using the record id (rid) as a key. This is the primary mechanism for
+ * determining the enqueue low water mark: if a pfid exists in this map, then there is
+ * at least one still-enqueued record in that file. (The transaction map must also be
+ * clear, however.)
+ *
+ * Map rids against pfid and lock status. As records are enqueued, they are added to this
+ * map, and as they are dequeued, they are removed. An enqueue is locked when a transactional
+ * dequeue is pending that has been neither committed nor aborted.
+ * <pre>
+ * key data
+ *
+ * rid1 --- [ pfid, txn_lock ]
+ * rid2 --- [ pfid, txn_lock ]
+ * rid3 --- [ pfid, txn_lock ]
+ * ...
+ * </pre>
+ */
+ class enq_map
+ {
+ public:
+ // return/error codes
+ static int16_t EMAP_DUP_RID;
+ static int16_t EMAP_LOCKED;
+ static int16_t EMAP_RID_NOT_FOUND;
+ static int16_t EMAP_OK;
+ static int16_t EMAP_FALSE;
+ static int16_t EMAP_TRUE;
+
+ private:
+
+ struct emap_data_struct
+ {
+ u_int16_t _pfid;
+ bool _lock;
+ emap_data_struct(const u_int16_t pfid, const bool lock) : _pfid(pfid), _lock(lock) {}
+ };
+ typedef std::pair<u_int64_t, emap_data_struct> emap_param;
+ typedef std::map<u_int64_t, emap_data_struct> emap;
+ typedef emap::iterator emap_itr;
+
+ emap _map;
+ smutex _mutex;
+ std::vector<u_int32_t> _pfid_enq_cnt;
+
+ public:
+ enq_map();
+ virtual ~enq_map();
+
+ void set_num_jfiles(const u_int16_t num_jfiles);
+ inline u_int32_t get_enq_cnt(const u_int16_t pfid) const { return _pfid_enq_cnt.at(pfid); };
+
+ int16_t insert_pfid(const u_int64_t rid, const u_int16_t pfid); // 0=ok; -3=duplicate rid;
+ int16_t insert_pfid(const u_int64_t rid, const u_int16_t pfid, const bool locked); // 0=ok; -3=duplicate rid;
+ int16_t get_pfid(const u_int64_t rid); // >=0=pfid; -1=rid not found; -2=locked
+ int16_t get_remove_pfid(const u_int64_t rid, const bool txn_flag = false); // >=0=pfid; -1=rid not found; -2=locked
+ bool is_enqueued(const u_int64_t rid, bool ignore_lock = false);
+ int16_t lock(const u_int64_t rid); // 0=ok; -1=rid not found
+ int16_t unlock(const u_int64_t rid); // 0=ok; -1=rid not found
+ int16_t is_locked(const u_int64_t rid); // 1=true; 0=false; -1=rid not found
+ inline void clear() { _map.clear(); }
+ inline bool empty() const { return _map.empty(); }
+ inline u_int32_t size() const { return u_int32_t(_map.size()); }
+ void rid_list(std::vector<u_int64_t>& rv);
+ void pfid_list(std::vector<u_int16_t>& fv);
+ };
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_ENQ_MAP_H
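A hedged usage sketch of the enqueue map and the return-code conventions noted above (not part of the patch); the rid and pfid values are placeholders.

#include <cassert>
#include "qpid/legacystore/jrnl/enq_map.h"

void track_one_enqueue()
{
    mrg::journal::enq_map emap;
    emap.set_num_jfiles(8);                     // size the per-file enqueue counters

    assert(emap.insert_pfid(0x10, 3) == mrg::journal::enq_map::EMAP_OK);
    assert(emap.get_pfid(0x10) == 3);           // a value >= 0 is the pfid itself
    assert(emap.get_enq_cnt(3) == 1);

    // A pending transactional dequeue locks the entry until commit/abort.
    emap.lock(0x10);
    assert(emap.get_pfid(0x10) == mrg::journal::enq_map::EMAP_LOCKED);
    assert(!emap.is_enqueued(0x10));            // locked entries do not count as enqueued...
    assert(emap.is_enqueued(0x10, true));       // ...unless the lock is ignored

    // Commit: remove under the txn flag even though the entry is locked.
    assert(emap.get_remove_pfid(0x10, true) == 3);
    assert(emap.empty());
}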
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/enq_rec.cpp b/qpid/cpp/src/qpid/legacystore/jrnl/enq_rec.cpp
new file mode 100644
index 0000000000..468599836b
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/enq_rec.cpp
@@ -0,0 +1,638 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file enq_rec.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * This file contains the code for the mrg::journal::enq_rec (journal enqueue
+ * record) class. See comments in file enq_rec.h for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#include "qpid/legacystore/jrnl/enq_rec.h"
+
+#include <cassert>
+#include <cerrno>
+#include <cstdlib>
+#include <cstring>
+#include <iomanip>
+#include "qpid/legacystore/jrnl/jerrno.h"
+#include "qpid/legacystore/jrnl/jexception.h"
+#include <sstream>
+
+namespace mrg
+{
+namespace journal
+{
+
+// Constructor used for read operations, where buf contains preallocated space to receive data.
+enq_rec::enq_rec():
+ jrec(), // superclass
+ _enq_hdr(RHM_JDAT_ENQ_MAGIC, RHM_JDAT_VERSION, 0, 0, 0, false, false),
+ _xidp(0),
+ _data(0),
+ _buff(0),
+ _enq_tail(_enq_hdr)
+{}
+
+// Constructor used for transactional write operations, where dbuf contains data to be written.
+enq_rec::enq_rec(const u_int64_t rid, const void* const dbuf, const std::size_t dlen,
+ const void* const xidp, const std::size_t xidlen, const bool owi, const bool transient):
+ jrec(), // superclass
+ _enq_hdr(RHM_JDAT_ENQ_MAGIC, RHM_JDAT_VERSION, rid, xidlen, dlen, owi, transient),
+ _xidp(xidp),
+ _data(dbuf),
+ _buff(0),
+ _enq_tail(_enq_hdr)
+{}
+
+enq_rec::~enq_rec()
+{
+ clean();
+}
+
+// Prepare instance for use in reading data from journal, where buf contains preallocated space
+// to receive data.
+void
+enq_rec::reset()
+{
+ _enq_hdr._rid = 0;
+ _enq_hdr.set_owi(false);
+ _enq_hdr.set_transient(false);
+ _enq_hdr._xidsize = 0;
+ _enq_hdr._dsize = 0;
+ _xidp = 0;
+ _data = 0;
+ _buff = 0;
+ _enq_tail._rid = 0;
+}
+
+// Prepare instance for use in writing transactional data to journal, where dbuf contains data to
+// be written.
+void
+enq_rec::reset(const u_int64_t rid, const void* const dbuf, const std::size_t dlen,
+ const void* const xidp, const std::size_t xidlen, const bool owi, const bool transient,
+ const bool external)
+{
+ _enq_hdr._rid = rid;
+ _enq_hdr.set_owi(owi);
+ _enq_hdr.set_transient(transient);
+ _enq_hdr.set_external(external);
+ _enq_hdr._xidsize = xidlen;
+ _enq_hdr._dsize = dlen;
+ _xidp = xidp;
+ _data = dbuf;
+ _buff = 0;
+ _enq_tail._rid = rid;
+}
+
+u_int32_t
+enq_rec::encode(void* wptr, u_int32_t rec_offs_dblks, u_int32_t max_size_dblks)
+{
+ assert(wptr != 0);
+ assert(max_size_dblks > 0);
+ if (_xidp == 0)
+ assert(_enq_hdr._xidsize == 0);
+
+ std::size_t rec_offs = rec_offs_dblks * JRNL_DBLK_SIZE;
+ std::size_t rem = max_size_dblks * JRNL_DBLK_SIZE;
+ std::size_t wr_cnt = 0;
+ if (rec_offs_dblks) // Continuation of split data record (over 2 or more pages)
+ {
+ if (size_dblks(rec_size()) - rec_offs_dblks > max_size_dblks) // Further split required
+ {
+ rec_offs -= sizeof(_enq_hdr);
+ std::size_t wsize = _enq_hdr._xidsize > rec_offs ? _enq_hdr._xidsize - rec_offs : 0;
+ std::size_t wsize2 = wsize;
+ if (wsize)
+ {
+ if (wsize > rem)
+ wsize = rem;
+ std::memcpy(wptr, (const char*)_xidp + rec_offs, wsize);
+ wr_cnt = wsize;
+ rem -= wsize;
+ }
+ rec_offs -= _enq_hdr._xidsize - wsize2;
+ if (rem && !_enq_hdr.is_external())
+ {
+ wsize = _enq_hdr._dsize > rec_offs ? _enq_hdr._dsize - rec_offs : 0;
+ wsize2 = wsize;
+ if (wsize)
+ {
+ if (wsize > rem)
+ wsize = rem;
+ std::memcpy((char*)wptr + wr_cnt, (const char*)_data + rec_offs, wsize);
+ wr_cnt += wsize;
+ rem -= wsize;
+ }
+ rec_offs -= _enq_hdr._dsize - wsize2;
+ }
+ if (rem)
+ {
+ wsize = sizeof(_enq_tail) > rec_offs ? sizeof(_enq_tail) - rec_offs : 0;
+ wsize2 = wsize;
+ if (wsize)
+ {
+ if (wsize > rem)
+ wsize = rem;
+ std::memcpy((char*)wptr + wr_cnt, (char*)&_enq_tail + rec_offs, wsize);
+ wr_cnt += wsize;
+ rem -= wsize;
+ }
+ rec_offs -= sizeof(_enq_tail) - wsize2;
+ }
+ assert(rem == 0);
+ assert(rec_offs == 0);
+ }
+ else // No further split required
+ {
+ rec_offs -= sizeof(_enq_hdr);
+ std::size_t wsize = _enq_hdr._xidsize > rec_offs ? _enq_hdr._xidsize - rec_offs : 0;
+ if (wsize)
+ {
+ std::memcpy(wptr, (const char*)_xidp + rec_offs, wsize);
+ wr_cnt += wsize;
+ }
+ rec_offs -= _enq_hdr._xidsize - wsize;
+ wsize = _enq_hdr._dsize > rec_offs ? _enq_hdr._dsize - rec_offs : 0;
+ if (wsize && !_enq_hdr.is_external())
+ {
+ std::memcpy((char*)wptr + wr_cnt, (const char*)_data + rec_offs, wsize);
+ wr_cnt += wsize;
+ }
+ rec_offs -= _enq_hdr._dsize - wsize;
+ wsize = sizeof(_enq_tail) > rec_offs ? sizeof(_enq_tail) - rec_offs : 0;
+ if (wsize)
+ {
+ std::memcpy((char*)wptr + wr_cnt, (char*)&_enq_tail + rec_offs, wsize);
+ wr_cnt += wsize;
+#ifdef RHM_CLEAN
+ std::size_t rec_offs = rec_offs_dblks * JRNL_DBLK_SIZE;
+ std::size_t dblk_rec_size = size_dblks(rec_size() - rec_offs) * JRNL_DBLK_SIZE;
+ std::memset((char*)wptr + wr_cnt, RHM_CLEAN_CHAR, dblk_rec_size - wr_cnt);
+#endif
+ }
+ rec_offs -= sizeof(_enq_tail) - wsize;
+ assert(rec_offs == 0);
+ }
+ }
+ else // Start at beginning of data record
+ {
+ // Assumption: the header will always fit into the first dblk
+ std::memcpy(wptr, (void*)&_enq_hdr, sizeof(_enq_hdr));
+ wr_cnt = sizeof(_enq_hdr);
+ if (size_dblks(rec_size()) > max_size_dblks) // Split required
+ {
+ std::size_t wsize;
+ rem -= sizeof(_enq_hdr);
+ if (rem)
+ {
+ wsize = rem >= _enq_hdr._xidsize ? _enq_hdr._xidsize : rem;
+ std::memcpy((char*)wptr + wr_cnt, _xidp, wsize);
+ wr_cnt += wsize;
+ rem -= wsize;
+ }
+ if (rem && !_enq_hdr.is_external())
+ {
+ wsize = rem >= _enq_hdr._dsize ? _enq_hdr._dsize : rem;
+ std::memcpy((char*)wptr + wr_cnt, _data, wsize);
+ wr_cnt += wsize;
+ rem -= wsize;
+ }
+ if (rem)
+ {
+ wsize = rem >= sizeof(_enq_tail) ? sizeof(_enq_tail) : rem;
+ std::memcpy((char*)wptr + wr_cnt, (void*)&_enq_tail, wsize);
+ wr_cnt += wsize;
+ rem -= wsize;
+ }
+ assert(rem == 0);
+ }
+ else // No split required
+ {
+ if (_enq_hdr._xidsize)
+ {
+ std::memcpy((char*)wptr + wr_cnt, _xidp, _enq_hdr._xidsize);
+ wr_cnt += _enq_hdr._xidsize;
+ }
+ if (!_enq_hdr.is_external())
+ {
+ std::memcpy((char*)wptr + wr_cnt, _data, _enq_hdr._dsize);
+ wr_cnt += _enq_hdr._dsize;
+ }
+ std::memcpy((char*)wptr + wr_cnt, (void*)&_enq_tail, sizeof(_enq_tail));
+ wr_cnt += sizeof(_enq_tail);
+#ifdef RHM_CLEAN
+ std::size_t dblk_rec_size = size_dblks(rec_size()) * JRNL_DBLK_SIZE;
+ std::memset((char*)wptr + wr_cnt, RHM_CLEAN_CHAR, dblk_rec_size - wr_cnt);
+#endif
+ }
+ }
+ return size_dblks(wr_cnt);
+}
+
+u_int32_t
+enq_rec::decode(rec_hdr& h, void* rptr, u_int32_t rec_offs_dblks, u_int32_t max_size_dblks)
+{
+ assert(rptr != 0);
+ assert(max_size_dblks > 0);
+
+ std::size_t rd_cnt = 0;
+ if (rec_offs_dblks) // Continuation of record on new page
+ {
+ const u_int32_t hdr_xid_data_size = enq_hdr::size() + _enq_hdr._xidsize +
+ (_enq_hdr.is_external() ? 0 : _enq_hdr._dsize);
+ const u_int32_t hdr_xid_data_tail_size = hdr_xid_data_size + rec_tail::size();
+ const u_int32_t hdr_data_dblks = size_dblks(hdr_xid_data_size);
+ const u_int32_t hdr_tail_dblks = size_dblks(hdr_xid_data_tail_size);
+ const std::size_t rec_offs = rec_offs_dblks * JRNL_DBLK_SIZE;
+ const std::size_t offs = rec_offs - enq_hdr::size();
+
+ if (hdr_tail_dblks - rec_offs_dblks <= max_size_dblks)
+ {
+ // Remainder of record fits within this page
+ if (offs < _enq_hdr._xidsize)
+ {
+ // some XID still outstanding, copy remainder of XID, data and tail
+ const std::size_t rem = _enq_hdr._xidsize + _enq_hdr._dsize - offs;
+ std::memcpy((char*)_buff + offs, rptr, rem);
+ rd_cnt += rem;
+ std::memcpy((void*)&_enq_tail, ((char*)rptr + rd_cnt), sizeof(_enq_tail));
+ chk_tail();
+ rd_cnt += sizeof(_enq_tail);
+ }
+ else if (offs < _enq_hdr._xidsize + _enq_hdr._dsize && !_enq_hdr.is_external())
+ {
+ // some data still outstanding, copy remainder of data and tail
+ const std::size_t data_offs = offs - _enq_hdr._xidsize;
+ const std::size_t data_rem = _enq_hdr._dsize - data_offs;
+ std::memcpy((char*)_buff + offs, rptr, data_rem);
+ rd_cnt += data_rem;
+ std::memcpy((void*)&_enq_tail, ((char*)rptr + rd_cnt), sizeof(_enq_tail));
+ chk_tail();
+ rd_cnt += sizeof(_enq_tail);
+ }
+ else
+ {
+ // Tail or part of tail only outstanding, complete tail
+ const std::size_t tail_offs = rec_offs - enq_hdr::size() - _enq_hdr._xidsize -
+ _enq_hdr._dsize;
+ const std::size_t tail_rem = rec_tail::size() - tail_offs;
+ std::memcpy((char*)&_enq_tail + tail_offs, rptr, tail_rem);
+ chk_tail();
+ rd_cnt = tail_rem;
+ }
+ }
+ else if (hdr_data_dblks - rec_offs_dblks <= max_size_dblks)
+ {
+ // Remainder of xid & data fits within this page; tail split
+
+ /*
+ * TODO: This section needs revision. Since it is known that the end of the page falls within the
+ * tail record, it is only necessary to write from the current offset to the end of the page under
+ * all circumstances. The multiple if/else combinations may be eliminated, as well as one memcpy()
+ * operation.
+ *
+ * Also note that Coverity has detected a possible memory overwrite in this block. It occurs if
+             * both the following two if() statements (numbered) are false. With rd_cnt = 0, this would result in
+             * the value of tail_rem > sizeof(rec_tail). Practically, this could only happen if the start and
+ * end of a page both fall within the same tail record, in which case the tail would have to be
+ * (much!) larger. However, the logic here does not account for this possibility.
+ *
+ * If the optimization above is undertaken, this code would probably be removed.
+ */
+ if (offs < _enq_hdr._xidsize) // 1
+ {
+ // some XID still outstanding, copy remainder of XID and data
+ const std::size_t rem = _enq_hdr._xidsize + _enq_hdr._dsize - offs;
+ std::memcpy((char*)_buff + offs, rptr, rem);
+ rd_cnt += rem;
+ }
+ else if (offs < _enq_hdr._xidsize + _enq_hdr._dsize && !_enq_hdr.is_external()) // 2
+ {
+ // some data still outstanding, copy remainder of data
+ const std::size_t data_offs = offs - _enq_hdr._xidsize;
+ const std::size_t data_rem = _enq_hdr._dsize - data_offs;
+ std::memcpy((char*)_buff + offs, rptr, data_rem);
+ rd_cnt += data_rem;
+ }
+ const std::size_t tail_rem = (max_size_dblks * JRNL_DBLK_SIZE) - rd_cnt;
+ if (tail_rem)
+ {
+ std::memcpy((void*)&_enq_tail, ((char*)rptr + rd_cnt), tail_rem);
+ rd_cnt += tail_rem;
+ }
+ }
+ else
+ {
+ // Since xid and data are contiguous, both fit within current page - copy whole page
+ const std::size_t data_cp_size = (max_size_dblks * JRNL_DBLK_SIZE);
+ std::memcpy((char*)_buff + offs, rptr, data_cp_size);
+ rd_cnt += data_cp_size;
+ }
+ }
+ else // Start of record
+ {
+ // Get and check header
+ _enq_hdr.hdr_copy(h);
+ rd_cnt = sizeof(rec_hdr);
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ rd_cnt += sizeof(u_int32_t); // Filler 0
+#endif
+ _enq_hdr._xidsize = *(std::size_t*)((char*)rptr + rd_cnt);
+ rd_cnt += sizeof(std::size_t);
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ rd_cnt += sizeof(u_int32_t); // Filler 0
+#endif
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ rd_cnt += sizeof(u_int32_t); // Filler 1
+#endif
+ _enq_hdr._dsize = *(std::size_t*)((char*)rptr + rd_cnt);
+ rd_cnt = _enq_hdr.size();
+ chk_hdr();
+ if (_enq_hdr._xidsize + (_enq_hdr.is_external() ? 0 : _enq_hdr._dsize))
+ {
+ _buff = std::malloc(_enq_hdr._xidsize + (_enq_hdr.is_external() ? 0 : _enq_hdr._dsize));
+ MALLOC_CHK(_buff, "_buff", "enq_rec", "decode");
+
+ const u_int32_t hdr_xid_size = enq_hdr::size() + _enq_hdr._xidsize;
+ const u_int32_t hdr_xid_data_size = hdr_xid_size + (_enq_hdr.is_external() ? 0 : _enq_hdr._dsize);
+ const u_int32_t hdr_xid_data_tail_size = hdr_xid_data_size + rec_tail::size();
+ const u_int32_t hdr_xid_dblks = size_dblks(hdr_xid_size);
+ const u_int32_t hdr_data_dblks = size_dblks(hdr_xid_data_size);
+ const u_int32_t hdr_tail_dblks = size_dblks(hdr_xid_data_tail_size);
+            // Check if the record (header + data + tail) fits within this page; if so, we can check the
+            // tail before incurring the expense of copying data to memory
+ if (hdr_tail_dblks <= max_size_dblks)
+ {
+                // Header, xid, data and tail fit within this page
+ if (_enq_hdr._xidsize)
+ {
+ std::memcpy(_buff, (char*)rptr + rd_cnt, _enq_hdr._xidsize);
+ rd_cnt += _enq_hdr._xidsize;
+ }
+ if (_enq_hdr._dsize && !_enq_hdr.is_external())
+ {
+ std::memcpy((char*)_buff + _enq_hdr._xidsize, (char*)rptr + rd_cnt,
+ _enq_hdr._dsize);
+ rd_cnt += _enq_hdr._dsize;
+ }
+ std::memcpy((void*)&_enq_tail, (char*)rptr + rd_cnt, sizeof(_enq_tail));
+ chk_tail();
+ rd_cnt += sizeof(_enq_tail);
+ }
+ else if (hdr_data_dblks <= max_size_dblks)
+ {
+ // Header, xid and data fit within this page, tail split or separated
+ if (_enq_hdr._xidsize)
+ {
+ std::memcpy(_buff, (char*)rptr + rd_cnt, _enq_hdr._xidsize);
+ rd_cnt += _enq_hdr._xidsize;
+ }
+ if (_enq_hdr._dsize && !_enq_hdr.is_external())
+ {
+ std::memcpy((char*)_buff + _enq_hdr._xidsize, (char*)rptr + rd_cnt,
+ _enq_hdr._dsize);
+ rd_cnt += _enq_hdr._dsize;
+ }
+ const std::size_t tail_rem = (max_size_dblks * JRNL_DBLK_SIZE) - rd_cnt;
+ if (tail_rem)
+ {
+ std::memcpy((void*)&_enq_tail, (char*)rptr + rd_cnt, tail_rem);
+ rd_cnt += tail_rem;
+ }
+ }
+ else if (hdr_xid_dblks <= max_size_dblks)
+ {
+                // Header and xid fit within this page, data split or separated
+ if (_enq_hdr._xidsize)
+ {
+ std::memcpy(_buff, (char*)rptr + rd_cnt, _enq_hdr._xidsize);
+ rd_cnt += _enq_hdr._xidsize;
+ }
+ if (_enq_hdr._dsize && !_enq_hdr.is_external())
+ {
+ const std::size_t data_cp_size = (max_size_dblks * JRNL_DBLK_SIZE) - rd_cnt;
+ std::memcpy((char*)_buff + _enq_hdr._xidsize, (char*)rptr + rd_cnt, data_cp_size);
+ rd_cnt += data_cp_size;
+ }
+ }
+ else
+ {
+ // Header fits within this page, xid split or separated
+ const std::size_t data_cp_size = (max_size_dblks * JRNL_DBLK_SIZE) - rd_cnt;
+ std::memcpy(_buff, (char*)rptr + rd_cnt, data_cp_size);
+ rd_cnt += data_cp_size;
+ }
+ }
+ }
+ return size_dblks(rd_cnt);
+}
+
+bool
+enq_rec::rcv_decode(rec_hdr h, std::ifstream* ifsp, std::size_t& rec_offs)
+{
+ if (rec_offs == 0)
+ {
+ // Read header, allocate (if req'd) for xid
+ _enq_hdr.hdr_copy(h);
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ ifsp->ignore(sizeof(u_int32_t)); // _filler0
+#endif
+ ifsp->read((char*)&_enq_hdr._xidsize, sizeof(std::size_t));
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ ifsp->ignore(sizeof(u_int32_t)); // _filler0
+#endif
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ ifsp->ignore(sizeof(u_int32_t)); // _filler1
+#endif
+ ifsp->read((char*)&_enq_hdr._dsize, sizeof(std::size_t));
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ ifsp->ignore(sizeof(u_int32_t)); // _filler1
+#endif
+ rec_offs = sizeof(_enq_hdr);
+ if (_enq_hdr._xidsize)
+ {
+ _buff = std::malloc(_enq_hdr._xidsize);
+ MALLOC_CHK(_buff, "_buff", "enq_rec", "rcv_decode");
+ }
+ }
+ if (rec_offs < sizeof(_enq_hdr) + _enq_hdr._xidsize)
+ {
+ // Read xid (or continue reading xid)
+ std::size_t offs = rec_offs - sizeof(_enq_hdr);
+ ifsp->read((char*)_buff + offs, _enq_hdr._xidsize - offs);
+ std::size_t size_read = ifsp->gcount();
+ rec_offs += size_read;
+ if (size_read < _enq_hdr._xidsize - offs)
+ {
+ assert(ifsp->eof());
+ // As we may have read past eof, turn off fail bit
+ ifsp->clear(ifsp->rdstate()&(~std::ifstream::failbit));
+ assert(!ifsp->fail() && !ifsp->bad());
+ return false;
+ }
+ }
+ if (!_enq_hdr.is_external())
+ {
+ if (rec_offs < sizeof(_enq_hdr) + _enq_hdr._xidsize + _enq_hdr._dsize)
+ {
+ // Ignore data (or continue ignoring data)
+ std::size_t offs = rec_offs - sizeof(_enq_hdr) - _enq_hdr._xidsize;
+ ifsp->ignore(_enq_hdr._dsize - offs);
+ std::size_t size_read = ifsp->gcount();
+ rec_offs += size_read;
+ if (size_read < _enq_hdr._dsize - offs)
+ {
+ assert(ifsp->eof());
+ // As we may have read past eof, turn off fail bit
+ ifsp->clear(ifsp->rdstate()&(~std::ifstream::failbit));
+ assert(!ifsp->fail() && !ifsp->bad());
+ return false;
+ }
+ }
+ }
+ if (rec_offs < sizeof(_enq_hdr) + _enq_hdr._xidsize +
+ (_enq_hdr.is_external() ? 0 : _enq_hdr._dsize) + sizeof(rec_tail))
+ {
+ // Read tail (or continue reading tail)
+ std::size_t offs = rec_offs - sizeof(_enq_hdr) - _enq_hdr._xidsize;
+ if (!_enq_hdr.is_external())
+ offs -= _enq_hdr._dsize;
+ ifsp->read((char*)&_enq_tail + offs, sizeof(rec_tail) - offs);
+ std::size_t size_read = ifsp->gcount();
+ rec_offs += size_read;
+ if (size_read < sizeof(rec_tail) - offs)
+ {
+ assert(ifsp->eof());
+ // As we may have read past eof, turn off fail bit
+ ifsp->clear(ifsp->rdstate()&(~std::ifstream::failbit));
+ assert(!ifsp->fail() && !ifsp->bad());
+ return false;
+ }
+ }
+ ifsp->ignore(rec_size_dblks() * JRNL_DBLK_SIZE - rec_size());
+ chk_tail(); // Throws if tail invalid or record incomplete
+ assert(!ifsp->fail() && !ifsp->bad());
+ return true;
+}
+
+std::size_t
+enq_rec::get_xid(void** const xidpp)
+{
+ if (!_buff || !_enq_hdr._xidsize)
+ {
+ *xidpp = 0;
+ return 0;
+ }
+ *xidpp = _buff;
+ return _enq_hdr._xidsize;
+}
+
+std::size_t
+enq_rec::get_data(void** const datapp)
+{
+ if (!_buff)
+ {
+ *datapp = 0;
+ return 0;
+ }
+ if (_enq_hdr.is_external())
+ *datapp = 0;
+ else
+ *datapp = (void*)((char*)_buff + _enq_hdr._xidsize);
+ return _enq_hdr._dsize;
+}
+
+std::string&
+enq_rec::str(std::string& str) const
+{
+ std::ostringstream oss;
+ oss << "enq_rec: m=" << _enq_hdr._magic;
+ oss << " v=" << (int)_enq_hdr._version;
+ oss << " rid=" << _enq_hdr._rid;
+ if (_xidp)
+ oss << " xid=\"" << _xidp << "\"";
+ oss << " len=" << _enq_hdr._dsize;
+ str.append(oss.str());
+ return str;
+}
+
+std::size_t
+enq_rec::rec_size() const
+{
+ return rec_size(_enq_hdr._xidsize, _enq_hdr._dsize, _enq_hdr.is_external());
+}
+
+std::size_t
+enq_rec::rec_size(const std::size_t xidsize, const std::size_t dsize, const bool external)
+{
+ if (external)
+ return enq_hdr::size() + xidsize + rec_tail::size();
+ return enq_hdr::size() + xidsize + dsize + rec_tail::size();
+}
+
+void
+enq_rec::set_rid(const u_int64_t rid)
+{
+ _enq_hdr._rid = rid;
+ _enq_tail._rid = rid;
+}
+
+void
+enq_rec::chk_hdr() const
+{
+ jrec::chk_hdr(_enq_hdr);
+ if (_enq_hdr._magic != RHM_JDAT_ENQ_MAGIC)
+ {
+ std::ostringstream oss;
+ oss << std::hex << std::setfill('0');
+ oss << "enq magic: rid=0x" << std::setw(16) << _enq_hdr._rid;
+ oss << ": expected=0x" << std::setw(8) << RHM_JDAT_ENQ_MAGIC;
+ oss << " read=0x" << std::setw(2) << (int)_enq_hdr._magic;
+ throw jexception(jerrno::JERR_JREC_BADRECHDR, oss.str(), "enq_rec", "chk_hdr");
+ }
+}
+
+void
+enq_rec::chk_hdr(u_int64_t rid) const
+{
+ chk_hdr();
+ jrec::chk_rid(_enq_hdr, rid);
+}
+
+void
+enq_rec::chk_tail() const
+{
+ jrec::chk_tail(_enq_tail, _enq_hdr);
+}
+
+void
+enq_rec::clean()
+{
+ // clean up allocated memory here
+}
+
+} // namespace journal
+} // namespace mrg
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/enq_rec.h b/qpid/cpp/src/qpid/legacystore/jrnl/enq_rec.h
new file mode 100644
index 0000000000..805a96a1aa
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/enq_rec.h
@@ -0,0 +1,116 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file enq_rec.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * This file contains the code for the mrg::journal::enq_rec (journal enqueue
+ * record) class. See class documentation for details.
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_ENQ_REC_H
+#define QPID_LEGACYSTORE_JRNL_ENQ_REC_H
+
+namespace mrg
+{
+namespace journal
+{
+class enq_rec;
+}
+}
+
+#include <cstddef>
+#include "qpid/legacystore/jrnl/enq_hdr.h"
+#include "qpid/legacystore/jrnl/jrec.h"
+
+namespace mrg
+{
+namespace journal
+{
+
+ /**
+ * \class enq_rec
+ * \brief Class to handle a single journal enqueue record.
+ */
+ class enq_rec : public jrec
+ {
+ private:
+ enq_hdr _enq_hdr;
+ const void* _xidp; ///< xid pointer for encoding (for writing to disk)
+ const void* _data; ///< Pointer to data to be written to disk
+ void* _buff; ///< Pointer to buffer to receive data read from disk
+ rec_tail _enq_tail;
+
+ public:
+ /**
+ * \brief Constructor used for read operations.
+ */
+ enq_rec();
+
+ /**
+         * \brief Constructor used for write operations, where dbuf contains data to be written.
+ */
+ enq_rec(const u_int64_t rid, const void* const dbuf, const std::size_t dlen,
+ const void* const xidp, const std::size_t xidlen, const bool owi, const bool transient);
+
+ /**
+ * \brief Destructor
+ */
+ virtual ~enq_rec();
+
+        // Prepare instance for use in reading data from the journal; xid and data buffers will be allocated
+ void reset();
+ // Prepare instance for use in writing data to journal
+ void reset(const u_int64_t rid, const void* const dbuf, const std::size_t dlen,
+ const void* const xidp, const std::size_t xidlen, const bool owi, const bool transient,
+ const bool external);
+
+ u_int32_t encode(void* wptr, u_int32_t rec_offs_dblks, u_int32_t max_size_dblks);
+ u_int32_t decode(rec_hdr& h, void* rptr, u_int32_t rec_offs_dblks,
+ u_int32_t max_size_dblks);
+ // Decode used for recover
+ bool rcv_decode(rec_hdr h, std::ifstream* ifsp, std::size_t& rec_offs);
+
+ std::size_t get_xid(void** const xidpp);
+ std::size_t get_data(void** const datapp);
+ inline bool is_transient() const { return _enq_hdr.is_transient(); }
+ inline bool is_external() const { return _enq_hdr.is_external(); }
+ std::string& str(std::string& str) const;
+ inline std::size_t data_size() const { return _enq_hdr._dsize; }
+ inline std::size_t xid_size() const { return _enq_hdr._xidsize; }
+ std::size_t rec_size() const;
+ static std::size_t rec_size(const std::size_t xidsize, const std::size_t dsize, const bool external);
+ inline u_int64_t rid() const { return _enq_hdr._rid; }
+ void set_rid(const u_int64_t rid);
+
+ private:
+ void chk_hdr() const;
+ void chk_hdr(u_int64_t rid) const;
+ void chk_tail() const;
+ virtual void clean();
+ }; // class enq_rec
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_ENQ_REC_H
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/enums.h b/qpid/cpp/src/qpid/legacystore/jrnl/enums.h
new file mode 100644
index 0000000000..169a13fa4d
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/enums.h
@@ -0,0 +1,108 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file enums.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing definitions for namespace mrg::journal enums.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_ENUMS_H
+#define QPID_LEGACYSTORE_JRNL_ENUMS_H
+
+namespace mrg
+{
+namespace journal
+{
+
+ // TODO: Change this to flags, as multiple of these conditions may exist simultaneously
+ /**
+     * \brief Enumeration of possible return states from journal read and write operations.
+ */
+ enum _iores
+ {
+        RHM_IORES_SUCCESS = 0,  ///< Success: IO operation completed normally.
+ RHM_IORES_PAGE_AIOWAIT, ///< IO operation suspended - next page is waiting for AIO.
+ RHM_IORES_FILE_AIOWAIT, ///< IO operation suspended - next file is waiting for AIO.
+ RHM_IORES_EMPTY, ///< During read operations, nothing further is available to read.
+        RHM_IORES_RCINVALID,    ///< Read page cache is invalid (i.e. obsolete or uninitialized).
+ RHM_IORES_ENQCAPTHRESH, ///< Enqueue capacity threshold (limit) reached.
+ RHM_IORES_FULL, ///< During write operations, the journal files are full.
+ RHM_IORES_BUSY, ///< Another blocking operation is in progress.
+ RHM_IORES_TXPENDING, ///< Operation blocked by pending transaction.
+ RHM_IORES_NOTIMPL ///< Function is not yet implemented.
+ };
+ typedef _iores iores;
+
+ static inline const char* iores_str(iores res)
+ {
+ switch (res)
+ {
+ case RHM_IORES_SUCCESS: return "RHM_IORES_SUCCESS";
+ case RHM_IORES_PAGE_AIOWAIT: return "RHM_IORES_PAGE_AIOWAIT";
+ case RHM_IORES_FILE_AIOWAIT: return "RHM_IORES_FILE_AIOWAIT";
+ case RHM_IORES_EMPTY: return "RHM_IORES_EMPTY";
+ case RHM_IORES_RCINVALID: return "RHM_IORES_RCINVALID";
+ case RHM_IORES_ENQCAPTHRESH: return "RHM_IORES_ENQCAPTHRESH";
+ case RHM_IORES_FULL: return "RHM_IORES_FULL";
+ case RHM_IORES_BUSY: return "RHM_IORES_BUSY";
+ case RHM_IORES_TXPENDING: return "RHM_IORES_TXPENDING";
+ case RHM_IORES_NOTIMPL: return "RHM_IORES_NOTIMPL";
+ }
+ return "<iores unknown>";
+ }
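+
+    // Illustrative caller-side sketch (assumed usage only; some_journal_op() and report() are
+    // hypothetical names, not part of this library):
+    //   iores res = some_journal_op();
+    //   if (res != RHM_IORES_SUCCESS)
+    //       report(LOG_ERROR, iores_str(res));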
+
+ enum _log_level
+ {
+ LOG_TRACE = 0,
+ LOG_DEBUG,
+ LOG_INFO,
+ LOG_NOTICE,
+ LOG_WARN,
+ LOG_ERROR,
+ LOG_CRITICAL
+ };
+ typedef _log_level log_level;
+
+ static inline const char* log_level_str(log_level ll)
+ {
+ switch (ll)
+ {
+ case LOG_TRACE: return "TRACE";
+ case LOG_DEBUG: return "DEBUG";
+ case LOG_INFO: return "INFO";
+ case LOG_NOTICE: return "NOTICE";
+ case LOG_WARN: return "WARN";
+ case LOG_ERROR: return "ERROR";
+ case LOG_CRITICAL: return "CRITICAL";
+ }
+ return "<log level unknown>";
+ }
+
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_ENUMS_H
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/fcntl.cpp b/qpid/cpp/src/qpid/legacystore/jrnl/fcntl.cpp
new file mode 100644
index 0000000000..fbb176667e
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/fcntl.cpp
@@ -0,0 +1,375 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file fcntl.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::fcntl (non-logging file
+ * handle), used for controlling journal log files. See comments in file
+ * fcntl.h for details.
+ */
+
+#include "qpid/legacystore/jrnl/fcntl.h"
+
+#include <cerrno>
+#include <cstdlib>
+#include <cstring>
+#include <fcntl.h>
+#include <iomanip>
+#include "qpid/legacystore/jrnl/jerrno.h"
+#include "qpid/legacystore/jrnl/jexception.h"
+#include <sstream>
+#include <unistd.h>
+
+namespace mrg
+{
+namespace journal
+{
+
+fcntl::fcntl(const std::string& fbasename, const u_int16_t pfid, const u_int16_t lfid, const u_int32_t jfsize_sblks,
+ const rcvdat* const ro):
+ _fname(),
+ _pfid(pfid),
+ _lfid(lfid),
+ _ffull_dblks(JRNL_SBLK_SIZE * (jfsize_sblks + 1)),
+ _wr_fh(-1),
+ _rec_enqcnt(0),
+ _rd_subm_cnt_dblks(0),
+ _rd_cmpl_cnt_dblks(0),
+ _wr_subm_cnt_dblks(0),
+ _wr_cmpl_cnt_dblks(0),
+ _aio_cnt(0),
+ _fhdr_wr_aio_outstanding(false)
+{
+ initialize(fbasename, pfid, lfid, jfsize_sblks, ro);
+ open_wr_fh();
+}
+
+fcntl::~fcntl()
+{
+ close_wr_fh();
+}
+
+bool
+fcntl::reset(const rcvdat* const ro)
+{
+ rd_reset();
+ return wr_reset(ro);
+}
+
+void
+fcntl::rd_reset()
+{
+ _rd_subm_cnt_dblks = 0;
+ _rd_cmpl_cnt_dblks = 0;
+}
+
+bool
+fcntl::wr_reset(const rcvdat* const ro)
+{
+ if (ro)
+ {
+ if (!ro->_jempty)
+ {
+ if (ro->_lfid == _pfid)
+ {
+ _wr_subm_cnt_dblks = ro->_eo/JRNL_DBLK_SIZE;
+ _wr_cmpl_cnt_dblks = ro->_eo/JRNL_DBLK_SIZE;
+ }
+ else
+ {
+ _wr_subm_cnt_dblks = _ffull_dblks;
+ _wr_cmpl_cnt_dblks = _ffull_dblks;
+ }
+ _rec_enqcnt = ro->_enq_cnt_list[_pfid];
+ return true;
+ }
+ }
+ // Journal overflow test - checks if the file to be reset still contains enqueued records
+ // or outstanding aios
+ if (_rec_enqcnt || _aio_cnt)
+ return false;
+ _wr_subm_cnt_dblks = 0;
+ _wr_cmpl_cnt_dblks = 0;
+ return true;
+}
+
+int
+fcntl::open_wr_fh()
+{
+ if (_wr_fh < 0)
+ {
+ _wr_fh = ::open(_fname.c_str(), O_WRONLY | O_DIRECT, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); // 0644 -rw-r--r--
+ if (_wr_fh < 0)
+ {
+ std::ostringstream oss;
+ oss << "pfid=" << _pfid << " lfid=" << _lfid << " file=\"" << _fname << "\"" << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR_FCNTL_OPENWR, oss.str(), "fcntl", "open_fh");
+ }
+ }
+ return _wr_fh;
+}
+
+void
+fcntl::close_wr_fh()
+{
+ if (_wr_fh >= 0)
+ {
+ ::close(_wr_fh);
+ _wr_fh = -1;
+ }
+}
+
+u_int32_t
+fcntl::add_enqcnt(u_int32_t a)
+{
+ _rec_enqcnt += a;
+ return _rec_enqcnt;
+}
+
+u_int32_t
+fcntl::decr_enqcnt()
+{
+ if (_rec_enqcnt == 0)
+ {
+ std::ostringstream oss;
+ oss << "pfid=" << _pfid << " lfid=" << _lfid;
+ throw jexception(jerrno::JERR__UNDERFLOW, oss.str(), "fcntl", "decr_enqcnt");
+ }
+ return --_rec_enqcnt;
+}
+
+u_int32_t
+fcntl::subtr_enqcnt(u_int32_t s)
+{
+ if (_rec_enqcnt < s)
+ {
+ std::ostringstream oss;
+ oss << "pfid=" << _pfid << " lfid=" << _lfid << " rec_enqcnt=" << _rec_enqcnt << " decr=" << s;
+ throw jexception(jerrno::JERR__UNDERFLOW, oss.str(), "fcntl", "subtr_enqcnt");
+ }
+ _rec_enqcnt -= s;
+ return _rec_enqcnt;
+}
+
+u_int32_t
+fcntl::add_rd_subm_cnt_dblks(u_int32_t a)
+{
+ if (_rd_subm_cnt_dblks + a > _wr_subm_cnt_dblks)
+ {
+ std::ostringstream oss;
+ oss << "pfid=" << _pfid << " lfid=" << _lfid << " rd_subm_cnt_dblks=" << _rd_subm_cnt_dblks << " incr=" << a;
+ oss << " wr_subm_cnt_dblks=" << _wr_subm_cnt_dblks;
+ throw jexception(jerrno::JERR_FCNTL_RDOFFSOVFL, oss.str(), "fcntl", "add_rd_subm_cnt_dblks");
+ }
+ _rd_subm_cnt_dblks += a;
+ return _rd_subm_cnt_dblks;
+}
+
+u_int32_t
+fcntl::add_rd_cmpl_cnt_dblks(u_int32_t a)
+{
+ if (_rd_cmpl_cnt_dblks + a > _rd_subm_cnt_dblks)
+ {
+ std::ostringstream oss;
+ oss << "pfid=" << _pfid << " lfid=" << _lfid << " rd_cmpl_cnt_dblks=" << _rd_cmpl_cnt_dblks << " incr=" << a;
+ oss << " rd_subm_cnt_dblks=" << _rd_subm_cnt_dblks;
+ throw jexception(jerrno::JERR_FCNTL_CMPLOFFSOVFL, oss.str(), "fcntl", "add_rd_cmpl_cnt_dblks");
+ }
+ _rd_cmpl_cnt_dblks += a;
+ return _rd_cmpl_cnt_dblks;
+}
+
+u_int32_t
+fcntl::add_wr_subm_cnt_dblks(u_int32_t a)
+{
+ if (_wr_subm_cnt_dblks + a > _ffull_dblks) // Allow for file header
+ {
+ std::ostringstream oss;
+ oss << "pfid=" << _pfid << " lfid=" << _lfid << " wr_subm_cnt_dblks=" << _wr_subm_cnt_dblks << " incr=" << a;
+ oss << " fsize=" << _ffull_dblks << " dblks";
+ throw jexception(jerrno::JERR_FCNTL_FILEOFFSOVFL, oss.str(), "fcntl", "add_wr_subm_cnt_dblks");
+ }
+ _wr_subm_cnt_dblks += a;
+ return _wr_subm_cnt_dblks;
+}
+
+u_int32_t
+fcntl::add_wr_cmpl_cnt_dblks(u_int32_t a)
+{
+ if (_wr_cmpl_cnt_dblks + a > _wr_subm_cnt_dblks)
+ {
+ std::ostringstream oss;
+ oss << "pfid=" << _pfid << " lfid=" << _lfid << " wr_cmpl_cnt_dblks=" << _wr_cmpl_cnt_dblks << " incr=" << a;
+ oss << " wr_subm_cnt_dblks=" << _wr_subm_cnt_dblks;
+ throw jexception(jerrno::JERR_FCNTL_CMPLOFFSOVFL, oss.str(), "fcntl", "add_wr_cmpl_cnt_dblks");
+ }
+ _wr_cmpl_cnt_dblks += a;
+ return _wr_cmpl_cnt_dblks;
+}
+
+u_int16_t
+fcntl::decr_aio_cnt()
+{
+ if(_aio_cnt == 0)
+ {
+ std::ostringstream oss;
+ oss << "pfid=" << _pfid << " lfid=" << _lfid << " Decremented aio_cnt to below zero";
+ throw jexception(jerrno::JERR__UNDERFLOW, oss.str(), "fcntl", "decr_aio_cnt");
+ }
+ return --_aio_cnt;
+}
+
+// Debug function
+const std::string
+fcntl::status_str() const
+{
+ std::ostringstream oss;
+ oss << "pfid=" << _pfid << " ws=" << _wr_subm_cnt_dblks << " wc=" << _wr_cmpl_cnt_dblks;
+ oss << " rs=" << _rd_subm_cnt_dblks << " rc=" << _rd_cmpl_cnt_dblks;
+ oss << " ec=" << _rec_enqcnt << " ac=" << _aio_cnt;
+ return oss.str();
+}
+
+// Protected functions
+
+void
+fcntl::initialize(const std::string& fbasename, const u_int16_t pfid, const u_int16_t lfid, const u_int32_t jfsize_sblks,
+ const rcvdat* const ro)
+{
+ _pfid = pfid;
+ _lfid = lfid;
+ _fname = filename(fbasename, pfid);
+
+#ifdef RHM_JOWRITE
+ // In test mode, only create file if it does not exist
+ struct stat s;
+ if (::stat(_fname.c_str(), &s))
+ {
+#endif
+ if (ro) // Recovery initialization: set counters only
+ {
+ if (!ro->_jempty)
+ {
+ // For last file only, set write counters to end of last record (the
+ // continuation point); for all others, set to eof.
+ if (ro->_lfid == _pfid)
+ {
+ _wr_subm_cnt_dblks = ro->_eo/JRNL_DBLK_SIZE;
+ _wr_cmpl_cnt_dblks = ro->_eo/JRNL_DBLK_SIZE;
+ }
+ else
+ {
+ _wr_subm_cnt_dblks = _ffull_dblks;
+ _wr_cmpl_cnt_dblks = _ffull_dblks;
+ }
+ // Set the number of enqueued records for this file.
+ _rec_enqcnt = ro->_enq_cnt_list[_pfid];
+ }
+ }
+ else // Normal initialization: create empty journal files
+ create_jfile(jfsize_sblks);
+#ifdef RHM_JOWRITE
+ }
+#endif
+}
+
+std::string
+fcntl::filename(const std::string& fbasename, const u_int16_t pfid)
+{
+ std::ostringstream oss;
+ oss << fbasename << ".";
+ oss << std::setw(4) << std::setfill('0') << std::hex << pfid;
+ oss << "." << JRNL_DATA_EXTENSION;
+ return oss.str();
+}
+
+void
+fcntl::clean_file(const u_int32_t jfsize_sblks)
+{
+ // NOTE: The journal file size is always one sblock bigger than the specified journal
+ // file size, which is the data content size. The extra block is for the journal file
+ // header which precedes all data on each file and is exactly one sblock in size.
+ u_int32_t nsblks = jfsize_sblks + 1;
+
+ // TODO - look at more efficient alternatives to allocating a null block:
+ // 1. mmap() against /dev/zero, but can alignment for O_DIRECT be assured?
+ // 2. ftruncate(), but does this result in a sparse file? If so, then this is no good.
+
+ // Create temp null block for writing
+ const std::size_t sblksize = JRNL_DBLK_SIZE * JRNL_SBLK_SIZE;
+ void* nullbuf = 0;
+ // Allocate no more than 2MB (4096 sblks) as a null buffer
+ const u_int32_t nullbuffsize_sblks = nsblks > 4096 ? 4096 : nsblks;
+ const std::size_t nullbuffsize = nullbuffsize_sblks * sblksize;
+ if (::posix_memalign(&nullbuf, sblksize, nullbuffsize))
+ {
+ std::ostringstream oss;
+ oss << "posix_memalign() failed: size=" << nullbuffsize << " blk_size=" << sblksize;
+ oss << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR__MALLOC, oss.str(), "fcntl", "clean_file");
+ }
+ std::memset(nullbuf, 0, nullbuffsize);
+
+ int fh = ::open(_fname.c_str(), O_WRONLY | O_CREAT | O_DIRECT,
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); // 0644 -rw-r--r--
+ if (fh < 0)
+ {
+ std::free(nullbuf);
+ std::ostringstream oss;
+ oss << "open() failed:" << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR_FCNTL_OPENWR, oss.str(), "fcntl", "clean_file");
+ }
+
+ while (nsblks > 0)
+ {
+ u_int32_t this_write_sblks = nsblks >= nullbuffsize_sblks ? nullbuffsize_sblks : nsblks;
+ if (::write(fh, nullbuf, this_write_sblks * sblksize) == -1)
+ {
+ ::close(fh);
+ std::free(nullbuf);
+ std::ostringstream oss;
+ oss << "wr_size=" << (this_write_sblks * sblksize) << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR_FCNTL_WRITE, oss.str(), "fcntl", "clean_file");
+ }
+ nsblks -= this_write_sblks;
+ }
+
+ // Clean up
+ std::free(nullbuf);
+ if (::close(fh))
+ {
+ std::ostringstream oss;
+ oss << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR_FCNTL_CLOSE, oss.str(), "fcntl", "clean_file");
+ }
+}
+
+void
+fcntl::create_jfile(const u_int32_t jfsize_sblks)
+{
+ clean_file(jfsize_sblks);
+}
+
+} // namespace journal
+} // namespace mrg
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/fcntl.h b/qpid/cpp/src/qpid/legacystore/jrnl/fcntl.h
new file mode 100644
index 0000000000..a75e3bc84d
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/fcntl.h
@@ -0,0 +1,156 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file fcntl.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::fcntl (non-logging file
+ * handle), used for controlling journal log files. See class documentation for
+ * details.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_FCNTL_H
+#define QPID_LEGACYSTORE_JRNL_FCNTL_H
+
+namespace mrg
+{
+namespace journal
+{
+class fcntl;
+}
+}
+
+#include <cstddef>
+#include <string>
+#include "qpid/legacystore/jrnl/rcvdat.h"
+#include <sys/types.h>
+
+namespace mrg
+{
+namespace journal
+{
+
+ /**
+ * \class fcntl
+ * \brief Journal file controller. There is one instance per journal file.
+ */
+ class fcntl
+ {
+ protected:
+ std::string _fname; ///< File name
+ u_int16_t _pfid; ///< Physical file ID (file number in order of creation)
+ u_int16_t _lfid; ///< Logical file ID (ordinal number in ring store)
+ const u_int32_t _ffull_dblks; ///< File size in dblks (incl. file header)
+ int _wr_fh; ///< Write file handle
+ u_int32_t _rec_enqcnt; ///< Count of enqueued records
+ u_int32_t _rd_subm_cnt_dblks; ///< Read file count (data blocks) for submitted AIO
+ u_int32_t _rd_cmpl_cnt_dblks; ///< Read file count (data blocks) for completed AIO
+ u_int32_t _wr_subm_cnt_dblks; ///< Write file count (data blocks) for submitted AIO
+ u_int32_t _wr_cmpl_cnt_dblks; ///< Write file count (data blocks) for completed AIO
+ u_int16_t _aio_cnt; ///< Outstanding AIO operations on this file
+ bool _fhdr_wr_aio_outstanding; ///< Outstanding file header write on this file
+
+ public:
+ // Constructors with implicit initialize() and open()
+ fcntl(const std::string& fbasename, const u_int16_t pfid, const u_int16_t lfid, const u_int32_t jfsize_sblks,
+ const rcvdat* const ro);
+ virtual ~fcntl();
+
+ virtual bool reset(const rcvdat* const ro = 0);
+ virtual void rd_reset();
+ virtual bool wr_reset(const rcvdat* const ro = 0);
+
+ virtual int open_wr_fh();
+ virtual void close_wr_fh();
+ inline bool is_wr_fh_open() const { return _wr_fh >= 0; }
+
+ inline const std::string& fname() const { return _fname; }
+ inline u_int16_t pfid() const { return _pfid; }
+ inline u_int16_t lfid() const { return _lfid; }
+ inline void set_lfid(const u_int16_t lfid) { _lfid = lfid; }
+ inline int wr_fh() const { return _wr_fh; }
+ inline u_int32_t enqcnt() const { return _rec_enqcnt; }
+ inline u_int32_t incr_enqcnt() { return ++_rec_enqcnt; }
+ u_int32_t add_enqcnt(u_int32_t a);
+ u_int32_t decr_enqcnt();
+ u_int32_t subtr_enqcnt(u_int32_t s);
+
+ inline u_int32_t rd_subm_cnt_dblks() const { return _rd_subm_cnt_dblks; }
+ inline std::size_t rd_subm_offs() const { return _rd_subm_cnt_dblks * JRNL_DBLK_SIZE; }
+ u_int32_t add_rd_subm_cnt_dblks(u_int32_t a);
+
+ inline u_int32_t rd_cmpl_cnt_dblks() const { return _rd_cmpl_cnt_dblks; }
+ inline std::size_t rd_cmpl_offs() const { return _rd_cmpl_cnt_dblks * JRNL_DBLK_SIZE; }
+ u_int32_t add_rd_cmpl_cnt_dblks(u_int32_t a);
+
+ inline u_int32_t wr_subm_cnt_dblks() const { return _wr_subm_cnt_dblks; }
+ inline std::size_t wr_subm_offs() const { return _wr_subm_cnt_dblks * JRNL_DBLK_SIZE; }
+ u_int32_t add_wr_subm_cnt_dblks(u_int32_t a);
+
+ inline u_int32_t wr_cmpl_cnt_dblks() const { return _wr_cmpl_cnt_dblks; }
+ inline std::size_t wr_cmpl_offs() const { return _wr_cmpl_cnt_dblks * JRNL_DBLK_SIZE; }
+ u_int32_t add_wr_cmpl_cnt_dblks(u_int32_t a);
+
+ inline u_int16_t aio_cnt() const { return _aio_cnt; }
+ inline u_int16_t incr_aio_cnt() { return ++_aio_cnt; }
+ u_int16_t decr_aio_cnt();
+
+ inline bool wr_fhdr_aio_outstanding() { return _fhdr_wr_aio_outstanding; }
+ inline void set_wr_fhdr_aio_outstanding(const bool wfao) { _fhdr_wr_aio_outstanding = wfao; }
+
+ // Derived helper functions
+
+ inline bool rd_void() const { return _wr_cmpl_cnt_dblks == 0; }
+ inline bool rd_empty() const { return _wr_cmpl_cnt_dblks <= JRNL_SBLK_SIZE; }
+ inline u_int32_t rd_remaining_dblks() const { return _wr_cmpl_cnt_dblks - _rd_subm_cnt_dblks; }
+ inline bool is_rd_full() const { return _wr_cmpl_cnt_dblks == _rd_subm_cnt_dblks; }
+ inline bool is_rd_compl() const { return _wr_cmpl_cnt_dblks == _rd_cmpl_cnt_dblks; }
+ inline u_int32_t rd_aio_outstanding_dblks() const { return _rd_subm_cnt_dblks - _rd_cmpl_cnt_dblks; }
+ inline bool rd_file_rotate() const { return is_rd_full() && is_wr_compl(); }
+
+ inline bool wr_void() const { return _wr_subm_cnt_dblks == 0; }
+ inline bool wr_empty() const { return _wr_subm_cnt_dblks <= JRNL_SBLK_SIZE; }
+ inline u_int32_t wr_remaining_dblks() const { return _ffull_dblks - _wr_subm_cnt_dblks; }
+ inline bool is_wr_full() const { return _ffull_dblks == _wr_subm_cnt_dblks; }
+ inline bool is_wr_compl() const { return _ffull_dblks == _wr_cmpl_cnt_dblks; }
+ inline u_int32_t wr_aio_outstanding_dblks() const { return _wr_subm_cnt_dblks - _wr_cmpl_cnt_dblks; }
+ inline bool wr_file_rotate() const { return is_wr_full(); }
+
+ // Debug aid
+ const std::string status_str() const;
+
+ protected:
+ virtual void initialize(const std::string& fbasename, const u_int16_t pfid, const u_int16_t lfid,
+ const u_int32_t jfsize_sblks, const rcvdat* const ro);
+
+ static std::string filename(const std::string& fbasename, const u_int16_t pfid);
+ void clean_file(const u_int32_t jfsize_sblks);
+ void create_jfile(const u_int32_t jfsize_sblks);
+ };
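+
+    // Illustrative usage sketch (the base file name below is hypothetical; this documents assumed,
+    // typical use of the interface above and is not part of the class itself):
+    //   fcntl fc("jdata/test_journal", 0, 0, JRNL_MIN_FILE_SIZE, 0); // normal init: creates the file
+    //   int fh = fc.open_wr_fh();   // idempotent; returns the O_WRONLY|O_DIRECT write handle
+    //   fc.incr_aio_cnt();          // account for a submitted AIO on this file ...
+    //   fc.decr_aio_cnt();          // ... and for its completion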
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_FCNTL_H
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/file_hdr.h b/qpid/cpp/src/qpid/legacystore/jrnl/file_hdr.h
new file mode 100644
index 0000000000..db20834cbb
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/file_hdr.h
@@ -0,0 +1,211 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file file_hdr.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::file_hdr (file
+ * record header), used to start a journal file. It contains some
+ * file metadata and information to aid journal recovery.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_FILE_HDR_H
+#define QPID_LEGACYSTORE_JRNL_FILE_HDR_H
+
+#include <cerrno>
+#include <ctime>
+#include "qpid/legacystore/jrnl/rec_hdr.h"
+#include "qpid/legacystore/jrnl/jerrno.h"
+#include "qpid/legacystore/jrnl/jexception.h"
+#include <sstream>
+
+namespace mrg
+{
+namespace journal
+{
+
+#pragma pack(1)
+
+ /**
+ * \brief Struct for data common to the head of all journal files. In addition to
+ * the common data, this includes the record ID and offset of the first record in
+ * the file.
+ *
+ * This header precedes all data in journal files and occupies the first complete
+ * block in the file. The record ID and offset are updated on each overwrite of the
+ * file.
+ *
+ * File header info in binary format (48 bytes):
+ * <pre>
+ * 0 7
+ * +---+---+---+---+---+---+---+---+ -+
+ * | magic | v | e | flags | |
+ * +---+---+---+---+---+---+---+---+ | struct hdr
+ * | first rid in file | |
+ * +---+---+---+---+---+---+---+---+ -+
+ * | pfid | lfid | reserved (0) |
+ * +---+---+---+---+---+---+---+---+
+ * | fro |
+ * +---+---+---+---+---+---+---+---+
+ * | timestamp (sec) |
+ * +---+---+---+---+---+---+---+---+
+ * | timestamp (ns) |
+ * +---+---+---+---+---+---+---+---+
+ * v = file version (If the format or encoding of this file changes, then this
+ * number should be incremented)
+ * e = endian flag, false (0x00) for little endian, true (0x01) for big endian
+ * pfid = File ID (number used in naming file)
+ * lfid = Logical ID (order used in circular buffer)
+ * fro = First record offset, offset from start of file to first record header
+ * </pre>
+ *
+ * Note that journal files should be transferable between 32- and 64-bit
+ * hardware of the same endianness, but not between hardware of opposite
+     * endianness without some sort of binary conversion utility. Thus buffering
+ * will be needed for types that change size between 32- and 64-bit compiles.
+ */
+ struct file_hdr : rec_hdr
+ {
+ u_int16_t _pfid; ///< Physical file ID (pfid)
+ u_int16_t _lfid; ///< Logical file ID (lfid)
+ u_int32_t _res; ///< Reserved (for alignment/flags)
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ u_int32_t _filler0; ///< Big-endian filler for 32-bit size_t
+#endif
+ std::size_t _fro; ///< First record offset
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ u_int32_t _filler0; ///< Little-endian filler for 32-bit size_t
+#endif
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ u_int32_t _filler1; ///< Big-endian filler for 32-bit time_t
+#endif
+        std::time_t _ts_sec;            ///< Timestamp of journal initialization (seconds)
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ u_int32_t _filler1; ///< Little-endian filler for 32-bit time_t
+#endif
+#if defined(JRNL_BIG_ENDIAN)
+ u_int32_t _filler2; ///< Big endian filler for u_int32_t
+#endif
+        u_int32_t _ts_nsec;             ///< Timestamp of journal initialization (nanoseconds)
+#if defined(JRNL_LITTLE_ENDIAN)
+ u_int32_t _filler2; ///< Little-endian filler for u_int32_t
+#endif
+
+ /**
+ * \brief Default constructor, which sets all values to 0.
+ */
+ inline file_hdr(): rec_hdr(), _pfid(0), _lfid(0), _res(0),
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ _filler0(0),
+#endif
+ _fro(0),
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ _filler0(0),
+#endif
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ _filler1(0),
+#endif
+ _ts_sec(0),
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ _filler1(0),
+#endif
+#if defined(JRNL_BIG_ENDIAN)
+ _filler2(0),
+#endif
+ _ts_nsec(0)
+#if defined(JRNL_LITTLE_ENDIAN)
+ , _filler2(0)
+#endif
+ {}
+
+ /**
+ * \brief Convenience constructor which initializes values during construction.
+ */
+ inline file_hdr(const u_int32_t magic, const u_int8_t version, const u_int64_t rid,
+ const u_int16_t pfid, const u_int16_t lfid, const std::size_t fro,
+ const bool owi, const bool settime = false):
+ rec_hdr(magic, version, rid, owi), _pfid(pfid), _lfid(lfid), _res(0),
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ _filler0(0),
+#endif
+ _fro(fro),
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ _filler0(0),
+#endif
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ _filler1(0),
+#endif
+ _ts_sec(0),
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ _filler1(0),
+#endif
+#if defined(JRNL_BIG_ENDIAN)
+ _filler2(0),
+#endif
+ _ts_nsec(0)
+#if defined(JRNL_LITTLE_ENDIAN)
+ , _filler2(0)
+#endif
+ { if (settime) set_time(); }
+
+ /**
+ * \brief Gets the current time from the system clock and sets the timestamp in the struct.
+ */
+ inline void set_time()
+ {
+            // TODO: Standardize on method for getting time that does not require a context switch.
+ timespec ts;
+ if (::clock_gettime(CLOCK_REALTIME, &ts))
+ {
+ std::ostringstream oss;
+ oss << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR__RTCLOCK, oss.str(), "file_hdr", "set_time");
+ }
+ _ts_sec = ts.tv_sec;
+ _ts_nsec = ts.tv_nsec;
+ }
+
+ /**
+ * \brief Sets the timestamp in the struct to the provided value (in seconds and
+ * nanoseconds).
+ */
+ inline void set_time(timespec& ts)
+ {
+ _ts_sec = ts.tv_sec;
+ _ts_nsec = ts.tv_nsec;
+ }
+
+ /**
+ * \brief Returns the size of the header in bytes.
+ */
+ inline static std::size_t size() { return sizeof(file_hdr); }
+ }; // struct file_hdr
+
+#pragma pack()
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_FILE_HDR_H
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/jcfg.h b/qpid/cpp/src/qpid/legacystore/jrnl/jcfg.h
new file mode 100644
index 0000000000..0a0d0df28d
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/jcfg.h
@@ -0,0 +1,91 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file jcfg.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * This file contains \#defines that control the implementation details of
+ * the journal.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_JCFG_H
+#define QPID_LEGACYSTORE_JRNL_JCFG_H
+
+#if defined(__i386__) /* little endian, 32 bits */
+#define JRNL_LITTLE_ENDIAN
+#define JRNL_32_BIT
+#elif defined(__PPC__) || defined(__s390__) /* big endian, 32 bits */
+#define JRNL_BIG_ENDIAN
+#define JRNL_32_BIT
+#elif defined(__ia64__) || defined(__x86_64__) || defined(__alpha__) /* little endian, 64 bits */
+#define JRNL_LITTLE_ENDIAN
+#define JRNL_64_BIT
+#elif defined(__powerpc64__) || defined(__s390x__) /* big endian, 64 bits */
+#define JRNL_BIG_ENDIAN
+#define JRNL_64_BIT
+#else
+#error endian?
+#endif
+
+
+/**
+* <b>Rule:</b> Data block size (JRNL_DBLK_SIZE) MUST be a power of 2 such that
+* <pre>
+* JRNL_DBLK_SIZE * JRNL_SBLK_SIZE == n * 512 (n = 1,2,3...)
+* </pre>
+* (The disk softblock size is 512 for Linux kernels >= 2.6)
+*/
+#define JRNL_DBLK_SIZE 128 ///< Data block size in bytes (CANNOT BE LESS THAN 32!)
+#define JRNL_SBLK_SIZE 4 ///< Disk softblock size in multiples of JRNL_DBLK_SIZE
+#define JRNL_MIN_FILE_SIZE 128 ///< Min. jrnl file size in sblks (excl. file_hdr)
+#define JRNL_MAX_FILE_SIZE 4194176 ///< Max. jrnl file size in sblks (excl. file_hdr)
+#define JRNL_MIN_NUM_FILES 4 ///< Min. number of journal files
+#define JRNL_MAX_NUM_FILES 64 ///< Max. number of journal files
+#define JRNL_ENQ_THRESHOLD 80 ///< Percent full when enqueue connection will be closed
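+// With the default values above, JRNL_DBLK_SIZE * JRNL_SBLK_SIZE = 128 * 4 = 512, so the rule
+// above holds with n = 1. A minimal compile-time sketch of that invariant (illustrative only,
+// assumed and not part of the build):
+//   typedef char jrnl_blk_rule_check[(JRNL_DBLK_SIZE * JRNL_SBLK_SIZE) % 512 == 0 ? 1 : -1];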
+
+#define JRNL_RMGR_PAGE_SIZE 128 ///< Journal page size in softblocks
+#define JRNL_RMGR_PAGES 16 ///< Number of pages to use in wmgr
+
+#define JRNL_WMGR_DEF_PAGE_SIZE 64 ///< Journal write page size in softblocks (default)
+#define JRNL_WMGR_DEF_PAGES 32 ///< Number of pages to use in wmgr (default)
+
+#define JRNL_WMGR_MAXDTOKPP 1024 ///< Max. dtoks (data blocks) per page in wmgr
+#define JRNL_WMGR_MAXWAITUS 100 ///< Max. wait time (us) before submitting AIO
+
+#define JRNL_INFO_EXTENSION "jinf" ///< Extension for journal info files
+#define JRNL_DATA_EXTENSION "jdat" ///< Extension for journal data files
+#define RHM_JDAT_TXA_MAGIC 0x614d4852 ///< ("RHMa" in little endian) Magic for dtx abort hdrs
+#define RHM_JDAT_TXC_MAGIC 0x634d4852 ///< ("RHMc" in little endian) Magic for dtx commit hdrs
+#define RHM_JDAT_DEQ_MAGIC 0x644d4852 ///< ("RHMd" in little endian) Magic for deq rec hdrs
+#define RHM_JDAT_ENQ_MAGIC 0x654d4852 ///< ("RHMe" in little endian) Magic for enq rec hdrs
+#define RHM_JDAT_FILE_MAGIC 0x664d4852 ///< ("RHMf" in little endian) Magic for file hdrs
+#define RHM_JDAT_EMPTY_MAGIC 0x784d4852 ///< ("RHMx" in little endian) Magic for empty dblk
+#define RHM_JDAT_VERSION 0x01 ///< Version (of file layout)
+#define RHM_CLEAN_CHAR 0xff ///< Char used to clear empty space on disk
+
+#define RHM_LENDIAN_FLAG 0 ///< Value of little endian flag on disk
+#define RHM_BENDIAN_FLAG 1 ///< Value of big endian flag on disk
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_JCFG_H
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/jcntl.cpp b/qpid/cpp/src/qpid/legacystore/jrnl/jcntl.cpp
new file mode 100644
index 0000000000..a03076dca5
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/jcntl.cpp
@@ -0,0 +1,984 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file jcntl.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * Messaging journal top-level control and interface class
+ * mrg::journal::jcntl. See comments in file jcntl.h for details.
+ *
+ * \author Kim van der Riet
+ */
+
+
+#include "qpid/legacystore/jrnl/jcntl.h"
+
+#include <algorithm>
+#include <cassert>
+#include <cerrno>
+#include <cstdlib>
+#include <cstring>
+#include <fstream>
+#include <iomanip>
+#include <iostream>
+#include "qpid/legacystore/jrnl/file_hdr.h"
+#include "qpid/legacystore/jrnl/jerrno.h"
+#include "qpid/legacystore/jrnl/jinf.h"
+#include <limits>
+#include <sstream>
+#include <unistd.h>
+
+namespace mrg
+{
+namespace journal
+{
+
+#define AIO_CMPL_TIMEOUT_SEC 5
+#define AIO_CMPL_TIMEOUT_NSEC 0
+#define FINAL_AIO_CMPL_TIMEOUT_SEC 15
+#define FINAL_AIO_CMPL_TIMEOUT_NSEC 0
+
+// Static
+timespec jcntl::_aio_cmpl_timeout; ///< Timeout for blocking libaio returns
+timespec jcntl::_final_aio_cmpl_timeout; ///< Timeout for blocking libaio returns when stopping or finalizing
+bool jcntl::_init = init_statics();
+bool jcntl::init_statics()
+{
+ _aio_cmpl_timeout.tv_sec = AIO_CMPL_TIMEOUT_SEC;
+ _aio_cmpl_timeout.tv_nsec = AIO_CMPL_TIMEOUT_NSEC;
+ _final_aio_cmpl_timeout.tv_sec = FINAL_AIO_CMPL_TIMEOUT_SEC;
+ _final_aio_cmpl_timeout.tv_nsec = FINAL_AIO_CMPL_TIMEOUT_NSEC;
+ return true;
+}
+
+
+// Functions
+
+jcntl::jcntl(const std::string& jid, const std::string& jdir, const std::string& base_filename):
+ _jid(jid),
+ _jdir(jdir, base_filename),
+ _base_filename(base_filename),
+ _init_flag(false),
+ _stop_flag(false),
+ _readonly_flag(false),
+ _autostop(true),
+ _jfsize_sblks(0),
+ _lpmgr(),
+ _emap(),
+ _tmap(),
+ _rrfc(&_lpmgr),
+ _wrfc(&_lpmgr),
+ _rmgr(this, _emap, _tmap, _rrfc),
+ _wmgr(this, _emap, _tmap, _wrfc),
+ _rcvdat()
+{}
+
+jcntl::~jcntl()
+{
+ if (_init_flag && !_stop_flag)
+ try { stop(true); }
+ catch (const jexception& e) { std::cerr << e << std::endl; }
+ _lpmgr.finalize();
+}
+
+void
+jcntl::initialize(const u_int16_t num_jfiles, const bool ae, const u_int16_t ae_max_jfiles,
+ const u_int32_t jfsize_sblks, const u_int16_t wcache_num_pages, const u_int32_t wcache_pgsize_sblks,
+ aio_callback* const cbp)
+{
+ _init_flag = false;
+ _stop_flag = false;
+ _readonly_flag = false;
+
+ _emap.clear();
+ _tmap.clear();
+
+ _lpmgr.finalize();
+
+ // Set new file geometry parameters
+ assert(num_jfiles >= JRNL_MIN_NUM_FILES);
+ assert(num_jfiles <= JRNL_MAX_NUM_FILES);
+ _emap.set_num_jfiles(num_jfiles);
+ _tmap.set_num_jfiles(num_jfiles);
+
+ assert(jfsize_sblks >= JRNL_MIN_FILE_SIZE);
+ assert(jfsize_sblks <= JRNL_MAX_FILE_SIZE);
+ _jfsize_sblks = jfsize_sblks;
+
+ // Clear any existing journal files
+ _jdir.clear_dir();
+ _lpmgr.initialize(num_jfiles, ae, ae_max_jfiles, this, &new_fcntl);
+
+ _wrfc.initialize(_jfsize_sblks);
+ _rrfc.initialize();
+ _rrfc.set_findex(0);
+ _rmgr.initialize(cbp);
+ _wmgr.initialize(cbp, wcache_pgsize_sblks, wcache_num_pages, JRNL_WMGR_MAXDTOKPP, JRNL_WMGR_MAXWAITUS);
+
+ // Write info file (<basename>.jinf) to disk
+ write_infofile();
+
+ _init_flag = true;
+}
+
+void
+jcntl::recover(const u_int16_t num_jfiles, const bool ae, const u_int16_t ae_max_jfiles,
+ const u_int32_t jfsize_sblks, const u_int16_t wcache_num_pages, const u_int32_t wcache_pgsize_sblks,
+// const rd_aio_cb rd_cb, const wr_aio_cb wr_cb, const std::vector<std::string>* prep_txn_list_ptr,
+ aio_callback* const cbp, const std::vector<std::string>* prep_txn_list_ptr,
+ u_int64_t& highest_rid)
+{
+ _init_flag = false;
+ _stop_flag = false;
+ _readonly_flag = false;
+
+ _emap.clear();
+ _tmap.clear();
+
+ _lpmgr.finalize();
+
+ assert(num_jfiles >= JRNL_MIN_NUM_FILES);
+ assert(num_jfiles <= JRNL_MAX_NUM_FILES);
+ assert(jfsize_sblks >= JRNL_MIN_FILE_SIZE);
+ assert(jfsize_sblks <= JRNL_MAX_FILE_SIZE);
+ _jfsize_sblks = jfsize_sblks;
+
+ // Verify journal dir and journal files
+ _jdir.verify_dir();
+ _rcvdat.reset(num_jfiles, ae, ae_max_jfiles);
+
+ rcvr_janalyze(_rcvdat, prep_txn_list_ptr);
+ highest_rid = _rcvdat._h_rid;
+ if (_rcvdat._jfull)
+ throw jexception(jerrno::JERR_JCNTL_RECOVERJFULL, "jcntl", "recover");
+ this->log(LOG_DEBUG, _rcvdat.to_log(_jid));
+
+ _lpmgr.recover(_rcvdat, this, &new_fcntl);
+
+ _wrfc.initialize(_jfsize_sblks, &_rcvdat);
+ _rrfc.initialize();
+ _rrfc.set_findex(_rcvdat.ffid());
+ _rmgr.initialize(cbp);
+ _wmgr.initialize(cbp, wcache_pgsize_sblks, wcache_num_pages, JRNL_WMGR_MAXDTOKPP, JRNL_WMGR_MAXWAITUS,
+ (_rcvdat._lffull ? 0 : _rcvdat._eo));
+
+ _readonly_flag = true;
+ _init_flag = true;
+}
+
+void
+jcntl::recover_complete()
+{
+ if (!_readonly_flag)
+ throw jexception(jerrno::JERR_JCNTL_NOTRECOVERED, "jcntl", "recover_complete");
+ for (u_int16_t i=0; i<_lpmgr.num_jfiles(); i++)
+ _lpmgr.get_fcntlp(i)->reset(&_rcvdat);
+ _wrfc.initialize(_jfsize_sblks, &_rcvdat);
+ _rrfc.initialize();
+ _rrfc.set_findex(_rcvdat.ffid());
+ _rmgr.recover_complete();
+ _readonly_flag = false;
+}
+
+void
+jcntl::delete_jrnl_files()
+{
+ stop(true); // wait for AIO to complete
+ _jdir.delete_dir();
+}
+
+
+iores
+jcntl::enqueue_data_record(const void* const data_buff, const std::size_t tot_data_len,
+ const std::size_t this_data_len, data_tok* dtokp, const bool transient)
+{
+ iores r;
+ check_wstatus("enqueue_data_record");
+ {
+ slock s(_wr_mutex);
+ while (handle_aio_wait(_wmgr.enqueue(data_buff, tot_data_len, this_data_len, dtokp, 0, 0, transient, false), r,
+ dtokp)) ;
+ }
+ return r;
+}
+
+iores
+jcntl::enqueue_extern_data_record(const std::size_t tot_data_len, data_tok* dtokp, const bool transient)
+{
+ iores r;
+ check_wstatus("enqueue_extern_data_record");
+ {
+ slock s(_wr_mutex);
+ while (handle_aio_wait(_wmgr.enqueue(0, tot_data_len, 0, dtokp, 0, 0, transient, true), r, dtokp)) ;
+ }
+ return r;
+}
+
+iores
+jcntl::enqueue_txn_data_record(const void* const data_buff, const std::size_t tot_data_len,
+ const std::size_t this_data_len, data_tok* dtokp, const std::string& xid,
+ const bool transient)
+{
+ iores r;
+ check_wstatus("enqueue_tx_data_record");
+ {
+ slock s(_wr_mutex);
+ while (handle_aio_wait(_wmgr.enqueue(data_buff, tot_data_len, this_data_len, dtokp, xid.data(), xid.size(),
+ transient, false), r, dtokp)) ;
+ }
+ return r;
+}
+
+iores
+jcntl::enqueue_extern_txn_data_record(const std::size_t tot_data_len, data_tok* dtokp,
+ const std::string& xid, const bool transient)
+{
+ iores r;
+ check_wstatus("enqueue_extern_txn_data_record");
+ {
+ slock s(_wr_mutex);
+ while (handle_aio_wait(_wmgr.enqueue(0, tot_data_len, 0, dtokp, xid.data(), xid.size(), transient, true), r,
+ dtokp)) ;
+ }
+ return r;
+}
+
+/* TODO
+iores
+jcntl::get_data_record(const u_int64_t& rid, const std::size_t& dsize, const std::size_t& dsize_avail,
+ const void** const data, bool auto_discard)
+{
+ check_rstatus("get_data_record");
+ return _rmgr.get(rid, dsize, dsize_avail, data, auto_discard);
+} */
+
+/* TODO
+iores
+jcntl::discard_data_record(data_tok* const dtokp)
+{
+ check_rstatus("discard_data_record");
+ return _rmgr.discard(dtokp);
+} */
+
+iores
+jcntl::read_data_record(void** const datapp, std::size_t& dsize, void** const xidpp, std::size_t& xidsize,
+ bool& transient, bool& external, data_tok* const dtokp, bool ignore_pending_txns)
+{
+ check_rstatus("read_data");
+ iores res = _rmgr.read(datapp, dsize, xidpp, xidsize, transient, external, dtokp, ignore_pending_txns);
+ if (res == RHM_IORES_RCINVALID)
+ {
+ get_wr_events(0); // check for outstanding write events
+ iores sres = _rmgr.synchronize(); // flushes all outstanding read events
+ if (sres != RHM_IORES_SUCCESS)
+ return sres;
+ _rmgr.wait_for_validity(&_aio_cmpl_timeout, true); // throw if timeout occurs
+ res = _rmgr.read(datapp, dsize, xidpp, xidsize, transient, external, dtokp, ignore_pending_txns);
+ }
+ return res;
+}
+
+iores
+jcntl::dequeue_data_record(data_tok* const dtokp, const bool txn_coml_commit)
+{
+ iores r;
+ check_wstatus("dequeue_data");
+ {
+ slock s(_wr_mutex);
+ while (handle_aio_wait(_wmgr.dequeue(dtokp, 0, 0, txn_coml_commit), r, dtokp)) ;
+ }
+ return r;
+}
+
+iores
+jcntl::dequeue_txn_data_record(data_tok* const dtokp, const std::string& xid, const bool txn_coml_commit)
+{
+ iores r;
+ check_wstatus("dequeue_data");
+ {
+ slock s(_wr_mutex);
+ while (handle_aio_wait(_wmgr.dequeue(dtokp, xid.data(), xid.size(), txn_coml_commit), r, dtokp)) ;
+ }
+ return r;
+}
+
+iores
+jcntl::txn_abort(data_tok* const dtokp, const std::string& xid)
+{
+ iores r;
+ check_wstatus("txn_abort");
+ {
+ slock s(_wr_mutex);
+ while (handle_aio_wait(_wmgr.abort(dtokp, xid.data(), xid.size()), r, dtokp)) ;
+ }
+ return r;
+}
+
+iores
+jcntl::txn_commit(data_tok* const dtokp, const std::string& xid)
+{
+ iores r;
+ check_wstatus("txn_commit");
+ {
+ slock s(_wr_mutex);
+ while (handle_aio_wait(_wmgr.commit(dtokp, xid.data(), xid.size()), r, dtokp)) ;
+ }
+ return r;
+}
+
+bool
+jcntl::is_txn_synced(const std::string& xid)
+{
+ slock s(_wr_mutex);
+ bool res = _wmgr.is_txn_synced(xid);
+ return res;
+}
+
+int32_t
+jcntl::get_wr_events(timespec* const timeout)
+{
+ stlock t(_wr_mutex);
+ if (!t.locked())
+ return jerrno::LOCK_TAKEN;
+ int32_t res = _wmgr.get_events(pmgr::UNUSED, timeout);
+ return res;
+}
+
+int32_t
+jcntl::get_rd_events(timespec* const timeout)
+{
+ return _rmgr.get_events(pmgr::AIO_COMPLETE, timeout);
+}
+
+void
+jcntl::stop(const bool block_till_aio_cmpl)
+{
+ if (_readonly_flag)
+ check_rstatus("stop");
+ else
+ check_wstatus("stop");
+ _stop_flag = true;
+ if (!_readonly_flag)
+ flush(block_till_aio_cmpl);
+ _rrfc.finalize();
+ _lpmgr.finalize();
+}
+
+u_int16_t
+jcntl::get_earliest_fid()
+{
+ u_int16_t ffid = _wrfc.earliest_index();
+ u_int16_t fid = _wrfc.index();
+ while ( _emap.get_enq_cnt(ffid) == 0 && _tmap.get_txn_pfid_cnt(ffid) == 0 && ffid != fid)
+ {
+ if (++ffid >= _lpmgr.num_jfiles())
+ ffid = 0;
+ }
+ if (!_rrfc.is_active())
+ _rrfc.set_findex(ffid);
+ return ffid;
+}
+
+iores
+jcntl::flush(const bool block_till_aio_cmpl)
+{
+ if (!_init_flag)
+ return RHM_IORES_SUCCESS;
+ if (_readonly_flag)
+ throw jexception(jerrno::JERR_JCNTL_READONLY, "jcntl", "flush");
+ iores res;
+ {
+ slock s(_wr_mutex);
+ res = _wmgr.flush();
+ }
+ if (block_till_aio_cmpl)
+ aio_cmpl_wait();
+ return res;
+}
+
+void
+jcntl::log(log_level ll, const std::string& log_stmt) const
+{
+ log(ll, log_stmt.c_str());
+}
+
+void
+jcntl::log(log_level ll, const char* const log_stmt) const
+{
+ if (ll > LOG_INFO)
+ {
+ std::cout << log_level_str(ll) << ": Journal \"" << _jid << "\": " << log_stmt << std::endl;
+ }
+}
+
+void
+jcntl::chk_wr_frot()
+{
+ if (_wrfc.index() == _rrfc.index())
+ _rmgr.invalidate();
+}
+
+void
+jcntl::fhdr_wr_sync(const u_int16_t lid)
+{
+ fcntl* fcntlp = _lpmgr.get_fcntlp(lid);
+ while (fcntlp->wr_fhdr_aio_outstanding())
+ {
+ if (get_wr_events(&_aio_cmpl_timeout) == jerrno::AIO_TIMEOUT)
+ throw jexception(jerrno::JERR_JCNTL_AIOCMPLWAIT, "jcntl", "fhdr_wr_sync");
+ }
+}
+
+fcntl*
+jcntl::new_fcntl(jcntl* const jcp, const u_int16_t lid, const u_int16_t fid, const rcvdat* const rdp)
+{
+ if (!jcp) return 0;
+ std::ostringstream oss;
+ oss << jcp->jrnl_dir() << "/" << jcp->base_filename();
+ return new fcntl(oss.str(), fid, lid, jcp->jfsize_sblks(), rdp);
+}
+
+// Protected/Private functions
+
+void
+jcntl::check_wstatus(const char* fn_name) const
+{
+ if (!_init_flag)
+ throw jexception(jerrno::JERR__NINIT, "jcntl", fn_name);
+ if (_readonly_flag)
+ throw jexception(jerrno::JERR_JCNTL_READONLY, "jcntl", fn_name);
+ if (_stop_flag)
+ throw jexception(jerrno::JERR_JCNTL_STOPPED, "jcntl", fn_name);
+}
+
+void
+jcntl::check_rstatus(const char* fn_name) const
+{
+ if (!_init_flag)
+ throw jexception(jerrno::JERR__NINIT, "jcntl", fn_name);
+ if (_stop_flag)
+ throw jexception(jerrno::JERR_JCNTL_STOPPED, "jcntl", fn_name);
+}
+
+void
+jcntl::write_infofile() const
+{
+ timespec ts;
+ if (::clock_gettime(CLOCK_REALTIME, &ts))
+ {
+ std::ostringstream oss;
+ oss << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR__RTCLOCK, oss.str(), "jcntl", "write_infofile");
+ }
+ jinf ji(_jid, _jdir.dirname(), _base_filename, _lpmgr.num_jfiles(), _lpmgr.is_ae(), _lpmgr.ae_max_jfiles(),
+ _jfsize_sblks, _wmgr.cache_pgsize_sblks(), _wmgr.cache_num_pages(), ts);
+ ji.write();
+}
+
+void
+jcntl::aio_cmpl_wait()
+{
+ //while (_wmgr.get_aio_evt_rem())
+ while (true)
+ {
+ u_int32_t aer;
+ {
+ slock s(_wr_mutex);
+ aer = _wmgr.get_aio_evt_rem();
+ }
+ if (aer == 0) break; // no events left
+ if (get_wr_events(&_aio_cmpl_timeout) == jerrno::AIO_TIMEOUT)
+ throw jexception(jerrno::JERR_JCNTL_AIOCMPLWAIT, "jcntl", "aio_cmpl_wait");
+ }
+}
+
+bool
+jcntl::handle_aio_wait(const iores res, iores& resout, const data_tok* dtp)
+{
+ resout = res;
+ if (res == RHM_IORES_PAGE_AIOWAIT)
+ {
+ while (_wmgr.curr_pg_blocked())
+ {
+ if (_wmgr.get_events(pmgr::UNUSED, &_aio_cmpl_timeout) == jerrno::AIO_TIMEOUT)
+ {
+ std::ostringstream oss;
+ oss << "get_events() returned JERR_JCNTL_AIOCMPLWAIT; wmgr_status: " << _wmgr.status_str();
+ this->log(LOG_CRITICAL, oss.str());
+ throw jexception(jerrno::JERR_JCNTL_AIOCMPLWAIT, "jcntl", "handle_aio_wait");
+ }
+ }
+ return true;
+ }
+ else if (res == RHM_IORES_FILE_AIOWAIT)
+ {
+ while (_wmgr.curr_file_blocked())
+ {
+ if (_wmgr.get_events(pmgr::UNUSED, &_aio_cmpl_timeout) == jerrno::AIO_TIMEOUT)
+ {
+ std::ostringstream oss;
+ oss << "get_events() returned JERR_JCNTL_AIOCMPLWAIT; wmgr_status: " << _wmgr.status_str();
+ this->log(LOG_CRITICAL, oss.str());
+ throw jexception(jerrno::JERR_JCNTL_AIOCMPLWAIT, "jcntl", "handle_aio_wait");
+ }
+ }
+ _wrfc.wr_reset();
+ resout = RHM_IORES_SUCCESS;
+ data_tok::write_state ws = dtp->wstate();
+ return ws == data_tok::ENQ_PART || ws == data_tok::DEQ_PART || ws == data_tok::ABORT_PART ||
+ ws == data_tok::COMMIT_PART;
+ }
+ return false;
+}
+
+void
+jcntl::rcvr_janalyze(rcvdat& rd, const std::vector<std::string>* prep_txn_list_ptr)
+{
+ jinf ji(_jdir.dirname() + "/" + _base_filename + "." + JRNL_INFO_EXTENSION, true);
+
+ // If the number of files does not match that recorded in the jinf file of the journal being
+ // recovered, use the jinf data.
+ if (rd._njf != ji.num_jfiles())
+ {
+ std::ostringstream oss;
+ oss << "Recovery found " << ji.num_jfiles() <<
+ " files (different from --num-jfiles value of " << rd._njf << ").";
+ this->log(LOG_WARN, oss.str());
+ rd._njf = ji.num_jfiles();
+ _rcvdat._enq_cnt_list.resize(rd._njf);
+ }
+ _emap.set_num_jfiles(rd._njf);
+ _tmap.set_num_jfiles(rd._njf);
+ if (_jfsize_sblks != ji.jfsize_sblks())
+ {
+ std::ostringstream oss;
+ oss << "Recovery found file size = " << (ji.jfsize_sblks() / JRNL_RMGR_PAGE_SIZE) <<
+ " (different from --jfile-size-pgs value of " <<
+ (_jfsize_sblks / JRNL_RMGR_PAGE_SIZE) << ").";
+ this->log(LOG_WARN, oss.str());
+ _jfsize_sblks = ji.jfsize_sblks();
+ }
+ if (_jdir.dirname().compare(ji.jdir()))
+ {
+ std::ostringstream oss;
+ oss << "Journal file location change: original = \"" << ji.jdir() <<
+ "\"; current = \"" << _jdir.dirname() << "\"";
+ this->log(LOG_WARN, oss.str());
+ ji.set_jdir(_jdir.dirname());
+ }
+
+ try
+ {
+ rd._ffid = ji.get_first_pfid();
+ rd._lfid = ji.get_last_pfid();
+ rd._owi = ji.get_initial_owi();
+ rd._frot = ji.get_frot();
+ rd._jempty = false;
+ ji.get_normalized_pfid_list(rd._fid_list); // _pfid_list
+ }
+ catch (const jexception& e)
+ {
+ if (e.err_code() != jerrno::JERR_JINF_JDATEMPTY) throw;
+ }
+
+ // Restore all read and write pointers and transactions
+ if (!rd._jempty)
+ {
+ u_int16_t fid = rd._ffid;
+ std::ifstream ifs;
+ bool lowi = rd._owi; // local copy of owi to be used during analysis
+ while (rcvr_get_next_record(fid, &ifs, lowi, rd)) ;
+ if (ifs.is_open()) ifs.close();
+
+ // Remove all txns from tmap that are not in the prepared list
+ if (prep_txn_list_ptr)
+ {
+ std::vector<std::string> xid_list;
+ _tmap.xid_list(xid_list);
+ for (std::vector<std::string>::iterator itr = xid_list.begin(); itr != xid_list.end(); itr++)
+ {
+ std::vector<std::string>::const_iterator pitr =
+ std::find(prep_txn_list_ptr->begin(), prep_txn_list_ptr->end(), *itr);
+ if (pitr == prep_txn_list_ptr->end()) // not found in prepared list
+ {
+ txn_data_list tdl = _tmap.get_remove_tdata_list(*itr); // tdl will be empty if xid not found
+ // Unlock any affected enqueues in emap
+ for (tdl_itr i=tdl.begin(); i<tdl.end(); i++)
+ {
+ if (i->_enq_flag) // enq op - decrement enqueue count
+ rd._enq_cnt_list[i->_pfid]--;
+ else if (_emap.is_enqueued(i->_drid, true)) // deq op - unlock enq record
+ {
+ int16_t ret = _emap.unlock(i->_drid);
+ if (ret < enq_map::EMAP_OK) // fail
+ {
+ // enq_map::unlock()'s only error is enq_map::EMAP_RID_NOT_FOUND
+ std::ostringstream oss;
+ oss << std::hex << "_emap.unlock(): drid=0x\"" << i->_drid;
+ throw jexception(jerrno::JERR_MAP_NOTFOUND, oss.str(), "jcntl", "rcvr_janalyze");
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Check for file full condition - add one to _jfsize_sblks to account for file header
+ rd._lffull = rd._eo == (1 + _jfsize_sblks) * JRNL_SBLK_SIZE * JRNL_DBLK_SIZE;
+
+ // Check for journal full condition
+ u_int16_t next_wr_fid = (rd._lfid + 1) % rd._njf;
+ rd._jfull = rd._ffid == next_wr_fid && rd._enq_cnt_list[next_wr_fid] && rd._lffull;
+ }
+}
+
+bool
+jcntl::rcvr_get_next_record(u_int16_t& fid, std::ifstream* ifsp, bool& lowi, rcvdat& rd)
+{
+ std::size_t cum_size_read = 0;
+ void* xidp = 0;
+ rec_hdr h;
+
+ bool hdr_ok = false;
+ std::streampos file_pos;
+ while (!hdr_ok)
+ {
+ if (!ifsp->is_open())
+ {
+ if (!jfile_cycle(fid, ifsp, lowi, rd, true))
+ return false;
+ }
+ file_pos = ifsp->tellg();
+ ifsp->read((char*)&h, sizeof(rec_hdr));
+ if (ifsp->gcount() == sizeof(rec_hdr))
+ hdr_ok = true;
+ else
+ {
+ if (!jfile_cycle(fid, ifsp, lowi, rd, true))
+ return false;
+ }
+ }
+
+ switch(h._magic)
+ {
+ case RHM_JDAT_ENQ_MAGIC:
+ {
+ enq_rec er;
+ u_int16_t start_fid = fid; // fid may increment in decode() if record folds over file boundary
+ if (!decode(er, fid, ifsp, cum_size_read, h, lowi, rd, file_pos))
+ return false;
+ if (!er.is_transient()) // Ignore transient msgs
+ {
+ rd._enq_cnt_list[start_fid]++;
+ if (er.xid_size())
+ {
+ er.get_xid(&xidp);
+ assert(xidp != 0);
+ std::string xid((char*)xidp, er.xid_size());
+ _tmap.insert_txn_data(xid, txn_data(h._rid, 0, start_fid, true));
+ if (_tmap.set_aio_compl(xid, h._rid) < txn_map::TMAP_OK) // fail - xid or rid not found
+ {
+ std::ostringstream oss;
+ oss << std::hex << "_tmap.set_aio_compl: txn_enq xid=\"" << xid << "\" rid=0x" << h._rid;
+ throw jexception(jerrno::JERR_MAP_NOTFOUND, oss.str(), "jcntl", "rcvr_get_next_record");
+ }
+ std::free(xidp);
+ }
+ else
+ {
+ if (_emap.insert_pfid(h._rid, start_fid) < enq_map::EMAP_OK) // fail
+ {
+ // The only error code emap::insert_pfid() returns is enq_map::EMAP_DUP_RID.
+ std::ostringstream oss;
+ oss << std::hex << "rid=0x" << h._rid << " _pfid=0x" << start_fid;
+ throw jexception(jerrno::JERR_MAP_DUPLICATE, oss.str(), "jcntl", "rcvr_get_next_record");
+ }
+ }
+ }
+ }
+ break;
+ case RHM_JDAT_DEQ_MAGIC:
+ {
+ deq_rec dr;
+ u_int16_t start_fid = fid; // fid may increment in decode() if record folds over file boundary
+ if (!decode(dr, fid, ifsp, cum_size_read, h, lowi, rd, file_pos))
+ return false;
+ if (dr.xid_size())
+ {
+ // If the enqueue is part of a pending txn, it will not yet be in emap
+ _emap.lock(dr.deq_rid()); // ignore not found error
+ dr.get_xid(&xidp);
+ assert(xidp != 0);
+ std::string xid((char*)xidp, dr.xid_size());
+ _tmap.insert_txn_data(xid, txn_data(dr.rid(), dr.deq_rid(), start_fid, false,
+ dr.is_txn_coml_commit()));
+ if (_tmap.set_aio_compl(xid, dr.rid()) < txn_map::TMAP_OK) // fail - xid or rid not found
+ {
+ std::ostringstream oss;
+ oss << std::hex << "_tmap.set_aio_compl: txn_deq xid=\"" << xid << "\" rid=0x" << dr.rid();
+ throw jexception(jerrno::JERR_MAP_NOTFOUND, oss.str(), "jcntl", "rcvr_get_next_record");
+ }
+ std::free(xidp);
+ }
+ else
+ {
+ int16_t enq_fid = _emap.get_remove_pfid(dr.deq_rid(), true);
+ if (enq_fid >= enq_map::EMAP_OK) // ignore not found error
+ rd._enq_cnt_list[enq_fid]--;
+ }
+ }
+ break;
+ case RHM_JDAT_TXA_MAGIC:
+ {
+ txn_rec ar;
+ if (!decode(ar, fid, ifsp, cum_size_read, h, lowi, rd, file_pos))
+ return false;
+ // Delete this txn from tmap, unlock any locked records in emap
+ ar.get_xid(&xidp);
+ assert(xidp != 0);
+ std::string xid((char*)xidp, ar.xid_size());
+ txn_data_list tdl = _tmap.get_remove_tdata_list(xid); // tdl will be empty if xid not found
+ for (tdl_itr itr = tdl.begin(); itr != tdl.end(); itr++)
+ {
+ if (itr->_enq_flag)
+ rd._enq_cnt_list[itr->_pfid]--;
+ else
+ _emap.unlock(itr->_drid); // ignore not found error
+ }
+ std::free(xidp);
+ }
+ break;
+ case RHM_JDAT_TXC_MAGIC:
+ {
+ txn_rec cr;
+ if (!decode(cr, fid, ifsp, cum_size_read, h, lowi, rd, file_pos))
+ return false;
+ // Delete this txn from tmap, process records into emap
+ cr.get_xid(&xidp);
+ assert(xidp != 0);
+ std::string xid((char*)xidp, cr.xid_size());
+ txn_data_list tdl = _tmap.get_remove_tdata_list(xid); // tdl will be empty if xid not found
+ for (tdl_itr itr = tdl.begin(); itr != tdl.end(); itr++)
+ {
+ if (itr->_enq_flag) // txn enqueue
+ {
+ if (_emap.insert_pfid(itr->_rid, itr->_pfid) < enq_map::EMAP_OK) // fail
+ {
+ // The only error code emap::insert_pfid() returns is enq_map::EMAP_DUP_RID.
+ std::ostringstream oss;
+ oss << std::hex << "rid=0x" << itr->_rid << " _pfid=0x" << itr->_pfid;
+ throw jexception(jerrno::JERR_MAP_DUPLICATE, oss.str(), "jcntl", "rcvr_get_next_record");
+ }
+ }
+ else // txn dequeue
+ {
+ int16_t enq_fid = _emap.get_remove_pfid(itr->_drid, true);
+ if (enq_fid >= enq_map::EMAP_OK)
+ rd._enq_cnt_list[enq_fid]--;
+ }
+ }
+ std::free(xidp);
+ }
+ break;
+ case RHM_JDAT_EMPTY_MAGIC:
+ {
+ u_int32_t rec_dblks = jrec::size_dblks(sizeof(rec_hdr));
+ ifsp->ignore(rec_dblks * JRNL_DBLK_SIZE - sizeof(rec_hdr));
+ assert(!ifsp->fail() && !ifsp->bad());
+ if (!jfile_cycle(fid, ifsp, lowi, rd, false))
+ return false;
+ }
+ break;
+ case 0:
+ check_journal_alignment(fid, file_pos, rd);
+ return false;
+ default:
+ // Stop as this is the overwrite boundary.
+ check_journal_alignment(fid, file_pos, rd);
+ return false;
+ }
+ return true;
+}
+
+bool
+jcntl::decode(jrec& rec, u_int16_t& fid, std::ifstream* ifsp, std::size_t& cum_size_read,
+ rec_hdr& h, bool& lowi, rcvdat& rd, std::streampos& file_offs)
+{
+ u_int16_t start_fid = fid;
+ std::streampos start_file_offs = file_offs;
+ if (!check_owi(fid, h, lowi, rd, file_offs))
+ return false;
+ bool done = false;
+ while (!done)
+ {
+ try { done = rec.rcv_decode(h, ifsp, cum_size_read); }
+ catch (const jexception& e)
+ {
+// TODO - review this logic and tidy up how rd._lfid is assigned. See new jinf.get_end_file() fn.
+// Original
+// if (e.err_code() != jerrno::JERR_JREC_BADRECTAIL ||
+// fid != (rd._ffid ? rd._ffid - 1 : _num_jfiles - 1)) throw;
+// Tried this, but did not work
+// if (e.err_code() != jerrno::JERR_JREC_BADRECTAIL || h._magic != 0) throw;
+ check_journal_alignment(start_fid, start_file_offs, rd);
+// rd._lfid = start_fid;
+ return false;
+ }
+ if (!done && !jfile_cycle(fid, ifsp, lowi, rd, false))
+ {
+ check_journal_alignment(start_fid, start_file_offs, rd);
+ return false;
+ }
+ }
+ return true;
+}
+
+bool
+jcntl::jfile_cycle(u_int16_t& fid, std::ifstream* ifsp, bool& lowi, rcvdat& rd, const bool jump_fro)
+{
+ if (ifsp->is_open())
+ {
+ if (ifsp->eof() || !ifsp->good())
+ {
+ ifsp->clear();
+ rd._eo = ifsp->tellg(); // remember file offset before closing
+ assert(rd._eo != std::numeric_limits<std::size_t>::max()); // Check for error code -1
+ ifsp->close();
+ if (++fid >= rd._njf)
+ {
+ fid = 0;
+ lowi = !lowi; // Flip local owi
+ }
+ if (fid == rd._ffid) // used up all journal files
+ return false;
+ }
+ }
+ if (!ifsp->is_open())
+ {
+ std::ostringstream oss;
+ oss << _jdir.dirname() << "/" << _base_filename << ".";
+ oss << std::hex << std::setfill('0') << std::setw(4) << fid << "." << JRNL_DATA_EXTENSION;
+ ifsp->clear(); // clear eof flag, req'd for older versions of c++
+ ifsp->open(oss.str().c_str(), std::ios_base::in | std::ios_base::binary);
+ if (!ifsp->good())
+ throw jexception(jerrno::JERR__FILEIO, oss.str(), "jcntl", "jfile_cycle");
+
+ // Read file header
+ file_hdr fhdr;
+ ifsp->read((char*)&fhdr, sizeof(fhdr));
+ assert(ifsp->good());
+ if (fhdr._magic == RHM_JDAT_FILE_MAGIC)
+ {
+ assert(fhdr._lfid == fid);
+ if (!rd._fro)
+ rd._fro = fhdr._fro;
+ std::streamoff foffs = jump_fro ? fhdr._fro : JRNL_DBLK_SIZE * JRNL_SBLK_SIZE;
+ ifsp->seekg(foffs);
+ }
+ else
+ {
+ ifsp->close();
+ return false;
+ }
+ }
+ return true;
+}
+
+bool
+jcntl::check_owi(const u_int16_t fid, rec_hdr& h, bool& lowi, rcvdat& rd, std::streampos& file_pos)
+{
+ if (rd._ffid ? h.get_owi() == lowi : h.get_owi() != lowi) // Overwrite indicator changed
+ {
+ u_int16_t expected_fid = rd._ffid ? rd._ffid - 1 : rd._njf - 1;
+ if (fid == expected_fid)
+ {
+ check_journal_alignment(fid, file_pos, rd);
+ return false;
+ }
+ std::ostringstream oss;
+ oss << std::hex << std::setfill('0') << "Magic=0x" << std::setw(8) << h._magic;
+ oss << " fid=0x" << std::setw(4) << fid << " rid=0x" << std::setw(8) << h._rid;
+ oss << " foffs=0x" << std::setw(8) << file_pos;
+ oss << " expected_fid=0x" << std::setw(4) << expected_fid;
+ throw jexception(jerrno::JERR_JCNTL_OWIMISMATCH, oss.str(), "jcntl",
+ "check_owi");
+ }
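+ // Track the highest rid seen so far. The subtraction below is an RFC 1982
+ // serial-number comparison: h._rid is considered more recent than rd._h_rid when
+ // (h._rid - rd._h_rid) mod 2^64 is less than 2^63. Illustrative example (values
+ // chosen only to show the wrap-around case): if rd._h_rid == 0xfffffffffffffffe
+ // and h._rid == 0x1, then h._rid - rd._h_rid == 0x3 < 2^63, so h._rid is taken
+ // as the higher rid despite the numeric wrap.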
+ if (rd._h_rid == 0)
+ rd._h_rid = h._rid;
+ else if (h._rid - rd._h_rid < 0x8000000000000000ULL) // RFC 1982 comparison for unsigned 64-bit
+ rd._h_rid = h._rid;
+ return true;
+}
+
+
+void
+jcntl::check_journal_alignment(const u_int16_t fid, std::streampos& file_pos, rcvdat& rd)
+{
+ unsigned sblk_offs = file_pos % (JRNL_DBLK_SIZE * JRNL_SBLK_SIZE);
+ if (sblk_offs)
+ {
+ {
+ std::ostringstream oss;
+ oss << std::hex << "Bad record alignment found at fid=0x" << fid;
+ oss << " offs=0x" << file_pos << " (likely journal overwrite boundary); " << std::dec;
+ oss << (JRNL_SBLK_SIZE - (sblk_offs/JRNL_DBLK_SIZE)) << " filler record(s) required.";
+ this->log(LOG_WARN, oss.str());
+ }
+ const u_int32_t xmagic = RHM_JDAT_EMPTY_MAGIC;
+ std::ostringstream oss;
+ oss << _jdir.dirname() << "/" << _base_filename << ".";
+ oss << std::hex << std::setfill('0') << std::setw(4) << fid << "." << JRNL_DATA_EXTENSION;
+ std::ofstream ofsp(oss.str().c_str(),
+ std::ios_base::in | std::ios_base::out | std::ios_base::binary);
+ if (!ofsp.good())
+ throw jexception(jerrno::JERR__FILEIO, oss.str(), "jcntl", "check_journal_alignment");
+ ofsp.seekp(file_pos);
+ void* buff = std::malloc(JRNL_DBLK_SIZE);
+ assert(buff != 0);
+ std::memcpy(buff, (const void*)&xmagic, sizeof(xmagic));
+ // Normally, RHM_CLEAN must be set before these fills are done, but this is a recover
+ // situation (i.e. performance is not an issue), and it makes the location of the write
+ // clear should inspection of the file be required.
+ std::memset((char*)buff + sizeof(xmagic), RHM_CLEAN_CHAR, JRNL_DBLK_SIZE - sizeof(xmagic));
+
+ while (file_pos % (JRNL_DBLK_SIZE * JRNL_SBLK_SIZE))
+ {
+ ofsp.write((const char*)buff, JRNL_DBLK_SIZE);
+ assert(!ofsp.fail());
+ std::ostringstream oss;
+ oss << std::hex << "Recover phase write: Wrote filler record: fid=0x" << fid << " offs=0x" << file_pos;
+ this->log(LOG_NOTICE, oss.str());
+ file_pos = ofsp.tellp();
+ }
+ ofsp.close();
+ std::free(buff);
+ rd._lfid = fid;
+ if (!rd._frot)
+ rd._ffid = (fid + 1) % rd._njf;
+ this->log(LOG_INFO, "Bad record alignment fixed.");
+ }
+ rd._eo = file_pos;
+}
+
+} // namespace journal
+} // namespace mrg
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/jcntl.h b/qpid/cpp/src/qpid/legacystore/jrnl/jcntl.h
new file mode 100644
index 0000000000..294e9ced05
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/jcntl.h
@@ -0,0 +1,722 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file jcntl.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * Messaging journal top-level control and interface class
+ * mrg::journal::jcntl. See class documentation for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_JCNTL_H
+#define QPID_LEGACYSTORE_JRNL_JCNTL_H
+
+namespace mrg
+{
+namespace journal
+{
+ class jcntl;
+}
+}
+
+#include <cstddef>
+#include <deque>
+#include "qpid/legacystore/jrnl/jdir.h"
+#include "qpid/legacystore/jrnl/fcntl.h"
+#include "qpid/legacystore/jrnl/lpmgr.h"
+#include "qpid/legacystore/jrnl/rcvdat.h"
+#include "qpid/legacystore/jrnl/slock.h"
+#include "qpid/legacystore/jrnl/smutex.h"
+#include "qpid/legacystore/jrnl/rmgr.h"
+#include "qpid/legacystore/jrnl/wmgr.h"
+#include "qpid/legacystore/jrnl/wrfc.h"
+
+namespace mrg
+{
+namespace journal
+{
+
+ /**
+ * \brief Access and control interface for the journal. This is the top-level class for the
+ * journal.
+ *
+ * This is the top-level journal class; one instance of this class controls one instance of the
+ * journal and all its files and associated control structures. Besides this class, the only
+ * other class that needs to be used at a higher level is the data_tok class, one instance of
+ * which is used per data block written to the journal, and is used to track its status through
+ * the AIO enqueue, read and dequeue process.
+ */
+ class jcntl
+ {
+ protected:
+ /**
+ * \brief Journal ID
+ *
+ * This string uniquely identifies this journal instance. It will most likely be associated
+ * with the identity of the message queue with which it is associated.
+ */
+ // TODO: This is not included in any files at present, add to file_hdr?
+ std::string _jid;
+
+ /**
+ * \brief Journal directory
+ *
+ * This string stores the path to the journal directory. It may be absolute or relative, and
+ * should not end in a file separator character. (e.g. "/fastdisk/jdata" is correct,
+ * "/fastdisk/jdata/" is not.)
+ */
+ jdir _jdir;
+
+ /**
+ * \brief Base filename
+ *
+ * This string contains the base filename used for the journal files. The filenames will
+ * start with this base, and have various sections added to it to derive the final file names
+ * that will be written to disk. No file separator characters should be included here, but
+ * all other legal filename characters are valid.
+ */
+ std::string _base_filename;
+
+ /**
+ * \brief Initialized flag
+ *
+ * This flag starts out false and is set to true once this object has been initialized
+ * by calling either initialize() or recover().
+ */
+ bool _init_flag;
+
+ /**
+ * \brief Stopped flag
+ *
+ * This flag starts out false, and is set to true when stop() is called. At this point, the
+ * journal will no longer accept messages until either initialize() or recover() is called.
+ * There is no way other than through initialization to reset this flag.
+ */
+ // TODO: It would be helpful to distinguish between states stopping and stopped. If stop(true) is called,
+ // then we are stopping, but must wait for all outstanding aios to return before being finally stopped. During
+ // this period, however, no new enqueue/dequeue/read requests may be accepted.
+ bool _stop_flag;
+
+ /**
+ * \brief Read-only state flag used during recover.
+ *
+ * When true, this flag prevents journal write operations (enqueue and dequeue), but
+ * allows read to occur. It is used during recovery, and is reset when
+ * recover_complete() is called.
+ */
+ bool _readonly_flag;
+
+ /**
+ * \brief If set, calls stop() if the journal write pointer overruns the dequeue low water
+ * marker. If not set, then attempts to write will throw exceptions until the journal
+ * file low water marker moves to the next journal file.
+ */
+ bool _autostop; ///< Autostop flag - stops journal when overrun occurs
+
+ // Journal control structures
+ u_int32_t _jfsize_sblks; ///< Journal file size in sblks
+ lpmgr _lpmgr; ///< LFID-PFID manager tracks inserted journal files
+ enq_map _emap; ///< Enqueue map for low water mark management
+ txn_map _tmap; ///< Transaction map of open transactions
+ rrfc _rrfc; ///< Read journal rotating file controller
+ wrfc _wrfc; ///< Write journal rotating file controller
+ rmgr _rmgr; ///< Read page manager which manages AIO
+ wmgr _wmgr; ///< Write page manager which manages AIO
+ rcvdat _rcvdat; ///< Recovery data used for recovery
+ smutex _wr_mutex; ///< Mutex for journal writes
+
+ public:
+ static timespec _aio_cmpl_timeout; ///< Timeout for blocking libaio returns
+ static timespec _final_aio_cmpl_timeout; ///< Timeout for blocking libaio returns when stopping or finalizing
+
+ /**
+ * \brief Journal constructor.
+ *
+ * Constructor which sets the physical file location and base name.
+ *
+ * \param jid A unique identifier for this journal instance.
+ * \param jdir The directory which will contain the journal files.
+ * \param base_filename The string which will be used to start all journal filenames.
+ */
+ jcntl(const std::string& jid, const std::string& jdir, const std::string& base_filename);
+
+ /**
+ * \brief Destructor.
+ */
+ virtual ~jcntl();
+
+ inline const std::string& id() const { return _jid; }
+ inline const std::string& jrnl_dir() const { return _jdir.dirname(); }
+
+ /**
+ * \brief Initialize the journal for storing data.
+ *
+ * Initialize the journal by creating new journal data files and initializing internal
+ * control structures. When complete, the journal will be empty, and ready to store data.
+ *
+ * <b>NOTE: Any existing journal will be ignored by this operation.</b> To use recover
+ * the data from an existing journal, use recover().
+ *
+ * <b>NOTE: If <i>NULL</i> is passed to the deque pointers, they will be internally created
+ * and deleted.</b>
+ *
+ * <b>NOTE: If <i>NULL</i> is passed to the callbacks, internal default callbacks will be
+ * used.</b>
+ *
+ * \param num_jfiles The number of journal files to be created.
+ * \param auto_expand If true, allows journal file auto-expansion. In this mode, the journal will automatically
+ * add files to the journal if it runs out of space. No more than ae_max_jfiles may be added. If false, then
+ * no files are added and an exception will be thrown if the journal runs out of file space.
+ * \param ae_max_jfiles Upper limit of journal files for auto-expand mode. When auto_expand is true, this is the
+ * maximum total number of files allowed in the journal (original plus those added by auto-expand mode). If
+ * this number of files exist and the journal runs out of space, an exception will be thrown. This number
+ * must be greater than the num_jfiles parameter value but cannot exceed the maximum number of files for a
+ * single journal; if num_jfiles is already at its maximum value, then auto-expand will be disabled.
+ * \param jfsize_sblks The size of each journal file expressed in softblocks.
+ * \param wcache_num_pages The number of write cache pages to create.
+ * \param wcache_pgsize_sblks The size in sblks of each write cache page.
+ * \param cbp Pointer to object containing callback functions for read and write operations. May be 0 (NULL).
+ *
+ * \exception TODO
+ */
+ void initialize(const u_int16_t num_jfiles, const bool auto_expand, const u_int16_t ae_max_jfiles,
+ const u_int32_t jfsize_sblks, const u_int16_t wcache_num_pages, const u_int32_t wcache_pgsize_sblks,
+ aio_callback* const cbp);
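+
+ /* Illustrative usage sketch; the journal identity, directory path and geometry values
+ * below are example assumptions, not prescribed defaults:
+ * <pre>
+ * using namespace mrg::journal;
+ * jcntl jrnl("queue01", "/var/lib/qpidd/jdata", "JournalData");
+ * jrnl.initialize(8,      // num_jfiles
+ *                 false,  // auto_expand disabled
+ *                 0,      // ae_max_jfiles
+ *                 3072,   // jfsize_sblks
+ *                 32,     // wcache_num_pages
+ *                 128,    // wcache_pgsize_sblks
+ *                 0);     // cbp - may be 0 (NULL), as noted above
+ * </pre>
+ */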
+
+ /**
+ * \brief Initialize the journal by recovering state from a previously written journal.
+ *
+ * Initialize the journal by recovering state from a previously written journal. The journal files
+ * are analyzed, and all records that have not been dequeued and that remain in the journal
+ * will be available for reading. The journal is placed in a read-only state until
+ * recover_complete() is called; any calls to enqueue or dequeue will fail with an exception
+ * in this state.
+ *
+ * <b>NOTE: If <i>NULL</i> is passed to the deque pointers, they will be internally created
+ * and deleted.</b>
+ *
+ * <b>NOTE: If <i>NULL</i> is passed to the callbacks, internal default callbacks will be
+ * used.</b>
+ *
+ * \param num_jfiles The number of journal files to be created.
+ * \param auto_expand If true, allows journal file auto-expansion. In this mode, the journal will automatically
+ * add files to the journal if it runs out of space. No more than ae_max_jfiles may be added. If false, then
+ * no files are added and an exception will be thrown if the journal runs out of file space.
+ * \param ae_max_jfiles Upper limit of journal files for auto-expand mode. When auto_expand is true, this is the
+ * maximum total number of files allowed in the journal (original plus those added by auto-expand mode). If
+ * this number of files exist and the journal runs out of space, an exception will be thrown. This number
+ * must be greater than the num_jfiles parameter value but cannot exceed the maximum number of files for a
+ * single journal; if num_jfiles is already at its maximum value, then auto-expand will be disabled.
+ * \param jfsize_sblks The size of each journal file expressed in softblocks.
+ * \param wcache_num_pages The number of write cache pages to create.
+ * \param wcache_pgsize_sblks The size in sblks of each write cache page.
+ * \param cbp Pointer to object containing callback functions for read and write operations. May be 0 (NULL).
+ * \param prep_txn_list_ptr
+ * \param highest_rid Returns the highest rid found in the journal during recover
+ *
+ * \exception TODO
+ */
+ void recover(const u_int16_t num_jfiles, const bool auto_expand, const u_int16_t ae_max_jfiles,
+ const u_int32_t jfsize_sblks, const u_int16_t wcache_num_pages, const u_int32_t wcache_pgsize_sblks,
+ aio_callback* const cbp, const std::vector<std::string>* prep_txn_list_ptr, u_int64_t& highest_rid);
+
+ /**
+ * \brief Notification to the journal that recovery is complete and that normal operation
+ * may resume.
+ *
+ * This call notifies the journal that recovery is complete and that normal operation
+ * may resume. The read pointers are reset so that all records read as a part of recover
+ * may be re-read during normal operation. The read-only flag is then reset, allowing
+ * enqueue and dequeue operations to resume.
+ *
+ * \exception TODO
+ */
+ void recover_complete();
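+
+ /* Illustrative recovery sketch; jrnl is a constructed jcntl instance (as in the
+ * initialize() sketch above), and it is assumed the journal was created with the same
+ * geometry used here and that no prepared transactions need to be preserved:
+ * <pre>
+ * u_int64_t highest_rid = 0;
+ * jrnl.recover(8, false, 0, 3072, 32, 128, 0, 0, highest_rid);
+ * // ... optionally read back undequeued records with read_data_record() ...
+ * jrnl.recover_complete();   // leaves read-only mode; enqueue/dequeue allowed again
+ * </pre>
+ */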
+
+ /**
+ * \brief Stops journal and deletes all journal files.
+ *
+ * Clear the journal directory of all journal files matching the base filename.
+ *
+ * \exception TODO
+ */
+ void delete_jrnl_files();
+
+ /**
+ * \brief Enqueue data.
+ *
+ * Enqueue data or part thereof. If a large data block is being written, then it may be
+ * enqueued in parts by setting this_data_len to the size of the data being written in this
+ * call. The total data size must be known in advance, however, as this is written into the
+ * record header on the first record write. The state of the write (i.e. how much has been
+ * written so far) is maintained in the data token dtokp. Partial writes will return in state
+ * ENQ_PART.
+ *
+ * Note that a return value of anything other than RHM_IORES_SUCCESS implies that this write
+ * operation did not complete successfully or was partially completed. The action taken under
+ * these conditions depends on the value of the return. For example, RHM_IORES_AIO_WAIT
+ * implies that all pages in the write page cache are waiting for AIO operations to return,
+ * and that the call should be remade after waiting a bit.
+ *
+ * Example: If a write of 99 kB is divided into three equal parts, then the following states
+ * and returns would characterize a successful operation:
+ * <pre>
+ *                            dtok.    dtok.   dtok.
+ * Operation          Return   wstate() dsize() written() Comment
+ * -----------------+--------+--------+-------+---------+------------------------------------
+ *                            NONE           0         0 Value of dtok before op
+ * edr(99000, 33000) SUCCESS  ENQ_PART   99000     33000 Enqueue part 1
+ * edr(99000, 33000) AIO_WAIT ENQ_PART   99000     50000 Enqueue part 2, not completed
+ * edr(99000, 33000) SUCCESS  ENQ_PART   99000     66000 Enqueue part 2 again
+ * edr(99000, 33000) SUCCESS  ENQ        99000     99000 Enqueue part 3
+ * </pre>
+ *
+ * \param data_buff Pointer to data to be enqueued for this enqueue operation.
+ * \param tot_data_len Total data length.
+ * \param this_data_len Amount to be written in this enqueue operation.
+ * \param dtokp Pointer to data token which contains the details of the enqueue operation.
+ * \param transient Flag indicating transient persistence (ie, ignored on recover).
+ *
+ * \exception TODO
+ */
+ iores enqueue_data_record(const void* const data_buff, const std::size_t tot_data_len,
+ const std::size_t this_data_len, data_tok* dtokp, const bool transient = false);
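+
+ /* Illustrative sketch of the three-part 99 kB enqueue tabulated above; the payload buffer,
+ * the default-constructed data_tok and the busy-retry on a non-SUCCESS return are
+ * assumptions made for brevity (jrnl is an initialized jcntl instance):
+ * <pre>
+ * std::vector<char> payload(99000, 'x');
+ * data_tok dtok;
+ * for (std::size_t offs = 0; offs < payload.size(); offs += 33000)
+ *     while (jrnl.enqueue_data_record(&payload[offs], payload.size(), 33000, &dtok)
+ *             != RHM_IORES_SUCCESS)
+ *         ;   // e.g. an AIO-wait result: back off briefly and retry this part
+ * </pre>
+ */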
+
+ iores enqueue_extern_data_record(const std::size_t tot_data_len, data_tok* dtokp,
+ const bool transient = false);
+
+ /**
+ * \brief Enqueue data.
+ *
+ * \param data_buff Pointer to data to be enqueued for this enqueue operation.
+ * \param tot_data_len Total data length.
+ * \param this_data_len Amount to be written in this enqueue operation.
+ * \param dtokp Pointer to data token which contains the details of the enqueue operation.
+ * \param xid String containing xid. An empty string (i.e. length=0) will be considered
+ * non-transactional.
+ * \param transient Flag indicating transient persistence (ie, ignored on recover).
+ *
+ * \exception TODO
+ */
+ iores enqueue_txn_data_record(const void* const data_buff, const std::size_t tot_data_len,
+ const std::size_t this_data_len, data_tok* dtokp, const std::string& xid,
+ const bool transient = false);
+ iores enqueue_extern_txn_data_record(const std::size_t tot_data_len, data_tok* dtokp,
+ const std::string& xid, const bool transient = false);
+
+ /* TODO
+ **
+ * \brief Retrieve details of next record to be read without consuming the record.
+ *
+ * Retrieve information about current read record. A pointer to the data is returned, along
+ * with the data size and available data size. Data is considered "available" when the AIO
+ * operations to fill page-cache pages from disk have returned, and is ready for consumption.
+ *
+ * If <i>dsize_avail</i> &lt; <i>dsize</i>, then not all of the data is available or part of
+ * the data is in non-contiguous memory, and a subsequent call will update both the pointer
+ * and <i>dsize_avail</i> if more pages have returned from AIO.
+ *
+ * The <i>dsize_avail</i> parameter will return the amount of data from this record that is
+ * available in the page cache as contiguous memory, even if it spans page cache boundaries.
+ * However, if a record spans the end of the page cache and continues at the beginning, even
+ * if both parts are ready for consumption, then this must be divided into at least two
+ * get_data_record() operations, as the data is contained in at least two non-contiguous
+ * segments of the page cache.
+ *
+ * Once all the available data for a record is exposed, it can not be read again using
+ * this function. It must be consumed prior to getting the next record. This can be done by
+ * calling discard_data_record() or read_data_record(). However, if parameter
+ * <i>auto_discard</i> is set to <b><i>true</i></b>, then this record will be automatically
+ * consumed when the entire record has become available without having to explicitly call
+ * discard_next_data_record() or read_data_record().
+ *
+ * If the current record is an open transactional record, then it cannot be read until it is
+ * committed. If it is aborted, it can never be read. Under this condition, get_data_record()
+ * will return RHM_IORES_TXPENDING, the data pointer will be set to NULL and all data
+ * lengths will be set to 0.
+ *
+ * Example: Read a record of 30k. Assume a read page cache of 10 pages of size 10k starting
+ * at address base_ptr (page0 = base_ptr, page1 = page_ptr+10k, etc.). The first 15k of
+ * the record falls at the end of the page cache, the remaining 15k folded to the beginning.
+ * The current page (page 8) containing 5k is available, the remaining pages which contain
+ * this record are pending AIO return:
+ * <pre>
+ * call       dsize
+ * no.  dsize avail data ptr    Return   Comment
+ * ----+-----+-----+------------+--------+--------------------------------------------------
+ *   1    30k    5k base_ptr+85k SUCCESS  Initial call, read first 5k
+ *   2    30k    0k base_ptr+90k AIO_WAIT AIO still pending; no further pages avail
+ *   3    30k   10k base_ptr+90k SUCCESS  AIO now returned; now read till end of page cache
+ *   4    30k   15k base_ptr     SUCCESS  data_ptr now pointing to start of page cache
+ * </pre>
+ *
+ * \param rid Reference that returns the record ID (rid)
+ * \param dsize Reference that returns the total data size of the record data .
+ * \param dsize_avail Reference that returns the amount of the data that is available for
+ * consumption.
+ * \param data Pointer to data pointer which will point to the first byte of the next record
+ * data.
+ * \param auto_discard If <b><i>true</i></b>, automatically discard the record being read if
+ * the entire record is available (i.e. dsize == dsize_avail). Otherwise
+ * discard_next_data_record() must be explicitly called.
+ *
+ * \exception TODO
+ *
+ // *** NOT YET IMPLEMENTED ***
+ iores get_data_record(const u_int64_t& rid, const std::size_t& dsize,
+ const std::size_t& dsize_avail, const void** const data, bool auto_discard = false);
+ */
+
+ /* TODO
+ **
+ * \brief Discard (skip) next record to be read without reading or retrieving it.
+ *
+ * \exception TODO
+ *
+ // *** NOT YET IMPLEMENTED ***
+ iores discard_data_record(data_tok* const dtokp);
+ */
+
+ /**
+ * \brief Reads data from the journal. It is the responsibility of the reader to free
+ * the memory that is allocated through this call - see below for details.
+ *
+ * Reads the next non-dequeued data record from the journal.
+ *
+ * <b>Note</b> that this call allocates memory into which the data and XID are copied. It
+ * is the responsibility of the caller to free this memory. The memory for the data and
+ * XID are allocated in a single call, and the XID precedes the data in the memory space.
+ * Thus, where an XID exists, freeing the XID pointer will free both the XID and data memory.
+ * However, if an XID does not exist for the message, the XID pointer xidpp is set to NULL,
+ * and it is the data pointer datapp that must be freed. Should neither an XID nor data be
+ * present (ie an empty record), then no memory is allocated, and both pointers will be NULL.
+ * In this case, there is no need to free memory.
+ *
+ * TODO: Fix this lousy interface. The caller should NOT be required to clean up these
+ * pointers! Rather use a struct, or better still, let the data token carry the data and
+ * xid pointers and lengths, and have the data token both allocate and delete.
+ *
+ * \param datapp Pointer to pointer that will be set to point to memory allocated and
+ * containing the data. Will be set to NULL if the call fails or there is no data
+ * in the record.
+ * \param dsize Ref that will be set to the size of the data. Will be set to 0 if the call
+ * fails or if there is no data in the record.
+ * \param xidpp Pointer to pointer that will be set to point to memory allocated and
+ * containing the XID. Will be set to NULL if the call fails or there is no XID attached
+ * to this record.
+ * \param xidsize Ref that will be set to the size of the XID.
+ * \param transient Ref that will be set true if record is transient.
+ * \param external Ref that will be set true if record is external. In this case, the data
+ * pointer datapp will be set to NULL, but dsize will contain the size of the data.
+ * NOTE: If there is an xid, then xidpp must be freed.
+ * \param dtokp Pointer to data_tok instance for this data, used to track state of data
+ * through journal.
+ * \param ignore_pending_txns When false (default), if the next record to be read is locked
+ * by a pending transaction, the read fails with RHM_IORES_TXPENDING. However, if set
+ * to true, then locks are ignored. This is required for reading of the Transaction
+ * Prepared List (TPL) which may have its entries locked, but may be read from
+ * time-to-time, and needs all its records (locked and unlocked) to be available.
+ *
+ * \exception TODO
+ */
+ iores read_data_record(void** const datapp, std::size_t& dsize, void** const xidpp,
+ std::size_t& xidsize, bool& transient, bool& external, data_tok* const dtokp,
+ bool ignore_pending_txns = false);
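+
+ /* Illustrative read sketch following the ownership rules above; the use of std::free() to
+ * release the returned buffers and the default-constructed data_tok are assumptions
+ * (jrnl is an initialized jcntl instance):
+ * <pre>
+ * void* datap = 0; void* xidp = 0;
+ * std::size_t dsize = 0, xidsize = 0;
+ * bool transient = false, external = false;
+ * data_tok dtok;
+ * if (jrnl.read_data_record(&datap, dsize, &xidp, xidsize, transient, external, &dtok)
+ *         == RHM_IORES_SUCCESS)
+ * {
+ *     // ... consume dsize bytes at datap (datap is NULL for external records) ...
+ *     if (xidp) std::free(xidp);         // frees both XID and data memory
+ *     else if (datap) std::free(datap);  // no XID: free the data pointer instead
+ * }
+ * </pre>
+ */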
+
+ /**
+ * \brief Dequeues (marks as no longer needed) data record in journal.
+ *
+ * Dequeues (marks as no longer needed) data record in journal. Note that it is possible
+ * to use the same data token instance used to enqueue this data; it contains the record ID
+ * needed to correctly mark this data as dequeued in the journal. Otherwise the RID of the
+ * record to be dequeued and the write state of ENQ must be manually set in a new or reset
+ * instance of data_tok.
+ *
+ * \param dtokp Pointer to data_tok instance for this data, used to track state of data
+ * through journal.
+ * \param txn_coml_commit Only used for preparedXID journal. When used for dequeueing
+ * prepared XID list items, sets whether the complete() was called in commit or abort
+ * mode.
+ *
+ * \exception TODO
+ */
+ iores dequeue_data_record(data_tok* const dtokp, const bool txn_coml_commit = false);
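+
+ /* Illustrative sketch: dequeueing with the same data_tok instance used for the enqueue,
+ * as described above (dtok as in the enqueue sketch; the busy-retry on a non-SUCCESS
+ * return is an assumption):
+ * <pre>
+ * while (jrnl.dequeue_data_record(&dtok) != RHM_IORES_SUCCESS)
+ *     ;   // e.g. an AIO-wait result: back off briefly and retry
+ * </pre>
+ */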
+
+ /**
+ * \brief Dequeues (marks as no longer needed) data record in journal.
+ *
+ * Dequeues (marks as no longer needed) data record in journal as part of a transaction.
+ * Note that it is possible to use the same data token instance used to enqueue this data;
+ * it contains the RID needed to correctly mark this data as dequeued in the journal.
+ * Otherwise the RID of the record to be dequeued and the write state of ENQ must be
+ * manually set in a new or reset instance of data_tok.
+ *
+ * \param dtokp Pointer to data_tok instance for this data, used to track state of data
+ * through journal.
+ * \param xid String containing xid. An empty string (i.e. length=0) will be considered
+ * non-transactional.
+ * \param txn_coml_commit Only used for preparedXID journal. When used for dequeueing
+ * prepared XID list items, sets whether the complete() was called in commit or abort
+ * mode.
+ *
+ * \exception TODO
+ */
+ iores dequeue_txn_data_record(data_tok* const dtokp, const std::string& xid, const bool txn_coml_commit = false);
+
+ /**
+ * \brief Abort the transaction for all records enqueued or dequeued with the matching xid.
+ *
+ * Abort the transaction for all records enqueued with the matching xid. All enqueued records
+ * are effectively deleted from the journal, and can not be read. All dequeued records remain
+ * as though they had never been dequeued.
+ *
+ * \param dtokp Pointer to data_tok instance for this data, used to track state of data
+ * through journal.
+ * \param xid String containing xid.
+ *
+ * \exception TODO
+ */
+ iores txn_abort(data_tok* const dtokp, const std::string& xid);
+
+ /**
+ * \brief Commit the transaction for all records enqueued or dequeued with the matching xid.
+ *
+ * Commit the transaction for all records enqueued with the matching xid. All enqueued
+ * records are effectively released for reading and dequeueing. All dequeued records are
+ * removed and can no longer be accessed.
+ *
+ * \param dtokp Pointer to data_tok instance for this data, used to track state of data
+ * through journal.
+ * \param xid String containing xid.
+ *
+ * \exception TODO
+ */
+ iores txn_commit(data_tok* const dtokp, const std::string& xid);
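+
+ /* Illustrative transactional sketch; the xid value, example payload and separate commit
+ * token are assumptions, and return codes are ignored for brevity (jrnl is an initialized
+ * jcntl instance):
+ * <pre>
+ * const std::string xid("xid-0001");
+ * const std::string msg("example-payload");
+ * data_tok etok, ctok;
+ * jrnl.enqueue_txn_data_record(msg.data(), msg.size(), msg.size(), &etok, xid);
+ * // ... further enqueues/dequeues under the same xid ...
+ * jrnl.txn_commit(&ctok, xid);   // or jrnl.txn_abort(&ctok, xid) to roll back
+ * </pre>
+ */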
+
+ /**
+ * \brief Check whether all the enqueue records for the given xid have reached disk.
+ *
+ * \param xid String containing xid.
+ *
+ * \exception TODO
+ */
+ bool is_txn_synced(const std::string& xid);
+
+ /**
+ * \brief Forces a check for returned AIO write events.
+ *
+ * Forces a check for returned AIO write events. This is normally performed by enqueue() and
+ * dequeue() operations, but if these operations cease, then this call needs to be made to
+ * force the processing of any outstanding AIO operations.
+ */
+ int32_t get_wr_events(timespec* const timeout);
+
+ /**
+ * \brief Forces a check for returned AIO read events.
+ *
+ * Forces a check for returned AIO read events. This is normally performed by read_data()
+ * operations, but if these operations cease, then this call needs to be made to force the
+ * processing of any outstanding AIO operations.
+ */
+ int32_t get_rd_events(timespec* const timeout);
+
+ /**
+ * \brief Stop the journal from accepting any further requests to read or write data.
+ *
+ * This operation is used to stop the journal. This is the normal mechanism for bringing the
+ * journal to an orderly stop. Any outstanding AIO operations or partially written pages in
+ * the write page cache will be flushed and will complete.
+ *
+ * <b>Note:</b> The journal cannot be restarted without either initializing it or restoring
+ * it.
+ *
+ * \param block_till_aio_cmpl If true, will block the thread while waiting for all
+ * outstanding AIO operations to complete.
+ */
+ void stop(const bool block_till_aio_cmpl = false);
+
+ /**
+ * \brief Force a flush of the write page cache, creating a single AIO write operation.
+ */
+ iores flush(const bool block_till_aio_cmpl = false);
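+
+ /* Illustrative sketch: when enqueue/dequeue activity has ceased, push out buffered pages
+ * and reap write completions, as described for get_wr_events() above (the 1 s timeout is
+ * an example value; jrnl is an initialized jcntl instance):
+ * <pre>
+ * jrnl.flush();                 // single non-blocking AIO submission
+ * timespec t = { 1, 0 };
+ * jrnl.get_wr_events(&t);
+ * </pre>
+ */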
+
+ inline u_int32_t get_enq_cnt() const { return _emap.size(); }
+
+ inline u_int32_t get_wr_aio_evt_rem() const { slock l(_wr_mutex); return _wmgr.get_aio_evt_rem(); }
+
+ inline u_int32_t get_rd_aio_evt_rem() const { return _rmgr.get_aio_evt_rem(); }
+
+ inline u_int32_t get_wr_outstanding_aio_dblks() const
+ { return _wrfc.aio_outstanding_dblks(); }
+
+ inline u_int32_t get_wr_outstanding_aio_dblks(u_int16_t lfid) const
+ { return _lpmgr.get_fcntlp(lfid)->wr_aio_outstanding_dblks(); }
+
+ inline u_int32_t get_rd_outstanding_aio_dblks() const
+ { return _rrfc.aio_outstanding_dblks(); }
+
+ inline u_int32_t get_rd_outstanding_aio_dblks(u_int16_t lfid) const
+ { return _lpmgr.get_fcntlp(lfid)->rd_aio_outstanding_dblks(); }
+
+ inline u_int16_t get_rd_fid() const { return _rrfc.index(); }
+ inline u_int16_t get_wr_fid() const { return _wrfc.index(); }
+ u_int16_t get_earliest_fid();
+
+ /**
+ * \brief Check if a particular rid is enqueued. Note that this function will return
+ * false if the rid is transactionally enqueued and is not committed, or if it is
+ * locked (i.e. transactionally dequeued, but the dequeue has not been committed).
+ */
+ inline bool is_enqueued(const u_int64_t rid, bool ignore_lock = false)
+ { return _emap.is_enqueued(rid, ignore_lock); }
+ inline bool is_locked(const u_int64_t rid)
+ { if (_emap.is_enqueued(rid, true) < enq_map::EMAP_OK) return false; return _emap.is_locked(rid) == enq_map::EMAP_TRUE; }
+ inline void enq_rid_list(std::vector<u_int64_t>& rids) { _emap.rid_list(rids); }
+ inline void enq_xid_list(std::vector<std::string>& xids) { _tmap.xid_list(xids); }
+ inline u_int32_t get_open_txn_cnt() const { return _tmap.size(); }
+ // TODO Make this a const, but txn_map must support const first.
+ inline txn_map& get_txn_map() { return _tmap; }
+
+ /**
+ * \brief Check if the journal is stopped.
+ *
+ * \return <b><i>true</i></b> if the jouranl is stopped;
+ * <b><i>false</i></b> otherwise.
+ */
+ inline bool is_stopped() { return _stop_flag; }
+
+ /**
+ * \brief Check if the journal is ready to read and write data.
+ *
+ * Checks if the journal is ready to read and write data. This function will return
+ * <b><i>true</i></b> if the journal has been either initialized or restored, and the stop()
+ * function has not been called since the initialization.
+ *
+ * Note that the journal may also be stopped if an internal error occurs (such as running out
+ * of data journal file space).
+ *
+ * \return <b><i>true</i></b> if the journal is ready to read and write data;
+ * <b><i>false</i></b> otherwise.
+ */
+ inline bool is_ready() const { return _init_flag && !_stop_flag; }
+
+ inline bool is_read_only() const { return _readonly_flag; }
+
+ /**
+ * \brief Get the journal directory.
+ *
+ * This returns the journal directory as set during initialization. This is the directory
+ * into which the journal files will be written.
+ */
+ inline const std::string& dirname() const { return _jdir.dirname(); }
+
+ /**
+ * \brief Get the journal base filename.
+ *
+ * Get the journal base filename as set during initialization. This is the prefix used in all
+ * journal files of this instance. Note that if more than one instance of the journal shares
+ * the same directory, their base filenames <b>MUST</b> be different or else the instances
+ * will overwrite one another.
+ */
+ inline const std::string& base_filename() const { return _base_filename; }
+
+ inline u_int16_t num_jfiles() const { return _lpmgr.num_jfiles(); }
+
+ inline fcntl* get_fcntlp(const u_int16_t lfid) const { return _lpmgr.get_fcntlp(lfid); }
+
+ inline u_int32_t jfsize_sblks() const { return _jfsize_sblks; }
+
+ // Logging
+ virtual void log(log_level level, const std::string& log_stmt) const;
+ virtual void log(log_level level, const char* const log_stmt) const;
+
+ // FIXME these are _rmgr to _wmgr interactions, remove when _rmgr contains ref to _wmgr:
+ void chk_wr_frot();
+ inline u_int32_t unflushed_dblks() { return _wmgr.unflushed_dblks(); }
+ void fhdr_wr_sync(const u_int16_t lid);
+ inline u_int32_t wr_subm_cnt_dblks(const u_int16_t lfid) const { return _lpmgr.get_fcntlp(lfid)->wr_subm_cnt_dblks(); }
+
+ // Management instrumentation callbacks
+ inline virtual void instr_incr_outstanding_aio_cnt() {}
+ inline virtual void instr_decr_outstanding_aio_cnt() {}
+
+ /**
+ * \brief Static function for creating new fcntl objects for use with obj_arr.
+ */
+ static fcntl* new_fcntl(jcntl* const jcp, const u_int16_t lid, const u_int16_t fid, const rcvdat* const rdp);
+
+ protected:
+ static bool _init;
+ static bool init_statics();
+
+ /**
+ * \brief Check status of journal before allowing write operations.
+ */
+ void check_wstatus(const char* fn_name) const;
+
+ /**
+ * \brief Check status of journal before allowing read operations.
+ */
+ void check_rstatus(const char* fn_name) const;
+
+ /**
+ * \brief Write info file &lt;basefilename&gt;.jinf to disk
+ */
+ void write_infofile() const;
+
+ /**
+ * \brief Call that blocks while waiting for all outstanding AIOs to complete
+ */
+ void aio_cmpl_wait();
+
+ /**
+ * \brief Call that blocks until at least one outstanding AIO operation returns; used to wait for
+ * AIO wait conditions to clear.
+ */
+ bool handle_aio_wait(const iores res, iores& resout, const data_tok* dtp);
+
+ /**
+ * \brief Analyze journal for recovery.
+ */
+ void rcvr_janalyze(rcvdat& rd, const std::vector<std::string>* prep_txn_list_ptr);
+
+ bool rcvr_get_next_record(u_int16_t& fid, std::ifstream* ifsp, bool& lowi, rcvdat& rd);
+
+ bool decode(jrec& rec, u_int16_t& fid, std::ifstream* ifsp, std::size_t& cum_size_read,
+ rec_hdr& h, bool& lowi, rcvdat& rd, std::streampos& rec_offset);
+
+ bool jfile_cycle(u_int16_t& fid, std::ifstream* ifsp, bool& lowi, rcvdat& rd,
+ const bool jump_fro);
+
+ bool check_owi(const u_int16_t fid, rec_hdr& h, bool& lowi, rcvdat& rd,
+ std::streampos& read_pos);
+
+ void check_journal_alignment(const u_int16_t fid, std::streampos& rec_offset, rcvdat& rd);
+ };
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_JCNTL_H
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/jdir.cpp b/qpid/cpp/src/qpid/legacystore/jrnl/jdir.cpp
new file mode 100644
index 0000000000..a874c6c945
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/jdir.cpp
@@ -0,0 +1,463 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file jdir.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::jdir (journal data
+ * directory), used for controlling and manipulating journal data
+ * directories and files. See comments in file jdir.h for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#include "qpid/legacystore/jrnl/jdir.h"
+
+#include <cstdlib>
+#include <cstring>
+#include <cerrno>
+#include <iomanip>
+#include "qpid/legacystore/jrnl/jcfg.h"
+#include "qpid/legacystore/jrnl/jerrno.h"
+#include "qpid/legacystore/jrnl/jexception.h"
+#include <sstream>
+#include <sys/stat.h>
+#include <unistd.h>
+
+namespace mrg
+{
+namespace journal
+{
+
+jdir::jdir(const std::string& dirname, const std::string& _base_filename):
+ _dirname(dirname),
+ _base_filename(_base_filename)
+{}
+
+jdir::~jdir()
+{}
+
+// === create_dir ===
+
+void
+jdir::create_dir()
+{
+ create_dir(_dirname);
+}
+
+
+void
+jdir::create_dir(const char* dirname)
+{
+ create_dir(std::string(dirname));
+}
+
+
+void
+jdir::create_dir(const std::string& dirname)
+{
+ std::size_t fdp = dirname.find_last_of('/');
+ if (fdp != std::string::npos)
+ {
+ std::string parent_dir = dirname.substr(0, fdp);
+ if (!exists(parent_dir))
+ create_dir(parent_dir);
+ }
+ if (::mkdir(dirname.c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH))
+ {
+ if (errno != EEXIST) // An existing dir is ignored; throw on any other error
+ {
+ std::ostringstream oss;
+ oss << "dir=\"" << dirname << "\"" << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR_JDIR_MKDIR, oss.str(), "jdir", "create_dir");
+ }
+ }
+}
+
+
+// === clear_dir ===
+
+void
+jdir::clear_dir(const bool create_flag)
+{
+ clear_dir(_dirname, _base_filename, create_flag);
+}
+
+void
+jdir::clear_dir(const char* dirname, const char* base_filename, const bool create_flag)
+{
+ clear_dir(std::string(dirname), std::string(base_filename), create_flag);
+}
+
+
+void
+jdir::clear_dir(const std::string& dirname, const std::string&
+#ifndef RHM_JOWRITE
+ base_filename
+#endif
+ , const bool create_flag)
+{
+ DIR* dir = ::opendir(dirname.c_str());
+ if (!dir)
+ {
+ if (errno == ENOENT && create_flag) // Dir does not exist; create it
+ {
+ create_dir(dirname);
+ return;
+ }
+ std::ostringstream oss;
+ oss << "dir=\"" << dirname << "\"" << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR_JDIR_OPENDIR, oss.str(), "jdir", "clear_dir");
+ }
+#ifndef RHM_JOWRITE
+ struct dirent* entry;
+ bool found = false;
+ std::string bak_dir;
+ while ((entry = ::readdir(dir)) != 0)
+ {
+ // Ignore . and ..
+ if (std::strcmp(entry->d_name, ".") != 0 && std::strcmp(entry->d_name, "..") != 0)
+ {
+ if (std::strlen(entry->d_name) > base_filename.size())
+ {
+ if (std::strncmp(entry->d_name, base_filename.c_str(), base_filename.size()) == 0)
+ {
+ if (!found)
+ {
+ bak_dir = create_bak_dir(dirname, base_filename);
+ found = true;
+ }
+ std::ostringstream oldname;
+ oldname << dirname << "/" << entry->d_name;
+ std::ostringstream newname;
+ newname << bak_dir << "/" << entry->d_name;
+ if (::rename(oldname.str().c_str(), newname.str().c_str()))
+ {
+ ::closedir(dir);
+ std::ostringstream oss;
+ oss << "file=\"" << oldname.str() << "\" dest=\"" <<
+ newname.str() << "\"" << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR_JDIR_FMOVE, oss.str(), "jdir", "clear_dir");
+ }
+ }
+ }
+ }
+ }
+// FIXME: Find out why this fails with false alarms/errors from time to time...
+// While commented out, there is no error capture from reading dir entries.
+// check_err(errno, dir, dirname, "clear_dir");
+#endif
+ close_dir(dir, dirname, "clear_dir");
+}
+
+// === push_down ===
+
+std::string
+jdir::push_down(const std::string& dirname, const std::string& target_dir, const std::string& bak_dir_base)
+{
+ std::string bak_dir_name = create_bak_dir(dirname, bak_dir_base);
+
+ DIR* dir = ::opendir(dirname.c_str());
+ if (!dir)
+ {
+ std::ostringstream oss;
+ oss << "dir=\"" << dirname << "\"" << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR_JDIR_OPENDIR, oss.str(), "jdir", "push_down");
+ }
+ // Move target_dir into the backup dir
+ struct dirent* entry;
+ while ((entry = ::readdir(dir)) != 0)
+ {
+ // Search for target_dir among the entries of dirname
+ if (std::strcmp(entry->d_name, target_dir.c_str()) == 0)
+ {
+ std::ostringstream oldname;
+ oldname << dirname << "/" << target_dir;
+ std::ostringstream newname;
+ newname << bak_dir_name << "/" << target_dir;
+ if (::rename(oldname.str().c_str(), newname.str().c_str()))
+ {
+ ::closedir(dir);
+ std::ostringstream oss;
+ oss << "file=\"" << oldname.str() << "\" dest=\"" << newname.str() << "\"" << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR_JDIR_FMOVE, oss.str(), "jdir", "push_down");
+ }
+ break;
+ }
+ }
+ close_dir(dir, dirname, "push_down");
+ return bak_dir_name;
+}
+
+// === verify_dir ===
+
+void
+jdir::verify_dir()
+{
+ verify_dir(_dirname, _base_filename);
+}
+
+void
+jdir::verify_dir(const char* dirname, const char* base_filename)
+{
+ verify_dir(std::string(dirname), std::string(base_filename));
+}
+
+
+void
+jdir::verify_dir(const std::string& dirname, const std::string& base_filename)
+{
+ if (!is_dir(dirname))
+ {
+ std::ostringstream oss;
+ oss << "dir=\"" << dirname << "\"";
+ throw jexception(jerrno::JERR_JDIR_NOTDIR, oss.str(), "jdir", "verify_dir");
+ }
+
+ // Read jinf file, then verify all journal files are present
+ jinf ji(dirname + "/" + base_filename + "." + JRNL_INFO_EXTENSION, true);
+ for (u_int16_t fnum=0; fnum < ji.num_jfiles(); fnum++)
+ {
+ std::ostringstream oss;
+ oss << dirname << "/" << base_filename << ".";
+ oss << std::setw(4) << std::setfill('0') << std::hex << fnum;
+ oss << "." << JRNL_DATA_EXTENSION;
+ if (!exists(oss.str()))
+ throw jexception(jerrno::JERR_JDIR_NOSUCHFILE, oss.str(), "jdir", "verify_dir");
+ }
+}
+
+
+// === delete_dir ===
+
+void
+jdir::delete_dir(bool children_only)
+{
+ delete_dir(_dirname, children_only);
+}
+
+void
+jdir::delete_dir(const char* dirname, bool children_only)
+{
+ delete_dir(std::string(dirname), children_only);
+}
+
+void
+jdir::delete_dir(const std::string& dirname, bool children_only)
+{
+ struct dirent* entry;
+ struct stat s;
+ DIR* dir = ::opendir(dirname.c_str());
+ if (!dir)
+ {
+ if (errno == ENOENT) // dir does not exist.
+ return;
+
+ std::ostringstream oss;
+ oss << "dir=\"" << dirname << "\"" << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR_JDIR_OPENDIR, oss.str(), "jdir", "delete_dir");
+ }
+ else
+ {
+ while ((entry = ::readdir(dir)) != 0)
+ {
+ // Ignore . and ..
+ if (std::strcmp(entry->d_name, ".") != 0 && std::strcmp(entry->d_name, "..") != 0)
+ {
+ std::string full_name(dirname + "/" + entry->d_name);
+ if (::lstat(full_name.c_str(), &s))
+ {
+ ::closedir(dir);
+ std::ostringstream oss;
+ oss << "stat: file=\"" << full_name << "\"" << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR_JDIR_STAT, oss.str(), "jdir", "delete_dir");
+ }
+ if (S_ISREG(s.st_mode) || S_ISLNK(s.st_mode)) // This is a file or slink
+ {
+ if(::unlink(full_name.c_str()))
+ {
+ ::closedir(dir);
+ std::ostringstream oss;
+ oss << "unlink: file=\"" << entry->d_name << "\"" << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR_JDIR_UNLINK, oss.str(), "jdir", "delete_dir");
+ }
+ }
+ else if (S_ISDIR(s.st_mode)) // This is a dir
+ {
+ delete_dir(full_name);
+ }
+ else // all other types, throw up!
+ {
+ ::closedir(dir);
+ std::ostringstream oss;
+ oss << "file=\"" << entry->d_name << "\" is not a dir, file or slink.";
+ oss << " (mode=0x" << std::hex << s.st_mode << std::dec << ")";
+ throw jexception(jerrno::JERR_JDIR_BADFTYPE, oss.str(), "jdir", "delete_dir");
+ }
+ }
+ }
+
+// FIXME: Find out why this fails with false alarms/errors from time to time...
+// While commented out, there is no error capture from reading dir entries.
+// check_err(errno, dir, dirname, "delete_dir");
+ }
+ // Now dir is empty, close and delete it
+ close_dir(dir, dirname, "delete_dir");
+
+ if (!children_only)
+ if (::rmdir(dirname.c_str()))
+ {
+ std::ostringstream oss;
+ oss << "dir=\"" << dirname << "\"" << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR_JDIR_RMDIR, oss.str(), "jdir", "delete_dir");
+ }
+}
+
+
+std::string
+jdir::create_bak_dir(const std::string& dirname, const std::string& base_filename)
+{
+ DIR* dir = ::opendir(dirname.c_str());
+ long dir_num = 0L;
+ if (!dir)
+ {
+ std::ostringstream oss;
+ oss << "dir=\"" << dirname << "\"" << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR_JDIR_OPENDIR, oss.str(), "jdir", "create_bak_dir");
+ }
+ struct dirent* entry;
+ while ((entry = ::readdir(dir)) != 0)
+ {
+ // Ignore . and ..
+ if (std::strcmp(entry->d_name, ".") != 0 && std::strcmp(entry->d_name, "..") != 0)
+ {
+ if (std::strlen(entry->d_name) == base_filename.size() + 10) // Format: _basename.bak.XXXX
+ {
+ std::ostringstream oss;
+ oss << "_" << base_filename << ".bak.";
+ if (std::strncmp(entry->d_name, oss.str().c_str(), base_filename.size() + 6) == 0)
+ {
+ long this_dir_num = std::strtol(entry->d_name + base_filename.size() + 6, 0, 16);
+ if (this_dir_num > dir_num)
+ dir_num = this_dir_num;
+ }
+ }
+ }
+ }
+// FIXME: Find out why this fails with false alarms/errors from time to time...
+// While commented out, there is no error capture from reading dir entries.
+// check_err(errno, dir, dirname, "create_bak_dir");
+ close_dir(dir, dirname, "create_bak_dir");
+
+ std::ostringstream dn;
+ dn << dirname << "/_" << base_filename << ".bak." << std::hex << std::setw(4) <<
+ std::setfill('0') << ++dir_num;
+ if (::mkdir(dn.str().c_str(), S_IRWXU | S_IRWXG | S_IROTH))
+ {
+ std::ostringstream oss;
+ oss << "dir=\"" << dn.str() << "\"" << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR_JDIR_MKDIR, oss.str(), "jdir", "create_bak_dir");
+ }
+ return std::string(dn.str());
+}
+
+bool
+jdir::is_dir(const char* name)
+{
+ struct stat s;
+ if (::stat(name, &s))
+ {
+ std::ostringstream oss;
+ oss << "file=\"" << name << "\"" << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR_JDIR_STAT, oss.str(), "jdir", "is_dir");
+ }
+ return S_ISDIR(s.st_mode);
+}
+
+bool
+jdir::is_dir(const std::string& name)
+{
+ return is_dir(name.c_str());
+}
+
+bool
+jdir::exists(const char* name)
+{
+ struct stat s;
+ if (::stat(name, &s))
+ {
+ if (errno == ENOENT) // No such dir or file
+ return false;
+ // Throw for any other condition
+ std::ostringstream oss;
+ oss << "file=\"" << name << "\"" << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR_JDIR_STAT, oss.str(), "jdir", "exists");
+ }
+ return true;
+}
+
+bool
+jdir::exists(const std::string& name)
+{
+ return exists(name.c_str());
+}
+
+void
+jdir::check_err(const int err_num, DIR* dir, const std::string& dir_name, const std::string& fn_name)
+{
+ if (err_num)
+ {
+ std::ostringstream oss;
+ oss << "dir=\"" << dir_name << "\"" << FORMAT_SYSERR(err_num);
+ ::closedir(dir); // Try to close, it makes no sense to trap errors here...
+ throw jexception(jerrno::JERR_JDIR_READDIR, oss.str(), "jdir", fn_name);
+ }
+}
+
+void
+jdir::close_dir(DIR* dir, const std::string& dir_name, const std::string& fn_name)
+{
+ if (::closedir(dir))
+ {
+ std::ostringstream oss;
+ oss << "dir=\"" << dir_name << "\"" << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR_JDIR_CLOSEDIR, oss.str(), "jdir", fn_name);
+ }
+}
+
+std::ostream&
+operator<<(std::ostream& os, const jdir& jdir)
+{
+ os << jdir._dirname;
+ return os;
+}
+
+std::ostream&
+operator<<(std::ostream& os, const jdir* jdirPtr)
+{
+ os << jdirPtr->_dirname;
+ return os;
+}
+
+} // namespace journal
+} // namespace mrg
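
A minimal stand-alone sketch of the backup-directory numbering implemented by create_bak_dir() above; the directory and base name are illustrative assumptions, and the directory is assumed to already exist and be writable:

    #include "qpid/legacystore/jrnl/jdir.h"
    #include <iostream>
    #include <string>

    int main()
    {
        using mrg::journal::jdir;
        // Each call scans /tmp/jrnl-demo for existing "_demo.bak.XXXX" entries and
        // creates the next directory in the hex sequence: _demo.bak.0001, _demo.bak.0002, ...
        std::string bak1 = jdir::create_bak_dir("/tmp/jrnl-demo", "demo");
        std::string bak2 = jdir::create_bak_dir("/tmp/jrnl-demo", "demo");
        std::cout << bak1 << std::endl << bak2 << std::endl;
        return 0;
    }
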
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/jdir.h b/qpid/cpp/src/qpid/legacystore/jrnl/jdir.h
new file mode 100644
index 0000000000..e129b794d6
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/jdir.h
@@ -0,0 +1,379 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file jdir.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::jdir (%journal data
+ * directory), used for controlling and manipulating %journal data
+ * directories and files. See class documentation for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_JDIR_H
+#define QPID_LEGACYSTORE_JRNL_JDIR_H
+
+namespace mrg
+{
+namespace journal
+{
+class jdir;
+}
+}
+
+#include "qpid/legacystore/jrnl/jinf.h"
+#include <dirent.h>
+
+namespace mrg
+{
+namespace journal
+{
+
+ /**
+ * \class jdir
+ * \brief Class to manage the %journal directory
+ */
+ class jdir
+ {
+ private:
+ std::string _dirname;
+ std::string _base_filename;
+
+ public:
+
+ /**
+ * \brief Sole constructor
+ *
+ * \param dirname Name of directory to be managed.
+ * \param base_filename Filename root used in the creation of %journal files
+ * and sub-directories.
+ */
+ jdir(const std::string& dirname, const std::string& base_filename);
+
+ virtual ~jdir();
+
+
+ /**
+ * \brief Create %journal directory as set in the dirname parameter of the constructor.
+ * Recursive creation is supported.
+ *
+ * \exception jerrno::JERR_JDIR_MKDIR The creation of dirname failed.
+ */
+ void create_dir();
+
+ /**
+ * \brief Static function to create a directory. Recursive creation is supported.
+ *
+ * \param dirname C-string containing name of directory.
+ *
+ * \exception jerrno::JERR_JDIR_MKDIR The creation of dirname failed.
+ */
+ static void create_dir(const char* dirname);
+
+ /**
+ * \brief Static function to create a directory. Recursive creation is supported.
+ *
+ * \param dirname String containing name of directory.
+ *
+ * \exception jerrno::JERR_JDIR_MKDIR The creation of dirname failed.
+ */
+ static void create_dir(const std::string& dirname);
+
+
+ /**
+ * \brief Clear the %journal directory of files matching the base filename
+ * by moving them into a subdirectory. This fn uses the dirname and base_filename
+ * that were set on construction.
+ *
+ * \param create_flag If set, create dirname if it is non-existent, otherwise throw
+ * exception.
+ *
+ * \exception jerrno::JERR_JDIR_OPENDIR The %journal directory could not be opened.
+ * \exception jerrno::JERR_JDIR_FMOVE Moving the files from the %journal directory to the created backup
+ * directory failed.
+ * \exception jerrno::JERR_JDIR_CLOSEDIR The directory handle could not be closed.
+ */
+ void clear_dir(const bool create_flag = true);
+
+ /**
+ * \brief Clear the directory dirname of %journal files matching base_filename
+ * by moving them into a subdirectory.
+ *
+ * \param dirname C-string containing name of %journal directory.
+ * \param base_filename C-string containing base filename of %journal files to be matched
+ * for moving into subdirectory.
+ * \param create_flag If set, create dirname if it is non-existent, otherwise throw
+ * exception
+ *
+ * \exception jerrno::JERR_JDIR_OPENDIR The %journal directory could not be opened.
+ * \exception jerrno::JERR_JDIR_FMOVE Moving the files from the %journal directory to the created backup
+ * directory failed.
+ * \exception jerrno::JERR_JDIR_CLOSEDIR The directory handle could not be closed.
+ */
+ static void clear_dir(const char* dirname, const char* base_filename,
+ const bool create_flag = true);
+
+ /**
+ * \brief Clear the directory dirname of %journal files matching base_filename
+ * by moving them into a subdirectory.
+ *
+ * \param dirname String containing name of %journal directory.
+ * \param base_filename String containing base filename of %journal files to be matched
+ * for moving into subdirectory.
+ * \param create_flag If set, create dirname if it is non-existent, otherwise throw
+ * exception
+ *
+ * \exception jerrno::JERR_JDIR_OPENDIR The %journal directory could not be opened.
+ * \exception jerrno::JERR_JDIR_FMOVE Moving the files from the %journal directory to the created backup
+ * directory failed.
+ * \exception jerrno::JERR_JDIR_CLOSEDIR The directory handle could not be closed.
+ */
+ static void clear_dir(const std::string& dirname, const std::string& base_filename,
+ const bool create_flag = true);
+
+
+
+ /**
+ * \brief Move (push down) the directory target_dir located in directory dirname into a backup directory
+ * named _bak_dir_base.XXXX (note prepended underscore), where XXXX is an increasing hex serial number
+ * starting at 0000.
+ *
+ * \param dirname Full path to directory containing directory to be pushed down.
+ * \param target_dir Name of directory in dirname to be pushed down.
+ * \param bak_dir_base Base name for backup directory to be created in dirname, into which target_dir will be moved.
+ * \return Name of backup dir into which target_dir was pushed.
+ */
+ static std::string push_down(const std::string& dirname, const std::string& target_dir, const std::string& bak_dir_base);
+
+
+ /**
+ * \brief Verify that dirname is a valid %journal directory.
+ *
+ * The validation reads the .%jinf file, and using this information verifies that all the expected %journal
+ * (.jdat) files are present.
+ *
+ * \exception jerrno::JERR_JDIR_NOTDIR dirname is not a directory
+ * \exception jerrno::JERR_JDIR_STAT Could not stat dirname
+ * \exception jerrno::JERR__FILEIO Error reading %jinf file
+ * \exception jerrno::JERR_JINF_CVALIDFAIL Error validating %jinf file
+ * \exception jerrno::JERR_JDIR_NOSUCHFILE Expected jdat file is missing
+ */
+ void verify_dir();
+
+ /**
+ * \brief Verify that dirname is a valid %journal directory.
+ *
+ * The validation reads the .%jinf file, and using this information verifies that all the expected %journal
+ * (.jdat) files are present.
+ *
+ * \param dirname C-string containing name of %journal directory.
+ * \param base_filename C-string containing the base filename of the %journal files to be verified.
+ *
+ * \exception jerrno::JERR_JDIR_NOTDIR dirname is not a directory
+ * \exception jerrno::JERR_JDIR_STAT Could not stat dirname
+ * \exception jerrno::JERR__FILEIO Error reading %jinf file
+ * \exception jerrno::JERR_JINF_CVALIDFAIL Error validating %jinf file
+ * \exception jerrno::JERR_JDIR_NOSUCHFILE Expected jdat file is missing
+ */
+ static void verify_dir(const char* dirname, const char* base_filename);
+
+ /**
+ * \brief Verify that dirname is a valid %journal directory.
+ *
+ * The validation reads the .%jinf file, and using this information verifies that all the expected %journal
+ * (.jdat) files are present.
+ *
+ * \param dirname String containing name of %journal directory.
+ * \param base_filename String containing the base filename of the %journal files to be verified.
+ *
+ * \exception jerrno::JERR_JDIR_NOTDIR dirname is not a directory
+ * \exception jerrno::JERR_JDIR_STAT Could not stat dirname
+ * \exception jerrno::JERR__FILEIO Error reading %jinf file
+ * \exception jerrno::JERR_JINF_CVALIDFAIL Error validating %jinf file
+ * \exception jerrno::JERR_JDIR_NOSUCHFILE Expected jdat file is missing
+ */
+ static void verify_dir(const std::string& dirname, const std::string& base_filename);
+
+ /**
+ * \brief Delete the %journal directory and all files and sub-directories that it may
+ * contain. This is the equivalent of rm -rf.
+ *
+ * FIXME: links are not handled correctly.
+ *
+ * \param children_only If true, delete only children of dirname, but leave dirname itself.
+ *
+ * \exception jerrno::JERR_JDIR_OPENDIR The %journal directory could not be opened.
+ * \exception jerrno::JERR_JDIR_STAT Could not stat dirname.
+ * \exception jerrno::JERR_JDIR_UNLINK A file could not be deleted.
+ * \exception jerrno::JERR_JDIR_BADFTYPE A dir entry is neither a file nor a dir.
+ * \exception jerrno::JERR_JDIR_CLOSEDIR The directory handle could not be closed.
+ * \exception jerrno::JERR_JDIR_RMDIR A directory could not be deleted.
+ */
+ void delete_dir(bool children_only = false );
+
+ /**
+ * \brief Delete the %journal directory and all files and sub-directories that it may
+ * contain. This is the equivalent of rm -rf.
+ *
+ * FIXME: links are not handled correctly.
+ *
+ * \param dirname C-string containing name of directory to be deleted.
+ * \param children_only If true, delete only children of dirname, but leave dirname itself.
+ *
+ * \exception jerrno::JERR_JDIR_OPENDIR The %journal directory could not be opened.
+ * \exception jerrno::JERR_JDIR_STAT Could not stat dirname.
+ * \exception jerrno::JERR_JDIR_UNLINK A file could not be deleted.
+ * \exception jerrno::JERR_JDIR_BADFTYPE A dir entry is neither a file nor a dir.
+ * \exception jerrno::JERR_JDIR_CLOSEDIR The directory handle could not be closed.
+ * \exception jerrno::JERR_JDIR_RMDIR A directory could not be deleted.
+ */
+ static void delete_dir(const char* dirname, bool children_only = false);
+
+ /**
+ * \brief Delete the %journal directory and all files and sub-directories that it may
+ * contain. This is the equivalent of rm -rf.
+ *
+ * FIXME: links are not handled correctly.
+ *
+ * \param dirname String containing name of directory to be deleted.
+ * \param children_only If true, delete only children of dirname, but leave dirname itself.
+ *
+ * \exception jerrno::JERR_JDIR_OPENDIR The %journal directory could not be opened.
+ * \exception jerrno::JERR_JDIR_STAT Could not stat dirname.
+ * \exception jerrno::JERR_JDIR_UNLINK A file could not be deleted.
+ * \exception jerrno::JERR_JDIR_BADFTYPE A dir entry is neither a file nor a dir.
+ * \exception jerrno::JERR_JDIR_CLOSEDIR The directory handle could not be closed.
+ * \exception jerrno::JERR_JDIR_RMDIR A directory could not be deleted.
+ */
+ static void delete_dir(const std::string& dirname, bool children_only = false);
+
+ /**
+ * \brief Create the backup directory that is next in sequence and move all %journal files
+ * matching base_filename into it.
+ *
+ * In directory dirname, search for existing backup directory using pattern
+ * "_basename.bak.XXXX" where XXXX is a hexadecimal sequence, and create next directory
+ * based on highest number found. Move all %journal files which match the base_fileaname
+ * parameter into this new backup directory.
+ *
+ * \param dirname String containing name of %journal directory.
+ * \param base_filename String containing base filename of %journal files to be matched
+ * for moving into subdirectory.
+ *
+ * \exception jerrno::JERR_JDIR_OPENDIR The %journal directory could not be opened.
+ * \exception jerrno::JERR_JDIR_CLOSEDIR The directory handle could not be closed.
+ * \exception jerrno::JERR_JDIR_MKDIR The backup directory could not be created.
+ */
+ static std::string create_bak_dir(const std::string& dirname,
+ const std::string& base_filename);
+
+ /**
+ * \brief Return the directory name as a string.
+ */
+ inline const std::string& dirname() const { return _dirname; }
+
+ /**
+ * \brief Return the %journal base filename name as a string.
+ */
+ inline const std::string& base_filename() const { return _base_filename; }
+
+ /**
+ * \brief Test whether the named file is a directory.
+ *
+ * \param name Name of file to be tested.
+ * \return <b><i>true</i></b> if the named file is a directory; <b><i>false</i></b>
+ * otherwise.
+ * \exception jerrno::JERR_JDIR_STAT Could not stat name.
+ */
+ static bool is_dir(const char* name);
+
+ /**
+ * \brief Test whether the named file is a directory.
+ *
+ * \param name Name of file to be tested.
+ * \return <b><i>true</i></b> if the named file is a directory; <b><i>false</i></b>
+ * otherwise.
+ * \exception jerrno::JERR_JDIR_STAT Could not stat name.
+ */
+ static bool is_dir(const std::string& name);
+
+
+ /**
+ * \brief Test whether the named entity exists on the filesystem.
+ *
+ * If stat() fails with error ENOENT, then this will return <b><i>false</i></b>. If
+ * stat() succeeds, then <b><i>true</i></b> is returned, irrespective of the file type.
+ * If stat() fails with any other error, an exception is thrown.
+ *
+ * \param name Name of entity to be tested.
+ * \return <b><i>true</i></b> if the named entity exists; <b><i>false</i></b>
+ * otherwise.
+ * \exception jerrno::JERR_JDIR_STAT Could not stat name.
+ */
+ static bool exists(const char* name);
+
+ /**
+ * \brief Test whether the named entity exists on the filesystem.
+ *
+ * If stat() fails with error ENOENT, then this will return <b><i>false</i></b>. If
+ * stat() succeeds, then <b><i>true</i></b> is returned, irrespective of the file type.
+ * If stat() fails with any other error, an exception is thrown.
+ *
+ * \param name Name of entity to be tested.
+ * \return <b><i>true</i></b> if the named entity exists; <b><i>false</i></b>
+ * otherwise.
+ * \exception jerrno::JERR_JDIR_STAT Could not stat name.
+ */
+ static bool exists(const std::string& name);
+
+ /**
+ * \brief Stream operator
+ */
+ friend std::ostream& operator<<(std::ostream& os, const jdir& jdir);
+
+ /**
+ * \brief Stream operator
+ */
+ friend std::ostream& operator<<(std::ostream& os, const jdir* jdirPtr);
+
+ private:
+ /**
+ * \brief Check for error, if non-zero close DIR handle and throw JERR_JDIR_READDIR
+ *
+ * \exception jerrno::JERR_JDIR_READDIR Error while reading contents of dir.
+ */
+ static void check_err(const int err_num, DIR* dir, const std::string& dir_name, const std::string& fn_name);
+
+ /**
+ * \brief Close a DIR handle, throw JERR_JDIR_CLOSEDIR if error occurs during close
+ *
+ * \exception jerrno::JERR_JDIR_CLOSEDIR The directory handle could not be closed.
+ */
+ static void close_dir(DIR* dir, const std::string& dir_name, const std::string& fn_name);
+ };
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_JDIR_H
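
A minimal usage sketch of the jdir API declared above, assuming an illustrative directory /tmp/jrnl-demo and base filename demo_journal; error handling follows the jexception pattern used throughout the store:

    #include "qpid/legacystore/jrnl/jdir.h"
    #include "qpid/legacystore/jrnl/jexception.h"
    #include <iostream>

    int main()
    {
        using mrg::journal::jdir;
        jdir dir("/tmp/jrnl-demo", "demo_journal");
        try
        {
            dir.create_dir();  // recursive mkdir; an existing directory is not an error
            dir.clear_dir();   // move any demo_journal.* files into a _demo_journal.bak.XXXX sub-dir
            if (jdir::exists("/tmp/jrnl-demo"))
                std::cout << "journal dir: " << dir << std::endl; // friend stream operator
            dir.delete_dir();  // recursive delete, equivalent of rm -rf
        }
        catch (const mrg::journal::jexception& e)
        {
            std::cerr << e.what() << std::endl; // error code, class::fn and jerrno message
        }
        return 0;
    }
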
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/jerrno.cpp b/qpid/cpp/src/qpid/legacystore/jrnl/jerrno.cpp
new file mode 100644
index 0000000000..4962ce63ab
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/jerrno.cpp
@@ -0,0 +1,253 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file jerrno.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::jerrno (journal error
+ * codes). See comments in file jerrno.h for details.
+ *
+ * See file jerrno.h for class details.
+ *
+ * \author Kim van der Riet
+ */
+
+#include "qpid/legacystore/jrnl/jerrno.h"
+
+namespace mrg
+{
+namespace journal
+{
+
+std::map<u_int32_t, const char*> jerrno::_err_map;
+std::map<u_int32_t, const char*>::iterator jerrno::_err_map_itr;
+bool jerrno::_initialized = jerrno::__init();
+
+// generic errors
+const u_int32_t jerrno::JERR__MALLOC = 0x0100;
+const u_int32_t jerrno::JERR__UNDERFLOW = 0x0101;
+const u_int32_t jerrno::JERR__NINIT = 0x0102;
+const u_int32_t jerrno::JERR__AIO = 0x0103;
+const u_int32_t jerrno::JERR__FILEIO = 0x0104;
+const u_int32_t jerrno::JERR__RTCLOCK = 0x0105;
+const u_int32_t jerrno::JERR__PTHREAD = 0x0106;
+const u_int32_t jerrno::JERR__TIMEOUT = 0x0107;
+const u_int32_t jerrno::JERR__UNEXPRESPONSE = 0x0108;
+const u_int32_t jerrno::JERR__RECNFOUND = 0x0109;
+const u_int32_t jerrno::JERR__NOTIMPL = 0x010a;
+
+// class jcntl
+const u_int32_t jerrno::JERR_JCNTL_STOPPED = 0x0200;
+const u_int32_t jerrno::JERR_JCNTL_READONLY = 0x0201;
+const u_int32_t jerrno::JERR_JCNTL_AIOCMPLWAIT = 0x0202;
+const u_int32_t jerrno::JERR_JCNTL_UNKNOWNMAGIC = 0x0203;
+const u_int32_t jerrno::JERR_JCNTL_NOTRECOVERED = 0x0204;
+const u_int32_t jerrno::JERR_JCNTL_RECOVERJFULL = 0x0205;
+const u_int32_t jerrno::JERR_JCNTL_OWIMISMATCH = 0x0206;
+
+// class jdir
+const u_int32_t jerrno::JERR_JDIR_NOTDIR = 0x0300;
+const u_int32_t jerrno::JERR_JDIR_MKDIR = 0x0301;
+const u_int32_t jerrno::JERR_JDIR_OPENDIR = 0x0302;
+const u_int32_t jerrno::JERR_JDIR_READDIR = 0x0303;
+const u_int32_t jerrno::JERR_JDIR_CLOSEDIR = 0x0304;
+const u_int32_t jerrno::JERR_JDIR_RMDIR = 0x0305;
+const u_int32_t jerrno::JERR_JDIR_NOSUCHFILE = 0x0306;
+const u_int32_t jerrno::JERR_JDIR_FMOVE = 0x0307;
+const u_int32_t jerrno::JERR_JDIR_STAT = 0x0308;
+const u_int32_t jerrno::JERR_JDIR_UNLINK = 0x0309;
+const u_int32_t jerrno::JERR_JDIR_BADFTYPE = 0x030a;
+
+// class fcntl
+const u_int32_t jerrno::JERR_FCNTL_OPENWR = 0x0400;
+const u_int32_t jerrno::JERR_FCNTL_WRITE = 0x0401;
+const u_int32_t jerrno::JERR_FCNTL_CLOSE = 0x0402;
+const u_int32_t jerrno::JERR_FCNTL_FILEOFFSOVFL = 0x0403;
+const u_int32_t jerrno::JERR_FCNTL_CMPLOFFSOVFL = 0x0404;
+const u_int32_t jerrno::JERR_FCNTL_RDOFFSOVFL = 0x0405;
+
+// class lfmgr
+const u_int32_t jerrno::JERR_LFMGR_BADAEFNUMLIM = 0x0500;
+const u_int32_t jerrno::JERR_LFMGR_AEFNUMLIMIT = 0x0501;
+const u_int32_t jerrno::JERR_LFMGR_AEDISABLED = 0x0502;
+
+// class rrfc
+const u_int32_t jerrno::JERR_RRFC_OPENRD = 0x0600;
+
+// class jrec, enq_rec, deq_rec, txn_rec
+const u_int32_t jerrno::JERR_JREC_BADRECHDR = 0x0700;
+const u_int32_t jerrno::JERR_JREC_BADRECTAIL = 0x0701;
+
+// class wmgr
+const u_int32_t jerrno::JERR_WMGR_BADPGSTATE = 0x0801;
+const u_int32_t jerrno::JERR_WMGR_BADDTOKSTATE = 0x0802;
+const u_int32_t jerrno::JERR_WMGR_ENQDISCONT = 0x0803;
+const u_int32_t jerrno::JERR_WMGR_DEQDISCONT = 0x0804;
+const u_int32_t jerrno::JERR_WMGR_DEQRIDNOTENQ = 0x0805;
+
+// class rmgr
+const u_int32_t jerrno::JERR_RMGR_UNKNOWNMAGIC = 0x0900;
+const u_int32_t jerrno::JERR_RMGR_RIDMISMATCH = 0x0901;
+//const u_int32_t jerrno::JERR_RMGR_FIDMISMATCH = 0x0902;
+const u_int32_t jerrno::JERR_RMGR_ENQSTATE = 0x0903;
+const u_int32_t jerrno::JERR_RMGR_BADRECTYPE = 0x0904;
+
+// class data_tok
+const u_int32_t jerrno::JERR_DTOK_ILLEGALSTATE = 0x0a00;
+// const u_int32_t jerrno::JERR_DTOK_RIDNOTSET = 0x0a01;
+
+// class enq_map, txn_map
+const u_int32_t jerrno::JERR_MAP_DUPLICATE = 0x0b00;
+const u_int32_t jerrno::JERR_MAP_NOTFOUND = 0x0b01;
+const u_int32_t jerrno::JERR_MAP_LOCKED = 0x0b02;
+
+// class jinf
+const u_int32_t jerrno::JERR_JINF_CVALIDFAIL = 0x0c00;
+const u_int32_t jerrno::JERR_JINF_NOVALUESTR = 0x0c01;
+const u_int32_t jerrno::JERR_JINF_BADVALUESTR = 0x0c02;
+const u_int32_t jerrno::JERR_JINF_JDATEMPTY = 0x0c03;
+const u_int32_t jerrno::JERR_JINF_TOOMANYFILES = 0x0c04;
+const u_int32_t jerrno::JERR_JINF_INVALIDFHDR = 0x0c05;
+const u_int32_t jerrno::JERR_JINF_STAT = 0x0c06;
+const u_int32_t jerrno::JERR_JINF_NOTREGFILE = 0x0c07;
+const u_int32_t jerrno::JERR_JINF_BADFILESIZE = 0x0c08;
+const u_int32_t jerrno::JERR_JINF_OWIBAD = 0x0c09;
+const u_int32_t jerrno::JERR_JINF_ZEROLENFILE = 0x0c0a;
+
+// Negative returns for some functions
+const int32_t jerrno::AIO_TIMEOUT = -1;
+const int32_t jerrno::LOCK_TAKEN = -2;
+
+
+// static initialization fn
+
+bool
+jerrno::__init()
+{
+ // generic errors
+ _err_map[JERR__MALLOC] = "JERR__MALLOC: Buffer memory allocation failed.";
+ _err_map[JERR__UNDERFLOW] = "JERR__UNDERFLOW: Underflow error";
+ _err_map[JERR__NINIT] = "JERR__NINIT: Operation on uninitialized class.";
+ _err_map[JERR__AIO] = "JERR__AIO: AIO error.";
+ _err_map[JERR__FILEIO] = "JERR__FILEIO: File read or write failure.";
+ _err_map[JERR__RTCLOCK] = "JERR__RTCLOCK: Reading real-time clock failed.";
+ _err_map[JERR__PTHREAD] = "JERR__PTHREAD: pthread failure.";
+ _err_map[JERR__TIMEOUT] = "JERR__TIMEOUT: Timeout waiting for event.";
+ _err_map[JERR__UNEXPRESPONSE] = "JERR__UNEXPRESPONSE: Unexpected response to call or event.";
+ _err_map[JERR__RECNFOUND] = "JERR__RECNFOUND: Record not found.";
+ _err_map[JERR__NOTIMPL] = "JERR__NOTIMPL: Not implemented";
+
+ // class jcntl
+ _err_map[JERR_JCNTL_STOPPED] = "JERR_JCNTL_STOPPED: Operation on stopped journal.";
+ _err_map[JERR_JCNTL_READONLY] = "JERR_JCNTL_READONLY: Write operation on read-only journal (during recovery).";
+ _err_map[JERR_JCNTL_AIOCMPLWAIT] = "JERR_JCNTL_AIOCMPLWAIT: Timeout waiting for AIOs to complete.";
+ _err_map[JERR_JCNTL_UNKNOWNMAGIC] = "JERR_JCNTL_UNKNOWNMAGIC: Found record with unknown magic.";
+ _err_map[JERR_JCNTL_NOTRECOVERED] = "JERR_JCNTL_NOTRECOVERED: Operation requires recover() to be run first.";
+ _err_map[JERR_JCNTL_RECOVERJFULL] = "JERR_JCNTL_RECOVERJFULL: Journal data files full, cannot write.";
+ _err_map[JERR_JCNTL_OWIMISMATCH] = "JERR_JCNTL_OWIMISMATCH: Overwrite Indicator (OWI) change found in unexpected location.";
+
+ // class jdir
+ _err_map[JERR_JDIR_NOTDIR] = "JERR_JDIR_NOTDIR: Directory name exists but is not a directory.";
+ _err_map[JERR_JDIR_MKDIR] = "JERR_JDIR_MKDIR: Directory creation failed.";
+ _err_map[JERR_JDIR_OPENDIR] = "JERR_JDIR_OPENDIR: Directory open failed.";
+ _err_map[JERR_JDIR_READDIR] = "JERR_JDIR_READDIR: Directory read failed.";
+ _err_map[JERR_JDIR_CLOSEDIR] = "JERR_JDIR_CLOSEDIR: Directory close failed.";
+ _err_map[JERR_JDIR_RMDIR] = "JERR_JDIR_RMDIR: Directory delete failed.";
+ _err_map[JERR_JDIR_NOSUCHFILE] = "JERR_JDIR_NOSUCHFILE: File does not exist.";
+ _err_map[JERR_JDIR_FMOVE] = "JERR_JDIR_FMOVE: File move failed.";
+ _err_map[JERR_JDIR_STAT] = "JERR_JDIR_STAT: File stat failed.";
+ _err_map[JERR_JDIR_UNLINK] = "JERR_JDIR_UNLINK: File delete failed.";
+ _err_map[JERR_JDIR_BADFTYPE] = "JERR_JDIR_BADFTYPE: Bad or unknown file type (stat mode).";
+
+ // class fcntl
+ _err_map[JERR_FCNTL_OPENWR] = "JERR_FCNTL_OPENWR: Unable to open file for write.";
+ _err_map[JERR_FCNTL_WRITE] = "JERR_FCNTL_WRITE: Unable to write to file.";
+ _err_map[JERR_FCNTL_CLOSE] = "JERR_FCNTL_CLOSE: File close failed.";
+ _err_map[JERR_FCNTL_FILEOFFSOVFL] = "JERR_FCNTL_FILEOFFSOVFL: Attempted to increase file offset past file size.";
+ _err_map[JERR_FCNTL_CMPLOFFSOVFL] = "JERR_FCNTL_CMPLOFFSOVFL: Attempted to increase completed file offset past submitted offset.";
+ _err_map[JERR_FCNTL_RDOFFSOVFL] = "JERR_FCNTL_RDOFFSOVFL: Attempted to increase read offset past write offset.";
+
+ // class lfmgr
+ _err_map[JERR_LFMGR_BADAEFNUMLIM] = "JERR_LFMGR_BADAEFNUMLIM: Auto-expand file number limit lower than initial number of journal files.";
+ _err_map[JERR_LFMGR_AEFNUMLIMIT] = "JERR_LFMGR_AEFNUMLIMIT: Exceeded auto-expand file number limit.";
+ _err_map[JERR_LFMGR_AEDISABLED] = "JERR_LFMGR_AEDISABLED: Attempted to expand with auto-expand disabled.";
+
+ // class rrfc
+ _err_map[JERR_RRFC_OPENRD] = "JERR_RRFC_OPENRD: Unable to open file for read.";
+
+ // class jrec, enq_rec, deq_rec, txn_rec
+ _err_map[JERR_JREC_BADRECHDR] = "JERR_JREC_BADRECHDR: Invalid data record header.";
+ _err_map[JERR_JREC_BADRECTAIL] = "JERR_JREC_BADRECTAIL: Invalid data record tail.";
+
+ // class wmgr
+ _err_map[JERR_WMGR_BADPGSTATE] = "JERR_WMGR_BADPGSTATE: Page buffer in illegal state for operation.";
+ _err_map[JERR_WMGR_BADDTOKSTATE] = "JERR_WMGR_BADDTOKSTATE: Data token in illegal state for operation.";
+ _err_map[JERR_WMGR_ENQDISCONT] = "JERR_WMGR_ENQDISCONT: Enqueued new dtok when previous enqueue returned partly completed (state ENQ_PART).";
+ _err_map[JERR_WMGR_DEQDISCONT] = "JERR_WMGR_DEQDISCONT: Dequeued new dtok when previous dequeue returned partly completed (state DEQ_PART).";
+ _err_map[JERR_WMGR_DEQRIDNOTENQ] = "JERR_WMGR_DEQRIDNOTENQ: Dequeue rid is not enqueued.";
+
+ // class rmgr
+ _err_map[JERR_RMGR_UNKNOWNMAGIC] = "JERR_RMGR_UNKNOWNMAGIC: Found record with unknown magic.";
+ _err_map[JERR_RMGR_RIDMISMATCH] = "JERR_RMGR_RIDMISMATCH: RID mismatch between current record and dtok RID";
+ //_err_map[JERR_RMGR_FIDMISMATCH] = "JERR_RMGR_FIDMISMATCH: FID mismatch between emap and rrfc";
+ _err_map[JERR_RMGR_ENQSTATE] = "JERR_RMGR_ENQSTATE: Attempted read when data token wstate was not ENQ";
+ _err_map[JERR_RMGR_BADRECTYPE] = "JERR_RMGR_BADRECTYPE: Attempted operation on inappropriate record type";
+
+ // class data_tok
+ _err_map[JERR_DTOK_ILLEGALSTATE] = "JERR_DTOK_ILLEGALSTATE: Attempted to change to illegal state.";
+ //_err_map[JERR_DTOK_RIDNOTSET] = "JERR_DTOK_RIDNOTSET: Record ID not set.";
+
+ // class enq_map, txn_map
+ _err_map[JERR_MAP_DUPLICATE] = "JERR_MAP_DUPLICATE: Attempted to insert record into map using duplicate key.";
+ _err_map[JERR_MAP_NOTFOUND] = "JERR_MAP_NOTFOUND: Key not found in map.";
+ _err_map[JERR_MAP_LOCKED] = "JERR_MAP_LOCKED: Record ID locked by a pending transaction.";
+
+ // class jinf
+ _err_map[JERR_JINF_CVALIDFAIL] = "JERR_JINF_CVALIDFAIL: Journal compatibility validation failure.";
+ _err_map[JERR_JINF_NOVALUESTR] = "JERR_JINF_NOVALUESTR: No value attribute found in jinf file.";
+ _err_map[JERR_JINF_BADVALUESTR] = "JERR_JINF_BADVALUESTR: Bad format for value attribute in jinf file";
+ _err_map[JERR_JINF_JDATEMPTY] = "JERR_JINF_JDATEMPTY: Journal data files empty.";
+ _err_map[JERR_JINF_TOOMANYFILES] = "JERR_JINF_TOOMANYFILES: Too many journal data files.";
+ _err_map[JERR_JINF_INVALIDFHDR] = "JERR_JINF_INVALIDFHDR: Invalid journal data file header";
+ _err_map[JERR_JINF_STAT] = "JERR_JINF_STAT: Error while trying to stat a journal data file";
+ _err_map[JERR_JINF_NOTREGFILE] = "JERR_JINF_NOTREGFILE: Target journal data file is not a regular file";
+ _err_map[JERR_JINF_BADFILESIZE] = "JERR_JINF_BADFILESIZE: Journal data file is of incorrect or unexpected size";
+ _err_map[JERR_JINF_OWIBAD] = "JERR_JINF_OWIBAD: Journal data files have inconsistent OWI flags; >1 transition found in non-auto-expand or min-size journal";
+ _err_map[JERR_JINF_ZEROLENFILE] = "JERR_JINF_ZEROLENFILE: Journal info file zero length";
+
+ //_err_map[] = "";
+
+ return true;
+}
+
+const char*
+jerrno::err_msg(const u_int32_t err_no) throw ()
+{
+ _err_map_itr = _err_map.find(err_no);
+ if (_err_map_itr == _err_map.end())
+ return "<Unknown error code>";
+ return _err_map_itr->second;
+}
+
+} // namespace journal
+} // namespace mrg
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/jerrno.h b/qpid/cpp/src/qpid/legacystore/jrnl/jerrno.h
new file mode 100644
index 0000000000..4c8b71c423
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/jerrno.h
@@ -0,0 +1,173 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file jerrno.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::jerrno (journal error
+ * codes). See class documentation for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_JERRNO_H
+#define QPID_LEGACYSTORE_JRNL_JERRNO_H
+
+namespace mrg
+{
+namespace journal
+{
+class jerrno;
+}
+}
+
+#include <map>
+#include <string>
+#include <sys/types.h>
+
+namespace mrg
+{
+namespace journal
+{
+
+ /**
+ * \class jerrno
+ * \brief Class containing static error definitions and static map for error messages.
+ */
+ class jerrno
+ {
+ static std::map<u_int32_t, const char*> _err_map; ///< Map of error messages
+ static std::map<u_int32_t, const char*>::iterator _err_map_itr; ///< Iterator
+ static bool _initialized; ///< Dummy flag, used to initialise map.
+
+ public:
+ // generic errors
+ static const u_int32_t JERR__MALLOC; ///< Buffer memory allocation failed
+ static const u_int32_t JERR__UNDERFLOW; ///< Underflow error
+ static const u_int32_t JERR__NINIT; ///< Operation on uninitialized class
+ static const u_int32_t JERR__AIO; ///< AIO failure
+ static const u_int32_t JERR__FILEIO; ///< File read or write failure
+ static const u_int32_t JERR__RTCLOCK; ///< Reading real-time clock failed
+ static const u_int32_t JERR__PTHREAD; ///< pthread failure
+ static const u_int32_t JERR__TIMEOUT; ///< Timeout waiting for an event
+ static const u_int32_t JERR__UNEXPRESPONSE; ///< Unexpected response to call or event
+ static const u_int32_t JERR__RECNFOUND; ///< Record not found
+ static const u_int32_t JERR__NOTIMPL; ///< Not implemented
+
+ // class jcntl
+ static const u_int32_t JERR_JCNTL_STOPPED; ///< Operation on stopped journal
+ static const u_int32_t JERR_JCNTL_READONLY; ///< Write operation on read-only journal
+ static const u_int32_t JERR_JCNTL_AIOCMPLWAIT; ///< Timeout waiting for AIOs to complete
+ static const u_int32_t JERR_JCNTL_UNKNOWNMAGIC; ///< Found record with unknown magic
+ static const u_int32_t JERR_JCNTL_NOTRECOVERED; ///< Req' recover() to be called first
+ static const u_int32_t JERR_JCNTL_RECOVERJFULL; ///< Journal data files full, cannot write
+ static const u_int32_t JERR_JCNTL_OWIMISMATCH; ///< OWI change found in unexpected location
+
+ // class jdir
+ static const u_int32_t JERR_JDIR_NOTDIR; ///< Exists but is not a directory
+ static const u_int32_t JERR_JDIR_MKDIR; ///< Directory creation failed
+ static const u_int32_t JERR_JDIR_OPENDIR; ///< Directory open failed
+ static const u_int32_t JERR_JDIR_READDIR; ///< Directory read failed
+ static const u_int32_t JERR_JDIR_CLOSEDIR; ///< Directory close failed
+ static const u_int32_t JERR_JDIR_RMDIR; ///< Directory delete failed
+ static const u_int32_t JERR_JDIR_NOSUCHFILE; ///< File does not exist
+ static const u_int32_t JERR_JDIR_FMOVE; ///< File move failed
+ static const u_int32_t JERR_JDIR_STAT; ///< File stat failed
+ static const u_int32_t JERR_JDIR_UNLINK; ///< File delete failed
+ static const u_int32_t JERR_JDIR_BADFTYPE; ///< Bad or unknown file type (stat mode)
+
+ // class fcntl
+ static const u_int32_t JERR_FCNTL_OPENWR; ///< Unable to open file for write
+ static const u_int32_t JERR_FCNTL_WRITE; ///< Unable to write to file
+ static const u_int32_t JERR_FCNTL_CLOSE; ///< File close failed
+ static const u_int32_t JERR_FCNTL_FILEOFFSOVFL; ///< Increased offset past file size
+ static const u_int32_t JERR_FCNTL_CMPLOFFSOVFL; ///< Increased cmpl offs past subm offs
+ static const u_int32_t JERR_FCNTL_RDOFFSOVFL; ///< Increased read offs past write offs
+
+ // class lfmgr
+ static const u_int32_t JERR_LFMGR_BADAEFNUMLIM; ///< Bad auto-expand file number limit
+ static const u_int32_t JERR_LFMGR_AEFNUMLIMIT; ///< Exceeded auto-expand file number limit
+ static const u_int32_t JERR_LFMGR_AEDISABLED; ///< Attempted to expand with auto-expand disabled
+
+ // class rrfc
+ static const u_int32_t JERR_RRFC_OPENRD; ///< Unable to open file for read
+
+ // class jrec, enq_rec, deq_rec, txn_rec
+ static const u_int32_t JERR_JREC_BADRECHDR; ///< Invalid data record header
+ static const u_int32_t JERR_JREC_BADRECTAIL; ///< Invalid data record tail
+
+ // class wmgr
+ static const u_int32_t JERR_WMGR_BADPGSTATE; ///< Page buffer in illegal state.
+ static const u_int32_t JERR_WMGR_BADDTOKSTATE; ///< Data token in illegal state.
+ static const u_int32_t JERR_WMGR_ENQDISCONT; ///< Enq. new dtok when previous part compl.
+ static const u_int32_t JERR_WMGR_DEQDISCONT; ///< Deq. new dtok when previous part compl.
+ static const u_int32_t JERR_WMGR_DEQRIDNOTENQ; ///< Deq. rid not enqueued
+
+ // class rmgr
+ static const u_int32_t JERR_RMGR_UNKNOWNMAGIC; ///< Found record with unknown magic
+ static const u_int32_t JERR_RMGR_RIDMISMATCH; ///< RID mismatch between rec and dtok
+ //static const u_int32_t JERR_RMGR_FIDMISMATCH; ///< FID mismatch between emap and rrfc
+ static const u_int32_t JERR_RMGR_ENQSTATE; ///< Attempted read when wstate not ENQ
+ static const u_int32_t JERR_RMGR_BADRECTYPE; ///< Attempted op on incorrect rec type
+
+ // class data_tok
+ static const u_int32_t JERR_DTOK_ILLEGALSTATE; ///< Attempted to change to illegal state
+// static const u_int32_t JERR_DTOK_RIDNOTSET; ///< Record ID not set
+
+ // class enq_map, txn_map
+ static const u_int32_t JERR_MAP_DUPLICATE; ///< Attempted to insert using duplicate key
+ static const u_int32_t JERR_MAP_NOTFOUND; ///< Key not found in map
+ static const u_int32_t JERR_MAP_LOCKED; ///< rid locked by pending txn
+
+ // class jinf
+ static const u_int32_t JERR_JINF_CVALIDFAIL; ///< Compatibility validation failure
+ static const u_int32_t JERR_JINF_NOVALUESTR; ///< No value attr found in jinf file
+ static const u_int32_t JERR_JINF_BADVALUESTR; ///< Bad format for value attr in jinf file
+ static const u_int32_t JERR_JINF_JDATEMPTY; ///< Journal data files empty
+ static const u_int32_t JERR_JINF_TOOMANYFILES; ///< Too many journal data files
+ static const u_int32_t JERR_JINF_INVALIDFHDR; ///< Invalid file header
+ static const u_int32_t JERR_JINF_STAT; ///< Error while trying to stat a file
+ static const u_int32_t JERR_JINF_NOTREGFILE; ///< Target file is not a regular file
+ static const u_int32_t JERR_JINF_BADFILESIZE; ///< File is of incorrect or unexpected size
+ static const u_int32_t JERR_JINF_OWIBAD; ///< OWI inconsistent (>1 transition in non-ae journal)
+ static const u_int32_t JERR_JINF_ZEROLENFILE; ///< Journal info file is zero length (empty).
+
+ // Negative returns for some functions
+ static const int32_t AIO_TIMEOUT; ///< Timeout waiting for AIO return
+ static const int32_t LOCK_TAKEN; ///< Attempted to take lock, but it was taken by another thread
+ /**
+ * \brief Method to access error message from known error number.
+ */
+ static const char* err_msg(const u_int32_t err_no) throw ();
+
+ private:
+ /**
+ * \brief Static function to initialize map.
+ */
+ static bool __init();
+ };
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_JERRNO_H
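
The error-code lookup declared above reduces to a static map query; a short sketch of err_msg() behaviour for both a known and an unregistered code:

    #include "qpid/legacystore/jrnl/jerrno.h"
    #include <iostream>

    int main()
    {
        using mrg::journal::jerrno;
        // Known code: prints the text registered in jerrno::__init(),
        // e.g. "JERR_JDIR_MKDIR: Directory creation failed."
        std::cout << jerrno::err_msg(jerrno::JERR_JDIR_MKDIR) << std::endl;
        // Unregistered code: err_msg() falls back to "<Unknown error code>".
        std::cout << jerrno::err_msg(0xffff) << std::endl;
        return 0;
    }
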
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/jexception.cpp b/qpid/cpp/src/qpid/legacystore/jrnl/jexception.cpp
new file mode 100644
index 0000000000..5c571020e4
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/jexception.cpp
@@ -0,0 +1,183 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file jexception.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * Generic journal exception class mrg::journal::jexception. See comments
+ * in file jexception.h for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#include "qpid/legacystore/jrnl/jexception.h"
+
+#include <iomanip>
+#include <sstream>
+#include "qpid/legacystore/jrnl/jerrno.h"
+
+#define CATLEN(p) MAX_MSG_SIZE - std::strlen(p) - 1
+
+namespace mrg
+{
+namespace journal
+{
+
+jexception::jexception() throw ():
+ std::exception(),
+ _err_code(0)
+{
+ format();
+}
+
+jexception::jexception(const u_int32_t err_code) throw ():
+ std::exception(),
+ _err_code(err_code)
+{
+ format();
+}
+
+jexception::jexception(const char* additional_info) throw ():
+ std::exception(),
+ _err_code(0),
+ _additional_info(additional_info)
+{
+ format();
+}
+
+jexception::jexception(const std::string& additional_info) throw ():
+ std::exception(),
+ _err_code(0),
+ _additional_info(additional_info)
+{
+ format();
+}
+
+jexception::jexception(const u_int32_t err_code, const char* additional_info) throw ():
+ std::exception(),
+ _err_code(err_code),
+ _additional_info(additional_info)
+{
+ format();
+}
+
+jexception::jexception(const u_int32_t err_code, const std::string& additional_info) throw ():
+ std::exception(),
+ _err_code(err_code),
+ _additional_info(additional_info)
+{
+ format();
+}
+
+jexception::jexception(const u_int32_t err_code, const char* throwing_class,
+ const char* throwing_fn) throw ():
+ std::exception(),
+ _err_code(err_code),
+ _throwing_class(throwing_class),
+ _throwing_fn(throwing_fn)
+{
+ format();
+}
+
+jexception::jexception(const u_int32_t err_code, const std::string& throwing_class,
+ const std::string& throwing_fn) throw ():
+ std::exception(),
+ _err_code(err_code),
+ _throwing_class(throwing_class),
+ _throwing_fn(throwing_fn)
+{
+ format();
+}
+
+jexception::jexception(const u_int32_t err_code, const char* additional_info,
+ const char* throwing_class, const char* throwing_fn) throw ():
+ std::exception(),
+ _err_code(err_code),
+ _additional_info(additional_info),
+ _throwing_class(throwing_class),
+ _throwing_fn(throwing_fn)
+{
+ format();
+}
+
+jexception::jexception(const u_int32_t err_code, const std::string& additional_info,
+ const std::string& throwing_class, const std::string& throwing_fn) throw ():
+ std::exception(),
+ _err_code(err_code),
+ _additional_info(additional_info),
+ _throwing_class(throwing_class),
+ _throwing_fn(throwing_fn)
+{
+ format();
+}
+
+jexception::~jexception() throw ()
+{}
+
+void
+jexception::format()
+{
+ const bool ai = !_additional_info.empty();
+ const bool tc = !_throwing_class.empty();
+ const bool tf = !_throwing_fn.empty();
+ std::ostringstream oss;
+ oss << "jexception 0x" << std::hex << std::setfill('0') << std::setw(4) << _err_code << " ";
+ if (tc)
+ {
+ oss << _throwing_class;
+ if (tf)
+ oss << "::";
+ else
+ oss << " ";
+ }
+ if (tf)
+ oss << _throwing_fn << "() ";
+ if (tc || tf)
+ oss << "threw " << jerrno::err_msg(_err_code);
+ if (ai)
+ oss << " (" << _additional_info << ")";
+ _what.assign(oss.str());
+}
+
+const char*
+jexception::what() const throw ()
+{
+ return _what.c_str();
+}
+
+std::ostream&
+operator<<(std::ostream& os, const jexception& je)
+{
+ os << je.what();
+ return os;
+}
+
+std::ostream&
+operator<<(std::ostream& os, const jexception* jePtr)
+{
+ os << jePtr->what();
+ return os;
+}
+
+} // namespace journal
+} // namespace mrg
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/jexception.h b/qpid/cpp/src/qpid/legacystore/jrnl/jexception.h
new file mode 100644
index 0000000000..34d8373235
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/jexception.h
@@ -0,0 +1,142 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file jexception.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * Generic journal exception class mrg::journal::jexception (derived
+ * from class std::exception). Intended to serve as a common exception
+ * class for all more specialized exceptions in the message journal. See
+ * class documentation for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_JEXCEPTION_H
+#define QPID_LEGACYSTORE_JRNL_JEXCEPTION_H
+
+namespace mrg
+{
+namespace journal
+{
+class jexception;
+}
+}
+
+#include <cerrno>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <exception>
+#include "qpid/legacystore/jrnl/jerrno.h"
+#include <sstream>
+#include <string>
+#include <sys/types.h>
+
+// Macro for formatting common system errors
+#define FORMAT_SYSERR(errno) " errno=" << errno << " (" << std::strerror(errno) << ")"
+
+#define MALLOC_CHK(ptr, var, cls, fn) if(ptr == 0) { \
+ clean(); \
+ std::ostringstream oss; \
+ oss << var << ": malloc() failed: " << FORMAT_SYSERR(errno); \
+ throw jexception(jerrno::JERR__MALLOC, oss.str(), cls, fn); \
+ }
+
+// TODO: The following is a temporary bug-tracking aid which forces a core.
+// Replace with the commented out version below when BZ484048 is resolved.
+#define PTHREAD_CHK(err, pfn, cls, fn) if(err != 0) { \
+ std::ostringstream oss; \
+ oss << cls << "::" << fn << "(): " << pfn; \
+ errno = err; \
+ ::perror(oss.str().c_str()); \
+ ::abort(); \
+ }
+/*
+#define PTHREAD_CHK(err, pfn, cls, fn) if(err != 0) { \
+ std::ostringstream oss; \
+ oss << pfn << " failed: " << FORMAT_SYSERR(err); \
+ throw jexception(jerrno::JERR__PTHREAD, oss.str(), cls, fn); \
+ }
+*/
+
+#define ASSERT(cond, msg) if(cond == 0) { \
+ std::cerr << msg << std::endl; \
+ ::abort(); \
+ }
+
+namespace mrg
+{
+namespace journal
+{
+ /**
+ * \class jexception
+ * \brief Generic journal exception class
+ */
+ class jexception : public std::exception
+ {
+ private:
+ u_int32_t _err_code;
+ std::string _additional_info;
+ std::string _throwing_class;
+ std::string _throwing_fn;
+ std::string _what;
+ void format();
+
+ public:
+ jexception() throw ();
+
+ jexception(const u_int32_t err_code) throw ();
+
+ jexception(const char* additional_info) throw ();
+ jexception(const std::string& additional_info) throw ();
+
+ jexception(const u_int32_t err_code, const char* additional_info) throw ();
+ jexception(const u_int32_t err_code, const std::string& additional_info) throw ();
+
+ jexception(const u_int32_t err_code, const char* throwing_class, const char* throwing_fn)
+ throw ();
+ jexception(const u_int32_t err_code, const std::string& throwing_class,
+ const std::string& throwing_fn) throw ();
+
+ jexception(const u_int32_t err_code, const char* additional_info,
+ const char* throwing_class, const char* throwing_fn) throw ();
+ jexception(const u_int32_t err_code, const std::string& additional_info,
+ const std::string& throwing_class, const std::string& throwing_fn) throw ();
+
+ virtual ~jexception() throw ();
+ virtual const char* what() const throw (); // override std::exception::what()
+
+ inline u_int32_t err_code() const throw () { return _err_code; }
+ inline const std::string additional_info() const throw () { return _additional_info; }
+ inline const std::string throwing_class() const throw () { return _throwing_class; }
+ inline const std::string throwing_fn() const throw () { return _throwing_fn; }
+
+ friend std::ostream& operator<<(std::ostream& os, const jexception& je);
+ friend std::ostream& operator<<(std::ostream& os, const jexception* jePtr);
+ }; // class jexception
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_JEXCEPTION_H
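
A sketch of the throw/catch pattern used with jexception throughout the journal code (compare jdir::create_dir() above); ensure_dir() and the path below are made up for illustration:

    #include "qpid/legacystore/jrnl/jexception.h"
    #include <iostream>
    #include <sstream>
    #include <sys/stat.h>

    // Hypothetical helper: build a message with FORMAT_SYSERR and throw with
    // the jerrno code plus the throwing class and function names.
    void ensure_dir(const std::string& name)
    {
        if (::mkdir(name.c_str(), S_IRWXU) && errno != EEXIST)
        {
            std::ostringstream oss;
            oss << "dir=\"" << name << "\"" << FORMAT_SYSERR(errno);
            throw mrg::journal::jexception(mrg::journal::jerrno::JERR_JDIR_MKDIR,
                                           oss.str(), "example", "ensure_dir");
        }
    }

    int main()
    {
        try
        {
            ensure_dir("/no-permission/journal"); // illustrative path expected to fail
        }
        catch (const mrg::journal::jexception& e)
        {
            // what() reads e.g.: jexception 0x0301 example::ensure_dir() threw
            // JERR_JDIR_MKDIR: Directory creation failed. (dir="..." errno=...)
            std::cerr << e.what() << std::endl;
        }
        return 0;
    }
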
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/jinf.cpp b/qpid/cpp/src/qpid/legacystore/jrnl/jinf.cpp
new file mode 100644
index 0000000000..4117bd3581
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/jinf.cpp
@@ -0,0 +1,540 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file jinf.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * This file contains the code for the mrg::journal::jinf class.
+ *
+ * See jinf.h comments for details of this class.
+ *
+ * \author Kim van der Riet
+ */
+
+#include "jrnl/jinf.h"
+
+#include <cstdlib>
+#include <cstring>
+#include <ctime>
+#include <fstream>
+#include "qpid/legacystore/jrnl/file_hdr.h"
+#include "qpid/legacystore/jrnl/jcntl.h"
+#include "qpid/legacystore/jrnl/jerrno.h"
+#include "qpid/legacystore/jrnl/lp_map.h"
+#include <sstream>
+#include <sys/stat.h>
+
+namespace mrg
+{
+namespace journal
+{
+
+jinf::jinf(const std::string& jinf_filename, bool validate_flag):
+ _jver(0),
+ _filename(jinf_filename),
+ _num_jfiles(0),
+ _ae(false),
+ _ae_max_jfiles(0),
+ _jfsize_sblks(0),
+ _sblk_size_dblks(0),
+ _dblk_size(0),
+ _wcache_pgsize_sblks(0),
+ _wcache_num_pages(0),
+ _rcache_pgsize_sblks(0),
+ _rcache_num_pages(0),
+ _tm_ptr(0),
+ _valid_flag(false),
+ _analyzed_flag(false),
+ _initial_owi(false),
+ _frot(false)
+{
+ read(_filename);
+ if (validate_flag)
+ validate();
+}
+
+jinf::jinf(const std::string& jid, const std::string& jdir, const std::string& base_filename, const u_int16_t num_jfiles,
+ const bool auto_expand, const u_int16_t ae_max_jfiles, const u_int32_t jfsize_sblks,
+ const u_int32_t wcache_pgsize_sblks, const u_int16_t wcache_num_pages, const timespec& ts):
+ _jver(RHM_JDAT_VERSION),
+ _jid(jid),
+ _jdir(jdir),
+ _base_filename(base_filename),
+ _ts(ts),
+ _num_jfiles(num_jfiles),
+ _ae(auto_expand),
+ _ae_max_jfiles(ae_max_jfiles),
+ _jfsize_sblks(jfsize_sblks),
+ _sblk_size_dblks(JRNL_SBLK_SIZE),
+ _dblk_size(JRNL_DBLK_SIZE),
+ _wcache_pgsize_sblks(wcache_pgsize_sblks),
+ _wcache_num_pages(wcache_num_pages),
+ _rcache_pgsize_sblks(JRNL_RMGR_PAGE_SIZE),
+ _rcache_num_pages(JRNL_RMGR_PAGES),
+ _tm_ptr(std::localtime(&ts.tv_sec)),
+ _valid_flag(false),
+ _analyzed_flag(false),
+ _initial_owi(false),
+ _frot(false) // initialize here too so get_frot() never reads an indeterminate value
+{
+ set_filename();
+}
+
+jinf::~jinf()
+{}
+
+void
+jinf::validate()
+{
+ bool err = false;
+ std::ostringstream oss;
+ if (_jver != RHM_JDAT_VERSION)
+ {
+ oss << "File \"" << _filename << "\": ";
+ oss << "RHM_JDAT_VERSION mismatch: found=" << (int)_jver;
+ oss << "; required=" << RHM_JDAT_VERSION << std::endl;
+ err = true;
+ }
+ if (_num_jfiles < JRNL_MIN_NUM_FILES)
+ {
+ oss << "File \"" << _filename << "\": ";
+ oss << "Number of journal files too small: found=" << _num_jfiles;
+ oss << "; minimum=" << JRNL_MIN_NUM_FILES << std::endl;
+ err = true;
+ }
+ if (_num_jfiles > JRNL_MAX_NUM_FILES)
+ {
+ oss << "File \"" << _filename << "\": ";
+ oss << "Number of journal files too large: found=" << _num_jfiles;
+ oss << "; maximum=" << JRNL_MAX_NUM_FILES << std::endl;
+ err = true;
+ }
+ if (_ae)
+ {
+ if (_ae_max_jfiles < _num_jfiles)
+ {
+ oss << "File \"" << _filename << "\": ";
+ oss << "Number of journal files exceeds auto-expansion limit: found=" << _num_jfiles;
+ oss << "; maximum=" << _ae_max_jfiles;
+ err = true;
+ }
+ if (_ae_max_jfiles > JRNL_MAX_NUM_FILES)
+ {
+ oss << "File \"" << _filename << "\": ";
+ oss << "Auto-expansion file limit too large: found=" << _ae_max_jfiles;
+ oss << "; maximum=" << JRNL_MAX_NUM_FILES;
+ err = true;
+ }
+ }
+ if (_jfsize_sblks < JRNL_MIN_FILE_SIZE)
+ {
+ oss << "File \"" << _filename << "\": ";
+ oss << "Journal file size too small: found=" << _jfsize_sblks;
+ oss << "; minimum=" << JRNL_MIN_FILE_SIZE << " (sblks)" << std::endl;
+ err = true;
+ }
+ if (_sblk_size_dblks != JRNL_SBLK_SIZE)
+ {
+ oss << "File \"" << _filename << "\": ";
+ oss << "JRNL_SBLK_SIZE mismatch: found=" << _sblk_size_dblks;
+ oss << "; required=" << JRNL_SBLK_SIZE << std::endl;
+ err = true;
+ }
+ if (_dblk_size != JRNL_DBLK_SIZE)
+ {
+ oss << "File \"" << _filename << "\": ";
+ oss << "JRNL_DBLK_SIZE mismatch: found=" << _dblk_size;
+ oss << "; required=" << JRNL_DBLK_SIZE << std::endl;
+ err = true;
+ }
+ if (err)
+ throw jexception(jerrno::JERR_JINF_CVALIDFAIL, oss.str(), "jinf", "validate");
+ _valid_flag = true;
+}
+
+void
+jinf::analyze()
+{
+ lp_map early_map; // map for all owi flags same as pfid 0
+ lp_map late_map; // map for all owi flags opposite to pfid 0
+ bool late_latch = false; // latch for owi switchover
+
+ if (!_valid_flag)
+ validate();
+ bool done = false;
+ for (u_int16_t pfid=0; pfid<_num_jfiles && !done; pfid++)
+ {
+ std::ostringstream oss;
+ if (_jdir.at(_jdir.size() - 1) == '/')
+ oss << _jdir << _base_filename << ".";
+ else
+ oss << _jdir << "/" << _base_filename << ".";
+ oss << std::setw(4) << std::setfill('0') << std::hex << pfid;
+ oss << "." << JRNL_DATA_EXTENSION;
+
+ // Check size of each file is consistent and expected
+ u_int32_t fsize = get_filesize(oss.str());
+ if (fsize != (_jfsize_sblks + 1) * _sblk_size_dblks * _dblk_size)
+ {
+ std::ostringstream oss1;
+ oss1 << "File \"" << oss.str() << "\": size=" << fsize << "; expected=" << ((_jfsize_sblks + 1) * _sblk_size_dblks * _dblk_size);
+ throw jexception(jerrno::JERR_JINF_BADFILESIZE, oss1.str(), "jinf", "analyze");
+ }
+
+ std::ifstream jifs(oss.str().c_str());
+ if (!jifs.good())
+ throw jexception(jerrno::JERR__FILEIO, oss.str(), "jinf", "analyze");
+ file_hdr fhdr;
+ jifs.read((char*)&fhdr, sizeof(fhdr));
+ if (fhdr._magic != RHM_JDAT_FILE_MAGIC) // No file header
+ {
+ if (fhdr._magic != 0)
+ throw jexception(jerrno::JERR_JINF_INVALIDFHDR, oss.str(), "jinf", "analyze");
+ if (!pfid) // pfid 0 == lid 0 cannot be empty
+ throw jexception(jerrno::JERR_JINF_JDATEMPTY, oss.str(), "jinf", "analyze");
+ _frot = true;
+ done = true;
+ }
+ else
+ {
+ assert(pfid == fhdr._pfid);
+ if (pfid == 0)
+ {
+ _initial_owi = fhdr.get_owi();
+ early_map.insert(fhdr._lfid, pfid);
+ }
+ else
+ {
+ if (_initial_owi == fhdr.get_owi())
+ {
+ early_map.insert(fhdr._lfid, pfid);
+ if (late_latch && (!_ae || _num_jfiles == JRNL_MIN_NUM_FILES))
+ throw jexception(jerrno::JERR_JINF_OWIBAD, oss.str(), "jinf", "analyze");
+ }
+ else
+ {
+ late_map.insert(fhdr._lfid, pfid);
+ late_latch = true;
+ }
+ }
+ }
+ jifs.close();
+ } // for (pfid)
+
+ // If this is not the first rotation, all files should be in either early or late maps
+ if (!_frot) assert(early_map.size() + late_map.size() == _num_jfiles);
+
+ _pfid_list.clear();
+ late_map.get_pfid_list(_pfid_list);
+ early_map.get_pfid_list(_pfid_list);
+
+ // Check OWI consistency
+// for (u_int16_t lfid=0; lfid<_num_jfiles && !done; lfid++)
+// {
+// throw jexception(jerrno::JERR_JINF_OWIBAD, oss.str(), "jinf", "analyze");
+// }
+
+ _analyzed_flag = true;
+}
+
+void
+jinf::write()
+{
+ std::ostringstream oss;
+ oss << _jdir << "/" << _base_filename << "." << JRNL_INFO_EXTENSION;
+ std::ofstream of(oss.str().c_str(), std::ofstream::out | std::ofstream::trunc);
+ if (!of.good())
+ throw jexception(jerrno::JERR__FILEIO, oss.str(), "jinf", "write");
+ of << xml_str();
+ of.close();
+}
+
+u_int16_t
+jinf::incr_num_jfiles()
+{
+ if (_num_jfiles >= JRNL_MAX_NUM_FILES)
+ throw jexception(jerrno::JERR_JINF_TOOMANYFILES, "jinf", "incr_num_jfiles");
+ return ++_num_jfiles;
+}
+
+u_int16_t
+jinf::get_first_pfid()
+{
+ if (!_analyzed_flag)
+ analyze();
+ return *_pfid_list.begin();
+}
+
+u_int16_t
+jinf::get_last_pfid()
+{
+ if (!_analyzed_flag)
+ analyze();
+ return *_pfid_list.rbegin();
+}
+
+jinf::pfid_list&
+jinf::get_pfid_list()
+{
+ if (!_analyzed_flag)
+ analyze();
+ return _pfid_list;
+}
+
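+// Returns the pfid list rotated so that it begins with pfid 0. For example
+// (illustrative values), an analyzed _pfid_list of {3, 4, 0, 1, 2} is returned
+// in pfid_list as {0, 1, 2, 3, 4}.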
+void
+jinf::get_normalized_pfid_list(pfid_list& pfid_list)
+{
+ if (!_analyzed_flag)
+ analyze();
+ pfid_list.clear();
+ u_int16_t s = _pfid_list.size();
+ u_int16_t iz = 0; // index of 0 value
+ while (iz < s && _pfid_list[iz]) // bounds check before element access
+ iz++;
+ assert(_pfid_list[iz] == 0);
+ for (u_int16_t i = iz; i < iz + s; i++)
+ pfid_list.push_back(_pfid_list[i % s]);
+ assert(pfid_list[0] == 0);
+ assert(pfid_list.size() == s);
+}
+
+bool
+jinf::get_initial_owi()
+{
+ if (!_analyzed_flag)
+ analyze();
+ return _initial_owi;
+}
+
+bool
+jinf::get_frot()
+{
+ if (!_analyzed_flag)
+ analyze();
+ return _frot;
+}
+
+std::string
+jinf::to_string() const
+{
+ std::ostringstream oss;
+ oss << std::setfill('0');
+ oss << "Journal ID \"" << _jid << "\" initialized " << (_tm_ptr->tm_year + 1900) << "/";
+ oss << std::setw(2) << (_tm_ptr->tm_mon + 1) << "/" << std::setw(2) << _tm_ptr->tm_mday << " ";
+ oss << std::setw(2) << _tm_ptr->tm_hour << ":" << std::setw(2) << _tm_ptr->tm_min << ":";
+ oss << std::setw(2) << _tm_ptr->tm_sec << "." << std::setw(9) << _ts.tv_nsec << ":" << std::endl;
+ oss << " Journal directory: \"" << _jdir << "\"" << std::endl;
+ oss << " Journal base filename: \"" << _base_filename << "\"" << std::endl;
+ oss << " Journal version: " << (unsigned)_jver << std::endl;
+ oss << " Number of journal files: " << _num_jfiles << std::endl;
+// TODO: Uncomment these lines when auto-expand is enabled.
+// oss << " Auto-expand mode: " << (_ae ? "enabled" : "disabled") << std::endl;
+// if (_ae) oss << " Max. number of journal files (in auto-expand mode): " << _ae_max_jfiles << std::endl;
+ oss << " Journal file size: " << _jfsize_sblks << " sblks" << std::endl;
+ oss << " Softblock size (JRNL_SBLK_SIZE): " << _sblk_size_dblks << " dblks" << std::endl;
+ oss << " Datablock size (JRNL_DBLK_SIZE): " << _dblk_size << " bytes" << std::endl;
+ oss << " Write page size: " << _wcache_pgsize_sblks << " sblks" << std::endl;
+ oss << " Number of write pages: " << _wcache_num_pages << std::endl;
+ oss << " Read page size (JRNL_RMGR_PAGE_SIZE): " << _rcache_pgsize_sblks << " sblks" << std::endl;
+ oss << " Number of read pages (JRNL_RMGR_PAGES): " << _rcache_num_pages << std::endl;
+ return oss.str();
+}
+
+std::string
+jinf::xml_str() const
+{
+ // TODO: This is *not* an XML writer; rather, for simplicity, it uses literals. I'm sure a more elegant way can be
+ // found to do this using a real XML library...
+
+ std::ostringstream oss;
+ oss << std::setfill('0');
+ oss << "<?xml version=\"1.0\" ?>" << std::endl;
+ oss << "<jrnl>" << std::endl;
+ oss << " <journal_version value=\"" << (unsigned)_jver << "\" />" << std::endl;
+ oss << " <journal_id>" << std::endl;
+ oss << " <id_string value=\"" << _jid << "\" />" << std::endl;
+ oss << " <directory value=\"" << _jdir << "\" />" << std::endl;
+ oss << " <base_filename value=\"" << _base_filename << "\" />" << std::endl;
+ oss << " </journal_id>" << std::endl;
+ oss << " <creation_time>" << std::endl;
+ oss << " <seconds value=\"" << _ts.tv_sec << "\" />" << std::endl;
+ oss << " <nanoseconds value=\"" << _ts.tv_nsec << "\" />" << std::endl;
+ oss << " <string value=\"" << (_tm_ptr->tm_year + 1900) << "/";
+ oss << std::setw(2) << (_tm_ptr->tm_mon + 1) << "/" << std::setw(2) << _tm_ptr->tm_mday << " ";
+ oss << std::setw(2) << _tm_ptr->tm_hour << ":" << std::setw(2) << _tm_ptr->tm_min << ":";
+ oss << std::setw(2) << _tm_ptr->tm_sec << "." << std::setw(9) << _ts.tv_nsec;
+ oss << "\" />" << std::endl;
+ oss << " </creation_time>" << std::endl;
+ oss << " <journal_file_geometry>" << std::endl;
+ oss << " <number_jrnl_files value=\"" << _num_jfiles << "\" />" << std::endl;
+ oss << " <auto_expand value=\"" << (_ae ? "true" : "false") << "\" />" << std::endl;
+ if (_ae) oss << " <auto_expand_max_jrnl_files value=\"" << _ae_max_jfiles << "\" />" << std::endl;
+ oss << " <jrnl_file_size_sblks value=\"" << _jfsize_sblks << "\" />" << std::endl;
+ oss << " <JRNL_SBLK_SIZE value=\"" << _sblk_size_dblks << "\" />" << std::endl;
+ oss << " <JRNL_DBLK_SIZE value=\"" << _dblk_size << "\" />" << std::endl;
+ oss << " </journal_file_geometry>" << std::endl;
+ oss << " <cache_geometry>" << std::endl;
+ oss << " <wcache_pgsize_sblks value=\"" << _wcache_pgsize_sblks << "\" />" << std::endl;
+ oss << " <wcache_num_pages value=\"" << _wcache_num_pages << "\" />" << std::endl;
+ oss << " <JRNL_RMGR_PAGE_SIZE value=\"" << _rcache_pgsize_sblks << "\" />" << std::endl;
+ oss << " <JRNL_RMGR_PAGES value=\"" << _rcache_num_pages << "\" />" << std::endl;
+ oss << " </cache_geometry>" << std::endl;
+ oss << "</jrnl>" << std::endl;
+ return oss.str();
+}
+
+void
+jinf::set_filename()
+{
+ std::ostringstream oss;
+ oss << _jdir << "/" << _base_filename << "." << JRNL_INFO_EXTENSION;
+ _filename = oss.str().c_str();
+}
+
+void
+jinf::read(const std::string& jinf_filename)
+{
+ // TODO: This is *not* an XML reader; rather, for simplicity, it is a brute-force line reader that relies on string
+ // recognition. It relies on the format produced by xml_str() above; it will not handle an XML restructuring.
+ // *** Can it be replaced cheaply by a real XML reader? Should it be, or is this sufficient? ***
+
+ char buff[1024]; // limit of line input length
+ std::ifstream jinfs(jinf_filename.c_str());
+ if (!jinfs.good())
+ throw jexception(jerrno::JERR__FILEIO, jinf_filename.c_str(), "jinf", "read");
+ u_int32_t charcnt = 0;
+ while (jinfs.good())
+ {
+ jinfs.getline(buff, 1023);
+ charcnt += std::strlen(buff);
+ if (std::strstr(buff, "journal_version"))
+ _jver = u_int16_value(buff);
+ else if(std::strstr(buff, "id_string"))
+ string_value(_jid, buff);
+ else if(std::strstr(buff, "directory"))
+ string_value(_jdir, buff);
+ else if(std::strstr(buff, "base_filename"))
+ string_value(_base_filename, buff);
+ else if(std::strstr(buff, "number_jrnl_files"))
+ _num_jfiles = u_int16_value(buff);
+ else if(std::strstr(buff, "auto_expand_max_jrnl_files"))
+ _ae_max_jfiles = u_int16_value(buff);
+ else if(std::strstr(buff, "auto_expand"))
+ _ae = bool_value(buff);
+ else if(std::strstr(buff, "jrnl_file_size_sblks"))
+ _jfsize_sblks = u_int32_value(buff);
+ else if(std::strstr(buff, "JRNL_SBLK_SIZE"))
+ _sblk_size_dblks = u_int16_value(buff);
+ else if(std::strstr(buff, "JRNL_DBLK_SIZE"))
+ _dblk_size = u_int32_value(buff);
+ else if(std::strstr(buff, "wcache_pgsize_sblks"))
+ _wcache_pgsize_sblks = u_int32_value(buff);
+ else if(std::strstr(buff, "wcache_num_pages"))
+ _wcache_num_pages = u_int32_value(buff);
+ else if(std::strstr(buff, "JRNL_RMGR_PAGE_SIZE"))
+ _rcache_pgsize_sblks = u_int32_value(buff);
+ else if(std::strstr(buff, "JRNL_RMGR_PAGES"))
+ _rcache_num_pages = u_int32_value(buff);
+ else if(std::strstr(buff, "nanoseconds"))
+ _ts.tv_nsec = u_int32_value(buff);
+ else if(std::strstr(buff, "seconds"))
+ {
+ _ts.tv_sec = u_int32_value(buff);
+ _tm_ptr = std::localtime(&_ts.tv_sec);
+ }
+ }
+ jinfs.close();
+ if (charcnt == 0)
+ throw jexception(jerrno::JERR_JINF_ZEROLENFILE, jinf_filename.c_str(), "jinf", "read");
+}
+
+bool
+jinf::bool_value(char* line) const
+{
+ return std::strcmp(find_value(line), "true") == 0;
+}
+
+u_int16_t
+jinf::u_int16_value(char* line) const
+{
+ return std::atoi(find_value(line));
+}
+
+u_int32_t
+jinf::u_int32_value(char* line) const
+{
+ return std::atol(find_value(line));
+}
+
+std::string&
+jinf::string_value(std::string& str, char* line) const
+{
+ str.assign(find_value(line));
+ return str;
+}
+
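+// Extracts the attribute value from a line of the form <tag value="..." />, e.g.
+// (illustrative) the line <directory value="/var/jdata" /> yields "/var/jdata".
+// The line buffer is modified in place: the closing quote is overwritten with '\0'.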
+char*
+jinf::find_value(char* line) const
+{
+ const char* target1_str = "value=\"";
+ int target2_char = '\"';
+ char* t1 = std::strstr(line, target1_str);
+ if (t1 == 0)
+ {
+ std::ostringstream oss;
+ oss << "File \"" << _filename << "\": line=" << line;
+ throw jexception(jerrno::JERR_JINF_NOVALUESTR, oss.str(), "jinf", "find_value");
+ }
+ t1 += std::strlen(target1_str);
+
+ char* t2 = std::strchr(t1, target2_char);
+ if (t2 == 0)
+ {
+ std::ostringstream oss;
+ oss << "File \"" << _filename << "\": line=" << line;
+ throw jexception(jerrno::JERR_JINF_BADVALUESTR, oss.str(), "jinf", "find_value");
+ }
+ *t2 = '\0';
+ return t1;
+}
+
+u_int32_t
+jinf::get_filesize(const std::string& file_name) const
+{
+ struct stat s;
+ if (::stat(file_name.c_str(), &s))
+ {
+ std::ostringstream oss;
+ oss << "stat: file=\"" << file_name << "\"" << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR_JINF_STAT, oss.str(), "jinf", "get_filesize");
+ }
+ if (!S_ISREG(s.st_mode)) // not a regular file,
+ {
+ std::ostringstream oss;
+ oss << "File \"" << file_name << "\" is not a regular file: mode=0x" << std::hex << s.st_mode;
+ throw jexception(jerrno::JERR_JINF_NOTREGFILE, oss.str(), "jinf", "get_filesize");
+ }
+ return u_int32_t(s.st_size);
+}
+
+} // namespace journal
+} // namespace mrg
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/jinf.h b/qpid/cpp/src/qpid/legacystore/jrnl/jinf.h
new file mode 100644
index 0000000000..73f5386a19
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/jinf.h
@@ -0,0 +1,133 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file jinf.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * This file contains the code for the mrg::journal::jinf class.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_JINF_H
+#define QPID_LEGACYSTORE_JRNL_JINF_H
+
+#include <ctime>
+#include <string>
+#include <sys/types.h>
+#include <vector>
+
+namespace mrg
+{
+namespace journal
+{
+ /**
+ * \class jinf
+ * \brief Class to handle the journal information file &lt;basename&gt;.jinf.
+ */
+ class jinf
+ {
+ public:
+ typedef std::vector<u_int16_t> pfid_list; // pfids
+ typedef pfid_list::const_iterator pfidl_citr;
+
+ private:
+ u_int8_t _jver;
+ std::string _jid;
+ std::string _jdir;
+ std::string _base_filename;
+ std::string _filename;
+ timespec _ts;
+ u_int16_t _num_jfiles;
+ bool _ae;
+ u_int32_t _ae_max_jfiles;
+ u_int32_t _jfsize_sblks;
+ u_int16_t _sblk_size_dblks;
+ u_int32_t _dblk_size;
+ u_int32_t _wcache_pgsize_sblks;
+ u_int16_t _wcache_num_pages;
+ u_int32_t _rcache_pgsize_sblks;
+ u_int16_t _rcache_num_pages;
+ std::tm* _tm_ptr;
+ bool _valid_flag;
+ bool _analyzed_flag;
+ pfid_list _pfid_list;
+ bool _initial_owi;
+ bool _frot;
+
+ public:
+ // constructor for reading existing jinf file
+ jinf(const std::string& jinf_filename, bool validate_flag);
+ // constructor for writing jinf file
+ jinf(const std::string& jid, const std::string& jdir, const std::string& base_filename,
+ const u_int16_t num_jfiles, const bool auto_expand, const u_int16_t ae_max_jfiles,
+ const u_int32_t jfsize_sblks, const u_int32_t wcache_pgsize_sblks, const u_int16_t wcache_num_pages,
+ const timespec& ts);
+ virtual ~jinf();
+
+ void validate();
+ void analyze();
+ void write();
+
+ inline u_int8_t jver() const { return _jver; }
+ inline const std::string& jid() const { return _jid; }
+ inline const std::string& jdir() const { return _jdir; }
+ inline void set_jdir(const std::string& jdir) { _jdir = jdir; }
+ inline const std::string& base_filename() const { return _base_filename; }
+ inline const timespec& ts() const { return _ts; }
+ inline u_int16_t num_jfiles() const { return _num_jfiles; }
+ u_int16_t incr_num_jfiles();
+ inline bool is_ae() const { return _ae; }
+ inline u_int16_t ae_max_jfiles() const { return _ae_max_jfiles; }
+ inline u_int32_t jfsize_sblks() const { return _jfsize_sblks; }
+ inline u_int16_t sblk_size_dblks() const { return _sblk_size_dblks; }
+ inline u_int32_t dblk_size() const { return _dblk_size; }
+ inline u_int32_t wcache_pgsize_sblks() const { return _wcache_pgsize_sblks; }
+ inline u_int16_t wcache_num_pages() const { return _wcache_num_pages; }
+ inline u_int32_t rcache_pgsize_sblks() const { return _rcache_pgsize_sblks; }
+ inline u_int16_t rcache_num_pages() const { return _rcache_num_pages; }
+ u_int16_t get_first_pfid();
+ u_int16_t get_last_pfid();
+ pfid_list& get_pfid_list();
+ void get_normalized_pfid_list(pfid_list& pfid_list);
+ bool get_initial_owi();
+ bool get_frot();
+
+ std::string to_string() const;
+ std::string xml_str() const;
+
+ private:
+ void set_filename();
+ void read(const std::string& jinf_filename);
+ bool bool_value(char* line) const;
+ u_int16_t u_int16_value(char* line) const;
+ u_int32_t u_int32_value(char* line) const;
+ std::string& string_value(std::string& str, char* line) const;
+ char* find_value(char* line) const;
+ u_int32_t get_filesize(const std::string& file_name) const;
+ };
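+
+ // Usage sketch (all literal values and paths below are hypothetical, for
+ // illustration only; they are not defaults of this class):
+ //
+ //   // Read and validate an existing <basename>.jinf file, then print a summary:
+ //   jinf ji("/var/lib/qpidd/jdata/test.jinf", true);
+ //   std::cout << ji.to_string();
+ //
+ //   // Describe a new journal and write its .jinf file:
+ //   timespec ts;
+ //   ::clock_gettime(CLOCK_REALTIME, &ts);
+ //   jinf ji2("test", "/var/lib/qpidd/jdata", "test", 8, false, 0, 3072, 64, 32, ts);
+ //   ji2.write(); // writes <jdir>/<base_filename>.<JRNL_INFO_EXTENSION>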
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_JINF_H
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/jrec.cpp b/qpid/cpp/src/qpid/legacystore/jrnl/jrec.cpp
new file mode 100644
index 0000000000..61b9b6cc9b
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/jrec.cpp
@@ -0,0 +1,119 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file jrec.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing source code for class mrg::journal::jrec (abstract journal
+ * jrecord). See comments in file jrec.h for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#include "qpid/legacystore/jrnl/jrec.h"
+
+#include <iomanip>
+#include "qpid/legacystore/jrnl/jerrno.h"
+#include "qpid/legacystore/jrnl/jexception.h"
+#include <sstream>
+
+namespace mrg
+{
+namespace journal
+{
+
+jrec::jrec() {}
+jrec::~jrec() {}
+
+void
+jrec::chk_hdr(const rec_hdr& hdr)
+{
+ if (hdr._magic == 0)
+ {
+ std::ostringstream oss;
+ oss << std::hex << std::setfill('0');
+ oss << "enq magic NULL: rid=0x" << hdr._rid;
+ throw jexception(jerrno::JERR_JREC_BADRECHDR, oss.str(), "jrec", "chk_hdr");
+ }
+ if (hdr._version != RHM_JDAT_VERSION)
+ {
+ std::ostringstream oss;
+ oss << std::hex << std::setfill('0');
+ oss << "version: rid=0x" << hdr._rid;
+ oss << ": expected=0x" << std::setw(2) << (int)RHM_JDAT_VERSION;
+ oss << " read=0x" << std::setw(2) << (int)hdr._version;
+ throw jexception(jerrno::JERR_JREC_BADRECHDR, oss.str(), "jrec", "chk_hdr");
+ }
+#if defined (JRNL_LITTLE_ENDIAN)
+ u_int8_t endian_flag = RHM_LENDIAN_FLAG;
+#else
+ u_int8_t endian_flag = RHM_BENDIAN_FLAG;
+#endif
+ if (hdr._eflag != endian_flag)
+ {
+ std::ostringstream oss;
+ oss << std::hex << std::setfill('0');
+ oss << "endian_flag: rid=" << hdr._rid;
+ oss << ": expected=0x" << std::setw(2) << (int)endian_flag;
+ oss << " read=0x" << std::setw(2) << (int)hdr._eflag;
+ throw jexception(jerrno::JERR_JREC_BADRECHDR, oss.str(), "jrec", "chk_hdr");
+ }
+}
+
+void
+jrec::chk_rid(const rec_hdr& hdr, const u_int64_t rid)
+{
+ if (hdr._rid != rid)
+ {
+ std::ostringstream oss;
+ oss << std::hex << std::setfill('0');
+ oss << "rid mismatch: expected=0x" << rid;
+ oss << " read=0x" << hdr._rid;
+ throw jexception(jerrno::JERR_JREC_BADRECHDR, oss.str(), "jrec", "chk_rid");
+ }
+}
+
+void
+jrec::chk_tail(const rec_tail& tail, const rec_hdr& hdr)
+{
+ if (tail._xmagic != ~hdr._magic)
+ {
+ std::ostringstream oss;
+ oss << std::hex << std::setfill('0');
+ oss << "magic: rid=0x" << hdr._rid;
+ oss << ": expected=0x" << ~hdr._magic;
+ oss << " read=0x" << tail._xmagic;
+ throw jexception(jerrno::JERR_JREC_BADRECTAIL, oss.str(), "jrec", "chk_tail");
+ }
+ if (tail._rid != hdr._rid)
+ {
+ std::ostringstream oss;
+ oss << std::hex << std::setfill('0');
+ oss << "rid: rid=0x" << hdr._rid;
+ oss << ": read=0x" << tail._rid;
+ throw jexception(jerrno::JERR_JREC_BADRECTAIL, oss.str(), "jrec", "chk_tail");
+ }
+}
+
+} // namespace journal
+} // namespace mrg
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/jrec.h b/qpid/cpp/src/qpid/legacystore/jrnl/jrec.h
new file mode 100644
index 0000000000..9d0771cabd
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/jrec.h
@@ -0,0 +1,183 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file jrec.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing source code for class mrg::journal::jrec (abstract journal
+ * jrecord). See class documentation for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_JREC_H
+#define QPID_LEGACYSTORE_JRNL_JREC_H
+
+namespace mrg
+{
+namespace journal
+{
+class jrec;
+}
+}
+
+#include <cstddef>
+#include <fstream>
+#include "qpid/legacystore/jrnl/rec_hdr.h"
+#include "qpid/legacystore/jrnl/rec_tail.h"
+#include <string>
+#include <sys/types.h>
+
+namespace mrg
+{
+namespace journal
+{
+
+ /**
+ * \class jrec
+ * \brief Abstract class for all file jrecords, both data and log. This class establishes
+ * the common data format and structure for these jrecords.
+ */
+ class jrec
+ {
+ public:
+ jrec();
+ virtual ~jrec();
+
+ /**
+ * \brief Encode this instance of jrec into the write buffer at the disk-block-aligned
+ * pointer wptr starting at position rec_offs_dblks in the encoded record to a
+ * maximum size of max_size_dblks.
+ *
+ * This call encodes the content of the data contained in this instance of jrec into a
+ * disk-softblock-aligned (defined by JRNL_SBLK_SIZE) buffer pointed to by parameter
+ * wptr. No more than parameter max_size_dblks data-blocks may be written to the buffer.
+ * The parameter rec_offs_dblks is the offset in data-blocks within the fully encoded
+ * data block this instance represents at which to start encoding.
+ *
+ * Encoding entails writing the record header (struct rec_hdr), the data and the record tail
+ * (struct rec_tail). The record must be data-block-aligned (defined by JRNL_DBLK_SIZE),
+ * thus any remaining space in the final data-block is ignored; the returned value is the
+ * number of data-blocks consumed from the page by the encode action. Provided the initial
+ * alignment requirements are met, records may be of arbitrary size and may span multiple
+ * data-blocks, disk-blocks and/or pages.
+ *
+ * Since the record size in data-blocks is known, the general usage pattern is to call
+ * encode() as many times as is needed to fully encode the data. Each call to encode()
+ * will encode as much of the record as it can to what remains of the current page cache,
+ * and will return the number of data-blocks actually encoded.
+ *
+ * <b>Example:</b> Assume that record r1 was previously written to page 0, and that this
+ * is an instance representing record r2. Being larger than the page size ps, r2 would span
+ * multiple pages as follows:
+ * <pre>
+ * |<---ps--->|
+ * +----------+----------+----------+----...
+ * | |r2a| r2b | r2c | |
+ * |<-r1-><----------r2----------> |
+ * +----------+----------+----------+----...
+ * page: p0 p1 p2
+ * </pre>
+ * Encoding record r2 will require multiple calls to encode; one for each page which
+ * is involved. Record r2 is divided logically into sections r2a, r2b and r2c at the
+ * points where the page boundaries intersect with the record. Assuming a page size
+ * of ps, the page boundary pointers are represented by their names p0, p1... and the
+ * sizes of the record segments are represented by their names r1, r2a, r2b..., the calls
+ * should be as follows:
+ * <pre>
+ * encode(p0+r1, 0, ps-r1); (returns r2a data-blocks)
+ * encode(p1, r2a, ps); (returns r2b data-blocks which equals ps)
+ * encode(p2, r2a+r2b, ps); (returns r2c data-blocks)
+ * </pre>
+ *
+ * \param wptr Data-block-aligned pointer to position in page buffer where encoding is to
+ * take place.
+ * \param rec_offs_dblks Offset in data-blocks within record from which to start encoding.
+ * \param max_size_dblks Maximum number of data-blocks to write to pointer wptr.
+ * \returns Number of data-blocks encoded.
+ */
+ virtual u_int32_t encode(void* wptr, u_int32_t rec_offs_dblks,
+ u_int32_t max_size_dblks) = 0;
+
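+ /*
+ * Illustrative caller loop for the encode() example above. The page helpers used
+ * here (current_page_write_ptr, page_space_remaining_dblks, advance_to_next_page)
+ * are hypothetical names, not part of this interface:
+ *
+ *   u_int32_t offs_dblks = 0;
+ *   while (offs_dblks < rec.rec_size_dblks())
+ *   {
+ *       void* wptr = current_page_write_ptr();              // hypothetical page accessor
+ *       u_int32_t space_dblks = page_space_remaining_dblks(); // hypothetical
+ *       offs_dblks += rec.encode(wptr, offs_dblks, space_dblks);
+ *       if (offs_dblks < rec.rec_size_dblks())
+ *           advance_to_next_page();                         // record spans a page boundary
+ *   }
+ */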
+ /**
+ * \brief Decode into this instance of jrec from the read buffer at the disk-block-aligned
+ * pointer rptr starting at position rec_offs_dblks in the encoded record to a
+ * maximum size of max_size_blks.
+ *
+ * This call decodes a record in the page buffer pointed to by the data-block-aligned
+ * (defined by JRNL_DBLK_SIZE) parameter rptr into this instance of jrec. No more than
+ * parameter max_size_dblks data-blocks may be read from the buffer. The parameter
+ * rec_offs_dblks is the offset in data-blocks within the encoded record at which to start
+ * decoding.
+ *
+ * Decoding entails reading the record header, the data and the tail. The record is
+ * data-block-aligned (defined by JRNL_DBLK_SIZE); the returned value is the number of
+ * data-blocks read from the buffer by the decode action. As the record data size is only
+ * known once the header is read, the number of calls required to complete reading the
+ * record will depend on the values within this instance which are set when the
+ * header is decoded.
+ *
+ * A non-zero value for jrec_offs_dblks implies that this is not the first call to
+ * decode and the record data will be appended at this offset.
+ *
+ * \param h Reference to instance of struct hdr, already read from page buffer and used
+ * to determine record type
+ * \param rptr Data-block-aligned pointer to position in page buffer where decoding is to
+ * begin.
+ * \param rec_offs_dblks Offset within record from which to start appending the decoded
+ * record.
+ * \param max_size_dblks Maximum number of data-blocks to read from pointer rptr.
+ * \returns Number of data-blocks read (consumed).
+ */
+ virtual u_int32_t decode(rec_hdr& h, void* rptr, u_int32_t rec_offs_dblks,
+ u_int32_t max_size_dblks) = 0;
+
+ virtual bool rcv_decode(rec_hdr h, std::ifstream* ifsp, std::size_t& rec_offs) = 0;
+
+ virtual std::string& str(std::string& str) const = 0;
+ virtual std::size_t data_size() const = 0;
+ virtual std::size_t xid_size() const = 0;
+ virtual std::size_t rec_size() const = 0;
+ inline virtual u_int32_t rec_size_dblks() const { return size_dblks(rec_size()); }
+ static inline u_int32_t size_dblks(const std::size_t size)
+ { return size_blks(size, JRNL_DBLK_SIZE); }
+ static inline u_int32_t size_sblks(const std::size_t size)
+ { return size_blks(size, JRNL_DBLK_SIZE * JRNL_SBLK_SIZE); }
+ static inline u_int32_t size_blks(const std::size_t size, const std::size_t blksize)
+ { return (size + blksize - 1)/blksize; }
+ virtual u_int64_t rid() const = 0;
+
+ protected:
+ virtual void chk_hdr() const = 0;
+ virtual void chk_hdr(u_int64_t rid) const = 0;
+ virtual void chk_tail() const = 0;
+ static void chk_hdr(const rec_hdr& hdr);
+ static void chk_rid(const rec_hdr& hdr, u_int64_t rid);
+ static void chk_tail(const rec_tail& tail, const rec_hdr& hdr);
+ virtual void clean() = 0;
+ }; // class jrec
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_JREC_H
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/lp_map.cpp b/qpid/cpp/src/qpid/legacystore/jrnl/lp_map.cpp
new file mode 100644
index 0000000000..8024ddadd2
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/lp_map.cpp
@@ -0,0 +1,82 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file lp_map.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::lp_map (logical file map). See
+ * comments in file lp_map.h for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#include "qpid/legacystore/jrnl/lp_map.h"
+
+#include "qpid/legacystore/jrnl/jerrno.h"
+#include "qpid/legacystore/jrnl/jexception.h"
+#include <sstream>
+
+namespace mrg
+{
+namespace journal
+{
+lp_map::lp_map() : _map() {}
+lp_map::~lp_map() {}
+
+void
+lp_map::insert(u_int16_t lfid, u_int16_t pfid)
+{
+ lfpair ip = lfpair(lfid, pfid);
+ lfret ret = _map.insert(ip);
+ if (ret.second == false)
+ {
+ std::ostringstream oss;
+ oss << std::hex << "lfid=0x" << lfid << " pfid=0x" << pfid;
+ throw jexception(jerrno::JERR_MAP_DUPLICATE, oss.str(), "lp_map", "insert");
+ }
+}
+
+void
+lp_map::get_pfid_list(std::vector<u_int16_t>& pfid_list)
+{
+ for (lp_map_citr_t i = _map.begin(); i != _map.end(); i++)
+ pfid_list.push_back(i->second);
+}
+
+// debug aid
+std::string
+lp_map::to_string()
+{
+ std::ostringstream oss;
+ oss << "{lfid:pfid ";
+ for (lp_map_citr_t i=_map.begin(); i!=_map.end(); i++)
+ {
+ if (i != _map.begin()) oss << ", ";
+ oss << (*i).first << ":" << (*i).second;
+ }
+ oss << "}";
+ return oss.str();
+}
+
+} // namespace journal
+} // namespace mrg
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/lp_map.h b/qpid/cpp/src/qpid/legacystore/jrnl/lp_map.h
new file mode 100644
index 0000000000..c43cbc0173
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/lp_map.h
@@ -0,0 +1,83 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file lp_map.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::lp_map (logical file map).
+ * See class documentation for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_LP_MAP_H
+#define QPID_LEGACYSTORE_JRNL_LP_MAP_H
+
+#include <map>
+#include <string>
+#include <sys/types.h>
+#include <vector>
+
+namespace mrg
+{
+namespace journal
+{
+ /**
+ * \class lp_map
+ * \brief Maps the logical file id (lfid) to the physical file id (pfid) in the journal.
+ *
+ * NOTE: NOT THREAD SAFE
+ */
+ class lp_map
+ {
+ public:
+ typedef std::map<u_int16_t, u_int16_t> lp_map_t;
+ typedef lp_map_t::const_iterator lp_map_citr_t;
+ typedef lp_map_t::const_reverse_iterator lp_map_critr_t;
+
+ private:
+ typedef std::pair<u_int16_t, u_int16_t> lfpair;
+ typedef std::pair<lp_map_t::iterator, bool> lfret;
+ lp_map_t _map;
+
+ public:
+ lp_map();
+ virtual ~lp_map();
+
+ void insert(u_int16_t lfid, u_int16_t pfid);
+ inline u_int16_t size() const { return u_int16_t(_map.size()); }
+ inline bool empty() const { return _map.empty(); }
+ inline lp_map_citr_t begin() { return _map.begin(); }
+ inline lp_map_citr_t end() { return _map.end(); }
+ inline lp_map_critr_t rbegin() { return _map.rbegin(); }
+ inline lp_map_critr_t rend() { return _map.rend(); }
+ void get_pfid_list(std::vector<u_int16_t>& pfid_list);
+
+ // debug aid
+ std::string to_string();
+ };
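+
+ // Usage sketch (lfid/pfid values are illustrative): pairs may be inserted in any
+ // order and are read back sorted by lfid, which is how jinf::analyze() rebuilds
+ // the physical-file ordering:
+ //   lp_map m;
+ //   m.insert(1, 3);
+ //   m.insert(0, 0);
+ //   m.insert(2, 1);
+ //   std::vector<u_int16_t> pfids;
+ //   m.get_pfid_list(pfids); // pfids == {0, 3, 1}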
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_LP_MAP_H
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/lpmgr.cpp b/qpid/cpp/src/qpid/legacystore/jrnl/lpmgr.cpp
new file mode 100644
index 0000000000..d7b0c9f516
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/lpmgr.cpp
@@ -0,0 +1,226 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file lpmgr.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::lpmgr (non-logging file
+ * handle), used for controlling journal log files. See comments in file
+ * lpmgr.h for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#include "qpid/legacystore/jrnl/lpmgr.h"
+
+#include <cassert>
+#include <qpid/legacystore/jrnl/jerrno.h>
+#include <qpid/legacystore/jrnl/jexception.h>
+
+namespace mrg
+{
+namespace journal
+{
+
+lpmgr::lpmgr() : _ae(false), _ae_max_jfiles(0)
+{}
+
+lpmgr::~lpmgr()
+{
+ finalize();
+}
+
+void
+lpmgr::initialize(const u_int16_t num_jfiles,
+ const bool ae,
+ const u_int16_t ae_max_jfiles,
+ jcntl* const jcp,
+ new_obj_fn_ptr fp)
+{
+ assert(jcp != 0);
+ finalize();
+
+ // Validate params
+ if (ae && ae_max_jfiles > 0 && ae_max_jfiles <= num_jfiles)
+ {
+ std::ostringstream oss;
+ oss << "ae_max_jfiles (" << ae_max_jfiles << ") <= num_jfiles (" << num_jfiles << ")";
+ throw jexception(jerrno::JERR_LFMGR_BADAEFNUMLIM, oss.str(), "lpmgr", "initialize");
+ }
+ _ae = ae;
+ _ae_max_jfiles = ae_max_jfiles;
+
+ const std::size_t num_res_files = ae
+ ? (ae_max_jfiles ? ae_max_jfiles : JRNL_MAX_NUM_FILES)
+ : num_jfiles;
+ _fcntl_arr.reserve(num_res_files);
+ append(jcp, fp, num_jfiles);
+}
+
+void
+lpmgr::recover(const rcvdat& rd,
+ jcntl* const jcp,
+ new_obj_fn_ptr fp)
+{
+ assert(jcp != 0);
+ finalize();
+
+ // Validate rd params
+ if (rd._aemjf > 0 && rd._aemjf <= rd._njf)
+ {
+ std::ostringstream oss;
+ oss << "ae_max_jfiles (" << rd._aemjf << ") <= num_jfiles (" << rd._njf << ")";
+ throw jexception(jerrno::JERR_LFMGR_BADAEFNUMLIM, oss.str(), "lpmgr", "recover");
+ }
+ _ae = rd._ae;
+ _ae_max_jfiles = rd._aemjf;
+
+ const std::size_t num_res_files = rd._ae
+ ? (rd._aemjf ? rd._aemjf : JRNL_MAX_NUM_FILES)
+ : rd._njf;
+ _fcntl_arr.reserve(num_res_files);
+ _fcntl_arr.assign(rd._njf, 0);
+ std::vector<u_int16_t> lfid_list(rd._fid_list.size(), 0);
+ for (std::size_t lid = 0; lid < rd._fid_list.size(); lid++)
+ lfid_list[rd._fid_list[lid]] = lid;
+ // NOTE: rd._fid_list may be smaller than rd._njf (journal may be empty or not yet file-cycled)
+ for (std::size_t pfid = 0; pfid < rd._njf; pfid++)
+ if (pfid < rd._fid_list.size())
+ _fcntl_arr[lfid_list[pfid]] = fp(jcp, lfid_list[pfid], pfid, &rd);
+ else
+ _fcntl_arr[pfid] = fp(jcp, pfid, pfid, &rd);
+}
+
+void
+lpmgr::insert(const u_int16_t after_lfid,
+ jcntl* const jcp,
+ new_obj_fn_ptr fp,
+ const u_int16_t num_jfiles)
+{
+ assert(jcp != 0);
+ assert(after_lfid < _fcntl_arr.size());
+ if (!_ae) throw jexception(jerrno::JERR_LFMGR_AEDISABLED, "lpmgr", "insert");
+ if (num_jfiles == 0) return;
+ std::size_t pfid = _fcntl_arr.size();
+ const u_int16_t eff_ae_max_jfiles = _ae_max_jfiles ? _ae_max_jfiles : JRNL_MAX_NUM_FILES;
+ if (pfid + num_jfiles > eff_ae_max_jfiles)
+ {
+ std::ostringstream oss;
+ oss << "num_files=" << pfid << " incr=" << num_jfiles << " limit=" << _ae_max_jfiles;
+ throw jexception(jerrno::JERR_LFMGR_AEFNUMLIMIT, oss.str(), "lpmgr", "insert");
+ }
+ for (std::size_t lid = after_lfid + 1; lid <= after_lfid + num_jfiles; lid++, pfid++)
+ _fcntl_arr.insert(_fcntl_arr.begin() + lid, fp(jcp, lid, pfid, 0));
+ for (std::size_t lid = after_lfid + num_jfiles + 1; lid < _fcntl_arr.size(); lid++)
+ {
+ fcntl* p = _fcntl_arr[lid];
+ assert(p != 0);
+ p->set_lfid(p->lfid() + num_jfiles);
+ }
+}
+
+void
+lpmgr::finalize()
+{
+ for (u_int32_t i = 0; i < _fcntl_arr.size(); i++)
+ delete _fcntl_arr[i];
+ _fcntl_arr.clear();
+ _ae = false;
+ _ae_max_jfiles = 0;
+}
+
+void
+lpmgr::set_ae(const bool ae)
+{
+ if (ae && _ae_max_jfiles > 0 && _ae_max_jfiles <= _fcntl_arr.size())
+ {
+ std::ostringstream oss;
+ oss << "ae_max_jfiles (" << _ae_max_jfiles << ") <= _fcntl_arr.size (" << _fcntl_arr.size() << ")";
+ throw jexception(jerrno::JERR_LFMGR_BADAEFNUMLIM, oss.str(), "lpmgr", "set_ae");
+ }
+ if (ae && _fcntl_arr.max_size() < _ae_max_jfiles)
+ _fcntl_arr.reserve(_ae_max_jfiles ? _ae_max_jfiles : JRNL_MAX_NUM_FILES);
+ _ae = ae;
+}
+
+void
+lpmgr::set_ae_max_jfiles(const u_int16_t ae_max_jfiles)
+{
+ if (_ae && ae_max_jfiles > 0 && ae_max_jfiles <= _fcntl_arr.size())
+ {
+ std::ostringstream oss;
+ oss << "ae_max_jfiles (" << _ae_max_jfiles << ") <= _fcntl_arr.size() (" << _fcntl_arr.size() << ")";
+ throw jexception(jerrno::JERR_LFMGR_BADAEFNUMLIM, oss.str(), "lpmgr", "set_ae_max_jfiles");
+ }
+ if (_ae && _fcntl_arr.max_size() < ae_max_jfiles)
+ _fcntl_arr.reserve(ae_max_jfiles ? ae_max_jfiles : JRNL_MAX_NUM_FILES);
+ _ae_max_jfiles = ae_max_jfiles;
+}
+
+u_int16_t
+lpmgr::ae_jfiles_rem() const
+{
+ if (_ae_max_jfiles > _fcntl_arr.size()) return _ae_max_jfiles - _fcntl_arr.size();
+ if (_ae_max_jfiles == 0) return JRNL_MAX_NUM_FILES - _fcntl_arr.size();
+ return 0;
+}
+
+// Testing functions
+
+void
+lpmgr::get_pfid_list(std::vector<u_int16_t>& pfid_list) const
+{
+ pfid_list.clear();
+ for (std::size_t i = 0; i < _fcntl_arr.size(); i++)
+ pfid_list.push_back(_fcntl_arr[i]->pfid());
+}
+
+void
+lpmgr::get_lfid_list(std::vector<u_int16_t>& lfid_list) const
+{
+ lfid_list.clear();
+ lfid_list.assign(_fcntl_arr.size(), 0);
+ for (std::size_t i = 0; i < _fcntl_arr.size(); i++)
+ lfid_list[_fcntl_arr[i]->pfid()] = i;
+}
+
+// === protected fns ===
+
+void
+lpmgr::append(jcntl* const jcp,
+ new_obj_fn_ptr fp,
+ const u_int16_t num_jfiles)
+{
+ std::size_t s = _fcntl_arr.size();
+ if (_ae_max_jfiles && s + num_jfiles > _ae_max_jfiles)
+ {
+ std::ostringstream oss;
+ oss << "num_files=" << s << " incr=" << num_jfiles << " limit=" << _ae_max_jfiles;
+ throw jexception(jerrno::JERR_LFMGR_AEFNUMLIMIT, oss.str(), "lpmgr", "append");
+ }
+ for (std::size_t i = s; i < s + num_jfiles; i++)
+ _fcntl_arr.push_back(fp(jcp, i, i, 0));
+}
+
+} // namespace journal
+} // namespace mrg
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/lpmgr.h b/qpid/cpp/src/qpid/legacystore/jrnl/lpmgr.h
new file mode 100644
index 0000000000..be5c4494cc
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/lpmgr.h
@@ -0,0 +1,303 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file lpmgr.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * Class mrg::journal::lpmgr. See class documentation for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_LPMGR_H
+#define QPID_LEGACYSTORE_JRNL_LPMGR_H
+
+namespace mrg
+{
+namespace journal
+{
+ class jcntl;
+ class lpmgr;
+}
+}
+
+#include "qpid/legacystore/jrnl/fcntl.h"
+#include <vector>
+
+namespace mrg
+{
+namespace journal
+{
+
+ /**
+ * \brief LFID-PFID manager. This class maps the logical file id (lfid) to the physical file id (pfid) so that files
+ * may be inserted into the file ring buffer in (nearly) arbitrary logical locations while the physical ids continue
+ * to be appended. NOTE: NOT THREAD SAFE.
+ *
+ * The entire functionality of the LFID-PFID manager is to maintain an array of pointers to fcntl objects which have
+ * a one-to-one relationship to the physical %journal files. The logical file id (lfid) is used as an index to the
+ * array to read the mapped physical file id (pfid). By altering the order of these pointers within the array, the
+ * mapping of logical to physical files may be altered. This can be used to allow for the logical insertion of
+ * %journal files into a ring buffer, even though the physical file ids must be appended to those that preceded them.
+ *
+ * Since the insert() operation uses after-lfid as its position parameter, it is not possible to insert before lfid
+ * 0 - i.e. it is only possible to insert after an existing lfid. Consequently, lfid 0 and pfid 0 are always
+ * coincident in a %journal. Note, however, that inserting before lfid 0 is logically equivalent to inserting after
+ * the last lfid.
+ *
+ * When one or more files are inserted after a particular lfid, the lfids of the following files are incremented. The
+ * pfids of the inserted files follow those of all existing files, thus leading to an lfid-pfid discrepancy (i.e. no
+ * longer a one-to-one mapping):
+ *
+ * Example: Before insertion, %journal file headers would look as follows:
+ * <pre>
+ * Logical view (sorted by lfid): Physical view (sorted by pfid):
+ * +---+---+---+---+---+---+ +---+---+---+---+---+---+
+ * pfid --> | 0 | 1 | 2 | 3 | 4 | 5 | pfid --> | 0 | 1 | 2 | 3 | 4 | 5 |
+ * lfid --> | 0 | 1 | 2 | 3 | 4 | 5 | lfid --> | 0 | 1 | 2 | 3 | 4 | 5 |
+ * +---+---+---+---+---+---+ +---+---+---+---+---+---+
+ * </pre>
+ *
+ * After insertion of 2 files after lfid 2 (marked with *s):
+ * <pre>
+ * Logical view (sorted by lfid): Physical view (sorted by pfid):
+ * +---+---+---+---+---+---+---+---+ +---+---+---+---+---+---+---+---+
+ * pfid --> | 0 | 1 | 2 |*6*|*7*| 3 | 4 | 5 | pfid --> | 0 | 1 | 2 | 3 | 4 | 5 |*6*|*7*|
+ * lfid --> | 0 | 1 | 2 |*3*|*4*| 5 | 6 | 7 | lfid --> | 0 | 1 | 2 | 5 | 6 | 7 |*3*|*4*|
+ * +---+---+---+---+---+---+---+---+ +---+---+---+---+---+---+---+---+
+ * </pre>
+ *
+ * The insert() function updates the internal map immediately, but the physical files (which have both the pfid and
+ * lfid written into the file header) are only updated as they are overwritten in the normal course of enqueueing
+ * and dequeueing messages. If the %journal should fail after insertion but before the files following those inserted
+ * are overwritten, then duplicate lfids will be present (though no duplicate pfids are possible). The overwrite
+ * indicator (owi) flag and the pfid numbers may be used to resolve the ambiguity and determine the logically earlier
+ * lfid in this case.
+ *
+ * Example: Before insertion, the current active write file being lfid/pfid 2 as determined by the owi flag, %journal
+ * file headers would look as follows:
+ * <pre>
+ * Logical view (sorted by lfid): Physical view (sorted by pfid):
+ * +---+---+---+---+---+---+ +---+---+---+---+---+---+
+ * pfid --> | 0 | 1 | 2 | 3 | 4 | 5 | pfid --> | 0 | 1 | 2 | 3 | 4 | 5 |
+ * lfid --> | 0 | 1 | 2 | 3 | 4 | 5 | lfid --> | 0 | 1 | 2 | 3 | 4 | 5 |
+ * owi --> | t | t | t | f | f | f | owi --> | t | t | t | f | f | f |
+ * +---+---+---+---+---+---+ +---+---+---+---+---+---+
+ * </pre>
+ *
+ * After inserting 2 files after lfid 2 and then 3 (the newly inserted file) - marked with *s:
+ * <pre>
+ * Logical view (sorted by lfid): Physical view (sorted by pfid):
+ * +---+---+---+---+---+---+---+---+ +---+---+---+---+---+---+---+---+
+ * pfid --> | 0 | 1 | 2 |*6*|*7*| 3 | 4 | 5 |      pfid --> | 0 | 1 | 2 | 3 | 4 | 5 |*6*|*7*|
+ * lfid --> | 0 | 1 | 2 |*3*|*4*| 3 | 4 | 5 | lfid --> | 0 | 1 | 2 | 3 | 4 | 5 |*3*|*4*|
+ * owi --> | t | t | t | t | t | f | f | f | owi --> | t | t | t | f | f | f | t | t |
+ * +---+---+---+---+---+---+---+---+ +---+---+---+---+---+---+---+---+
+ * </pre>
+ *
+ * If a broker failure occurs at this point, then there are two independent tests that may be made to resolve
+ * duplicate lfids during recovery in such cases:
+ * <ol>
+ * <li>The correct lfid has an owi flag that matches that of pfid/lfid 0</li>
+ * <li>The most recently inserted (hence correct) lfid has pfids that are higher than the duplicate that was not
+ * overwritten</li>
+ * </ol>
+ *
+ * NOTE: NOT THREAD SAFE. Provide external thread protection if used in multi-threaded environments.
+ */
+ class lpmgr
+ {
+ public:
+ /**
+ * \brief Function pointer to function that will create a new fcntl object and return its pointer.
+ *
+ * \param jcp Pointer to jcntl instance from which journal file details will be obtained.
+ * \param lfid Logical file ID for new fcntl instance.
+ * \param pfid Physical file ID for file associated with new fcntl instance.
+ * \param rdp Pointer to rcvdat instance which contains recovery information for new fcntl instance when
+ * recovering an existing file, or null if a new file is to be created.
+ */
+ typedef fcntl* (new_obj_fn_ptr)(jcntl* const jcp,
+ const u_int16_t lfid,
+ const u_int16_t pfid,
+ const rcvdat* const rdp);
+
+ private:
+ bool _ae; ///< Auto-expand mode
+ u_int16_t _ae_max_jfiles; ///< Max file count for auto-expansion; 0 = no limit
+ std::vector<fcntl*> _fcntl_arr; ///< Array of pointers to fcntl objects
+
+ public:
+ lpmgr();
+ virtual ~lpmgr();
+
+ /**
+ * \brief Initialize from scratch for a known number of %journal files. All lfid values are identical to pfid
+ * values (which is normal before any inserts have occurred).
+ *
+ * \param num_jfiles Number of files to be created, and consequently the number of fcntl objects in array
+ * _fcntl_arr.
+ * \param ae If true, allows auto-expansion; if false, disables auto-expansion.
+ * \param ae_max_jfiles The maximum number of files allowed for auto-expansion. Cannot be lower than the current
+ * number of files. However, a zero value disables the limit checks, and allows unlimited
+ * expansion.
+ * \param jcp Pointer to jcntl instance. This is used to find the file path and base filename so that
+ * new files may be created.
+ * \param fp Pointer to function which creates and returns a pointer to a new fcntl object (and hence
+ * causes a new %journal file to be created).
+ */
+ void initialize(const u_int16_t num_jfiles,
+ const bool ae,
+ const u_int16_t ae_max_jfiles,
+ jcntl* const jcp,
+ new_obj_fn_ptr fp);
+
+ /**
+ * \brief Initialize from a known lfid-pfid map pfid_list (within rcvdat param rd), which is usually obtained
+ * from a recover. The index of pfid_list is the logical file id (lfid); the value contained in the vector is
+ * the physical file id (pfid).
+ *
+ * \param rd Ref to rcvdat struct which contains recovery data and the pfid_list.
+ * \param jcp Pointer to jcntl instance. This is used to find the file path and base filename so that
+ * new files may be created.
+ * \param fp Pointer to function which creates and returns a pointer to a new fcntl object (and hence
+ * causes a new %journal file to be created).
+ */
+ void recover(const rcvdat& rd,
+ jcntl* const jcp,
+ new_obj_fn_ptr fp);
+
+ /**
+ * \brief Insert num_jfiles files after lfid index after_lfid. This causes all lfids after after_lfid to be
+ * increased by num_jfiles.
+ *
+ * Note that it is not possible to insert <i>before</i> lfid 0, and thus lfid 0 should always point to pfid 0.
+ * Inserting before lfid 0 is logically equivalent to inserting after the last lfid in a circular buffer.
+ *
+ * \param after_lfid Lid index after which to insert file(s).
+ * \param jcp Pointer to jcntl instance. This is used to find the file path and base filename so that
+ * new files may be created.
+ * \param fp Pointer to function which creates and returns a pointer to a new fcntl object (and hence
+ * causes a new %journal file to be created).
+ * \param num_jfiles The number of files by which to increase.
+ */
+ void insert(const u_int16_t after_lfid,
+ jcntl* const jcp,
+ new_obj_fn_ptr fp,
+ const u_int16_t num_jfiles = 1);
+
+ /**
+ * \brief Clears _fcntl_arr and deletes all fcntl instances.
+ */
+ void finalize();
+
+ /**
+ * \brief Returns true if initialized; false otherwise. After construction, will return false until initialize()
+ * is called; thereafter true until finalize() is called, whereupon it will return false again.
+ *
+ * \return True if initialized; false otherwise.
+ */
+ inline bool is_init() const { return _fcntl_arr.size() > 0; }
+
+ /**
+ * \brief Returns true if auto-expand mode is enabled; false if not.
+ *
+ * \return True if auto-expand mode is enabled; false if not.
+ */
+ inline bool is_ae() const { return _ae; }
+
+ /**
+ * \brief Sets the auto-expand mode to enabled if ae is true, to disabled otherwise. The value of _ae_max_jfiles
+ * must be valid to succeed (i.e. _ae_max_jfiles must be greater than the current number of files or be zero).
+ *
+ * \param ae If true will enable auto-expand mode; if false will disable it.
+ */
+ void set_ae(const bool ae);
+
+ /**
+ * \brief Returns the number of %journal files, including any that were appended or inserted since
+ * initialization.
+ *
+ * \return Number of %journal files if initialized; 0 otherwise.
+ */
+ inline u_int16_t num_jfiles() const { return static_cast<u_int16_t>(_fcntl_arr.size()); }
+
+ /**
+ * \brief Returns the maximum number of files allowed for auto-expansion.
+ *
+ * \return Maximum number of files allowed for auto-expansion. A zero value represents a disabled limit
+ * - i.e. unlimited expansion.
+ */
+ inline u_int16_t ae_max_jfiles() const { return _ae_max_jfiles; }
+
+ /**
+ * \brief Sets the maximum number of files allowed for auto-expansion. A zero value disables the limit.
+ *
+ * \param ae_max_jfiles The maximum number of files allowed for auto-expansion. Cannot be lower than the current
+ * number of files. However, a zero value disables the limit checks, and allows unlimited
+ * expansion.
+ */
+ void set_ae_max_jfiles(const u_int16_t ae_max_jfiles);
+
+ /**
+ * \brief Calculates the number of future files available for auto-expansion.
+ *
+ * \return The number of future files available for auto-expansion.
+ */
+ u_int16_t ae_jfiles_rem() const;
+
+ /**
+ * \brief Get a pointer to fcntl instance for a given lfid.
+ *
+ * \return Pointer to fcntl object corresponding to logical file id lfid, or 0 if lfid is out of range
+ * (greater than number of files in use).
+ */
+ inline fcntl* get_fcntlp(const u_int16_t lfid) const
+ { if (lfid >= _fcntl_arr.size()) return 0; return _fcntl_arr[lfid]; }
+
+ // Testing functions
+ void get_pfid_list(std::vector<u_int16_t>& pfid_list) const;
+ void get_lfid_list(std::vector<u_int16_t>& lfid_list) const;
+
+ protected:
+
+ /**
+ * \brief Append num_jfiles files to the end of the logical and file id sequence. This is similar to extending
+ * the from-scratch initialization.
+ *
+ * \param jcp Pointer to jcntl instance. This is used to find the file path and base filename so that
+ * new files may be created.
+ * \param fp Pointer to function which creates and returns a pointer to a new fcntl object (and hence
+ * causes a new %journal file to be created).
+ * \param num_jfiles The number of files by which to increase.
+ */
+ void append(jcntl* const jcp,
+ new_obj_fn_ptr fp,
+ const u_int16_t num_jfiles = 1);
+
+ };
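+
+ // Usage sketch. The factory function below is hypothetical; in the broker it is
+ // supplied by the journal controller and constructs an fcntl for each journal file:
+ //
+ //   fcntl* my_new_fcntl(jcntl* const jcp, const u_int16_t lfid, const u_int16_t pfid,
+ //                       const rcvdat* const rdp); // signature matching new_obj_fn_ptr
+ //
+ //   lpmgr lm;
+ //   lm.initialize(6, true, 8, jcp, my_new_fcntl); // 6 files, auto-expand up to 8
+ //   lm.insert(2, jcp, my_new_fcntl, 2);           // insert 2 files after lfid 2
+ //   // As in the example above: lfids 3..7 now map to pfids 6, 7, 3, 4, 5.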
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_LPMGR_H
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/pmgr.cpp b/qpid/cpp/src/qpid/legacystore/jrnl/pmgr.cpp
new file mode 100644
index 0000000000..3dc61e2661
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/pmgr.cpp
@@ -0,0 +1,215 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file pmgr.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::pmgr (page manager). See
+ * comments in file pmgr.h for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#include "qpid/legacystore/jrnl/pmgr.h"
+
+#include <cerrno>
+#include <cstdlib>
+#include <cstring>
+#include "qpid/legacystore/jrnl/jcfg.h"
+#include "qpid/legacystore/jrnl/jcntl.h"
+#include "qpid/legacystore/jrnl/jerrno.h"
+#include <sstream>
+
+
+namespace mrg
+{
+namespace journal
+{
+
+pmgr::page_cb::page_cb(u_int16_t index):
+ _index(index),
+ _state(UNUSED),
+ _wdblks(0),
+ _rdblks(0),
+ _pdtokl(0),
+ _wfh(0),
+ _rfh(0),
+ _pbuff(0)
+{}
+
+const char*
+pmgr::page_cb::state_str() const
+{
+ switch(_state)
+ {
+ case UNUSED:
+ return "UNUSED";
+ case IN_USE:
+ return "IN_USE";
+ case AIO_PENDING:
+ return "AIO_PENDING";
+ case AIO_COMPLETE:
+ return "AIO_COMPLETE";
+ }
+ return "<unknown>";
+}
+
+const u_int32_t pmgr::_sblksize = JRNL_SBLK_SIZE * JRNL_DBLK_SIZE;
+
+pmgr::pmgr(jcntl* jc, enq_map& emap, txn_map& tmap):
+ _cache_pgsize_sblks(0),
+ _cache_num_pages(0),
+ _jc(jc),
+ _emap(emap),
+ _tmap(tmap),
+ _page_base_ptr(0),
+ _page_ptr_arr(0),
+ _page_cb_arr(0),
+ _aio_cb_arr(0),
+ _aio_event_arr(0),
+ _ioctx(0),
+ _pg_index(0),
+ _pg_cntr(0),
+ _pg_offset_dblks(0),
+ _aio_evt_rem(0),
+ _cbp(0),
+ _enq_rec(),
+ _deq_rec(),
+ _txn_rec()
+{}
+
+pmgr::~pmgr()
+{
+ pmgr::clean();
+}
+
+void
+pmgr::initialize(aio_callback* const cbp, const u_int32_t cache_pgsize_sblks, const u_int16_t cache_num_pages)
+{
+ // As static use of this class keeps old values around, clean up first...
+ pmgr::clean();
+ _pg_index = 0;
+ _pg_cntr = 0;
+ _pg_offset_dblks = 0;
+ _aio_evt_rem = 0;
+ _cache_pgsize_sblks = cache_pgsize_sblks;
+ _cache_num_pages = cache_num_pages;
+ _cbp = cbp;
+
+ // 1. Allocate page memory (as a single block)
+ std::size_t cache_pgsize = _cache_num_pages * _cache_pgsize_sblks * _sblksize;
+ if (::posix_memalign(&_page_base_ptr, _sblksize, cache_pgsize))
+ {
+ clean();
+ std::ostringstream oss;
+ oss << "posix_memalign(): blksize=" << _sblksize << " size=" << cache_pgsize;
+ oss << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR__MALLOC, oss.str(), "pmgr", "initialize");
+ }
+ // 2. Allocate array of page pointers
+ _page_ptr_arr = (void**)std::malloc(_cache_num_pages * sizeof(void*));
+ MALLOC_CHK(_page_ptr_arr, "_page_ptr_arr", "pmgr", "initialize");
+
+    // 3. Allocate and initialize page control block (page_cb) array
+ _page_cb_arr = (page_cb*)std::malloc(_cache_num_pages * sizeof(page_cb));
+ MALLOC_CHK(_page_cb_arr, "_page_cb_arr", "pmgr", "initialize");
+ std::memset(_page_cb_arr, 0, _cache_num_pages * sizeof(page_cb));
+
+    // 4. Allocate IO control block (iocb) array
+ _aio_cb_arr = (aio_cb*)std::malloc(_cache_num_pages * sizeof(aio_cb));
+ MALLOC_CHK(_aio_cb_arr, "_aio_cb_arr", "pmgr", "initialize");
+
+    // 5. Set page pointers in _page_ptr_arr, _page_cb_arr and iocbs to pages within page block
+ for (u_int16_t i=0; i<_cache_num_pages; i++)
+ {
+ _page_ptr_arr[i] = (void*)((char*)_page_base_ptr + _cache_pgsize_sblks * _sblksize * i);
+ _page_cb_arr[i]._index = i;
+ _page_cb_arr[i]._state = UNUSED;
+ _page_cb_arr[i]._pbuff = _page_ptr_arr[i];
+ _page_cb_arr[i]._pdtokl = new std::deque<data_tok*>;
+ _page_cb_arr[i]._pdtokl->clear();
+ _aio_cb_arr[i].data = (void*)&_page_cb_arr[i];
+ }
+
+    // 6. Allocate io_event array, max one event per cache page plus one for each file
+ const u_int16_t max_aio_evts = _cache_num_pages + _jc->num_jfiles();
+ _aio_event_arr = (aio_event*)std::malloc(max_aio_evts * sizeof(aio_event));
+ MALLOC_CHK(_aio_event_arr, "_aio_event_arr", "pmgr", "initialize");
+
+    // 7. Initialize AIO context
+ if (int ret = aio::queue_init(max_aio_evts, &_ioctx))
+ {
+ std::ostringstream oss;
+ oss << "io_queue_init() failed: " << FORMAT_SYSERR(-ret);
+ throw jexception(jerrno::JERR__AIO, oss.str(), "pmgr", "initialize");
+ }
+}
+
+void
+pmgr::clean()
+{
+ // clean up allocated memory here
+
+ if (_ioctx)
+ aio::queue_release(_ioctx);
+
+ std::free(_page_base_ptr);
+ _page_base_ptr = 0;
+
+ if (_page_cb_arr)
+ {
+ for (int i=0; i<_cache_num_pages; i++)
+ delete _page_cb_arr[i]._pdtokl;
+ std::free(_page_ptr_arr);
+ _page_ptr_arr = 0;
+ }
+
+ std::free(_page_cb_arr);
+ _page_cb_arr = 0;
+
+ std::free(_aio_cb_arr);
+ _aio_cb_arr = 0;
+
+ std::free(_aio_event_arr);
+ _aio_event_arr = 0;
+}
+
+const char*
+pmgr::page_state_str(page_state ps)
+{
+ switch (ps)
+ {
+ case UNUSED:
+ return "UNUSED";
+ case IN_USE:
+ return "IN_USE";
+ case AIO_PENDING:
+ return "AIO_PENDING";
+ case AIO_COMPLETE:
+ return "AIO_COMPLETE";
+ }
+ return "<page_state unknown>";
+}
+
+} // namespace journal
+} // namespace mrg
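
pmgr::initialize() above allocates the page cache as one posix_memalign()'d block and then carves it into cache_num_pages pages of cache_pgsize_sblks softblocks each, storing a pointer per page. The following standalone sketch is not taken from the patch; it shows the same carve-up arithmetic with illustrative constants instead of the JRNL_* values.

    // Standalone illustration (not pmgr's code) of the single-block page-cache layout:
    // one aligned allocation carved into fixed-size pages.
    #include <cstdio>
    #include <cstdlib>
    #include <vector>

    int main()
    {
        const std::size_t sblk_size = 4096; // assumed softblock size
        const std::size_t pg_sblks = 32;    // assumed page size in softblocks
        const unsigned num_pages = 8;       // assumed number of cache pages

        void* base = 0;
        const std::size_t total = num_pages * pg_sblks * sblk_size;
        if (::posix_memalign(&base, sblk_size, total))
            return 1;                       // allocation failed

        std::vector<void*> page(num_pages);
        for (unsigned i = 0; i < num_pages; i++)
            page[i] = (void*)((char*)base + i * pg_sblks * sblk_size);

        std::printf("page 0 at %p, page %u at %p\n", page[0], num_pages - 1, page[num_pages - 1]);
        std::free(base);
        return 0;
    }
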
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/pmgr.h b/qpid/cpp/src/qpid/legacystore/jrnl/pmgr.h
new file mode 100644
index 0000000000..64115e225e
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/pmgr.h
@@ -0,0 +1,142 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file pmgr.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::pmgr (page manager). See
+ * class documentation for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_PMGR_H
+#define QPID_LEGACYSTORE_JRNL_PMGR_H
+
+namespace mrg
+{
+namespace journal
+{
+ class pmgr;
+ class jcntl;
+}
+}
+
+#include <deque>
+#include "qpid/legacystore/jrnl/aio.h"
+#include "qpid/legacystore/jrnl/aio_callback.h"
+#include "qpid/legacystore/jrnl/data_tok.h"
+#include "qpid/legacystore/jrnl/deq_rec.h"
+#include "qpid/legacystore/jrnl/enq_map.h"
+#include "qpid/legacystore/jrnl/enq_rec.h"
+#include "qpid/legacystore/jrnl/fcntl.h"
+#include "qpid/legacystore/jrnl/txn_map.h"
+#include "qpid/legacystore/jrnl/txn_rec.h"
+
+namespace mrg
+{
+namespace journal
+{
+
+ /**
+    * \brief Abstract class for managing either a read or a write page cache of arbitrary page size and
+    * number of pages.
+ */
+ class pmgr
+ {
+ public:
+ /**
+        * \brief Enumeration of possible states of a page within a page cache.
+ */
+ enum page_state
+ {
+            UNUSED,                     ///< Page is uninitialized and contains no data.
+            IN_USE,                     ///< Page is in use.
+            AIO_PENDING,                ///< An AIO request is outstanding.
+            AIO_COMPLETE                ///< An AIO request is complete.
+ };
+
+ protected:
+ /**
+ * \brief Page control block, carries control and state information for each page in the
+ * cache.
+ */
+ struct page_cb
+ {
+ u_int16_t _index; ///< Index of this page
+ page_state _state; ///< Status of page
+ u_int64_t _frid; ///< First rid in page (used for fhdr init)
+ u_int32_t _wdblks; ///< Total number of dblks in page so far
+ u_int32_t _rdblks; ///< Total number of dblks in page
+ std::deque<data_tok*>* _pdtokl; ///< Page message tokens list
+ fcntl* _wfh; ///< File handle for incrementing write compl counts
+ fcntl* _rfh; ///< File handle for incrementing read compl counts
+ void* _pbuff; ///< Page buffer
+
+ page_cb(u_int16_t index); ///< Convenience constructor
+ const char* state_str() const; ///< Return state as string for this pcb
+ };
+
+ static const u_int32_t _sblksize; ///< Disk softblock size
+        u_int32_t _cache_pgsize_sblks;  ///< Size of each cache page in softblocks (sblks)
+        u_int16_t _cache_num_pages;     ///< Number of pages in the page cache
+ jcntl* _jc; ///< Pointer to journal controller
+ enq_map& _emap; ///< Ref to enqueue map
+ txn_map& _tmap; ///< Ref to transaction map
+ void* _page_base_ptr; ///< Base pointer to page memory
+        void** _page_ptr_arr;           ///< Array of pointers to pages within the page memory block
+ page_cb* _page_cb_arr; ///< Array of page_cb structs
+ aio_cb* _aio_cb_arr; ///< Array of iocb structs
+ aio_event* _aio_event_arr; ///< Array of io_events
+ io_context_t _ioctx; ///< AIO context for read/write operations
+ u_int16_t _pg_index; ///< Index of current page being used
+ u_int32_t _pg_cntr; ///< Page counter; determines if file rotation req'd
+ u_int32_t _pg_offset_dblks; ///< Page offset (used so far) in data blocks
+ u_int32_t _aio_evt_rem; ///< Remaining AIO events
+ aio_callback* _cbp; ///< Pointer to callback object
+
+ enq_rec _enq_rec; ///< Enqueue record used for encoding/decoding
+ deq_rec _deq_rec; ///< Dequeue record used for encoding/decoding
+ txn_rec _txn_rec; ///< Transaction record used for encoding/decoding
+
+ public:
+ pmgr(jcntl* jc, enq_map& emap, txn_map& tmap);
+ virtual ~pmgr();
+
+ virtual int32_t get_events(page_state state, timespec* const timeout, bool flush = false) = 0;
+ inline u_int32_t get_aio_evt_rem() const { return _aio_evt_rem; }
+ static const char* page_state_str(page_state ps);
+ inline u_int32_t cache_pgsize_sblks() const { return _cache_pgsize_sblks; }
+ inline u_int16_t cache_num_pages() const { return _cache_num_pages; }
+
+ protected:
+ virtual void initialize(aio_callback* const cbp, const u_int32_t cache_pgsize_sblks,
+ const u_int16_t cache_num_pages);
+ virtual void rotate_page() = 0;
+ virtual void clean();
+ };
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_PMGR_H
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/rcvdat.h b/qpid/cpp/src/qpid/legacystore/jrnl/rcvdat.h
new file mode 100644
index 0000000000..a7ef2341f0
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/rcvdat.h
@@ -0,0 +1,181 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file rcvdat.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * Contains structure for recovery status and offset data.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_RCVDAT_H
+#define QPID_LEGACYSTORE_JRNL_RCVDAT_H
+
+#include <cstddef>
+#include <iomanip>
+#include <map>
+#include "qpid/legacystore/jrnl/jcfg.h"
+#include <sstream>
+#include <sys/types.h>
+#include <vector>
+
+namespace mrg
+{
+namespace journal
+{
+
+ struct rcvdat
+ {
+ u_int16_t _njf; ///< Number of journal files
+ bool _ae; ///< Auto-expand mode
+ u_int16_t _aemjf; ///< Auto-expand mode max journal files
+ bool _owi; ///< Overwrite indicator
+ bool _frot; ///< First rotation flag
+ bool _jempty; ///< Journal data files empty
+ u_int16_t _ffid; ///< First file id
+ std::size_t _fro; ///< First record offset in ffid
+ u_int16_t _lfid; ///< Last file id
+ std::size_t _eo; ///< End offset (first byte past last record)
+ u_int64_t _h_rid; ///< Highest rid found
+ bool _lffull; ///< Last file is full
+ bool _jfull; ///< Journal is full
+ std::vector<u_int16_t> _fid_list; ///< Fid-lid mapping - list of fids in order of lid
+        std::vector<u_int32_t> _enq_cnt_list; ///< Number of enqueued records found for each file
+
+ rcvdat():
+ _njf(0),
+ _ae(false),
+ _aemjf(0),
+ _owi(false),
+ _frot(false),
+ _jempty(true),
+ _ffid(0),
+ _fro(0),
+ _lfid(0),
+ _eo(0),
+ _h_rid(0),
+ _lffull(false),
+ _jfull(false),
+ _fid_list(),
+ _enq_cnt_list()
+ {}
+
+ void reset(const u_int16_t num_jfiles, const bool auto_expand, const u_int16_t ae_max_jfiles)
+ {
+ _njf = num_jfiles;
+ _ae = auto_expand;
+ _aemjf = ae_max_jfiles;
+ _owi = false;
+ _frot = false;
+ _jempty = true;
+ _ffid = 0;
+ _fro = 0;
+ _lfid = 0;
+ _eo = 0;
+ _h_rid = 0;
+ _lffull = false;
+ _jfull = false;
+ _fid_list.clear();
+ _enq_cnt_list.clear();
+ _enq_cnt_list.resize(num_jfiles, 0);
+ }
+
+ // Find first fid with enqueued records
+ u_int16_t ffid()
+ {
+ u_int16_t index = _ffid;
+ while (index != _lfid && _enq_cnt_list[index] == 0)
+ {
+ if (++index >= _njf)
+ index = 0;
+ }
+ return index;
+ }
+
+ std::string to_string(const std::string& jid)
+ {
+ std::ostringstream oss;
+ oss << "Recover file analysis (jid=\"" << jid << "\"):" << std::endl;
+ oss << " Number of journal files (_njf) = " << _njf << std::endl;
+ oss << " Auto-expand mode (_ae) = " << (_ae ? "TRUE" : "FALSE") << std::endl;
+ if (_ae) oss << " Auto-expand mode max journal files (_aemjf) = " << _aemjf << std::endl;
+ oss << " Overwrite indicator (_owi) = " << (_owi ? "TRUE" : "FALSE") << std::endl;
+ oss << " First rotation (_frot) = " << (_frot ? "TRUE" : "FALSE") << std::endl;
+ oss << " Journal empty (_jempty) = " << (_jempty ? "TRUE" : "FALSE") << std::endl;
+ oss << " First (earliest) fid (_ffid) = " << _ffid << std::endl;
+ oss << " First record offset in first fid (_fro) = 0x" << std::hex << _fro <<
+ std::dec << " (" << (_fro/JRNL_DBLK_SIZE) << " dblks)" << std::endl;
+ oss << " Last (most recent) fid (_lfid) = " << _lfid << std::endl;
+ oss << " End offset (_eo) = 0x" << std::hex << _eo << std::dec << " (" <<
+ (_eo/JRNL_DBLK_SIZE) << " dblks)" << std::endl;
+ oss << " Highest rid (_h_rid) = 0x" << std::hex << _h_rid << std::dec << std::endl;
+ oss << " Last file full (_lffull) = " << (_lffull ? "TRUE" : "FALSE") << std::endl;
+ oss << " Journal full (_jfull) = " << (_jfull ? "TRUE" : "FALSE") << std::endl;
+ oss << " Normalized fid list (_fid_list) = [";
+ for (std::vector<u_int16_t>::const_iterator i = _fid_list.begin(); i < _fid_list.end(); i++)
+ {
+ if (i != _fid_list.begin()) oss << ", ";
+ oss << *i;
+ }
+ oss << "]" << std::endl;
+ oss << " Enqueued records (txn & non-txn):" << std::endl;
+ for (unsigned i=0; i<_enq_cnt_list.size(); i++)
+ oss << " File " << std::setw(2) << i << ": " << _enq_cnt_list[i] <<
+ std::endl;
+ return oss.str();
+ }
+
+ std::string to_log(const std::string& jid)
+ {
+ std::ostringstream oss;
+ oss << "Recover file analysis (jid=\"" << jid << "\"):";
+ oss << " njf=" << _njf;
+                oss << " ae=" << (_ae ? "T" : "F");
+                oss << " aemjf=" << _aemjf;
+                oss << " owi=" << (_owi ? "T" : "F");
+ oss << " frot=" << (_frot ? "T" : "F");
+ oss << " jempty=" << (_jempty ? "T" : "F");
+ oss << " ffid=" << _ffid;
+ oss << " fro=0x" << std::hex << _fro << std::dec << " (" <<
+ (_fro/JRNL_DBLK_SIZE) << " dblks)";
+ oss << " lfid=" << _lfid;
+ oss << " eo=0x" << std::hex << _eo << std::dec << " (" <<
+ (_eo/JRNL_DBLK_SIZE) << " dblks)";
+ oss << " h_rid=0x" << std::hex << _h_rid << std::dec;
+ oss << " lffull=" << (_lffull ? "T" : "F");
+ oss << " jfull=" << (_jfull ? "T" : "F");
+ oss << " Enqueued records (txn & non-txn): [ ";
+ for (unsigned i=0; i<_enq_cnt_list.size(); i++)
+ {
+ if (i) oss << " ";
+ oss << "fid_" << std::setw(2) << std::setfill('0') << i << "=" << _enq_cnt_list[i];
+ }
+ oss << " ]";
+ return oss.str();
+ }
+ };
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_RCVDAT_H
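
rcvdat::ffid() above walks the per-file enqueue counts circularly, starting at _ffid and wrapping at _njf, until it reaches a file that still holds enqueued records or arrives at _lfid. The standalone sketch below is not part of the patch; it restates that wrap-around scan as a free function, and the function name and sample counts are illustrative only.

    // Standalone illustration (not rcvdat's code): find the first file id, scanning
    // circularly from ffid and stopping at lfid, that still has enqueued records.
    #include <iostream>
    #include <sys/types.h>
    #include <vector>

    u_int16_t first_fid_with_enqueues(u_int16_t ffid, u_int16_t lfid, u_int16_t njf,
                                      const std::vector<u_int32_t>& enq_cnt)
    {
        u_int16_t i = ffid;
        while (i != lfid && enq_cnt[i] == 0)
        {
            if (++i >= njf)
                i = 0;                  // wrap around the circular file group
        }
        return i;
    }

    int main()
    {
        std::vector<u_int32_t> counts(4, 0);    // 4 journal files
        counts[2] = 3;                          // files 0 and 1 fully dequeued
        counts[3] = 1;
        std::cout << first_fid_with_enqueues(0, 3, 4, counts) << std::endl;  // prints 2
        return 0;
    }
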
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/rec_hdr.h b/qpid/cpp/src/qpid/legacystore/jrnl/rec_hdr.h
new file mode 100644
index 0000000000..ff6325a760
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/rec_hdr.h
@@ -0,0 +1,143 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file rec_hdr.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::rec_hdr (record header),
+ * which is a common initial header used for all journal record structures
+ * except the record tail (rec_tail).
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_REC_HDR_H
+#define QPID_LEGACYSTORE_JRNL_REC_HDR_H
+
+#include <cstddef>
+#include "qpid/legacystore/jrnl/jcfg.h"
+#include <sys/types.h>
+
+namespace mrg
+{
+namespace journal
+{
+
+#pragma pack(1)
+
+ /**
+ * \brief Struct for data common to the head of all journal files and records.
+ * This includes identification for the file type, the encoding version, endian
+ * indicator and a record ID.
+ *
+    * Record header info in binary format (16 bytes):
+ * <pre>
+ * 0 7
+ * +---+---+---+---+---+---+---+---+
+ * | magic | v | e | flags |
+ * +---+---+---+---+---+---+---+---+
+ * | rid |
+ * +---+---+---+---+---+---+---+---+
+ * v = file version (If the format or encoding of this file changes, then this
+ * number should be incremented)
+ * e = endian flag, false (0x00) for little endian, true (0x01) for big endian
+ * </pre>
+ *
+ * Note that journal files should be transferable between 32- and 64-bit
+ * hardware of the same endianness, but not between hardware of opposite
+    * endianness without some sort of binary conversion utility. Thus buffering
+ * will be needed for types that change size between 32- and 64-bit compiles.
+ */
+ struct rec_hdr
+ {
+ u_int32_t _magic; ///< File type identifier (magic number)
+ u_int8_t _version; ///< File encoding version
+ u_int8_t _eflag; ///< Flag for determining endianness
+ u_int16_t _uflag; ///< User-defined flags
+ u_int64_t _rid; ///< Record ID (rotating 64-bit counter)
+
+ // Global flags
+ static const u_int16_t HDR_OVERWRITE_INDICATOR_MASK = 0x1;
+
+ // Convenience constructors and methods
+ /**
+ * \brief Default constructor, which sets all values to 0.
+ */
+ inline rec_hdr(): _magic(0), _version(0), _eflag(0), _uflag(0), _rid(0) {}
+
+ /**
+ * \brief Convenience constructor which initializes values during construction.
+ */
+ inline rec_hdr(const u_int32_t magic, const u_int8_t version, const u_int64_t rid,
+ const bool owi): _magic(magic), _version(version),
+#if defined(JRNL_BIG_ENDIAN)
+ _eflag(RHM_BENDIAN_FLAG),
+#else
+ _eflag(RHM_LENDIAN_FLAG),
+#endif
+ _uflag(owi ? HDR_OVERWRITE_INDICATOR_MASK : 0), _rid(rid) {}
+
+ /**
+ * \brief Convenience copy method.
+ */
+ inline void hdr_copy(const rec_hdr& h)
+ {
+ _magic = h._magic;
+ _version = h._version;
+ _eflag = h._eflag;
+ _uflag = h._uflag;
+            _rid = h._rid;
+ }
+
+ /**
+ * \brief Resets all fields to 0
+ */
+ inline void reset()
+ {
+ _magic = 0;
+ _version = 0;
+ _eflag = 0;
+ _uflag = 0;
+ _rid = 0;
+ }
+
+ inline bool get_owi() const { return _uflag & HDR_OVERWRITE_INDICATOR_MASK; }
+
+ inline void set_owi(const bool owi)
+ {
+ _uflag = owi ? _uflag | HDR_OVERWRITE_INDICATOR_MASK :
+ _uflag & (~HDR_OVERWRITE_INDICATOR_MASK);
+ }
+
+ /**
+ * \brief Returns the size of the header in bytes.
+ */
+ inline static std::size_t size() { return sizeof(rec_hdr); }
+ }; // struct rec_hdr
+
+#pragma pack()
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_REC_HDR_H
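
rec_hdr keeps the overwrite indicator (OWI) in the low bit of _uflag, set and cleared by set_owi()/get_owi() above. The sketch below is not part of the patch; it shows the same mask-based bit handling on a plain 16-bit flag word, and the helper names are illustrative.

    // Standalone illustration (not rec_hdr's code): set/clear/test a low-bit flag
    // while preserving any other user-defined flag bits.
    #include <cassert>
    #include <sys/types.h>

    const u_int16_t OWI_MASK = 0x1;     // same low-bit convention as HDR_OVERWRITE_INDICATOR_MASK

    void set_owi(u_int16_t& uflag, const bool owi)
    {
        uflag = owi ? (uflag | OWI_MASK)        // set the low bit
                    : (uflag & ~OWI_MASK);      // clear the low bit, preserve other flags
    }

    bool get_owi(const u_int16_t uflag) { return uflag & OWI_MASK; }

    int main()
    {
        u_int16_t uflag = 0;
        set_owi(uflag, true);
        assert(get_owi(uflag));
        set_owi(uflag, false);
        assert(!get_owi(uflag));
        return 0;
    }
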
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/rec_tail.h b/qpid/cpp/src/qpid/legacystore/jrnl/rec_tail.h
new file mode 100644
index 0000000000..0c36151927
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/rec_tail.h
@@ -0,0 +1,98 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file rec_tail.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::rec_tail (record tail), used to
+ * finalize a persistent record. The presence of a valid tail at the expected
+ * position in the journal file indicates that the record write was completed.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_REC_TAIL_H
+#define QPID_LEGACYSTORE_JRNL_REC_TAIL_H
+
+#include <cstddef>
+#include "qpid/legacystore/jrnl/jcfg.h"
+
+namespace mrg
+{
+namespace journal
+{
+
+#pragma pack(1)
+
+ /**
+ * \brief Struct for data common to the tail of all records. The magic number
+ * used here is the binary inverse (1's complement) of the magic used in the
+ * record header; this minimizes possible confusion with other headers that may
+ * be present during recovery. The tail is used with all records that have either
+ * XIDs or data - ie any size-variable content. Currently the only records that
+ * do NOT use the tail are non-transactional dequeues and filler records.
+ *
+    * Record tail info in binary format (12 bytes):
+ * <pre>
+ * 0 7
+ * +---+---+---+---+---+---+---+---+
+ * | ~(magic) | rid |
+ * +---+---+---+---+---+---+---+---+
+ * | rid (con't) |
+ * +---+---+---+---+
+ * </pre>
+ */
+ struct rec_tail
+ {
+ u_int32_t _xmagic; ///< Binary inverse (1's complement) of hdr magic number
+        u_int64_t _rid;         ///< Record ID (rotating 64-bit counter)
+
+ /**
+ * \brief Default constructor, which sets all values to 0.
+ */
+ inline rec_tail(): _xmagic(0xffffffff), _rid(0) {}
+
+ /**
+        * \brief Convenience constructor which initializes values during construction from an
+        *     existing rec_hdr instance.
+ */
+ inline rec_tail(const rec_hdr& h): _xmagic(~h._magic), _rid(h._rid) {}
+
+ /**
+ * \brief Convenience constructor which initializes values during construction.
+ */
+ inline rec_tail(const u_int32_t xmagic, const u_int64_t rid): _xmagic(xmagic), _rid(rid) {}
+
+ /**
+        * \brief Returns the size of the record tail in bytes.
+ */
+ inline static std::size_t size() { return sizeof(rec_tail); }
+ };
+
+#pragma pack()
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_REC_TAIL_H
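
As the comment above explains, a record is considered complete during recovery when a tail is found whose _xmagic is the one's complement of the header magic and whose rid matches the header rid. The standalone sketch below is not part of the patch; it demonstrates that check with mirror structs, and the struct names and magic value are illustrative, not the store's RHM_JDAT_* constants.

    // Standalone illustration (not the store's code): header/tail consistency check.
    #include <iostream>
    #include <sys/types.h>

    #pragma pack(1)
    struct hdr_like  { u_int32_t _magic; u_int8_t _version; u_int8_t _eflag; u_int16_t _uflag; u_int64_t _rid; };
    struct tail_like { u_int32_t _xmagic; u_int64_t _rid; };
    #pragma pack()

    bool record_complete(const hdr_like& h, const tail_like& t)
    {
        // Tail carries the one's complement of the header magic and the same rid.
        return t._xmagic == static_cast<u_int32_t>(~h._magic) && t._rid == h._rid;
    }

    int main()
    {
        hdr_like h;
        h._magic = 0x51454e51;      // illustrative magic value only
        h._version = 1; h._eflag = 0; h._uflag = 0; h._rid = 42;

        tail_like good; good._xmagic = ~h._magic; good._rid = h._rid;
        tail_like bad;  bad._xmagic  = ~h._magic; bad._rid  = 7;

        std::cout << record_complete(h, good) << " " << record_complete(h, bad) << std::endl;  // 1 0
        return 0;
    }
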
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/rfc.cpp b/qpid/cpp/src/qpid/legacystore/jrnl/rfc.cpp
new file mode 100644
index 0000000000..9b5ed95e81
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/rfc.cpp
@@ -0,0 +1,82 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file rfc.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::rfc (rotating
+ * file controller). See comments in file rfc.h for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#include "qpid/legacystore/jrnl/rfc.h"
+
+#include <cassert>
+
+namespace mrg
+{
+namespace journal
+{
+
+rfc::rfc(const lpmgr* lpmp): _lpmp(lpmp), _fc_index(0), _curr_fc(0)
+{}
+
+rfc::~rfc()
+{}
+
+void
+rfc::finalize()
+{
+ unset_findex();
+}
+
+void
+rfc::set_findex(const u_int16_t fc_index)
+{
+ _fc_index = fc_index;
+ _curr_fc = _lpmp->get_fcntlp(fc_index);
+ _curr_fc->rd_reset();
+}
+
+void
+rfc::unset_findex()
+{
+ _fc_index = 0;
+ _curr_fc = 0;
+}
+
+std::string
+rfc::status_str() const
+{
+ if (!_lpmp->is_init())
+ return "state: Uninitialized";
+ if (_curr_fc == 0)
+ return "state: Inactive";
+ std::ostringstream oss;
+ oss << "state: Active";
+ return oss.str();
+}
+
+} // namespace journal
+} // namespace mrg
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/rfc.h b/qpid/cpp/src/qpid/legacystore/jrnl/rfc.h
new file mode 100644
index 0000000000..faa5d566ba
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/rfc.h
@@ -0,0 +1,193 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file rfc.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::rfc (rotating
+ * file controller). See class documentation for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_RFC_H
+#define QPID_LEGACYSTORE_JRNL_RFC_H
+
+namespace mrg
+{
+namespace journal
+{
+class rfc;
+}
+}
+
+#include "qpid/legacystore/jrnl/lpmgr.h"
+#include "qpid/legacystore/jrnl/enums.h"
+
+namespace mrg
+{
+namespace journal
+{
+
+ /**
+ * \class rfc
+    * \brief Rotating File Controller (rfc) - Class to handle the management of an array of file controller (fcntl)
+    * objects for use in a circular disk buffer (journal). Each fcntl object corresponds to a file in the journal.
+ *
+ * The following states exist in this class:
+ *
+ * <pre>
+ * is_init() is_active()
+ * +===+ _lpmp.is_init() == false
+ * +---------->| | Uninitialized: _curr_fc == 0 F F
+ * | +-->+===+ --+
+ * | | |
+ * | | |
+ * | finalize() initialize()
+ * | | |
+ * | | |
+ * | +-- +===+<--+ _lpmp.is_init() == true
+ * finalize() | | Inactive: _curr_fc == 0 T F
+ * | +-->+===+ --+
+ * | | |
+ * | | |
+ * | unset_findex() set_findex()
+ * | | |
+ * | | |
+ * | +-- +===+<--+ _lpmp.is_init() == true
+ * +---------- | | Active: _curr_fc != 0 T T
+ * +===+
+ * </pre>
+ *
+ * The Uninitialized state is where the class starts after construction. Once the number of files is known and
+ * the array of file controllers allocated, then initialize() is called to set these, causing the state to move
+ * to Inactive.
+ *
+ * The Inactive state has the file controllers allocated and pointing to their respective journal files, but no
+ * current file controller has been selected. The pointer to the current file controller _curr_fc is null. Once the
+ * index of the active file is known, then calling set_findex() will set the index and internal pointer
+ * to the currently active file controller. This moves the state to Active.
+ *
+ * Note TODO: Comment on sync issues between change in num files in _lpmp and _fc_index/_curr_fc.
+ */
+ class rfc
+ {
+ protected:
+ const lpmgr* _lpmp; ///< Pointer to jcntl's lpmgr instance containing lfid/pfid map and fcntl objects
+ u_int16_t _fc_index; ///< Index of current file controller
+ fcntl* _curr_fc; ///< Pointer to current file controller
+
+ public:
+ rfc(const lpmgr* lpmp);
+ virtual ~rfc();
+
+ /**
+ * \brief Initialize the controller, moving from state Uninitialized to Inactive. The main function of
+ * initialize() is to set the number of files and the pointer to the fcntl array.
+ */
+ virtual inline void initialize() {}
+
+ /**
+ * \brief Reset the controller to Uninitialized state, usually called when the journal is stopped. Once called,
+ * initialize() must be called to reuse an instance.
+ */
+ virtual void finalize();
+
+ /**
+        * \brief Check initialization state: true = not Uninitialized, i.e. Inactive or Active; false = Uninitialized.
+ */
+ virtual inline bool is_init() const { return _lpmp->is_init(); }
+
+ /**
+        * \brief Check active state: true = initialized and _curr_fc not null, i.e. state Active; false otherwise.
+ */
+ virtual inline bool is_active() const { return _lpmp->is_init() && _curr_fc != 0; }
+
+ /**
+ * \brief Sets the current file index and active fcntl object. Moves to state Active.
+ */
+ virtual void set_findex(const u_int16_t fc_index);
+
+ /**
+ * \brief Nulls the current file index and active fcntl pointer, moves to state Inactive.
+ */
+ virtual void unset_findex();
+
+ /**
+ * \brief Rotate active file controller to next file in rotating file group.
+ * \exception jerrno::JERR__NINIT if called before calling initialize().
+ */
+ virtual iores rotate() = 0;
+
+ /**
+ * \brief Returns the index of the currently active file within the rotating file group.
+ */
+ inline u_int16_t index() const { return _fc_index; }
+
+ /**
+ * \brief Returns the currently active journal file controller within the rotating file group.
+ */
+ inline fcntl* file_controller() const { return _curr_fc; }
+
+ /**
+ * \brief Returns the currently active physical file id (pfid)
+ */
+ inline u_int16_t pfid() const { return _curr_fc->pfid(); }
+
+ // Convenience access methods to current file controller
+ // Note: Do not call when not in active state
+
+ inline u_int32_t enqcnt() const { return _curr_fc->enqcnt(); }
+ inline u_int32_t incr_enqcnt() { return _curr_fc->incr_enqcnt(); }
+ inline u_int32_t incr_enqcnt(const u_int16_t fid) { return _lpmp->get_fcntlp(fid)->incr_enqcnt(); }
+ inline u_int32_t add_enqcnt(const u_int32_t a) { return _curr_fc->add_enqcnt(a); }
+ inline u_int32_t add_enqcnt(const u_int16_t fid, const u_int32_t a)
+ { return _lpmp->get_fcntlp(fid)->add_enqcnt(a); }
+ inline u_int32_t decr_enqcnt(const u_int16_t fid) { return _lpmp->get_fcntlp(fid)->decr_enqcnt(); }
+ inline u_int32_t subtr_enqcnt(const u_int16_t fid, const u_int32_t s)
+ { return _lpmp->get_fcntlp(fid)->subtr_enqcnt(s); }
+
+ virtual inline u_int32_t subm_cnt_dblks() const = 0;
+ virtual inline std::size_t subm_offs() const = 0;
+ virtual inline u_int32_t add_subm_cnt_dblks(u_int32_t a) = 0;
+
+ virtual inline u_int32_t cmpl_cnt_dblks() const = 0;
+ virtual inline std::size_t cmpl_offs() const = 0;
+ virtual inline u_int32_t add_cmpl_cnt_dblks(u_int32_t a) = 0;
+
+ virtual inline bool is_void() const = 0;
+ virtual inline bool is_empty() const = 0;
+ virtual inline u_int32_t remaining_dblks() const = 0;
+ virtual inline bool is_full() const = 0;
+ virtual inline bool is_compl() const = 0;
+ virtual inline u_int32_t aio_outstanding_dblks() const = 0;
+ virtual inline bool file_rotate() const = 0;
+
+ // Debug aid
+ virtual std::string status_str() const;
+ }; // class rfc
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_RFC_H
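
The class comment above describes a three-state lifecycle: Uninitialized after construction, Inactive once initialize() has run, and Active once set_findex() has chosen a current file controller, with finalize() returning to Uninitialized. The sketch below is not part of the patch; it reduces that diagram to a minimal standalone state machine, and rfc_like with its asserts is illustrative only.

    // Standalone illustration (not rfc's code): the Uninitialized/Inactive/Active lifecycle.
    #include <cassert>

    enum state_t { UNINITIALIZED, INACTIVE, ACTIVE };

    class rfc_like
    {
        state_t _s;
      public:
        rfc_like() : _s(UNINITIALIZED) {}
        void initialize()   { assert(_s == UNINITIALIZED); _s = INACTIVE; }  // file controllers now known
        void set_findex()   { assert(_s == INACTIVE);      _s = ACTIVE;   }  // current file selected
        void unset_findex() { assert(_s == ACTIVE);        _s = INACTIVE; }
        void finalize()     { _s = UNINITIALIZED; }                          // allowed from any state
        bool is_init()   const { return _s != UNINITIALIZED; }
        bool is_active() const { return _s == ACTIVE; }
    };

    int main()
    {
        rfc_like r;
        r.initialize();      // Uninitialized -> Inactive
        r.set_findex();      // Inactive -> Active
        assert(r.is_init() && r.is_active());
        r.unset_findex();    // Active -> Inactive
        r.finalize();        // back to Uninitialized
        assert(!r.is_init());
        return 0;
    }
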
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/rmgr.cpp b/qpid/cpp/src/qpid/legacystore/jrnl/rmgr.cpp
new file mode 100644
index 0000000000..3a11817d1e
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/rmgr.cpp
@@ -0,0 +1,698 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file rmgr.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::rmgr (read manager). See
+ * comments in file rmgr.h for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#include "qpid/legacystore/jrnl/rmgr.h"
+
+#include <cassert>
+#include <cerrno>
+#include <cstdlib>
+#include "qpid/legacystore/jrnl/jcntl.h"
+#include "qpid/legacystore/jrnl/jerrno.h"
+#include <sstream>
+
+namespace mrg
+{
+namespace journal
+{
+
+rmgr::rmgr(jcntl* jc, enq_map& emap, txn_map& tmap, rrfc& rrfc):
+ pmgr(jc, emap, tmap),
+ _rrfc(rrfc),
+ _hdr(),
+ _fhdr_buffer(0),
+ _fhdr_aio_cb_ptr(0),
+ _fhdr_rd_outstanding(false)
+{}
+
+rmgr::~rmgr()
+{
+ rmgr::clean();
+}
+
+void
+rmgr::initialize(aio_callback* const cbp)
+{
+ pmgr::initialize(cbp, JRNL_RMGR_PAGE_SIZE, JRNL_RMGR_PAGES);
+ clean();
+ // Allocate memory for reading file header
+ if (::posix_memalign(&_fhdr_buffer, _sblksize, _sblksize))
+ {
+ std::ostringstream oss;
+ oss << "posix_memalign(): blksize=" << _sblksize << " size=" << _sblksize;
+ oss << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR__MALLOC, oss.str(), "rmgr", "initialize");
+ }
+ _fhdr_aio_cb_ptr = new aio_cb;
+    std::memset(_fhdr_aio_cb_ptr, 0, sizeof(aio_cb));
+}
+
+void
+rmgr::clean()
+{
+ std::free(_fhdr_buffer);
+ _fhdr_buffer = 0;
+
+ if (_fhdr_aio_cb_ptr)
+ {
+ delete _fhdr_aio_cb_ptr;
+ _fhdr_aio_cb_ptr = 0;
+ }
+}
+
+iores
+rmgr::read(void** const datapp, std::size_t& dsize, void** const xidpp, std::size_t& xidsize,
+ bool& transient, bool& external, data_tok* dtokp, bool ignore_pending_txns)
+{
+ iores res = pre_read_check(dtokp);
+ if (res != RHM_IORES_SUCCESS)
+ {
+ set_params_null(datapp, dsize, xidpp, xidsize);
+ return res;
+ }
+
+ if (dtokp->rstate() == data_tok::SKIP_PART)
+ {
+ if (_page_cb_arr[_pg_index]._state != AIO_COMPLETE)
+ {
+ aio_cycle(); // check if rd AIOs returned; initiate new reads if possible
+ return RHM_IORES_PAGE_AIOWAIT;
+ }
+ const iores res = skip(dtokp);
+ if (res != RHM_IORES_SUCCESS)
+ {
+ set_params_null(datapp, dsize, xidpp, xidsize);
+ return res;
+ }
+ }
+ if (dtokp->rstate() == data_tok::READ_PART)
+ {
+ assert(dtokp->rid() == _hdr._rid);
+ void* rptr = (void*)((char*)_page_ptr_arr[_pg_index] + (_pg_offset_dblks * JRNL_DBLK_SIZE));
+ const iores res = read_enq(_hdr, rptr, dtokp);
+ dsize = _enq_rec.get_data(datapp);
+ xidsize = _enq_rec.get_xid(xidpp);
+ transient = _enq_rec.is_transient();
+ external = _enq_rec.is_external();
+ return res;
+ }
+
+ set_params_null(datapp, dsize, xidpp, xidsize);
+ _hdr.reset();
+ // Read header, determine next record type
+ while (true)
+ {
+ if(dblks_rem() == 0 && _rrfc.is_compl() && !_rrfc.is_wr_aio_outstanding())
+ {
+ aio_cycle(); // check if rd AIOs returned; initiate new reads if possible
+ if(dblks_rem() == 0 && _rrfc.is_compl() && !_rrfc.is_wr_aio_outstanding())
+ {
+ if (_jc->unflushed_dblks() > 0)
+ _jc->flush();
+ else if (!_aio_evt_rem)
+ return RHM_IORES_EMPTY;
+ }
+ }
+ if (_page_cb_arr[_pg_index]._state != AIO_COMPLETE)
+ {
+ aio_cycle();
+ return RHM_IORES_PAGE_AIOWAIT;
+ }
+ void* rptr = (void*)((char*)_page_ptr_arr[_pg_index] + (_pg_offset_dblks * JRNL_DBLK_SIZE));
+ std::memcpy(&_hdr, rptr, sizeof(rec_hdr));
+ switch (_hdr._magic)
+ {
+ case RHM_JDAT_ENQ_MAGIC:
+ {
+ _enq_rec.reset(); // sets enqueue rec size
+ // Check if RID of this rec is still enqueued, if so read it, else skip
+ bool is_enq = false;
+ int16_t fid = _emap.get_pfid(_hdr._rid);
+ if (fid < enq_map::EMAP_OK)
+ {
+ bool enforce_txns = !_jc->is_read_only() && !ignore_pending_txns;
+ // Block read for transactionally locked record (only when not recovering)
+ if (fid == enq_map::EMAP_LOCKED && enforce_txns)
+ return RHM_IORES_TXPENDING;
+
+ // (Recover mode only) Ok, not in emap - now search tmap, if present then read
+ is_enq = _tmap.is_enq(_hdr._rid);
+ if (enforce_txns && is_enq)
+ return RHM_IORES_TXPENDING;
+ }
+ else
+ is_enq = true;
+
+ if (is_enq) // ok, this record is enqueued, check it, then read it...
+ {
+ if (dtokp->rid())
+ {
+ if (_hdr._rid != dtokp->rid())
+ {
+ std::ostringstream oss;
+ oss << std::hex << "rid=0x" << _hdr._rid << "; dtok_rid=0x" << dtokp->rid()
+ << "; dtok_id=0x" << dtokp->id();
+ throw jexception(jerrno::JERR_RMGR_RIDMISMATCH, oss.str(), "rmgr", "read");
+ }
+ }
+ else
+ dtokp->set_rid(_hdr._rid);
+
+// TODO: Add member _fid to pmgr::page_cb which indicates the fid from which this page was
+// populated. When this value is set in wmgr::flush() somewhere, then uncomment the following
+// check:
+// if (fid != _page_cb_arr[_pg_index]._fid)
+// {
+// std::ostringstream oss;
+// oss << std::hex << std::setfill('0');
+// oss << "rid=0x" << std::setw(16) << _hdr._rid;
+// oss << "; emap_fid=0x" << std::setw(4) << fid;
+// oss << "; current_fid=" << _rrfc.fid();
+// throw jexception(jerrno::JERR_RMGR_FIDMISMATCH, oss.str(), "rmgr",
+// "read");
+// }
+
+ const iores res = read_enq(_hdr, rptr, dtokp);
+ dsize = _enq_rec.get_data(datapp);
+ xidsize = _enq_rec.get_xid(xidpp);
+ transient = _enq_rec.is_transient();
+ external = _enq_rec.is_external();
+ return res;
+ }
+ else // skip this record, it is already dequeued
+ consume_xid_rec(_hdr, rptr, dtokp);
+ break;
+ }
+ case RHM_JDAT_DEQ_MAGIC:
+ consume_xid_rec(_hdr, rptr, dtokp);
+ break;
+ case RHM_JDAT_TXA_MAGIC:
+ consume_xid_rec(_hdr, rptr, dtokp);
+ break;
+ case RHM_JDAT_TXC_MAGIC:
+ consume_xid_rec(_hdr, rptr, dtokp);
+ break;
+ case RHM_JDAT_EMPTY_MAGIC:
+ consume_filler();
+ break;
+ default:
+ return RHM_IORES_EMPTY;
+ }
+ }
+}
+
+int32_t
+rmgr::get_events(page_state state, timespec* const timeout, bool flush)
+{
+ if (_aio_evt_rem == 0) // no events to get
+ return 0;
+
+ int32_t ret;
+ if ((ret = aio::getevents(_ioctx, flush ? _aio_evt_rem : 1, _aio_evt_rem/*_cache_num_pages + _jc->num_jfiles()*/, _aio_event_arr, timeout)) < 0)
+ {
+ if (ret == -EINTR) // Interrupted by signal
+ return 0;
+ std::ostringstream oss;
+ oss << "io_getevents() failed: " << std::strerror(-ret) << " (" << ret << ")";
+ throw jexception(jerrno::JERR__AIO, oss.str(), "rmgr", "get_events");
+ }
+ if (ret == 0 && timeout)
+ return jerrno::AIO_TIMEOUT;
+
+ std::vector<u_int16_t> pil;
+ pil.reserve(ret);
+ for (int i=0; i<ret; i++) // Index of returned AIOs
+ {
+ if (_aio_evt_rem == 0)
+ {
+ std::ostringstream oss;
+ oss << "_aio_evt_rem; evt " << (i + 1) << " of " << ret;
+ throw jexception(jerrno::JERR__UNDERFLOW, oss.str(), "rmgr", "get_events");
+ }
+ _aio_evt_rem--;
+ aio_cb* aiocbp = _aio_event_arr[i].obj; // This I/O control block (iocb)
+ page_cb* pcbp = (page_cb*)(aiocbp->data); // This page control block (pcb)
+ long aioret = (long)_aio_event_arr[i].res;
+ if (aioret < 0)
+ {
+ std::ostringstream oss;
+ oss << "AIO read operation failed: " << std::strerror(-aioret) << " (" << aioret << ")";
+ oss << " [pg=" << pcbp->_index << " buf=" << aiocbp->u.c.buf;
+ oss << " rsize=0x" << std::hex << aiocbp->u.c.nbytes;
+ oss << " offset=0x" << aiocbp->u.c.offset << std::dec;
+ oss << " fh=" << aiocbp->aio_fildes << "]";
+ throw jexception(jerrno::JERR__AIO, oss.str(), "rmgr", "get_events");
+ }
+
+ if (pcbp) // Page reads have pcb
+ {
+ if (pcbp->_rfh->rd_subm_cnt_dblks() >= JRNL_SBLK_SIZE) // Detects if write reset of this fcntl obj has occurred.
+ {
+ // Increment the completed read offset
+ // NOTE: We cannot use _rrfc here, as it may have rotated since submitting count.
+ // Use stored pointer to fcntl in the pcb instead.
+ pcbp->_rdblks = aiocbp->u.c.nbytes / JRNL_DBLK_SIZE;
+ pcbp->_rfh->add_rd_cmpl_cnt_dblks(pcbp->_rdblks);
+ pcbp->_state = state;
+                pil.push_back(pcbp->_index);
+ }
+ }
+ else // File header reads have no pcb
+ {
+ std::memcpy(&_fhdr, _fhdr_buffer, sizeof(file_hdr));
+ _rrfc.add_cmpl_cnt_dblks(JRNL_SBLK_SIZE);
+
+ u_int32_t fro_dblks = (_fhdr._fro / JRNL_DBLK_SIZE) - JRNL_SBLK_SIZE;
+            // Check that fro_dblks does not exceed the write pointers, which can happen in some corrupted journal recoveries
+ if (fro_dblks > _jc->wr_subm_cnt_dblks(_fhdr._pfid) - JRNL_SBLK_SIZE)
+ fro_dblks = _jc->wr_subm_cnt_dblks(_fhdr._pfid) - JRNL_SBLK_SIZE;
+ _pg_cntr = fro_dblks / (JRNL_RMGR_PAGE_SIZE * JRNL_SBLK_SIZE);
+ u_int32_t tot_pg_offs_dblks = _pg_cntr * JRNL_RMGR_PAGE_SIZE * JRNL_SBLK_SIZE;
+ _pg_index = _pg_cntr % JRNL_RMGR_PAGES;
+ _pg_offset_dblks = fro_dblks - tot_pg_offs_dblks;
+ _rrfc.add_subm_cnt_dblks(tot_pg_offs_dblks);
+ _rrfc.add_cmpl_cnt_dblks(tot_pg_offs_dblks);
+
+ _fhdr_rd_outstanding = false;
+ _rrfc.set_valid();
+ }
+ }
+
+ // Perform AIO return callback
+ if (_cbp && ret)
+ _cbp->rd_aio_cb(pil);
+ return ret;
+}
+
+void
+rmgr::recover_complete()
+{}
+
+void
+rmgr::invalidate()
+{
+ if (_rrfc.is_valid())
+ _rrfc.set_invalid();
+}
+
+void
+rmgr::flush(timespec* timeout)
+{
+ // Wait for any outstanding AIO read operations to complete before synchronizing
+ while (_aio_evt_rem)
+ {
+ if (get_events(AIO_COMPLETE, timeout) == jerrno::AIO_TIMEOUT) // timed out, nothing returned
+ {
+            throw jexception(jerrno::JERR__TIMEOUT,
+                    "Timed out waiting for outstanding read aio to return", "rmgr", "flush");
+ }
+ }
+
+ // Reset all read states and pointers
+ for (int i=0; i<_cache_num_pages; i++)
+ _page_cb_arr[i]._state = UNUSED;
+ _rrfc.unset_findex();
+ _pg_index = 0;
+ _pg_offset_dblks = 0;
+}
+
+bool
+rmgr::wait_for_validity(timespec* timeout, const bool throw_on_timeout)
+{
+ bool timed_out = false;
+ while (!_rrfc.is_valid() && !timed_out)
+ {
+ timed_out = get_events(AIO_COMPLETE, timeout) == jerrno::AIO_TIMEOUT;
+ if (timed_out && throw_on_timeout)
+ throw jexception(jerrno::JERR__TIMEOUT, "Timed out waiting for read validity", "rmgr", "wait_for_validity");
+ }
+ return _rrfc.is_valid();
+}
+
+iores
+rmgr::pre_read_check(data_tok* dtokp)
+{
+ if (_aio_evt_rem)
+ get_events(AIO_COMPLETE, 0);
+
+ if (!_rrfc.is_valid())
+ return RHM_IORES_RCINVALID;
+
+ // block reads until outstanding file header read completes as fro is needed to read
+ if (_fhdr_rd_outstanding)
+ return RHM_IORES_PAGE_AIOWAIT;
+
+ if(dblks_rem() == 0 && _rrfc.is_compl() && !_rrfc.is_wr_aio_outstanding())
+ {
+ aio_cycle(); // check if any AIOs have returned
+ if(dblks_rem() == 0 && _rrfc.is_compl() && !_rrfc.is_wr_aio_outstanding())
+ {
+ if (_jc->unflushed_dblks() > 0)
+ _jc->flush();
+ else if (!_aio_evt_rem)
+ return RHM_IORES_EMPTY;
+ }
+ }
+
+ // Check write state of this token is ENQ - required for read
+ if (dtokp)
+ {
+ if (!dtokp->is_readable())
+ {
+ std::ostringstream oss;
+ oss << std::hex << std::setfill('0');
+ oss << "dtok_id=0x" << std::setw(8) << dtokp->id();
+ oss << "; dtok_rid=0x" << std::setw(16) << dtokp->rid();
+ oss << "; dtok_wstate=" << dtokp->wstate_str();
+ throw jexception(jerrno::JERR_RMGR_ENQSTATE, oss.str(), "rmgr", "pre_read_check");
+ }
+ }
+
+ return RHM_IORES_SUCCESS;
+}
+
+iores
+rmgr::read_enq(rec_hdr& h, void* rptr, data_tok* dtokp)
+{
+ if (_page_cb_arr[_pg_index]._state != AIO_COMPLETE)
+ {
+ aio_cycle(); // check if any AIOs have returned
+ return RHM_IORES_PAGE_AIOWAIT;
+ }
+
+ // Read data from this page, first block will have header and data size.
+ u_int32_t dblks_rd = _enq_rec.decode(h, rptr, dtokp->dblocks_read(), dblks_rem());
+ dtokp->incr_dblocks_read(dblks_rd);
+
+ _pg_offset_dblks += dblks_rd;
+
+ // If data still incomplete, move to next page and decode again
+ while (dtokp->dblocks_read() < _enq_rec.rec_size_dblks())
+ {
+ rotate_page();
+ if (_page_cb_arr[_pg_index]._state != AIO_COMPLETE)
+ {
+ dtokp->set_rstate(data_tok::READ_PART);
+ dtokp->set_dsize(_enq_rec.data_size());
+ return RHM_IORES_PAGE_AIOWAIT;
+ }
+
+ rptr = (void*)((char*)_page_ptr_arr[_pg_index]);
+ dblks_rd = _enq_rec.decode(h, rptr, dtokp->dblocks_read(), dblks_rem());
+ dtokp->incr_dblocks_read(dblks_rd);
+ _pg_offset_dblks += dblks_rd;
+ }
+
+ // If we have finished with this page, rotate it
+ if (dblks_rem() == 0)
+ rotate_page();
+
+ // Set the record size in dtokp
+ dtokp->set_rstate(data_tok::READ);
+ dtokp->set_dsize(_enq_rec.data_size());
+ return RHM_IORES_SUCCESS;
+}
+
+void
+rmgr::consume_xid_rec(rec_hdr& h, void* rptr, data_tok* dtokp)
+{
+ if (h._magic == RHM_JDAT_ENQ_MAGIC)
+ {
+ enq_hdr ehdr;
+ std::memcpy(&ehdr, rptr, sizeof(enq_hdr));
+ if (ehdr.is_external())
+ dtokp->set_dsize(ehdr._xidsize + sizeof(enq_hdr) + sizeof(rec_tail));
+ else
+ dtokp->set_dsize(ehdr._xidsize + ehdr._dsize + sizeof(enq_hdr) + sizeof(rec_tail));
+ }
+ else if (h._magic == RHM_JDAT_DEQ_MAGIC)
+ {
+ deq_hdr dhdr;
+ std::memcpy(&dhdr, rptr, sizeof(deq_hdr));
+ if (dhdr._xidsize)
+ dtokp->set_dsize(dhdr._xidsize + sizeof(deq_hdr) + sizeof(rec_tail));
+ else
+ dtokp->set_dsize(sizeof(deq_hdr));
+ }
+ else if (h._magic == RHM_JDAT_TXA_MAGIC || h._magic == RHM_JDAT_TXC_MAGIC)
+ {
+ txn_hdr thdr;
+ std::memcpy(&thdr, rptr, sizeof(txn_hdr));
+ dtokp->set_dsize(thdr._xidsize + sizeof(txn_hdr) + sizeof(rec_tail));
+ }
+ else
+ {
+ std::ostringstream oss;
+ oss << "Record type found = \"" << (char*)&h._magic << "\"";
+ throw jexception(jerrno::JERR_RMGR_BADRECTYPE, oss.str(), "rmgr", "consume_xid_rec");
+ }
+ dtokp->set_dblocks_read(0);
+ skip(dtokp);
+}
+
+void
+rmgr::consume_filler()
+{
+ // Filler (Magic "RHMx") is one dblk by definition
+ _pg_offset_dblks++;
+ if (dblks_rem() == 0)
+ rotate_page();
+}
+
+iores
+rmgr::skip(data_tok* dtokp)
+{
+ u_int32_t dsize_dblks = jrec::size_dblks(dtokp->dsize());
+ u_int32_t tot_dblk_cnt = dtokp->dblocks_read();
+ while (true)
+ {
+ u_int32_t this_dblk_cnt = 0;
+ if (dsize_dblks - tot_dblk_cnt > dblks_rem())
+ this_dblk_cnt = dblks_rem();
+ else
+ this_dblk_cnt = dsize_dblks - tot_dblk_cnt;
+ if (this_dblk_cnt)
+ {
+ dtokp->incr_dblocks_read(this_dblk_cnt);
+ _pg_offset_dblks += this_dblk_cnt;
+ tot_dblk_cnt += this_dblk_cnt;
+ }
+ // If skip still incomplete, move to next page and decode again
+ if (tot_dblk_cnt < dsize_dblks)
+ {
+ if (dblks_rem() == 0)
+ rotate_page();
+ if (_page_cb_arr[_pg_index]._state != AIO_COMPLETE)
+ {
+ dtokp->set_rstate(data_tok::SKIP_PART);
+ return RHM_IORES_PAGE_AIOWAIT;
+ }
+ }
+ else
+ {
+ // Skip complete, put state back to unread
+ dtokp->set_rstate(data_tok::UNREAD);
+ dtokp->set_dsize(0);
+ dtokp->set_dblocks_read(0);
+
+ // If we have finished with this page, rotate it
+ if (dblks_rem() == 0)
+ rotate_page();
+ return RHM_IORES_SUCCESS;
+ }
+ }
+}
+
+iores
+rmgr::aio_cycle()
+{
+ // Perform validity checks
+ if (_fhdr_rd_outstanding) // read of file header still outstanding in aio
+ return RHM_IORES_SUCCESS;
+ if (!_rrfc.is_valid())
+ {
+ // Flush and reset all read states and pointers
+ flush(&jcntl::_aio_cmpl_timeout);
+
+ _jc->get_earliest_fid(); // determine initial file to read; calls _rrfc.set_findex() to set value
+ // If this file has not yet been written to, return RHM_IORES_EMPTY
+ if (_rrfc.is_void() && !_rrfc.is_wr_aio_outstanding())
+ return RHM_IORES_EMPTY;
+ init_file_header_read(); // send off AIO read request for file header
+ return RHM_IORES_SUCCESS;
+ }
+
+ int16_t first_uninit = -1;
+ u_int16_t num_uninit = 0;
+ u_int16_t num_compl = 0;
+ bool outstanding = false;
+ // Index must start with current buffer and cycle around so that first
+ // uninitialized buffer is initialized first
+ for (u_int16_t i=_pg_index; i<_pg_index+_cache_num_pages; i++)
+ {
+ int16_t ci = i % _cache_num_pages;
+ switch (_page_cb_arr[ci]._state)
+ {
+ case UNUSED:
+ if (first_uninit < 0)
+ first_uninit = ci;
+ num_uninit++;
+ break;
+ case IN_USE:
+ break;
+ case AIO_PENDING:
+ outstanding = true;
+ break;
+ case AIO_COMPLETE:
+ num_compl++;
+ break;
+ default:;
+ }
+ }
+ iores res = RHM_IORES_SUCCESS;
+ if (num_uninit)
+ res = init_aio_reads(first_uninit, num_uninit);
+ else if (num_compl == _cache_num_pages) // This condition exists after invalidation
+ res = init_aio_reads(0, _cache_num_pages);
+ if (outstanding)
+ get_events(AIO_COMPLETE, 0);
+ return res;
+}
+
+iores
+rmgr::init_aio_reads(const int16_t first_uninit, const u_int16_t num_uninit)
+{
+ for (int16_t i=0; i<num_uninit; i++)
+ {
+ if (_rrfc.is_void()) // Nothing to do; this file not yet written to
+ break;
+
+ if (_rrfc.subm_offs() == 0)
+ {
+ _rrfc.add_subm_cnt_dblks(JRNL_SBLK_SIZE);
+ _rrfc.add_cmpl_cnt_dblks(JRNL_SBLK_SIZE);
+ }
+
+ // TODO: Future perf improvement: Do a single AIO read for all available file
+ // space into all contiguous empty pages in one AIO operation.
+
+ u_int32_t file_rem_dblks = _rrfc.remaining_dblks();
+ file_rem_dblks -= file_rem_dblks % JRNL_SBLK_SIZE; // round down to closest sblk boundary
+ u_int32_t pg_size_dblks = JRNL_RMGR_PAGE_SIZE * JRNL_SBLK_SIZE;
+ u_int32_t rd_size = file_rem_dblks > pg_size_dblks ? pg_size_dblks : file_rem_dblks;
+ if (rd_size)
+ {
+ int16_t pi = (i + first_uninit) % _cache_num_pages;
+ // TODO: For perf, combine contiguous pages into single read
+ // 1 or 2 AIOs needed depending on whether read block folds
+ aio_cb* aiocbp = &_aio_cb_arr[pi];
+ aio::prep_pread_2(aiocbp, _rrfc.fh(), _page_ptr_arr[pi], rd_size * JRNL_DBLK_SIZE, _rrfc.subm_offs());
+ if (aio::submit(_ioctx, 1, &aiocbp) < 0)
+ throw jexception(jerrno::JERR__AIO, "rmgr", "init_aio_reads");
+ _rrfc.add_subm_cnt_dblks(rd_size);
+ _aio_evt_rem++;
+ _page_cb_arr[pi]._state = AIO_PENDING;
+ _page_cb_arr[pi]._rfh = _rrfc.file_controller();
+ }
+ else // If there is nothing to read for this page, neither will there be for the others...
+ break;
+ if (_rrfc.file_rotate())
+ _rrfc.rotate();
+ }
+ return RHM_IORES_SUCCESS;
+}
+
+void
+rmgr::rotate_page()
+{
+ _page_cb_arr[_pg_index]._rdblks = 0;
+ _page_cb_arr[_pg_index]._state = UNUSED;
+ if (_pg_offset_dblks >= JRNL_RMGR_PAGE_SIZE * JRNL_SBLK_SIZE)
+ {
+ _pg_offset_dblks = 0;
+ _pg_cntr++;
+ }
+ if (++_pg_index >= _cache_num_pages)
+ _pg_index = 0;
+ aio_cycle();
+ _pg_offset_dblks = 0;
+ // This counter is for bookkeeping only, page rotates are handled directly in init_aio_reads()
+ // FIXME: _pg_cntr should be sync'd with aio ops, not use of page as it is now...
+ // Need to move reset into if (_rrfc.file_rotate()) above.
+ if (_pg_cntr >= (_jc->jfsize_sblks() / JRNL_RMGR_PAGE_SIZE))
+ _pg_cntr = 0;
+}
+
+u_int32_t
+rmgr::dblks_rem() const
+{
+ return _page_cb_arr[_pg_index]._rdblks - _pg_offset_dblks;
+}
+
+void
+rmgr::set_params_null(void** const datapp, std::size_t& dsize, void** const xidpp, std::size_t& xidsize)
+{
+ *datapp = 0;
+ dsize = 0;
+ *xidpp = 0;
+ xidsize = 0;
+}
+
+void
+rmgr::init_file_header_read()
+{
+ _jc->fhdr_wr_sync(_rrfc.index()); // wait if the file header write is outstanding
+ int rfh = _rrfc.fh();
+ aio::prep_pread_2(_fhdr_aio_cb_ptr, rfh, _fhdr_buffer, _sblksize, 0);
+ if (aio::submit(_ioctx, 1, &_fhdr_aio_cb_ptr) < 0)
+ throw jexception(jerrno::JERR__AIO, "rmgr", "init_file_header_read");
+ _aio_evt_rem++;
+ _rrfc.add_subm_cnt_dblks(JRNL_SBLK_SIZE);
+ _fhdr_rd_outstanding = true;
+}
+
+/* TODO (sometime in the future)
+const iores
+rmgr::get(const u_int64_t& rid, const std::size_t& dsize, const std::size_t& dsize_avail,
+ const void** const data, bool auto_discard)
+{
+ return RHM_IORES_SUCCESS;
+}
+
+const iores
+rmgr::discard(data_tok* dtokp)
+{
+ return RHM_IORES_SUCCESS;
+}
+*/
+
+} // namespace journal
+} // namespace mrg
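
rmgr::read() above does not block on outstanding AIO: it returns RHM_IORES_PAGE_AIOWAIT when the target page is still being filled and RHM_IORES_EMPTY when nothing remains, so callers are expected to poll and retry. The standalone sketch below is not part of the patch; it shows the shape of such a retry loop using stand-in names, and try_read() with the iores_like enum is illustrative, not the store's API.

    // Standalone illustration (not rmgr's code): a poll-and-retry read loop.
    #include <iostream>

    enum iores_like { SUCCESS, PAGE_AIOWAIT, EMPTY };   // stand-ins for RHM_IORES_* values

    // Hypothetical reader: the first two polls find the page still under AIO,
    // the third returns a record, after which the journal is empty.
    iores_like try_read(int& polls, int& value)
    {
        if (polls < 2)  { ++polls; return PAGE_AIOWAIT; }
        if (polls == 2) { ++polls; value = 42; return SUCCESS; }
        return EMPTY;
    }

    int main()
    {
        int polls = 0, value = 0;
        for (;;)
        {
            iores_like res = try_read(polls, value);
            if (res == PAGE_AIOWAIT) continue;   // AIO still outstanding; poll again later
            if (res == EMPTY) break;             // nothing left to read
            std::cout << "read record " << value << std::endl;
        }
        return 0;
    }
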
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/rmgr.h b/qpid/cpp/src/qpid/legacystore/jrnl/rmgr.h
new file mode 100644
index 0000000000..ae4b5f56c8
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/rmgr.h
@@ -0,0 +1,114 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file rmgr.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::rmgr (read manager). See
+ * class documentation for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_RMGR_H
+#define QPID_LEGACYSTORE_JRNL_RMGR_H
+
+namespace mrg
+{
+namespace journal
+{
+class rmgr;
+}
+}
+
+#include <cstring>
+#include "qpid/legacystore/jrnl/enums.h"
+#include "qpid/legacystore/jrnl/file_hdr.h"
+#include "qpid/legacystore/jrnl/pmgr.h"
+#include "qpid/legacystore/jrnl/rec_hdr.h"
+#include "qpid/legacystore/jrnl/rrfc.h"
+
+namespace mrg
+{
+namespace journal
+{
+
+ /**
+ * \brief Class for managing a read page cache of arbitrary size and number of pages.
+ *
+    * The read page cache works on the principle of filling as many pages as possible in advance of
+ * reading the data. This ensures that delays caused by AIO operations are minimized.
+ */
+ class rmgr : public pmgr
+ {
+ private:
+ rrfc& _rrfc; ///< Ref to read rotating file controller
+        rec_hdr _hdr;               ///< Header used to determine record type
+
+ void* _fhdr_buffer; ///< Buffer used for fhdr reads
+ aio_cb* _fhdr_aio_cb_ptr; ///< iocb pointer for fhdr reads
+ file_hdr _fhdr; ///< file header instance for reading file headers
+ bool _fhdr_rd_outstanding; ///< true if a fhdr read is outstanding
+
+ public:
+ rmgr(jcntl* jc, enq_map& emap, txn_map& tmap, rrfc& rrfc);
+ virtual ~rmgr();
+
+ using pmgr::initialize;
+ void initialize(aio_callback* const cbp);
+ iores read(void** const datapp, std::size_t& dsize, void** const xidpp,
+ std::size_t& xidsize, bool& transient, bool& external, data_tok* dtokp,
+ bool ignore_pending_txns);
+ int32_t get_events(page_state state, timespec* const timeout, bool flush = false);
+ void recover_complete();
+ inline iores synchronize() { if (_rrfc.is_valid()) return RHM_IORES_SUCCESS; return aio_cycle(); }
+ void invalidate();
+ bool wait_for_validity(timespec* const timeout, const bool throw_on_timeout = false);
+
+ /* TODO (if required)
+ const iores get(const u_int64_t& rid, const std::size_t& dsize, const std::size_t& dsize_avail,
+ const void** const data, bool auto_discard);
+ const iores discard(data_tok* dtok);
+ */
+
+ private:
+ void clean();
+ void flush(timespec* timeout);
+ iores pre_read_check(data_tok* dtokp);
+ iores read_enq(rec_hdr& h, void* rptr, data_tok* dtokp);
+ void consume_xid_rec(rec_hdr& h, void* rptr, data_tok* dtokp);
+ void consume_filler();
+ iores skip(data_tok* dtokp);
+ iores aio_cycle();
+ iores init_aio_reads(const int16_t first_uninit, const u_int16_t num_uninit);
+ void rotate_page();
+ u_int32_t dblks_rem() const;
+ void set_params_null(void** const datapp, std::size_t& dsize, void** const xidpp,
+ std::size_t& xidsize);
+ void init_file_header_read();
+ };
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_RMGR_H
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/rrfc.cpp b/qpid/cpp/src/qpid/legacystore/jrnl/rrfc.cpp
new file mode 100644
index 0000000000..fc6f5d427f
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/rrfc.cpp
@@ -0,0 +1,125 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file rrfc.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::rrfc (rotating
+ * file controller). See comments in file rrfc.h for details.
+ *
+ * \author Kim van der Riet
+ */
+
+
+#include "qpid/legacystore/jrnl/rrfc.h"
+
+#include <cerrno>
+#include <fcntl.h>
+#include <unistd.h>
+#include "qpid/legacystore/jrnl/jerrno.h"
+#include "qpid/legacystore/jrnl/jexception.h"
+
+namespace mrg
+{
+namespace journal
+{
+
+rrfc::rrfc(const lpmgr* lpmp): rfc(lpmp), _fh(-1), _valid(false)
+{}
+
+rrfc::~rrfc()
+{
+ close_fh();
+}
+
+void
+rrfc::finalize()
+{
+ unset_findex();
+ rfc::finalize();
+}
+
+void
+rrfc::set_findex(const u_int16_t fc_index)
+{
+ rfc::set_findex(fc_index);
+ open_fh(_curr_fc->fname());
+}
+
+void
+rrfc::unset_findex()
+{
+ set_invalid();
+ close_fh();
+ rfc::unset_findex();
+}
+
+iores
+rrfc::rotate()
+{
+ if (!_lpmp->num_jfiles())
+ throw jexception(jerrno::JERR__NINIT, "rrfc", "rotate");
+ u_int16_t next_fc_index = _fc_index + 1;
+ if (next_fc_index == _lpmp->num_jfiles())
+ next_fc_index = 0;
+ set_findex(next_fc_index);
+ return RHM_IORES_SUCCESS;
+}
+
+std::string
+rrfc::status_str() const
+{
+ std::ostringstream oss;
+ oss << "rrfc: " << rfc::status_str();
+ if (is_active())
+ oss << " fcntl[" << _fc_index << "]: " << _curr_fc->status_str();
+ return oss.str();
+}
+
+// === protected functions ===
+
+void
+rrfc::open_fh(const std::string& fn)
+{
+ close_fh();
+ _fh = ::open(fn.c_str(), O_RDONLY | O_DIRECT);
+ if (_fh < 0)
+ {
+ std::ostringstream oss;
+ oss << "file=\"" << fn << "\"" << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR_RRFC_OPENRD, oss.str(), "rrfc", "open_fh");
+ }
+}
+
+void
+rrfc::close_fh()
+{
+ if (_fh >= 0)
+ {
+ ::close(_fh);
+ _fh = -1;
+ }
+}
+
+} // namespace journal
+} // namespace mrg
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/rrfc.h b/qpid/cpp/src/qpid/legacystore/jrnl/rrfc.h
new file mode 100644
index 0000000000..5066d6048a
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/rrfc.h
@@ -0,0 +1,179 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file rrfc.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::rrfc (rotating
+ * file controller). See class documentation for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_RRFC_H
+#define QPID_LEGACYSTORE_JRNL_RRFC_H
+
+namespace mrg
+{
+namespace journal
+{
+class rrfc;
+}
+}
+
+#include "qpid/legacystore/jrnl/fcntl.h"
+#include "qpid/legacystore/jrnl/rfc.h"
+
+namespace mrg
+{
+namespace journal
+{
+
+ /**
+ * \class rrfc
+ * \brief Read Rotating File Controller (rrfc) - Subclassed from pure virtual class rfc. Used to control the read
+ * pipeline in a rotating file buffer or journal. See class rfc for further details.
+ *
+ * The states that exist in this class are identical to class rfc from which it inherits, but in addition, the value
+ * of the read file handle _fh is also considered. A call to set_findex() also opens the file handle _fh to the
+ * active file for reading. Similarly, unset_findex() closes this file handle.
+ *
+ * <pre>
+ * is_init() is_active()
+ * +===+ _lpmp.is_init() == false
+ * +---------->| | Uninitialized: _curr_fc == 0 F F
+ * | +-->+===+ --+ _fh == -1
+ * | | |
+ * | | |
+ * | finalize() initialize()
+ * | | |
+ * | | |
+ * | +-- +===+<--+ _lpmp.is_init() == true
+ * finalize() | | Inactive: _curr_fc == 0 T F
+ * | +-->+===+ --+ _fh == -1
+ * | | |
+ * | | |
+ * | unset_findex() set_findex()
+ * | | |
+ * | | |
+ * | +-- +===+<--+ _lpmp.is_init() == true
+ * +---------- | | Active: _curr_fc != 0 T T
+ * +===+ _fh >= 0
+ * </pre>
+ *
+ * In addition to the states above, class rrfc contains a validity flag. This is operated independently of the state
+ * machine. This flag (_valid) indicates when the read buffers are valid for reading. This is not strictly a state,
+ * but simply a flag used to keep track of the status, and is set/unset with calls to set_valid() and set_invalid()
+ * respectively.
+ */
+ class rrfc : public rfc
+ {
+ protected:
+ int _fh; ///< Read file handle
+ bool _valid; ///< Flag is true when read pages contain valid data
+
+ public:
+ rrfc(const lpmgr* lpmp);
+ virtual ~rrfc();
+
+ /**
+ * \brief Initialize the controller, moving from state Uninitialized to Inactive. The main function of
+ * initialize() is to set the number of files and the pointer to the fcntl array.
+ */
+ inline void initialize() { rfc::initialize(); _valid = false; }
+
+ /**
+ * \brief Reset the controller to Uninitialized state, usually called when the journal is stopped. Once called,
+ * initialize() must be called to reuse an instance.
+ */
+ void finalize();
+
+ /**
+ * \brief Opens the file handle for reading a particular fid. Moves to the Active state.
+ */
+ void set_findex(const u_int16_t fc_index);
+
+ /**
+ * \brief Closes the read file handle and nulls the active fcntl pointer. Moves to the Inactive state.
+ */
+ void unset_findex();
+
+ /**
+ * \brief Check the state: true = Active; false = Inactive.
+ */
+ inline bool is_active() const { return _curr_fc != 0 && _fh >= 0; }
+
+ /**
+ * \brief Clears the validity flag, indicating that the read buffers are no longer synchronized and cannot
+ * be read without resynchronization.
+ */
+ inline void set_invalid() { _valid = false; }
+
+ /**
+ * \brief Sets the validity flag, indicating that the read buffers contain valid data for reading.
+ */
+ inline void set_valid() { _valid = true; }
+
+ /**
+ * \brief Checks the read buffer validity status: true = valid, can be read; false = invalid, synchronization
+ * required.
+ */
+ inline bool is_valid() const { return _valid; }
+
+ /**
+ * \brief Rotate active file controller to next file in rotating file group.
+ * \exception jerrno::JERR__NINIT if called before calling initialize().
+ */
+ iores rotate();
+
+ inline int fh() const { return _fh; }
+
+ inline u_int32_t subm_cnt_dblks() const { return _curr_fc->rd_subm_cnt_dblks(); }
+ inline std::size_t subm_offs() const { return _curr_fc->rd_subm_offs(); }
+ inline u_int32_t add_subm_cnt_dblks(u_int32_t a) { return _curr_fc->add_rd_subm_cnt_dblks(a); }
+
+ inline u_int32_t cmpl_cnt_dblks() const { return _curr_fc->rd_cmpl_cnt_dblks(); }
+ inline std::size_t cmpl_offs() const { return _curr_fc->rd_cmpl_offs(); }
+ inline u_int32_t add_cmpl_cnt_dblks(u_int32_t a) { return _curr_fc->add_rd_cmpl_cnt_dblks(a); }
+
+ inline bool is_void() const { return _curr_fc->rd_void(); }
+ inline bool is_empty() const { return _curr_fc->rd_empty(); }
+ inline u_int32_t remaining_dblks() const { return _curr_fc->rd_remaining_dblks(); }
+ inline bool is_full() const { return _curr_fc->is_rd_full(); }
+ inline bool is_compl() const { return _curr_fc->is_rd_compl(); }
+ inline u_int32_t aio_outstanding_dblks() const { return _curr_fc->rd_aio_outstanding_dblks(); }
+ inline bool file_rotate() const { return _curr_fc->rd_file_rotate(); }
+ inline bool is_wr_aio_outstanding() const { return _curr_fc->wr_aio_outstanding_dblks() > 0; }
+
+ // Debug aid
+ std::string status_str() const;
+
+ protected:
+ void open_fh(const std::string& fn);
+ void close_fh();
+ }; // class rrfc
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_RRFC_H
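
The state diagram in rrfc.h above maps onto the public calls roughly as in the following sketch. This is illustrative only and not part of the patch; the lpmgr pointer is assumed to have been set up elsewhere by the journal controller, and a journal file group is assumed to exist.

#include "qpid/legacystore/jrnl/rrfc.h"

// Hypothetical helper showing the intended call order for rrfc (sketch only).
void rrfc_walkthrough(const mrg::journal::lpmgr* lpm)
{
    mrg::journal::rrfc r(lpm);
    r.initialize();      // Uninitialized -> Inactive (_curr_fc == 0, _fh == -1)
    r.set_findex(0);     // Inactive -> Active: opens journal file 0 with O_RDONLY | O_DIRECT
    r.set_valid();       // read buffers now synchronized with the active file
    r.rotate();          // advance to the next file in the rotating group (wraps to 0)
    r.unset_findex();    // Active -> Inactive: clears validity and closes the file handle
    r.finalize();        // Inactive -> Uninitialized
}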
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/slock.cpp b/qpid/cpp/src/qpid/legacystore/jrnl/slock.cpp
new file mode 100644
index 0000000000..8f26d349ef
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/slock.cpp
@@ -0,0 +1,33 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file slock.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::slock (scoped lock). See
+ * comments in file slock.h for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#include "qpid/legacystore/jrnl/slock.h"
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/slock.h b/qpid/cpp/src/qpid/legacystore/jrnl/slock.h
new file mode 100644
index 0000000000..c05b5cf336
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/slock.h
@@ -0,0 +1,85 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file slock.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * Messaging journal scoped lock class mrg::journal::slock and scoped try-lock
+ * class mrg::journal::stlock.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_SLOCK_H
+#define QPID_LEGACYSTORE_JRNL_SLOCK_H
+
+#include "qpid/legacystore/jrnl/jexception.h"
+#include "qpid/legacystore/jrnl/smutex.h"
+#include <pthread.h>
+
+namespace mrg
+{
+namespace journal
+{
+
+ // Ultra-simple scoped lock class, auto-releases mutex when it goes out-of-scope
+ class slock
+ {
+ protected:
+ const smutex& _sm;
+ public:
+ inline slock(const smutex& sm) : _sm(sm)
+ {
+ PTHREAD_CHK(::pthread_mutex_lock(_sm.get()), "::pthread_mutex_lock", "slock", "slock");
+ }
+ inline ~slock()
+ {
+ PTHREAD_CHK(::pthread_mutex_unlock(_sm.get()), "::pthread_mutex_unlock", "slock", "~slock");
+ }
+ };
+
+ // Ultra-simple scoped try-lock class, auto-releases the mutex (if it was acquired) when it goes out of scope
+ class stlock
+ {
+ protected:
+ const smutex& _sm;
+ bool _locked;
+ public:
+ inline stlock(const smutex& sm) : _sm(sm), _locked(false)
+ {
+ int ret = ::pthread_mutex_trylock(_sm.get());
+ _locked = (ret == 0); // check if lock obtained
+ if (!_locked && ret != EBUSY) PTHREAD_CHK(ret, "::pthread_mutex_trylock", "stlock", "stlock");
+ }
+ inline ~stlock()
+ {
+ if (_locked)
+ PTHREAD_CHK(::pthread_mutex_unlock(_sm.get()), "::pthread_mutex_unlock", "stlock", "~stlock");
+ }
+ inline bool locked() const { return _locked; }
+ };
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_SLOCK_H
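
A minimal usage sketch for the two classes above, guarding a shared counter with an smutex (declared in smutex.h, added below). The caller type is hypothetical and not part of the patch; only the API declared in these headers is used.

#include "qpid/legacystore/jrnl/slock.h"
#include "qpid/legacystore/jrnl/smutex.h"

// Hypothetical caller, for illustration only.
class shared_counter
{
    mrg::journal::smutex _sm;   // mutex created/destroyed with PTHREAD_CHK error checks
    unsigned long _count;
public:
    shared_counter() : _count(0) {}

    void increment()
    {
        mrg::journal::slock s(_sm);   // blocks until the mutex is obtained
        ++_count;                     // mutex released when s leaves scope
    }

    bool try_increment()
    {
        mrg::journal::stlock t(_sm);  // non-blocking attempt
        if (!t.locked())
            return false;             // mutex busy; nothing modified
        ++_count;
        return true;                  // mutex released when t leaves scope
    }
};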
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/smutex.cpp b/qpid/cpp/src/qpid/legacystore/jrnl/smutex.cpp
new file mode 100644
index 0000000000..6f8991ca5b
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/smutex.cpp
@@ -0,0 +1,33 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file smutex.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::smutex (scoped mutex). See
+ * comments in file smutex.h for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#include "qpid/legacystore/jrnl/smutex.h"
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/smutex.h b/qpid/cpp/src/qpid/legacystore/jrnl/smutex.h
new file mode 100644
index 0000000000..def0fb70f6
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/smutex.h
@@ -0,0 +1,64 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file smutex.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * Messaging journal scoped mutex class mrg::journal::smutex.
+ *
+ * \author Kim van der Riet
+ */
+
+
+#ifndef QPID_LEGACYSTORE_JRNL_SMUTEX_H
+#define QPID_LEGACYSTORE_JRNL_SMUTEX_H
+
+#include "qpid/legacystore/jrnl/jexception.h"
+#include <pthread.h>
+
+namespace mrg
+{
+namespace journal
+{
+
+ // Ultra-simple scoped mutex class that allows a POSIX mutex to be initialized and destroyed with error checks
+ class smutex
+ {
+ protected:
+ mutable pthread_mutex_t _m;
+ public:
+ inline smutex()
+ {
+ PTHREAD_CHK(::pthread_mutex_init(&_m, 0), "::pthread_mutex_init", "smutex", "smutex");
+ }
+ inline virtual ~smutex()
+ {
+ PTHREAD_CHK(::pthread_mutex_destroy(&_m), "::pthread_mutex_destroy", "smutex", "~smutex");
+ }
+ inline pthread_mutex_t* get() const { return &_m; }
+ };
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_SMUTEX_H
diff --git a/qpid/gentools/templ.cpp/model/AMQP_ServerProxy.cpp.tmpl b/qpid/cpp/src/qpid/legacystore/jrnl/time_ns.cpp
index cce369f98b..976068ef68 100644
--- a/qpid/gentools/templ.cpp/model/AMQP_ServerProxy.cpp.tmpl
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/time_ns.cpp
@@ -1,4 +1,3 @@
-&{AMQP_ServerProxy.cpp}
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
@@ -8,9 +7,9 @@
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
- *
+ *
* http://www.apache.org/licenses/LICENSE-2.0
- *
+ *
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -20,32 +19,37 @@
*
*/
-/*
- * This file is auto-generated by ${GENERATOR} - do not modify.
- * Supported AMQP versions:
-%{VLIST} * ${major}-${minor}
+/**
+ * \file time_ns.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * Messaging journal time struct mrg::journal::time_ns, derived from
+ * the timespec struct and provided with helper functions.
+ *
+ * \author Kim van der Riet
*/
-#include <sstream>
+#include "qpid/legacystore/jrnl/time_ns.h"
-#include <AMQP_ServerProxy.h>
-#include <AMQFrame.h>
-%{MLIST} ${spc_method_body_include}
-
-namespace qpid {
-namespace framing {
-
-AMQP_ServerProxy::AMQP_ServerProxy(OutputHandler* out, u_int8_t major, u_int8_t minor) :
-%{CLIST} ${spc_constructor_initializer}
-{}
-
- // Inner class instance get methods
-
-%{CLIST} ${spc_inner_class_get_method}
-
- // Inner class implementation
-
-%{CLIST} ${spc_inner_class_impl}
+#include <sstream>
-} /* namespace framing */
-} /* namespace qpid */
+namespace mrg
+{
+namespace journal
+{
+
+const std::string
+time_ns::str(int precision) const
+{
+ const double t = tv_sec + (tv_nsec/1e9);
+ std::ostringstream oss;
+ oss.setf(std::ios::fixed, std::ios::floatfield);
+ oss.precision(precision);
+ oss << t;
+ return oss.str();
+}
+
+
+} // namespace journal
+} // namespace mrg
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/time_ns.h b/qpid/cpp/src/qpid/legacystore/jrnl/time_ns.h
new file mode 100644
index 0000000000..a9f69e2631
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/time_ns.h
@@ -0,0 +1,105 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file time_ns.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * Messaging journal time struct mrg::journal::time_ns, derived from
+ * the timespec struct and provided with helper functions.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_TIME_NS_H
+#define QPID_LEGACYSTORE_JRNL_TIME_NS_H
+
+#include <cerrno>
+#include <ctime>
+#include <string>
+
+namespace mrg
+{
+namespace journal
+{
+
+struct time_ns : public timespec
+{
+ inline time_ns() { tv_sec = 0; tv_nsec = 0; }
+ inline time_ns(const std::time_t sec, const long nsec = 0) { tv_sec = sec; tv_nsec = nsec; }
+ inline time_ns(const time_ns& t) { tv_sec = t.tv_sec; tv_nsec = t.tv_nsec; }
+
+ inline void set_zero() { tv_sec = 0; tv_nsec = 0; }
+ inline bool is_zero() const { return tv_sec == 0 && tv_nsec == 0; }
+ inline int now() { if(::clock_gettime(CLOCK_REALTIME, this)) return errno; return 0; }
+ const std::string str(int precision = 6) const;
+
+ inline time_ns& operator=(const time_ns& rhs)
+ { tv_sec = rhs.tv_sec; tv_nsec = rhs.tv_nsec; return *this; }
+ inline time_ns& operator+=(const time_ns& rhs)
+ {
+ tv_nsec += rhs.tv_nsec;
+ if (tv_nsec >= 1000000000L) { tv_sec++; tv_nsec -= 1000000000L; }
+ tv_sec += rhs.tv_sec;
+ return *this;
+ }
+ inline time_ns& operator+=(const long ns)
+ {
+ tv_nsec += ns;
+ if (tv_nsec >= 1000000000L) { tv_sec++; tv_nsec -= 1000000000L; }
+ return *this;
+ }
+ inline time_ns& operator-=(const long ns)
+ {
+ tv_nsec -= ns;
+ if (tv_nsec < 0) { tv_sec--; tv_nsec += 1000000000L; }
+ return *this;
+ }
+ inline time_ns& operator-=(const time_ns& rhs)
+ {
+ tv_nsec -= rhs.tv_nsec;
+ if (tv_nsec < 0) { tv_sec--; tv_nsec += 1000000000L; }
+ tv_sec -= rhs.tv_sec;
+ return *this;
+ }
+ inline const time_ns operator+(const time_ns& rhs)
+ { time_ns t(*this); t += rhs; return t; }
+ inline const time_ns operator-(const time_ns& rhs)
+ { time_ns t(*this); t -= rhs; return t; }
+ inline bool operator==(const time_ns& rhs)
+ { return tv_sec == rhs.tv_sec && tv_nsec == rhs.tv_nsec; }
+ inline bool operator!=(const time_ns& rhs)
+ { return tv_sec != rhs.tv_sec || tv_nsec != rhs.tv_nsec; }
+ inline bool operator>(const time_ns& rhs)
+ { if(tv_sec == rhs.tv_sec) return tv_nsec > rhs.tv_nsec; return tv_sec > rhs.tv_sec; }
+ inline bool operator>=(const time_ns& rhs)
+ { if(tv_sec == rhs.tv_sec) return tv_nsec >= rhs.tv_nsec; return tv_sec >= rhs.tv_sec; }
+ inline bool operator<(const time_ns& rhs)
+ { if(tv_sec == rhs.tv_sec) return tv_nsec < rhs.tv_nsec; return tv_sec < rhs.tv_sec; }
+ inline bool operator<=(const time_ns& rhs)
+ { if(tv_sec == rhs.tv_sec) return tv_nsec <= rhs.tv_nsec; return tv_sec <= rhs.tv_sec; }
+};
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_TIME_NS_H
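
A short illustrative sketch of the struct above used to time an operation; it relies only on the members defined in time_ns.h and is not part of the patch.

#include "qpid/legacystore/jrnl/time_ns.h"
#include <iostream>

int main()
{
    mrg::journal::time_ns start, stop;               // default constructor zeroes both fields
    if (start.now()) return 1;                       // now() returns errno on clock_gettime failure
    // ... operation being timed ...
    if (stop.now()) return 1;
    mrg::journal::time_ns elapsed = stop - start;    // operator- borrows from tv_sec as needed
    std::cout << "elapsed = " << elapsed.str(9) << " s" << std::endl;
    return 0;
}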
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/txn_hdr.h b/qpid/cpp/src/qpid/legacystore/jrnl/txn_hdr.h
new file mode 100644
index 0000000000..94b812ccec
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/txn_hdr.h
@@ -0,0 +1,125 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file txn_hdr.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::txn_hdr (transaction
+ * record header), used to start a transaction (commit or abort) record.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_TXN_HDR_H
+#define QPID_LEGACYSTORE_JRNL_TXN_HDR_H
+
+#include <cstddef>
+#include "qpid/legacystore/jrnl/rec_hdr.h"
+
+namespace mrg
+{
+namespace journal
+{
+
+#pragma pack(1)
+
+ /**
+ * \brief Struct for transaction commit and abort records.
+ *
+ * Struct for DTX commit and abort records. Only the magic distinguishes between them. Since
+ * this record must be used in the context of a valid XID, the xidsize field must not be zero.
+ * Immediately following this record is the XID itself which is xidsize bytes long, followed by
+ * a rec_tail.
+ *
+ * Note that this record has its own rid distinct from the rids of the record(s) making up the
+ * transaction it is committing or aborting.
+ *
+ * Record header info in binary format (24 bytes):
+ * <pre>
+ * 0 7
+ * +---+---+---+---+---+---+---+---+ -+
+ * | magic | v | e | flags | |
+ * +---+---+---+---+---+---+---+---+ | struct hdr
+ * | rid | |
+ * +---+---+---+---+---+---+---+---+ -+
+ * | xidsize |
+ * +---+---+---+---+---+---+---+---+
+ * v = file version (If the format or encoding of this file changes, then this
+ * number should be incremented)
+ * e = endian flag, false (0x00) for little endian, true (0x01) for big endian
+ * </pre>
+ *
+ * Note that journal files should be transferable between 32- and 64-bit
+ * hardware of the same endianness, but not between hardware of opposite
+ * endianness without some sort of binary conversion utility. Thus buffering
+ * will be needed for types that change size between 32- and 64-bit compiles.
+ */
+ struct txn_hdr : rec_hdr
+ {
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ u_int32_t _filler0; ///< Big-endian filler for 32-bit size_t
+#endif
+ std::size_t _xidsize; ///< XID size
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ u_int32_t _filler0; ///< Little-endian filler for 32-bit size_t
+#endif
+
+ /**
+ * \brief Default constructor, which sets all values to 0.
+ */
+ txn_hdr(): rec_hdr(),
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ _filler0(0),
+#endif
+ _xidsize(0)
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ , _filler0(0)
+#endif
+ {}
+
+ /**
+ * \brief Convenience constructor which initializes values during construction.
+ */
+ txn_hdr(const u_int32_t magic, const u_int8_t version, const u_int64_t rid,
+ const std::size_t xidsize, const bool owi): rec_hdr(magic, version, rid, owi),
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ _filler0(0),
+#endif
+ _xidsize(xidsize)
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ , _filler0(0)
+#endif
+ {}
+
+ /**
+ * \brief Returns the size of the header in bytes.
+ */
+ inline static std::size_t size() { return sizeof(txn_hdr); }
+ };
+
+#pragma pack()
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_TXN_HDR_H
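
An illustrative check of the packed layout described above. The magic value used here is a placeholder only (real records use the RHM_JDAT_TXA/TXC magics from the journal configuration headers), and this snippet is not part of the patch.

#include "qpid/legacystore/jrnl/txn_hdr.h"
#include <iostream>

int main()
{
    const u_int32_t placeholder_magic = 0x414e5254;   // not a real journal magic
    // version 1, rid 42, xidsize 16, owi false
    mrg::journal::txn_hdr th(placeholder_magic, 1, 42ULL, 16, false);
    std::cout << "txn_hdr on-disk size: " << mrg::journal::txn_hdr::size()
              << " bytes, xidsize field: " << th._xidsize << std::endl;
    return 0;
}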
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/txn_map.cpp b/qpid/cpp/src/qpid/legacystore/jrnl/txn_map.cpp
new file mode 100644
index 0000000000..c514670601
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/txn_map.cpp
@@ -0,0 +1,256 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file txn_map.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::txn_map (transaction map). See
+ * comments in file txn_map.h for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#include "qpid/legacystore/jrnl/txn_map.h"
+
+#include <iomanip>
+#include "qpid/legacystore/jrnl/jerrno.h"
+#include "qpid/legacystore/jrnl/jexception.h"
+#include "qpid/legacystore/jrnl/slock.h"
+#include <sstream>
+
+namespace mrg
+{
+namespace journal
+{
+
+// return/error codes
+int16_t txn_map::TMAP_RID_NOT_FOUND = -2;
+int16_t txn_map::TMAP_XID_NOT_FOUND = -1;
+int16_t txn_map::TMAP_OK = 0;
+int16_t txn_map::TMAP_NOT_SYNCED = 0;
+int16_t txn_map::TMAP_SYNCED = 1;
+
+txn_data_struct::txn_data_struct(const u_int64_t rid, const u_int64_t drid, const u_int16_t pfid,
+ const bool enq_flag, const bool commit_flag):
+ _rid(rid),
+ _drid(drid),
+ _pfid(pfid),
+ _enq_flag(enq_flag),
+ _commit_flag(commit_flag),
+ _aio_compl(false)
+{}
+
+txn_map::txn_map():
+ _map(),
+ _pfid_txn_cnt()
+{}
+
+txn_map::~txn_map() {}
+
+void
+txn_map::set_num_jfiles(const u_int16_t num_jfiles)
+{
+ _pfid_txn_cnt.resize(num_jfiles, 0);
+}
+
+u_int32_t
+txn_map::get_txn_pfid_cnt(const u_int16_t pfid) const
+{
+ return _pfid_txn_cnt.at(pfid);
+}
+
+bool
+txn_map::insert_txn_data(const std::string& xid, const txn_data& td)
+{
+ bool ok = true;
+ slock s(_mutex);
+ xmap_itr itr = _map.find(xid);
+ if (itr == _map.end()) // not found in map
+ {
+ txn_data_list list;
+ list.push_back(td);
+ std::pair<xmap_itr, bool> ret = _map.insert(xmap_param(xid, list));
+ if (!ret.second) // duplicate
+ ok = false;
+ }
+ else
+ itr->second.push_back(td);
+ _pfid_txn_cnt.at(td._pfid)++;
+ return ok;
+}
+
+const txn_data_list
+txn_map::get_tdata_list(const std::string& xid)
+{
+ slock s(_mutex);
+ return get_tdata_list_nolock(xid);
+}
+
+const txn_data_list
+txn_map::get_tdata_list_nolock(const std::string& xid)
+{
+ xmap_itr itr = _map.find(xid);
+ if (itr == _map.end()) // not found in map
+ return _empty_data_list;
+ return itr->second;
+}
+
+const txn_data_list
+txn_map::get_remove_tdata_list(const std::string& xid)
+{
+ slock s(_mutex);
+ xmap_itr itr = _map.find(xid);
+ if (itr == _map.end()) // not found in map
+ return _empty_data_list;
+ txn_data_list list = itr->second;
+ _map.erase(itr);
+ for (tdl_itr i=list.begin(); i!=list.end(); i++)
+ _pfid_txn_cnt.at(i->_pfid)--;
+ return list;
+}
+
+bool
+txn_map::in_map(const std::string& xid)
+{
+ slock s(_mutex);
+ xmap_itr itr= _map.find(xid);
+ return itr != _map.end();
+}
+
+u_int32_t
+txn_map::enq_cnt()
+{
+ return cnt(true);
+}
+
+u_int32_t
+txn_map::deq_cnt()
+{
+ return cnt(false); // count dequeue operations (_enq_flag == false)
+}
+
+u_int32_t
+txn_map::cnt(const bool enq_flag)
+{
+ slock s(_mutex);
+ u_int32_t c = 0;
+ for (xmap_itr i = _map.begin(); i != _map.end(); i++)
+ {
+ for (tdl_itr j = i->second.begin(); j < i->second.end(); j++)
+ {
+ if (j->_enq_flag == enq_flag)
+ c++;
+ }
+ }
+ return c;
+}
+
+int16_t
+txn_map::is_txn_synced(const std::string& xid)
+{
+ slock s(_mutex);
+ xmap_itr itr = _map.find(xid);
+ if (itr == _map.end()) // not found in map
+ return TMAP_XID_NOT_FOUND;
+ bool is_synced = true;
+ for (tdl_itr litr = itr->second.begin(); litr < itr->second.end(); litr++)
+ {
+ if (!litr->_aio_compl)
+ {
+ is_synced = false;
+ break;
+ }
+ }
+ return is_synced ? TMAP_SYNCED : TMAP_NOT_SYNCED;
+}
+
+int16_t
+txn_map::set_aio_compl(const std::string& xid, const u_int64_t rid)
+{
+ slock s(_mutex);
+ xmap_itr itr = _map.find(xid);
+ if (itr == _map.end()) // xid not found in map
+ return TMAP_XID_NOT_FOUND;
+ for (tdl_itr litr = itr->second.begin(); litr < itr->second.end(); litr++)
+ {
+ if (litr->_rid == rid)
+ {
+ litr->_aio_compl = true;
+ return TMAP_OK; // rid found
+ }
+ }
+ // xid present, but rid not found
+ return TMAP_RID_NOT_FOUND;
+}
+
+bool
+txn_map::data_exists(const std::string& xid, const u_int64_t rid)
+{
+ bool found = false;
+ {
+ slock s(_mutex);
+ txn_data_list tdl = get_tdata_list_nolock(xid);
+ tdl_itr itr = tdl.begin();
+ while (itr != tdl.end() && !found)
+ {
+ found = itr->_rid == rid;
+ itr++;
+ }
+ }
+ return found;
+}
+
+bool
+txn_map::is_enq(const u_int64_t rid)
+{
+ bool found = false;
+ {
+ slock s(_mutex);
+ for (xmap_itr i = _map.begin(); i != _map.end() && !found; i++)
+ {
+ txn_data_list list = i->second;
+ for (tdl_itr j = list.begin(); j < list.end() && !found; j++)
+ {
+ if (j->_enq_flag)
+ found = j->_rid == rid;
+ else
+ found = j->_drid == rid;
+ }
+ }
+ }
+ return found;
+}
+
+void
+txn_map::xid_list(std::vector<std::string>& xv)
+{
+ xv.clear();
+ {
+ slock s(_mutex);
+ for (xmap_itr itr = _map.begin(); itr != _map.end(); itr++)
+ xv.push_back(itr->first);
+ }
+}
+
+} // namespace journal
+} // namespace mrg
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/txn_map.h b/qpid/cpp/src/qpid/legacystore/jrnl/txn_map.h
new file mode 100644
index 0000000000..6b38564e53
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/txn_map.h
@@ -0,0 +1,159 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file txn_map.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::txn_map (transaction map).
+ * See class documentation for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_TXN_MAP_H
+#define QPID_LEGACYSTORE_JRNL_TXN_MAP_H
+
+namespace mrg
+{
+namespace journal
+{
+ class txn_map;
+}
+}
+
+#include "qpid/legacystore/jrnl/smutex.h"
+#include <map>
+#include <pthread.h>
+#include <string>
+#include <sys/types.h>
+#include <vector>
+
+namespace mrg
+{
+namespace journal
+{
+
+ /**
+ * \struct txn_data_struct
+ * \brief Struct encapsulating transaction data necessary for processing a transaction
+ * in the journal once it is closed with either a commit or abort.
+ */
+ struct txn_data_struct
+ {
+ u_int64_t _rid; ///< Record id for this operation
+ u_int64_t _drid; ///< Dequeue record id for this operation
+ u_int16_t _pfid; ///< Physical file id, to be used when transferring to emap on commit
+ bool _enq_flag; ///< If true, enq op, otherwise deq op
+ bool _commit_flag; ///< (2PC transactions) Records 2PC complete c/a mode
+ bool _aio_compl; ///< Initially false, set to true when record AIO returns
+ txn_data_struct(const u_int64_t rid, const u_int64_t drid, const u_int16_t pfid,
+ const bool enq_flag, const bool commit_flag = false);
+ };
+ typedef txn_data_struct txn_data;
+ typedef std::vector<txn_data> txn_data_list;
+ typedef txn_data_list::iterator tdl_itr;
+
+ /**
+ * \class txn_map
+ * \brief Class for storing transaction data for each open (i.e. not committed or aborted)
+ * xid in the store. If aborted, records are discarded; if committed, they are
+ * transferred to the enqueue map.
+ *
+ * The data is encapsulated by struct txn_data_struct. A vector containing the information
+ * for each operation included as part of the same transaction is mapped against the
+ * xid.
+ *
+ * The aio_compl flag is set true as each AIO write operation for the enqueue or dequeue
+ * returns. Checking that all of these flags are true for a given xid is the mechanism
+ * used to determine if the transaction is synchronized (through method is_txn_synced()).
+ *
+ * On transaction commit, each operation is handled as follows:
+ *
+ * If an enqueue (_enq_flag is true), then the rid and pfid are transferred to the enq_map.
+ * If a dequeue (_enq_flag is false), then the rid stored in the drid field is used to
+ * remove the corresponding record from the enq_map.
+ *
+ * On transaction abort, each operation is handled as follows:
+ *
+ * If an enqueue (_enq_flag is true), then the data is simply discarded.
+ * If a dequeue (_enq_flag is false), then the lock for the corresponding enqueue in enq_map
+ * (if not a part of the same transaction) is removed, and the data discarded.
+ *
+ * <pre>
+ * key data
+ *
+ * xid1 --- vector< [ rid, drid, pfid, enq_flag, commit_flag, aio_compl ] >
+ * xid2 --- vector< [ rid, drid, pfid, enq_flag, commit_flag, aio_compl ] >
+ * xid3 --- vector< [ rid, drid, pfid, enq_flag, commit_flag, aio_compl ] >
+ * ...
+ * </pre>
+ */
+ class txn_map
+ {
+ public:
+ // return/error codes
+ static int16_t TMAP_RID_NOT_FOUND;
+ static int16_t TMAP_XID_NOT_FOUND;
+ static int16_t TMAP_OK;
+ static int16_t TMAP_NOT_SYNCED;
+ static int16_t TMAP_SYNCED;
+
+ private:
+ typedef std::pair<std::string, txn_data_list> xmap_param;
+ typedef std::map<std::string, txn_data_list> xmap;
+ typedef xmap::iterator xmap_itr;
+
+ xmap _map;
+ smutex _mutex;
+ std::vector<u_int32_t> _pfid_txn_cnt;
+ const txn_data_list _empty_data_list;
+
+ public:
+ txn_map();
+ virtual ~txn_map();
+
+ void set_num_jfiles(const u_int16_t num_jfiles);
+ u_int32_t get_txn_pfid_cnt(const u_int16_t pfid) const;
+ bool insert_txn_data(const std::string& xid, const txn_data& td);
+ const txn_data_list get_tdata_list(const std::string& xid);
+ const txn_data_list get_remove_tdata_list(const std::string& xid);
+ bool in_map(const std::string& xid);
+ u_int32_t enq_cnt();
+ u_int32_t deq_cnt();
+ int16_t is_txn_synced(const std::string& xid); // -1=xid not found; 0=not synced; 1=synced
+ int16_t set_aio_compl(const std::string& xid, const u_int64_t rid); // -2=rid not found; -1=xid not found; 0=done
+ bool data_exists(const std::string& xid, const u_int64_t rid);
+ bool is_enq(const u_int64_t rid);
+ inline void clear() { _map.clear(); }
+ inline bool empty() const { return _map.empty(); }
+ inline size_t size() const { return _map.size(); }
+ void xid_list(std::vector<std::string>& xv);
+ private:
+ u_int32_t cnt(const bool enq_flag);
+ const txn_data_list get_tdata_list_nolock(const std::string& xid);
+ };
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_TXN_MAP_H
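
A minimal sketch of the commit-side lifecycle described above, using hypothetical rid/pfid/xid values and only the public txn_map API added in this patch; it is illustrative only.

#include "qpid/legacystore/jrnl/txn_map.h"
#include <cassert>
#include <string>

int main()
{
    using namespace mrg::journal;

    txn_map tm;
    tm.set_num_jfiles(4);                        // size the per-file transaction counters

    const std::string xid("example-xid-001");    // hypothetical XID
    txn_data td(1001ULL, 0ULL, 0, true);         // rid=1001, enqueue op recorded against pfid 0
    assert(tm.insert_txn_data(xid, td));
    assert(tm.in_map(xid));
    assert(tm.is_txn_synced(xid) == txn_map::TMAP_NOT_SYNCED);

    // The journal would call this from its AIO completion callback.
    assert(tm.set_aio_compl(xid, 1001ULL) == txn_map::TMAP_OK);
    assert(tm.is_txn_synced(xid) == txn_map::TMAP_SYNCED);

    // On commit/abort the list is removed; the caller then transfers enqueues
    // to (or removes them from) the enqueue map as described above.
    txn_data_list tdl = tm.get_remove_tdata_list(xid);
    assert(tdl.size() == 1 && !tm.in_map(xid));
    return 0;
}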
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/txn_rec.cpp b/qpid/cpp/src/qpid/legacystore/jrnl/txn_rec.cpp
new file mode 100644
index 0000000000..918a6ce902
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/txn_rec.cpp
@@ -0,0 +1,447 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file txn_rec.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * This file contains the code for the mrg::journal::txn_rec (journal transaction
+ * record) class. See comments in file txn_rec.h for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#include "qpid/legacystore/jrnl/txn_rec.h"
+
+#include <cassert>
+#include <cerrno>
+#include <cstdlib>
+#include <cstring>
+#include <iomanip>
+#include "qpid/legacystore/jrnl/jerrno.h"
+#include "qpid/legacystore/jrnl/jexception.h"
+#include <sstream>
+
+namespace mrg
+{
+namespace journal
+{
+
+txn_rec::txn_rec():
+ _txn_hdr(),
+ _xidp(0),
+ _buff(0),
+ _txn_tail()
+{
+ _txn_hdr._version = RHM_JDAT_VERSION;
+}
+
+txn_rec::txn_rec(const u_int32_t magic, const u_int64_t rid, const void* const xidp,
+ const std::size_t xidlen, const bool owi):
+ _txn_hdr(magic, RHM_JDAT_VERSION, rid, xidlen, owi),
+ _xidp(xidp),
+ _buff(0),
+ _txn_tail(_txn_hdr)
+{}
+
+txn_rec::~txn_rec()
+{
+ clean();
+}
+
+void
+txn_rec::reset(const u_int32_t magic)
+{
+ _txn_hdr._magic = magic;
+ _txn_hdr._rid = 0;
+ _txn_hdr._xidsize = 0;
+ _xidp = 0;
+ _buff = 0;
+ _txn_tail._xmagic = ~magic;
+ _txn_tail._rid = 0;
+}
+
+void
+txn_rec::reset(const u_int32_t magic, const u_int64_t rid, const void* const xidp,
+ const std::size_t xidlen, const bool owi)
+{
+ _txn_hdr._magic = magic;
+ _txn_hdr._rid = rid;
+ _txn_hdr.set_owi(owi);
+ _txn_hdr._xidsize = xidlen;
+ _xidp = xidp;
+ _buff = 0;
+ _txn_tail._xmagic = ~magic;
+ _txn_tail._rid = rid;
+}
+
+u_int32_t
+txn_rec::encode(void* wptr, u_int32_t rec_offs_dblks, u_int32_t max_size_dblks)
+{
+ assert(wptr != 0);
+ assert(max_size_dblks > 0);
+ assert(_xidp != 0 && _txn_hdr._xidsize > 0);
+
+ std::size_t rec_offs = rec_offs_dblks * JRNL_DBLK_SIZE;
+ std::size_t rem = max_size_dblks * JRNL_DBLK_SIZE;
+ std::size_t wr_cnt = 0;
+ if (rec_offs_dblks) // Continuation of split dequeue record (over 2 or more pages)
+ {
+ if (size_dblks(rec_size()) - rec_offs_dblks > max_size_dblks) // Further split required
+ {
+ rec_offs -= sizeof(_txn_hdr);
+ std::size_t wsize = _txn_hdr._xidsize > rec_offs ? _txn_hdr._xidsize - rec_offs : 0;
+ std::size_t wsize2 = wsize;
+ if (wsize)
+ {
+ if (wsize > rem)
+ wsize = rem;
+ std::memcpy(wptr, (const char*)_xidp + rec_offs, wsize);
+ wr_cnt += wsize;
+ rem -= wsize;
+ }
+ rec_offs -= _txn_hdr._xidsize - wsize2;
+ if (rem)
+ {
+ wsize = sizeof(_txn_tail) > rec_offs ? sizeof(_txn_tail) - rec_offs : 0;
+ wsize2 = wsize;
+ if (wsize)
+ {
+ if (wsize > rem)
+ wsize = rem;
+ std::memcpy((char*)wptr + wr_cnt, (char*)&_txn_tail + rec_offs, wsize);
+ wr_cnt += wsize;
+ rem -= wsize;
+ }
+ rec_offs -= sizeof(_txn_tail) - wsize2;
+ }
+ assert(rem == 0);
+ assert(rec_offs == 0);
+ }
+ else // No further split required
+ {
+ rec_offs -= sizeof(_txn_hdr);
+ std::size_t wsize = _txn_hdr._xidsize > rec_offs ? _txn_hdr._xidsize - rec_offs : 0;
+ if (wsize)
+ {
+ std::memcpy(wptr, (const char*)_xidp + rec_offs, wsize);
+ wr_cnt += wsize;
+ }
+ rec_offs -= _txn_hdr._xidsize - wsize;
+ wsize = sizeof(_txn_tail) > rec_offs ? sizeof(_txn_tail) - rec_offs : 0;
+ if (wsize)
+ {
+ std::memcpy((char*)wptr + wr_cnt, (char*)&_txn_tail + rec_offs, wsize);
+ wr_cnt += wsize;
+#ifdef RHM_CLEAN
+ std::size_t rec_offs = rec_offs_dblks * JRNL_DBLK_SIZE;
+ std::size_t dblk_rec_size = size_dblks(rec_size() - rec_offs) * JRNL_DBLK_SIZE;
+ std::memset((char*)wptr + wr_cnt, RHM_CLEAN_CHAR, dblk_rec_size - wr_cnt);
+#endif
+ }
+ rec_offs -= sizeof(_txn_tail) - wsize;
+ assert(rec_offs == 0);
+ }
+ }
+ else // Start at beginning of data record
+ {
+ // Assumption: the header will always fit into the first dblk
+ std::memcpy(wptr, (void*)&_txn_hdr, sizeof(_txn_hdr));
+ wr_cnt = sizeof(_txn_hdr);
+ if (size_dblks(rec_size()) > max_size_dblks) // Split required
+ {
+ std::size_t wsize;
+ rem -= sizeof(_txn_hdr);
+ if (rem)
+ {
+ wsize = rem >= _txn_hdr._xidsize ? _txn_hdr._xidsize : rem;
+ std::memcpy((char*)wptr + wr_cnt, _xidp, wsize);
+ wr_cnt += wsize;
+ rem -= wsize;
+ }
+ if (rem)
+ {
+ wsize = rem >= sizeof(_txn_tail) ? sizeof(_txn_tail) : rem;
+ std::memcpy((char*)wptr + wr_cnt, (void*)&_txn_tail, wsize);
+ wr_cnt += wsize;
+ rem -= wsize;
+ }
+ assert(rem == 0);
+ }
+ else // No split required
+ {
+ std::memcpy((char*)wptr + wr_cnt, _xidp, _txn_hdr._xidsize);
+ wr_cnt += _txn_hdr._xidsize;
+ std::memcpy((char*)wptr + wr_cnt, (void*)&_txn_tail, sizeof(_txn_tail));
+ wr_cnt += sizeof(_txn_tail);
+#ifdef RHM_CLEAN
+ std::size_t dblk_rec_size = size_dblks(rec_size()) * JRNL_DBLK_SIZE;
+ std::memset((char*)wptr + wr_cnt, RHM_CLEAN_CHAR, dblk_rec_size - wr_cnt);
+#endif
+ }
+ }
+ return size_dblks(wr_cnt);
+}
+
+u_int32_t
+txn_rec::decode(rec_hdr& h, void* rptr, u_int32_t rec_offs_dblks, u_int32_t max_size_dblks)
+{
+ assert(rptr != 0);
+ assert(max_size_dblks > 0);
+
+ std::size_t rd_cnt = 0;
+ if (rec_offs_dblks) // Continuation of record on new page
+ {
+ const u_int32_t hdr_xid_dblks = size_dblks(txn_hdr::size() + _txn_hdr._xidsize);
+ const u_int32_t hdr_xid_tail_dblks = size_dblks(txn_hdr::size() + _txn_hdr._xidsize +
+ rec_tail::size());
+ const std::size_t rec_offs = rec_offs_dblks * JRNL_DBLK_SIZE;
+
+ if (hdr_xid_tail_dblks - rec_offs_dblks <= max_size_dblks)
+ {
+ // Remainder of xid fits within this page
+ if (rec_offs - txn_hdr::size() < _txn_hdr._xidsize)
+ {
+ // Part of xid still outstanding, copy remainder of xid and tail
+ const std::size_t xid_offs = rec_offs - txn_hdr::size();
+ const std::size_t xid_rem = _txn_hdr._xidsize - xid_offs;
+ std::memcpy((char*)_buff + xid_offs, rptr, xid_rem);
+ rd_cnt = xid_rem;
+ std::memcpy((void*)&_txn_tail, ((char*)rptr + rd_cnt), sizeof(_txn_tail));
+ chk_tail();
+ rd_cnt += sizeof(_txn_tail);
+ }
+ else
+ {
+ // Tail or part of tail only outstanding, complete tail
+ const std::size_t tail_offs = rec_offs - txn_hdr::size() - _txn_hdr._xidsize;
+ const std::size_t tail_rem = rec_tail::size() - tail_offs;
+ std::memcpy((char*)&_txn_tail + tail_offs, rptr, tail_rem);
+ chk_tail();
+ rd_cnt = tail_rem;
+ }
+ }
+ else if (hdr_xid_dblks - rec_offs_dblks <= max_size_dblks)
+ {
+ // Remainder of xid fits within this page, tail split
+ const std::size_t xid_offs = rec_offs - txn_hdr::size();
+ const std::size_t xid_rem = _txn_hdr._xidsize - xid_offs;
+ std::memcpy((char*)_buff + xid_offs, rptr, xid_rem);
+ rd_cnt += xid_rem;
+ const std::size_t tail_rem = (max_size_dblks * JRNL_DBLK_SIZE) - rd_cnt;
+ if (tail_rem)
+ {
+ std::memcpy((void*)&_txn_tail, ((char*)rptr + xid_rem), tail_rem);
+ rd_cnt += tail_rem;
+ }
+ }
+ else
+ {
+ // Remainder of xid split
+ const std::size_t xid_cp_size = (max_size_dblks * JRNL_DBLK_SIZE);
+ std::memcpy((char*)_buff + rec_offs - txn_hdr::size(), rptr, xid_cp_size);
+ rd_cnt += xid_cp_size;
+ }
+ }
+ else // Start of record
+ {
+ // Get and check header
+ _txn_hdr.hdr_copy(h);
+ rd_cnt = sizeof(rec_hdr);
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ rd_cnt += sizeof(u_int32_t); // Filler 0
+#endif
+ _txn_hdr._xidsize = *(std::size_t*)((char*)rptr + rd_cnt);
+ rd_cnt = _txn_hdr.size();
+ chk_hdr();
+ _buff = std::malloc(_txn_hdr._xidsize);
+ MALLOC_CHK(_buff, "_buff", "txn_rec", "decode");
+ const u_int32_t hdr_xid_dblks = size_dblks(txn_hdr::size() + _txn_hdr._xidsize);
+ const u_int32_t hdr_xid_tail_dblks = size_dblks(txn_hdr::size() + _txn_hdr._xidsize +
+ rec_tail::size());
+
+ // Check if the record (header + xid + tail) fits within this page; if so, the
+ // tail can be checked before incurring the expense of copying the data into memory
+ if (hdr_xid_tail_dblks <= max_size_dblks)
+ {
+ // Entire header, xid and tail fits within this page
+ std::memcpy(_buff, (char*)rptr + rd_cnt, _txn_hdr._xidsize);
+ rd_cnt += _txn_hdr._xidsize;
+ std::memcpy((void*)&_txn_tail, (char*)rptr + rd_cnt, sizeof(_txn_tail));
+ rd_cnt += sizeof(_txn_tail);
+ chk_tail();
+ }
+ else if (hdr_xid_dblks <= max_size_dblks)
+ {
+ // Entire header and xid fit within this page, tail split
+ std::memcpy(_buff, (char*)rptr + rd_cnt, _txn_hdr._xidsize);
+ rd_cnt += _txn_hdr._xidsize;
+ const std::size_t tail_rem = (max_size_dblks * JRNL_DBLK_SIZE) - rd_cnt;
+ if (tail_rem)
+ {
+ std::memcpy((void*)&_txn_tail, (char*)rptr + rd_cnt, tail_rem);
+ rd_cnt += tail_rem;
+ }
+ }
+ else
+ {
+ // Header fits within this page, xid split
+ const std::size_t xid_cp_size = (max_size_dblks * JRNL_DBLK_SIZE) - rd_cnt;
+ std::memcpy(_buff, (char*)rptr + rd_cnt, xid_cp_size);
+ rd_cnt += xid_cp_size;
+ }
+ }
+ return size_dblks(rd_cnt);
+}
+
+bool
+txn_rec::rcv_decode(rec_hdr h, std::ifstream* ifsp, std::size_t& rec_offs)
+{
+ if (rec_offs == 0)
+ {
+ // Read header, allocate for xid
+ _txn_hdr.hdr_copy(h);
+#if defined(JRNL_BIG_ENDIAN) && defined(JRNL_32_BIT)
+ ifsp->ignore(sizeof(u_int32_t)); // _filler0
+#endif
+ ifsp->read((char*)&_txn_hdr._xidsize, sizeof(std::size_t));
+#if defined(JRNL_LITTLE_ENDIAN) && defined(JRNL_32_BIT)
+ ifsp->ignore(sizeof(u_int32_t)); // _filler0
+#endif
+ rec_offs = sizeof(_txn_hdr);
+ _buff = std::malloc(_txn_hdr._xidsize);
+ MALLOC_CHK(_buff, "_buff", "txn_rec", "rcv_decode");
+ }
+ if (rec_offs < sizeof(_txn_hdr) + _txn_hdr._xidsize)
+ {
+ // Read xid (or continue reading xid)
+ std::size_t offs = rec_offs - sizeof(_txn_hdr);
+ ifsp->read((char*)_buff + offs, _txn_hdr._xidsize - offs);
+ std::size_t size_read = ifsp->gcount();
+ rec_offs += size_read;
+ if (size_read < _txn_hdr._xidsize - offs)
+ {
+ assert(ifsp->eof());
+ // As we may have read past eof, turn off fail bit
+ ifsp->clear(ifsp->rdstate()&(~std::ifstream::failbit));
+ assert(!ifsp->fail() && !ifsp->bad());
+ return false;
+ }
+ }
+ if (rec_offs < sizeof(_txn_hdr) + _txn_hdr._xidsize + sizeof(rec_tail))
+ {
+ // Read tail (or continue reading tail)
+ std::size_t offs = rec_offs - sizeof(_txn_hdr) - _txn_hdr._xidsize;
+ ifsp->read((char*)&_txn_tail + offs, sizeof(rec_tail) - offs);
+ std::size_t size_read = ifsp->gcount();
+ rec_offs += size_read;
+ if (size_read < sizeof(rec_tail) - offs)
+ {
+ assert(ifsp->eof());
+ // As we may have read past eof, turn off fail bit
+ ifsp->clear(ifsp->rdstate()&(~std::ifstream::failbit));
+ assert(!ifsp->fail() && !ifsp->bad());
+ return false;
+ }
+ }
+ ifsp->ignore(rec_size_dblks() * JRNL_DBLK_SIZE - rec_size());
+ chk_tail(); // Throws if tail invalid or record incomplete
+ assert(!ifsp->fail() && !ifsp->bad());
+ return true;
+}
+
+std::size_t
+txn_rec::get_xid(void** const xidpp)
+{
+ if (!_buff)
+ {
+ *xidpp = 0;
+ return 0;
+ }
+ *xidpp = _buff;
+ return _txn_hdr._xidsize;
+}
+
+std::string&
+txn_rec::str(std::string& str) const
+{
+ std::ostringstream oss;
+ if (_txn_hdr._magic == RHM_JDAT_TXA_MAGIC)
+ oss << "dtxa_rec: m=" << _txn_hdr._magic;
+ else
+ oss << "dtxc_rec: m=" << _txn_hdr._magic;
+ oss << " v=" << (int)_txn_hdr._version;
+ oss << " rid=" << _txn_hdr._rid;
+ oss << " xid=\"" << _xidp << "\"";
+ str.append(oss.str());
+ return str;
+}
+
+std::size_t
+txn_rec::xid_size() const
+{
+ return _txn_hdr._xidsize;
+}
+
+std::size_t
+txn_rec::rec_size() const
+{
+ return txn_hdr::size() + _txn_hdr._xidsize + rec_tail::size();
+}
+
+void
+txn_rec::chk_hdr() const
+{
+ jrec::chk_hdr(_txn_hdr);
+ if (_txn_hdr._magic != RHM_JDAT_TXA_MAGIC && _txn_hdr._magic != RHM_JDAT_TXC_MAGIC)
+ {
+ std::ostringstream oss;
+ oss << std::hex << std::setfill('0');
+ oss << "dtx magic: rid=0x" << std::setw(16) << _txn_hdr._rid;
+ oss << ": expected=(0x" << std::setw(8) << RHM_JDAT_TXA_MAGIC;
+ oss << " or 0x" << RHM_JDAT_TXC_MAGIC;
+ oss << ") read=0x" << std::setw(2) << (int)_txn_hdr._magic;
+ throw jexception(jerrno::JERR_JREC_BADRECHDR, oss.str(), "txn_rec", "chk_hdr");
+ }
+}
+
+void
+txn_rec::chk_hdr(u_int64_t rid) const
+{
+ chk_hdr();
+ jrec::chk_rid(_txn_hdr, rid);
+}
+
+void
+txn_rec::chk_tail() const
+{
+ jrec::chk_tail(_txn_tail, _txn_hdr);
+}
+
+void
+txn_rec::clean()
+{
+ // clean up allocated memory here
+}
+
+} // namespace journal
+} // namespace mrg
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/txn_rec.h b/qpid/cpp/src/qpid/legacystore/jrnl/txn_rec.h
new file mode 100644
index 0000000000..1a49df1c96
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/txn_rec.h
@@ -0,0 +1,101 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file txn_rec.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * This file contains the code for the mrg::journal::txn_rec (journal transaction
+ * record) class. See class documentation for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_TXN_REC_H
+#define QPID_LEGACYSTORE_JRNL_TXN_REC_H
+
+namespace mrg
+{
+namespace journal
+{
+class txn_rec;
+}
+}
+
+#include <cstddef>
+#include "qpid/legacystore/jrnl/jrec.h"
+#include "qpid/legacystore/jrnl/txn_hdr.h"
+
+namespace mrg
+{
+namespace journal
+{
+
+ /**
+ * \class txn_rec
+ * \brief Class to handle a single journal DTX commit or abort record.
+ */
+ class txn_rec : public jrec
+ {
+ private:
+ txn_hdr _txn_hdr; ///< transaction header
+ const void* _xidp; ///< xid pointer for encoding (writing to disk)
+ void* _buff; ///< Pointer to buffer to receive data read from disk
+ rec_tail _txn_tail; ///< Record tail
+
+ public:
+ // constructor used for read operations and xid must have memory allocated
+ txn_rec();
+ // constructor used for write operations, where xid already exists
+ txn_rec(const u_int32_t magic, const u_int64_t rid, const void* const xidp,
+ const std::size_t xidlen, const bool owi);
+ virtual ~txn_rec();
+
+ // Prepare instance for use in reading data from journal
+ void reset(const u_int32_t magic);
+ // Prepare instance for use in writing data to journal
+ void reset(const u_int32_t magic, const u_int64_t rid, const void* const xidp,
+ const std::size_t xidlen, const bool owi);
+ u_int32_t encode(void* wptr, u_int32_t rec_offs_dblks, u_int32_t max_size_dblks);
+ u_int32_t decode(rec_hdr& h, void* rptr, u_int32_t rec_offs_dblks,
+ u_int32_t max_size_dblks);
+ // Decode used for recover
+ bool rcv_decode(rec_hdr h, std::ifstream* ifsp, std::size_t& rec_offs);
+
+ std::size_t get_xid(void** const xidpp);
+ std::string& str(std::string& str) const;
+ inline std::size_t data_size() const { return 0; } // This record never carries data
+ std::size_t xid_size() const;
+ std::size_t rec_size() const;
+ inline u_int64_t rid() const { return _txn_hdr._rid; }
+
+ private:
+ void chk_hdr() const;
+ void chk_hdr(u_int64_t rid) const;
+ void chk_tail() const;
+ virtual void clean();
+ }; // class txn_rec
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_TXN_REC_H
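
A hypothetical write-path sketch for the class above: encoding a single DTX commit record at the start of a caller-supplied page. It is not part of the patch; RHM_JDAT_TXC_MAGIC is assumed to be visible through the journal headers pulled in by jrec.h, and split handling is deliberately omitted.

#include "qpid/legacystore/jrnl/txn_rec.h"
#include <string>

// Illustrative only: page must be at least rec.rec_size() bytes (rounded up to whole dblks).
u_int32_t encode_commit(void* page, u_int32_t page_size_dblks, u_int64_t rid, bool owi)
{
    const std::string xid("example-xid-001");    // hypothetical XID
    mrg::journal::txn_rec rec(RHM_JDAT_TXC_MAGIC, rid, xid.data(), xid.size(), owi);
    // Offset 0 dblks: this is not the continuation of a split record.
    return rec.encode(page, 0, page_size_dblks); // returns dblks written
}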
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/wmgr.cpp b/qpid/cpp/src/qpid/legacystore/jrnl/wmgr.cpp
new file mode 100644
index 0000000000..4353fcfbca
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/wmgr.cpp
@@ -0,0 +1,1051 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file wmgr.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::wmgr (write manager). See
+ * comments in file wmgr.h for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#include "qpid/legacystore/jrnl/wmgr.h"
+
+#include <cassert>
+#include <cerrno>
+#include <cstdlib>
+#include <cstring>
+#include "qpid/legacystore/jrnl/file_hdr.h"
+#include "qpid/legacystore/jrnl/jcntl.h"
+#include "qpid/legacystore/jrnl/jerrno.h"
+#include <sstream>
+
+namespace mrg
+{
+namespace journal
+{
+
+wmgr::wmgr(jcntl* jc, enq_map& emap, txn_map& tmap, wrfc& wrfc):
+ pmgr(jc, emap, tmap),
+ _wrfc(wrfc),
+ _max_dtokpp(0),
+ _max_io_wait_us(0),
+ _fhdr_base_ptr(0),
+ _fhdr_ptr_arr(0),
+ _fhdr_aio_cb_arr(0),
+ _cached_offset_dblks(0),
+ _jfsize_dblks(0),
+ _jfsize_pgs(0),
+ _num_jfiles(0),
+ _enq_busy(false),
+ _deq_busy(false),
+ _abort_busy(false),
+ _commit_busy(false),
+ _txn_pending_set()
+{}
+
+wmgr::wmgr(jcntl* jc, enq_map& emap, txn_map& tmap, wrfc& wrfc,
+ const u_int32_t max_dtokpp, const u_int32_t max_iowait_us):
+ pmgr(jc, emap, tmap /* , dtoklp */),
+ _wrfc(wrfc),
+ _max_dtokpp(max_dtokpp),
+ _max_io_wait_us(max_iowait_us),
+ _fhdr_base_ptr(0),
+ _fhdr_ptr_arr(0),
+ _fhdr_aio_cb_arr(0),
+ _cached_offset_dblks(0),
+ _jfsize_dblks(0),
+ _jfsize_pgs(0),
+ _num_jfiles(0),
+ _enq_busy(false),
+ _deq_busy(false),
+ _abort_busy(false),
+ _commit_busy(false),
+ _txn_pending_set()
+{}
+
+wmgr::~wmgr()
+{
+ wmgr::clean();
+}
+
+void
+wmgr::initialize(aio_callback* const cbp, const u_int32_t wcache_pgsize_sblks,
+ const u_int16_t wcache_num_pages, const u_int32_t max_dtokpp, const u_int32_t max_iowait_us,
+ std::size_t eo)
+{
+ _enq_busy = false;
+ _deq_busy = false;
+ _abort_busy = false;
+ _commit_busy = false;
+ _max_dtokpp = max_dtokpp;
+ _max_io_wait_us = max_iowait_us;
+
+ initialize(cbp, wcache_pgsize_sblks, wcache_num_pages);
+
+ _jfsize_dblks = _jc->jfsize_sblks() * JRNL_SBLK_SIZE;
+ _jfsize_pgs = _jc->jfsize_sblks() / _cache_pgsize_sblks;
+ assert(_jc->jfsize_sblks() % JRNL_RMGR_PAGE_SIZE == 0);
+
+ if (eo)
+ {
+ const u_int32_t wr_pg_size_dblks = _cache_pgsize_sblks * JRNL_SBLK_SIZE;
+ u_int32_t data_dblks = (eo / JRNL_DBLK_SIZE) - 4; // 4 dblks for file hdr
+ _pg_cntr = data_dblks / wr_pg_size_dblks;
+ _pg_offset_dblks = data_dblks - (_pg_cntr * wr_pg_size_dblks);
+ }
+}
+
+iores
+wmgr::enqueue(const void* const data_buff, const std::size_t tot_data_len,
+ const std::size_t this_data_len, data_tok* dtokp, const void* const xid_ptr,
+ const std::size_t xid_len, const bool transient, const bool external)
+{
+ if (xid_len)
+ assert(xid_ptr != 0);
+
+ if (_deq_busy || _abort_busy || _commit_busy)
+ return RHM_IORES_BUSY;
+
+ if (this_data_len != tot_data_len && !external)
+ return RHM_IORES_NOTIMPL;
+
+ iores res = pre_write_check(WMGR_ENQUEUE, dtokp, xid_len, tot_data_len, external);
+ if (res != RHM_IORES_SUCCESS)
+ return res;
+
+ bool cont = false;
+ if (_enq_busy) // If enqueue() exited last time with RHM_IORES_FULL or RHM_IORES_PAGE_AIOWAIT
+ {
+ if (dtokp->wstate() == data_tok::ENQ_PART)
+ cont = true;
+ else
+ {
+ std::ostringstream oss;
+ oss << "This data_tok: id=" << dtokp->id() << " state=" << dtokp->wstate_str();
+ throw jexception(jerrno::JERR_WMGR_ENQDISCONT, oss.str(), "wmgr", "enqueue");
+ }
+ }
+
+ u_int64_t rid = (dtokp->external_rid() | cont) ? dtokp->rid() : _wrfc.get_incr_rid();
+ _enq_rec.reset(rid, data_buff, tot_data_len, xid_ptr, xid_len, _wrfc.owi(), transient,
+ external);
+ if (!cont)
+ {
+ dtokp->set_rid(rid);
+ dtokp->set_dequeue_rid(0);
+ if (xid_len)
+ dtokp->set_xid(xid_ptr, xid_len);
+ else
+ dtokp->clear_xid();
+ _enq_busy = true;
+ }
+ bool done = false;
+ while (!done)
+ {
+ assert(_pg_offset_dblks < _cache_pgsize_sblks * JRNL_SBLK_SIZE);
+ void* wptr = (void*)((char*)_page_ptr_arr[_pg_index] + _pg_offset_dblks * JRNL_DBLK_SIZE);
+ u_int32_t data_offs_dblks = dtokp->dblocks_written();
+ u_int32_t ret = _enq_rec.encode(wptr, data_offs_dblks,
+ (_cache_pgsize_sblks * JRNL_SBLK_SIZE) - _pg_offset_dblks);
+
+ // Remember fid which contains the record header in case record is split over several files
+ if (data_offs_dblks == 0)
+ dtokp->set_fid(_wrfc.index());
+ _pg_offset_dblks += ret;
+ _cached_offset_dblks += ret;
+ dtokp->incr_dblocks_written(ret);
+ dtokp->incr_pg_cnt();
+ _page_cb_arr[_pg_index]._pdtokl->push_back(dtokp);
+
+ // Is the encoding of this record complete?
+ if (dtokp->dblocks_written() >= _enq_rec.rec_size_dblks())
+ {
+ // TODO: Incorrect - must set state to ENQ_CACHED; ENQ_SUBM is set when AIO returns.
+ dtokp->set_wstate(data_tok::ENQ_SUBM);
+ dtokp->set_dsize(tot_data_len);
+ // Only add this data token to the page token list when the submit is complete; this
+ // way, long multi-page messages have their token on the page containing the END of
+ // the message. AIO callbacks will then only process this token when the entire
+ // message is enqueued.
+ _wrfc.incr_enqcnt(dtokp->fid());
+
+ if (xid_len) // If part of transaction, add to transaction map
+ {
+ std::string xid((const char*)xid_ptr, xid_len);
+ _tmap.insert_txn_data(xid, txn_data(rid, 0, dtokp->fid(), true));
+ }
+ else
+ {
+ if (_emap.insert_pfid(rid, dtokp->fid()) < enq_map::EMAP_OK) // fail
+ {
+ // The only error code emap::insert_pfid() returns is enq_map::EMAP_DUP_RID.
+ std::ostringstream oss;
+ oss << std::hex << "rid=0x" << rid << " _pfid=0x" << dtokp->fid();
+ throw jexception(jerrno::JERR_MAP_DUPLICATE, oss.str(), "wmgr", "enqueue");
+ }
+ }
+
+ done = true;
+ }
+ else
+ dtokp->set_wstate(data_tok::ENQ_PART);
+
+ file_header_check(rid, cont, _enq_rec.rec_size_dblks() - data_offs_dblks);
+ flush_check(res, cont, done);
+ }
+ if (dtokp->wstate() >= data_tok::ENQ_SUBM)
+ _enq_busy = false;
+ return res;
+}
+
+iores
+wmgr::dequeue(data_tok* dtokp, const void* const xid_ptr, const std::size_t xid_len, const bool txn_coml_commit)
+{
+ if (xid_len)
+ assert(xid_ptr != 0);
+
+ if (_enq_busy || _abort_busy || _commit_busy)
+ return RHM_IORES_BUSY;
+
+ iores res = pre_write_check(WMGR_DEQUEUE, dtokp);
+ if (res != RHM_IORES_SUCCESS)
+ return res;
+
+ bool cont = false;
+ if (_deq_busy) // If dequeue() exited last time with RHM_IORES_FULL or RHM_IORES_PAGE_AIOWAIT
+ {
+ if (dtokp->wstate() == data_tok::DEQ_PART)
+ cont = true;
+ else
+ {
+ std::ostringstream oss;
+ oss << "This data_tok: id=" << dtokp->id() << " state=" << dtokp->wstate_str();
+ throw jexception(jerrno::JERR_WMGR_DEQDISCONT, oss.str(), "wmgr", "dequeue");
+ }
+ }
+
+ const bool ext_rid = dtokp->external_rid();
+ u_int64_t rid = (ext_rid | cont) ? dtokp->rid() : _wrfc.get_incr_rid();
+ u_int64_t dequeue_rid = (ext_rid | cont) ? dtokp->dequeue_rid() : dtokp->rid();
+ _deq_rec.reset(rid, dequeue_rid, xid_ptr, xid_len, _wrfc.owi(), txn_coml_commit);
+ if (!cont)
+ {
+ if (!ext_rid)
+ {
+ dtokp->set_rid(rid);
+ dtokp->set_dequeue_rid(dequeue_rid);
+ }
+ if (xid_len)
+ dtokp->set_xid(xid_ptr, xid_len);
+ else
+ dtokp->clear_xid();
+ dequeue_check(dtokp->xid(), dequeue_rid);
+ dtokp->set_dblocks_written(0); // Reset dblks_written from previous op
+ _deq_busy = true;
+ }
+ bool done = false;
+ while (!done)
+ {
+ assert(_pg_offset_dblks < _cache_pgsize_sblks * JRNL_SBLK_SIZE);
+ void* wptr = (void*)((char*)_page_ptr_arr[_pg_index] + _pg_offset_dblks * JRNL_DBLK_SIZE);
+ u_int32_t data_offs_dblks = dtokp->dblocks_written();
+ u_int32_t ret = _deq_rec.encode(wptr, data_offs_dblks,
+ (_cache_pgsize_sblks * JRNL_SBLK_SIZE) - _pg_offset_dblks);
+
+ // Remember fid which contains the record header in case record is split over several files
+ if (data_offs_dblks == 0)
+ dtokp->set_fid(_wrfc.index());
+ _pg_offset_dblks += ret;
+ _cached_offset_dblks += ret;
+ dtokp->incr_dblocks_written(ret);
+ dtokp->incr_pg_cnt();
+ _page_cb_arr[_pg_index]._pdtokl->push_back(dtokp);
+
+ // Is the encoding of this record complete?
+ if (dtokp->dblocks_written() >= _deq_rec.rec_size_dblks())
+ {
+ // TODO: Incorrect - must set state to ENQ_CACHED; ENQ_SUBM is set when AIO returns.
+ dtokp->set_wstate(data_tok::DEQ_SUBM);
+
+ if (xid_len) // If part of transaction, add to transaction map
+ {
+ // If the enqueue is part of a pending txn, it will not yet be in emap
+ _emap.lock(dequeue_rid); // ignore rid not found error
+ std::string xid((const char*)xid_ptr, xid_len);
+ _tmap.insert_txn_data(xid, txn_data(rid, dequeue_rid, dtokp->fid(), false));
+ }
+ else
+ {
+ int16_t fid = _emap.get_remove_pfid(dtokp->dequeue_rid());
+ if (fid < enq_map::EMAP_OK) // fail
+ {
+ if (fid == enq_map::EMAP_RID_NOT_FOUND)
+ {
+ std::ostringstream oss;
+ oss << std::hex << "rid=0x" << rid;
+ throw jexception(jerrno::JERR_MAP_NOTFOUND, oss.str(), "wmgr", "dequeue");
+ }
+ if (fid == enq_map::EMAP_LOCKED)
+ {
+ std::ostringstream oss;
+ oss << std::hex << "rid=0x" << rid;
+ throw jexception(jerrno::JERR_MAP_LOCKED, oss.str(), "wmgr", "dequeue");
+ }
+ }
+ _wrfc.decr_enqcnt(fid);
+ }
+
+ done = true;
+ }
+ else
+ dtokp->set_wstate(data_tok::DEQ_PART);
+
+ file_header_check(rid, cont, _deq_rec.rec_size_dblks() - data_offs_dblks);
+ flush_check(res, cont, done);
+ }
+ if (dtokp->wstate() >= data_tok::DEQ_SUBM)
+ _deq_busy = false;
+ return res;
+}
+
+iores
+wmgr::abort(data_tok* dtokp, const void* const xid_ptr, const std::size_t xid_len)
+{
+ // commit and abort MUST have a valid xid
+ assert(xid_ptr != 0 && xid_len > 0);
+
+ if (_enq_busy || _deq_busy || _commit_busy)
+ return RHM_IORES_BUSY;
+
+ iores res = pre_write_check(WMGR_ABORT, dtokp);
+ if (res != RHM_IORES_SUCCESS)
+ return res;
+
+ bool cont = false;
+ if (_abort_busy) // If abort() exited last time with RHM_IORES_FULL or RHM_IORES_PAGE_AIOWAIT
+ {
+ if (dtokp->wstate() == data_tok::ABORT_PART)
+ cont = true;
+ else
+ {
+ std::ostringstream oss;
+ oss << "This data_tok: id=" << dtokp->id() << " state=" << dtokp->wstate_str();
+ throw jexception(jerrno::JERR_WMGR_DEQDISCONT, oss.str(), "wmgr", "abort");
+ }
+ }
+
+ u_int64_t rid = (dtokp->external_rid() | cont) ? dtokp->rid() : _wrfc.get_incr_rid();
+ _txn_rec.reset(RHM_JDAT_TXA_MAGIC, rid, xid_ptr, xid_len, _wrfc.owi());
+ if (!cont)
+ {
+ dtokp->set_rid(rid);
+ dtokp->set_dequeue_rid(0);
+ dtokp->set_xid(xid_ptr, xid_len);
+ dtokp->set_dblocks_written(0); // Reset dblks_written from previous op
+ _abort_busy = true;
+ }
+ bool done = false;
+ while (!done)
+ {
+ assert(_pg_offset_dblks < _cache_pgsize_sblks * JRNL_SBLK_SIZE);
+ void* wptr = (void*)((char*)_page_ptr_arr[_pg_index] + _pg_offset_dblks * JRNL_DBLK_SIZE);
+ u_int32_t data_offs_dblks = dtokp->dblocks_written();
+ u_int32_t ret = _txn_rec.encode(wptr, data_offs_dblks,
+ (_cache_pgsize_sblks * JRNL_SBLK_SIZE) - _pg_offset_dblks);
+
+ // Remember fid which contains the record header in case record is split over several files
+ if (data_offs_dblks == 0)
+ dtokp->set_fid(_wrfc.index());
+ _pg_offset_dblks += ret;
+ _cached_offset_dblks += ret;
+ dtokp->incr_dblocks_written(ret);
+ dtokp->incr_pg_cnt();
+ _page_cb_arr[_pg_index]._pdtokl->push_back(dtokp);
+
+ // Is the encoding of this record complete?
+ if (dtokp->dblocks_written() >= _txn_rec.rec_size_dblks())
+ {
+ dtokp->set_wstate(data_tok::ABORT_SUBM);
+
+ // Delete this txn from tmap, unlock any locked records in emap
+ std::string xid((const char*)xid_ptr, xid_len);
+ txn_data_list tdl = _tmap.get_remove_tdata_list(xid); // tdl will be empty if xid not found
+ for (tdl_itr itr = tdl.begin(); itr != tdl.end(); itr++)
+ {
+ if (!itr->_enq_flag)
+ _emap.unlock(itr->_drid); // ignore rid not found error
+ if (itr->_enq_flag)
+ _wrfc.decr_enqcnt(itr->_pfid);
+ }
+ std::pair<std::set<std::string>::iterator, bool> res = _txn_pending_set.insert(xid);
+ if (!res.second)
+ {
+ std::ostringstream oss;
+ oss << std::hex << "_txn_pending_set: xid=\"" << xid << "\"";
+ throw jexception(jerrno::JERR_MAP_DUPLICATE, oss.str(), "wmgr", "abort");
+ }
+
+ done = true;
+ }
+ else
+ dtokp->set_wstate(data_tok::ABORT_PART);
+
+ file_header_check(rid, cont, _txn_rec.rec_size_dblks() - data_offs_dblks);
+ flush_check(res, cont, done);
+ }
+ if (dtokp->wstate() >= data_tok::ABORT_SUBM)
+ _abort_busy = false;
+ return res;
+}
+
+iores
+wmgr::commit(data_tok* dtokp, const void* const xid_ptr, const std::size_t xid_len)
+{
+ // commit and abort MUST have a valid xid
+ assert(xid_ptr != 0 && xid_len > 0);
+
+ if (_enq_busy || _deq_busy || _abort_busy)
+ return RHM_IORES_BUSY;
+
+ iores res = pre_write_check(WMGR_COMMIT, dtokp);
+ if (res != RHM_IORES_SUCCESS)
+ return res;
+
+ bool cont = false;
+ if (_commit_busy) // If commit() exited last time with RHM_IORES_FULL or RHM_IORES_PAGE_AIOWAIT
+ {
+ if (dtokp->wstate() == data_tok::COMMIT_PART)
+ cont = true;
+ else
+ {
+ std::ostringstream oss;
+ oss << "This data_tok: id=" << dtokp->id() << " state=" << dtokp->wstate_str();
+ throw jexception(jerrno::JERR_WMGR_DEQDISCONT, oss.str(), "wmgr", "commit");
+ }
+ }
+
+ u_int64_t rid = (dtokp->external_rid() | cont) ? dtokp->rid() : _wrfc.get_incr_rid();
+ _txn_rec.reset(RHM_JDAT_TXC_MAGIC, rid, xid_ptr, xid_len, _wrfc.owi());
+ if (!cont)
+ {
+ dtokp->set_rid(rid);
+ dtokp->set_dequeue_rid(0);
+ dtokp->set_xid(xid_ptr, xid_len);
+ dtokp->set_dblocks_written(0); // Reset dblks_written from previous op
+ _commit_busy = true;
+ }
+ bool done = false;
+ while (!done)
+ {
+ assert(_pg_offset_dblks < _cache_pgsize_sblks * JRNL_SBLK_SIZE);
+ void* wptr = (void*)((char*)_page_ptr_arr[_pg_index] + _pg_offset_dblks * JRNL_DBLK_SIZE);
+ u_int32_t data_offs_dblks = dtokp->dblocks_written();
+ u_int32_t ret = _txn_rec.encode(wptr, data_offs_dblks,
+ (_cache_pgsize_sblks * JRNL_SBLK_SIZE) - _pg_offset_dblks);
+
+ // Remember fid which contains the record header in case record is split over several files
+ if (data_offs_dblks == 0)
+ dtokp->set_fid(_wrfc.index());
+ _pg_offset_dblks += ret;
+ _cached_offset_dblks += ret;
+ dtokp->incr_dblocks_written(ret);
+ dtokp->incr_pg_cnt();
+ _page_cb_arr[_pg_index]._pdtokl->push_back(dtokp);
+
+ // Is the encoding of this record complete?
+ if (dtokp->dblocks_written() >= _txn_rec.rec_size_dblks())
+ {
+ dtokp->set_wstate(data_tok::COMMIT_SUBM);
+
+ // Delete this txn from tmap, process records into emap
+ std::string xid((const char*)xid_ptr, xid_len);
+ txn_data_list tdl = _tmap.get_remove_tdata_list(xid); // tdl will be empty if xid not found
+ for (tdl_itr itr = tdl.begin(); itr != tdl.end(); itr++)
+ {
+ if (itr->_enq_flag) // txn enqueue
+ {
+ if (_emap.insert_pfid(itr->_rid, itr->_pfid) < enq_map::EMAP_OK) // fail
+ {
+ // The only error code emap::insert_pfid() returns is enq_map::EMAP_DUP_RID.
+ std::ostringstream oss;
+ oss << std::hex << "rid=0x" << itr->_rid << " _pfid=0x" << itr->_pfid;
+ throw jexception(jerrno::JERR_MAP_DUPLICATE, oss.str(), "wmgr", "commit");
+ }
+ }
+ else // txn dequeue
+ {
+ int16_t fid = _emap.get_remove_pfid(itr->_drid, true);
+ if (fid < enq_map::EMAP_OK) // fail
+ {
+ if (fid == enq_map::EMAP_RID_NOT_FOUND)
+ {
+ std::ostringstream oss;
+ oss << std::hex << "rid=0x" << rid;
+ throw jexception(jerrno::JERR_MAP_NOTFOUND, oss.str(), "wmgr", "commit");
+ }
+ if (fid == enq_map::EMAP_LOCKED)
+ {
+ std::ostringstream oss;
+ oss << std::hex << "rid=0x" << rid;
+ throw jexception(jerrno::JERR_MAP_LOCKED, oss.str(), "wmgr", "commit");
+ }
+ }
+ _wrfc.decr_enqcnt(fid);
+ }
+ }
+ std::pair<std::set<std::string>::iterator, bool> res = _txn_pending_set.insert(xid);
+ if (!res.second)
+ {
+ std::ostringstream oss;
+ oss << std::hex << "_txn_pending_set: xid=\"" << xid << "\"";
+ throw jexception(jerrno::JERR_MAP_DUPLICATE, oss.str(), "wmgr", "commit");
+ }
+
+ done = true;
+ }
+ else
+ dtokp->set_wstate(data_tok::COMMIT_PART);
+
+ file_header_check(rid, cont, _txn_rec.rec_size_dblks() - data_offs_dblks);
+ flush_check(res, cont, done);
+ }
+ if (dtokp->wstate() >= data_tok::COMMIT_SUBM)
+ _commit_busy = false;
+ return res;
+}
+
+void
+wmgr::file_header_check(const u_int64_t rid, const bool cont, const u_int32_t rec_dblks_rem)
+{
+ // Has the file header been written (i.e. write pointers still at 0)?
+ if (_wrfc.is_void())
+ {
+ bool file_fit = rec_dblks_rem <= _jfsize_dblks;
+ bool file_full = rec_dblks_rem == _jfsize_dblks;
+ std::size_t fro = 0;
+ if (cont)
+ {
+ if (file_fit && !file_full)
+ fro = (rec_dblks_rem + JRNL_SBLK_SIZE) * JRNL_DBLK_SIZE;
+ }
+ else
+ fro = JRNL_SBLK_SIZE * JRNL_DBLK_SIZE;
+ write_fhdr(rid, _wrfc.index(), _wrfc.index(), fro);
+ }
+}
+
+void
+wmgr::flush_check(iores& res, bool& cont, bool& done)
+{
+ // If the page is full, flush it
+ if (_pg_offset_dblks >= _cache_pgsize_sblks * JRNL_SBLK_SIZE)
+ {
+ res = write_flush();
+ assert(res == RHM_IORES_SUCCESS);
+
+ if (_page_cb_arr[_pg_index]._state == AIO_PENDING && !done)
+ {
+ res = RHM_IORES_PAGE_AIOWAIT;
+ done = true;
+ }
+
+ // If file is full, rotate to next file
+ if (_pg_cntr >= _jfsize_pgs)
+ {
+ iores rfres = rotate_file();
+ if (rfres != RHM_IORES_SUCCESS)
+ res = rfres;
+ if (!done)
+ {
+ if (rfres == RHM_IORES_SUCCESS)
+ cont = true;
+ else
+ done = true;
+ }
+ }
+ }
+}
+
+iores
+wmgr::flush()
+{
+ iores res = write_flush();
+ if (_pg_cntr >= _jfsize_pgs)
+ {
+ iores rfres = rotate_file();
+ if (rfres != RHM_IORES_SUCCESS)
+ res = rfres;
+ }
+ return res;
+}
+
+iores
+wmgr::write_flush()
+{
+ iores res = RHM_IORES_SUCCESS;
+ // Don't bother flushing an empty page or one that is still in state AIO_PENDING
+ if (_cached_offset_dblks)
+ {
+ if (_page_cb_arr[_pg_index]._state == AIO_PENDING)
+ res = RHM_IORES_PAGE_AIOWAIT;
+ else
+ {
+ if (_page_cb_arr[_pg_index]._state != IN_USE)
+ {
+ std::ostringstream oss;
+ oss << "pg_index=" << _pg_index << " state=" << _page_cb_arr[_pg_index].state_str();
+ throw jexception(jerrno::JERR_WMGR_BADPGSTATE, oss.str(), "wmgr",
+ "write_flush");
+ }
+
+ // Send current page using AIO
+
+ // In manual flushes, dblks may not coincide with sblks; add filler records ("RHMx")
+ // if necessary.
+ dblk_roundup();
+
+ std::size_t pg_offs = (_pg_offset_dblks - _cached_offset_dblks) * JRNL_DBLK_SIZE;
+ aio_cb* aiocbp = &_aio_cb_arr[_pg_index];
+ aio::prep_pwrite_2(aiocbp, _wrfc.fh(),
+ (char*)_page_ptr_arr[_pg_index] + pg_offs, _cached_offset_dblks * JRNL_DBLK_SIZE,
+ _wrfc.subm_offs());
+ page_cb* pcbp = (page_cb*)(aiocbp->data); // This page control block (pcb)
+ pcbp->_wdblks = _cached_offset_dblks;
+ pcbp->_wfh = _wrfc.file_controller();
+ if (aio::submit(_ioctx, 1, &aiocbp) < 0)
+ throw jexception(jerrno::JERR__AIO, "wmgr", "write_flush");
+ _wrfc.add_subm_cnt_dblks(_cached_offset_dblks);
+ _wrfc.incr_aio_cnt();
+ _aio_evt_rem++;
+ _cached_offset_dblks = 0;
+ _jc->instr_incr_outstanding_aio_cnt();
+
+ rotate_page(); // increments _pg_index, resets _pg_offset_dblks if req'd
+ if (_page_cb_arr[_pg_index]._state == UNUSED)
+ _page_cb_arr[_pg_index]._state = IN_USE;
+ }
+ }
+ get_events(UNUSED, 0);
+ if (_page_cb_arr[_pg_index]._state == UNUSED)
+ _page_cb_arr[_pg_index]._state = IN_USE;
+ return res;
+}
+
+iores
+wmgr::rotate_file()
+{
+ _pg_cntr = 0;
+ iores res = _wrfc.rotate();
+ _jc->chk_wr_frot();
+ return res;
+}
+
+int32_t
+wmgr::get_events(page_state state, timespec* const timeout, bool flush)
+{
+ if (_aio_evt_rem == 0) // no events to get
+ return 0;
+
+ int ret = 0;
+ if ((ret = aio::getevents(_ioctx, flush ? _aio_evt_rem : 1, _aio_evt_rem/*_cache_num_pages + _jc->num_jfiles()*/, _aio_event_arr, timeout)) < 0)
+ {
+ if (ret == -EINTR) // Interrupted by signal
+ return 0;
+ std::ostringstream oss;
+ oss << "io_getevents() failed: " << std::strerror(-ret) << " (" << ret << ")";
+ throw jexception(jerrno::JERR__AIO, oss.str(), "wmgr", "get_events");
+ }
+
+ if (ret == 0 && timeout)
+ return jerrno::AIO_TIMEOUT;
+
+ int32_t tot_data_toks = 0;
+ for (int i=0; i<ret; i++) // Index of returned AIOs
+ {
+ if (_aio_evt_rem == 0)
+ {
+ std::ostringstream oss;
+ oss << "_aio_evt_rem; evt " << (i + 1) << " of " << ret;
+ throw jexception(jerrno::JERR__UNDERFLOW, oss.str(), "wmgr", "get_events");
+ }
+ _aio_evt_rem--;
+ aio_cb* aiocbp = _aio_event_arr[i].obj; // This I/O control block (iocb)
+ page_cb* pcbp = (page_cb*)(aiocbp->data); // This page control block (pcb)
+ long aioret = (long)_aio_event_arr[i].res;
+ if (aioret < 0)
+ {
+ std::ostringstream oss;
+ oss << "AIO write operation failed: " << std::strerror(-aioret) << " (" << aioret << ") [";
+ if (pcbp)
+ oss << "pg=" << pcbp->_index;
+ else
+ {
+ file_hdr* fhp = (file_hdr*)aiocbp->u.c.buf;
+ oss << "fid=" << fhp->_pfid;
+ }
+ oss << " size=" << aiocbp->u.c.nbytes;
+ oss << " offset=" << aiocbp->u.c.offset << " fh=" << aiocbp->aio_fildes << "]";
+ throw jexception(jerrno::JERR__AIO, oss.str(), "wmgr", "get_events");
+ }
+ if (pcbp) // Page writes have pcb
+ {
+ u_int32_t s = pcbp->_pdtokl->size();
+ std::vector<data_tok*> dtokl;
+ dtokl.reserve(s);
+ for (u_int32_t k=0; k<s; k++)
+ {
+ data_tok* dtokp = pcbp->_pdtokl->at(k);
+ if (dtokp->decr_pg_cnt() == 0)
+ {
+ std::set<std::string>::iterator it;
+ switch (dtokp->wstate())
+ {
+ case data_tok::ENQ_SUBM:
+ dtokl.push_back(dtokp);
+ tot_data_toks++;
+ dtokp->set_wstate(data_tok::ENQ);
+ if (dtokp->has_xid())
+ // Ignoring return value here. A non-zero return can signify that the transaction
+ // has committed or aborted, and that this completed prior to the aio returning.
+ _tmap.set_aio_compl(dtokp->xid(), dtokp->rid());
+ break;
+ case data_tok::DEQ_SUBM:
+ dtokl.push_back(dtokp);
+ tot_data_toks++;
+ dtokp->set_wstate(data_tok::DEQ);
+ if (dtokp->has_xid())
+ // Ignoring return value - see note above.
+ _tmap.set_aio_compl(dtokp->xid(), dtokp->rid());
+ break;
+ case data_tok::ABORT_SUBM:
+ dtokl.push_back(dtokp);
+ tot_data_toks++;
+ dtokp->set_wstate(data_tok::ABORTED);
+ it = _txn_pending_set.find(dtokp->xid());
+ if (it == _txn_pending_set.end())
+ {
+ std::ostringstream oss;
+ oss << std::hex << "_txn_pending_set: abort xid=\"";
+ oss << dtokp->xid() << "\"";
+ throw jexception(jerrno::JERR_MAP_NOTFOUND, oss.str(), "wmgr",
+ "get_events");
+ }
+ _txn_pending_set.erase(it);
+ break;
+ case data_tok::COMMIT_SUBM:
+ dtokl.push_back(dtokp);
+ tot_data_toks++;
+ dtokp->set_wstate(data_tok::COMMITTED);
+ it = _txn_pending_set.find(dtokp->xid());
+ if (it == _txn_pending_set.end())
+ {
+ std::ostringstream oss;
+ oss << std::hex << "_txn_pending_set: commit xid=\"";
+ oss << dtokp->xid() << "\"";
+ throw jexception(jerrno::JERR_MAP_NOTFOUND, oss.str(), "wmgr",
+ "get_events");
+ }
+ _txn_pending_set.erase(it);
+ break;
+ case data_tok::ENQ_PART:
+ case data_tok::DEQ_PART:
+ case data_tok::ABORT_PART:
+ case data_tok::COMMIT_PART:
+ // ignore these
+ break;
+ default:
+ // throw for anything else
+ std::ostringstream oss;
+ oss << "dtok_id=" << dtokp->id() << " dtok_state=" << dtokp->wstate_str();
+ throw jexception(jerrno::JERR_WMGR_BADDTOKSTATE, oss.str(), "wmgr",
+ "get_events");
+ } // switch
+ } // if
+ } // for
+
+ // Increment the completed write offset
+ // NOTE: We cannot use _wrfc here, as it may have rotated since submitting count.
+ // Use stored pointer to fcntl in the pcb instead.
+ pcbp->_wfh->add_wr_cmpl_cnt_dblks(pcbp->_wdblks);
+ pcbp->_wfh->decr_aio_cnt();
+ _jc->instr_decr_outstanding_aio_cnt();
+
+ // Clean up this pcb's data_tok list
+ pcbp->_pdtokl->clear();
+ pcbp->_state = state;
+
+ // Perform AIO return callback
+ if (_cbp && tot_data_toks)
+ _cbp->wr_aio_cb(dtokl);
+ }
+ else // File header writes have no pcb
+ {
+ // get lfid from original file header record, update info for that lfid
+ file_hdr* fhp = (file_hdr*)aiocbp->u.c.buf;
+ u_int32_t lfid = fhp->_lfid;
+ fcntl* fcntlp = _jc->get_fcntlp(lfid);
+ fcntlp->add_wr_cmpl_cnt_dblks(JRNL_SBLK_SIZE);
+ fcntlp->decr_aio_cnt();
+ fcntlp->set_wr_fhdr_aio_outstanding(false);
+ }
+ }
+
+ return tot_data_toks;
+}
+
+bool
+wmgr::is_txn_synced(const std::string& xid)
+{
+ // Ignore xid not found error here
+ if (_tmap.is_txn_synced(xid) == txn_map::TMAP_NOT_SYNCED)
+ return false;
+ // Check for outstanding commit/aborts
+ std::set<std::string>::iterator it = _txn_pending_set.find(xid);
+ return it == _txn_pending_set.end();
+}
+
+void
+wmgr::initialize(aio_callback* const cbp, const u_int32_t wcache_pgsize_sblks, const u_int16_t wcache_num_pages)
+{
+ pmgr::initialize(cbp, wcache_pgsize_sblks, wcache_num_pages);
+ wmgr::clean();
+ _num_jfiles = _jc->num_jfiles();
+ if (::posix_memalign(&_fhdr_base_ptr, _sblksize, _sblksize * _num_jfiles))
+ {
+ wmgr::clean();
+ std::ostringstream oss;
+ oss << "posix_memalign(): blksize=" << _sblksize << " size=" << (_sblksize * _num_jfiles);
+ oss << FORMAT_SYSERR(errno);
+ throw jexception(jerrno::JERR__MALLOC, oss.str(), "wmgr", "initialize");
+ }
+ _fhdr_ptr_arr = (void**)std::malloc(_num_jfiles * sizeof(void*));
+ MALLOC_CHK(_fhdr_ptr_arr, "_fhdr_ptr_arr", "wmgr", "initialize");
+ _fhdr_aio_cb_arr = (aio_cb**)std::malloc(sizeof(aio_cb*) * _num_jfiles);
+ MALLOC_CHK(_fhdr_aio_cb_arr, "_fhdr_aio_cb_arr", "wmgr", "initialize");
+ std::memset(_fhdr_aio_cb_arr, 0, sizeof(aio_cb*) * _num_jfiles);
+ for (u_int16_t i=0; i<_num_jfiles; i++)
+ {
+ _fhdr_ptr_arr[i] = (void*)((char*)_fhdr_base_ptr + _sblksize * i);
+ _fhdr_aio_cb_arr[i] = new aio_cb;
+ }
+ _page_cb_arr[0]._state = IN_USE;
+ _ddtokl.clear();
+ _cached_offset_dblks = 0;
+ _enq_busy = false;
+}
+
+iores
+wmgr::pre_write_check(const _op_type op, const data_tok* const dtokp,
+ const std::size_t xidsize, const std::size_t dsize, const bool external
+ ) const
+{
+ // Check status of current file
+ if (!_wrfc.is_wr_reset())
+ {
+ if (!_wrfc.wr_reset())
+ return RHM_IORES_FULL;
+ }
+
+ // Check status of current page is ok for writing
+ if (_page_cb_arr[_pg_index]._state != IN_USE)
+ {
+ if (_page_cb_arr[_pg_index]._state == UNUSED)
+ _page_cb_arr[_pg_index]._state = IN_USE;
+ else if (_page_cb_arr[_pg_index]._state == AIO_PENDING)
+ return RHM_IORES_PAGE_AIOWAIT;
+ else
+ {
+ std::ostringstream oss;
+ oss << "jrnl=" << _jc->id() << " op=" << _op_str[op];
+ oss << " index=" << _pg_index << " pg_state=" << _page_cb_arr[_pg_index].state_str();
+ throw jexception(jerrno::JERR_WMGR_BADPGSTATE, oss.str(), "wmgr", "pre_write_check");
+ }
+ }
+
+ // operation-specific checks
+ switch (op)
+ {
+ case WMGR_ENQUEUE:
+ {
+ // Check for enqueue reaching cutoff threshold
+ u_int32_t size_dblks = jrec::size_dblks(enq_rec::rec_size(xidsize, dsize,
+ external));
+ if (!_enq_busy && _wrfc.enq_threshold(_cached_offset_dblks + size_dblks))
+ return RHM_IORES_ENQCAPTHRESH;
+ if (!dtokp->is_writable())
+ {
+ std::ostringstream oss;
+ oss << "jrnl=" << _jc->id() << " op=" << _op_str[op];
+ oss << " dtok_id=" << dtokp->id() << " dtok_state=" << dtokp->wstate_str();
+ throw jexception(jerrno::JERR_WMGR_BADDTOKSTATE, oss.str(), "wmgr",
+ "pre_write_check");
+ }
+ }
+ break;
+ case WMGR_DEQUEUE:
+ if (!dtokp->is_dequeueable())
+ {
+ std::ostringstream oss;
+ oss << "jrnl=" << _jc->id() << " op=" << _op_str[op];
+ oss << " dtok_id=" << dtokp->id() << " dtok_state=" << dtokp->wstate_str();
+ throw jexception(jerrno::JERR_WMGR_BADDTOKSTATE, oss.str(), "wmgr",
+ "pre_write_check");
+ }
+ break;
+ case WMGR_ABORT:
+ break;
+ case WMGR_COMMIT:
+ break;
+ }
+
+ return RHM_IORES_SUCCESS;
+}
+
+void
+wmgr::dequeue_check(const std::string& xid, const u_int64_t drid)
+{
+ // First check emap
+ bool found = false;
+ int16_t fid = _emap.get_pfid(drid);
+ if (fid < enq_map::EMAP_OK) // fail
+ {
+ if (fid == enq_map::EMAP_RID_NOT_FOUND)
+ {
+ if (xid.size())
+ found = _tmap.data_exists(xid, drid);
+ }
+ else if (fid == enq_map::EMAP_LOCKED)
+ {
+ std::ostringstream oss;
+ oss << std::hex << "drid=0x" << drid;
+ throw jexception(jerrno::JERR_MAP_LOCKED, oss.str(), "wmgr", "dequeue_check");
+ }
+ }
+ else
+ found = true;
+ if (!found)
+ {
+ std::ostringstream oss;
+ oss << "jrnl=" << _jc->id() << " drid=0x" << std::hex << drid;
+ throw jexception(jerrno::JERR_WMGR_DEQRIDNOTENQ, oss.str(), "wmgr", "dequeue_check");
+ }
+}
+
+void
+wmgr::dblk_roundup()
+{
+ const u_int32_t xmagic = RHM_JDAT_EMPTY_MAGIC;
+ u_int32_t wdblks = jrec::size_blks(_cached_offset_dblks, JRNL_SBLK_SIZE) * JRNL_SBLK_SIZE;
+ while (_cached_offset_dblks < wdblks)
+ {
+ void* wptr = (void*)((char*)_page_ptr_arr[_pg_index] + _pg_offset_dblks * JRNL_DBLK_SIZE);
+ std::memcpy(wptr, (const void*)&xmagic, sizeof(xmagic));
+#ifdef RHM_CLEAN
+ std::memset((char*)wptr + sizeof(xmagic), RHM_CLEAN_CHAR, JRNL_DBLK_SIZE - sizeof(xmagic));
+#endif
+ _pg_offset_dblks++;
+ _cached_offset_dblks++;
+ }
+}
+
+void
+wmgr::write_fhdr(u_int64_t rid, u_int16_t fid, u_int16_t lid, std::size_t fro)
+{
+ file_hdr fhdr(RHM_JDAT_FILE_MAGIC, RHM_JDAT_VERSION, rid, fid, lid, fro, _wrfc.owi(), true);
+ std::memcpy(_fhdr_ptr_arr[fid], &fhdr, sizeof(fhdr));
+#ifdef RHM_CLEAN
+ std::memset((char*)_fhdr_ptr_arr[fid] + sizeof(fhdr), RHM_CLEAN_CHAR, _sblksize - sizeof(fhdr));
+#endif
+ aio_cb* aiocbp = _fhdr_aio_cb_arr[fid];
+ aio::prep_pwrite(aiocbp, _wrfc.fh(), _fhdr_ptr_arr[fid], _sblksize, 0);
+ if (aio::submit(_ioctx, 1, &aiocbp) < 0)
+ throw jexception(jerrno::JERR__AIO, "wmgr", "write_fhdr");
+ _aio_evt_rem++;
+ _wrfc.add_subm_cnt_dblks(JRNL_SBLK_SIZE);
+ _wrfc.incr_aio_cnt();
+ _wrfc.file_controller()->set_wr_fhdr_aio_outstanding(true);
+}
+
+void
+wmgr::rotate_page()
+{
+ _page_cb_arr[_pg_index]._state = AIO_PENDING;
+ if (_pg_offset_dblks >= _cache_pgsize_sblks * JRNL_SBLK_SIZE)
+ {
+ _pg_offset_dblks = 0;
+ _pg_cntr++;
+ }
+ if (++_pg_index >= _cache_num_pages)
+ _pg_index = 0;
+}
+
+void
+wmgr::clean()
+{
+ std::free(_fhdr_base_ptr);
+ _fhdr_base_ptr = 0;
+
+ std::free(_fhdr_ptr_arr);
+ _fhdr_ptr_arr = 0;
+
+ if (_fhdr_aio_cb_arr)
+ {
+ for (u_int32_t i=0; i<_num_jfiles; i++)
+ delete _fhdr_aio_cb_arr[i];
+ std::free(_fhdr_aio_cb_arr);
+ _fhdr_aio_cb_arr = 0;
+ }
+}
+
+const std::string
+wmgr::status_str() const
+{
+ std::ostringstream oss;
+ oss << "wmgr: pi=" << _pg_index << " pc=" << _pg_cntr;
+ oss << " po=" << _pg_offset_dblks << " aer=" << _aio_evt_rem;
+ oss << " edac:" << (_enq_busy?"T":"F") << (_deq_busy?"T":"F");
+ oss << (_abort_busy?"T":"F") << (_commit_busy?"T":"F");
+ oss << " ps=[";
+ for (int i=0; i<_cache_num_pages; i++)
+ {
+ switch (_page_cb_arr[i]._state)
+ {
+ case UNUSED: oss << "-"; break;
+ case IN_USE: oss << "U"; break;
+ case AIO_PENDING: oss << "A"; break;
+ case AIO_COMPLETE: oss << "*"; break;
+ default: oss << _page_cb_arr[i]._state;
+ }
+ }
+ oss << "] " << _wrfc.status_str();
+ return oss.str();
+}
+
+// static
+
+const char* wmgr::_op_str[] = {"enqueue", "dequeue", "abort", "commit"};
+
+} // namespace journal
+} // namespace mrg
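A hedged caller-side sketch (not part of the patch) of the write cycle a store layer drives against this class, based on the return codes handled above: retry an enqueue that hits RHM_IORES_PAGE_AIOWAIT after reaping AIO completions, then flush the partial page and reap again so completions reach aio_callback::wr_aio_cb(). The wmgr and data_tok instances and the helper name are assumptions; initialization and the other return codes are omitted.

    // Hedged sketch: one non-transactional enqueue followed by a flush.
    void write_one_record(mrg::journal::wmgr& w, mrg::journal::data_tok& dtok,
                          const void* data, std::size_t len)
    {
        using namespace mrg::journal;
        iores res = w.enqueue(data, len, len, &dtok, 0, 0, false, false); // no xid, not transient/external
        while (res == RHM_IORES_PAGE_AIOWAIT)                             // page cache waiting on AIO
        {
            w.get_events(UNUSED, 0);                                      // reap completions, free pages
            res = w.enqueue(data, len, len, &dtok, 0, 0, false, false);
        }
        // A real caller also handles RHM_IORES_BUSY, RHM_IORES_FULL and RHM_IORES_ENQCAPTHRESH.
        w.flush();                                                        // submit the partially filled page
        w.get_events(UNUSED, 0);                                          // completions arrive via wr_aio_cb()
    }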
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/wmgr.h b/qpid/cpp/src/qpid/legacystore/jrnl/wmgr.h
new file mode 100644
index 0000000000..8347221b1d
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/wmgr.h
@@ -0,0 +1,147 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file wmgr.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::wmgr (write manager). See
+ * class documentation for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_WMGR_H
+#define QPID_LEGACYSTORE_JRNL_WMGR_H
+
+namespace mrg
+{
+namespace journal
+{
+class wmgr;
+}
+}
+
+#include <cstring>
+#include "qpid/legacystore/jrnl/enums.h"
+#include "qpid/legacystore/jrnl/pmgr.h"
+#include "qpid/legacystore/jrnl/wrfc.h"
+#include <set>
+
+namespace mrg
+{
+namespace journal
+{
+
+ /**
+ * \brief Class for managing a write page cache of arbitrary size and number of pages.
+ *
+ * The write page cache works on the principle of caching the write data within a page until
+ * that page is either full or flushed; this initiates a single AIO write operation to store
+ * the data on disk.
+ *
+ * Maximum disk throughput is achieved by keeping the write operations of uniform size;
+ * waiting for a page cache to fill before writing achieves this, and in high data
+ * volume/throughput situations it yields optimal disk throughput. Calling flush() forces a
+ * write of the current page cache no matter how full it is, which disrupts the uniformity
+ * of the write operations. This should normally only be done if throughput drops and there
+ * is a danger of a page of unwritten data waiting around for an excessive time.
+ *
+ * The usual tradeoff between data storage latency and throughput performance applies.
+ */
+ class wmgr : public pmgr
+ {
+ private:
+ wrfc& _wrfc; ///< Ref to write rotating file controller
+ u_int32_t _max_dtokpp; ///< Max data writes per page
+ u_int32_t _max_io_wait_us; ///< Max wait in microseconds till submit
+ void* _fhdr_base_ptr; ///< Base pointer to file header memory
+ void** _fhdr_ptr_arr; ///< Array of pointers to file headers memory
+ aio_cb** _fhdr_aio_cb_arr; ///< Array of iocb pointers for file header writes
+ u_int32_t _cached_offset_dblks; ///< Amount of unwritten data in page (dblocks)
+ std::deque<data_tok*> _ddtokl; ///< Deferred dequeue data_tok list
+ u_int32_t _jfsize_dblks; ///< Journal file size in dblks (NOT sblks!)
+ u_int32_t _jfsize_pgs; ///< Journal file size in cache pages
+ u_int16_t _num_jfiles; ///< Number of files used in iocb mallocs
+
+ // TODO: Convert _enq_busy etc into a proper threadsafe lock
+ // TODO: Convert to enum? Are these encodes mutually exclusive?
+ bool _enq_busy; ///< Flag true if enqueue is in progress
+ bool _deq_busy; ///< Flag true if dequeue is in progress
+ bool _abort_busy; ///< Flag true if abort is in progress
+ bool _commit_busy; ///< Flag true if commit is in progress
+
+ enum _op_type { WMGR_ENQUEUE = 0, WMGR_DEQUEUE, WMGR_ABORT, WMGR_COMMIT };
+ static const char* _op_str[];
+
+ enq_rec _enq_rec; ///< Enqueue record used for encoding/decoding
+ deq_rec _deq_rec; ///< Dequeue record used for encoding/decoding
+ txn_rec _txn_rec; ///< Transaction record used for encoding/decoding
+ std::set<std::string> _txn_pending_set; ///< Set containing xids of pending commits/aborts
+
+ public:
+ wmgr(jcntl* jc, enq_map& emap, txn_map& tmap, wrfc& wrfc);
+ wmgr(jcntl* jc, enq_map& emap, txn_map& tmap, wrfc& wrfc, const u_int32_t max_dtokpp,
+ const u_int32_t max_iowait_us);
+ virtual ~wmgr();
+
+ void initialize(aio_callback* const cbp, const u_int32_t wcache_pgsize_sblks,
+ const u_int16_t wcache_num_pages, const u_int32_t max_dtokpp,
+ const u_int32_t max_iowait_us, std::size_t eo = 0);
+ iores enqueue(const void* const data_buff, const std::size_t tot_data_len,
+ const std::size_t this_data_len, data_tok* dtokp, const void* const xid_ptr,
+ const std::size_t xid_len, const bool transient, const bool external);
+ iores dequeue(data_tok* dtokp, const void* const xid_ptr, const std::size_t xid_len,
+ const bool txn_coml_commit);
+ iores abort(data_tok* dtokp, const void* const xid_ptr, const std::size_t xid_len);
+ iores commit(data_tok* dtokp, const void* const xid_ptr, const std::size_t xid_len);
+ iores flush();
+ int32_t get_events(page_state state, timespec* const timeout, bool flush = false);
+ bool is_txn_synced(const std::string& xid);
+ inline bool curr_pg_blocked() const { return _page_cb_arr[_pg_index]._state != UNUSED; }
+ inline bool curr_file_blocked() const { return _wrfc.aio_cnt() > 0; }
+ inline u_int32_t unflushed_dblks() { return _cached_offset_dblks; }
+
+ // Debug aid
+ const std::string status_str() const;
+
+ private:
+ void initialize(aio_callback* const cbp, const u_int32_t wcache_pgsize_sblks,
+ const u_int16_t wcache_num_pages);
+ iores pre_write_check(const _op_type op, const data_tok* const dtokp,
+ const std::size_t xidsize = 0, const std::size_t dsize = 0, const bool external = false)
+ const;
+ void dequeue_check(const std::string& xid, const u_int64_t drid);
+ void file_header_check(const u_int64_t rid, const bool cont, const u_int32_t rec_dblks_rem);
+ void flush_check(iores& res, bool& cont, bool& done);
+ iores write_flush();
+ iores rotate_file();
+ void dblk_roundup();
+ void write_fhdr(u_int64_t rid, u_int16_t fid, u_int16_t lid, std::size_t fro);
+ void rotate_page();
+ void clean();
+ };
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_WMGR_H
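A hedged sketch (not part of the patch) of transactional use of the public interface declared above: enqueue under an xid, commit that xid, then poll is_txn_synced() until the commit record's AIO has landed. Error handling is reduced to early returns, and the tokens, xid and helper name are caller-supplied assumptions.

    // Hedged sketch: enqueue one message inside a transaction and commit it.
    void commit_one_message(mrg::journal::wmgr& w,
                            mrg::journal::data_tok& enq_tok,
                            mrg::journal::data_tok& txn_tok,
                            const void* data, std::size_t len, const std::string& xid)
    {
        using namespace mrg::journal;
        iores res = w.enqueue(data, len, len, &enq_tok, xid.data(), xid.size(),
                              false /*transient*/, false /*external*/);
        if (res != RHM_IORES_SUCCESS) return;        // real code retries on AIOWAIT/BUSY/FULL
        res = w.commit(&txn_tok, xid.data(), xid.size());
        if (res != RHM_IORES_SUCCESS) return;
        w.flush();                                   // ensure the commit record is submitted
        while (!w.is_txn_synced(xid))                // true once the commit AIO has completed
            w.get_events(UNUSED, 0);
    }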
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/wrfc.cpp b/qpid/cpp/src/qpid/legacystore/jrnl/wrfc.cpp
new file mode 100644
index 0000000000..43461b66a3
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/wrfc.cpp
@@ -0,0 +1,162 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file wrfc.cpp
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::wrfc (rotating
+ * file controller). See comments in file wrfc.h for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#include "qpid/legacystore/jrnl/wrfc.h"
+
+#include <cmath>
+#include "qpid/legacystore/jrnl/jerrno.h"
+#include "qpid/legacystore/jrnl/jexception.h"
+
+namespace mrg
+{
+namespace journal
+{
+
+wrfc::wrfc(const lpmgr* lpmp):
+ rfc(lpmp),
+ _fsize_sblks(0),
+ _fsize_dblks(0),
+ _enq_cap_offs_dblks(0),
+ _rid(0),
+ _reset_ok(false),
+ _owi(false),
+ _frot(true)
+{}
+
+wrfc::~wrfc()
+{}
+
+void
+wrfc::initialize(const u_int32_t fsize_sblks, rcvdat* rdp)
+{
+ if (rdp)
+ {
+ _fc_index = rdp->_lfid;
+ _curr_fc = _lpmp->get_fcntlp(_fc_index);
+ _curr_fc->wr_reset(rdp);
+ _rid = rdp->_h_rid + 1;
+ _reset_ok = true;
+ _owi = rdp->_owi;
+ _frot = rdp->_frot;
+ if (rdp->_lffull)
+ rotate();
+ }
+ else
+ {
+ rfc::initialize();
+ rfc::set_findex(0);
+ _rid = 0ULL;
+ _reset_ok = false;
+ }
+ _fsize_sblks = fsize_sblks;
+ _fsize_dblks = fsize_sblks * JRNL_SBLK_SIZE;
+ _enq_cap_offs_dblks = (u_int32_t)std::ceil(_fsize_dblks * _lpmp->num_jfiles() * (100.0 - JRNL_ENQ_THRESHOLD) / 100);
+ // Check the offset is at least one file; if not, make it so
+ if (_enq_cap_offs_dblks < _fsize_dblks)
+ _enq_cap_offs_dblks = _fsize_dblks;
+}
+
+iores wrfc::rotate()
+{
+ if (!_lpmp->num_jfiles())
+ throw jexception(jerrno::JERR__NINIT, "wrfc", "rotate");
+ _fc_index++;
+ if (_fc_index == _lpmp->num_jfiles())
+ {
+ _fc_index = 0;
+ _owi = !_owi;
+ _frot = false;
+ }
+ _curr_fc = _lpmp->get_fcntlp(_fc_index);
+ if (_curr_fc->aio_cnt())
+ return RHM_IORES_FILE_AIOWAIT;
+ if (!wr_reset()) // Check whether the file is still in use (i.e. not fully dequeued yet)
+ return RHM_IORES_FULL;
+ return RHM_IORES_SUCCESS;
+}
+
+u_int16_t wrfc::earliest_index() const
+{
+ if (_frot)
+ return 0;
+ u_int16_t next_index = _fc_index + 1;
+ if (next_index >= _lpmp->num_jfiles())
+ next_index = 0;
+ return next_index;
+}
+
+bool
+wrfc::enq_threshold(const u_int32_t enq_dsize_dblks) const
+{
+ u_int32_t subm_dblks = subm_cnt_dblks(); // includes file hdr if > 0
+ // This compensates for new files which don't have their file headers written yet,
+ // as file header space cannot be included in this calculation.
+ if (subm_dblks != 0)
+ subm_dblks -= 4;
+ u_int32_t fwd_dblks = subm_dblks + enq_dsize_dblks + _enq_cap_offs_dblks;
+ u_int16_t findex = _fc_index;
+ fcntl* fcp = _curr_fc;
+ bool in_use = false;
+ while (fwd_dblks && !(findex != _fc_index && fcp->enqcnt()))
+ {
+ fwd_dblks -= fwd_dblks > _fsize_dblks ? _fsize_dblks : fwd_dblks;
+ if (fwd_dblks)
+ {
+ if (++findex == _lpmp->num_jfiles())
+ findex = 0;
+ fcp = _lpmp->get_fcntlp(findex);
+ }
+ in_use |= fcp->enqcnt() > 0;
+ }
+ // Return true if threshold exceeded
+ return findex != _fc_index && in_use;
+}
+
+bool wrfc::wr_reset()
+{
+ _reset_ok = _curr_fc->reset(); // returns false if full (ie file still contains enqueued recs)
+ return _reset_ok;
+}
+
+// TODO: update this to reflect all status data
+std::string
+wrfc::status_str() const
+{
+ std::ostringstream oss;
+ oss << "wrfc: " << rfc::status_str();
+ if (is_active())
+ oss << " fcntl[" << _fc_index << "]: " << _curr_fc->status_str();
+ return oss.str();
+}
+
+} // namespace journal
+} // namespace mrg
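To make the enqueue-capacity offset computed in initialize() above concrete, a worked example with illustrative figures follows; the real JRNL_ENQ_THRESHOLD and file geometry come from the journal's enums and configuration, not from these numbers.

    // Illustrative figures only (assumed for the arithmetic, not taken from the code):
    //   _fsize_dblks       = 12288 dblks per journal file
    //   num_jfiles         = 8
    //   JRNL_ENQ_THRESHOLD = 80    (percent)
    //
    //   _enq_cap_offs_dblks = ceil(12288 * 8 * (100 - 80) / 100)
    //                       = ceil(98304 * 0.2)
    //                       = 19661 dblks  (about 1.6 files' worth)
    //
    // Since 19661 > 12288, the "at least one file" clamp does not apply here;
    // enq_threshold() will start refusing enqueues while roughly 20% of total journal
    // capacity is still unwritten, leaving headroom for the dequeues needed to free space.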
diff --git a/qpid/cpp/src/qpid/legacystore/jrnl/wrfc.h b/qpid/cpp/src/qpid/legacystore/jrnl/wrfc.h
new file mode 100644
index 0000000000..f0e4e73151
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/jrnl/wrfc.h
@@ -0,0 +1,154 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+/**
+ * \file wrfc.h
+ *
+ * Qpid asynchronous store plugin library
+ *
+ * File containing code for class mrg::journal::wrfc (write rotating
+ * file controller). See class documentation for details.
+ *
+ * \author Kim van der Riet
+ */
+
+#ifndef QPID_LEGACYSTORE_JRNL_WRFC_H
+#define QPID_LEGACYSTORE_JRNL_WRFC_H
+
+namespace mrg
+{
+namespace journal
+{
+class wrfc;
+}
+}
+
+#include <cstddef>
+#include "qpid/legacystore/jrnl/enums.h"
+#include "qpid/legacystore/jrnl/rrfc.h"
+
+namespace mrg
+{
+namespace journal
+{
+
+ /**
+ * \class wrfc
+ * \brief Class to handle write management of a journal rotating file controller.
+ */
+ class wrfc : public rfc
+ {
+ private:
+ u_int32_t _fsize_sblks; ///< Size of journal files in sblks
+ u_int32_t _fsize_dblks; ///< Size of journal files in dblks
+ u_int32_t _enq_cap_offs_dblks; ///< Enqueue capacity offset
+ u_int64_t _rid; ///< Master counter for record ID (rid)
+ bool _reset_ok; ///< Flag set when reset succeeds
+ bool _owi; ///< Overwrite indicator
+ bool _frot; ///< Flag is true for first rotation, false otherwise
+
+ public:
+ wrfc(const lpmgr* lpmp);
+ virtual ~wrfc();
+
+ /**
+ * \brief Initialize the controller.
+ * \param fsize_sblks Size of each journal file in sblks.
+ * \param rdp Struct carrying restore information. Optional for non-restore use, defaults to 0 (NULL).
+ */
+ using rfc::initialize;
+ void initialize(const u_int32_t fsize_sblks, rcvdat* rdp = 0);
+
+ /**
+ * \brief Rotate active file controller to next file in rotating file group.
+ * \exception jerrno::JERR__NINIT if called before calling initialize().
+ */
+ iores rotate();
+
+ /**
+ * \brief Returns the index of the earliest complete file within the rotating
+ * file group. Unwritten files are excluded. The currently active file is
+ * excluded unless it is the only written file.
+ */
+ u_int16_t earliest_index() const;
+
+ /**
+ * \brief Determines if a proposed write would cause the enqueue threshold to be exceeded.
+ *
+ * This routine determines whether the next write will take the write pointer beyond the
+ * enqueue limit threshold. The diagram below illustrates how this is determined.
+ * <pre>
+ * Current file index: 4 +---+----------+
+ * X's mark still-enqueued records |msg| 1-thresh |
+ * msg = current msg size + unwritten cache +---+----------+
+ * thresh = JRNL_ENQ_THRESHOLD as a fraction ^ V
+ * +-------+-------+-------+-------+--+----+-------+-+-----+-------+
+ * file num ->| 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
+ * enq recs ->| X XX |XX XXX |XX XXXX|XXXXXXX|XX | | | X |
+ * +-------+-------+-------+-------+--+----+-------+-+-----+-------+
+ * ^ ^ ^
+ * subm_dblks --+ | |
+ * These files must be free of enqueues
+ * If not, return true.
+ * </pre>
+ * \param enq_dsize_dblks Proposed size of write in dblocks
+ */
+ bool enq_threshold(const u_int32_t enq_dsize_dblks) const;
+
+ inline u_int64_t rid() const { return _rid; }
+ inline u_int64_t get_incr_rid() { return _rid++; }
+ bool wr_reset();
+ inline bool is_wr_reset() const { return _reset_ok; }
+ inline bool owi() const { return _owi; }
+ inline bool frot() const { return _frot; }
+
+ // Convenience access methods to current file controller
+
+ inline int fh() const { return _curr_fc->wr_fh(); }
+
+ inline u_int32_t subm_cnt_dblks() const { return _curr_fc->wr_subm_cnt_dblks(); }
+ inline std::size_t subm_offs() const { return _curr_fc->wr_subm_offs(); }
+ inline u_int32_t add_subm_cnt_dblks(u_int32_t a) { return _curr_fc->add_wr_subm_cnt_dblks(a); }
+
+ inline u_int32_t cmpl_cnt_dblks() const { return _curr_fc->wr_cmpl_cnt_dblks(); }
+ inline std::size_t cmpl_offs() const { return _curr_fc->wr_cmpl_offs(); }
+ inline u_int32_t add_cmpl_cnt_dblks(u_int32_t a) { return _curr_fc->add_wr_cmpl_cnt_dblks(a); }
+
+ inline u_int16_t aio_cnt() const { return _curr_fc->aio_cnt(); }
+ inline u_int16_t incr_aio_cnt() { return _curr_fc->incr_aio_cnt(); }
+ inline u_int16_t decr_aio_cnt() { return _curr_fc->decr_aio_cnt(); }
+
+ inline bool is_void() const { return _curr_fc->wr_void(); }
+ inline bool is_empty() const { return _curr_fc->wr_empty(); }
+ inline u_int32_t remaining_dblks() const { return _curr_fc->wr_remaining_dblks(); }
+ inline bool is_full() const { return _curr_fc->is_wr_full(); };
+ inline bool is_compl() const { return _curr_fc->is_wr_compl(); };
+ inline u_int32_t aio_outstanding_dblks() const { return _curr_fc->wr_aio_outstanding_dblks(); }
+ inline bool file_rotate() const { return _curr_fc->wr_file_rotate(); }
+
+ // Debug aid
+ std::string status_str() const;
+ };
+
+} // namespace journal
+} // namespace mrg
+
+#endif // ifndef QPID_LEGACYSTORE_JRNL_WRFC_H
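A short note (not part of the patch) on how the threshold test documented above is consumed: wmgr::pre_write_check() earlier in this diff sizes the candidate enqueue in dblks and adds the unwritten page-cache contents before asking the controller, so data already accepted but not yet submitted counts against the same limit.

    // Excerpted from wmgr::pre_write_check() (WMGR_ENQUEUE case) earlier in this diff:
    u_int32_t size_dblks = jrec::size_dblks(enq_rec::rec_size(xidsize, dsize, external));
    if (!_enq_busy && _wrfc.enq_threshold(_cached_offset_dblks + size_dblks))
        return RHM_IORES_ENQCAPTHRESH;   // caller must flush/dequeue before retrying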
diff --git a/qpid/cpp/src/qpid/legacystore/management-schema.xml b/qpid/cpp/src/qpid/legacystore/management-schema.xml
new file mode 100644
index 0000000000..65969f0fb2
--- /dev/null
+++ b/qpid/cpp/src/qpid/legacystore/management-schema.xml
@@ -0,0 +1,99 @@
+<schema package="org.apache.qpid.legacystore">
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied. See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+ <class name="Store">
+ <property name="brokerRef" type="objId" access="RO" references="qpid.Broker" index="y" parentRef="y"/>
+ <property name="location" type="sstr" access="RO" desc="Logical directory on disk"/>
+ <property name="defaultInitialFileCount" type="uint16" access="RO" unit="file" desc="Default number of files initially allocated to each journal"/>
+ <property name="defaultDataFileSize" type="uint32" access="RO" unit="RdPg" desc="Default size of each journal data file"/>
+ <property name="tplIsInitialized" type="bool" access="RO" desc="Transaction prepared list has been initialized by a transactional prepare"/>
+ <property name="tplDirectory" type="sstr" access="RO" desc="Transaction prepared list directory"/>
+ <property name="tplWritePageSize" type="uint32" access="RO" unit="byte" desc="Page size in transaction prepared list write-page-cache"/>
+ <property name="tplWritePages" type="uint32" access="RO" unit="wpage" desc="Number of pages in transaction prepared list write-page-cache"/>
+ <property name="tplInitialFileCount" type="uint16" access="RO" unit="file" desc="Number of files initially allocated to transaction prepared list journal"/>
+ <property name="tplDataFileSize" type="uint32" access="RO" unit="byte" desc="Size of each journal data file in transaction prepared list journal"/>
+ <property name="tplCurrentFileCount" type="uint32" access="RO" unit="file" desc="Number of files currently allocated to transaction prepared list journal"/>
+
+ <statistic name="tplTransactionDepth" type="hilo32" unit="txn" desc="Number of currently enqueued prepared transactions"/>
+ <statistic name="tplTxnPrepares" type="count64" unit="record" desc="Total transaction prepares on transaction prepared list"/>
+ <statistic name="tplTxnCommits" type="count64" unit="record" desc="Total transaction commits on transaction prepared list"/>
+ <statistic name="tplTxnAborts" type="count64" unit="record" desc="Total transaction aborts on transaction prepared list"/>
+ <statistic name="tplOutstandingAIOs" type="hilo32" unit="aio_op" desc="Number of currently outstanding AIO requests in Async IO system"/>
+ </class>
+
+ <class name="Journal">
+ <property name="queueRef" type="objId" access="RO" references="qpid.Queue" isGeneralReference="y"/>
+ <property name="name" type="sstr" access="RO" index="y"/>
+ <property name="directory" type="sstr" access="RO" desc="Directory containing journal files"/>
+ <property name="baseFileName" type="sstr" access="RO" desc="Base filename prefix for journal"/>
+ <property name="writePageSize" type="uint32" access="RO" unit="byte" desc="Page size in write-page-cache"/>
+ <property name="writePages" type="uint32" access="RO" unit="wpage" desc="Number of pages in write-page-cache"/>
+ <property name="readPageSize" type="uint32" access="RO" unit="byte" desc="Page size in read-page-cache"/>
+ <property name="readPages" type="uint32" access="RO" unit="rpage" desc="Number of pages in read-page-cache"/>
+ <property name="initialFileCount" type="uint16" access="RO" unit="file" desc="Number of files initially allocated to this journal"/>
+ <property name="autoExpand" type="bool" access="RO" desc="Auto-expand enabled"/>
+ <property name="currentFileCount" type="uint16" access="RO" unit="file" desc="Number of files currently allocated to this journal"/>
+ <property name="maxFileCount" type="uint16" access="RO" unit="file" desc="Max number of files allowed for this journal"/>
+ <property name="dataFileSize" type="uint32" access="RO" unit="byte" desc="Size of each journal data file"/>
+
+ <statistic name="recordDepth" type="hilo32" unit="record" desc="Number of currently enqueued records (durable messages)"/>
+ <statistic name="enqueues" type="count64" unit="record" desc="Total enqueued records on journal"/>
+ <statistic name="dequeues" type="count64" unit="record" desc="Total dequeued records on journal"/>
+ <statistic name="txn" type="count32" unit="record" desc="Total open transactions (xids) on journal"/>
+ <statistic name="txnEnqueues" type="count64" unit="record" desc="Total transactional enqueued records on journal"/>
+ <statistic name="txnDequeues" type="count64" unit="record" desc="Total transactional dequeued records on journal"/>
+ <statistic name="txnCommits" type="count64" unit="record" desc="Total transactional commit records on journal"/>
+ <statistic name="txnAborts" type="count64" unit="record" desc="Total transactional abort records on journal"/>
+ <statistic name="outstandingAIOs" type="hilo32" unit="aio_op" desc="Number of currently outstanding AIO requests in Async IO system"/>
+
+<!--
+ The following are not yet "wired up" in JournalImpl.cpp
+-->
+ <statistic name="freeFileCount" type="hilo32" unit="file" desc="Number of files free on this journal. Includes free files trapped in holes."/>
+ <statistic name="availableFileCount" type="hilo32" unit="file" desc="Number of files available to be written. Excluding holes"/>
+ <statistic name="writeWaitFailures" type="count64" unit="record" desc="AIO Wait failures on write"/>
+ <statistic name="writeBusyFailures" type="count64" unit="record" desc="AIO Busy failures on write"/>
+ <statistic name="readRecordCount" type="count64" unit="record" desc="Records read from the journal"/>
+ <statistic name="readBusyFailures" type="count64" unit="record" desc="AIO Busy failures on read"/>
+ <statistic name="writePageCacheDepth" type="hilo32" unit="wpage" desc="Current depth of write-page-cache"/>
+ <statistic name="readPageCacheDepth" type="hilo32" unit="rpage" desc="Current depth of read-page-cache"/>
+
+ <method name="expand" desc="Increase number of files allocated for this journal">
+ <arg name="by" type="uint32" dir="I" desc="Number of files to increase journal size by"/>
+ </method>
+ </class>
+
+ <eventArguments>
+ <arg name="autoExpand" type="bool" desc="Journal auto-expand enabled"/>
+ <arg name="fileSize" type="uint32" desc="Journal file size in bytes"/>
+ <arg name="jrnlId" type="sstr" desc="Journal Id"/>
+ <arg name="numEnq" type="uint32" desc="Number of recovered enqueues"/>
+ <arg name="numFiles" type="uint16" desc="Number of journal files"/>
+ <arg name="numTxn" type="uint32" desc="Number of recovered transactions"/>
+ <arg name="numTxnDeq" type="uint32" desc="Number of recovered transactional dequeues"/>
+ <arg name="numTxnEnq" type="uint32" desc="Number of recovered transactional enqueues"/>
+ <arg name="what" type="sstr" desc="Description of event"/>
+ </eventArguments>
+ <event name="enqThresholdExceeded" sev="warn" args="jrnlId, what"/>
+ <event name="created" sev="notice" args="jrnlId, fileSize, numFiles"/>
+ <event name="full" sev="error" args="jrnlId, what"/>
+ <event name="recovered" sev="notice" args="jrnlId, fileSize, numFiles, numEnq, numTxn, numTxnEnq, numTxnDeq"/>
+</schema>
diff --git a/qpid/cpp/src/qpid/management/ManagementAgent.cpp b/qpid/cpp/src/qpid/management/ManagementAgent.cpp
index 7b8808c0a0..86e9d0be8d 100644
--- a/qpid/cpp/src/qpid/management/ManagementAgent.cpp
+++ b/qpid/cpp/src/qpid/management/ManagementAgent.cpp
@@ -28,11 +28,13 @@
#include "qpid/management/ManagementObject.h"
#include "qpid/broker/DeliverableMessage.h"
#include "qpid/log/Statement.h"
-#include <qpid/broker/Message.h>
+#include "qpid/broker/Message.h"
+#include "qpid/broker/Broker.h"
#include "qpid/framing/MessageTransferBody.h"
#include "qpid/framing/FieldValue.h"
#include "qpid/broker/amqp_0_10/MessageTransfer.h"
#include "qpid/sys/Time.h"
+#include "qpid/sys/Timer.h"
#include "qpid/sys/Thread.h"
#include "qpid/sys/PollableQueue.h"
#include "qpid/broker/ConnectionState.h"
@@ -47,6 +49,9 @@
#include <sstream>
#include <typeinfo>
+#include <boost/bind.hpp>
+#include <boost/function.hpp>
+
namespace qpid {
namespace management {
@@ -92,6 +97,32 @@ struct ScopedManagementContext
setManagementExecutionContext(0);
}
};
+
+typedef boost::function0<void> FireFunction;
+struct Periodic : public qpid::sys::TimerTask
+{
+ FireFunction fireFunction;
+ qpid::sys::Timer* timer;
+
+ Periodic (FireFunction f, qpid::sys::Timer* t, uint32_t seconds);
+ virtual ~Periodic ();
+ void fire ();
+};
+
+Periodic::Periodic (FireFunction f, qpid::sys::Timer* t, uint32_t _seconds)
+ : TimerTask(sys::Duration((_seconds ? _seconds : 1) * sys::TIME_SEC),
+ "ManagementAgent::periodicProcessing"),
+ fireFunction(f), timer(t) {}
+
+Periodic::~Periodic() {}
+
+void Periodic::fire()
+{
+ setupNextFire();
+ timer->add(this);
+ fireFunction();
+}
+
}
@@ -170,6 +201,8 @@ void ManagementAgent::configure(const string& _dataDir, bool _publish, uint16_t
sendQueue.reset(
new EventQueue(boost::bind(&ManagementAgent::sendEvents, this, _1), broker->getPoller()));
sendQueue->start();
+ timer = &broker->getTimer();
+ timer->add(new Periodic(boost::bind(&ManagementAgent::periodicProcessing, this), timer, interval));
// Get from file or generate and save to file.
if (dataDir.empty())
@@ -212,13 +245,6 @@ void ManagementAgent::configure(const string& _dataDir, bool _publish, uint16_t
}
}
-void ManagementAgent::pluginsInitialized() {
- // Do this here so cluster plugin has the chance to set up the timer.
- timer = &broker->getClusterTimer();
- timer->add(new Periodic(*this, interval));
-}
-
-
void ManagementAgent::setName(const string& vendor, const string& product, const string& instance)
{
if (vendor.find(':') != vendor.npos) {
@@ -420,20 +446,6 @@ void ManagementAgent::raiseEvent(const ManagementEvent& event, severity_t severi
}
}
-ManagementAgent::Periodic::Periodic (ManagementAgent& _agent, uint32_t _seconds)
- : TimerTask(sys::Duration((_seconds ? _seconds : 1) * sys::TIME_SEC),
- "ManagementAgent::periodicProcessing"),
- agent(_agent) {}
-
-ManagementAgent::Periodic::~Periodic() {}
-
-void ManagementAgent::Periodic::fire()
-{
- setupNextFire();
- agent.timer->add(this);
- agent.periodicProcessing();
-}
-
void ManagementAgent::clientAdded (const string& routingKey)
{
sys::Mutex::ScopedLock lock(userLock);
@@ -477,17 +489,6 @@ void ManagementAgent::clientAdded (const string& routingKey)
}
}
-void ManagementAgent::clusterUpdate() {
- // Called on all cluster memebers when a new member joins a cluster.
- // Set clientWasAdded so that on the next periodicProcessing we will do
- // a full update on all cluster members.
- sys::Mutex::ScopedLock l(userLock);
- moveNewObjects(); // keep lists consistent with updater/updatee.
- moveDeletedObjects();
- clientWasAdded = true;
- debugSnapshot("Cluster member joined");
-}
-
void ManagementAgent::encodeHeader (Buffer& buf, uint8_t opcode, uint32_t seq)
{
buf.putOctet ('A');
@@ -2550,70 +2551,6 @@ void ManagementAgent::SchemaClass::mapDecode(const Variant::Map& _map) {
}
}
-void ManagementAgent::exportSchemas(string& out) {
- Variant::List list_;
- Variant::Map map_, kmap, cmap;
-
- for (PackageMap::const_iterator i = packages.begin(); i != packages.end(); ++i) {
- string name = i->first;
- const ClassMap& classes = i ->second;
- for (ClassMap::const_iterator j = classes.begin(); j != classes.end(); ++j) {
- const SchemaClassKey& key = j->first;
- const SchemaClass& klass = j->second;
- if (klass.writeSchemaCall == 0) { // Ignore built-in schemas.
- // Encode name, schema-key, schema-class
-
- map_.clear();
- kmap.clear();
- cmap.clear();
-
- key.mapEncode(kmap);
- klass.mapEncode(cmap);
-
- map_["_pname"] = name;
- map_["_key"] = kmap;
- map_["_class"] = cmap;
- list_.push_back(map_);
- }
- }
- }
-
- ListCodec::encode(list_, out);
-}
-
-void ManagementAgent::importSchemas(qpid::framing::Buffer& inBuf) {
-
- string buf(inBuf.getPointer(), inBuf.available());
- Variant::List content;
- ListCodec::decode(buf, content);
- Variant::List::const_iterator l;
-
-
- for (l = content.begin(); l != content.end(); l++) {
- string package;
- SchemaClassKey key;
- SchemaClass klass;
- Variant::Map map_, kmap, cmap;
- Variant::Map::const_iterator i;
-
- map_ = l->asMap();
-
- if ((i = map_.find("_pname")) != map_.end()) {
- package = i->second.asString();
-
- if ((i = map_.find("_key")) != map_.end()) {
- key.mapDecode(i->second.asMap());
-
- if ((i = map_.find("_class")) != map_.end()) {
- klass.mapDecode(i->second.asMap());
-
- packages[package][key] = klass;
- }
- }
- }
- }
-}
-
void ManagementAgent::RemoteAgent::mapEncode(Variant::Map& map_) const {
Variant::Map _objId, _values;
@@ -2657,52 +2594,6 @@ void ManagementAgent::RemoteAgent::mapDecode(const Variant::Map& map_) {
mgmtObject->set_connectionRef(connectionRef);
}
-void ManagementAgent::exportAgents(string& out) {
- Variant::List list_;
- Variant::Map map_, omap, amap;
-
- for (RemoteAgentMap::const_iterator i = remoteAgents.begin();
- i != remoteAgents.end();
- ++i)
- {
- // TODO aconway 2010-03-04: see comment in ManagementAgent::RemoteAgent::encode
- boost::shared_ptr<RemoteAgent> agent(i->second);
-
- map_.clear();
- amap.clear();
-
- agent->mapEncode(amap);
- map_["_remote_agent"] = amap;
- list_.push_back(map_);
- }
-
- ListCodec::encode(list_, out);
-}
-
-void ManagementAgent::importAgents(qpid::framing::Buffer& inBuf) {
- string buf(inBuf.getPointer(), inBuf.available());
- Variant::List content;
- ListCodec::decode(buf, content);
- Variant::List::const_iterator l;
- sys::Mutex::ScopedLock lock(userLock);
-
- for (l = content.begin(); l != content.end(); l++) {
- boost::shared_ptr<RemoteAgent> agent(new RemoteAgent(*this));
- Variant::Map map_;
- Variant::Map::const_iterator i;
-
- map_ = l->asMap();
-
- if ((i = map_.find("_remote_agent")) != map_.end()) {
-
- agent->mapDecode(i->second.asMap());
-
- addObject (agent->mgmtObject, 0, false);
- remoteAgents[agent->connectionRef] = agent;
- }
- }
-}
-
namespace {
bool isDeletedMap(const ManagementObjectMap::value_type& value) {
return value.second->isDeleted();
@@ -2781,54 +2672,6 @@ Variant::Map ManagementAgent::toMap(const FieldTable& from)
return map;
}
-
-// Build up a list of the current set of deleted objects that are pending their
-// next (last) publish-ment.
-void ManagementAgent::exportDeletedObjects(DeletedObjectList& outList)
-{
- outList.clear();
-
- sys::Mutex::ScopedLock lock (userLock);
-
- moveNewObjects();
- moveDeletedObjects();
-
- // now copy the pending deletes into the outList
- for (PendingDeletedObjsMap::iterator mIter = pendingDeletedObjs.begin();
- mIter != pendingDeletedObjs.end(); mIter++) {
- for (DeletedObjectList::iterator lIter = mIter->second.begin();
- lIter != mIter->second.end(); lIter++) {
- outList.push_back(*lIter);
- }
- }
-}
-
-// Called by cluster to reset the management agent's list of deleted
-// objects to match the rest of the cluster.
-void ManagementAgent::importDeletedObjects(const DeletedObjectList& inList)
-{
- sys::Mutex::ScopedLock lock (userLock);
- sys::Mutex::ScopedLock objLock(objectLock);
- // Clear out any existing deleted objects
- moveNewObjects();
- pendingDeletedObjs.clear();
- ManagementObjectMap::iterator i = managementObjects.begin();
- // Silently drop any deleted objects left over from receiving the update.
- while (i != managementObjects.end()) {
- ManagementObject::shared_ptr object = i->second;
- if (object->isDeleted()) {
- managementObjects.erase(i++);
- }
- else ++i;
- }
- for (DeletedObjectList::const_iterator lIter = inList.begin(); lIter != inList.end(); lIter++) {
-
- std::string classkey((*lIter)->packageName + std::string(":") + (*lIter)->className);
- pendingDeletedObjs[classkey].push_back(*lIter);
- }
-}
-
-
// construct a DeletedObject from a management object.
ManagementAgent::DeletedObject::DeletedObject(ManagementObject::shared_ptr src, bool v1, bool v2)
: packageName(src->getPackageName()),
@@ -2866,45 +2709,6 @@ ManagementAgent::DeletedObject::DeletedObject(ManagementObject::shared_ptr src,
}
}
-
-
-// construct a DeletedObject from an encoded representation. Used by
-// clustering to move deleted objects between clustered brokers. See
-// DeletedObject::encode() for the reverse.
-ManagementAgent::DeletedObject::DeletedObject(const std::string& encoded)
-{
- qpid::types::Variant::Map map_;
- MapCodec::decode(encoded, map_);
-
- packageName = map_["_package_name"].getString();
- className = map_["_class_name"].getString();
- objectId = map_["_object_id"].getString();
-
- encodedV1Config = map_["_v1_config"].getString();
- encodedV1Inst = map_["_v1_inst"].getString();
- encodedV2 = map_["_v2_data"].asMap();
-}
-
-
-// encode a DeletedObject to a string buffer. Used by
-// clustering to move deleted objects between clustered brokers. See
-// DeletedObject(const std::string&) for the reverse.
-void ManagementAgent::DeletedObject::encode(std::string& toBuffer)
-{
- qpid::types::Variant::Map map_;
-
-
- map_["_package_name"] = packageName;
- map_["_class_name"] = className;
- map_["_object_id"] = objectId;
-
- map_["_v1_config"] = encodedV1Config;
- map_["_v1_inst"] = encodedV1Inst;
- map_["_v2_data"] = encodedV2;
-
- MapCodec::encode(map_, toBuffer);
-}
-
// Remove Deleted objects, and save for later publishing...
bool ManagementAgent::moveDeletedObjects() {
typedef vector<pair<ObjectId, ManagementObject::shared_ptr> > DeleteList;
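The hunks above replace the old ManagementAgent::Periodic member class with a file-local task driven by a boost::function and an explicit Timer*, scheduled from configure() on the broker's timer instead of from pluginsInitialized() on the cluster timer. A minimal sketch of the self-rescheduling pattern, using simplified stand-ins for qpid::sys::Timer and TimerTask (assumptions, not the real classes):

    #include <boost/bind.hpp>
    #include <boost/function.hpp>
    #include <iostream>

    struct TimerTask { virtual ~TimerTask() {} virtual void fire() = 0; };
    struct Timer { void add(TimerTask* /*t*/) { /* schedule t->fire() after its delay */ } };

    typedef boost::function0<void> FireFunction;

    struct Periodic : public TimerTask {
        FireFunction fireFunction;
        Timer* timer;
        Periodic(FireFunction f, Timer* t) : fireFunction(f), timer(t) {}
        void fire() {
            timer->add(this);   // re-arm first so a slow callback cannot stall the schedule
            fireFunction();     // then run the bound work, e.g. periodicProcessing()
        }
    };

    struct Agent { void periodicProcessing() { std::cout << "tick" << std::endl; } };
    // usage: timer.add(new Periodic(boost::bind(&Agent::periodicProcessing, &agent), &timer));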
diff --git a/qpid/cpp/src/qpid/management/ManagementAgent.h b/qpid/cpp/src/qpid/management/ManagementAgent.h
index 7f1a2e3e66..6de5d1d719 100644
--- a/qpid/cpp/src/qpid/management/ManagementAgent.h
+++ b/qpid/cpp/src/qpid/management/ManagementAgent.h
@@ -26,7 +26,6 @@
#include "qpid/broker/Exchange.h"
#include "qpid/framing/Uuid.h"
#include "qpid/sys/Mutex.h"
-#include "qpid/sys/Timer.h"
#include "qpid/broker/ConnectionToken.h"
#include "qpid/management/ManagementObject.h"
#include "qpid/management/ManagementEvent.h"
@@ -47,6 +46,9 @@ namespace qpid {
namespace broker {
class ConnectionState;
}
+namespace sys {
+class Timer;
+}
namespace management {
class ManagementAgent
@@ -75,11 +77,6 @@ public:
/** Called before plugins are initialized */
void configure (const std::string& dataDir, bool publish, uint16_t interval,
qpid::broker::Broker* broker, int threadPoolSize);
- /** Called after plugins are initialized. */
- void pluginsInitialized();
-
- /** Called by cluster to suppress management output during update. */
- void suppress(bool s) { suppressed = s; }
void setName(const std::string& vendor,
const std::string& product,
@@ -112,8 +109,6 @@ public:
severity_t severity = SEV_DEFAULT);
QPID_BROKER_EXTERN void clientAdded (const std::string& routingKey);
- QPID_BROKER_EXTERN void clusterUpdate();
-
bool dispatchCommand (qpid::broker::Deliverable& msg,
const std::string& routingKey,
const framing::FieldTable* args,
@@ -123,25 +118,6 @@ public:
/** Disallow a method. Attempts to call it will receive an exception with message. */
void disallow(const std::string& className, const std::string& methodName, const std::string& message);
- /** Disallow all QMFv1 methods (used in clustered brokers). */
- void disallowV1Methods() { disallowAllV1Methods = true; }
-
- /** Serialize my schemas as a binary blob into schemaOut */
- void exportSchemas(std::string& schemaOut);
-
- /** Serialize my remote-agent map as a binary blob into agentsOut */
- void exportAgents(std::string& agentsOut);
-
- /** Decode a serialized schemas and add to my schema cache */
- void importSchemas(framing::Buffer& inBuf);
-
- /** Decode a serialized agent map */
- void importAgents(framing::Buffer& inBuf);
-
- // these are in support of the managementSetup-state stuff, for synch'ing clustered brokers
- uint64_t getNextObjectId(void) { return nextObjectId; }
- void setNextObjectId(uint64_t o) { nextObjectId = o; }
-
uint16_t getBootSequence(void) { return bootSequence; }
void setBootSequence(uint16_t b) { bootSequence = b; writeData(); }
@@ -150,20 +126,11 @@ public:
static types::Variant::Map toMap(const framing::FieldTable& from);
- // For Clustering: management objects that have been marked as
- // "deleted", but are waiting for their last published object
- // update are not visible to the cluster replication code. These
- // interfaces allow clustering to gather up all the management
- // objects that are deleted in order to allow all clustered
- // brokers to publish the same set of deleted objects.
-
class DeletedObject {
public:
typedef boost::shared_ptr<DeletedObject> shared_ptr;
DeletedObject(ManagementObject::shared_ptr, bool v1, bool v2);
- DeletedObject( const std::string &encoded );
~DeletedObject() {};
- void encode( std::string& toBuffer );
const std::string getKey() const {
// used to batch up objects of the same class type
return std::string(packageName + std::string(":") + className);
@@ -183,22 +150,7 @@ public:
typedef std::vector<DeletedObject::shared_ptr> DeletedObjectList;
- /** returns a snapshot of all currently deleted management objects. */
- void exportDeletedObjects( DeletedObjectList& outList );
-
- /** Import a list of deleted objects to send on next publish interval. */
- void importDeletedObjects( const DeletedObjectList& inList );
-
private:
- struct Periodic : public qpid::sys::TimerTask
- {
- ManagementAgent& agent;
-
- Periodic (ManagementAgent& agent, uint32_t seconds);
- virtual ~Periodic ();
- void fire ();
- };
-
// Storage for tracking remote management agents, attached via the client
// management agent API.
//
diff --git a/qpid/cpp/src/qpid/store/MessageStorePlugin.cpp b/qpid/cpp/src/qpid/store/MessageStorePlugin.cpp
index c6b0e1a53a..b876bd6b6d 100644
--- a/qpid/cpp/src/qpid/store/MessageStorePlugin.cpp
+++ b/qpid/cpp/src/qpid/store/MessageStorePlugin.cpp
@@ -136,12 +136,6 @@ MessageStorePlugin::providerAvailable(const std::string name,
QPID_LOG(warning, "Storage provider " << name << " duplicate; ignored.");
}
-void
-MessageStorePlugin::truncateInit(const bool /*saveStoreContent*/)
-{
- QPID_LOG(info, "Store: truncateInit");
-}
-
/**
* Record the existence of a durable queue
diff --git a/qpid/cpp/src/qpid/store/MessageStorePlugin.h b/qpid/cpp/src/qpid/store/MessageStorePlugin.h
index 4a9bb2aecb..1fcde6683d 100644
--- a/qpid/cpp/src/qpid/store/MessageStorePlugin.h
+++ b/qpid/cpp/src/qpid/store/MessageStorePlugin.h
@@ -24,18 +24,22 @@
#include "qpid/Plugin.h"
#include "qpid/Options.h"
-#include "qpid/broker/Broker.h"
#include "qpid/broker/MessageStore.h"
-#include "qpid/broker/PersistableExchange.h"
-#include "qpid/broker/PersistableMessage.h"
-#include "qpid/broker/PersistableQueue.h"
-#include "qpid/management/Manageable.h"
+//#include "qpid/management/Manageable.h"
#include <string>
using namespace qpid;
namespace qpid {
+
+namespace broker {
+class Broker;
+class PersistableExchange;
+class PersistableMessage;
+class PersistableQueue;
+}
+
namespace store {
class StorageProvider;
@@ -82,18 +86,6 @@ class MessageStorePlugin :
/**
* @name Methods inherited from qpid::broker::MessageStore
*/
- //@{
- /**
- * If called before recovery, will discard the database and reinitialize
- * using an empty store. This is used when cluster nodes recover and
- * must get their content from a cluster sync rather than directly from
- * the store.
- *
- * @param saveStoreContent If true, the store's contents should be
- * saved to a backup location before
- * reinitializing the store content.
- */
- virtual void truncateInit(const bool saveStoreContent = false);
/**
* Record the existence of a durable queue
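The header change above swaps full broker includes for forward declarations, which works because MessageStorePlugin.h only refers to these types through pointers and references. A generic sketch of the technique, with stand-in names rather than the real qpid headers:

    // plugin.h -- declarations are enough when only pointers/references are stored
    namespace broker { class Broker; class PersistableQueue; }

    class Plugin {
    public:
        Plugin() : attached(0), lastQueue(0) {}
        void attach(broker::Broker& b) { attached = &b; }   // address-of an incomplete type is fine
    private:
        broker::Broker* attached;
        broker::PersistableQueue* lastQueue;
    };

    // plugin.cpp would #include the full broker headers before calling through these pointers.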
diff --git a/qpid/cpp/src/qpid/store/StorageProvider.h b/qpid/cpp/src/qpid/store/StorageProvider.h
index d162cc58ec..de12ffb869 100644
--- a/qpid/cpp/src/qpid/store/StorageProvider.h
+++ b/qpid/cpp/src/qpid/store/StorageProvider.h
@@ -143,20 +143,6 @@ public:
/**
* @name Methods inherited from qpid::broker::MessageStore
*/
- //@{
- /**
- * If called after init() but before recovery, will discard the database
- * and reinitialize using an empty store dir. If @a pushDownStoreFiles
- * is true, the content of the store dir will be moved to a backup dir
- * inside the store dir. This is used when cluster nodes recover and must
- * get thier content from a cluster sync rather than directly fromt the
- * store.
- *
- * @param pushDownStoreFiles If true, will move content of the store dir
- * into a subdir, leaving the store dir
- * otherwise empty.
- */
- virtual void truncateInit(const bool pushDownStoreFiles = false) = 0;
/**
* Record the existence of a durable queue
diff --git a/qpid/cpp/src/qpid/store/ms-clfs/MSSqlClfsProvider.cpp b/qpid/cpp/src/qpid/store/ms-clfs/MSSqlClfsProvider.cpp
index 586aaaf980..90785263d3 100644
--- a/qpid/cpp/src/qpid/store/ms-clfs/MSSqlClfsProvider.cpp
+++ b/qpid/cpp/src/qpid/store/ms-clfs/MSSqlClfsProvider.cpp
@@ -26,6 +26,7 @@
#include <string>
#include <windows.h>
#include <clfsw32.h>
+#include <qpid/broker/Broker.h>
#include <qpid/broker/RecoverableQueue.h>
#include <qpid/log/Statement.h>
#include <qpid/store/MessageStorePlugin.h>
@@ -108,20 +109,6 @@ public:
/**
* @name Methods inherited from qpid::broker::MessageStore
*/
- //@{
- /**
- * If called after init() but before recovery, will discard the database
- * and reinitialize using an empty store dir. If @a pushDownStoreFiles
- * is true, the content of the store dir will be moved to a backup dir
- * inside the store dir. This is used when cluster nodes recover and must
- * get their content from a cluster sync rather than directly from the
- * store.
- *
- * @param pushDownStoreFiles If true, will move content of the store dir
- * into a subdir, leaving the store dir
- * otherwise empty.
- */
- virtual void truncateInit(const bool pushDownStoreFiles = false);
/**
* Record the existence of a durable queue
@@ -467,11 +454,6 @@ MSSqlClfsProvider::activate(MessageStorePlugin &store)
}
void
-MSSqlClfsProvider::truncateInit(const bool pushDownStoreFiles)
-{
-}
-
-void
MSSqlClfsProvider::create(PersistableQueue& queue,
const qpid::framing::FieldTable& /*args needed for jrnl*/)
{
diff --git a/qpid/cpp/src/qpid/store/ms-sql/MSSqlProvider.cpp b/qpid/cpp/src/qpid/store/ms-sql/MSSqlProvider.cpp
index 7f22db3d02..1432cc8fca 100644
--- a/qpid/cpp/src/qpid/store/ms-sql/MSSqlProvider.cpp
+++ b/qpid/cpp/src/qpid/store/ms-sql/MSSqlProvider.cpp
@@ -92,20 +92,6 @@ public:
/**
* @name Methods inherited from qpid::broker::MessageStore
*/
- //@{
- /**
- * If called after init() but before recovery, will discard the database
- * and reinitialize using an empty store dir. If @a pushDownStoreFiles
- * is true, the content of the store dir will be moved to a backup dir
- * inside the store dir. This is used when cluster nodes recover and must
- * get thier content from a cluster sync rather than directly fromt the
- * store.
- *
- * @param pushDownStoreFiles If true, will move content of the store dir
- * into a subdir, leaving the store dir
- * otherwise empty.
- */
- virtual void truncateInit(const bool pushDownStoreFiles = false);
/**
* Record the existence of a durable queue
@@ -392,11 +378,6 @@ MSSqlProvider::activate(MessageStorePlugin &store)
}
void
-MSSqlProvider::truncateInit(const bool pushDownStoreFiles)
-{
-}
-
-void
MSSqlProvider::create(PersistableQueue& queue,
const qpid::framing::FieldTable& /*args needed for jrnl*/)
{
diff --git a/qpid/cpp/src/qpid/sys/AsynchIOHandler.cpp b/qpid/cpp/src/qpid/sys/AsynchIOHandler.cpp
index 99e745c698..0225b11d27 100644
--- a/qpid/cpp/src/qpid/sys/AsynchIOHandler.cpp
+++ b/qpid/cpp/src/qpid/sys/AsynchIOHandler.cpp
@@ -51,14 +51,14 @@ struct ProtocolTimeoutTask : public sys::TimerTask {
}
};
-AsynchIOHandler::AsynchIOHandler(const std::string& id, ConnectionCodec::Factory* f, bool nodict0) :
+AsynchIOHandler::AsynchIOHandler(const std::string& id, ConnectionCodec::Factory* f, bool isClient0, bool nodict0) :
identifier(id),
aio(0),
factory(f),
codec(0),
reads(0),
readError(false),
- isClient(false),
+ isClient(isClient0),
nodict(nodict0),
readCredit(InfiniteCredit)
{}
diff --git a/qpid/cpp/src/qpid/sys/AsynchIOHandler.h b/qpid/cpp/src/qpid/sys/AsynchIOHandler.h
index 6e70606a04..91ddb022af 100644
--- a/qpid/cpp/src/qpid/sys/AsynchIOHandler.h
+++ b/qpid/cpp/src/qpid/sys/AsynchIOHandler.h
@@ -60,12 +60,10 @@ class AsynchIOHandler : public OutputControl {
void write(const framing::ProtocolInitiation&);
public:
- QPID_COMMON_EXTERN AsynchIOHandler(const std::string& id, qpid::sys::ConnectionCodec::Factory* f, bool nodict);
+ QPID_COMMON_EXTERN AsynchIOHandler(const std::string& id, qpid::sys::ConnectionCodec::Factory* f, bool isClient, bool nodict);
QPID_COMMON_EXTERN ~AsynchIOHandler();
QPID_COMMON_EXTERN void init(AsynchIO* a, Timer& timer, uint32_t maxTime);
- QPID_COMMON_INLINE_EXTERN void setClient() { isClient = true; }
-
// Output side
QPID_COMMON_EXTERN void abort();
QPID_COMMON_EXTERN void activateOutput();
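With the change above, AsynchIOHandler learns its client/server role in its constructor and setClient() disappears, so the role can no longer be flipped after construction. A tiny stand-in sketch of that construction-time flag (not the real class):

    #include <string>

    class Handler {
        const std::string identifier;
        const bool isClient;   // fixed for the life of the handler
    public:
        Handler(const std::string& id, bool isClient0) : identifier(id), isClient(isClient0) {}
        const std::string& id() const { return identifier; }
        bool client() const { return isClient; }
    };

    // incoming side:  Handler in("peer address", false);
    // outgoing side:  Handler out("link name", true);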
diff --git a/qpid/cpp/src/qpid/sys/FileSysDir.h b/qpid/cpp/src/qpid/sys/FileSysDir.h
index ffe7823f0a..7432fe39c9 100755
--- a/qpid/cpp/src/qpid/sys/FileSysDir.h
+++ b/qpid/cpp/src/qpid/sys/FileSysDir.h
@@ -54,6 +54,15 @@ class FileSysDir
void mkdir(void);
+ typedef void Callback(const std::string&);
+
+ /**
+ * Call the Callback function for every regular file in the directory
+ *
+ * @param cb Callback function that receives the full path to the file
+ */
+ void forEachFile(Callback cb) const;
+
std::string getPath () { return dirPath; }
};
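A hypothetical use of the forEachFile() interface declared above; the callback is a plain function that receives the full path of each regular file. The directory path and callback name are illustrative:

    #include <iostream>
    #include <string>

    static void loadModuleFile(const std::string& path) {
        std::cout << "would load: " << path << std::endl;
    }

    // qpid::sys::FileSysDir dir("/usr/lib/qpid/plugins");   // assumed location
    // dir.forEachFile(loadModuleFile);   // invoked once per regular file in the directory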
diff --git a/qpid/cpp/src/qpid/sys/OutputControl.h b/qpid/cpp/src/qpid/sys/OutputControl.h
index eae99beb0f..ad5caa3168 100644
--- a/qpid/cpp/src/qpid/sys/OutputControl.h
+++ b/qpid/cpp/src/qpid/sys/OutputControl.h
@@ -1,3 +1,6 @@
+#ifndef QPID_SYS_OUTPUT_CONTROL_H
+#define QPID_SYS_OUTPUT_CONTROL_H
+
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
@@ -21,9 +24,6 @@
#include "qpid/sys/IntegerTypes.h"
-#ifndef _OutputControl_
-#define _OutputControl_
-
namespace qpid {
namespace sys {
@@ -40,4 +40,4 @@ namespace sys {
}
-#endif
+#endif /*!QPID_SYS_OUTPUT_CONTROL_H*/
diff --git a/qpid/cpp/src/qpid/sys/ProtocolFactory.h b/qpid/cpp/src/qpid/sys/ProtocolFactory.h
index ed573bd2f6..236398c111 100644
--- a/qpid/cpp/src/qpid/sys/ProtocolFactory.h
+++ b/qpid/cpp/src/qpid/sys/ProtocolFactory.h
@@ -42,6 +42,7 @@ class ProtocolFactory : public qpid::SharedObject<ProtocolFactory>
virtual void accept(boost::shared_ptr<Poller>, ConnectionCodec::Factory*) = 0;
virtual void connect(
boost::shared_ptr<Poller>,
+ const std::string& name,
const std::string& host, const std::string& port,
ConnectionCodec::Factory* codec,
ConnectFailedCallback failed) = 0;
diff --git a/qpid/cpp/src/qpid/sys/RdmaIOPlugin.cpp b/qpid/cpp/src/qpid/sys/RdmaIOPlugin.cpp
index b491d28d0a..e1f4362d64 100644
--- a/qpid/cpp/src/qpid/sys/RdmaIOPlugin.cpp
+++ b/qpid/cpp/src/qpid/sys/RdmaIOPlugin.cpp
@@ -23,6 +23,7 @@
#include "qpid/Plugin.h"
#include "qpid/broker/Broker.h"
+#include "qpid/broker/NameGenerator.h"
#include "qpid/framing/AMQP_HighestVersion.h"
#include "qpid/log/Statement.h"
#include "qpid/sys/rdma/RdmaIO.h"
@@ -83,7 +84,7 @@ class RdmaIOHandler : public OutputControl {
};
RdmaIOHandler::RdmaIOHandler(Rdma::Connection::intrusive_ptr c, qpid::sys::ConnectionCodec::Factory* f) :
- identifier(c->getFullName()),
+ identifier(broker::QPID_NAME_PREFIX+c->getFullName()),
factory(f),
codec(0),
readError(false),
@@ -250,7 +251,7 @@ class RdmaIOProtocolFactory : public ProtocolFactory {
public:
RdmaIOProtocolFactory(int16_t port, int backlog);
void accept(Poller::shared_ptr, ConnectionCodec::Factory*);
- void connect(Poller::shared_ptr, const string& host, const std::string& port, ConnectionCodec::Factory*, ConnectFailedCallback);
+ void connect(Poller::shared_ptr, const std::string& name, const string& host, const std::string& port, ConnectionCodec::Factory*, ConnectFailedCallback);
uint16_t getPort() const;
@@ -371,6 +372,7 @@ void RdmaIOProtocolFactory::connected(Poller::shared_ptr poller, Rdma::Connectio
void RdmaIOProtocolFactory::connect(
Poller::shared_ptr poller,
+ const std::string& /*name*/,
const std::string& host, const std::string& port,
ConnectionCodec::Factory* f,
ConnectFailedCallback failed)
diff --git a/qpid/cpp/src/qpid/sys/SslPlugin.cpp b/qpid/cpp/src/qpid/sys/SslPlugin.cpp
index 0638b55db6..a40da24eb8 100644
--- a/qpid/cpp/src/qpid/sys/SslPlugin.cpp
+++ b/qpid/cpp/src/qpid/sys/SslPlugin.cpp
@@ -23,6 +23,7 @@
#include "qpid/Plugin.h"
#include "qpid/broker/Broker.h"
+#include "qpid/broker/NameGenerator.h"
#include "qpid/log/Statement.h"
#include "qpid/sys/AsynchIOHandler.h"
#include "qpid/sys/AsynchIO.h"
@@ -76,15 +77,16 @@ class SslProtocolFactory : public ProtocolFactory {
SslProtocolFactory(const qpid::broker::Broker::Options& opts, const SslServerOptions& options,
Timer& timer);
void accept(Poller::shared_ptr, ConnectionCodec::Factory*);
- void connect(Poller::shared_ptr, const std::string& host, const std::string& port,
+ void connect(Poller::shared_ptr, const std::string& name, const std::string& host, const std::string& port,
ConnectionCodec::Factory*,
ConnectFailedCallback);
uint16_t getPort() const;
private:
- void established(Poller::shared_ptr, const Socket&, ConnectionCodec::Factory*,
- bool isClient);
+ void establishedIncoming(Poller::shared_ptr, const Socket&, ConnectionCodec::Factory*);
+ void establishedOutgoing(Poller::shared_ptr, const Socket&, ConnectionCodec::Factory*, const std::string&);
+ void establishedCommon(AsynchIOHandler*, Poller::shared_ptr , const Socket&);
void connectFailed(const Socket&, int, const std::string&, ConnectFailedCallback);
};
@@ -220,21 +222,24 @@ SslProtocolFactory::SslProtocolFactory(const qpid::broker::Broker::Options& opts
}
}
+void SslProtocolFactory::establishedIncoming(Poller::shared_ptr poller, const Socket& s,
+ ConnectionCodec::Factory* f) {
+ AsynchIOHandler* async = new AsynchIOHandler(broker::QPID_NAME_PREFIX+s.getFullAddress(), f, false, false);
+ establishedCommon(async, poller, s);
+}
-void SslProtocolFactory::established(Poller::shared_ptr poller, const Socket& s,
- ConnectionCodec::Factory* f, bool isClient) {
-
- AsynchIOHandler* async = new AsynchIOHandler(s.getFullAddress(), f, nodict);
+void SslProtocolFactory::establishedOutgoing(Poller::shared_ptr poller, const Socket& s,
+ ConnectionCodec::Factory* f, const std::string& name) {
+ AsynchIOHandler* async = new AsynchIOHandler(name, f, true, false);
+ establishedCommon(async, poller, s);
+}
+void SslProtocolFactory::establishedCommon(AsynchIOHandler* async, Poller::shared_ptr poller, const Socket& s) {
if (tcpNoDelay) {
s.setTcpNoDelay();
QPID_LOG(info, "Set TCP_NODELAY on connection to " << s.getPeerAddress());
}
- if (isClient) {
- async->setClient();
- }
-
AsynchIO* aio = AsynchIO::create(
s,
boost::bind(&AsynchIOHandler::readbuff, async, _1, _2),
@@ -257,7 +262,7 @@ void SslProtocolFactory::accept(Poller::shared_ptr poller,
for (unsigned i = 0; i<listeners.size(); ++i) {
acceptors.push_back(
AsynchAcceptor::create(listeners[i],
- boost::bind(&SslProtocolFactory::established, this, poller, _1, fact, false)));
+ boost::bind(&SslProtocolFactory::establishedIncoming, this, poller, _1, fact)));
acceptors[i].start(poller);
}
}
@@ -273,6 +278,7 @@ void SslProtocolFactory::connectFailed(
void SslProtocolFactory::connect(
Poller::shared_ptr poller,
+ const std::string& name,
const std::string& host, const std::string& port,
ConnectionCodec::Factory* fact,
ConnectFailedCallback failed)
@@ -289,8 +295,8 @@ void SslProtocolFactory::connect(
*socket,
host,
port,
- boost::bind(&SslProtocolFactory::established,
- this, poller, _1, fact, true),
+ boost::bind(&SslProtocolFactory::establishedOutgoing,
+ this, poller, _1, fact, name),
boost::bind(&SslProtocolFactory::connectFailed,
this, _1, _2, _3, failed));
c->start(poller);
diff --git a/qpid/cpp/src/qpid/sys/TCPIOPlugin.cpp b/qpid/cpp/src/qpid/sys/TCPIOPlugin.cpp
index da0bd31caa..1ef8708cd0 100644
--- a/qpid/cpp/src/qpid/sys/TCPIOPlugin.cpp
+++ b/qpid/cpp/src/qpid/sys/TCPIOPlugin.cpp
@@ -23,6 +23,7 @@
#include "qpid/Plugin.h"
#include "qpid/broker/Broker.h"
+#include "qpid/broker/NameGenerator.h"
#include "qpid/log/Statement.h"
#include "qpid/sys/AsynchIOHandler.h"
#include "qpid/sys/AsynchIO.h"
@@ -50,15 +51,17 @@ class AsynchIOProtocolFactory : public ProtocolFactory {
public:
AsynchIOProtocolFactory(const qpid::broker::Broker::Options& opts, Timer& timer, bool shouldListen);
void accept(Poller::shared_ptr, ConnectionCodec::Factory*);
- void connect(Poller::shared_ptr, const std::string& host, const std::string& port,
+ void connect(Poller::shared_ptr, const std::string& name,
+ const std::string& host, const std::string& port,
ConnectionCodec::Factory*,
ConnectFailedCallback);
uint16_t getPort() const;
private:
- void established(Poller::shared_ptr, const Socket&, ConnectionCodec::Factory*,
- bool isClient);
+ void establishedIncoming(Poller::shared_ptr, const Socket&, ConnectionCodec::Factory*);
+ void establishedOutgoing(Poller::shared_ptr, const Socket&, ConnectionCodec::Factory*, const std::string&);
+ void establishedCommon(AsynchIOHandler*, Poller::shared_ptr , const Socket&);
void connectFailed(const Socket&, int, const std::string&, ConnectFailedCallback);
};
@@ -171,17 +174,24 @@ AsynchIOProtocolFactory::AsynchIOProtocolFactory(const qpid::broker::Broker::Opt
}
}
-void AsynchIOProtocolFactory::established(Poller::shared_ptr poller, const Socket& s,
- ConnectionCodec::Factory* f, bool isClient) {
- AsynchIOHandler* async = new AsynchIOHandler(s.getFullAddress(), f, false);
+void AsynchIOProtocolFactory::establishedIncoming(Poller::shared_ptr poller, const Socket& s,
+ ConnectionCodec::Factory* f) {
+ AsynchIOHandler* async = new AsynchIOHandler(broker::QPID_NAME_PREFIX+s.getFullAddress(), f, false, false);
+ establishedCommon(async, poller, s);
+}
+
+void AsynchIOProtocolFactory::establishedOutgoing(Poller::shared_ptr poller, const Socket& s,
+ ConnectionCodec::Factory* f, const std::string& name) {
+ AsynchIOHandler* async = new AsynchIOHandler(name, f, true, false);
+ establishedCommon(async, poller, s);
+}
+void AsynchIOProtocolFactory::establishedCommon(AsynchIOHandler* async, Poller::shared_ptr poller, const Socket& s) {
if (tcpNoDelay) {
s.setTcpNoDelay();
QPID_LOG(info, "Set TCP_NODELAY on connection to " << s.getPeerAddress());
}
- if (isClient)
- async->setClient();
AsynchIO* aio = AsynchIO::create
(s,
boost::bind(&AsynchIOHandler::readbuff, async, _1, _2),
@@ -204,7 +214,7 @@ void AsynchIOProtocolFactory::accept(Poller::shared_ptr poller,
for (unsigned i = 0; i<listeners.size(); ++i) {
acceptors.push_back(
AsynchAcceptor::create(listeners[i],
- boost::bind(&AsynchIOProtocolFactory::established, this, poller, _1, fact, false)));
+ boost::bind(&AsynchIOProtocolFactory::establishedIncoming, this, poller, _1, fact)));
acceptors[i].start(poller);
}
}
@@ -220,6 +230,7 @@ void AsynchIOProtocolFactory::connectFailed(
void AsynchIOProtocolFactory::connect(
Poller::shared_ptr poller,
+ const std::string& name,
const std::string& host, const std::string& port,
ConnectionCodec::Factory* fact,
ConnectFailedCallback failed)
@@ -235,8 +246,8 @@ void AsynchIOProtocolFactory::connect(
*socket,
host,
port,
- boost::bind(&AsynchIOProtocolFactory::established,
- this, poller, _1, fact, true),
+ boost::bind(&AsynchIOProtocolFactory::establishedOutgoing,
+ this, poller, _1, fact, name),
boost::bind(&AsynchIOProtocolFactory::connectFailed,
this, _1, _2, _3, failed));
c->start(poller);
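Both the TCP and SSL factories now split established() into establishedIncoming()/establishedOutgoing() plus a shared establishedCommon(); the two entry points differ only in the identifier and client flag they give the handler. A hedged sketch of that shape, with simplified stand-in types:

    #include <string>

    struct Handler { Handler(const std::string& /*id*/, bool /*isClient*/) {} };
    struct Socket  { std::string fullAddress() const { return "127.0.0.1:5672"; } };

    struct Factory {
        void establishedIncoming(const Socket& s) {
            common(new Handler(s.fullAddress(), /*isClient=*/false));
        }
        void establishedOutgoing(const Socket&, const std::string& name) {
            common(new Handler(name, /*isClient=*/true));
        }
    private:
        void common(Handler* h) {
            // TCP_NODELAY, AsynchIO wiring, etc. would go here; the real code
            // hands ownership of the handler to the AsynchIO machinery.
            delete h;
        }
    };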
diff --git a/qpid/cpp/src/qpid/sys/Timer.cpp b/qpid/cpp/src/qpid/sys/Timer.cpp
index 83d1a5260b..f8eef2c9ec 100644
--- a/qpid/cpp/src/qpid/sys/Timer.cpp
+++ b/qpid/cpp/src/qpid/sys/Timer.cpp
@@ -96,12 +96,7 @@ void TimerTask::cancel() {
state = CANCELLED;
}
-void TimerTask::setFired() {
- // Set nextFireTime to just before now, making readyToFire() true.
- nextFireTime = AbsTime(sys::now(), Duration(-1));
-}
-
-
+// TODO AStitcher 21/08/09 The thresholds for emitting warnings are a little arbitrary
Timer::Timer() :
active(false),
late(50 * TIME_MSEC),
@@ -133,7 +128,6 @@ public:
}
};
-// TODO AStitcher 21/08/09 The threshholds for emitting warnings are a little arbitrary
void Timer::run()
{
Monitor::ScopedLock l(monitor);
@@ -151,10 +145,6 @@ void Timer::run()
{
TimerTaskCallbackScope s(*t);
if (s) {
- {
- Monitor::ScopedUnlock u(monitor);
- drop(t);
- }
if (delay > lateCancel) {
QPID_LOG(debug, t->name << " cancelled timer woken up " <<
delay / TIME_MSEC << "ms late");
@@ -235,9 +225,6 @@ void Timer::fire(boost::intrusive_ptr<TimerTask> t) {
}
}
-// Provided for subclasses: called when a task is droped.
-void Timer::drop(boost::intrusive_ptr<TimerTask>) {}
-
bool operator<(const intrusive_ptr<TimerTask>& a,
const intrusive_ptr<TimerTask>& b)
{
diff --git a/qpid/cpp/src/qpid/sys/Timer.h b/qpid/cpp/src/qpid/sys/Timer.h
index 5731b8d977..5045009609 100644
--- a/qpid/cpp/src/qpid/sys/Timer.h
+++ b/qpid/cpp/src/qpid/sys/Timer.h
@@ -67,10 +67,6 @@ class TimerTask : public RefCounted {
std::string getName() const { return name; }
- // Move the nextFireTime so readyToFire is true.
- // Used by the cluster, where tasks are fired on cluster events, not on local time.
- QPID_COMMON_EXTERN void setFired();
-
protected:
// Must be overridden with callback
virtual void fire() = 0;
@@ -99,7 +95,7 @@ class Timer : private Runnable {
protected:
QPID_COMMON_EXTERN virtual void fire(boost::intrusive_ptr<TimerTask> task);
- QPID_COMMON_EXTERN virtual void drop(boost::intrusive_ptr<TimerTask> task);
+
// Allow derived classes to change the late/overran thresholds.
Duration late;
Duration overran;
diff --git a/qpid/cpp/src/qpid/sys/posix/BSDSocket.cpp b/qpid/cpp/src/qpid/sys/posix/BSDSocket.cpp
index 4fe85b93fb..7c31b13ae9 100644
--- a/qpid/cpp/src/qpid/sys/posix/BSDSocket.cpp
+++ b/qpid/cpp/src/qpid/sys/posix/BSDSocket.cpp
@@ -162,11 +162,6 @@ void BSDSocket::connect(const SocketAddress& addr) const
// remote port (which is unoccupied) as the port to bind the local
// end of the socket, resulting in a "circular" connection.
//
- // This seems like something the OS should prevent but I have
- // confirmed that sporadic hangs in
- // cluster_tests.LongTests.test_failover on RHEL5 are caused by
- // such a circular connection.
- //
// Raise an error if we see such a connection, since we know there is
// no listener on the peer address.
//
diff --git a/qpid/cpp/src/qpid/sys/posix/FileSysDir.cpp b/qpid/cpp/src/qpid/sys/posix/FileSysDir.cpp
index 22dc487e74..cec580164d 100755
--- a/qpid/cpp/src/qpid/sys/posix/FileSysDir.cpp
+++ b/qpid/cpp/src/qpid/sys/posix/FileSysDir.cpp
@@ -18,6 +18,7 @@
#include "qpid/sys/FileSysDir.h"
#include "qpid/sys/StrError.h"
+#include "qpid/log/Statement.h"
#include "qpid/Exception.h"
#include <sys/types.h>
@@ -25,6 +26,8 @@
#include <fcntl.h>
#include <cerrno>
#include <unistd.h>
+#include <dirent.h>
+#include <stdlib.h>
namespace qpid {
namespace sys {
@@ -51,4 +54,27 @@ void FileSysDir::mkdir(void)
throw Exception ("Can't create directory: " + dirPath);
}
+void FileSysDir::forEachFile(Callback cb) const {
+
+ ::dirent** namelist;
+
+ int n = scandir(dirPath.c_str(), &namelist, 0, alphasort);
+ if (n == -1) throw Exception (strError(errno) + ": Can't scan directory: " + dirPath);
+
+ for (int i = 0; i<n; ++i) {
+ std::string fullpath = dirPath + "/" + namelist[i]->d_name;
+ // Filter out non files/stat problems etc.
+ struct ::stat s;
+ // Can't throw here without leaking memory, so just do nothing with
+ // entries for which stat() fails.
+ if (!::stat(fullpath.c_str(), &s)) {
+ if (S_ISREG(s.st_mode)) {
+ cb(fullpath);
+ }
+ }
+ ::free(namelist[i]);
+ }
+ ::free(namelist);
+}
+
}} // namespace qpid::sys
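For reference, a self-contained version of the scandir()/stat() filter above; it lists regular files under an illustrative directory and reduces error handling to a return code instead of qpid::Exception:

    #include <dirent.h>
    #include <sys/stat.h>
    #include <cstdio>
    #include <cstdlib>
    #include <string>

    int main() {
        const std::string dirPath = "/tmp";                  // illustrative directory
        ::dirent** namelist;
        int n = ::scandir(dirPath.c_str(), &namelist, 0, ::alphasort);
        if (n == -1) { std::perror("scandir"); return 1; }
        for (int i = 0; i < n; ++i) {
            std::string fullpath = dirPath + "/" + namelist[i]->d_name;
            struct ::stat s;
            if (::stat(fullpath.c_str(), &s) == 0 && S_ISREG(s.st_mode))
                std::printf("%s\n", fullpath.c_str());
            ::free(namelist[i]);                             // scandir() allocates each entry...
        }
        ::free(namelist);                                    // ...and the array itself
        return 0;
    }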
diff --git a/qpid/cpp/src/qpid/sys/posix/SystemInfo.cpp b/qpid/cpp/src/qpid/sys/posix/SystemInfo.cpp
index cbff1effac..ea7f521f2b 100755
--- a/qpid/cpp/src/qpid/sys/posix/SystemInfo.cpp
+++ b/qpid/cpp/src/qpid/sys/posix/SystemInfo.cpp
@@ -77,51 +77,6 @@ inline bool isLoopback(const ::sockaddr* addr) {
}
}
-void SystemInfo::getLocalIpAddresses (uint16_t port,
- std::vector<Address> &addrList) {
- ::ifaddrs* ifaddr = 0;
- QPID_POSIX_CHECK(::getifaddrs(&ifaddr));
- for (::ifaddrs* ifap = ifaddr; ifap != 0; ifap = ifap->ifa_next) {
- if (ifap->ifa_addr == 0) continue;
- if (isLoopback(ifap->ifa_addr)) continue;
- int family = ifap->ifa_addr->sa_family;
- switch (family) {
- case AF_INET6: {
- // Ignore link local addresses as:
- // * The scope id is illegal in URL syntax
- // * Clients won't be able to use a link local address
- // without adding their own (potentially different) scope id
- sockaddr_in6* sa6 = (sockaddr_in6*)((void*)ifap->ifa_addr);
- if (IN6_IS_ADDR_LINKLOCAL(&sa6->sin6_addr)) break;
- // Fallthrough
- }
- case AF_INET: {
- char dispName[NI_MAXHOST];
- int rc = ::getnameinfo(
- ifap->ifa_addr,
- (family == AF_INET)
- ? sizeof(struct sockaddr_in)
- : sizeof(struct sockaddr_in6),
- dispName, sizeof(dispName),
- 0, 0, NI_NUMERICHOST);
- if (rc != 0) {
- throw QPID_POSIX_ERROR(rc);
- }
- string addr(dispName);
- addrList.push_back(Address(TCP, addr, port));
- break;
- }
- default:
- continue;
- }
- }
- ::freeifaddrs(ifaddr);
-
- if (addrList.empty()) {
- addrList.push_back(Address(TCP, LOOPBACK, port));
- }
-}
-
namespace {
inline socklen_t sa_len(::sockaddr* sa)
{
diff --git a/qpid/cpp/src/qpid/sys/solaris/SystemInfo.cpp b/qpid/cpp/src/qpid/sys/solaris/SystemInfo.cpp
index d4b18e66c8..0e754e048b 100755
--- a/qpid/cpp/src/qpid/sys/solaris/SystemInfo.cpp
+++ b/qpid/cpp/src/qpid/sys/solaris/SystemInfo.cpp
@@ -60,31 +60,6 @@ bool SystemInfo::getLocalHostname(Address &address) {
static const string LOCALHOST("127.0.0.1");
static const string TCP("tcp");
-void SystemInfo::getLocalIpAddresses(uint16_t port,
- std::vector<Address> &addrList) {
- int s = socket(PF_INET, SOCK_STREAM, 0);
- for (int i=1;;i++) {
- struct lifreq ifr;
- ifr.lifr_index = i;
- if (::ioctl(s, SIOCGIFADDR, &ifr) < 0) {
- break;
- }
- struct sockaddr *sa = static_cast<struct sockaddr *>((void *) &ifr.lifr_addr);
- if (sa->sa_family != AF_INET) {
- // TODO: Url parsing currently can't cope with IPv6 addresses, defer for now
- break;
- }
- struct sockaddr_in *sin = static_cast<struct sockaddr_in *>((void *)sa);
- std::string addr(inet_ntoa(sin->sin_addr));
- if (addr != LOCALHOST)
- addrList.push_back(Address(TCP, addr, port));
- }
- if (addrList.empty()) {
- addrList.push_back(Address(TCP, LOCALHOST, port));
- }
- close (s);
-}
-
void SystemInfo::getSystemId(std::string &osName,
std::string &nodeName,
std::string &release,
diff --git a/qpid/cpp/src/qpid/sys/ssl/util.cpp b/qpid/cpp/src/qpid/sys/ssl/util.cpp
index 3078e894df..de5d638b09 100644
--- a/qpid/cpp/src/qpid/sys/ssl/util.cpp
+++ b/qpid/cpp/src/qpid/sys/ssl/util.cpp
@@ -31,8 +31,6 @@
#include <iostream>
#include <fstream>
-#include <boost/filesystem/operations.hpp>
-#include <boost/filesystem/path.hpp>
namespace qpid {
namespace sys {
@@ -82,15 +80,14 @@ SslOptions SslOptions::global;
char* readPasswordFromFile(PK11SlotInfo*, PRBool retry, void*)
{
const std::string& passwordFile = SslOptions::global.certPasswordFile;
- if (retry || passwordFile.empty() || !boost::filesystem::exists(passwordFile)) {
- return 0;
- } else {
- std::ifstream file(passwordFile.c_str());
- std::string password;
- file >> password;
- return PL_strdup(password.c_str());
- }
-}
+ if (retry || passwordFile.empty()) return 0;
+ std::ifstream file(passwordFile.c_str());
+ if (!file) return 0;
+
+ std::string password;
+ file >> password;
+ return PL_strdup(password.c_str());
+}
void initNSS(const SslOptions& options, bool server)
{
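readPasswordFromFile() above now drops the boost::filesystem existence check and simply tries to open the file, treating a failed open the same as a missing password file. A standalone sketch of that open-and-check pattern (the function name and empty-string failure value are illustrative):

    #include <fstream>
    #include <string>

    std::string readFirstToken(const std::string& path) {
        std::ifstream file(path.c_str());
        if (!file) return std::string();   // missing or unreadable: same as "no password"
        std::string token;
        file >> token;
        return token;
    }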
diff --git a/qpid/cpp/src/qpid/sys/windows/FileSysDir.cpp b/qpid/cpp/src/qpid/sys/windows/FileSysDir.cpp
index 88f1637d48..e090747715 100644
--- a/qpid/cpp/src/qpid/sys/windows/FileSysDir.cpp
+++ b/qpid/cpp/src/qpid/sys/windows/FileSysDir.cpp
@@ -24,6 +24,9 @@
#include <sys/stat.h>
#include <direct.h>
#include <errno.h>
+#include <windows.h>
+#include <strsafe.h>
+
namespace qpid {
namespace sys {
@@ -50,4 +53,36 @@ void FileSysDir::mkdir(void)
throw Exception ("Can't create directory: " + dirPath);
}
+void FileSysDir::forEachFile(Callback cb) const {
+
+ WIN32_FIND_DATAA findFileData;
+ char szDir[MAX_PATH];
+ size_t dirPathLength;
+ HANDLE hFind = INVALID_HANDLE_VALUE;
+
+ // create dirPath+"\*" in szDir
+ StringCchLength (dirPath.c_str(), MAX_PATH, &dirPathLength);
+
+ if (dirPathLength > (MAX_PATH - 3)) {
+ throw Exception ("Directory path is too long: " + dirPath);
+ }
+
+ StringCchCopy(szDir, MAX_PATH, dirPath.c_str());
+ StringCchCat(szDir, MAX_PATH, TEXT("\\*"));
+
+ // Special work for first file
+ hFind = FindFirstFileA(szDir, &findFileData);
+ if (INVALID_HANDLE_VALUE == hFind) {
+ return;
+ }
+
+ // process everything that isn't a directory
+ do {
+ if (!(findFileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
+ std::string fileName(findFileData.cFileName);
+ cb(fileName);
+ }
+ } while (FindNextFile(hFind, &findFileData) != 0);
+}
+
}} // namespace qpid::sys
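A hedged sketch of the same Win32 enumeration loop as above; this standalone version also calls FindClose() when the loop finishes, which the FindFirstFile/FindNextFile API expects of the caller. The function name is illustrative:

    #include <windows.h>
    #include <string>
    #include <vector>

    std::vector<std::string> listRegularFiles(const std::string& dirPath) {
        std::vector<std::string> files;
        const std::string pattern = dirPath + "\\*";
        WIN32_FIND_DATAA findFileData;
        HANDLE hFind = FindFirstFileA(pattern.c_str(), &findFileData);
        if (hFind == INVALID_HANDLE_VALUE) return files;     // empty or inaccessible
        do {
            if (!(findFileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY))
                files.push_back(findFileData.cFileName);
        } while (FindNextFileA(hFind, &findFileData) != 0);
        FindClose(hFind);                                    // release the search handle
        return files;
    }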
diff --git a/qpid/cpp/src/qpid/sys/windows/SystemInfo.cpp b/qpid/cpp/src/qpid/sys/windows/SystemInfo.cpp
index b3de8d947e..fb58d53b81 100755
--- a/qpid/cpp/src/qpid/sys/windows/SystemInfo.cpp
+++ b/qpid/cpp/src/qpid/sys/windows/SystemInfo.cpp
@@ -67,35 +67,6 @@ bool SystemInfo::getLocalHostname (Address &address) {
static const std::string LOCALHOST("127.0.0.1");
static const std::string TCP("tcp");
-void SystemInfo::getLocalIpAddresses (uint16_t port,
- std::vector<Address> &addrList) {
- enum { MAX_URL_INTERFACES = 100 };
-
- SOCKET s = socket (PF_INET, SOCK_STREAM, 0);
- if (s != INVALID_SOCKET) {
- INTERFACE_INFO interfaces[MAX_URL_INTERFACES];
- DWORD filledBytes = 0;
- WSAIoctl (s,
- SIO_GET_INTERFACE_LIST,
- 0,
- 0,
- interfaces,
- sizeof (interfaces),
- &filledBytes,
- 0,
- 0);
- unsigned int interfaceCount = filledBytes / sizeof (INTERFACE_INFO);
- for (unsigned int i = 0; i < interfaceCount; ++i) {
- if (interfaces[i].iiFlags & IFF_UP) {
- std::string addr(inet_ntoa(interfaces[i].iiAddress.AddressIn.sin_addr));
- if (addr != LOCALHOST)
- addrList.push_back(Address(TCP, addr, port));
- }
- }
- closesocket (s);
- }
-}
-
// Null function which always fails to find an network interface name
bool SystemInfo::getInterfaceAddresses(const std::string&, std::vector<std::string>&)
{
diff --git a/qpid/cpp/src/qpid/types/Variant.cpp b/qpid/cpp/src/qpid/types/Variant.cpp
index 9b981c9171..8c9837e765 100644
--- a/qpid/cpp/src/qpid/types/Variant.cpp
+++ b/qpid/cpp/src/qpid/types/Variant.cpp
@@ -110,21 +110,28 @@ class VariantImpl
} value;
std::string encoding;//optional encoding for variable length data
- template<class T> T convertFromString() const
+ template<class T> T convertFromString() const
{
const std::string& s = *value.string;
+
try {
- T r = boost::lexical_cast<T>(s);
- //lexical_cast won't fail if string is a negative number and T is unsigned
- //So check that and allow special case of negative zero
- //else its a non-zero negative number so throw exception at end of function
- if (std::numeric_limits<T>::is_signed || s.find('-') != 0 || r == 0) {
- return r;
+ // Extra shenanigans to work around negative zero
+ // conversion error in older GCC libs.
+ if ( s[0] != '-' ) {
+ return boost::lexical_cast<T>(s);
+ } else {
+ T r = boost::lexical_cast<T>(s.substr(1));
+ if (std::numeric_limits<T>::is_signed) {
+ return -r;
+ } else {
+ if (r==0) return 0;
+ }
}
} catch(const boost::bad_lexical_cast&) {
}
throw InvalidConversion(QPID_MSG("Cannot convert " << s));
}
+
};
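The rewritten convertFromString() parses the magnitude first and re-applies the sign only for signed targets, still accepting "-0" for unsigned ones. A standalone sketch of that rule (simplified, no encoding handling, generic names):

    #include <boost/lexical_cast.hpp>
    #include <limits>
    #include <stdexcept>
    #include <string>

    template <class T> T fromString(const std::string& s) {
        try {
            if (s.empty() || s[0] != '-') return boost::lexical_cast<T>(s);
            T r = boost::lexical_cast<T>(s.substr(1));       // parse the magnitude
            if (std::numeric_limits<T>::is_signed) return static_cast<T>(-r);
            if (r == 0) return 0;                            // "-0" is acceptable for unsigned T
        } catch (const boost::bad_lexical_cast&) {}
        throw std::runtime_error("Cannot convert " + s);
    }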
diff --git a/qpid/cpp/src/qpidd.cpp b/qpid/cpp/src/qpidd.cpp
index 920009580c..e17dea3164 100644
--- a/qpid/cpp/src/qpidd.cpp
+++ b/qpid/cpp/src/qpidd.cpp
@@ -75,17 +75,19 @@ int run_broker(int argc, char *argv[], bool hidden)
for (vector<string>::iterator iter = bootOptions.module.load.begin();
iter != bootOptions.module.load.end();
iter++)
- qpid::tryShlib (iter->data(), false);
+ qpid::tryShlib (*iter);
if (!bootOptions.module.noLoad) {
bool isDefault = defaultPath == bootOptions.module.loadDir;
qpid::loadModuleDir (bootOptions.module.loadDir, isDefault);
}
- // Parse options
+ // Parse options. In the second pass, do not allow unknown options.
+ // All the modules have been added now, so any unknown options
+ // should be flagged as errors.
try {
options.reset(new QpiddOptions(argv[0]));
- options->parse(argc, argv, options->common.config);
+ options->parse(argc, argv, options->common.config, false);
} catch (const std::exception& /*e*/) {
if (helpArgSeen) {
// provide help even when parsing fails
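qpidd now parses options twice: a lenient pass before modules are loaded and a strict pass afterwards, so unknown options become errors only once every module has registered its switches. An illustrative two-pass parse with boost::program_options (not the qpid Options class; option names are made up):

    #include <boost/program_options.hpp>
    #include <iostream>
    namespace po = boost::program_options;

    int main(int argc, char* argv[]) {
        po::options_description core("core");
        core.add_options()("port", po::value<int>()->default_value(5672), "listen port");

        // Pass 1: tolerate switches we do not know about yet.
        po::parsed_options lenient =
            po::command_line_parser(argc, argv).options(core).allow_unregistered().run();
        (void)lenient;

        // ... loaded modules would add their own options to 'core' here ...

        // Pass 2: strict parse; anything still unknown now throws.
        po::variables_map vm;
        po::store(po::parse_command_line(argc, argv, core), vm);
        po::notify(vm);
        std::cout << "port=" << vm["port"].as<int>() << std::endl;
        return 0;
    }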
diff --git a/qpid/cpp/src/ssl.mk b/qpid/cpp/src/ssl.mk
index ff2aa502d6..aaaf14ed54 100644
--- a/qpid/cpp/src/ssl.mk
+++ b/qpid/cpp/src/ssl.mk
@@ -50,7 +50,8 @@ sslconnector_la_SOURCES = \
if HAVE_PROTON
sslconnector_la_SOURCES += \
- qpid/messaging/amqp/SslTransport.cpp
+ qpid/messaging/amqp/SslTransport.cpp \
+ qpid/messaging/amqp/SslTransport.h
endif #HAVE_PROTON
diff --git a/qpid/cpp/src/tests/.valgrind.supp b/qpid/cpp/src/tests/.valgrind.supp
index 2c6a1509ff..1a24a9178e 100644
--- a/qpid/cpp/src/tests/.valgrind.supp
+++ b/qpid/cpp/src/tests/.valgrind.supp
@@ -39,21 +39,6 @@
fun:_sasl_load_plugins
fun:sasl_client_init
}
-{
- Benign leak in CPG - patched version.
- Memcheck:Leak
- fun:*
- fun:openais_service_connect
- fun:cpg_initialize
-}
-
-{
- Benign error in libcpg.
- Memcheck:Param
- socketcall.sendmsg(msg.msg_iov[i])
- obj:*/libpthread-2.5.so
- obj:*/libcpg.so.2.0.0
-}
{
Uninitialised value problem in _dl_relocate (F7, F8)
@@ -161,14 +146,6 @@
}
{
- CPG error - seems benign.
- Memcheck:Param
- socketcall.sendmsg(msg.msg_iov[i])
- obj:*
- obj:*/libcpg.so.2.0.0
-}
-
-{
Known leak in boost.thread 1.33.1. Wildcards for 64/32 bit diffs.
Memcheck:Leak
fun:*
diff --git a/qpid/cpp/src/tests/BrokerMgmtAgent.cpp b/qpid/cpp/src/tests/BrokerMgmtAgent.cpp
index 9c21e51a18..29c3faf809 100644
--- a/qpid/cpp/src/tests/BrokerMgmtAgent.cpp
+++ b/qpid/cpp/src/tests/BrokerMgmtAgent.cpp
@@ -22,6 +22,7 @@
#include "unit_test.h"
#include "MessagingFixture.h"
#include "qpid/management/Buffer.h"
+#include "qpid/management/ManagementAgent.h"
#include "qpid/messaging/Message.h"
#include "qpid/amqp_0_10/Codecs.h"
#include "qpid/log/Logger.h"
@@ -323,361 +324,6 @@ QPID_AUTO_TEST_CASE(v2ObjPublish)
delete tm;
}
-
-// verify that a deleted object is exported correctly using the
-// exportDeletedObjects() method. V1 testcase.
-//
-QPID_AUTO_TEST_CASE(v1ExportDelObj)
-{
- AgentFixture* fix = new AgentFixture(3);
- management::ManagementAgent* agent;
- agent = fix->getBrokerAgent();
-
- // create a manageable test object
- TestManageable *tm = new TestManageable(agent, std::string("myObj"));
- uint32_t objLen = tm->GetManagementObject()->writePropertiesSize();
-
- Receiver r1 = fix->createV1DataIndRcvr("org.apache.qpid.broker.mgmt.test", "#");
-
- agent->addObject(tm->GetManagementObject(), 1);
-
- // wait for the object to be published
- Message m1;
- BOOST_CHECK(r1.fetch(m1, Duration::SECOND * 6));
-
- TestObjectVector objs;
- decodeV1ObjectUpdates(m1, objs, objLen);
- BOOST_CHECK(objs.size() > 0);
-
- // destroy the object, then immediately export (before the next poll cycle)
-
- ::qpid::management::ManagementAgent::DeletedObjectList delObjs;
- tm->GetManagementObject()->resourceDestroy();
- agent->exportDeletedObjects( delObjs );
- BOOST_CHECK(delObjs.size() == 1);
-
- // wait for the deleted object to be published
-
- bool isDeleted = false;
- while (!isDeleted && r1.fetch(m1, Duration::SECOND * 6)) {
-
- decodeV1ObjectUpdates(m1, objs, objLen);
- BOOST_CHECK(objs.size() > 0);
-
- for (TestObjectVector::iterator oIter = objs.begin(); oIter != objs.end(); oIter++) {
-
- TestManageable::validateTestObjectProperties(**oIter);
-
- qpid::types::Variant::Map mappy;
- (*oIter)->writeTimestamps(mappy);
- if (mappy["_delete_ts"].asUint64() != 0)
- isDeleted = true;
- }
- }
-
- BOOST_CHECK(isDeleted);
-
- // verify there are no deleted objects to export now.
-
- agent->exportDeletedObjects( delObjs );
- BOOST_CHECK(delObjs.size() == 0);
-
- r1.close();
- delete fix;
- delete tm;
-}
-
-
-// verify that a deleted object is imported correctly using the
-// importDeletedObjects() method. V1 testcase.
-//
-QPID_AUTO_TEST_CASE(v1ImportDelObj)
-{
- AgentFixture* fix = new AgentFixture(3);
- management::ManagementAgent* agent;
- agent = fix->getBrokerAgent();
-
- // create a manageable test object
- TestManageable *tm = new TestManageable(agent, std::string("anObj"));
- uint32_t objLen = tm->GetManagementObject()->writePropertiesSize();
-
- Receiver r1 = fix->createV1DataIndRcvr("org.apache.qpid.broker.mgmt.test", "#");
-
- agent->addObject(tm->GetManagementObject(), 1);
-
- // wait for the object to be published
- Message m1;
- BOOST_CHECK(r1.fetch(m1, Duration::SECOND * 6));
-
- TestObjectVector objs;
- decodeV1ObjectUpdates(m1, objs, objLen);
- BOOST_CHECK(objs.size() > 0);
-
- // destroy the object, then immediately export (before the next poll cycle)
-
- ::qpid::management::ManagementAgent::DeletedObjectList delObjs;
- tm->GetManagementObject()->resourceDestroy();
- agent->exportDeletedObjects( delObjs );
- BOOST_CHECK(delObjs.size() == 1);
-
- // destroy the broker, and reinistantiate a new one without populating it
- // with a TestObject.
-
- r1.close();
- delete fix;
- delete tm; // should no longer be necessary
-
- fix = new AgentFixture(3);
- r1 = fix->createV1DataIndRcvr("org.apache.qpid.broker.mgmt.test", "#");
- agent = fix->getBrokerAgent();
- agent->importDeletedObjects( delObjs );
-
- // wait for the deleted object to be published
-
- bool isDeleted = false;
- while (!isDeleted && r1.fetch(m1, Duration::SECOND * 6)) {
-
- decodeV1ObjectUpdates(m1, objs, objLen);
- BOOST_CHECK(objs.size() > 0);
-
- for (TestObjectVector::iterator oIter = objs.begin(); oIter != objs.end(); oIter++) {
-
- TestManageable::validateTestObjectProperties(**oIter);
-
- qpid::types::Variant::Map mappy;
- (*oIter)->writeTimestamps(mappy);
- if (mappy["_delete_ts"].asUint64() != 0)
- isDeleted = true;
- }
- }
-
- BOOST_CHECK(isDeleted);
-
- // verify there are no deleted objects to export now.
-
- agent->exportDeletedObjects( delObjs );
- BOOST_CHECK(delObjs.size() == 0);
-
- r1.close();
- delete fix;
-}
-
-
-// verify that an object that is added and deleted prior to the
-// first poll cycle is accounted for by the export
-//
-QPID_AUTO_TEST_CASE(v1ExportFastDelObj)
-{
- AgentFixture* fix = new AgentFixture(3);
- management::ManagementAgent* agent;
- agent = fix->getBrokerAgent();
-
- // create a manageable test object
- TestManageable *tm = new TestManageable(agent, std::string("objectifyMe"));
-
- // add, then immediately delete and export the object...
-
- ::qpid::management::ManagementAgent::DeletedObjectList delObjs;
- agent->addObject(tm->GetManagementObject(), 999);
- tm->GetManagementObject()->resourceDestroy();
- agent->exportDeletedObjects( delObjs );
- BOOST_CHECK(delObjs.size() == 1);
-
- delete fix;
- delete tm;
-}
-
-
-// Verify that we can export and import multiple deleted objects correctly.
-//
-QPID_AUTO_TEST_CASE(v1ImportMultiDelObj)
-{
- AgentFixture* fix = new AgentFixture(3);
- management::ManagementAgent* agent;
- agent = fix->getBrokerAgent();
-
- Receiver r1 = fix->createV1DataIndRcvr("org.apache.qpid.broker.mgmt.test", "#");
-
- // populate the agent with multiple test objects
- const size_t objCount = 50;
- std::vector<TestManageable *> tmv;
- uint32_t objLen;
-
- for (size_t i = 0; i < objCount; i++) {
- std::stringstream key;
- key << "testobj-" << std::setfill('x') << std::setw(4) << i;
- // (no, seriously, I didn't just do that.)
- // Note well: we have to keep the key string length EXACTLY THE SAME
- // FOR ALL OBJECTS, so objLen will be the same. Otherwise the
- // decodeV1ObjectUpdates() will fail (v1 lacks explict encoded length).
- TestManageable *tm = new TestManageable(agent, key.str());
- objLen = tm->GetManagementObject()->writePropertiesSize();
- agent->addObject(tm->GetManagementObject(), i + 1);
- tmv.push_back(tm);
- }
-
- // wait for the objects to be published
- Message m1;
- uint32_t msgCount = 0;
- while(r1.fetch(m1, Duration::SECOND * 6)) {
- TestObjectVector objs;
- decodeV1ObjectUpdates(m1, objs, objLen);
- msgCount += objs.size();
- }
-
- BOOST_CHECK_EQUAL(msgCount, objCount);
-
- // destroy some of the objects, then immediately export (before the next poll cycle)
-
- uint32_t delCount = 0;
- for (size_t i = 0; i < objCount; i += 2) {
- tmv[i]->GetManagementObject()->resourceDestroy();
- delCount++;
- }
-
- ::qpid::management::ManagementAgent::DeletedObjectList delObjs;
- agent->exportDeletedObjects( delObjs );
- BOOST_CHECK_EQUAL(delObjs.size(), delCount);
-
- // destroy the broker, and reinistantiate a new one without populating it
- // with TestObjects.
-
- r1.close();
- delete fix;
- while (tmv.size()) {
- delete tmv.back();
- tmv.pop_back();
- }
-
- fix = new AgentFixture(3);
- r1 = fix->createV1DataIndRcvr("org.apache.qpid.broker.mgmt.test", "#");
- agent = fix->getBrokerAgent();
- agent->importDeletedObjects( delObjs );
-
- // wait for the deleted object to be published, verify the count
-
- uint32_t countDels = 0;
- while (r1.fetch(m1, Duration::SECOND * 6)) {
- TestObjectVector objs;
- decodeV1ObjectUpdates(m1, objs, objLen);
- BOOST_CHECK(objs.size() > 0);
-
-
- for (TestObjectVector::iterator oIter = objs.begin(); oIter != objs.end(); oIter++) {
-
- TestManageable::validateTestObjectProperties(**oIter);
-
- qpid::types::Variant::Map mappy;
- (*oIter)->writeTimestamps(mappy);
- if (mappy["_delete_ts"].asUint64() != 0)
- countDels++;
- }
- }
-
- // make sure we get the correct # of deleted objects
- BOOST_CHECK_EQUAL(countDels, delCount);
-
- // verify there are no deleted objects to export now.
-
- agent->exportDeletedObjects( delObjs );
- BOOST_CHECK(delObjs.size() == 0);
-
- r1.close();
- delete fix;
-}
-
-// Verify that we can export and import multiple deleted objects correctly.
-// QMF V2 variant
-QPID_AUTO_TEST_CASE(v2ImportMultiDelObj)
-{
- AgentFixture* fix = new AgentFixture(3, true);
- management::ManagementAgent* agent;
- agent = fix->getBrokerAgent();
-
- Receiver r1 = fix->createV2DataIndRcvr("org.apache.qpid.broker.mgmt.test", "#");
-
- // populate the agent with multiple test objects
- const size_t objCount = 50;
- std::vector<TestManageable *> tmv;
-
- for (size_t i = 0; i < objCount; i++) {
- std::stringstream key;
- key << "testobj-" << i;
- TestManageable *tm = new TestManageable(agent, key.str());
- if (tm->GetManagementObject()->writePropertiesSize()) {}
- agent->addObject(tm->GetManagementObject(), key.str());
- tmv.push_back(tm);
- }
-
- // wait for the objects to be published
- Message m1;
- uint32_t msgCount = 0;
- while(r1.fetch(m1, Duration::SECOND * 6)) {
- TestObjectVector objs;
- decodeV2ObjectUpdates(m1, objs);
- msgCount += objs.size();
- }
-
- BOOST_CHECK_EQUAL(msgCount, objCount);
-
- // destroy some of the objects, then immediately export (before the next poll cycle)
-
- uint32_t delCount = 0;
- for (size_t i = 0; i < objCount; i += 2) {
- tmv[i]->GetManagementObject()->resourceDestroy();
- delCount++;
- }
-
- ::qpid::management::ManagementAgent::DeletedObjectList delObjs;
- agent->exportDeletedObjects( delObjs );
- BOOST_CHECK_EQUAL(delObjs.size(), delCount);
-
- // destroy the broker, and reinistantiate a new one without populating it
- // with TestObjects.
-
- r1.close();
- delete fix;
- while (tmv.size()) {
- delete tmv.back();
- tmv.pop_back();
- }
-
- fix = new AgentFixture(3, true);
- r1 = fix->createV2DataIndRcvr("org.apache.qpid.broker.mgmt.test", "#");
- agent = fix->getBrokerAgent();
- agent->importDeletedObjects( delObjs );
-
- // wait for the deleted object to be published, verify the count
-
- uint32_t countDels = 0;
- while (r1.fetch(m1, Duration::SECOND * 6)) {
- TestObjectVector objs;
- decodeV2ObjectUpdates(m1, objs);
- BOOST_CHECK(objs.size() > 0);
-
- for (TestObjectVector::iterator oIter = objs.begin(); oIter != objs.end(); oIter++) {
-
- TestManageable::validateTestObjectProperties(**oIter);
-
- qpid::types::Variant::Map mappy;
- (*oIter)->writeTimestamps(mappy);
- if (mappy["_delete_ts"].asUint64() != 0)
- countDels++;
- }
- }
-
- // make sure we get the correct # of deleted objects
- BOOST_CHECK_EQUAL(countDels, delCount);
-
- // verify there are no deleted objects to export now.
-
- agent->exportDeletedObjects( delObjs );
- BOOST_CHECK(delObjs.size() == 0);
-
- r1.close();
- delete fix;
-}
-
// See QPID-2997
QPID_AUTO_TEST_CASE(v2RapidRestoreObj)
{
diff --git a/qpid/cpp/src/tests/CMakeLists.txt b/qpid/cpp/src/tests/CMakeLists.txt
index 63afc46831..bc13e2ecdd 100644
--- a/qpid/cpp/src/tests/CMakeLists.txt
+++ b/qpid/cpp/src/tests/CMakeLists.txt
@@ -314,6 +314,7 @@ if (PYTHON_EXECUTABLE)
add_test (ha_tests ${test_wrap} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/ha_tests.py)
add_test (ipv6_test ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/ipv6_test${test_script_suffix})
add_test (federation_tests ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/run_federation_tests${test_script_suffix})
+ add_test (federation_sys_tests ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/run_federation_sys_tests${test_script_suffix})
if (BUILD_ACL)
add_test (acl_tests ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/run_acl_tests${test_script_suffix})
endif (BUILD_ACL)
@@ -340,7 +341,12 @@ add_library (dlclose_noop MODULE dlclose_noop.c)
#
## Longer running stability tests, not run by default check: target.
## Not run under valgrind, too slow
-#LONG_TESTS=fanout_perftest shared_perftest multiq_perftest topic_perftest run_failover_soak
+#LONG_TESTS=fanout_perftest shared_perftest multiq_perftest topic_perftest
#EXTRA_DIST+=$(LONG_TESTS) run_perftest
#check-long:
# $(MAKE) check TESTS="start_broker $(LONG_TESTS) stop_broker" VALGRIND=
+
+#
+# legacystore
+#
+add_subdirectory(legacystore)
diff --git a/qpid/cpp/src/tests/ClusterFailover.cpp b/qpid/cpp/src/tests/ClusterFailover.cpp
deleted file mode 100644
index bf5c147f19..0000000000
--- a/qpid/cpp/src/tests/ClusterFailover.cpp
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-/**@file Tests for partial failure in a cluster.
- * Partial failure means some nodes experience a failure while others do not.
- * In this case the failed nodes must shut down.
- */
-
-#include "test_tools.h"
-#include "unit_test.h"
-#include "ClusterFixture.h"
-#include "qpid/client/FailoverManager.h"
-#include <boost/assign.hpp>
-#include <boost/algorithm/string.hpp>
-#include <boost/bind.hpp>
-
-namespace qpid {
-namespace tests {
-
-QPID_AUTO_TEST_SUITE(ClusterFailoverTestSuite)
-
-using namespace std;
-using namespace qpid;
-using namespace qpid::cluster;
-using namespace qpid::framing;
-using namespace qpid::client;
-using namespace qpid::client::arg;
-using namespace boost::assign;
-using broker::Broker;
-using boost::shared_ptr;
-
-// Timeout for tests that wait for messages
-const sys::Duration TIMEOUT=sys::TIME_SEC/4;
-
-ClusterFixture::Args getArgs(bool durable=std::getenv("STORE_LIB"))
-{
- ClusterFixture::Args args;
- args += "--auth", "no", "--no-module-dir",
- "--load-module", getLibPath("CLUSTER_LIB");
- if (durable)
- args += "--load-module", getLibPath("STORE_LIB"), "TMP_DATA_DIR";
- else
- args += "--no-data-dir";
- return args;
-}
-
-// Test re-connecting with same session name after a failure.
-QPID_AUTO_TEST_CASE(testReconnectSameSessionName) {
- ClusterFixture cluster(2, getArgs(), -1);
- // Specify a timeout to make sure it is ignored, session resume is
- // not implemented so sessions belonging to dead brokers should
- // not be kept.
- Client c0(cluster[0], "foo", 5);
- BOOST_CHECK_EQUAL(2u, knownBrokerPorts(c0.connection, 2).size()); // wait for both.
- c0.session.queueDeclare("q");
- c0.session.messageTransfer(arg::content=Message("sendme", "q"));
- BOOST_CHECK_EQUAL(c0.subs.get("q").getData(), "sendme");
- cluster.killWithSilencer(0, c0.connection, 9);
- Client c1(cluster[1], "foo", 5);
- c1.session.queueQuery(); // Try to use the session.
-}
-
-QPID_AUTO_TEST_CASE(testReconnectExclusiveQueue) {
- // Regression test. Session timeouts should be ignored
- // by the broker as session resume is not implemented.
- ClusterFixture cluster(2, getArgs(), -1);
- Client c0(cluster[0], "foo", 5);
- c0.session.queueDeclare("exq", arg::exclusive=true);
- SubscriptionSettings settings;
- settings.exclusive = true;
- settings.autoAck = 0;
- Subscription s0 = c0.subs.subscribe(c0.lq, "exq", settings, "exsub");
- c0.session.messageTransfer(arg::content=Message("sendme", "exq"));
- BOOST_CHECK_EQUAL(c0.lq.get().getData(), "sendme");
-
- // Regression: core dump on exit if unacked messages were left in
- // a session with a timeout.
- cluster.killWithSilencer(0, c0.connection);
-
- // Regression: session timeouts prevented re-connecting to
- // exclusive queue.
- Client c1(cluster[1]);
- c1.session.queueDeclare("exq", arg::exclusive=true);
- Subscription s1 = c1.subs.subscribe(c1.lq, "exq", settings, "exsub");
- s1.cancel();
-
- // Regression: session timeouts prevented new member joining
- // cluster with exclusive queues.
- cluster.add();
- Client c2(cluster[2]);
- c2.session.queueQuery();
-}
-
-
-QPID_AUTO_TEST_SUITE_END()
-
-}} // namespace qpid::tests
diff --git a/qpid/cpp/src/tests/ClusterFixture.cpp b/qpid/cpp/src/tests/ClusterFixture.cpp
deleted file mode 100644
index 6b62cb6fc7..0000000000
--- a/qpid/cpp/src/tests/ClusterFixture.cpp
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- *
- * Copyright (c) 2006 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "test_tools.h"
-#include "unit_test.h"
-#include "ForkedBroker.h"
-#include "BrokerFixture.h"
-
-#include "qpid/client/Connection.h"
-#include "qpid/client/ConnectionAccess.h"
-#include "qpid/client/Session.h"
-#include "qpid/client/FailoverListener.h"
-#include "qpid/cluster/Cluster.h"
-#include "qpid/cluster/Cpg.h"
-#include "qpid/cluster/UpdateClient.h"
-#include "qpid/framing/AMQBody.h"
-#include "qpid/framing/Uuid.h"
-#include "qpid/framing/reply_exceptions.h"
-#include "qpid/framing/enum.h"
-#include "qpid/log/Logger.h"
-
-#include <boost/bind.hpp>
-#include <boost/shared_ptr.hpp>
-#include <boost/assign.hpp>
-
-#include <string>
-#include <iostream>
-#include <iterator>
-#include <vector>
-#include <set>
-#include <algorithm>
-#include <iterator>
-
-
-using namespace std;
-using namespace qpid;
-using namespace qpid::cluster;
-using namespace qpid::framing;
-using namespace qpid::client;
-using qpid::sys::TIME_SEC;
-using qpid::broker::Broker;
-using boost::shared_ptr;
-using qpid::cluster::Cluster;
-using boost::assign::list_of;
-
-
-#include "ClusterFixture.h"
-
-namespace qpid {
-namespace tests {
-
-ClusterFixture::ClusterFixture(size_t n, const Args& args_, int localIndex_)
- : name(Uuid(true).str()), localIndex(localIndex_), userArgs(args_)
-{
- add(n);
-}
-
-ClusterFixture::ClusterFixture(size_t n, boost::function<void (Args&, size_t)> updateArgs_, int localIndex_)
- : name(Uuid(true).str()), localIndex(localIndex_), updateArgs(updateArgs_)
-{
- add(n);
-}
-
-ClusterFixture::Args ClusterFixture::makeArgs(const std::string& prefix, size_t index) {
- Args args = list_of<string>("qpidd ")
- ("--cluster-name")(name)
- ("--log-prefix")(prefix);
- args.insert(args.end(), userArgs.begin(), userArgs.end());
- if (updateArgs) updateArgs(args, index);
- return args;
-}
-
-void ClusterFixture::add() {
- if (size() != size_t(localIndex)) { // fork a broker process.
- std::ostringstream os; os << "fork" << size();
- std::string prefix = os.str();
- forkedBrokers.push_back(shared_ptr<ForkedBroker>(new ForkedBroker(makeArgs(prefix, size()))));
- push_back(forkedBrokers.back()->getPort());
- }
- else { // Run in this process
- addLocal();
- }
-}
-
-namespace {
-/** Parse broker & cluster options */
-Broker::Options parseOpts(size_t argc, const char* argv[]) {
- Broker::Options opts;
- Plugin::addOptions(opts); // Pick up cluster options.
- opts.parse(argc, argv, "", true); // Allow-unknown for --load-module
- return opts;
-}
-}
-
-void ClusterFixture::addLocal() {
- assert(int(size()) == localIndex);
- ostringstream os; os << "local" << localIndex;
- string prefix = os.str();
- Args args(makeArgs(prefix, localIndex));
- vector<const char*> argv(args.size());
- transform(args.begin(), args.end(), argv.begin(), boost::bind(&string::c_str, _1));
- qpid::log::Logger::instance().setPrefix(prefix);
- localBroker.reset(new BrokerFixture(parseOpts(argv.size(), &argv[0])));
- push_back(localBroker->getPort());
- forkedBrokers.push_back(shared_ptr<ForkedBroker>());
-}
-
-bool ClusterFixture::hasLocal() const { return localIndex >= 0 && size_t(localIndex) < size(); }
-
-/** Kill a forked broker with sig, or shutdown localBroker if n==0. */
-void ClusterFixture::kill(size_t n, int sig) {
- if (n == size_t(localIndex))
- localBroker->broker->shutdown();
- else
- forkedBrokers[n]->kill(sig);
-}
-
-/** Kill a broker and suppress errors from closing connection c. */
-void ClusterFixture::killWithSilencer(size_t n, client::Connection& c, int sig) {
- ScopedSuppressLogging sl;
- try { c.close(); } catch(...) {}
- kill(n,sig);
-}
-
-/**
- * Get the known broker ports from a Connection.
- *@param n if specified wait for the cluster size to be n, up to a timeout.
- */
-std::set<int> knownBrokerPorts(qpid::client::Connection& c, int n) {
- FailoverListener fl(c, false);
- std::vector<qpid::Url> urls = fl.getKnownBrokers();
- if (n >= 0 && unsigned(n) != urls.size()) {
- // Retry up to 10 secs in .1 second intervals.
- for (size_t retry=100; urls.size() != unsigned(n) && retry != 0; --retry) {
- qpid::sys::usleep(1000*100); // 0.1 secs
- urls = fl.getKnownBrokers();
- }
- }
- std::set<int> s;
- for (std::vector<qpid::Url>::const_iterator i = urls.begin(); i != urls.end(); ++i)
- s.insert((*i)[0].port);
- return s;
-}
-
-}} // namespace qpid::tests
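The knownBrokerPorts helper removed above polls FailoverListener until the expected cluster size is reported, retrying for up to 10 seconds in 0.1-second steps. The same bounded-retry idiom in isolation is sketched below; the helper name pollUntil and the C++11 sleep are illustrative only, not part of this tree.

// Bounded-retry sketch: poll a condition with a fixed attempt budget,
// mirroring the retry loop in knownBrokerPorts above. Illustrative only.
#include <chrono>
#include <thread>
#include <functional>

bool pollUntil(const std::function<bool()>& done,
               int attempts = 100,
               std::chrono::milliseconds interval = std::chrono::milliseconds(100)) {
    for (int i = 0; i < attempts; ++i) {
        if (done()) return true;
        std::this_thread::sleep_for(interval);
    }
    return done();                        // one last check after the budget
}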
diff --git a/qpid/cpp/src/tests/ClusterFixture.h b/qpid/cpp/src/tests/ClusterFixture.h
deleted file mode 100644
index f548ff9376..0000000000
--- a/qpid/cpp/src/tests/ClusterFixture.h
+++ /dev/null
@@ -1,115 +0,0 @@
-#ifndef CLUSTER_FIXTURE_H
-#define CLUSTER_FIXTURE_H
-
-/*
- *
- * Copyright (c) 2006 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "test_tools.h"
-#include "unit_test.h"
-#include "ForkedBroker.h"
-#include "BrokerFixture.h"
-
-#include "qpid/client/Connection.h"
-#include "qpid/client/ConnectionAccess.h"
-#include "qpid/client/Session.h"
-#include "qpid/client/FailoverListener.h"
-#include "qpid/cluster/Cluster.h"
-#include "qpid/cluster/Cpg.h"
-#include "qpid/cluster/UpdateClient.h"
-#include "qpid/framing/AMQBody.h"
-#include "qpid/framing/Uuid.h"
-#include "qpid/framing/reply_exceptions.h"
-#include "qpid/framing/enum.h"
-#include "qpid/log/Logger.h"
-
-#include <boost/bind.hpp>
-#include <boost/function.hpp>
-#include <boost/shared_ptr.hpp>
-
-#include <string>
-#include <iostream>
-#include <iterator>
-#include <vector>
-#include <set>
-#include <algorithm>
-#include <iterator>
-
-
-using namespace std;
-using namespace qpid;
-using namespace qpid::cluster;
-using namespace qpid::framing;
-using namespace qpid::client;
-using qpid::sys::TIME_SEC;
-using qpid::broker::Broker;
-using boost::shared_ptr;
-using qpid::cluster::Cluster;
-
-namespace qpid {
-namespace tests {
-
-/** Cluster fixture is a vector of ports for the replicas.
- *
- * At most one replica (by default replica 0) is in the current
- * process, all others are forked as children.
- */
-class ClusterFixture : public vector<uint16_t> {
- public:
- typedef std::vector<std::string> Args;
-
- /** @param localIndex can be -1 meaning don't automatically start a local broker.
- * A local broker can be started with addLocal().
- */
- ClusterFixture(size_t n, const Args& args, int localIndex=-1);
-
- /**@param updateArgs function is passed the index of the cluster member and can update the arguments. */
- ClusterFixture(size_t n, boost::function<void (Args&, size_t)> updateArgs, int localIndex=-1);
-
- void add(size_t n) { for (size_t i=0; i < n; ++i) add(); }
- void add(); // Add a broker.
- void setup();
-
- bool hasLocal() const;
-
- /** Kill a forked broker with sig, or shutdown localBroker. */
- void kill(size_t n, int sig=SIGINT);
-
- /** Kill a broker and suppress errors from closing connection c. */
- void killWithSilencer(size_t n, client::Connection& c, int sig=SIGINT);
-
- private:
-
- void addLocal(); // Add a local broker.
- Args makeArgs(const std::string& prefix, size_t index);
- string name;
- std::auto_ptr<BrokerFixture> localBroker;
- int localIndex;
- std::vector<shared_ptr<ForkedBroker> > forkedBrokers;
- Args userArgs;
- boost::function<void (Args&, size_t)> updateArgs;
-};
-
-/**
- * Get the known broker ports from a Connection.
- *@param n if specified wait for the cluster size to be n, up to a timeout.
- */
-std::set<int> knownBrokerPorts(qpid::client::Connection& source, int n=-1);
-
-}} // namespace qpid::tests
-
-#endif /*!CLUSTER_FIXTURE_H*/
diff --git a/qpid/cpp/src/tests/ForkedBroker.cpp b/qpid/cpp/src/tests/ForkedBroker.cpp
deleted file mode 100644
index de1b42d40f..0000000000
--- a/qpid/cpp/src/tests/ForkedBroker.cpp
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-#include "ForkedBroker.h"
-#include "qpid/log/Statement.h"
-#include <boost/bind.hpp>
-#include <algorithm>
-#include <stdlib.h>
-#include <sys/types.h>
-#include <signal.h>
-
-using namespace std;
-using qpid::ErrnoException;
-
-namespace std {
-static ostream& operator<<(ostream& o, const qpid::tests::ForkedBroker::Args& a) {
-copy(a.begin(), a.end(), ostream_iterator<string>(o, " "));
-return o;
-}
-}
-
-namespace qpid {
-namespace tests {
-
-ForkedBroker::ForkedBroker(const Args& constArgs) : running(false), exitStatus(0) {
- Args args(constArgs);
- // Substitute the special value "TMP_DATA_DIR" with a temporary data dir.
- Args::iterator i = find(args.begin(), args.end(), string("TMP_DATA_DIR"));
- if (i != args.end()) {
- args.erase(i);
- char dd[] = "/tmp/ForkedBroker.XXXXXX";
- if (!mkdtemp(dd))
- throw qpid::ErrnoException("Can't create data dir");
- dataDir = dd;
- args.push_back("--data-dir");
- args.push_back(dataDir);
- }
- // Never use the default data directory, set --no-data-dir if no other data-dir arg.
- Args::iterator j = find(args.begin(), args.end(), string("--data-dir"));
- Args::iterator k = find(args.begin(), args.end(), string("--no-data-dir"));
- if (j == args.end() && k == args.end())
- args.push_back("--no-data-dir");
- init(args);
-}
-
-ForkedBroker::~ForkedBroker() {
- try { kill(); }
- catch (const std::exception& e) {
- QPID_LOG(error, QPID_MSG("Killing forked broker: " << e.what()));
- }
- if (!dataDir.empty())
- {
- if(::system(("rm -rf "+dataDir).c_str())) {}
- }
-}
-
-void ForkedBroker::kill(int sig) {
- if (pid == 0) return;
- int savePid = pid;
- pid = 0; // Reset pid here in case of an exception.
- using qpid::ErrnoException;
- if (::kill(savePid, sig) < 0)
- throw ErrnoException("kill failed");
- int status;
- if (::waitpid(savePid, &status, 0) < 0 && sig != 9)
- throw ErrnoException("wait for forked process failed");
- if (WEXITSTATUS(status) != 0 && sig != 9)
- throw qpid::Exception(QPID_MSG("Forked broker exited with: " << WEXITSTATUS(status)));
- running = false;
- exitStatus = status;
-}
-
-bool isLogOption(const std::string& s) {
- const char * log_enable = "--log-enable",
- * trace = "--trace";
- return( (! strncmp(s.c_str(), log_enable, strlen(log_enable))) ||
- (! strncmp(s.c_str(), trace, strlen(trace)))
- );
-}
-
-namespace {
- void ignore_signal(int)
- {
- }
-}
-
-void ForkedBroker::init(const Args& userArgs) {
- using qpid::ErrnoException;
- port = 0;
- int pipeFds[2];
- if(::pipe(pipeFds) < 0) throw ErrnoException("Can't create pipe");
-
- // Ignore the SIGCHLD signal generated by an exiting child
- // We will clean up any exiting children in the waitpid above
- // This should really be neater (like only once not per fork)
- struct ::sigaction sa;
- sa.sa_handler = ignore_signal;
- ::sigemptyset(&sa.sa_mask);
- ::sigaddset(&sa.sa_mask, SIGCHLD);
- sa.sa_flags = SA_NOCLDSTOP | SA_RESTART;
- ::sigaction(SIGCHLD, &sa, 0);
-
- pid = ::fork();
- if (pid < 0) throw ErrnoException("Fork failed");
- if (pid) { // parent
- ::close(pipeFds[1]);
- FILE* f = ::fdopen(pipeFds[0], "r");
- if (!f) throw ErrnoException("fopen failed");
- if (::fscanf(f, "%d", &port) != 1) {
- if (ferror(f)) throw ErrnoException("Error reading port number from child.");
- else throw qpid::Exception("EOF reading port number from child.");
- }
- ::fclose(f);
- running = true;
- }
- else { // child
- ::close(pipeFds[0]);
- int fd = ::dup2(pipeFds[1], 1); // pipe stdout to the parent.
- if (fd < 0) throw ErrnoException("dup2 failed");
- const char* prog = ::getenv("QPIDD_EXEC");
- if (!prog) prog = "../qpidd"; // This only works from within an svn checkout
- Args args(userArgs);
- args.push_back("--port=0");
- // Keep quiet except for errors.
- if (!::getenv("QPID_TRACE") && !::getenv("QPID_LOG_ENABLE")
- && find_if(userArgs.begin(), userArgs.end(), isLogOption) == userArgs.end())
- args.push_back("--log-enable=error+");
- std::vector<const char*> argv(args.size());
- std::transform(args.begin(), args.end(), argv.begin(), boost::bind(&std::string::c_str, _1));
- argv.push_back(0);
- QPID_LOG(debug, "ForkedBroker exec " << prog << ": " << args);
-
- execv(prog, const_cast<char* const*>(&argv[0]));
- QPID_LOG(critical, "execv failed to start broker: prog=\"" << prog << "\"; args=\"" << args << "\"; errno=" << errno << " (" << std::strerror(errno) << ")");
- ::exit(1);
- }
-}
-
-}} // namespace qpid::tests
diff --git a/qpid/cpp/src/tests/ForkedBroker.h b/qpid/cpp/src/tests/ForkedBroker.h
deleted file mode 100644
index 87e141a425..0000000000
--- a/qpid/cpp/src/tests/ForkedBroker.h
+++ /dev/null
@@ -1,82 +0,0 @@
-#ifndef TESTS_FORKEDBROKER_H
-#define TESTS_FORKEDBROKER_H
-
-
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-#include "qpid/Exception.h"
-#include "qpid/log/Statement.h"
-#include "qpid/broker/Broker.h"
-#include <boost/lexical_cast.hpp>
-#include <string>
-#include <stdio.h>
-#include <sys/wait.h>
-
-namespace qpid {
-namespace tests {
-
-/**
- * Class to fork a broker child process.
- *
- * For most tests a BrokerFixture may be more convenient as it starts
- * a broker in the same process which allows you to easily debug into
- * the broker.
- *
- * This is useful for tests that need to start multiple brokers where
- * those brokers can't coexist in the same process (e.g. for cluster
- * tests where CPG doesn't allow multiple group members in a single
- * process).
- *
- */
-class ForkedBroker {
- public:
- typedef std::vector<std::string> Args;
-
- // argv args are passed to broker.
- //
- // Special value "TMP_DATA_DIR" is substituted with a temporary
- // data directory for the broker.
- //
- ForkedBroker(const Args& argv);
- ~ForkedBroker();
-
- void kill(int sig=SIGINT);
- int wait(); // Wait for exit, return exit status.
- uint16_t getPort() { return port; }
- pid_t getPID() { return pid; }
- bool isRunning() { return running; }
-
- private:
-
- void init(const Args& args);
-
- bool running;
- int exitStatus;
-
- pid_t pid;
- int port;
- std::string dataDir;
-};
-
-}} // namespace qpid::tests
-
-#endif /*!TESTS_FORKEDBROKER_H*/
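The removed ForkedBroker hands the child broker's ephemeral port back to the parent through a pipe (see ForkedBroker::init in the hunk above): the child redirects stdout into the pipe before exec, and the parent reads the port number from the other end. A minimal, self-contained sketch of that fork-and-read-port pattern follows; the helper name forkAndReadPort and the error handling are illustrative, not part of the Qpid tree.

// Sketch of the fork+pipe port hand-off used by the removed ForkedBroker.
// The child writes its port to stdout (redirected into the pipe); the
// parent reads it back. Names and error handling are illustrative only.
#include <stdio.h>
#include <stdexcept>
#include <unistd.h>
#include <sys/types.h>

int forkAndReadPort(const char* prog, char* const argv[]) {
    int fds[2];
    if (::pipe(fds) < 0) throw std::runtime_error("pipe failed");
    pid_t pid = ::fork();
    if (pid < 0) throw std::runtime_error("fork failed");
    if (pid == 0) {                    // child: stdout -> pipe, then exec
        ::close(fds[0]);
        ::dup2(fds[1], 1);
        ::execv(prog, argv);
        ::_exit(1);                    // only reached if exec failed
    }
    ::close(fds[1]);                   // parent: read the port number
    FILE* f = ::fdopen(fds[0], "r");
    int port = 0;
    if (!f || ::fscanf(f, "%d", &port) != 1)
        throw std::runtime_error("could not read port from child");
    ::fclose(f);
    return port;
}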
diff --git a/qpid/cpp/src/tests/InitialStatusMap.cpp b/qpid/cpp/src/tests/InitialStatusMap.cpp
deleted file mode 100644
index 95806737e3..0000000000
--- a/qpid/cpp/src/tests/InitialStatusMap.cpp
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- *
- * Copyright (c) 2006 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-
-#include "unit_test.h"
-#include "test_tools.h"
-#include "qpid/cluster/InitialStatusMap.h"
-#include "qpid/framing/Uuid.h"
-#include <boost/assign.hpp>
-
-using namespace std;
-using namespace qpid::cluster;
-using namespace qpid::framing;
-using namespace qpid::framing::cluster;
-using namespace boost::assign;
-
-namespace qpid {
-namespace tests {
-
-QPID_AUTO_TEST_SUITE(InitialStatusMapTestSuite)
-
-typedef InitialStatusMap::Status Status;
-
-Status activeStatus(const Uuid& id=Uuid(), const MemberSet& ms=MemberSet(),
- const framing::Array& urls=framing::Array())
-{
- return Status(ProtocolVersion(), 0, true, id, STORE_STATE_NO_STORE, Uuid(),
- encodeMemberSet(ms), urls);
-}
-
-Status newcomerStatus(const Uuid& id=Uuid(), const MemberSet& ms=MemberSet(),
- const framing::Array& urls=framing::Array())
-{
- return Status(ProtocolVersion(), 0, false, id, STORE_STATE_NO_STORE, Uuid(),
- encodeMemberSet(ms), urls);
-}
-
-Status storeStatus(bool active, StoreState state, Uuid start=Uuid(), Uuid stop=Uuid(),
- const MemberSet& ms=MemberSet(), const framing::Array& urls=framing::Array())
-{
- return Status(ProtocolVersion(), 0, active, start, state, stop,
- encodeMemberSet(ms), urls);
-}
-
-QPID_AUTO_TEST_CASE(testFirstInCluster) {
- // Single member is first in cluster.
- InitialStatusMap map(MemberId(0), 1);
- Uuid id(true);
- BOOST_CHECK(!map.isComplete());
- MemberSet members = list_of(MemberId(0));
- map.configChange(members);
- BOOST_CHECK(!map.isComplete());
- map.received(MemberId(0), newcomerStatus(id, list_of<MemberId>(0)));
- BOOST_CHECK(map.isComplete());
- BOOST_CHECK(map.transitionToComplete());
- BOOST_CHECK(map.getElders().empty());
- BOOST_CHECK(!map.isUpdateNeeded());
- BOOST_CHECK_EQUAL(id, map.getClusterId());
-}
-
-QPID_AUTO_TEST_CASE(testJoinExistingCluster) {
- // Single member 0 joins existing cluster 1,2
- InitialStatusMap map(MemberId(0), 1);
- Uuid id(true);
- MemberSet members = list_of(MemberId(0))(MemberId(1))(MemberId(2));
- map.configChange(members);
- BOOST_CHECK(map.isResendNeeded());
- BOOST_CHECK(!map.isComplete());
- map.received(MemberId(0), newcomerStatus());
- map.received(MemberId(1), activeStatus(id));
- BOOST_CHECK(!map.isComplete());
- map.received(MemberId(2), activeStatus(id));
- BOOST_CHECK(map.isComplete());
- BOOST_CHECK(map.transitionToComplete());
- BOOST_CHECK_EQUAL(map.getElders(), list_of<MemberId>(1)(2));
- BOOST_CHECK(map.isUpdateNeeded());
- BOOST_CHECK_EQUAL(map.getClusterId(), id);
-
- // Check that transitionToComplete is reset.
- map.configChange(list_of<MemberId>(0)(1));
- BOOST_CHECK(!map.transitionToComplete());
-}
-
-QPID_AUTO_TEST_CASE(testMultipleFirstInCluster) {
- // Multiple members 0,1,2 join at same time.
- InitialStatusMap map(MemberId(1), 1); // self is 1
- Uuid id(true);
- MemberSet members = list_of(MemberId(0))(MemberId(1))(MemberId(2));
- map.configChange(members);
- BOOST_CHECK(map.isResendNeeded());
-
- // All new members
- map.received(MemberId(0), newcomerStatus(id, list_of<MemberId>(0)(1)(2)));
- map.received(MemberId(1), newcomerStatus(id, list_of<MemberId>(0)(1)(2)));
- map.received(MemberId(2), newcomerStatus(id, list_of<MemberId>(0)(1)(2)));
- BOOST_CHECK(!map.isResendNeeded());
- BOOST_CHECK(map.isComplete());
- BOOST_CHECK(map.transitionToComplete());
- BOOST_CHECK_EQUAL(map.getElders(), list_of(MemberId(2)));
- BOOST_CHECK(!map.isUpdateNeeded());
- BOOST_CHECK_EQUAL(map.getClusterId(), id);
-}
-
-QPID_AUTO_TEST_CASE(testMultipleJoinExisting) {
- // Multiple members 2,3 join simultaneously a cluster containing 0,1.
- InitialStatusMap map(MemberId(2), 1); // self is 2
- Uuid id(true);
- MemberSet members = list_of(MemberId(0))(MemberId(1))(MemberId(2))(MemberId(3));
- map.configChange(members);
- BOOST_CHECK(map.isResendNeeded());
- map.received(MemberId(0), activeStatus(id, list_of<MemberId>(0)));
- map.received(MemberId(1), newcomerStatus(id, list_of<MemberId>(0)(1)));
- map.received(MemberId(2), newcomerStatus(id, list_of<MemberId>(0)(1)(2)(3)));
- map.received(MemberId(3), newcomerStatus(id, list_of<MemberId>(0)(1)(2)(3)));
- BOOST_CHECK(!map.isResendNeeded());
- BOOST_CHECK(map.isComplete());
- BOOST_CHECK(map.transitionToComplete());
- BOOST_CHECK_EQUAL(map.getElders(), list_of<MemberId>(0)(1)(3));
- BOOST_CHECK(map.isUpdateNeeded());
- BOOST_CHECK_EQUAL(map.getClusterId(), id);
-}
-
-QPID_AUTO_TEST_CASE(testMembersLeave) {
- // Test that map completes if members leave rather than send status.
- InitialStatusMap map(MemberId(0), 1);
- Uuid id(true);
- map.configChange(list_of(MemberId(0))(MemberId(1))(MemberId(2)));
- map.received(MemberId(0), newcomerStatus());
- map.received(MemberId(1), activeStatus(id));
- BOOST_CHECK(!map.isComplete());
- map.configChange(list_of(MemberId(0))(MemberId(1))); // 2 left
- BOOST_CHECK(map.isComplete());
- BOOST_CHECK(map.transitionToComplete());
- BOOST_CHECK_EQUAL(map.getElders(), list_of(MemberId(1)));
- BOOST_CHECK_EQUAL(map.getClusterId(), id);
-}
-
-QPID_AUTO_TEST_CASE(testInteveningConfig) {
- // Multiple config changes arrive before we complete the map.
- InitialStatusMap map(MemberId(0), 1);
- Uuid id(true);
-
- map.configChange(list_of<MemberId>(0)(1));
- BOOST_CHECK(map.isResendNeeded());
- map.received(MemberId(0), newcomerStatus());
- BOOST_CHECK(!map.isComplete());
- BOOST_CHECK(!map.isResendNeeded());
- // New member 2 joins before we receive 1
- map.configChange(list_of<MemberId>(0)(1)(2));
- BOOST_CHECK(!map.isComplete());
- BOOST_CHECK(map.isResendNeeded());
- map.received(1, activeStatus(id));
- map.received(2, newcomerStatus());
- // We should not be complete as we haven't received 0 since new member joined
- BOOST_CHECK(!map.isComplete());
- BOOST_CHECK(!map.isResendNeeded());
-
- map.received(0, newcomerStatus());
- BOOST_CHECK(map.isComplete());
- BOOST_CHECK(map.transitionToComplete());
- BOOST_CHECK_EQUAL(map.getElders(), list_of<MemberId>(1));
- BOOST_CHECK_EQUAL(map.getClusterId(), id);
-}
-
-QPID_AUTO_TEST_CASE(testAllCleanNoUpdate) {
- InitialStatusMap map(MemberId(0), 3);
- map.configChange(list_of<MemberId>(0)(1)(2));
- map.received(MemberId(0), storeStatus(false, STORE_STATE_CLEAN_STORE));
- map.received(MemberId(1), storeStatus(false, STORE_STATE_CLEAN_STORE));
- map.received(MemberId(2), storeStatus(false, STORE_STATE_CLEAN_STORE));
- BOOST_CHECK(!map.isUpdateNeeded());
-}
-
-QPID_AUTO_TEST_CASE(testAllEmptyNoUpdate) {
- InitialStatusMap map(MemberId(0), 3);
- map.configChange(list_of<MemberId>(0)(1)(2));
- map.received(MemberId(0), storeStatus(false, STORE_STATE_EMPTY_STORE));
- map.received(MemberId(1), storeStatus(false, STORE_STATE_EMPTY_STORE));
- map.received(MemberId(2), storeStatus(false, STORE_STATE_EMPTY_STORE));
- BOOST_CHECK(map.isComplete());
- BOOST_CHECK(!map.isUpdateNeeded());
-}
-
-QPID_AUTO_TEST_CASE(testAllNoStoreNoUpdate) {
- InitialStatusMap map(MemberId(0), 3);
- map.configChange(list_of<MemberId>(0)(1)(2));
- map.received(MemberId(0), storeStatus(false, STORE_STATE_NO_STORE));
- map.received(MemberId(1), storeStatus(false, STORE_STATE_NO_STORE));
- map.received(MemberId(2), storeStatus(false, STORE_STATE_NO_STORE));
- BOOST_CHECK(map.isComplete());
- BOOST_CHECK(!map.isUpdateNeeded());
-}
-
-QPID_AUTO_TEST_CASE(testDirtyNeedUpdate) {
- InitialStatusMap map(MemberId(0), 3);
- map.configChange(list_of<MemberId>(0)(1)(2));
- map.received(MemberId(0), storeStatus(false, STORE_STATE_DIRTY_STORE));
- map.received(MemberId(1), storeStatus(false, STORE_STATE_CLEAN_STORE));
- map.received(MemberId(2), storeStatus(false, STORE_STATE_CLEAN_STORE));
- BOOST_CHECK(map.transitionToComplete());
- BOOST_CHECK(map.isUpdateNeeded());
-}
-
-QPID_AUTO_TEST_CASE(testEmptyNeedUpdate) {
- InitialStatusMap map(MemberId(0), 3);
- map.configChange(list_of<MemberId>(0)(1)(2));
- map.received(MemberId(0), storeStatus(false, STORE_STATE_EMPTY_STORE));
- map.received(MemberId(1), storeStatus(false, STORE_STATE_CLEAN_STORE));
- map.received(MemberId(2), storeStatus(false, STORE_STATE_CLEAN_STORE));
- BOOST_CHECK(map.transitionToComplete());
- BOOST_CHECK(map.isUpdateNeeded());
-}
-
-QPID_AUTO_TEST_CASE(testEmptyAlone) {
- InitialStatusMap map(MemberId(0), 1);
- map.configChange(list_of<MemberId>(0));
- map.received(MemberId(0), storeStatus(false, STORE_STATE_EMPTY_STORE));
- BOOST_CHECK(map.transitionToComplete());
- BOOST_CHECK(!map.isUpdateNeeded());
-}
-
-QPID_AUTO_TEST_SUITE_END()
-
-}} // namespace qpid::tests
diff --git a/qpid/cpp/src/tests/Makefile.am b/qpid/cpp/src/tests/Makefile.am
index 4184b5f38a..d035e1aaa8 100644
--- a/qpid/cpp/src/tests/Makefile.am
+++ b/qpid/cpp/src/tests/Makefile.am
@@ -108,8 +108,6 @@ unit_test_SOURCES= unit_test.cpp unit_test.h \
TopicExchangeTest.cpp \
TxBufferTest.cpp \
ConnectionOptions.h \
- ForkedBroker.h \
- ForkedBroker.cpp \
ManagementTest.cpp \
MessageReplayTracker.cpp \
ConsoleTest.cpp \
@@ -242,11 +240,6 @@ header_test_INCLUDES=$(PUBLIC_INCLUDES)
header_test_SOURCES=header_test.cpp TestOptions.h ConnectionOptions.h
header_test_LDADD=$(lib_client)
-check_PROGRAMS+=failover_soak
-failover_soak_INCLUDES=$(PUBLIC_INCLUDES)
-failover_soak_SOURCES=failover_soak.cpp ForkedBroker.h ForkedBroker.cpp
-failover_soak_LDADD=$(lib_client) $(lib_broker)
-
check_PROGRAMS+=declare_queues
declare_queues_INCLUDES=$(PUBLIC_INCLUDES)
declare_queues_SOURCES=declare_queues.cpp
@@ -319,7 +312,6 @@ EXTRA_DIST += \
ssl_test \
ping_broker \
config.null \
- cpg_check.sh.in \
run_federation_tests \
run_federation_sys_tests \
run_long_federation_sys_tests \
@@ -352,6 +344,7 @@ EXTRA_DIST += \
run_ha_tests \
ha_test.py \
ha_tests.py \
+ brokertest.py \
ha_store_tests.py \
test_env.ps1.in
@@ -375,8 +368,6 @@ EXTRA_DIST+= \
shared_perftest \
multiq_perftest \
topic_perftest \
- run_failover_soak \
- federated_cluster_test_with_node_failure \
sasl_test_setup.sh \
run_msg_group_tests_soak \
qpidd-empty.conf
diff --git a/qpid/cpp/src/tests/MessageUtils.h b/qpid/cpp/src/tests/MessageUtils.h
index c2eabd804d..5024f5b77c 100644
--- a/qpid/cpp/src/tests/MessageUtils.h
+++ b/qpid/cpp/src/tests/MessageUtils.h
@@ -1,3 +1,6 @@
+#ifndef TESTS_MESSAGEUTILS_H
+#define TESTS_MESSAGEUTILS_H
+
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
@@ -98,3 +101,5 @@ struct MessageUtils
};
}} // namespace qpid::tests
+
+#endif /*!TESTS_MESSAGEUTILS_H*/
diff --git a/qpid/cpp/src/tests/PartialFailure.cpp b/qpid/cpp/src/tests/PartialFailure.cpp
deleted file mode 100644
index 63ee28017a..0000000000
--- a/qpid/cpp/src/tests/PartialFailure.cpp
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-/**@file Tests for partial failure in a cluster.
- * Partial failure means some nodes experience a failure while others do not.
- * In this case the failed nodes must shut down.
- */
-
-#include "test_tools.h"
-#include "unit_test.h"
-#include "ClusterFixture.h"
-#include <boost/assign.hpp>
-#include <boost/algorithm/string.hpp>
-#include <boost/bind.hpp>
-
-namespace qpid {
-namespace tests {
-
-QPID_AUTO_TEST_SUITE(PartialFailureTestSuite)
-
-using namespace std;
-using namespace qpid;
-using namespace qpid::cluster;
-using namespace qpid::framing;
-using namespace qpid::client;
-using namespace qpid::client::arg;
-using namespace boost::assign;
-using broker::Broker;
-using boost::shared_ptr;
-
-// Timeout for tests that wait for messages
-const sys::Duration TIMEOUT=sys::TIME_SEC/4;
-
-static bool isLogOption(const std::string& s) { return boost::starts_with(s, "--log-enable"); }
-
-void updateArgs(ClusterFixture::Args& args, size_t index) {
- ostringstream clusterLib, testStoreLib, storeName;
- clusterLib << getLibPath("CLUSTER_LIB");
- testStoreLib << getLibPath("TEST_STORE_LIB");
- storeName << "s" << index;
- args.push_back("--auth");
- args.push_back("no");
- args.push_back("--no-module-dir");
- args.push_back("--load-module");
- args.push_back(clusterLib.str());
- args.push_back("--load-module");
- args.push_back(testStoreLib.str());
- args.push_back("--test-store-name");
- args.push_back(storeName.str());
- args.push_back("TMP_DATA_DIR");
-
- // These tests generate errors deliberately; disable error logging unless a log env var is set.
- if (!::getenv("QPID_TRACE") && !::getenv("QPID_LOG_ENABLE")) {
- remove_if(args.begin(), args.end(), isLogOption);
- args.push_back("--log-enable=critical+:DISABLED"); // hacky way to disable logs.
- }
-}
-
-Message pMessage(string data, string q) {
- Message msg(data, q);
- msg.getDeliveryProperties().setDeliveryMode(PERSISTENT);
- return msg;
-}
-
-void queueAndSub(Client& c) {
- c.session.queueDeclare(c.name, durable=true);
- c.subs.subscribe(c.lq, c.name);
-}
-
-// Handle near-simultaneous errors
-QPID_AUTO_TEST_CASE(testCoincidentErrors) {
- ClusterFixture cluster(2, updateArgs, -1);
- Client c0(cluster[0], "c0");
- Client c1(cluster[1], "c1");
-
- c0.session.queueDeclare("q", durable=true);
- {
- ScopedSuppressLogging allQuiet;
- async(c0.session).messageTransfer(content=pMessage("TEST_STORE_DO: s0[exception]", "q"));
- async(c1.session).messageTransfer(content=pMessage("TEST_STORE_DO: s1[exception]", "q"));
-
- int alive=0;
- try { Client c00(cluster[0], "c00"); ++alive; c00.close(); } catch (...) {}
- try { Client c11(cluster[1], "c11"); ++alive; c11.close(); } catch (...) {}
-
- BOOST_CHECK_EQUAL(alive, 1);
-
- // Close inside ScopedSuppressLogging to avoid warnings
- c0.close();
- c1.close();
- }
-}
-
-// Verify normal cluster-wide errors.
-QPID_AUTO_TEST_CASE(testNormalErrors) {
- // FIXME aconway 2009-04-10: Would like to put a scope just around
- // the statements expected to fail (in BOOST_CHECK_THROW) but that
- // sporadically lets out messages, possibly because they're in
- // the Connection thread.
-
- ClusterFixture cluster(3, updateArgs, -1);
- Client c0(cluster[0], "c0");
- Client c1(cluster[1], "c1");
- Client c2(cluster[2], "c2");
-
- {
- ScopedSuppressLogging allQuiet;
- queueAndSub(c0);
- c0.session.messageTransfer(content=Message("x", "c0"));
- BOOST_CHECK_EQUAL(c0.lq.get(TIMEOUT).getData(), "x");
-
- // Session error.
- BOOST_CHECK_THROW(c0.session.exchangeBind(), SessionException);
- c1.session.messageTransfer(content=Message("stay", "c0")); // Will stay on queue, session c0 is dead.
-
- // Connection error, kill c1 on all members.
- queueAndSub(c1);
- BOOST_CHECK_THROW(
- c1.session.messageTransfer(
- content=pMessage("TEST_STORE_DO: s0[exception] s1[exception] s2[exception] testNormalErrors", "c1")),
- ConnectionException);
- c2.session.messageTransfer(content=Message("stay", "c1")); // Will stay on queue, session/connection c1 is dead.
-
- BOOST_CHECK_EQUAL(3u, knownBrokerPorts(c2.connection, 3).size());
- BOOST_CHECK_EQUAL(c2.subs.get("c0", TIMEOUT).getData(), "stay");
- BOOST_CHECK_EQUAL(c2.subs.get("c1", TIMEOUT).getData(), "stay");
-
- // Close inside ScopedSuppressLogging to avoid warnings
- c0.close();
- c1.close();
- c2.close();
- }
-}
-
-
-// Test errors after a new member joins to verify frame-sequence-numbers are ok in update.
-QPID_AUTO_TEST_CASE(testErrorAfterJoin) {
- ClusterFixture cluster(1, updateArgs, -1);
- Client c0(cluster[0]);
- {
- ScopedSuppressLogging allQuiet;
-
- c0.session.queueDeclare("q", durable=true);
- c0.session.messageTransfer(content=pMessage("a", "q"));
-
- // Kill the new guy
- cluster.add();
- Client c1(cluster[1]);
- c0.session.messageTransfer(content=pMessage("TEST_STORE_DO: s1[exception] testErrorAfterJoin", "q"));
- BOOST_CHECK_THROW(c1.session.messageTransfer(content=pMessage("xxx", "q")), TransportFailure);
- BOOST_CHECK_EQUAL(1u, knownBrokerPorts(c0.connection, 1).size());
-
- // Kill the old guy
- cluster.add();
- Client c2(cluster[2]);
- c2.session.messageTransfer(content=pMessage("TEST_STORE_DO: s0[exception] testErrorAfterJoin2", "q"));
- BOOST_CHECK_THROW(c0.session.messageTransfer(content=pMessage("xxx", "q")), TransportFailure);
-
- BOOST_CHECK_EQUAL(1u, knownBrokerPorts(c2.connection, 1).size());
-
- // Close inside ScopedSuppressLogging to avoid warnings
- c0.close();
- c1.close();
- c2.close();
- }
-}
-
-// Test that if one member fails and others do not, the failed member leaves the cluster.
-QPID_AUTO_TEST_CASE(testSinglePartialFailure) {
- ClusterFixture cluster(3, updateArgs, -1);
- Client c0(cluster[0], "c0");
- Client c1(cluster[1], "c1");
- Client c2(cluster[2], "c2");
-
- {
- ScopedSuppressLogging allQuiet;
-
- c0.session.queueDeclare("q", durable=true);
- c0.session.messageTransfer(content=pMessage("a", "q"));
- // Cause partial failure on c1
- c0.session.messageTransfer(content=pMessage("TEST_STORE_DO: s1[exception] testSinglePartialFailure", "q"));
- BOOST_CHECK_THROW(c1.session.queueQuery("q"), TransportFailure);
-
- c0.session.messageTransfer(content=pMessage("b", "q"));
- BOOST_CHECK_EQUAL(c0.session.queueQuery("q").getMessageCount(), 3u);
- BOOST_CHECK_EQUAL(2u, knownBrokerPorts(c0.connection, 2).size());
-
- // Cause partial failure on c2
- c0.session.messageTransfer(content=pMessage("TEST_STORE_DO: s2[exception] testSinglePartialFailure2", "q"));
- BOOST_CHECK_THROW(c2.session.queueQuery("q"), TransportFailure);
-
- c0.session.messageTransfer(content=pMessage("c", "q"));
- BOOST_CHECK_EQUAL(c0.session.queueQuery("q").getMessageCount(), 5u);
- BOOST_CHECK_EQUAL(1u, knownBrokerPorts(c0.connection, 1).size());
-
- // Close inside ScopedSuppressLogging to avoid warnings
- c0.close();
- c1.close();
- c2.close();
- }
-}
-
-// Test multiple partial failures: 2 fail, 2 pass
-QPID_AUTO_TEST_CASE(testMultiPartialFailure) {
- ClusterFixture cluster(4, updateArgs, -1);
- Client c0(cluster[0], "c0");
- Client c1(cluster[1], "c1");
- Client c2(cluster[2], "c2");
- Client c3(cluster[3], "c3");
-
- {
- ScopedSuppressLogging allQuiet;
-
- c0.session.queueDeclare("q", durable=true);
- c0.session.messageTransfer(content=pMessage("a", "q"));
-
- // Cause partial failure on c1, c2
- c0.session.messageTransfer(content=pMessage("TEST_STORE_DO: s1[exception] s2[exception] testMultiPartialFailure", "q"));
- BOOST_CHECK_THROW(c1.session.queueQuery("q"), TransportFailure);
- BOOST_CHECK_THROW(c2.session.queueQuery("q"), TransportFailure);
-
- c0.session.messageTransfer(content=pMessage("b", "q"));
- c3.session.messageTransfer(content=pMessage("c", "q"));
- BOOST_CHECK_EQUAL(c3.session.queueQuery("q").getMessageCount(), 4u);
- // FIXME aconway 2009-06-30: This check fails sporadically with 2 != 3.
- // It should pass reliably.
- // BOOST_CHECK_EQUAL(2u, knownBrokerPorts(c0.connection, 2).size());
-
- // Close inside ScopedSuppressLogging to avoid warnings
- c0.close();
- c1.close();
- c2.close();
- c3.close();
- }
-}
-
-/** FIXME aconway 2009-04-10:
- * The current approach to shutting down a process in test_store
- * sometimes leads to assertion failures and errors in the shut-down
- * process. Need a cleaner solution
- */
-#if 0
-QPID_AUTO_TEST_CASE(testPartialFailureMemberLeaves) {
- ClusterFixture cluster(2, updateArgs, -1);
- Client c0(cluster[0], "c0");
- Client c1(cluster[1], "c1");
-
- {
- ScopedSuppressLogging allQuiet;
-
- c0.session.queueDeclare("q", durable=true);
- c0.session.messageTransfer(content=pMessage("a", "q"));
-
- // Cause failure on member 0 and simultaneous crash on member 1.
- BOOST_CHECK_THROW(
- c0.session.messageTransfer(
- content=pMessage("TEST_STORE_DO: s0[exception] s1[exit_process] testPartialFailureMemberLeaves", "q")),
- ConnectionException);
- cluster.wait(1);
-
- Client c00(cluster[0], "c00"); // Old connection is dead.
- BOOST_CHECK_EQUAL(c00.session.queueQuery("q").getMessageCount(), 1u);
- BOOST_CHECK_EQUAL(1u, knownBrokerPorts(c00.connection, 1).size());
-
- // Close inside ScopedSuppressLogging to avoid warnings
- c0.close();
- }
-}
-#endif
-
-QPID_AUTO_TEST_SUITE_END()
-
-}} // namespace qpid::tests
diff --git a/qpid/cpp/src/tests/QueueTest.cpp b/qpid/cpp/src/tests/QueueTest.cpp
index 3dfe3863f4..1177bf7119 100644
--- a/qpid/cpp/src/tests/QueueTest.cpp
+++ b/qpid/cpp/src/tests/QueueTest.cpp
@@ -40,6 +40,7 @@
#include "qpid/framing/reply_exceptions.h"
#include "qpid/broker/QueueFlowLimit.h"
#include "qpid/broker/QueueSettings.h"
+#include "qpid/sys/Timer.h"
#include <iostream>
#include <vector>
diff --git a/qpid/cpp/src/tests/StoreStatus.cpp b/qpid/cpp/src/tests/StoreStatus.cpp
deleted file mode 100644
index 43d4cfd920..0000000000
--- a/qpid/cpp/src/tests/StoreStatus.cpp
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- *
- * Copyright (c) 2006 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-
-#include "unit_test.h"
-#include "test_tools.h"
-#include "qpid/cluster/StoreStatus.h"
-#include "qpid/framing/Uuid.h"
-#include <boost/assign.hpp>
-#include <boost/filesystem/operations.hpp>
-
-using namespace std;
-using namespace qpid::cluster;
-using namespace qpid::framing;
-using namespace qpid::framing::cluster;
-using namespace boost::assign;
-using namespace boost::filesystem;
-
-
-namespace qpid {
-namespace tests {
-
-QPID_AUTO_TEST_SUITE(StoreStatusTestSuite)
-
-const char* TEST_DIR = "StoreStatus.tmp";
-
-struct TestDir {
- TestDir() {
- remove_all(TEST_DIR);
- create_directory(TEST_DIR);
- }
- ~TestDir() {
- remove_all(TEST_DIR);
- }
-};
-
-QPID_AUTO_TEST_CASE(testLoadEmpty) {
- TestDir td;
- StoreStatus ss(TEST_DIR);
- BOOST_CHECK_EQUAL(ss.getState(), STORE_STATE_NO_STORE);
- BOOST_CHECK(!ss.getClusterId());
- BOOST_CHECK(!ss.getShutdownId());
- ss.load();
- BOOST_CHECK_EQUAL(ss.getState(), STORE_STATE_EMPTY_STORE);
- BOOST_CHECK(!ss.getShutdownId());
-}
-
-QPID_AUTO_TEST_CASE(testSaveLoadDirty) {
- TestDir td;
- Uuid clusterId = Uuid(true);
- StoreStatus ss(TEST_DIR);
- ss.load();
- ss.setClusterId(clusterId);
- ss.dirty();
- BOOST_CHECK_EQUAL(ss.getState(), STORE_STATE_DIRTY_STORE);
-
- StoreStatus ss2(TEST_DIR);
- ss2.load();
- BOOST_CHECK_EQUAL(ss2.getState(), STORE_STATE_DIRTY_STORE);
- BOOST_CHECK_EQUAL(ss2.getClusterId(), clusterId);
- BOOST_CHECK(!ss2.getShutdownId());
-}
-
-QPID_AUTO_TEST_CASE(testSaveLoadClean) {
- TestDir td;
- Uuid clusterId = Uuid(true);
- Uuid shutdownId = Uuid(true);
- StoreStatus ss(TEST_DIR);
- ss.load();
- ss.setClusterId(clusterId);
- ss.clean(shutdownId);
- BOOST_CHECK_EQUAL(ss.getState(), STORE_STATE_CLEAN_STORE);
-
- StoreStatus ss2(TEST_DIR);
- ss2.load();
- BOOST_CHECK_EQUAL(ss2.getState(), STORE_STATE_CLEAN_STORE);
- BOOST_CHECK_EQUAL(ss2.getClusterId(), clusterId);
- BOOST_CHECK_EQUAL(ss2.getShutdownId(), shutdownId);
-}
-
-QPID_AUTO_TEST_CASE(testMarkDirty) {
- // Save clean then mark to dirty.
- TestDir td;
- Uuid clusterId = Uuid(true);
- Uuid shutdownId = Uuid(true);
- StoreStatus ss(TEST_DIR);
- ss.load();
- ss.setClusterId(clusterId);
- ss.dirty();
- ss.clean(shutdownId);
- ss.dirty();
-
- StoreStatus ss2(TEST_DIR);
- ss2.load();
- BOOST_CHECK_EQUAL(ss2.getState(), STORE_STATE_DIRTY_STORE);
- BOOST_CHECK_EQUAL(ss2.getClusterId(), clusterId);
- BOOST_CHECK(!ss2.getShutdownId());
-}
-
-QPID_AUTO_TEST_SUITE_END()
-
- }} // namespace qpid::tests
diff --git a/qpid/cpp/src/tests/Variant.cpp b/qpid/cpp/src/tests/Variant.cpp
index 40f1c0cf75..6d629bbb4a 100644
--- a/qpid/cpp/src/tests/Variant.cpp
+++ b/qpid/cpp/src/tests/Variant.cpp
@@ -135,6 +135,16 @@ QPID_AUTO_TEST_CASE(testConversionsFromString)
BOOST_CHECK_EQUAL(0, value.asInt16());
BOOST_CHECK_EQUAL(0u, value.asUint16());
+ value = "-Blah";
+ BOOST_CHECK_THROW(value.asUint16(), InvalidConversion);
+ BOOST_CHECK_THROW(value.asInt16(), InvalidConversion);
+ BOOST_CHECK_THROW(value.asUint32(), InvalidConversion);
+ BOOST_CHECK_THROW(value.asInt32(), InvalidConversion);
+ BOOST_CHECK_THROW(value.asUint64(), InvalidConversion);
+ BOOST_CHECK_THROW(value.asInt64(), InvalidConversion);
+ BOOST_CHECK_THROW(value.asFloat(), InvalidConversion);
+ BOOST_CHECK_THROW(value.asDouble(), InvalidConversion);
+
value = "-000";
BOOST_CHECK_EQUAL(0, value.asInt16());
BOOST_CHECK_EQUAL(0u, value.asUint16());
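The checks added to Variant.cpp above assert that a non-numeric string such as "-Blah" raises InvalidConversion from every numeric accessor. A small stand-alone sketch of that behaviour, assuming the public qpid::types::Variant header; catching std::exception is a simplification, since the tests check for InvalidConversion specifically.

// Sketch only: exercise the string-to-number conversion behaviour that
// the new Variant.cpp checks assert. Assumes the public Variant header.
#include <iostream>
#include <exception>
#include "qpid/types/Variant.h"

int main() {
    qpid::types::Variant value;
    value = "-Blah";                       // not a number
    try {
        value.asInt16();                   // expected to throw
        std::cout << "unexpected: conversion succeeded" << std::endl;
    } catch (const std::exception& e) {    // the tests expect InvalidConversion
        std::cout << "rejected as expected: " << e.what() << std::endl;
    }
    return 0;
}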
diff --git a/qpid/cpp/src/tests/benchmark b/qpid/cpp/src/tests/benchmark
deleted file mode 100755
index c075837847..0000000000
--- a/qpid/cpp/src/tests/benchmark
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/sh
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# A basic "benchmark" to generate performance samples of throughput
-# and latency against a single cluster member while they are replicating.
-#
-# Must be run in the qpid src/tests build directory.
-#
-
-usage() {
-cat <<EOF
-Usage: $0 [options] -- client hosts --- broker hosts
-Read the script for options.
-EOF
-}
-# Defaults
-TESTDIR=${TESTDIR:-$PWD} # Absolute path to test exes on all hosts.
-SCRIPTDIR=${SCRIPTDIR:-`dirname $0`} # Path to local test scripts directory.
-SAMPLES=10 # Runs of each test.
-COUNT=${COUNT:-10000} # Count for pub/sub tests.
-SIZE=${SIZE:-600} # Size of messages
-ECHO=${ECHO:-1000} # Count for echo test.
-NSUBS=${NSUBS:-4}
-NPUBS=${NPUBS:-4}
-
-collect() { eval $COLLECT=\""\$$COLLECT $*"\"; }
-COLLECT=ARGS
-while test $# -gt 0; do
- case $1 in
- --testdir) TESTDIR=$2 ; shift 2 ;;
- --samples) SAMPLES=$2 ; shift 2 ;;
- --count) COUNT=$2 ; shift 2 ;;
- --echos) ECHO=$2 ; shift 2 ;;
- --size) SIZE=$2 ; shift 2 ;;
- --nsubs) NSUBS=$2 ; shift 2 ;;
- --npubs) NPUBS=$2 ; shift 2 ;;
- --) COLLECT=CLIENTARG; shift ;;
- ---) COLLECT=BROKERARG; shift;;
- *) collect $1; shift ;;
- esac
-done
-
-CLIENTS=${CLIENTARG:-$CLIENTS}
-BROKERS=${BROKERARG:-$BROKERS}
-test -z "$CLIENTS" && { echo "Must specify at least one client host."; exit 1; }
-test -z "$BROKERS" && { echo "Must specify at least one broker host."; exit 1; }
-
-export TESTDIR # For perfdist
-CLIENTS=($CLIENTS) # Convert to array
-BROKERS=($BROKERS)
-trap "rm -f $FILES" EXIT
-
-dosamples() {
- FILE=`mktemp`
- FILES="$FILES $FILE"
- TABS=`echo "$HEADING" | sed s'/[^ ]//g'`
- {
- echo "\"$*\"$TABS"
- echo "$HEADING"
- for (( i=0; i<$SAMPLES; ++i)) ; do echo "`$*`" ; done
- echo
- } | tee $FILE
-}
-
-HEADING="pub sub total Mb"
-dosamples $SCRIPTDIR/perfdist --size $SIZE --count $COUNT --nsubs $NSUBS --npubs $NPUBS -s -- ${CLIENTS[*]} --- ${BROKERS[*]}
-HEADING="pub"
-dosamples ssh -A ${CLIENTS[0]} $TESTDIR/publish --routing-key perftest0 --size $SIZE --count $COUNT -s -b ${BROKERS[0]}
-HEADING="sub"
-dosamples ssh -A ${CLIENTS[0]} $TESTDIR/consume --queue perftest0 -s --count $COUNT -b ${BROKERS[0]}
-HEADING="min max avg"
-dosamples ssh -A ${CLIENTS[0]} $TESTDIR/echotest --count $ECHO -s -b ${BROKERS[0]}
-
-echo
-echo "Tab separated spreadsheet (also saved as benchmark.tab):"
-echo
-
-echo "benchmark -- ${CLIENTS[*]} --- ${BROKERS[*]} " | tee benchmark.tab
-paste $FILES | tee -a benchmark.tab
diff --git a/qpid/cpp/src/tests/brokertest.py b/qpid/cpp/src/tests/brokertest.py
index 24f4bcadf9..70c145a51b 100644
--- a/qpid/cpp/src/tests/brokertest.py
+++ b/qpid/cpp/src/tests/brokertest.py
@@ -17,8 +17,7 @@
# under the License.
#
-# Support library for tests that start multiple brokers, e.g. cluster
-# or federation
+# Support library for tests that start multiple brokers, e.g. HA or federation
import os, signal, string, tempfile, subprocess, socket, threading, time, imp, re
import qpid, traceback, signal
@@ -382,8 +381,7 @@ class Broker(Popen):
if not retry(self.log_ready, timeout=timeout):
raise Exception(
"Timed out waiting for broker %s%s"%(self.name, error_line(self.log,5)))
- # Create a connection and a session. For a cluster broker this will
- # return after cluster init has finished.
+ # Create a connection and a session.
try:
c = self.connect(**kwargs)
try: c.session()
@@ -391,54 +389,6 @@ class Broker(Popen):
except Exception,e: raise RethrownException(
"Broker %s not responding: (%s)%s"%(self.name,e,error_line(self.log, 5)))
- def store_state(self):
- f = open(os.path.join(self.datadir, "cluster", "store.status"))
- try: uuids = f.readlines()
- finally: f.close()
- null_uuid="00000000-0000-0000-0000-000000000000\n"
- if len(uuids) < 2: return "unknown" # we looked while the file was being updated.
- if uuids[0] == null_uuid: return "empty"
- if uuids[1] == null_uuid: return "dirty"
- return "clean"
-
-class Cluster:
- """A cluster of brokers in a test."""
- # Client connection options for use in failover tests.
- CONNECTION_OPTIONS = "reconnect:true,reconnect-timeout:10,reconnect-urls-replace:true"
-
- _cluster_count = 0
-
- def __init__(self, test, count=0, args=[], expect=EXPECT_RUNNING, wait=True, show_cmd=False):
- self.test = test
- self._brokers=[]
- self.name = "cluster%d" % Cluster._cluster_count
- Cluster._cluster_count += 1
- # Use unique cluster name
- self.args = copy(args)
- self.args += [ "--cluster-name", "%s-%s:%d" % (self.name, socket.gethostname(), os.getpid()) ]
- self.args += [ "--log-enable=info+", "--log-enable=debug+:cluster"]
- assert BrokerTest.cluster_lib, "Cannot locate cluster plug-in"
- self.args += [ "--load-module", BrokerTest.cluster_lib ]
- self.start_n(count, expect=expect, wait=wait, show_cmd=show_cmd)
-
- def start(self, name=None, expect=EXPECT_RUNNING, wait=True, args=[], port=0, show_cmd=False):
- """Add a broker to the cluster. Returns the index of the new broker."""
- if not name: name="%s-%d" % (self.name, len(self._brokers))
- self._brokers.append(self.test.broker(self.args+args, name, expect, wait, port=port, show_cmd=show_cmd))
- return self._brokers[-1]
-
- def ready(self, timeout=30, **kwargs):
- for b in self: b.ready(**kwargs)
-
- def start_n(self, count, expect=EXPECT_RUNNING, wait=True, args=[], show_cmd=False):
- for i in range(count): self.start(expect=expect, wait=wait, args=args, show_cmd=show_cmd)
-
- # Behave like a list of brokers.
- def __len__(self): return len(self._brokers)
- def __getitem__(self,index): return self._brokers[index]
- def __iter__(self): return self._brokers.__iter__()
-
-
def browse(session, queue, timeout=0, transform=lambda m: m.content):
"""Return a list with the contents of each message on queue."""
r = session.receiver("%s;{mode:browse}"%(queue))
@@ -475,7 +425,6 @@ class BrokerTest(TestCase):
# Environment settings.
qpidd_exec = os.path.abspath(checkenv("QPIDD_EXEC"))
- cluster_lib = os.getenv("CLUSTER_LIB")
ha_lib = os.getenv("HA_LIB")
xml_lib = os.getenv("XML_LIB")
qpid_config_exec = os.getenv("QPID_CONFIG_EXEC")
@@ -527,11 +476,6 @@ class BrokerTest(TestCase):
raise RethrownException("Failed to start broker %s(%s): %s" % (b.name, b.log, e))
return b
- def cluster(self, count=0, args=[], expect=EXPECT_RUNNING, wait=True, show_cmd=False):
- """Create and return a cluster ready for use"""
- cluster = Cluster(self, count, args, expect=expect, wait=wait, show_cmd=show_cmd)
- return cluster
-
def browse(self, *args, **kwargs): browse(*args, **kwargs)
def assert_browse(self, *args, **kwargs): assert_browse(*args, **kwargs)
def assert_browse_retry(self, *args, **kwargs): assert_browse_retry(*args, **kwargs)
@@ -560,13 +504,16 @@ class StoppableThread(Thread):
join(self)
if self.error: raise self.error
+# Options for a client that wants to reconnect automatically.
+RECONNECT_OPTIONS="reconnect:true,reconnect-timeout:10,reconnect-urls-replace:true"
+
class NumberedSender(Thread):
"""
Thread to run a sender client and send numbered messages until stopped.
"""
def __init__(self, broker, max_depth=None, queue="test-queue",
- connection_options=Cluster.CONNECTION_OPTIONS,
+ connection_options=RECONNECT_OPTIONS,
failover_updates=True, url=None, args=[]):
"""
max_depth: enable flow control, ensure sent - received <= max_depth.
@@ -629,7 +576,7 @@ class NumberedReceiver(Thread):
sequentially numbered messages.
"""
def __init__(self, broker, sender=None, queue="test-queue",
- connection_options=Cluster.CONNECTION_OPTIONS,
+ connection_options=RECONNECT_OPTIONS,
failover_updates=True, url=None):
"""
sender: enable flow control. Call sender.received(n) for each message received.
@@ -678,31 +625,6 @@ class NumberedReceiver(Thread):
join(self)
self.check()
-class ErrorGenerator(StoppableThread):
- """
- Thread that continuously generates errors by trying to consume from
- a non-existent queue. For cluster regression tests, error handling
- caused issues in the past.
- """
-
- def __init__(self, broker):
- StoppableThread.__init__(self)
- self.broker=broker
- broker.test.cleanup_stop(self)
- self.start()
-
- def run(self):
- c = self.broker.connect_old()
- try:
- while not self.stopped:
- try:
- c.session(str(qpid.datatypes.uuid4())).message_subscribe(
- queue="non-existent-queue")
- assert(False)
- except qpid.session.SessionException: pass
- time.sleep(0.01)
- except: pass # Normal if broker is killed.
-
def import_script(path):
"""
Import executable script at path as a module.
diff --git a/qpid/cpp/src/tests/cluster_authentication_soak.cpp b/qpid/cpp/src/tests/cluster_authentication_soak.cpp
deleted file mode 100644
index a3271701c3..0000000000
--- a/qpid/cpp/src/tests/cluster_authentication_soak.cpp
+++ /dev/null
@@ -1,310 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <signal.h>
-#include <fcntl.h>
-
-#include <sys/wait.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-
-#include <string>
-#include <iostream>
-#include <sstream>
-#include <vector>
-
-#include <boost/assign.hpp>
-
-#include "qpid/framing/Uuid.h"
-
-#include <ForkedBroker.h>
-#include <qpid/client/Connection.h>
-
-#include <sasl/sasl.h>
-
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
-
-
-
-using namespace std;
-using boost::assign::list_of;
-using namespace qpid::framing;
-using namespace qpid::client;
-
-
-namespace qpid {
-namespace tests {
-
-vector<pid_t> brokerPids;
-
-typedef vector<ForkedBroker *> brokerVector;
-
-
-
-
-
-int runSilent = 1;
-int newbiePort = 0;
-
-
-void
-makeClusterName ( string & s ) {
- stringstream ss;
- ss << "authenticationSoakCluster_" << Uuid(true).str();
- s = ss.str();
-}
-
-
-
-void
-startBroker ( brokerVector & brokers , int brokerNumber, string const & clusterName ) {
- stringstream prefix, clusterArg;
- prefix << "soak-" << brokerNumber;
- clusterArg << "--cluster-name=" << clusterName;
-
- std::vector<std::string> argv;
-
- argv.push_back ("../qpidd");
- argv.push_back ("--no-module-dir");
- argv.push_back ("--load-module=../.libs/cluster.so");
- argv.push_back (clusterArg.str());
- argv.push_back ("--cluster-username=zig");
- argv.push_back ("--cluster-password=zig");
- argv.push_back ("--cluster-mechanism=PLAIN");
- argv.push_back ("--sasl-config=./sasl_config");
- argv.push_back ("--auth=yes");
- argv.push_back ("--mgmt-enable=yes");
- argv.push_back ("--log-prefix");
- argv.push_back (prefix.str());
- argv.push_back ("--log-to-file");
- argv.push_back (prefix.str()+".log");
- argv.push_back ("TMP_DATA_DIR");
-
- ForkedBroker * newbie = new ForkedBroker (argv);
- newbiePort = newbie->getPort();
- brokers.push_back ( newbie );
-}
-
-
-
-
-bool
-runPerftest ( bool hangTest ) {
- stringstream portSs;
- portSs << newbiePort;
- string portStr = portSs.str();
- char const * path = "./qpid-perftest";
-
- vector<char const *> argv;
- argv.push_back ( "./qpid-perftest" );
- argv.push_back ( "-p" );
- argv.push_back ( portStr.c_str() );
- argv.push_back ( "--username" );
- argv.push_back ( "zig" );
- argv.push_back ( "--password" );
- argv.push_back ( "zig" );
- argv.push_back ( "--mechanism" );
- argv.push_back ( "DIGEST-MD5" );
- argv.push_back ( "--count" );
- argv.push_back ( "20000" );
- argv.push_back ( 0 );
-
- pid_t pid = fork();
-
- if ( ! pid ) {
- int i=open("/dev/null",O_RDWR);
- dup2 ( i, fileno(stdout) );
- dup2 ( i, fileno(stderr) );
-
- execv ( path, const_cast<char * const *>(&argv[0]) );
- // The exec failed: we are still in the child process.
- perror ( "error running qpid-perftest: " );
- return false;
- }
- else {
- if ( hangTest ) {
- if ( ! runSilent )
- cerr << "Pausing perftest " << pid << endl;
- kill ( pid, 19 );
- }
-
- struct timeval startTime,
- currentTime,
- duration;
-
- gettimeofday ( & startTime, 0 );
-
- while ( 1 ) {
- sleep ( 2 );
- int status;
- int returned_pid = waitpid ( pid, &status, WNOHANG );
- if ( returned_pid == pid ) {
- int exit_status = WEXITSTATUS(status);
- if ( exit_status ) {
- cerr << "qpid-perftest failed. exit_status was: " << exit_status << endl;
- return false;
- }
- else {
- return true; // qpid-perftest succeeded.
- }
- }
- else { // qpid-perftest has not yet completed.
- gettimeofday ( & currentTime, 0 );
- timersub ( & currentTime, & startTime, & duration );
- if ( duration.tv_sec > 60 ) {
- kill ( pid, 9 );
- cerr << "qpid-perftest pid " << pid << " hanging: killed.\n";
- return false;
- }
- }
- }
-
- }
-}
-
-
-
-bool
-allBrokersAreAlive ( brokerVector & brokers ) {
- for ( unsigned int i = 0; i < brokers.size(); ++ i )
- if ( ! brokers[i]->isRunning() )
- return false;
-
- return true;
-}
-
-
-
-
-
-void
-killAllBrokers ( brokerVector & brokers ) {
- for ( unsigned int i = 0; i < brokers.size(); ++ i ) {
- brokers[i]->kill ( 9 );
- }
-}
-
-
-
-
-void
-killOneBroker ( brokerVector & brokers ) {
- int doomedBroker = getpid() % brokers.size();
- cout << "Killing broker " << brokers[doomedBroker]->getPID() << endl;
- brokers[doomedBroker]->kill ( 9 );
- sleep ( 2 );
-}
-
-
-
-
-}} // namespace qpid::tests
-
-using namespace qpid::tests;
-
-
-
-/*
- * Please note that this test has self-test capability.
- * It is intended to detect
- * 1. perftest hangs.
- * 2. broker deaths.
- * Both of these conditions can be forced when running manually
- * to ensure that the test really does detect them.
- * See command-line arguments 3 and 4.
- */
-int
-main ( int argc, char ** argv )
-{
- // I need the SASL_PATH_TYPE_CONFIG feature, which did not appear until SASL 2.1.22
-#if (SASL_VERSION_FULL < ((2<<16)|(1<<8)|22))
- cout << "Skipping SASL test, SASL version too low." << endl;
- return 0;
-#endif
-
- int n_iterations = argc > 1 ? atoi(argv[1]) : 1;
- runSilent = argc > 2 ? atoi(argv[2]) : 1; // default to silent
- int killBroker = argc > 3 ? atoi(argv[3]) : 0; // Force the kill of one broker.
- int hangTest = argc > 4 ? atoi(argv[4]) : 0; // Force the first perftest to hang.
- int n_brokers = 3;
- brokerVector brokers;
-
- srand ( getpid() );
- string clusterName;
- makeClusterName ( clusterName );
- for ( int i = 0; i < n_brokers; ++ i ) {
- startBroker ( brokers, i, clusterName );
- }
-
- sleep ( 3 );
-
- /* Run all qpid-perftest iterations, and only then check for brokers
- * still being up. If you just want a quick check for the failure
- * mode in which a single iteration would kill all brokers except
- * the client-connected one, just run it with the iterations arg
- * set to 1.
- */
- for ( int iteration = 0; iteration < n_iterations; ++ iteration ) {
- if ( ! runPerftest ( hangTest ) ) {
- if ( ! runSilent )
- cerr << "qpid-perftest " << iteration << " failed.\n";
- return 1;
- }
- if ( ! ( iteration % 10 ) ) {
- if ( ! runSilent )
- cerr << "qpid-perftest " << iteration << " complete. -------------- \n";
- }
- }
- if ( ! runSilent )
- cerr << "\nqpid-perftest " << n_iterations << " iterations complete. -------------- \n\n";
-
- /* If the command-line tells us to kill a broker, do
- * it now. Use this option to prove that this test
- * really can detect broker-deaths.
- */
- if ( killBroker ) {
- killOneBroker ( brokers );
- }
-
- if ( ! allBrokersAreAlive ( brokers ) ) {
- if ( ! runSilent )
- cerr << "not all brokers are alive.\n";
- killAllBrokers ( brokers );
- return 2;
- }
-
- killAllBrokers ( brokers );
- if ( ! runSilent )
- cout << "success.\n";
-
- return 0;
-}
-
-
-
diff --git a/qpid/cpp/src/tests/cluster_failover b/qpid/cpp/src/tests/cluster_failover
deleted file mode 100755
index 43170c731a..0000000000
--- a/qpid/cpp/src/tests/cluster_failover
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/sh
-# A simple manual failover test that sends a stream of numbered messages.
-# You can kill the connected broker and verify that the clients reconnect
-# and no messages are lost.
-
-URL=$1
-test -n "$URL" || { echo Usage: $0 URL ; exit 1; }
-SEND=$(mktemp /tmp/send.XXXXXXXXXX)
-RECV=$(mktemp /tmp/recv.XXXXXXXXXX)
-echo $SEND $RECV
-
-seq 1000000 > $SEND
-
-qpid-send -a 'cluster_failover;{create:always}' -b $URL --connection-options "{reconnect:true}" --send-rate 10 --content-stdin < $SEND &
-
-while msg=$(qpid-receive -m1 -f -a 'cluster_failover;{create:always}' -b $URL --connection-options "{reconnect:true,heartbeat:1}"); do
- echo -n $msg; date
-done
-wait
diff --git a/qpid/cpp/src/tests/cluster_python_tests b/qpid/cpp/src/tests/cluster_python_tests
deleted file mode 100755
index 25c7889246..0000000000
--- a/qpid/cpp/src/tests/cluster_python_tests
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Skip if cluster services not running.
-. cpg_check.sh
-cpg_enabled || exit 0
-
-FAILING=`dirname $0`/cluster_python_tests_failing.txt
-source `dirname $0`/python_tests
-
diff --git a/qpid/cpp/src/tests/cluster_python_tests_failing.txt b/qpid/cpp/src/tests/cluster_python_tests_failing.txt
deleted file mode 100644
index f8639d7b59..0000000000
--- a/qpid/cpp/src/tests/cluster_python_tests_failing.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-qpid_tests.broker_0_10.management.ManagementTest.test_purge_queue
-qpid_tests.broker_0_10.management.ManagementTest.test_connection_close
-qpid_tests.broker_0_10.message.MessageTests.test_ttl
-qpid_tests.broker_0_10.management.ManagementTest.test_broker_connectivity_oldAPI
diff --git a/qpid/cpp/src/tests/cluster_read_credit b/qpid/cpp/src/tests/cluster_read_credit
deleted file mode 100755
index 552ffee53b..0000000000
--- a/qpid/cpp/src/tests/cluster_read_credit
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Regression test for http://issues.apache.org/jira/browse/QPID-2086
-
-srcdir=`dirname $0`
-source cpg_check.sh
-cpg_enabled || exit 0
-
-$srcdir/start_cluster 1 --cluster-read-max=2 || exit 1
-trap $srcdir/stop_cluster EXIT
-seq 1 10000 | ./sender --port `cat cluster.ports` --routing-key no-such-queue
diff --git a/qpid/cpp/src/tests/cluster_test.cpp b/qpid/cpp/src/tests/cluster_test.cpp
deleted file mode 100644
index f2ccd0ba84..0000000000
--- a/qpid/cpp/src/tests/cluster_test.cpp
+++ /dev/null
@@ -1,1231 +0,0 @@
-/*
- *
- * Copyright (c) 2006 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "test_tools.h"
-#include "unit_test.h"
-#include "ForkedBroker.h"
-#include "BrokerFixture.h"
-#include "ClusterFixture.h"
-
-#include "qpid/client/Connection.h"
-#include "qpid/client/ConnectionSettings.h"
-#include "qpid/client/ConnectionAccess.h"
-#include "qpid/client/Session.h"
-#include "qpid/client/FailoverListener.h"
-#include "qpid/client/FailoverManager.h"
-#include "qpid/client/QueueOptions.h"
-#include "qpid/cluster/Cluster.h"
-#include "qpid/cluster/Cpg.h"
-#include "qpid/cluster/UpdateClient.h"
-#include "qpid/framing/AMQBody.h"
-#include "qpid/framing/Uuid.h"
-#include "qpid/framing/reply_exceptions.h"
-#include "qpid/framing/enum.h"
-#include "qpid/framing/MessageTransferBody.h"
-#include "qpid/log/Logger.h"
-#include "qpid/sys/Monitor.h"
-#include "qpid/sys/Thread.h"
-
-#include <boost/bind.hpp>
-#include <boost/shared_ptr.hpp>
-#include <boost/assign.hpp>
-
-#include <string>
-#include <iostream>
-#include <fstream>
-#include <iterator>
-#include <vector>
-#include <set>
-#include <algorithm>
-#include <iterator>
-
-
-using namespace std;
-using namespace qpid;
-using namespace qpid::cluster;
-using namespace qpid::framing;
-using namespace qpid::client;
-using namespace boost::assign;
-using broker::Broker;
-using boost::shared_ptr;
-
-namespace qpid {
-namespace tests {
-
-QPID_AUTO_TEST_SUITE(cluster_test)
-
-bool durableFlag = std::getenv("STORE_LIB") != 0;
-
-void prepareArgs(ClusterFixture::Args& args, const bool durableFlag = false) {
- ostringstream clusterLib;
- clusterLib << getLibPath("CLUSTER_LIB");
- args += "--auth", "no", "--no-module-dir", "--load-module", clusterLib.str();
- if (durableFlag)
- args += "--load-module", getLibPath("STORE_LIB"), "TMP_DATA_DIR";
- else
- args += "--no-data-dir";
-}
-
-ClusterFixture::Args prepareArgs(const bool durableFlag = false) {
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- return args;
-}
-
-// Timeout for tests that wait for messages
-const sys::Duration TIMEOUT=2*sys::TIME_SEC;
-
-
-ostream& operator<<(ostream& o, const cpg_name* n) {
- return o << Cpg::str(*n);
-}
-
-ostream& operator<<(ostream& o, const cpg_address& a) {
- return o << "(" << a.nodeid <<","<<a.pid<<","<<a.reason<<")";
-}
-
-template <class T>
-ostream& operator<<(ostream& o, const pair<T*, int>& array) {
- o << "{ ";
- ostream_iterator<cpg_address> i(o, " ");
- copy(array.first, array.first+array.second, i);
- o << "}";
- return o;
-}
-
-template <class C> set<int> makeSet(const C& c) {
- set<int> s;
- copy(c.begin(), c.end(), inserter(s, s.begin()));
- return s;
-}
-
-class Sender {
- public:
- Sender(boost::shared_ptr<ConnectionImpl> ci, uint16_t ch) : connection(ci), channel(ch) {}
- void send(const AMQBody& body, bool firstSeg, bool lastSeg, bool firstFrame, bool lastFrame) {
- AMQFrame f(body);
- f.setChannel(channel);
- f.setFirstSegment(firstSeg);
- f.setLastSegment(lastSeg);
- f.setFirstFrame(firstFrame);
- f.setLastFrame(lastFrame);
- connection->expand(f.encodedSize(), false);
- connection->handle(f);
- }
-
- private:
- boost::shared_ptr<ConnectionImpl> connection;
- uint16_t channel;
-};
-
-int64_t getMsgSequence(const Message& m) {
- return m.getMessageProperties().getApplicationHeaders().getAsInt64("qpid.msg_sequence");
-}
-
-Message ttlMessage(const string& data, const string& key, uint64_t ttl, bool durable = false) {
- Message m(data, key);
- m.getDeliveryProperties().setTtl(ttl);
- if (durable) m.getDeliveryProperties().setDeliveryMode(framing::PERSISTENT);
- return m;
-}
-
-Message makeMessage(const string& data, const string& key, bool durable = false) {
- Message m(data, key);
- if (durable) m.getDeliveryProperties().setDeliveryMode(framing::PERSISTENT);
- return m;
-}
-
-vector<string> browse(Client& c, const string& q, int n) {
- SubscriptionSettings browseSettings(
- FlowControl::messageCredit(n),
- ACCEPT_MODE_NONE,
- ACQUIRE_MODE_NOT_ACQUIRED,
- 0 // No auto-ack.
- );
- LocalQueue lq;
- c.subs.subscribe(lq, q, browseSettings);
- c.session.messageFlush(q);
- vector<string> result;
- for (int i = 0; i < n; ++i) {
- Message m;
- if (!lq.get(m, TIMEOUT))
- break;
- result.push_back(m.getData());
- }
- c.subs.getSubscription(q).cancel();
- return result;
-}
-
-ConnectionSettings aclSettings(int port, const std::string& id) {
- ConnectionSettings settings;
- settings.port = port;
- settings.mechanism = "PLAIN";
- settings.username = id;
- settings.password = id;
- return settings;
-}
-
-// An illegal frame body
-struct PoisonPill : public AMQBody {
- virtual uint8_t type() const { return 0xFF; }
- virtual void encode(Buffer& ) const {}
- virtual void decode(Buffer& , uint32_t=0) {}
- virtual uint32_t encodedSize() const { return 0; }
-
- virtual void print(std::ostream&) const {};
- virtual void accept(AMQBodyConstVisitor&) const {};
-
- virtual AMQMethodBody* getMethod() { return 0; }
- virtual const AMQMethodBody* getMethod() const { return 0; }
-
- /** Match if same type and same class/method ID for methods */
- static bool match(const AMQBody& , const AMQBody& ) { return false; }
- virtual boost::intrusive_ptr<AMQBody> clone() const { return new PoisonPill; }
-};
-
-QPID_AUTO_TEST_CASE(testBadClientData) {
- // Ensure that bad data on a client connection closes the
- // connection but does not stop the broker.
- ClusterFixture::Args args;
- prepareArgs(args, false);
- args += "--log-enable=critical"; // Supress expected errors
- ClusterFixture cluster(2, args, -1);
- Client c0(cluster[0]);
- Client c1(cluster[1]);
- boost::shared_ptr<client::ConnectionImpl> ci =
- client::ConnectionAccess::getImpl(c0.connection);
- AMQFrame poison(boost::intrusive_ptr<AMQBody>(new PoisonPill));
- ci->expand(poison.encodedSize(), false);
- ci->handle(poison);
- {
- ScopedSuppressLogging sl;
- BOOST_CHECK_THROW(c0.session.queueQuery("q0"), Exception);
- }
- Client c00(cluster[0]);
- BOOST_CHECK_EQUAL(c00.session.queueQuery("q00").getQueue(), "");
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q1").getQueue(), "");
-}
-
-QPID_AUTO_TEST_CASE(testAcl) {
- ofstream policyFile("cluster_test.acl");
- policyFile << "acl allow foo@QPID create queue name=foo" << endl
- << "acl allow foo@QPID create queue name=foo2" << endl
- << "acl deny foo@QPID create queue name=bar" << endl
- << "acl allow all all" << endl;
- policyFile.close();
- char cwd[1024];
- BOOST_CHECK(::getcwd(cwd, sizeof(cwd)));
- ostringstream aclLib;
- aclLib << getLibPath("ACL_LIB");
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- args += "--log-enable=critical"; // Supress expected errors
- args += "--acl-file", string(cwd) + "/cluster_test.acl",
- "--cluster-mechanism", "PLAIN",
- "--cluster-username", "cluster",
- "--cluster-password", "cluster",
- "--load-module", aclLib.str();
- ClusterFixture cluster(2, args, -1);
-
- Client c0(aclSettings(cluster[0], "c0"), "c0");
- Client c1(aclSettings(cluster[1], "c1"), "c1");
- Client foo(aclSettings(cluster[1], "foo"), "foo");
-
- foo.session.queueDeclare("foo", arg::durable=durableFlag);
- BOOST_CHECK_EQUAL(c0.session.queueQuery("foo").getQueue(), "foo");
-
- {
- ScopedSuppressLogging sl;
- BOOST_CHECK_THROW(foo.session.queueDeclare("bar", arg::durable=durableFlag), framing::UnauthorizedAccessException);
- }
- BOOST_CHECK(c0.session.queueQuery("bar").getQueue().empty());
- BOOST_CHECK(c1.session.queueQuery("bar").getQueue().empty());
-
- cluster.add();
- Client c2(aclSettings(cluster[2], "c2"), "c2");
- {
- ScopedSuppressLogging sl;
- BOOST_CHECK_THROW(foo.session.queueDeclare("bar", arg::durable=durableFlag), framing::UnauthorizedAccessException);
- }
- BOOST_CHECK(c2.session.queueQuery("bar").getQueue().empty());
-}
-
-QPID_AUTO_TEST_CASE(testMessageTimeToLive) {
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(2, args, -1);
- Client c0(cluster[0], "c0");
- Client c1(cluster[1], "c1");
- c0.session.queueDeclare("p", arg::durable=durableFlag);
- c0.session.queueDeclare("q", arg::durable=durableFlag);
- c0.session.messageTransfer(arg::content=ttlMessage("a", "q", 200, durableFlag));
- c0.session.messageTransfer(arg::content=makeMessage("b", "q", durableFlag));
- c0.session.messageTransfer(arg::content=ttlMessage("x", "p", 100000, durableFlag));
- c0.session.messageTransfer(arg::content=makeMessage("y", "p", durableFlag));
- cluster.add();
- Client c2(cluster[1], "c2");
-
- BOOST_CHECK_EQUAL(browse(c0, "p", 1), list_of<string>("x"));
- BOOST_CHECK_EQUAL(browse(c1, "p", 1), list_of<string>("x"));
- BOOST_CHECK_EQUAL(browse(c2, "p", 1), list_of<string>("x"));
-
- sys::usleep(200*1000);
- BOOST_CHECK_EQUAL(browse(c0, "q", 1), list_of<string>("b"));
- BOOST_CHECK_EQUAL(browse(c1, "q", 1), list_of<string>("b"));
- BOOST_CHECK_EQUAL(browse(c2, "q", 1), list_of<string>("b"));
-}
-
-QPID_AUTO_TEST_CASE(testSequenceOptions) {
- // Make sure the exchange qpid.msg_sequence property is properly replicated.
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c0(cluster[0], "c0");
- FieldTable ftargs;
- ftargs.setInt("qpid.msg_sequence", 1);
- c0.session.queueDeclare(arg::queue="q", arg::durable=durableFlag);
- c0.session.exchangeDeclare(arg::exchange="ex", arg::type="direct", arg::arguments=ftargs);
- c0.session.exchangeBind(arg::exchange="ex", arg::queue="q", arg::bindingKey="k");
- c0.session.messageTransfer(arg::content=makeMessage("1", "k", durableFlag), arg::destination="ex");
- c0.session.messageTransfer(arg::content=makeMessage("2", "k", durableFlag), arg::destination="ex");
- BOOST_CHECK_EQUAL(1, getMsgSequence(c0.subs.get("q", TIMEOUT)));
- BOOST_CHECK_EQUAL(2, getMsgSequence(c0.subs.get("q", TIMEOUT)));
-
- cluster.add();
- Client c1(cluster[1]);
- c1.session.messageTransfer(arg::content=makeMessage("3", "k", durableFlag), arg::destination="ex");
- BOOST_CHECK_EQUAL(3, getMsgSequence(c1.subs.get("q", TIMEOUT)));
-}
-
-QPID_AUTO_TEST_CASE(testTxTransaction) {
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c0(cluster[0], "c0");
- c0.session.queueDeclare(arg::queue="q", arg::durable=durableFlag);
- c0.session.messageTransfer(arg::content=makeMessage("A", "q", durableFlag));
- c0.session.messageTransfer(arg::content=makeMessage("B", "q", durableFlag));
-
- // Start a transaction that will commit.
- Session commitSession = c0.connection.newSession("commit");
- SubscriptionManager commitSubs(commitSession);
- commitSession.txSelect();
- commitSession.messageTransfer(arg::content=makeMessage("a", "q", durableFlag));
- commitSession.messageTransfer(arg::content=makeMessage("b", "q", durableFlag));
- BOOST_CHECK_EQUAL(commitSubs.get("q", TIMEOUT).getData(), "A");
-
- // Start a transaction that will roll back.
- Session rollbackSession = c0.connection.newSession("rollback");
- SubscriptionManager rollbackSubs(rollbackSession);
- rollbackSession.txSelect();
- rollbackSession.messageTransfer(arg::content=makeMessage("1", "q", durableFlag));
- Message rollbackMessage = rollbackSubs.get("q", TIMEOUT);
- BOOST_CHECK_EQUAL(rollbackMessage.getData(), "B");
-
- BOOST_CHECK_EQUAL(c0.session.queueQuery("q").getMessageCount(), 0u);
- // Add new member mid transaction.
- cluster.add();
- Client c1(cluster[1], "c1");
-
- // More transactional work
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q").getMessageCount(), 0u);
- rollbackSession.messageTransfer(arg::content=makeMessage("2", "q", durableFlag));
- commitSession.messageTransfer(arg::content=makeMessage("c", "q", durableFlag));
- rollbackSession.messageTransfer(arg::content=makeMessage("3", "q", durableFlag));
-
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q").getMessageCount(), 0u);
-
- // Commit/roll back.
- commitSession.txCommit();
- rollbackSession.txRollback();
- rollbackSession.messageRelease(rollbackMessage.getId());
-
- // Verify queue status: just the committed messages and dequeues should remain.
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q").getMessageCount(), 4u);
- BOOST_CHECK_EQUAL(c1.subs.get("q", TIMEOUT).getData(), "B");
- BOOST_CHECK_EQUAL(c1.subs.get("q", TIMEOUT).getData(), "a");
- BOOST_CHECK_EQUAL(c1.subs.get("q", TIMEOUT).getData(), "b");
- BOOST_CHECK_EQUAL(c1.subs.get("q", TIMEOUT).getData(), "c");
-
- commitSession.close();
- rollbackSession.close();
-}
-
-QPID_AUTO_TEST_CASE(testUnacked) {
- // Verify replication of unacknowledged messages.
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c0(cluster[0], "c0");
-
- Message m;
-
- // Create unacked message: acquired but not accepted.
- SubscriptionSettings manualAccept(FlowControl::unlimited(), ACCEPT_MODE_EXPLICIT, ACQUIRE_MODE_PRE_ACQUIRED, 0);
- c0.session.queueDeclare("q1", arg::durable=durableFlag);
- c0.session.messageTransfer(arg::content=makeMessage("11","q1", durableFlag));
- LocalQueue q1;
- c0.subs.subscribe(q1, "q1", manualAccept);
- BOOST_CHECK_EQUAL(q1.get(TIMEOUT).getData(), "11"); // Acquired but not accepted
- BOOST_CHECK_EQUAL(c0.session.queueQuery("q1").getMessageCount(), 0u); // Gone from queue
-
- // Create unacked message: not acquired, accepted, or completed.
- SubscriptionSettings manualAcquire(FlowControl::unlimited(), ACCEPT_MODE_EXPLICIT, ACQUIRE_MODE_NOT_ACQUIRED, 0);
- c0.session.queueDeclare("q2", arg::durable=durableFlag);
- c0.session.messageTransfer(arg::content=makeMessage("21","q2", durableFlag));
- c0.session.messageTransfer(arg::content=makeMessage("22","q2", durableFlag));
- LocalQueue q2;
- c0.subs.subscribe(q2, "q2", manualAcquire);
- m = q2.get(TIMEOUT); // Not acquired or accepted, still on queue
- BOOST_CHECK_EQUAL(m.getData(), "21");
- BOOST_CHECK_EQUAL(c0.session.queueQuery("q2").getMessageCount(), 2u); // Not removed
- c0.subs.getSubscription("q2").acquire(m); // Acquire manually
- BOOST_CHECK_EQUAL(c0.session.queueQuery("q2").getMessageCount(), 1u); // Removed
- BOOST_CHECK_EQUAL(q2.get(TIMEOUT).getData(), "22"); // Not acquired or accepted, still on queue
- BOOST_CHECK_EQUAL(c0.session.queueQuery("q2").getMessageCount(), 1u); // 1 not acquired.
-
- // Create empty credit record: acquire and accept but don't complete.
- SubscriptionSettings manualComplete(FlowControl::messageWindow(1), ACCEPT_MODE_EXPLICIT, ACQUIRE_MODE_PRE_ACQUIRED, 1, MANUAL_COMPLETION);
- c0.session.queueDeclare("q3", arg::durable=durableFlag);
- c0.session.messageTransfer(arg::content=makeMessage("31", "q3", durableFlag));
- c0.session.messageTransfer(arg::content=makeMessage("32", "q3", durableFlag));
- LocalQueue q3;
- c0.subs.subscribe(q3, "q3", manualComplete);
- Message m31=q3.get(TIMEOUT);
- BOOST_CHECK_EQUAL(m31.getData(), "31"); // Automatically acquired & accepted but not completed.
- BOOST_CHECK_EQUAL(c0.session.queueQuery("q3").getMessageCount(), 1u);
-
- // Add new member while there are unacked messages.
- cluster.add();
- Client c1(cluster[1], "c1");
-
- // Check queue counts
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q1").getMessageCount(), 0u);
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q2").getMessageCount(), 1u);
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q3").getMessageCount(), 1u);
-
- // Complete the empty credit message; this should unblock the message behind it.
- BOOST_CHECK_THROW(q3.get(0), Exception);
- c0.session.markCompleted(SequenceSet(m31.getId()), true);
- BOOST_CHECK_EQUAL(q3.get(TIMEOUT).getData(), "32");
- BOOST_CHECK_EQUAL(c0.session.queueQuery("q3").getMessageCount(), 0u);
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q3").getMessageCount(), 0u);
-
- // Close the original session - unacked messages should be requeued.
- c0.session.close();
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q1").getMessageCount(), 1u);
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q2").getMessageCount(), 2u);
-
- BOOST_CHECK_EQUAL(c1.subs.get("q1", TIMEOUT).getData(), "11");
- BOOST_CHECK_EQUAL(c1.subs.get("q2", TIMEOUT).getData(), "21");
- BOOST_CHECK_EQUAL(c1.subs.get("q2", TIMEOUT).getData(), "22");
-}
-
-// FIXME aconway 2009-06-17: test for unimplemented feature, enable when implemented.
-void testUpdateTxState() {
- // Verify that we update transaction state correctly to new members.
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c0(cluster[0], "c0");
-
- // Do work in a transaction.
- c0.session.txSelect();
- c0.session.queueDeclare("q", arg::durable=durableFlag);
- c0.session.messageTransfer(arg::content=makeMessage("1","q", durableFlag));
- c0.session.messageTransfer(arg::content=makeMessage("2","q", durableFlag));
- Message m;
- BOOST_CHECK(c0.subs.get(m, "q", TIMEOUT));
- BOOST_CHECK_EQUAL(m.getData(), "1");
-
- // New member, TX not committed, c1 should see nothing.
- cluster.add();
- Client c1(cluster[1], "c1");
- BOOST_CHECK_EQUAL(c1.session.queueQuery(arg::queue="q").getMessageCount(), 0u);
-
- // After commit c1 should see the results of the tx.
- c0.session.txCommit();
- BOOST_CHECK_EQUAL(c1.session.queueQuery(arg::queue="q").getMessageCount(), 1u);
- BOOST_CHECK(c1.subs.get(m, "q", TIMEOUT));
- BOOST_CHECK_EQUAL(m.getData(), "2");
-
- // Another transaction with both members active.
- c0.session.messageTransfer(arg::content=makeMessage("3","q", durableFlag));
- BOOST_CHECK_EQUAL(c1.session.queueQuery(arg::queue="q").getMessageCount(), 0u);
- c0.session.txCommit();
- BOOST_CHECK_EQUAL(c1.session.queueQuery(arg::queue="q").getMessageCount(), 1u);
- BOOST_CHECK(c1.subs.get(m, "q", TIMEOUT));
- BOOST_CHECK_EQUAL(m.getData(), "3");
-}
-
-QPID_AUTO_TEST_CASE(testUpdateMessageBuilder) {
- // Verify that we update a partially received message to a new member.
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c0(cluster[0], "c0");
- c0.session.queueDeclare("q", arg::durable=durableFlag);
- Sender sender(ConnectionAccess::getImpl(c0.connection), c0.session.getChannel());
-
- // Send first 2 frames of message.
- MessageTransferBody transfer(
- ProtocolVersion(), string(), // default exchange.
- framing::message::ACCEPT_MODE_NONE,
- framing::message::ACQUIRE_MODE_PRE_ACQUIRED);
- sender.send(transfer, true, false, true, true);
- AMQHeaderBody header;
- header.get<DeliveryProperties>(true)->setRoutingKey("q");
- if (durableFlag)
- header.get<DeliveryProperties>(true)->setDeliveryMode(DELIVERY_MODE_PERSISTENT);
- else
- header.get<DeliveryProperties>(true)->setDeliveryMode(DELIVERY_MODE_NON_PERSISTENT);
- sender.send(header, false, false, true, true);
-
- // No reliable way to ensure the partial message has arrived
- // before we start the new broker, so we sleep.
- sys::usleep(2500);
- cluster.add();
-
- // Send final 2 frames of message.
- sender.send(AMQContentBody("ab"), false, true, true, false);
- sender.send(AMQContentBody("cd"), false, true, false, true);
-
- // Verify the message is enqueued correctly on the second member.
- Message m;
- Client c1(cluster[1], "c1");
- BOOST_CHECK(c1.subs.get(m, "q", TIMEOUT));
- BOOST_CHECK_EQUAL(m.getData(), "abcd");
- BOOST_CHECK_EQUAL(2u, knownBrokerPorts(c1.connection, 2).size());
-}
-
-QPID_AUTO_TEST_CASE(testConnectionKnownHosts) {
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c0(cluster[0], "c0");
- set<int> kb0 = knownBrokerPorts(c0.connection, 1);
- BOOST_CHECK_EQUAL(kb0.size(), 1u);
- BOOST_CHECK_EQUAL(kb0, makeSet(cluster));
-
- cluster.add();
- Client c1(cluster[1], "c1");
- set<int> kb1 = knownBrokerPorts(c1.connection, 2);
- kb0 = knownBrokerPorts(c0.connection, 2);
- BOOST_CHECK_EQUAL(kb1.size(), 2u);
- BOOST_CHECK_EQUAL(kb1, makeSet(cluster));
- BOOST_CHECK_EQUAL(kb1,kb0);
-
- cluster.add();
- Client c2(cluster[2], "c2");
- set<int> kb2 = knownBrokerPorts(c2.connection, 3);
- kb1 = knownBrokerPorts(c1.connection, 3);
- kb0 = knownBrokerPorts(c0.connection, 3);
- BOOST_CHECK_EQUAL(kb2.size(), 3u);
- BOOST_CHECK_EQUAL(kb2, makeSet(cluster));
- BOOST_CHECK_EQUAL(kb2,kb0);
- BOOST_CHECK_EQUAL(kb2,kb1);
-
- cluster.killWithSilencer(1,c1.connection,9);
- kb0 = knownBrokerPorts(c0.connection, 2);
- kb2 = knownBrokerPorts(c2.connection, 2);
- BOOST_CHECK_EQUAL(kb0.size(), 2u);
- BOOST_CHECK_EQUAL(kb0, kb2);
-}
-
-QPID_AUTO_TEST_CASE(testUpdateConsumers) {
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
-
- Client c0(cluster[0], "c0");
- c0.session.queueDeclare("p", arg::durable=durableFlag);
- c0.session.queueDeclare("q", arg::durable=durableFlag);
- c0.subs.subscribe(c0.lq, "q", FlowControl::zero());
- LocalQueue lp;
- c0.subs.subscribe(lp, "p", FlowControl::messageCredit(1));
- c0.session.sync();
-
- // Start new members
- cluster.add(); // Local
- Client c1(cluster[1], "c1");
- cluster.add();
- Client c2(cluster[2], "c2");
-
- // Transfer messages
- c0.session.messageTransfer(arg::content=makeMessage("aaa", "q", durableFlag));
-
- c0.session.messageTransfer(arg::content=makeMessage("bbb", "p", durableFlag));
- c0.session.messageTransfer(arg::content=makeMessage("ccc", "p", durableFlag));
-
- // Activate the subscription, ensure the message is removed from the queue on all members.
- c0.subs.setFlowControl("q", FlowControl::unlimited());
- Message m;
- BOOST_CHECK(c0.lq.get(m, TIMEOUT));
- BOOST_CHECK_EQUAL(m.getData(), "aaa");
- BOOST_CHECK_EQUAL(c0.session.queueQuery("q").getMessageCount(), 0u);
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q").getMessageCount(), 0u);
- BOOST_CHECK_EQUAL(c2.session.queueQuery("q").getMessageCount(), 0u);
-
- // Check second subscription's flow control: gets first message, not second.
- BOOST_CHECK(lp.get(m, TIMEOUT));
- BOOST_CHECK_EQUAL(m.getData(), "bbb");
- BOOST_CHECK_EQUAL(c0.session.queueQuery("p").getMessageCount(), 1u);
- BOOST_CHECK_EQUAL(c1.session.queueQuery("p").getMessageCount(), 1u);
- BOOST_CHECK_EQUAL(c2.session.queueQuery("p").getMessageCount(), 1u);
-
- BOOST_CHECK(c0.subs.get(m, "p", TIMEOUT));
- BOOST_CHECK_EQUAL(m.getData(), "ccc");
-
- // Kill the subscribing member, ensure further messages are not removed.
- cluster.killWithSilencer(0,c0.connection,9);
- BOOST_REQUIRE_EQUAL(knownBrokerPorts(c1.connection, 2).size(), 2u);
- for (int i = 0; i < 10; ++i) {
- c1.session.messageTransfer(arg::content=makeMessage("xxx", "q", durableFlag));
- BOOST_REQUIRE(c1.subs.get(m, "q", TIMEOUT));
- BOOST_REQUIRE_EQUAL(m.getData(), "xxx");
- }
-}
-
-// Test that message data and delivery properties are updated properly.
-QPID_AUTO_TEST_CASE(testUpdateMessages) {
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c0(cluster[0], "c0");
-
- // Create messages with different delivery properties
- c0.session.queueDeclare("q", arg::durable=durableFlag);
- c0.session.exchangeBind(arg::exchange="amq.fanout", arg::queue="q");
- c0.session.messageTransfer(arg::content=makeMessage("foo","q", durableFlag));
- c0.session.messageTransfer(arg::content=makeMessage("bar","q", durableFlag),
- arg::destination="amq.fanout");
-
- while (c0.session.queueQuery("q").getMessageCount() != 2)
- sys::usleep(1000); // Wait for message to show up on broker 0.
-
- // Add a new broker, it will catch up.
- cluster.add();
-
- // Do some work post-add
- c0.session.queueDeclare("p", arg::durable=durableFlag);
- c0.session.messageTransfer(arg::content=makeMessage("pfoo","p", durableFlag));
-
- // Do some work post-join
- BOOST_REQUIRE_EQUAL(knownBrokerPorts(c0.connection, 2).size(), 2u);
- c0.session.messageTransfer(arg::content=makeMessage("pbar","p", durableFlag));
-
- // Verify new brokers have state.
- Message m;
-
- Client c1(cluster[1], "c1");
-
- BOOST_CHECK(c1.subs.get(m, "q", TIMEOUT));
- BOOST_CHECK_EQUAL(m.getData(), "foo");
- BOOST_CHECK(m.getDeliveryProperties().hasExchange());
- BOOST_CHECK_EQUAL(m.getDeliveryProperties().getExchange(), "");
- BOOST_CHECK(c1.subs.get(m, "q", TIMEOUT));
- BOOST_CHECK_EQUAL(m.getData(), "bar");
- BOOST_CHECK(m.getDeliveryProperties().hasExchange());
- BOOST_CHECK_EQUAL(m.getDeliveryProperties().getExchange(), "amq.fanout");
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q").getMessageCount(), 0u);
-
- // Add another broker, don't wait for join - should be stalled till ready.
- cluster.add();
- Client c2(cluster[2], "c2");
- BOOST_CHECK(c2.subs.get(m, "p", TIMEOUT));
- BOOST_CHECK_EQUAL(m.getData(), "pfoo");
- BOOST_CHECK(c2.subs.get(m, "p", TIMEOUT));
- BOOST_CHECK_EQUAL(m.getData(), "pbar");
- BOOST_CHECK_EQUAL(c2.session.queueQuery("p").getMessageCount(), 0u);
-}
-
-QPID_AUTO_TEST_CASE(testWiringReplication) {
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(3, args, -1);
- Client c0(cluster[0]);
- BOOST_CHECK(c0.session.queueQuery("q").getQueue().empty());
- BOOST_CHECK(c0.session.exchangeQuery("ex").getType().empty());
- c0.session.queueDeclare("q", arg::durable=durableFlag);
- c0.session.exchangeDeclare("ex", arg::type="direct");
- c0.session.close();
- c0.connection.close();
- // Verify all brokers get wiring update.
- for (size_t i = 0; i < cluster.size(); ++i) {
- BOOST_MESSAGE("i == "<< i);
- Client c(cluster[i]);
- BOOST_CHECK_EQUAL("q", c.session.queueQuery("q").getQueue());
- BOOST_CHECK_EQUAL("direct", c.session.exchangeQuery("ex").getType());
- }
-}
-
-QPID_AUTO_TEST_CASE(testMessageEnqueue) {
- // Enqueue on one broker, dequeue on another.
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(2, args, -1);
- Client c0(cluster[0]);
- c0.session.queueDeclare("q", arg::durable=durableFlag);
- c0.session.messageTransfer(arg::content=makeMessage("foo", "q", durableFlag));
- c0.session.messageTransfer(arg::content=makeMessage("bar", "q", durableFlag));
- c0.session.close();
- Client c1(cluster[1]);
- Message msg;
- BOOST_CHECK(c1.subs.get(msg, "q", TIMEOUT));
- BOOST_CHECK_EQUAL(string("foo"), msg.getData());
- BOOST_CHECK(c1.subs.get(msg, "q", TIMEOUT));
- BOOST_CHECK_EQUAL(string("bar"), msg.getData());
-}
-
-QPID_AUTO_TEST_CASE(testMessageDequeue) {
- // Enqueue on one broker, dequeue on two others.
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(3, args, -1);
- Client c0(cluster[0], "c0");
- c0.session.queueDeclare("q", arg::durable=durableFlag);
- c0.session.messageTransfer(arg::content=makeMessage("foo", "q", durableFlag));
- c0.session.messageTransfer(arg::content=makeMessage("bar", "q", durableFlag));
-
- Message msg;
-
- // Dequeue on 2 others, ensure correct order.
- Client c1(cluster[1], "c1");
- BOOST_CHECK(c1.subs.get(msg, "q"));
- BOOST_CHECK_EQUAL("foo", msg.getData());
-
- Client c2(cluster[2], "c2");
- BOOST_CHECK(c1.subs.get(msg, "q"));
- BOOST_CHECK_EQUAL("bar", msg.getData());
-
- // Queue should be empty on all cluster members.
- BOOST_CHECK_EQUAL(0u, c0.session.queueQuery("q").getMessageCount());
- BOOST_CHECK_EQUAL(0u, c1.session.queueQuery("q").getMessageCount());
- BOOST_CHECK_EQUAL(0u, c2.session.queueQuery("q").getMessageCount());
-}
-
-QPID_AUTO_TEST_CASE(testDequeueWaitingSubscription) {
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(3, args, -1);
- Client c0(cluster[0]);
- BOOST_REQUIRE_EQUAL(knownBrokerPorts(c0.connection, 3).size(), 3u); // Wait for brokers.
-
- // First start a subscription.
- c0.session.queueDeclare("q", arg::durable=durableFlag);
- c0.subs.subscribe(c0.lq, "q", FlowControl::messageCredit(2));
-
- // Now send messages
- Client c1(cluster[1]);
- c1.session.messageTransfer(arg::content=makeMessage("foo", "q", durableFlag));
- c1.session.messageTransfer(arg::content=makeMessage("bar", "q", durableFlag));
-
- // Check they arrived
- Message m;
- BOOST_CHECK(c0.lq.get(m, TIMEOUT));
- BOOST_CHECK_EQUAL("foo", m.getData());
- BOOST_CHECK(c0.lq.get(m, TIMEOUT));
- BOOST_CHECK_EQUAL("bar", m.getData());
-
- // Queue should be empty on all cluster members.
- Client c2(cluster[2]);
- BOOST_CHECK_EQUAL(0u, c0.session.queueQuery("q").getMessageCount());
- BOOST_CHECK_EQUAL(0u, c1.session.queueQuery("q").getMessageCount());
- BOOST_CHECK_EQUAL(0u, c2.session.queueQuery("q").getMessageCount());
-}
-
-QPID_AUTO_TEST_CASE(queueDurabilityPropagationToNewbie)
-{
- /*
- Start with a single broker.
- Set up two queues: one durable, and one not.
- Add a new broker to the cluster.
- Make sure it has one durable and one non-durable queue.
- */
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c0(cluster[0]);
- c0.session.queueDeclare("durable_queue", arg::durable=true);
- c0.session.queueDeclare("non_durable_queue", arg::durable=false);
- cluster.add();
- Client c1(cluster[1]);
- QueueQueryResult durable_query = c1.session.queueQuery ( "durable_queue" );
- QueueQueryResult non_durable_query = c1.session.queueQuery ( "non_durable_queue" );
- BOOST_CHECK_EQUAL(durable_query.getQueue(), std::string("durable_queue"));
- BOOST_CHECK_EQUAL(non_durable_query.getQueue(), std::string("non_durable_queue"));
-
- BOOST_CHECK_EQUAL ( durable_query.getDurable(), true );
- BOOST_CHECK_EQUAL ( non_durable_query.getDurable(), false );
-}
-
-
-QPID_AUTO_TEST_CASE(testHeartbeatCancelledOnFailover)
-{
-
- struct Sender : FailoverManager::Command
- {
- std::string queue;
- std::string content;
-
- Sender(const std::string& q, const std::string& c) : queue(q), content(c) {}
-
- void execute(AsyncSession& session, bool)
- {
- session.messageTransfer(arg::content=makeMessage(content, queue, durableFlag));
- }
- };
-
- struct Receiver : FailoverManager::Command, MessageListener, qpid::sys::Runnable
- {
- FailoverManager& mgr;
- std::string queue;
- std::string expectedContent;
- qpid::client::Subscription subscription;
- qpid::sys::Monitor lock;
- bool ready, failed;
-
- Receiver(FailoverManager& m, const std::string& q, const std::string& c) : mgr(m), queue(q), expectedContent(c), ready(false), failed(false) {}
-
- void received(Message& message)
- {
- BOOST_CHECK_EQUAL(expectedContent, message.getData());
- subscription.cancel();
- }
-
- void execute(AsyncSession& session, bool)
- {
- session.queueDeclare(arg::queue=queue, arg::durable=durableFlag);
- SubscriptionManager subs(session);
- subscription = subs.subscribe(*this, queue);
- session.sync();
- setReady();
- subs.run();
- //cleanup:
- session.queueDelete(arg::queue=queue);
- }
-
- void run()
- {
- try {
- mgr.execute(*this);
- }
- catch (const std::exception& e) {
- BOOST_MESSAGE("Exception in mgr.execute: " << e.what());
- failed = true;
- }
- }
-
- void waitForReady()
- {
- qpid::sys::Monitor::ScopedLock l(lock);
- while (!ready) {
- lock.wait();
- }
- }
-
- void setReady()
- {
- qpid::sys::Monitor::ScopedLock l(lock);
- ready = true;
- lock.notify();
- }
- };
-
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(2, args, -1);
- ConnectionSettings settings;
- settings.port = cluster[1];
- settings.heartbeat = 1;
- FailoverManager fmgr(settings);
- Sender sender("my-queue", "my-data");
- Receiver receiver(fmgr, "my-queue", "my-data");
- qpid::sys::Thread runner(receiver);
- receiver.waitForReady();
- {
- ScopedSuppressLogging allQuiet; // suppress connection closed messages
- cluster.kill(1);
- //sleep for 2 secs to allow the heartbeat task to fire on the now dead connection:
- ::usleep(2*1000*1000);
- }
- fmgr.execute(sender);
- runner.join();
- BOOST_CHECK(!receiver.failed);
- fmgr.close();
-}
-
-QPID_AUTO_TEST_CASE(testPolicyUpdate) {
- //tests that the policy's internal state is accurate on newly
- //joined nodes
- ClusterFixture::Args args;
- args += "--log-enable", "critical";
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c1(cluster[0], "c1");
- {
- ScopedSuppressLogging allQuiet;
- QueueOptions options;
- options.setSizePolicy(REJECT, 0, 2);
- c1.session.queueDeclare("q", arg::arguments=options, arg::durable=durableFlag);
- c1.session.messageTransfer(arg::content=makeMessage("one", "q", durableFlag));
- cluster.add();
- Client c2(cluster[1], "c2");
- c2.session.messageTransfer(arg::content=makeMessage("two", "q", durableFlag));
-
- BOOST_CHECK_THROW(c2.session.messageTransfer(arg::content=makeMessage("three", "q", durableFlag)), framing::ResourceLimitExceededException);
-
- Message received;
- BOOST_CHECK(c1.subs.get(received, "q"));
- BOOST_CHECK_EQUAL(received.getData(), std::string("one"));
- BOOST_CHECK(c1.subs.get(received, "q"));
- BOOST_CHECK_EQUAL(received.getData(), std::string("two"));
- BOOST_CHECK(!c1.subs.get(received, "q"));
- }
-}
-
-QPID_AUTO_TEST_CASE(testExclusiveQueueUpdate) {
- //tests that exclusive queues are accurately replicated on newly
- //joined nodes
- ClusterFixture::Args args;
- args += "--log-enable", "critical";
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c1(cluster[0], "c1");
- {
- ScopedSuppressLogging allQuiet;
- c1.session.queueDeclare("q", arg::exclusive=true, arg::autoDelete=true, arg::alternateExchange="amq.fanout");
- cluster.add();
- Client c2(cluster[1], "c2");
- QueueQueryResult result = c2.session.queueQuery("q");
- BOOST_CHECK_EQUAL(result.getQueue(), std::string("q"));
- BOOST_CHECK(result.getExclusive());
- BOOST_CHECK(result.getAutoDelete());
- BOOST_CHECK(!result.getDurable());
- BOOST_CHECK_EQUAL(result.getAlternateExchange(), std::string("amq.fanout"));
- BOOST_CHECK_THROW(c2.session.queueDeclare(arg::queue="q", arg::exclusive=true, arg::passive=true), framing::ResourceLockedException);
- c1.session.close();
- c1.connection.close();
- c2.session = c2.connection.newSession();
- BOOST_CHECK_THROW(c2.session.queueDeclare(arg::queue="q", arg::passive=true), framing::NotFoundException);
- }
-}
-
-/**
- * Subscribes to the specified queue and acquires up to the specified
- * number of messages but does not accept or release them. These
- * messages are therefore 'locked' by the client's session.
- */
-Subscription lockMessages(Client& client, const std::string& queue, int count)
-{
- LocalQueue q;
- SubscriptionSettings settings(FlowControl::messageCredit(count));
- settings.autoAck = 0;
- Subscription sub = client.subs.subscribe(q, queue, settings);
- client.session.messageFlush(sub.getName());
- return sub;
-}
-
-/**
- * check that the specified queue contains the expected set of
- * messages (matched on content) for all nodes in the cluster
- */
-void checkQueue(ClusterFixture& cluster, const std::string& queue, const std::vector<std::string>& messages)
-{
- for (size_t i = 0; i < cluster.size(); i++) {
- Client client(cluster[i], (boost::format("%1%_%2%") % "c" % (i+1)).str());
- BOOST_CHECK_EQUAL(browse(client, queue, messages.size()), messages);
- client.close();
- }
-}
-
-void send(Client& client, const std::string& queue, int count, int start=1, const std::string& base="m",
- const std::string& lvqKey="")
-{
- for (int i = 0; i < count; i++) {
- Message message = makeMessage((boost::format("%1%_%2%") % base % (i+start)).str(), queue, durableFlag);
- if (!lvqKey.empty()) message.getHeaders().setString(QueueOptions::strLVQMatchProperty, lvqKey);
- client.session.messageTransfer(arg::content=message);
- }
-}
-
-QPID_AUTO_TEST_CASE(testRingQueueUpdate) {
- //tests that ring queues are accurately replicated on newly
- //joined nodes
- ClusterFixture::Args args;
- args += "--log-enable", "critical";
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c1(cluster[0], "c1");
- {
- ScopedSuppressLogging allQuiet;
- QueueOptions options;
- options.setSizePolicy(RING, 0, 5);
- c1.session.queueDeclare("q", arg::arguments=options, arg::durable=durableFlag);
- send(c1, "q", 5);
- lockMessages(c1, "q", 1);
- //add new node
- cluster.add();
- BOOST_CHECK_EQUAL(2u, knownBrokerPorts(c1.connection, 2).size());//wait till joined
- //send one more message
- send(c1, "q", 1, 6);
- //release locked message
- c1.close();
- //check state of queue on both nodes
- checkQueue(cluster, "q", list_of<string>("m_2")("m_3")("m_4")("m_5")("m_6"));
- }
-}
-
-QPID_AUTO_TEST_CASE(testRingQueueUpdate2) {
- //tests that ring queues are accurately replicated on newly joined
- //nodes; just like testRingQueueUpdate, but new node joins after
- //the sixth message has been sent.
- ClusterFixture::Args args;
- args += "--log-enable", "critical";
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c1(cluster[0], "c1");
- {
- ScopedSuppressLogging allQuiet;
- QueueOptions options;
- options.setSizePolicy(RING, 0, 5);
- c1.session.queueDeclare("q", arg::arguments=options, arg::durable=durableFlag);
- send(c1, "q", 5);
- lockMessages(c1, "q", 1);
- //send sixth message
- send(c1, "q", 1, 6);
- //add new node
- cluster.add();
- BOOST_CHECK_EQUAL(2u, knownBrokerPorts(c1.connection, 2).size());//wait till joined
- //release locked message
- c1.close();
- //check state of queue on both nodes
- checkQueue(cluster, "q", list_of<string>("m_2")("m_3")("m_4")("m_5")("m_6"));
- }
-}
-
-QPID_AUTO_TEST_CASE(testLvqUpdate) {
- //tests that lvqs are accurately replicated on newly joined nodes
- ClusterFixture::Args args;
- args += "--log-enable", "critical";
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c1(cluster[0], "c1");
- {
- ScopedSuppressLogging allQuiet;
- QueueOptions options;
- options.setOrdering(LVQ);
- c1.session.queueDeclare("q", arg::arguments=options, arg::durable=durableFlag);
-
- send(c1, "q", 5, 1, "a", "a");
- send(c1, "q", 2, 1, "b", "b");
- send(c1, "q", 1, 1, "c", "c");
- send(c1, "q", 1, 3, "b", "b");
-
- //add new node
- cluster.add();
- BOOST_CHECK_EQUAL(2u, knownBrokerPorts(c1.connection, 2).size());//wait till joined
-
- //check state of queue on both nodes
- checkQueue(cluster, "q", list_of<string>("a_5")("b_3")("c_1"));
- }
-}
-
-
-QPID_AUTO_TEST_CASE(testBrowsedLvqUpdate) {
- //tests that lvqs are accurately replicated on newly joined nodes
- //if the lvq state has been affected by browsers
- ClusterFixture::Args args;
- args += "--log-enable", "critical";
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c1(cluster[0], "c1");
- {
- ScopedSuppressLogging allQuiet;
- QueueOptions options;
- options.setOrdering(LVQ);
- c1.session.queueDeclare("q", arg::arguments=options, arg::durable=durableFlag);
-
- send(c1, "q", 1, 1, "a", "a");
- send(c1, "q", 2, 1, "b", "b");
- send(c1, "q", 1, 1, "c", "c");
- checkQueue(cluster, "q", list_of<string>("a_1")("b_2")("c_1"));
- send(c1, "q", 4, 2, "a", "a");
- send(c1, "q", 1, 3, "b", "b");
-
- //add new node
- cluster.add();
- BOOST_CHECK_EQUAL(2u, knownBrokerPorts(c1.connection, 2).size());//wait till joined
-
- //check state of queue on both nodes
- checkQueue(cluster, "q", list_of<string>("a_1")("b_2")("c_1")("a_5")("b_3"));
- }
-}
-
-QPID_AUTO_TEST_CASE(testRelease) {
- //tests that releasing a message that was unacked when one node
- //joined works correctly
- ClusterFixture::Args args;
- args += "--log-enable", "critical";
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c1(cluster[0], "c1");
- {
- ScopedSuppressLogging allQuiet;
- c1.session.queueDeclare("q", arg::durable=durableFlag);
- for (int i = 0; i < 5; i++) {
- c1.session.messageTransfer(arg::content=makeMessage((boost::format("%1%_%2%") % "m" % (i+1)).str(), "q", durableFlag));
- }
- //receive but don't ack a message
- LocalQueue lq;
- SubscriptionSettings lqSettings(FlowControl::messageCredit(1));
- lqSettings.autoAck = 0;
- Subscription lqSub = c1.subs.subscribe(lq, "q", lqSettings);
- c1.session.messageFlush("q");
- Message received;
- BOOST_CHECK(lq.get(received));
- BOOST_CHECK_EQUAL(received.getData(), std::string("m_1"));
-
- //add new node
- cluster.add();
-
- lqSub.release(lqSub.getUnaccepted());
-
- //check state of queue on both nodes
- vector<string> expected = list_of<string>("m_1")("m_2")("m_3")("m_4")("m_5");
- Client c3(cluster[0], "c3");
- BOOST_CHECK_EQUAL(browse(c3, "q", 5), expected);
- Client c2(cluster[1], "c2");
- BOOST_CHECK_EQUAL(browse(c2, "q", 5), expected);
- }
-}
-
-
-// Browse for 1 message with byte credit; return true if a message was
-// received, false if not.
-bool browseByteCredit(Client& c, const string& q, int n, Message& m) {
- SubscriptionSettings browseSettings(
- FlowControl(1, n, false), // 1 message, n bytes credit, no window
- ACCEPT_MODE_NONE,
- ACQUIRE_MODE_NOT_ACQUIRED,
- 0 // No auto-ack.
- );
- LocalQueue lq;
- Subscription s = c.subs.subscribe(lq, q, browseSettings);
- c.session.messageFlush(arg::destination=q, arg::sync=true);
- c.session.sync();
- c.subs.getSubscription(q).cancel();
- return lq.get(m, 0); // No timeout; the flush should push the message through.
-}
-
-// Ensure the cluster update preserves exact message size; use byte credit as the test.
-QPID_AUTO_TEST_CASE(testExactByteCredit) {
- ClusterFixture cluster(1, prepareArgs(), -1);
- Client c0(cluster[0], "c0");
- c0.session.queueDeclare("q");
- c0.session.messageTransfer(arg::content=Message("MyMessage", "q"));
- cluster.add();
-
- int size=36; // Size of message on broker: headers+body
- Client c1(cluster[1], "c1");
- Message m;
-
- // Ensure we get the message with exact credit.
- BOOST_CHECK(browseByteCredit(c0, "q", size, m));
- BOOST_CHECK(browseByteCredit(c1, "q", size, m));
- // and not with one byte less.
- BOOST_CHECK(!browseByteCredit(c0, "q", size-1, m));
- BOOST_CHECK(!browseByteCredit(c1, "q", size-1, m));
-}
-
-// Test that consumer positions are updated correctly.
-// Regression test for https://bugzilla.redhat.com/show_bug.cgi?id=541927
-//
-QPID_AUTO_TEST_CASE(testUpdateConsumerPosition) {
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c0(cluster[0], "c0");
-
- c0.session.queueDeclare("q", arg::durable=durableFlag);
- SubscriptionSettings settings;
- settings.autoAck = 0;
- // Set the acquire mode to 'not-acquired': the consumer moves along the queue
- // but does not acquire (remove) messages.
- settings.acquireMode = ACQUIRE_MODE_NOT_ACQUIRED;
- Subscription s = c0.subs.subscribe(c0.lq, "q", settings);
- c0.session.messageTransfer(arg::content=makeMessage("1", "q", durableFlag));
- BOOST_CHECK_EQUAL("1", c0.lq.get(TIMEOUT).getData());
-
- // Add another member, send/receive another message and acquire
- // the messages. With the bug, this creates an inconsistency
- // because the browse position was not updated to the new member.
- cluster.add();
- c0.session.messageTransfer(arg::content=makeMessage("2", "q", durableFlag));
- BOOST_CHECK_EQUAL("2", c0.lq.get(TIMEOUT).getData());
- s.acquire(s.getUnacquired());
- s.accept(s.getUnaccepted());
-
- // In the bug we now have 0 messages on cluster[0] and 1 message on cluster[1]
- // Subscribing on cluster[1] provokes an error that shuts down cluster[0]
- Client c1(cluster[1], "c1");
- Subscription s1 = c1.subs.subscribe(c1.lq, "q"); // Default auto-ack=1
- Message m;
- BOOST_CHECK(!c1.lq.get(m, TIMEOUT/10));
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q").getMessageCount(), 0u);
- BOOST_CHECK_EQUAL(c0.session.queueQuery("q").getMessageCount(), 0u);
-}
-
-QPID_AUTO_TEST_CASE(testFairsharePriorityDelivery) {
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c0(cluster[0], "c0");
-
- FieldTable arguments;
- arguments.setInt("x-qpid-priorities", 10);
- arguments.setInt("x-qpid-fairshare", 5);
- c0.session.queueDeclare("q", arg::durable=durableFlag, arg::arguments=arguments);
-
- //send messages of different priorities
- for (int i = 0; i < 20; i++) {
- Message msg = makeMessage((boost::format("msg-%1%") % i).str(), "q", durableFlag);
- msg.getDeliveryProperties().setPriority(i % 2 ? 9 : 5);
- c0.session.messageTransfer(arg::content=msg);
- }
-
- //pull off a few of the messages (the first four should be the top-priority messages)
- for (int i = 0; i < 4; i++) {
- BOOST_CHECK_EQUAL((boost::format("msg-%1%") % ((i*2)+1)).str(), c0.subs.get("q", TIMEOUT).getData());
- }
-
- // Add another member
- cluster.add();
- Client c1(cluster[1], "c1");
-
- //pull off some more messages
- BOOST_CHECK_EQUAL((boost::format("msg-%1%") % 9).str(), c0.subs.get("q", TIMEOUT).getData());
- BOOST_CHECK_EQUAL((boost::format("msg-%1%") % 0).str(), c1.subs.get("q", TIMEOUT).getData());
- BOOST_CHECK_EQUAL((boost::format("msg-%1%") % 2).str(), c0.subs.get("q", TIMEOUT).getData());
-
- //check queue has same content on both nodes
- BOOST_CHECK_EQUAL(browse(c0, "q", 12), browse(c1, "q", 12));
-}
-
-QPID_AUTO_TEST_SUITE_END()
-}} // namespace qpid::tests
diff --git a/qpid/cpp/src/tests/cluster_test_logs.py b/qpid/cpp/src/tests/cluster_test_logs.py
deleted file mode 100755
index 22f2470590..0000000000
--- a/qpid/cpp/src/tests/cluster_test_logs.py
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/usr/bin/env python
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Functions for comparing broker log files, used by cluster_tests.py.
-
-import os, os.path, re, glob
-from itertools import izip
-
-def split_log(log):
- """Split a broker log at checkpoints where a member joins.
- Return the set of checkpoints discovered."""
- checkpoint_re = re.compile("Member joined, frameSeq=([0-9]+), queue snapshot:")
- outfile = None
- checkpoints = []
- for l in open(log):
- match = checkpoint_re.search(l)
- if match:
- checkpoint = match.groups()[0]
- checkpoints.append(checkpoint)
- if outfile: outfile.close()
- outfile = open("%s.%s"%(log, checkpoint), 'w')
-
- if outfile: outfile.write(l)
- if outfile: outfile.close()
- return checkpoints
-
-def filter_log(log):
- """Filter the contents of a log file to remove data that is expected
- to differ between brokers in a cluster. Filtered log contents between
- the same checkpoints should match across the cluster."""
- out = open("%s.filter"%(log), 'w')
- # Lines to skip entirely, expected differences
- skip = "|".join([
- 'local connection', # Only on local broker
- 'UPDATER|UPDATEE', # Ignore update process
- 'stall for update|unstall, ignore update|cancelled offer .* unstall',
- 'caught up',
- 'active for links|Passivating links|Activating links',
- 'info Connecting: .*', # UpdateClient connection
- 'info Connection.* connected to', # UpdateClient connection
- 'warning Connection \\[[-0-9.: ]+\\] closed', # UpdateClient connection
- 'warning Broker closed connection: 200, OK',
- 'task late',
- 'task overran',
- 'warning CLOSING .* unsent data',
- 'Inter-broker link ', # ignore link state changes
- 'Updated link key from ', # ignore link state changes
- 'Running in a cluster, marking store',
- 'debug Sending keepalive signal to watchdog', # Watchdog timer thread
- 'last broker standing joined by 1 replicas, updating queue policies.',
- 'Connection .* timed out: closing', # heartbeat connection close
- "org.apache.qpid.broker:bridge:", # ignore bridge index
- "closed connection"
- ])
- # Regex to match a UUID
- uuid='\w\w\w\w\w\w\w\w-\w\w\w\w-\w\w\w\w-\w\w\w\w-\w\w\w\w\w\w\w\w\w\w\w\w'
- # Substitutions to remove expected differences
- subs = [
- (r'\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d ', ''), # Remove timestamp
- (r'cluster\([0-9.: ]*', 'cluster('), # Remove cluster node id
- (r' local\)| shadow\)', ')'), # Remove local/shadow indication
- (r'CATCHUP', 'READY'), # Treat catchup as equivalent to ready.
- (r'OFFER', 'READY'), # Treat offer as equivalent to ready.
- # System UUID expected to be different
- (r'(org.apache.qpid.broker:system[:(])%s(\)?)'%(uuid), r'\1UUID\2'),
-
- # TODO aconway 2010-12-20: review if these should be expected:
- (r' len=\d+', ' len=NN'), # buffer lengths
- (r' map={.*_object_name:([^,}]*)[,}].*', r' \1'), # V2 map - just keep name
- (r'\d+-\d+-\d+--\d+', 'X-X-X--X'), # V1 Object IDs
- ]
- # Substitutions to mask known issue: durable test shows inconsistent "changed stats for com.redhat.rhm.store:journal" messages.
- skip += '|Changed V[12] statistics com.redhat.rhm.store:journal'
- subs += [(r'to=console.obj.1.0.com.redhat.rhm.store.journal props=\d+ stats=\d+',
- 'to=console.obj.1.0.com.redhat.rhm.store.journal props=NN stats=NN')]
-
- skip_re = re.compile(skip)
- subs = [(re.compile(pattern), subst) for pattern, subst in subs]
- for l in open(log):
- if skip_re.search(l): continue
- for pattern,subst in subs: l = re.sub(pattern,subst,l)
- out.write(l)
- out.close()
-
-def verify_logs():
- """Compare log files from cluster brokers, verify that they correspond correctly."""
- for l in glob.glob("*.log"): filter_log(l)
- checkpoints = set()
- for l in glob.glob("*.filter"): checkpoints = checkpoints.union(set(split_log(l)))
- errors=[]
- for c in checkpoints:
- fragments = glob.glob("*.filter.%s"%(c))
- fragments.sort(reverse=True, key=os.path.getsize)
- while len(fragments) >= 2:
- a = fragments.pop(0)
- b = fragments[0]
- for ab in izip(open(a), open(b)):
- if ab[0] != ab[1]:
- errors.append("\n %s %s"%(a, b))
- break
- if errors:
- raise Exception("Files differ in %s"%(os.getcwd())+"".join(errors))
-
-# Can be run as a script.
-if __name__ == "__main__":
- verify_logs()
diff --git a/qpid/cpp/src/tests/cluster_test_scripts/README.txt b/qpid/cpp/src/tests/cluster_test_scripts/README.txt
deleted file mode 100644
index e861a2f397..0000000000
--- a/qpid/cpp/src/tests/cluster_test_scripts/README.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-Cluster test scripts.
-
-A set of scripts to start and stop cluster and test clients on
-multiple hosts using ssh.
-
-Pre-requisites: You must be
- - set up for password-free ssh access to the test hosts.
- - a member of the ais group on all the test hosts.
-
-Configuration:
-
-Copy defaults.sh to config.sh and edit the values as necessary.
-
-Test scripts:
-
-Test scripts use the functions in functions.sh to start & monitor
-cluster and clients.
-A test script can call other scripts.
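-
-Example session (illustrative; hosts and ports are taken from config.sh and
-the files written under $CLUSTER_HOME):
-
-  ./cluster_start    # start one broker per entry in BROKER_HOSTS
-  ./cluster_check    # verify that all cluster members are running
-  ./perftest 4 4     # run a distributed perftest (4 publishers, 4 subscribers)
-  ./cluster_stop     # stop all brokers in the cluster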
-
-
diff --git a/qpid/cpp/src/tests/cluster_test_scripts/cluster_check b/qpid/cpp/src/tests/cluster_test_scripts/cluster_check
deleted file mode 100755
index 05fcc1bcd2..0000000000
--- a/qpid/cpp/src/tests/cluster_test_scripts/cluster_check
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/sh
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Check that all members of a cluster are running
-
-source config.sh
-
-HOSTS=(`cat $CLUSTER_HOME/hosts`)
-PORTS=(`cat $CLUSTER_HOME/ports`)
-
-for ((i=0; i<${#HOSTS[*]}; ++i)); do
- host=${HOSTS[$i]}
- port=${PORTS[$i]}
- ssh $host "$QPIDD -cp $port" > /dev/null || {
- ret=1
- echo "ERROR: broker not running $host:$port"
- }
-done
-exit $ret
diff --git a/qpid/cpp/src/tests/cluster_test_scripts/cluster_start b/qpid/cpp/src/tests/cluster_test_scripts/cluster_start
deleted file mode 100755
index 8911358f7e..0000000000
--- a/qpid/cpp/src/tests/cluster_test_scripts/cluster_start
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/bin/sh
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Start a cluster
-#
-# Arguments: NAME HOST [host...]
-# Start a cluster called NAME with N nodes running on the given HOSTs
-# repeat the host name to run multiple brokers on one host. Use dynamic
-# ports.
-#
-# Log files, data directories and hosts/ports files are all stored under
-# $HOME/cluster_test/$NAME
-#
-
-source config.sh
-
-CLUSTER_NAME=`date +"${USER}_%F_%T"`
-HOSTS=($BROKER_HOSTS)
-for ((i = 0; i < ${#HOSTS[*]}; ++i)) ; do
- host=${HOSTS[$i]}
- datadir=$CLUSTER_HOME/broker$i
- log=$datadir/qpidd.log
- ssh $host "rm -rf $datadir; mkdir -p $datadir" || {
- echo "ERROR: can't make data dir $datadir"; exit 1
- }
- port=`ssh $host "echo $QPIDD -dp0 --cluster-name=$CLUSTER_NAME \
- --data-dir=$datadir \
- --log-to-file=$log --log-prefix=broker$i \
- $QPIDD_OPTS | newgrp ais"` || {
- error "ERROR: can't start broker $i on $host"; exit 1;
- }
- PORTS="$PORTS $port"
-done
-
-echo "$BROKER_HOSTS" > $CLUSTER_HOME/hosts
-echo "$PORTS" > $CLUSTER_HOME/ports
-
-`dirname $0`/cluster_check $NAME
diff --git a/qpid/cpp/src/tests/cluster_test_scripts/cluster_stop b/qpid/cpp/src/tests/cluster_test_scripts/cluster_stop
deleted file mode 100755
index 09aa8f3b21..0000000000
--- a/qpid/cpp/src/tests/cluster_test_scripts/cluster_stop
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/sh
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Stop the cluster.
-
-source config.sh
-
-HOSTS=(`cat $CLUSTER_HOME/hosts`)
-PORTS=(`cat $CLUSTER_HOME/ports`)
-
-for ((i=0; i<${#HOSTS[*]}; ++i)); do
- host=${HOSTS[$i]}
- port=${PORTS[$i]}
- ssh $host "$QPIDD -qp $port" > /dev/null || {
- ret=1
- echo "ERROR: stopping broker at $host:$port"
- }
-done
-
-exit $ret
diff --git a/qpid/cpp/src/tests/cluster_test_scripts/config_example.sh b/qpid/cpp/src/tests/cluster_test_scripts/config_example.sh
deleted file mode 100755
index d47c9a9c77..0000000000
--- a/qpid/cpp/src/tests/cluster_test_scripts/config_example.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-# Cluster configuration.
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# All output stored under $HOME/$CLUSTER_HOME.
-CLUSTER_HOME=$HOME/cluster_test
-
-# Hosts where brokers will be run. Repeat hostname to run multiple brokers on 1 host.
-BROKER_HOSTS="mrg22 mrg23 mrg24 mrg25 mrg26"
-
-# Hosts where clients will be run.
-CLIENT_HOSTS="$BROKER_HOSTS"
-
-# Paths to executables
-QPIDD=qpidd
-PERFTEST=perftest
-
-# Directory containing tests
-TESTDIR=/usr/bin
-
-# Options for qpidd, must be sufficient to load the cluster plugin.
-# Scripts will add --cluster-name, --daemon, --port and --log-to-file options here.
-QPIDD_OPTS=" \
---auth=no \
---log-enable=notice+ \
---log-enable=debug+:cluster \
-"
diff --git a/qpid/cpp/src/tests/cluster_test_scripts/perftest b/qpid/cpp/src/tests/cluster_test_scripts/perftest
deleted file mode 100755
index 984761eb5f..0000000000
--- a/qpid/cpp/src/tests/cluster_test_scripts/perftest
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/sh
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Run a distributed perftest against a cluster.
-# Args: npubs nsubs [perftest-options]
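-# Example (illustrative): "./perftest 2 2" runs 2 publishers and 2 subscribers,
-# spread round-robin across $CLIENT_HOSTS, against the brokers listed in
-# $CLUSTER_HOME/hosts and $CLUSTER_HOME/ports.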
-
-source config.sh
-
-NPUBS=${1:-4} ; shift
-NSUBS=${1:-4} ; shift
-OPTS="--npubs $NPUBS --nsubs $NSUBS $*"
-
-CLIENTS=($CLIENT_HOSTS)
-BROKERS=(`cat $CLUSTER_HOME/hosts`)
-PORTS=(`cat $CLUSTER_HOME/ports`)
-
-start() {
- client=${CLIENTS[i % ${#CLIENTS[*]}]}
- broker=${BROKERS[i % ${#BROKERS[*]}]}
- port=${PORTS[i % ${#PORTS[*]}]}
- ssh -n $client $PERFTEST $OPTS $* -b $broker -p $port &
- PIDS="$PIDS $!"
-}
-
-ssh ${CLIENTS[0]} $PERFTEST $OPTS --setup -b ${BROKERS[0]} -p${PORTS[0]}
-for (( i=0 ; i < $NPUBS ; ++i)); do start --publish; done
-for (( ; i < $NPUBS+$NSUBS ; ++i)); do start --subscribe; done
-ssh ${CLIENTS[0]} $PERFTEST $OPTS --control -b ${BROKERS[0]} -p${PORTS[0]}
-
-for pid in $PIDS; do
- wait $pid || echo "ERROR: client process $pid failed"
-done
-
-`dirname $0`/cluster_check
-
-
diff --git a/qpid/cpp/src/tests/cluster_tests.fail b/qpid/cpp/src/tests/cluster_tests.fail
deleted file mode 100644
index b28b04f643..0000000000
--- a/qpid/cpp/src/tests/cluster_tests.fail
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/qpid/cpp/src/tests/cluster_tests.py b/qpid/cpp/src/tests/cluster_tests.py
deleted file mode 100755
index 3c96b252df..0000000000
--- a/qpid/cpp/src/tests/cluster_tests.py
+++ /dev/null
@@ -1,1834 +0,0 @@
-#!/usr/bin/env python
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-import os, signal, sys, time, imp, re, subprocess, glob, random, logging
-import cluster_test_logs
-from qpid import datatypes, messaging
-from brokertest import *
-from qpid.harness import Skipped
-from qpid.messaging import Message, Empty, Disposition, REJECTED, util
-from threading import Thread, Lock, Condition
-from logging import getLogger
-from itertools import chain
-from tempfile import NamedTemporaryFile
-
-log = getLogger("qpid.cluster_tests")
-
-# Note: brokers that shut themselves down due to critical error during
-# normal operation will still have an exit code of 0. Brokers that
-# shut down because of an error found during initialization will exit with
-# a non-0 code. Hence the apparently inconsistent use of EXPECT_EXIT_OK
-# and EXPECT_EXIT_FAIL in some of the tests below.
-
-# TODO aconway 2010-03-11: resolve this - ideally any exit due to an error
-# should give non-0 exit status.
-
-# Import scripts as modules
-qpid_cluster=import_script(checkenv("QPID_CLUSTER_EXEC"))
-
-def readfile(filename):
- """Returns te content of file named filename as a string"""
- f = file(filename)
- try: return f.read()
- finally: f.close()
-
-class ShortTests(BrokerTest):
- """Short cluster functionality tests."""
-
- def test_message_replication(self):
- """Test basic cluster message replication."""
- # Start a cluster, send some messages to member 0.
- cluster = self.cluster(2)
- s0 = cluster[0].connect().session()
- s0.sender("q; {create:always}").send(Message("x"))
- s0.sender("q; {create:always}").send(Message("y"))
- s0.connection.close()
-
- # Verify messages available on member 1.
- s1 = cluster[1].connect().session()
- m = s1.receiver("q", capacity=1).fetch(timeout=1)
- s1.acknowledge()
- self.assertEqual("x", m.content)
- s1.connection.close()
-
- # Start member 2 and verify messages available.
- s2 = cluster.start().connect().session()
- m = s2.receiver("q", capacity=1).fetch(timeout=1)
- s2.acknowledge()
- self.assertEqual("y", m.content)
- s2.connection.close()
-
- def test_store_direct_update_match(self):
- """Verify that brokers stores an identical message whether they receive it
- direct from clients or during an update, no header or other differences"""
- cluster = self.cluster(0, args=["--load-module", self.test_store_lib])
- cluster.start(args=["--test-store-dump", "direct.dump"])
- # Try messages with various headers
- cluster[0].send_message("q", Message(durable=True, content="foobar",
- subject="subject",
- reply_to="reply_to",
- properties={"n":10}))
- # Try messages of different sizes
- for size in range(0,10000,100):
- cluster[0].send_message("q", Message(content="x"*size, durable=True))
- # Try sending via named exchange
- c = cluster[0].connect_old()
- s = c.session(str(qpid.datatypes.uuid4()))
- s.exchange_bind(exchange="amq.direct", binding_key="foo", queue="q")
- props = s.delivery_properties(routing_key="foo", delivery_mode=2)
- s.message_transfer(
- destination="amq.direct",
- message=qpid.datatypes.Message(props, "content"))
-
- # Try messages with TTL and different headers/properties
- cluster[0].send_message("q", Message(durable=True, ttl=100000))
- cluster[0].send_message("q", Message(durable=True, properties={}, ttl=100000))
- cluster[0].send_message("q", Message(durable=True, properties={"x":10}, ttl=100000))
-
- # Now update a new member and compare their dumps.
- cluster.start(args=["--test-store-dump", "updatee.dump"])
- assert readfile("direct.dump") == readfile("updatee.dump")
-
- os.remove("direct.dump")
- os.remove("updatee.dump")
-
- def test_sasl(self):
- """Test SASL authentication and encryption in a cluster"""
- sasl_config=os.path.join(self.rootdir, "sasl_config")
- acl=os.path.join(os.getcwd(), "policy.acl")
- aclf=file(acl,"w")
- # Must allow cluster-user (zag) access to credentials exchange.
- aclf.write("""
-acl allow zag@QPID publish exchange name=qpid.cluster-credentials
-acl allow zig@QPID all all
-acl deny all all
-""")
- aclf.close()
- cluster = self.cluster(1, args=["--auth", "yes",
- "--sasl-config", sasl_config,
- "--load-module", os.getenv("ACL_LIB"),
- "--acl-file", acl,
- "--cluster-username=zag",
- "--cluster-password=zag",
- "--cluster-mechanism=PLAIN"
- ])
-
- # Valid user/password, ensure queue is created.
- c = cluster[0].connect(username="zig", password="zig")
- c.session().sender("ziggy;{create:always,node:{x-declare:{exclusive:true}}}")
- c.close()
- cluster.start() # Start second node.
-
- # Check queue is created on second node.
- c = cluster[1].connect(username="zig", password="zig")
- c.session().receiver("ziggy;{assert:always}")
- c.close()
- for b in cluster: b.ready() # Make sure all brokers still running.
-
- # Valid user, bad password
- try:
- cluster[0].connect(username="zig", password="foo").close()
- self.fail("Expected exception")
- except messaging.exceptions.ConnectionError: pass
- for b in cluster: b.ready() # Make sure all brokers still running.
-
- # Bad user ID
- try:
- cluster[0].connect(username="foo", password="bar").close()
- self.fail("Expected exception")
- except messaging.exceptions.ConnectionError: pass
- for b in cluster: b.ready() # Make sure all brokers still running.
-
- # Action disallowed by ACL
- c = cluster[0].connect(username="zag", password="zag")
- try:
- s = c.session()
- s.sender("zaggy;{create:always}")
- s.close()
- self.fail("Expected exception")
- except messaging.exceptions.UnauthorizedAccess: pass
- # make sure the queue was not created at the other node.
- c = cluster[1].connect(username="zig", password="zig")
- try:
- s = c.session()
- s.sender("zaggy;{assert:always}")
- s.close()
- self.fail("Expected exception")
- except messaging.exceptions.NotFound: pass
-
- def test_sasl_join_good(self):
- """Verify SASL authentication between brokers when joining a cluster."""
- sasl_config=os.path.join(self.rootdir, "sasl_config")
- # Test with a valid username/password
- cluster = self.cluster(1, args=["--auth", "yes",
- "--sasl-config", sasl_config,
- "--cluster-username=zig",
- "--cluster-password=zig",
- "--cluster-mechanism=PLAIN"
- ])
- cluster.start()
- c = cluster[1].connect(username="zag", password="zag", mechanism="PLAIN")
-
- def test_sasl_join_bad_password(self):
- # Test with an invalid password
- cluster = self.cluster(1, args=["--auth", "yes",
- "--sasl-config", os.path.join(self.rootdir, "sasl_config"),
- "--cluster-username=zig",
- "--cluster-password=bad",
- "--cluster-mechanism=PLAIN"
- ])
- cluster.start(wait=False, expect=EXPECT_EXIT_FAIL)
- assert cluster[1].log_contains("critical Unexpected error: connection-forced: Authentication failed")
-
- def test_sasl_join_wrong_user(self):
- # Test with a valid user that is not the cluster user.
- cluster = self.cluster(0, args=["--auth", "yes",
- "--sasl-config", os.path.join(self.rootdir, "sasl_config")])
- cluster.start(args=["--cluster-username=zig",
- "--cluster-password=zig",
- "--cluster-mechanism=PLAIN"
- ])
-
- cluster.start(wait=False, expect=EXPECT_EXIT_FAIL,
- args=["--cluster-username=zag",
- "--cluster-password=zag",
- "--cluster-mechanism=PLAIN"
- ])
- assert cluster[1].log_contains("critical Unexpected error: unauthorized-access: unauthorized-access: Unauthorized user zag@QPID for qpid.cluster-credentials, should be zig")
-
- def test_user_id_update(self):
- """Ensure that user-id of an open session is updated to new cluster members"""
- sasl_config=os.path.join(self.rootdir, "sasl_config")
- cluster = self.cluster(1, args=["--auth", "yes", "--sasl-config", sasl_config,
- "--cluster-mechanism=ANONYMOUS"])
- c = cluster[0].connect(username="zig", password="zig")
- s = c.session().sender("q;{create:always}")
- s.send(Message("x", user_id="zig")) # Message sent before start new broker
- cluster.start()
- s.send(Message("y", user_id="zig")) # Messsage sent after start of new broker
- # Verify brokers are healthy and messages are on the queue.
- self.assertEqual("x", cluster[0].get_message("q").content)
- self.assertEqual("y", cluster[1].get_message("q").content)
-
- def test_other_mech(self):
- """Test using a mechanism other than PLAIN/ANONYMOUS for cluster update authentication.
- Regression test for https://issues.apache.org/jira/browse/QPID-3849"""
- sasl_config=os.path.join(self.rootdir, "sasl_config")
- cluster = self.cluster(2, args=["--auth", "yes", "--sasl-config", sasl_config,
- "--cluster-username=zig",
- "--cluster-password=zig",
- "--cluster-mechanism=DIGEST-MD5"])
- cluster[0].connect()
- cluster.start() # Before the fix this broker failed to join the cluster.
- cluster[2].connect()
-
- def test_link_events(self):
- """Regression test for https://bugzilla.redhat.com/show_bug.cgi?id=611543"""
- args = ["--mgmt-pub-interval", 1] # Publish management information every second.
- broker1 = self.cluster(1, args)[0]
- broker2 = self.cluster(1, args)[0]
- qp = self.popen(["qpid-printevents", broker1.host_port()], EXPECT_RUNNING)
- qr = self.popen(["qpid-route", "route", "add",
- broker1.host_port(), broker2.host_port(),
- "amq.fanout", "key"
- ], EXPECT_EXIT_OK)
- # Look for link event in printevents output.
- retry(lambda: find_in_file("brokerLinkUp", qp.outfile("out")))
- broker1.ready()
- broker2.ready()
- qr.wait()
-
- def test_queue_cleaner(self):
- """ Regression test to ensure that cleanup of expired messages works correctly """
- cluster = self.cluster(2, args=["--queue-purge-interval", 3])
-
- s0 = cluster[0].connect().session()
- sender = s0.sender("my-lvq; {create: always, node:{x-declare:{arguments:{'qpid.last_value_queue':1}}}}")
- #send 10 messages that will all expire and be cleaned up
- for i in range(1, 10):
- msg = Message("message-%s" % i)
- msg.properties["qpid.LVQ_key"] = "a"
- msg.ttl = 0.1
- sender.send(msg)
- #wait for queue cleaner to run
- time.sleep(3)
-
- #test all is ok by sending and receiving a message
- msg = Message("non-expiring")
- msg.properties["qpid.LVQ_key"] = "b"
- sender.send(msg)
- s0.connection.close()
- s1 = cluster[1].connect().session()
- m = s1.receiver("my-lvq", capacity=1).fetch(timeout=1)
- s1.acknowledge()
- self.assertEqual("non-expiring", m.content)
- s1.connection.close()
-
- for b in cluster: b.ready() # Make sure all brokers still running.
-
-
- def test_amqfailover_visible(self):
- """Verify that the amq.failover exchange can be seen by
- QMF-based tools - regression test for BZ615300."""
- broker1 = self.cluster(1)[0]
- broker2 = self.cluster(1)[0]
- qs = subprocess.Popen(["qpid-stat", "-e", "-b", broker1.host_port()], stdout=subprocess.PIPE)
- out = qs.communicate()[0]
- assert out.find("amq.failover") > 0
-
- def evaluate_address(self, session, address):
- """Create a receiver just to evaluate an address for its side effects"""
- r = session.receiver(address)
- r.close()
-
- def test_expire_fanout(self):
- """Regression test for QPID-2874: Clustered broker crashes in assertion in
- cluster/ExpiryPolicy.cpp.
- Caused by a fan-out message being updated as separate messages"""
- cluster = self.cluster(1)
- session0 = cluster[0].connect().session()
- # Create 2 queues bound to fanout exchange.
- self.evaluate_address(session0, "q1;{create:always,node:{x-bindings:[{exchange:'amq.fanout',queue:q1}]}}")
- self.evaluate_address(session0, "q2;{create:always,node:{x-bindings:[{exchange:'amq.fanout',queue:q2}]}}")
- queues = ["q1", "q2"]
- # Send a fanout message with a long timeout
- s = session0.sender("amq.fanout")
- s.send(Message("foo", ttl=100), sync=False)
- # Start a new member, check the messages
- cluster.start()
- session1 = cluster[1].connect().session()
- for q in queues: self.assert_browse(session1, q, ["foo"])
-
- def test_route_update(self):
- """Regression test for https://issues.apache.org/jira/browse/QPID-2982
- Links and bridges associated with routes were not replicated on update.
- This meant extra management objects and caused an exit if a management
- client was attached.
- """
- args=["--mgmt-pub-interval=1","--log-enable=trace+:management"]
-
- # First broker will be killed.
- cluster0 = self.cluster(1, args=args)
- cluster1 = self.cluster(1, args=args)
- assert 0 == subprocess.call(
- ["qpid-route", "route", "add", cluster0[0].host_port(),
- cluster1[0].host_port(), "dummy-exchange", "dummy-key", "-d"])
- cluster0.start()
-
- # Wait for qpid-tool:list on cluster0[0] to generate expected output.
- pattern = re.compile("org.apache.qpid.broker.*link")
- qpid_tool = subprocess.Popen(["qpid-tool", cluster0[0].host_port()],
- stdin=subprocess.PIPE, stdout=subprocess.PIPE)
- class Scanner(Thread):
- def __init__(self): self.found = False; Thread.__init__(self)
- def run(self):
- for l in qpid_tool.stdout:
- if pattern.search(l): self.found = True; return
- scanner = Scanner()
- scanner.start()
- start = time.time()
- try:
- # Wait up to 5 seconds for the scanner to find the expected output
- while not scanner.found and time.time() < start + 5:
- qpid_tool.stdin.write("list\n") # Ask qpid-tool to list
- for b in cluster0: b.ready() # Raise if any brokers are down
- finally:
- qpid_tool.stdin.write("quit\n")
- qpid_tool.wait()
- scanner.join()
- assert scanner.found
- # Regression test for https://issues.apache.org/jira/browse/QPID-3235
- # Inconsistent stats when changing elder.
-
- # Force a change of elder
- cluster0.start()
- for b in cluster0: b.ready()
- cluster0[0].expect=EXPECT_EXIT_FAIL # About to die.
- cluster0[0].kill()
- time.sleep(2) # Allow a management interval to pass.
- for b in cluster0[1:]: b.ready()
- # Verify logs are consistent
- cluster_test_logs.verify_logs()
-
- def test_redelivered(self):
- """Verify that redelivered flag is set correctly on replayed messages"""
- cluster = self.cluster(2, expect=EXPECT_EXIT_FAIL)
- url = "amqp:tcp:%s,tcp:%s" % (cluster[0].host_port(), cluster[1].host_port())
- queue = "my-queue"
- cluster[0].declare_queue(queue)
- self.sender = self.popen(
- ["qpid-send",
- "--broker", url,
- "--address", queue,
- "--sequence=true",
- "--send-eos=1",
- "--messages=100000",
- "--connection-options={%s}"%(Cluster.CONNECTION_OPTIONS)
- ])
- self.receiver = self.popen(
- ["qpid-receive",
- "--broker", url,
- "--address", queue,
- "--ignore-duplicates",
- "--check-redelivered",
- "--connection-options={%s}"%(Cluster.CONNECTION_OPTIONS),
- "--forever"
- ])
- time.sleep(1)#give sender enough time to have some messages to replay
- cluster[0].kill()
- self.sender.wait()
- self.receiver.wait()
- cluster[1].kill()
-
- class BlockedSend(Thread):
- """Send a message, send is expected to block.
- Verify that it does block (for a given timeout), then allow
- waiting till it unblocks when it is expected to do so."""
- def __init__(self, sender, msg):
- self.sender, self.msg = sender, msg
- self.blocked = True
- self.condition = Condition()
- self.timeout = 0.1 # Time to wait for expected results.
- Thread.__init__(self)
- def run(self):
- try:
- self.sender.send(self.msg, sync=True)
- self.condition.acquire()
- try:
- self.blocked = False
- self.condition.notify()
- finally: self.condition.release()
- except Exception,e: print "BlockedSend exception: %s"%e
- def start(self):
- Thread.start(self)
- time.sleep(self.timeout)
- assert self.blocked # Expected to block
- def assert_blocked(self): assert self.blocked
- def wait(self): # Now expecting to unblock
- self.condition.acquire()
- try:
- while self.blocked:
- self.condition.wait(self.timeout)
- if self.blocked: raise Exception("Timed out waiting for send to unblock")
- finally: self.condition.release()
- self.join()
-
- def queue_flowlimit_test(self, brokers):
- """Verify that the queue's flowlimit configuration and state are
- correctly replicated.
- The brokers argument allows this test to run on a single broker, a
- cluster of 2 pre-started brokers, or a cluster where the second broker
- starts after the queue is in flow control.
- """
- # configure a queue with a specific flow limit on first broker
- ssn0 = brokers.first().connect().session()
- s0 = ssn0.sender("flq; {create:always, node:{type:queue, x-declare:{arguments:{'qpid.flow_stop_count':5, 'qpid.flow_resume_count':3}}}}")
- brokers.first().startQmf()
- q1 = [q for q in brokers.first().qmf_session.getObjects(_class="queue") if q.name == "flq"][0]
- oid = q1.getObjectId()
- self.assertEqual(q1.name, "flq")
- self.assertEqual(q1.arguments, {u'qpid.flow_stop_count': 5L, u'qpid.flow_resume_count': 3L})
- assert not q1.flowStopped
- self.assertEqual(q1.flowStoppedCount, 0)
-
- # fill the queue on one broker until flow control is active
- for x in range(5): s0.send(Message(str(x)))
- sender = ShortTests.BlockedSend(s0, Message(str(6)))
- sender.start() # Tests that sender does block
- # Verify the broker queue goes into a flowStopped state
- deadline = time.time() + 1
- while not q1.flowStopped and time.time() < deadline: q1.update()
- assert q1.flowStopped
- self.assertEqual(q1.flowStoppedCount, 1)
- sender.assert_blocked() # Still blocked
-
- # Now verify that both brokers in the cluster have the same configuration
- brokers.second().startQmf()
- qs = brokers.second().qmf_session.getObjects(_objectId=oid)
- self.assertEqual(len(qs), 1)
- q2 = qs[0]
- self.assertEqual(q2.name, "flq")
- self.assertEqual(q2.arguments, {u'qpid.flow_stop_count': 5L, u'qpid.flow_resume_count': 3L})
- assert q2.flowStopped
- self.assertEqual(q2.flowStoppedCount, 1)
-
- # now drain the queue using a session to the other broker
- ssn1 = brokers.second().connect().session()
- r1 = ssn1.receiver("flq", capacity=6)
- for x in range(4):
- r1.fetch(timeout=0)
- ssn1.acknowledge()
- sender.wait() # Verify no longer blocked.
-
- # and re-verify state of queue on both brokers
- q1.update()
- assert not q1.flowStopped
- q2.update()
- assert not q2.flowStopped
-
- ssn0.connection.close()
- ssn1.connection.close()
- cluster_test_logs.verify_logs()
-
- def test_queue_flowlimit(self):
- """Test flow limits on a standalone broker"""
- broker = self.broker()
- class Brokers:
- def first(self): return broker
- def second(self): return broker
- self.queue_flowlimit_test(Brokers())
-
- def test_queue_flowlimit_cluster(self):
- cluster = self.cluster(2)
- class Brokers:
- def first(self): return cluster[0]
- def second(self): return cluster[1]
- self.queue_flowlimit_test(Brokers())
-
- def test_queue_flowlimit_cluster_join(self):
- cluster = self.cluster(1)
- class Brokers:
- def first(self): return cluster[0]
- def second(self):
- if len(cluster) == 1: cluster.start()
- return cluster[1]
- self.queue_flowlimit_test(Brokers())
-
- def test_queue_flowlimit_replicate(self):
- """ Verify that a queue which is in flow control BUT has drained BELOW
- the flow control 'stop' threshold is correctly replicated when a new
- broker is added to the cluster.
- """
-
- class AsyncSender(Thread):
- """Send a fixed number of msgs from a sender in a separate thread
- so it may block without blocking the test.
- """
- def __init__(self, broker, address, count=1, size=4):
- Thread.__init__(self)
- self.daemon = True
- self.broker = broker
- self.queue = address
- self.count = count
- self.size = size
- self.done = False
-
- def run(self):
- self.sender = subprocess.Popen(["qpid-send",
- "--capacity=1",
- "--content-size=%s" % self.size,
- "--messages=%s" % self.count,
- "--failover-updates",
- "--connection-options={%s}"%(Cluster.CONNECTION_OPTIONS),
- "--address=%s" % self.queue,
- "--broker=%s" % self.broker.host_port()])
- self.sender.wait()
- self.done = True
-
- cluster = self.cluster(2)
- # create a queue with rather draconian flow control settings
- ssn0 = cluster[0].connect().session()
- s0 = ssn0.sender("flq; {create:always, node:{type:queue, x-declare:{arguments:{'qpid.flow_stop_count':100, 'qpid.flow_resume_count':20}}}}")
-
- # fire off the sending thread to broker[0], and wait until the queue
- # hits flow control on broker[1]
- sender = AsyncSender(cluster[0], "flq", count=110);
- sender.start();
-
- cluster[1].startQmf()
- q_obj = [q for q in cluster[1].qmf_session.getObjects(_class="queue") if q.name == "flq"][0]
- deadline = time.time() + 10
- while not q_obj.flowStopped and time.time() < deadline:
- q_obj.update()
- assert q_obj.flowStopped
- assert not sender.done
- assert q_obj.msgDepth < 110
-
- # Now drain enough messages on broker[1] to drop below the flow stop
- # threshold, but not relieve flow control...
- receiver = subprocess.Popen(["qpid-receive",
- "--messages=15",
- "--timeout=1",
- "--print-content=no",
- "--failover-updates",
- "--connection-options={%s}"%(Cluster.CONNECTION_OPTIONS),
- "--ack-frequency=1",
- "--address=flq",
- "--broker=%s" % cluster[1].host_port()])
- receiver.wait()
- q_obj.update()
- assert q_obj.flowStopped
- assert not sender.done
- current_depth = q_obj.msgDepth
-
- # add a new broker to the cluster, and verify that the queue is in flow
- # control on that broker
- cluster.start()
- cluster[2].startQmf()
- q_obj = [q for q in cluster[2].qmf_session.getObjects(_class="queue") if q.name == "flq"][0]
- assert q_obj.flowStopped
- assert q_obj.msgDepth == current_depth
-
- # now drain the queue on broker[2], and verify that the sender becomes
- # unblocked
- receiver = subprocess.Popen(["qpid-receive",
- "--messages=95",
- "--timeout=1",
- "--print-content=no",
- "--failover-updates",
- "--connection-options={%s}"%(Cluster.CONNECTION_OPTIONS),
- "--ack-frequency=1",
- "--address=flq",
- "--broker=%s" % cluster[2].host_port()])
- receiver.wait()
- q_obj.update()
- assert not q_obj.flowStopped
- self.assertEqual(q_obj.msgDepth, 0)
-
- # verify that the sender has become unblocked
- sender.join(timeout=5)
- assert not sender.isAlive()
- assert sender.done
-
- def test_blocked_queue_delete(self):
- """Verify that producers which are blocked on a queue due to flow
- control are unblocked when that queue is deleted.
- """
-
- cluster = self.cluster(2)
- cluster[0].startQmf()
- cluster[1].startQmf()
-
- # configure a queue with a specific flow limit on first broker
- ssn0 = cluster[0].connect().session()
- s0 = ssn0.sender("flq; {create:always, node:{type:queue, x-declare:{arguments:{'qpid.flow_stop_count':5, 'qpid.flow_resume_count':3}}}}")
- q1 = [q for q in cluster[0].qmf_session.getObjects(_class="queue") if q.name == "flq"][0]
- oid = q1.getObjectId()
- self.assertEqual(q1.name, "flq")
- self.assertEqual(q1.arguments, {u'qpid.flow_stop_count': 5L, u'qpid.flow_resume_count': 3L})
- assert not q1.flowStopped
- self.assertEqual(q1.flowStoppedCount, 0)
-
- # fill the queue on one broker until flow control is active
- for x in range(5): s0.send(Message(str(x)))
- sender = ShortTests.BlockedSend(s0, Message(str(6)))
- sender.start() # Tests that sender does block
- # Verify the broker queue goes into a flowStopped state
- deadline = time.time() + 1
- while not q1.flowStopped and time.time() < deadline: q1.update()
- assert q1.flowStopped
- self.assertEqual(q1.flowStoppedCount, 1)
- sender.assert_blocked() # Still blocked
-
- # Now verify that both brokers in the cluster have the same configuration
- qs = cluster[1].qmf_session.getObjects(_objectId=oid)
- self.assertEqual(len(qs), 1)
- q2 = qs[0]
- self.assertEqual(q2.name, "flq")
- self.assertEqual(q2.arguments, {u'qpid.flow_stop_count': 5L, u'qpid.flow_resume_count': 3L})
- assert q2.flowStopped
- self.assertEqual(q2.flowStoppedCount, 1)
-
- # now delete the blocked queue from other broker
- ssn1 = cluster[1].connect().session()
- self.evaluate_address(ssn1, "flq;{delete:always}")
- sender.wait() # Verify no longer blocked.
-
- ssn0.connection.close()
- ssn1.connection.close()
- cluster_test_logs.verify_logs()
-
-
- def test_alternate_exchange_update(self):
- """Verify that alternate-exchange on exchanges and queues is propagated to new members of a cluster. """
- cluster = self.cluster(1)
- s0 = cluster[0].connect().session()
- # create alt queue bound to amq.fanout exchange, will be destination for alternate exchanges
- self.evaluate_address(s0, "alt;{create:always,node:{x-bindings:[{exchange:'amq.fanout',queue:alt}]}}")
- # create direct exchange ex with alternate-exchange amq.fanout and no queues bound
- self.evaluate_address(s0, "ex;{create:always,node:{type:topic, x-declare:{type:'direct', alternate-exchange:'amq.fanout'}}}")
- # create queue q with alternate-exchange amq.fanout
- self.evaluate_address(s0, "q;{create:always,node:{type:queue, x-declare:{alternate-exchange:'amq.fanout'}}}")
-
- def verify(broker):
- s = broker.connect().session()
- # Verify unmatched message goes to ex's alternate.
- s.sender("ex").send("foo")
- self.assertEqual("foo", s.receiver("alt").fetch(timeout=0).content)
- # Verify rejected message goes to q's alternate.
- s.sender("q").send("bar")
- msg = s.receiver("q").fetch(timeout=0)
- self.assertEqual("bar", msg.content)
- s.acknowledge(msg, Disposition(REJECTED)) # Reject the message
- self.assertEqual("bar", s.receiver("alt").fetch(timeout=0).content)
-
- verify(cluster[0])
- cluster.start()
- verify(cluster[1])
-
- def test_binding_order(self):
- """Regression test for binding order inconsistency in cluster"""
- cluster = self.cluster(1)
- c0 = cluster[0].connect()
- s0 = c0.session()
- # Declare multiple queues bound to same key on amq.topic
- def declare(q,max=0):
- if max: declare = 'x-declare:{arguments:{"qpid.max_count":%d, "qpid.flow_stop_count":0}}'%max
- else: declare = 'x-declare:{}'
- bind='x-bindings:[{queue:%s,key:key,exchange:"amq.topic"}]'%(q)
- s0.sender("%s;{create:always,node:{%s,%s}}" % (q,declare,bind))
- declare('d',max=4) # Only one with a limit
- for q in ['c', 'b','a']: declare(q)
- # Add a cluster member, send enough messages to exceed the max count
- cluster.start()
- try:
- s = s0.sender('amq.topic/key')
- for m in xrange(1,6): s.send(Message(str(m)))
- self.fail("Expected capacity exceeded exception")
- except messaging.exceptions.TargetCapacityExceeded: pass
- c1 = cluster[1].connect()
- s1 = c1.session()
- s0 = c0.session() # Old session s0 is broken by exception.
- # Verify queue contents are consistent.
- for q in ['a','b','c','d']:
- self.assertEqual(self.browse(s0, q), self.browse(s1, q))
- # Verify queue contents are "best effort"
- for q in ['a','b','c']: self.assert_browse(s1,q,[str(n) for n in xrange(1,6)])
- self.assert_browse(s1,'d',[str(n) for n in xrange(1,5)])
-
- def test_deleted_exchange(self):
- """QPID-3215: cached exchange reference can cause cluster inconsistencies
- if exchange is deleted/recreated
- Verify stand-alone case
- """
- cluster = self.cluster()
- # Verify we do not route message via an exchange that has been destroyed.
- cluster.start()
- s0 = cluster[0].connect().session()
- self.evaluate_address(s0, "ex;{create:always,node:{type:topic}}")
- self.evaluate_address(s0, "q;{create:always,node:{x-bindings:[{exchange:'ex',queue:q,key:foo}]}}")
- send0 = s0.sender("ex/foo")
- send0.send("foo")
- self.assert_browse(s0, "q", ["foo"])
- self.evaluate_address(s0, "ex;{delete:always}")
- try:
- send0.send("bar") # Should fail, exchange is deleted.
- self.fail("Expected not-found exception")
- except qpid.messaging.NotFound: pass
- self.assert_browse(cluster[0].connect().session(), "q", ["foo"])
-
- def test_deleted_exchange_inconsistent(self):
- """QPID-3215: cached exchange reference can cause cluster inconsistencies
- if exchange is deleted/recreated
-
- Verify cluster inconsistency.
- """
- cluster = self.cluster()
- cluster.start()
- s0 = cluster[0].connect().session()
- self.evaluate_address(s0, "ex;{create:always,node:{type:topic}}")
- self.evaluate_address(s0, "q;{create:always,node:{x-bindings:[{exchange:'ex',queue:q,key:foo}]}}")
- send0 = s0.sender("ex/foo")
- send0.send("foo")
- self.assert_browse(s0, "q", ["foo"])
-
- cluster.start()
- s1 = cluster[1].connect().session()
- self.evaluate_address(s0, "ex;{delete:always}")
- try:
- send0.send("bar")
- self.fail("Expected not-found exception")
- except qpid.messaging.NotFound: pass
-
- self.assert_browse(s1, "q", ["foo"])
-
-
- def test_ttl_consistent(self):
- """Ensure we don't get inconsistent errors with message that have TTL very close together"""
- messages = [ Message(str(i), ttl=i/1000.0) for i in xrange(0,1000)]
- messages.append(Message("x"))
- cluster = self.cluster(2)
- sender = cluster[0].connect().session().sender("q;{create:always}")
-
- def fetch(b):
- receiver = b.connect().session().receiver("q;{create:always}")
- while receiver.fetch().content != "x": pass
-
- for m in messages: sender.send(m, sync=False)
- for m in messages: sender.send(m, sync=False)
- fetch(cluster[0])
- fetch(cluster[1])
- for m in messages: sender.send(m, sync=False)
- cluster.start()
- fetch(cluster[2])
-
-
- def _verify_federation(self, src_broker, src, dst_broker, dst, timeout=30):
- """ Prove that traffic can pass between two federated brokers.
- """
- tot_time = 0
- active = False
- send_session = src_broker.connect().session()
- sender = send_session.sender(src)
- receive_session = dst_broker.connect().session()
- receiver = receive_session.receiver(dst)
- while not active and tot_time < timeout:
- sender.send(Message("Hello from Source!"))
- try:
- receiver.fetch(timeout = 1)
- receive_session.acknowledge()
- # Get this far without Empty exception, and the link is good!
- active = True
- while True:
- # Keep receiving msgs, as several may have accumulated
- receiver.fetch(timeout = 1)
- receive_session.acknowledge()
- except Empty:
- if not active:
- tot_time += 1
- receiver.close()
- receive_session.close()
- sender.close()
- send_session.close()
- return active
-
- def test_federation_failover(self):
- """
- Verify that federation operates across failures occurring in a cluster.
- Specifically:
- 1) Destination cluster learns of membership changes in the source
- cluster
- 2) Destination cluster replicates the current state of the source
- cluster to newly-added members
- """
-
- # 2 node cluster source, 2 node cluster destination
- src_cluster = self.cluster(2, expect=EXPECT_EXIT_FAIL)
- src_cluster.ready();
- dst_cluster = self.cluster(2, expect=EXPECT_EXIT_FAIL)
- dst_cluster.ready();
-
- cmd = self.popen(["qpid-config",
- "--broker", src_cluster[0].host_port(),
- "add", "queue", "srcQ"], EXPECT_EXIT_OK)
- cmd.wait()
-
- cmd = self.popen(["qpid-config",
- "--broker", dst_cluster[0].host_port(),
- "add", "exchange", "fanout", "destX"], EXPECT_EXIT_OK)
- cmd.wait()
-
- cmd = self.popen(["qpid-config",
- "--broker", dst_cluster[0].host_port(),
- "add", "queue", "destQ"], EXPECT_EXIT_OK)
- cmd.wait()
-
- cmd = self.popen(["qpid-config",
- "--broker", dst_cluster[0].host_port(),
- "bind", "destX", "destQ"], EXPECT_EXIT_OK)
- cmd.wait()
-
- # federate the srcQ to the destination exchange
- dst_cluster[0].startQmf()
- dst_broker = dst_cluster[0].qmf_session.getObjects(_class="broker")[0]
- result = dst_broker.connect(src_cluster[0].host(), src_cluster[0].port(), False, "PLAIN",
- "guest", "guest", "tcp")
- self.assertEqual(result.status, 0, result);
-
- link = dst_cluster[0].qmf_session.getObjects(_class="link")[0]
- result = link.bridge(False, "srcQ", "destX", "", "", "", True, False, False, 10)
- self.assertEqual(result.status, 0, result)
-
- # check that traffic passes
- assert self._verify_federation(src_cluster[0], "srcQ", dst_cluster[0], "destQ")
-
- # add src[2] broker to source cluster
- src_cluster.start(expect=EXPECT_EXIT_FAIL);
- src_cluster.ready();
- assert self._verify_federation(src_cluster[2], "srcQ", dst_cluster[0], "destQ")
-
- # Kill src[0]. dst[0] should fail over to src[1]
- src_cluster[0].kill()
- for b in src_cluster[1:]: b.ready()
- assert self._verify_federation(src_cluster[1], "srcQ", dst_cluster[0], "destQ")
-
- # Kill src[1], dst[0] should fail over to src[2]
- src_cluster[1].kill()
- for b in src_cluster[2:]: b.ready()
- assert self._verify_federation(src_cluster[2], "srcQ", dst_cluster[0], "destQ")
-
- # Kill dest[0], force failover to dest[1]
- dst_cluster[0].kill()
- for b in dst_cluster[1:]: b.ready()
- assert self._verify_federation(src_cluster[2], "srcQ", dst_cluster[1], "destQ")
-
- # Add dest[2]
- # dest[1] syncs dest[2] to current remote state
- dst_cluster.start(expect=EXPECT_EXIT_FAIL);
- for b in dst_cluster[1:]: b.ready()
- assert self._verify_federation(src_cluster[2], "srcQ", dst_cluster[1], "destQ")
-
- # Kill dest[1], force failover to dest[2]
- dst_cluster[1].kill()
- for b in dst_cluster[2:]: b.ready()
- assert self._verify_federation(src_cluster[2], "srcQ", dst_cluster[2], "destQ")
-
- for i in range(2, len(src_cluster)): src_cluster[i].kill()
- for i in range(2, len(dst_cluster)): dst_cluster[i].kill()
-
-
- def test_federation_multilink_failover(self):
- """
- Verify that multi-link federation operates across failures occurring in
- a cluster.
- """
-
- # 1 node cluster source, 1 node cluster destination
- src_cluster = self.cluster(1, expect=EXPECT_EXIT_FAIL)
- src_cluster.ready();
- dst_cluster = self.cluster(1, expect=EXPECT_EXIT_FAIL)
- dst_cluster.ready();
-
- # federate a direct binding across two separate links
-
- # first, create a direct exchange bound to two queues using different
- # bindings
- cmd = self.popen(["qpid-config",
- "--broker", src_cluster[0].host_port(),
- "add", "exchange", "direct", "FedX"],
- EXPECT_EXIT_OK)
- cmd.wait()
-
- cmd = self.popen(["qpid-config",
- "--broker", dst_cluster[0].host_port(),
- "add", "exchange", "direct", "FedX"],
- EXPECT_EXIT_OK)
- cmd.wait()
-
- cmd = self.popen(["qpid-config",
- "--broker", dst_cluster[0].host_port(),
- "add", "queue", "destQ1"],
- EXPECT_EXIT_OK)
- cmd.wait()
-
- cmd = self.popen(["qpid-config",
- "--broker", dst_cluster[0].host_port(),
- "bind", "FedX", "destQ1", "one"],
- EXPECT_EXIT_OK)
- cmd.wait()
-
- cmd = self.popen(["qpid-config",
- "--broker", dst_cluster[0].host_port(),
- "add", "queue", "destQ2"],
- EXPECT_EXIT_OK)
- cmd.wait()
-
- cmd = self.popen(["qpid-config",
- "--broker", dst_cluster[0].host_port(),
- "bind", "FedX", "destQ2", "two"],
- EXPECT_EXIT_OK)
- cmd.wait()
-
- # Create two separate links between the dst and source brokers, bind
- # each to different keys
- dst_cluster[0].startQmf()
- dst_broker = dst_cluster[0].qmf_session.getObjects(_class="broker")[0]
-
- for _l in [("link1", "bridge1", "one"),
- ("link2", "bridge2", "two")]:
- result = dst_broker.create("link", _l[0],
- {"host":src_cluster[0].host(),
- "port":src_cluster[0].port()},
- False)
- self.assertEqual(result.status, 0, result);
- result = dst_broker.create("bridge", _l[1],
- {"link":_l[0],
- "src":"FedX",
- "dest":"FedX",
- "key":_l[2]}, False)
- self.assertEqual(result.status, 0);
-
- # check that traffic passes
- assert self._verify_federation(src_cluster[0], "FedX/one", dst_cluster[0], "destQ1")
- assert self._verify_federation(src_cluster[0], "FedX/two", dst_cluster[0], "destQ2")
-
- # add new member, verify traffic
- src_cluster.start(expect=EXPECT_EXIT_FAIL);
- src_cluster.ready();
-
- dst_cluster.start(expect=EXPECT_EXIT_FAIL);
- dst_cluster.ready();
-
- assert self._verify_federation(src_cluster[0], "FedX/one", dst_cluster[0], "destQ1")
- assert self._verify_federation(src_cluster[0], "FedX/two", dst_cluster[0], "destQ2")
-
- src_cluster[0].kill()
- for b in src_cluster[1:]: b.ready()
-
- assert self._verify_federation(src_cluster[1], "FedX/one", dst_cluster[0], "destQ1")
- assert self._verify_federation(src_cluster[1], "FedX/two", dst_cluster[0], "destQ2")
-
- dst_cluster[0].kill()
- for b in dst_cluster[1:]: b.ready()
-
- assert self._verify_federation(src_cluster[1], "FedX/one", dst_cluster[1], "destQ1")
- assert self._verify_federation(src_cluster[1], "FedX/two", dst_cluster[1], "destQ2")
-
- for i in range(1, len(src_cluster)): src_cluster[i].kill()
- for i in range(1, len(dst_cluster)): dst_cluster[i].kill()
-
-
-
-# Some utility code for transaction tests
-XA_RBROLLBACK = 1
-XA_RBTIMEOUT = 2
-XA_OK = 0
-dtx_branch_counter = 0
-
-class DtxStatusException(Exception):
- def __init__(self, expect, actual):
- self.expect = expect
- self.actual = actual
-
- def __str__(self):
- return "DtxStatusException(expect=%s, actual=%s)"%(self.expect, self.actual)
-
-class DtxTestFixture:
- """Bundle together some common requirements for dtx tests."""
- def __init__(self, test, broker, name, exclusive=False):
- self.test = test
- self.broker = broker
- self.name = name
- # Use old API. DTX is not supported in messaging API.
- self.connection = broker.connect_old()
- self.session = self.connection.session(name, 1) # 1 second timeout
- self.queue = self.session.queue_declare(name, exclusive=exclusive)
- self.session.dtx_select()
- self.consumer = None
-
- def xid(self, id=None):
- if id is None: id = self.name
- return self.session.xid(format=0, global_id=id)
-
- def check_status(self, expect, actual):
- if expect != actual: raise DtxStatusException(expect, actual)
-
- def start(self, id=None, resume=False):
- self.check_status(XA_OK, self.session.dtx_start(xid=self.xid(id), resume=resume).status)
-
- def end(self, id=None, suspend=False):
- self.check_status(XA_OK, self.session.dtx_end(xid=self.xid(id), suspend=suspend).status)
-
- def prepare(self, id=None):
- self.check_status(XA_OK, self.session.dtx_prepare(xid=self.xid(id)).status)
-
- def commit(self, id=None, one_phase=True):
- self.check_status(
- XA_OK, self.session.dtx_commit(xid=self.xid(id), one_phase=one_phase).status)
-
- def rollback(self, id=None):
- self.check_status(XA_OK, self.session.dtx_rollback(xid=self.xid(id)).status)
-
- def set_timeout(self, timeout, id=None):
- self.session.dtx_set_timeout(xid=self.xid(id),timeout=timeout)
-
- def send(self, messages):
- for m in messages:
- dp=self.session.delivery_properties(routing_key=self.name)
- mp=self.session.message_properties()
- self.session.message_transfer(message=qpid.datatypes.Message(dp, mp, m))
-
- def accept(self):
- """Accept 1 message from queue"""
- consumer_tag="%s-consumer"%(self.name)
- self.session.message_subscribe(queue=self.name, destination=consumer_tag)
- self.session.message_flow(unit = self.session.credit_unit.message, value = 1, destination = consumer_tag)
- self.session.message_flow(unit = self.session.credit_unit.byte, value = 0xFFFFFFFFL, destination = consumer_tag)
- msg = self.session.incoming(consumer_tag).get(timeout=1)
- self.session.message_cancel(destination=consumer_tag)
- self.session.message_accept(qpid.datatypes.RangedSet(msg.id))
- return msg
-
-
- def verify(self, sessions, messages):
- for s in sessions:
- self.test.assert_browse(s, self.name, messages)
-
-class DtxTests(BrokerTest):
-
- def test_dtx_update(self):
- """Verify that DTX transaction state is updated to a new broker.
- Start a collection of transactions, then add a new cluster member,
- then verify they commit/rollback correctly on the new broker."""
-
- # Note: multiple tests have been bundled into one to avoid the need to start/stop
- # multiple brokers per test.
-
- cluster=self.cluster(1)
- sessions = [cluster[0].connect().session()] # For verify
-
- # Transaction that will be open when new member joins, then committed.
- t1 = DtxTestFixture(self, cluster[0], "t1")
- t1.start()
- t1.send(["1", "2"])
- t1.verify(sessions, []) # Not visible outside of transaction
-
- # Transaction that will be open when new member joins, then rolled back.
- t2 = DtxTestFixture(self, cluster[0], "t2")
- t2.start()
- t2.send(["1", "2"])
-
- # Transaction that will be prepared when new member joins, then committed.
- t3 = DtxTestFixture(self, cluster[0], "t3")
- t3.start()
- t3.send(["1", "2"])
- t3.end()
- t3.prepare()
- t1.verify(sessions, []) # Not visible outside of transaction
-
- # Transaction that will be prepared when new member joins, then rolled back.
- t4 = DtxTestFixture(self, cluster[0], "t4")
- t4.start()
- t4.send(["1", "2"])
- t4.end()
- t4.prepare()
-
- # Transaction using an exclusive queue
- t5 = DtxTestFixture(self, cluster[0], "t5", exclusive=True)
- t5.start()
- t5.send(["1", "2"])
-
- # Accept messages in a transaction before/after join then commit
- # Note: messages are sent outside the transaction; we're testing transactional acceptance.
- t6 = DtxTestFixture(self, cluster[0], "t6")
- t6.send(["a","b","c"])
- t6.start()
- self.assertEqual(t6.accept().body, "a");
- t6.verify(sessions, ["b", "c"])
-
- # Accept messages in a transaction before/after join then roll back
- # Note: messages are sent outside the transaction; we're testing transactional acceptance.
- t7 = DtxTestFixture(self, cluster[0], "t7")
- t7.send(["a","b","c"])
- t7.start()
- self.assertEqual(t7.accept().body, "a");
- t7.verify(sessions, ["b", "c"])
-
- # Ended, suspended transactions across join.
- t8 = DtxTestFixture(self, cluster[0], "t8")
- t8.start(id="1")
- t8.send(["x"])
- t8.end(id="1", suspend=True)
- t8.start(id="2")
- t8.send(["y"])
- t8.end(id="2")
- t8.start()
- t8.send("z")
-
-
- # Start new cluster member
- cluster.start()
- sessions.append(cluster[1].connect().session())
-
- # Commit t1
- t1.send(["3","4"])
- t1.verify(sessions, [])
- t1.end()
- t1.commit(one_phase=True)
- t1.verify(sessions, ["1","2","3","4"])
-
- # Rollback t2
- t2.send(["3","4"])
- t2.end()
- t2.rollback()
- t2.verify(sessions, [])
-
- # Commit t3
- t3.commit(one_phase=False)
- t3.verify(sessions, ["1","2"])
-
- # Rollback t4
- t4.rollback()
- t4.verify(sessions, [])
-
- # Commit t5
- t5.send(["3","4"])
- t5.verify(sessions, [])
- t5.end()
- t5.commit(one_phase=True)
- t5.verify(sessions, ["1","2","3","4"])
-
- # Commit t6
- self.assertEqual(t6.accept().body, "b");
- t6.verify(sessions, ["c"])
- t6.end()
- t6.commit(one_phase=True)
- t6.session.close() # Make sure they're not requeued by the session.
- t6.verify(sessions, ["c"])
-
- # Rollback t7
- self.assertEqual(t7.accept().body, "b");
- t7.verify(sessions, ["c"])
- t7.end()
- t7.rollback()
- t7.verify(sessions, ["a", "b", "c"])
-
- # Resume t8
- t8.end()
- t8.commit(one_phase=True)
- t8.start("1", resume=True)
- t8.end("1")
- t8.commit("1", one_phase=True)
- t8.commit("2", one_phase=True)
- t8.verify(sessions, ["z", "x","y"])
-
-
- def test_dtx_failover_rollback(self):
- """Kill a broker during a transaction, verify we roll back correctly"""
- cluster=self.cluster(1, expect=EXPECT_EXIT_FAIL)
- cluster.start(expect=EXPECT_RUNNING)
-
- # Test unprepared at crash
- t1 = DtxTestFixture(self, cluster[0], "t1")
- t1.send(["a"]) # Not in transaction
- t1.start()
- t1.send(["b"]) # In transaction
-
- # Test prepared at crash
- t2 = DtxTestFixture(self, cluster[0], "t2")
- t2.send(["a"]) # Not in transaction
- t2.start()
- t2.send(["b"]) # In transaction
- t2.end()
- t2.prepare()
-
- # Crash the broker
- cluster[0].kill()
-
- # Transactional changes should not appear
- s = cluster[1].connect().session();
- self.assert_browse(s, "t1", ["a"])
- self.assert_browse(s, "t2", ["a"])
-
- def test_dtx_timeout(self):
- """Verify that dtx timeout works"""
- cluster = self.cluster(1)
- t1 = DtxTestFixture(self, cluster[0], "t1")
- t1.start()
- t1.set_timeout(1)
- time.sleep(1.1)
- try:
- t1.end()
- self.fail("Expected rollback timeout.")
- except DtxStatusException, e:
- self.assertEqual(e.actual, XA_RBTIMEOUT)
-
-class TxTests(BrokerTest):
-
- def test_tx_update(self):
- """Verify that transaction state is updated to a new broker"""
-
- def make_message(session, body=None, key=None, id=None):
- dp=session.delivery_properties(routing_key=key)
- mp=session.message_properties(correlation_id=id)
- return qpid.datatypes.Message(dp, mp, body)
-
- cluster=self.cluster(1)
- # Use old API. TX is not supported in messaging API.
- c = cluster[0].connect_old()
- s = c.session("tx-session", 1)
- s.queue_declare(queue="q")
- # Start transaction
- s.tx_select()
- s.message_transfer(message=make_message(s, "1", "q"))
- # Start new member mid-transaction
- cluster.start()
- # Do more work
- s.message_transfer(message=make_message(s, "2", "q"))
- # Commit the transaction and verify the results.
- s.tx_commit()
- for b in cluster: self.assert_browse(b.connect().session(), "q", ["1","2"])
-
-
-class LongTests(BrokerTest):
- """Tests that can run for a long time if -DDURATION=<minutes> is set"""
- def duration(self):
- d = self.config.defines.get("DURATION")
- if d: return float(d)*60
- else: return 3 # Default is to be quick
-
- def test_failover(self):
- """Test fail-over during continuous send-receive with errors"""
-
- # Original cluster will all be killed so expect exit with failure
- cluster = self.cluster(3, expect=EXPECT_EXIT_FAIL)
- for b in cluster: b.ready() # Wait for brokers to be ready
- for b in cluster: ErrorGenerator(b)
-
- # Start sender and receiver threads
- cluster[0].declare_queue("test-queue")
- sender = NumberedSender(cluster[0], max_depth=1000)
- receiver = NumberedReceiver(cluster[0], sender=sender)
- receiver.start()
- sender.start()
- # Wait for sender & receiver to get up and running
- retry(lambda: receiver.received > 0)
-
- # Kill original brokers, start new ones for the duration.
- endtime = time.time() + self.duration()
- i = 0
- while time.time() < endtime:
- sender.sender.assert_running()
- receiver.receiver.assert_running()
- cluster[i].kill()
- i += 1
- b = cluster.start(expect=EXPECT_EXIT_FAIL)
- for b in cluster[i:]: b.ready()
- ErrorGenerator(b)
- time.sleep(5)
- sender.stop()
- receiver.stop()
- for i in range(i, len(cluster)): cluster[i].kill()
-
- def test_management(self, args=[]):
- """
- Stress test: Run management clients and other clients concurrently
- while killing and restarting brokers.
- """
-
- class ClientLoop(StoppableThread):
- """Run a client executable in a loop."""
- def __init__(self, broker, cmd):
- StoppableThread.__init__(self)
- self.broker=broker
- self.cmd = cmd # Client command.
- self.lock = Lock()
- self.process = None # Client process.
- self.start()
-
- def run(self):
- try:
- while True:
- self.lock.acquire()
- try:
- if self.stopped: break
- self.process = self.broker.test.popen(
- self.cmd, expect=EXPECT_UNKNOWN)
- finally:
- self.lock.release()
- try:
- exit = self.process.wait()
- except OSError, e:
- # Process may already have been killed by self.stop()
- break
- except Exception, e:
- self.process.unexpected(
- "client of %s: %s"%(self.broker.name, e))
- self.lock.acquire()
- try:
- if self.stopped: break
- if exit != 0:
- self.process.unexpected(
- "client of %s exit code %s"%(self.broker.name, exit))
- finally:
- self.lock.release()
- except Exception, e:
- self.error = RethrownException("Error in ClientLoop.run")
-
- def stop(self):
- """Stop the running client and wait for it to exit"""
- self.lock.acquire()
- try:
- if self.stopped: return
- self.stopped = True
- if self.process:
- try: self.process.kill() # Kill the client.
- except OSError: pass # The client might not be running.
- finally: self.lock.release()
- StoppableThread.stop(self)
-
- # body of test_management()
-
- args += ["--mgmt-pub-interval", 1]
- args += ["--log-enable=trace+:management"]
- # Use store if present.
- if BrokerTest.store_lib: args +=["--load-module", BrokerTest.store_lib]
- cluster = self.cluster(3, args, expect=EXPECT_EXIT_FAIL) # brokers will be killed
-
- clients = [] # Per-broker list of clients that only connect to one broker.
- mclients = [] # Management clients that connect to every broker in the cluster.
-
- def start_clients(broker):
- """Start ordinary clients for a broker."""
- cmds=[
- ["qpid-tool", "localhost:%s"%(broker.port())],
- ["qpid-perftest", "--count=5000", "--durable=yes",
- "--base-name", str(qpid.datatypes.uuid4()), "--port", broker.port()],
- ["qpid-txtest", "--queue-base-name", "tx-%s"%str(qpid.datatypes.uuid4()),
- "--port", broker.port()],
- ["qpid-queue-stats", "-a", "localhost:%s" %(broker.port())]
- ]
- clients.append([ClientLoop(broker, cmd) for cmd in cmds])
-
- def start_mclients(broker):
- """Start management clients that make multiple connections."""
- cmd = ["qpid-cluster", "-C", "localhost:%s" %(broker.port())]
- mclients.append(ClientLoop(broker, cmd))
-
- endtime = time.time() + self.duration()
- # First run is a third of the duration, capped at 5 seconds.
- runtime = min(5.0, self.duration() / 3.0)
- alive = 0 # First live cluster member
- for i in range(len(cluster)): start_clients(cluster[i])
- start_mclients(cluster[alive])
-
- while time.time() < endtime:
- time.sleep(runtime)
- runtime = 5 # Remaining runs 5 seconds, frequent broker kills
- for b in cluster[alive:]: b.ready() # Check if a broker crashed.
- # Kill the first broker, expect the clients to fail.
- b = cluster[alive]
- b.ready()
- b.kill()
- # Stop the broker's clients and all the mclients.
- for c in clients[alive] + mclients:
- try: c.stop()
- except: pass # Ignore expected errors due to broker shutdown.
- clients[alive] = []
- mclients = []
- # Start another broker and clients
- alive += 1
- cluster.start(expect=EXPECT_EXIT_FAIL)
- cluster[-1].ready() # Wait till it's ready
- start_clients(cluster[-1])
- start_mclients(cluster[alive])
- for c in chain(mclients, *clients):
- c.stop()
- for b in cluster[alive:]:
- b.ready() # Verify still alive
- b.kill()
- # Verify that logs are consistent
- cluster_test_logs.verify_logs()
-
- def test_management_qmf2(self):
- self.test_management(args=["--mgmt-qmf2=yes"])
-
- def test_connect_consistent(self):
- args=["--mgmt-pub-interval=1","--log-enable=trace+:management"]
- cluster = self.cluster(2, args=args)
- end = time.time() + self.duration()
- while (time.time() < end): # Get a management interval
- for i in xrange(1000): cluster[0].connect().close()
- cluster_test_logs.verify_logs()
-
- def test_flowlimit_failover(self):
- """Test fail-over during continuous send-receive with flow control
- active.
- """
-
- # Original cluster will all be killed so expect exit with failure
- cluster = self.cluster(3, expect=EXPECT_EXIT_FAIL)
- for b in cluster: b.ready() # Wait for brokers to be ready
-
- # create a queue with rather draconian flow control settings
- ssn0 = cluster[0].connect().session()
- s0 = ssn0.sender("test-queue; {create:always, node:{type:queue, x-declare:{arguments:{'qpid.flow_stop_count':2000, 'qpid.flow_resume_count':100}}}}")
-
- receiver = NumberedReceiver(cluster[0])
- receiver.start()
- sender = NumberedSender(cluster[0])
- sender.start()
- # Wait for senders & receiver to get up and running
- retry(lambda: receiver.received > 10)
-
- # Kill original brokers, start new ones for the duration.
- endtime = time.time() + self.duration();
- i = 0
- while time.time() < endtime:
- sender.sender.assert_running()
- receiver.receiver.assert_running()
- for b in cluster[i:]: b.ready() # Check if any broker crashed.
- cluster[i].kill()
- i += 1
- b = cluster.start(expect=EXPECT_EXIT_FAIL)
- time.sleep(5)
- sender.stop()
- receiver.stop()
- for i in range(i, len(cluster)): cluster[i].kill()
-
- def test_ttl_failover(self):
- """Test that messages with TTL don't cause problems in a cluster with failover"""
-
- class Client(StoppableThread):
-
- def __init__(self, broker):
- StoppableThread.__init__(self)
- self.connection = broker.connect(reconnect=True)
- self.auto_fetch_reconnect_urls(self.connection)
- self.session = self.connection.session()
-
- def auto_fetch_reconnect_urls(self, conn):
- """Replacment for qpid.messaging.util version which is noisy"""
- ssn = conn.session("auto-fetch-reconnect-urls")
- rcv = ssn.receiver("amq.failover")
- rcv.capacity = 10
-
- def main():
- while True:
- try:
- msg = rcv.fetch()
- qpid.messaging.util.set_reconnect_urls(conn, msg)
- ssn.acknowledge(msg, sync=False)
- except messaging.exceptions.LinkClosed: return
- except messaging.exceptions.ConnectionError: return
-
- thread = Thread(name="auto-fetch-reconnect-urls", target=main)
- thread.setDaemon(True)
- thread.start()
-
- def stop(self):
- StoppableThread.stop(self)
- self.connection.detach()
-
- class Sender(Client):
- def __init__(self, broker, address):
- Client.__init__(self, broker)
- self.sent = 0 # Number of messages _reliably_ sent.
- self.sender = self.session.sender(address, capacity=1000)
-
- def send_counted(self, ttl):
- self.sender.send(Message(str(self.sent), ttl=ttl))
- self.sent += 1
-
- def run(self):
- while not self.stopped:
- choice = random.randint(0,4)
- if choice == 0: self.send_counted(None) # No ttl
- elif choice == 1: self.send_counted(100000) # Large ttl
- else: # Small ttl, might expire
- self.sender.send(Message("", ttl=random.random()/10))
- self.sender.send(Message("z"), sync=True) # Chaser.
-
- class Receiver(Client):
-
- def __init__(self, broker, address):
- Client.__init__(self, broker)
- self.received = 0 # Number of non-empty (reliable) messages received.
- self.receiver = self.session.receiver(address, capacity=1000)
- def run(self):
- try:
- while True:
- m = self.receiver.fetch(1)
- if m.content == "z": break
- if m.content: # Ignore unreliable messages
- # Ignore duplicates
- if int(m.content) == self.received: self.received += 1
- except Exception,e: self.error = e
-
- # def test_ttl_failover
-
- # Original cluster will all be killed so expect exit with failure
- # Set small purge interval.
- cluster = self.cluster(3, expect=EXPECT_EXIT_FAIL, args=["--queue-purge-interval=1"])
- for b in cluster: b.ready() # Wait for brokers to be ready
-
- # Python client failover produces noisy WARN logs, disable temporarily
- logger = logging.getLogger()
- log_level = logger.getEffectiveLevel()
- logger.setLevel(logging.ERROR)
- sender = None
- receiver = None
- try:
- # Start sender and receiver threads
- receiver = Receiver(cluster[0], "q;{create:always}")
- receiver.start()
- sender = Sender(cluster[0], "q;{create:always}")
- sender.start()
- # Wait for sender & receiver to get up and running
- retry(lambda: receiver.received > 0)
-
- # Kill brokers in a cycle.
- endtime = time.time() + self.duration()
- runtime = min(5.0, self.duration() / 4.0)
- i = 0
- while time.time() < endtime:
- for b in cluster[i:]: b.ready() # Check if any broker crashed.
- cluster[i].kill()
- i += 1
- b = cluster.start(expect=EXPECT_EXIT_FAIL)
- b.ready()
- time.sleep(runtime)
- sender.stop()
- receiver.stop()
- for b in cluster[i:]:
- b.ready() # Check it didn't crash
- b.kill()
- self.assertEqual(sender.sent, receiver.received)
- cluster_test_logs.verify_logs()
-
- finally:
- # Detach to avoid slow reconnect attempts during shut-down if test fails.
- if sender: sender.connection.detach()
- if receiver: receiver.connection.detach()
- logger.setLevel(log_level)
-
- def test_msg_group_failover(self):
- """Test fail-over during continuous send-receive of grouped messages.
- """
-
- class GroupedTrafficGenerator(Thread):
- def __init__(self, url, queue, group_key):
- Thread.__init__(self)
- self.url = url
- self.queue = queue
- self.group_key = group_key
- self.status = -1
-
- def run(self):
- # generate traffic for approx 10 seconds (2011 msgs / 200 per sec)
- cmd = ["msg_group_test",
- "--broker=%s" % self.url,
- "--address=%s" % self.queue,
- "--connection-options={%s}" % (Cluster.CONNECTION_OPTIONS),
- "--group-key=%s" % self.group_key,
- "--receivers=2",
- "--senders=3",
- "--messages=2011",
- "--send-rate=200",
- "--capacity=11",
- "--ack-frequency=23",
- "--allow-duplicates",
- "--group-size=37",
- "--randomize-group-size",
- "--interleave=13"]
- # "--trace"]
- self.generator = Popen( cmd );
- self.status = self.generator.wait()
- return self.status
-
- def results(self):
- self.join(timeout=30) # 3x assumed duration
- if self.isAlive(): return -1
- return self.status
-
- # Original cluster will all be killed so expect exit with failure
- cluster = self.cluster(3, expect=EXPECT_EXIT_FAIL, args=["-t"])
- for b in cluster: b.ready() # Wait for brokers to be ready
-
- # create a queue configured for shared message groups
- ssn0 = cluster[0].connect().session()
- q_args = "{'qpid.group_header_key':'group-id', 'qpid.shared_msg_group':1}"
- s0 = ssn0.sender("test-group-q; {create:always, node:{type:queue, x-declare:{arguments:%s}}}" % q_args)
-
- # Kill original brokers, start new ones for the duration.
- endtime = time.time() + self.duration();
- i = 0
- while time.time() < endtime:
- traffic = GroupedTrafficGenerator( cluster[i].host_port(),
- "test-group-q", "group-id" )
- traffic.start()
- time.sleep(1)
-
- for x in range(2):
- for b in cluster[i:]: b.ready() # Check if any broker crashed.
- cluster[i].kill()
- i += 1
- b = cluster.start(expect=EXPECT_EXIT_FAIL)
- time.sleep(1)
-
- # wait for traffic to finish, verify success
- self.assertEqual(0, traffic.results())
-
- for i in range(i, len(cluster)): cluster[i].kill()
-
-
-class StoreTests(BrokerTest):
- """
- Cluster tests that can only be run if there is a store available.
- """
- def args(self):
- assert BrokerTest.store_lib
- return ["--load-module", BrokerTest.store_lib]
-
- def test_store_loaded(self):
- """Ensure we are indeed loading a working store"""
- broker = self.broker(self.args(), name="recoverme", expect=EXPECT_EXIT_FAIL)
- m = Message("x", durable=True)
- broker.send_message("q", m)
- broker.kill()
- broker = self.broker(self.args(), name="recoverme")
- self.assertEqual("x", broker.get_message("q").content)
-
- def test_kill_restart(self):
- """Verify we can kill/resetart a broker with store in a cluster"""
- cluster = self.cluster(1, self.args())
- cluster.start("restartme", expect=EXPECT_EXIT_FAIL).kill()
-
- # Send a message, retrieve from the restarted broker
- cluster[0].send_message("q", "x")
- m = cluster.start("restartme").get_message("q")
- self.assertEqual("x", m.content)
-
- def stop_cluster(self,broker):
- """Clean shut-down of a cluster"""
- self.assertEqual(0, qpid_cluster.main(
- ["-kf", broker.host_port()]))
-
- def test_persistent_restart(self):
- """Verify persistent cluster shutdown/restart scenarios"""
- cluster = self.cluster(0, args=self.args() + ["--cluster-size=3"])
- a = cluster.start("a", expect=EXPECT_EXIT_OK, wait=False)
- b = cluster.start("b", expect=EXPECT_EXIT_OK, wait=False)
- c = cluster.start("c", expect=EXPECT_EXIT_FAIL, wait=True)
- a.send_message("q", Message("1", durable=True))
- # Kill & restart one member.
- c.kill()
- self.assertEqual(a.get_message("q").content, "1")
- a.send_message("q", Message("2", durable=True))
- c = cluster.start("c", expect=EXPECT_EXIT_OK)
- self.assertEqual(c.get_message("q").content, "2")
- # Shut down the entire cluster cleanly and bring it back up
- a.send_message("q", Message("3", durable=True))
- self.stop_cluster(a)
- a = cluster.start("a", wait=False)
- b = cluster.start("b", wait=False)
- c = cluster.start("c", wait=True)
- self.assertEqual(a.get_message("q").content, "3")
-
- def test_persistent_partial_failure(self):
- # Kill 2 members, shut down the last cleanly then restart
- # Ensure we use the clean database
- cluster = self.cluster(0, args=self.args() + ["--cluster-size=3"])
- a = cluster.start("a", expect=EXPECT_EXIT_FAIL, wait=False)
- b = cluster.start("b", expect=EXPECT_EXIT_FAIL, wait=False)
- c = cluster.start("c", expect=EXPECT_EXIT_OK, wait=True)
- a.send_message("q", Message("4", durable=True))
- a.kill()
- b.kill()
- self.assertEqual(c.get_message("q").content, "4")
- c.send_message("q", Message("clean", durable=True))
- self.stop_cluster(c)
- a = cluster.start("a", wait=False)
- b = cluster.start("b", wait=False)
- c = cluster.start("c", wait=True)
- self.assertEqual(a.get_message("q").content, "clean")
-
- def test_wrong_cluster_id(self):
- # Start a cluster1 broker, then try to restart in cluster2
- cluster1 = self.cluster(0, args=self.args())
- a = cluster1.start("a", expect=EXPECT_EXIT_OK)
- a.terminate()
- cluster2 = self.cluster(1, args=self.args())
- try:
- a = cluster2.start("a", expect=EXPECT_EXIT_FAIL)
- a.ready()
- self.fail("Expected exception")
- except: pass
-
- def test_wrong_shutdown_id(self):
- # Start 2 members and shut down.
- cluster = self.cluster(0, args=self.args()+["--cluster-size=2"])
- a = cluster.start("a", expect=EXPECT_EXIT_OK, wait=False)
- b = cluster.start("b", expect=EXPECT_EXIT_OK, wait=False)
- self.stop_cluster(a)
- self.assertEqual(a.wait(), 0)
- self.assertEqual(b.wait(), 0)
-
- # Restart with a different member and shut down.
- a = cluster.start("a", expect=EXPECT_EXIT_OK, wait=False)
- c = cluster.start("c", expect=EXPECT_EXIT_OK, wait=False)
- self.stop_cluster(a)
- self.assertEqual(a.wait(), 0)
- self.assertEqual(c.wait(), 0)
- # Mix members from both shutdown events, they should fail
- # TODO aconway 2010-03-11: can't predict the exit status of these
- # as it depends on the order of delivery of initial-status messages.
- # See comment at top of this file.
- a = cluster.start("a", expect=EXPECT_UNKNOWN, wait=False)
- b = cluster.start("b", expect=EXPECT_UNKNOWN, wait=False)
- self.assertRaises(Exception, lambda: a.ready())
- self.assertRaises(Exception, lambda: b.ready())
-
- def test_solo_store_clean(self):
- # A single node cluster should always leave a clean store.
- cluster = self.cluster(0, self.args())
- a = cluster.start("a", expect=EXPECT_EXIT_FAIL)
- a.send_message("q", Message("x", durable=True))
- a.kill()
- a = cluster.start("a")
- self.assertEqual(a.get_message("q").content, "x")
-
- def test_last_store_clean(self):
- # Verify that only the last node in a cluster to shut down has
- # a clean store. Start with cluster of 3, reduce to 1 then
- # increase again to ensure that a node that was once alone but
- # finally did not finish as the last node does not get a clean
- # store.
- cluster = self.cluster(0, self.args())
- a = cluster.start("a", expect=EXPECT_EXIT_FAIL)
- self.assertEqual(a.store_state(), "clean")
- b = cluster.start("b", expect=EXPECT_EXIT_FAIL)
- c = cluster.start("c", expect=EXPECT_EXIT_FAIL)
- self.assertEqual(b.store_state(), "dirty")
- self.assertEqual(c.store_state(), "dirty")
- retry(lambda: a.store_state() == "dirty")
-
- a.send_message("q", Message("x", durable=True))
- a.kill()
- b.kill() # c is last man, will mark store clean
- retry(lambda: c.store_state() == "clean")
- a = cluster.start("a", expect=EXPECT_EXIT_FAIL) # c no longer last man
- retry(lambda: c.store_state() == "dirty")
- c.kill() # a is now last man
- retry(lambda: a.store_state() == "clean")
- a.kill()
- self.assertEqual(a.store_state(), "clean")
- self.assertEqual(b.store_state(), "dirty")
- self.assertEqual(c.store_state(), "dirty")
-
- def test_restart_clean(self):
- """Verify that we can re-start brokers one by one in a
- persistent cluster after a clean shutdown"""
- cluster = self.cluster(0, self.args())
- a = cluster.start("a", expect=EXPECT_EXIT_OK)
- b = cluster.start("b", expect=EXPECT_EXIT_OK)
- c = cluster.start("c", expect=EXPECT_EXIT_OK)
- a.send_message("q", Message("x", durable=True))
- self.stop_cluster(a)
- a = cluster.start("a")
- b = cluster.start("b")
- c = cluster.start("c")
- self.assertEqual(c.get_message("q").content, "x")
-
- def test_join_sub_size(self):
- """Verify that after starting a cluster with cluster-size=N,
- we can join new members even if size < N-1"""
- cluster = self.cluster(0, self.args()+["--cluster-size=3"])
- a = cluster.start("a", wait=False, expect=EXPECT_EXIT_FAIL)
- b = cluster.start("b", wait=False, expect=EXPECT_EXIT_FAIL)
- c = cluster.start("c")
- a.send_message("q", Message("x", durable=True))
- a.send_message("q", Message("y", durable=True))
- a.kill()
- b.kill()
- a = cluster.start("a")
- self.assertEqual(c.get_message("q").content, "x")
- b = cluster.start("b")
- self.assertEqual(c.get_message("q").content, "y")
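Note: the deleted DTX tests above all exercise the same XA-style lifecycle (start, send work, end, optionally prepare, then commit or roll back) while a new cluster member joins mid-transaction. The sketch below is a minimal, self-contained Python illustration of that lifecycle for orientation only; ToyDtx and its methods are hypothetical stand-ins, not the real DtxTestFixture or broker API.

    # Illustrative stand-in for the XA lifecycle driven by the deleted tests.
    class ToyDtx(object):
        def __init__(self):
            self.queue = []        # messages visible to other sessions
            self.pending = []      # work buffered inside the transaction
            self.prepared = False

        def start(self):
            self.pending = []

        def send(self, msgs):
            self.pending.extend(msgs)      # not visible until commit

        def end(self):
            pass                           # work complete, may now prepare

        def prepare(self):
            self.prepared = True           # phase 1 of two-phase commit

        def commit(self, one_phase=False):
            assert one_phase or self.prepared
            self.queue.extend(self.pending)  # phase 2: make work visible
            self.pending = []

        def rollback(self):
            self.pending = []              # discard uncommitted work

    if __name__ == "__main__":
        t = ToyDtx()
        t.start()
        t.send(["1", "2"])
        assert t.queue == []               # not visible outside the transaction
        t.end()
        t.commit(one_phase=True)
        assert t.queue == ["1", "2"]
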
diff --git a/qpid/cpp/src/tests/failover_soak.cpp b/qpid/cpp/src/tests/failover_soak.cpp
deleted file mode 100644
index c2ac36a757..0000000000
--- a/qpid/cpp/src/tests/failover_soak.cpp
+++ /dev/null
@@ -1,827 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <sys/wait.h>
-#include <sys/time.h>
-#include <string.h>
-#include <sys/types.h>
-#include <signal.h>
-
-#include <string>
-#include <iostream>
-#include <sstream>
-#include <vector>
-
-#include <boost/assign.hpp>
-
-#include "qpid/framing/Uuid.h"
-
-#include <ForkedBroker.h>
-#include <qpid/client/Connection.h>
-
-
-
-
-
-using namespace std;
-using boost::assign::list_of;
-using namespace qpid::framing;
-using namespace qpid::client;
-
-
-namespace qpid {
-namespace tests {
-
-vector<pid_t> pids;
-
-typedef vector<ForkedBroker *> brokerVector;
-
-typedef enum
-{
- NO_STATUS,
- RUNNING,
- COMPLETED
-}
-childStatus;
-
-
-typedef enum
-{
- NO_TYPE,
- DECLARING_CLIENT,
- SENDING_CLIENT,
- RECEIVING_CLIENT
-}
-childType;
-
-
-ostream& operator<< ( ostream& os, const childType& ct ) {
- switch ( ct ) {
- case DECLARING_CLIENT: os << "Declaring Client"; break;
- case SENDING_CLIENT: os << "Sending Client"; break;
- case RECEIVING_CLIENT: os << "Receiving Client"; break;
- default: os << "No Client"; break;
- }
-
- return os;
-}
-
-
-
-
-struct child
-{
- child ( string & name, pid_t pid, childType type )
- : name(name), pid(pid), retval(-999), status(RUNNING), type(type)
- {
- gettimeofday ( & startTime, 0 );
- }
-
-
- void
- done ( int _retval )
- {
- retval = _retval;
- status = COMPLETED;
- gettimeofday ( & stopTime, 0 );
- }
-
-
- void
- setType ( childType t )
- {
- type = t;
- }
-
-
- string name;
- pid_t pid;
- int retval;
- childStatus status;
- childType type;
- struct timeval startTime,
- stopTime;
-};
-
-
-
-
-struct children : public vector<child *>
-{
-
- void
- add ( string & name, pid_t pid, childType type )
- {
- push_back ( new child ( name, pid, type ) );
- }
-
-
- child *
- get ( pid_t pid )
- {
- vector<child *>::iterator i;
- for ( i = begin(); i != end(); ++ i )
- if ( pid == (*i)->pid )
- return *i;
-
- return 0;
- }
-
-
- void
- exited ( pid_t pid, int retval )
- {
- child * kid = get ( pid );
- if(! kid)
- {
- if ( verbosity > 1 )
- {
- cerr << "children::exited warning: Can't find child with pid "
- << pid
- << endl;
- }
- return;
- }
-
- kid->done ( retval );
- }
-
-
- int
- unfinished ( )
- {
- int count = 0;
-
- vector<child *>::iterator i;
- for ( i = begin(); i != end(); ++ i )
- if ( COMPLETED != (*i)->status )
- ++ count;
-
- return count;
- }
-
-
- int
- checkChildren ( )
- {
- for ( unsigned int i = 0; i < pids.size(); ++ i )
- {
- int pid = pids[i];
- int returned_pid;
- int status;
-
- child * kid = get ( pid );
-
- if ( kid->status != COMPLETED )
- {
- returned_pid = waitpid ( pid, &status, WNOHANG );
-
- if ( returned_pid == pid )
- {
- int exit_status = WEXITSTATUS(status);
- exited ( pid, exit_status );
- if ( exit_status ) // this is a child error.
- return exit_status;
- }
- }
- }
-
- return 0;
- }
-
-
- void
- killEverybody ( )
- {
- vector<child *>::iterator i;
- for ( i = begin(); i != end(); ++ i )
- kill ( (*i)->pid, 9 );
- }
-
-
-
- void
- print ( )
- {
- cout << "--- status of all children --------------\n";
- vector<child *>::iterator i;
- for ( i = begin(); i != end(); ++ i )
- cout << "child: " << (*i)->name
- << " status: " << (*i)->status
- << endl;
- cout << "\n\n\n\n";
- }
-
- int verbosity;
-};
-
-
-children allMyChildren;
-
-
-void
-childExit ( int )
-{
- int childReturnCode;
- pid_t pid = waitpid ( 0, & childReturnCode, WNOHANG);
-
- if ( pid > 0 )
- allMyChildren.exited ( pid, childReturnCode );
-}
-
-
-
-int
-mrand ( int maxDesiredVal ) {
- double zeroToOne = (double) rand() / (double) RAND_MAX;
- return (int) (zeroToOne * (double) maxDesiredVal);
-}
-
-
-
-int
-mrand ( int minDesiredVal, int maxDesiredVal ) {
- int interval = maxDesiredVal - minDesiredVal;
- return minDesiredVal + mrand ( interval );
-}
-
-
-
-void
-makeClusterName ( string & s ) {
- stringstream ss;
- ss << "soakTestCluster_" << Uuid(true).str();
- s = ss.str();
-}
-
-
-
-
-
-void
-printBrokers ( brokerVector & brokers )
-{
- cout << "Broker List ------------ size: " << brokers.size() << "\n";
- for ( brokerVector::iterator i = brokers.begin(); i != brokers.end(); ++ i) {
- cout << "pid: "
- << (*i)->getPID()
- << " port: "
- << (*i)->getPort()
- << endl;
- }
- cout << "end Broker List ------------\n";
-}
-
-
-
-
-ForkedBroker * newbie = 0;
-int newbie_port = 0;
-
-
-
-bool
-wait_for_newbie ( )
-{
- if ( ! newbie )
- return true;
-
- try
- {
- Connection connection;
- connection.open ( "127.0.0.1", newbie_port );
- connection.close();
- newbie = 0; // He's no newbie anymore!
- return true;
- }
- catch ( const std::exception& error )
- {
- std::cerr << "wait_for_newbie error: "
- << error.what()
- << endl;
- return false;
- }
-}
-
-bool endsWith(const char* str, const char* suffix) {
- return (strlen(suffix) < strlen(str) && 0 == strcmp(str+strlen(str)-strlen(suffix), suffix));
-}
-
-
-void
-startNewBroker ( brokerVector & brokers,
- char const * moduleOrDir,
- string const clusterName,
- int verbosity,
- int durable )
-{
- static int brokerId = 0;
- stringstream path, prefix;
- prefix << "soak-" << brokerId;
- std::vector<std::string> argv = list_of<string>
- ("qpidd")
- ("--cluster-name")(clusterName)
- ("--auth=no")
- ("--mgmt-enable=no")
- ("--log-prefix")(prefix.str())
- ("--log-to-file")(prefix.str()+".log")
- ("--log-enable=info+")
- ("--log-enable=debug+:cluster")
- ("TMP_DATA_DIR");
-
- if (endsWith(moduleOrDir, "cluster.so")) {
- // Module path specified, load only that module.
- argv.push_back(string("--load-module=")+moduleOrDir);
- argv.push_back("--no-module-dir");
- if ( durable ) {
- std::cerr << "failover_soak warning: durable arg hass no effect. Use \"dir\" option of \"moduleOrDir\".\n";
- }
- }
- else {
- // Module directory specified, load all modules in dir.
- argv.push_back(string("--module-dir=")+moduleOrDir);
- }
-
- newbie = new ForkedBroker (argv);
- newbie_port = newbie->getPort();
- ForkedBroker * broker = newbie;
-
- if ( verbosity > 0 )
- std::cerr << "new broker created: pid == "
- << broker->getPID()
- << " log-prefix == "
- << "soak-" << brokerId
- << endl;
- brokers.push_back ( broker );
-
- ++ brokerId;
-}
-
-
-
-
-
-bool
-killFrontBroker ( brokerVector & brokers, int verbosity )
-{
- cerr << "killFrontBroker: waiting for newbie sync...\n";
- if ( ! wait_for_newbie() )
- return false;
- cerr << "killFrontBroker: newbie synced.\n";
-
- if ( verbosity > 0 )
- cout << "killFrontBroker pid: " << brokers[0]->getPID() << " on port " << brokers[0]->getPort() << endl;
- try { brokers[0]->kill(9); }
- catch ( const exception& error ) {
- if ( verbosity > 0 )
- {
- cout << "error killing broker: "
- << error.what()
- << endl;
- }
-
- return false;
- }
- delete brokers[0];
- brokers.erase ( brokers.begin() );
- return true;
-}
-
-
-
-
-
-/*
- * The optional delay is to avoid killing newbie brokers that have just
- * been added and are still in the process of updating. This causes
- * spurious, test-generated errors that scare everybody.
- */
-void
-killAllBrokers ( brokerVector & brokers, int delay )
-{
- if ( delay > 0 )
- {
- std::cerr << "Killing all brokers after delay of " << delay << endl;
- sleep ( delay );
- }
-
- for ( uint i = 0; i < brokers.size(); ++ i )
- try { brokers[i]->kill(9); }
- catch ( const exception& error )
- {
- std::cerr << "killAllBrokers Warning: exception during kill on broker "
- << i
- << " "
- << error.what()
- << endl;
- }
-}
-
-
-
-
-
-pid_t
-runDeclareQueuesClient ( brokerVector brokers,
- char const * host,
- char const * path,
- int verbosity,
- int durable,
- char const * queue_prefix,
- int n_queues
- )
-{
- string name("declareQueues");
- int port = brokers[0]->getPort ( );
-
- if ( verbosity > 1 )
- cout << "startDeclareQueuesClient: host: "
- << host
- << " port: "
- << port
- << endl;
- stringstream portSs;
- portSs << port;
-
- vector<const char*> argv;
- argv.push_back ( "declareQueues" );
- argv.push_back ( host );
- string portStr = portSs.str();
- argv.push_back ( portStr.c_str() );
- if ( durable )
- argv.push_back ( "1" );
- else
- argv.push_back ( "0" );
-
- argv.push_back ( queue_prefix );
-
- char n_queues_str[20];
- sprintf ( n_queues_str, "%d", n_queues );
- argv.push_back ( n_queues_str );
-
- argv.push_back ( 0 );
- pid_t pid = fork();
-
- if ( ! pid ) {
- execv ( path, const_cast<char * const *>(&argv[0]) );
- perror ( "error executing declareQueues: " );
- return 0;
- }
-
- allMyChildren.add ( name, pid, DECLARING_CLIENT );
- return pid;
-}
-
-
-
-
-
-pid_t
-startReceivingClient ( brokerVector brokers,
- char const * host,
- char const * receiverPath,
- char const * reportFrequency,
- int verbosity,
- char const * queue_name
- )
-{
- string name("receiver");
- int port = brokers[0]->getPort ( );
-
- if ( verbosity > 1 )
- cout << "startReceivingClient: port " << port << endl;
-
- // verbosity has to be > 1 to let clients talk.
- int client_verbosity = (verbosity > 1 ) ? 1 : 0;
-
- char portStr[100];
- char verbosityStr[100];
- sprintf(portStr, "%d", port);
- sprintf(verbosityStr, "%d", client_verbosity);
-
-
- vector<const char*> argv;
- argv.push_back ( "resumingReceiver" );
- argv.push_back ( host );
- argv.push_back ( portStr );
- argv.push_back ( reportFrequency );
- argv.push_back ( verbosityStr );
- argv.push_back ( queue_name );
- argv.push_back ( 0 );
-
- pid_t pid = fork();
- pids.push_back ( pid );
-
- if ( ! pid ) {
- execv ( receiverPath, const_cast<char * const *>(&argv[0]) );
- perror ( "error executing receiver: " );
- return 0;
- }
-
- allMyChildren.add ( name, pid, RECEIVING_CLIENT );
- return pid;
-}
-
-
-
-
-
-pid_t
-startSendingClient ( brokerVector brokers,
- char const * host,
- char const * senderPath,
- char const * nMessages,
- char const * reportFrequency,
- int verbosity,
- int durability,
- char const * queue_name
- )
-{
- string name("sender");
- int port = brokers[0]->getPort ( );
-
- if ( verbosity > 1)
- cout << "startSenderClient: port " << port << endl;
- char portStr[100];
- char verbosityStr[100];
- //
- // verbosity has to be > 1 to let clients talk.
- int client_verbosity = (verbosity > 1 ) ? 1 : 0;
-
- sprintf ( portStr, "%d", port);
- sprintf ( verbosityStr, "%d", client_verbosity);
-
- vector<const char*> argv;
- argv.push_back ( "replayingSender" );
- argv.push_back ( host );
- argv.push_back ( portStr );
- argv.push_back ( nMessages );
- argv.push_back ( reportFrequency );
- argv.push_back ( verbosityStr );
- if ( durability )
- argv.push_back ( "1" );
- else
- argv.push_back ( "0" );
- argv.push_back ( queue_name );
- argv.push_back ( 0 );
-
- pid_t pid = fork();
- pids.push_back ( pid );
-
- if ( ! pid ) {
- execv ( senderPath, const_cast<char * const *>(&argv[0]) );
- perror ( "error executing sender: " );
- return 0;
- }
-
- allMyChildren.add ( name, pid, SENDING_CLIENT );
- return pid;
-}
-
-
-
-#define HUNKY_DORY 0
-#define BAD_ARGS 1
-#define CANT_FORK_DQ 2
-#define CANT_FORK_RECEIVER 3
-#define CANT_FORK_SENDER 4
-#define DQ_FAILED 5
-#define ERROR_ON_CHILD 6
-#define HANGING 7
-#define ERROR_KILLING_BROKER 8
-
-}} // namespace qpid::tests
-
-using namespace qpid::tests;
-
- // If you want durability, use the "dir" option of "moduleOrDir".
-int
-main ( int argc, char const ** argv )
-{
- int brokerKills = 0;
- if ( argc != 11 ) {
- cerr << "Usage: "
- << argv[0]
- << "moduleOrDir declareQueuesPath senderPath receiverPath nMessages reportFrequency verbosity durable n_queues n_brokers"
- << endl;
- cerr << "\tverbosity is an integer, durable is 0 or 1\n";
- return BAD_ARGS;
- }
- signal ( SIGCHLD, childExit );
-
- int i = 1;
- char const * moduleOrDir = argv[i++];
- char const * declareQueuesPath = argv[i++];
- char const * senderPath = argv[i++];
- char const * receiverPath = argv[i++];
- char const * nMessages = argv[i++];
- char const * reportFrequency = argv[i++];
- int verbosity = atoi(argv[i++]);
- int durable = atoi(argv[i++]);
- int n_queues = atoi(argv[i++]);
- int n_brokers = atoi(argv[i++]);
-
- char const * host = "127.0.0.1";
-
- allMyChildren.verbosity = verbosity;
-
- string clusterName;
-
- srand ( getpid() );
-
- makeClusterName ( clusterName );
-
- brokerVector brokers;
-
- if ( verbosity > 1 )
- cout << "Starting initial cluster...\n";
-
- for ( int i = 0; i < n_brokers; ++ i ) {
- startNewBroker ( brokers,
- moduleOrDir,
- clusterName,
- verbosity,
- durable );
- }
-
-
- if ( verbosity > 0 )
- printBrokers ( brokers );
-
- // Get prefix for each queue name.
- stringstream queue_prefix;
- queue_prefix << "failover_soak_" << getpid();
- string queue_prefix_str(queue_prefix.str());
-
- // Run the declareQueues child.
- int childStatus;
- pid_t dqClientPid =
- runDeclareQueuesClient ( brokers,
- host,
- declareQueuesPath,
- verbosity,
- durable,
- queue_prefix_str.c_str(),
- n_queues
- );
- if ( -1 == dqClientPid ) {
- cerr << "END_OF_TEST ERROR_START_DECLARE_1\n";
- return CANT_FORK_DQ;
- }
-
- // Don't continue until declareQueues is finished.
- pid_t retval = waitpid ( dqClientPid, & childStatus, 0);
- if ( retval != dqClientPid) {
- cerr << "END_OF_TEST ERROR_START_DECLARE_2\n";
- return DQ_FAILED;
- }
- allMyChildren.exited ( dqClientPid, childStatus );
-
-
- /*
- Start one receiving and one sending client for each queue.
- */
- for ( int i = 0; i < n_queues; ++ i ) {
-
- stringstream queue_name;
- queue_name << queue_prefix.str() << '_' << i;
- string queue_name_str(queue_name.str());
-
- // Receiving client ---------------------------
- pid_t receivingClientPid =
- startReceivingClient ( brokers,
- host,
- receiverPath,
- reportFrequency,
- verbosity,
- queue_name_str.c_str() );
- if ( -1 == receivingClientPid ) {
- cerr << "END_OF_TEST ERROR_START_RECEIVER\n";
- return CANT_FORK_RECEIVER;
- }
-
-
- // Sending client ---------------------------
- pid_t sendingClientPid =
- startSendingClient ( brokers,
- host,
- senderPath,
- nMessages,
- reportFrequency,
- verbosity,
- durable,
- queue_name_str.c_str() );
- if ( -1 == sendingClientPid ) {
- cerr << "END_OF_TEST ERROR_START_SENDER\n";
- return CANT_FORK_SENDER;
- }
- }
-
-
- int minSleep = 2,
- maxSleep = 6;
-
- int totalBrokers = n_brokers;
-
- int loop = 0;
-
- while ( 1 )
- {
- ++ loop;
-
- /*
- if ( verbosity > 1 )
- std::cerr << "------- loop " << loop << " --------\n";
-
- if ( verbosity > 0 )
- cout << totalBrokers << " brokers have been added to the cluster.\n\n\n";
- */
-
- // Sleep for a while. -------------------------
- int sleepyTime = mrand ( minSleep, maxSleep );
- sleep ( sleepyTime );
-
- int bullet = mrand ( 100 );
- if ( bullet >= 95 )
- {
- fprintf ( stderr, "Killing oldest broker...\n" );
-
- // Kill the oldest broker. --------------------------
- if ( ! killFrontBroker ( brokers, verbosity ) )
- {
- allMyChildren.killEverybody();
- killAllBrokers ( brokers, 5 );
- std::cerr << "END_OF_TEST ERROR_BROKER\n";
- return ERROR_KILLING_BROKER;
- }
- ++ brokerKills;
-
- // Start a new broker. --------------------------
- if ( verbosity > 0 )
- cout << "Starting new broker.\n\n";
-
- startNewBroker ( brokers,
- moduleOrDir,
- clusterName,
- verbosity,
- durable );
- ++ totalBrokers;
- printBrokers ( brokers );
- cerr << brokerKills << " brokers have been killed.\n\n\n";
- }
-
- int retval = allMyChildren.checkChildren();
- if ( retval )
- {
- std::cerr << "END_OF_TEST ERROR_CLIENT\n";
- allMyChildren.killEverybody();
- killAllBrokers ( brokers, 5 );
- return ERROR_ON_CHILD;
- }
-
- // If all children have exited, quit.
- int unfinished = allMyChildren.unfinished();
- if ( unfinished == 0 ) {
- killAllBrokers ( brokers, 5 );
-
- if ( verbosity > 1 )
- cout << "failoverSoak: all children have exited.\n";
-
- std::cerr << "END_OF_TEST SUCCESSFUL\n";
- return HUNKY_DORY;
- }
-
- }
-
- allMyChildren.killEverybody();
- killAllBrokers ( brokers, 5 );
-
- std::cerr << "END_OF_TEST SUCCESSFUL\n";
-
- return HUNKY_DORY;
-}
-
-
-
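Note: the deleted failover_soak.cpp harness above follows a simple supervision pattern: fork a set of client processes, poll for their exits (waitpid with WNOHANG), and tear everything down on the first non-zero exit. A rough Python sketch of the same pattern is shown below for readers skimming the diff; the commands are placeholders, not the real soak-test clients.

    # Sketch only: supervise child processes, fail fast on the first bad exit.
    import subprocess, time

    def soak(commands, max_seconds=60):
        children = [subprocess.Popen(cmd) for cmd in commands]
        deadline = time.time() + max_seconds
        try:
            while time.time() < deadline:
                for child in children:
                    status = child.poll()          # non-blocking, like waitpid(WNOHANG)
                    if status not in (None, 0):    # a client failed
                        return status
                if all(c.poll() == 0 for c in children):
                    return 0                       # all children finished cleanly
                time.sleep(2)
            return 0
        finally:
            for child in children:                 # analogous to killEverybody()
                if child.poll() is None:
                    child.kill()

    if __name__ == "__main__":
        print(soak([["true"], ["sleep", "1"]], max_seconds=10))
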
diff --git a/qpid/cpp/src/tests/federated_cluster_test b/qpid/cpp/src/tests/federated_cluster_test
deleted file mode 100755
index f42b7501b8..0000000000
--- a/qpid/cpp/src/tests/federated_cluster_test
+++ /dev/null
@@ -1,153 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Test reliability of the replication feature in the face of link
-# failures:
-srcdir=`dirname $0`
-source ./test_env.sh
-
-trap stop_brokers EXIT
-
-fail() {
- echo $1
- exit 1
-}
-
-stop_brokers() {
- if [[ $BROKER_A ]] ; then
- ../qpidd --no-module-dir -q --port $BROKER_A
- unset BROKER_A
- fi
- if [[ $NODE_1 ]] ; then
- ../qpidd --no-module-dir -q --port $NODE_1
- unset NODE_1
- fi
- if [[ $NODE_2 ]] ; then
- ../qpidd --no-module-dir -q --port $NODE_2
- unset NODE_2
- fi
- if [ -f cluster.ports ]; then
- rm cluster.ports
- fi
-}
-
-start_brokers() {
- #start single node...
- BROKER_A=`../qpidd --daemon --port 0 --no-data-dir --no-module-dir --auth no --log-enable info+` || fail "BROKER_A failed to start"
-
- #...and start cluster
- $srcdir/start_cluster 2 || fail "Could not start cluster"
- NODE_1=$(head -1 cluster.ports)
- NODE_2=$(tail -1 cluster.ports)
- test -n "$NODE_1" || fail "NODE_1 failed to start"
- test -n "$NODE_2" || fail "NODE_2 failed to start"
-}
-
-setup() {
- #create exchange on both cluster and single broker
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_A" add exchange direct test-exchange
- $PYTHON_COMMANDS/qpid-config -b "localhost:$NODE_1" add exchange direct test-exchange
-
- #create dynamic routes for test exchange
- $PYTHON_COMMANDS/qpid-route dynamic add "localhost:$NODE_2" "localhost:$BROKER_A" test-exchange
- $PYTHON_COMMANDS/qpid-route dynamic add "localhost:$BROKER_A" "localhost:$NODE_2" test-exchange
-
- #create test queue on cluster and bind it to the test exchange
- $PYTHON_COMMANDS/qpid-config -b "localhost:$NODE_1" add queue test-queue
- $PYTHON_COMMANDS/qpid-config -b "localhost:$NODE_1" bind test-exchange test-queue to-cluster
-
- #create test queue on single broker and bind it to the test exchange
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_A" add queue test-queue
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_A" bind test-exchange test-queue from-cluster
-}
-
-run_test_pull_to_cluster_two_consumers() {
- #start consumers on each of the two nodes of the cluster
- ./receiver --port $NODE_1 --queue test-queue --credit-window 1 > fed1.out.tmp &
- ./receiver --port $NODE_2 --queue test-queue --credit-window 1 > fed2.out.tmp &
-
- #send stream of messages to test exchange on single broker
- for i in `seq 1 1000`; do echo Message $i >> fed.in.tmp; done
- ./sender --port $BROKER_A --exchange test-exchange --routing-key to-cluster --send-eos 2 < fed.in.tmp
-
- #combine output of the two consumers, sort it and compare with the expected stream
- wait
- sort -g -k 2 fed1.out.tmp fed2.out.tmp > fed.out.tmp
- diff fed.in.tmp fed.out.tmp || fail "federated link to cluster failed: expectations not met!"
-
- rm -f fed*.tmp #cleanup
-}
-
-run_test_pull_to_cluster() {
- #send stream of messages to test exchange on single broker
- for i in `seq 1 1000`; do echo Message $i >> fed.in.tmp; done
- ./sender --port $BROKER_A --exchange test-exchange --routing-key to-cluster --send-eos 1 < fed.in.tmp
-
- #consume from remaining node of the cluster
- ./receiver --port $NODE_2 --queue test-queue > fed.out.tmp
-
- #verify all messages are received
- diff fed.in.tmp fed.out.tmp || fail "federated link to cluster failed: expectations not met!"
-
- rm -f fed*.tmp #cleanup
-}
-
-run_test_pull_from_cluster() {
- #start consumer on single broker
- ./receiver --port $BROKER_A --queue test-queue --credit-window 1 > fed.out.tmp &
-
- #send stream of messages to test exchange on cluster
- for i in `seq 1 1000`; do echo Message $i >> fed.in.tmp; done
- ./sender --port $NODE_2 --exchange test-exchange --routing-key from-cluster --send-eos 1 < fed.in.tmp
-
- #verify all messages are received
- wait
- diff fed.in.tmp fed.out.tmp || fail "federated link from cluster failed: expectations not met!"
-
- rm -f fed*.tmp #cleanup
-}
-
-
-if test -d ${PYTHON_DIR}; then
- . cpg_check.sh
- cpg_enabled || exit 0
-
- rm -f fed*.tmp #cleanup any files left from previous run
- start_brokers
- echo "brokers started"
- setup
- echo "setup completed"
- run_test_pull_to_cluster_two_consumers
- echo "federated link to cluster verified"
- run_test_pull_from_cluster
- echo "federated link from cluster verified"
- if [[ $TEST_NODE_FAILURE ]] ; then
- #kill first cluster node and retest
- kill -9 $(../qpidd --check --port $NODE_1) && unset NODE_1
- echo "killed first cluster node; waiting for links to re-establish themselves..."
- sleep 5
- echo "retesting..."
- run_test_pull_to_cluster
- echo "federated link to cluster verified"
- run_test_pull_from_cluster
- echo "federated link from cluster verified"
- fi
-fi
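Note: the deleted federated_cluster_test script above decides pass/fail by merging the two consumers' outputs, sorting them numerically by message number (sort -g -k 2) and diffing them against the sent stream. The Python sketch below restates that check only to make the pass criterion explicit; it assumes the same fed*.tmp temporaries the script writes.

    # Sketch of the verification step: merge, sort numerically, compare.
    def merge_received(paths):
        lines = []
        for path in paths:
            with open(path) as f:
                lines.extend(l.rstrip("\n") for l in f if l.strip())
        # "Message 17" -> sort on the second field, like `sort -g -k 2`
        return sorted(lines, key=lambda l: int(l.split()[1]))

    def verify(sent_path, received_paths):
        with open(sent_path) as f:
            sent = [l.rstrip("\n") for l in f if l.strip()]
        return sent == merge_received(received_paths)

    if __name__ == "__main__":
        print(verify("fed.in.tmp", ["fed1.out.tmp", "fed2.out.tmp"]))
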
diff --git a/qpid/cpp/src/tests/federation_sys.py b/qpid/cpp/src/tests/federation_sys.py
index 11590f684e..e2553e4cf3 100755
--- a/qpid/cpp/src/tests/federation_sys.py
+++ b/qpid/cpp/src/tests/federation_sys.py
@@ -7,9 +7,9 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -37,12 +37,12 @@ class Enum(object):
class QmfTestBase010(TestBase010):
-
+
_brokers = []
_links = []
_bridges = []
_alt_exch_ops = Enum(none=0, create=1, delete=2)
-
+
class _Broker(object):
"""
This broker proxy object holds the Qmf proxy to a broker of known address as well as the QMF broker
@@ -64,7 +64,7 @@ class QmfTestBase010(TestBase010):
for session in self.sessions:
try: # Session may have been closed by broker error
session.close()
- except Exception, e: print "WARNING: %s: Unable to close session %s (%s): %s %s" % (self, session, hex(id(session)), type(e), e)
+ except Exception, e: print "WARNING: %s: Unable to close session %s (%s): %s %s" % (self, session, hex(id(session)), type(e), e)
try: # Connection may have been closed by broker error
self.connection.close()
except Exception, e: print "WARNING: %s: Unable to close connection %s (%s): %s %s" % (self, self.connection, hex(id(self.connection)), type(e), e)
@@ -72,7 +72,7 @@ class QmfTestBase010(TestBase010):
session = self.connection.session(name, transactional_flag)
self.sessions.append(session)
return session
-
+
def setUp(self):
"""
Called one before each test starts
@@ -96,34 +96,22 @@ class QmfTestBase010(TestBase010):
b.destroy(self.qmf)
TestBase010.tearDown(self)
self.qmf.close()
-
+
#--- General test utility functions
-
+
def _get_name(self):
"""
Return the name of method which called this method stripped of "test_" prefix. Used for naming
queues and exchanges on a per-test basis.
"""
return stack()[1][3][5:]
-
+
def _get_broker_port(self, key):
"""
Get the port of a broker defined in the environment using -D<key>=portno
"""
return int(self.defines[key])
-
- def _get_cluster_ports(self, key):
- """
- Get the cluster ports from the parameters of the test which place it in the environment using
- -D<key>="port0 port1 ... portN" (space-separated)
- """
- ports = []
- ports_str = self.defines[key]
- if ports_str:
- for p in ports_str.split():
- ports.append(int(p))
- return ports
-
+
def _get_send_address(self, exch_name, queue_name):
"""
Get an address to which to send messages based on the exchange name and queue name, but taking into account
@@ -132,18 +120,15 @@ class QmfTestBase010(TestBase010):
if len(exch_name) == 0: # Default exchange
return queue_name
return "%s/%s" % (exch_name, queue_name)
-
- def _get_broker(self, cluster_flag, broker_port_key, cluster_ports_key):
+
+ def _get_broker(self, broker_port_key):
"""
Read the port numbers for pre-started brokers from the environment using keys, then find or create and return
the Qmf broker proxy for the appropriate broker
"""
- if cluster_flag:
- port = self._get_cluster_ports(cluster_ports_key)[0] # Always use the first node in the cluster
- else:
- port = self._get_broker_port(broker_port_key)
+ port = self._get_broker_port(broker_port_key)
return self._find_create_broker("localhost:%s" % port)
-
+ ################
def _get_msg_subject(self, topic_key):
"""
Return an appropriate subject for sending a message to a known topic. Return None if there is no topic.
@@ -152,7 +137,7 @@ class QmfTestBase010(TestBase010):
if "*" in topic_key: return topic_key.replace("*", "test")
if "#" in topic_key: return topic_key.replace("#", "multipart.test")
return topic_key
-
+
def _send_msgs(self, session_name, broker, addr, msg_count, msg_content = "Message_%03d", topic_key = "",
msg_durable_flag = False, enq_txn_size = 0):
"""
@@ -172,7 +157,7 @@ class QmfTestBase010(TestBase010):
send_session.commit()
sender.close()
send_session.close()
-
+
def _receive_msgs(self, session_name, broker, addr, msg_count, msg_content = "Message_%03d", deq_txn_size = 0,
timeout = 0):
"""
@@ -214,9 +199,9 @@ class QmfTestBase010(TestBase010):
receive_session.commit()
receiver.close()
receive_session.close()
-
+
#--- QMF-specific utility functions
-
+
def _get_qmf_property(self, props, key):
"""
Get the value of a named property key kj from a property list [(k0, v0), (k1, v1), ... (kn, vn)].
@@ -225,13 +210,13 @@ class QmfTestBase010(TestBase010):
if k.name == key:
return v
return None
-
+
def _check_qmf_return(self, method_result):
"""
Check the result of a Qmf-defined method call
"""
self.assertTrue(method_result.status == 0, method_result.text)
-
+
def _check_optional_qmf_property(self, qmf_broker, type, qmf_object, key, expected_val, obj_ref_flag):
"""
Optional Qmf properties don't show up in the properties list when they are not specified. Checks for
@@ -253,9 +238,9 @@ class QmfTestBase010(TestBase010):
(type, qmf_object.name, key, val, expected_val))
else:
self.fail("%s %s exists, but has an unexpected %s property \"%s\" set." % (type, qmf_object.name, key, val))
-
+
#--- Find/create Qmf broker objects
-
+
def _find_qmf_broker(self, url):
"""
Find the Qmf broker object for the given broker URL. The broker must have been previously added to Qmf through
@@ -265,7 +250,7 @@ class QmfTestBase010(TestBase010):
if b.getBroker().getUrl() == url:
return b
return None
-
+
def _find_create_broker(self, url):
"""
Find a running broker through Qmf. If it does not exist, add it (assuming the broker is already running).
@@ -280,9 +265,9 @@ class QmfTestBase010(TestBase010):
else:
broker.qmf_broker = qmf_broker
return broker
-
+
#--- Find/create/delete exchanges
-
+
def _find_qmf_exchange(self, qmf_broker, name, type, alternate, durable, auto_delete):
"""
Find Qmf exchange object
@@ -302,7 +287,7 @@ class QmfTestBase010(TestBase010):
(name, e.autoDelete, auto_delete))
return e
return None
-
+
def _find_create_qmf_exchange(self, qmf_broker, name, type, alternate, durable, auto_delete, args):
"""
Find Qmf exchange object if exchange exists, create exchange and return its Qmf object if not
@@ -315,7 +300,7 @@ class QmfTestBase010(TestBase010):
e = self._find_qmf_exchange(qmf_broker, name, type, alternate, durable, auto_delete)
self.assertNotEqual(e, None, "Creation of exchange %s on broker %s failed" % (name, qmf_broker.getBroker().getUrl()))
return e
-
+
def _find_delete_qmf_exchange(self, qmf_broker, name, type, alternate, durable, auto_delete):
"""
Find and delete Qmf exchange object if it exists
@@ -323,9 +308,9 @@ class QmfTestBase010(TestBase010):
e = self._find_qmf_exchange(qmf_broker, name, type, alternate, durable, auto_delete)
if e is not None and not auto_delete:
self._check_qmf_return(qmf_broker.delete(type="exchange", name=name, options={}))
-
+
#--- Find/create/delete queues
-
+
def _find_qmf_queue(self, qmf_broker, name, alternate_exchange, durable, exclusive, auto_delete):
"""
Find a Qmf queue object
@@ -344,7 +329,7 @@ class QmfTestBase010(TestBase010):
(name, q.autoDelete, auto_delete))
return q
return None
-
+
def _find_create_qmf_queue(self, qmf_broker, name, alternate_exchange, durable, exclusive, auto_delete, args):
"""
Find Qmf queue object if queue exists, create queue and return its Qmf object if not
@@ -357,7 +342,7 @@ class QmfTestBase010(TestBase010):
q = self._find_qmf_queue(qmf_broker, name, alternate_exchange, durable, exclusive, auto_delete)
self.assertNotEqual(q, None, "Creation of queue %s on broker %s failed" % (name, qmf_broker.getBroker().getUrl()))
return q
-
+
def _find_delete_qmf_queue(self, qmf_broker, name, alternate_exchange, durable, exclusive, auto_delete, args):
"""
Find and delete Qmf queue object if it exists
@@ -365,9 +350,9 @@ class QmfTestBase010(TestBase010):
q = self._find_qmf_queue(qmf_broker, name, alternate_exchange, durable, exclusive, auto_delete)
if q is not None and not auto_delete:
self._check_qmf_return(qmf_broker.delete(type="queue", name=name, options={}))
-
+
#--- Find/create/delete bindings (between an exchange and a queue)
-
+
def _find_qmf_binding(self, qmf_broker, qmf_exchange, qmf_queue, binding_key, binding_args):
"""
Find a Qmf binding object
@@ -383,7 +368,7 @@ class QmfTestBase010(TestBase010):
(qmf_exchange.name, qmf_queue.name, b.arguments, binding_args))
return b
return None
-
+
def _find_create_qmf_binding(self, qmf_broker, qmf_exchange, qmf_queue, binding_key, binding_args):
"""
Find Qmf binding object if it exists, create binding and return its Qmf object if not
@@ -396,7 +381,7 @@ class QmfTestBase010(TestBase010):
self.assertNotEqual(b, None, "Creation of binding between exchange %s and queue %s with key %s failed" %
(qmf_exchange.name, qmf_queue.name, binding_key))
return b
-
+
def _find_delete_qmf_binding(self, qmf_broker, qmf_exchange, qmf_queue, binding_key, binding_args):
"""
Find and delete Qmf binding object if it exists
@@ -405,7 +390,7 @@ class QmfTestBase010(TestBase010):
if b is not None:
if len(qmf_exchange.name) > 0: # not default exchange
self._check_qmf_return(qmf_broker.delete(type="binding", name="%s/%s/%s" % (qmf_exchange.name, qmf_queue.name, binding_key), options={}))
-
+
#--- Find/create a link
def _find_qmf_link(self, qmf_from_broker_proxy, host, port):
@@ -416,7 +401,7 @@ class QmfTestBase010(TestBase010):
if l.host == host and l.port == port:
return l
return None
-
+
def _find_create_qmf_link(self, qmf_from_broker, qmf_to_broker_proxy, link_durable_flag, auth_mechanism, user_id,
password, transport, pause_interval, link_ready_timeout):
"""
@@ -433,24 +418,19 @@ class QmfTestBase010(TestBase010):
(qmf_from_broker.getBroker().getUrl(), qmf_to_broker_proxy.getUrl()))
self._wait_for_link(l, pause_interval, link_ready_timeout)
return l
-
+
def _wait_for_link(self, link, pause_interval, link_ready_timeout):
"""
Wait for link to become active (state=Operational)
"""
tot_time = 0
link.update()
- if link.state == "":
- # Link mgmt updates for the c++ link object are disabled when in a cluster because of inconsistent state:
- # one is "Operational", the other "Passive". In this case, wait a bit and hope for the best...
- sleep(2*pause_interval)
- else:
- while link.state != "Operational" and tot_time < link_ready_timeout:
- sleep(pause_interval)
- tot_time += pause_interval
- link.update()
- self.assertEqual(link.state, "Operational", "Timeout: Link not operational, state=%s" % link.state)
-
+ while link.state != "Operational" and tot_time < link_ready_timeout:
+ sleep(pause_interval)
+ tot_time += pause_interval
+ link.update()
+ self.assertEqual(link.state, "Operational", "Timeout: Link not operational, state=%s" % link.state)
+
#--- Find/create a bridge
def _find_qmf_bridge(self, qmf_broker_proxy, qmf_link, source, destination, key):
@@ -461,7 +441,7 @@ class QmfTestBase010(TestBase010):
if b.linkRef == qmf_link.getObjectId() and b.src == source and b.dest == destination and b.key == key:
return b
return None
-
+
def _find_create_qmf_bridge(self, qmf_broker_proxy, qmf_link, queue_name, exch_name, topic_key,
queue_route_type_flag, bridge_durable_flag):
"""
@@ -486,7 +466,7 @@ class QmfTestBase010(TestBase010):
b = self._find_qmf_bridge(qmf_broker_proxy, qmf_link, src, dest, key)
self.assertNotEqual(b, None, "Bridge creation failed: src=%s dest=%s key=%s" % (src, dest, key))
return b
-
+
def _wait_for_bridge(self, bridge, src_broker, dest_broker, exch_name, queue_name, topic_key, pause_interval,
bridge_ready_timeout):
"""
@@ -522,9 +502,9 @@ class QmfTestBase010(TestBase010):
sender.close()
send_session.close()
self.assertTrue(active, "Bridge failed to become active after %ds: %s" % (bridge_ready_timeout, bridge))
-
+
#--- Find/create/delete utility functions
-
+
def _create_and_bind(self, qmf_broker, exchange_args, queue_args, binding_args):
"""
Create a binding between a named exchange and queue on a broker
@@ -532,7 +512,7 @@ class QmfTestBase010(TestBase010):
e = self._find_create_qmf_exchange(qmf_broker, **exchange_args)
q = self._find_create_qmf_queue(qmf_broker, **queue_args)
return self._find_create_qmf_binding(qmf_broker, e, q, **binding_args)
-
+
def _check_alt_exchange(self, qmf_broker, alt_exch_name, alt_exch_type, alt_exch_op):
"""
Check for existence of alternate exchange. Return the Qmf exchange proxy object for the alternate exchange
@@ -546,7 +526,7 @@ class QmfTestBase010(TestBase010):
alternate="", durable=False, auto_delete=False)
return self._find_qmf_exchange(qmf_broker=qmf_broker, name=alt_exchange_name, type=alt_exchange_type,
alternate="", durable=False, auto_delete=False)
-
+
def _delete_queue_binding(self, qmf_broker, exchange_args, queue_args, binding_args):
"""
Delete a queue and the binding between it and the exchange
@@ -555,7 +535,7 @@ class QmfTestBase010(TestBase010):
q = self._find_qmf_queue(qmf_broker, queue_args["name"], queue_args["alternate_exchange"], queue_args["durable"], queue_args["exclusive"], queue_args["auto_delete"])
self._find_delete_qmf_binding(qmf_broker, e, q, **binding_args)
self._find_delete_qmf_queue(qmf_broker, **queue_args)
-
+
def _create_route(self, queue_route_type_flag, src_broker, dest_broker, exch_name, queue_name, topic_key,
link_durable_flag, bridge_durable_flag, auth_mechanism, user_id, password, transport,
pause_interval = 1, link_ready_timeout = 20, bridge_ready_timeout = 20):
@@ -571,7 +551,7 @@ class QmfTestBase010(TestBase010):
self._wait_for_bridge(b, src_broker, dest_broker, exch_name, queue_name, topic_key, pause_interval, bridge_ready_timeout)
# Parameterized test - entry point for tests
-
+
def _do_test(self,
test_name, # Name of test
exch_name = "amq.direct", # Remote exchange name
@@ -597,8 +577,6 @@ class QmfTestBase010(TestBase010):
queue_route_type_flag = False, # Route type: false = bridge route, true = queue route
enq_txn_size = 0, # Enqueue transaction size, 0 = no transactions
deq_txn_size = 0, # Dequeue transaction size, 0 = no transactions
- local_cluster_flag = False, # Use a node from the local cluster, otherwise use single local broker
- remote_cluster_flag = False, # Use a node from the remote cluster, otherwise use single remote broker
alt_exch_op = _alt_exch_ops.create,# Op on alt exch [create (ensure present), delete (ensure not present), none (neither create nor delete)]
auth_mechanism = "", # Authorization mechanism for linked broker
user_id = "", # User ID for authorization on linked broker
@@ -609,32 +587,32 @@ class QmfTestBase010(TestBase010):
Parameterized federation test. Sets up a federated link between a source broker and a destination broker and
checks that messages correctly pass over the link to the destination. Where appropriate (non-queue-routes), also
checks for the presence of messages on the source broker.
-
+
In these tests, the concept is to create a LOCAL broker, then create a link to a REMOTE broker using federation.
In other words, the messages sent to the LOCAL broker will be replicated on the REMOTE broker, and tests are
performed on the REMOTE broker to check that the required messages are present. In the case of regular routes,
the LOCAL broker will also retain the messages, and a similar test is performed on this broker.
-
+
TODO: There are several items to improve here:
1. _do_test() is rather general. Rather create a version for each exchange type and test the exchange/queue
interaction in more detail based on the exchange type
2. Add a headers and an xml exchange type
- 3. Restructure the tests to start and stop brokers and clusters directly rather than relying on previously
- started brokers. Then persistence can be checked by stopping and restarting the brokers/clusters. In particular,
+ 3. Restructure the tests to start and stop brokers directly rather than relying on previously
+ started brokers. Then persistence can be checked by stopping and restarting the brokers. In particular,
test the persistence of links and bridges, both of which take a persistence flag.
4. Test the behavior of the alternate exchanges when messages are sourced through a link. Also check behavior
when the alternate exchange is not present or is deleted after the reference is made.
5. Test special queue types (eg LVQ)
"""
- local_broker = self._get_broker(local_cluster_flag, "local-port", "local-cluster-ports")
- remote_broker = self._get_broker(remote_cluster_flag, "remote-port", "remote-cluster-ports")
-
+ local_broker = self._get_broker("local-port")
+ remote_broker = self._get_broker("remote-port")
+
# Check alternate exchanges exist (and create them if not) on both local and remote brokers
self._check_alt_exchange(local_broker.qmf_broker, exch_alt_exch, exch_alt_exch_type, alt_exch_op)
self._check_alt_exchange(local_broker.qmf_broker, queue_alt_exch, queue_alt_exch_type, alt_exch_op)
self._check_alt_exchange(remote_broker.qmf_broker, exch_alt_exch, exch_alt_exch_type, alt_exch_op)
self._check_alt_exchange(remote_broker.qmf_broker, queue_alt_exch, queue_alt_exch_type, alt_exch_op)
-
+
queue_name = "queue_%s" % test_name
exchange_args = {"name": exch_name, "type": exch_type, "alternate": exch_alt_exch,
"durable": exch_durable_flag, "auto_delete": exch_auto_delete_flag, "args": exch_x_args}
@@ -658,70 +636,70 @@ class QmfTestBase010(TestBase010):
if not queue_route_type_flag:
self._receive_msgs("local_receive_session", local_broker, addr = queue_name, msg_count = msg_count, deq_txn_size = deq_txn_size)
self._receive_msgs("remote_receive_session", remote_broker, addr = queue_name, msg_count = msg_count, deq_txn_size = deq_txn_size, timeout = 5)
-
+
# Clean up
self._delete_queue_binding(qmf_broker=local_broker.qmf_broker, exchange_args=exchange_args, queue_args=queue_args, binding_args=binding_args)
self._delete_queue_binding(qmf_broker=remote_broker.qmf_broker, exchange_args=exchange_args, queue_args=queue_args, binding_args=binding_args)
class A_ShortTests(QmfTestBase010):
-
+
def test_route_defaultExch(self):
self._do_test(self._get_name())
-
+
def test_queueRoute_defaultExch(self):
self._do_test(self._get_name(), queue_route_type_flag=True)
-
-
+
+
class A_LongTests(QmfTestBase010):
-
+
def test_route_amqDirectExch(self):
self._do_test(self._get_name(), exch_name="amq.direct")
-
+
def test_queueRoute_amqDirectExch(self):
self._do_test(self._get_name(), exch_name="amq.direct", queue_route_type_flag=True)
-
-
+
+
def test_route_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange")
-
+
def test_queueRoute_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True)
-
-
+
+
def test_route_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout")
-
+
def test_queueRoute_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True)
-
-
+
+
def test_route_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#")
-
+
def test_queueRoute_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True)
-
-
+
+
class B_ShortTransactionTests(QmfTestBase010):
-
+
def test_txEnq01_route_defaultExch(self):
self._do_test(self._get_name(), enq_txn_size=1)
-
+
def test_txEnq01_queueRoute_defaultExch(self):
self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_txDeq01_route_defaultExch(self):
self._do_test(self._get_name(), enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_queueRoute_defaultExch(self):
self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
class B_LongTransactionTests(QmfTestBase010):
-
+
def test_txEnq10_route_defaultExch(self):
self._do_test(self._get_name(), enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_queueRoute_defaultExch(self):
self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
@@ -730,1171 +708,270 @@ class B_LongTransactionTests(QmfTestBase010):
def test_txEnq01_route_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=1)
-
+
def test_txEnq01_queueRoute_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=1)
def test_txEnq10_route_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_queueRoute_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq01_txDeq01_route_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_queueRoute_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
def test_txEnq01_route_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=1)
-
+
def test_txEnq01_queueRoute_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=1)
def test_txEnq10_route_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_queueRoute_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq01_txDeq01_route_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_queueRoute_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
def test_txEnq01_route_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=1)
-
+
def test_txEnq01_queueRoute_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=1)
def test_txEnq10_route_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_queueRoute_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq01_txDeq01_route_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_queueRoute_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
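
The enq_txn_size and deq_txn_size parameters used by the transaction tests above batch sends and receives into local transactions. A minimal sketch of the enqueue side with qpid.messaging, shown only to illustrate the pattern (it is not the helper this file uses, and it assumes enq_txn_size > 0; 0 means "no transactions" in these tests):

from qpid.messaging import Connection, Message

def send_in_txns(url, addr, msg_count, enq_txn_size):
    # Send msg_count messages, committing a transaction every enq_txn_size sends.
    conn = Connection.establish(url)
    try:
        ssn = conn.session(transactional=True)
        snd = ssn.sender(addr)
        for i in range(msg_count):
            snd.send(Message("msg-%d" % i))
            if (i + 1) % enq_txn_size == 0:
                ssn.commit()   # close out the current batch
        ssn.commit()           # commit any trailing partial batch
    finally:
        conn.close()
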
-class C_ShortClusterTests(QmfTestBase010):
-
- def test_locCluster_route_defaultExch(self):
- self._do_test(self._get_name(), local_cluster_flag=True)
-
- def test_locCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_remCluster_route_defaultExch(self):
- self._do_test(self._get_name(), remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_defaultExch(self):
- self._do_test(self._get_name(), local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
-
-class C_LongClusterTests(QmfTestBase010):
-
- def test_locCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", local_cluster_flag=True)
-
- def test_locCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_remCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_locCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", local_cluster_flag=True)
-
- def test_locCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_remCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_locCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", local_cluster_flag=True)
-
- def test_locCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_remCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
-
-class D_ShortClusterTransactionTests(QmfTestBase010):
-
- def test_txEnq01_locCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
-
-class D_LongClusterTransactionTests(QmfTestBase010):
-
- def test_txEnq10_locCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_txEnq01_locCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_txEnq01_locCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_txEnq01_locCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
-
-class E_ShortPersistenceTests(QmfTestBase010):
-
+class E_ShortPersistenceTests(QmfTestBase010):
+
def test_route_durQueue_defaultExch(self):
self._do_test(self._get_name(), queue_durable_flag=True)
-
+
def test_route_durMsg_durQueue_defaultExch(self):
self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True)
-
+
def test_queueRoute_durQueue_defaultExch(self):
self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True)
-
+
def test_queueRoute_durMsg_durQueue_defaultExch(self):
self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True)
-class E_LongPersistenceTests(QmfTestBase010):
-
+class E_LongPersistenceTests(QmfTestBase010):
+
def test_route_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True)
-
+
def test_route_durMsg_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True)
-
+
def test_queueRoute_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True)
-
+
def test_queueRoute_durMsg_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True)
-
+
def test_route_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True)
-
+
def test_route_durMsg_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True)
-
+
def test_queueRoute_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True)
-
+
def test_queueRoute_durMsg_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True)
-
+
def test_route_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True)
-
+
def test_route_durMsg_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True)
-
+
def test_queueRoute_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True)
-
+
def test_queueRoute_durMsg_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True)
-
+
class F_ShortPersistenceTransactionTests(QmfTestBase010):
-
+
def test_txEnq01_route_durQueue_defaultExch(self):
self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_route_durMsg_durQueue_defaultExch(self):
self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_queueRoute_durQueue_defaultExch(self):
self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_queueRoute_durMsg_durQueue_defaultExch(self):
self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_txDeq01_route_durQueue_defaultExch(self):
self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_route_durMsg_durQueue_defaultExch(self):
self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_queueRoute_durQueue_defaultExch(self):
self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_queueRoute_durMsg_durQueue_defaultExch(self):
self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
class F_LongPersistenceTransactionTests(QmfTestBase010):
-
+
def test_txEnq10_route_durQueue_defaultExch(self):
self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_route_durMsg_durQueue_defaultExch(self):
self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_queueRoute_durQueue_defaultExch(self):
self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_queueRoute_durMsg_durQueue_defaultExch(self):
self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
-
+
+
def test_txEnq01_route_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_route_durMsg_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_queueRoute_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_queueRoute_durMsg_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1)
-
+
def test_txEnq10_route_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_route_durMsg_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_queueRoute_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_queueRoute_durMsg_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq01_txDeq01_route_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_route_durMsg_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_queueRoute_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_queueRoute_durMsg_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_route_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_route_durMsg_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_queueRoute_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_queueRoute_durMsg_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1)
-
+
def test_txEnq10_route_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_route_durMsg_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_queueRoute_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_queueRoute_durMsg_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq01_txDeq01_route_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_route_durMsg_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_queueRoute_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_queueRoute_durMsg_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
def test_txEnq01_route_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_route_durMsg_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_queueRoute_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_queueRoute_durMsg_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1)
def test_txEnq10_route_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_route_durMsg_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_queueRoute_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_queueRoute_durMsg_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq01_txDeq01_route_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_route_durMsg_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_queueRoute_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_queueRoute_durMsg_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-class G_ShortPersistenceClusterTests(QmfTestBase010):
-
- def test_locCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, local_cluster_flag=True)
-
- def test_locCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, local_cluster_flag=True)
-
- def test_locCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_locCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_remCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
-
-class G_LongPersistenceClusterTests(QmfTestBase010):
-
-
-
- def test_locCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, local_cluster_flag=True)
-
- def test_locCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, local_cluster_flag=True)
-
- def test_locCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_locCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_remCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_locCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, local_cluster_flag=True)
-
- def test_locCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, local_cluster_flag=True)
-
- def test_locCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_locCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_remCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_locCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, local_cluster_flag=True)
-
- def test_locCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, local_cluster_flag=True)
-
- def test_locCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_locCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_remCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
-
-class H_ShortPersistenceClusterTransactionTests(QmfTestBase010):
-
- def test_txEnq01_locCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
-
-class H_LongPersistenceClusterTransactionTests(QmfTestBase010):
-
- def test_txEnq10_locCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
-
-
-
- def test_txEnq01_locCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_txEnq01_locCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_txEnq01_locCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
diff --git a/qpid/cpp/src/tests/ha_tests.py b/qpid/cpp/src/tests/ha_tests.py
index b29ff42627..1725c594de 100755
--- a/qpid/cpp/src/tests/ha_tests.py
+++ b/qpid/cpp/src/tests/ha_tests.py
@@ -20,7 +20,7 @@
import os, signal, sys, time, imp, re, subprocess, glob, random, logging, shutil, math, unittest, random
import traceback
-from qpid.messaging import Message, NotFound, ConnectionError, ReceiverError, Connection, Timeout, Disposition, REJECTED, Empty
+from qpid.messaging import Message, SessionError, NotFound, ConnectionError, ReceiverError, Connection, Timeout, Disposition, REJECTED, Empty
from qpid.datatypes import uuid4
from brokertest import *
from ha_test import *
@@ -613,22 +613,24 @@ acl deny all all
to new members of a cluster. """
cluster = HaCluster(self, 2)
s = cluster[0].connect().session()
+ cluster[0].wait_status("active")
+ cluster[1].wait_status("ready")
# altex exchange: acts as alternate exchange
s.sender("altex;{create:always,node:{type:topic,x-declare:{type:'fanout'}}}")
# altq queue bound to altex, collect re-routed messages.
s.sender("altq;{create:always,node:{x-bindings:[{exchange:'altex',queue:altq}]}}")
- # 0ex exchange with alternate-exchange altex and no queues bound
- s.sender("0ex;{create:always,node:{type:topic, x-declare:{type:'direct', alternate-exchange:'altex'}}}")
+ # ex exchange with alternate-exchange altex and no queues bound
+ s.sender("ex;{create:always,node:{type:topic, x-declare:{type:'direct', alternate-exchange:'altex'}}}")
# create queue q with alternate-exchange altex
s.sender("q;{create:always,node:{type:queue, x-declare:{alternate-exchange:'altex'}}}")
# create a bunch of exchanges to ensure we don't clean up prematurely if the
# response comes in multiple fragments.
- for i in xrange(200): s.sender("00ex%s;{create:always,node:{type:topic}}"%i)
+ for i in xrange(200): s.sender("ex.%s;{create:always,node:{type:topic}}"%i)
def verify(broker):
s = broker.connect().session()
# Verify unmatched message goes to ex's alternate.
- s.sender("0ex").send("foo")
+ s.sender("ex").send("foo")
altq = s.receiver("altq")
self.assertEqual("foo", altq.fetch(timeout=0).content)
s.acknowledge()
@@ -640,20 +642,39 @@ acl deny all all
self.assertEqual("bar", altq.fetch(timeout=0).content)
s.acknowledge()
+ def ss(n): return cluster[n].connect().session()
+
# Sanity check: alternate exchanges on original broker
verify(cluster[0])
+ # Altex is in use as an alternate exchange.
+ self.assertRaises(SessionError,
+ lambda:ss(0).sender("altex;{delete:always}").close())
# Check backup that was connected during setup.
- cluster[1].wait_backup("0ex")
+ cluster[1].wait_status("ready")
+ cluster[1].wait_backup("ex")
cluster[1].wait_backup("q")
cluster.bounce(0)
verify(cluster[1])
+
# Check a newly started backup.
cluster.start()
- cluster[2].wait_backup("0ex")
+ cluster[2].wait_status("ready")
+ cluster[2].wait_backup("ex")
cluster[2].wait_backup("q")
cluster.bounce(1)
verify(cluster[2])
+ # Check that alt-exchange in-use count is replicated
+ s = cluster[2].connect().session();
+
+ self.assertRaises(SessionError,
+ lambda:ss(2).sender("altex;{delete:always}").close())
+ s.sender("q;{delete:always}").close()
+ self.assertRaises(SessionError,
+ lambda:ss(2).sender("altex;{delete:always}").close())
+ s.sender("ex;{delete:always}").close()
+ s.sender("altex;{delete:always}").close()
+
def test_priority_reroute(self):
"""Regression test for QPID-4262, rerouting messages from a priority queue
to itself causes a crash"""
@@ -1047,13 +1068,15 @@ class RecoveryTests(HaBrokerTest):
l = LogLevel(ERROR) # Hide expected WARNING log messages from failover.
try:
# We don't want backups to time out for this test, set long timeout.
- cluster = HaCluster(self, 4, args=["--ha-backup-timeout=100000"]);
+ cluster = HaCluster(self, 4, args=["--ha-backup-timeout=120"]);
# Wait for the primary to be ready
cluster[0].wait_status("active")
+ for b in cluster[1:4]: b.wait_status("ready")
# Create a queue before the failure.
s1 = cluster.connect(0).session().sender("q1;{create:always}")
for b in cluster: b.wait_backup("q1")
for i in xrange(100): s1.send(str(i))
+
# Kill primary and 2 backups
cluster[3].wait_status("ready")
for i in [0,1,2]: cluster.kill(i, False)
@@ -1070,14 +1093,16 @@ class RecoveryTests(HaBrokerTest):
s2 = cluster.connect(3).session().sender("q2;{create:always}")
# Verify that messages sent are not completed
- for i in xrange(100,200): s1.send(str(i), sync=False); s2.send(str(i), sync=False)
+ for i in xrange(100,200):
+ s1.send(str(i), sync=False);
+ s2.send(str(i), sync=False)
assertSyncTimeout(s1)
self.assertEqual(s1.unsettled(), 100)
assertSyncTimeout(s2)
self.assertEqual(s2.unsettled(), 100)
# Verify we can receive even if sending is on hold:
- cluster[3].assert_browse("q1", [str(i) for i in range(100)+range(100,200)])
+ cluster[3].assert_browse("q1", [str(i) for i in range(200)])
# Restart backups, verify queues are released only when both backups are up
cluster.restart(1)
@@ -1085,11 +1110,10 @@ class RecoveryTests(HaBrokerTest):
self.assertEqual(s1.unsettled(), 100)
assertSyncTimeout(s2)
self.assertEqual(s2.unsettled(), 100)
- self.assertEqual(cluster[3].ha_status(), "recovering")
cluster.restart(2)
# Verify everything is up to date and active
- def settled(sender): sender.sync(); return sender.unsettled() == 0;
+ def settled(sender): sender.sync(timeout=1); return sender.unsettled() == 0;
assert retry(lambda: settled(s1)), "Unsetttled=%s"%(s1.unsettled())
assert retry(lambda: settled(s2)), "Unsetttled=%s"%(s2.unsettled())
cluster[1].assert_browse_backup("q1", [str(i) for i in range(100)+range(100,200)])
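The alternate-exchange behaviour exercised by the hunk above can also be reproduced outside the HA test harness with the qpid.messaging Python API: the broker tracks an in-use count for an exchange that is referenced as an alternate exchange and rejects attempts to delete it until that count drops to zero, which is what the test verifies is replicated to backups. The sketch below is a minimal illustration under assumptions that are not part of this patch (a single qpidd broker on localhost:5672, straight-line control flow); only the address strings mirror those used in the test.

    # Minimal sketch; assumes a qpidd broker on localhost:5672 (not part of this patch).
    from qpid.messaging import Connection, SessionError

    conn = Connection("localhost:5672")
    conn.open()
    try:
        s = conn.session()
        # Fanout exchange that will act as the alternate exchange.
        s.sender("altex;{create:always,node:{type:topic,x-declare:{type:'fanout'}}}").close()
        # Exchange that names altex as its alternate exchange.
        s.sender("ex;{create:always,node:{type:topic,x-declare:{type:'direct',alternate-exchange:'altex'}}}").close()
        try:
            # Deleting altex while ex still references it should be rejected.
            conn.session().sender("altex;{delete:always}").close()
        except SessionError:
            pass  # expected: altex is in use as an alternate exchange
        # Dropping the referencing exchange releases altex, so deletion now succeeds.
        s.sender("ex;{delete:always}").close()
        s.sender("altex;{delete:always}").close()
    finally:
        conn.close()

In the HA test the same delete attempt is repeated against a freshly promoted backup; it only behaves this way if the in-use count was replicated along with the exchange itself.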
diff --git a/qpid/cpp/src/tests/ipv6_test b/qpid/cpp/src/tests/ipv6_test
index 9d1cb2acdd..f47e721513 100755
--- a/qpid/cpp/src/tests/ipv6_test
+++ b/qpid/cpp/src/tests/ipv6_test
@@ -122,43 +122,3 @@ else
rm rdata-in rdata-out
fi
-# Cluster smoke test follows
-test -z $CLUSTER_LIB && exit 0 # Exit if cluster not supported.
-
-## Test failover in a cluster using IPv6 only
-. cpg_check.sh
-cpg_enabled || exit 0
-
-pick_port() {
- # We need a fixed port to set --cluster-url. Use qpidd to pick a free port.
- # Note this method is racy
- PICK=$($QPIDD_EXEC -dp0)
- $QPIDD_EXEC -qp $PICK
- echo $PICK
-}
-
-ssl_cluster_broker() { # $1 = port
- $QPIDD_EXEC $COMMON_OPTS --load-module $CLUSTER_LIB --cluster-name ipv6_test.$HOSTNAME.$$ --cluster-url amqp:[$TEST_HOSTNAME]:$1 --port $1
- # Wait for broker to be ready
- ./qpid-ping -b $TEST_HOSTNAME -qp $1 || { echo "Cannot connect to broker on $1"; exit 1; }
- echo "Running IPv6 cluster broker on port $1"
-}
-
-PORT1=`pick_port`; ssl_cluster_broker $PORT1
-PORT2=`pick_port`; ssl_cluster_broker $PORT2
-
-# Pipe receive output to uniq to remove duplicates
-./qpid-receive --connection-options "{reconnect:true, reconnect-timeout:5}" --failover-updates -b amqp:[$TEST_HOSTNAME]:$PORT1 -a "foo;{create:always}" -f | uniq > ssl_test_receive.tmp &
-
-./qpid-send -b amqp:[$TEST_HOSTNAME]:$PORT2 --content-string=one -a "foo;{create:always}"
-
-$QPIDD_EXEC -qp $PORT1 # Kill broker 1 receiver should fail-over.
-./qpid-send -b amqp:[$TEST_HOSTNAME]:$PORT2 --content-string=two -a "foo;{create:always}" --send-eos 1
-wait # Wait for qpid-receive
-{ echo one; echo two; } > ssl_test_receive.cmp
-diff ssl_test_receive.tmp ssl_test_receive.cmp || { echo "Failover failed"; exit 1; }
-
-$QPIDD_EXEC -qp $PORT2
-
-rm -f ssl_test_receive.*
-
diff --git a/qpid/cpp/src/tests/legacystore/.valgrind.supp b/qpid/cpp/src/tests/legacystore/.valgrind.supp
new file mode 100644
index 0000000000..5c1c5377bf
--- /dev/null
+++ b/qpid/cpp/src/tests/legacystore/.valgrind.supp
@@ -0,0 +1,35 @@
+{
+ <insert_a_suppression_name_here>
+ Memcheck:Leak
+ fun:_Znwm
+ fun:_ZNSs4_Rep9_S_createEmmRKSaIcE
+ fun:_ZNSs12_S_constructIPKcEEPcT_S3_RKSaIcESt20forward_iterator_tag
+ fun:_ZNSsC1EPKcRKSaIcE
+}
+
+{
+ <insert_a_suppression_name_here>
+ Memcheck:Leak
+ fun:_Znwm
+ fun:_ZNSs4_Rep9_S_createEmmRKSaIcE
+ fun:_ZNSs4_Rep8_M_cloneERKSaIcEm
+ fun:_ZNSs7reserveEm
+}
+
+{
+ <insert_a_suppression_name_here>
+ Memcheck:Leak
+ fun:_Znwm
+ fun:_ZNSs4_Rep9_S_createEmmRKSaIcE
+ fun:_ZNSs9_M_mutateEmmm
+ fun:_ZNSs15_M_replace_safeEmmPKcm
+}
+
+{
+ <insert_a_suppression_name_here>
+ Memcheck:Leak
+ fun:_Znwm
+ fun:_ZNSs4_Rep9_S_createEmmRKSaIcE
+ fun:_ZNSsC1IPcEET_S1_RKSaIcE
+}
+
diff --git a/qpid/cpp/src/tests/legacystore/.valgrindrc b/qpid/cpp/src/tests/legacystore/.valgrindrc
new file mode 100644
index 0000000000..4aba7661de
--- /dev/null
+++ b/qpid/cpp/src/tests/legacystore/.valgrindrc
@@ -0,0 +1,7 @@
+--gen-suppressions=all
+--leak-check=full
+--demangle=yes
+--suppressions=.valgrind.supp
+--num-callers=25
+--trace-children=yes
+
diff --git a/qpid/cpp/src/tests/legacystore/CMakeLists.txt b/qpid/cpp/src/tests/legacystore/CMakeLists.txt
new file mode 100644
index 0000000000..6cfaa7ec17
--- /dev/null
+++ b/qpid/cpp/src/tests/legacystore/CMakeLists.txt
@@ -0,0 +1,117 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+if(BUILD_LEGACYSTORE)
+
+message(STATUS "Building legacystore tests")
+
+# Enable dashboard reporting.
+include (CTest)
+
+# Make sure that everything gets built before the tests
+# Need to create a var with all the necessary top level targets
+
+# If we're linking Boost for DLLs, turn that on for the unit test too.
+if (QPID_LINK_BOOST_DYNAMIC)
+ add_definitions(-DBOOST_TEST_DYN_LINK)
+endif (QPID_LINK_BOOST_DYNAMIC)
+
+include_directories( ${CMAKE_CURRENT_SOURCE_DIR} )
+
+include (FindPythonInterp)
+
+# # Inherit environment from parent script
+# set (abs_srcdir ${CMAKE_CURRENT_SOURCE_DIR})
+# set (abs_builddir ${CMAKE_CURRENT_BINARY_DIR})
+# set (abs_top_srcdir ${CMAKE_SOURCE_DIR})
+# set (abs_top_builddir ${CMAKE_BINARY_DIR})
+# set (builddir_lib_suffix "")
+
+# If valgrind is selected in the configuration step, set up the path to it
+# for CTest.
+if (ENABLE_VALGRIND)
+ set (MEMORYCHECK_COMMAND ${VALGRIND})
+ set (MEMORYCHECK_COMMAND_OPTIONS "--gen-suppressions=all
+--leak-check=full
+--demangle=yes
+--suppressions=${CMAKE_CURRENT_SOURCE_DIR}/.valgrind.supp
+--num-callers=25
+--log-file=ctest_valgrind.vglog")
+endif (ENABLE_VALGRIND)
+
+# Like this to work with cmake 2.4 on Unix
+set (qpid_test_boost_libs
+ ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY} ${Boost_SYSTEM_LIBRARY})
+
+#
+# Unit test program
+#
+# Unit tests are built as a single program to reduce valgrind overhead
+# when running the tests. If you want to build a subset of the tests, run
+# ccmake and set unit_tests_to_build to the set you want to build.
+# HACK ALERT - Unit tests are built individually to resolve a conflict
+# with running multiple brokers that connect to 0.0.0.0:5672 and that
+# womp on each other's store directory.
+
+#
+# define_selftest
+# macro to accept the name of a single source file and to create a
+# unit test executable that runs the source.
+#
+MACRO (define_selftest theSourceFile)
+add_executable (legacystore_${theSourceFile}
+ unit_test
+ ${theSourceFile}
+ ${platform_test_additions})
+target_link_libraries (legacystore_${theSourceFile}
+ ${qpid_test_boost_libs}
+ qpidmessaging qpidbroker qmfconsole legacystore)
+get_property(ls_include TARGET legacystore_${theSourceFile} PROPERTY INCLUDE_DIRECTORIES)
+list(APPEND ls_include ${abs_top_srcdir}/src/qpid/legacystore)
+list(APPEND ls_include ${abs_top_srcdir}/src/tests)
+set_target_properties (legacystore_${theSourceFile} PROPERTIES
+ INCLUDE_DIRECTORIES "${ls_include}"
+ COMPILE_DEFINITIONS _IN_QPID_BROKER)
+remember_location(legacystore_${theSourceFile})
+set(test_wrap ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/run_test${test_script_suffix})
+
+add_test (legacystore_${theSourceFile} ${test_wrap} ${legacystore_${theSourceFile}_LOCATION})
+ENDMACRO (define_selftest)
+
+# add_definitions(-H)
+
+define_selftest (SimpleTest)
+define_selftest (OrderingTest)
+define_selftest (TransactionalTest)
+define_selftest (TwoPhaseCommitTest)
+
+#
+# Other test programs
+#
+
+# This should ideally be done as part of the test run, but I don't know a way
+# to get these arguments and the working directory set like Makefile.am does,
+# and have that run during the test pass.
+if (PYTHON_EXECUTABLE)
+ set (python_bld ${CMAKE_CURRENT_BINARY_DIR}/python)
+ execute_process(COMMAND ${PYTHON_EXECUTABLE} setup.py install --prefix=${python_bld} --install-lib=${python_bld} --install-scripts=${python_bld}/commands
+ WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/../python)
+endif (PYTHON_EXECUTABLE)
+
+endif (BUILD_LEGACYSTORE)
diff --git a/qpid/cpp/src/tests/legacystore/MessageUtils.h b/qpid/cpp/src/tests/legacystore/MessageUtils.h
new file mode 100644
index 0000000000..6552357c72
--- /dev/null
+++ b/qpid/cpp/src/tests/legacystore/MessageUtils.h
@@ -0,0 +1,105 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include <qpid/broker/Message.h>
+#include <qpid/broker/Queue.h>
+#include <qpid/broker/amqp_0_10/MessageTransfer.h>
+#include <qpid/framing/AMQFrame.h>
+#include <qpid/framing/all_method_bodies.h>
+#include <qpid/framing/Uuid.h>
+
+using namespace qpid::broker;
+using namespace qpid::framing;
+
+struct MessageUtils
+{
+ static Message createMessage(const std::string& exchange, const std::string& routingKey,
+ const Uuid& messageId=Uuid(), const bool durable = false,
+ const uint64_t contentSize = 0, const std::string& correlationId = std::string())
+ {
+ boost::intrusive_ptr<qpid::broker::amqp_0_10::MessageTransfer> msg(new qpid::broker::amqp_0_10::MessageTransfer());
+
+ AMQFrame method(( MessageTransferBody(ProtocolVersion(), exchange, 0, 0)));
+ AMQFrame header((AMQHeaderBody()));
+
+ msg->getFrames().append(method);
+ msg->getFrames().append(header);
+ MessageProperties* props = msg->getFrames().getHeaders()->get<MessageProperties>(true);
+ props->setContentLength(contentSize);
+ props->setMessageId(messageId);
+ props->setCorrelationId(correlationId);
+ msg->getFrames().getHeaders()->get<DeliveryProperties>(true)->setRoutingKey(routingKey);
+ if (durable)
+ msg->getFrames().getHeaders()->get<DeliveryProperties>(true)->setDeliveryMode(PERSISTENT);
+ return Message(msg, msg);
+ }
+
+ static void addContent(Message msg, const std::string& data)
+ {
+ AMQFrame content((AMQContentBody(data)));
+ qpid::broker::amqp_0_10::MessageTransfer::get(msg).getFrames().append(content);
+ }
+
+ struct MessageRetriever : public Consumer
+ {
+ MessageRetriever(Queue& q) : Consumer("test", CONSUMER), queue(q) {};
+
+ bool deliver(const QueueCursor& c, const Message& m)
+ {
+ message = m;
+ cursor = c;
+ return true;
+ };
+ void notify() {}
+ void cancel() {}
+ void acknowledged(const DeliveryRecord&) {}
+ OwnershipToken* getSession() { return 0; }
+
+ const Queue& queue;
+ Message message;
+ QueueCursor cursor;
+ };
+
+ static Message get(Queue& queue, QueueCursor* cursor = 0)
+ {
+ boost::shared_ptr<MessageRetriever> consumer(new MessageRetriever(queue));
+ if (!queue.dispatch(consumer)) throw qpid::Exception("No message found!");
+ if (cursor) *cursor = consumer->cursor;
+ return consumer->message;
+ }
+
+ static Uuid getMessageId(const Message& message)
+ {
+ return qpid::broker::amqp_0_10::MessageTransfer::get(message).getProperties<MessageProperties>()->getMessageId();
+ }
+
+ static std::string getCorrelationId(const Message& message)
+ {
+ return qpid::broker::amqp_0_10::MessageTransfer::get(message).getProperties<MessageProperties>()->getCorrelationId();
+ }
+
+ static void deliver(Message& msg, FrameHandler& h, uint16_t framesize)
+ {
+ qpid::broker::amqp_0_10::MessageTransfer::get(msg).sendHeader(h, framesize, false, 0, 0, qpid::types::Variant::Map());
+ qpid::broker::amqp_0_10::MessageTransfer::get(msg).sendContent(h, framesize);
+ }
+
+};
diff --git a/qpid/cpp/src/tests/legacystore/OrderingTest.cpp b/qpid/cpp/src/tests/legacystore/OrderingTest.cpp
new file mode 100644
index 0000000000..92a09f0c60
--- /dev/null
+++ b/qpid/cpp/src/tests/legacystore/OrderingTest.cpp
@@ -0,0 +1,168 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include "unit_test.h"
+
+#include "qpid/legacystore/MessageStoreImpl.h"
+#include <iostream>
+#include "MessageUtils.h"
+#include <qpid/broker/Queue.h>
+#include <qpid/broker/RecoveryManagerImpl.h>
+#include <qpid/framing/AMQHeaderBody.h>
+#include "qpid/log/Logger.h"
+#include "qpid/sys/Timer.h"
+
+using namespace qpid;
+using namespace qpid::broker;
+using namespace qpid::framing;
+using namespace mrg::msgstore;
+
+qpid::broker::Broker::Options opts;
+qpid::broker::Broker br(opts);
+
+QPID_AUTO_TEST_SUITE(OrderingTest)
+
+#define SET_LOG_LEVEL(level) \
+ qpid::log::Options opts(""); \
+ opts.selectors.clear(); \
+ opts.selectors.push_back(level); \
+ qpid::log::Logger::instance().configure(opts);
+
+const std::string test_filename("OrderingTest");
+const char* tdp = getenv("TMP_DATA_DIR");
+const std::string test_dir(tdp && strlen(tdp) > 0 ? tdp : "/tmp/OrderingTest");
+
+// === Helper fns ===
+
+const std::string name("OrderingQueue");
+std::auto_ptr<MessageStoreImpl> store;
+QueueRegistry queues;
+Queue::shared_ptr queue;
+std::queue<Uuid> ids;
+
+class TestConsumer : public Consumer
+{
+ public:
+
+ TestConsumer(Queue::shared_ptr q, std::queue<Uuid>& i) : Consumer("test", CONSUMER), queue(q), ids(i) {};
+
+ bool deliver(const QueueCursor& cursor, const Message& message)
+ {
+ queue->dequeue(0, cursor);
+ BOOST_CHECK_EQUAL(ids.front(), MessageUtils::getMessageId(message));
+ ids.pop();
+ return true;
+ };
+ void notify() {}
+ void cancel() {}
+ void acknowledged(const DeliveryRecord&) {}
+ OwnershipToken* getSession() { return 0; }
+ private:
+ Queue::shared_ptr queue;
+ std::queue<Uuid>& ids;
+};
+boost::shared_ptr<TestConsumer> consumer;
+
+void setup()
+{
+ store = std::auto_ptr<MessageStoreImpl>(new MessageStoreImpl(&br));
+ store->init(test_dir, 4, 1, true); // truncate store
+
+ queue = Queue::shared_ptr(new Queue(name, 0, store.get(), 0));
+ queue->create();
+ consumer = boost::shared_ptr<TestConsumer>(new TestConsumer(queue, ids));
+}
+
+void push()
+{
+ Uuid messageId(true);
+ ids.push(messageId);
+
+ Message msg = MessageUtils::createMessage("exchange", "routing_key", messageId, true, 0);
+
+ queue->deliver(msg);
+}
+
+bool pop()
+{
+ return queue->dispatch(consumer);
+}
+
+void restart()
+{
+ queue.reset();
+ store.reset();
+
+ store = std::auto_ptr<MessageStoreImpl>(new MessageStoreImpl(&br));
+ store->init(test_dir, 4, 1);
+ ExchangeRegistry exchanges;
+ LinkRegistry links;
+ sys::Timer t;
+ DtxManager mgr(t);
+ mgr.setStore (store.get());
+ RecoveryManagerImpl recoveryMgr(queues, exchanges, links, mgr, br.getProtocolRegistry());
+ store->recover(recoveryMgr);
+
+ queue = queues.find(name);
+ consumer = boost::shared_ptr<TestConsumer>(new TestConsumer(queue, ids));
+}
+
+void check()
+{
+ BOOST_REQUIRE(queue);
+ BOOST_CHECK_EQUAL((u_int32_t) ids.size(), queue->getMessageCount());
+ while (pop()) ; // keep popping until all messages are dequeued
+ BOOST_CHECK_EQUAL((u_int32_t) 0, queue->getMessageCount());
+ BOOST_CHECK_EQUAL((size_t) 0, ids.size());
+}
+
+
+// === Test suite ===
+
+QPID_AUTO_TEST_CASE(Basic)
+{
+ SET_LOG_LEVEL("error+"); // This only needs to be set once.
+
+ std::cout << test_filename << ".Basic: " << std::flush;
+ setup();
+ //push on 10 messages
+ for (int i = 0; i < 10; i++) push();
+ restart();
+ check();
+ std::cout << "ok" << std::endl;
+}
+
+QPID_AUTO_TEST_CASE(Cycle)
+{
+ std::cout << test_filename << ".Cycle: " << std::flush;
+ setup();
+ //push on 10 messages:
+ for (int i = 0; i < 10; i++) push();
+ //pop 5:
+ for (int i = 0; i < 5; i++) pop();
+ //push on another 5:
+ for (int i = 0; i < 5; i++) push();
+ restart();
+ check();
+ std::cout << "ok" << std::endl;
+}
+
+QPID_AUTO_TEST_SUITE_END()
diff --git a/qpid/cpp/src/tests/legacystore/SimpleTest.cpp b/qpid/cpp/src/tests/legacystore/SimpleTest.cpp
new file mode 100644
index 0000000000..a49333d876
--- /dev/null
+++ b/qpid/cpp/src/tests/legacystore/SimpleTest.cpp
@@ -0,0 +1,497 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include "unit_test.h"
+
+#include "qpid/legacystore/MessageStoreImpl.h"
+#include <iostream>
+#include "tests/legacystore/MessageUtils.h"
+#include "qpid/legacystore/StoreException.h"
+#include "qpid/broker/DirectExchange.h"
+#include <qpid/broker/Queue.h>
+#include <qpid/broker/QueueSettings.h>
+#include <qpid/broker/RecoveryManagerImpl.h>
+#include <qpid/framing/AMQHeaderBody.h>
+#include <qpid/framing/FieldTable.h>
+#include <qpid/framing/FieldValue.h>
+#include "qpid/log/Logger.h"
+#include "qpid/sys/Timer.h"
+
+qpid::broker::Broker::Options opts;
+qpid::broker::Broker br(opts);
+
+#define SET_LOG_LEVEL(level) \
+ qpid::log::Options opts(""); \
+ opts.selectors.clear(); \
+ opts.selectors.push_back(level); \
+ qpid::log::Logger::instance().configure(opts);
+
+
+using boost::intrusive_ptr;
+using boost::static_pointer_cast;
+using namespace qpid;
+using namespace qpid::broker;
+using namespace qpid::framing;
+using namespace mrg::msgstore;
+using namespace std;
+
+QPID_AUTO_TEST_SUITE(SimpleTest)
+
+const string test_filename("SimpleTest");
+const char* tdp = getenv("TMP_DATA_DIR");
+const string test_dir(tdp && strlen(tdp) > 0 ? tdp : "/tmp/SimpleTest");
+
+// === Helper fns ===
+
+struct DummyHandler : OutputHandler
+{
+ std::vector<AMQFrame> frames;
+
+ virtual void send(AMQFrame& frame){
+ frames.push_back(frame);
+ }
+};
+
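+// Recovers the store contents into the supplied registries, as a broker would do on restart.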
+void recover(MessageStoreImpl& store, QueueRegistry& queues, ExchangeRegistry& exchanges, LinkRegistry& links)
+{
+ sys::Timer t;
+ DtxManager mgr(t);
+ mgr.setStore (&store);
+ RecoveryManagerImpl recovery(queues, exchanges, links, mgr, br.getProtocolRegistry());
+ store.recover(recovery);
+}
+
+void recover(MessageStoreImpl& store, ExchangeRegistry& exchanges)
+{
+ QueueRegistry queues;
+ LinkRegistry links;
+ recover(store, queues, exchanges, links);
+}
+
+void recover(MessageStoreImpl& store, QueueRegistry& queues)
+{
+ ExchangeRegistry exchanges;
+ LinkRegistry links;
+ recover(store, queues, exchanges, links);
+}
+
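+// Creates and binds a durable exchange/queue pair, then re-opens the store twice to check that the
+// binding (and its arguments) is recovered and that the subsequent unbind is also persisted.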
+void bindAndUnbind(const string& exchangeName, const string& queueName,
+ const string& key, const FieldTable& args)
+{
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1, true); // truncate store
+ Exchange::shared_ptr exchange(new DirectExchange(exchangeName, true, args));
+ Queue::shared_ptr queue(new Queue(queueName, 0, &store, 0));
+ store.create(*exchange, qpid::framing::FieldTable());
+ store.create(*queue, qpid::framing::FieldTable());
+ BOOST_REQUIRE(exchange->bind(queue, key, &args));
+ store.bind(*exchange, *queue, key, args);
+ }//db will be closed
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1);
+ ExchangeRegistry exchanges;
+ QueueRegistry queues;
+ LinkRegistry links;
+
+ recover(store, queues, exchanges, links);
+
+ Exchange::shared_ptr exchange = exchanges.get(exchangeName);
+ Queue::shared_ptr queue = queues.find(queueName);
+ // check exchange args are still set
+ for (FieldTable::ValueMap::const_iterator i = args.begin(); i!=args.end(); i++) {
+ BOOST_CHECK(exchange->getArgs().get((*i).first)->getData() == (*i).second->getData());
+ }
+ //check it is bound by unbinding
+ BOOST_REQUIRE(exchange->unbind(queue, key, &args));
+ store.unbind(*exchange, *queue, key, args);
+ }
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1);
+ ExchangeRegistry exchanges;
+ QueueRegistry queues;
+ LinkRegistry links;
+
+ recover(store, queues, exchanges, links);
+
+ Exchange::shared_ptr exchange = exchanges.get(exchangeName);
+ Queue::shared_ptr queue = queues.find(queueName);
+ // check exchange args are still set
+ for (FieldTable::ValueMap::const_iterator i = args.begin(); i!=args.end(); i++) {
+ BOOST_CHECK(exchange->getArgs().get((*i).first)->getData() == (*i).second->getData());
+ }
+ //make sure it is no longer bound
+ BOOST_REQUIRE(!exchange->unbind(queue, key, &args));
+ }
+}
+
+
+// === Test suite ===
+
+QPID_AUTO_TEST_CASE(CreateDelete)
+{
+ SET_LOG_LEVEL("error+"); // This only needs to be set once.
+
+ cout << test_filename << ".CreateDelete: " << flush;
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1, true); // truncate store
+ string name("CreateDeleteQueue");
+ Queue queue(name, 0, &store, 0);
+ store.create(queue, qpid::framing::FieldTable());
+// TODO - check dir exists
+ BOOST_REQUIRE(queue.getPersistenceId());
+ store.destroy(queue);
+// TODO - check dir is deleted
+
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(EmptyRecover)
+{
+ cout << test_filename << ".EmptyRecover: " << flush;
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1, true); // truncate store
+ QueueRegistry registry;
+ registry.setStore (&store);
+ recover(store, registry);
+ //nothing to assert, just testing it doesn't blow up
+
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(QueueCreate)
+{
+ cout << test_filename << ".QueueCreate: " << flush;
+
+ uint64_t id(0);
+ string name("MyDurableQueue");
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1, true); // truncate store
+ Queue queue(name, 0, &store, 0);
+ store.create(queue, qpid::framing::FieldTable());
+ BOOST_REQUIRE(queue.getPersistenceId());
+ id = queue.getPersistenceId();
+ }//db will be closed
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1);
+ QueueRegistry registry;
+ registry.setStore (&store);
+ recover(store, registry);
+ Queue::shared_ptr queue = registry.find(name);
+ BOOST_REQUIRE(queue.get());
+ BOOST_CHECK_EQUAL(id, queue->getPersistenceId());
+ }
+
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(QueueCreateWithSettings)
+{
+ cout << test_filename << ".QueueCreateWithSettings: " << flush;
+
+ FieldTable arguments;
+ arguments.setInt("qpid.max_count", 202);
+ arguments.setInt("qpid.max_size", 1003);
+ QueueSettings settings;
+ settings.populate(arguments, settings.storeSettings);
+ string name("MyDurableQueue");
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1, true); // truncate store
+ Queue queue(name, settings, &store, 0);
+ queue.create();
+ BOOST_REQUIRE(queue.getPersistenceId());
+ }//db will be closed
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1);
+ QueueRegistry registry;
+ registry.setStore (&store);
+ recover(store, registry);
+ Queue::shared_ptr queue = registry.find(name);
+ BOOST_REQUIRE(queue);
+ BOOST_CHECK_EQUAL(settings.maxDepth.getCount(), 202);
+ BOOST_CHECK_EQUAL(settings.maxDepth.getSize(), 1003);
+ BOOST_CHECK_EQUAL(settings.maxDepth.getCount(), queue->getSettings().maxDepth.getCount());
+ BOOST_CHECK_EQUAL(settings.maxDepth.getSize(), queue->getSettings().maxDepth.getSize());
+ }
+
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(QueueDestroy)
+{
+ cout << test_filename << ".QueueDestroy: " << flush;
+
+ string name("MyDurableQueue");
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1, true); // truncate store
+ Queue queue(name, 0, &store, 0);
+ store.create(queue, qpid::framing::FieldTable());
+ store.destroy(queue);
+ }//db will be closed
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1);
+ QueueRegistry registry;
+ registry.setStore (&store);
+ recover(store, registry);
+ BOOST_REQUIRE(!registry.find(name));
+ }
+
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(Enqueue)
+{
+ cout << test_filename << ".Enqueue: " << flush;
+
+ //TODO: this is largely copied and pasted from MessageTest in the
+ //qpid tree; ideally some helper routines are needed to reduce
+ //this to a simpler, less duplicated form
+
+ string name("MyDurableQueue");
+ string exchange("MyExchange");
+ string routingKey("MyRoutingKey");
+ Uuid messageId(true);
+ string data1("abcdefg");
+ string data2("hijklmn");
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1, true); // truncate store
+ Queue::shared_ptr queue(new Queue(name, 0, &store, 0));
+ queue->create();
+
+ Message msg = MessageUtils::createMessage(exchange, routingKey, messageId, true, 14);
+ MessageUtils::addContent(msg, data1);
+ MessageUtils::addContent(msg, data2);
+
+ msg.addAnnotation("abc", "xyz");
+
+ queue->deliver(msg);
+ }//db will be closed
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1);
+ QueueRegistry registry;
+ registry.setStore (&store);
+ recover(store, registry);
+ Queue::shared_ptr queue = registry.find(name);
+ BOOST_REQUIRE(queue);
+ BOOST_CHECK_EQUAL((u_int32_t) 1, queue->getMessageCount());
+ Message msg = MessageUtils::get(*queue);
+
+ BOOST_CHECK_EQUAL(routingKey, msg.getRoutingKey());
+ BOOST_CHECK_EQUAL(messageId, MessageUtils::getMessageId(msg));
+ BOOST_CHECK_EQUAL(std::string("xyz"), msg.getAnnotation("abc"));
+ BOOST_CHECK_EQUAL((u_int64_t) 14, msg.getContentSize());
+
+ DummyHandler handler;
+ MessageUtils::deliver(msg, handler, 100);
+ BOOST_CHECK_EQUAL((size_t) 2, handler.frames.size());
+ AMQContentBody* contentBody(dynamic_cast<AMQContentBody*>(handler.frames[1].getBody()));
+ BOOST_REQUIRE(contentBody);
+ BOOST_CHECK_EQUAL(data1.size() + data2.size(), contentBody->getData().size());
+ BOOST_CHECK_EQUAL(data1 + data2, contentBody->getData());
+ }
+
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(Dequeue)
+{
+ cout << test_filename << ".Dequeue: " << flush;
+
+ //TODO: reduce the duplication in these tests
+ string name("MyDurableQueue");
+ {
+ string exchange("MyExchange");
+ string routingKey("MyRoutingKey");
+ Uuid messageId(true);
+ string data("abcdefg");
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1, true); // truncate store
+ Queue::shared_ptr queue(new Queue(name, 0, &store, 0));
+ queue->create();
+
+ Message msg = MessageUtils::createMessage(exchange, routingKey, messageId, true, 7);
+ MessageUtils::addContent(msg, data);
+
+ queue->deliver(msg);
+
+ QueueCursor cursor;
+ MessageUtils::get(*queue, &cursor);
+ queue->dequeue(0, cursor);
+ }//db will be closed
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1);
+ QueueRegistry registry;
+ registry.setStore (&store);
+ recover(store, registry);
+ Queue::shared_ptr queue = registry.find(name);
+ BOOST_REQUIRE(queue);
+ BOOST_CHECK_EQUAL((u_int32_t) 0, queue->getMessageCount());
+ }
+
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(ExchangeCreateAndDestroy)
+{
+ cout << test_filename << ".ExchangeCreateAndDestroy: " << flush;
+
+ uint64_t id(0);
+ string name("MyDurableExchange");
+ string type("direct");
+ FieldTable args;
+ args.setString("a", "A");
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1, true); // truncate store
+ ExchangeRegistry registry;
+ Exchange::shared_ptr exchange = registry.declare(name, type, true, args).first;
+ store.create(*exchange, qpid::framing::FieldTable());
+ id = exchange->getPersistenceId();
+ BOOST_REQUIRE(id);
+ }//db will be closed
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1);
+ ExchangeRegistry registry;
+
+ recover(store, registry);
+
+ Exchange::shared_ptr exchange = registry.get(name);
+ BOOST_CHECK_EQUAL(id, exchange->getPersistenceId());
+ BOOST_CHECK_EQUAL(type, exchange->getType());
+ BOOST_REQUIRE(exchange->isDurable());
+ BOOST_CHECK_EQUAL(*args.get("a"), *exchange->getArgs().get("a"));
+ store.destroy(*exchange);
+ }
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1);
+ ExchangeRegistry registry;
+
+ recover(store, registry);
+
+ try {
+ Exchange::shared_ptr exchange = registry.get(name);
+ BOOST_FAIL("Expected exchange not to be found");
+ } catch (const SessionException& e) {
+ BOOST_CHECK_EQUAL((framing::ReplyCode) 404, e.code);
+ }
+ }
+
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(ExchangeBindAndUnbind)
+{
+ cout << test_filename << ".ExchangeBindAndUnbind: " << flush;
+
+ bindAndUnbind("MyDurableExchange", "MyDurableQueue", "my-routing-key", FieldTable());
+
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(ExchangeBindAndUnbindWithArgs)
+{
+ cout << test_filename << ".ExchangeBindAndUnbindWithArgs: " << flush;
+
+ FieldTable args;
+ args.setString("a", "A");
+ args.setString("b", "B");
+ bindAndUnbind("MyDurableExchange", "MyDurableQueue", "my-routing-key", args);
+
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(ExchangeImplicitUnbind)
+{
+ cout << test_filename << ".ExchangeImplicitUnbind: " << flush;
+
+ string exchangeName("MyDurableExchange");
+ string queueName1("MyDurableQueue1");
+ string queueName2("MyDurableQueue2");
+ string key("my-routing-key");
+ FieldTable args;
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1, true); // truncate store
+ Exchange::shared_ptr exchange(new DirectExchange(exchangeName, true, args));
+ Queue::shared_ptr queue1(new Queue(queueName1, 0, &store, 0));
+ Queue::shared_ptr queue2(new Queue(queueName2, 0, &store, 0));
+ store.create(*exchange, qpid::framing::FieldTable());
+ store.create(*queue1, qpid::framing::FieldTable());
+ store.create(*queue2, qpid::framing::FieldTable());
+ store.bind(*exchange, *queue1, key, args);
+ store.bind(*exchange, *queue2, key, args);
+ //delete queue1:
+ store.destroy(*queue1);
+ }//db will be closed
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1);
+ ExchangeRegistry exchanges;
+ QueueRegistry queues;
+ LinkRegistry links;
+
+ //ensure recovery works ok:
+ recover(store, queues, exchanges, links);
+
+ Exchange::shared_ptr exchange = exchanges.get(exchangeName);
+ BOOST_REQUIRE(!queues.find(queueName1).get());
+ BOOST_REQUIRE(queues.find(queueName2).get());
+
+ //delete exchange:
+ store.destroy(*exchange);
+ }
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1);
+ ExchangeRegistry exchanges;
+ QueueRegistry queues;
+ LinkRegistry links;
+
+ //ensure recovery works ok:
+ recover(store, queues, exchanges, links);
+
+ try {
+ Exchange::shared_ptr exchange = exchanges.get(exchangeName);
+ BOOST_FAIL("Expected exchange not to be found");
+ } catch (const SessionException& e) {
+ BOOST_CHECK_EQUAL((framing::ReplyCode) 404, e.code);
+ }
+ Queue::shared_ptr queue = queues.find(queueName2);
+ store.destroy(*queue);
+ }
+
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_SUITE_END()
diff --git a/qpid/cpp/src/tests/legacystore/TestFramework.cpp b/qpid/cpp/src/tests/legacystore/TestFramework.cpp
new file mode 100644
index 0000000000..2f7faf7682
--- /dev/null
+++ b/qpid/cpp/src/tests/legacystore/TestFramework.cpp
@@ -0,0 +1,30 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+// Defines broker to be used by tests
+
+#include "unit_test.h"
+#include "TestFramework.h"
+#include "qpid/broker/Broker.h"
+
+#include <iostream>
+
+//BOOST_GLOBAL_FIXTURE( testBroker )
diff --git a/qpid/cpp/src/tests/legacystore/TestFramework.h b/qpid/cpp/src/tests/legacystore/TestFramework.h
new file mode 100644
index 0000000000..f3066db602
--- /dev/null
+++ b/qpid/cpp/src/tests/legacystore/TestFramework.h
@@ -0,0 +1,37 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+// Defines broker to be used by tests
+
+#include "unit_test.h"
+
+#include <qpid/broker/Broker.h>
+
+namespace {
+ // test broker
+ qpid::broker::Broker::Options opts;
+ qpid::broker::Broker br(opts);
+/*
+ struct testBroker {
+ testBroker() {}
+ ~testBroker() {}
+ };*/
+}
diff --git a/qpid/cpp/src/tests/legacystore/TransactionalTest.cpp b/qpid/cpp/src/tests/legacystore/TransactionalTest.cpp
new file mode 100644
index 0000000000..2d3f6f922c
--- /dev/null
+++ b/qpid/cpp/src/tests/legacystore/TransactionalTest.cpp
@@ -0,0 +1,351 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include "unit_test.h"
+
+#include "qpid/legacystore/MessageStoreImpl.h"
+#include <iostream>
+#include "MessageUtils.h"
+#include "qpid/legacystore/StoreException.h"
+#include "qpid/broker/Queue.h"
+#include "qpid/broker/RecoveryManagerImpl.h"
+#include "qpid/framing/AMQHeaderBody.h"
+#include "qpid/log/Statement.h"
+#include "qpid/log/Logger.h"
+#include "qpid/sys/Timer.h"
+
+using namespace mrg::msgstore;
+using namespace qpid;
+using namespace qpid::broker;
+using namespace qpid::framing;
+using namespace std;
+
+namespace {
+qpid::broker::Broker::Options opts;
+qpid::broker::Broker br(opts);
+}
+
+QPID_AUTO_TEST_SUITE(TransactionalTest)
+
+#define SET_LOG_LEVEL(level) \
+ qpid::log::Options opts(""); \
+ opts.selectors.clear(); \
+ opts.selectors.push_back(level); \
+ qpid::log::Logger::instance().configure(opts);
+
+const string test_filename("TransactionalTest");
+const char* tdp = getenv("TMP_DATA_DIR");
+const string test_dir(tdp && strlen(tdp) > 0 ? tdp : "/tmp/TransactionalTest");
+
+// Test txn context with a special setCompleteFailure() method which prevents the entire "txn complete" process from happening
+class TestTxnCtxt : public TxnCtxt
+{
+ public:
+ TestTxnCtxt(IdSequence* _loggedtx) : TxnCtxt(_loggedtx) {}
+ void setCompleteFailure(const unsigned num_queues_rem) {
+ // Remove queue members from impactedQueues until num_queues_rem remain,
+ // to simulate a multi-queue txn completion failure.
+ while (impactedQueues.size() > num_queues_rem) impactedQueues.erase(impactedQueues.begin());
+ }
+ void resetPreparedXidStorePtr() { preparedXidStorePtr = 0; }
+};
+
+// Test store with a special begin() which returns a TestTxnCtxt, and commit()/abort() variants
+// that can optionally skip completing the prepared-transaction list.
+// begin(), commit(), and abort() all hide functions in MessageStoreImpl. To avoid the compiler
+// warnings/errors these are renamed with a 'TMS' prefix.
+class TestMessageStore: public MessageStoreImpl
+{
+ public:
+ TestMessageStore(qpid::broker::Broker* br, const char* envpath = 0) : MessageStoreImpl(br, envpath) {}
+ std::auto_ptr<qpid::broker::TransactionContext> TMSbegin() {
+ checkInit();
+ // pass sequence number for c/a
+ return auto_ptr<TransactionContext>(new TestTxnCtxt(&messageIdSequence));
+ }
+ void TMScommit(TransactionContext& ctxt, const bool complete_prepared_list) {
+ checkInit();
+ TxnCtxt* txn(check(&ctxt));
+ if (!txn->isTPC()) {
+ localPrepare(dynamic_cast<TxnCtxt*>(txn));
+ if (!complete_prepared_list) dynamic_cast<TestTxnCtxt*>(txn)->resetPreparedXidStorePtr();
+ }
+ completed(*dynamic_cast<TxnCtxt*>(txn), true);
+ }
+ void TMSabort(TransactionContext& ctxt, const bool complete_prepared_list)
+ {
+ checkInit();
+ TxnCtxt* txn(check(&ctxt));
+ if (!txn->isTPC()) {
+ localPrepare(dynamic_cast<TxnCtxt*>(txn));
+ if (!complete_prepared_list) dynamic_cast<TestTxnCtxt*>(txn)->resetPreparedXidStorePtr();
+ }
+ completed(*dynamic_cast<TxnCtxt*>(txn), false);
+ }
+};
+
+// === Helper fns ===
+
+const string nameA("queueA");
+const string nameB("queueB");
+//const Uuid messageId(true);
+std::auto_ptr<MessageStoreImpl> store;
+std::auto_ptr<QueueRegistry> queues;
+Queue::shared_ptr queueA;
+Queue::shared_ptr queueB;
+
+template <class T>
+void setup()
+{
+ store = std::auto_ptr<T>(new T(&br));
+ store->init(test_dir, 4, 1, true); // truncate store
+
+ //create two queues:
+ queueA = Queue::shared_ptr(new Queue(nameA, 0, store.get(), 0));
+ queueA->create();
+ queueB = Queue::shared_ptr(new Queue(nameB, 0, store.get(), 0));
+ queueB->create();
+}
+
+template <class T>
+void restart()
+{
+ queueA.reset();
+ queueB.reset();
+ queues.reset();
+ store.reset();
+
+ store = std::auto_ptr<T>(new T(&br));
+ store->init(test_dir, 4, 1);
+ queues = std::auto_ptr<QueueRegistry>(new QueueRegistry);
+ ExchangeRegistry exchanges;
+ LinkRegistry links;
+ sys::Timer t;
+ DtxManager mgr(t);
+ mgr.setStore (store.get());
+ RecoveryManagerImpl recovery(*queues, exchanges, links, mgr, br.getProtocolRegistry());
+ store->recover(recovery);
+
+ queueA = queues->find(nameA);
+ queueB = queues->find(nameB);
+}
+
+Message createMessage(const string& id, const string& exchange="exchange", const string& key="routing_key")
+{
+ return MessageUtils::createMessage(exchange, key, Uuid(), true, 0, id);
+}
+
+void checkMsg(Queue::shared_ptr& queue, u_int32_t size, const string& msgid = "<none>")
+{
+ BOOST_REQUIRE(queue);
+ BOOST_CHECK_EQUAL(size, queue->getMessageCount());
+ if (size > 0) {
+ Message msg = MessageUtils::get(*queue);
+ BOOST_REQUIRE(msg);
+ BOOST_CHECK_EQUAL(msgid, MessageUtils::getCorrelationId(msg));
+ }
+}
+
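+// Moves a message from queueA to queueB within a store transaction, commits or aborts it,
+// then restarts the store and checks which queue holds the message.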
+void swap(bool commit)
+{
+ setup<MessageStoreImpl>();
+
+ //create message and enqueue it onto first queue:
+ Message msgA = createMessage("Message", "exchange", "routing_key");
+ queueA->deliver(msgA);
+
+ QueueCursor cursorB;
+ Message msgB = MessageUtils::get(*queueA, &cursorB);
+ BOOST_REQUIRE(msgB);
+ //move the message from one queue to the other as a transaction
+ std::auto_ptr<TransactionContext> txn = store->begin();
+ TxBuffer tx;
+ queueB->deliver(msgB, &tx); //note: need to enqueue it first to avoid the message being deleted
+
+ queueA->dequeue(txn.get(), cursorB);
+ tx.prepare(txn.get());
+ if (commit) {
+ store->commit(*txn);
+ } else {
+ store->abort(*txn);
+ }
+
+ restart<MessageStoreImpl>();
+
+ // Check outcome
+ BOOST_REQUIRE(queueA);
+ BOOST_REQUIRE(queueB);
+
+ Queue::shared_ptr x;//the queue from which the message was swapped
+ Queue::shared_ptr y;//the queue on which the message is expected to be
+
+ if (commit) {
+ x = queueA;
+ y = queueB;
+ } else {
+ x = queueB;
+ y = queueA;
+ }
+
+ checkMsg(x, 0);
+ checkMsg(y, 1, "Message");
+ checkMsg(y, 0);
+}
+
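+// Runs a transaction spanning both queues, then commits or aborts it while simulating a completion
+// failure: only num_queues_rem of the impacted queues are completed, and complete_prepared_list
+// controls whether the prepared-list record is completed. The recovered state is checked after restart.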
+void testMultiQueueTxn(const unsigned num_queues_rem, const bool complete_prepared_list, const bool commit)
+{
+ setup<TestMessageStore>();
+ TestMessageStore* tmsp = static_cast<TestMessageStore*>(store.get());
+ std::auto_ptr<TransactionContext> txn(tmsp->TMSbegin());
+ TxBuffer tx;
+
+ //create two messages and enqueue them onto both queues:
+ Message msgA = createMessage("MessageA", "exchange", "routing_key");
+ queueA->deliver(msgA, &tx);
+ queueB->deliver(msgA, &tx);
+ Message msgB = createMessage("MessageB", "exchange", "routing_key");
+ queueA->deliver(msgB, &tx);
+ queueB->deliver(msgB, &tx);
+
+ tx.prepare(txn.get());
+ static_cast<TestTxnCtxt*>(txn.get())->setCompleteFailure(num_queues_rem);
+ if (commit)
+ tmsp->TMScommit(*txn, complete_prepared_list);
+ else
+ tmsp->TMSabort(*txn, complete_prepared_list);
+ restart<TestMessageStore>();
+
+ // Check outcome
+ if (commit)
+ {
+ checkMsg(queueA, 2, "MessageA");
+ checkMsg(queueB, 2, "MessageA");
+ checkMsg(queueA, 1, "MessageB");
+ checkMsg(queueB, 1, "MessageB");
+ }
+ checkMsg(queueA, 0);
+ checkMsg(queueB, 0);
+}
+
+// === Test suite ===
+
+QPID_AUTO_TEST_CASE(Commit)
+{
+ SET_LOG_LEVEL("error+"); // This only needs to be set once.
+
+ cout << test_filename << ".Commit: " << flush;
+ swap(true);
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(Abort)
+{
+ cout << test_filename << ".Abort: " << flush;
+ swap(false);
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueCommit)
+{
+ cout << test_filename << ".MultiQueueCommit: " << flush;
+ testMultiQueueTxn(2, true, true);
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueAbort)
+{
+ cout << test_filename << ".MultiQueueAbort: " << flush;
+ testMultiQueueTxn(2, true, false);
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueNoQueueCommitRecover)
+{
+ cout << test_filename << ".MultiQueueNoQueueCommitRecover: " << flush;
+ testMultiQueueTxn(0, false, true);
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueNoQueueAbortRecover)
+{
+ cout << test_filename << ".MultiQueueNoQueueAbortRecover: " << flush;
+ testMultiQueueTxn(0, false, false);
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueSomeQueueCommitRecover)
+{
+ cout << test_filename << ".MultiQueueSomeQueueCommitRecover: " << flush;
+ testMultiQueueTxn(1, false, true);
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueSomeQueueAbortRecover)
+{
+ cout << test_filename << ".MultiQueueSomeQueueAbortRecover: " << flush;
+ testMultiQueueTxn(1, false, false);
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueAllQueueCommitRecover)
+{
+ cout << test_filename << ".MultiQueueAllQueueCommitRecover: " << flush;
+ testMultiQueueTxn(2, false, true);
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueAllQueueAbortRecover)
+{
+ cout << test_filename << ".MultiQueueAllQueueAbortRecover: " << flush;
+ testMultiQueueTxn(2, false, false);
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(LockedRecordTest)
+{
+ cout << test_filename << ".LockedRecordTest: " << flush;
+
+ setup<MessageStoreImpl>();
+ queueA->deliver(createMessage("Message", "exchange", "routingKey"));
+ std::auto_ptr<TransactionContext> txn = store->begin();
+
+ QueueCursor cursor;
+ Message msg = MessageUtils::get(*queueA, &cursor);
+ queueA->dequeue(txn.get(), cursor);
+
+ try {
+ store->dequeue(0, msg.getPersistentContext(), *queueA);
+ BOOST_ERROR("Did not throw JERR_MAP_LOCKED exception as expected.");
+ }
+ catch (const mrg::msgstore::StoreException& e) {
+ if (std::strstr(e.what(), "JERR_MAP_LOCKED") == 0)
+ BOOST_ERROR("Unexpected StoreException: " << e.what());
+ }
+ catch (const std::exception& e) {
+ BOOST_ERROR("Unexpected exception: " << e.what());
+ }
+ store->commit(*txn);
+ checkMsg(queueA, 0);
+
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_SUITE_END()
diff --git a/qpid/cpp/src/tests/legacystore/TwoPhaseCommitTest.cpp b/qpid/cpp/src/tests/legacystore/TwoPhaseCommitTest.cpp
new file mode 100644
index 0000000000..92e49df9e3
--- /dev/null
+++ b/qpid/cpp/src/tests/legacystore/TwoPhaseCommitTest.cpp
@@ -0,0 +1,675 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include "unit_test.h"
+
+#include "qpid/legacystore/MessageStoreImpl.h"
+#include <iostream>
+#include "MessageUtils.h"
+#include "qpid/broker/Queue.h"
+#include "qpid/broker/RecoveryManagerImpl.h"
+#include "qpid/framing/AMQHeaderBody.h"
+#include "qpid/log/Statement.h"
+#include "qpid/legacystore/TxnCtxt.h"
+#include "qpid/log/Logger.h"
+#include "qpid/sys/Timer.h"
+
+using namespace mrg::msgstore;
+using namespace qpid;
+using namespace qpid::broker;
+using namespace qpid::framing;
+using namespace std;
+
+
+qpid::broker::Broker::Options opts;
+qpid::broker::Broker br(opts);
+
+
+QPID_AUTO_TEST_SUITE(TwoPhaseCommitTest)
+
+#define SET_LOG_LEVEL(level) \
+ qpid::log::Options opts(""); \
+ opts.selectors.clear(); \
+ opts.selectors.push_back(level); \
+ qpid::log::Logger::instance().configure(opts);
+
+
+const string test_filename("TwoPhaseCommitTest");
+const char* tdp = getenv("TMP_DATA_DIR");
+string test_dir(tdp && strlen(tdp) > 0 ? tdp : "/tmp/TwoPhaseCommitTest");
+
+// === Helper fns ===
+
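+// Exercises two-phase (XA) transactions against the store. Each Strategy (Enqueue, Dequeue, Swap,
+// MultiQueueTxn) defines the work performed inside the txn and the state expected after commit or abort.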
+class TwoPhaseCommitTest
+{
+
+ class Strategy
+ {
+ public:
+ virtual void init() = 0;
+ virtual void run(TPCTransactionContext* txn) = 0;
+ virtual void check(bool committed) = 0;
+ virtual ~Strategy(){}
+ };
+
+ class Swap : public Strategy
+ {
+ TwoPhaseCommitTest* const test;
+ const string messageId;
+ Message msg;
+ public:
+ Swap(TwoPhaseCommitTest* const test_, const string& messageId_): test(test_), messageId(messageId_) {}
+ void init(){ msg = test->deliver(messageId, test->queueA); }
+ void run(TPCTransactionContext* txn) { test->swap(txn, test->queueA, test->queueB); }
+ void check(bool committed) { test->swapCheck(committed, messageId, test->queueA, test->queueB); }
+ };
+
+ class Enqueue : public Strategy
+ {
+ TwoPhaseCommitTest* const test;
+ Message msg1;
+ Message msg2;
+ Message msg3;
+ public:
+ Enqueue(TwoPhaseCommitTest* const test_): test(test_) {}
+ void init() {}
+ void run(TPCTransactionContext* txn) {
+ msg1 = test->enqueue(txn, "Enqueue1", test->queueA);
+ msg2 = test->enqueue(txn, "Enqueue2", test->queueA);
+ msg3 = test->enqueue(txn, "Enqueue3", test->queueA);
+ }
+ void check(bool committed) {
+ if (committed) {
+ test->checkMsg(test->queueA, 3, "Enqueue1");
+ test->checkMsg(test->queueA, 2, "Enqueue2");
+ test->checkMsg(test->queueA, 1, "Enqueue3");
+ }
+ test->checkMsg(test->queueA, 0);
+ }
+ };
+
+ class Dequeue : public Strategy
+ {
+ TwoPhaseCommitTest* const test;
+ Message msg1;
+ Message msg2;
+ Message msg3;
+ public:
+ Dequeue(TwoPhaseCommitTest* const test_): test(test_) {}
+ void init() {
+ msg1 = test->deliver("Dequeue1", test->queueA);
+ msg2 = test->deliver("Dequeue2", test->queueA);
+ msg3 = test->deliver("Dequeue3", test->queueA);
+ }
+ void run(TPCTransactionContext* txn) {
+ test->dequeue(txn, test->queueA);
+ test->dequeue(txn, test->queueA);
+ test->dequeue(txn, test->queueA);
+ }
+ void check(bool committed) {
+ if (!committed) {
+ test->checkMsg(test->queueA, 3, "Dequeue1");
+ test->checkMsg(test->queueA, 2, "Dequeue2");
+ test->checkMsg(test->queueA, 1, "Dequeue3");
+ }
+ test->checkMsg(test->queueA, 0);
+ }
+ };
+
+ class MultiQueueTxn : public Strategy
+ {
+ TwoPhaseCommitTest* const test;
+ Message msg1;
+ Message msg2;
+ std::set<Queue::shared_ptr> queueset;
+ public:
+ MultiQueueTxn(TwoPhaseCommitTest* const test_): test(test_) {}
+ virtual void init() {}
+ virtual void run(TPCTransactionContext* txn) {
+ queueset.insert(test->queueA);
+ queueset.insert(test->queueB);
+ msg1 = test->enqueue(txn, "Message1", queueset);
+ msg2 = test->enqueue(txn, "Message2", queueset);
+ queueset.clear();
+ }
+ virtual void check(bool committed) {
+ TestMessageStore* sptr = static_cast<TestMessageStore*>(test->store.get());
+ if (committed)
+ {
+ test->checkMsg(test->queueA, 2, "Message1");
+ test->checkMsg(test->queueB, 2, "Message1");
+ test->checkMsg(test->queueA, 1, "Message2");
+ test->checkMsg(test->queueB, 1, "Message2");
+ }
+ test->checkMsg(test->queueA, 0);
+ test->checkMsg(test->queueB, 0);
+ // Check there are no remaining open txns in store
+ BOOST_CHECK_EQUAL(u_int32_t(0), sptr->getRemainingTxns(*(test->queueA)));
+ BOOST_CHECK_EQUAL(u_int32_t(0), sptr->getRemainingTxns(*(test->queueB)));
+ BOOST_CHECK_EQUAL(u_int32_t(0), sptr->getRemainingPreparedListTxns());
+ }
+ };
+
+ // Test txn context with a special setCompleteFailure() method which prevents the entire "txn complete" process from happening
+ class TestTPCTxnCtxt : public TPCTxnCtxt
+ {
+ public:
+ TestTPCTxnCtxt(const std::string& _xid, IdSequence* _loggedtx) : TPCTxnCtxt(_xid, _loggedtx) {}
+ void setCompleteFailure(const unsigned num_queues_rem, const bool complete_prepared_list) {
+ // Remove queue members from impactedQueues until num_queues_rem remain,
+ // to simulate a multi-queue txn completion failure.
+ while (impactedQueues.size() > num_queues_rem) impactedQueues.erase(impactedQueues.begin());
+ // If prepared list is not to be committed, set pointer to 0
+ if (!complete_prepared_list) preparedXidStorePtr = 0;
+ }
+ };
+
+ // Test store which has a special begin() which returns a TestTPCTxnCtxt, and methods to check for
+ // remaining open transactions
+ class TestMessageStore: public MessageStoreImpl
+ {
+ public:
+ TestMessageStore(qpid::broker::Broker* br, const char* envpath = 0) : MessageStoreImpl(br, envpath) {}
+ std::auto_ptr<qpid::broker::TPCTransactionContext> TMSbegin(const std::string& xid) {
+ checkInit();
+ IdSequence* jtx = &messageIdSequence;
+ // pass sequence number for c/a
+ return auto_ptr<TPCTransactionContext>(new TestTPCTxnCtxt(xid, jtx));
+ }
+ u_int32_t getRemainingTxns(const PersistableQueue& queue) {
+ return static_cast<JournalImpl*>(queue.getExternalQueueStore())->get_open_txn_cnt();
+ }
+ u_int32_t getRemainingPreparedListTxns() {
+ return tplStorePtr->get_open_txn_cnt();
+ }
+ };
+
+ const string nameA;
+ const string nameB;
+ std::auto_ptr<MessageStoreImpl> store;
+ std::auto_ptr<DtxManager> dtxmgr;
+ std::auto_ptr<QueueRegistry> queues;
+ std::auto_ptr<LinkRegistry> links;
+ Queue::shared_ptr queueA;
+ Queue::shared_ptr queueB;
+ Message msg1;
+ Message msg2;
+ Message msg4;
+ std::auto_ptr<TxBuffer> tx;
+
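+ // Prepares a 2PC swap, restarts the broker with the txn still prepared, then completes it
+ // via the recovered DtxManager and checks the outcome (again after a further restart).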
+ void recoverPrepared(bool commit)
+ {
+ setup<MessageStoreImpl>();
+
+ Swap swap(this, "RecoverPrepared");
+ swap.init();
+ std::auto_ptr<TPCTransactionContext> txn(store->begin("my-xid"));
+ swap.run(txn.get());
+ if (tx.get()) {
+ tx->prepare(txn.get());
+ tx.reset();
+ }
+
+ store->prepare(*txn);
+ restart<MessageStoreImpl>();
+
+ //check that the message is not available from either queue
+ BOOST_CHECK_EQUAL((u_int32_t) 0, queueA->getMessageCount());
+ BOOST_CHECK_EQUAL((u_int32_t) 0, queueB->getMessageCount());
+
+ //commit/abort the txn - through the dtx manager, not directly on the store
+ if (commit) {
+ dtxmgr->commit("my-xid", false);
+ } else {
+ dtxmgr->rollback("my-xid");
+ }
+
+ swap.check(commit);
+ restart<MessageStoreImpl>();
+ swap.check(commit);
+ }
+
+ void testMultiQueueTxn(const unsigned num_queues_rem, const bool complete_prepared_list, const bool commit)
+ {
+ setup<TestMessageStore>();
+ MultiQueueTxn mqtTest(this);
+ mqtTest.init();
+ std::auto_ptr<TPCTransactionContext> txn(static_cast<TestMessageStore*>(store.get())->begin("my-xid"));
+ mqtTest.run(txn.get());
+ if (tx.get()) {
+ tx->prepare(txn.get());
+ tx.reset();
+ }
+ store->prepare(*txn);
+
+ // As the commits and aborts should happen through DtxManager, and it is too complex to
+ // pass all these test params through, we bypass DtxManager and use the store directly.
+ // This will prevent the queues from seeing committed txns, however; the success or
+ // failure of the completion is therefore checked against the recovered store after restart.
+ static_cast<TestTPCTxnCtxt*>(txn.get())->setCompleteFailure(num_queues_rem, complete_prepared_list);
+ if (commit)
+ store->commit(*txn);
+ else
+ store->abort(*txn);
+ restart<TestMessageStore>();
+ mqtTest.check(commit);
+ }
+
+ void commit(Strategy& strategy)
+ {
+ setup<MessageStoreImpl>();
+ strategy.init();
+
+ std::auto_ptr<TPCTransactionContext> txn(store->begin("my-xid"));
+ strategy.run(txn.get());
+ if (tx.get()) {
+ tx->prepare(txn.get());
+ tx.reset();
+ }
+ store->prepare(*txn);
+ store->commit(*txn);
+ restart<MessageStoreImpl>();
+ strategy.check(true);
+ }
+
+ void abort(Strategy& strategy, bool prepare)
+ {
+ setup<MessageStoreImpl>();
+ strategy.init();
+
+ std::auto_ptr<TPCTransactionContext> txn(store->begin("my-xid"));
+ strategy.run(txn.get());
+ if (tx.get()) {
+ tx->prepare(txn.get());
+ tx.reset();
+ }
+ if (prepare) store->prepare(*txn);
+ store->abort(*txn);
+ restart<MessageStoreImpl>();
+ strategy.check(false);
+ }
+
+ void swap(TPCTransactionContext* txn, Queue::shared_ptr& from, Queue::shared_ptr& to)
+ {
+ QueueCursor c;
+ Message msg1 = MessageUtils::get(*from, &c);//just dequeues in memory
+ //move the message from one queue to the other as part of a
+ //distributed transaction
+ if (!tx.get()) tx = std::auto_ptr<TxBuffer>(new TxBuffer);
+ to->deliver(msg1, tx.get()); //note: need to enqueue it first to avoid the message being deleted
+ from->dequeue(txn, c);
+ }
+
+ void dequeue(TPCTransactionContext* txn, Queue::shared_ptr& queue)
+ {
+ QueueCursor c;
+ Message msg2 = MessageUtils::get(*queue, &c);//just dequeues in memory
+ queue->dequeue(txn, c);
+ }
+
+ Message enqueue(TPCTransactionContext* /*txn*/, const string& msgid, Queue::shared_ptr& queue)
+ {
+ Message msg = createMessage(msgid);
+ if (!tx.get()) tx = std::auto_ptr<TxBuffer>(new TxBuffer);
+ queue->deliver(msg, tx.get());
+ return msg;
+ }
+
+ Message enqueue(TPCTransactionContext* /*txn*/, const string& msgid, std::set<Queue::shared_ptr>& queueset)
+ {
+ if (!tx.get()) tx = std::auto_ptr<TxBuffer>(new TxBuffer);
+ Message msg = createMessage(msgid);
+ for (std::set<Queue::shared_ptr>::iterator i = queueset.begin(); i != queueset.end(); i++) {
+ (*i)->deliver(msg, tx.get());
+ }
+ return msg;
+ }
+
+ Message deliver(const string& msgid, Queue::shared_ptr& queue)
+ {
+ Message m = createMessage(msgid);
+ queue->deliver(m);
+ return m;
+ }
+
+ template <class T>
+ void setup()
+ {
+ store = std::auto_ptr<T>(new T(&br));
+ store->init(test_dir, 4, 1, true); // truncate store
+
+ //create two queues:
+ queueA = Queue::shared_ptr(new Queue(nameA, 0, store.get(), 0));
+ queueA->create();
+ queueB = Queue::shared_ptr(new Queue(nameB, 0, store.get(), 0));
+ queueB->create();
+ }
+
+ Message createMessage(const string& id, const string& exchange="exchange", const string& key="routing_key")
+ {
+ Message msg = MessageUtils::createMessage(exchange, key, Uuid(), true, 0, id);
+ return msg;
+ }
+
+ template <class T>
+ void restart()
+ {
+ queueA.reset();
+ queueB.reset();
+ store.reset();
+ queues.reset();
+ links.reset();
+
+ store = std::auto_ptr<T>(new T(&br));
+ store->init(test_dir, 4, 1);
+ sys::Timer t;
+ ExchangeRegistry exchanges;
+ queues = std::auto_ptr<QueueRegistry>(new QueueRegistry);
+ links = std::auto_ptr<LinkRegistry>(new LinkRegistry);
+ dtxmgr = std::auto_ptr<DtxManager>(new DtxManager(t));
+ dtxmgr->setStore (store.get());
+ RecoveryManagerImpl recovery(*queues, exchanges, *links, *dtxmgr, br.getProtocolRegistry());
+ store->recover(recovery);
+
+ queueA = queues->find(nameA);
+ queueB = queues->find(nameB);
+ }
+
+ void checkMsg(Queue::shared_ptr& queue, u_int32_t size, const string& msgid = "<none>")
+ {
+ BOOST_REQUIRE(queue);
+ BOOST_CHECK_EQUAL(size, queue->getMessageCount());
+ if (size > 0) {
+ Message msg = MessageUtils::get(*queue);
+ BOOST_REQUIRE(msg);
+ BOOST_CHECK_EQUAL(msgid, MessageUtils::getCorrelationId(msg));
+ }
+ }
+
+ void swapCheck(bool swapped, const string& msgid, Queue::shared_ptr& from, Queue::shared_ptr& to)
+ {
+ BOOST_REQUIRE(from);
+ BOOST_REQUIRE(to);
+
+ Queue::shared_ptr x; //the queue from which the message was swapped
+ Queue::shared_ptr y; //the queue on which the message is expected to be
+
+ if (swapped) {
+ x = from;
+ y = to;
+ } else {
+ x = to;
+ y = from;
+ }
+
+ checkMsg(x, 0);
+ checkMsg(y, 1, msgid);
+ checkMsg(y, 0);
+ }
+
+public:
+ TwoPhaseCommitTest() : nameA("queueA"), nameB("queueB") {}
+
+ void testCommitEnqueue()
+ {
+ Enqueue enqueue(this);
+ commit(enqueue);
+ }
+
+ void testCommitDequeue()
+ {
+ Dequeue dequeue(this);
+ commit(dequeue);
+ }
+
+ void testCommitSwap()
+ {
+ Swap swap(this, "SwapMessageId");
+ commit(swap);
+ }
+
+ void testPrepareAndAbortEnqueue()
+ {
+ Enqueue enqueue(this);
+ abort(enqueue, true);
+ }
+
+ void testPrepareAndAbortDequeue()
+ {
+ Dequeue dequeue(this);
+ abort(dequeue, true);
+ }
+
+ void testPrepareAndAbortSwap()
+ {
+ Swap swap(this, "SwapMessageId");
+ abort(swap, true);
+ }
+
+ void testAbortNoPrepareEnqueue()
+ {
+ Enqueue enqueue(this);
+ abort(enqueue, false);
+ }
+
+ void testAbortNoPrepareDequeue()
+ {
+ Dequeue dequeue(this);
+ abort(dequeue, false);
+ }
+
+ void testAbortNoPrepareSwap()
+ {
+ Swap swap(this, "SwapMessageId");
+ abort(swap, false);
+ }
+
+ void testRecoverPreparedThenCommitted()
+ {
+ recoverPrepared(true);
+ }
+
+ void testRecoverPreparedThenAborted()
+ {
+ recoverPrepared(false);
+ }
+
+ void testMultiQueueCommit()
+ {
+ testMultiQueueTxn(2, true, true);
+ }
+
+ void testMultiQueueAbort()
+ {
+ testMultiQueueTxn(2, true, false);
+ }
+
+ void testMultiQueueNoQueueCommitRecover()
+ {
+ testMultiQueueTxn(0, false, true);
+ }
+
+ void testMultiQueueNoQueueAbortRecover()
+ {
+ testMultiQueueTxn(0, false, false);
+ }
+
+ void testMultiQueueSomeQueueCommitRecover()
+ {
+ testMultiQueueTxn(1, false, true);
+ }
+
+ void testMultiQueueSomeQueueAbortRecover()
+ {
+ testMultiQueueTxn(1, false, false);
+ }
+
+ void testMultiQueueAllQueueCommitRecover()
+ {
+ testMultiQueueTxn(2, false, true);
+ }
+
+ void testMultiQueueAllQueueAbortRecover()
+ {
+ testMultiQueueTxn(2, false, false);
+ }
+};
+
+TwoPhaseCommitTest tpct;
+
+// === Test suite ===
+
+QPID_AUTO_TEST_CASE(CommitEnqueue)
+{
+ SET_LOG_LEVEL("error+"); // This only needs to be set once.
+
+ cout << test_filename << ".CommitEnqueue: " << flush;
+ tpct.testCommitEnqueue();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(CommitDequeue)
+{
+ cout << test_filename << ".CommitDequeue: " << flush;
+ tpct.testCommitDequeue();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(CommitSwap)
+{
+ cout << test_filename << ".CommitSwap: " << flush;
+ tpct.testCommitSwap();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(PrepareAndAbortEnqueue)
+{
+ cout << test_filename << ".PrepareAndAbortEnqueue: " << flush;
+ tpct.testPrepareAndAbortEnqueue();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(PrepareAndAbortDequeue)
+{
+ cout << test_filename << ".PrepareAndAbortDequeue: " << flush;
+ tpct.testPrepareAndAbortDequeue();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(PrepareAndAbortSwap)
+{
+ cout << test_filename << ".PrepareAndAbortSwap: " << flush;
+ tpct.testPrepareAndAbortSwap();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(AbortNoPrepareEnqueue)
+{
+ cout << test_filename << ".AbortNoPrepareEnqueue: " << flush;
+ tpct.testAbortNoPrepareEnqueue();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(AbortNoPrepareDequeue)
+{
+ cout << test_filename << ".AbortNoPrepareDequeue: " << flush;
+ tpct.testAbortNoPrepareDequeue();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(AbortNoPrepareSwap)
+{
+ cout << test_filename << ".AbortNoPrepareSwap: " << flush;
+ tpct.testAbortNoPrepareSwap();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(RecoverPreparedThenCommitted)
+{
+ cout << test_filename << ".RecoverPreparedThenCommitted: " << flush;
+ tpct.testRecoverPreparedThenCommitted();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(RecoverPreparedThenAborted)
+{
+ cout << test_filename << ".RecoverPreparedThenAborted: " << flush;
+ tpct.testRecoverPreparedThenAborted();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueCommit)
+{
+ cout << test_filename << ".MultiQueueCommit: " << flush;
+ tpct.testMultiQueueCommit();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueAbort)
+{
+ cout << test_filename << ".MultiQueueAbort: " << flush;
+ tpct.testMultiQueueAbort();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueNoQueueCommitRecover)
+{
+ cout << test_filename << ".MultiQueueNoQueueCommitRecover: " << flush;
+ tpct.testMultiQueueNoQueueCommitRecover();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueNoQueueAbortRecover)
+{
+ cout << test_filename << ".MultiQueueNoQueueAbortRecover: " << flush;
+ tpct.testMultiQueueNoQueueAbortRecover();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueSomeQueueCommitRecover)
+{
+ cout << test_filename << ".MultiQueueSomeQueueCommitRecover: " << flush;
+ tpct.testMultiQueueSomeQueueCommitRecover();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueSomeQueueAbortRecover)
+{
+ cout << test_filename << ".MultiQueueSomeQueueAbortRecover: " << flush;
+ tpct.testMultiQueueSomeQueueAbortRecover();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueAllQueueCommitRecover)
+{
+ cout << test_filename << ".MultiQueueAllQueueCommitRecover: " << flush;
+ tpct.testMultiQueueAllQueueCommitRecover();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueAllQueueAbortRecover)
+{
+ cout << test_filename << ".MultiQueueAllQueueAbortRecover: " << flush;
+ tpct.testMultiQueueAllQueueAbortRecover();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_SUITE_END()
diff --git a/qpid/cpp/src/tests/stop_cluster b/qpid/cpp/src/tests/legacystore/clean.sh
index 02436c60b7..efb19586fa 100755..100644
--- a/qpid/cpp/src/tests/stop_cluster
+++ b/qpid/cpp/src/tests/legacystore/clean.sh
@@ -8,9 +8,9 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -19,15 +19,14 @@
# under the License.
#
-# Stop brokers on ports listed in cluster.ports
+# This script cleans up any previous database and journal files, and should
+# be run prior to the store system tests, as these are prone to crashing or
+# hanging under some circumstances if the database is old or inconsistent.
-PORTS=`cat cluster.ports`
-for PORT in $PORTS ; do
- $QPIDD_EXEC --no-module-dir -qp $PORT || ERROR="$ERROR $PORT"
-done
-rm -f cluster.ports qpidd.port
-
-if [ -n "$ERROR" ]; then
- echo "Errors stopping brokers on ports: $ERROR"
- exit 1
+if [ -d ${TMP_DATA_DIR} ]; then
+ rm -rf ${TMP_DATA_DIR}
+fi
+if [ -d ${TMP_PYTHON_TEST_DIR} ]; then
+ rm -rf ${TMP_PYTHON_TEST_DIR}
fi
+rm -f ${abs_srcdir}/*.vglog*
diff --git a/qpid/cpp/src/tests/legacystore/persistence.py b/qpid/cpp/src/tests/legacystore/persistence.py
new file mode 100644
index 0000000000..c4ab712f14
--- /dev/null
+++ b/qpid/cpp/src/tests/legacystore/persistence.py
@@ -0,0 +1,574 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import sys, re, traceback, socket
+from getopt import getopt, GetoptError
+
+from qpid.connection import Connection
+from qpid.util import connect
+from qpid.datatypes import Message, RangedSet
+from qpid.queue import Empty
+from qpid.session import SessionException
+from qpid.testlib import TestBase010
+from time import sleep
+
+class PersistenceTest(TestBase010):
+
+ XA_RBROLLBACK = 1
+ XA_RBTIMEOUT = 2
+ XA_OK = 0
+
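+ # Builds a durable message (delivery_mode=2), routing kwargs into delivery or message properties.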
+ def createMessage(self, **kwargs):
+ session = self.session
+ dp = {}
+ dp['delivery_mode'] = 2
+ mp = {}
+ for k, v in kwargs.iteritems():
+ if k in ['routing_key', 'delivery_mode']: dp[k] = v
+ if k in ['message_id', 'correlation_id', 'application_headers']: mp[k] = v
+ args = []
+ args.append(session.delivery_properties(**dp))
+ if len(mp):
+ args.append(session.message_properties(**mp))
+ if kwargs.has_key('body'): args.append(kwargs['body'])
+ return Message(*args)
+
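+ # The phaseN methods build on state created by earlier phases; each later phase checks that
+ # durable state (queues, bindings, messages, dtx records) survives a broker restart between phases.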
+ def phase1(self):
+ session = self.session
+
+ session.queue_declare(queue="queue-a", durable=True)
+ session.queue_declare(queue="queue-b", durable=True)
+ session.exchange_bind(queue="queue-a", exchange="amq.direct", binding_key="a")
+ session.exchange_bind(queue="queue-b", exchange="amq.direct", binding_key="b")
+
+ session.message_transfer(destination="amq.direct",
+ message=self.createMessage(routing_key="a", correlation_id="Msg0001", body="A_Message1"))
+ session.message_transfer(destination="amq.direct",
+ message=self.createMessage(routing_key="b", correlation_id="Msg0002", body="B_Message1"))
+
+# session.queue_declare(queue="lvq-test", durable=True, arguments={"qpid.last_value_queue":True})
+# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"B"}, body="B1"))
+# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"A"}, body="A1"))
+# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"A"}, body="A2"))
+# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"B"}, body="B2"))
+# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"B"}, body="B3"))
+# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"C"}, body="C1"))
+
+
+
+ def phase2(self):
+ session = self.session
+
+ #check queues exist
+ session.queue_declare(queue="queue-a", durable=True, passive=True)
+ session.queue_declare(queue="queue-b", durable=True, passive=True)
+
+ #check they are still bound to amq.direct correctly
+ responses = []
+ responses.append(session.exchange_bound(queue="queue-a", exchange="amq.direct", binding_key="a"))
+ responses.append(session.exchange_bound(queue="queue-b", exchange="amq.direct", binding_key="b"))
+ for r in responses:
+ self.assert_(not r.exchange_not_found)
+ self.assert_(not r.queue_not_found)
+ self.assert_(not r.key_not_matched)
+
+
+ #check expected messages are there
+ self.assertMessageOnQueue("queue-a", "Msg0001", "A_Message1")
+ self.assertMessageOnQueue("queue-b", "Msg0002", "B_Message1")
+
+ self.assertEmptyQueue("queue-a")
+ self.assertEmptyQueue("queue-b")
+
+ session.queue_declare(queue="queue-c", durable=True)
+
+ #send a message to a topic such that it reaches all queues
+ session.exchange_bind(queue="queue-a", exchange="amq.topic", binding_key="abc")
+ session.exchange_bind(queue="queue-b", exchange="amq.topic", binding_key="abc")
+ session.exchange_bind(queue="queue-c", exchange="amq.topic", binding_key="abc")
+
+ session.message_transfer(destination="amq.topic",
+ message=self.createMessage(routing_key="abc", correlation_id="Msg0003", body="AB_Message2"))
+
+# #check LVQ exists and has expected messages:
+# session.queue_declare(queue="lvq-test", durable=True, passive=True)
+# session.message_subscribe(destination="lvq", queue="lvq-test")
+# lvq = session.incoming("lvq")
+# lvq.start()
+# accepted = RangedSet()
+# for m in ["A2", "B3", "C1"]:
+# msg = lvq.get(timeout=1)
+# self.assertEquals(m, msg.body)
+# accepted.add(msg.id)
+# try:
+# extra = lvq.get(timeout=1)
+# self.fail("lvq-test not empty, contains: " + extra.body)
+# except Empty: None
+# #publish some more messages while subscriber is active (no replacement):
+# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"C"}, body="C2"))
+# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"C"}, body="C3"))
+# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"A"}, body="A3"))
+# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"A"}, body="A4"))
+# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"C"}, body="C4"))
+# #check that accepting replaced messages is safe
+# session.message_accept(accepted)
+
+
+ def phase3(self):
+ session = self.session
+
+# #lvq recovery validation
+# session.queue_declare(queue="lvq-test", durable=True, passive=True)
+# session.message_subscribe(destination="lvq", queue="lvq-test")
+# lvq = session.incoming("lvq")
+# lvq.start()
+# accepted = RangedSet()
+# lvq.start()
+# for m in ["C4", "A4"]:
+# msg = lvq.get(timeout=1)
+# self.assertEquals(m, msg.body)
+# accepted.add(msg.id)
+# session.message_accept(accepted)
+# try:
+# extra = lvq.get(timeout=1)
+# self.fail("lvq-test not empty, contains: " + extra.body)
+# except Empty: None
+# session.message_cancel(destination="lvq")
+# session.queue_delete(queue="lvq-test")
+
+
+ #check queues exist
+ session.queue_declare(queue="queue-a", durable=True, passive=True)
+ session.queue_declare(queue="queue-b", durable=True, passive=True)
+ session.queue_declare(queue="queue-c", durable=True, passive=True)
+
+ session.tx_select()
+ #check expected messages are there
+ self.assertMessageOnQueue("queue-a", "Msg0003", "AB_Message2")
+ self.assertMessageOnQueue("queue-b", "Msg0003", "AB_Message2")
+ self.assertMessageOnQueue("queue-c", "Msg0003", "AB_Message2")
+
+ self.assertEmptyQueue("queue-a")
+ self.assertEmptyQueue("queue-b")
+ self.assertEmptyQueue("queue-c")
+
+ #note: default bindings must be restored for this to work
+ session.message_transfer(message=self.createMessage(
+ routing_key="queue-a", correlation_id="Msg0004", body="A_Message3"))
+ session.message_transfer(message=self.createMessage(
+ routing_key="queue-a", correlation_id="Msg0005", body="A_Message4"))
+ session.message_transfer(message=self.createMessage(
+ routing_key="queue-a", correlation_id="Msg0006", body="A_Message5"))
+
+ session.tx_commit()
+
+
+ #delete a queue
+ session.queue_delete(queue="queue-c")
+
+ session.message_subscribe(destination="ctag", queue="queue-a", accept_mode=0)
+ session.message_flow(destination="ctag", unit=0, value=0xFFFFFFFF)
+ session.message_flow(destination="ctag", unit=1, value=0xFFFFFFFF)
+ included = session.incoming("ctag")
+ msg1 = included.get(timeout=1)
+ self.assertExpectedContent(msg1, "Msg0004", "A_Message3")
+ msg2 = included.get(timeout=1)
+ self.assertExpectedContent(msg2, "Msg0005", "A_Message4")
+ msg3 = included.get(timeout=1)
+ self.assertExpectedContent(msg3, "Msg0006", "A_Message5")
+ self.ack(msg1, msg2, msg3)
+
+ session.message_transfer(destination="amq.direct", message=self.createMessage(
+ routing_key="queue-b", correlation_id="Msg0007", body="B_Message3"))
+
+ session.tx_rollback()
+
+
+ def phase4(self):
+ session = self.session
+
+ #check queues exist
+ session.queue_declare(queue="queue-a", durable=True, passive=True)
+ session.queue_declare(queue="queue-b", durable=True, passive=True)
+
+ self.assertMessageOnQueue("queue-a", "Msg0004", "A_Message3")
+ self.assertMessageOnQueue("queue-a", "Msg0005", "A_Message4")
+ self.assertMessageOnQueue("queue-a", "Msg0006", "A_Message5")
+
+ self.assertEmptyQueue("queue-a")
+ self.assertEmptyQueue("queue-b")
+
+ #check this queue doesn't exist
+ try:
+ session.queue_declare(queue="queue-c", durable=True, passive=True)
+ raise Exception("Expected queue-c to have been deleted")
+ except SessionException, e:
+ self.assertEquals(404, e.args[0].error_code)
+
+ def phase5(self):
+
+ session = self.session
+ queues = ["queue-a1", "queue-a2", "queue-b1", "queue-b2", "queue-c1", "queue-c2", "queue-d1", "queue-d2"]
+
+ for q in queues:
+ session.queue_declare(queue=q, durable=True)
+ session.queue_purge(queue=q)
+
+ session.message_transfer(message=self.createMessage(
+ routing_key="queue-a1", correlation_id="MsgA", body="MessageA"))
+ session.message_transfer(message=self.createMessage(
+ routing_key="queue-b1", correlation_id="MsgB", body="MessageB"))
+ session.message_transfer(message=self.createMessage(
+ routing_key="queue-c1", correlation_id="MsgC", body="MessageC"))
+ session.message_transfer(message=self.createMessage(
+ routing_key="queue-d1", correlation_id="MsgD", body="MessageD"))
+
+ session.dtx_select()
+ txa = self.xid('a')
+ txb = self.xid('b')
+ txc = self.xid('c')
+ txd = self.xid('d')
+
+ self.txswap("queue-a1", "queue-a2", txa)
+ self.txswap("queue-b1", "queue-b2", txb)
+ self.txswap("queue-c1", "queue-c2", txc)
+ self.txswap("queue-d1", "queue-d2", txd)
+
+ #no queue should have any messages accessible
+ for q in queues:
+ self.assertEqual(0, session.queue_query(queue=q).message_count, "Bad count for %s" % (q))
+
+ self.assertEqual(self.XA_OK, session.dtx_commit(xid=txa, one_phase=True).status)
+ self.assertEqual(self.XA_OK, session.dtx_rollback(xid=txb).status)
+ self.assertEqual(self.XA_OK, session.dtx_prepare(xid=txc).status)
+ self.assertEqual(self.XA_OK, session.dtx_prepare(xid=txd).status)
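+ #expected outcome: txa committed, so its message is now on queue-a2; txb rolled back,
+ #so its message is back on queue-b1; txc and txd are only prepared and remain in
+ #doubt until phase 6, so the c*/d* queues stay empty for now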
+
+ #further checks
+ not_empty = ["queue-a2", "queue-b1"]
+ for q in queues:
+ if q in not_empty:
+ self.assertEqual(1, session.queue_query(queue=q).message_count, "Bad count for %s" % (q))
+ else:
+ self.assertEqual(0, session.queue_query(queue=q).message_count, "Bad count for %s" % (q))
+
+
+ def phase6(self):
+ session = self.session
+
+ #check prepared transactions are reported correctly by recover
+ txc = self.xid('c')
+ txd = self.xid('d')
+
+ xids = session.dtx_recover().in_doubt
+ ids = [x.global_id for x in xids] #TODO: come up with nicer way to test these
+
+ if txc.global_id not in ids:
+ self.fail("Recovered xids not as expected. missing: %s" % (txc))
+ if txd.global_id not in ids:
+ self.fail("Recovered xids not as expected. missing: %s" % (txd))
+ self.assertEqual(2, len(xids))
+
+
+ queues = ["queue-a1", "queue-a2", "queue-b1", "queue-b2", "queue-c1", "queue-c2", "queue-d1", "queue-d2"]
+
+ #re-check that the counts still match the outcome of phase 5
+ not_empty = ["queue-a2", "queue-b1"]
+ for q in queues:
+ if q in not_empty:
+ self.assertEqual(1, session.queue_query(queue=q).message_count, "Bad count for %s" % (q))
+ else:
+ self.assertEqual(0, session.queue_query(queue=q).message_count, "Bad count for %s" % (q))
+
+ #complete the prepared transactions
+ self.assertEqual(self.XA_OK, session.dtx_commit(xid=txc).status)
+ self.assertEqual(self.XA_OK, session.dtx_rollback(xid=txd).status)
+ not_empty.append("queue-c2")
+ not_empty.append("queue-d1")
+
+ for q in queues:
+ if q in not_empty:
+ self.assertEqual(1, session.queue_query(queue=q).message_count)
+ else:
+ self.assertEqual(0, session.queue_query(queue=q).message_count)
+
+ def phase7(self):
+ session = self.session
+ session.synchronous = False
+
+ # check xids from phase 6 are gone
+ txc = self.xid('c')
+ txd = self.xid('d')
+
+ xids = session.dtx_recover().in_doubt
+ ids = [x.global_id for x in xids] #TODO: come up with nicer way to test these
+
+ if txc.global_id in ids:
+ self.fail("Xid still present : %s" % (txc))
+ if txd.global_id in ids:
+ self.fail("Xid still present : %s" % (txc))
+ self.assertEqual(0, len(xids))
+
+ #test deletion of queue after publish
+ #create queue
+ session.queue_declare(queue = "q", auto_delete=True, durable=True)
+
+ #send messages
+ for i in range(1, 10):
+ session.message_transfer(message=self.createMessage(routing_key = "q", body = "my-message"))
+
+ session.synchronous = True
+ #explicitly delete queue
+ session.queue_delete(queue = "q")
+
+ #test acking of message from auto-deleted queue
+ #create queue
+ session.queue_declare(queue = "q", auto_delete=True, durable=True)
+
+ #send message
+ session.message_transfer(message=self.createMessage(routing_key = "q", body = "my-message"))
+
+ #create consumer
+ session.message_subscribe(queue = "q", destination = "a", accept_mode=0, acquire_mode=0)
+ session.message_flow(unit = 1, value = 0xFFFFFFFF, destination = "a")
+ session.message_flow(unit = 0, value = 10, destination = "a")
+ queue = session.incoming("a")
+
+ #consume the message, cancel subscription (triggering auto-delete), then ack it
+ msg = queue.get(timeout = 5)
+ session.message_cancel(destination = "a")
+ self.ack(msg)
+
+ #test implicit deletion of bindings when queue is deleted
+ session.queue_declare(queue = "durable-subscriber-queue", exclusive=True, durable=True)
+ session.exchange_bind(exchange="amq.topic", queue="durable-subscriber-queue", binding_key="xyz")
+ session.message_transfer(destination= "amq.topic", message=self.createMessage(routing_key = "xyz", body = "my-message"))
+ session.queue_delete(queue = "durable-subscriber-queue")
+
+ #test unbind:
+ #create a series of bindings to a queue
+ session.queue_declare(queue = "binding-test-queue", durable=True)
+ session.exchange_bind(exchange="amq.direct", queue="binding-test-queue", binding_key="abc")
+ session.exchange_bind(exchange="amq.direct", queue="binding-test-queue", binding_key="pqr")
+ session.exchange_bind(exchange="amq.direct", queue="binding-test-queue", binding_key="xyz")
+ session.exchange_bind(exchange="amq.match", queue="binding-test-queue", binding_key="a", arguments={"x-match":"all", "p":"a"})
+ session.exchange_bind(exchange="amq.match", queue="binding-test-queue", binding_key="b", arguments={"x-match":"all", "p":"b"})
+ session.exchange_bind(exchange="amq.match", queue="binding-test-queue", binding_key="c", arguments={"x-match":"all", "p":"c"})
+ #then restart broker...
+
+
+ def phase8(self):
+ session = self.session
+
+ #continue testing unbind:
+ #send messages to the queue via each of the bindings
+ for k in ["abc", "pqr", "xyz"]:
+ data = "first %s" % (k)
+ session.message_transfer(destination= "amq.direct", message=self.createMessage(routing_key=k, body=data))
+ for a in [{"p":"a"}, {"p":"b"}, {"p":"c"}]:
+ data = "first %s" % (a["p"])
+ session.message_transfer(destination="amq.match", message=self.createMessage(application_headers=a, body=data))
+ #unbind some bindings (using final 0-10 semantics)
+ session.exchange_unbind(exchange="amq.direct", queue="binding-test-queue", binding_key="pqr")
+ session.exchange_unbind(exchange="amq.match", queue="binding-test-queue", binding_key="b")
+ #send messages again
+ for k in ["abc", "pqr", "xyz"]:
+ data = "second %s" % (k)
+ session.message_transfer(destination= "amq.direct", message=self.createMessage(routing_key=k, body=data))
+ for a in [{"p":"a"}, {"p":"b"}, {"p":"c"}]:
+ data = "second %s" % (a["p"])
+ session.message_transfer(destination="amq.match", message=self.createMessage(application_headers=a, body=data))
+
+ #check that only the correct messages are received
+ expected = []
+ for k in ["abc", "pqr", "xyz"]:
+ expected.append("first %s" % (k))
+ for a in [{"p":"a"}, {"p":"b"}, {"p":"c"}]:
+ expected.append("first %s" % (a["p"]))
+ for k in ["abc", "xyz"]:
+ expected.append("second %s" % (k))
+ for a in [{"p":"a"}, {"p":"c"}]:
+ expected.append("second %s" % (a["p"]))
+
+ session.message_subscribe(queue = "binding-test-queue", destination = "binding-test")
+ session.message_flow(unit = 1, value = 0xFFFFFFFF, destination = "binding-test")
+ session.message_flow(unit = 0, value = 10, destination = "binding-test")
+ queue = session.incoming("binding-test")
+
+ while len(expected):
+ msg = queue.get(timeout=1)
+ if msg.body not in expected:
+ self.fail("Missing message: %s" % msg.body)
+ expected.remove(msg.body)
+ try:
+ msg = queue.get(timeout=1)
+ self.fail("Got extra message: %s" % msg.body)
+ except Empty: pass
+
+
+
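+ #repeat the durable-subscriber-queue bind/publish/delete sequence from phase 7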
+ session.queue_declare(queue = "durable-subscriber-queue", exclusive=True, durable=True)
+ session.exchange_bind(exchange="amq.topic", queue="durable-subscriber-queue", binding_key="xyz")
+ session.message_transfer(destination= "amq.topic", message=self.createMessage(routing_key = "xyz", body = "my-message"))
+ session.queue_delete(queue = "durable-subscriber-queue")
+
+
+ def xid(self, txid, branchqual = ''):
+ return self.session.xid(format=0, global_id=txid, branch_id=branchqual)
+
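+ #txswap: inside the dtx transaction tx, consume one message from src and re-publish
+ #it (same correlation id and body) to dest; the transaction is only ended here, so
+ #nothing becomes visible on either queue until the xid is later resolved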
+ def txswap(self, src, dest, tx):
+ self.assertEqual(self.XA_OK, self.session.dtx_start(xid=tx).status)
+ self.session.message_subscribe(destination="temp-swap", queue=src, accept_mode=0)
+ self.session.message_flow(destination="temp-swap", unit=0, value=1)
+ self.session.message_flow(destination="temp-swap", unit=1, value=0xFFFFFFFF)
+ msg = self.session.incoming("temp-swap").get(timeout=1)
+ self.session.message_cancel(destination="temp-swap")
+ self.session.message_transfer(message=self.createMessage(routing_key=dest, correlation_id=self.getProperty(msg, 'correlation_id'),
+ body=msg.body))
+ self.ack(msg)
+ self.assertEqual(self.XA_OK, self.session.dtx_end(xid=tx).status)
+
+ def assertEmptyQueue(self, name):
+ self.assertEqual(0, self.session.queue_query(queue=name).message_count)
+
+ def assertConnectionException(self, expectedCode, message):
+ self.assertEqual("connection", message.method.klass.name)
+ self.assertEqual("close", message.method.name)
+ self.assertEqual(expectedCode, message.reply_code)
+
+ def assertExpectedMethod(self, reply, klass, method):
+ self.assertEqual(klass, reply.method.klass.name)
+ self.assertEqual(method, reply.method.name)
+
+ def assertExpectedContent(self, msg, id, body):
+ self.assertEqual(id, self.getProperty(msg, 'correlation_id'))
+ self.assertEqual(body, msg.body)
+ return msg
+
+ def getProperty(self, msg, name):
+ for h in msg.headers:
+ if hasattr(h, name): return getattr(h, name)
+ return None
+
+ def ack(self, *msgs):
+ session = self.session
+ set = RangedSet()
+ for m in msgs:
+ set.add(m.id)
+ #TODO: tidy up completion
+ session.receiver._completed.add(m.id)
+ session.message_accept(set)
+ session.channel.session_completed(session.receiver._completed)
+
+ def assertExpectedGetResult(self, id, body):
+ return self.assertExpectedContent(self.session.incoming("incoming-gets").get(timeout=1), id, body)
+
+ def assertEqual(self, expected, actual, msg=''):
+ if expected != actual: raise Exception("%s expected: %s actual: %s" % (msg, expected, actual))
+
+ def assertMessageOnQueue(self, queue, id, body):
+ self.session.message_subscribe(destination="incoming-gets", queue=queue, accept_mode=0)
+ self.session.message_flow(destination="incoming-gets", unit=0, value=1)
+ self.session.message_flow(destination="incoming-gets", unit=1, value=0xFFFFFFFF)
+ msg = self.session.incoming("incoming-gets").get(timeout=1)
+ self.assertExpectedContent(msg, id, body)
+ self.ack(msg)
+ self.session.message_cancel(destination="incoming-gets")
+
+
+ def __init__(self):
+ TestBase010.__init__(self, "run")
+ self.setBroker("localhost")
+ self.errata = []
+
+ def connect(self):
+ """ Connects to the broker """
+ self.conn = Connection(connect(self.host, self.port))
+ self.conn.start(timeout=10)
+ self.session = self.conn.session("test-session", timeout=10)
+
+ def run(self, args=sys.argv[1:]):
+ try:
+ opts, extra = getopt(args, "r:s:e:b:p:h", ["retry=", "spec=", "errata=", "broker=", "phase=", "help"])
+ except GetoptError, e:
+ self._die(str(e))
+ phase = 0
+ retry = 0
+ for opt, value in opts:
+ if opt in ("-h", "--help"): self._die()
+ if opt in ("-s", "--spec"): self.spec = value
+ if opt in ("-e", "--errata"): self.errata.append(value)
+ if opt in ("-b", "--broker"): self.setBroker(value)
+ if opt in ("-p", "--phase"): phase = int(value)
+ if opt in ("-r", "--retry"): retry = int(value)
+
+ if not phase: self._die("please specify the phase to run")
+ phase = "phase%d" % phase
+ self.connect()
+
+ try:
+ getattr(self, phase)()
+ print phase, "succeeded"
+ res = True
+ except Exception, e:
+ print phase, "failed: ", e
+ traceback.print_exc()
+ res = False
+
+
+ if not self.session.error(): self.session.close(timeout=10)
+ self.conn.close(timeout=10)
+
+ # Crude fix to wait for thread in client to exit after return from session_close()
+ # Reduces occurrences of "Unhandled exception in thread" messages after each test
+ import time
+ time.sleep(1)
+
+ return res
+
+
+ def setBroker(self, broker):
+ rex = re.compile(r"""
+ # [ <user> [ / <password> ] @] <host> [ :<port> ]
+ ^ (?: ([^/]*) (?: / ([^@]*) )? @)? ([^:]+) (?: :([0-9]+))?$""", re.X)
+ match = rex.match(broker)
+ if not match: self._die("'%s' is not a valid broker" % (broker))
+ self.user, self.password, self.host, self.port = match.groups()
+ self.port = int(default(self.port, 5672))
+ self.user = default(self.user, "guest")
+ self.password = default(self.password, "guest")
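+ #accepted forms (illustrative): "localhost", "otherhost:5673", "user/pass@host:5672"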
+
+ def _die(self, message = None):
+ if message: print message
+ print """
+Options:
+ -h/--help : this message
+ -s/--spec <spec.xml> : file containing amqp XML spec
+ -e/--errata <errata.xml> : file containing amqp XML spec errata (may be repeated)
+ -p/--phase : test phase to run
+ -b/--broker [<user>[/<password>]@]<host>[:<port>] : broker to connect to
+ """
+ sys.exit(1)
+
+def default(value, default):
+ if value is None: return default
+ else: return value
+
+if __name__ == "__main__":
+ test = PersistenceTest()
+ if not test.run(): sys.exit(1)
diff --git a/qpid/cpp/src/tests/federated_cluster_test_with_node_failure b/qpid/cpp/src/tests/legacystore/run_long_python_tests
index e9ae4b5914..e43b2236ec 100755..100644
--- a/qpid/cpp/src/tests/federated_cluster_test_with_node_failure
+++ b/qpid/cpp/src/tests/legacystore/run_long_python_tests
@@ -1,5 +1,4 @@
#!/bin/bash
-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
@@ -8,9 +7,9 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -19,5 +18,4 @@
# under the License.
#
-srcdir=`dirname $0`
-TEST_NODE_FAILURE=1 $srcdir/federated_cluster_test
+./run_python_tests LONG_TEST
diff --git a/qpid/cpp/src/tests/legacystore/run_python_tests b/qpid/cpp/src/tests/legacystore/run_python_tests
new file mode 100644
index 0000000000..d9dec16963
--- /dev/null
+++ b/qpid/cpp/src/tests/legacystore/run_python_tests
@@ -0,0 +1,64 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+if test -z ${QPID_DIR} ; then
+ cat <<EOF
+
+ =========== WARNING: PYTHON TESTS DISABLED ==============
+
+ QPID_DIR not set.
+
+ ===========================================================
+
+EOF
+ exit
+fi
+
+. `dirname $0`/tests_env.sh
+
+MODULENAME=python_tests
+
+echo "Running Python tests in module ${MODULENAME}..."
+
+case x$1 in
+ xSHORT_TEST)
+ DEFAULT_PYTHON_TESTS="*.client_persistence.ExchangeQueueTests.* *.flow_to_disk.SimpleMaxSizeCountTest.test_browse_recover *.flow_to_disk.SimpleMaxSizeCountTest.test_durable_browse_recover *.flow_to_disk.MultiDurableQueueDurableMsgBrowseRecoverTxPTxCTest.test_mixed_limit_2" ;;
+ xLONG_TEST)
+ DEFAULT_PYTHON_TESTS= ;;
+ x)
+ DEFAULT_PYTHON_TESTS="*.client_persistence.* *.flow_to_disk.SimpleMaxSizeCountTest.* *.flow_to_disk.MultiDurableQueue*.test_mixed_limit_1 *.flow_to_disk.MultiQueue*.test_mixed_limit_1 *.resize.SimpleTest.* *.federation.*" ;;
+ *)
+ DEFAULT_PYTHON_TESTS=$1
+esac
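+
+# Usage (illustrative): no argument runs the default test set, SHORT_TEST runs a small
+# smoke set, LONG_TEST runs everything, and any other argument is used as a test pattern.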
+
+PYTHON_TESTS=${PYTHON_TESTS:-${DEFAULT_PYTHON_TESTS}}
+
+OUTDIR=${MODULENAME}.tmp
+rm -rf $OUTDIR
+
+# To debug a test, add the following options to the end of the following line:
+# -v DEBUG -c qpid.messaging.io.ops [*.testName]
+${PYTHON_DIR}/qpid-python-test -m ${MODULENAME} -I ${FAILING_PYTHON_TESTS} ${PYTHON_TESTS} -DOUTDIR=$OUTDIR #-v DEBUG
+RETCODE=$?
+
+if test x${RETCODE} != x0; then
+ exit 1;
+fi
+exit 0
diff --git a/qpid/cpp/src/tests/run_long_cluster_tests b/qpid/cpp/src/tests/legacystore/run_short_python_tests
index 5dce0be585..523924fdba 100755..100644
--- a/qpid/cpp/src/tests/run_long_cluster_tests
+++ b/qpid/cpp/src/tests/legacystore/run_short_python_tests
@@ -1,5 +1,4 @@
#!/bin/bash
-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
@@ -8,9 +7,9 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -19,6 +18,4 @@
# under the License.
#
-srcdir=`dirname $0`
-$srcdir/run_cluster_tests 'cluster_tests.LongTests.*' -DDURATION=4
-
+./run_python_tests SHORT_TEST
diff --git a/qpid/cpp/src/tests/legacystore/run_test b/qpid/cpp/src/tests/legacystore/run_test
new file mode 100644
index 0000000000..1d5c2ae407
--- /dev/null
+++ b/qpid/cpp/src/tests/legacystore/run_test
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# Set up environment and run a test executable or script.
+#
+# Output nothing if test passes, show the output if it fails and
+# leave output in <test>.log for examination.
+#
+# If qpidd.port exists run test with QPID_PORT=`cat qpidd.port`
+#
+# If $VALGRIND is set, run under valgrind. If there are valgrind
+# errors, show the valgrind output and also leave it in <test>.vglog
+# for examination.
+#
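+# Typical invocation (illustrative): "./run_test ./some_unit_test" or, under valgrind,
+# "VALGRIND='valgrind --leak-check=full' ./run_test ./some_unit_test".
+#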
+
+source `dirname $0`/vg_check
+
+# Export variables from makefile.
+export VALGRIND srcdir
+
+# Export QPID_PORT if qpidd.port exists.
+test -f qpidd.port && export QPID_PORT=`cat qpidd.port`
+
+# Avoid silly libtool error messages if these are not defined
+test -z "$LC_ALL" && export LC_ALL=
+test -z "$LC_CTYPE" && export LC_CTYPE=
+test -z "$LC_COLLATE" && export LC_COLLATE=
+test -z "$LC_MESSAGES" && export LC_MESSAGES=
+
+VG_LOG="$1.vglog"
+rm -f $VG_LOG*
+
+if grep -l "^# Generated by .*libtool" "$1" >/dev/null 2>&1; then
+ # This is a libtool "executable". Valgrind it if VALGRIND specified.
+ test -n "$VALGRIND" && VALGRIND="$VALGRIND --log-file=$VG_LOG --"
+ # Hide output unless there's an error.
+ libtool --mode=execute $VALGRIND "$@" 2>&1 || ERROR=$?
+ test -n "$VALGRIND" && vg_check $VG_LOG*
+else
+ # This is a non-libtool shell script, just execute it.
+ export VALGRIND srcdir
+ exec "$@"
+fi
+
+if test -z "$ERROR"; then
+ # Clean up logs if there was no error.
+ rm -f $VG_LOG*
+ exit 0
+else
+ exit $ERROR
+fi
diff --git a/qpid/cpp/src/tests/run_cluster_test b/qpid/cpp/src/tests/legacystore/start_broker
index 11df3d63a3..30e4659030 100755..100644
--- a/qpid/cpp/src/tests/run_cluster_test
+++ b/qpid/cpp/src/tests/legacystore/start_broker
@@ -8,9 +8,9 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -19,9 +19,7 @@
# under the License.
#
-
-# Run the tests
-srcdir=`dirname $0`
-source cpg_check.sh
-cpg_enabled || exit 0
-with_ais_group $srcdir/run_test ./cluster_test
+QPIDD=$QPID_BLD/src/qpidd
+rm -f qpidd.vglog* qpidd.log
+test -n "$VALGRIND" && VALGRIND="$VALGRIND --log-file=qpidd.vglog --suppressions=$QPID_DIR/cpp/src/tests/.valgrind.supp --"
+exec libtool --mode=execute $VALGRIND $QPIDD --daemon --port=0 --log-enable error+ --log-to-file qpidd.log "$@" > qpidd.port
diff --git a/qpid/cpp/src/tests/cpg_check.sh.in b/qpid/cpp/src/tests/legacystore/stop_broker
index ed97776218..dcefff376f 100755..100644
--- a/qpid/cpp/src/tests/cpg_check.sh.in
+++ b/qpid/cpp/src/tests/legacystore/stop_broker
@@ -1,4 +1,5 @@
#!/bin/bash
+
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
@@ -18,21 +19,28 @@
# under the License.
#
-QPID_USE_CPG=${QPID_USE_CPG:-@USE_CPG@}
+# Stop the broker, check for errors.
+#
+if test -f qpidd.port; then
+ export QPID_PORT=`cat qpidd.port`
+ QPIDD=$QPID_BLD/src/qpidd
+ rm -f qpidd.port
+
+ $QPIDD --quit || ERROR=$?
-# Check if CPG is enabled
-cpg_enabled() {
- test x$QPID_USE_CPG = xyes || return 1 # disabled
- ps -u root | grep 'aisexec\|corosync' >/dev/null || {
- echo WARNING: Skip cluster tests, aisexec or corosync daemon is not running.
- return 1; # A warning, not a failure.
+ # Check qpidd.log.
+ grep -a 'warning\|error\|critical' qpidd.log && {
+ echo "WARNING: Suspicious broker log entries in qpidd.log, above."
}
- return 0
-}
-# Execute command with the ais group set if user is a member.
-with_ais_group() {
- if id -nG | grep '\<ais\>' >/dev/null; then sg ais -c "$*"
- else "$@"
+ # Check valgrind log.
+ if test -n "$VALGRIND"; then
+ source `dirname $0`/vg_check $VG_LOG*
+ vg_check qpidd.vglog*
fi
-}
+
+ exit $ERROR
+else
+ echo "No qpidd.port file found - cannot stop broker."
+ exit 1;
+fi
diff --git a/qpid/cpp/src/tests/legacystore/system_test.sh b/qpid/cpp/src/tests/legacystore/system_test.sh
new file mode 100644
index 0000000000..4cccc5ac8d
--- /dev/null
+++ b/qpid/cpp/src/tests/legacystore/system_test.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+error() { echo $*; exit 1; }
+
+# Make sure $QPID_DIR contains what we need.
+if ! test -d "$QPID_DIR" ; then
+ echo "WARNING: QPID_DIR is not set skipping system tests."
+ exit
+fi
+STORE_LIB=../lib/.libs/msgstore.so
+
+xml_spec=$QPID_DIR/specs/amqp.0-10-qpid-errata.xml
+test -f $xml_spec || error "$xml_spec not found: invalid \$QPID_DIR ?"
+export PYTHONPATH=$QPID_DIR/python:$QPID_DIR/extras/qmf/src/py:$QPID_DIR/tools/src/py
+
+echo "Using directory $TMP_DATA_DIR"
+
+fail=0
+
+# Run the tests with a given set of flags
+BROKER_OPTS="--no-module-dir --load-module=$STORE_LIB --data-dir=$TMP_DATA_DIR --auth=no --wcache-page-size 16"
+run_tests() {
+ for p in `seq 1 8`; do
+ $abs_srcdir/start_broker "$@" ${BROKER_OPTS} || { echo "FAIL broker start"; return 1; }
+ python "$abs_srcdir/persistence.py" -s "$xml_spec" -b localhost:`cat qpidd.port` -p $p -r 3 || fail=1;
+ $abs_srcdir/stop_broker
+ done
+}
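+
+# The broker is stopped and restarted between phases, so each phase verifies recovery
+# of the durable state written by the phases before it.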
+
+run_tests || fail=1
+
+exit $fail
diff --git a/qpid/cpp/src/tests/legacystore/tests_env.sh b/qpid/cpp/src/tests/legacystore/tests_env.sh
new file mode 100644
index 0000000000..30d255b87c
--- /dev/null
+++ b/qpid/cpp/src/tests/legacystore/tests_env.sh
@@ -0,0 +1,260 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# --- Function definitions ---
+
+
+func_check_required_env ()
+#-------------------------
+# Check that EITHER:
+# QPID_DIR is set (for running against svn QPID)
+# OR
+# QPID_PREFIX is set (for running against an installed QPID)
+# Falls back to /usr/sbin/qpidd if neither is set; returns 1 if no installed Qpid is found either.
+# Params: None
+# Returns: 0 if env vars ok, 1 otherwise
+{
+ if test -z "${QPID_DIR}" -a -z "${QPID_PREFIX}"; then
+ # Try to find qpidd in the normal installed location
+ if test -x /usr/sbin/qpidd; then
+ QPID_PREFIX=/usr
+ else
+ echo "ERROR: Could not find installed Qpid"
+ echo "Either of the following must be set in the environment for this script to run:"
+ echo " QPID_DIR for running against a Qpid svn build"
+ echo " QPID_PREFIX for running against an installed Qpid"
+ return 1
+ fi
+ fi
+ return 0
+}
+
+
+func_check_qpid_python ()
+#------------------------
+# Check that Qpid python environment is ok
+# Params: None
+# Returns: 0 if Python environment is ok; 1 otherwise
+{
+ if ! python -c "import qpid" ; then
+ cat <<EOF
+
+ =========== WARNING: PYTHON TESTS DISABLED ==============
+
+ Unable to load python qpid module - skipping python tests.
+
+ PYTHONPATH=${PYTHONPATH}
+
+ ===========================================================
+
+EOF
+ return 1
+ fi
+ return 0
+}
+
+
+func_set_env ()
+#--------------
+# Set up the environment based on value of ${QPID_DIR}: if ${QPID_DIR} exists, assume a svn checkout,
+# otherwise set up for an installed or prefix test.
+# Params: None
+# Returns: Nothing
+{
+ if test "${QPID_DIR}" -a -d "${QPID_DIR}" ; then
+ # QPID_DIR is defined for source tree builds by the --with-qpid-checkout configure option.
+ # QPID_BLD is defined as the build directory, either $QPID_DIR/cpp or separately specified with
+ # the --with-qpid-build option for VPATH builds.
+
+ # Check QPID_BLD is also set
+ if test -z ${QPID_BLD}; then
+ QPID_BLD="${QPID_DIR}/cpp"
+ fi
+
+ # Paths and dirs
+ #if test -z ${abs_srcdir}; then
+ # abs_srcdir=`pwd`
+ #fi
+ source $QPID_BLD/src/tests/test_env.sh
+ # Override these two settings from test_env.sh:
+ export RECEIVER_EXEC=$QPID_TEST_EXEC_DIR/qpid-receive
+ export SENDER_EXEC=$QPID_TEST_EXEC_DIR/qpid-send
+
+ echo "abs_srcdir=$abs_srcdir"
+ export STORE_LIB="`pwd`/../lib/.libs/msgstore.so"
+ export STORE_ENABLE=1
+ export CLUSTER_LIB="${QPID_BLD}/src/.libs/cluster.so"
+
+ PYTHON_DIR="${QPID_DIR}/python"
+ export PYTHONPATH="${PYTHONPATH}":"${PYTHON_DIR}":"${QPID_DIR}/extras/qmf/src/py":"${QPID_DIR}/tools/src/py":"${QPID_DIR}/cpp/src/tests":"${abs_srcdir}"
+
+ # Libraries
+
+ # Executables
+ export QPIDD_EXEC="${QPID_BLD}/src/qpidd"
+
+ # Test data
+
+ else
+ # Set up the environment based on value of ${QPID_PREFIX} for testing against an installed qpid
+ # Alternatively, make sure ${QPID_BIN_DIR}, ${QPID_SBIN_DIR}, ${QPID_LIB_DIR} and ${QPID_LIBEXEC_DIR} are set for
+ # the installed location.
+ if test "${QPID_PREFIX}" -a -d "${QPID_PREFIX}" ; then
+ QPID_BIN_DIR=${QPID_PREFIX}/bin
+ QPID_SBIN_DIR=${QPID_PREFIX}/sbin
+ QPID_LIB_DIR=${QPID_PREFIX}/lib
+ QPID_LIBEXEC_DIR=${QPID_PREFIX}/libexec
+ fi
+
+ # These four env vars must be set prior to calling this script
+ func_checkpaths QPID_BIN_DIR QPID_SBIN_DIR QPID_LIB_DIR QPID_LIBEXEC_DIR
+
+ # Paths and dirs
+ export PYTHON_DIR="${QPID_BIN_DIR}"
+ export PYTHONPATH="${PYTHONPATH}":"${QPID_LIB_DIR}/python":"${QPID_LIBEXEC_DIR}/qpid/tests":"${QPID_LIB_DIR}/python2.4"
+
+
+ # Libraries
+
+ # Executables
+ export QPIDD_EXEC="${QPID_SBIN_DIR}/qpidd"
+
+ # Test Data
+
+ fi
+}
+
+
+func_mk_data_dir ()
+#------------------
+# Create a data dir at ${TMP_DATA_DIR} if not present, clear it otherwise.
+# Set TMP_DATA_DIR if it is not set.
+# Params: None
+# Returns: Nothing
+{
+ if test -z "${TMP_DATA_DIR}"; then
+ TMP_DATA_DIR=/tmp/python_tests
+ echo "TMP_DATA_DIR not set; using ${TMP_DATA_DIR}"
+ fi
+
+ # Delete old test dirs if they exist
+ if test -d "${TMP_DATA_DIR}" ; then
+ rm -rf "${TMP_DATA_DIR}/*"
+ fi
+ mkdir -p "${TMP_DATA_DIR}"
+ export TMP_DATA_DIR
+}
+
+
+func_checkvar ()
+#---------------
+# Check that an environment var is set (ie non-zero length)
+# Params: $1 - env var to be checked
+# Returns: 0 = env var is set (ie non-zero length)
+# 1 = env var is not set
+{
+ local loc_VAR=$1
+ if test -z ${!loc_VAR}; then
+ echo "WARNING: environment variable ${loc_VAR} not set."
+ return 1
+ fi
+ return 0
+}
+
+
+func_checkpaths ()
+#-----------------
+# Check a list of paths (each can contain ':'-separated sub-list) is set and valid (ie each path exists as a dir)
+# Params: $@ - List of path env vars to be checked
+# Returns: Nothing
+{
+ local loc_PATHS=$@
+ for path in ${loc_PATHS}; do
+ func_checkvar ${path}
+ if test $? == 0; then
+ local temp_IFS=${IFS}
+ IFS=":"
+ local pl=${!path}
+ for p in ${pl[@]}; do
+ if test ! -d ${p}; then
+ echo "WARNING: Directory ${p} in var ${path} not found."
+ fi
+ done
+ IFS=${temp_IFS}
+ fi
+ done
+}
+
+
+func_checklibs ()
+#----------------
+# Check that a list of libs is set and valid (ie each lib exists as an executable file)
+# Params: $@ - List of lib values to be checked
+# Returns: Nothing
+{
+ local loc_LIBS=$@
+ for lib in ${loc_LIBS[@]}; do
+ func_checkvar ${lib}
+ if test $? == 0; then
+ if test ! -x ${!lib}; then
+ echo "WARNING: Library ${lib}=${!lib} not found."
+ fi
+ fi
+ done
+}
+
+
+func_checkexecs ()
+#-----------------
+# Check that a list of executables is set and valid (ie each exec exists as an executable file)
+# Params: $@ - List of exec values to be checked
+# Returns: Nothing
+{
+ local loc_EXECS=$@
+ for exec in ${loc_EXECS[@]}; do
+ func_checkvar ${exec}
+ if test $? == 0; then
+ if test ! -x ${!exec}; then
+ echo "WARNING: Executable ${exec}=${!exec} not found or is not executable."
+ fi
+ fi
+ done
+}
+
+
+#--- Start of script ---
+
+func_check_required_env || exit 1 # Cannot run, exit with error
+
+srcdir=`dirname $0`
+if test -z ${abs_srcdir}; then
+ abs_srcdir=${srcdir}
+fi
+
+func_set_env
+func_check_qpid_python || exit 0 # A warning, not a failure.
+func_mk_data_dir
+
+# Check expected environment vars are set
+func_checkpaths PYTHON_DIR PYTHONPATH TMP_DATA_DIR
+func_checklibs STORE_LIB CLUSTER_LIB
+func_checkexecs QPIDD_EXEC QPID_CONFIG_EXEC QPID_ROUTE_EXEC SENDER_EXEC RECEIVER_EXEC
+
+FAILING_PYTHON_TESTS="${abs_srcdir}/failing_python_tests.txt"
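+
+# This script is meant to be sourced (e.g. ". `dirname $0`/tests_env.sh" from
+# run_python_tests), not executed directly.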
+
diff --git a/qpid/cpp/src/tests/legacystore/unit_test.cpp b/qpid/cpp/src/tests/legacystore/unit_test.cpp
new file mode 100644
index 0000000000..add80a6f91
--- /dev/null
+++ b/qpid/cpp/src/tests/legacystore/unit_test.cpp
@@ -0,0 +1,28 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+
+// Defines test_main function to link with actual unit test code.
+#define BOOST_AUTO_TEST_MAIN // Boost 1.33
+#define BOOST_TEST_MAIN
+
+#include "unit_test.h"
+
diff --git a/qpid/cpp/src/tests/legacystore/unit_test.h b/qpid/cpp/src/tests/legacystore/unit_test.h
new file mode 100644
index 0000000000..16b6ae2ffb
--- /dev/null
+++ b/qpid/cpp/src/tests/legacystore/unit_test.h
@@ -0,0 +1,69 @@
+#ifndef QPIPD_TEST_UNIT_TEST_H_
+#define QPIPD_TEST_UNIT_TEST_H_
+
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+
+// Workaround so we can build against boost 1.32, 1.33 and boost 1.34.
+// Remove when we no longer need to support 1.32 or 1.33.
+
+#include <boost/version.hpp>
+
+#if (BOOST_VERSION < 103400) // v.1.33 and earlier
+# include <boost/test/auto_unit_test.hpp>
+#else // v.1.34 and later
+# include <boost/test/unit_test.hpp>
+#endif
+
+// Keep the test function for compilation but do not register it.
+// TODO aconway 2008-04-23: better workaround for expected failures.
+// The following causes the test testUpdateTxState not to run at all.
+# define QPID_AUTO_TEST_CASE_EXPECTED_FAILURES(test_name,n) \
+ namespace { struct test_name { void test_method(); }; } \
+ void test_name::test_method()
+// The following runs the test testUpdateTxState, but it fails.
+/*#define QPID_AUTO_TEST_CASE_EXPECTED_FAILURES(test_name,n) \
+ namespace { struct test_name { void test_method(); }; } \
+ BOOST_AUTO_TEST_CASE(test_name)*/
+
+#if (BOOST_VERSION < 103300) // v.1.32 and earlier
+
+# define QPID_AUTO_TEST_SUITE(name)
+# define QPID_AUTO_TEST_CASE(name) BOOST_AUTO_UNIT_TEST(name)
+# define QPID_AUTO_TEST_SUITE_END()
+
+#elif (BOOST_VERSION < 103400) // v.1.33
+
+// Note the trailing ';'
+# define QPID_AUTO_TEST_SUITE(name) BOOST_AUTO_TEST_SUITE(name);
+# define QPID_AUTO_TEST_CASE(name) BOOST_AUTO_TEST_CASE(name)
+# define QPID_AUTO_TEST_SUITE_END() BOOST_AUTO_TEST_SUITE_END();
+
+#else // v.1.34 and later
+
+# define QPID_AUTO_TEST_SUITE(name) BOOST_AUTO_TEST_SUITE(name)
+# define QPID_AUTO_TEST_CASE(name) BOOST_AUTO_TEST_CASE(name)
+# define QPID_AUTO_TEST_SUITE_END() BOOST_AUTO_TEST_SUITE_END()
+
+#endif
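+
+// Illustrative usage in a test source file (sketch only, not part of this header):
+//   #include "unit_test.h"
+//   QPID_AUTO_TEST_SUITE(LegacyStoreTestSuite)
+//   QPID_AUTO_TEST_CASE(testSomething) { BOOST_CHECK(1 + 1 == 2); }
+//   QPID_AUTO_TEST_SUITE_END()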
+
+#endif /*!QPIPD_TEST_UNIT_TEST_H_*/
diff --git a/qpid/cpp/src/tests/long_cluster_tests.py b/qpid/cpp/src/tests/long_cluster_tests.py
deleted file mode 100755
index f77837f0c4..0000000000
--- a/qpid/cpp/src/tests/long_cluster_tests.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env python
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-import os, signal, sys, unittest
-from testlib import TestBaseCluster
-
-class LongClusterTests(TestBaseCluster):
- """Long/Soak cluster tests with async store ability"""
-
-
- def test_LongCluster_01_DummyTest(self):
- """Dummy test - a placeholder for the first of the long/soak python cluster tests"""
- pass
-
-# Start the test here
-
-if __name__ == '__main__':
- if os.getenv("STORE_LIB") != None:
- print "NOTE: Store enabled for the following tests:"
- if not unittest.main(): sys.exit(1)
-
diff --git a/qpid/cpp/src/tests/qpid-test-cluster b/qpid/cpp/src/tests/qpid-test-cluster
deleted file mode 100755
index 40ad452a0d..0000000000
--- a/qpid/cpp/src/tests/qpid-test-cluster
+++ /dev/null
@@ -1,109 +0,0 @@
-#!/bin/bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-usage() {
- echo "Usage: `basename $0` [options] start|stop|restart|check [hosts]
-Start/stop/restart a cluster on specified hosts or on \$HOSTS via ssh.
-
-Options:
- -l USER Run qpidd and copy files as USER.
- -e SCRIPT Source SCRIPT for environment settings. Copies SCRIPT to each host.
- Default is $DEFAULT_ENV.
- -c CONFIG Use CONFIG as qpidd config file. Copies CONFIG to each host.
- Default is $DEFAULT_CONF
- -d Delete data-dir and log file before starting broker.
-"
- exit 1
-}
-
-DEFAULT_CONF=~/qpid-test-qpidd.conf
-DEFAULT_ENV=~/qpid-test-env.sh
-
-test -f $DEFAULT_CONF && CONF_FILE=$DEFAULT_CONF
-test -f $DEFAULT_ENV && ENV_FILE=$DEFAULT_ENV
-
-while getopts "l:e:c:d" opt; do
- case $opt in
- l) SSHOPTS="-l$OPTARG $SSHOPTS" ; RSYNC_USER="$OPTARG@" ;;
- e) ENV_FILE=$OPTARG ;;
- c) CONF_FILE=$OPTARG ;;
- d) DO_DELETE=1 ;;
- *) usage;;
- esac
-done
-shift `expr $OPTIND - 1`
-test "$*" || usage
-CMD=$1; shift
-HOSTS=${*:-$HOSTS}
-
-conf_value() { test -f "$CONF_FILE" && awk -F= "/^$1=/ {print \$2}" $CONF_FILE; }
-
-if test -n "$CONF_FILE"; then
- test -f "$CONF_FILE" || { echo Config file not found: $CONF_FILE; exit 1; }
- RSYNCFILES="$RSYNCFILES $CONF_FILE"
- QPIDD_ARGS="$QPIDD_ARGS --config $CONF_FILE"
- CONF_PORT=`conf_value port`
- CONF_DATA_DIR=`conf_value data-dir`
- CONF_LOG_FILE=`conf_value log-to-file`
-fi
-
-if test -n "$ENV_FILE"; then
- test -f "$ENV_FILE" || { echo Environment file not found: $ENV_FILE; exit 1; }
- RSYNCFILES="$RSYNCFILES $ENV_FILE"
- SOURCE_ENV="source $ENV_FILE ; "
-fi
-
-test -n "$RSYNCFILES" && rsynchosts $RSYNCFILES # Copy conf/env files to all hosts
-
-do_start() {
- for h in $HOSTS; do
- COMMAND="qpidd -d $QPIDD_ARGS"
- id -nG | grep '\<ais\>' >/dev/null && COMMAND="sg ais -c '$COMMAND'"
- if test "$DO_DELETE"; then COMMAND="rm -rf $CONF_DATA_DIR $CONF_LOG_FILE; $COMMAND"; fi
- ssh $h "$SOURCE_ENV $COMMAND" || { echo "Failed to start on $h"; exit 1; }
- done
-}
-
-do_stop() {
- for h in $HOSTS; do
- ssh $h "$SOURCE_ENV qpidd -q --no-module-dir --no-data-dir $QPIDD_ARGS"
- done
-}
-
-do_status() {
- for h in $HOSTS; do
- if ssh $h "$SOURCE_ENV qpidd -c --no-module-dir --no-data-dir $QPIDD_ARGS > /dev/null"; then
- echo "$h ok"
- else
- echo "$h not running"
- STATUS=1
- fi
- done
-}
-
-case $CMD in
- start) do_start ;;
- stop) do_stop ;;
- restart) do_stop ; do_start ;;
- status) do_status ;;
- *) usage;;
-esac
-
-exit $STATUS
diff --git a/qpid/cpp/src/tests/restart_cluster b/qpid/cpp/src/tests/restart_cluster
deleted file mode 100755
index 5b48e619f6..0000000000
--- a/qpid/cpp/src/tests/restart_cluster
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Re-start a cluster on the local host.
-
-srcdir=`dirname $0`
-$srcdir/stop_cluster
-exec $srcdir/start_cluster "$@"
-#!/bin/bash
-# Re-start a cluster on the local host.
-
-srcdir=`dirname $0`
-$srcdir/stop_cluster
-exec $srcdir/start_cluster "$@"
-#!/bin/bash
-# Re-start a cluster on the local host.
-
-srcdir=`dirname $0`
-$srcdir/stop_cluster
-exec $srcdir/start_cluster "$@"
diff --git a/qpid/cpp/src/tests/run_cluster_authentication_soak b/qpid/cpp/src/tests/run_cluster_authentication_soak
deleted file mode 100755
index 24befa28ba..0000000000
--- a/qpid/cpp/src/tests/run_cluster_authentication_soak
+++ /dev/null
@@ -1,27 +0,0 @@
-#! /bin/bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-
-source ./test_env.sh
-source sasl_test_setup.sh
-source cpg_check.sh
-cpg_enabled || exit 0
-
-with_ais_group ./cluster_authentication_soak 500
-
diff --git a/qpid/cpp/src/tests/run_cluster_authentication_test b/qpid/cpp/src/tests/run_cluster_authentication_test
deleted file mode 100755
index 844807a857..0000000000
--- a/qpid/cpp/src/tests/run_cluster_authentication_test
+++ /dev/null
@@ -1,27 +0,0 @@
-#! /bin/bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-
-source ./test_env.sh
-source sasl_test_setup.sh
-source cpg_check.sh
-cpg_enabled || exit 0
-
-with_ais_group ./cluster_authentication_soak
-
diff --git a/qpid/cpp/src/tests/run_cluster_tests b/qpid/cpp/src/tests/run_cluster_tests
deleted file mode 100755
index a5cea5ff6e..0000000000
--- a/qpid/cpp/src/tests/run_cluster_tests
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-source ./test_env.sh
-source cpg_check.sh
-cpg_enabled || exit 0
-
-
-test -x $QPID_PYTHON_TEST || { echo Skipping test, $QPID_PYTHON_TEST not found; exit 0; }
-
-# Delete old cluster test data
-OUTDIR=${OUTDIR:-brokertest.tmp}
-rm -rf $OUTDIR
-mkdir -p $OUTDIR
-
-# Ignore tests requiring a store by default.
-CLUSTER_TESTS_IGNORE=${CLUSTER_TESTS_IGNORE:--i cluster_tests.StoreTests.* -I $srcdir/cluster_tests.fail}
-CLUSTER_TESTS=${CLUSTER_TESTS:-$*}
-
-with_ais_group $QPID_PYTHON_TEST -DOUTDIR=$OUTDIR -m cluster_tests $CLUSTER_TESTS_IGNORE $CLUSTER_TESTS || exit 1
-rm -rf $OUTDIR
diff --git a/qpid/cpp/src/tests/run_failover_soak b/qpid/cpp/src/tests/run_failover_soak
deleted file mode 100755
index 2c56bf7d6b..0000000000
--- a/qpid/cpp/src/tests/run_failover_soak
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-source ./test_env.sh
-source cpg_check.sh
-cpg_enabled || exit 0
-
-host=127.0.0.1
-
-unset QPID_NO_MODULE_DIR # failover_soak uses --module-dir, dont want clash
-MODULES=${MODULES:-$moduledir}
-MESSAGES=${MESSAGES:-500000}
-REPORT_FREQUENCY=${REPORT_FREQUENCY:-20000}
-VERBOSITY=${VERBOSITY:-0}
-DURABILITY=${DURABILITY:-0}
-N_QUEUES=${N_QUEUES:-1}
-N_BROKERS=${N_BROKERS:-4}
-
-rm -f soak-*.log
-exec ./failover_soak $MODULES ./declare_queues ./replaying_sender ./resuming_receiver $MESSAGES $REPORT_FREQUENCY $VERBOSITY $DURABILITY $N_QUEUES $N_BROKERS
-
diff --git a/qpid/cpp/src/tests/run_federation_sys_tests b/qpid/cpp/src/tests/run_federation_sys_tests
index 76da176914..d9a9649c37 100755
--- a/qpid/cpp/src/tests/run_federation_sys_tests
+++ b/qpid/cpp/src/tests/run_federation_sys_tests
@@ -25,10 +25,6 @@ source ./test_env.sh
MODULENAME=federation_sys
-# Test for clustering
-source cpg_check.sh
-if cpg_enabled; then CLUSTERING_ENABLED=1; fi
-
# Test for long test
if [[ "$1" == "LONG_TEST" ]]; then
USE_LONG_TEST=1
@@ -42,11 +38,7 @@ if [ -z ${USE_LONG_TEST} ]; then
SKIPTESTS="-i federation_sys.A_Long* -i federation_sys.B_Long* ${SKIPTESTS}"
fi
echo "WARNING: Tests using persistence will be ignored."
-if [ -z ${CLUSTERING_ENABLED} ]; then
- SKIPTESTS="${SKIPTESTS} -i federation_sys.C_* -i federation_sys.D_*"
-elif [ -z ${USE_LONG_TEST} ]; then
- SKIPTESTS="${SKIPTESTS} -i federation_sys.C_Long* -i federation_sys.D_Long*"
-fi
+SKIPTESTS="${SKIPTESTS} -i federation_sys.C_* -i federation_sys.D_*"
start_brokers() {
start_broker() {
@@ -56,35 +48,21 @@ start_brokers() {
}
start_broker "" LOCAL_PORT
start_broker "" REMOTE_PORT
- if [ -n "${CLUSTERING_ENABLED}" ]; then
- start_broker "--load-module ${CLUSTER_LIB} --cluster-name test-cluster-1" CLUSTER_C1_1
- start_broker "--load-module ${CLUSTER_LIB} --cluster-name test-cluster-1" CLUSTER_C1_2
- start_broker "--load-module ${CLUSTER_LIB} --cluster-name test-cluster-2" CLUSTER_C2_1
- start_broker "--load-module ${CLUSTER_LIB} --cluster-name test-cluster-2" CLUSTER_C2_2
- fi
rm qpidd.port
}
stop_brokers() {
${QPIDD_EXEC} -q --port ${LOCAL_PORT}
${QPIDD_EXEC} -q --port ${REMOTE_PORT}
- if [ -n "${CLUSTERING_ENABLED}" ]; then
- ${QPID_CLUSTER_EXEC} --all-stop --force localhost:${CLUSTER_C1_1}
- ${QPID_CLUSTER_EXEC} --all-stop --force localhost:${CLUSTER_C2_1}
- fi
}
if test -d ${PYTHON_DIR} ; then
start_brokers
- if [ -z ${CLUSTERING_ENABLED} ]; then
- echo "Running federation tests using brokers on local port ${LOCAL_PORT}, remote port ${REMOTE_PORT} (NOTE: clustering is DISABLED)"
- else
- echo "Running federation tests using brokers on local port ${LOCAL_PORT}, remote port ${REMOTE_PORT}, local cluster nodes ${CLUSTER_C1_1} ${CLUSTER_C1_2}, remote cluster nodes ${CLUSTER_C2_1} ${CLUSTER_C2_2}"
- fi
+ echo "Running federation tests using brokers on local port ${LOCAL_PORT}, remote port ${REMOTE_PORT} (NOTE: clustering is DISABLED)"
if [ -z ${USE_LONG_TEST} ]; then
echo "NOTE: To run a full set of federation system tests, use \"make check-long\". To test with persistence, run the store version of this script."
fi
- ${QPID_PYTHON_TEST} -m ${MODULENAME} ${SKIPTESTS} -b localhost:${REMOTE_PORT} -Dlocal-port=${LOCAL_PORT} -Dremote-port=${REMOTE_PORT} -Dlocal-cluster-ports="${CLUSTER_C1_1} ${CLUSTER_C1_2}" -Dremote-cluster-ports="${CLUSTER_C2_1} ${CLUSTER_C2_2}" $@
+ ${QPID_PYTHON_TEST} -m ${MODULENAME} ${SKIPTESTS} -b localhost:${REMOTE_PORT} -Dlocal-port=${LOCAL_PORT} -Dremote-port=${REMOTE_PORT} $@
RETCODE=$?
stop_brokers
if test x${RETCODE} != x0; then
diff --git a/qpid/cpp/src/tests/sasl_fed_ex b/qpid/cpp/src/tests/sasl_fed_ex
index cc5b310067..4ea61c5a2a 100755
--- a/qpid/cpp/src/tests/sasl_fed_ex
+++ b/qpid/cpp/src/tests/sasl_fed_ex
@@ -34,17 +34,11 @@ then
echo
# These are the four different ways of creating links ( or routes+links )
# that the qpid-route command provides.
- echo "Usage: ${script_name} dynamic|link|queue|route [cluster]"
+ echo "Usage: ${script_name} dynamic|link|queue|route"
echo
exit 1
fi
-# Has the user told us to do clustering ? -----------
-clustering_flag=
-if [ $# -eq "2" ] && [ "$2" == "cluster" ]; then
- clustering_flag=true
-fi
-
qpid_route_method=$1
# Debugging print. --------------------------
@@ -128,15 +122,7 @@ DST_TCP_PORT=5807
SRC_TCP_PORT_2=5802
DST_TCP_PORT_2=5803
-CLUSTER_NAME_SUFFIX=`hostname | tr '.' ' ' | awk '{print $1}'`
-CLUSTER_1_NAME=sasl_fed_ex_cluster_1_${CLUSTER_NAME_SUFFIX}
-CLUSTER_2_NAME=sasl_fed_ex_cluster_2_${CLUSTER_NAME_SUFFIX}
-
-print "CLUSTER_1_NAME == ${CLUSTER_1_NAME}"
-print "CLUSTER_2_NAME == ${CLUSTER_2_NAME}"
-
SSL_LIB=${moduledir}/ssl.so
-CLUSTER_LIB=${moduledir}/cluster.so
export QPID_SSL_CERT_NAME=${TEST_HOSTNAME}
@@ -183,80 +169,26 @@ COMMON_BROKER_OPTIONS=" \
function start_brokers {
- if [ $1 ]; then
- # clustered ----------------------------------------
- print "Starting SRC cluster"
-
- print " src broker 1"
- $QPIDD_EXEC \
- --port=${SRC_TCP_PORT} \
- --ssl-port ${SRC_SSL_PORT} \
- ${COMMON_BROKER_OPTIONS} \
- --load-module ${CLUSTER_LIB} \
- --cluster-name ${CLUSTER_1_NAME} \
- --log-to-file $tmp_root/qpidd_src.log 2> /dev/null
-
- broker_ports[0]=${SRC_TCP_PORT}
-
- print " src broker 2"
- $QPIDD_EXEC \
- --port=${SRC_TCP_PORT_2} \
- --ssl-port ${SRC_SSL_PORT_2} \
- ${COMMON_BROKER_OPTIONS} \
- --load-module ${CLUSTER_LIB} \
- --cluster-name ${CLUSTER_1_NAME} \
- --log-to-file $tmp_root/qpidd_src_2.log 2> /dev/null
-
- broker_ports[1]=${SRC_TCP_PORT_2}
-
-
- print "Starting DST cluster"
-
- print " dst broker 1"
- $QPIDD_EXEC \
- --port=${DST_TCP_PORT} \
- --ssl-port ${DST_SSL_PORT} \
- ${COMMON_BROKER_OPTIONS} \
- --load-module ${CLUSTER_LIB} \
- --cluster-name ${CLUSTER_2_NAME} \
- --log-to-file $tmp_root/qpidd_dst.log 2> /dev/null
-
- broker_ports[2]=${DST_TCP_PORT}
-
- print " dst broker 2"
- $QPIDD_EXEC \
- --port=${DST_TCP_PORT_2} \
- --ssl-port ${DST_SSL_PORT_2} \
- ${COMMON_BROKER_OPTIONS} \
- --load-module ${CLUSTER_LIB} \
- --cluster-name ${CLUSTER_2_NAME} \
- --log-to-file $tmp_root/qpidd_dst_2.log 2> /dev/null
-
- broker_ports[3]=${DST_TCP_PORT_2}
-
- else
# vanilla brokers --------------------------------
print "Starting SRC broker"
$QPIDD_EXEC \
- --port=${SRC_TCP_PORT} \
- --ssl-port ${SRC_SSL_PORT} \
- ${COMMON_BROKER_OPTIONS} \
- --log-to-file $tmp_root/qpidd_src.log 2> /dev/null
+ --port=${SRC_TCP_PORT} \
+ --ssl-port ${SRC_SSL_PORT} \
+ ${COMMON_BROKER_OPTIONS} \
+ --log-to-file $tmp_root/qpidd_src.log 2> /dev/null
broker_ports[0]=${SRC_TCP_PORT}
print "Starting DST broker"
$QPIDD_EXEC \
- --port=${DST_TCP_PORT} \
- --ssl-port ${DST_SSL_PORT} \
- ${COMMON_BROKER_OPTIONS} \
- --log-to-file $tmp_root/qpidd_dst.log 2> /dev/null
+ --port=${DST_TCP_PORT} \
+ --ssl-port ${DST_SSL_PORT} \
+ ${COMMON_BROKER_OPTIONS} \
+ --log-to-file $tmp_root/qpidd_dst.log 2> /dev/null
broker_ports[1]=${DST_TCP_PORT}
- fi
}
-
function halt_brokers {
n_brokers=${#broker_ports[@]}
print "Halting ${n_brokers} brokers."
@@ -270,7 +202,7 @@ function halt_brokers {
}
-start_brokers $clustering_flag
+start_brokers
# I am not randomizing these names, because this test creates its own brokers.
@@ -329,9 +261,7 @@ fi
# to avoid false negatives.
sleep 5
-# This should work the same whether or not we are running a clustered test.
-# In the case of clustered tests, the status is not printed by qpid_route.
-# So in either case, I will look only at the transport field, which should be "ssl".
+# Look only at the transport field, which should be "ssl".
print "check the link"
link_status=$($QPID_ROUTE_EXEC link list localhost:${DST_TCP_PORT} | tail -1 | awk '{print $3}')
diff --git a/qpid/cpp/src/tests/sasl_fed_ex_dynamic_cluster b/qpid/cpp/src/tests/sasl_fed_ex_dynamic_cluster
deleted file mode 100755
index fd6b72a4f2..0000000000
--- a/qpid/cpp/src/tests/sasl_fed_ex_dynamic_cluster
+++ /dev/null
@@ -1,30 +0,0 @@
-#! /bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-
-source ./test_env.sh
-source cpg_check.sh
-cpg_enabled || exit 0
-
-
-with_ais_group ${srcdir}/sasl_fed_ex dynamic cluster
-
-
diff --git a/qpid/cpp/src/tests/sasl_fed_ex_link_cluster b/qpid/cpp/src/tests/sasl_fed_ex_link_cluster
deleted file mode 100755
index 34b2aa4a5f..0000000000
--- a/qpid/cpp/src/tests/sasl_fed_ex_link_cluster
+++ /dev/null
@@ -1,29 +0,0 @@
-#! /bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-
-source ./test_env.sh
-source cpg_check.sh
-cpg_enabled || exit 0
-
-with_ais_group ${srcdir}/sasl_fed_ex link cluster
-
-
diff --git a/qpid/cpp/src/tests/sasl_fed_ex_queue_cluster b/qpid/cpp/src/tests/sasl_fed_ex_queue_cluster
deleted file mode 100755
index 14f36f6fc4..0000000000
--- a/qpid/cpp/src/tests/sasl_fed_ex_queue_cluster
+++ /dev/null
@@ -1,29 +0,0 @@
-#! /bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-
-source ./test_env.sh
-source cpg_check.sh
-cpg_enabled || exit 0
-
-with_ais_group ${srcdir}/sasl_fed_ex queue cluster
-
-
diff --git a/qpid/cpp/src/tests/sasl_fed_ex_route_cluster b/qpid/cpp/src/tests/sasl_fed_ex_route_cluster
deleted file mode 100755
index 756476056e..0000000000
--- a/qpid/cpp/src/tests/sasl_fed_ex_route_cluster
+++ /dev/null
@@ -1,29 +0,0 @@
-#! /bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-
-source ./test_env.sh
-source cpg_check.sh
-cpg_enabled || exit 0
-
-with_ais_group ${srcdir}/sasl_fed_ex route cluster
-
-
diff --git a/qpid/cpp/src/tests/ssl_test b/qpid/cpp/src/tests/ssl_test
index 19a316a483..89aaf44af0 100755
--- a/qpid/cpp/src/tests/ssl_test
+++ b/qpid/cpp/src/tests/ssl_test
@@ -193,29 +193,3 @@ echo "Running SSL/TCP mux test on random port $PORT"
./qpid-perftest --count ${COUNT} --port ${PORT} -P tcp -b $TEST_HOSTNAME --summary || error "TCP connection failed!"
stop_brokers
-
-test -z $CLUSTER_LIB && exit 0 # Exit if cluster not supported.
-
-## Test failover in a cluster using SSL only
-source cpg_check.sh
-cpg_enabled || exit 0
-
-PORT1=`pick_port`; ssl_cluster_broker $PORT1
-echo "Running SSL cluster broker on port $PORT1"
-
-PORT2=`pick_port`; ssl_cluster_broker $PORT2
-echo "Running SSL cluster broker on port $PORT2"
-
-# Pipe receive output to uniq to remove duplicates
-./qpid-receive --connection-options "{reconnect:true, reconnect-timeout:5}" --failover-updates -b amqp:ssl:$TEST_HOSTNAME:$PORT1 -a "foo;{create:always}" -f | uniq > ssl_test_receive.tmp &
-./qpid-send -b amqp:ssl:$TEST_HOSTNAME:$PORT2 --content-string=one -a "foo;{create:always}"
-
-stop_broker 0 # Kill broker 1 - receiver should fail-over.
-echo "Killed SSL cluster broker on port $PORT1"
-
-./qpid-send -b amqp:ssl:$TEST_HOSTNAME:$PORT2 --content-string=two -a "foo;{create:always}" --send-eos 1
-wait # Wait for qpid-receive
-{ echo one; echo two; } > ssl_test_receive.cmp
-diff ssl_test_receive.tmp ssl_test_receive.cmp || { echo "Failover failed"; exit 1; }
-rm -f ssl_test_receive.*
-
diff --git a/qpid/cpp/src/tests/start_cluster b/qpid/cpp/src/tests/start_cluster
deleted file mode 100755
index 78fd104d9c..0000000000
--- a/qpid/cpp/src/tests/start_cluster
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Start a cluster of brokers on the local host and put the list of ports for cluster members in cluster.ports
-#
-
-# Execute command with the ais group set.
-source ./test_env.sh
-source cpg_check.sh
-cpg_enabled || exit 0
-
-rm -f cluster*.log cluster.ports qpidd.port
-
-SIZE=${1:-3}; shift
-CLUSTER=$HOSTNAME.$$
-OPTS="-d --no-module-dir --load-module $CLUSTER_LIB --cluster-name=$CLUSTER --auth=no --log-enable notice+ --log-enable debug+:cluster $@"
-
-for (( i=0; i<SIZE; ++i )); do
- DDIR=`mktemp -d /tmp/start_cluster.XXXXXXXXXX`
- PORT=`with_ais_group ../qpidd -p0 --log-to-file=cluster$i.log $OPTS --data-dir=$DDIR` || exit 1
- echo $PORT >> cluster.ports
-done
-
-head -n 1 cluster.ports > qpidd.port # First member's port for tests.
-
diff --git a/qpid/cpp/src/tests/start_cluster_hosts b/qpid/cpp/src/tests/start_cluster_hosts
deleted file mode 100755
index 778b4248da..0000000000
--- a/qpid/cpp/src/tests/start_cluster_hosts
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/bin/sh
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-#
-# Start a cluster of brokers on local host, put the list of host port addresses
-# in cluster.ports
-#
-# Arguments: [-k] [-p port] HOST [HOST...]
-# -p port to start broker on, can be 0. Actual ports recorded in cluster.addr.
-# -k kill any qpidd processes owned by this user before starting.
-#
-# Start a broker on each named host. Name a host twice to start multiple brokers.
-#
-# You must be able to ssh to each host and be in group ais.
-# $QPIDD must be executable on each host.
-# Logs go to syslog on each host, with a unique prefix per broker.
-#
-
-QPIDD=${QPIDD:-$PWD/../qpidd}
-LIBQPIDCLUSTER=${LIBQPIDCLUSTER:-$PWD/../.libs/cluster.so}
-NAME=$USER # User name is default cluster name.
-RESTART=NO
-
-while getopts "kp:n:q:l:r" ARG ; do
- case $ARG in
- k) KILL=yes ;;
- p) PORT="$OPTARG" ;;
- n) NAME=$OPTARG ;;
- q) QPIDD=$OPTARG ;;
- l) LIBQPIDCLUSTER=$OPTARG ;;
- r) RESTART=yes ;;
- *) echo "Error parsing options: $ARG"; exit 1 ;;
- esac
-done
-shift `expr $OPTIND - 1`
-test -n "$PORT" && PORTOPT="-p $PORT"
-test "$KILL" = yes && KILL="$QPIDD --no-module-dir -q $PORTOPT ;"
-CLUSTER=${*:-$CLUSTER} # Use args or env
-test -z "$CLUSTER" && { echo Must specify at least one host; exit 1; }
-
-
-OPTS="-d $PORTOPT --load-module $LIBQPIDCLUSTER --cluster-name=$NAME --no-data-dir --auth=no --log-to-syslog --log-enable=info+"
-
-num=0
-for h in $CLUSTER; do
- num=`expr $num + 1` # Give a unique log prefix to each node.
- cmd="$KILL $QPIDD $OPTS --log-prefix $num.$h"
- out=`echo "$cmd" | ssh $h newgrp ais` || { echo == $h error: $out ; exit 1; }
- if [ "$PORT" = 0 ] ; then p=$out; else p=$PORT; fi
- echo "$h $p"
-done
-
diff --git a/qpid/cpp/src/tests/test_env.sh.in b/qpid/cpp/src/tests/test_env.sh.in
index 02c34af649..76e88283ed 100644
--- a/qpid/cpp/src/tests/test_env.sh.in
+++ b/qpid/cpp/src/tests/test_env.sh.in
@@ -43,7 +43,6 @@ export PYTHON_COMMANDS=$QPID_TOOLS/src/py
export PYTHONPATH=$srcdir:$PYTHON_DIR:$PYTHON_COMMANDS:$QPID_TESTS_PY:$QMF_LIB:$PYTHONPATH
export QPID_CONFIG_EXEC=$PYTHON_COMMANDS/qpid-config
export QPID_ROUTE_EXEC=$PYTHON_COMMANDS/qpid-route
-export QPID_CLUSTER_EXEC=$PYTHON_COMMANDS/qpid-cluster
export QPID_HA_EXEC=$PYTHON_COMMANDS/qpid-ha
# Executables
@@ -63,7 +62,6 @@ export TEST_STORE_LIB=$testmoduledir/test_store.so
exportmodule() { test -f $moduledir/$2 && eval "export $1=$moduledir/$2"; }
exportmodule ACL_LIB acl.so
-exportmodule CLUSTER_LIB cluster.so
exportmodule HA_LIB ha.so
exportmodule SSLCONNECTOR_LIB sslconnector.so
exportmodule SSL_LIB ssl.so
diff --git a/qpid/cpp/src/tests/test_store.cpp b/qpid/cpp/src/tests/test_store.cpp
index 83f6a5e4b1..eac4deda2d 100644
--- a/qpid/cpp/src/tests/test_store.cpp
+++ b/qpid/cpp/src/tests/test_store.cpp
@@ -37,6 +37,7 @@
#include "qpid/broker/amqp_0_10/MessageTransfer.h"
#include "qpid/framing/AMQFrame.h"
#include "qpid/log/Statement.h"
+#include "qpid/sys/Thread.h"
#include "qpid/Plugin.h"
#include "qpid/Options.h"
#include <boost/cast.hpp>
diff --git a/qpid/cpp/src/tests/testlib.py b/qpid/cpp/src/tests/testlib.py
deleted file mode 100644
index 71ad59e5c1..0000000000
--- a/qpid/cpp/src/tests/testlib.py
+++ /dev/null
@@ -1,766 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-#
-# Support library for qpid python tests.
-#
-
-import os, re, signal, subprocess, time, unittest
-
-class TestBase(unittest.TestCase):
- """
- Base class for qpid tests. Provides broker start/stop/kill methods
- """
-
- """
- The following environment vars control if and how the test is run, and determine where many of the helper
- executables/libs are to be found.
- """
- _storeLib = os.getenv("STORE_LIB")
- _storeEnable = _storeLib != None # Must be True for durability to be enabled during the test
- _qpiddExec = os.getenv("QPIDD_EXEC", "/usr/sbin/qpidd")
- _tempStoreDir = os.path.abspath(os.getenv("TMP_DATA_DIR", "/tmp/qpid"))
-
- """Global message counter ensures unique messages"""
- _msgCnt = 0
-
- # --- Helper functions for parameter handling ---
-
- def _paramBool(self, key, val, keyOnly = False):
- if val == None:
- return ""
- if keyOnly:
- if val:
- return " --%s" % key
- else:
- return ""
- else:
- if val:
- return " --%s yes" % key
- else:
- return " --%s no" % key
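-    # For example, the helper above maps option values to qpid-config flags as follows
-    # (keys as used by addExchange later in this file): _paramBool("durable", True, True)
-    # yields " --durable", _paramBool("durable", True) yields " --durable yes", and
-    # _paramBool("ive", None) yields "".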
-
- # --- Helper functions for message creation ---
-
- def _makeMessage(self, msgSize):
- msg = "Message-%04d" % self._msgCnt
- self._msgCnt = self._msgCnt + 1
- msgLen = len(msg)
- if msgSize > msgLen:
- for i in range(msgLen, msgSize):
- if i == msgLen:
- msg += "-"
- else:
- msg += chr(ord('a') + (i % 26))
- return msg
-
- def _makeMessageList(self, numMsgs, msgSize):
- if msgSize == None:
- msgSize = 12
- msgs = ""
- for m in range(0, numMsgs):
- msgs += "%s\n" % self._makeMessage(msgSize)
- return msgs
-
- # --- Starting and stopping a broker ---
-
- def startBroker(self, qpiddArgs, logFile = None):
- """Start a single broker daemon, returns tuple (pid, port)"""
- if self._qpiddExec == None:
- raise Exception("Environment variable QPIDD is not set")
- cmd = "%s --daemon --port=0 %s" % (self._qpiddExec, qpiddArgs)
- portStr = os.popen(cmd).read()
- if len(portStr) == 0:
- err = "Broker daemon startup failed."
- if logFile != None:
- err += " See log file %s" % logFile
- raise Exception(err)
- port = int(portStr)
- pidStr = os.popen("%s -p %d -c" % (self._qpiddExec, port)).read()
- try:
- pid = int(pidStr)
- except:
- raise Exception("Unable to get pid: \"%s -p %d -c\" returned %s" % (self._qpiddExec, port, pidStr))
- #print "started broker: pid=%d, port=%d args: %s" % (pid, port, qpiddArgs)
- return (pid, port)
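-    # A minimal usage sketch (broker arguments are illustrative, not prescribed here):
-    #
-    #     pid, port = self.startBroker("--no-module-dir --auth=no --log-to-file=b.log", "b.log")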
-
- def killBroker(self, nodeTuple, ignoreFailures = False):
- """Kill a broker using kill -9"""
- try:
- os.kill(nodeTuple[self.PID], signal.SIGKILL)
- try:
- os.waitpid(nodeTuple[self.PID], 0)
- except:
- pass
- #print "killed broker: port=%d pid=%d" % (nodeTuple[self.PORT], nodeTuple[self.PID])
- except:
- if ignoreFailures:
- print "WARNING: killBroker (port=%d pid=%d) failed - ignoring." % (nodeTuple[self.PORT], nodeTuple[self.PID])
- else:
- raise
-
- def stopBroker(self, nodeTuple, ignoreFailures = False):
- """Stop a broker using qpidd -q"""
- try:
- ret = os.spawnl(os.P_WAIT, self._qpiddExec, self._qpiddExec, "--port=%d" % nodeTuple[self.PORT], "--quit", "--no-module-dir")
- if ret != 0:
- raise Exception("stopBroker(): port=%d: qpidd -q returned %d" % (nodeTuple[self.PORT], ret))
- try:
- os.waitpid(nodeTuple[self.PID], 0)
- except:
- pass
- #print "stopped broker: port=%d pid=%d" % (nodeTuple[self.PORT], nodeTuple[self.PID])
- except:
- if ignoreFailures:
- print "WARNING: stopBroker (port=%d pid=%d) failed - ignoring." % (nodeTuple[self.PORT], nodeTuple[self.PID])
- else:
- raise
-
-
-
-class TestBaseCluster(TestBase):
- """
- Base class for cluster tests. Provides methods for starting and stopping clusters and cluster nodes.
- """
-
- """
- The following environment vars control if and how the test is run, and determine where many of the helper
- executables/libs are to be found.
- """
- _clusterLib = os.getenv("CLUSTER_LIB")
- _clusterTestEnable = _clusterLib != None # Must be True for these cluster tests to run
- _xmlLib = os.getenv("XML_LIB")
- _xmlEnable = _xmlLib != None
- _qpidConfigExec = os.getenv("QPID_CONFIG_EXEC", "/usr/bin/qpid-config")
- _qpidRouteExec = os.getenv("QPID_ROUTE_EXEC", "/usr/bin/qpid-route")
- _receiverExec = os.getenv("RECEIVER_EXEC", "/usr/libexec/qpid/test/receiver")
- _senderExec = os.getenv("SENDER_EXEC", "/usr/libexec/qpid/test/sender")
-
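-    # A minimal sketch of how these variables might be supplied (module name and library
-    # paths are assumptions, not taken from this file). They are read when the class body
-    # is executed, so they must be in the environment before this module is imported:
-    #
-    #     import os
-    #     os.environ["CLUSTER_LIB"] = "/usr/lib64/qpid/daemon/cluster.so"  # assumed path
-    #     os.environ["STORE_LIB"] = "/usr/lib64/qpid/daemon/msgstore.so"   # assumed path
-    #     import testlib
-    #     class MyClusterTest(testlib.TestBaseCluster): pass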
-
- """
- _clusterDict is a dictionary of clusters:
- key = cluster name (string)
- val = dictionary of node numbers:
- key = integer node number
- val = tuple containing (pid, port)
- For example, two clusters "TestCluster0" and "TestCluster1" containing several nodes would look as follows:
- {"TestCluster0": {0: (pid0-0, port0-0), 1: (pid0-1, port0-1), ...}, "TestCluster1": {0: (pid1-0, port1-0), 1: (pid1-1, port1-1), ...}}
- where pidm-n and portm-n are the int pid and port for TestCluster m node n respectively.
- """
- _clusterDict = {}
-
- """Index for (pid, port) tuple"""
- PID = 0
- PORT = 1
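-    # For illustration, after createCluster("TestCluster0", 2) (cluster name hypothetical),
-    # the port of node 0 can be read from the dictionary described above as:
-    #
-    #     port = self._clusterDict["TestCluster0"][0][self.PORT]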
-
- def run(self, res):
-        """Skip cluster testing if the CLUSTER_LIB env var is not defined."""
- if not self._clusterTestEnable:
- return
- unittest.TestCase.run(self, res)
-
- # --- Private helper / convenience functions ---
-
- def _checkPids(self, clusterName = None):
-        for pid, port in self.getTupleList(clusterName):
- try:
- os.kill(pid, 0)
- except:
- raise Exception("_checkPids(): Broker with pid %d expected but does not exist! (crashed?)" % pid)
-
-
- # --- Starting cluster node(s) ---
-
- def createClusterNode(self, nodeNumber, clusterName):
- """Create a node and add it to the named cluster"""
- if self._tempStoreDir == None:
- raise Exception("Environment variable TMP_DATA_DIR is not set")
- if self._clusterLib == None:
-            raise Exception("Environment variable CLUSTER_LIB is not set")
- name = "%s-%d" % (clusterName, nodeNumber)
- dataDir = os.path.join(self._tempStoreDir, "cluster", name)
- logFile = "%s.log" % dataDir
- args = "--no-module-dir --load-module=%s --data-dir=%s --cluster-name=%s --auth=no --log-enable=notice+ --log-to-file=%s" % \
- (self._clusterLib, dataDir, clusterName, logFile)
- if self._storeEnable:
- if self._storeLib == None:
-                raise Exception("Environment variable STORE_LIB is not set")
- args += " --load-module %s" % self._storeLib
- self._clusterDict[clusterName][nodeNumber] = self.startBroker(args, logFile)
-
- def createCluster(self, clusterName, numberNodes = 0):
- """Create a cluster containing an initial number of nodes"""
- self._clusterDict[clusterName] = {}
- for n in range(0, numberNodes):
- self.createClusterNode(n, clusterName)
-
- def waitForNodes(self, clusterName):
- """Wait for all nodes to become active (ie finish cluster sync)"""
- # TODO - connect to each known node in cluster
- # Until this is done, wait a bit (hack)
- time.sleep(1)
-
- # --- Cluster and node status ---
-
- def getTupleList(self, clusterName = None):
- """Get list of (pid, port) tuples of all known cluster brokers"""
- tList = []
- for c, l in self._clusterDict.iteritems():
- if clusterName == None or c == clusterName:
- for t in l.itervalues():
- tList.append(t)
- return tList
-
- def getNumBrokers(self):
- """Get total number of brokers in all known clusters"""
- return len(self.getTupleList())
-
- def checkNumBrokers(self, expected = None, checkPids = True):
- """Check that the total number of brokers in all known clusters is the expected value"""
- if expected != None and self.getNumBrokers() != expected:
- raise Exception("Unexpected number of brokers: expected %d, found %d" % (expected, self.getNumBrokers()))
- if checkPids:
- self._checkPids()
-
- def getClusterTupleList(self, clusterName):
- """Get list of (pid, port) tuples of all nodes in named cluster"""
- if clusterName in self._clusterDict:
- return self._clusterDict[clusterName].values()
- return []
-
- def getNumClusterBrokers(self, clusterName):
- """Get total number of brokers in named cluster"""
- return len(self.getClusterTupleList(clusterName))
-
- def getNodeTuple(self, nodeNumber, clusterName):
- """Get the (pid, port) tuple for the given cluster node"""
- return self._clusterDict[clusterName][nodeNumber]
-
- def checkNumClusterBrokers(self, clusterName, expected = None, checkPids = True, waitForNodes = True):
- """Check that the total number of brokers in the named cluster is the expected value"""
- if expected != None and self.getNumClusterBrokers(clusterName) != expected:
- raise Exception("Unexpected number of brokers in cluster %s: expected %d, found %d" % \
- (clusterName, expected, self.getNumClusterBrokers(clusterName)))
- if checkPids:
- self._checkPids(clusterName)
- if waitForNodes:
- self.waitForNodes(clusterName)
-
- def clusterExists(self, clusterName):
- """ Return True if clusterName exists, False otherwise"""
- return clusterName in self._clusterDict.keys()
-
- def clusterNodeExists(self, clusterName, nodeNumber):
- """ Return True if nodeNumber in clusterName exists, False otherwise"""
- if clusterName in self._clusterDict.keys():
-            return nodeNumber in self._clusterDict[clusterName]
- return False
-
- def createCheckCluster(self, clusterName, size):
- """Create a cluster using the given name and size, then check the number of brokers"""
- self.createCluster(clusterName, size)
- self.checkNumClusterBrokers(clusterName, size)
-
- # --- Kill cluster nodes using signal 9 ---
-
- def killNode(self, nodeNumber, clusterName, updateDict = True, ignoreFailures = False):
- """Kill the given node in the named cluster using kill -9"""
- self.killBroker(self.getNodeTuple(nodeNumber, clusterName), ignoreFailures)
- if updateDict:
- del(self._clusterDict[clusterName][nodeNumber])
-
- def killCluster(self, clusterName, updateDict = True, ignoreFailures = False):
- """Kill all nodes in the named cluster"""
- for n in self._clusterDict[clusterName].iterkeys():
- self.killNode(n, clusterName, False, ignoreFailures)
- if updateDict:
- del(self._clusterDict[clusterName])
-
- def killClusterCheck(self, clusterName):
- """Kill the named cluster and check that the name is removed from the cluster dictionary"""
- self.killCluster(clusterName)
- if self.clusterExists(clusterName):
- raise Exception("Unable to kill cluster %s; %d nodes still exist" % \
- (clusterName, self.getNumClusterBrokers(clusterName)))
-
- def killAllClusters(self, ignoreFailures = False):
- """Kill all known clusters"""
- for n in self._clusterDict.iterkeys():
- self.killCluster(n, False, ignoreFailures)
- self._clusterDict.clear()
-
- def killAllClustersCheck(self, ignoreFailures = False):
- """Kill all known clusters and check that the cluster dictionary is empty"""
- self.killAllClusters(ignoreFailures)
- self.checkNumBrokers(0)
-
- # --- Stop cluster nodes using qpidd -q ---
-
- def stopNode(self, nodeNumber, clusterName, updateDict = True, ignoreFailures = False):
- """Stop the given node in the named cluster using qpidd -q"""
- self.stopBroker(self.getNodeTuple(nodeNumber, clusterName), ignoreFailures)
- if updateDict:
- del(self._clusterDict[clusterName][nodeNumber])
-
- def stopAllClusters(self, ignoreFailures = False):
- """Stop all known clusters"""
- for n in self._clusterDict.iterkeys():
- self.stopCluster(n, False, ignoreFailures)
- self._clusterDict.clear()
-
-
- def stopCluster(self, clusterName, updateDict = True, ignoreFailures = False):
- """Stop all nodes in the named cluster"""
- for n in self._clusterDict[clusterName].iterkeys():
- self.stopNode(n, clusterName, False, ignoreFailures)
- if updateDict:
- del(self._clusterDict[clusterName])
-
- def stopCheckCluster(self, clusterName, ignoreFailures = False):
- """Stop the named cluster and check that the name is removed from the cluster dictionary"""
- self.stopCluster(clusterName, True, ignoreFailures)
- if self.clusterExists(clusterName):
-            raise Exception("Unable to stop cluster %s; %d nodes still exist" % (clusterName, self.getNumClusterBrokers(clusterName)))
-
- def stopAllCheck(self, ignoreFailures = False):
-        """Stop all known clusters and check that the cluster dictionary is empty"""
-        self.stopAllClusters(ignoreFailures)
- self.checkNumBrokers(0)
-
- # --- qpid-config functions ---
-
- def _qpidConfig(self, nodeNumber, clusterName, action):
- """Configure some aspect of a qpid broker using the qpid_config executable"""
- port = self.getNodeTuple(nodeNumber, clusterName)[self.PORT]
- #print "%s -b localhost:%d %s" % (self._qpidConfigExec, port, action)
- ret = os.spawnl(os.P_WAIT, self._qpidConfigExec, self._qpidConfigExec, "-b", "localhost:%d" % port, *action.split())
- if ret != 0:
- raise Exception("_qpidConfig(): cluster=\"%s\" nodeNumber=%d port=%d action=\"%s\" returned %d" % \
- (clusterName, nodeNumber, port, action, ret))
-
- def addExchange(self, nodeNumber, clusterName, exchangeType, exchangeName, durable = False, sequence = False, \
- ive = False):
- """Add a named exchange."""
- action = "add exchange %s %s" % (exchangeType, exchangeName)
- action += self._paramBool("durable", durable, True)
- action += self._paramBool("sequence", sequence, True)
- action += self._paramBool("ive", ive, True)
- self._qpidConfig(nodeNumber, clusterName, action)
-
- def deleteExchange(self, nodeNumber, clusterName, exchangeName):
- """Delete a named exchange"""
- self._qpidConfig(nodeNumber, clusterName, "del exchange %s" % exchangeName)
-
- def addQueue(self, nodeNumber, clusterName, queueName, configArgs = None):
- """Add a queue using qpid-config."""
- action = "add queue %s" % queueName
- if self._storeEnable:
- action += " --durable"
- if configArgs != None:
- action += " %s" % configArgs
- self._qpidConfig(nodeNumber, clusterName, action)
-
- def delQueue(self, nodeNumber, clusterName, queueName):
- """Delete a named queue using qpid-config."""
- self._qpidConfig(nodeNumber, clusterName, "del queue %s" % queueName)
-
- def bind(self, nodeNumber, clusterName, exchangeName, queueName, key):
- """Create an exchange-queue binding using qpid-config."""
- self._qpidConfig(nodeNumber, clusterName, "bind %s %s %s" % (exchangeName, queueName, key))
-
- def unbind(self, nodeNumber, clusterName, exchangeName, queueName, key):
- """Remove an exchange-queue binding using qpid-config."""
- self._qpidConfig(nodeNumber, clusterName, "unbind %s %s %s" % (exchangeName, queueName, key))
-
- # --- qpid-route functions (federation) ---
-
- def brokerDict(self, nodeNumber, clusterName, host = "localhost", user = None, password = None):
- """Returns a dictionary containing the broker info to be passed to route functions"""
- port = self.getNodeTuple(nodeNumber, clusterName)[self.PORT]
- return {"cluster": clusterName, "node":nodeNumber, "port":port, "host":host, "user":user, "password":password}
-
- def _brokerStr(self, brokerDict):
- """Set up a broker string in the format [user/password@]host:port"""
- str = ""
-        if brokerDict["user"] != None and brokerDict["password"] != None:
-            str = "%s/%s@" % (brokerDict["user"], brokerDict["password"])
- str += "%s:%d" % (brokerDict["host"], brokerDict["port"])
- return str
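-    # For example (credentials illustrative): {"user": "guest", "password": "guest",
-    # "host": "localhost", "port": 5672} yields "guest/guest@localhost:5672", while a
-    # dictionary without credentials yields just "localhost:5672".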
-
- def _qpidRoute(self, action):
- """Set up a route using qpid-route"""
- #print "%s %s" % (self._qpidRouteExec, action)
- ret = os.spawnl(os.P_WAIT, self._qpidRouteExec, self._qpidRouteExec, *action.split())
- if ret != 0:
- raise Exception("_qpidRoute(): action=\"%s\" returned %d" % (action, ret))
-
- def routeDynamicAdd(self, destBrokerDict, srcBrokerDict, exchangeName):
- self._qpidRoute("dynamic add %s %s %s" % (self._brokerStr(destBrokerDict), self._brokerStr(srcBrokerDict), exchangeName))
-
- def routeDynamicDelete(self, destBrokerDict, srcBrokerDict, exchangeName):
- self._qpidRoute("dynamic del %s %s %s" % (self._brokerStr(destBrokerDict), self._brokerStr(srcBrokerDict), exchangeName))
-
- def routeAdd(self, destBrokerDict, srcBrokerDict, exchangeName, routingKey):
- self._qpidRoute("route add %s %s %s %s" % (self._brokerStr(destBrokerDict), self._brokerStr(srcBrokerDict), exchangeName, routingKey))
-
- def routeDelete(self, destBrokerDict, srcBrokerDict, exchangeName, routingKey):
- self._qpidRoute("route del %s %s %s %s" % (self._brokerStr(destBrokerDict), self._brokerStr(srcBrokerDict), exchangeName, routingKey))
-
- def routeQueueAdd(self, destBrokerDict, srcBrokerDict, exchangeName, queueName):
- self._qpidRoute("queue add %s %s %s %s" % (self._brokerStr(destBrokerDict), self._brokerStr(srcBrokerDict), exchangeName, queueName))
-
- def routeQueueDelete(self, destBrokerDict, srcBrokerDict, exchangeName, queueName):
- self._qpidRoute("queue del %s %s %s %s" % (self._brokerStr(destBrokerDict), self._brokerStr(srcBrokerDict), exchangeName, queueName))
-
- def routeLinkAdd(self, destBrokerDict, srcBrokerDict):
- self._qpidRoute("link add %s %s" % (self._brokerStr(destBrokerDict), self._brokerStr(srcBrokerDict)))
-
- def routeLinkDelete(self, destBrokerDict, srcBrokerDict):
- self._qpidRoute("link del %s %s" % (self._brokerStr(destBrokerDict), self._brokerStr(srcBrokerDict)))
-
- # --- Message send and receive functions ---
-
- def _receiver(self, action):
- if self._receiverExec == None:
- raise Exception("Environment variable RECEIVER is not set")
- cmd = "%s %s" % (self._receiverExec, action)
- #print cmd
- return subprocess.Popen(cmd.split(), stdout = subprocess.PIPE)
-
- def _sender(self, action):
- if self._senderExec == None:
- raise Exception("Environment variable SENDER is not set")
- cmd = "%s %s" % (self._senderExec, action)
- #print cmd
- return subprocess.Popen(cmd.split(), stdin = subprocess.PIPE)
-
- def createReciever(self, nodeNumber, clusterName, queueName, numMsgs = None, receiverArgs = None):
- port = self.getNodeTuple(nodeNumber, clusterName)[self.PORT]
- action = "--port %d --queue %s" % (port, queueName)
- if numMsgs != None:
- action += " --messages %d" % numMsgs
- if receiverArgs != None:
- action += " %s" % receiverArgs
- return self._receiver(action)
-
- def createSender(self, nodeNumber, clusterName, exchangeName, routingKey, senderArgs = None):
- port = self.getNodeTuple(nodeNumber, clusterName)[self.PORT]
- action = "--port %d --exchange %s" % (port, exchangeName)
- if routingKey != None and len(routingKey) > 0:
- action += " --routing-key %s" % routingKey
- if self._storeEnable:
- action += " --durable yes"
- if senderArgs != None:
- action += " %s" % senderArgs
- return self._sender(action)
-
- def createBindDirectExchangeQueue(self, nodeNumber, clusterName, exchangeName, queueName):
- self.addExchange(nodeNumber, clusterName, "direct", exchangeName)
- self.addQueue(nodeNumber, clusterName, queueName)
- self.bind(nodeNumber, clusterName, exchangeName, queueName, queueName)
-
- def createBindTopicExchangeQueues(self, nodeNumber, clusterName, exchangeName, queueNameKeyList):
- self.addExchange(nodeNumber, clusterName, "topic", exchangeName)
- for queueName, key in queueNameKeyList.iteritems():
- self.addQueue(nodeNumber, clusterName, queueName)
- self.bind(nodeNumber, clusterName, exchangeName, queueName, key)
-
- def createBindFanoutExchangeQueues(self, nodeNumber, clusterName, exchangeName, queueNameList):
- self.addExchange(nodeNumber, clusterName, "fanout", exchangeName)
- for queueName in queueNameList:
- self.addQueue(nodeNumber, clusterName, queueName)
- self.bind(nodeNumber, clusterName, exchangeName, queueName, "")
-
- def sendMsgs(self, nodeNumber, clusterName, exchangeName, routingKey, numMsgs, msgSize = None, wait = True):
- msgs = self._makeMessageList(numMsgs, msgSize)
- sender = self.createSender(nodeNumber, clusterName, exchangeName, routingKey)
- sender.stdin.write(msgs)
- sender.stdin.close()
- if wait:
- sender.wait()
- return msgs
-
- def receiveMsgs(self, nodeNumber, clusterName, queueName, numMsgs, wait = True):
- receiver = self.createReciever(nodeNumber, clusterName, queueName, numMsgs)
- cnt = 0
- msgs = ""
- while cnt < numMsgs:
- rx = receiver.stdout.readline()
- if rx == "" and receiver.poll() != None: break
- msgs += rx
- cnt = cnt + 1
- if wait:
- receiver.wait()
- return msgs
-
-
- # --- Exchange-specific helper inner classes ---
-
- class TestHelper:
- """
- This is a "virtual" superclass for test helpers, and is not useful on its own, but the
- per-exchange subclasses are designed to keep track of the messages sent to and received
- from queues which have bindings to that exchange type.
- """
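-        # A rough usage sketch (cluster, exchange and queue names are hypothetical): a
-        # per-exchange subclass instance drives a whole test from within a TestBaseCluster
-        # test method, e.g.
-        #
-        #     helper = self.DirectExchangeTestHelper(self, "TestCluster0", 2, "test-ex", ["q1", "q2"])
-        #     helper.sendMsgs(10)      # 10 messages to each bound queue
-        #     helper.killNode(0)       # simulate a node failure
-        #     helper.finalizeTest()    # drain remaining messages and verify send/receive match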
-
- def __init__(self, testBaseCluster, clusterName, numNodes, exchangeName, queueNameList):
-
- """Dictionary of queues and lists of messages sent to them."""
- self._txMsgs = {}
- """Dictionary of queues and lists of messages received from them."""
- self._rxMsgs = {}
- """List of node numbers currently in the cluster"""
- self._nodes = []
- """List of node numbers which have been killed and can therefore be recovered"""
- self._deadNodes = []
- """Last node to be used"""
- self._lastNode = None
-
- self._testBaseCluster = testBaseCluster
- self._clusterName = clusterName
- self._exchangeName = exchangeName
- self._queueNameList = queueNameList
- self._addQueues(queueNameList)
- self._testBaseCluster.createCheckCluster(clusterName, numNodes)
- self._nodes.extend(range(0, numNodes))
-
- def _addQueues(self, queueNameList):
- for qn in queueNameList:
- if not qn in self._txMsgs:
- self._txMsgs[qn] = []
- if not qn in self._rxMsgs:
- self._rxMsgs[qn] = []
-
- def _bindQueue(self, queueName, bindingKey, nodeNumber = None):
- """Bind a queue to an exchange using a binding key."""
- if nodeNumber == None:
- nodeNumber = self._nodes[0] # first available node
- self._testBaseCluster.addQueue(nodeNumber, self._clusterName, queueName)
- self._testBaseCluster.bind(nodeNumber, self._clusterName, self._exchangeName, queueName, bindingKey)
-
- def _highestNodeNumber(self):
- """Find the highest node number used so far between the current nodes and those stopped/killed."""
- highestNode = self._nodes[-1]
- if len(self._deadNodes) == 0:
- return highestNode
- highestDeadNode = self._deadNodes[-1]
- if highestNode > highestDeadNode:
- return highestNode
- return highestDeadNode
-
- def killCluster(self):
- """Kill all nodes in the cluster"""
- self._testBaseCluster.killCluster(self._clusterName)
- self._testBaseCluster.checkNumClusterBrokers(self._clusterName, 0)
- self._deadNodes.extend(self._nodes)
- self._deadNodes.sort()
- del self._nodes[:]
-
- def restoreCluster(self, lastNode = None, restoreNodes = True):
- """Restore a previously killed cluster"""
- self._testBaseCluster.createCluster(self._clusterName)
- if restoreNodes:
- numNodes = len(self._deadNodes)
- self.restoreNodes(lastNode)
- self._testBaseCluster.checkNumClusterBrokers(self._clusterName, numNodes)
-
- def addNodes(self, numberOfNodes = 1):
- """Add a fixed number of nodes to the cluster."""
- nodeStart = self._highestNodeNumber() + 1
- for i in range(0, numberOfNodes):
- nodeNumber = nodeStart + i
- self._testBaseCluster.createClusterNode(nodeNumber, self._clusterName)
- self._nodes.append(nodeNumber)
- self._testBaseCluster.checkNumClusterBrokers(self._clusterName, len(self._nodes))
- self._testBaseCluster.waitForNodes(self._clusterName)
-
- def restoreNode(self, nodeNumber):
- """Restore a cluster node that has been previously killed"""
- if nodeNumber not in self._deadNodes:
- raise Exception("restoreNode(): Node number %d not in dead node list %s" % (nodeNumber, self._deadNodes))
- self._testBaseCluster.createClusterNode(nodeNumber, self._clusterName)
- self._deadNodes.remove(nodeNumber)
- self._nodes.append(nodeNumber)
- self._nodes.sort()
-
- def restoreNodes(self, lastNode = None):
- """Restore all known cluster nodes that have been previously killed starting with a known last-used node"""
- if len(self._nodes) == 0: # restore last-used node first
- if lastNode == None:
- lastNode = self._lastNode
- self.restoreNode(lastNode)
- while len(self._deadNodes) > 0:
- self.restoreNode(self._deadNodes[0])
- self._testBaseCluster.waitForNodes(self._clusterName)
-
- def killNode(self, nodeNumber):
- """Kill a cluster node (if it is in the _nodes list)."""
- if nodeNumber not in self._nodes:
- raise Exception("killNode(): Node number %d not in node list %s" % (nodeNumber, self._nodes))
- self._testBaseCluster.killNode(nodeNumber, self._clusterName)
- self._nodes.remove(nodeNumber)
- self._deadNodes.append(nodeNumber)
- self._deadNodes.sort()
-
- def sendMsgs(self, routingKey, numMsgs, nodeNumber = None, msgSize = None, wait = True):
- """Send a fixed number of messages using the given routing key."""
- if nodeNumber == None:
- nodeNumber = self._nodes[0] # Use first available node
- msgs = self._testBaseCluster._makeMessageList(numMsgs, msgSize)
- sender = self._testBaseCluster.createSender(nodeNumber, self._clusterName, self._exchangeName, routingKey)
- sender.stdin.write(msgs)
- sender.stdin.close()
- if wait:
- sender.wait()
- self._lastNode = nodeNumber
- return msgs.split()
-
-        # TODO - this interface is messy: only one numMsgs value can be given, but a list of
-        # queues, so numMsgs is assumed to apply to each queue.
-        # A mechanism is needed to specify a different numMsgs per queue.
- def receiveMsgs(self, numMsgs, nodeNumber = None, queueNameList = None, wait = True):
- """Receive a fixed number of messages from a named queue. If numMsgs == None, get all remaining messages."""
- if nodeNumber == None:
- nodeNumber = self._nodes[0] # Use first available node
- if queueNameList == None:
- queueNameList = self._txMsgs.iterkeys()
- for qn in queueNameList:
- nm = numMsgs
- if nm == None:
- nm = len(self._txMsgs[qn]) - len(self._rxMsgs[qn]) # get all remaining messages
- if nm > 0:
- while nm > 0:
- receiver = self._testBaseCluster.createReciever(nodeNumber, self._clusterName, qn, nm)
- cnt = 0
- while cnt < nm:
- rx = receiver.stdout.readline().strip()
- if rx == "":
- if receiver.poll() != None: break
- elif rx not in self._rxMsgs[qn]:
- self._rxMsgs[qn].append(rx)
- cnt = cnt + 1
- nm = nm - cnt
- if wait:
- receiver.wait()
- self._rxMsgs[qn].sort()
- self._lastNode = nodeNumber
-
- def receiveRemainingMsgs(self, nodeNumber = None, queueNameList = None, wait = True):
- """Receive all remaining messages on named queue."""
- self.receiveMsgs(None, nodeNumber, queueNameList, wait)
-
- def checkMsgs(self):
- """Return True if all expected messages have been received (ie the transmit and receive list are identical)."""
- txMsgTot = 0
- rxMsgTot = 0
- for qn, txMsgList in self._txMsgs.iteritems():
- rxMsgList = self._rxMsgs[qn]
- txMsgTot = txMsgTot + len(txMsgList)
- rxMsgTot = rxMsgTot + len(rxMsgList)
- if len(txMsgList) != len(rxMsgList):
- return False
- for i, m in enumerate(txMsgList):
- if m != rxMsgList[i]:
- return False
- if txMsgTot == 0 and rxMsgTot == 0:
-            print "WARNING: No messages were sent or received"
- return True
-
- def finalizeTest(self):
- """Recover all the remaining messages on all queues, then check that all expected messages were received."""
- self.receiveRemainingMsgs()
- self._testBaseCluster.stopAllCheck()
- if not self.checkMsgs():
- self.printMsgs()
- self._testBaseCluster.fail("Send - receive message mismatch")
-
- def printMsgs(self, txMsgs = True, rxMsgs = True):
- """Print all messages transmitted and received."""
- for qn, txMsgList in self._txMsgs.iteritems():
- print "Queue: %s" % qn
- if txMsgs:
- print " txMsgList = %s" % txMsgList
- if rxMsgs:
- rxMsgList = self._rxMsgs[qn]
- print " rxMsgList = %s" % rxMsgList
-
-
- class DirectExchangeTestHelper(TestHelper):
-
- def __init__(self, testBaseCluster, clusterName, numNodes, exchangeName, queueNameList):
- TestBaseCluster.TestHelper.__init__(self, testBaseCluster, clusterName, numNodes, exchangeName, queueNameList)
- self._testBaseCluster.addExchange(0, clusterName, "direct", exchangeName)
- for qn in queueNameList:
- self._bindQueue(qn, qn)
-
- def addQueues(self, queueNameList):
- self._addQueues(queueNameList)
- for qn in queueNameList:
- self._bindQueue(qn, qn)
-
- def sendMsgs(self, numMsgs, nodeNumber = None, queueNameList = None, msgSize = None, wait = True):
- if queueNameList == None:
- queueNameList = self._txMsgs.iterkeys()
- for qn in queueNameList:
- self._txMsgs[qn].extend(TestBaseCluster.TestHelper.sendMsgs(self, qn, numMsgs, nodeNumber, msgSize, wait))
-
-
- class TopicExchangeTestHelper(TestHelper):
-
- def __init__(self, testBaseCluster, clusterName, numNodes, exchangeName, queueNameKeyList):
- self._queueNameKeyList = queueNameKeyList
- TestBaseCluster.TestHelper.__init__(self, testBaseCluster, clusterName, numNodes, exchangeName, queueNameKeyList.iterkeys())
- self._testBaseCluster.addExchange(0, clusterName, "topic", exchangeName)
- for qn, bk in queueNameKeyList.iteritems():
- self._bindQueue(qn, bk)
-
- def addQueues(self, queueNameKeyList):
- self._addQueues(queueNameKeyList.iterkeys())
- for qn, bk in queueNameKeyList.iteritems():
- self._bindQueue(qn, bk)
-
- def _prepareRegex(self, bk):
- # This regex conversion is not very complete - there are other chars that should be escaped too
- return "^%s$" % bk.replace(".", r"\.").replace("*", r"[^.]*").replace("#", ".*")
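-        # For example (binding keys illustrative): "usa.*" becomes "^usa\.[^.]*$" and
-        # "#.news" becomes "^.*\.news$".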
-
- def sendMsgs(self, routingKey, numMsgs, nodeNumber = None, msgSize = None, wait = True):
- msgList = TestBaseCluster.TestHelper.sendMsgs(self, routingKey, numMsgs, nodeNumber, msgSize, wait)
- for qn, bk in self._queueNameKeyList.iteritems():
- if re.match(self._prepareRegex(bk), routingKey):
- self._txMsgs[qn].extend(msgList)
-
-
- class FanoutExchangeTestHelper(TestHelper):
-
- def __init__(self, testBaseCluster, clusterName, numNodes, exchangeName, queueNameList):
- TestBaseCluster.TestHelper.__init__(self, testBaseCluster, clusterName, numNodes, exchangeName, queueNameList)
- self._testBaseCluster.addExchange(0, clusterName, "fanout", exchangeName)
- for qn in queueNameList:
- self._bindQueue(qn, "")
-
- def addQueues(self, queueNameList):
- self._addQueues(queueNameList)
- for qn in queueNameList:
- self._bindQueue(qn, "")
-
- def sendMsgs(self, numMsgs, nodeNumber = None, msgSize = None, wait = True):
- msgList = TestBaseCluster.TestHelper.sendMsgs(self, "", numMsgs, nodeNumber, msgSize, wait)
- for ml in self._txMsgs.itervalues():
- ml.extend(msgList)
-
diff --git a/qpid/cpp/src/tests/verify_cluster_objects b/qpid/cpp/src/tests/verify_cluster_objects
deleted file mode 100755
index 94661cf6b9..0000000000
--- a/qpid/cpp/src/tests/verify_cluster_objects
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/env python
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Verify management objects are consistent in a cluster.
-# Arguments: url of one broker in the cluster.
-
-import qmf.console, sys, re
-
-class Session(qmf.console.Session):
- """A qmf.console.Session that caches useful values"""
-
- def __init__(self):
- qmf.console.Session.__init__(self)
- self.classes = None
-
- def all_classes(self):
- if self.classes is None:
- self.classes = [c for p in self.getPackages() for c in self.getClasses(p)]
- return self.classes
-
-class Broker:
- def __init__(self, url, qmf):
- self.url = url
- self.qmf = qmf
- self.broker = self.qmf.addBroker(url)
- self.broker._waitForStable()
- self.objects = None
- self.ignore_list = [ re.compile("org.apache.qpid.broker:system:") ]
-
- def get_objects(self):
- def ignore(name):
- for m in self.ignore_list:
- if m.match(name): return True
- if self.objects is None:
- obj_list = []
- ignored=0
- for c in self.qmf.all_classes():
- for o in self.qmf.getObjects(_key=c, _broker=self.broker):
- name=o.getObjectId().getObject()
- if not ignore(name): obj_list.append(name)
- else: ignored += 1
- self.objects = set(obj_list)
- if (len(obj_list) != len(self.objects)):
- raise Exception("Duplicates in object list for %s"%(self.url))
- print "%d objects on %s, ignored %d."%(len(self.objects), self.url, ignored)
- return self.objects
-
- def compare(self,other):
- def compare1(x,y):
- diff = x.get_objects() - y.get_objects()
- if diff:
- print "ERROR: found on %s but not %s"%(x, y)
- for o in diff: print " %s"%(o)
- return False
- return True
-
- so = compare1(self, other)
- os = compare1(other, self)
- return so and os
-
- def __str__(self): return self.url
-
- def get_cluster(self):
- """Given one Broker, return list of all brokers in its cluster"""
- clusters = self.qmf.getObjects(_class="cluster")
-        if not clusters: raise Exception("%s is not a cluster member" % (self.url))
- def first_address(url):
-            """Python doesn't understand the broker's URL syntax; extract a simple address"""
- return re.compile("amqp:tcp:([^,]*)").match(url).group(1)
- return [Broker(first_address(url), self.qmf)
- for url in clusters[0].members.split(";")]
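-    # For example, a member URL such as "amqp:tcp:192.0.2.1:5672" (address illustrative)
-    # yields "192.0.2.1:5672" from first_address() above.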
-
- def __del__(self): self.qmf.delBroker(self.broker)
-
-def main(argv=None):
- if argv is None: argv = sys.argv
- qmf = Session()
- brokers = Broker(argv[1], qmf).get_cluster()
- print "%d members in cluster."%(len(brokers))
- base = brokers.pop(0)
- try:
- for b in brokers:
- if not base.compare(b): return 1
- print "No differences."
- return 0
- finally:
- del base
- del brokers
-
-if __name__ == "__main__": sys.exit(main())
diff --git a/qpid/cpp/src/versions.cmake b/qpid/cpp/src/versions.cmake
index d7120c680d..f7e42e3688 100644
--- a/qpid/cpp/src/versions.cmake
+++ b/qpid/cpp/src/versions.cmake
@@ -35,4 +35,4 @@ set (qpidmessaging_version 2.0.0)
set (qpidtypes_version 1.0.0)
set (rdmawrap_version 2.0.0)
set (sslcommon_version 2.0.0)
-
+set (legacystore_version 1.0.0)
diff --git a/qpid/cpp/src/windows/QpiddBroker.cpp b/qpid/cpp/src/windows/QpiddBroker.cpp
index de2e41dd4d..b383b7d6c7 100644
--- a/qpid/cpp/src/windows/QpiddBroker.cpp
+++ b/qpid/cpp/src/windows/QpiddBroker.cpp
@@ -27,6 +27,7 @@
#include "qpid/Plugin.h"
#include "qpid/sys/IntegerTypes.h"
#include "qpid/sys/windows/check.h"
+#include "qpid/sys/Thread.h"
#include "qpid/broker/Broker.h"
#include <iostream>
diff --git a/qpid/cpp/xml/cluster.xml b/qpid/cpp/xml/cluster.xml
deleted file mode 100644
index 09434ea37b..0000000000
--- a/qpid/cpp/xml/cluster.xml
+++ /dev/null
@@ -1,339 +0,0 @@
-<?xml version="1.0"?>
-<!--
--
-- Licensed to the Apache Software Foundation (ASF) under one
-- or more contributor license agreements. See the NOTICE file
-- distributed with this work for additional information
-- regarding copyright ownership. The ASF licenses this file
-- to you under the Apache License, Version 2.0 (the
-- "License"); you may not use this file except in compliance
-- with the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing,
-- software distributed under the License is distributed on an
-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- KIND, either express or implied. See the License for the
-- specific language governing permissions and limitations
-- under the License.
--
--->
-
-<!--
-- NOTE: if you make changes to this XML you must update CLUSTER_VERSION
-- in src/qpid/cluster/Cluster.cpp
--->
-
-<amqp major="0" minor="10" port="5672">
-
- <!-- Controls sent between cluster nodes. -->
-
- <class name = "cluster" code = "0x80" label="Qpid clustering extensions.">
- <doc>Qpid extension class to allow clustered brokers to communicate.</doc>
-
- <!-- New joiner requests an update to url. -->
- <control name="update-request" code="0x1">
- <field name="url" type="str16"/>
- </control>
-
- <!-- Sender offers an update to a new joiner. -->
- <control name = "update-offer" code="0x2">
- <field name="updatee" type="uint64"/>
- </control>
-
- <!-- Sender retracts an offer to a new joiner. -->
- <control name = "retract-offer" code="0x3">
- <field name="updatee" type="uint64"/>
- </control>
-
- <!-- Possible states for persistent store -->
- <domain name="store-state" type="uint8">
- <enum>
- <choice name="no-store" value="0"/>
- <choice name="empty-store" value="1"/>
- <choice name="clean-store" value="2"/>
- <choice name="dirty-store" value="3"/>
- </enum>
- </domain>
-
- <!-- Status exchanged when new members join the cluster. -->
- <control name="initial-status" code="0x5">
- <field name="version" type="uint32"/>
- <field name="active" type="bit"/>
-      <field name="cluster-id" type="uuid"/>
- <field name="store-state" type="store-state"/>
- <field name="shutdown-id" type="uuid"/>
- <field name="first-config" type="str16"/>
- <field name="urls" type="array"/> <!-- Array of str16 -->
- </control>
-
- <!-- New member or updater is ready as an active member. -->
- <control name="ready" code="0x10">
- <field name="url" type="str16"/>
- </control>
-
- <control name="config-change" code="0x11" label="Raw cluster membership.">
- <field name="members" type="vbin16"/> <!-- packed member-id array -->
- <field name="joined" type="vbin16"/> <!-- packed member-id array -->
- <field name="left" type="vbin16"/> <!-- packed member-id array -->
- </control>
-
- <domain name="error-type" type="uint8" label="Types of error">
- <enum>
- <choice name="none" value="0"/>
- <choice name="session" value="1"/>
- <choice name="connection" value="2"/>
- </enum>
- </domain>
-
- <!-- Check for error consistency across the cluster -->
- <control name="error-check" code="0x14">
- <field name="type" type="error-type"/>
- <field name="frame-seq" type="sequence-no"/>
- </control>
-
- <!-- Synchronize timer tasks across the cluster -->
- <control name="timer-wakeup" code="0x15">
- <field name="name" type="str16"/>
- </control>
-
- <control name="timer-drop" code="0x16">
- <field name="name" type="str16"/>
- </control>
-
- <!-- Shut down the entire cluster -->
- <control name="shutdown" code="0x20">
- <field name="shutdown-id" type="uuid"/>
- </control>
-
- <!-- Deliver a message to a queue -->
- <control name="deliver-to-queue" code="0x21">
- <field name="queue" type="str16"/>
- <field name="message" type="vbin32"/>
- </control>
-
- <!-- Update the cluster time -->
- <control name="clock" code="0x22">
- <field name="time" type="uint64"/>
- </control>
-
- </class>
-
- <!-- Controls associated with a specific connection. -->
-
- <class name="cluster-connection" code="0x81" label="Qpid clustering extensions.">
-
- <!-- Announce a new connection -->
- <control name="announce" code="0x1">
- <field name="management-id" type="str16"/>
- <!-- Security Strength Factor (ssf): if the transport provides
- encryption (e.g. ssl), ssf is the bit length of the key. Zero if no
- encryption provided. -->
- <field name="ssf" type="uint32"/>
- <!-- external auth id (e.g. ssl client certificate id) -->
- <field name="authid" type="str16"/>
- <!-- exclude certain sasl mechs, used with ssl and sasl-external -->
- <field name="nodict" type="bit"/>
- <!-- User name as negotiated by SASL -->
- <field name="username" type="str32"/>
- <!-- Frames forming the initial connection negotiation. -->
- <field name="initial-frames" type="str32"/>
- </control>
-
- <!-- Marks the cluster-wide point when a connection is considered closed. -->
- <control name="deliver-close" code="0x2"/>
-
- <!-- Permission to generate output up to the limit. -->
- <control name="deliver-do-output" code="0x3">
- <field name="limit" type="uint32"/>
- </control>
-
- <!-- Abort a connection that is sending invalid data. -->
- <control name="abort" code="0x4"/>
-
- <!-- Update controls. Sent to a new broker in joining mode.
-       A connection is updated as follows:
-       - send the shadow's management ID in shadow-prepare on the update connection
-       - open the shadow as a normal connection.
-       - attach sessions, create consumers, set flow with normal AMQP commands.
-       - send/reset additional session state with the controls below.
- - send shadow-ready to mark end of shadow update.
- - send membership when entire update is complete.
- -->
- <!-- Send the user-id for an update connection. -->
- <control name="shadow-set-user" code="0x0E">
- <field name="user-id" type="str16"/>
- </control>
-
- <!-- Prepare to send a shadow connection with the given ID. -->
- <control name="shadow-prepare" code="0x0F">
- <field name="management-id" type="str16"/>
- </control>
-
- <!-- Consumer state that cannot be set by standard AMQP controls. -->
- <control name="consumer-state" code="0x10">
- <field name="name" type="str8"/>
- <field name="blocked" type="bit"/>
- <field name="notifyEnabled" type="bit"/>
- <field name="position" type="sequence-no"/>
- <field name="used-msg-credit" type="uint32"/>
- <field name="used-byte-credit" type="uint32"/>
- <field name="deliveryCount" type="uint32"/>
- </control>
-
- <!-- Delivery-record for outgoing messages sent but not yet accepted. -->
- <control name="delivery-record" code ="0x11">
- <field name="queue" type="str8"/>
- <field name="position" type="sequence-no"/>
- <field name="tag" type="str8"/>
- <field name="id" type="sequence-no"/>
- <field name="acquired" type="bit"/> <!--If not set, message is on update queue. -->
- <field name="accepted" type="bit"/>
- <field name="cancelled" type="bit"/>
- <field name="completed" type="bit"/>
- <field name="ended" type="bit"/>
- <field name="windowing" type="bit"/>
- <field name="enqueued" type="bit"/>
- <field name="credit" type="uint32"/>
- </control>
-
- <!-- Tx transaction state. -->
- <control name="tx-start" code="0x12"/>
- <control name="tx-accept" code="0x13"> <field name="commands" type="sequence-set"/> </control>
- <control name="tx-dequeue" code="0x14"> <field name="queue" type="str8"/> </control>
- <control name="tx-enqueue" code="0x15"> <field name="queue" type="str8"/> </control>
- <control name="tx-publish" code="0x16">
- <field name="queues" type="array"/> <!--Array of str8 -->
- <field name="delivered" type="bit"/>
- </control>
- <control name="tx-end" code="0x17"/>
- <control name="accumulated-ack" code="0x18"> <field name="commands" type="sequence-set"/> </control>
-
- <!-- Consumers in the connection's output task -->
- <control name="output-task" code="0x19">
- <field name="channel" type="uint16"/>
- <field name="name" type="str8"/>
- </control>
-
- <!-- Dtx transaction state. -->
- <control name="dtx-start" code="0x1A">
- <field name="xid" type="str16"/>
- <field name="ended" type="bit"/>
- <field name="suspended" type="bit"/>
- <field name="failed" type="bit"/>
- <field name="expired" type="bit"/>
- </control>
- <control name="dtx-end" code="0x1B"/>
-
- <control name="dtx-ack" code="0x1C"/>
-
- <control name="dtx-buffer-ref" code="0x1D">
- <field name="xid" type="str16"/>
- <field name="index" type="uint32"/>
- <field name="suspended" type="bit"/>
- </control>
-
- <control name="dtx-work-record" code="0x1E">
- <field name="xid" type="str16"/>
- <field name="prepared" type="bit"/>
- <field name="timeout" type="uint32"/>
- </control>
-
- <!-- Complete a session state update. -->
- <control name="session-state" code="0x1F">
- <!-- Target session deduced from channel number. -->
- <field name="replay-start" type="sequence-no"/> <!-- Replay frames will start from this point.-->
- <field name="command-point" type="sequence-no"/> <!-- Id of next command sent -->
- <field name="sent-incomplete" type="sequence-set"/> <!-- Commands sent and incomplete. -->
-
- <field name="expected" type="sequence-no"/> <!-- Next command expected. -->
- <field name="received" type="sequence-no"/> <!-- Received up to here (>= expected) -->
- <field name="unknown-completed" type="sequence-set"/> <!-- Completed but not known to peer. -->
- <field name="received-incomplete" type="sequence-set"/> <!-- Received and incomplete -->
- <field name="dtx-selected" type="bit"/>
- </control>
-
- <!-- Complete a shadow connection update. -->
- <control name="shadow-ready" code="0x20" label="End of shadow connection update.">
- <field name="member-id" type="uint64"/>
- <field name="connection-id" type="uint64"/>
- <field name="management-id" type="str16"/>
- <field name="user-name" type="str8"/>
- <field name="fragment" type="str32"/>
- <field name="send-max" type="uint32"/>
- </control>
-
- <!-- Complete a cluster state update. -->
- <control name="membership" code="0x21" label="Cluster membership details.">
- <field name="joiners" type="map"/> <!-- member-id -> URL -->
- <field name="members" type="map"/> <!-- member-id -> state -->
- <field name="frame-seq" type="sequence-no"/> <!-- frame sequence number -->
- </control>
-
- <!-- Updater cannot fulfill an update offer. -->
- <control name = "retract-offer" code="0x22"/>
-
- <!-- Set the position of a replicated queue. -->
- <control name="queue-position" code="0x30">
- <field name="queue" type="str8"/>
- <field name="position" type="sequence-no"/>
- </control>
-
- <!-- Replicate encoded exchanges/queues. -->
- <control name="exchange" code="0x31"><field name="encoded" type="str32"/></control>
-
- <!-- Add a listener to a queue -->
- <control name="add-queue-listener" code="0x34">
- <field name="queue" type="str8"/>
- <field name="consumer" type="uint32"/>
- </control>
-
- <!-- added by jrd. propagate a management-setup-state widget -->
- <control name="management-setup-state" code="0x36">
- <field name="objectNum" type="uint64"/>
- <field name="bootSequence" type="uint16"/>
- <field name="broker-id" type="uuid"/>
- <field name="vendor" type="str32"/>
- <field name="product" type="str32"/>
- <field name="instance" type="str32"/>
- </control>
-
- <!-- Replicate encoded config objects - e.g. links and bridges. -->
- <control name="config" code="0x37"><field name="encoded" type="str32"/></control>
-
- <!-- Set the fairshare delivery related state of a replicated queue. -->
- <control name="queue-fairshare-state" code="0x38">
- <field name="queue" type="str8"/>
- <field name="position" type="uint8"/>
- <field name="count" type="uint8"/>
- </control>
-
- <!-- Replicate a QueueObserver for a given queue. -->
- <control name="queue-observer-state" code="0x39">
- <field name="queue" type="str8"/>
- <field name="observer-id" type="str8"/>
- <field name="state" type="map"/> <!-- "name"=value -->
- </control>
-
- <!-- Update the cluster time -->
- <control name="clock" code="0x40">
- <field name="time" type="uint64"/>
- </control>
-
- <!-- Update a queue's dequeue rate -->
- <control name="queue-dequeue-since-purge-state" code="0x41">
- <field name="queue" type="str8"/>
- <field name="dequeueSincePurge" type="uint32"/>
- </control>
-
- <!-- Replicate the internal state for an object - e.g. Links, bridges, etc -->
- <control name="internal-state" code="0x42">
- <field name="type" type="str8"/> <!-- The type of object the state is for (e.g. 'link') -->
- <field name="name" type="str8"/> <!-- Identifies the particular object to be updated -->
- <field name="state" type="map"/> <!-- The internal state for the object -->
- </control>
-
- </class>
-
-</amqp>
diff --git a/qpid/doc/book/src/cpp-broker/AMQP-Messaging-Broker-CPP-Book.xml b/qpid/doc/book/src/cpp-broker/AMQP-Messaging-Broker-CPP-Book.xml
index 228c6a5e15..6122b12e18 100644
--- a/qpid/doc/book/src/cpp-broker/AMQP-Messaging-Broker-CPP-Book.xml
+++ b/qpid/doc/book/src/cpp-broker/AMQP-Messaging-Broker-CPP-Book.xml
@@ -53,7 +53,6 @@
<xi:include href="Security.xml"/>
<xi:include href="LVQ.xml"/>
<xi:include href="queue-state-replication.xml"/>
- <xi:include href="Active-Active-Cluster.xml"/>
<xi:include href="producer-flow-control.xml"/>
<xi:include href="AMQP-Compatibility.xml"/>
<xi:include href="Qpid-Interoperability-Documentation.xml"/>
diff --git a/qpid/doc/book/src/cpp-broker/Active-Active-Cluster.xml b/qpid/doc/book/src/cpp-broker/Active-Active-Cluster.xml
deleted file mode 100644
index 28db3876e2..0000000000
--- a/qpid/doc/book/src/cpp-broker/Active-Active-Cluster.xml
+++ /dev/null
@@ -1,561 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!--
-
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing,
-software distributed under the License is distributed on an
-"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-KIND, either express or implied. See the License for the
-specific language governing permissions and limitations
-under the License.
-
--->
-
-<section id="chap-Messaging_User_Guide-Active_Active_Cluster">
- <title>Active-active Messaging Clusters</title>
- <para>
- Active-active Messaging Clusters provide fault tolerance by ensuring that every broker in a <firstterm>cluster</firstterm> has the same queues, exchanges, messages, and bindings, and allowing a client to <firstterm>fail over</firstterm> to a new broker and continue without any loss of messages if the current broker fails or becomes unavailable. <firstterm>Active-active</firstterm> refers to the fact that all brokers in the cluster can actively serve clients. Because all brokers are automatically kept in a consistent state, clients can connect to and use any broker in a cluster. Any number of messaging brokers can be run as one <firstterm>cluster</firstterm>, and brokers can be added to or removed from a cluster while it is in use.
- </para>
- <para>
- High Availability Messaging Clusters are implemented using the <ulink url="http://www.openais.org/">OpenAIS Cluster Framework</ulink>.
- </para>
- <para>
- An OpenAIS daemon runs on every machine in the cluster, and these daemons communicate using multicast on a particular address. Every qpidd process in a cluster joins a named group that is automatically synchronized using OpenAIS Closed Process Groups (CPG) — the qpidd processes multicast events to the named group, and CPG ensures that each qpidd process receives all the events in the same sequence. All members get an identical sequence of events, so they can all update their state consistently.
- </para>
- <para>
- Two messaging brokers are in the same cluster if
- <orderedlist>
- <listitem>
- <para>
- They run on hosts in the same OpenAIS cluster; that is, OpenAIS is configured with the same mcastaddr, mcastport and bindnetaddr, and
- </para>
-
- </listitem>
- <listitem>
- <para>
- They use the same cluster name.
- </para>
-
- </listitem>
-
- </orderedlist>
-
- </para>
- <para>
- High Availability Clustering has a cost: in order to allow each broker in a cluster to continue the work of any other broker, a cluster must replicate state for all brokers in the cluster. Because of this, the brokers in a cluster should normally be on a LAN; there should be fast and reliable connections between brokers. Even on a LAN, using multiple brokers in a cluster is somewhat slower than using a single broker without clustering. This may be counter-intuitive for people who are used to clustering in the context of High Performance Computing or High Throughput Computing, where clustering increases performance or throughput.
- </para>
-
- <para>
- High Availability Messaging Clusters should be used together with Red Hat Clustering Services (RHCS); without RHCS, clusters are vulnerable to the &#34;split-brain&#34; condition, in which a network failure splits the cluster into two sub-clusters that cannot communicate with each other. See the documentation on the <command>--cluster-cman</command> option for details on using RHCS with High Availability Messaging Clusters. See the <ulink url="http://sources.redhat.com/cluster/wiki">CMAN Wiki</ulink> for more detail on CMAN and split-brain conditions. Use the <command>--cluster-cman</command> option to enable RHCS when starting the broker.
- </para>
- <section id="sect-Messaging_User_Guide-High_Availability_Messaging_Clusters-Starting_a_Broker_in_a_Cluster">
- <title>Starting a Broker in a Cluster</title>
- <para>
- Clustering is implemented using the <filename>cluster.so</filename> module, which is loaded by default when you start a broker. To run brokers in a cluster, make sure they all use the same OpenAIS mcastaddr, mcastport, and bindnetaddr. All brokers in a cluster must also have the same cluster name — specify the cluster name in <filename>qpidd.conf</filename>:
- </para>
-
- <screen>cluster-name=&#34;local_test_cluster&#34;
- </screen>
- <para>
- On RHEL6, you must create the file <filename>/etc/corosync/uidgid.d/qpidd</filename> to tell Corosync the name of the user running the broker. By default, the user is qpidd:
- </para>
-
- <programlisting>
- uidgid {
- uid: qpidd
- gid: qpidd
- }
- </programlisting>
- <para>
- On RHEL5, the primary group for the process running qpidd must be the ais group. If you are running qpidd as a service, it is run as the <command>qpidd</command> user, which is already in the ais group. If you are running the broker from the command line, you must ensure that the primary group for the user running qpidd is ais. You can set the primary group using <command>newgrp</command>:
- </para>
-
- <screen>$ newgrp ais
- </screen>
- <para>
- You can then run the broker from the command line, specifying the cluster name as an option.
- </para>
-
- <screen>[jonathan@localhost]$ qpidd --cluster-name=&#34;local_test_cluster&#34;
- </screen>
- <para>
- All brokers in a cluster must have identical configuration, with a few exceptions noted below. They must load the same set of plug-ins, and have matching configuration files and command line arguments. They should also have identical ACL files and SASL databases if these are used. If one broker uses persistence, all must use persistence — a mix of transient and persistent brokers is not allowed. Differences in configuration can cause brokers to exit the cluster. For instance, if different ACL settings allow a client to access a queue on broker A but not on broker B, then publishing to the queue will succeed on A and fail on B, so B will exit the cluster to prevent inconsistency.
- </para>
- <para>
- The following settings can differ for brokers on a given cluster:
- </para>
- <itemizedlist>
- <listitem>
- <para>
- logging options
- </para>
-
- </listitem>
- <listitem>
- <para>
- cluster-url — if set, it will be different for each broker.
- </para>
-
- </listitem>
- <listitem>
- <para>
- port — brokers can listen on different ports.
- </para>
-
- </listitem>
-
- </itemizedlist>
- <para>
- The qpid log contains entries that record significant clustering events, e.g. when a broker becomes a member of a cluster, the membership of a cluster is changed, or an old journal is moved out of the way. For instance, the following message states that a broker has been added to a cluster as the first node:
- </para>
-
- <screen>
- 2009-07-09 18:13:41 info 127.0.0.1:1410(READY) member update: 127.0.0.1:1410(member)
- 2009-07-09 18:13:41 notice 127.0.0.1:1410(READY) first in cluster
- </screen>
- <note>
- <para>
- If you are using SELinux, the qpidd process and OpenAIS must have the same SELinux context, or else SELinux must be set to permissive mode. If both qpidd and OpenAIS are run as services, they have the same SELinux context. If both OpenAIS and qpidd are run as user processes, they have the same SELinux context. If one is run as a service, and the other is run as a user process, they have different SELinux contexts.
- </para>
-
- </note>
- <para>
- The following options are available for clustering:
- </para>
- <table frame="all" id="tabl-Messaging_User_Guide-Starting_a_Broker_in_a_Cluster-Options_for_High_Availability_Messaging_Cluster">
- <title>Options for High Availability Messaging Cluster</title>
- <tgroup align="left" cols="2" colsep="1" rowsep="1">
- <colspec colname="c1" colwidth="1*"></colspec>
- <colspec colname="c2" colwidth="4*"></colspec>
- <thead>
- <row>
- <entry align="center" nameend="c2" namest="c1">
- Options for High Availability Messaging Cluster
- </entry>
-
- </row>
-
- </thead>
- <tbody>
- <row>
- <entry>
- <command>--cluster-name <replaceable>NAME</replaceable></command>
- </entry>
- <entry>
- Name of the Messaging Cluster to join. A Messaging Cluster consists of all brokers started with the same cluster-name and openais configuration.
- </entry>
-
- </row>
- <row>
- <entry>
- <command>--cluster-size <replaceable>N</replaceable></command>
- </entry>
- <entry>
- Wait for at least N initial members before completing cluster initialization and serving clients. Use this option in a persistent cluster so that all brokers can exchange the status of their persistent stores and do consistency checks before serving clients.
- </entry>
-
- </row>
- <row>
- <entry>
- <command>--cluster-url <replaceable>URL</replaceable></command>
- </entry>
- <entry>
- An AMQP URL containing the local address that the broker advertizes to clients for fail-over connections. This is different for each host. By default, all local addresses for the broker are advertized. You only need to set this if
- <orderedlist>
- <listitem>
- <para>
- Your host has more than one active network interface, and
- </para>
-
- </listitem>
- <listitem>
- <para>
- You want to restrict client fail-over to a specific interface or interfaces.
- </para>
-
- </listitem>
-
- </orderedlist>
- <para>Each broker in the cluster is specified using the following form:</para>
-
- <programlisting>url = [&#34;amqp:&#34;][ user [&#34;/&#34; password] &#34;@&#34; ] protocol_addr
- (&#34;,&#34; protocol_addr)*
- protocol_addr = tcp_addr / rdma_addr / ssl_addr / ...
- tcp_addr = [&#34;tcp:&#34;] host [&#34;:&#34; port]
- rdma_addr = &#34;rdma:&#34; host [&#34;:&#34; port]
- ssl_addr = &#34;ssl:&#34; host [&#34;:&#34; port]</programlisting>
-
- <para>In most cases, only one address is advertized, but more than one address can be specified if the machine running the broker has more than one network interface card, and you want to allow clients to connect using multiple network interfaces. Use a comma delimiter (&#34;,&#34;) to separate brokers in the URL. Examples:</para>
- <itemizedlist>
- <listitem>
- <para>
- <command>amqp:tcp:192.168.1.103:5672</command> advertizes a single address to clients for failover.
- </para>
-
- </listitem>
- <listitem>
- <para>
- <command>amqp:tcp:192.168.1.103:5672,tcp:192.168.1.105:5672</command> advertizes two different addresses to clients for failover, on two different network interfaces.
- </para>
-
- </listitem>
-
- </itemizedlist>
-
- </entry>
-
- </row>
- <row>
- <entry>
- <command>--cluster-cman</command>
- </entry>
- <entry>
- <para>
- CMAN protects against the &#34;split-brain&#34; condition, in which a network failure splits the cluster into two sub-clusters that cannot communicate with each other. When &#34;split-brain&#34; occurs, each of the sub-clusters can access shared resources without knowledge of the other sub-cluster, resulting in corrupted cluster integrity.
- </para>
- <para>
- To avoid &#34;split-brain&#34;, CMAN uses the notion of a &#34;quorum&#34;. If more than half the cluster nodes are active, the cluster has quorum and can act. If half (or fewer) nodes are active, the cluster does not have quorum, and all cluster activity is stopped. There are other ways to define the quorum for particular use cases (e.g. a cluster of only 2 members); see the <ulink url="http://sources.redhat.com/cluster/wiki">CMAN Wiki</ulink>
- for more detail.
- </para>
- <para>
- When enabled, the broker will wait until it belongs to a quorate cluster before accepting client connections. It continually monitors the quorum status and shuts down immediately if the node it runs on loses touch with the quorum.
- </para>
-
- </entry>
-
- </row>
- <row>
- <entry>
- --cluster-username
- </entry>
- <entry>
- SASL username for connections between brokers.
- </entry>
-
- </row>
- <row>
- <entry>
- --cluster-password
- </entry>
- <entry>
- SASL password for connections between brokers.
- </entry>
-
- </row>
- <row>
- <entry>
- --cluster-mechanism
- </entry>
- <entry>
- SASL authentication mechanism for connections between brokers
- </entry>
-
- </row>
-
- </tbody>
-
- </tgroup>
-
- </table>
- <para>
- If a broker is unable to establish a connection to another broker in the cluster, the log will contain SASL errors, e.g:
- </para>
-
- <screen>2009-aug-04 10:17:37 info SASL: Authentication failed: SASL(-13): user not found: Password verification failed
- </screen>
- <para>
- You can set the SASL user name and password used to connect to other brokers using the <command>cluster-username</command> and <command>cluster-password</command> properties when you start the broker. In most environments, it is easiest to create an account with the same user name and password on each broker in the cluster, and use these as the <command>cluster-username</command> and <command>cluster-password</command>. You can also set the SASL mechanism using <command>cluster-mechanism</command>. Remember that any mechanism you enable for broker-to-broker communication can also be used by a client, so do not enable <command>cluster-mechanism=ANONYMOUS</command> in a secure environment.
- </para>
- <para>
- Once the cluster is running, run <command>qpid-cluster</command> to make sure that the brokers are running as one cluster. See the following section for details.
- </para>
- <para>
- If the cluster is correctly configured, queues and messages are replicated to all brokers in the cluster, so an easy way to test the cluster is to run a program that routes messages to a queue on one broker, then to a different broker in the same cluster and read the messages to make sure they have been replicated. The <command>drain</command> and <command>spout</command> programs can be used for this test.
- </para>
-
- </section>
-
- <section id="sect-Messaging_User_Guide-High_Availability_Messaging_Clusters-qpid_cluster">
- <title>qpid-cluster</title>
- <para>
- <command>qpid-cluster</command> is a command-line utility that allows you to view information on a cluster and its brokers, disconnect a client connection, shut down a broker in a cluster, or shut down the entire cluster. You can see the options using the <command>--help</command> option:
- </para>
-
- <screen>$ ./qpid-cluster --help
- </screen>
-
- <screen>Usage: qpid-cluster [OPTIONS] [broker-addr]
-
- broker-addr is in the form: [username/password@] hostname | ip-address [:&#60;port&#62;]
- ex: localhost, 10.1.1.7:10000, broker-host:10000, guest/guest@localhost
-
- Options:
- -C [--all-connections] View client connections to all cluster members
- -c [--connections] ID View client connections to specified member
- -d [--del-connection] HOST:PORT
- Disconnect a client connection
- -s [--stop] ID Stop one member of the cluster by its ID
- -k [--all-stop] Shut down the whole cluster
- -f [--force] Suppress the &#39;are-you-sure?&#39; prompt
- -n [--numeric] Don&#39;t resolve names
- </screen>
- <para>
- Let&#39;s connect to a cluster and display basic information about the cluster and its brokers. When you connect to the cluster using <command>qpid-cluster</command>, you can use the host and port for any broker in the cluster. For instance, if a broker in the cluster is running on <filename>localhost</filename> on port 6664, you can start <command>qpid-cluster</command> like this:
- </para>
-
- <screen>
- $ qpid-cluster localhost:6664
- </screen>
- <para>
- Here is the output:
- </para>
-
- <screen>
- Cluster Name: local_test_cluster
- Cluster Status: ACTIVE
- Cluster Size: 3
- Members: ID=127.0.0.1:13143 URL=amqp:tcp:192.168.1.101:6664,tcp:192.168.122.1:6664,tcp:10.16.10.62:6664
- : ID=127.0.0.1:13167 URL=amqp:tcp:192.168.1.101:6665,tcp:192.168.122.1:6665,tcp:10.16.10.62:6665
- : ID=127.0.0.1:13192 URL=amqp:tcp:192.168.1.101:6666,tcp:192.168.122.1:6666,tcp:10.16.10.62:6666
- </screen>
- <para>
- The ID for each broker in the cluster is given on the left. For instance, the ID for the first broker in the cluster is <command>127.0.0.1:13143</command>. The URL in the output is the broker&#39;s advertized address. Let&#39;s use the ID to shut the broker down using the <command>--stop</command> command:
- </para>
-
- <screen>$ ./qpid-cluster localhost:6664 --stop 127.0.0.1:13143
- </screen>
-
- </section>
-
- <section id="sect-Messaging_User_Guide-High_Availability_Messaging_Clusters-Failover_in_Clients">
- <title>Failover in Clients</title>
- <para>
- If a client is connected to a broker, the connection fails if the broker crashes or is killed. If heartbeat is enabled for the connection, a connection also fails if the broker hangs, the machine the broker is running on fails, or the network connection to the broker is lost — the connection fails no later than twice the heartbeat interval.
- </para>
- <para>
- When a client&#39;s connection to a broker fails, any sent messages that have been acknowledged to the sender will have been replicated to all brokers in the cluster, any received messages that have not yet been acknowledged by the receiving client are requeued to all brokers, and the client API notifies the application of the failure by throwing an exception.
- </para>
- <para>
- Clients can be configured to automatically reconnect to another broker when they receive such an exception. Any messages that have been sent by the client, but not yet acknowledged as delivered, are resent. Any messages that have been read by the client, but not acknowledged, are delivered to the client.
- </para>
- <para>
- TCP is slow to detect connection failures. A client can configure a connection to use a heartbeat to detect connection failure, and can specify a time interval for the heartbeat. If heartbeats are in use, failures will be detected no later than twice the heartbeat interval. The Java JMS client enables heartbeat by default. See the sections on Failover in Java JMS Clients and Failover in C++ Clients for the code to enable heartbeat.
- </para>
- <section id="sect-Messaging_User_Guide-Failover_in_Clients-Failover_in_Java_JMS_Clients">
- <title>Failover in Java JMS Clients</title>
- <para>
- In Java JMS clients, client failover is handled automatically if it is enabled in the connection. Any messages that have been sent by the client, but not yet acknowledged as delivered, are resent. Any messages that have been read by the client, but not acknowledged, are sent to the client.
- </para>
- <para>
- You can configure a connection to use failover using the <command>failover</command> property:
- </para>
-
- <screen>
- connectionfactory.qpidConnectionfactory = amqp://guest:guest@clientid/test?brokerlist=&#39;tcp://localhost:5672&#39;&amp;failover=&#39;failover_exchange&#39;
- </screen>
- <para>
- This property can take three values:
- </para>
- <variablelist id="vari-Messaging_User_Guide-Failover_in_Java_JMS_Clients-Failover_Modes">
- <title>Failover Modes</title>
- <varlistentry>
- <term>failover_exchange</term>
- <listitem>
- <para>
- If the connection fails, fail over to any other broker in the cluster.
- </para>
-
- </listitem>
-
- </varlistentry>
- <varlistentry>
- <term>roundrobin</term>
- <listitem>
- <para>
- If the connection fails, fail over to one of the brokers specified in the <command>brokerlist</command>.
- </para>
-
- </listitem>
-
- </varlistentry>
- <varlistentry>
- <term>singlebroker</term>
- <listitem>
- <para>
- Failover is not supported; the connection is to a single broker only.
- </para>
-
- </listitem>
-
- </varlistentry>
-
- </variablelist>
- <para>
- In a Connection URL, heartbeat is set using the <command>idle_timeout</command> property, which is an integer corresponding to the heartbeat period in seconds. For instance, the following line from a JNDI properties file sets the heartbeat time out to 3 seconds:
- </para>
-
- <screen>
- connectionfactory.qpidConnectionfactory = amqp://guest:guest@clientid/test?brokerlist=&#39;tcp://localhost:5672&#39;,idle_timeout=3
- </screen>
-
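- <para>
- As an illustration only, the following sketch assumes a <filename>jndi.properties</filename> file that defines the JNDI initial context factory and the <command>connectionfactory.qpidConnectionfactory</command> entry shown above (all names are illustrative). It looks up the factory, opens a connection and registers an <command>ExceptionListener</command>; with <command>failover_exchange</command>, reconnection is handled inside the client library, so the listener is typically only invoked if failover itself cannot complete:
- </para>
-
- <programlisting><![CDATA[
- import java.io.FileInputStream;
- import java.util.Properties;
-
- import javax.jms.Connection;
- import javax.jms.ConnectionFactory;
- import javax.jms.ExceptionListener;
- import javax.jms.JMSException;
- import javax.naming.Context;
- import javax.naming.InitialContext;
-
- public class FailoverExample {
-     public static void main(String[] args) throws Exception {
-         Properties properties = new Properties();
-         properties.load(new FileInputStream("jndi.properties")); // path is illustrative
-
-         Context context = new InitialContext(properties);
-         ConnectionFactory factory =
-             (ConnectionFactory) context.lookup("qpidConnectionfactory");
-
-         Connection connection = factory.createConnection();
-         connection.setExceptionListener(new ExceptionListener() {
-             public void onException(JMSException exception) {
-                 // Reached only when the client cannot fail over to another broker.
-                 System.err.println("Connection lost: " + exception.getMessage());
-             }
-         });
-         connection.start();
-
-         // ... create sessions, producers and consumers as usual ...
-
-         connection.close();
-         context.close();
-     }
- }
- ]]></programlisting>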
- </section>
-
- <section id="sect-Messaging_User_Guide-Failover_in_Clients-Failover_and_the_Qpid_Messaging_API">
- <title>Failover and the Qpid Messaging API</title>
- <para>
- The Qpid Messaging API also supports automatic reconnection in the event a connection fails. Senders can also be configured to replay any in-doubt messages (i.e. messages which were sent but not acknowledged by the broker). See &#34;Connection Options&#34; and &#34;Sender Capacity and Replay&#34; in <citetitle>Programming in Apache Qpid</citetitle> for details.
- </para>
- <para>
- In C++ and Python clients, heartbeats are disabled by default. You can enable them by specifying a heartbeat interval (in seconds) for the connection via the &#39;heartbeat&#39; option.
- </para>
- <para>
- See &#34;Cluster Failover&#34; in <citetitle>Programming in Apache Qpid</citetitle> for details on how to keep the client aware of cluster membership.
- </para>
-
- </section>
-
-
- </section>
-
- <section id="sect-Messaging_User_Guide-High_Availability_Messaging_Clusters-Error_handling_in_Clusters">
- <title>Error handling in Clusters</title>
- <para>
- If a broker crashes or is killed, or if a broker machine failure, broker connection failure, or broker hang is detected, the other brokers in the cluster are notified that it is no longer a member of the cluster. If a new broker joins the cluster, it synchronizes with an active broker to obtain the current cluster state; if this synchronization fails, the new broker exits the cluster and aborts.
- </para>
- <para>
- If a broker becomes extremely busy and stops responding, it stops accepting incoming work. All other brokers continue processing, and the non-responsive node caches all AIS traffic. When it resumes, the broker processes all cached AIS events, then accepts further incoming work. <!-- If a broker is non-responsive for too long, it is assumed to be hanging, and treated as described in the previous paragraph. -->
- </para>
- <para>
- Broker hangs are only detected if the watchdog plugin is loaded and the <command>--watchdog-interval</command> option is set. The watchdog plug-in kills the qpidd broker process if it becomes stuck for longer than the watchdog interval. In some cases, e.g. certain phases of error resolution, it is possible for a stuck process to hang other cluster members that are waiting for it to send a message. Using the watchdog, the stuck process is terminated and removed from the cluster, allowing other members to continue and clients of the stuck process to fail over to other members.
- </para>
- <para>
- Redundancy can also be achieved directly in the AIS network by specifying more than one network interface in the AIS configuration file. This causes Totem to use a redundant ring protocol, which makes failure of a single network transparent.
- </para>
- <para>
- Redundancy can be achieved at the operating system level by using NIC bonding, which combines multiple network ports into a single group, effectively aggregating the bandwidth of multiple interfaces into a single connection. This provides both network load balancing and fault tolerance.
- </para>
- <para>
- If any broker encounters an error, the brokers compare notes to see if they all received the same error. If not, the broker removes itself from the cluster and shuts itself down to ensure that all brokers in the cluster have consistent state. For instance, a broker may run out of disk space; if this happens, the broker shuts itself down. Examining the broker&#39;s log can help determine the error and suggest ways to prevent it from occurring in the future.
- </para>
- <!-- "Bad case" for cluster matrix - things we will fix, or things users may encounter long term? -->
- </section>
-
- <section id="sect-Messaging_User_Guide-High_Availability_Messaging_Clusters-Persistence_in_High_Availability_Message_Clusters">
- <title>Persistence in High Availability Message Clusters</title>
- <para>
- Persistence and clustering are two different ways to provide reliability. Most systems that use a cluster do not enable persistence, but you can do so if you want to ensure that messages are not lost even if the last broker in a cluster fails. A cluster must have all transient or all persistent members; mixed clusters are not allowed. Each broker in a persistent cluster has its own independent replica of the cluster&#39;s state in its store.
- </para>
- <section id="sect-Messaging_User_Guide-Persistence_in_High_Availability_Message_Clusters-Clean_and_Dirty_Stores">
- <title>Clean and Dirty Stores</title>
- <para>
- When a broker is an active member of a cluster, its store is marked &#34;dirty&#34; because it may be out of date compared to other brokers in the cluster. If a broker leaves a running cluster because it is stopped, it crashes or the host crashes, its store continues to be marked &#34;dirty&#34;.
- </para>
- <para>
- If the cluster is reduced to a single broker, its store is marked &#34;clean&#34; since it is the only broker making updates. If the cluster is shut down with the command <literal>qpid-cluster -k</literal> then all the stores are marked clean.
- </para>
- <para>
- When a cluster is initially formed, brokers with clean stores read from their stores. Brokers with dirty stores, or brokers that join after the cluster is running, discard their old stores and initialize a new store with an update from one of the running brokers. The <command>--truncate</command> option can be used to force a broker to discard all existing stores even if they are clean. (A dirty store is discarded regardless.)
- </para>
- <para>
- Discarded stores are copied to a back up directory. The active store is in &#60;data-dir&#62;/rhm. Back-up stores are in &#60;data-dir&#62;/_cluster.bak.&#60;nnnn&#62;/rhm, where &#60;nnnn&#62; is a 4 digit number. A higher number means a more recent backup.
- </para>
-
- </section>
-
- <section id="sect-Messaging_User_Guide-Persistence_in_High_Availability_Message_Clusters-Starting_a_persistent_cluster">
- <title>Starting a persistent cluster</title>
- <para>
- When starting a persistent cluster broker, set the cluster-size option to the number of brokers in the cluster. This allows the brokers to wait until the entire cluster is running so that they can synchronize their stored state.
- </para>
- <para>
- The cluster can start if:
- </para>
- <para>
- <itemizedlist>
- <listitem>
- <para>
- all members have empty stores, or
- </para>
-
- </listitem>
- <listitem>
- <para>
- at least one member has a clean store
- </para>
-
- </listitem>
-
- </itemizedlist>
-
- </para>
- <para>
- All members of the new cluster will be initialized with the state from a clean store.
- </para>
-
- </section>
-
- <section id="sect-Messaging_User_Guide-Persistence_in_High_Availability_Message_Clusters-Stopping_a_persistent_cluster">
- <title>Stopping a persistent cluster</title>
- <para>
- To cleanly shut down a persistent cluster use the command <command>qpid-cluster -k</command>. This causes all brokers to synchronize their state and mark their stores as &#34;clean&#34; so they can be used when the cluster restarts.
- </para>
-
- </section>
-
- <section id="sect-Messaging_User_Guide-Persistence_in_High_Availability_Message_Clusters-Starting_a_persistent_cluster_with_no_clean_store">
- <title>Starting a persistent cluster with no clean store</title>
- <para>
- If the cluster has previously had a total failure and there are no clean stores then the brokers will fail to start with the log message <literal>Cannot recover, no clean store.</literal> If this happens you can start the cluster by marking one of the stores &#34;clean&#34; as follows:
- </para>
- <procedure>
- <step>
- <para>
- Move the latest store backup into place in the broker&#39;s data-directory. The backups end in a 4-digit number; the latest backup is the highest number.
- </para>
-
- <screen>
- cd &#60;data-dir&#62;
- mv rhm rhm.bak
- cp -a _cluster.bak.&#60;nnnn&#62;/rhm .
- </screen>
-
- </step>
- <step>
- <para>
- Mark the store as clean:
- <screen>qpid-cluster-store -c &#60;data-dir&#62;</screen>
-
- </para>
-
- </step>
-
- </procedure>
-
- <para>
- Now you can start the cluster; all members will be initialized from the store you marked as clean.
- </para>
-
- </section>
-
- <section id="sect-Messaging_User_Guide-Persistence_in_High_Availability_Message_Clusters-Isolated_failures_in_a_persistent_cluster">
- <title>Isolated failures in a persistent cluster</title>
- <para>
- A broker in a persistent cluster may encounter errors that other brokers in the cluster do not; if this happens, the broker shuts itself down to avoid making the cluster state inconsistent. For example, a disk failure on one node will result in that node shutting down. Running out of storage capacity can also cause a node to shut down, because the brokers may not run out of storage at exactly the same point, even if they have similar storage configurations. To avoid unnecessary broker shutdowns, make sure the queue policy size of each durable queue is less than the capacity of the journal for the queue.
- </para>
-
- </section>
-
-
- </section>
-
-
-</section>
diff --git a/qpid/doc/book/src/cpp-broker/Active-Passive-Cluster.xml b/qpid/doc/book/src/cpp-broker/Active-Passive-Cluster.xml
index 55893387a4..0a0d59045f 100644
--- a/qpid/doc/book/src/cpp-broker/Active-Passive-Cluster.xml
+++ b/qpid/doc/book/src/cpp-broker/Active-Passive-Cluster.xml
@@ -102,6 +102,11 @@ under the License.
primary. This protects those messages against a failure of the new
primary until the backups have a chance to connect and catch up.
</para>
+ <para>
+ Not all messages need to be replicated to the back-up brokers. If a
+ message is consumed and acknowledged by a regular client before it has
+ been replicated to a backup, then it doesn't need to be replicated.
+ </para>
<variablelist>
<title>Status of a HA broker</title>
<varlistentry>
@@ -149,39 +154,6 @@ under the License.
</variablelist>
</section>
<section>
- <title>Replacing the old cluster module</title>
- <para>
- The High Availability (HA) module replaces the previous
- <firstterm>active-active</firstterm> cluster module. The new active-passive
- approach has several advantages compared to the existing active-active cluster
- module.
- <itemizedlist>
- <listitem>
- It does not depend directly on openais or corosync. It does not use multicast
- which simplifies deployment.
- </listitem>
- <listitem>
- It is more portable: in environments that don't support corosync, it can be
- integrated with a resource manager available in that environment.
- </listitem>
- <listitem>
- It can take advantage of features provided by the resource manager, for example
- virtual IP addresses.
- </listitem>
- <listitem>
- Improved performance and scalability due to better use of multiple CPUs
- </listitem>
- </itemizedlist>
- </para>
- <para>
- You should not enable the old and new cluster modules at the same time
- in a broker, as they may interfere with each other. In other words you
- should not set <literal>cluster-name</literal> at the same time as
- either <literal>ha-cluster</literal> or
- <literal>ha-queue-replication</literal>
- </para>
- </section>
- <section>
<title>Limitations</title>
<para>
There are a number of known limitations in the current preview implementation. These
diff --git a/qpid/doc/book/src/java-broker/Java-Broker-Runtime-Handling-Undeliverable-Messages.xml b/qpid/doc/book/src/java-broker/Java-Broker-Runtime-Handling-Undeliverable-Messages.xml
new file mode 100644
index 0000000000..40c0e44629
--- /dev/null
+++ b/qpid/doc/book/src/java-broker/Java-Broker-Runtime-Handling-Undeliverable-Messages.xml
@@ -0,0 +1,169 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!DOCTYPE entities [
+<!ENTITY % entities SYSTEM "commonEntities.xml">
+%entities;
+]>
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied. See the License for the
+ specific language governing permissions and limitations
+ under the License.
+
+-->
+
+<section id="Java-Broker-Runtime-Handling-Undeliverable-Messages">
+ <title>Handling Undeliverable Messages</title>
+
+ <section role="h2" id="Java-Broker-Runtime-Handling-Undeliverable-Messages-Introduction">
+ <title>Introduction</title>
+ <para> Messages that cannot be delivered successfully to a consumer (for instance, because the
+ client is using a transacted session and rolls-back the transaction) can be made available on
+ the queue again and then subsequently be redelivered, depending on the precise session
+ acknowledgement mode and messaging model used by the application. This is normally desirable
+ behaviour that contributes to the ability of a system to withstand unexpected errors. However, it
+ leaves open the possibility for a message to be repeatedly redelivered (potentially indefinitely),
+ consuming system resources and preventing the delivery of other messages. Such undeliverable
+ messages are sometimes known as poison messages.</para>
+ <para>For example, consider a stock ticker application that has been designed to consume prices
+ contained within JMS TextMessages. What if inadvertently a BytesMessage is placed onto the queue?
+ As the ticker application does not expect the BytesMessage, its processing might fail and cause it
+ to roll back the transaction; however, the default behaviour of the Broker would mean that the
+ BytesMessage would be delivered over and over again, preventing the delivery of other legitimate
+ messages, until an operator intervenes and removes the erroneous message from the queue. </para>
+ <para>Qpid has maximum delivery count and dead-letter queue (DLQ) features which can be used in
+ concert to construct a system that automatically handles such a condition. These features are
+ described in the following sections.</para>
+ </section>
+
+ <section role="h2" id="Java-Broker-Runtime-Handling-Undeliverable-Messages-Maximum-Delivery-Count">
+ <title>Maximum Delivery Count</title>
+ <para> Maximum delivery count is a property of a queue. If a consumer application repeatedly fails to
+ process a message, and the message has been delivered more than the specified number of times, then the broker will either route the
+ message to a dead-letter queue (if one has been defined), or will discard the message. </para>
+ <para> In order for a maximum delivery count to be enforced, the consuming client
+ <emphasis>must</emphasis> call <ulink url="&oracleJeeDocUrl;javax/jms/Session.html#rollback()"
+ >Session#rollback()</ulink> (or <ulink url="&oracleJeeDocUrl;javax/jms/Session.html#recover()"
+ >Session#recover()</ulink> if the session is not transacted). It is during the Broker's
+ processing of Session#rollback() (or Session#recover()) that if a message has been seen
+ at least the maximum number of times then it will move the message to the DLQ or discard the
+ message.</para>
+ <para>If the consuming client fails in another manner, for instance, closes the connection, the
+ message will not be re-routed and the consumer application will see the same poison message again
+ once it reconnects.</para>
+ <para> If the consuming application is using AMQP 0-9-1, 0-9, or 0-8 protocols, it is necessary to
+ set the client system property <varname>qpid.reject.behaviour</varname> or connection or binding
+ URL option <varname>rejectbehaviour</varname> to the value <literal>system</literal>.</para>
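+ <para>The following sketch shows the consuming side of this pattern. It is illustrative only: the
+ class name, the supplied <literal>Connection</literal> and <literal>Queue</literal>, and the
+ <literal>process()</literal> method are assumed to exist in the application. The key point is that
+ the work is done in a transacted session and that a processing failure leads to
+ <literal>Session#rollback()</literal>, which allows the Broker to count deliveries as described
+ above.</para>
+ <programlisting><![CDATA[
+ import javax.jms.Connection;
+ import javax.jms.Message;
+ import javax.jms.MessageConsumer;
+ import javax.jms.Queue;
+ import javax.jms.Session;
+
+ public class RedeliveryAwareConsumer {
+
+     /** Consume from the given queue inside a transaction, rolling back on failure. */
+     public void consume(Connection connection, Queue queue) throws Exception {
+         Session session = connection.createSession(true, Session.SESSION_TRANSACTED);
+         MessageConsumer consumer = session.createConsumer(queue);
+         connection.start();
+
+         while (true) {
+             Message message = consumer.receive();
+             try {
+                 process(message);   // application-specific processing; may throw
+                 session.commit();   // message handled successfully
+             } catch (Exception e) {
+                 // Rollback makes the message available for redelivery. Once the Broker has
+                 // seen it the maximum number of times, it is moved to the DLQ (if enabled)
+                 // or discarded, as described above.
+                 session.rollback();
+             }
+         }
+     }
+
+     private void process(Message message) throws Exception {
+         // Application logic goes here.
+     }
+ }
+ ]]></programlisting>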
+ <para>It is possible to determine the number of times a message has been sent to a consumer via
+ the Management interfaces, but it is not possible to determine this information from a message client.
+ Specifically, the optional JMS message header <property>JMSXDeliveryCount</property> is not
+ supported.</para>
+ <para>Maximum Delivery Count can be enabled via management (see <xref
+ linkend="Java-Broker-Configuring-And-Managing"/>) using the the queue declare property
+ <property>x-qpid-maximum-delivery-count</property> or via <link
+ linkend="Java-Broker-Runtime-Handling-Undeliverable-Messages-Configuration">configuration</link>
+ as illustrated below.</para>
+ </section>
+
+ <section role="h2" id="Java-Broker-Runtime-Handling-Undeliverable-Messages-Dead-Letter-Queues">
+ <title>Dead Letter Queues (DLQ)</title>
+ <para>A Dead Letter Queue (DLQ) acts as a destination for messages that have somehow exceeded the
+ normal bounds of processing and is utilised to prevent disruption to the flow of other messages. When
+ a DLQ is enabled for a given queue, if a consuming client indicates it no longer wishes to
+ receive the message (typically by exceeding a Maximum Delivery Count), then the message is moved
+ onto the DLQ and removed from the original queue. </para>
+ <para>The DLQ feature causes generation of a Dead Letter Exchange and a Dead Letter Queue. These
+ are named, by convention, QueueName<emphasis>_DLE</emphasis> and QueueName<emphasis>_DLQ</emphasis> respectively.</para>
+ <para>DLQs can be enabled via management (see <xref linkend="Java-Broker-Configuring-And-Managing"
+ />) using the queue declare property <property>x-qpid-dlq-enabled</property> or via <link
+ linkend="Java-Broker-Runtime-Handling-Undeliverable-Messages-Configuration">configuration</link>
+ as illustrated below.</para>
+ <caution>
+ <title>Avoid excessive queue depth</title>
+ <para>Applications making use of DLQs <emphasis>should</emphasis> make provision for the frequent
+ examination of messages arriving on DLQs, so that corrective action can be taken to resolve
+ the underlying cause and the messages removed from the DLQ in a timely manner. Messages on DLQs
+ consume system resources in the same manner as messages on normal queues, so excessive queue
+ depths should not be permitted to develop.</para>
+ </caution>
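+ <para>As an illustration of such housekeeping, a simple monitoring client could subscribe to the
+ dead letter queue and log or archive whatever arrives. The queue name below follows the
+ QueueName<emphasis>_DLQ</emphasis> convention described above and is purely illustrative.</para>
+ <programlisting><![CDATA[
+ import javax.jms.Connection;
+ import javax.jms.Message;
+ import javax.jms.MessageConsumer;
+ import javax.jms.MessageListener;
+ import javax.jms.Session;
+
+ public class DlqMonitor {
+
+     /** Log every message that arrives on the dead letter queue. */
+     public void monitor(Connection connection) throws Exception {
+         Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
+         // "myqueue_DLQ" is the DLQ generated for an (assumed) queue named "myqueue".
+         MessageConsumer consumer = session.createConsumer(session.createQueue("myqueue_DLQ"));
+         consumer.setMessageListener(new MessageListener() {
+             public void onMessage(Message message) {
+                 // In a real application: alert an operator, archive the message, and so on.
+                 System.out.println("Dead-lettered message: " + message);
+             }
+         });
+         connection.start();
+     }
+ }
+ ]]></programlisting>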
+ </section>
+
+ <section role="h2" id="Java-Broker-Runtime-Handling-Undeliverable-Messages-Configuration">
+ <title>Configuration</title>
+ <para>In the configuration below, it can be seen that DLQs/Maximum Delivery Count are enabled at
+ the broker level with maximum delivery count set to 5, disabled at the virtualhost level for the
+ 'dev-only' virtualhost, and enabled specifically for the 'dev-only-main-queue' with maximum
+ delivery count overridden to 3. </para>
+ <para>As 'dev-only-main-queue' has its own configuration specified, this value overrides all
+ others and causes the features to be enabled for this queue. In contrast to this,
+ 'dev-only-other-queue' does not specify its own value and picks up the false value specified for
+ its parent virtualhost, causing the DLQ/Maximum Delivery Count features to be disabled for this
+ queue. Any such queue in the 'dev-only' virtualhost which does not specify its own configuration
+ value will have the DLQ/Maximum Delivery Count feature disabled.</para>
+ <para>The queue 'localhost-queue' has the DLQ/Maximum Delivery Count features enabled, as neither
+ the queue itself nor the 'localhost' virtualhost specifies a configuration value and so the broker
+ level value of true is used. Any such queue in the 'localhost' virtualhost which does not specify
+ its own configuration value will have the features enabled.</para>
+ <example>
+ <title>Enabling DLQs and maximum delivery count at broker level within config.xml</title>
+ <programlisting><![CDATA[<broker>
+ ...
+ <deadLetterQueues>true</deadLetterQueues>
+ <maximumDeliveryCount>5</maximumDeliveryCount>
+ ...
+</broker>]]></programlisting>
+ </example>
+ <example>
+ <title>Enabling DLQs and maximum delivery count at virtualhost and queue level within
+ virtualhosts.xml</title>
+ <programlisting><![CDATA[<virtualhosts>
+ ...
+ <virtualhost>
+ <name>dev-only</name>
+ <dev-only>
+ <queues>
+ <deadLetterQueues>false</deadLetterQueues>
+ <maximumDeliveryCount>0</maximumDeliveryCount>
+ <queue>
+ <name>dev-only-main-queue</name>
+ <dev-only-main-queue>
+ <deadLetterQueues>true</deadLetterQueues>
+ <maximumDeliveryCount>3</maximumDeliveryCount>
+ </dev-only-main-queue>
+ </queue>
+ <queue>
+ <name>dev-only-other-queue</name>
+ </queue>
+ </queues>
+ </dev-only>
+ </virtualhost>
+ <virtualhost>
+ <name>localhost</name>
+ <localhost>
+ <queues>
+ <queue>
+ <name>localhost-queue</name>
+ </queue>
+ </queues>
+ </localhost>
+ </virtualhost>
+ ...
+</virtualhosts>]]>
+ </programlisting>
+ </example>
+ </section>
+
+
+</section>
diff --git a/qpid/doc/book/src/java-broker/Java-Broker-Runtime.xml b/qpid/doc/book/src/java-broker/Java-Broker-Runtime.xml
index 6b21fd15c2..2af775d2fc 100644
--- a/qpid/doc/book/src/java-broker/Java-Broker-Runtime.xml
+++ b/qpid/doc/book/src/java-broker/Java-Broker-Runtime.xml
@@ -26,4 +26,5 @@
<xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="Java-Broker-Runtime-Alerts.xml"/>
<xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="Java-Broker-Runtime-Disk-Space-Management.xml"/>
<xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="Java-Broker-Runtime-Producer-Transaction-Timeout.xml"/>
+ <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="Java-Broker-Runtime-Handling-Undeliverable-Messages.xml"/>
</chapter>
diff --git a/qpid/extras/nexus/CMakeLists.txt b/qpid/extras/nexus/CMakeLists.txt
new file mode 100644
index 0000000000..04c33c35e2
--- /dev/null
+++ b/qpid/extras/nexus/CMakeLists.txt
@@ -0,0 +1,94 @@
+##
+## Licensed to the Apache Software Foundation (ASF) under one
+## or more contributor license agreements. See the NOTICE file
+## distributed with this work for additional information
+## regarding copyright ownership. The ASF licenses this file
+## to you under the Apache License, Version 2.0 (the
+## "License"); you may not use this file except in compliance
+## with the License. You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing,
+## software distributed under the License is distributed on an
+## "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+## KIND, either express or implied. See the License for the
+## specific language governing permissions and limitations
+## under the License.
+##
+
+cmake_minimum_required(VERSION 2.6)
+include(CheckLibraryExists)
+include(CheckSymbolExists)
+
+project(qpid-nexus C)
+
+set (SO_VERSION_MAJOR 0)
+set (SO_VERSION_MINOR 1)
+set (SO_VERSION "${SO_VERSION_MAJOR}.${SO_VERSION_MINOR}")
+
+if (NOT DEFINED LIB_SUFFIX)
+ get_property(LIB64 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB64_PATHS)
+ if ("${LIB64}" STREQUAL "TRUE" AND ${CMAKE_SIZEOF_VOID_P} STREQUAL "8")
+ set(LIB_SUFFIX 64)
+ else()
+ set(LIB_SUFFIX "")
+ endif()
+endif()
+
+set(INCLUDE_INSTALL_DIR include CACHE PATH "Include file directory")
+set(LIB_INSTALL_DIR "lib${LIB_SUFFIX}" CACHE PATH "Library object file directory")
+set(SYSCONF_INSTALL_DIR etc CACHE PATH "System read only configuration directory")
+set(SHARE_INSTALL_DIR share CACHE PATH "Shared read only data directory")
+set(MAN_INSTALL_DIR share/man CACHE PATH "Manpage directory")
+
+include_directories(
+ ${CMAKE_CURRENT_SOURCE_DIR}/include
+ ${CMAKE_CURRENT_SOURCE_DIR}/src
+ ${proton_include}
+ )
+
+##
+## Find dependencies
+##
+find_library(proton_lib qpid-proton)
+find_library(pthread_lib pthread)
+find_library(rt_lib rt)
+find_path(proton_include proton/driver.h)
+
+set(CMAKE_C_FLAGS "-pthread -Wall -Werror")
+set(CATCH_UNDEFINED "-Wl,--no-undefined")
+
+##
+## Build the Multi-Threaded Server Library
+##
+set(server_SOURCES
+ src/alloc.c
+ src/auth.c
+ src/container.c
+ src/hash.c
+ src/iterator.c
+ src/log.c
+ src/message.c
+ src/posix/threading.c
+ src/server.c
+ src/timer.c
+ src/work_queue.c
+ )
+
+add_library(qpid-nexus SHARED ${server_SOURCES})
+target_link_libraries(qpid-nexus ${proton_lib} ${pthread_lib} ${rt_lib})
+set_target_properties(qpid-nexus PROPERTIES
+ VERSION "${SO_VERSION}"
+ SOVERSION "${SO_VERSION_MAJOR}"
+ LINK_FLAGS "${CATCH_UNDEFINED}"
+ )
+install(TARGETS qpid-nexus
+ LIBRARY DESTINATION ${LIB_INSTALL_DIR})
+file(GLOB headers "include/qpid/nexus/*.h")
+install(FILES ${headers} DESTINATION ${INCLUDE_INSTALL_DIR}/qpid/nexus)
+
+##
+## Build Tests
+##
+add_subdirectory(tests)
diff --git a/qpid/extras/nexus/include/qpid/nexus/alloc.h b/qpid/extras/nexus/include/qpid/nexus/alloc.h
new file mode 100644
index 0000000000..a0c832c069
--- /dev/null
+++ b/qpid/extras/nexus/include/qpid/nexus/alloc.h
@@ -0,0 +1,70 @@
+#ifndef __nexus_alloc_h__
+#define __nexus_alloc_h__ 1
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <qpid/nexus/threading.h>
+
+typedef struct nx_alloc_pool_t nx_alloc_pool_t;
+
+typedef struct {
+ int transfer_batch_size;
+ int local_free_list_max;
+ int global_free_list_max;
+} nx_alloc_config_t;
+
+typedef struct {
+ uint64_t total_alloc_from_heap;
+ uint64_t total_free_to_heap;
+ uint64_t held_by_threads;
+ uint64_t batches_rebalanced_to_threads;
+ uint64_t batches_rebalanced_to_global;
+} nx_alloc_stats_t;
+
+typedef struct {
+ char *type_name;
+ size_t type_size;
+ nx_alloc_config_t *config;
+ nx_alloc_stats_t *stats;
+ nx_alloc_pool_t *global_pool;
+ sys_mutex_t *lock;
+} nx_alloc_type_desc_t;
+
+
+void *nx_alloc(nx_alloc_type_desc_t *desc, nx_alloc_pool_t **tpool);
+void nx_dealloc(nx_alloc_type_desc_t *desc, nx_alloc_pool_t **tpool, void *p);
+
+
+#define ALLOC_DECLARE(T) \
+ T *new_##T(); \
+ void free_##T(T *p)
+
+#define ALLOC_DEFINE_CONFIG(T,C) \
+ nx_alloc_type_desc_t __desc_##T = {#T, sizeof(T), C, 0, 0, 0}; \
+ __thread nx_alloc_pool_t *__local_pool_##T = 0; \
+ T *new_##T() { return (T*) nx_alloc(&__desc_##T, &__local_pool_##T); } \
+ void free_##T(T *p) { nx_dealloc(&__desc_##T, &__local_pool_##T, (void*) p); } \
+ nx_alloc_stats_t *alloc_stats_##T() { return __desc_##T.stats; }
+
+#define ALLOC_DEFINE(T) ALLOC_DEFINE_CONFIG(T, 0)
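+
+/*
+ * Usage sketch (illustrative; my_type_t is not part of this header):
+ *
+ *   // my_type.h
+ *   typedef struct { int x; int y; } my_type_t;
+ *   ALLOC_DECLARE(my_type_t);          // declares new_my_type_t() and free_my_type_t()
+ *
+ *   // my_type.c
+ *   ALLOC_DEFINE(my_type_t);           // defines the pooled allocator with the default config
+ *
+ *   my_type_t *obj = new_my_type_t();  // allocated via a thread-local pool
+ *   free_my_type_t(obj);               // handed back to the pool rather than directly to the heap
+ */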
+
+
+#endif
diff --git a/qpid/extras/nexus/include/qpid/nexus/container.h b/qpid/extras/nexus/include/qpid/nexus/container.h
new file mode 100644
index 0000000000..f6c9839da0
--- /dev/null
+++ b/qpid/extras/nexus/include/qpid/nexus/container.h
@@ -0,0 +1,122 @@
+#ifndef __container_h__
+#define __container_h__ 1
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <proton/engine.h>
+#include <qpid/nexus/server.h>
+#include <qpid/nexus/alloc.h>
+#include <qpid/nexus/ctools.h>
+
+typedef uint8_t nx_dist_mode_t;
+#define NX_DIST_COPY 0x01
+#define NX_DIST_MOVE 0x02
+#define NX_DIST_BOTH 0x03
+
+typedef enum {
+ NX_LIFE_PERMANENT,
+ NX_LIFE_DELETE_CLOSE,
+ NX_LIFE_DELETE_NO_LINKS,
+ NX_LIFE_DELETE_NO_MESSAGES,
+ NX_LIFE_DELETE_NO_LINKS_MESSAGES
+} nx_lifetime_policy_t;
+
+typedef enum {
+ NX_INCOMING,
+ NX_OUTGOING
+} nx_direction_t;
+
+
+typedef struct nx_node_t nx_node_t;
+typedef struct nx_link_t nx_link_t;
+
+typedef void (*nx_container_delivery_handler_t) (void *node_context, nx_link_t *link, pn_delivery_t *delivery);
+typedef int (*nx_container_link_handler_t) (void *node_context, nx_link_t *link);
+typedef int (*nx_container_link_detach_handler_t) (void *node_context, nx_link_t *link, int closed);
+typedef void (*nx_container_node_handler_t) (void *type_context, nx_node_t *node);
+typedef void (*nx_container_conn_handler_t) (void *type_context, nx_connection_t *conn);
+
+typedef struct {
+ char *type_name;
+ void *type_context;
+ int allow_dynamic_creation;
+
+ //
+ // Node-Instance Handlers
+ //
+ nx_container_delivery_handler_t rx_handler;
+ nx_container_delivery_handler_t tx_handler;
+ nx_container_delivery_handler_t disp_handler;
+ nx_container_link_handler_t incoming_handler;
+ nx_container_link_handler_t outgoing_handler;
+ nx_container_link_handler_t writable_handler;
+ nx_container_link_detach_handler_t link_detach_handler;
+
+ //
+ // Node-Type Handlers
+ //
+ nx_container_node_handler_t node_created_handler;
+ nx_container_node_handler_t node_destroyed_handler;
+ nx_container_conn_handler_t inbound_conn_open_handler;
+ nx_container_conn_handler_t outbound_conn_open_handler;
+} nx_node_type_t;
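+
+/*
+ * Usage sketch (illustrative): a node type is described by a statically
+ * initialized nx_node_type_t and registered once at start-up, e.g.
+ *
+ *   static nx_node_type_t my_node_type = {
+ *       "my-node", 0, 0,                         // type_name, type_context, allow_dynamic_creation
+ *       my_rx, my_tx, my_disp,                   // delivery handlers
+ *       my_incoming, my_outgoing, my_writable,   // link handlers
+ *       my_link_detach,
+ *       0, 0, 0, 0                               // node-type handlers (optional)
+ *   };
+ *
+ *   nx_container_register_node_type(&my_node_type);
+ */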
+
+void nx_container_initialize(void);
+void nx_container_finalize(void);
+
+int nx_container_register_node_type(const nx_node_type_t *nt);
+
+void nx_container_set_default_node_type(const nx_node_type_t *nt,
+ void *node_context,
+ nx_dist_mode_t supported_dist);
+
+nx_node_t *nx_container_create_node(const nx_node_type_t *nt,
+ const char *name,
+ void *node_context,
+ nx_dist_mode_t supported_dist,
+ nx_lifetime_policy_t life_policy);
+void nx_container_destroy_node(nx_node_t *node);
+
+void nx_container_node_set_context(nx_node_t *node, void *node_context);
+nx_dist_mode_t nx_container_node_get_dist_modes(const nx_node_t *node);
+nx_lifetime_policy_t nx_container_node_get_life_policy(const nx_node_t *node);
+
+nx_link_t *nx_link(nx_node_t *node, nx_connection_t *conn, nx_direction_t dir, const char *name);
+void nx_link_set_context(nx_link_t *link, void *link_context);
+void *nx_link_get_context(nx_link_t *link);
+pn_link_t *nx_link_pn(nx_link_t *link);
+pn_terminus_t *nx_link_source(nx_link_t *link);
+pn_terminus_t *nx_link_target(nx_link_t *link);
+pn_terminus_t *nx_link_remote_source(nx_link_t *link);
+pn_terminus_t *nx_link_remote_target(nx_link_t *link);
+void nx_link_activate(nx_link_t *link);
+void nx_link_close(nx_link_t *link);
+
+
+typedef struct nx_link_item_t nx_link_item_t;
+
+struct nx_link_item_t {
+ DEQ_LINKS(nx_link_item_t);
+ nx_link_t *link;
+};
+
+ALLOC_DECLARE(nx_link_item_t);
+DEQ_DECLARE(nx_link_item_t, nx_link_list_t);
+
+#endif
diff --git a/qpid/extras/nexus/include/qpid/nexus/ctools.h b/qpid/extras/nexus/include/qpid/nexus/ctools.h
new file mode 100644
index 0000000000..6b8f072b75
--- /dev/null
+++ b/qpid/extras/nexus/include/qpid/nexus/ctools.h
@@ -0,0 +1,146 @@
+#ifndef __ctools_h__
+#define __ctools_h__ 1
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+
+#define CT_ASSERT(exp) { assert(exp); }
+
+#define NEW(t) (t*) malloc(sizeof(t))
+#define NEW_ARRAY(t,n) (t*) malloc(sizeof(t)*(n))
+#define NEW_PTR_ARRAY(t,n) (t**) malloc(sizeof(t*)*(n))
+
+#define DEQ_DECLARE(i,d) typedef struct { \
+ i *head; \
+ i *tail; \
+ i *scratch; \
+ size_t size; \
+ } d
+
+#define DEQ_LINKS(t) t *prev; t *next
+
+#define DEQ_INIT(d) do { d.head = 0; d.tail = 0; d.scratch = 0; d.size = 0; } while (0)
+#define DEQ_ITEM_INIT(i) do { (i)->next = 0; (i)->prev = 0; } while(0)
+#define DEQ_HEAD(d) (d.head)
+#define DEQ_TAIL(d) (d.tail)
+#define DEQ_SIZE(d) (d.size)
+#define DEQ_NEXT(i) (i)->next
+#define DEQ_PREV(i) (i)->prev
+
+#define DEQ_INSERT_HEAD(d,i) \
+do { \
+ CT_ASSERT((i)->next == 0); \
+ CT_ASSERT((i)->prev == 0); \
+ if (d.head) { \
+ (i)->next = d.head; \
+ d.head->prev = i; \
+ } else { \
+ d.tail = i; \
+ (i)->next = 0; \
+ CT_ASSERT(d.size == 0); \
+ } \
+ (i)->prev = 0; \
+ d.head = i; \
+ d.size++; \
+} while (0)
+
+#define DEQ_INSERT_TAIL(d,i) \
+do { \
+ CT_ASSERT((i)->next == 0); \
+ CT_ASSERT((i)->prev == 0); \
+ if (d.tail) { \
+ (i)->prev = d.tail; \
+ d.tail->next = i; \
+ } else { \
+ d.head = i; \
+ (i)->prev = 0; \
+ CT_ASSERT(d.size == 0); \
+ } \
+ (i)->next = 0; \
+ d.tail = i; \
+ d.size++; \
+} while (0)
+
+#define DEQ_REMOVE_HEAD(d) \
+do { \
+ CT_ASSERT(d.head); \
+ if (d.head) { \
+ d.scratch = d.head; \
+ d.head = d.head->next; \
+ if (d.head == 0) { \
+ d.tail = 0; \
+ CT_ASSERT(d.size == 1); \
+ } else \
+ d.head->prev = 0; \
+ d.size--; \
+ d.scratch->next = 0; \
+ d.scratch->prev = 0; \
+ } \
+} while (0)
+
+#define DEQ_REMOVE_TAIL(d) \
+do { \
+ CT_ASSERT(d.tail); \
+ if (d.tail) { \
+ d.scratch = d.tail; \
+ d.tail = d.tail->prev; \
+ if (d.tail == 0) { \
+ d.head = 0; \
+ CT_ASSERT(d.size == 1); \
+ } else \
+ d.tail->next = 0; \
+ d.size--; \
+ d.scratch->next = 0; \
+ d.scratch->prev = 0; \
+ } \
+} while (0)
+
+#define DEQ_INSERT_AFTER(d,i,a) \
+do { \
+ CT_ASSERT((i)->next == 0); \
+ CT_ASSERT((i)->prev == 0); \
+ if ((a)->next) \
+ (a)->next->prev = (i); \
+ else \
+ d.tail = (i); \
+ (i)->next = (a)->next; \
+ (i)->prev = (a); \
+ (a)->next = (i); \
+ d.size++; \
+} while (0)
+
+#define DEQ_REMOVE(d,i) \
+do { \
+ if ((i)->next) \
+ (i)->next->prev = (i)->prev; \
+ else \
+ d.tail = (i)->prev; \
+ if ((i)->prev) \
+ (i)->prev->next = (i)->next; \
+ else \
+ d.head = (i)->next; \
+ d.size--; \
+ (i)->next = 0; \
+ (i)->prev = 0; \
+ CT_ASSERT(d.size || (!d.head && !d.tail)); \
+} while (0)
+
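+/*
+ * Example (illustrative sketch): a list of hypothetical my_item_t records.
+ *
+ *    typedef struct my_item_t my_item_t;
+ *    struct my_item_t {
+ *        DEQ_LINKS(my_item_t);                // prev/next pointers for the list
+ *        int value;
+ *    };
+ *    DEQ_DECLARE(my_item_t, my_list_t);       // declares the my_list_t type
+ *
+ *    my_list_t list;
+ *    DEQ_INIT(list);
+ *
+ *    my_item_t *item = NEW(my_item_t);
+ *    DEQ_ITEM_INIT(item);
+ *    item->value = 1;
+ *    DEQ_INSERT_TAIL(list, item);
+ *
+ *    for (my_item_t *ptr = DEQ_HEAD(list); ptr; ptr = DEQ_NEXT(ptr))
+ *        ;                                    // visit each item in order
+ *
+ *    DEQ_REMOVE_HEAD(list);                   // detaches item; caller frees it
+ *    free(item);
+ */
+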
+#endif
diff --git a/qpid/extras/nexus/include/qpid/nexus/hash.h b/qpid/extras/nexus/include/qpid/nexus/hash.h
new file mode 100644
index 0000000000..0efded35e8
--- /dev/null
+++ b/qpid/extras/nexus/include/qpid/nexus/hash.h
@@ -0,0 +1,37 @@
+#ifndef __hash_h__
+#define __hash_h__ 1
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <stdlib.h>
+#include <qpid/nexus/iterator.h>
+
+typedef struct hash_t hash_t;
+
+hash_t *hash(int bucket_exponent, int batch_size, int value_is_const);
+void hash_free(hash_t *h);
+
+size_t hash_size(hash_t *h);
+int hash_insert(hash_t *h, nx_field_iterator_t *key, void *val);
+int hash_insert_const(hash_t *h, nx_field_iterator_t *key, const void *val);
+int hash_retrieve(hash_t *h, nx_field_iterator_t *key, void **val);
+int hash_retrieve_const(hash_t *h, nx_field_iterator_t *key, const void **val);
+int hash_remove(hash_t *h, nx_field_iterator_t *key);
+
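+/*
+ * Example (illustrative sketch): the int return codes are assumed to be zero
+ * on success, and my_value is a hypothetical pointer.
+ *
+ *    hash_t              *h   = hash(10, 32, 0);   // 2^10 buckets, batches of 32
+ *    nx_field_iterator_t *key = nx_field_iterator_string("my-key", ITER_VIEW_ALL);
+ *    void                *val = 0;
+ *
+ *    hash_insert(h, key, my_value);
+ *    hash_retrieve(h, key, &val);                   // val now refers to my_value
+ *    hash_remove(h, key);
+ *
+ *    nx_field_iterator_free(key);
+ *    hash_free(h);
+ */
+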
+#endif
diff --git a/qpid/extras/nexus/include/qpid/nexus/iterator.h b/qpid/extras/nexus/include/qpid/nexus/iterator.h
new file mode 100644
index 0000000000..9aca3d4795
--- /dev/null
+++ b/qpid/extras/nexus/include/qpid/nexus/iterator.h
@@ -0,0 +1,114 @@
+#ifndef __nexus_iterator_h__
+#define __nexus_iterator_h__ 1
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+typedef struct nx_buffer_t nx_buffer_t;
+
+/**
+ * The field iterator is used to access fields within a buffer chain.
+ * It shields the user from the fact that the field may be split across
+ * one or more physical buffers.
+ */
+typedef struct nx_field_iterator_t nx_field_iterator_t;
+
+/**
+ * Iterator views allow the code traversing the field to see a transformed
+ * view of the raw field.
+ *
+ * ITER_VIEW_ALL - No transformation of the raw field data
+ *
+ * ITER_VIEW_NO_HOST - Remove the scheme and host fields from the view
+ *
+ * amqp://host.domain.com:port/node-id/node/specific
+ * ^^^^^^^^^^^^^^^^^^^^^
+ * node-id/node/specific
+ * ^^^^^^^^^^^^^^^^^^^^^
+ *
+ * ITER_VIEW_NODE_ID - Isolate the node identifier from an address
+ *
+ * amqp://host.domain.com:port/node-id/node/specific
+ * ^^^^^^^
+ * node-id/node/specific
+ * ^^^^^^^
+ *
+ * ITER_VIEW_NODE_SPECIFIC - Isolate node-specific text from an address
+ *
+ * amqp://host.domain.com:port/node-id/node/specific
+ * ^^^^^^^^^^^^^
+ * node-id/node/specific
+ * ^^^^^^^^^^^^^
+ */
+typedef enum {
+ ITER_VIEW_ALL,
+ ITER_VIEW_NO_HOST,
+ ITER_VIEW_NODE_ID,
+ ITER_VIEW_NODE_SPECIFIC
+} nx_iterator_view_t;
+
+/**
+ * Create an iterator from a null-terminated string.
+ *
+ * The "text" string must stay intact for the whole life of the iterator. The
+ * iterator does not copy the string; it only references it.
+ */
+nx_field_iterator_t* nx_field_iterator_string(const char *text,
+ nx_iterator_view_t view);
+
+/**
+ * Create an iterator from a field in a buffer chain
+ */
+nx_field_iterator_t *nx_field_iterator_buffer(nx_buffer_t *buffer,
+ int offset,
+ int length,
+ nx_iterator_view_t view);
+
+/**
+ * Free an iterator
+ */
+void nx_field_iterator_free(nx_field_iterator_t *iter);
+
+/**
+ * Reset the iterator to the first octet and set a new view
+ */
+void nx_field_iterator_reset(nx_field_iterator_t *iter,
+ nx_iterator_view_t view);
+
+/**
+ * Return the current octet in the iterator's view and step to the next.
+ */
+unsigned char nx_field_iterator_octet(nx_field_iterator_t *iter);
+
+/**
+ * Return true iff the iterator has no more octets in the view.
+ */
+int nx_field_iterator_end(nx_field_iterator_t *iter);
+
+/**
+ * Compare an input string to the iterator's view. Return true iff they are equal.
+ */
+int nx_field_iterator_equal(nx_field_iterator_t *iter, unsigned char *string);
+
+/**
+ * Return a copy of the iterator's view.
+ */
+unsigned char *nx_field_iterator_copy(nx_field_iterator_t *iter);
+
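+/*
+ * Example (illustrative sketch): isolating parts of an address.  The string
+ * literal stays in scope for the life of the iterator, as required above.
+ *
+ *    const char          *addr = "amqp://host.domain.com:5672/my-node/sub/path";
+ *    nx_field_iterator_t *iter = nx_field_iterator_string(addr, ITER_VIEW_NODE_ID);
+ *
+ *    while (!nx_field_iterator_end(iter))
+ *        putchar(nx_field_iterator_octet(iter));       // emits "my-node"
+ *
+ *    nx_field_iterator_reset(iter, ITER_VIEW_NO_HOST);
+ *    if (nx_field_iterator_equal(iter, (unsigned char*) "my-node/sub/path"))
+ *        ;                                             // the view matches the suffix
+ *
+ *    nx_field_iterator_free(iter);
+ */
+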
+#endif
diff --git a/qpid/extras/nexus/include/qpid/nexus/log.h b/qpid/extras/nexus/include/qpid/nexus/log.h
new file mode 100644
index 0000000000..1376405d13
--- /dev/null
+++ b/qpid/extras/nexus/include/qpid/nexus/log.h
@@ -0,0 +1,31 @@
+#ifndef __nx_log_h__
+#define __nx_log_h__ 1
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#define LOG_NONE 0x00000000
+#define LOG_TRACE 0x00000001
+#define LOG_ERROR 0x00000002
+#define LOG_INFO 0x00000004
+
+void nx_log(const char *module, int cls, const char *fmt, ...);
+
+void nx_log_set_mask(int mask);
+
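+/*
+ * Example (illustrative sketch): host and port are hypothetical variables.
+ *
+ *    nx_log_set_mask(LOG_INFO | LOG_ERROR);     // suppress LOG_TRACE output
+ *    nx_log("SERVER", LOG_INFO, "Listening on %s:%s", host, port);
+ */
+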
+#endif
diff --git a/qpid/extras/nexus/include/qpid/nexus/message.h b/qpid/extras/nexus/include/qpid/nexus/message.h
new file mode 100644
index 0000000000..3bb6b950ea
--- /dev/null
+++ b/qpid/extras/nexus/include/qpid/nexus/message.h
@@ -0,0 +1,162 @@
+#ifndef __nexus_message_h__
+#define __nexus_message_h__ 1
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <proton/engine.h>
+#include <qpid/nexus/ctools.h>
+#include <qpid/nexus/iterator.h>
+
+typedef struct nx_message_t nx_message_t;
+typedef struct nx_buffer_t nx_buffer_t;
+
+DEQ_DECLARE(nx_buffer_t, nx_buffer_list_t);
+DEQ_DECLARE(nx_message_t, nx_message_list_t);
+
+typedef struct {
+ nx_buffer_t *buffer; // Buffer that contains the first octet of the field, null if the field is not present
+ size_t offset; // Offset in the buffer to the first octet
+ size_t length; // Length of the field or zero if unneeded
+ int parsed; // non-zero iff the buffer chain has been parsed to find this field
+} nx_field_location_t;
+
+
+// TODO - consider using pointers to nx_field_location_t below to save memory
+struct nx_message_t {
+ DEQ_LINKS(nx_message_t);
+ nx_buffer_list_t buffers; // The buffer chain containing the message
+ pn_delivery_t *in_delivery; // The delivery on which the message arrived
+ pn_delivery_t *out_delivery; // The delivery on which the message was last sent
+ nx_field_location_t section_message_header; // The message header list
+ nx_field_location_t section_delivery_annotation; // The delivery annotation map
+ nx_field_location_t section_message_annotation; // The message annotation map
+ nx_field_location_t section_message_properties; // The message properties list
+ nx_field_location_t section_application_properties; // The application properties list
+ nx_field_location_t section_body; // The message body: Data
+ nx_field_location_t section_footer; // The footer
+ nx_field_location_t field_user_id; // The string value of the user-id
+ nx_field_location_t field_to; // The string value of the to field
+ nx_field_location_t body; // The body of the message
+ nx_field_location_t compose_length;
+ nx_field_location_t compose_count;
+ uint32_t length;
+ uint32_t count;
+};
+
+struct nx_buffer_t {
+ DEQ_LINKS(nx_buffer_t);
+ unsigned int size;
+};
+
+typedef struct {
+ size_t buffer_size;
+ unsigned long buffer_preallocation_count;
+ unsigned long buffer_rebalancing_batch_count;
+ unsigned long buffer_local_storage_max;
+ unsigned long buffer_free_list_max;
+ unsigned long message_allocation_batch_count;
+ unsigned long message_rebalancing_batch_count;
+ unsigned long message_local_storage_max;
+} nx_allocator_config_t;
+
+const nx_allocator_config_t *nx_allocator_default_config(void);
+
+void nx_allocator_initialize(const nx_allocator_config_t *config);
+void nx_allocator_finalize(void);
+
+//
+// Functions for per-thread allocators.
+//
+nx_message_t *nx_allocate_message(void);
+nx_buffer_t *nx_allocate_buffer(void);
+void nx_free_message(nx_message_t *msg);
+void nx_free_buffer(nx_buffer_t *buf);
+
+
+typedef enum {
+ NX_DEPTH_NONE,
+ NX_DEPTH_HEADER,
+ NX_DEPTH_DELIVERY_ANNOTATIONS,
+ NX_DEPTH_MESSAGE_ANNOTATIONS,
+ NX_DEPTH_MESSAGE_PROPERTIES, // Needed for 'user-id' and 'to'
+ NX_DEPTH_APPLICATION_PROPERTIES,
+ NX_DEPTH_BODY,
+ NX_DEPTH_ALL
+} nx_message_depth_t;
+
+//
+// Functions for received messages
+//
+nx_message_t *nx_message_receive(pn_delivery_t *delivery);
+int nx_message_check(nx_message_t *msg, nx_message_depth_t depth);
+nx_field_iterator_t *nx_message_field_to(nx_message_t *msg);
+nx_field_iterator_t *nx_message_body(nx_message_t *msg);
+
+//
+// Functions for composed messages
+//
+
+// Convenience Functions
+void nx_message_compose_1(nx_message_t *msg, const char *to, nx_buffer_t *buf_chain);
+
+// Raw Functions
+void nx_message_begin_header(nx_message_t *msg);
+void nx_message_end_header(nx_message_t *msg);
+
+void nx_message_begin_delivery_annotations(nx_message_t *msg);
+void nx_message_end_delivery_annotations(nx_message_t *msg);
+
+void nx_message_begin_message_annotations(nx_message_t *msg);
+void nx_message_end_message_annotations(nx_message_t *msg);
+
+void nx_message_begin_message_properties(nx_message_t *msg);
+void nx_message_end_message_properties(nx_message_t *msg);
+
+void nx_message_begin_application_properties(nx_message_t *msg);
+void nx_message_end_application_properties(nx_message_t *msg);
+
+void nx_message_append_body_data(nx_message_t *msg, nx_buffer_t *buf_chain);
+
+void nx_message_begin_body_sequence(nx_message_t *msg);
+void nx_message_end_body_sequence(nx_message_t *msg);
+
+void nx_message_begin_footer(nx_message_t *msg);
+void nx_message_end_footer(nx_message_t *msg);
+
+void nx_message_insert_null(nx_message_t *msg);
+void nx_message_insert_boolean(nx_message_t *msg, int value);
+void nx_message_insert_ubyte(nx_message_t *msg, uint8_t value);
+void nx_message_insert_uint(nx_message_t *msg, uint32_t value);
+void nx_message_insert_ulong(nx_message_t *msg, uint64_t value);
+void nx_message_insert_binary(nx_message_t *msg, const uint8_t *start, size_t len);
+void nx_message_insert_string(nx_message_t *msg, const char *start);
+void nx_message_insert_uuid(nx_message_t *msg, const uint8_t *value);
+void nx_message_insert_symbol(nx_message_t *msg, const char *start, size_t len);
+void nx_message_insert_timestamp(nx_message_t *msg, uint64_t value);
+
+//
+// Functions for buffers
+//
+unsigned char *nx_buffer_base(nx_buffer_t *buf); // Pointer to the first octet in the buffer
+unsigned char *nx_buffer_cursor(nx_buffer_t *buf); // Pointer to the first free octet in the buffer
+size_t nx_buffer_capacity(nx_buffer_t *buf); // Size of free space in the buffer in octets
+size_t nx_buffer_size(nx_buffer_t *buf); // Number of octets in the buffer
+void nx_buffer_insert(nx_buffer_t *buf, size_t len); // Notify the buffer that 'len' octets were written at cursor
+
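+/*
+ * Example (illustrative sketch): composing a small message.  The address and
+ * payload are hypothetical, and ownership of the buffer chain is assumed to
+ * pass to the message.
+ *
+ *    const char  *payload = "Hello";
+ *    size_t       len     = 5;
+ *    nx_buffer_t *buf     = nx_allocate_buffer();
+ *
+ *    if (len <= nx_buffer_capacity(buf)) {
+ *        memcpy(nx_buffer_cursor(buf), payload, len);   // write at the cursor
+ *        nx_buffer_insert(buf, len);                    // record the octets written
+ *    }
+ *
+ *    nx_message_t *msg = nx_allocate_message();
+ *    nx_message_compose_1(msg, "amqp://peer.example.com/my-node", buf);
+ *    ...                                                // hand msg to a delivery
+ *    nx_free_message(msg);
+ */
+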
+#endif
diff --git a/qpid/extras/nexus/include/qpid/nexus/server.h b/qpid/extras/nexus/include/qpid/nexus/server.h
new file mode 100644
index 0000000000..b04db5cf9a
--- /dev/null
+++ b/qpid/extras/nexus/include/qpid/nexus/server.h
@@ -0,0 +1,403 @@
+#ifndef __nexus_server_h__
+#define __nexus_server_h__ 1
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <proton/engine.h>
+
+/**
+ * \defgroup Control Server Control Functions
+ * @{
+ */
+
+/**
+ * \brief Thread Start Handler
+ *
+ * Callback invoked when a new server thread is started. The callback is
+ * invoked on the newly created thread.
+ *
+ * This handler can be used to set processor affinity or other thread-specific
+ * tuning values.
+ *
+ * @param context The handler context supplied in nx_server_set_start_handler.
+ * @param thread_id The integer thread identifier that uniquely identifies this thread.
+ */
+typedef void (*nx_thread_start_cb_t)(void* context, int thread_id);
+
+
+/**
+ * \brief Initialize the server module and prepare it for operation.
+ *
+ * @param thread_count The number of worker threads (1 or more) that the server shall create
+ */
+void nx_server_initialize(int thread_count);
+
+
+/**
+ * \brief Finalize the server after it has stopped running.
+ */
+void nx_server_finalize(void);
+
+
+/**
+ * \brief Set the optional thread-start handler.
+ *
+ * This handler is called once on each worker thread at the time
+ * the thread is started. It may be used to set thread-specific tuning values such as processor affinity.
+ *
+ * @param start_handler The thread-start handler invoked per thread on thread startup.
+ * @param context Opaque context to be passed back in the callback function.
+ */
+void nx_server_set_start_handler(nx_thread_start_cb_t start_handler, void *context);
+
+
+/**
+ * \brief Run the server threads until completion.
+ *
+ * Start the operation of the server, including launching all of the worker threads.
+ * This function does not return until after the server has been stopped. The thread
+ * that calls nx_server_run is used as one of the worker threads.
+ */
+void nx_server_run(void);
+
+
+/**
+ * \brief Stop the server
+ *
+ * Stop the server and join all of its worker threads. This function may be called from any
+ * thread. When this function returns, all of the other server threads have been stopped and
+ * joined. The calling thread will be the only running thread in the process.
+ */
+void nx_server_stop(void);
+
+
+/**
+ * \brief Pause (quiesce) the server.
+ *
+ * This call blocks until all of the worker threads (except
+ * the one calling this function) are finished processing and have been blocked. When
+ * this call returns, the calling thread is the only thread running in the process.
+ */
+void nx_server_pause(void);
+
+
+/**
+ * \brief Resume normal operation of a paused server.
+ *
+ * This call unblocks all of the worker threads
+ * so they can resume normal connection processing.
+ */
+void nx_server_resume(void);
+
+
+/**
+ * @}
+ * \defgroup Signal Server Signal Handling Functions
+ * @{
+ */
+
+
+/**
+ * \brief Signal Handler
+ *
+ * Callback for caught signals. This handler will only be invoked for signal numbers
+ * that were registered via nx_server_signal. The handler is not invoked in the context
+ * of the OS signal handler. Rather, it is invoked on one of the worker threads in an
+ * orderly sequence.
+ *
+ * @param context The handler context supplied in nx_server_set_signal_handler.
+ * @param signum The signal number that was raised.
+ */
+typedef void (*nx_signal_handler_cb_t)(void* context, int signum);
+
+
+/**
+ * Set the signal handler for the server. The signal handler is invoked cleanly on a worker thread
+ * after the server process catches an operating-system signal. The signal handler is optional and
+ * need not be set.
+ *
+ * @param signal_handler The signal handler called when a registered signal is caught.
+ * @param context Opaque context to be passed back in the callback function.
+ */
+void nx_server_set_signal_handler(nx_signal_handler_cb_t signal_handler, void *context);
+
+
+/**
+ * \brief Register a signal to be caught and handled by the signal handler.
+ *
+ * @param signum The signal number of a signal to be handled by the application.
+ */
+void nx_server_signal(int signum);
+
+
+/**
+ * @}
+ * \defgroup Connection Server AMQP Connection Handling Functions
+ * @{
+ */
+
+/**
+ * \brief Listener objects represent the desire to accept incoming transport connections.
+ */
+typedef struct nx_listener_t nx_listener_t;
+
+/**
+ * \brief Connector objects represent the desire to create and maintain an outgoing transport connection.
+ */
+typedef struct nx_connector_t nx_connector_t;
+
+/**
+ * \brief Connection objects wrap Proton connection objects.
+ */
+typedef struct nx_connection_t nx_connection_t;
+
+/**
+ * Event type for the connection callback.
+ */
+typedef enum {
+ /// The connection just opened via a listener (inbound).
+ NX_CONN_EVENT_LISTENER_OPEN,
+
+ /// The connection just opened via a connector (outbound).
+ NX_CONN_EVENT_CONNECTOR_OPEN,
+
+ /// The connection was closed at the transport level (not cleanly).
+ NX_CONN_EVENT_CLOSE,
+
+ /// The connection requires processing.
+ NX_CONN_EVENT_PROCESS
+} nx_conn_event_t;
+
+
+/**
+ * \brief Connection Event Handler
+ *
+ * Callback invoked when processing is needed on a proton connection. This callback
+ * shall be invoked on one of the server's worker threads. The server guarantees that
+ * no two threads shall be allowed to process a single connection concurrently.
+ * The implementation of this handler may assume that it has exclusive access to the
+ * connection and its subservient components (sessions, links, deliveries, etc.).
+ *
+ * @param context The handler context supplied in nx_server_{connect,listen}.
+ * @param event The event/reason for the invocation of the handler.
+ * @param conn The connection that requires processing by the handler.
+ * @return A value greater than zero if the handler did any proton processing for
+ * the connection. If no work was done, zero is returned.
+ */
+typedef int (*nx_conn_handler_cb_t)(void* context, nx_conn_event_t event, nx_connection_t *conn);
+
+
+/**
+ * \brief Set the connection event handler callback.
+ *
+ * Set the connection handler callback for the server. This callback is mandatory and must be set
+ * prior to the invocation of nx_server_run.
+ *
+ * @param conn_handler The handler for processing connection-related events.
+ */
+void nx_server_set_conn_handler(nx_conn_handler_cb_t conn_handler);
+
+
+/**
+ * \brief Set the user context for a connection.
+ *
+ * @param conn Connection object supplied in NX_CONN_EVENT_{LISTENER,CONNECTOR}_OPEN
+ * @param context User context to be stored with the connection.
+ */
+void nx_connection_set_context(nx_connection_t *conn, void *context);
+
+
+/**
+ * \brief Get the user context from a connection.
+ *
+ * @param conn Connection object supplied in NX_CONN_EVENT_{LISTENER,CONNECTOR}_OPEN
+ * @return The user context stored with the connection.
+ */
+void *nx_connection_get_context(nx_connection_t *conn);
+
+
+/**
+ * \brief Activate a connection for output.
+ *
+ * This function is used to request that the server activate the indicated connection.
+ * It is assumed that the connection is one that the caller does not have permission to
+ * access (i.e. it may be owned by another thread currently). An activated connection
+ * will, when writable, appear in the internal work list and be invoked for processing
+ * by a worker thread.
+ *
+ * @param conn The connection over which the application wishes to send data
+ */
+void nx_server_activate(nx_connection_t *conn);
+
+
+/**
+ * \brief Get the wrapped proton-engine connection object.
+ *
+ * @param conn Connection object supplied in NX_CONN_EVENT_{LISTENER,CONNECTOR}_OPEN
+ * @return The proton connection object.
+ */
+pn_connection_t *nx_connection_pn(nx_connection_t *conn);
+
+
+/**
+ * \brief Configuration block for a connector or a listener.
+ */
+typedef struct nx_server_config_t {
+ /**
+ * Host name or network address to bind to a listener or use in the connector.
+ */
+ char *host;
+
+ /**
+ * Port name or number to bind to a listener or use in the connector.
+ */
+ char *port;
+
+ /**
+ * Space-separated list of SASL mechanisms to be accepted for the connection.
+ */
+ char *sasl_mechanisms;
+
+ /**
+ * If appropriate for the mechanism, the username for authentication
+ * (connector only)
+ */
+ char *sasl_username;
+
+ /**
+ * If appropriate for the mechanism, the password for authentication
+ * (connector only)
+ */
+ char *sasl_password;
+
+ /**
+ * If appropriate for the mechanism, the minimum acceptable security strength factor
+ */
+ int sasl_minssf;
+
+ /**
+ * If appropriate for the mechanism, the maximum acceptable security strength factor
+ */
+ int sasl_maxssf;
+
+ /**
+ * SSL is enabled for this connection iff non-zero.
+ */
+ int ssl_enabled;
+
+ /**
+ * Connection will take on the role of SSL server iff non-zero.
+ */
+ int ssl_server;
+
+ /**
+ * Iff non-zero AND ssl_enabled is non-zero, this listener will detect whether the
+ * connecting client uses SSL and conform to the client's choice.
+ * (listener only)
+ */
+ int ssl_allow_unsecured_client;
+
+ /**
+ * Path to the file containing the PEM-formatted public certificate for the local end
+ * of the connection.
+ */
+ char *ssl_certificate_file;
+
+ /**
+ * Path to the file containing the PEM-formatted private key for the local end of the
+ * connection.
+ */
+ char *ssl_private_key_file;
+
+ /**
+ * The password used to protect the private key, or NULL if the key is not protected.
+ */
+ char *ssl_password;
+
+ /**
+ * Path to the file containing the PEM-formatted set of certificates of trusted CAs.
+ */
+ char *ssl_trusted_certificate_db;
+
+ /**
+ * Iff non-zero, require that the peer's certificate be supplied and that it be authentic
+ * according to the set of trusted CAs.
+ */
+ int ssl_require_peer_authentication;
+
+ /**
+ * Allow the connection to be redirected by the peer (via CLOSE->Redirect). This is
+ * meaningful for outgoing (connector) connections only.
+ */
+ int allow_redirect;
+} nx_server_config_t;
+
+
+/**
+ * \brief Create a listener for incoming connections.
+ *
+ * @param config Pointer to a configuration block for this listener. This block will be
+ * referenced by the server, not copied. The referenced record must remain
+ * in-scope for the life of the listener.
+ * @param context User context passed back in the connection handler.
+ * @return A pointer to the new listener, or NULL in case of failure.
+ */
+nx_listener_t *nx_server_listen(const nx_server_config_t *config, void *context);
+
+
+/**
+ * \brief Free the resources associated with a listener.
+ *
+ * @param li A listener pointer returned by nx_server_listen.
+ */
+void nx_listener_free(nx_listener_t* li);
+
+
+/**
+ * \brief Close a listener so it will accept no more connections.
+ *
+ * @param li A listener pointer returned by nx_server_listen.
+ */
+void nx_listener_close(nx_listener_t* li);
+
+
+/**
+ * \brief Create a connector for an outgoing connection.
+ *
+ * @param config Pointer to a configuration block for this connector. This block will be
+ * referenced by the server, not copied. The referenced record must remain
+ * in-scope for the life of the connector.
+ * @param context User context passed back in the connection handler.
+ * @return A pointer to the new connector, or NULL in case of failure.
+ */
+nx_connector_t *nx_server_connect(const nx_server_config_t *config, void *context);
+
+
+/**
+ * \brief Free the resources associated with a connector.
+ *
+ * @param ct A connector pointer returned by nx_server_connect.
+ */
+void nx_connector_free(nx_connector_t* ct);
+
+/**
+ * @}
+ */
+
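+/*
+ * Example (illustrative sketch): minimal server bring-up, e.g. from main().
+ * The handler body, field values, and thread count are hypothetical, and
+ * unspecified configuration fields are left zeroed.
+ *
+ *    static int conn_handler(void *context, nx_conn_event_t event, nx_connection_t *conn)
+ *    {
+ *        // Drive the proton sessions/links here; return non-zero if any
+ *        // proton work was done for this connection.
+ *        return 0;
+ *    }
+ *
+ *    static nx_server_config_t config = {     // static: must outlive the listener
+ *        .host            = "0.0.0.0",
+ *        .port            = "5672",
+ *        .sasl_mechanisms = "ANONYMOUS"
+ *    };
+ *
+ *    nx_server_initialize(4);                      // four worker threads
+ *    nx_server_set_conn_handler(conn_handler);     // mandatory before nx_server_run
+ *    nx_listener_t *listener = nx_server_listen(&config, 0);
+ *    nx_server_run();                              // blocks until nx_server_stop
+ *    nx_listener_free(listener);
+ *    nx_server_finalize();
+ */
+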
+#endif
diff --git a/qpid/extras/nexus/include/qpid/nexus/threading.h b/qpid/extras/nexus/include/qpid/nexus/threading.h
new file mode 100644
index 0000000000..f275fc0086
--- /dev/null
+++ b/qpid/extras/nexus/include/qpid/nexus/threading.h
@@ -0,0 +1,45 @@
+#ifndef __sys_threading_h__
+#define __sys_threading_h__ 1
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+typedef struct sys_mutex_t sys_mutex_t;
+
+sys_mutex_t *sys_mutex(void);
+void sys_mutex_free(sys_mutex_t *mutex);
+void sys_mutex_lock(sys_mutex_t *mutex);
+void sys_mutex_unlock(sys_mutex_t *mutex);
+
+
+typedef struct sys_cond_t sys_cond_t;
+
+sys_cond_t *sys_cond(void);
+void sys_cond_free(sys_cond_t *cond);
+void sys_cond_wait(sys_cond_t *cond, sys_mutex_t *held_mutex);
+void sys_cond_signal(sys_cond_t *cond);
+void sys_cond_signal_all(sys_cond_t *cond);
+
+
+typedef struct sys_thread_t sys_thread_t;
+
+sys_thread_t *sys_thread(void *(*run_function) (void *), void *arg);
+void sys_thread_free(sys_thread_t *thread);
+void sys_thread_join(sys_thread_t *thread);
+
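+/*
+ * Example (illustrative sketch): worker and shared_lock are hypothetical.
+ *
+ *    static sys_mutex_t *shared_lock;
+ *
+ *    static void *worker(void *arg)
+ *    {
+ *        sys_mutex_lock(shared_lock);
+ *        // ... touch shared state ...
+ *        sys_mutex_unlock(shared_lock);
+ *        return 0;
+ *    }
+ *
+ *    shared_lock = sys_mutex();
+ *    sys_thread_t *thread = sys_thread(worker, 0);
+ *    sys_thread_join(thread);                 // wait for worker to finish
+ *    sys_thread_free(thread);
+ *    sys_mutex_free(shared_lock);
+ */
+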
+#endif
diff --git a/qpid/extras/nexus/include/qpid/nexus/timer.h b/qpid/extras/nexus/include/qpid/nexus/timer.h
new file mode 100644
index 0000000000..5444989296
--- /dev/null
+++ b/qpid/extras/nexus/include/qpid/nexus/timer.h
@@ -0,0 +1,86 @@
+#ifndef __nexus_timer_h__
+#define __nexus_timer_h__ 1
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * \defgroup Timer Server Timer Functions
+ * @{
+ */
+
+typedef struct nx_timer_t nx_timer_t;
+
+/**
+ * Timer Callback
+ *
+ * Callback invoked after a timer's interval expires and the timer fires.
+ *
+ * @param context The context supplied in nx_timer.
+ */
+typedef void (*nx_timer_cb_t)(void* context);
+
+
+/**
+ * Create a new timer object.
+ *
+ * @param cb The callback function to be invoked when the timer expires.
+ * @param context An opaque, user-supplied context to be passed into the callback.
+ * @return A pointer to the new timer object or NULL if memory is exhausted.
+ */
+nx_timer_t *nx_timer(nx_timer_cb_t cb, void* context);
+
+
+/**
+ * Free the resources for a timer object. If the timer was scheduled, it will be canceled
+ * prior to freeing. After this function returns, the callback will not be invoked for this
+ * timer.
+ *
+ * @param timer Pointer to the timer object returned by nx_timer.
+ */
+void nx_timer_free(nx_timer_t *timer);
+
+
+/**
+ * Schedule a timer to fire in the future.
+ *
+ * Note that the timer callback will never be invoked synchronously during the execution
+ * of nx_timer_schedule. Even if the interval is immediate (0), the callback invocation will
+ * be asynchronous and after the return of this function.
+ *
+ * @param timer Pointer to the timer object returned by nx_timer.
+ * @param msec The minimum number of milliseconds of delay until the timer fires.
+ * If 0 is supplied, the timer will fire as soon as possible (still asynchronously).
+ */
+void nx_timer_schedule(nx_timer_t *timer, long msec);
+
+
+/**
+ * Attempt to cancel a scheduled timer. Since the timer callback can be invoked on any
+ * server thread, it is always possible that a last-second cancel attempt may arrive too late
+ * to stop the timer from firing (i.e. the cancel is concurrent with the fire callback).
+ *
+ * @param timer Pointer to the timer object returned by nx_timer.
+ */
+void nx_timer_cancel(nx_timer_t *timer);
+
+/**
+ * @}
+ */
+
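+/*
+ * Example (illustrative sketch): on_heartbeat and its context are hypothetical.
+ *
+ *    static void on_heartbeat(void *context)
+ *    {
+ *        // invoked on a worker thread once the interval has elapsed
+ *    }
+ *
+ *    nx_timer_t *timer = nx_timer(on_heartbeat, 0);
+ *    nx_timer_schedule(timer, 1000);        // fire in roughly one second
+ *    ...
+ *    nx_timer_cancel(timer);                // best effort; may race with the callback
+ *    nx_timer_free(timer);
+ */
+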
+#endif
diff --git a/qpid/extras/nexus/include/qpid/nexus/user_fd.h b/qpid/extras/nexus/include/qpid/nexus/user_fd.h
new file mode 100644
index 0000000000..2f139c2c4f
--- /dev/null
+++ b/qpid/extras/nexus/include/qpid/nexus/user_fd.h
@@ -0,0 +1,121 @@
+#ifndef __nexus_user_fd_h__
+#define __nexus_user_fd_h__ 1
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+/**
+ * \defgroup UserFd Server User-File-Descriptor Functions
+ * @{
+ */
+
+typedef struct nx_user_fd_t nx_user_fd_t;
+
+
+/**
+ * User_fd Handler
+ *
+ * Callback invoked when a user-managed file descriptor is available for reading or writing or there
+ * was an error on the file descriptor.
+ *
+ * @param context The handler context supplied in the nx_user_fd call.
+ * @param ufd The user_fd handle for the processable fd.
+ */
+typedef void (*nx_user_fd_handler_cb_t)(void* context, nx_user_fd_t *ufd);
+
+
+/**
+ * Set the user-fd handler callback for the server. This handler is optional, but must be supplied
+ * if the nx_server is used to manage the activation of user file descriptors.
+ */
+void nx_server_set_user_fd_handler(nx_user_fd_handler_cb_t ufd_handler);
+
+
+/**
+ * Create a tracker for a user-managed file descriptor.
+ *
+ * A user-fd is appropriate for use when the application opens and manages file descriptors
+ * for purposes other than AMQP communication. Registering a user fd with the nexus server
+ * allows the FD to be polled and processed alongside the FDs used for messaging.
+ *
+ * @param fd The open file descriptor being managed by the application.
+ * @param context User context passed back in the user-fd handler.
+ * @return A pointer to the new user_fd.
+ */
+nx_user_fd_t *nx_user_fd(int fd, void *context);
+
+
+/**
+ * Free the resources for a user-managed FD tracker.
+ *
+ * @param ufd Structure pointer returned by nx_user_fd.
+ */
+void nx_user_fd_free(nx_user_fd_t *ufd);
+
+
+/**
+ * Activate a user-fd for read.
+ *
+ * Use this activation when the application has capacity to receive data from the user-fd. This will
+ * cause the callback set in nx_server_set_user_fd_handler to later be invoked when the
+ * file descriptor has data to read.
+ *
+ * @param ufd Structure pointer returned by nx_user_fd.
+ */
+void nx_user_fd_activate_read(nx_user_fd_t *ufd);
+
+
+/**
+ * Activate a user-fd for write.
+ *
+ * Use this activation when the application has data to write via the user-fd. This will
+ * cause the callback set in nx_server_set_user_fd_handler to later be invoked when the
+ * file descriptor is writable.
+ *
+ * @param ufd Structure pointer returned by nx_user_fd.
+ */
+void nx_user_fd_activate_write(nx_user_fd_t *ufd);
+
+
+/**
+ * Check readable status of a user-fd
+ *
+ * Note: It is possible that readable status is spurious (i.e. this function returns true
+ * but the file-descriptor is not readable and will block if not set to O_NONBLOCK).
+ * Code accordingly.
+ *
+ * @param ufd Structure pointer returned by nx_user_fd.
+ * @return true iff the user file descriptor is readable.
+ */
+bool nx_user_fd_is_readable(nx_user_fd_t *ufd);
+
+
+/**
+ * Check writable status of a user-fd
+ *
+ * @param ufd Structure pointer returned by nx_user_fd.
+ * @return true iff the user file descriptor is writable.
+ */
+bool nx_user_fd_is_writeable(nx_user_fd_t *ufd);
+
+/**
+ * @}
+ */
+
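+/*
+ * Example (illustrative sketch): on_user_fd and sock are hypothetical, and
+ * sock is assumed to be a non-blocking descriptor owned by the application
+ * and carried in the user context.
+ *
+ *    static void on_user_fd(void *context, nx_user_fd_t *ufd)
+ *    {
+ *        int fd = (int) (intptr_t) context;
+ *        if (nx_user_fd_is_readable(ufd)) {
+ *            // read from fd here, then re-arm if more data is expected
+ *            nx_user_fd_activate_read(ufd);
+ *        }
+ *    }
+ *
+ *    nx_server_set_user_fd_handler(on_user_fd);
+ *    nx_user_fd_t *ufd = nx_user_fd(sock, (void*) (intptr_t) sock);
+ *    nx_user_fd_activate_read(ufd);
+ *    ...
+ *    nx_user_fd_free(ufd);
+ */
+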
+#endif
diff --git a/qpid/extras/nexus/site/css/style.css b/qpid/extras/nexus/site/css/style.css
new file mode 100644
index 0000000000..b73c136d4a
--- /dev/null
+++ b/qpid/extras/nexus/site/css/style.css
@@ -0,0 +1,280 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+ul {
+ list-style-type:square;
+}
+
+th {
+ text-align: left;
+ font-weight: bold;
+}
+
+body {
+ margin:0;
+ background:#FFFFFF;
+ font-family:"Verdana", sans-serif;
+}
+
+.container {
+ width:950px;
+ margin:0 auto;
+}
+
+.header {
+ height:100px;
+ width:950px;
+ background:url(images/header.png)
+}
+
+.logo {
+ text-align:center;
+ font-weight:600;
+ padding:0 0 0 0;
+ font-size:14px;
+ font-family:"Verdana", cursive;
+}
+
+.logo a {
+ color:#000000;
+ text-decoration:none;
+}
+
+.main_text_area {
+ margin-left:200px;
+}
+
+.main_text_area_top {
+ height:14px;
+ font-size:1px;
+}
+
+.main_text_area_bottom {
+ display:none;
+/* height:14px;
+ margin-bottom:4px;*/
+}
+
+.main_text_area_body {
+ padding:5px 24px;
+}
+
+.main_text_area_body p {
+ text-align:justify;
+}
+
+.main_text_area br {
+ line-height:10px;
+}
+
+.main_text_area h1 {
+ font-size:28px;
+ font-weight:600;
+ margin:0 0 24px 0;
+ color:#0c3b82;
+ font-family:"Verdana", Times, serif;
+}
+
+.main_text_area h2 {
+ font-size:24px;
+ font-weight:600;
+ margin:24px 0 8px 0;
+ color:#0c3b82;
+ font-family:"Verdana",Times, serif;
+}
+
+.main_text_area ol, .main_text_area ul {
+ padding:0;
+ margin:10px 0;
+ margin-left:20px;
+}
+
+.main_text_area li {
+/* margin-left:40px; */
+}
+
+.main_text_area, .menu_box {
+ font-size:13px;
+ line-height:17px;
+ color:#000000;
+}
+
+.main_text_area {
+ font-size:15px;
+}
+
+.main_text_area a {
+ color:#000000;
+}
+
+.main_text_area a:hover {
+ color:#000000;
+}
+
+.menu_box {
+ width:196px;
+ float:left;
+ margin-left:4px;
+}
+
+.menu_box_top {
+ background:url(images/menu_top.png) no-repeat;
+ height:14px;
+ font-size:1px;
+}
+
+.menu_box_body {
+ background:url(images/menu_body.png) repeat-y;
+ padding:5px 24px 5px 24px;
+}
+
+.menu_box_bottom {
+ background:url(images/menu_bottom.png) no-repeat;
+ height:14px;
+ font-size:1px;
+ margin-bottom:1px;
+}
+
+.menu_box h3 {
+ font-size:20px;
+ font-weight:500;
+ margin:0 0 8px 0;
+ color:#0c3b82;
+ font-family:"Verdana",Times, serif;
+}
+
+.menu_box ul {
+ margin:12px;
+ padding:0px;
+}
+
+.menu_box li {
+ list-style:square;
+}
+
+.menu_box a {
+ color:#000000;
+ text-decoration:none;
+}
+
+.menu_box a:hover {
+ color:#000000;
+ text-decoration:underline;
+}
+
+.feature_box {
+ width:698px;
+ overflow:hidden;
+}
+
+.feature_box h3 {
+ font-size:18px;
+ font-weight:600;
+ margin:0 0 8px 0;
+ color:#0c3b82;
+ font-family:"Verdana", Times, serif;
+}
+
+.feature_box_column1 {
+ width:196px;
+ float:left;
+ padding:10px 15px 10px 15px;
+ margin-left:0px;
+}
+
+.feature_box_column2 {
+ width:196px;
+ float:left;
+ padding:10px 15px 10px 15px;
+ margin-left:0px;
+}
+
+.feature_box_column3 {
+ width:196px;
+ float:left;
+ padding:10px 15px 10px 15px;
+ margin-left:0px;
+}
+
+
+.feature_box ul {
+ margin:.8em .4em;
+ padding-left:1.2em;
+ padding:0;
+ list-style-type: square;
+}
+
+.feature_box ul li {
+ font-family:"Verdana",sans-serif;
+ font-size:14px;
+ color:#000;
+ margin:.4em 0;
+}
+
+.feature_box ul li ul {
+ padding-left:1.2em;
+ margin-left:2em;
+}
+
+.feature_box a {
+ color:#000000;
+ text-decoration:none;
+}
+
+.feature_box a:hover {
+ color:#000000;
+ text-decoration:underline;
+}
+
+.footer {
+ color:#000000;
+ clear:both;
+ text-align:center;
+ font-size:11px;
+ line-height:17px;
+ height:45px;
+ padding-top:18px;
+}
+
+.footer a {
+ color:#000000;
+}
+
+.footer a:hover {
+ color:#000000;
+}
+
+.download_table {
+ width:100%;
+}
+
+.download_table_col_1 {
+ width:240px;
+}
+
+.proton_download_table_col_1 {
+ width:420px;
+}
+
+.download_table_amqp_col {
+ text-align:center;
+ width:80px;
+}
+
diff --git a/qpid/extras/nexus/site/images/gwarch.dia b/qpid/extras/nexus/site/images/gwarch.dia
new file mode 100644
index 0000000000..fd7eef97a4
--- /dev/null
+++ b/qpid/extras/nexus/site/images/gwarch.dia
Binary files differ
diff --git a/qpid/extras/nexus/site/images/gwarch.png b/qpid/extras/nexus/site/images/gwarch.png
new file mode 100644
index 0000000000..923baadf9f
--- /dev/null
+++ b/qpid/extras/nexus/site/images/gwarch.png
Binary files differ
diff --git a/qpid/extras/nexus/site/includes/footer.include b/qpid/extras/nexus/site/includes/footer.include
new file mode 100644
index 0000000000..35ff04b9f2
--- /dev/null
+++ b/qpid/extras/nexus/site/includes/footer.include
@@ -0,0 +1,7 @@
+ <div class="footer">
+ <p>
+ &#xA9; 2004-2012 The Apache Software Foundation.<br />
+ Apache Qpid, Qpid, Apache, the Apache feather logo, and the Apache Qpid project logo are trademarks of The Apache Software Foundation.<br />
+ All other marks mentioned may be trademarks or registered trademarks of their respective owners.
+ </p>
+ </div>
diff --git a/qpid/extras/nexus/site/includes/header.include b/qpid/extras/nexus/site/includes/header.include
new file mode 100644
index 0000000000..244dfc4517
--- /dev/null
+++ b/qpid/extras/nexus/site/includes/header.include
@@ -0,0 +1,6 @@
+ <div class="header">
+ <div class="logo">
+ <h1>Apache Qpid&#8482;</h1>
+ <h2>Open Source AMQP Messaging</h2>
+ </div>
+ </div>
diff --git a/qpid/extras/nexus/site/includes/menu.include b/qpid/extras/nexus/site/includes/menu.include
new file mode 100644
index 0000000000..aa96000e94
--- /dev/null
+++ b/qpid/extras/nexus/site/includes/menu.include
@@ -0,0 +1,71 @@
+ <div class="menu_box">
+ <div class="menu_box_top"></div>
+ <div class="menu_box_body">
+ <h3>Apache Qpid</h3>
+ <ul>
+ <li><a href="index.html">Home</a></li>
+ <li><a href="download.html">Download</a></li>
+ <li><a href="getting_started.html">Getting Started</a></li>
+ <li><a href="http://www.apache.org/licenses/">License</a></li>
+ <li><a href="https://cwiki.apache.org/qpid/faq.html">FAQ</a></li>
+ </ul>
+ </div>
+ <div class="menu_box_bottom"></div>
+
+ <div class="menu_box_top"></div>
+ <div class="menu_box_body">
+ <h3>Documentation</h3>
+ <ul>
+ <li><a href="documentation.html#doc-release">Latest Release</a></li>
+ <li><a href="documentation.html#doc-trunk">Trunk</a></li>
+ <li><a href="documentation.html#doc-archives">Archive</a></li>
+ </ul>
+ </div>
+ <div class="menu_box_bottom"></div>
+
+ <div class="menu_box_top"></div>
+ <div class="menu_box_body">
+ <h3>Community</h3>
+ <ul>
+ <li><a href="getting_involved.html">Getting Involved</a></li>
+ <li><a href="source_repository.html">Source Repository</a></li>
+ <li><a href="mailing_lists.html">Mailing Lists</a></li>
+ <li><a href="https://cwiki.apache.org/qpid/">Wiki</a></li>
+ <li><a href="https://issues.apache.org/jira/browse/qpid">Issue Reporting</a></li>
+ <li><a href="people.html">People</a></li>
+ <li><a href="acknowledgements.html">Acknowledgements</a></li>
+ </ul>
+ </div>
+ <div class="menu_box_bottom"></div>
+
+ <div class="menu_box_top"></div>
+ <div class="menu_box_body">
+ <h3>Developers</h3>
+ <ul>
+ <li><a href="https://cwiki.apache.org/qpid/building.html">Building Qpid</a></li>
+ <li><a href="https://cwiki.apache.org/qpid/developer-pages.html">Developer Pages</a></li>
+ </ul>
+ </div>
+ <div class="menu_box_bottom"></div>
+
+ <div class="menu_box_top"></div>
+ <div class="menu_box_body">
+ <h3>About AMQP</h3>
+ <ul>
+ <li><a href="amqp.html">What is AMQP?</a></li>
+ </ul>
+ </div>
+ <div class="menu_box_bottom"></div>
+
+ <div class="menu_box_top"></div>
+ <div class="menu_box_body">
+ <h3>About Apache</h3>
+ <ul>
+ <li><a href="http://www.apache.org">Home</a></li>
+ <li><a href="http://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>
+ <li><a href="http://www.apache.org/foundation/thanks.html">Thanks</a></li>
+ <li><a href="http://www.apache.org/security/">Security</a></li>
+ </ul>
+ </div>
+ <div class="menu_box_bottom"></div>
+ </div>
diff --git a/qpid/extras/nexus/site/index.html b/qpid/extras/nexus/site/index.html
new file mode 100755
index 0000000000..806965a9c1
--- /dev/null
+++ b/qpid/extras/nexus/site/index.html
@@ -0,0 +1,98 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<!--
+ -
+ - Licensed to the Apache Software Foundation (ASF) under one
+ - or more contributor license agreements. See the NOTICE file
+ - distributed with this work for additional information
+ - regarding copyright ownership. The ASF licenses this file
+ - to you under the Apache License, Version 2.0 (the
+ - "License"); you may not use this file except in compliance
+ - with the License. You may obtain a copy of the License at
+ -
+ - http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing,
+ - software distributed under the License is distributed on an
+ - "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ - KIND, either express or implied. See the License for the
+ - specific language governing permissions and limitations
+ - under the License.
+ -
+-->
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+ <title>Apache Qpid Nexus&#8482;: A Platform for Building AMQP Infrastructure</title>
+ <link href="css/style.css" rel="stylesheet" type="text/css"/>
+ </head>
+
+ <body>
+ <div class="container">
+ <!-- begin header -->
+
+ <div class="header">
+ <div class="logo">
+ <h1>Apache Qpid Nexus&#8482;</h1>
+ <h2>A Platform for Building AMQP Infrastructure</h2>
+ </div>
+ </div>
+
+ <!-- end header -->
+
+ <!-- begin menu -->
+ <!--#include virtual="/includes/menu.include" -->
+ <!-- end menu -->
+
+ <!-- begin content -->
+ <div class="main_text_area">
+ <div class="main_text_area_top"></div>
+
+ <div class="main_text_area_body">
+
+<p>Qpid Nexus is a library to help developers build infrastructure
+components for AMQP. Nexus is not a general-purpose Messaging API.
+Rather, it is a foundation on which to build applications, services, and
+appliances that need direct access to the detailed constructs of AMQP.</p>
+<hr width="80%" />
+<h2>Overview</h2>
+<p>Nexus is an extension of the Engine and Driver interfaces of
+<a href="http://qpid.apache.org/proton">Qpid Proton</a>. The following
+features are provided:</p>
+
+<ul>
+ <li>Safe multi-threaded use of Proton</li>
+ <li>Operating System Signal handling</li>
+ <li>Quiesce and Resume for the application's threads</li>
+ <li>Timers</li>
+ <li>Resilient outbound connections (retry/reconnect)</li>
+ <li>Polling support for the application's non-AMQP file descriptors</li>
+ <li>An AMQP Node Container that allows the developer to create node types</li>
+ <li>Node instances can be statically or dynamically provisioned</li>
+</ul>
+<p />
+<hr width="80%" />
+<h2>Architecture</h2>
+<center><img src="images/gwarch.png" /></center>
+<ul>
+ <li><b>Proton Engine and Driver</b> provide the underlying AMQP capability</li>
+ <li><a href="doxygen/server/modules.html">Nexus Server</a>
+ wraps Proton connections in a multi-threaded server environment</li>
+ <li><b>Nexus Container</b> provides management of AMQP nodes (links, termini, and deliveries)</li>
+ <li><b>Nexus Message</b> provides efficient message encode/decode, optimized for messaging intermediaries</li>
+ <li>The <b>Application</b> uses all of the above services to implement scalable and performant AMQP infrastructure</li>
+</ul>
+<hr width="80%" />
+
+ </div>
+
+ <div class="main_text_area_bottom"></div>
+ </div>
+ <!-- end content -->
+
+ <!-- begin footer -->
+ <!--#include virtual="/includes/footer.include" -->
+ <!-- end footer -->
+
+ </div>
+ </body>
+</html>
diff --git a/qpid/extras/nexus/src/alloc.c b/qpid/extras/nexus/src/alloc.c
new file mode 100644
index 0000000000..397a7897ac
--- /dev/null
+++ b/qpid/extras/nexus/src/alloc.c
@@ -0,0 +1,202 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <qpid/nexus/alloc.h>
+#include <qpid/nexus/ctools.h>
+#include <memory.h>
+#include <stdio.h>
+
+typedef struct item_t item_t;
+
+struct item_t {
+ DEQ_LINKS(item_t);
+ nx_alloc_type_desc_t *desc;
+};
+
+DEQ_DECLARE(item_t, item_list_t);
+
+struct nx_alloc_pool_t {
+ item_list_t free_list;
+};
+
+nx_alloc_config_t nx_alloc_default_config_big = {16, 32, 0};
+nx_alloc_config_t nx_alloc_default_config_small = {64, 128, 0};
+
+sys_mutex_t *init_lock;
+item_list_t type_list;
+
+static void nx_alloc_init(nx_alloc_type_desc_t *desc)
+{
+ sys_mutex_lock(init_lock);
+
+ if (!desc->global_pool) {
+ if (desc->config == 0)
+ desc->config = desc->type_size > 256 ?
+ &nx_alloc_default_config_big : &nx_alloc_default_config_small;
+
+ assert (desc->config->local_free_list_max >= desc->config->transfer_batch_size);
+
+ desc->global_pool = NEW(nx_alloc_pool_t);
+ DEQ_INIT(desc->global_pool->free_list);
+ desc->lock = sys_mutex();
+ desc->stats = NEW(nx_alloc_stats_t);
+ memset(desc->stats, 0, sizeof(nx_alloc_stats_t));
+ }
+
+ item_t *type_item = NEW(item_t);
+ DEQ_ITEM_INIT(type_item);
+ type_item->desc = desc;
+ DEQ_INSERT_TAIL(type_list, type_item);
+
+ sys_mutex_unlock(init_lock);
+}
+
+
+void *nx_alloc(nx_alloc_type_desc_t *desc, nx_alloc_pool_t **tpool)
+{
+ int idx;
+
+ //
+ // If the descriptor is not initialized, set it up now.
+ //
+ if (!desc->global_pool)
+ nx_alloc_init(desc);
+
+ //
+ // If this is the thread's first pass through here, allocate the
+ // thread-local pool for this type.
+ //
+ if (*tpool == 0) {
+ *tpool = NEW(nx_alloc_pool_t);
+ DEQ_INIT((*tpool)->free_list);
+ }
+
+ nx_alloc_pool_t *pool = *tpool;
+
+ //
+ // Fast case: If there's an item on the local free list, take it off the
+ // list and return it. Since everything we've touched is thread-local,
+ // there is no need to acquire a lock.
+ //
+ item_t *item = DEQ_HEAD(pool->free_list);
+ if (item) {
+ DEQ_REMOVE_HEAD(pool->free_list);
+ return &item[1];
+ }
+
+ //
+ // The local free list is empty, we need to either rebalance a batch
+ // of items from the global list or go to the heap to get new memory.
+ //
+ sys_mutex_lock(desc->lock);
+ if (DEQ_SIZE(desc->global_pool->free_list) >= desc->config->transfer_batch_size) {
+ //
+ // Rebalance a full batch from the global free list to the thread list.
+ //
+ desc->stats->batches_rebalanced_to_threads++;
+ desc->stats->held_by_threads += desc->config->transfer_batch_size;
+ for (idx = 0; idx < desc->config->transfer_batch_size; idx++) {
+ item = DEQ_HEAD(desc->global_pool->free_list);
+ DEQ_REMOVE_HEAD(desc->global_pool->free_list);
+ DEQ_INSERT_TAIL(pool->free_list, item);
+ }
+ } else {
+ //
+ // Allocate a full batch from the heap and put it on the thread list.
+ //
+ for (idx = 0; idx < desc->config->transfer_batch_size; idx++) {
+ item = (item_t*) malloc(sizeof(item_t) + desc->type_size);
+ if (item == 0)
+ break;
+ DEQ_ITEM_INIT(item);
+ item->desc = desc;
+ DEQ_INSERT_TAIL(pool->free_list, item);
+ desc->stats->held_by_threads++;
+ desc->stats->total_alloc_from_heap++;
+ }
+ }
+ sys_mutex_unlock(desc->lock);
+
+ item = DEQ_HEAD(pool->free_list);
+ if (item) {
+ DEQ_REMOVE_HEAD(pool->free_list);
+ return &item[1];
+ }
+
+ return 0;
+}
+
+
+void nx_dealloc(nx_alloc_type_desc_t *desc, nx_alloc_pool_t **tpool, void *p)
+{
+ item_t *item = ((item_t*) p) - 1;
+ int idx;
+
+ //
+ // If this is the thread's first pass through here, allocate the
+ // thread-local pool for this type.
+ //
+ if (*tpool == 0) {
+ *tpool = NEW(nx_alloc_pool_t);
+ DEQ_INIT((*tpool)->free_list);
+ }
+
+ nx_alloc_pool_t *pool = *tpool;
+
+ DEQ_INSERT_TAIL(pool->free_list, item);
+
+ if (DEQ_SIZE(pool->free_list) <= desc->config->local_free_list_max)
+ return;
+
+ //
+ // We've exceeded the maximum size of the local free list. A batch must be
+ // rebalanced back to the global list.
+ //
+ sys_mutex_lock(desc->lock);
+ desc->stats->batches_rebalanced_to_global++;
+ desc->stats->held_by_threads -= desc->config->transfer_batch_size;
+ for (idx = 0; idx < desc->config->transfer_batch_size; idx++) {
+ item = DEQ_HEAD(pool->free_list);
+ DEQ_REMOVE_HEAD(pool->free_list);
+ DEQ_INSERT_TAIL(desc->global_pool->free_list, item);
+ }
+
+ //
+ // If there's a global_free_list size limit, remove items until the limit is
+ // not exceeded.
+ //
+ if (desc->config->global_free_list_max != 0) {
+ while (DEQ_SIZE(desc->global_pool->free_list) > desc->config->global_free_list_max) {
+ item = DEQ_HEAD(desc->global_pool->free_list);
+ DEQ_REMOVE_HEAD(desc->global_pool->free_list);
+ free(item);
+ desc->stats->total_free_to_heap++;
+ }
+ }
+
+ sys_mutex_unlock(desc->lock);
+}
+
+
+void nx_alloc_initialize(void)
+{
+ init_lock = sys_mutex();
+ DEQ_INIT(type_list);
+}
+
diff --git a/qpid/extras/nexus/src/alloc_private.h b/qpid/extras/nexus/src/alloc_private.h
new file mode 100644
index 0000000000..00a4380bff
--- /dev/null
+++ b/qpid/extras/nexus/src/alloc_private.h
@@ -0,0 +1,26 @@
+#ifndef __nexus_alloc_private_h__
+#define __nexus_alloc_private_h__ 1
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <qpid/nexus/alloc.h>
+
+void nx_alloc_initialize(void);
+
+#endif
diff --git a/qpid/extras/nexus/src/auth.c b/qpid/extras/nexus/src/auth.c
new file mode 100644
index 0000000000..f33e907359
--- /dev/null
+++ b/qpid/extras/nexus/src/auth.c
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include "auth.h"
+#include "server_private.h"
+#include <proton/sasl.h>
+
+
+void auth_client_handler(pn_connector_t *cxtr)
+{
+ pn_sasl_t *sasl = pn_connector_sasl(cxtr);
+ pn_sasl_state_t state = pn_sasl_state(sasl);
+ nx_connection_t *ctx = (nx_connection_t*) pn_connector_context(cxtr);
+
+ if (state == PN_SASL_CONF) {
+ pn_sasl_mechanisms(sasl, "ANONYMOUS");
+ pn_sasl_client(sasl);
+ }
+
+ state = pn_sasl_state(sasl);
+
+ if (state == PN_SASL_PASS) {
+ ctx->state = CONN_STATE_OPENING;
+ } else if (state == PN_SASL_FAIL) {
+ ctx->state = CONN_STATE_FAILED;
+ }
+}
+
+
+void auth_server_handler(pn_connector_t *cxtr)
+{
+ pn_sasl_t *sasl = pn_connector_sasl(cxtr);
+ pn_sasl_state_t state = pn_sasl_state(sasl);
+ nx_connection_t *ctx = (nx_connection_t*) pn_connector_context(cxtr);
+
+ while (state == PN_SASL_CONF || state == PN_SASL_STEP) {
+ if (state == PN_SASL_CONF) {
+ pn_sasl_mechanisms(sasl, "ANONYMOUS");
+ pn_sasl_server(sasl);
+ } else if (state == PN_SASL_STEP) {
+ const char* mechanisms = pn_sasl_remote_mechanisms(sasl);
+ if (strcmp(mechanisms, "ANONYMOUS") == 0)
+ pn_sasl_done(sasl, PN_SASL_OK);
+ else
+ pn_sasl_done(sasl, PN_SASL_AUTH);
+ }
+ state = pn_sasl_state(sasl);
+ }
+
+ if (state == PN_SASL_PASS) {
+ ctx->state = CONN_STATE_OPENING;
+ } else if (state == PN_SASL_FAIL) {
+ ctx->state = CONN_STATE_FAILED;
+ }
+}
+
+
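Both handlers above only ever advertise ANONYMOUS and record the outcome in the connection context. A rough sketch of how a driver loop might hand connectors to them, assuming the caller knows whether the connector came from a listener; process_connector and the pre-auth test are illustrative only, the real dispatch lives in the server code:

    #include <proton/driver.h>
    #include "auth.h"
    #include "server_private.h"

    static void process_connector(pn_connector_t *cxtr, int from_listener)
    {
        nx_connection_t *ctx = (nx_connection_t*) pn_connector_context(cxtr);

        /* Keep running the SASL handler until the handshake settles the state. */
        if (ctx->state != CONN_STATE_OPENING && ctx->state != CONN_STATE_FAILED) {
            if (from_listener)
                auth_server_handler(cxtr);   /* accept ANONYMOUS from an inbound peer     */
            else
                auth_client_handler(cxtr);   /* offer ANONYMOUS on an outbound connection */
        }
    }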
diff --git a/qpid/extras/nexus/src/auth.h b/qpid/extras/nexus/src/auth.h
new file mode 100644
index 0000000000..c551c8ff76
--- /dev/null
+++ b/qpid/extras/nexus/src/auth.h
@@ -0,0 +1,27 @@
+#ifndef __auth_h__
+#define __auth_h__ 1
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <proton/driver.h>
+
+void auth_client_handler(pn_connector_t *conn);
+void auth_server_handler(pn_connector_t *conn);
+
+#endif
diff --git a/qpid/extras/nexus/src/container.c b/qpid/extras/nexus/src/container.c
new file mode 100644
index 0000000000..3d57a8f21d
--- /dev/null
+++ b/qpid/extras/nexus/src/container.c
@@ -0,0 +1,620 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <qpid/nexus/container.h>
+#include <qpid/nexus/message.h>
+#include <proton/engine.h>
+#include <proton/message.h>
+#include <qpid/nexus/ctools.h>
+#include <qpid/nexus/hash.h>
+#include <qpid/nexus/threading.h>
+#include <qpid/nexus/iterator.h>
+#include <qpid/nexus/log.h>
+
+static char *module="CONTAINER";
+
+struct nx_node_t {
+ const nx_node_type_t *ntype;
+ char *name;
+ void *context;
+ nx_dist_mode_t supported_dist;
+ nx_lifetime_policy_t life_policy;
+};
+
+ALLOC_DECLARE(nx_node_t);
+ALLOC_DEFINE(nx_node_t);
+ALLOC_DEFINE(nx_link_item_t);
+
+struct nx_link_t {
+ pn_link_t *pn_link;
+ void *context;
+ nx_node_t *node;
+};
+
+ALLOC_DECLARE(nx_link_t);
+ALLOC_DEFINE(nx_link_t);
+
+typedef struct nxc_node_type_t {
+ DEQ_LINKS(struct nxc_node_type_t);
+ const nx_node_type_t *ntype;
+} nxc_node_type_t;
+DEQ_DECLARE(nxc_node_type_t, nxc_node_type_list_t);
+
+
+static hash_t *node_type_map;
+static hash_t *node_map;
+static sys_mutex_t *lock;
+static nx_node_t *default_node;
+static nxc_node_type_list_t node_type_list;
+
+static void setup_outgoing_link(pn_link_t *pn_link)
+{
+ sys_mutex_lock(lock);
+ nx_node_t *node;
+ int result;
+ const char *source = pn_terminus_get_address(pn_link_remote_source(pn_link));
+ nx_field_iterator_t *iter;
+ // TODO - Extract the name from the structured source
+
+ if (source) {
+ iter = nx_field_iterator_string(source, ITER_VIEW_NODE_ID);
+ result = hash_retrieve(node_map, iter, (void*) &node);
+ nx_field_iterator_free(iter);
+ } else
+ result = -1;
+ sys_mutex_unlock(lock);
+
+ if (result < 0) {
+ if (default_node)
+ node = default_node;
+ else {
+ // Reject the link
+ // TODO - When the API allows, add an error message for "no available node"
+ pn_link_close(pn_link);
+ return;
+ }
+ }
+
+ nx_link_t *link = new_nx_link_t();
+ if (!link) {
+ pn_link_close(pn_link);
+ return;
+ }
+
+ link->pn_link = pn_link;
+ link->context = 0;
+ link->node = node;
+
+ pn_link_set_context(pn_link, link);
+ node->ntype->outgoing_handler(node->context, link);
+}
+
+
+static void setup_incoming_link(pn_link_t *pn_link)
+{
+ sys_mutex_lock(lock);
+ nx_node_t *node;
+ int result;
+ const char *target = pn_terminus_get_address(pn_link_remote_target(pn_link));
+ nx_field_iterator_t *iter;
+ // TODO - Extract the name from the structured target
+
+ if (target) {
+ iter = nx_field_iterator_string(target, ITER_VIEW_NODE_ID);
+ result = hash_retrieve(node_map, iter, (void*) &node);
+ nx_field_iterator_free(iter);
+ } else
+ result = -1;
+ sys_mutex_unlock(lock);
+
+ if (result < 0) {
+ if (default_node)
+ node = default_node;
+ else {
+ // Reject the link
+ // TODO - When the API allows, add an error message for "no available node"
+ pn_link_close(pn_link);
+ return;
+ }
+ }
+
+ nx_link_t *link = new_nx_link_t();
+ if (!link) {
+ pn_link_close(pn_link);
+ return;
+ }
+
+ link->pn_link = pn_link;
+ link->context = 0;
+ link->node = node;
+
+ pn_link_set_context(pn_link, link);
+ node->ntype->incoming_handler(node->context, link);
+}
+
+
+static int do_writable(pn_link_t *pn_link)
+{
+ nx_link_t *link = (nx_link_t*) pn_link_get_context(pn_link);
+ if (!link)
+ return 0;
+
+ nx_node_t *node = link->node;
+ if (!node)
+ return 0;
+
+ return node->ntype->writable_handler(node->context, link);
+}
+
+
+static void process_receive(pn_delivery_t *delivery)
+{
+ pn_link_t *pn_link = pn_delivery_link(delivery);
+ nx_link_t *link = (nx_link_t*) pn_link_get_context(pn_link);
+
+ if (link) {
+ nx_node_t *node = link->node;
+ if (node) {
+ node->ntype->rx_handler(node->context, link, delivery);
+ return;
+ }
+ }
+
+ //
+ // Reject the delivery if we couldn't find a node to handle it
+ //
+ pn_link_advance(pn_link);
+ pn_link_flow(pn_link, 1);
+ pn_delivery_update(delivery, PN_REJECTED);
+ pn_delivery_settle(delivery);
+}
+
+
+static void do_send(pn_delivery_t *delivery)
+{
+ pn_link_t *pn_link = pn_delivery_link(delivery);
+ nx_link_t *link = (nx_link_t*) pn_link_get_context(pn_link);
+
+ if (link) {
+ nx_node_t *node = link->node;
+ if (node) {
+ node->ntype->tx_handler(node->context, link, delivery);
+ return;
+ }
+ }
+
+ // TODO - Cancel the delivery
+}
+
+
+static void do_updated(pn_delivery_t *delivery)
+{
+ pn_link_t *pn_link = pn_delivery_link(delivery);
+ nx_link_t *link = (nx_link_t*) pn_link_get_context(pn_link);
+
+ if (link) {
+ nx_node_t *node = link->node;
+ if (node)
+ node->ntype->disp_handler(node->context, link, delivery);
+ }
+}
+
+
+static int close_handler(void* unused, pn_connection_t *conn)
+{
+ //
+ // Close all links, passing false (0) as the 'closed' argument. These links are not
+ // being properly 'detached'; they are being orphaned.
+ //
+ pn_link_t *pn_link = pn_link_head(conn, 0);
+ while (pn_link) {
+ nx_link_t *link = (nx_link_t*) pn_link_get_context(pn_link);
+ if (link) {
+ nx_node_t *node = link->node;
+ if (node)
+ node->ntype->link_detach_handler(node->context, link, 0);
+ free_nx_link_t(link);
+ }
+ pn_link_close(pn_link);
+ pn_link = pn_link_next(pn_link, 0);
+ }
+
+ // teardown all sessions
+ pn_session_t *ssn = pn_session_head(conn, 0);
+ while (ssn) {
+ pn_session_close(ssn);
+ ssn = pn_session_next(ssn, 0);
+ }
+
+ // teardown the connection
+ pn_connection_close(conn);
+ return 0;
+}
+
+
+static int process_handler(void* unused, pn_connection_t *conn)
+{
+ pn_session_t *ssn;
+ pn_link_t *pn_link;
+ pn_delivery_t *delivery;
+ int event_count = 0;
+
+ // Step 1: setup the engine's connection, and any sessions and links
+ // that may be pending.
+
+ // initialize the connection if it's new
+ if (pn_connection_state(conn) & PN_LOCAL_UNINIT) {
+ pn_connection_open(conn);
+ event_count++;
+ }
+
+ // open all pending sessions
+ ssn = pn_session_head(conn, PN_LOCAL_UNINIT);
+ while (ssn) {
+ pn_session_open(ssn);
+ ssn = pn_session_next(ssn, PN_LOCAL_UNINIT);
+ event_count++;
+ }
+
+ // configure and open any pending links
+ pn_link = pn_link_head(conn, PN_LOCAL_UNINIT);
+ while (pn_link) {
+ if (pn_link_is_sender(pn_link))
+ setup_outgoing_link(pn_link);
+ else
+ setup_incoming_link(pn_link);
+ pn_link = pn_link_next(pn_link, PN_LOCAL_UNINIT);
+ event_count++;
+ }
+
+
+ // Step 2: Now drain all the pending deliveries from the connection's
+ // work queue and process them
+
+ delivery = pn_work_head(conn);
+ while (delivery) {
+ if (pn_delivery_readable(delivery))
+ process_receive(delivery);
+ else if (pn_delivery_writable(delivery))
+ do_send(delivery);
+
+ if (pn_delivery_updated(delivery))
+ do_updated(delivery);
+
+ delivery = pn_work_next(delivery);
+ event_count++;
+ }
+
+ //
+ // Step 2.5: Traverse all of the links on the connection looking for
+ // outgoing links with non-zero credit. Call the attached node's
+ // writable handler for such links.
+ //
+ pn_link = pn_link_head(conn, PN_LOCAL_ACTIVE | PN_REMOTE_ACTIVE);
+ while (pn_link) {
+ assert(pn_session_connection(pn_link_session(pn_link)) == conn);
+ if (pn_link_is_sender(pn_link) && pn_link_credit(pn_link) > 0)
+ event_count += do_writable(pn_link);
+ pn_link = pn_link_next(pn_link, PN_LOCAL_ACTIVE | PN_REMOTE_ACTIVE);
+ }
+
+ // Step 3: Clean up any links or sessions that have been closed by the
+ // remote. If the connection has been closed remotely, clean that up
+ // also.
+
+ // teardown any terminating links
+ pn_link = pn_link_head(conn, PN_LOCAL_ACTIVE | PN_REMOTE_CLOSED);
+ while (pn_link) {
+ nx_link_t *link = (nx_link_t*) pn_link_get_context(pn_link);
+ if (link) {
+ nx_node_t *node = link->node;
+ if (node)
+ node->ntype->link_detach_handler(node->context, link, 1); // TODO - get 'closed' from detach message
+ }
+ pn_link_close(pn_link);
+ pn_link = pn_link_next(pn_link, PN_LOCAL_ACTIVE | PN_REMOTE_CLOSED);
+ event_count++;
+ }
+
+ // teardown any terminating sessions
+ ssn = pn_session_head(conn, PN_LOCAL_ACTIVE | PN_REMOTE_CLOSED);
+ while (ssn) {
+ pn_session_close(ssn);
+ ssn = pn_session_next(ssn, PN_LOCAL_ACTIVE | PN_REMOTE_CLOSED);
+ event_count++;
+ }
+
+ // teardown the connection if it's terminating
+ if (pn_connection_state(conn) == (PN_LOCAL_ACTIVE | PN_REMOTE_CLOSED)) {
+ pn_connection_close(conn);
+ event_count++;
+ }
+
+ return event_count;
+}
+
+
+static void open_handler(nx_connection_t *conn, nx_direction_t dir)
+{
+ const nx_node_type_t *nt;
+
+ //
+ // Note the locking structure in this function. Generally this would be unsafe, but since
+ // this particular list is only ever appended to and never has items inserted or deleted,
+ // this usage is safe in this case.
+ //
+ sys_mutex_lock(lock);
+ nxc_node_type_t *nt_item = DEQ_HEAD(node_type_list);
+ sys_mutex_unlock(lock);
+
+ pn_connection_open(nx_connection_pn(conn));
+
+ while (nt_item) {
+ nt = nt_item->ntype;
+ if (dir == NX_INCOMING) {
+ if (nt->inbound_conn_open_handler)
+ nt->inbound_conn_open_handler(nt->type_context, conn);
+ } else {
+ if (nt->outbound_conn_open_handler)
+ nt->outbound_conn_open_handler(nt->type_context, conn);
+ }
+
+ sys_mutex_lock(lock);
+ nt_item = DEQ_NEXT(nt_item);
+ sys_mutex_unlock(lock);
+ }
+}
+
+
+static int handler(void* context, nx_conn_event_t event, nx_connection_t *nx_conn)
+{
+ pn_connection_t *conn = nx_connection_pn(nx_conn);
+
+ switch (event) {
+ case NX_CONN_EVENT_LISTENER_OPEN: open_handler(nx_conn, NX_INCOMING); break;
+ case NX_CONN_EVENT_CONNECTOR_OPEN: open_handler(nx_conn, NX_OUTGOING); break;
+ case NX_CONN_EVENT_CLOSE: return close_handler(context, conn);
+ case NX_CONN_EVENT_PROCESS: return process_handler(context, conn);
+ }
+
+ return 0;
+}
+
+
+void nx_container_initialize(void)
+{
+ nx_log(module, LOG_TRACE, "Container Initializing");
+
+ // TODO - move allocator init to server?
+ const nx_allocator_config_t *alloc_config = nx_allocator_default_config();
+ nx_allocator_initialize(alloc_config);
+
+ node_type_map = hash(6, 4, 1); // 64 buckets, item batches of 4
+ node_map = hash(10, 32, 0); // 1K buckets, item batches of 32
+ lock = sys_mutex();
+ default_node = 0;
+ DEQ_INIT(node_type_list);
+
+ nx_server_set_conn_handler(handler);
+}
+
+
+void nx_container_finalize(void)
+{
+}
+
+
+int nx_container_register_node_type(const nx_node_type_t *nt)
+{
+ int result;
+ nx_field_iterator_t *iter = nx_field_iterator_string(nt->type_name, ITER_VIEW_ALL);
+ nxc_node_type_t *nt_item = NEW(nxc_node_type_t);
+ DEQ_ITEM_INIT(nt_item);
+ nt_item->ntype = nt;
+
+ sys_mutex_lock(lock);
+ result = hash_insert_const(node_type_map, iter, nt);
+ DEQ_INSERT_TAIL(node_type_list, nt_item);
+ sys_mutex_unlock(lock);
+
+ nx_field_iterator_free(iter);
+ if (result < 0)
+ return result;
+ nx_log(module, LOG_TRACE, "Node Type Registered - %s", nt->type_name);
+
+ return 0;
+}
+
+
+void nx_container_set_default_node_type(const nx_node_type_t *nt,
+ void *context,
+ nx_dist_mode_t supported_dist)
+{
+ if (default_node)
+ nx_container_destroy_node(default_node);
+
+ if (nt) {
+ default_node = nx_container_create_node(nt, 0, context, supported_dist, NX_LIFE_PERMANENT);
+ nx_log(module, LOG_TRACE, "Node of type '%s' installed as default node", nt->type_name);
+ } else {
+ default_node = 0;
+ nx_log(module, LOG_TRACE, "Default node removed");
+ }
+}
+
+
+nx_node_t *nx_container_create_node(const nx_node_type_t *nt,
+ const char *name,
+ void *context,
+ nx_dist_mode_t supported_dist,
+ nx_lifetime_policy_t life_policy)
+{
+ int result;
+ nx_node_t *node = new_nx_node_t();
+ if (!node)
+ return 0;
+
+ node->ntype = nt;
+ node->name = 0;
+ node->context = context;
+ node->supported_dist = supported_dist;
+ node->life_policy = life_policy;
+
+ if (name) {
+ nx_field_iterator_t *iter = nx_field_iterator_string(name, ITER_VIEW_ALL);
+ sys_mutex_lock(lock);
+ result = hash_insert(node_map, iter, node);
+ sys_mutex_unlock(lock);
+ nx_field_iterator_free(iter);
+ if (result < 0) {
+ free_nx_node_t(node);
+ return 0;
+ }
+
+ node->name = (char*) malloc(strlen(name) + 1);
+ strcpy(node->name, name);
+ }
+
+ if (name)
+ nx_log(module, LOG_TRACE, "Node of type '%s' created with name '%s'", nt->type_name, name);
+
+ return node;
+}
+
+
+void nx_container_destroy_node(nx_node_t *node)
+{
+ if (node->name) {
+ nx_field_iterator_t *iter = nx_field_iterator_string(node->name, ITER_VIEW_ALL);
+ sys_mutex_lock(lock);
+ hash_remove(node_map, iter);
+ sys_mutex_unlock(lock);
+ nx_field_iterator_free(iter);
+ free(node->name);
+ }
+
+ free_nx_node_t(node);
+}
+
+
+void nx_container_node_set_context(nx_node_t *node, void *node_context)
+{
+ node->context = node_context;
+}
+
+
+nx_dist_mode_t nx_container_node_get_dist_modes(const nx_node_t *node)
+{
+ return node->supported_dist;
+}
+
+
+nx_lifetime_policy_t nx_container_node_get_life_policy(const nx_node_t *node)
+{
+ return node->life_policy;
+}
+
+
+nx_link_t *nx_link(nx_node_t *node, nx_connection_t *conn, nx_direction_t dir, const char* name)
+{
+ pn_session_t *sess = pn_session(nx_connection_pn(conn));
+ nx_link_t *link = new_nx_link_t();
+ if (!link)
+ return 0;
+
+ if (dir == NX_OUTGOING)
+ link->pn_link = pn_sender(sess, name);
+ else
+ link->pn_link = pn_receiver(sess, name);
+ link->context = node->context;
+ link->node = node;
+
+ pn_link_set_context(link->pn_link, link);
+
+ pn_session_open(sess);
+
+ return link;
+}
+
+
+void nx_link_set_context(nx_link_t *link, void *context)
+{
+ link->context = context;
+}
+
+
+void *nx_link_get_context(nx_link_t *link)
+{
+ return link->context;
+}
+
+
+pn_link_t *nx_link_pn(nx_link_t *link)
+{
+ return link->pn_link;
+}
+
+
+pn_terminus_t *nx_link_source(nx_link_t *link)
+{
+ return pn_link_source(link->pn_link);
+}
+
+
+pn_terminus_t *nx_link_target(nx_link_t *link)
+{
+ return pn_link_target(link->pn_link);
+}
+
+
+pn_terminus_t *nx_link_remote_source(nx_link_t *link)
+{
+ return pn_link_remote_source(link->pn_link);
+}
+
+
+pn_terminus_t *nx_link_remote_target(nx_link_t *link)
+{
+ return pn_link_remote_target(link->pn_link);
+}
+
+
+void nx_link_activate(nx_link_t *link)
+{
+ if (!link || !link->pn_link)
+ return;
+
+ pn_session_t *sess = pn_link_session(link->pn_link);
+ if (!sess)
+ return;
+
+ pn_connection_t *conn = pn_session_connection(sess);
+ if (!conn)
+ return;
+
+ nx_connection_t *ctx = pn_connection_get_context(conn);
+ if (!ctx)
+ return;
+
+ nx_server_activate(ctx);
+}
+
+
+void nx_link_close(nx_link_t *link)
+{
+ pn_link_close(link->pn_link);
+}
+
+
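A node only becomes reachable after its type is registered and the node is either named or installed as the default. A minimal bring-up sketch, where router_node and NX_DIST_BOTH stand in for a node-type definition and a distribution-mode constant declared elsewhere (NX_LIFE_PERMANENT is the lifetime policy used by the code above):

    #include <qpid/nexus/container.h>

    extern const nx_node_type_t router_node;   /* hypothetical node-type definition */

    void container_example(void)
    {
        nx_container_initialize();
        nx_container_register_node_type(&router_node);

        /* Fallback for links whose address matches no named node. */
        nx_container_set_default_node_type(&router_node, 0,
                                           NX_DIST_BOTH /* assumed constant */);

        /* Also reachable by name, e.g. at an address of the form ".../router/...". */
        nx_container_create_node(&router_node, "router", 0,
                                 NX_DIST_BOTH /* assumed */, NX_LIFE_PERMANENT);
    }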
diff --git a/qpid/extras/nexus/src/hash.c b/qpid/extras/nexus/src/hash.c
new file mode 100644
index 0000000000..c5d882519d
--- /dev/null
+++ b/qpid/extras/nexus/src/hash.c
@@ -0,0 +1,223 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <qpid/nexus/hash.h>
+#include <qpid/nexus/ctools.h>
+#include <qpid/nexus/alloc.h>
+#include <stdio.h>
+#include <string.h>
+
+typedef struct hash_item_t {
+ DEQ_LINKS(struct hash_item_t);
+ unsigned char *key;
+ union {
+ void *val;
+ const void *val_const;
+ } v;
+} hash_item_t;
+
+ALLOC_DECLARE(hash_item_t);
+ALLOC_DEFINE(hash_item_t);
+DEQ_DECLARE(hash_item_t, items_t);
+
+
+typedef struct bucket_t {
+ items_t items;
+} bucket_t;
+
+
+struct hash_t {
+ bucket_t *buckets;
+ unsigned int bucket_count;
+ unsigned int bucket_mask;
+ int batch_size;
+ size_t size;
+ int is_const;
+};
+
+
+// djb2 hash algorithm
+static unsigned long hash_function(nx_field_iterator_t *iter)
+{
+ unsigned long hash = 5381;
+ int c;
+
+ while (!nx_field_iterator_end(iter)) {
+ c = (int) nx_field_iterator_octet(iter);
+ hash = ((hash << 5) + hash) + c; /* hash * 33 + c */
+ }
+
+ return hash;
+}
+
+
+hash_t *hash(int bucket_exponent, int batch_size, int value_is_const)
+{
+ int i;
+ hash_t *h = NEW(hash_t);
+
+ if (!h)
+ return 0;
+
+ h->bucket_count = 1 << bucket_exponent;
+ h->bucket_mask = h->bucket_count - 1;
+ h->batch_size = batch_size;
+ h->size = 0;
+ h->is_const = value_is_const;
+ h->buckets = NEW_ARRAY(bucket_t, h->bucket_count);
+ for (i = 0; i < h->bucket_count; i++) {
+ DEQ_INIT(h->buckets[i].items);
+ }
+
+ return h;
+}
+
+
+void hash_free(hash_t *h)
+{
+ // TODO - Implement this
+}
+
+
+size_t hash_size(hash_t *h)
+{
+ return h ? h->size : 0;
+}
+
+
+static hash_item_t *hash_internal_insert(hash_t *h, nx_field_iterator_t *key, int *error)
+{
+ unsigned long idx = hash_function(key) & h->bucket_mask;
+ hash_item_t *item = DEQ_HEAD(h->buckets[idx].items);
+
+ *error = 0;
+
+ while (item) {
+ if (nx_field_iterator_equal(key, item->key))
+ break;
+ item = item->next;
+ }
+
+ if (item) {
+ *error = -1;
+ return 0;
+ }
+
+ item = new_hash_item_t();
+ if (!item) {
+ *error = -2;
+ return 0;
+ }
+
+ DEQ_ITEM_INIT(item);
+ item->key = nx_field_iterator_copy(key);
+
+ DEQ_INSERT_TAIL(h->buckets[idx].items, item);
+ h->size++;
+ return item;
+}
+
+
+int hash_insert(hash_t *h, nx_field_iterator_t *key, void *val)
+{
+ int error = 0;
+ hash_item_t *item = hash_internal_insert(h, key, &error);
+
+ if (item)
+ item->v.val = val;
+ return error;
+}
+
+
+int hash_insert_const(hash_t *h, nx_field_iterator_t *key, const void *val)
+{
+ if (!h->is_const)
+ return -3;
+
+ int error = 0;
+ hash_item_t *item = hash_internal_insert(h, key, &error);
+
+ if (item)
+ item->v.val_const = val;
+ return error;
+}
+
+
+static hash_item_t *hash_internal_retrieve(hash_t *h, nx_field_iterator_t *key)
+{
+ unsigned long idx = hash_function(key) & h->bucket_mask;
+ hash_item_t *item = DEQ_HEAD(h->buckets[idx].items);
+
+ while (item) {
+ if (nx_field_iterator_equal(key, item->key))
+ break;
+ item = item->next;
+ }
+
+ return item;
+}
+
+
+int hash_retrieve(hash_t *h, nx_field_iterator_t *key, void **val)
+{
+ hash_item_t *item = hash_internal_retrieve(h, key);
+ if (item) {
+ *val = item->v.val;
+ return 0;
+ }
+ return -1;
+}
+
+
+int hash_retrieve_const(hash_t *h, nx_field_iterator_t *key, const void **val)
+{
+ if (!h->is_const)
+ return -3;
+
+ hash_item_t *item = hash_internal_retrieve(h, key);
+ if (item) {
+ *val = item->v.val_const;
+ return 0;
+ }
+ return -1;
+}
+
+
+int hash_remove(hash_t *h, nx_field_iterator_t *key)
+{
+ unsigned long idx = hash_function(key) & h->bucket_mask;
+ hash_item_t *item = DEQ_HEAD(h->buckets[idx].items);
+
+ while (item) {
+ if (nx_field_iterator_equal(key, item->key))
+ break;
+ item = item->next;
+ }
+
+ if (item) {
+ free(item->key);
+ DEQ_REMOVE(h->buckets[idx].items, item);
+ free_hash_item_t(item);
+ h->size--;
+ return 0;
+ }
+
+ return -1;
+}
+
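The table copies its keys, but hash_function() consumes the key iterator as it hashes, so each operation needs a fresh or reset iterator. A small usage sketch:

    #include <qpid/nexus/hash.h>
    #include <qpid/nexus/iterator.h>

    void hash_example(void *node)
    {
        hash_t *h = hash(6, 4, 0);   /* 2^6 = 64 buckets, non-const values */

        nx_field_iterator_t *key = nx_field_iterator_string("router", ITER_VIEW_ALL);
        hash_insert(h, key, node);                    /* the key bytes are copied */

        void *found = 0;
        nx_field_iterator_reset(key, ITER_VIEW_ALL);  /* the insert consumed the iterator */
        if (hash_retrieve(h, key, &found) == 0) {
            /* found == node */
        }

        nx_field_iterator_reset(key, ITER_VIEW_ALL);
        hash_remove(h, key);
        nx_field_iterator_free(key);
    }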
diff --git a/qpid/extras/nexus/src/iterator.c b/qpid/extras/nexus/src/iterator.c
new file mode 100644
index 0000000000..d03590e851
--- /dev/null
+++ b/qpid/extras/nexus/src/iterator.c
@@ -0,0 +1,268 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <qpid/nexus/iterator.h>
+#include <qpid/nexus/message.h>
+#include <qpid/nexus/ctools.h>
+#include <qpid/nexus/alloc.h>
+#include <stdio.h>
+#include <string.h>
+
+typedef enum {
+MODE_TO_END,
+MODE_TO_SLASH
+} parse_mode_t;
+
+struct nx_field_iterator_t {
+ nx_buffer_t *start_buffer;
+ unsigned char *start_cursor;
+ int start_length;
+ nx_buffer_t *buffer;
+ unsigned char *cursor;
+ int length;
+ nx_iterator_view_t view;
+ parse_mode_t mode;
+};
+
+
+ALLOC_DECLARE(nx_field_iterator_t);
+ALLOC_DEFINE(nx_field_iterator_t);
+
+
+typedef enum {
+STATE_START,
+STATE_SLASH_LEFT,
+STATE_SKIPPING_TO_NEXT_SLASH,
+STATE_SCANNING,
+STATE_COLON,
+STATE_COLON_SLASH,
+STATE_AT_NODE_ID
+} state_t;
+
+
+static void view_initialize(nx_field_iterator_t *iter)
+{
+ if (iter->view == ITER_VIEW_ALL) {
+ iter->mode = MODE_TO_END;
+ return;
+ }
+
+ //
+ // Advance to the node-id.
+ //
+ state_t state = STATE_START;
+ unsigned int octet;
+ while (!nx_field_iterator_end(iter) && state != STATE_AT_NODE_ID) {
+ octet = nx_field_iterator_octet(iter);
+ switch (state) {
+ case STATE_START :
+ if (octet == '/')
+ state = STATE_SLASH_LEFT;
+ else
+ state = STATE_SCANNING;
+ break;
+
+ case STATE_SLASH_LEFT :
+ if (octet == '/')
+ state = STATE_SKIPPING_TO_NEXT_SLASH;
+ else
+ state = STATE_AT_NODE_ID;
+ break;
+
+ case STATE_SKIPPING_TO_NEXT_SLASH :
+ if (octet == '/')
+ state = STATE_AT_NODE_ID;
+ break;
+
+ case STATE_SCANNING :
+ if (octet == ':')
+ state = STATE_COLON;
+ break;
+
+ case STATE_COLON :
+ if (octet == '/')
+ state = STATE_COLON_SLASH;
+ else
+ state = STATE_SCANNING;
+ break;
+
+ case STATE_COLON_SLASH :
+ if (octet == '/')
+ state = STATE_SKIPPING_TO_NEXT_SLASH;
+ else
+ state = STATE_SCANNING;
+ break;
+
+ case STATE_AT_NODE_ID :
+ break;
+ }
+ }
+
+ if (state != STATE_AT_NODE_ID) {
+ //
+ // The address string was relative, not absolute. The node-id
+ // is at the beginning of the string.
+ //
+ iter->buffer = iter->start_buffer;
+ iter->cursor = iter->start_cursor;
+ iter->length = iter->start_length;
+ }
+
+ //
+ // Cursor is now on the first octet of the node-id
+ //
+ if (iter->view == ITER_VIEW_NODE_ID) {
+ iter->mode = MODE_TO_SLASH;
+ return;
+ }
+
+ if (iter->view == ITER_VIEW_NO_HOST) {
+ iter->mode = MODE_TO_END;
+ return;
+ }
+
+ if (iter->view == ITER_VIEW_NODE_SPECIFIC) {
+ iter->mode = MODE_TO_END;
+ while (!nx_field_iterator_end(iter)) {
+ octet = nx_field_iterator_octet(iter);
+ if (octet == '/')
+ break;
+ }
+ return;
+ }
+}
+
+
+nx_field_iterator_t* nx_field_iterator_string(const char *text, nx_iterator_view_t view)
+{
+ nx_field_iterator_t *iter = new_nx_field_iterator_t();
+ if (!iter)
+ return 0;
+
+ iter->start_buffer = 0;
+ iter->start_cursor = (unsigned char*) text;
+ iter->start_length = strlen(text);
+
+ nx_field_iterator_reset(iter, view);
+
+ return iter;
+}
+
+
+nx_field_iterator_t *nx_field_iterator_buffer(nx_buffer_t *buffer, int offset, int length, nx_iterator_view_t view)
+{
+ nx_field_iterator_t *iter = new_nx_field_iterator_t();
+ if (!iter)
+ return 0;
+
+ iter->start_buffer = buffer;
+ iter->start_cursor = nx_buffer_base(buffer) + offset;
+ iter->start_length = length;
+
+ nx_field_iterator_reset(iter, view);
+
+ return iter;
+}
+
+
+void nx_field_iterator_free(nx_field_iterator_t *iter)
+{
+ free_nx_field_iterator_t(iter);
+}
+
+
+void nx_field_iterator_reset(nx_field_iterator_t *iter, nx_iterator_view_t view)
+{
+ iter->buffer = iter->start_buffer;
+ iter->cursor = iter->start_cursor;
+ iter->length = iter->start_length;
+ iter->view = view;
+
+ view_initialize(iter);
+}
+
+
+unsigned char nx_field_iterator_octet(nx_field_iterator_t *iter)
+{
+ if (iter->length == 0)
+ return (unsigned char) 0;
+
+ unsigned char result = *(iter->cursor);
+
+ iter->cursor++;
+ iter->length--;
+
+ if (iter->length > 0) {
+ if (iter->buffer) {
+ if (iter->cursor - nx_buffer_base(iter->buffer) == nx_buffer_size(iter->buffer)) {
+ iter->buffer = iter->buffer->next;
+ if (iter->buffer == 0) {
+ iter->length = 0;
+ iter->cursor = 0;
+ } else
+ iter->cursor = nx_buffer_base(iter->buffer);
+ }
+ }
+ }
+
+ if (iter->length && iter->mode == MODE_TO_SLASH && *(iter->cursor) == '/')
+ iter->length = 0;
+
+ return result;
+}
+
+
+int nx_field_iterator_end(nx_field_iterator_t *iter)
+{
+ return iter->length == 0;
+}
+
+
+int nx_field_iterator_equal(nx_field_iterator_t *iter, unsigned char *string)
+{
+ nx_field_iterator_reset(iter, iter->view);
+ while (!nx_field_iterator_end(iter) && *string) {
+ if (*string != nx_field_iterator_octet(iter))
+ return 0;
+ string++;
+ }
+
+ return (nx_field_iterator_end(iter) && (*string == 0));
+}
+
+
+unsigned char *nx_field_iterator_copy(nx_field_iterator_t *iter)
+{
+ int length = 0;
+ int idx = 0;
+ unsigned char *copy;
+
+ nx_field_iterator_reset(iter, iter->view);
+ while (!nx_field_iterator_end(iter)) {
+ nx_field_iterator_octet(iter);
+ length++;
+ }
+
+ nx_field_iterator_reset(iter, iter->view);
+ copy = (unsigned char*) malloc(length + 1);
+ while (!nx_field_iterator_end(iter))
+ copy[idx++] = nx_field_iterator_octet(iter);
+ copy[idx] = '\0';
+
+ return copy;
+}
+
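For an absolute address the scan above leaves the cursor just past the host, so ITER_VIEW_NODE_ID covers only the segment up to the next '/'. A small check of that behaviour; the address string is just an example:

    #include <assert.h>
    #include <qpid/nexus/iterator.h>

    void iterator_example(void)
    {
        nx_field_iterator_t *iter =
            nx_field_iterator_string("amqp://broker.example.com/router/sub",
                                     ITER_VIEW_NODE_ID);

        /* The node-id view yields exactly "router". */
        assert(nx_field_iterator_equal(iter, (unsigned char*) "router"));

        nx_field_iterator_free(iter);
    }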
diff --git a/qpid/extras/nexus/src/log.c b/qpid/extras/nexus/src/log.c
new file mode 100644
index 0000000000..ca1af86915
--- /dev/null
+++ b/qpid/extras/nexus/src/log.c
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <qpid/nexus/log.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+
+static int mask=LOG_INFO;
+
+static char *cls_prefix(int cls)
+{
+ switch (cls) {
+ case LOG_TRACE : return "TRACE";
+ case LOG_ERROR : return "ERROR";
+ case LOG_INFO : return "INFO";
+ }
+
+ return "";
+}
+
+void nx_log(const char *module, int cls, const char *fmt, ...)
+{
+ if (!(cls & mask))
+ return;
+
+ va_list ap;
+ char line[128];
+
+ va_start(ap, fmt);
+ vsnprintf(line, 127, fmt, ap);
+ va_end(ap);
+ fprintf(stderr, "%s (%s): %s\n", module, cls_prefix(cls), line);
+}
+
+void nx_log_set_mask(int _mask)
+{
+ mask = _mask;
+}
+
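Since nx_log() tests the class against the mask with a bitwise AND, the LOG_* classes are expected to be distinct bits that can be OR-ed into the mask; for example:

    #include <qpid/nexus/log.h>

    void log_example(void)
    {
        /* Enable everything during bring-up (assumes each LOG_* value is a single bit). */
        nx_log_set_mask(LOG_TRACE | LOG_ERROR | LOG_INFO);

        nx_log("EXAMPLE", LOG_TRACE, "listener bound to port %d", 5672);
    }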
diff --git a/qpid/extras/nexus/src/message.c b/qpid/extras/nexus/src/message.c
new file mode 100644
index 0000000000..11f58a1474
--- /dev/null
+++ b/qpid/extras/nexus/src/message.c
@@ -0,0 +1,1164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <qpid/nexus/message.h>
+#include <qpid/nexus/ctools.h>
+#include <qpid/nexus/threading.h>
+#include <string.h>
+#include <stdio.h>
+
+
+//
+// Per-Thread allocator
+//
+typedef struct nx_allocator_t {
+ nx_message_list_t message_free_list;
+ nx_buffer_list_t buffer_free_list;
+} nx_allocator_t;
+
+//
+// Global allocator (protected by a global lock)
+//
+typedef struct {
+ nx_message_list_t message_free_list;
+ nx_buffer_list_t buffer_free_list;
+ sys_mutex_t *lock;
+} nx_global_allocator_t;
+
+static nx_global_allocator_t global;
+static nx_allocator_config_t default_config;
+static const nx_allocator_config_t *config;
+
+
+static nx_allocator_t *nx_get_allocator(void)
+{
+ static __thread nx_allocator_t *alloc = 0;
+
+ if (!alloc) {
+ alloc = NEW(nx_allocator_t);
+
+ if (!alloc)
+ return 0;
+
+ DEQ_INIT(alloc->message_free_list);
+ DEQ_INIT(alloc->buffer_free_list);
+ }
+
+ return alloc;
+}
+
+
+static void advance(unsigned char **cursor, nx_buffer_t **buffer, int consume)
+{
+ unsigned char *local_cursor = *cursor;
+ nx_buffer_t *local_buffer = *buffer;
+
+ int remaining = nx_buffer_size(local_buffer) - (local_cursor - nx_buffer_base(local_buffer));
+ while (consume > 0) {
+ if (consume < remaining) {
+ local_cursor += consume;
+ consume = 0;
+ } else {
+ consume -= remaining;
+ local_buffer = local_buffer->next;
+ if (local_buffer == 0){
+ local_cursor = 0;
+ break;
+ }
+ local_cursor = nx_buffer_base(local_buffer);
+ remaining = nx_buffer_size(local_buffer) - (local_cursor - nx_buffer_base(local_buffer));
+ }
+ }
+
+ *cursor = local_cursor;
+ *buffer = local_buffer;
+}
+
+
+static unsigned char next_octet(unsigned char **cursor, nx_buffer_t **buffer)
+{
+ unsigned char result = **cursor;
+ advance(cursor, buffer, 1);
+ return result;
+}
+
+
+static int traverse_field(unsigned char **cursor, nx_buffer_t **buffer, nx_field_location_t *field)
+{
+ unsigned char tag = next_octet(cursor, buffer);
+ if (!(*cursor)) return 0;
+ int consume = 0;
+ switch (tag & 0xF0) {
+ case 0x40 : consume = 0; break;
+ case 0x50 : consume = 1; break;
+ case 0x60 : consume = 2; break;
+ case 0x70 : consume = 4; break;
+ case 0x80 : consume = 8; break;
+ case 0x90 : consume = 16; break;
+
+ case 0xB0 :
+ case 0xD0 :
+ case 0xF0 :
+ consume |= ((int) next_octet(cursor, buffer)) << 24;
+ if (!(*cursor)) return 0;
+ consume |= ((int) next_octet(cursor, buffer)) << 16;
+ if (!(*cursor)) return 0;
+ consume |= ((int) next_octet(cursor, buffer)) << 8;
+ if (!(*cursor)) return 0;
+ // Fall through to the next case...
+
+ case 0xA0 :
+ case 0xC0 :
+ case 0xE0 :
+ consume |= (int) next_octet(cursor, buffer);
+ if (!(*cursor)) return 0;
+ break;
+ }
+
+ if (field) {
+ field->buffer = *buffer;
+ field->offset = *cursor - nx_buffer_base(*buffer);
+ field->length = consume;
+ field->parsed = 1;
+ }
+
+ advance(cursor, buffer, consume);
+ return 1;
+}
+
+
+static int start_list(unsigned char **cursor, nx_buffer_t **buffer)
+{
+ unsigned char tag = next_octet(cursor, buffer);
+ if (!(*cursor)) return 0;
+ int length = 0;
+ int count = 0;
+
+ switch (tag) {
+ case 0x45 : // list0
+ break;
+ case 0xd0 : // list32
+ length |= ((int) next_octet(cursor, buffer)) << 24;
+ if (!(*cursor)) return 0;
+ length |= ((int) next_octet(cursor, buffer)) << 16;
+ if (!(*cursor)) return 0;
+ length |= ((int) next_octet(cursor, buffer)) << 8;
+ if (!(*cursor)) return 0;
+ length |= (int) next_octet(cursor, buffer);
+ if (!(*cursor)) return 0;
+
+ count |= ((int) next_octet(cursor, buffer)) << 24;
+ if (!(*cursor)) return 0;
+ count |= ((int) next_octet(cursor, buffer)) << 16;
+ if (!(*cursor)) return 0;
+ count |= ((int) next_octet(cursor, buffer)) << 8;
+ if (!(*cursor)) return 0;
+ count |= (int) next_octet(cursor, buffer);
+ if (!(*cursor)) return 0;
+
+ break;
+
+ case 0xc0 : // list8
+ length |= (int) next_octet(cursor, buffer);
+ if (!(*cursor)) return 0;
+
+ count |= (int) next_octet(cursor, buffer);
+ if (!(*cursor)) return 0;
+ break;
+ }
+
+ return count;
+}
+
+
+//
+// Check the buffer chain, starting at cursor to see if it matches the pattern.
+// If the pattern matches, check the next tag to see if it's in the set of expected
+// tags. If not, return zero. If so, set the location descriptor to the good
+// tag and advance the cursor (and buffer, if needed) to the end of the matched section.
+//
+// If there is no match, don't advance the cursor.
+//
+// Return 0 if the pattern matches but the following tag is unexpected
+// Return 0 if the pattern matches and the location already has a pointer (duplicate section)
+// Return 1 if the pattern matches and we've advanced the cursor/buffer
+// Return 1 if the pattern does not match
+//
+static int nx_check_and_advance(nx_buffer_t **buffer,
+ unsigned char **cursor,
+ unsigned char *pattern,
+ int pattern_length,
+ unsigned char *expected_tags,
+ nx_field_location_t *location)
+{
+ nx_buffer_t *test_buffer = *buffer;
+ unsigned char *test_cursor = *cursor;
+
+ if (!test_cursor)
+ return 1; // no match
+
+ unsigned char *end_of_buffer = nx_buffer_base(test_buffer) + nx_buffer_size(test_buffer);
+ int idx = 0;
+
+ while (idx < pattern_length && *test_cursor == pattern[idx]) {
+ idx++;
+ test_cursor++;
+ if (test_cursor == end_of_buffer) {
+ test_buffer = test_buffer->next;
+ if (test_buffer == 0)
+ return 1; // Pattern didn't match
+ test_cursor = nx_buffer_base(test_buffer);
+ end_of_buffer = test_cursor + nx_buffer_size(test_buffer);
+ }
+ }
+
+ if (idx < pattern_length)
+ return 1; // Pattern didn't match
+
+ //
+ // Pattern matched, check the tag
+ //
+ while (*expected_tags && *test_cursor != *expected_tags)
+ expected_tags++;
+ if (*expected_tags == 0)
+ return 0; // Unexpected tag
+
+ if (location->parsed)
+ return 0; // Duplicate section
+
+ //
+ // Pattern matched and tag is expected. Mark the beginning of the section.
+ //
+ location->parsed = 1;
+ location->buffer = test_buffer;
+ location->offset = test_cursor - nx_buffer_base(test_buffer);
+ location->length = 0;
+
+ //
+ // Advance the pointers to consume the whole section.
+ //
+ int consume = 0;
+ unsigned char tag = next_octet(&test_cursor, &test_buffer);
+ if (!test_cursor) return 0;
+ switch (tag) {
+ case 0x45 : // list0
+ break;
+
+ case 0xd0 : // list32
+ case 0xd1 : // map32
+ case 0xb0 : // vbin32
+ consume |= ((int) next_octet(&test_cursor, &test_buffer)) << 24;
+ if (!test_cursor) return 0;
+ consume |= ((int) next_octet(&test_cursor, &test_buffer)) << 16;
+ if (!test_cursor) return 0;
+ consume |= ((int) next_octet(&test_cursor, &test_buffer)) << 8;
+ if (!test_cursor) return 0;
+ // Fall through to the next case...
+
+ case 0xc0 : // list8
+ case 0xc1 : // map8
+ case 0xa0 : // vbin8
+ consume |= (int) next_octet(&test_cursor, &test_buffer);
+ if (!test_cursor) return 0;
+ break;
+ }
+
+ if (consume)
+ advance(&test_cursor, &test_buffer, consume);
+
+ *cursor = test_cursor;
+ *buffer = test_buffer;
+ return 1;
+}
+
+
+static void nx_insert(nx_message_t *msg, const uint8_t *seq, size_t len)
+{
+ nx_buffer_t *buf = DEQ_TAIL(msg->buffers);
+
+ while (len > 0) {
+ if (buf == 0 || nx_buffer_capacity(buf) == 0) {
+ buf = nx_allocate_buffer();
+ if (buf == 0)
+ return;
+ DEQ_INSERT_TAIL(msg->buffers, buf);
+ }
+
+ size_t to_copy = nx_buffer_capacity(buf);
+ if (to_copy > len)
+ to_copy = len;
+ memcpy(nx_buffer_cursor(buf), seq, to_copy);
+ nx_buffer_insert(buf, to_copy);
+ len -= to_copy;
+ seq += to_copy;
+ msg->length += to_copy;
+ }
+}
+
+
+static void nx_insert_8(nx_message_t *msg, uint8_t value)
+{
+ nx_insert(msg, &value, 1);
+}
+
+
+static void nx_insert_32(nx_message_t *msg, uint32_t value)
+{
+ uint8_t buf[4];
+ buf[0] = (uint8_t) ((value & 0xFF000000) >> 24);
+ buf[1] = (uint8_t) ((value & 0x00FF0000) >> 16);
+ buf[2] = (uint8_t) ((value & 0x0000FF00) >> 8);
+ buf[3] = (uint8_t) (value & 0x000000FF);
+ nx_insert(msg, buf, 4);
+}
+
+
+static void nx_insert_64(nx_message_t *msg, uint64_t value)
+{
+ uint8_t buf[8];
+ buf[0] = (uint8_t) ((value & 0xFF00000000000000L) >> 56);
+ buf[1] = (uint8_t) ((value & 0x00FF000000000000L) >> 48);
+ buf[2] = (uint8_t) ((value & 0x0000FF0000000000L) >> 40);
+ buf[3] = (uint8_t) ((value & 0x000000FF00000000L) >> 32);
+ buf[4] = (uint8_t) ((value & 0x00000000FF000000L) >> 24);
+ buf[5] = (uint8_t) ((value & 0x0000000000FF0000L) >> 16);
+ buf[6] = (uint8_t) ((value & 0x000000000000FF00L) >> 8);
+ buf[7] = (uint8_t) (value & 0x00000000000000FFL);
+ nx_insert(msg, buf, 8);
+}
+
+
+static void nx_overwrite(nx_buffer_t **buf, size_t *cursor, uint8_t value)
+{
+ while (*buf) {
+ if (*cursor >= nx_buffer_size(*buf)) {
+ *buf = (*buf)->next;
+ *cursor = 0;
+ } else {
+ nx_buffer_base(*buf)[*cursor] = value;
+ (*cursor)++;
+ return;
+ }
+ }
+}
+
+
+static void nx_overwrite_32(nx_field_location_t *field, uint32_t value)
+{
+ nx_buffer_t *buf = field->buffer;
+ size_t cursor = field->offset;
+
+ nx_overwrite(&buf, &cursor, (uint8_t) ((value & 0xFF000000) >> 24));
+ nx_overwrite(&buf, &cursor, (uint8_t) ((value & 0x00FF0000) >> 16));
+ nx_overwrite(&buf, &cursor, (uint8_t) ((value & 0x0000FF00) >> 8));
+ nx_overwrite(&buf, &cursor, (uint8_t) (value & 0x000000FF));
+}
+
+
+static void nx_start_list_performative(nx_message_t *msg, uint8_t code)
+{
+ //
+ // Insert the short-form performative tag
+ //
+ nx_insert(msg, (const uint8_t*) "\x00\x53", 2);
+ nx_insert_8(msg, code);
+
+ //
+ // Open the list with a list32 tag
+ //
+ nx_insert_8(msg, 0xd0);
+
+ //
+ // Mark the current location to later overwrite the length
+ //
+ msg->compose_length.buffer = DEQ_TAIL(msg->buffers);
+ msg->compose_length.offset = nx_buffer_size(msg->compose_length.buffer);
+ msg->compose_length.length = 4;
+ msg->compose_length.parsed = 1;
+
+ nx_insert(msg, (const uint8_t*) "\x00\x00\x00\x00", 4);
+
+ //
+ // Mark the current location to later overwrite the count
+ //
+ msg->compose_count.buffer = DEQ_TAIL(msg->buffers);
+ msg->compose_count.offset = nx_buffer_size(msg->compose_count.buffer);
+ msg->compose_count.length = 4;
+ msg->compose_count.parsed = 1;
+
+ nx_insert(msg, (const uint8_t*) "\x00\x00\x00\x00", 4);
+
+ msg->length = 4; // Include the length of the count field
+ msg->count = 0;
+}
+
+
+static void nx_end_list(nx_message_t *msg)
+{
+ nx_overwrite_32(&msg->compose_length, msg->length);
+ nx_overwrite_32(&msg->compose_count, msg->count);
+}
+
+
+const nx_allocator_config_t *nx_allocator_default_config(void)
+{
+ default_config.buffer_size = 1024;
+ default_config.buffer_preallocation_count = 512;
+ default_config.buffer_rebalancing_batch_count = 16;
+ default_config.buffer_local_storage_max = 64;
+ default_config.buffer_free_list_max = 1000000;
+ default_config.message_allocation_batch_count = 256;
+ default_config.message_rebalancing_batch_count = 64;
+ default_config.message_local_storage_max = 256;
+
+ return &default_config;
+}
+
+
+void nx_allocator_initialize(const nx_allocator_config_t *c)
+{
+ config = c;
+
+ // Initialize the fields in the global structure.
+ DEQ_INIT(global.message_free_list);
+ DEQ_INIT(global.buffer_free_list);
+ global.lock = sys_mutex();
+
+ // Pre-allocate buffers according to the configuration
+ int i;
+ nx_buffer_t *buf;
+
+ for (i = 0; i < config->buffer_preallocation_count; i++) {
+ buf = (nx_buffer_t*) malloc (sizeof(nx_buffer_t) + config->buffer_size);
+ DEQ_ITEM_INIT(buf);
+ DEQ_INSERT_TAIL(global.buffer_free_list, buf);
+ }
+}
+
+
+void nx_allocator_finalize(void)
+{
+ // TODO - Free buffers and messages
+}
+
+
+nx_message_t *nx_allocate_message(void)
+{
+ nx_allocator_t *alloc = nx_get_allocator();
+ nx_message_t *msg;
+ int i;
+
+ if (DEQ_SIZE(alloc->message_free_list) == 0) {
+ //
+ // The local free list is empty, rebalance a batch of objects from the global
+ // free list.
+ //
+ sys_mutex_lock(global.lock);
+ if (DEQ_SIZE(global.message_free_list) >= config->message_rebalancing_batch_count) {
+ for (i = 0; i < config->message_rebalancing_batch_count; i++) {
+ msg = DEQ_HEAD(global.message_free_list);
+ DEQ_REMOVE_HEAD(global.message_free_list);
+ DEQ_INSERT_TAIL(alloc->message_free_list, msg);
+ }
+ }
+ sys_mutex_unlock(global.lock);
+ }
+
+ if (DEQ_SIZE(alloc->message_free_list) == 0) {
+ //
+ // The local free list is still empty. This means there were not enough objects on the
+ // global free list to make up a batch. Allocate new objects from the heap and store
+ // them in the local free list.
+ //
+ nx_message_t *batch = NEW_ARRAY(nx_message_t, config->message_allocation_batch_count);
+ if (batch) {
+ memset(batch, 0, sizeof(nx_message_t) * config->message_allocation_batch_count);
+ for (i = 0; i < config->message_allocation_batch_count; i++) {
+ DEQ_INSERT_TAIL(alloc->message_free_list, &batch[i]);
+ }
+ }
+ }
+
+ //
+ // If the local free list is still empty, we're out of memory.
+ //
+ if (DEQ_SIZE(alloc->message_free_list) == 0)
+ return 0;
+
+ msg = DEQ_HEAD(alloc->message_free_list);
+ DEQ_REMOVE_HEAD(alloc->message_free_list);
+
+ DEQ_INIT(msg->buffers);
+ msg->in_delivery = NULL;
+ msg->out_delivery = NULL;
+ msg->section_message_header.buffer = 0;
+ msg->section_message_header.parsed = 0;
+ msg->section_delivery_annotation.buffer = 0;
+ msg->section_delivery_annotation.parsed = 0;
+ msg->section_message_annotation.buffer = 0;
+ msg->section_message_annotation.parsed = 0;
+ msg->section_message_properties.buffer = 0;
+ msg->section_message_properties.parsed = 0;
+ msg->section_application_properties.buffer = 0;
+ msg->section_application_properties.parsed = 0;
+ msg->section_body.buffer = 0;
+ msg->section_body.parsed = 0;
+ msg->section_footer.buffer = 0;
+ msg->section_footer.parsed = 0;
+ msg->field_user_id.buffer = 0;
+ msg->field_user_id.parsed = 0;
+ msg->field_to.buffer = 0;
+ msg->field_to.parsed = 0;
+ msg->body.buffer = 0;
+ msg->body.parsed = 0;
+ return msg;
+}
+
+
+nx_buffer_t *nx_allocate_buffer(void)
+{
+ nx_allocator_t *alloc = nx_get_allocator();
+ nx_buffer_t *buf;
+ int i;
+
+ if (DEQ_SIZE(alloc->buffer_free_list) == 0) {
+ sys_mutex_lock(global.lock);
+ if (DEQ_SIZE(global.buffer_free_list) >= config->buffer_rebalancing_batch_count) {
+ // Rebalance a batch of free descriptors to the local free list.
+ for (i = 0; i < config->buffer_rebalancing_batch_count; i++) {
+ buf = DEQ_HEAD(global.buffer_free_list);
+ DEQ_REMOVE_HEAD(global.buffer_free_list);
+ DEQ_INSERT_TAIL(alloc->buffer_free_list, buf);
+ }
+ }
+ sys_mutex_unlock(global.lock);
+ }
+
+ if (DEQ_SIZE(alloc->buffer_free_list) == 0) {
+ // Allocate a buffer from the heap
+ buf = (nx_buffer_t*) malloc(sizeof(nx_buffer_t) + config->buffer_size);
+ if (buf) {
+ DEQ_ITEM_INIT(buf);
+ DEQ_INSERT_TAIL(alloc->buffer_free_list, buf);
+ }
+ }
+
+ if (DEQ_SIZE(alloc->buffer_free_list) == 0)
+ return 0;
+
+ buf = DEQ_HEAD(alloc->buffer_free_list);
+ DEQ_REMOVE_HEAD(alloc->buffer_free_list);
+
+ buf->size = 0;
+
+ return buf;
+}
+
+
+void nx_free_message(nx_message_t *msg)
+{
+ nx_allocator_t *alloc = nx_get_allocator();
+
+ // Free any buffers in the message
+ int i;
+ nx_buffer_t *buf = DEQ_HEAD(msg->buffers);
+ while (buf) {
+ DEQ_REMOVE_HEAD(msg->buffers);
+ nx_free_buffer(buf);
+ buf = DEQ_HEAD(msg->buffers);
+ }
+
+ DEQ_INSERT_TAIL(alloc->message_free_list, msg);
+ if (DEQ_SIZE(alloc->message_free_list) > config->message_local_storage_max) {
+ //
+ // The local free list has exceeded the threshold for local storage.
+ // Rebalance a batch of free objects to the global free list.
+ //
+ sys_mutex_lock(global.lock);
+ for (i = 0; i < config->message_rebalancing_batch_count; i++) {
+ msg = DEQ_HEAD(alloc->message_free_list);
+ DEQ_REMOVE_HEAD(alloc->message_free_list);
+ DEQ_INSERT_TAIL(global.message_free_list, msg);
+ }
+ sys_mutex_unlock(global.lock);
+ }
+}
+
+
+void nx_free_buffer(nx_buffer_t *buf)
+{
+ nx_allocator_t *alloc = nx_get_allocator();
+ int i;
+
+ DEQ_INSERT_TAIL(alloc->buffer_free_list, buf);
+ if (DEQ_SIZE(alloc->buffer_free_list) > config->buffer_local_storage_max) {
+ // Rebalance a batch of free descriptors to the global free list.
+ sys_mutex_lock(global.lock);
+ for (i = 0; i < config->buffer_rebalancing_batch_count; i++) {
+ buf = DEQ_HEAD(alloc->buffer_free_list);
+ DEQ_REMOVE_HEAD(alloc->buffer_free_list);
+ DEQ_INSERT_TAIL(global.buffer_free_list, buf);
+ }
+ sys_mutex_unlock(global.lock);
+ }
+}
+
+
+nx_message_t *nx_message_receive(pn_delivery_t *delivery)
+{
+ pn_link_t *link = pn_delivery_link(delivery);
+ nx_message_t *msg = (nx_message_t*) pn_delivery_get_context(delivery);
+ ssize_t rc;
+ nx_buffer_t *buf;
+
+ //
+ // If there is no message associated with the delivery, this is the first time
+ // we've received anything on this delivery. Allocate a message descriptor and
+ // link it and the delivery together.
+ //
+ if (!msg) {
+ msg = nx_allocate_message();
+ if (!msg)
+ return NULL;
+ pn_delivery_set_context(delivery, (void*) msg);
+
+ //
+ // Record the incoming delivery only if it is not settled. If it is
+ // settled, there's no need to propagate disposition back to the sender.
+ //
+ if (!pn_delivery_settled(delivery))
+ msg->in_delivery = delivery;
+ }
+
+ //
+ // Get a reference to the tail buffer on the message. This is the buffer into which
+ // we will store incoming message data. If there is no buffer in the message, allocate
+ // an empty one and add it to the message.
+ //
+ buf = DEQ_TAIL(msg->buffers);
+ if (!buf) {
+ buf = nx_allocate_buffer();
+ DEQ_INSERT_TAIL(msg->buffers, buf);
+ }
+
+ while (1) {
+ //
+ // Try to receive enough data to fill the remaining space in the tail buffer.
+ //
+ rc = pn_link_recv(link, (char*) nx_buffer_cursor(buf), nx_buffer_capacity(buf));
+
+ //
+ // If we receive PN_EOS, we have come to the end of the message.
+ //
+ if (rc == PN_EOS) {
+ //
+ // If the last buffer in the list is empty, remove it and free it. This
+ // will only happen if the size of the message content is an exact multiple
+ // of the buffer size.
+ //
+ if (nx_buffer_size(buf) == 0) {
+ DEQ_REMOVE_TAIL(msg->buffers);
+ nx_free_buffer(buf);
+ }
+ return msg;
+ }
+
+ if (rc > 0) {
+ //
+ // We have received a positive number of bytes for the message. Advance
+ // the cursor in the buffer.
+ //
+ nx_buffer_insert(buf, rc);
+
+ //
+ // If the buffer is full, allocate a new empty buffer and append it to the
+ // tail of the message's list.
+ //
+ if (nx_buffer_capacity(buf) == 0) {
+ buf = nx_allocate_buffer();
+ DEQ_INSERT_TAIL(msg->buffers, buf);
+ }
+ } else
+ //
+ // We received zero bytes, and no PN_EOS. This means that we've received
+ // all of the data available up to this point, but it does not constitute
+ // the entire message. We'll be back later to finish it up.
+ //
+ break;
+ }
+
+ return NULL;
+}
+
+
+int nx_message_check(nx_message_t *msg, nx_message_depth_t depth)
+{
+
+#define LONG 10
+#define SHORT 3
+#define MSG_HDR_LONG (unsigned char*) "\x00\x80\x00\x00\x00\x00\x00\x00\x00\x70"
+#define MSG_HDR_SHORT (unsigned char*) "\x00\x53\x70"
+#define DELIVERY_ANNOTATION_LONG (unsigned char*) "\x00\x80\x00\x00\x00\x00\x00\x00\x00\x71"
+#define DELIVERY_ANNOTATION_SHORT (unsigned char*) "\x00\x53\x71"
+#define MESSAGE_ANNOTATION_LONG (unsigned char*) "\x00\x80\x00\x00\x00\x00\x00\x00\x00\x72"
+#define MESSAGE_ANNOTATION_SHORT (unsigned char*) "\x00\x53\x72"
+#define MESSAGE_PROPERTIES_LONG (unsigned char*) "\x00\x80\x00\x00\x00\x00\x00\x00\x00\x73"
+#define MESSAGE_PROPERTIES_SHORT (unsigned char*) "\x00\x53\x73"
+#define APPLICATION_PROPERTIES_LONG (unsigned char*) "\x00\x80\x00\x00\x00\x00\x00\x00\x00\x74"
+#define APPLICATION_PROPERTIES_SHORT (unsigned char*) "\x00\x53\x74"
+#define BODY_DATA_LONG (unsigned char*) "\x00\x80\x00\x00\x00\x00\x00\x00\x00\x75"
+#define BODY_DATA_SHORT (unsigned char*) "\x00\x53\x75"
+#define BODY_SEQUENCE_LONG (unsigned char*) "\x00\x80\x00\x00\x00\x00\x00\x00\x00\x76"
+#define BODY_SEQUENCE_SHORT (unsigned char*) "\x00\x53\x76"
+#define FOOTER_LONG (unsigned char*) "\x00\x80\x00\x00\x00\x00\x00\x00\x00\x78"
+#define FOOTER_SHORT (unsigned char*) "\x00\x53\x78"
+#define TAGS_LIST (unsigned char*) "\x45\xc0\xd0"
+#define TAGS_MAP (unsigned char*) "\xc1\xd1"
+#define TAGS_BINARY (unsigned char*) "\xa0\xb0"
+
+ nx_buffer_t *buffer = DEQ_HEAD(msg->buffers);
+ unsigned char *cursor;
+
+ if (!buffer)
+ return 0; // Invalid - No data in the message
+
+ if (depth == NX_DEPTH_NONE)
+ return 1;
+
+ cursor = nx_buffer_base(buffer);
+
+ //
+ // MESSAGE HEADER
+ //
+ if (0 == nx_check_and_advance(&buffer, &cursor, MSG_HDR_LONG, LONG, TAGS_LIST, &msg->section_message_header))
+ return 0;
+ if (0 == nx_check_and_advance(&buffer, &cursor, MSG_HDR_SHORT, SHORT, TAGS_LIST, &msg->section_message_header))
+ return 0;
+
+ if (depth == NX_DEPTH_HEADER)
+ return 1;
+
+ //
+ // DELIVERY ANNOTATION
+ //
+ if (0 == nx_check_and_advance(&buffer, &cursor, DELIVERY_ANNOTATION_LONG, LONG, TAGS_MAP, &msg->section_delivery_annotation))
+ return 0;
+ if (0 == nx_check_and_advance(&buffer, &cursor, DELIVERY_ANNOTATION_SHORT, SHORT, TAGS_MAP, &msg->section_delivery_annotation))
+ return 0;
+
+ if (depth == NX_DEPTH_DELIVERY_ANNOTATIONS)
+ return 1;
+
+ //
+ // MESSAGE ANNOTATION
+ //
+ if (0 == nx_check_and_advance(&buffer, &cursor, MESSAGE_ANNOTATION_LONG, LONG, TAGS_MAP, &msg->section_message_annotation))
+ return 0;
+ if (0 == nx_check_and_advance(&buffer, &cursor, MESSAGE_ANNOTATION_SHORT, SHORT, TAGS_MAP, &msg->section_message_annotation))
+ return 0;
+
+ if (depth == NX_DEPTH_MESSAGE_ANNOTATIONS)
+ return 1;
+
+ //
+ // MESSAGE PROPERTIES
+ //
+ if (0 == nx_check_and_advance(&buffer, &cursor, MESSAGE_PROPERTIES_LONG, LONG, TAGS_LIST, &msg->section_message_properties))
+ return 0;
+ if (0 == nx_check_and_advance(&buffer, &cursor, MESSAGE_PROPERTIES_SHORT, SHORT, TAGS_LIST, &msg->section_message_properties))
+ return 0;
+
+ if (depth == NX_DEPTH_MESSAGE_PROPERTIES)
+ return 1;
+
+ //
+ // APPLICATION PROPERTIES
+ //
+ if (0 == nx_check_and_advance(&buffer, &cursor, APPLICATION_PROPERTIES_LONG, LONG, TAGS_MAP, &msg->section_application_properties))
+ return 0;
+ if (0 == nx_check_and_advance(&buffer, &cursor, APPLICATION_PROPERTIES_SHORT, SHORT, TAGS_MAP, &msg->section_application_properties))
+ return 0;
+
+ if (depth == NX_DEPTH_APPLICATION_PROPERTIES)
+ return 1;
+
+ //
+ // BODY (Note that this function expects a single data section or a single AMQP sequence)
+ //
+ if (0 == nx_check_and_advance(&buffer, &cursor, BODY_DATA_LONG, LONG, TAGS_BINARY, &msg->section_body))
+ return 0;
+ if (0 == nx_check_and_advance(&buffer, &cursor, BODY_DATA_SHORT, SHORT, TAGS_BINARY, &msg->section_body))
+ return 0;
+ if (0 == nx_check_and_advance(&buffer, &cursor, BODY_SEQUENCE_LONG, LONG, TAGS_LIST, &msg->section_body))
+ return 0;
+ if (0 == nx_check_and_advance(&buffer, &cursor, BODY_SEQUENCE_SHORT, SHORT, TAGS_LIST, &msg->section_body))
+ return 0;
+
+ if (depth == NX_DEPTH_BODY)
+ return 1;
+
+ //
+ // FOOTER
+ //
+ if (0 == nx_check_and_advance(&buffer, &cursor, FOOTER_LONG, LONG, TAGS_MAP, &msg->section_footer))
+ return 0;
+ if (0 == nx_check_and_advance(&buffer, &cursor, FOOTER_SHORT, SHORT, TAGS_MAP, &msg->section_footer))
+ return 0;
+
+ return 1;
+}
+
+
+nx_field_iterator_t *nx_message_field_to(nx_message_t *msg)
+{
+ while (1) {
+ if (msg->field_to.parsed)
+ return nx_field_iterator_buffer(msg->field_to.buffer, msg->field_to.offset, msg->field_to.length, ITER_VIEW_ALL);
+
+ if (msg->section_message_properties.parsed == 0)
+ break;
+
+ nx_buffer_t *buffer = msg->section_message_properties.buffer;
+ unsigned char *cursor = nx_buffer_base(buffer) + msg->section_message_properties.offset;
+
+ int count = start_list(&cursor, &buffer);
+ int result;
+
+ if (count < 3)
+ break;
+
+ result = traverse_field(&cursor, &buffer, 0); // message_id
+ if (!result) return 0;
+ result = traverse_field(&cursor, &buffer, 0); // user_id
+ if (!result) return 0;
+ result = traverse_field(&cursor, &buffer, &msg->field_to); // to
+ if (!result) return 0;
+ }
+
+ return 0;
+}
+
+
+nx_field_iterator_t *nx_message_body(nx_message_t *msg)
+{
+ while (1) {
+ if (msg->body.parsed)
+ return nx_field_iterator_buffer(msg->body.buffer, msg->body.offset, msg->body.length, ITER_VIEW_ALL);
+
+ if (msg->section_body.parsed == 0)
+ break;
+
+ nx_buffer_t *buffer = msg->section_body.buffer;
+ unsigned char *cursor = nx_buffer_base(buffer) + msg->section_body.offset;
+ int result;
+
+ result = traverse_field(&cursor, &buffer, &msg->body);
+ if (!result) return 0;
+ }
+
+ return 0;
+}
+
+
+void nx_message_compose_1(nx_message_t *msg, const char *to, nx_buffer_t *buf_chain)
+{
+ nx_message_begin_header(msg);
+ nx_message_insert_boolean(msg, 0); // durable
+ //nx_message_insert_null(msg); // priority
+ //nx_message_insert_null(msg); // ttl
+ //nx_message_insert_boolean(msg, 0); // first-acquirer
+ //nx_message_insert_uint(msg, 0); // delivery-count
+ nx_message_end_header(msg);
+
+ nx_message_begin_message_properties(msg);
+ nx_message_insert_null(msg); // message-id
+ nx_message_insert_null(msg); // user-id
+ nx_message_insert_string(msg, to); // to
+ //nx_message_insert_null(msg); // subject
+ //nx_message_insert_null(msg); // reply-to
+ //nx_message_insert_null(msg); // correlation-id
+ //nx_message_insert_null(msg); // content-type
+ //nx_message_insert_null(msg); // content-encoding
+ //nx_message_insert_timestamp(msg, 0); // absolute-expiry-time
+ //nx_message_insert_timestamp(msg, 0); // creation-time
+ //nx_message_insert_null(msg); // group-id
+ //nx_message_insert_uint(msg, 0); // group-sequence
+ //nx_message_insert_null(msg); // reply-to-group-id
+ nx_message_end_message_properties(msg);
+
+ if (buf_chain)
+ nx_message_append_body_data(msg, buf_chain);
+}
+
+
+void nx_message_begin_header(nx_message_t *msg)
+{
+ nx_start_list_performative(msg, 0x70);
+}
+
+
+void nx_message_end_header(nx_message_t *msg)
+{
+ nx_end_list(msg);
+}
+
+
+void nx_message_begin_delivery_annotations(nx_message_t *msg)
+{
+ assert(0); // Not Implemented
+}
+
+
+void nx_message_end_delivery_annotations(nx_message_t *msg)
+{
+ assert(0); // Not Implemented
+}
+
+
+void nx_message_begin_message_annotations(nx_message_t *msg)
+{
+ assert(0); // Not Implemented
+}
+
+
+void nx_message_end_message_annotations(nx_message_t *msg)
+{
+ assert(0); // Not Implemented
+}
+
+
+void nx_message_begin_message_properties(nx_message_t *msg)
+{
+ nx_start_list_performative(msg, 0x73);
+}
+
+
+void nx_message_end_message_properties(nx_message_t *msg)
+{
+ nx_end_list(msg);
+}
+
+
+void nx_message_begin_application_properties(nx_message_t *msg)
+{
+ assert(0); // Not Implemented
+}
+
+
+void nx_message_end_application_properties(nx_message_t *msg)
+{
+ assert(0); // Not Implemented
+}
+
+
+void nx_message_append_body_data(nx_message_t *msg, nx_buffer_t *buf_chain)
+{
+ uint32_t len = 0;
+ nx_buffer_t *buf = buf_chain;
+ nx_buffer_t *last = 0;
+ size_t count = 0;
+
+ while (buf) {
+ len += nx_buffer_size(buf);
+ count++;
+ last = buf;
+ buf = DEQ_NEXT(buf);
+ }
+
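+    // 0x00 0x53 0x75 is the described-type constructor for an AMQP 1.0 data
+    // section: 0x00 introduces the descriptor and 0x53 (smallulong) carries
+    // the descriptor code 0x75 (amqp:data:binary).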
+ nx_insert(msg, (const uint8_t*) "\x00\x53\x75", 3);
+ if (len < 256) {
+ nx_insert_8(msg, 0xa0); // vbin8
+ nx_insert_8(msg, (uint8_t) len);
+ } else {
+ nx_insert_8(msg, 0xb0); // vbin32
+ nx_insert_32(msg, len);
+ }
+
+ if (len > 0) {
+ buf_chain->prev = msg->buffers.tail;
+ msg->buffers.tail->next = buf_chain;
+ msg->buffers.tail = last;
+ msg->buffers.size += count;
+ }
+}
+
+
+void nx_message_begin_body_sequence(nx_message_t *msg)
+{
+}
+
+
+void nx_message_end_body_sequence(nx_message_t *msg)
+{
+}
+
+
+void nx_message_begin_footer(nx_message_t *msg)
+{
+ assert(0); // Not Implemented
+}
+
+
+void nx_message_end_footer(nx_message_t *msg)
+{
+ assert(0); // Not Implemented
+}
+
+
+void nx_message_insert_null(nx_message_t *msg)
+{
+ nx_insert_8(msg, 0x40);
+ msg->count++;
+}
+
+
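+// Note: 0x56 is the one-byte boolean constructor; AMQP 1.0 also defines the
+// zero-payload encodings 0x41 (true) and 0x42 (false), which are not used here.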
+void nx_message_insert_boolean(nx_message_t *msg, int value)
+{
+ if (value)
+ nx_insert(msg, (const uint8_t*) "\x56\x01", 2);
+ else
+ nx_insert(msg, (const uint8_t*) "\x56\x00", 2);
+ msg->count++;
+}
+
+
+void nx_message_insert_ubyte(nx_message_t *msg, uint8_t value)
+{
+ nx_insert_8(msg, 0x50);
+ nx_insert_8(msg, value);
+ msg->count++;
+}
+
+
+void nx_message_insert_uint(nx_message_t *msg, uint32_t value)
+{
+ if (value == 0) {
+ nx_insert_8(msg, 0x43); // uint0
+ } else if (value < 256) {
+ nx_insert_8(msg, 0x52); // smalluint
+ nx_insert_8(msg, (uint8_t) value);
+ } else {
+ nx_insert_8(msg, 0x70); // uint
+ nx_insert_32(msg, value);
+ }
+ msg->count++;
+}
+
+
+void nx_message_insert_ulong(nx_message_t *msg, uint64_t value)
+{
+ if (value == 0) {
+ nx_insert_8(msg, 0x44); // ulong0
+ } else if (value < 256) {
+ nx_insert_8(msg, 0x53); // smallulong
+ nx_insert_8(msg, (uint8_t) value);
+ } else {
+ nx_insert_8(msg, 0x80); // ulong
+ nx_insert_64(msg, value);
+ }
+ msg->count++;
+}
+
+
+void nx_message_insert_binary(nx_message_t *msg, const uint8_t *start, size_t len)
+{
+ if (len < 256) {
+ nx_insert_8(msg, 0xa0); // vbin8
+ nx_insert_8(msg, (uint8_t) len);
+ } else {
+ nx_insert_8(msg, 0xb0); // vbin32
+ nx_insert_32(msg, len);
+ }
+ nx_insert(msg, start, len);
+ msg->count++;
+}
+
+
+void nx_message_insert_string(nx_message_t *msg, const char *start)
+{
+ uint32_t len = strlen(start);
+
+ if (len < 256) {
+ nx_insert_8(msg, 0xa1); // str8-utf8
+ nx_insert_8(msg, (uint8_t) len);
+ nx_insert(msg, (const uint8_t*) start, len);
+ } else {
+ nx_insert_8(msg, 0xb1); // str32-utf8
+ nx_insert_32(msg, len);
+ nx_insert(msg, (const uint8_t*) start, len);
+ }
+ msg->count++;
+}
+
+
+void nx_message_insert_uuid(nx_message_t *msg, const uint8_t *value)
+{
+ nx_insert_8(msg, 0x98); // uuid
+ nx_insert(msg, value, 16);
+ msg->count++;
+}
+
+
+void nx_message_insert_symbol(nx_message_t *msg, const char *start, size_t len)
+{
+ if (len < 256) {
+ nx_insert_8(msg, 0xa3); // sym8
+ nx_insert_8(msg, (uint8_t) len);
+ nx_insert(msg, (const uint8_t*) start, len);
+ } else {
+ nx_insert_8(msg, 0xb3); // sym32
+ nx_insert_32(msg, len);
+ nx_insert(msg, (const uint8_t*) start, len);
+ }
+ msg->count++;
+}
+
+
+void nx_message_insert_timestamp(nx_message_t *msg, uint64_t value)
+{
+ nx_insert_8(msg, 0x83); // timestamp
+ nx_insert_64(msg, value);
+ msg->count++;
+}
+
+
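+// Buffer accessors: the payload of an nx_buffer_t is stored in memory directly
+// after the nx_buffer_t header itself, so &buf[1] is the base of the data
+// region.  The usable capacity comes from the module's buffer-size configuration.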
+unsigned char *nx_buffer_base(nx_buffer_t *buf)
+{
+ return (unsigned char*) &buf[1];
+}
+
+
+unsigned char *nx_buffer_cursor(nx_buffer_t *buf)
+{
+ return ((unsigned char*) &buf[1]) + buf->size;
+}
+
+
+size_t nx_buffer_capacity(nx_buffer_t *buf)
+{
+ return config->buffer_size - buf->size;
+}
+
+
+size_t nx_buffer_size(nx_buffer_t *buf)
+{
+ return buf->size;
+}
+
+
+void nx_buffer_insert(nx_buffer_t *buf, size_t len)
+{
+ buf->size += len;
+ assert(buf->size <= config->buffer_size);
+}
+
diff --git a/qpid/extras/nexus/src/posix/threading.c b/qpid/extras/nexus/src/posix/threading.c
new file mode 100644
index 0000000000..6121151378
--- /dev/null
+++ b/qpid/extras/nexus/src/posix/threading.c
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <qpid/nexus/threading.h>
+#include <qpid/nexus/ctools.h>
+#include <stdio.h>
+#include <pthread.h>
+
+struct sys_mutex_t {
+ pthread_mutex_t mutex;
+ int acquired;
+};
+
+sys_mutex_t *sys_mutex(void)
+{
+ sys_mutex_t *mutex = NEW(sys_mutex_t);
+ pthread_mutex_init(&(mutex->mutex), 0);
+ mutex->acquired = 0;
+ return mutex;
+}
+
+
+void sys_mutex_free(sys_mutex_t *mutex)
+{
+ assert(!mutex->acquired);
+ pthread_mutex_destroy(&(mutex->mutex));
+ free(mutex);
+}
+
+
+void sys_mutex_lock(sys_mutex_t *mutex)
+{
+ pthread_mutex_lock(&(mutex->mutex));
+ assert(!mutex->acquired);
+ mutex->acquired++;
+}
+
+
+void sys_mutex_unlock(sys_mutex_t *mutex)
+{
+ mutex->acquired--;
+ assert(!mutex->acquired);
+ pthread_mutex_unlock(&(mutex->mutex));
+}
+
+
+struct sys_cond_t {
+ pthread_cond_t cond;
+};
+
+
+sys_cond_t *sys_cond(void)
+{
+ sys_cond_t *cond = NEW(sys_cond_t);
+ pthread_cond_init(&(cond->cond), 0);
+ return cond;
+}
+
+
+void sys_cond_free(sys_cond_t *cond)
+{
+ pthread_cond_destroy(&(cond->cond));
+ free(cond);
+}
+
+
+void sys_cond_wait(sys_cond_t *cond, sys_mutex_t *held_mutex)
+{
+ assert(held_mutex->acquired);
+ held_mutex->acquired--;
+ pthread_cond_wait(&(cond->cond), &(held_mutex->mutex));
+ held_mutex->acquired++;
+}
+
+
+void sys_cond_signal(sys_cond_t *cond)
+{
+ pthread_cond_signal(&(cond->cond));
+}
+
+
+void sys_cond_signal_all(sys_cond_t *cond)
+{
+ pthread_cond_broadcast(&(cond->cond));
+}
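+
+// Usage sketch (the standard condition-variable pattern, as used in server.c;
+// shown here for reference only):
+//
+//     sys_mutex_lock(lock);
+//     while (!predicate)
+//         sys_cond_wait(cond, lock);   // atomically releases and re-acquires 'lock'
+//     sys_mutex_unlock(lock);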
+
+
+struct sys_thread_t {
+ pthread_t thread;
+};
+
+sys_thread_t *sys_thread(void *(*run_function) (void *), void *arg)
+{
+ sys_thread_t *thread = NEW(sys_thread_t);
+ pthread_create(&(thread->thread), 0, run_function, arg);
+ return thread;
+}
+
+
+void sys_thread_free(sys_thread_t *thread)
+{
+ free(thread);
+}
+
+
+void sys_thread_join(sys_thread_t *thread)
+{
+ pthread_join(thread->thread, 0);
+}
+
diff --git a/qpid/extras/nexus/src/server.c b/qpid/extras/nexus/src/server.c
new file mode 100644
index 0000000000..16740b812f
--- /dev/null
+++ b/qpid/extras/nexus/src/server.c
@@ -0,0 +1,903 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <qpid/nexus/ctools.h>
+#include <qpid/nexus/threading.h>
+#include <qpid/nexus/log.h>
+#include "server_private.h"
+#include "timer_private.h"
+#include "alloc_private.h"
+#include "auth.h"
+#include "work_queue.h"
+#include <stdio.h>
+#include <time.h>
+#include <signal.h>
+
+static char *module="SERVER";
+
+typedef struct nx_thread_t {
+ int thread_id;
+ volatile int running;
+ volatile int canceled;
+ int using_thread;
+ sys_thread_t *thread;
+} nx_thread_t;
+
+
+typedef struct nx_server_t {
+ int thread_count;
+ pn_driver_t *driver;
+ nx_thread_start_cb_t start_handler;
+ nx_conn_handler_cb_t conn_handler;
+ nx_signal_handler_cb_t signal_handler;
+ nx_user_fd_handler_cb_t ufd_handler;
+ void *start_context;
+ void *conn_context;
+ void *signal_context;
+ sys_cond_t *cond;
+ sys_mutex_t *lock;
+ nx_thread_t **threads;
+ work_queue_t *work_queue;
+ nx_timer_list_t pending_timers;
+ bool a_thread_is_waiting;
+ int threads_active;
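+    // The four fields below implement the pause protocol: pause_requests and
+    // threads_paused coordinate quiescing the worker threads, while
+    // pause_next_sequence/pause_now_serving form a ticket so that concurrent
+    // callers of nx_server_pause are served one at a time.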
+ int pause_requests;
+ int threads_paused;
+ int pause_next_sequence;
+ int pause_now_serving;
+ int pending_signal;
+} nx_server_t;
+
+
+ALLOC_DEFINE(nx_listener_t);
+ALLOC_DEFINE(nx_connector_t);
+ALLOC_DEFINE(nx_connection_t);
+ALLOC_DEFINE(nx_user_fd_t);
+
+
+/**
+ * Singleton server state object, wrapping the concurrent Proton driver
+ */
+static nx_server_t *nx_server = 0;
+
+
+static void signal_handler(int signum)
+{
+ nx_server->pending_signal = signum;
+ sys_cond_signal_all(nx_server->cond);
+}
+
+
+static nx_thread_t *thread(int id)
+{
+ nx_thread_t *thread = NEW(nx_thread_t);
+ if (!thread)
+ return 0;
+
+ thread->thread_id = id;
+ thread->running = 0;
+ thread->canceled = 0;
+ thread->using_thread = 0;
+
+ return thread;
+}
+
+
+static void thread_process_listeners(pn_driver_t *driver)
+{
+ pn_listener_t *listener = pn_driver_listener(driver);
+ pn_connector_t *cxtr;
+ nx_connection_t *ctx;
+
+ while (listener) {
+ nx_log(module, LOG_TRACE, "Accepting Connection");
+ cxtr = pn_listener_accept(listener);
+ ctx = new_nx_connection_t();
+ ctx->state = CONN_STATE_SASL_SERVER;
+ ctx->owner_thread = CONTEXT_NO_OWNER;
+ ctx->enqueued = 0;
+ ctx->pn_cxtr = cxtr;
+ ctx->pn_conn = 0;
+ ctx->listener = (nx_listener_t*) pn_listener_context(listener);
+ ctx->connector = 0;
+ ctx->context = ctx->listener->context;
+ ctx->ufd = 0;
+
+ pn_connector_set_context(cxtr, ctx);
+ listener = pn_driver_listener(driver);
+ }
+}
+
+
+static void handle_signals_LH(void)
+{
+ int signum = nx_server->pending_signal;
+
+ if (signum) {
+ nx_server->pending_signal = 0;
+ if (nx_server->signal_handler) {
+ sys_mutex_unlock(nx_server->lock);
+ nx_server->signal_handler(nx_server->signal_context, signum);
+ sys_mutex_lock(nx_server->lock);
+ }
+ }
+}
+
+
+static void block_if_paused_LH(void)
+{
+ if (nx_server->pause_requests > 0) {
+ nx_server->threads_paused++;
+ sys_cond_signal_all(nx_server->cond);
+ while (nx_server->pause_requests > 0)
+ sys_cond_wait(nx_server->cond, nx_server->lock);
+ nx_server->threads_paused--;
+ }
+}
+
+
+static void process_connector(pn_connector_t *cxtr)
+{
+ nx_connection_t *ctx = pn_connector_context(cxtr);
+ int events = 0;
+ int auth_passes = 0;
+
+ if (ctx->state == CONN_STATE_USER) {
+ nx_server->ufd_handler(ctx->ufd->context, ctx->ufd);
+ return;
+ }
+
+ do {
+ //
+ // Step the engine for pre-handler processing
+ //
+ pn_connector_process(cxtr);
+
+ //
+ // Call the handler that is appropriate for the connector's state.
+ //
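+        // State flow handled below: CONNECTING -> SASL_CLIENT/SASL_SERVER, then
+        // (once authentication completes) OPENING -> OPERATIONAL, with FAILED
+        // recorded if the initial connect is already closed.  The enclosing
+        // do/while repeats as long as a handler reports further events.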
+ switch (ctx->state) {
+ case CONN_STATE_CONNECTING:
+ if (!pn_connector_closed(cxtr)) {
+ ctx->state = CONN_STATE_SASL_CLIENT;
+ assert(ctx->connector);
+ ctx->connector->state = CXTR_STATE_OPEN;
+ events = 1;
+ } else {
+ ctx->state = CONN_STATE_FAILED;
+ events = 0;
+ }
+ break;
+
+ case CONN_STATE_SASL_CLIENT:
+ if (auth_passes == 0) {
+ auth_client_handler(cxtr);
+ events = 1;
+ } else {
+ auth_passes++;
+ events = 0;
+ }
+ break;
+
+ case CONN_STATE_SASL_SERVER:
+ if (auth_passes == 0) {
+ auth_server_handler(cxtr);
+ events = 1;
+ } else {
+ auth_passes++;
+ events = 0;
+ }
+ break;
+
+ case CONN_STATE_OPENING:
+ ctx->state = CONN_STATE_OPERATIONAL;
+
+ pn_connection_t *conn = pn_connection();
+ pn_connection_set_container(conn, "nexus"); // TODO - make unique
+ pn_connector_set_connection(cxtr, conn);
+ pn_connection_set_context(conn, ctx);
+ ctx->pn_conn = conn;
+
+ nx_conn_event_t ce = NX_CONN_EVENT_PROCESS; // Initialize to keep the compiler happy
+
+ if (ctx->listener) {
+ ce = NX_CONN_EVENT_LISTENER_OPEN;
+ } else if (ctx->connector) {
+ ce = NX_CONN_EVENT_CONNECTOR_OPEN;
+ ctx->connector->delay = 0;
+ } else
+ assert(0);
+
+ nx_server->conn_handler(ctx->context, ce, (nx_connection_t*) pn_connector_context(cxtr));
+ events = 1;
+ break;
+
+ case CONN_STATE_OPERATIONAL:
+ if (pn_connector_closed(cxtr)) {
+ nx_server->conn_handler(ctx->context,
+ NX_CONN_EVENT_CLOSE,
+ (nx_connection_t*) pn_connector_context(cxtr));
+ events = 0;
+ }
+ else
+ events = nx_server->conn_handler(ctx->context,
+ NX_CONN_EVENT_PROCESS,
+ (nx_connection_t*) pn_connector_context(cxtr));
+ break;
+
+ default:
+ break;
+ }
+ } while (events > 0);
+}
+
+
+//
+// TEMPORARY FUNCTION PROTOTYPES
+//
+void pn_driver_wait_1(pn_driver_t *d);
+int pn_driver_wait_2(pn_driver_t *d, int timeout);
+void pn_driver_wait_3(pn_driver_t *d);
+//
+// END TEMPORARY
+//
+
+static void *thread_run(void *arg)
+{
+ nx_thread_t *thread = (nx_thread_t*) arg;
+ pn_connector_t *work;
+ pn_connection_t *conn;
+ nx_connection_t *ctx;
+ int error;
+ int poll_result;
+ int timer_holdoff = 0;
+
+ if (!thread)
+ return 0;
+
+ thread->running = 1;
+
+ if (thread->canceled)
+ return 0;
+
+ //
+ // Invoke the start handler if the application supplied one.
+    // This handler can be used to set NUMA or processor affinity for the thread.
+ //
+ if (nx_server->start_handler)
+ nx_server->start_handler(nx_server->start_context, thread->thread_id);
+
+ //
+ // Main Loop
+ //
+ while (thread->running) {
+ sys_mutex_lock(nx_server->lock);
+
+ //
+ // Check for pending signals to process
+ //
+ handle_signals_LH();
+ if (!thread->running) {
+ sys_mutex_unlock(nx_server->lock);
+ break;
+ }
+
+ //
+ // Check to see if the server is pausing. If so, block here.
+ //
+ block_if_paused_LH();
+ if (!thread->running) {
+ sys_mutex_unlock(nx_server->lock);
+ break;
+ }
+
+ //
+ // Service pending timers.
+ //
+ nx_timer_t *timer = DEQ_HEAD(nx_server->pending_timers);
+ if (timer) {
+ DEQ_REMOVE_HEAD(nx_server->pending_timers);
+
+ //
+ // Mark the timer as idle in case it reschedules itself.
+ //
+ nx_timer_idle_LH(timer);
+
+ //
+ // Release the lock and invoke the connection handler.
+ //
+ sys_mutex_unlock(nx_server->lock);
+ timer->handler(timer->context);
+ pn_driver_wakeup(nx_server->driver);
+ continue;
+ }
+
+ //
+ // Check the work queue for connectors scheduled for processing.
+ //
+ work = work_queue_get(nx_server->work_queue);
+ if (!work) {
+ //
+ // There is no pending work to do
+ //
+ if (nx_server->a_thread_is_waiting) {
+ //
+ // Another thread is waiting on the proton driver, this thread must
+ // wait on the condition variable until signaled.
+ //
+ sys_cond_wait(nx_server->cond, nx_server->lock);
+ } else {
+ //
+ // This thread elects itself to wait on the proton driver. Set the
+ // thread-is-waiting flag so other idle threads will not interfere.
+ //
+ nx_server->a_thread_is_waiting = true;
+
+ //
+ // Ask the timer module when its next timer is scheduled to fire. We'll
+ // use this value in driver_wait as the timeout. If there are no scheduled
+ // timers, the returned value will be -1.
+ //
+ long duration = nx_timer_next_duration_LH();
+
+ //
+ // Invoke the proton driver's wait sequence. This is a bit of a hack for now
+ // and will be improved in the future. The wait process is divided into three parts,
+ // the first and third of which need to be non-reentrant, and the second of which
+ // must be reentrant (and blocks).
+ //
+ pn_driver_wait_1(nx_server->driver);
+ sys_mutex_unlock(nx_server->lock);
+
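+                // Block in the driver, retrying if the wait is interrupted by
+                // a signal (PN_INTR) rather than treating that as an error.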
+ do {
+ error = 0;
+ poll_result = pn_driver_wait_2(nx_server->driver, duration);
+ if (poll_result == -1)
+ error = pn_driver_errno(nx_server->driver);
+ } while (error == PN_INTR);
+ if (error) {
+ nx_log(module, LOG_ERROR, "Driver Error: %s", pn_error_text(pn_error(nx_server->driver)));
+ exit(-1);
+ }
+
+ sys_mutex_lock(nx_server->lock);
+ pn_driver_wait_3(nx_server->driver);
+
+ if (!thread->running) {
+ sys_mutex_unlock(nx_server->lock);
+ break;
+ }
+
+ //
+ // Visit the timer module.
+ //
+ if (poll_result == 0 || ++timer_holdoff == 100) {
+ struct timespec tv;
+ clock_gettime(CLOCK_REALTIME, &tv);
+ long milliseconds = tv.tv_sec * 1000 + tv.tv_nsec / 1000000;
+ nx_timer_visit_LH(milliseconds);
+ timer_holdoff = 0;
+ }
+
+ //
+ // Process listeners (incoming connections).
+ //
+ thread_process_listeners(nx_server->driver);
+
+ //
+ // Traverse the list of connectors-needing-service from the proton driver.
+ // If the connector is not already in the work queue and it is not currently
+ // being processed by another thread, put it in the work queue and signal the
+ // condition variable.
+ //
+ work = pn_driver_connector(nx_server->driver);
+ while (work) {
+ ctx = pn_connector_context(work);
+ if (!ctx->enqueued && ctx->owner_thread == CONTEXT_NO_OWNER) {
+ ctx->enqueued = 1;
+ work_queue_put(nx_server->work_queue, work);
+ sys_cond_signal(nx_server->cond);
+ }
+ work = pn_driver_connector(nx_server->driver);
+ }
+
+ //
+ // Release our exclusive claim on pn_driver_wait.
+ //
+ nx_server->a_thread_is_waiting = false;
+ }
+ }
+
+ //
+ // If we were given a connector to work on from the work queue, mark it as
+ // owned by this thread and as no longer enqueued.
+ //
+ if (work) {
+ ctx = pn_connector_context(work);
+ if (ctx->owner_thread == CONTEXT_NO_OWNER) {
+ ctx->owner_thread = thread->thread_id;
+ ctx->enqueued = 0;
+ nx_server->threads_active++;
+ } else {
+ //
+ // This connector is being processed by another thread, re-queue it.
+ //
+ work_queue_put(nx_server->work_queue, work);
+ work = 0;
+ }
+ }
+ sys_mutex_unlock(nx_server->lock);
+
+ //
+ // Process the connector that we now have exclusive access to.
+ //
+ if (work) {
+ process_connector(work);
+
+ //
+ // Check to see if the connector was closed during processing
+ //
+ if (pn_connector_closed(work)) {
+ //
+ // Connector is closed. Free the context and the connector.
+ //
+ conn = pn_connector_connection(work);
+ if (ctx->connector) {
+ ctx->connector->ctx = 0;
+ ctx->connector->state = CXTR_STATE_CONNECTING;
+ nx_timer_schedule(ctx->connector->timer, ctx->connector->delay);
+ }
+ sys_mutex_lock(nx_server->lock);
+ free_nx_connection_t(ctx);
+ pn_connector_free(work);
+ if (conn)
+ pn_connection_free(conn);
+ nx_server->threads_active--;
+ sys_mutex_unlock(nx_server->lock);
+ } else {
+ //
+ // The connector lives on. Mark it as no longer owned by this thread.
+ //
+ sys_mutex_lock(nx_server->lock);
+ ctx->owner_thread = CONTEXT_NO_OWNER;
+ nx_server->threads_active--;
+ sys_mutex_unlock(nx_server->lock);
+ }
+
+ //
+ // Wake up the proton driver to force it to reconsider its set of FDs
+ // in light of the processing that just occurred.
+ //
+ pn_driver_wakeup(nx_server->driver);
+ }
+ }
+
+ return 0;
+}
+
+
+static void thread_start(nx_thread_t *thread)
+{
+ if (!thread)
+ return;
+
+ thread->using_thread = 1;
+ thread->thread = sys_thread(thread_run, (void*) thread);
+}
+
+
+static void thread_cancel(nx_thread_t *thread)
+{
+ if (!thread)
+ return;
+
+ thread->running = 0;
+ thread->canceled = 1;
+}
+
+
+static void thread_join(nx_thread_t *thread)
+{
+ if (!thread)
+ return;
+
+ if (thread->using_thread)
+ sys_thread_join(thread->thread);
+}
+
+
+static void thread_free(nx_thread_t *thread)
+{
+ if (!thread)
+ return;
+
+ free(thread);
+}
+
+
+static void cxtr_try_open(void *context)
+{
+ nx_connector_t *ct = (nx_connector_t*) context;
+ if (ct->state != CXTR_STATE_CONNECTING)
+ return;
+
+ nx_connection_t *ctx = new_nx_connection_t();
+ ctx->state = CONN_STATE_CONNECTING;
+ ctx->owner_thread = CONTEXT_NO_OWNER;
+ ctx->enqueued = 0;
+ ctx->pn_conn = 0;
+ ctx->listener = 0;
+ ctx->connector = ct;
+ ctx->context = ct->context;
+ ctx->user_context = 0;
+ ctx->ufd = 0;
+
+ //
+ // pn_connector is not thread safe
+ //
+ sys_mutex_lock(nx_server->lock);
+ ctx->pn_cxtr = pn_connector(nx_server->driver, ct->config->host, ct->config->port, (void*) ctx);
+ sys_mutex_unlock(nx_server->lock);
+
+ ct->ctx = ctx;
+ ct->delay = 5000;
+ nx_log(module, LOG_TRACE, "Connecting to %s:%s", ct->config->host, ct->config->port);
+}
+
+
+void nx_server_initialize(int thread_count)
+{
+ int i;
+
+ if (nx_server)
+ return; // TODO - Fail in a more dramatic way
+
+ nx_alloc_initialize();
+ nx_server = NEW(nx_server_t);
+
+ if (!nx_server)
+ return; // TODO - Fail in a more dramatic way
+
+ nx_server->thread_count = thread_count;
+ nx_server->driver = pn_driver();
+ nx_server->start_handler = 0;
+ nx_server->conn_handler = 0;
+ nx_server->signal_handler = 0;
+ nx_server->ufd_handler = 0;
+ nx_server->start_context = 0;
+ nx_server->signal_context = 0;
+ nx_server->lock = sys_mutex();
+ nx_server->cond = sys_cond();
+
+ nx_timer_initialize(nx_server->lock);
+
+ nx_server->threads = NEW_PTR_ARRAY(nx_thread_t, thread_count);
+ for (i = 0; i < thread_count; i++)
+ nx_server->threads[i] = thread(i);
+
+ nx_server->work_queue = work_queue();
+ DEQ_INIT(nx_server->pending_timers);
+ nx_server->a_thread_is_waiting = false;
+ nx_server->threads_active = 0;
+ nx_server->pause_requests = 0;
+ nx_server->threads_paused = 0;
+ nx_server->pause_next_sequence = 0;
+ nx_server->pause_now_serving = 0;
+ nx_server->pending_signal = 0;
+}
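+
+// Usage sketch for embedding the server (mirrors server_test.c below; the
+// handler names are placeholders):
+//
+//     nx_server_initialize(4);
+//     nx_server_set_conn_handler(my_conn_handler);
+//     nx_server_run();       // blocks until nx_server_stop() is called
+//     nx_server_finalize();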
+
+
+void nx_server_finalize(void)
+{
+ int i;
+ if (!nx_server)
+ return;
+
+ for (i = 0; i < nx_server->thread_count; i++)
+ thread_free(nx_server->threads[i]);
+
+ work_queue_free(nx_server->work_queue);
+
+ pn_driver_free(nx_server->driver);
+ sys_mutex_free(nx_server->lock);
+ sys_cond_free(nx_server->cond);
+ free(nx_server);
+ nx_server = 0;
+}
+
+
+void nx_server_set_conn_handler(nx_conn_handler_cb_t handler)
+{
+ nx_server->conn_handler = handler;
+}
+
+
+void nx_server_set_signal_handler(nx_signal_handler_cb_t handler, void *context)
+{
+ nx_server->signal_handler = handler;
+ nx_server->signal_context = context;
+}
+
+
+void nx_server_set_start_handler(nx_thread_start_cb_t handler, void *context)
+{
+ nx_server->start_handler = handler;
+ nx_server->start_context = context;
+}
+
+
+void nx_server_set_user_fd_handler(nx_user_fd_handler_cb_t ufd_handler)
+{
+ nx_server->ufd_handler = ufd_handler;
+}
+
+
+void nx_server_run(void)
+{
+ int i;
+ if (!nx_server)
+ return;
+
+ assert(nx_server->conn_handler); // Server can't run without a connection handler.
+
+ for (i = 1; i < nx_server->thread_count; i++)
+ thread_start(nx_server->threads[i]);
+
+ nx_log(module, LOG_INFO, "Operational, %d Threads Running", nx_server->thread_count);
+
+ thread_run((void*) nx_server->threads[0]);
+
+ for (i = 1; i < nx_server->thread_count; i++)
+ thread_join(nx_server->threads[i]);
+
+ nx_log(module, LOG_INFO, "Shut Down");
+}
+
+
+void nx_server_stop(void)
+{
+ int idx;
+
+ sys_mutex_lock(nx_server->lock);
+ for (idx = 0; idx < nx_server->thread_count; idx++)
+ thread_cancel(nx_server->threads[idx]);
+ sys_cond_signal_all(nx_server->cond);
+ pn_driver_wakeup(nx_server->driver);
+ sys_mutex_unlock(nx_server->lock);
+}
+
+
+void nx_server_signal(int signum)
+{
+ signal(signum, signal_handler);
+}
+
+
+void nx_server_pause(void)
+{
+ sys_mutex_lock(nx_server->lock);
+
+ //
+ // Bump the request count to stop all the threads.
+ //
+ nx_server->pause_requests++;
+ int my_sequence = nx_server->pause_next_sequence++;
+
+ //
+ // Awaken all threads that are currently blocking.
+ //
+ sys_cond_signal_all(nx_server->cond);
+ pn_driver_wakeup(nx_server->driver);
+
+ //
+ // Wait for the paused thread count plus the number of threads requesting a pause to equal
+ // the total thread count. Also, don't exit the blocking loop until now_serving equals our
+ // sequence number. This ensures that concurrent pausers don't run at the same time.
+ //
+ while ((nx_server->threads_paused + nx_server->pause_requests < nx_server->thread_count) ||
+ (my_sequence != nx_server->pause_now_serving))
+ sys_cond_wait(nx_server->cond, nx_server->lock);
+
+ sys_mutex_unlock(nx_server->lock);
+}
+
+
+void nx_server_resume(void)
+{
+ sys_mutex_lock(nx_server->lock);
+ nx_server->pause_requests--;
+ nx_server->pause_now_serving++;
+ sys_cond_signal_all(nx_server->cond);
+ sys_mutex_unlock(nx_server->lock);
+}
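+
+// Typical pairing (sketch): call nx_server_pause() before mutating state shared
+// with the worker threads, and nx_server_resume() afterwards to release them.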
+
+
+void nx_server_activate(nx_connection_t *ctx)
+{
+ if (!ctx)
+ return;
+
+ pn_connector_t *ctor = ctx->pn_cxtr;
+ if (!ctor)
+ return;
+
+ if (!pn_connector_closed(ctor))
+ pn_connector_activate(ctor, PN_CONNECTOR_WRITABLE);
+}
+
+
+void nx_connection_set_context(nx_connection_t *conn, void *context)
+{
+ conn->user_context = context;
+}
+
+
+void *nx_connection_get_context(nx_connection_t *conn)
+{
+ return conn->user_context;
+}
+
+
+pn_connection_t *nx_connection_pn(nx_connection_t *conn)
+{
+ return conn->pn_conn;
+}
+
+
+nx_listener_t *nx_server_listen(const nx_server_config_t *config, void *context)
+{
+ nx_listener_t *li = new_nx_listener_t();
+
+ if (!li)
+ return 0;
+
+ li->config = config;
+ li->context = context;
+ li->pn_listener = pn_listener(nx_server->driver, config->host, config->port, (void*) li);
+
+ if (!li->pn_listener) {
+ nx_log(module, LOG_ERROR, "Driver Error %d (%s)",
+ pn_driver_errno(nx_server->driver), pn_driver_error(nx_server->driver));
+ free_nx_listener_t(li);
+ return 0;
+ }
+ nx_log(module, LOG_TRACE, "Listening on %s:%s", config->host, config->port);
+
+ return li;
+}
+
+
+void nx_server_listener_free(nx_listener_t* li)
+{
+ pn_listener_free(li->pn_listener);
+ free_nx_listener_t(li);
+}
+
+
+void nx_server_listener_close(nx_listener_t* li)
+{
+ pn_listener_close(li->pn_listener);
+}
+
+
+nx_connector_t *nx_server_connect(const nx_server_config_t *config, void *context)
+{
+ nx_connector_t *ct = new_nx_connector_t();
+
+ if (!ct)
+ return 0;
+
+ ct->state = CXTR_STATE_CONNECTING;
+ ct->config = config;
+ ct->context = context;
+ ct->ctx = 0;
+ ct->timer = nx_timer(cxtr_try_open, (void*) ct);
+ ct->delay = 0;
+
+ nx_timer_schedule(ct->timer, ct->delay);
+ return ct;
+}
+
+
+void nx_server_connector_free(nx_connector_t* ct)
+{
+ // Don't free the proton connector. This will be done by the connector
+ // processing/cleanup.
+
+ if (ct->ctx) {
+ pn_connector_close(ct->ctx->pn_cxtr);
+ ct->ctx->connector = 0;
+ }
+
+ nx_timer_free(ct->timer);
+ free_nx_connector_t(ct);
+}
+
+
+nx_user_fd_t *nx_user_fd(int fd, void *context)
+{
+ nx_user_fd_t *ufd = new_nx_user_fd_t();
+
+ if (!ufd)
+ return 0;
+
+ nx_connection_t *ctx = new_nx_connection_t();
+ ctx->state = CONN_STATE_USER;
+ ctx->owner_thread = CONTEXT_NO_OWNER;
+ ctx->enqueued = 0;
+ ctx->pn_conn = 0;
+ ctx->listener = 0;
+ ctx->connector = 0;
+ ctx->context = 0;
+ ctx->user_context = 0;
+ ctx->ufd = ufd;
+
+ ufd->context = context;
+ ufd->fd = fd;
+ ufd->pn_conn = pn_connector_fd(nx_server->driver, fd, (void*) ctx);
+ pn_driver_wakeup(nx_server->driver);
+
+ return ufd;
+}
+
+
+void nx_user_fd_free(nx_user_fd_t *ufd)
+{
+ pn_connector_close(ufd->pn_conn);
+ free_nx_user_fd_t(ufd);
+}
+
+
+void nx_user_fd_activate_read(nx_user_fd_t *ufd)
+{
+ pn_connector_activate(ufd->pn_conn, PN_CONNECTOR_READABLE);
+ pn_driver_wakeup(nx_server->driver);
+}
+
+
+void nx_user_fd_activate_write(nx_user_fd_t *ufd)
+{
+ pn_connector_activate(ufd->pn_conn, PN_CONNECTOR_WRITABLE);
+ pn_driver_wakeup(nx_server->driver);
+}
+
+
+bool nx_user_fd_is_readable(nx_user_fd_t *ufd)
+{
+ return pn_connector_activated(ufd->pn_conn, PN_CONNECTOR_READABLE);
+}
+
+
+bool nx_user_fd_is_writeable(nx_user_fd_t *ufd)
+{
+ return pn_connector_activated(ufd->pn_conn, PN_CONNECTOR_WRITABLE);
+}
+
+
+void nx_server_timer_pending_LH(nx_timer_t *timer)
+{
+ DEQ_INSERT_TAIL(nx_server->pending_timers, timer);
+}
+
+
+void nx_server_timer_cancel_LH(nx_timer_t *timer)
+{
+ DEQ_REMOVE(nx_server->pending_timers, timer);
+}
+
diff --git a/qpid/extras/nexus/src/server_private.h b/qpid/extras/nexus/src/server_private.h
new file mode 100644
index 0000000000..a7f0a18ef7
--- /dev/null
+++ b/qpid/extras/nexus/src/server_private.h
@@ -0,0 +1,95 @@
+#ifndef __server_private_h__
+#define __server_private_h__ 1
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <qpid/nexus/server.h>
+#include <qpid/nexus/user_fd.h>
+#include <qpid/nexus/timer.h>
+#include <qpid/nexus/alloc.h>
+#include <proton/driver.h>
+
+void nx_server_timer_pending_LH(nx_timer_t *timer);
+void nx_server_timer_cancel_LH(nx_timer_t *timer);
+
+
+typedef enum {
+ CONN_STATE_CONNECTING = 0,
+ CONN_STATE_SASL_CLIENT,
+ CONN_STATE_SASL_SERVER,
+ CONN_STATE_OPENING,
+ CONN_STATE_OPERATIONAL,
+ CONN_STATE_FAILED,
+ CONN_STATE_USER
+} conn_state_t;
+
+#define CONTEXT_NO_OWNER -1
+
+typedef enum {
+ CXTR_STATE_CONNECTING = 0,
+ CXTR_STATE_OPEN,
+ CXTR_STATE_FAILED
+} cxtr_state_t;
+
+
+struct nx_listener_t {
+ const nx_server_config_t *config;
+ void *context;
+ pn_listener_t *pn_listener;
+};
+
+
+struct nx_connector_t {
+ cxtr_state_t state;
+ const nx_server_config_t *config;
+ void *context;
+ nx_connection_t *ctx;
+ nx_timer_t *timer;
+ long delay;
+};
+
+
+struct nx_connection_t {
+ conn_state_t state;
+ int owner_thread;
+ int enqueued;
+ pn_connector_t *pn_cxtr;
+ pn_connection_t *pn_conn;
+ nx_listener_t *listener;
+ nx_connector_t *connector;
+ void *context; // Copy of context from listener or connector
+ void *user_context;
+ nx_user_fd_t *ufd;
+};
+
+
+struct nx_user_fd_t {
+ void *context;
+ int fd;
+ pn_connector_t *pn_conn;
+};
+
+
+ALLOC_DECLARE(nx_listener_t);
+ALLOC_DECLARE(nx_connector_t);
+ALLOC_DECLARE(nx_connection_t);
+ALLOC_DECLARE(nx_user_fd_t);
+
+
+#endif
diff --git a/qpid/extras/nexus/src/timer.c b/qpid/extras/nexus/src/timer.c
new file mode 100644
index 0000000000..81b531305d
--- /dev/null
+++ b/qpid/extras/nexus/src/timer.c
@@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include "timer_private.h"
+#include "server_private.h"
+#include <qpid/nexus/ctools.h>
+#include <qpid/nexus/threading.h>
+#include <assert.h>
+#include <stdio.h>
+
+static sys_mutex_t *lock;
+static nx_timer_list_t free_list;
+static nx_timer_list_t idle_timers;
+static nx_timer_list_t scheduled_timers;
+static long time_base;
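+
+// scheduled_timers is delta-encoded: each entry's delta_time is the additional
+// wait beyond the entry before it, so only the head's delta is decremented as
+// time passes.  For example (illustrative values), timers due in 10, 25 and
+// 40 ms are stored with deltas 10, 15 and 15.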
+
+
+//=========================================================================
+// Private static functions
+//=========================================================================
+
+static void nx_timer_cancel_LH(nx_timer_t *timer)
+{
+ switch (timer->state) {
+ case TIMER_FREE:
+ assert(0);
+ break;
+
+ case TIMER_IDLE:
+ break;
+
+ case TIMER_SCHEDULED:
+ if (timer->next)
+ timer->next->delta_time += timer->delta_time;
+ DEQ_REMOVE(scheduled_timers, timer);
+ DEQ_INSERT_TAIL(idle_timers, timer);
+ break;
+
+ case TIMER_PENDING:
+ nx_server_timer_cancel_LH(timer);
+ break;
+ }
+
+ timer->state = TIMER_IDLE;
+}
+
+
+//=========================================================================
+// Public Functions from timer.h
+//=========================================================================
+
+nx_timer_t *nx_timer(nx_timer_cb_t cb, void* context)
+{
+ nx_timer_t *timer;
+
+ sys_mutex_lock(lock);
+
+ timer = DEQ_HEAD(free_list);
+ if (timer) {
+ DEQ_REMOVE_HEAD(free_list);
+ } else {
+ timer = NEW(nx_timer_t);
+ DEQ_ITEM_INIT(timer);
+ }
+
+ if (timer) {
+ timer->handler = cb;
+ timer->context = context;
+ timer->delta_time = 0;
+ timer->state = TIMER_IDLE;
+ DEQ_INSERT_TAIL(idle_timers, timer);
+ }
+
+ sys_mutex_unlock(lock);
+ return timer;
+}
+
+
+void nx_timer_free(nx_timer_t *timer)
+{
+ sys_mutex_lock(lock);
+ nx_timer_cancel_LH(timer);
+ DEQ_REMOVE(idle_timers, timer);
+ DEQ_INSERT_TAIL(free_list, timer);
+ timer->state = TIMER_FREE;
+ sys_mutex_unlock(lock);
+}
+
+
+void nx_timer_schedule(nx_timer_t *timer, long duration)
+{
+ nx_timer_t *ptr;
+ nx_timer_t *last;
+ long total_time;
+
+ sys_mutex_lock(lock);
+ nx_timer_cancel_LH(timer); // Timer is now on the idle list
+ assert(timer->state == TIMER_IDLE);
+ DEQ_REMOVE(idle_timers, timer);
+
+ //
+ // Handle the special case of a zero-time scheduling. In this case,
+ // the timer doesn't go on the scheduled list. It goes straight to the
+ // pending list in the server.
+ //
+ if (duration == 0) {
+ timer->state = TIMER_PENDING;
+ nx_server_timer_pending_LH(timer);
+ sys_mutex_unlock(lock);
+ return;
+ }
+
+ //
+ // Find the insert point in the schedule.
+ //
+ total_time = 0;
+ ptr = DEQ_HEAD(scheduled_timers);
+ assert(!ptr || ptr->prev == 0);
+ while (ptr) {
+ total_time += ptr->delta_time;
+ if (total_time > duration)
+ break;
+ ptr = ptr->next;
+ }
+
+ //
+ // Insert the timer into the schedule and adjust the delta time
+ // of the following timer if present.
+ //
+ if (total_time <= duration) {
+ assert(ptr == 0);
+ timer->delta_time = duration - total_time;
+ DEQ_INSERT_TAIL(scheduled_timers, timer);
+ } else {
+ total_time -= ptr->delta_time;
+ timer->delta_time = duration - total_time;
+ assert(ptr->delta_time > timer->delta_time);
+ ptr->delta_time -= timer->delta_time;
+ last = ptr->prev;
+ if (last)
+ DEQ_INSERT_AFTER(scheduled_timers, timer, last);
+ else
+ DEQ_INSERT_HEAD(scheduled_timers, timer);
+ }
+
+ timer->state = TIMER_SCHEDULED;
+
+ sys_mutex_unlock(lock);
+}
+
+
+void nx_timer_cancel(nx_timer_t *timer)
+{
+ sys_mutex_lock(lock);
+ nx_timer_cancel_LH(timer);
+ sys_mutex_unlock(lock);
+}
+
+
+//=========================================================================
+// Private Functions from timer_private.h
+//=========================================================================
+
+void nx_timer_initialize(sys_mutex_t *server_lock)
+{
+ lock = server_lock;
+ DEQ_INIT(free_list);
+ DEQ_INIT(idle_timers);
+ DEQ_INIT(scheduled_timers);
+ time_base = 0;
+}
+
+
+void nx_timer_finalize(void)
+{
+ lock = 0;
+}
+
+
+long nx_timer_next_duration_LH(void)
+{
+ nx_timer_t *timer = DEQ_HEAD(scheduled_timers);
+ if (timer)
+ return timer->delta_time;
+ return -1;
+}
+
+
+void nx_timer_visit_LH(long current_time)
+{
+ long delta;
+ nx_timer_t *timer = DEQ_HEAD(scheduled_timers);
+
+ if (time_base == 0) {
+ time_base = current_time;
+ return;
+ }
+
+ delta = current_time - time_base;
+ time_base = current_time;
+
+ while (timer) {
+ assert(delta >= 0);
+ if (timer->delta_time > delta) {
+ timer->delta_time -= delta;
+ break;
+ } else {
+ DEQ_REMOVE_HEAD(scheduled_timers);
+ delta -= timer->delta_time;
+ timer->state = TIMER_PENDING;
+ nx_server_timer_pending_LH(timer);
+
+ }
+ timer = DEQ_HEAD(scheduled_timers);
+ }
+}
+
+
+void nx_timer_idle_LH(nx_timer_t *timer)
+{
+ timer->state = TIMER_IDLE;
+ DEQ_INSERT_TAIL(idle_timers, timer);
+}
+
diff --git a/qpid/extras/nexus/src/timer_private.h b/qpid/extras/nexus/src/timer_private.h
new file mode 100644
index 0000000000..fa9891953f
--- /dev/null
+++ b/qpid/extras/nexus/src/timer_private.h
@@ -0,0 +1,51 @@
+#ifndef __timer_private_h__
+#define __timer_private_h__ 1
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <qpid/nexus/ctools.h>
+#include <qpid/nexus/timer.h>
+#include <qpid/nexus/threading.h>
+
+typedef enum {
+ TIMER_FREE,
+ TIMER_IDLE,
+ TIMER_SCHEDULED,
+ TIMER_PENDING
+} nx_timer_state_t;
+
+
+struct nx_timer_t {
+ DEQ_LINKS(nx_timer_t);
+ nx_timer_cb_t handler;
+ void *context;
+ long delta_time;
+ nx_timer_state_t state;
+};
+
+DEQ_DECLARE(nx_timer_t, nx_timer_list_t);
+
+void nx_timer_initialize(sys_mutex_t *server_lock);
+void nx_timer_finalize(void);
+long nx_timer_next_duration_LH(void);
+void nx_timer_visit_LH(long current_time);
+void nx_timer_idle_LH(nx_timer_t *timer);
+
+
+#endif
diff --git a/qpid/extras/nexus/src/work_queue.c b/qpid/extras/nexus/src/work_queue.c
new file mode 100644
index 0000000000..b9555b3cb2
--- /dev/null
+++ b/qpid/extras/nexus/src/work_queue.c
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <qpid/nexus/ctools.h>
+#include "work_queue.h"
+#include <string.h>
+#include <stdio.h>
+
+#define BATCH_SIZE 100
+typedef struct work_item_t work_item_t;
+
+struct work_item_t {
+ DEQ_LINKS(work_item_t);
+ pn_connector_t *conn;
+};
+
+DEQ_DECLARE(work_item_t, work_list_t);
+
+struct work_queue_t {
+ work_list_t items;
+ work_list_t free_list;
+};
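+
+// In work_queue_t, 'items' holds connectors awaiting service and 'free_list'
+// recycles work_item_t nodes (allocated in batches) so that enqueueing does not
+// normally have to allocate.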
+
+static void allocate_batch(work_queue_t *w)
+{
+ int i;
+ work_item_t *batch = NEW_ARRAY(work_item_t, BATCH_SIZE);
+ if (!batch)
+ return;
+
+ memset(batch, 0, sizeof(work_item_t) * BATCH_SIZE);
+
+ for (i = 0; i < BATCH_SIZE; i++)
+ DEQ_INSERT_TAIL(w->free_list, &batch[i]);
+}
+
+
+work_queue_t *work_queue(void)
+{
+ work_queue_t *w = NEW(work_queue_t);
+ if (!w)
+ return 0;
+
+ DEQ_INIT(w->items);
+ DEQ_INIT(w->free_list);
+
+ allocate_batch(w);
+
+ return w;
+}
+
+
+void work_queue_free(work_queue_t *w)
+{
+ if (!w)
+ return;
+
+    // TODO - track allocated batches so they can be freed here (batch memory is currently leaked)
+ free(w);
+}
+
+
+void work_queue_put(work_queue_t *w, pn_connector_t *conn)
+{
+ work_item_t *item;
+
+ if (!w)
+ return;
+ if (DEQ_SIZE(w->free_list) == 0)
+ allocate_batch(w);
+ if (DEQ_SIZE(w->free_list) == 0)
+ return;
+
+ item = DEQ_HEAD(w->free_list);
+ DEQ_REMOVE_HEAD(w->free_list);
+
+ item->conn = conn;
+
+ DEQ_INSERT_TAIL(w->items, item);
+}
+
+
+pn_connector_t *work_queue_get(work_queue_t *w)
+{
+ work_item_t *item;
+ pn_connector_t *conn;
+
+ if (!w)
+ return 0;
+ item = DEQ_HEAD(w->items);
+ if (!item)
+ return 0;
+
+ DEQ_REMOVE_HEAD(w->items);
+ conn = item->conn;
+ item->conn = 0;
+
+ DEQ_INSERT_TAIL(w->free_list, item);
+
+ return conn;
+}
+
+
+int work_queue_empty(work_queue_t *w)
+{
+ return !w || DEQ_SIZE(w->items) == 0;
+}
+
+
+int work_queue_depth(work_queue_t *w)
+{
+ if (!w)
+ return 0;
+ return DEQ_SIZE(w->items);
+}
+
diff --git a/qpid/extras/nexus/src/work_queue.h b/qpid/extras/nexus/src/work_queue.h
new file mode 100644
index 0000000000..597a484a9c
--- /dev/null
+++ b/qpid/extras/nexus/src/work_queue.h
@@ -0,0 +1,33 @@
+#ifndef __work_queue_h__
+#define __work_queue_h__ 1
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <proton/driver.h>
+
+typedef struct work_queue_t work_queue_t;
+
+work_queue_t *work_queue(void);
+void work_queue_free(work_queue_t *w);
+void work_queue_put(work_queue_t *w, pn_connector_t *conn);
+pn_connector_t *work_queue_get(work_queue_t *w);
+int work_queue_empty(work_queue_t *w);
+int work_queue_depth(work_queue_t *w);
+
+#endif
diff --git a/qpid/extras/nexus/tests/CMakeLists.txt b/qpid/extras/nexus/tests/CMakeLists.txt
new file mode 100644
index 0000000000..383c3c9919
--- /dev/null
+++ b/qpid/extras/nexus/tests/CMakeLists.txt
@@ -0,0 +1,34 @@
+##
+## Licensed to the Apache Software Foundation (ASF) under one
+## or more contributor license agreements. See the NOTICE file
+## distributed with this work for additional information
+## regarding copyright ownership. The ASF licenses this file
+## to you under the Apache License, Version 2.0 (the
+## "License"); you may not use this file except in compliance
+## with the License. You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing,
+## software distributed under the License is distributed on an
+## "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+## KIND, either express or implied. See the License for the
+## specific language governing permissions and limitations
+## under the License.
+##
+
+##
+## Build test applications
+##
+set(test_SOURCES
+ alloc_test.c
+ message_test.c
+ run_tests.c
+ server_test.c
+ timer_test.c
+ tool_test.c
+ )
+
+add_executable(run_tests ${test_SOURCES})
+target_link_libraries(run_tests qpid-nexus)
+
diff --git a/qpid/extras/nexus/tests/alloc_test.c b/qpid/extras/nexus/tests/alloc_test.c
new file mode 100644
index 0000000000..02f48af7e7
--- /dev/null
+++ b/qpid/extras/nexus/tests/alloc_test.c
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include "test_case.h"
+#include <stdio.h>
+#include <string.h>
+#include "alloc_private.h"
+
+typedef struct {
+ int A;
+ int B;
+} object_t;
+
+nx_alloc_config_t config = {3, 7, 10};
+
+ALLOC_DECLARE(object_t);
+ALLOC_DEFINE_CONFIG(object_t, &config);
+
+
+static char* check_stats(nx_alloc_stats_t *stats, uint64_t ah, uint64_t fh, uint64_t ht, uint64_t rt, uint64_t rg)
+{
+ if (stats->total_alloc_from_heap != ah) return "Incorrect alloc-from-heap";
+ if (stats->total_free_to_heap != fh) return "Incorrect free-to-heap";
+ if (stats->held_by_threads != ht) return "Incorrect held-by-threads";
+ if (stats->batches_rebalanced_to_threads != rt) return "Incorrect rebalance-to-threads";
+ if (stats->batches_rebalanced_to_global != rg) return "Incorrect rebalance-to-global";
+ return 0;
+}
+
+
+static char* test_alloc_basic(void *context)
+{
+ object_t *obj[50];
+ int idx;
+ nx_alloc_stats_t *stats;
+ char *error;
+
+ for (idx = 0; idx < 20; idx++)
+ obj[idx] = new_object_t();
+
+ stats = alloc_stats_object_t();
+ error = check_stats(stats, 21, 0, 21, 0, 0);
+ if (error) return error;
+
+ for (idx = 0; idx < 20; idx++)
+ free_object_t(obj[idx]);
+
+ error = check_stats(stats, 21, 5, 6, 0, 5);
+ if (error) return error;
+
+ for (idx = 0; idx < 20; idx++)
+ obj[idx] = new_object_t();
+
+ error = check_stats(stats, 27, 5, 21, 3, 5);
+ if (error) return error;
+
+ return 0;
+}
+
+
+int alloc_tests(void)
+{
+ int result = 0;
+ nx_alloc_initialize();
+
+ TEST_CASE(test_alloc_basic, 0);
+
+ return result;
+}
+
diff --git a/qpid/extras/nexus/tests/message_test.c b/qpid/extras/nexus/tests/message_test.c
new file mode 100644
index 0000000000..e9a4f01636
--- /dev/null
+++ b/qpid/extras/nexus/tests/message_test.c
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include "test_case.h"
+#include <stdio.h>
+#include <string.h>
+#include <qpid/nexus/message.h>
+#include <qpid/nexus/iterator.h>
+#include <proton/message.h>
+
+
+static char* test_init(void *context)
+{
+ nx_allocator_initialize(nx_allocator_default_config());
+ nx_allocator_finalize();
+ return 0;
+}
+
+
+static char* test_send_to_messenger(void *context)
+{
+ nx_allocator_initialize(nx_allocator_default_config());
+
+ nx_message_t *msg = nx_allocate_message();
+ nx_message_compose_1(msg, "test_addr_0", 0);
+ nx_buffer_t *buf = DEQ_HEAD(msg->buffers);
+ if (buf == 0) return "Expected a buffer in the test message";
+
+ pn_message_t *pn_msg = pn_message();
+ int result = pn_message_decode(pn_msg, (const char*) nx_buffer_base(buf), nx_buffer_size(buf));
+ if (result != 0) return "Error in pn_message_decode";
+
+ if (strcmp(pn_message_get_address(pn_msg), "test_addr_0") != 0)
+ return "Address mismatch in received message";
+
+ pn_message_free(pn_msg);
+ nx_free_message(msg);
+
+ nx_allocator_finalize();
+ return 0;
+}
+
+
+static char* test_receive_from_messenger(void *context)
+{
+ nx_allocator_initialize(nx_allocator_default_config());
+
+ pn_message_t *pn_msg = pn_message();
+ pn_message_set_address(pn_msg, "test_addr_1");
+
+ nx_buffer_t *buf = nx_allocate_buffer();
+ size_t size = nx_buffer_capacity(buf);
+ int result = pn_message_encode(pn_msg, (char*) nx_buffer_cursor(buf), &size);
+ if (result != 0) return "Error in pn_message_encode";
+ nx_buffer_insert(buf, size);
+
+ nx_message_t *msg = nx_allocate_message();
+ DEQ_INSERT_TAIL(msg->buffers, buf);
+ int valid = nx_message_check(msg, NX_DEPTH_ALL);
+ if (!valid) return "nx_message_check returns 'invalid'";
+
+ nx_field_iterator_t *iter = nx_message_field_to(msg);
+ if (iter == 0) return "Expected an iterator for the 'to' field";
+
+ if (!nx_field_iterator_equal(iter, (unsigned char*) "test_addr_1"))
+ return "Mismatched 'to' field contents";
+
+ pn_message_free(pn_msg);
+ nx_free_message(msg);
+
+ nx_allocator_finalize();
+ return 0;
+}
+
+
+static char* test_insufficient_check_depth(void *context)
+{
+ nx_allocator_initialize(nx_allocator_default_config());
+
+ pn_message_t *pn_msg = pn_message();
+ pn_message_set_address(pn_msg, "test_addr_2");
+
+ nx_buffer_t *buf = nx_allocate_buffer();
+ size_t size = nx_buffer_capacity(buf);
+ int result = pn_message_encode(pn_msg, (char*) nx_buffer_cursor(buf), &size);
+ if (result != 0) return "Error in pn_message_encode";
+ nx_buffer_insert(buf, size);
+
+ nx_message_t *msg = nx_allocate_message();
+ DEQ_INSERT_TAIL(msg->buffers, buf);
+ int valid = nx_message_check(msg, NX_DEPTH_DELIVERY_ANNOTATIONS);
+ if (!valid) return "nx_message_check returns 'invalid'";
+
+ nx_field_iterator_t *iter = nx_message_field_to(msg);
+ if (iter) return "Expected no iterator for the 'to' field";
+
+ nx_free_message(msg);
+
+ nx_allocator_finalize();
+ return 0;
+}
+
+
+int message_tests(void)
+{
+ int result = 0;
+
+ TEST_CASE(test_init, 0);
+ TEST_CASE(test_send_to_messenger, 0);
+ TEST_CASE(test_receive_from_messenger, 0);
+ TEST_CASE(test_insufficient_check_depth, 0);
+
+ return result;
+}
+
diff --git a/qpid/gentools/templ.java/model/version/AmqpConstantsClass.vm b/qpid/extras/nexus/tests/run_tests.c
index 8d459f2977..a677c04577 100644
--- a/qpid/gentools/templ.java/model/version/AmqpConstantsClass.vm
+++ b/qpid/extras/nexus/tests/run_tests.c
@@ -1,6 +1,4 @@
-&{AmqpConstants.java}
/*
- *
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -17,21 +15,22 @@
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
- *
*/
-/*
- * This file is auto-generated by ${GENERATOR} - do not modify.
- * Supported AMQP versions:
-%{VLIST} * ${major}-${minor}
- */
-
-package org.apache.qpid.framing;
+int tool_tests();
+int timer_tests();
+int alloc_tests();
+int server_tests();
+int message_tests();
-class AmqpConstants
+int main(int argc, char** argv)
{
- // Constant getValue methods
-
-%{TLIST} ${const_get_method}
-
+ int result = 0;
+ result += tool_tests();
+ result += timer_tests();
+ result += alloc_tests();
+ result += server_tests();
+ result += message_tests();
+ return result;
}
+
diff --git a/qpid/extras/nexus/tests/server_test.c b/qpid/extras/nexus/tests/server_test.c
new file mode 100644
index 0000000000..29cd70eeb3
--- /dev/null
+++ b/qpid/extras/nexus/tests/server_test.c
@@ -0,0 +1,195 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <assert.h>
+#include <qpid/nexus/timer.h>
+#include "test_case.h"
+#include <qpid/nexus/server.h>
+#include <qpid/nexus/user_fd.h>
+#include <qpid/nexus/threading.h>
+#include <qpid/nexus/log.h>
+
+#define THREAD_COUNT 4
+#define OCTET_COUNT 100
+
+static sys_mutex_t *test_lock;
+
+static void *expected_context;
+static int call_count;
+static int threads_seen[THREAD_COUNT];
+static char stored_error[512];
+
+static int write_count;
+static int read_count;
+static int fd[2];
+static nx_user_fd_t *ufd_write;
+static nx_user_fd_t *ufd_read;
+
+
+static void thread_start(void *context, int thread_id)
+{
+ sys_mutex_lock(test_lock);
+ if (context != expected_context && !stored_error[0])
+ sprintf(stored_error, "Unexpected Context Value: %lx", (long) context);
+ if (thread_id >= THREAD_COUNT && !stored_error[0])
+ sprintf(stored_error, "Thread_ID too large: %d", thread_id);
+ if (thread_id < 0 && !stored_error[0])
+ sprintf(stored_error, "Thread_ID negative: %d", thread_id);
+
+ call_count++;
+ if (thread_id >= 0 && thread_id < THREAD_COUNT)
+ threads_seen[thread_id]++;
+
+ if (call_count == THREAD_COUNT)
+ nx_server_stop();
+ sys_mutex_unlock(test_lock);
+}
+
+
+static int conn_handler(void *context, nx_conn_event_t event, nx_connection_t *conn)
+{
+ return 0;
+}
+
+
+static void ufd_handler(void *context, nx_user_fd_t *ufd)
+{
+ long dir = (long) context;
+ char buffer;
+ ssize_t len;
+ static int in_read = 0;
+ static int in_write = 0;
+
+ if (dir == 0) { // READ
+ in_read++;
+ assert(in_read == 1);
+ if (!nx_user_fd_is_readable(ufd_read)) {
+ sprintf(stored_error, "Expected Readable");
+ nx_server_stop();
+ } else {
+ len = read(fd[0], &buffer, 1);
+ if (len == 1) {
+ read_count++;
+ if (read_count == OCTET_COUNT)
+ nx_server_stop();
+ }
+ nx_user_fd_activate_read(ufd_read);
+ }
+ in_read--;
+ } else { // WRITE
+ in_write++;
+ assert(in_write == 1);
+ if (!nx_user_fd_is_writeable(ufd_write)) {
+ sprintf(stored_error, "Expected Writable");
+ nx_server_stop();
+ } else {
+ write(fd[1], "X", 1);
+
+ write_count++;
+ if (write_count < OCTET_COUNT)
+ nx_user_fd_activate_write(ufd_write);
+ }
+ in_write--;
+ }
+}
+
+
+static void fd_test_start(void *context)
+{
+ nx_user_fd_activate_read(ufd_read);
+}
+
+
+static char* test_start_handler(void *context)
+{
+ int i;
+
+ nx_server_initialize(THREAD_COUNT);
+
+ expected_context = (void*) 0x00112233;
+ stored_error[0] = 0x0;
+ call_count = 0;
+ for (i = 0; i < THREAD_COUNT; i++)
+ threads_seen[i] = 0;
+
+ nx_server_set_conn_handler(conn_handler);
+ nx_server_set_start_handler(thread_start, expected_context);
+ nx_server_run();
+ nx_server_finalize();
+
+ if (stored_error[0]) return stored_error;
+ if (call_count != THREAD_COUNT) return "Incorrect number of thread-start callbacks";
+ for (i = 0; i < THREAD_COUNT; i++)
+ if (threads_seen[i] != 1) return "Incorrect count on one thread ID";
+
+ return 0;
+}
+
+
+static char* test_user_fd(void *context)
+{
+ int res;
+ nx_timer_t *timer;
+
+ nx_server_initialize(THREAD_COUNT);
+ nx_server_set_conn_handler(conn_handler);
+ nx_server_set_user_fd_handler(ufd_handler);
+ timer = nx_timer(fd_test_start, 0);
+ nx_timer_schedule(timer, 0);
+
+ stored_error[0] = 0x0;
+ res = pipe2(fd, O_NONBLOCK);
+ if (res != 0) return "Error creating pipe2";
+
+ ufd_write = nx_user_fd(fd[1], (void*) 1);
+ ufd_read = nx_user_fd(fd[0], (void*) 0);
+
+ nx_server_run();
+ nx_timer_free(timer);
+ nx_server_finalize();
+ close(fd[0]);
+ close(fd[1]);
+
+ if (stored_error[0]) return stored_error;
+ if (write_count - OCTET_COUNT > 2) sprintf(stored_error, "Excessively high Write Count: %d", write_count);
+ if (read_count != OCTET_COUNT) sprintf(stored_error, "Incorrect Read Count: %d", read_count);
+
+ if (stored_error[0]) return stored_error;
+ return 0;
+}
+
+
+int server_tests(void)
+{
+ int result = 0;
+ test_lock = sys_mutex();
+ nx_log_set_mask(LOG_NONE);
+
+ TEST_CASE(test_start_handler, 0);
+ TEST_CASE(test_user_fd, 0);
+
+ sys_mutex_free(test_lock);
+ return result;
+}
+
diff --git a/qpid/extras/nexus/tests/test_case.h b/qpid/extras/nexus/tests/test_case.h
new file mode 100644
index 0000000000..6e36b440a5
--- /dev/null
+++ b/qpid/extras/nexus/tests/test_case.h
@@ -0,0 +1,36 @@
+#ifndef _nexus_test_case_h_
+#define _nexus_test_case_h_ 1
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+typedef char* (*testcase_t)(void *context);
+
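+/* TEST_CASE runs test function T with context C, prints PASS or the returned
+ * failure string, and increments an 'int result' counter expected to be in scope. */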
+#define TEST_CASE(T,C) do { \
+ char *r = T(C); \
+ printf("Test Case %s.%s: ", __FUNCTION__, #T); \
+ if (r) { \
+ printf("FAIL: %s\n", r); \
+ result++; \
+ } else \
+ printf("PASS\n"); \
+} while(0)
+
+
+#endif
+
diff --git a/qpid/extras/nexus/tests/timer_test.c b/qpid/extras/nexus/tests/timer_test.c
new file mode 100644
index 0000000000..f50f9367ea
--- /dev/null
+++ b/qpid/extras/nexus/tests/timer_test.c
@@ -0,0 +1,386 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include <stdio.h>
+#include <qpid/nexus/timer.h>
+#include "timer_private.h"
+#include "test_case.h"
+#include <qpid/nexus/threading.h>
+
+
+static unsigned long fire_mask;
+static nx_timer_list_t pending_timers;
+static sys_mutex_t *lock;
+static long time;
+static nx_timer_t *timers[16];
+
+
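+/* Test-local implementations of the server's timer hooks: timers that become
+ * pending are queued on a local list so the test can fire them by hand (see fire_head). */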
+void nx_server_timer_pending_LH(nx_timer_t *timer)
+{
+ DEQ_INSERT_TAIL(pending_timers, timer);
+}
+
+
+void nx_server_timer_cancel_LH(nx_timer_t *timer)
+{
+ if (timer->state == TIMER_PENDING)
+ DEQ_REMOVE(pending_timers, timer);
+}
+
+
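+/* Pops the head of the pending-timer list (if any), marks it idle, ORs its context
+ * bit into fire_mask, and returns the pending count observed before the pop. */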
+static int fire_head()
+{
+ sys_mutex_lock(lock);
+ int result = DEQ_SIZE(pending_timers);
+ nx_timer_t *timer = DEQ_HEAD(pending_timers);
+ if (timer) {
+ DEQ_REMOVE_HEAD(pending_timers);
+ nx_timer_idle_LH(timer);
+ fire_mask |= (unsigned long) timer->context;
+ }
+ sys_mutex_unlock(lock);
+ return result;
+}
+
+
+static char* test_quiet(void *context)
+{
+ fire_mask = 0;
+
+ sys_mutex_lock(lock);
+ nx_timer_visit_LH(time++);
+ nx_timer_visit_LH(time++);
+ nx_timer_visit_LH(time++);
+ nx_timer_visit_LH(time++);
+ nx_timer_visit_LH(time++);
+ sys_mutex_unlock(lock);
+
+ while(fire_head());
+
+ if (fire_mask != 0)
+ return "Expected zero timers fired";
+ return 0;
+}
+
+static char* test_immediate(void *context)
+{
+ while(fire_head());
+ fire_mask = 0;
+
+ nx_timer_schedule(timers[0], 0);
+
+ if (fire_mask != 0) return "Premature firing";
+ if (fire_head() > 1) return "Too many firings";
+ if (fire_mask != 1) return "Incorrect fire mask";
+
+ return 0;
+}
+
+
+static char* test_immediate_plus_delayed(void *context)
+{
+ while(fire_head());
+ fire_mask = 0;
+
+ nx_timer_schedule(timers[0], 0);
+ nx_timer_schedule(timers[1], 5);
+
+ if (fire_mask != 0) return "Premature firing";
+ if (fire_head() > 1) return "Too many firings";
+ if (fire_mask != 1) return "Incorrect fire mask 1";
+
+ sys_mutex_lock(lock);
+ nx_timer_visit_LH(time++);
+ time += 8;
+ nx_timer_visit_LH(time++);
+ sys_mutex_unlock(lock);
+
+ if (fire_head() < 1) return "Delayed Failed to fire";
+ if (fire_mask != 3) return "Incorrect fire mask 3";
+
+ return 0;
+}
+
+
+static char* test_single(void *context)
+{
+ while(fire_head());
+ fire_mask = 0;
+
+ nx_timer_schedule(timers[0], 2);
+ if (fire_head() > 0) return "Premature firing 1";
+
+ sys_mutex_lock(lock);
+ nx_timer_visit_LH(time++);
+ sys_mutex_unlock(lock);
+ if (fire_head() > 0) return "Premature firing 2";
+
+ sys_mutex_lock(lock);
+ nx_timer_visit_LH(time++);
+ sys_mutex_unlock(lock);
+ if (fire_head() < 1) return "Failed to fire";
+
+ sys_mutex_lock(lock);
+ nx_timer_visit_LH(time++);
+ nx_timer_visit_LH(time++);
+ nx_timer_visit_LH(time++);
+ sys_mutex_unlock(lock);
+ if (fire_head() != 0) return "Spurious fires";
+
+ if (fire_mask != 1) return "Incorrect fire mask";
+ if (timers[0]->state != TIMER_IDLE) return "Expected idle timer state";
+
+ return 0;
+}
+
+
+static char* test_two_inorder(void *context)
+{
+ while(fire_head());
+ fire_mask = 0;
+
+ nx_timer_schedule(timers[0], 2);
+ nx_timer_schedule(timers[1], 4);
+
+ sys_mutex_lock(lock);
+ nx_timer_visit_LH(time++);
+ nx_timer_visit_LH(time++);
+ sys_mutex_unlock(lock);
+ int count = fire_head();
+ if (count < 1) return "First failed to fire";
+ if (count > 1) return "Second fired prematurely";
+ if (fire_mask != 1) return "Incorrect fire mask 1";
+
+ sys_mutex_lock(lock);
+ nx_timer_visit_LH(time++);
+ nx_timer_visit_LH(time++);
+ sys_mutex_unlock(lock);
+ if (fire_head() < 1) return "Second failed to fire";
+ if (fire_mask != 3) return "Incorrect fire mask 3";
+
+ return 0;
+}
+
+
+static char* test_two_reverse(void *context)
+{
+ while(fire_head());
+ fire_mask = 0;
+
+ nx_timer_schedule(timers[0], 4);
+ nx_timer_schedule(timers[1], 2);
+
+ sys_mutex_lock(lock);
+ nx_timer_visit_LH(time++);
+ nx_timer_visit_LH(time++);
+ sys_mutex_unlock(lock);
+ int count = fire_head();
+ if (count < 1) return "First failed to fire";
+ if (count > 1) return "Second fired prematurely";
+ if (fire_mask != 2) return "Incorrect fire mask 2";
+
+ sys_mutex_lock(lock);
+ nx_timer_visit_LH(time++);
+ nx_timer_visit_LH(time++);
+ sys_mutex_unlock(lock);
+ if (fire_head() < 1) return "Second failed to fire";
+ if (fire_mask != 3) return "Incorrect fire mask 3";
+
+ return 0;
+}
+
+
+static char* test_two_duplicate(void *context)
+{
+ while(fire_head());
+ fire_mask = 0;
+
+ nx_timer_schedule(timers[0], 2);
+ nx_timer_schedule(timers[1], 2);
+
+ sys_mutex_lock(lock);
+ nx_timer_visit_LH(time++);
+ nx_timer_visit_LH(time++);
+ sys_mutex_unlock(lock);
+ int count = fire_head();
+ if (count != 2) return "Expected two firings";
+ fire_head();
+ if (fire_mask != 3) return "Incorrect fire mask 3";
+
+ sys_mutex_lock(lock);
+ nx_timer_visit_LH(time++);
+ nx_timer_visit_LH(time++);
+ sys_mutex_unlock(lock);
+ if (fire_head() > 0) return "Spurious timer fires";
+
+ return 0;
+}
+
+
+static char* test_separated(void *context)
+{
+ int count;
+
+ while(fire_head());
+ fire_mask = 0;
+
+ nx_timer_schedule(timers[0], 2);
+ nx_timer_schedule(timers[1], 4);
+
+ sys_mutex_lock(lock);
+ nx_timer_visit_LH(time++);
+ nx_timer_visit_LH(time++);
+ sys_mutex_unlock(lock);
+ count = fire_head();
+ if (count < 1) return "First failed to fire";
+ if (count > 1) return "Second fired prematurely";
+ if (fire_mask != 1) return "Incorrect fire mask 1";
+
+ nx_timer_schedule(timers[2], 2);
+ nx_timer_schedule(timers[3], 4);
+
+ sys_mutex_lock(lock);
+ nx_timer_visit_LH(time++);
+ nx_timer_visit_LH(time++);
+ sys_mutex_unlock(lock);
+ count = fire_head();
+ fire_head();
+ if (count < 1) return "Second failed to fire";
+ if (count < 2) return "Third failed to fire";
+ if (fire_mask != 7) return "Incorrect fire mask 7";
+
+ sys_mutex_lock(lock);
+ nx_timer_visit_LH(time++);
+ nx_timer_visit_LH(time++);
+ sys_mutex_unlock(lock);
+ count = fire_head();
+ if (count < 1) return "Fourth failed to fire";
+ if (fire_mask != 15) return "Incorrect fire mask 15";
+
+ sys_mutex_lock(lock);
+ nx_timer_visit_LH(time++);
+ nx_timer_visit_LH(time++);
+ nx_timer_visit_LH(time++);
+ nx_timer_visit_LH(time++);
+ nx_timer_visit_LH(time++);
+ nx_timer_visit_LH(time++);
+ sys_mutex_unlock(lock);
+ count = fire_head();
+ if (count > 0) return "Spurious fire";
+
+ return 0;
+}
+
+
+static char* test_big(void *context)
+{
+ while(fire_head());
+ fire_mask = 0;
+
+ long durations[16] =
+ { 5, 8, 7, 6,
+ 14, 10, 16, 15,
+ 11, 12, 9, 12,
+ 1, 2, 3, 4};
+ unsigned long masks[18] = {
+ 0x1000,
+ 0x3000,
+ 0x7000,
+ 0xf000,
+ 0xf001,
+ 0xf009,
+ 0xf00d,
+ 0xf00f,
+ 0xf40f,
+ 0xf42f,
+ 0xf52f,
+ 0xff2f,
+ 0xff2f,
+ 0xff3f,
+ 0xffbf,
+ 0xffff,
+ 0xffff,
+ 0xffff
+ };
+
+ int i;
+ for (i = 0; i < 16; i++)
+ nx_timer_schedule(timers[i], durations[i]);
+ for (i = 0; i < 18; i++) {
+ sys_mutex_lock(lock);
+ nx_timer_visit_LH(time++);
+ sys_mutex_unlock(lock);
+ while(fire_head());
+ if (fire_mask != masks[i]) {
+ static char error[100];
+ sprintf(error, "Iteration %d: expected mask %04lx, got %04lx", i, masks[i], fire_mask);
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+
+int timer_tests(void)
+{
+ int result = 0;
+
+ fire_mask = 0;
+ DEQ_INIT(pending_timers);
+ lock = sys_mutex();
+ nx_timer_initialize(lock);
+ time = 1;
+
+ timers[0] = nx_timer(0, (void*) 0x00000001);
+ timers[1] = nx_timer(0, (void*) 0x00000002);
+ timers[2] = nx_timer(0, (void*) 0x00000004);
+ timers[3] = nx_timer(0, (void*) 0x00000008);
+ timers[4] = nx_timer(0, (void*) 0x00000010);
+ timers[5] = nx_timer(0, (void*) 0x00000020);
+ timers[6] = nx_timer(0, (void*) 0x00000040);
+ timers[7] = nx_timer(0, (void*) 0x00000080);
+ timers[8] = nx_timer(0, (void*) 0x00000100);
+ timers[9] = nx_timer(0, (void*) 0x00000200);
+ timers[10] = nx_timer(0, (void*) 0x00000400);
+ timers[11] = nx_timer(0, (void*) 0x00000800);
+ timers[12] = nx_timer(0, (void*) 0x00001000);
+ timers[13] = nx_timer(0, (void*) 0x00002000);
+ timers[14] = nx_timer(0, (void*) 0x00004000);
+ timers[15] = nx_timer(0, (void*) 0x00008000);
+
+ TEST_CASE(test_quiet, 0);
+ TEST_CASE(test_immediate, 0);
+ TEST_CASE(test_immediate_plus_delayed, 0);
+ TEST_CASE(test_single, 0);
+ TEST_CASE(test_two_inorder, 0);
+ TEST_CASE(test_two_reverse, 0);
+ TEST_CASE(test_two_duplicate, 0);
+ TEST_CASE(test_separated, 0);
+ TEST_CASE(test_big, 0);
+
+ int i;
+ for (i = 0; i < 16; i++)
+ nx_timer_free(timers[i]);
+
+ nx_timer_finalize();
+
+ return result;
+}
+
diff --git a/qpid/extras/nexus/tests/tool_test.c b/qpid/extras/nexus/tests/tool_test.c
new file mode 100644
index 0000000000..0848b51ec7
--- /dev/null
+++ b/qpid/extras/nexus/tests/tool_test.c
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include "test_case.h"
+#include <stdio.h>
+#include <string.h>
+#include <qpid/nexus/ctools.h>
+
+typedef struct item_t {
+ DEQ_LINKS(struct item_t);
+ char letter;
+} item_t;
+
+DEQ_DECLARE(item_t, item_list_t);
+
+
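+/* Walks the list forward and backward, checking link consistency in both directions,
+ * that the letters match the expected key string, and that DEQ_SIZE agrees with the
+ * number of items counted. Returns 0 on success or a failure description. */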
+static char* list_well_formed(item_list_t list, char *key)
+{
+ item_t *ptr;
+ item_t *last = 0;
+ int size = DEQ_SIZE(list);
+ int count = 0;
+ char str[32];
+
+ ptr = DEQ_HEAD(list);
+ while (ptr) {
+ str[count] = ptr->letter;
+ count++;
+ if (DEQ_PREV(ptr) != last) return "Corrupt previous link";
+ last = ptr;
+ ptr = DEQ_NEXT(ptr);
+ }
+ str[count] = '\0';
+ if (strcmp(str, key) != 0) return "Invalid key";
+
+ if (count != size) return "Size different from number of items (forward)";
+
+ count = 0;
+ last = 0;
+ ptr = DEQ_TAIL(list);
+ while (ptr) {
+ count++;
+ if (DEQ_NEXT(ptr) != last) return "Corrupt next link";
+ last = ptr;
+ ptr = DEQ_PREV(ptr);
+ }
+
+ if (count != size) return "Size different from number of items (backward)";
+
+ return 0;
+}
+
+
+static char* test_deq_basic(void *context)
+{
+ item_list_t list;
+ item_t item[10];
+ item_t *ptr;
+ int idx;
+ char *subtest;
+
+ DEQ_INIT(list);
+ if (DEQ_SIZE(list) != 0) return "Expected zero initial size";
+
+ for (idx = 0; idx < 10; idx++) {
+ DEQ_ITEM_INIT(&item[idx]);
+ item[idx].letter = 'A' + idx;
+ DEQ_INSERT_TAIL(list, &item[idx]);
+ }
+ if (DEQ_SIZE(list) != 10) return "Expected 10 items in list";
+
+ ptr = DEQ_HEAD(list);
+ if (!ptr) return "Expected valid head item";
+ if (DEQ_PREV(ptr)) return "Head item has non-null previous link";
+ if (ptr->letter != 'A') return "Expected item A at the head";
+ if (DEQ_NEXT(ptr) == 0) return "Head item has null next link";
+ subtest = list_well_formed(list, "ABCDEFGHIJ");
+ if (subtest) return subtest;
+
+ DEQ_REMOVE_HEAD(list);
+ if (DEQ_SIZE(list) != 9) return "Expected 9 items in list";
+ ptr = DEQ_HEAD(list);
+ if (ptr->letter != 'B') return "Expected item B at the head";
+ subtest = list_well_formed(list, "BCDEFGHIJ");
+ if (subtest) return subtest;
+
+ DEQ_REMOVE_TAIL(list);
+ if (DEQ_SIZE(list) != 8) return "Expected 8 items in list";
+ ptr = DEQ_TAIL(list);
+ if (ptr->letter != 'I') return "Expected item I at the tail";
+ subtest = list_well_formed(list, "BCDEFGHI");
+ if (subtest) return subtest;
+
+ DEQ_REMOVE(list, &item[4]);
+ if (DEQ_SIZE(list) != 7) return "Expected 7 items in list";
+ subtest = list_well_formed(list, "BCDFGHI");
+ if (subtest) return subtest;
+
+ DEQ_REMOVE(list, &item[1]);
+ if (DEQ_SIZE(list) != 6) return "Expected 6 items in list";
+ subtest = list_well_formed(list, "CDFGHI");
+ if (subtest) return subtest;
+
+ DEQ_REMOVE(list, &item[8]);
+ if (DEQ_SIZE(list) != 5) return "Expected 5 items in list";
+ subtest = list_well_formed(list, "CDFGH");
+ if (subtest) return subtest;
+
+ DEQ_INSERT_HEAD(list, &item[8]);
+ if (DEQ_SIZE(list) != 6) return "Expected 6 items in list";
+ ptr = DEQ_HEAD(list);
+ if (ptr->letter != 'I') return "Expected item I at the head";
+ subtest = list_well_formed(list, "ICDFGH");
+ if (subtest) return subtest;
+
+ DEQ_INSERT_AFTER(list, &item[4], &item[7]);
+ if (DEQ_SIZE(list) != 7) return "Expected 7 items in list";
+ ptr = DEQ_TAIL(list);
+ if (ptr->letter != 'E') return "Expected item E at the tail";
+ subtest = list_well_formed(list, "ICDFGHE");
+ if (subtest) return subtest;
+
+ DEQ_INSERT_AFTER(list, &item[1], &item[5]);
+ if (DEQ_SIZE(list) != 8) return "Expected 8 items in list";
+ subtest = list_well_formed(list, "ICDFBGHE");
+ if (subtest) return subtest;
+
+ if (item[0].prev || item[0].next) return "Unlisted item A has non-null pointers";
+ if (item[9].prev || item[9].next) return "Unlisted item J has non-null pointers";
+
+ return 0;
+}
+
+
+int tool_tests(void)
+{
+ int result = 0;
+
+ TEST_CASE(test_deq_basic, 0);
+
+ return result;
+}
+
diff --git a/qpid/gentools/LICENSE b/qpid/gentools/LICENSE
deleted file mode 100644
index 43fa6abd19..0000000000
--- a/qpid/gentools/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
diff --git a/qpid/gentools/NOTICE b/qpid/gentools/NOTICE
deleted file mode 100644
index 09e9ae4902..0000000000
--- a/qpid/gentools/NOTICE
+++ /dev/null
@@ -1,2 +0,0 @@
-This product includes software developed by The Apache Software Foundation (http://www.apache.org/).
-
diff --git a/qpid/gentools/README.txt b/qpid/gentools/README.txt
deleted file mode 100644
index 94f705b064..0000000000
--- a/qpid/gentools/README.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-================================================================================
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing,
-software distributed under the License is distributed on an
-"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-KIND, either express or implied. See the License for the
-specific language governing permissions and limitations
-under the License.
-================================================================================
-
-AMQP MULTI_VERSION CODE GENERATOR
-
-This directory contains the first part of the new multi-AMQP-version code
-generator. The Java generation is almost complete, C++ will follow.
-
-NOTE: The generator has NOT been integrated into the current build, and is
-included here to run stand-alone for the purposes of review and comment. As
-currently configured, this generator will not interact with any file or
-directory outside of this directory.
-
-To build (from this directory):
-rm org/apache/qpid/gentools/*.class
-javac org/apache/qpid/gentools/Main.java
-
-Make sure you are using Sun's JDK1.5.0; Eclipse and gcj do not work.
-
-To run (from this directory):
-java org/apache/qpid/gentools/Main -j [xml_spec_file, ...]
-
-XML test files are located in the xml-src directory. Pay attention to the
-Basic class and Basic.Consume method - these were the primary test vehicles
-for this generator. *** NOTE *** These files do not represent any current or
-future version of the AMQP specification - do not use in production!
-
-Folders:
---------
-org/apache/qpid/gentools/: Source.
-xml-src/: Test AMQP specification files.
-templ.java/: Templates for java code generation.
-out.java/: Output folder for generated Java files (will be created with use
- of -j flag on command-line).
-templ.cpp/: (Future:) Templates for C++ code generation.
-out.cpp/: Output folder for generated C++ files (will be created with use
- of -c flag on command-line).
-
-For a more detaild description of the generator, see the Qpid Wiki
-(http://cwiki.apache.org/qpid/multiple-amqp-version-support.html).
-
-Please send comments and bugs to me (kim.vdriet [at] redhat.com) or via the
-Apache Qpid list (dev [at] qpid.apache.org).
-
-Kim van der Riet
diff --git a/qpid/gentools/build b/qpid/gentools/build
deleted file mode 100755
index a18a984dff..0000000000
--- a/qpid/gentools/build
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-
-cd src
-echo "--------- Building gentools ----------"
-echo "Clearing out old build files..."
-for f in org/apache/qpid/gentools/*.class; do
- if [ -e $f ]; then
- rm $f
- fi
-done
-echo "Compiling..."
-javac -source=1.5 -target=1.5 org/apache/qpid/gentools/*.java
-echo "Done. Try it out..."
-java org.apache.qpid.gentools.Main
-echo "--------- Building gentools completed ----------"
-cd ..
diff --git a/qpid/gentools/build.xml b/qpid/gentools/build.xml
deleted file mode 100644
index f6760a215e..0000000000
--- a/qpid/gentools/build.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<!--
- -
- - Licensed to the Apache Software Foundation (ASF) under one
- - or more contributor license agreements. See the NOTICE file
- - distributed with this work for additional information
- - regarding copyright ownership. The ASF licenses this file
- - to you under the Apache License, Version 2.0 (the
- - "License"); you may not use this file except in compliance
- - with the License. You may obtain a copy of the License at
- -
- - http://www.apache.org/licenses/LICENSE-2.0
- -
- - Unless required by applicable law or agreed to in writing,
- - software distributed under the License is distributed on an
- - "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- - KIND, either express or implied. See the License for the
- - specific language governing permissions and limitations
- - under the License.
- -
- -->
-<project name="gentools" default="compile">
- <property name="src" location="src" />
-
- <property name="java.source" value="1.5"/>
- <property name="java.target" value="1.5"/>
-
- <target name="compile">
- <javac srcdir="${src}" source="${java.source}" target="${java.target}" fork="true" debug="on" includeantruntime="false">
- <classpath>
- <fileset dir="${src}/../lib">
- <include name="**/*.jar"/>
- </fileset>
- </classpath>
- </javac>
- </target>
-
- <target name="clean">
- <delete>
- <fileset dir="${src}/org/apache/qpid/gentools" includes="*.class" />
- </delete>
- </target>
-
-</project>
diff --git a/qpid/gentools/lib/LICENSE b/qpid/gentools/lib/LICENSE
deleted file mode 100644
index e69de29bb2..0000000000
--- a/qpid/gentools/lib/LICENSE
+++ /dev/null
diff --git a/qpid/gentools/lib/NOTICE b/qpid/gentools/lib/NOTICE
deleted file mode 100644
index e69de29bb2..0000000000
--- a/qpid/gentools/lib/NOTICE
+++ /dev/null
diff --git a/qpid/gentools/lib/README.txt b/qpid/gentools/lib/README.txt
deleted file mode 100644
index e69de29bb2..0000000000
--- a/qpid/gentools/lib/README.txt
+++ /dev/null
diff --git a/qpid/gentools/lib/velocity-1.4.jar b/qpid/gentools/lib/velocity-1.4.jar
deleted file mode 100644
index 04ec9d2f85..0000000000
--- a/qpid/gentools/lib/velocity-1.4.jar
+++ /dev/null
Binary files differ
diff --git a/qpid/gentools/lib/velocity-dep-1.4.jar b/qpid/gentools/lib/velocity-dep-1.4.jar
deleted file mode 100644
index 375712b0e8..0000000000
--- a/qpid/gentools/lib/velocity-dep-1.4.jar
+++ /dev/null
Binary files differ
diff --git a/qpid/gentools/templ.cpp/method/MethodBodyClass.h.tmpl b/qpid/gentools/templ.cpp/method/MethodBodyClass.h.tmpl
deleted file mode 100644
index 5819a9cf9c..0000000000
--- a/qpid/gentools/templ.cpp/method/MethodBodyClass.h.tmpl
+++ /dev/null
@@ -1,112 +0,0 @@
-&{${CLASS}${METHOD}Body.h}
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-/*
- * This file is auto-generated by ${GENERATOR} - do not modify.
- * Supported AMQP versions:
-%{VLIST} * ${major}-${minor}
- */
-
-#ifndef qpid_framing_${CLASS}${METHOD}Body__
-#define qpid_framing_${CLASS}${METHOD}Body__
-
-#include <string>
-#include <sstream>
-
-#include <amqp_types.h>
-#include <AMQMethodBody.h>
-#include <Buffer.h>
-#include <FieldTable.h>
-
-namespace qpid
-{
-namespace framing
-{
-${version_namespace_start}
-
-class ${CLASS}${METHOD}Body : public AMQMethodBody
-{
- // Method field declarations
-
-%{FLIST} ${mb_field_declaration}
-
-
-public:
- typedef boost::shared_ptr<${CLASS}${METHOD}Body> shared_ptr;
-
- // Constructors and destructors
-
-${mb_constructor_with_initializers}
-
- inline ${CLASS}${METHOD}Body(u_int8_t major, u_int8_t minor): AMQMethodBody(major, minor) {}
- inline ${CLASS}${METHOD}Body(ProtocolVersion& version): AMQMethodBody(version) {}
- virtual ~${CLASS}${METHOD}Body() {}
-
- // Attribute get methods
-
-%{FLIST} ${mb_field_get_method}
-
- // Helper methods
-
- inline void print(std::ostream& out) const
- {
- out << "${CLASS}${METHOD}: ";
-%{FLIST} ${mb_field_print}
- }
-
- inline u_int16_t amqpClassId() const
- {
- return ${CLASS_ID_INIT};
- }
-
- inline u_int16_t amqpMethodId() const
- {
- return ${METHOD_ID_INIT};
- }
-
- inline u_int32_t bodySize() const
- {
- u_int32_t size = 0;
-%{FLIST} ${mb_body_size}
- return size;
- }
-
- inline void encodeContent(Buffer&${mb_buffer_param}) const
- {
-%{FLIST} ${mb_encode}
- }
-
- inline void decodeContent(Buffer&${mb_buffer_param})
- {
-%{FLIST} ${mb_decode}
- }
-
-${mb_server_operation_invoke}
-
-}; // class ${CLASS}${METHOD}Body
-
-${version_namespace_end}
-} // namespace framing
-} // namespace qpid
-
-#endif
-
diff --git a/qpid/gentools/templ.cpp/model/AMQP_ClientOperations.h.tmpl b/qpid/gentools/templ.cpp/model/AMQP_ClientOperations.h.tmpl
deleted file mode 100644
index a9fb0e0f69..0000000000
--- a/qpid/gentools/templ.cpp/model/AMQP_ClientOperations.h.tmpl
+++ /dev/null
@@ -1,82 +0,0 @@
-&{AMQP_ClientOperations.h}
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-/*
- * This file is auto-generated by ${GENERATOR} - do not modify.
- * Supported AMQP versions:
-%{VLIST} * ${major}-${minor}
- */
-
-#ifndef qpid_framing_AMQP_ClientOperations__
-#define qpid_framing_AMQP_ClientOperations__
-
-#include <sstream>
-
-#include <FieldTable.h>
-#include <ProtocolVersion.h>
-#include <ProtocolVersionException.h>
-
-namespace qpid {
-namespace framing {
-
-class AMQP_ClientProxy;
-
-class AMQP_ClientOperations
-{
-protected:
- ProtocolVersion version;
- AMQP_ClientOperations() {}
-
-public:
- AMQP_ClientOperations(u_int8_t major, u_int8_t minor) : version(major, minor) {}
- AMQP_ClientOperations(ProtocolVersion& version) : version(version) {}
- virtual ~AMQP_ClientOperations() {}
-
- inline u_int8_t getMajor() const { return version.getMajor(); }
- inline u_int8_t getMinor() const { return version.getMinor(); }
- inline const ProtocolVersion& getVersion() const { return version; }
- inline bool isVersion(u_int8_t _major, u_int8_t _minor) const
- {
- return version.equals(_major, _minor);
- }
- inline bool isVersion(ProtocolVersion& _version) const
- {
- return version.equals(_version);
- }
-
- // Include framing constant declarations
- #include <AMQP_Constants.h>
-
- // Inner classes
-
-%{CLIST} ${coh_inner_class}
-
- // Method handler get methods
-
-%{CLIST} ${coh_method_handler_get_method}
-
-}; /* class AMQP_ClientOperations */
-
-} /* namespace framing */
-} /* namespace qpid */
-
-#endif
diff --git a/qpid/gentools/templ.cpp/model/AMQP_ClientProxy.h.tmpl b/qpid/gentools/templ.cpp/model/AMQP_ClientProxy.h.tmpl
deleted file mode 100644
index 0653ed7186..0000000000
--- a/qpid/gentools/templ.cpp/model/AMQP_ClientProxy.h.tmpl
+++ /dev/null
@@ -1,75 +0,0 @@
-&{AMQP_ClientProxy.h}
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-/*
- * This file is auto-generated by ${GENERATOR} - do not modify.
- * Supported AMQP versions:
-%{VLIST} * ${major}-${minor}
- */
-
-#ifndef qpid_framing_AMQP_ClientProxy__
-#define qpid_framing_AMQP_ClientProxy__
-
-#include <AMQP_ClientOperations.h>
-#include <FieldTable.h>
-#include <OutputHandler.h>
-
-namespace qpid {
-namespace framing {
-
-class AMQP_ClientProxy : public AMQP_ClientOperations
-{
-private:
-
- ProtocolVersion version;
- OutputHandler* out;
-%{CLIST} ${cph_handler_pointer_defn}
-
-public:
- AMQP_ClientProxy(OutputHandler* out, u_int8_t major, u_int8_t minor);
- ProtocolVersion& getProtocolVersion() {return version;}
- virtual ~AMQP_ClientProxy() {}
-
- // Get methods for handlers
-
-%{CLIST} ${cph_handler_pointer_get_method}
-
- // Inner class definitions
-
-%{CLIST} ${cph_inner_class_defn}
-
-private:
- // Inner class instances
-
-%{CLIST} ${cph_inner_class_instance}
-
-public:
- // Inner class instance get methods
-
-%{CLIST} ${cph_inner_class_get_method}
-
-}; /* class AMQP_ClientProxy */
-
-} /* namespace framing */
-} /* namespace qpid */
-
-#endif
diff --git a/qpid/gentools/templ.cpp/model/AMQP_Constants.h.tmpl b/qpid/gentools/templ.cpp/model/AMQP_Constants.h.tmpl
deleted file mode 100644
index 4631bc8de6..0000000000
--- a/qpid/gentools/templ.cpp/model/AMQP_Constants.h.tmpl
+++ /dev/null
@@ -1,34 +0,0 @@
-&{AMQP_Constants.h}
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-/*
- * This file is auto-generated by ${GENERATOR} - do not modify.
- * Supported AMQP versions:
-%{VLIST} * ${major}-${minor}
- */
- // NOTE: This file is intended to be included within the class structure of both
- // the client and server operations classes. These need to have <sstream> included.
-
- // Constant getValue methods
-
-%{TLIST} ${ch_get_value_method}
- \ No newline at end of file
diff --git a/qpid/gentools/templ.cpp/model/AMQP_MethodVersionMap.cpp.tmpl b/qpid/gentools/templ.cpp/model/AMQP_MethodVersionMap.cpp.tmpl
deleted file mode 100644
index dc2a890c88..0000000000
--- a/qpid/gentools/templ.cpp/model/AMQP_MethodVersionMap.cpp.tmpl
+++ /dev/null
@@ -1,62 +0,0 @@
-&{AMQP_MethodVersionMap.cpp}
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-/*
- * This file is auto-generated by ${GENERATOR} - do not modify.
- * Supported AMQP versions:
-%{VLIST} * ${major}-${minor}
- */
-
-#include <sstream>
-
-#include <AMQP_MethodVersionMap.h>
-
-namespace qpid
-{
-namespace framing
-{
-
-AMQP_MethodVersionMap::AMQP_MethodVersionMap()
-{
-%{CLIST} ${mc_create_method_body_map_entry}
-}
-
-AMQMethodBody* AMQP_MethodVersionMap::createMethodBody(u_int16_t classId, u_int16_t methodId, u_int8_t major, u_int8_t minor)
-{
- iterator itr = find(createMapKey(classId, methodId, major, minor));
- if (itr == end())
- {
- std::stringstream ss;
- ss << "Unable to find MethodBody class for classId = " << classId << ", methodId = " <<
- methodId << ", AMQ protocol version = " << major << "-" << minor << ".";
- throw ProtocolVersionException(ss.str());
- }
- return (itr->second)(major, minor);
-}
-
-u_int64_t AMQP_MethodVersionMap::createMapKey(u_int16_t classId, u_int16_t methodId, u_int8_t major, u_int8_t minor)
-{
- return ((u_int64_t)classId<<48) + ((u_int64_t)methodId<<32) + ((u_int64_t)major<<16) + minor;
-}
-
-} /* namespace framing */
-} /* namespace qpid */
diff --git a/qpid/gentools/templ.cpp/model/AMQP_MethodVersionMap.h.tmpl b/qpid/gentools/templ.cpp/model/AMQP_MethodVersionMap.h.tmpl
deleted file mode 100644
index c197871d4b..0000000000
--- a/qpid/gentools/templ.cpp/model/AMQP_MethodVersionMap.h.tmpl
+++ /dev/null
@@ -1,57 +0,0 @@
-&{AMQP_MethodVersionMap.h}
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-/*
- * This file is auto-generated by ${GENERATOR} - do not modify.
- * Supported AMQP versions:
-%{VLIST} * ${major}-${minor}
- */
-
-#ifndef qpid_framing_AMQP_MethodVersionMap__
-#define qpid_framing_AMQP_MethodVersionMap__
-
-#include <map>
-#include <AMQMethodBody.h>
-
-%{MLIST} ${mc_method_body_include}
-
-namespace qpid
-{
-namespace framing
-{
-
-template <class T> AMQMethodBody* createMethodBodyFn(u_int8_t major, u_int8_t minor) { return new T(major, minor); }
-typedef AMQMethodBody* (*fnPtr)(u_int8_t, u_int8_t);
-
-class AMQP_MethodVersionMap: public std::map<u_int64_t, fnPtr>
-{
-protected:
- u_int64_t createMapKey(u_int16_t classId, u_int16_t methodId, u_int8_t major, u_int8_t minor);
-public:
- AMQP_MethodVersionMap();
- AMQMethodBody* createMethodBody(u_int16_t classId, u_int16_t methodId, u_int8_t major, u_int8_t minor);
-};
-
-} /* namespace framing */
-} /* namespace qpid */
-
-#endif
diff --git a/qpid/gentools/templ.cpp/model/AMQP_ServerOperations.h.tmpl b/qpid/gentools/templ.cpp/model/AMQP_ServerOperations.h.tmpl
deleted file mode 100644
index e87723667b..0000000000
--- a/qpid/gentools/templ.cpp/model/AMQP_ServerOperations.h.tmpl
+++ /dev/null
@@ -1,83 +0,0 @@
-&{AMQP_ServerOperations.h}
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-/*
- * This file is auto-generated by ${GENERATOR} - do not modify.
- * Supported AMQP versions:
-%{VLIST} * ${major}-${minor}
- */
-
-#ifndef qpid_framing_AMQP_ServerOperations__
-#define qpid_framing_AMQP_ServerOperations__
-
-#include <sstream>
-
-#include <FieldTable.h>
-#include <ProtocolVersion.h>
-#include <ProtocolVersionException.h>
-
-namespace qpid {
-namespace framing {
-
-class AMQP_ServerProxy;
-class AMQP_ClientProxy;
-
-class AMQP_ServerOperations
-{
-protected:
- ProtocolVersion version;
- AMQP_ServerOperations() {}
-
-public:
- AMQP_ServerOperations(u_int8_t major, u_int8_t minor) : version(major, minor) {}
- AMQP_ServerOperations(ProtocolVersion& version) : version(version) {}
- virtual ~AMQP_ServerOperations() {}
-
- inline u_int8_t getMajor() const { return version.getMajor(); }
- inline u_int8_t getMinor() const { return version.getMinor(); }
- inline const ProtocolVersion& getVersion() const { return version; }
- inline bool isVersion(u_int8_t _major, u_int8_t _minor) const
- {
- return version.equals(_major, _minor);
- }
- inline bool isVersion(ProtocolVersion& _version) const
- {
- return version.equals(_version);
- }
-
- // Include framing constant declarations
- #include <AMQP_Constants.h>
-
- // Inner classes
-
-%{CLIST} ${soh_inner_class}
-
- // Method handler get methods
-
-%{CLIST} ${soh_method_handler_get_method}
-
-}; /* class AMQP_ServerOperations */
-
-} /* namespace framing */
-} /* namespace qpid */
-
-#endif
diff --git a/qpid/gentools/templ.cpp/model/AMQP_ServerProxy.h.tmpl b/qpid/gentools/templ.cpp/model/AMQP_ServerProxy.h.tmpl
deleted file mode 100644
index fab29f2c60..0000000000
--- a/qpid/gentools/templ.cpp/model/AMQP_ServerProxy.h.tmpl
+++ /dev/null
@@ -1,74 +0,0 @@
-&{AMQP_ServerProxy.h}
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-/*
- * This file is auto-generated by ${GENERATOR} - do not modify.
- * Supported AMQP versions:
-%{VLIST} * ${major}-${minor}
- */
-
-#ifndef qpid_framing_AMQP_ServerProxy__
-#define qpid_framing_AMQP_ServerProxy__
-
-#include <AMQP_ServerOperations.h>
-#include <FieldTable.h>
-#include <OutputHandler.h>
-
-namespace qpid {
-namespace framing {
-
-class AMQP_ServerProxy : public AMQP_ServerOperations
-{
-private:
- ProtocolVersion version;
- OutputHandler* out;
-%{CLIST} ${sph_handler_pointer_defn}
-
-public:
- AMQP_ServerProxy(OutputHandler* out, u_int8_t major, u_int8_t minor);
- ProtocolVersion& getProtocolVersion() {return version;}
- virtual ~AMQP_ServerProxy() {}
-
- // Get methods for handlers
-
-%{CLIST} ${sph_handler_pointer_get_method}
-
- // Inner class definitions
-
-%{CLIST} ${sph_inner_class_defn}
-
-private:
- // Inner class instances
-
-%{CLIST} ${sph_inner_class_instance}
-
-public:
- // Inner class instance get methods
-
-%{CLIST} ${sph_inner_class_get_method}
-
-}; /* class AMQP_ServerProxy */
-
-} /* namespace framing */
-} /* namespace qpid */
-
-#endif
diff --git a/qpid/gentools/templ.java/PropertyContentHeaderClass.tmpl b/qpid/gentools/templ.java/PropertyContentHeaderClass.tmpl
deleted file mode 100644
index ab6406b1fe..0000000000
--- a/qpid/gentools/templ.java/PropertyContentHeaderClass.tmpl
+++ /dev/null
@@ -1,208 +0,0 @@
-&{${CLASS}ContentHeaderProperties.java}
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-/*
- * This file is auto-generated by ${GENERATOR} - do not modify.
- * Supported AMQP versions:
-%{VLIST} * ${major}-${minor}
- */
-
-package org.apache.qpid.framing;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.mina.common.ByteBuffer;
-
-public class ${CLASS}ContentHeaderProperties implements ContentHeaderProperties
-{
- private static final Logger logger = LoggerFactory.getLogger(BasicContentHeaderProperties.class);
-
- /**
- * We store the encoded form when we decode the content header so that if we need to
- * write it out without modifying it we can do so without incurring the expense of
- * reencoding it.
- */
- private byte[] encodedBuffer;
-
- /**
- * Flag indicating whether the entire content header has been decoded yet.
- */
- private boolean decodedFlag = true;
-
- /**
- * We have some optimisations for partial decoding for maximum performance. The
- * headers are used in the broker for routing in some cases so we can decode that
- * separately.
- */
- private boolean decodedHeadersFlag = true;
-
- /**
- * We have some optimisations for partial decoding for maximum performance. The
- * content type is used by all clients to determine the message type.
- */
- private boolean decodedContentTypeFlag = true;
-
- /**
- * AMQP major and minor version of this instance.
- */
- private byte major;
- private byte minor;
-
- /**
- * Property flags.
- */
- ${pch_property_flags_declare}
-
- // Header fields from specification
-%{FLIST} ${field_declaration}
-
- /**
- * Constructor
- */
- public ${CLASS}ContentHeaderProperties(byte major, byte minor)
- {
- this.major = major;
- this.minor = minor;
-
- // Although one flag is initialized per property, the flags are used
- // in ordinal order of the AMQP version represented by this instance,
- // thus the number of flags actually used may be less than the total
- // number defined.
- ${pch_property_flags_initializer}
- }
-
- public int getPropertyListSize()
- {
- if (encodedBuffer != null)
- {
- return encodedBuffer.length;
- }
- else
- {
- int size = 0;
-%{FLIST} ${pch_field_list_size}
- return size;
- }
- }
-
- private void clearEncodedForm()
- {
- if (!decodedFlag && encodedBuffer != null)
- {
- //decode();
- }
- encodedBuffer = null;
- }
-
- public void setPropertyFlags(int[] compactPropertyFlags)
- throws AMQProtocolVersionException
- {
- clearEncodedForm();
-${pch_compact_property_flags_check}
-%{FLIST} ${pch_set_compact_property_flags}
- }
-
- public int[] getPropertyFlags()
- {
- int[] compactPropertyFlags = new int[] { 0 };
-${pch_compact_property_flags_initializer}
-%{FLIST} ${pch_get_compact_property_flags}
- return compactPropertyFlags;
- }
-
- public void writePropertyListPayload(ByteBuffer buffer)
- {
- if (encodedBuffer != null)
- {
- buffer.put(encodedBuffer);
- }
- else
- {
-%{FLIST} ${pch_field_list_payload}
- }
- }
-
- public void populatePropertiesFromBuffer(ByteBuffer buffer, int[] propertyFlags, int size)
- throws AMQFrameDecodingException, AMQProtocolVersionException
- {
- setPropertyFlags(propertyFlags);
-
- if (logger.isDebugEnabled())
- {
- logger.debug("Property flags: " + propertyFlags);
- }
- decode(buffer);
- /*encodedBuffer = new byte[size];
- buffer.get(encodedBuffer, 0, size);
- decodedFlag = false;
- decodedHeadersFlag = false;
- decodedContentTypeFlag = false;*/
- }
-
- private void decode(ByteBuffer buffer)
- {
- //ByteBuffer buffer = ByteBuffer.wrap(encodedBuffer);
- int pos = buffer.position();
- try
- {
-%{FLIST} ${pch_field_list_decode}
- // This line does nothing, but prevents a compiler error (Exception not thrown)
- // if this block is empty.
- if (false) throw new AMQFrameDecodingException("");
- }
- catch (AMQFrameDecodingException e)
- {
- throw new RuntimeException("Error in content header data: " + e);
- }
-
- final int endPos = buffer.position();
- buffer.position(pos);
- final int len = endPos - pos;
- encodedBuffer = new byte[len];
- final int limit = buffer.limit();
- buffer.limit(endPos);
- buffer.get(encodedBuffer, 0, len);
- buffer.limit(limit);
- buffer.position(endPos);
- decodedFlag = true;
- }
-
- private void decodeIfNecessary()
- {
- if (!decodedFlag)
- {
- //decode();
- }
- }
-
- // Field clear methods
-
-%{FLIST} ${pch_field_clear_methods}
-
- // Field get methods
-
-%{FLIST} ${pch_field_get_methods}
-
- // Field set methods
-
-%{FLIST} ${pch_field_set_methods}
-}
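
The template above encodes a caching pattern worth spelling out: the generated class keeps the raw encoded bytes alongside the decoded fields, serves writePropertyListPayload() straight from that cache, and discards the cache in clearEncodedForm() as soon as a setter touches anything. A minimal stand-alone sketch of the same idea, with an invented class and a single illustrative field rather than the generated ones:

    import java.nio.ByteBuffer;

    // Illustrative only: cache the wire form of a property set and drop the cache
    // whenever a field is mutated, mirroring encodedBuffer/clearEncodedForm() above.
    final class CachedProperties
    {
        private byte[] encodedForm;   // bytes as read off the wire, or null once stale
        private String contentType;   // one decoded field, standing in for the full set

        void populateFromBuffer(ByteBuffer buffer, int length)
        {
            encodedForm = new byte[length];
            buffer.get(encodedForm);
            contentType = decodeContentType(encodedForm);
        }

        void writePayload(ByteBuffer out)
        {
            if (encodedForm != null)
            {
                out.put(encodedForm);  // fast path: no re-encoding needed
            }
            else
            {
                out.put(encode());     // slow path, only taken after a mutation
            }
        }

        void setContentType(String type)
        {
            encodedForm = null;        // cached form is now stale
            contentType = type;
        }

        private String decodeContentType(byte[] bytes) { return new String(bytes); }
        private byte[] encode() { return contentType == null ? new byte[0] : contentType.getBytes(); }
    }
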
diff --git a/qpid/gentools/templ.java/method/version/MethodBodyClass.vm b/qpid/gentools/templ.java/method/version/MethodBodyClass.vm
deleted file mode 100644
index bb62438a65..0000000000
--- a/qpid/gentools/templ.java/method/version/MethodBodyClass.vm
+++ /dev/null
@@ -1,190 +0,0 @@
-#macro( UpperCamel $name )
-#set( $name = "${name.substring(0,1).toUpperCase()}${name.substring(1)}" )
-#end
-#macro( toUpperCamel $name )${name.substring(0,1).toUpperCase()}${name.substring(1)}#end
-
-
-
-#set( $amqp_ClassName = $amqpClass.Name)
-#UpperCamel( $amqp_ClassName )
-#set( $amqp_MethodName = $amqpMethod.Name )
-#UpperCamel( $amqp_MethodName )
-#set( $javaClassName = "${amqp_ClassName}${amqp_MethodName}BodyImpl" )
-#set( $interfaceName = "${amqp_ClassName}${amqp_MethodName}Body" )
-#set( $amqpPackageName = "amqp_$version.getMajor()_$version.getMinor()" )
-
-#set( $filename = "${amqpPackageName}/${javaClassName}.java")
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-/*
- * This file is auto-generated by ${generator} - do not modify.
- * Supported AMQP version:
- * $version.getMajor()-$version.getMinor()
- */
-
-#set( $clazz = $amqpClass.asSingleVersionClass( $version ) )
-#set( $method = $amqpMethod.asSingleVersionMethod( $version ) )
-
-package org.apache.qpid.framing.amqp_$version.getMajor()_$version.getMinor();
-
-import java.util.HashMap;
-
-import org.apache.mina.common.ByteBuffer;
-import org.apache.qpid.framing.*;
-
-public class ${javaClassName} extends AMQMethodBody_$version.getMajor()_$version.getMinor() implements $interfaceName
-{
- private static final AMQMethodBodyInstanceFactory FACTORY_INSTANCE = new AMQMethodBodyInstanceFactory()
- {
- public AMQMethodBody newInstance(ByteBuffer in, long size) throws AMQFrameDecodingException
- {
- return new ${javaClassName}(in);
- }
-
-
- };
-
-
- public static AMQMethodBodyInstanceFactory getFactory()
- {
- return FACTORY_INSTANCE;
- }
-
- public static int CLASS_ID = $clazz.ClassId;
-
- public static int METHOD_ID = $method.MethodId;
-
-
-
- // Fields declared in specification
-#foreach( $field in $method.ConsolidatedFields )
- private final $field.NativeType _$field.getName(); // $field.UnderlyingFields
-#end
-
-
- // Constructor
-
- public ${javaClassName}(ByteBuffer buffer) throws AMQFrameDecodingException
- {
-#foreach( $field in $method.ConsolidatedFields )
- _$field.Name = read$field.getEncodingType()( buffer );
-#end
- }
-
- public ${javaClassName}(
-#foreach( $field in $method.FieldList )
-#if( $velocityCount == $method.getFieldList().size() )
- $field.NativeType $field.Name
-#else
- $field.NativeType $field.Name,
-#end
-#end)
- {
-#set( $consolidatedFieldName = "" )
-#foreach( $field in $method.FieldList )
-#if( $method.isConsolidated( $field.Name ) )
-#if( !$method.getConsolidatedFieldName( $field.Name ).equals( $consolidatedFieldName ) )
-#if( !$consolidatedFieldName.equals("") )
- _$consolidatedFieldName = $consolidatedFieldName; // 1
-#end
-#set( $consolidatedFieldName = $method.getConsolidatedFieldName( $field.Name ) )
- byte $consolidatedFieldName = (byte)0;
-#end
- if( $field.Name )
- {
- $consolidatedFieldName = (byte) (((int) $consolidatedFieldName) | (1 << $method.getPositionInBitField( $field.Name )));
- }
-#if( $velocityCount == $method.getFieldList().size())
- _$consolidatedFieldName = $consolidatedFieldName;
-#else
-
-#end
-#else
-#if( !$consolidatedFieldName.equals("") )
- _$consolidatedFieldName = $consolidatedFieldName;
-#end
-#set( $consolidatedFieldName = "" )
- _$field.Name = $field.Name;
-#end
-#end
- }
-
- public int getClazz()
- {
- return CLASS_ID;
- }
-
- public int getMethod()
- {
- return METHOD_ID;
- }
-
-
-#foreach( $field in $method.FieldList )
- public final $field.NativeType get#toUpperCamel( ${field.Name} )()
- {
-#if( $method.isConsolidated( $field.Name ) )
- return (((int)(_$method.getConsolidatedFieldName( $field.Name ))) & ( 1 << $method.getPositionInBitField( $field.Name ))) != 0;
-#else
- return _$field.Name;
-#end
- }
-#end
-
- protected int getBodySize()
- {
- int size = 0;
-#foreach( $field in $method.ConsolidatedFields )
-#if( $field.isFixedSize() )
- size += $field.Size;
-#else
- size += getSizeOf( _$field.Name );
-#end
-#end
- return size;
- }
-
- public void writeMethodPayload(ByteBuffer buffer)
- {
-#foreach( $field in $method.ConsolidatedFields )
-
- write$field.getEncodingType()( buffer, _$field.Name );
-#end
- }
-
-
- public String toString()
- {
- StringBuffer buf = new StringBuffer("[$javaClassName: ");
-#foreach( $field in $method.FieldList )
- buf.append( "$field.Name=" );
- buf.append( get#toUpperCamel( $field.Name )() );
-#if( $velocityCount != $method.FieldList.size() )
- buf.append( ", " );
-#end
-#end
- buf.append("]");
- return buf.toString();
- }
-
-
-}
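
The most intricate part of this template is the consolidated-field handling: consecutive AMQP bit fields are packed into one byte in the generated constructor and recovered by masking in the getters. A self-contained sketch of that packing, using invented flag names rather than real AMQP fields:

    // Illustrative bit-field consolidation, mirroring the generated constructor
    // and getters above; the three flags stand in for consecutive AMQP bit fields.
    final class BitFieldExample
    {
        private final byte _bitfield0;

        BitFieldExample(boolean mandatory, boolean immediate, boolean nowait)
        {
            byte bitfield0 = (byte) 0;
            if (mandatory) { bitfield0 = (byte) (bitfield0 | (1 << 0)); }
            if (immediate) { bitfield0 = (byte) (bitfield0 | (1 << 1)); }
            if (nowait)    { bitfield0 = (byte) (bitfield0 | (1 << 2)); }
            _bitfield0 = bitfield0;
        }

        boolean getMandatory() { return (_bitfield0 & (1 << 0)) != 0; }
        boolean getImmediate() { return (_bitfield0 & (1 << 1)) != 0; }
        boolean getNowait()    { return (_bitfield0 & (1 << 2)) != 0; }
    }
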
diff --git a/qpid/gentools/templ.java/model/ProtocolVersionListClass.vm b/qpid/gentools/templ.java/model/ProtocolVersionListClass.vm
deleted file mode 100644
index bcf7db345b..0000000000
--- a/qpid/gentools/templ.java/model/ProtocolVersionListClass.vm
+++ /dev/null
@@ -1,154 +0,0 @@
-#set( $filename = "ProtocolVersion.java" )
-/*
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements. See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership. The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License. You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied. See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-*/
-
-/*
-* This file is auto-generated by $generator - do not modify.
-* Supported AMQP versions:
-#foreach( $version in $model.getVersionSet() )
-* $version.getMajor()-$version.getMinor()
-#end
-*/
-
-package org.apache.qpid.framing;
-
-import java.util.SortedSet;
-import java.util.Collections;
-import java.util.TreeSet;
-
-
-public class ProtocolVersion implements Comparable
-{
- private final byte _majorVersion;
- private final byte _minorVersion;
- private final String _stringFormat;
-
-
- public ProtocolVersion(byte majorVersion, byte minorVersion)
- {
- _majorVersion = majorVersion;
- _minorVersion = minorVersion;
- _stringFormat = _majorVersion+"-"+_minorVersion;
- }
-
- public byte getMajorVersion()
- {
- return _majorVersion;
- }
-
- public byte getMinorVersion()
- {
- return _minorVersion;
- }
-
- public String toString()
- {
- return _stringFormat;
- }
-
- public int compareTo(Object o)
- {
- ProtocolVersion pv = (ProtocolVersion) o;
-
- /*
- * 0-8 has its major and minor numbers the wrong way round (it's actually 8-0)...
- * so we need to deal with that case specially
- */
-
- if((_majorVersion == (byte) 8) && (_minorVersion == (byte) 0))
- {
- ProtocolVersion fixedThis = new ProtocolVersion(_minorVersion, _majorVersion);
- return fixedThis.compareTo(pv);
- }
-
- if((pv.getMajorVersion() == (byte) 8) && (pv.getMinorVersion() == (byte) 0))
- {
- ProtocolVersion fixedOther = new ProtocolVersion(pv.getMinorVersion(), pv.getMajorVersion());
- return this.compareTo(fixedOther);
- }
-
- if(_majorVersion > pv.getMajorVersion())
- {
- return 1;
- }
- else if(_majorVersion < pv.getMajorVersion())
- {
- return -1;
- }
- else if(_minorVersion > pv.getMinorVersion())
- {
- return 1;
- }
- else if(getMinorVersion() < pv.getMinorVersion())
- {
- return -1;
- }
- else
- {
- return 0;
- }
-
- }
-
- public boolean equals(Object o)
- {
- return o != null && (o == this || (compareTo(o) == 0));
- }
-
- public int hashCode()
- {
- return (0xFF & (int)_minorVersion) | ((0xFF & (int)_majorVersion) << 8);
- }
-
-
- public boolean isSupported()
- {
- return _supportedVersions.contains(this);
- }
-
- public static ProtocolVersion getLatestSupportedVersion()
- {
- return _supportedVersions.last();
- }
-
- private static final SortedSet<ProtocolVersion> _supportedVersions;
-
- static
- {
- SortedSet<ProtocolVersion> versions = new TreeSet<ProtocolVersion>();
-
-#foreach( $version in $model.getVersionSet() )
- versions.add(new ProtocolVersion((byte)$version.getMajor(),(byte)$version.getMinor()));
-#end
- _supportedVersions = Collections.unmodifiableSortedSet(versions);
- }
-
-
- public static SortedSet<ProtocolVersion> getSupportedProtocolVersions()
- {
- return _supportedVersions;
- }
-
-
-
-
-
-}
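
The compareTo() above special-cases AMQP 0-8, whose major and minor bytes are stored swapped (as 8-0), so that it still sorts below 0-9 and later versions; equals() and hashCode() then piggyback on that ordering. A quick check of the resulting order, assuming the generated org.apache.qpid.framing.ProtocolVersion is on the classpath:

    import java.util.SortedSet;
    import java.util.TreeSet;

    import org.apache.qpid.framing.ProtocolVersion;

    // Assumes the generated ProtocolVersion class shown above is available.
    public class VersionOrderCheck
    {
        public static void main(String[] args)
        {
            SortedSet<ProtocolVersion> versions = new TreeSet<ProtocolVersion>();
            versions.add(new ProtocolVersion((byte) 8, (byte) 0)); // AMQP 0-8, stored swapped
            versions.add(new ProtocolVersion((byte) 0, (byte) 9)); // AMQP 0-9
            // 8-0 sorts first because compareTo() rewrites it as 0-8 before comparing.
            System.out.println(versions.first()); // 8-0
            System.out.println(versions.last());  // 0-9
        }
    }
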
diff --git a/qpid/gentools/templ.java/model/version/MethodRegistryClass.vm b/qpid/gentools/templ.java/model/version/MethodRegistryClass.vm
deleted file mode 100644
index 82287e7f8f..0000000000
--- a/qpid/gentools/templ.java/model/version/MethodRegistryClass.vm
+++ /dev/null
@@ -1,145 +0,0 @@
-#set( $filename = "amqp_$version.getMajor()_$version.getMinor()/MethodRegistry_${version.getMajor()}_${version.getMinor()}.java")
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-/*
- * This file is auto-generated by $generator - do not modify.
- * Supported AMQP version:
- * $version.getMajor()-$version.getMinor()
- */
-
-package org.apache.qpid.framing.amqp_${version.getMajor()}_${version.getMinor()};
-
-import org.apache.qpid.framing.AMQMethodBodyInstanceFactory;
-import org.apache.qpid.framing.AMQFrameDecodingException;
-import org.apache.qpid.framing.AMQMethodBody;
-import org.apache.qpid.framing.MethodRegistry;
-import org.apache.qpid.framing.ProtocolVersion;
-
-
-import org.apache.log4j.Logger;
-import org.apache.mina.common.ByteBuffer;
-
-public class MethodRegistry_$version.getMajor()_$version.getMinor() extends MethodRegistry
-{
-
- private static final Logger _log = Logger.getLogger(MethodRegistry.class);
-
-#set( $specificModel = $model.asSingleVersionModel() )
-
-
-
- private final AMQMethodBodyInstanceFactory[][] _factories = new AMQMethodBodyInstanceFactory[$specificModel.getMaximumClassId()+1][];
-
- public MethodRegistry_$version.getMajor()_$version.getMinor()()
- {
- this(new ProtocolVersion((byte)$version.getMajor(),(byte)$version.getMinor()));
- }
-
- public MethodRegistry_$version.getMajor()_$version.getMinor()(ProtocolVersion pv)
- {
- super(pv);
-#foreach( $amqpClass in $specificModel.getClassList() )
-#set( $amqpClassNameFirstChar = $amqpClass.getName().substring(0,1) )
-#set( $amqpClassNameFirstCharU = $amqpClassNameFirstChar.toUpperCase() )
-#set( $amqpClassNameUpperCamel = "$amqpClassNameFirstCharU$amqpClass.getName().substring(1)" )
-
-
-
- // Register method body instance factories for the $amqpClassNameUpperCamel class.
-
- _factories[$amqpClass.getClassId()] = new AMQMethodBodyInstanceFactory[$amqpClass.getMaximumMethodId()+1];
-
-#foreach( $amqpMethod in $amqpClass.getMethodList() )
-#set( $amqpMethodNameFirstChar = $amqpMethod.getName().substring(0,1) )
-#set( $amqpMethodNameFirstCharU = $amqpMethodNameFirstChar.toUpperCase() )
-#set( $amqpMethodNameUpperCamel = "$amqpMethodNameFirstCharU$amqpMethod.getName().substring(1)" )
- _factories[$amqpClass.getClassId()][$amqpMethod.getMethodId()] = ${amqpClassNameUpperCamel}${amqpMethodNameUpperCamel}BodyImpl.getFactory();
-#end
-
-#end
-
-
- }
-
-
- public AMQMethodBody convertToBody(ByteBuffer in, long size)
- throws AMQFrameDecodingException
- {
- int classId = in.getUnsignedShort();
- int methodId = in.getUnsignedShort();
-
- AMQMethodBodyInstanceFactory bodyFactory;
- try
- {
- bodyFactory = _factories[classId][methodId];
- }
- catch(NullPointerException e)
- {
- throw new AMQFrameDecodingException(_log,
- "Class " + classId + " unknown in AMQP version $version.getMajor()-$version.getMinor()"
- + " (while trying to decode class " + classId + " method " + methodId + ".");
- }
- catch(IndexOutOfBoundsException e)
- {
- if(classId >= _factories.length)
- {
- throw new AMQFrameDecodingException(_log,
- "Class " + classId + " unknown in AMQP version $version.getMajor()-$version.getMinor()"
- + " (while trying to decode class " + classId + " method " + methodId + ".");
-
- }
- else
- {
- throw new AMQFrameDecodingException(_log,
- "Method " + methodId + " unknown in AMQP version $version.getMajor()-$version.getMinor()"
- + " (while trying to decode class " + classId + " method " + methodId + ".");
-
- }
- }
-
-
- if (bodyFactory == null)
- {
- throw new AMQFrameDecodingException(_log,
- "Method " + methodId + " unknown in AMQP version $version.getMajor()-$version.getMinor()"
- + " (while trying to decode class " + classId + " method " + methodId + ".");
- }
-
-
- return bodyFactory.newInstance(in, size);
-
-
- }
-
-
- public int getMaxClassId()
- {
- return $specificModel.getMaximumClassId();
- }
-
- public int getMaxMethodId(int classId)
- {
- return _factories[classId].length - 1;
- }
-
-
-}
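
Stripped of the Velocity markup, the registry above is a two-level jump table: the class id selects a row of factories, the method id selects an entry within it, and missing or out-of-range entries become AMQFrameDecodingException. A compact, generic sketch of the same dispatch; the DispatchTable name and API are illustrative, not part of the generated code:

    // Illustrative two-level dispatch table keyed by (classId, methodId), echoing
    // the _factories array in the generated registry.
    final class DispatchTable<T>
    {
        private final Object[][] entries;

        DispatchTable(int maxClassId, int maxMethodId)
        {
            entries = new Object[maxClassId + 1][maxMethodId + 1];
        }

        void register(int classId, int methodId, T handler)
        {
            entries[classId][methodId] = handler;
        }

        @SuppressWarnings("unchecked")
        T lookup(int classId, int methodId)
        {
            if (classId >= entries.length || methodId >= entries[classId].length
                    || entries[classId][methodId] == null)
            {
                throw new IllegalArgumentException(
                        "Unknown class " + classId + " / method " + methodId);
            }
            return (T) entries[classId][methodId];
        }
    }
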
diff --git a/qpid/gentools/xml-src/amqp-0.10.test.xml b/qpid/gentools/xml-src/amqp-0.10.test.xml
deleted file mode 100644
index 5d3d80648b..0000000000
--- a/qpid/gentools/xml-src/amqp-0.10.test.xml
+++ /dev/null
@@ -1,4241 +0,0 @@
-<?xml version = "1.0"?>
-
-<!--
- EDITORS: (PH) Pieter Hintjens <ph@imatix.com>
- (KvdR) Kim van der Riet <kim.vdriet@redhat.com>
-
- These editors have been assigned by the AMQP working group.
- Please do not edit/commit this file without consulting with
- one of the above editors.
- ========================================================
-
- TODOs
- - see TODO comments in the text
--->
-
-<!--
- Copyright Notice
- ================
- (c) Copyright JPMorgan Chase Bank & Co., Cisco Systems, Inc., Envoy Technologies Inc.,
- iMatix Corporation, IONA® Technologies, Red Hat, Inc.,
- TWIST Process Innovations, and 29West Inc. 2006. All rights reserved.
-
- License
- =======
- JPMorgan Chase Bank & Co., Cisco Systems, Inc., Envoy Technologies Inc., iMatix
- Corporation, IONA® Technologies, Red Hat, Inc., TWIST Process Innovations, and
- 29West Inc. (collectively, the "Authors") each hereby grants to you a worldwide,
- perpetual, royalty-free, nontransferable, nonexclusive license to
- (i) copy, display, and implement the Advanced Messaging Queue Protocol
- ("AMQP") Specification and (ii) the Licensed Claims that are held by
- the Authors, all for the purpose of implementing the Advanced Messaging
- Queue Protocol Specification. Your license and any rights under this
- Agreement will terminate immediately without notice from
- any Author if you bring any claim, suit, demand, or action related to
- the Advanced Messaging Queue Protocol Specification against any Author.
- Upon termination, you shall destroy all copies of the Advanced Messaging
- Queue Protocol Specification in your possession or control.
-
- As used hereunder, "Licensed Claims" means those claims of a patent or
- patent application, throughout the world, excluding design patents and
- design registrations, owned or controlled, or that can be sublicensed
- without fee and in compliance with the requirements of this
- Agreement, by an Author or its affiliates now or at any
- future time and which would necessarily be infringed by implementation
- of the Advanced Messaging Queue Protocol Specification. A claim is
- necessarily infringed hereunder only when it is not possible to avoid
- infringing it because there is no plausible non-infringing alternative
- for implementing the required portions of the Advanced Messaging Queue
- Protocol Specification. Notwithstanding the foregoing, Licensed Claims
- shall not include any claims other than as set forth above even if
- contained in the same patent as Licensed Claims; or that read solely
- on any implementations of any portion of the Advanced Messaging Queue
- Protocol Specification that are not required by the Advanced Messaging
- Queue Protocol Specification, or that, if licensed, would require a
- payment of royalties by the licensor to unaffiliated third parties.
- Moreover, Licensed Claims shall not include (i) any enabling technologies
- that may be necessary to make or use any Licensed Product but are not
- themselves expressly set forth in the Advanced Messaging Queue Protocol
- Specification (e.g., semiconductor manufacturing technology, compiler
- technology, object oriented technology, networking technology, operating
- system technology, and the like); or (ii) the implementation of other
- published standards developed elsewhere and merely referred to in the
- body of the Advanced Messaging Queue Protocol Specification, or
- (iii) any Licensed Product and any combinations thereof the purpose or
- function of which is not required for compliance with the Advanced
- Messaging Queue Protocol Specification. For purposes of this definition,
- the Advanced Messaging Queue Protocol Specification shall be deemed to
- include both architectural and interconnection requirements essential
- for interoperability and may also include supporting source code artifacts
- where such architectural, interconnection requirements and source code
- artifacts are expressly identified as being required or documentation to
- achieve compliance with the Advanced Messaging Queue Protocol Specification.
-
- As used hereunder, "Licensed Products" means only those specific portions
- of products (hardware, software or combinations thereof) that implement
- and are compliant with all relevant portions of the Advanced Messaging
- Queue Protocol Specification.
-
- The following disclaimers, which you hereby also acknowledge as to any
- use you may make of the Advanced Messaging Queue Protocol Specification:
-
- THE ADVANCED MESSAGING QUEUE PROTOCOL SPECIFICATION IS PROVIDED "AS IS,"
- AND THE AUTHORS MAKE NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
- IMPLIED, INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, OR TITLE; THAT THE
- CONTENTS OF THE ADVANCED MESSAGING QUEUE PROTOCOL SPECIFICATION ARE
- SUITABLE FOR ANY PURPOSE; NOR THAT THE IMPLEMENTATION OF THE ADVANCED
- MESSAGING QUEUE PROTOCOL SPECIFICATION WILL NOT INFRINGE ANY THIRD PARTY
- PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS.
-
- THE AUTHORS WILL NOT BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL,
- INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR RELATING TO ANY
- USE, IMPLEMENTATION OR DISTRIBUTION OF THE ADVANCED MESSAGING QUEUE
- PROTOCOL SPECIFICATION.
-
- The name and trademarks of the Authors may NOT be used in any manner,
- including advertising or publicity pertaining to the Advanced Messaging
- Queue Protocol Specification or its contents without specific, written
- prior permission. Title to copyright in the Advanced Messaging Queue
- Protocol Specification will at all times remain with the Authors.
-
- No other rights are granted by implication, estoppel or otherwise.
-
- Upon termination of your license or rights under this Agreement, you
- shall destroy all copies of the Advanced Messaging Queue Protocol
- Specification in your possession or control.
-
- Trademarks
- ==========
- "JPMorgan", "JPMorgan Chase", "Chase", the JPMorgan Chase logo and the
- Octagon Symbol are trademarks of JPMorgan Chase & Co.
-
- IMATIX and the iMatix logo are trademarks of iMatix Corporation sprl.
-
- IONA, IONA Technologies, and the IONA logos are trademarks of IONA
- Technologies PLC and/or its subsidiaries.
-
- LINUX is a trademark of Linus Torvalds. RED HAT and JBOSS are registered
- trademarks of Red Hat, Inc. in the US and other countries.
-
- Java, all Java-based trademarks and OpenOffice.org are trademarks of
- Sun Microsystems, Inc. in the United States, other countries, or both.
-
- Other company, product, or service names may be trademarks or service
- marks of others.
-
- Links to full AMQP specification:
- =================================
- http://www.envoytech.org/spec/amq/
- http://www.iona.com/opensource/amqp/
- http://www.redhat.com/solutions/specifications/amqp/
- http://www.twiststandards.org/tiki-index.php?page=AMQ
- http://www.imatix.com/amqp
--->
-
-<!--
- <!DOCTYPE amqp SYSTEM "amqp.dtd">
--->
-
-<!-- XML Notes
-
- We use entities to indicate repetition; attributes to indicate properties.
-
- We use the 'name' attribute as an identifier, usually within the context
- of the surrounding entities.
-
- We use spaces to separate words in names, so that we can print names in
- their natural form depending on the context - underlines for source code,
- hyphens for written text, etc.
-
- We do not enforce any particular validation mechanism but we support all
- mechanisms. The protocol definition conforms to a formal grammar that is
- published separately in several technologies.
-
- -->
-
-<amqp major = "0" minor = "10" port = "5672" comment = "AMQ Protocol">
- <!--
- ======================================================
- == CONSTANTS
- ======================================================
- -->
- <!-- Frame types -->
- <constant name = "frame-method" value = "1" />
- <constant name = "frame-header" value = "2" />
- <constant name = "frame-body" value = "3" />
- <constant name = "frame-oob-method" value = "4" />
- <constant name = "frame-oob-header" value = "5" />
- <constant name = "frame-oob-body" value = "6" />
- <constant name = "frame-trace" value = "7" />
- <constant name = "frame-heartbeat" value = "8" />
-
- <!-- Protocol constants -->
- <constant name = "frame-min-size" value = "4096" />
- <constant name = "frame-end" value = "206" />
-
- <!-- Reply codes -->
- <constant name = "reply-success" value = "200">
- <doc>
- Indicates that the method completed successfully. This reply code is
- reserved for future use - the current protocol design does not use positive
- confirmation and reply codes are sent only in case of an error.
- </doc>
- </constant>
-
- <constant name = "not-delivered" value = "310" class = "soft-error">
- <doc>
- The client asked for a specific message that is no longer available.
- The message was delivered to another client, or was purged from the queue
- for some other reason.
- </doc>
- </constant>
-
- <constant name = "content-too-large" value = "311" class = "soft-error">
- <doc>
- The client attempted to transfer content larger than the server could accept
- at the present time. The client may retry at a later time.
- </doc>
- </constant>
-
- <constant name = "connection-forced" value = "320" class = "hard-error">
- <doc>
- An operator intervened to close the connection for some reason. The client
- may retry at some later date.
- </doc>
- </constant>
-
- <constant name = "invalid-path" value = "402" class = "hard-error">
- <doc>
- The client tried to work with an unknown virtual host.
- </doc>
- </constant>
-
- <constant name = "access-refused" value = "403" class = "soft-error">
- <doc>
- The client attempted to work with a server entity to which it has no
- access due to security settings.
- </doc>
- </constant>
-
- <constant name = "not-found" value = "404" class = "soft-error">
- <doc>The client attempted to work with a server entity that does not exist.</doc>
- </constant>
-
- <constant name = "resource-locked" value = "405" class = "soft-error">
- <doc>
- The client attempted to work with a server entity to which it has no
- access because another client is working with it.
- </doc>
- </constant>
-
- <constant name = "precondition-failed" value = "406" class = "soft-error">
- <doc>
- The client requested a method that was not allowed because some precondition
- failed.
- </doc>
- </constant>
-
- <constant name = "frame-error" value = "501" class = "hard-error">
- <doc>
- The client sent a malformed frame that the server could not decode. This
- strongly implies a programming error in the client.
- </doc>
- </constant>
-
- <constant name = "syntax-error" value = "502" class = "hard-error">
- <doc>
- The client sent a frame that contained illegal values for one or more
- fields. This strongly implies a programming error in the client.
- </doc>
- </constant>
-
- <constant name = "command-invalid" value = "503" class = "hard-error">
- <doc>
- The client sent an invalid sequence of frames, attempting to perform an
- operation that was considered invalid by the server. This usually implies
- a programming error in the client.
- </doc>
- </constant>
-
- <constant name = "channel-error" value = "504" class = "hard-error">
- <doc>
- The client attempted to work with a channel that had not been correctly
- opened. This most likely indicates a fault in the client layer.
- </doc>
- </constant>
-
- <constant name = "resource-error" value = "506" class = "hard-error">
- <doc>
- The server could not complete the method because it lacked sufficient
- resources. This may be due to the client creating too many of some type
- of entity.
- </doc>
- </constant>
-
- <constant name = "not-allowed" value = "530" class = "hard-error">
- <doc>
- The client tried to work with some entity in a manner that is prohibited
- by the server, due to security settings or by some other criteria.
- </doc>
- </constant>
-
- <constant name = "not-implemented" value = "540" class = "hard-error">
- <doc>
- The client tried to use functionality that is not implemented in the
- server.
- </doc>
- </constant>
-
- <constant name = "internal-error" value = "545" class = "hard-error">
- <doc>
- The server could not complete the method because of an internal error.
- The server may require intervention by an operator in order to resume
- normal operations.
- </doc>
- </constant>
-
- <constant name = "test-double" value = "3.141592654"/>
- <constant name = "test-str1" value = "hello, world!"/>
- <constant name = "test-str2" value = "1.2.3.4"/>
-
- <!--
- ======================================================
- == DOMAIN TYPES
- ======================================================
- -->
-
- <domain name = "access-ticket" type = "short" label = "access ticket granted by server">
- <doc>
- An access ticket granted by the server for a certain set of access rights
- within a specific realm. Access tickets are valid within the channel where
- they were created, and expire when the channel closes.
- </doc>
- <assert check = "ne" value = "0" />
- </domain>
-
- <domain name = "class-id" type = "short" />
-
- <domain name = "consumer-tag" type = "shortstr" label = "consumer tag">
- <doc>
- Identifier for the consumer, valid within the current connection.
- </doc>
- </domain>
-
- <domain name = "delivery-tag" type = "longlong" label = "server-assigned delivery tag">
- <doc>
- The server-assigned and channel-specific delivery tag
- </doc>
- <rule name = "channel-local">
- <doc>
- The delivery tag is valid only within the channel from which the message was
- received. I.e. a client MUST NOT receive a message on one channel and then
- acknowledge it on another.
- </doc>
- </rule>
- <rule name = "non-zero">
- <doc>
- The server MUST NOT use a zero value for delivery tags. Zero is reserved
- for client use, meaning "all messages so far received".
- </doc>
- </rule>
- </domain>
-
- <domain name = "exchange-name" type = "shortstr" label = "exchange name">
- <doc>
- The exchange name is a client-selected string that identifies the exchange for publish
- methods. Exchange names may consist of any mixture of digits, letters, and underscores.
- Exchange names are scoped by the virtual host.
- </doc>
- <assert check = "length" value = "127" />
- </domain>
-
- <domain name = "known-hosts" type = "shortstr" label = "list of known hosts">
- <doc>
- Specifies the list of equivalent or alternative hosts that the server knows about,
- which will normally include the current server itself. Clients can cache this
- information and use it when reconnecting to a server after a failure. This field
- may be empty.
- </doc>
- </domain>
-
- <domain name = "method-id" type = "long" />
-
- <domain name = "no-ack" type = "bit" label = "no acknowledgement needed">
- <doc>
- If this field is set the server does not expect acknowledgments for
- messages. That is, when a message is delivered to the client the server
- automatically and silently acknowledges it on behalf of the client. This
- functionality increases performance but at the cost of reliability.
- Messages can get lost if a client dies before it can deliver them to the
- application.
- </doc>
- </domain>
-
- <domain name = "no-local" type = "bit" label = "do not deliver own messages">
- <doc>
- If the no-local field is set the server will not send messages to the client that
- published them.
- </doc>
- </domain>
-
- <domain name = "path" type = "shortstr">
- <doc>
- Must start with a slash "/" and continue with path names separated by slashes. A path
- name consists of any combination of at least one of [A-Za-z0-9] plus zero or more of
- [.-_+!=:].
- </doc>
-
- <assert check = "notnull" />
- <assert check = "syntax" rule = "path" />
- <assert check = "length" value = "127" />
- </domain>
-
- <domain name = "peer-properties" type = "table">
- <doc>
- This string provides a set of peer properties, used for identification, debugging, and
- general information.
- </doc>
- </domain>
-
- <domain name = "queue-name" type = "shortstr" label = "queue name">
- <doc>
- The queue name identifies the queue within the vhost. Queue names may consist of any
- mixture of digits, letters, and underscores.
- </doc>
- <assert check = "length" value = "127" />
- </domain>
-
- <domain name = "redelivered" type = "bit" label = "message is being redelivered">
- <doc>
- This indicates that the message has been previously delivered to this or
- another client.
- </doc>
- <rule name = "implementation">
- <doc>
- The server SHOULD try to signal redelivered messages when it can. When
- redelivering a message that was not successfully acknowledged, the server
- SHOULD deliver it to the original client if possible.
- </doc>
- <doc type = "scenario">
- Create a shared queue and publish a message to the queue. Consume the
- message using explicit acknowledgements, but do not acknowledge the
- message. Close the connection, reconnect, and consume from the queue
- again. The message should arrive with the redelivered flag set.
- </doc>
- </rule>
- <rule name = "hinting">
- <doc>
- The client MUST NOT rely on the redelivered field but should take it as a
- hint that the message may already have been processed. A fully robust
- client must be able to track duplicate received messages on non-transacted,
- and locally-transacted channels.
- </doc>
- </rule>
- </domain>
-
- <domain name = "reply-code" type = "short" label = "reply code from server">
- <doc>
- The reply code. The AMQ reply codes are defined as constants at the start
- of this formal specification.
- </doc>
- <assert check = "notnull" />
- </domain>
-
- <domain name = "reply-text" type = "shortstr" label = "localised reply text">
- <doc>
- The localised reply text. This text can be logged as an aid to resolving
- issues.
- </doc>
- <assert check = "notnull" />
- </domain>
-
- <!-- Elementary domains -->
- <domain name = "bit" type = "bit" label = "single bit" />
- <domain name = "octet" type = "octet" label = "single octet" />
- <domain name = "short" type = "short" label = "16-bit integer" />
- <domain name = "long" type = "long" label = "32-bit integer" />
- <domain name = "longlong" type = "longlong" label = "64-bit integer" />
- <domain name = "shortstr" type = "shortstr" label = "short string" />
- <domain name = "longstr" type = "longstr" label = "long string" />
- <domain name = "timestamp" type = "timestamp" label = "64-bit timestamp" />
- <domain name = "table" type = "table" label = "field table" />
-
- <!-- == CONNECTION ======================================================= -->
-
- <!-- TODO 0.81 - the 'handler' attribute of methods needs to be reviewed, and if
- no current implementations use it, removed. /PH 2006/07/20
- -->
-
- <class name = "connection" handler = "connection" index = "10" label = "work with socket connections">
- <doc>
- The connection class provides methods for a client to establish a network connection to
- a server, and for both peers to operate the connection thereafter.
- </doc>
-
- <doc type = "grammar">
- connection = open-connection *use-connection close-connection
- open-connection = C:protocol-header
- S:START C:START-OK
- *challenge
- S:TUNE C:TUNE-OK
- C:OPEN S:OPEN-OK | S:REDIRECT
- challenge = S:SECURE C:SECURE-OK
- use-connection = *channel
- close-connection = C:CLOSE S:CLOSE-OK
- / S:CLOSE C:CLOSE-OK
- </doc>
-
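
Read as a state machine, the grammar above fixes the client-side handshake order: protocol header, then START/START-OK, zero or more SECURE challenges, TUNE/TUNE-OK, and finally OPEN answered by OPEN-OK or REDIRECT. A hedged sketch of that client-side view in Java (method names are the lower-case AMQP method names; this illustrates the grammar, not the broker's implementation):

    // Illustrative client-side view of the open-connection grammar above.
    enum ClientHandshakeState
    {
        AWAIT_START,          // protocol header sent, expecting connection.start
        AWAIT_SECURE_OR_TUNE, // start-ok sent, expecting secure challenge(s) or tune
        AWAIT_OPEN_OK,        // tune-ok and open sent, expecting open-ok or redirect
        OPEN;                 // connection ready for channels

        ClientHandshakeState onMethod(String method)
        {
            switch (this)
            {
                case AWAIT_START:
                    if (method.equals("start")) { return AWAIT_SECURE_OR_TUNE; }
                    break;
                case AWAIT_SECURE_OR_TUNE:
                    if (method.equals("secure")) { return AWAIT_SECURE_OR_TUNE; } // reply with secure-ok
                    if (method.equals("tune")) { return AWAIT_OPEN_OK; }          // reply tune-ok, then open
                    break;
                case AWAIT_OPEN_OK:
                    if (method.equals("open-ok")) { return OPEN; }
                    if (method.equals("redirect")) { return AWAIT_START; }        // reconnect to the suggested host
                    break;
                default:
                    break;
            }
            throw new IllegalStateException("Unexpected " + method + " in state " + this);
        }
    }
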
- <chassis name = "server" implement = "MUST" />
- <chassis name = "client" implement = "MUST" />
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "start" synchronous = "1" index = "10" label = "start connection negotiation">
- <doc>
- This method starts the connection negotiation process by telling the client the
- protocol version that the server proposes, along with a list of security mechanisms
- which the client can use for authentication.
- </doc>
-
- <rule name = "protocol-name">
- <doc>
- If the server cannot support the protocol specified in the protocol header,
- it MUST close the socket connection without sending any response method.
- </doc>
- <doc type = "scenario">
- The client sends a protocol header containing an invalid protocol name.
- The server must respond by closing the connection.
- </doc>
- </rule>
- <rule name = "server-support">
- <doc>
- The server MUST provide a protocol version that is lower than or equal to
- that requested by the client in the protocol header.
- </doc>
- <doc type = "scenario">
- The client requests a protocol version that is higher than any valid
- implementation, e.g. 9.0. The server must respond with a current
- protocol version, e.g. 1.0.
- </doc>
- </rule>
- <rule name = "client-support">
- <doc>
- If the client cannot handle the protocol version suggested by the server
- it MUST close the socket connection.
- </doc>
- <doc type = "scenario">
- The server sends a protocol version that is lower than any valid
- implementation, e.g. 0.1. The client must respond by closing the
- connection.
- </doc>
- </rule>
-
- <chassis name = "client" implement = "MUST" />
- <response name = "start-ok" />
-
- <field name = "version-major" domain = "octet" label = "protocol major version">
- <doc>
- The version of the protocol, expressed in protocol units of 0.1 public
- versions and properly printed as two digits with a leading zero. I.e. a
- protocol version of "09" represents a public version "0.9". The decimal
- shift allows the correct expression of pre-1.0 protocol releases.
- </doc>
- <doc type = "todo">
- This field should be renamed to "protocol version".
- </doc>
- </field>
-
- <field name = "version-minor" domain = "octet" label = "protocol major version">
- <doc>
- The protocol revision, expressed as an integer from 0 to 9. The use of more
- than ten revisions is discouraged. The public version string is constructed
- from the protocol version and revision as follows: we print the protocol
- version with one decimal position, and we append the protocol revision. A
- version=10 and revision=2 are printed as "1.02".
- </doc>
- <doc type = "todo">
- This field should be renamed to "protocol revision".
- </doc>
- </field>
-
- <field name = "server-properties" domain = "peer-properties" label = "server properties">
- <rule name = "required-fields">
- <doc>
- The properties SHOULD contain at least these fields: "host", specifying the
- server host name or address, "product", giving the name of the server product,
- "version", giving the name of the server version, "platform", giving the name
- of the operating system, "copyright", if appropriate, and "information", giving
- other general information.
- </doc>
- <doc type = "scenario">
- Client connects to server and inspects the server properties. It checks for
- the presence of the required fields.
- </doc>
- </rule>
- </field>
-
- <field name = "mechanisms" domain = "longstr" label = "available security mechanisms">
- <doc>
- A list of the security mechanisms that the server supports, delimited by spaces.
- Currently ASL supports these mechanisms: PLAIN.
- </doc>
- <assert check = "notnull" />
- </field>
-
- <field name = "locales" domain = "longstr" label = "available message locales">
- <doc>
- A list of the message locales that the server supports, delimited by spaces. The
- locale defines the language in which the server will send reply texts.
- </doc>
- <rule name = "required-support">
- <doc>
- The server MUST support at least the en_US locale.
- </doc>
- <doc type = "scenario">
- Client connects to server and inspects the locales field. It checks for
- the presence of the required locale(s).
- </doc>
- </rule>
- <assert check = "notnull" />
- </field>
- </method>
-
- <method name = "start-ok" synchronous = "1" index = "11"
- label = "select security mechanism and locale">
- <doc>
- This method selects a SASL security mechanism. ASL uses SASL (RFC2222) to
- negotiate authentication and encryption.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
-
- <field name = "client-properties" domain = "peer-properties" label = "client properties">
- <rule name = "required-fields">
- <!-- This rule is not testable from the client side -->
- <doc>
- The properties SHOULD contain at least these fields: "product", giving the name
- of the client product, "version", giving the name of the client version, "platform",
- giving the name of the operating system, "copyright", if appropriate, and
- "information", giving other general information.
- </doc>
- </rule>
- </field>
-
- <field name = "mechanism" domain = "shortstr" label = "selected security mechanism">
- <doc>
- A single security mechanism selected by the client, which must be one of those
- specified by the server.
- </doc>
- <rule name = "security">
- <doc>
- The client SHOULD authenticate using the highest-level security profile it
- can handle from the list provided by the server.
- </doc>
- </rule>
- <rule name = "validity">
- <doc>
- If the mechanism field does not contain one of the security mechanisms
- proposed by the server in the Start method, the server MUST close the
- connection without sending any further data.
- </doc>
- <doc type = "scenario">
- Client connects to server and sends an invalid security mechanism. The
- server must respond by closing the connection (a socket close, with no
- connection close negotiation).
- </doc>
- </rule>
- <assert check = "notnull" />
- </field>
-
- <field name = "response" domain = "longstr" label = "security response data">
- <doc>
- A block of opaque data passed to the security mechanism. The contents of this
- data are defined by the SASL security mechanism.
- </doc>
- <assert check = "notnull" />
- </field>
-
- <field name = "locale" domain = "shortstr" label = "selected message locale">
- <doc>
- A single message locale selected by the client, which must be one of those
- specified by the server.
- </doc>
- <assert check = "notnull" />
- </field>
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "secure" synchronous = "1" index = "20" label = "security mechanism challenge">
- <doc>
- The SASL protocol works by exchanging challenges and responses until both peers have
- received sufficient information to authenticate each other. This method challenges
- the client to provide more information.
- </doc>
-
- <chassis name = "client" implement = "MUST" />
- <response name = "secure-ok" />
-
- <field name = "challenge" domain = "longstr" label = "security challenge data">
- <doc>
- Challenge information, a block of opaque binary data passed to the security
- mechanism.
- </doc>
- </field>
- </method>
-
- <method name = "secure-ok" synchronous = "1" index = "21" label = "security mechanism response">
- <doc>
- This method attempts to authenticate, passing a block of SASL data for the security
- mechanism at the server side.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
-
- <field name = "response" domain = "longstr" label = "security response data">
- <doc>
- A block of opaque data passed to the security mechanism. The contents of this
- data are defined by the SASL security mechanism.
- </doc>
- <assert check = "notnull" />
- </field>
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "tune" synchronous = "1" index = "30"
- label = "propose connection tuning parameters">
- <doc>
- This method proposes a set of connection configuration values to the client. The
- client can accept and/or adjust these.
- </doc>
-
- <chassis name = "client" implement = "MUST" />
-
- <response name = "tune-ok" />
-
- <field name = "channel-max" domain = "short" label = "proposed maximum channels">
- <doc>
- The maximum total number of channels that the server allows per connection. Zero
- means that the server does not impose a fixed limit, but the number of allowed
- channels may be limited by available server resources.
- </doc>
- </field>
-
- <field name = "frame-max" domain = "long" label = "proposed maximum frame size">
- <doc>
- The largest frame size that the server proposes for the connection. The client
- can negotiate a lower value. Zero means that the server does not impose any
- specific limit but may reject very large frames if it cannot allocate resources
- for them.
- </doc>
- <rule name = "minimum">
- <doc>
- Until the frame-max has been negotiated, both peers MUST accept frames of up
- to frame-min-size octets large, and the minimum negotiated value for frame-max
- is also frame-min-size.
- </doc>
- <doc type = "scenario">
- Client connects to server and sends a large properties field, creating a frame
- of frame-min-size octets. The server must accept this frame.
- </doc>
- </rule>
- </field>
-
- <field name = "heartbeat" domain = "short" label = "desired heartbeat delay">
- <!-- TODO 0.82 - the heartbeat negotiation mechanism was changed during
- implementation because the model documented here does not actually
- work properly. The best model we found is that the server proposes
- a heartbeat value to the client; the client can reply with zero, meaning
- 'do not use heartbeats' (as documented here), or can propose its own
- heartbeat value, which the server should then accept. This is different
- from the model here which is disconnected - e.g. each side requests a
- heartbeat independently. Basically a connection is heartbeated in
- both ways, or not at all, depending on whether both peers support
- heartbeating or not, and the heartbeat value should itself be chosen
- by the client so that remote links can get a higher value. Also, the
- actual heartbeat mechanism needs documentation, and is as follows: so
- long as there is activity on a connection - in or out - both peers
- assume the connection is active. When there is no activity, each peer
- must send heartbeat frames. When no heartbeat frame is received after
- N cycles (where N is at least 2), the connection can be considered to
- have died. /PH 2006/07/19
- -->
- <doc>
- The delay, in seconds, of the connection heartbeat that the server wants.
- Zero means the server does not want a heartbeat.
- </doc>
- </field>
- </method>
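
The long TODO comment in the heartbeat field above describes the rule implementations actually follow: any traffic counts as liveness, an idle peer emits a heartbeat frame each negotiated period, and the connection is considered dead after at least two silent periods. A minimal bookkeeping sketch of that rule (the two-period factor comes from the comment; everything else is illustrative):

    // Illustrative heartbeat bookkeeping for one connection, following the rule in
    // the TODO comment: send when idle, give up after at least two silent periods.
    final class HeartbeatMonitor
    {
        private final long periodMillis;
        private long lastReceive = System.currentTimeMillis();
        private long lastSend = System.currentTimeMillis();

        HeartbeatMonitor(int heartbeatSeconds)
        {
            periodMillis = heartbeatSeconds * 1000L;
        }

        void onFrameReceived() { lastReceive = System.currentTimeMillis(); }
        void onFrameSent()     { lastSend = System.currentTimeMillis(); }

        boolean shouldSendHeartbeat(long now)
        {
            return periodMillis > 0 && now - lastSend >= periodMillis;
        }

        boolean peerLooksDead(long now)
        {
            return periodMillis > 0 && now - lastReceive >= 2 * periodMillis;
        }
    }
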
-
- <method name = "tune-ok" synchronous = "1" index = "31"
- label = "negotiate connection tuning parameters">
- <doc>
- This method sends the client's connection tuning parameters to the server.
- Certain fields are negotiated, others provide capability information.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
-
- <field name = "channel-max" domain = "short" label = "negotiated maximum channels">
- <doc>
- The maximum total number of channels that the client will use per connection.
- </doc>
- <rule name = "upper-limit">
- <doc>
- If the client specifies a channel max that is higher than the value provided
- by the server, the server MUST close the connection without attempting a
- negotiated close. The server may report the error in some fashion to assist
- implementors.
- </doc>
- </rule>
- <assert check = "notnull" />
- <assert check = "le" method = "tune" field = "channel-max" />
- </field>
-
- <field name = "frame-max" domain = "long" label = "negotiated maximum frame size">
- <doc>
- The largest frame size that the client and server will use for the connection.
- Zero means that the client does not impose any specific limit but may reject
- very large frames if it cannot allocate resources for them. Note that the
- frame-max limit applies principally to content frames, where large contents can
- be broken into frames of arbitrary size.
- </doc>
- <rule name = "minimum">
- <doc>
- Until the frame-max has been negotiated, both peers MUST accept frames of up
- to frame-min-size octets large, and the minimum negotiated value for frame-max
- is also frame-min-size.
- </doc>
- </rule>
- <rule name = "upper-limit">
- <doc>
- If the client specifies a frame max that is higher than the value provided
- by the server, the server MUST close the connection without attempting a
- negotiated close. The server may report the error in some fashion to assist
- implementors.
- </doc>
- </rule>
- </field>
-
- <field name = "heartbeat" domain = "short" label = "desired heartbeat delay">
- <doc>
- The delay, in seconds, of the connection heartbeat that the client wants. Zero
- means the client does not want a heartbeat.
- </doc>
- </field>
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "open" synchronous = "1" index = "40" label = "open connection to virtual host">
- <doc>
- This method opens a connection to a virtual host, which is a collection of
- resources, and acts to separate multiple application domains within a server.
- The server may apply arbitrary limits per virtual host, such as the number
- of each type of entity that may be used, per connection and/or in total.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
- <response name = "open-ok" />
- <response name = "redirect" />
-
- <field name = "virtual-host" domain = "path" label = "virtual host name">
- <!-- TODO 0.82 - the entire vhost model needs review. This concept was
- prompted by the HTTP vhost concept but does not fit very well into
- AMQP. Currently we use the vhost as a "cluster identifier" which is
- inaccurate usage. /PH 2006/07/19
- -->
- <assert check = "regexp" value = "^[a-zA-Z0-9/-_]+$" />
- <doc>
- The name of the virtual host to work with.
- </doc>
- <rule name = "separation">
- <doc>
- If the server supports multiple virtual hosts, it MUST enforce a full
- separation of exchanges, queues, and all associated entities per virtual
- host. An application, connected to a specific virtual host, MUST NOT be able
- to access resources of another virtual host.
- </doc>
- </rule>
- <rule name = "security">
- <doc>
- The server SHOULD verify that the client has permission to access the
- specified virtual host.
- </doc>
- </rule>
- </field>
-
- <field name = "capabilities" domain = "shortstr" label = "required capabilities">
- <doc>
- The client can specify zero or more capability names, delimited by spaces.
- The server can use this string to decide how to process the client's connection
- request.
- </doc>
- </field>
-
- <field name = "insist" domain = "bit" label = "insist on connecting to server">
- <doc>
- In a configuration with multiple collaborating servers, the server may respond
- to a Connection.Open method with a Connection.Redirect. The insist option tells
- the server that the client is insisting on a connection to the specified server.
- </doc>
- <rule name = "behaviour">
- <doc>
- When the client uses the insist option, the server MUST NOT respond with a
- Connection.Redirect method. If it cannot accept the client's connection
- request it should respond by closing the connection with a suitable reply
- code.
- </doc>
- </rule>
- </field>
- </method>
-
- <method name = "open-ok" synchronous = "1" index = "41" label = "signal that connection is ready">
- <doc>
- This method signals to the client that the connection is ready for use.
- </doc>
- <chassis name = "client" implement = "MUST" />
- <field name = "known-hosts" domain = "known-hosts" />
- </method>
-
- <method name = "redirect" synchronous = "1" index = "42" label = "redirects client to other server">
- <doc>
- This method redirects the client to another server, based on the requested virtual
- host and/or capabilities.
- </doc>
- <rule name = "usage">
- <doc>
- When getting the Connection.Redirect method, the client SHOULD reconnect to
- the host specified, and if that host is not present, to any of the hosts
- specified in the known-hosts list.
- </doc>
- </rule>
- <chassis name = "client" implement = "MUST" />
- <field name = "host" domain = "shortstr" label = "server to connect to">
- <doc>
- Specifies the server to connect to. This is an IP address or a DNS name,
- optionally followed by a colon and a port number. If no port number is
- specified, the client should use the default port number for the protocol.
- </doc>
- <assert check = "notnull" />
- </field>
- <field name = "known-hosts" domain = "known-hosts" />
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "close" synchronous = "1" index = "50" label = "request a connection close">
- <doc>
- This method indicates that the sender wants to close the connection. This may be
- due to internal conditions (e.g. a forced shut-down) or due to an error handling
- a specific method, i.e. an exception. When a close is due to an exception, the
- sender provides the class and method id of the method which caused the exception.
- </doc>
- <!-- TODO: the connection close mechanism needs to be reviewed from the ODF
- documentation and better expressed as rules here. /PH 2006/07/20
- -->
- <rule name = "stability">
- <doc>
- After sending this method any received method except the Close-OK method MUST
- be discarded.
- </doc>
- </rule>
-
- <chassis name = "client" implement = "MUST" />
- <chassis name = "server" implement = "MUST" />
- <response name = "close-ok" />
-
- <field name = "reply-code" domain = "reply-code" />
- <field name = "reply-text" domain = "reply-text" />
-
- <field name = "class-id" domain = "class-id" label = "failing method class">
- <doc>
- When the close is provoked by a method exception, this is the class of the
- method.
- </doc>
- </field>
-
- <field name = "method-id" domain = "method-id" label = "failing method ID">
- <doc>
- When the close is provoked by a method exception, this is the ID of the method.
- </doc>
- </field>
- </method>
-
- <method name = "close-ok" synchronous = "1" index = "51" label = "confirm a connection close">
- <doc>
- This method confirms a Connection.Close method and tells the recipient that it is
- safe to release resources for the connection and close the socket.
- </doc>
- <rule name = "reporting">
- <doc>
- A peer that detects a socket closure without having received a Close-Ok
- handshake method SHOULD log the error.
- </doc>
- </rule>
- <chassis name = "client" implement = "MUST" />
- <chassis name = "server" implement = "MUST" />
- </method>
- </class>
-
- <!-- == CHANNEL ========================================================== -->
-
- <class name = "channel" handler = "channel" index = "20" label = "work with channels">
- <doc>
- The channel class provides methods for a client to establish a channel to a
- server and for both peers to operate the channel thereafter.
- </doc>
-
- <doc type = "grammar">
- channel = open-channel *use-channel close-channel
- open-channel = C:OPEN S:OPEN-OK
- use-channel = C:FLOW S:FLOW-OK
- / S:FLOW C:FLOW-OK
- / S:ALERT
- / functional-class
- close-channel = C:CLOSE S:CLOSE-OK
- / S:CLOSE C:CLOSE-OK
- </doc>
-
- <chassis name = "server" implement = "MUST" />
- <chassis name = "client" implement = "MUST" />
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "open" synchronous = "1" index = "10" label = "open a channel for use">
- <doc>
- This method opens a channel to the server.
- </doc>
- <rule name = "state" on-failure = "channel-error">
- <doc>
- The client MUST NOT use this method on an already-opened channel.
- </doc>
- <doc type = "scenario">
- Client opens a channel and then reopens the same channel.
- </doc>
- </rule>
- <chassis name = "server" implement = "MUST" />
- <response name = "open-ok" />
- <field name = "out of band" domain = "shortstr" label = "out-of-band settings">
- <doc>
- Configures out-of-band transfers on this channel. The syntax and meaning of this
- field will be formally defined at a later date.
- </doc>
- <assert check = "null" />
- </field>
- </method>
-
- <method name = "open-ok" synchronous = "1" index = "11" label = "signal that the channel is ready">
- <doc>
- This method signals to the client that the channel is ready for use.
- </doc>
- <chassis name = "client" implement = "MUST" />
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "flow" synchronous = "1" index = "20" label = "enable/disable flow from peer">
- <doc>
- This method asks the peer to pause or restart the flow of content data. This is a
- simple flow-control mechanism that a peer can use to avoid overflowing its queues or
- otherwise finding itself receiving more messages than it can process. Note that this
- method is not intended for window control. The peer that receives a disable flow
- method should finish sending the current content frame, if any, then pause.
- </doc>
-
- <rule name = "initial-state">
- <doc>
- When a new channel is opened, it is active (flow is active). Some applications
- assume that channels are inactive until started. To emulate this behaviour a
- client MAY open the channel, then pause it.
- </doc>
- </rule>
-
- <rule name = "bidirectional">
- <doc>
- When sending content frames, a peer SHOULD monitor the channel for incoming
- methods and respond to a Channel.Flow as rapidly as possible.
- </doc>
- </rule>
-
- <rule name = "throttling">
- <doc>
- A peer MAY use the Channel.Flow method to throttle incoming content data for
- internal reasons, for example, when exchanging data over a slower connection.
- </doc>
- </rule>
-
- <rule name = "expected-behaviour">
- <doc>
- The peer that requests a Channel.Flow method MAY disconnect and/or ban a peer
- that does not respect the request. This is to prevent badly-behaved clients
- from overwhelming a broker.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
- <chassis name = "client" implement = "MUST" />
-
- <response name = "flow-ok" />
-
- <field name = "active" domain = "bit" label = "start/stop content frames">
- <doc>
- If 1, the peer starts sending content frames. If 0, the peer stops sending
- content frames.
- </doc>
- </field>
- </method>
-
- <method name = "flow-ok" index = "21" label = "confirm a flow method">
- <doc>
- Confirms to the peer that a flow command was received and processed.
- </doc>
- <chassis name = "server" implement = "MUST" />
- <chassis name = "client" implement = "MUST" />
- <field name = "active" domain = "bit" label = "current flow setting">
- <doc>
- Confirms the setting of the processed flow method: 1 means the peer will start
- sending or continue to send content frames; 0 means it will not.
- </doc>
- </field>
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <!-- TODO 0.82 - remove this method entirely
- /PH 2006/07/20
- -->
- <method name = "alert" index = "30" label = "send a non-fatal warning message">
- <doc>
- This method allows the server to send a non-fatal warning to the client. This is
- used for methods that are normally asynchronous and thus do not have confirmations,
- and for which the server may detect errors that need to be reported. Fatal errors
- are handled as channel or connection exceptions; non-fatal errors are sent through
- this method.
- </doc>
- <chassis name = "client" implement = "MUST" />
- <field name = "reply-code" domain = "reply-code" />
- <field name = "reply-text" domain = "reply-text" />
- <field name = "details" domain = "table" label = "detailed information for warning">
- <doc>
- A set of fields that provide more information about the problem. The meaning of
- these fields is defined on a per-reply-code basis (TO BE DEFINED).
- </doc>
- </field>
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "close" synchronous = "1" index = "40" label = "request a channel close">
- <doc>
- This method indicates that the sender wants to close the channel. This may be due to
- internal conditions (e.g. a forced shut-down) or due to an error handling a specific
- method, i.e. an exception. When a close is due to an exception, the sender provides
- the class and method id of the method which caused the exception.
- </doc>
-
- <!-- TODO: the channel close behaviour needs to be reviewed from the ODF
- documentation and better expressed as rules here. /PH 2006/07/20
- -->
- <rule name = "stability">
- <doc>
- After sending this method any received method except the Close-OK method MUST
- be discarded.
- </doc>
- </rule>
-
- <chassis name = "client" implement = "MUST" />
- <chassis name = "server" implement = "MUST" />
- <response name = "close-ok" />
-
- <field name = "reply-code" domain = "reply-code" />
- <field name = "reply-text" domain = "reply-text" />
-
- <field name = "class-id" domain = "class-id" label = "failing method class">
- <doc>
- When the close is provoked by a method exception, this is the class of the
- method.
- </doc>
- </field>
-
- <field name = "method-id" domain = "method-id" label = "failing method ID">
- <doc>
- When the close is provoked by a method exception, this is the ID of the method.
- </doc>
- </field>
- </method>
-
- <method name = "close-ok" synchronous = "1" index = "41" label = "confirm a channel close">
- <doc>
- This method confirms a Channel.Close method and tells the recipient that it is safe
- to release resources for the channel and close the socket.
- </doc>
- <rule name = "reporting">
- <doc>
- A peer that detects a socket closure without having received a Channel.Close-Ok
- handshake method SHOULD log the error.
- </doc>
- </rule>
- <chassis name = "client" implement = "MUST" />
- <chassis name = "server" implement = "MUST" />
- </method>
- </class>
-
- <!-- == ACCESS =========================================================== -->
-
- <!-- TODO 0.82 - this class must be implemented by two teams before we can
- consider it matured.
- -->
-
- <class name = "access" handler = "connection" index = "30" label = "work with access tickets">
- <doc>
- The protocol controls access to server resources using access tickets. A
- client must explicitly request access tickets before doing work. An access
- ticket grants a client the right to use a specific set of resources -
- called a "realm" - in specific ways.
- </doc>
-
- <doc type = "grammar">
- access = C:REQUEST S:REQUEST-OK
- </doc>
-
- <chassis name = "server" implement = "MUST" />
- <chassis name = "client" implement = "MUST" />
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "request" synchronous = "1" index = "10" label = "request an access ticket">
- <doc>
- This method requests an access ticket for an access realm. The server
- responds by granting the access ticket. If the client does not have
- access rights to the requested realm this causes a connection exception.
- Access tickets are a per-channel resource.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
- <response name = "request-ok" />
-
- <field name = "realm" domain = "shortstr" label = "name of requested realm">
- <doc>
- Specifies the name of the realm to which the client is requesting access.
- The realm is a configured server-side object that collects a set of
- resources (exchanges, queues, etc.). If the channel has already requested
- an access ticket onto this realm, the previous ticket is destroyed and a
- new ticket is created with the requested access rights, if allowed.
- </doc>
- <rule name = "validity" on-failure = "access-refused">
- <doc>
- The client MUST specify a realm that is known to the server. The server
- makes an identical response for undefined realms as it does for realms
- that are defined but inaccessible to this client.
- </doc>
- <doc type = "scenario">
- Client specifies an undefined realm.
- </doc>
- </rule>
- </field>
-
- <field name = "exclusive" domain = "bit" label = "request exclusive access">
- <doc>
- Request exclusive access to the realm, meaning that this will be the only
- channel that uses the realm's resources.
- </doc>
- <rule name = "validity" on-failure = "access-refused">
- <doc>
- The client MAY NOT request exclusive access to a realm that has active
- access tickets, unless the same channel already had the only access
- ticket onto that realm.
- </doc>
- <doc type = "scenario">
- Client opens two channels and requests exclusive access to the same realm.
- </doc>
- </rule>
- </field>
- <field name = "passive" domain = "bit" label = "request passive access">
- <doc>
- Request passive access to the specified access realm. Passive
- access lets a client get information about resources in the realm but
- not to make any changes to them.
- </doc>
- </field>
- <field name = "active" domain = "bit" label = "request active access">
- <doc>
- Request active access to the specified access realm. Active access lets
- a client create and delete resources in the realm.
- </doc>
- </field>
- <field name = "write" domain = "bit" label = "request write access">
- <doc>
- Request write access to the specified access realm. Write access lets a client
- publish messages to all exchanges in the realm.
- </doc>
- </field>
- <field name = "read" domain = "bit" label = "request read access">
- <doc>
- Request read access to the specified access realm. Read access lets a client
- consume messages from queues in the realm.
- </doc>
- </field>
- </method>
-
- <method name = "request-ok" synchronous = "1" index = "11" label = "grant access to server resources">
- <doc>
- This method provides the client with an access ticket. The access ticket is valid
- within the current channel and for the lifespan of the channel.
- </doc>
- <rule name = "per-channel" on-failure = "not-allowed">
- <doc>
- The client MUST NOT use access tickets except within the same channel as
- originally granted.
- </doc>
- <doc type = "scenario">
- Client opens two channels, requests a ticket on one channel, and then
- tries to use that ticket in a second channel.
- </doc>
- </rule>
- <chassis name = "client" implement = "MUST" />
- <field name = "ticket" domain = "access-ticket" />
- </method>
- </class>
-
- <!-- == EXCHANGE ========================================================= -->
-
- <class name = "exchange" handler = "channel" index = "40" label = "work with exchanges">
- <doc>
- Exchanges match and distribute messages across queues. Exchanges can be configured in
- the server or created at runtime.
- </doc>
-
- <doc type = "grammar">
- exchange = C:DECLARE S:DECLARE-OK
- / C:DELETE S:DELETE-OK
- </doc>
-
- <chassis name = "server" implement = "MUST" />
- <chassis name = "client" implement = "MUST" />
-
- <rule name = "required-types">
- <doc>
- The server MUST implement these standard exchange types: fanout, direct.
- </doc>
- <doc type = "scenario">
- Client attempts to declare an exchange with each of these standard types.
- </doc>
- </rule>
- <rule name = "recommended-types">
- <doc>
- The server SHOULD implement these standard exchange types: topic, headers.
- </doc>
- <doc type = "scenario">
- Client attempts to declare an exchange with each of these standard types.
- </doc>
- </rule>
- <rule name = "required-instances">
- <doc>
- The server MUST, in each virtual host, pre-declare an exchange instance
- for each standard exchange type that it implements, where the name of the
- exchange instance is "amq." followed by the exchange type name.
- </doc>
- <doc type = "scenario">
- Client creates a temporary queue and attempts to bind to each required
- exchange instance (amq.fanout and amq.direct, plus amq.topic and amq.headers if
- those types are defined).
- </doc>
- </rule>
- <rule name = "default-exchange">
- <doc>
- The server MUST predeclare a direct exchange to act as the default exchange
- for content Publish methods and for default queue bindings.
- </doc>
- <doc type = "scenario">
- Client checks that the default exchange is active by specifying a queue
- binding with no exchange name, and publishing a message with a suitable
- routing key but without specifying the exchange name, then ensuring that
- the message arrives in the queue correctly.
- </doc>
- </rule>
- <rule name = "default-access">
- <doc>
- The server MUST NOT allow clients to access the default exchange except
- by specifying an empty exchange name in the Queue.Bind and content Publish
- methods.
- </doc>
- </rule>
- <rule name = "extensions">
- <doc>
- The server MAY implement other exchange types as wanted.
- </doc>
- </rule>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "declare" synchronous = "1" index = "10" label = "declare exchange, create if needed">
- <doc>
- This method creates an exchange if it does not already exist, and if the exchange
- exists, verifies that it is of the correct and expected class.
- </doc>
- <rule name = "minimum">
- <doc>
- The server SHOULD support a minimum of 16 exchanges per virtual host and
- ideally, impose no limit except as defined by available resources.
- </doc>
- <doc type = "scenario">
- The client creates as many exchanges as it can until the server reports
- an error; the number of exchanges successfully created must be at least
- sixteen.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
- <response name = "declare-ok" />
-
- <field name = "ticket" domain = "access-ticket">
- <doc>
- When a client defines a new exchange, this belongs to the access realm of the
- ticket used. All further work done with that exchange must be done with an
- access ticket for the same realm.
- </doc>
- <rule name = "validity" on-failure = "access-refused">
- <doc>
- The client MUST provide a valid access ticket giving "active" access to
- the realm in which the exchange exists or will be created, or "passive"
- access if the passive flag is set.
- </doc>
- <doc type = "scenario">
- Client creates access ticket with wrong access rights and attempts to use
- in this method.
- </doc>
- </rule>
- </field>
-
- <field name = "exchange" domain = "exchange-name">
- <rule name = "reserved" on-failure = "access-refused">
- <doc>
- Exchange names starting with "amq." are reserved for predeclared and
- standardised exchanges. The client MUST NOT attempt to create an exchange
- starting with "amq.".
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- <assert check = "regexp" value = "^[a-zA-Z0-9-_.:]+$" />
- </field>
-
- <field name = "type" domain = "shortstr" label = "exchange type">
- <doc>
- Each exchange belongs to one of a set of exchange types implemented by the
- server. The exchange types define the functionality of the exchange - i.e. how
- messages are routed through it. It is not valid or meaningful to attempt to
- change the type of an existing exchange.
- </doc>
- <rule name = "typed" on-failure = "not-allowed">
- <doc>
- Exchanges cannot be redeclared with different types. The client MUST NOT
- attempt to redeclare an existing exchange with a different type than used
- in the original Exchange.Declare method.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- <rule name = "support" on-failure = "command-invalid">
- <doc>
- The client MUST NOT attempt to create an exchange with a type that the
- server does not support.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- <assert check = "regexp" value = "^[a-zA-Z0-9-_.:]+$" />
- </field>
-
- <field name = "passive" domain = "bit" label = "do not create exchange">
- <doc>
- If set, the server will not create the exchange. The client can use this to
- check whether an exchange exists without modifying the server state.
- </doc>
- <rule name = "not-found">
- <doc>
- If set, and the exchange does not already exist, the server MUST raise a
- channel exception with reply code 404 (not found).
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- </field>
-
- <field name = "durable" domain = "bit" label = "request a durable exchange">
- <doc>
- If set when creating a new exchange, the exchange will be marked as durable.
- Durable exchanges remain active when a server restarts. Non-durable exchanges
- (transient exchanges) are purged if/when a server restarts.
- </doc>
- <rule name = "support">
- <doc>
- The server MUST support both durable and transient exchanges.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- <rule name = "sticky">
- <doc>
- The server MUST ignore the durable field if the exchange already exists.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- </field>
-
- <!-- TODO 0.82 - clarify how this works; there is no way to cancel a binding
- except by deleting a queue.
- -->
- <field name = "auto-delete" domain = "bit" label = "auto-delete when unused">
- <doc>
- If set, the exchange is deleted when all queues have finished using it.
- </doc>
- <rule name = "sticky">
- <doc>
- The server MUST ignore the auto-delete field if the exchange already
- exists.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- </field>
-
- <field name = "internal" domain = "bit" label = "create internal exchange">
- <doc>
- If set, the exchange may not be used directly by publishers, but only when bound
- to other exchanges. Internal exchanges are used to construct wiring that is not
- visible to applications.
- </doc>
- </field>
-
- <field name = "arguments" domain = "table" label = "arguments for declaration">
- <doc>
- A set of arguments for the declaration. The syntax and semantics of these
- arguments depends on the server implementation. This field is ignored if passive
- is 1.
- </doc>
- </field>
- </method>
-
- <method name = "declare-ok" synchronous = "1" index = "11" label = "confirm exchange declaration">
- <doc>
- This method confirms a Declare method and confirms the name of the exchange,
- essential for automatically-named exchanges.
- </doc>
- <chassis name = "client" implement = "MUST" />
- </method>
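
How Exchange.Declare looks from the client side, sketched with pika 1.x naming (AMQP 0-9-1 dropped access tickets, so there is no ticket argument; the exchange name "app.events" is made up for illustration):

    # assumes `channel` from the earlier connection sketch
    channel.exchange_declare(exchange='app.events',
                             exchange_type='direct',  # cannot change on redeclare
                             durable=True,            # survives a broker restart
                             auto_delete=False)

    # passive declare: only checks existence, raises a 404 channel error if missing
    channel.exchange_declare(exchange='app.events', passive=True)
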
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "delete" synchronous = "1" index = "20" label = "delete an exchange">
- <doc>
- This method deletes an exchange. When an exchange is deleted all queue bindings on
- the exchange are cancelled.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
- <response name = "delete-ok" />
-
- <field name = "ticket" domain = "access-ticket">
- <rule name = "validity" on-failure = "access-refused">
- <doc>
- The client MUST provide a valid access ticket giving "active" access
- rights to the exchange's access realm.
- </doc>
- <doc type = "scenario">
- Client creates access ticket with wrong access rights and attempts to use
- in this method.
- </doc>
- </rule>
- </field>
-
- <field name = "exchange" domain = "exchange-name">
- <rule name = "exists" on-failure = "not-found">
- <doc>
- The client MUST NOT attempt to delete an exchange that does not exist.
- </doc>
- </rule>
- <assert check = "notnull" />
- </field>
-
- <!-- TODO 0.82 - discuss whether this option is useful or not. I don't have
- any real use case for it. /PH 2006-07-23.
- -->
- <field name = "if-unused" domain = "bit" label = "delete only if unused">
- <doc>
- If set, the server will only delete the exchange if it has no queue bindings. If
- the exchange has queue bindings the server does not delete it but raises a
- channel exception instead.
- </doc>
- </field>
- </method>
-
- <method name = "delete-ok" synchronous = "1" index = "21"
- label = "confirm deletion of an exchange">
- <doc>This method confirms the deletion of an exchange.</doc>
- <chassis name = "client" implement = "MUST" />
- </method>
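
Exchange.Delete is correspondingly a single call; if_unused mirrors the field above (sketch, same assumptions as before):

    # fails with a channel exception if the exchange still has queue bindings
    channel.exchange_delete(exchange='app.events', if_unused=True)
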
- </class>
-
- <!-- == QUEUE ============================================================ -->
-
- <class name = "queue" handler = "channel" index = "50" label = "work with queues">
- <doc>
- Queues store and forward messages. Queues can be configured in the server or created at
- runtime. Queues must be attached to at least one exchange in order to receive messages
- from publishers.
- </doc>
-
- <doc type = "grammar">
- queue = C:DECLARE S:DECLARE-OK
- / C:BIND S:BIND-OK
- / C:PURGE S:PURGE-OK
- / C:DELETE S:DELETE-OK
- </doc>
-
- <chassis name = "server" implement = "MUST" />
- <chassis name = "client" implement = "MUST" />
-
- <rule name = "any-content">
- <doc>
- A server MUST allow any content class to be sent to any queue, in any mix, and
- queue and deliver these content classes independently. Note that all methods
- that fetch content off queues are specific to a given content class.
- </doc>
- <doc type = "scenario">
- Client creates an exchange of each standard type and several queues that
- it binds to each exchange. It must then successfully send each of the standard
- content types to each of the available queues.
- </doc>
- </rule>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "declare" synchronous = "1" index = "10" label = "declare queue, create if needed">
- <doc>
- This method creates or checks a queue. When creating a new queue the client can
- specify various properties that control the durability of the queue and its
- contents, and the level of sharing for the queue.
- </doc>
-
- <rule name = "default-binding">
- <doc>
- The server MUST create a default binding for a newly-created queue to the
- default exchange, which is an exchange of type 'direct'.
- </doc>
- <doc type = "scenario">
- Client creates a new queue, and then without explicitly binding it to an
- exchange, attempts to send a message through the default exchange binding,
- i.e. publish a message to the empty exchange, with the queue name as routing
- key.
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_queue_35" -->
- <rule name = "minimum-queues">
- <doc>
- The server SHOULD support a minimum of 256 queues per virtual host and ideally,
- impose no limit except as defined by available resources.
- </doc>
- <doc type = "scenario">
- Client attempts to create as many queues as it can until the server reports
- an error. The resulting count must be at least 256.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
- <response name = "declare-ok" />
-
- <field name = "ticket" domain = "access-ticket">
- <doc>
- When a client defines a new queue, this belongs to the access realm of the
- ticket used. All further work done with that queue must be done with an access
- ticket for the same realm.
- </doc>
- <rule name = "validity" on-failure = "access-refused">
- <doc>
- The client MUST provide a valid access ticket giving "active" access to
- the realm in which the queue exists or will be created.
- </doc>
- <doc type = "scenario">
- Client creates access ticket with wrong access rights and attempts to use
- in this method.
- </doc>
- </rule>
- </field>
-
- <field name = "queue" domain = "queue-name">
- <rule name = "default-name">
- <doc>
- The queue name MAY be empty, in which case the server MUST create a new
- queue with a unique generated name and return this to the client in the
- Declare-Ok method.
- </doc>
- <doc type = "scenario">
- Client attempts to create several queues with an empty name. The client then
- verifies that the server-assigned names are unique and different.
- </doc>
- </rule>
- <rule name = "reserved-prefix" on-failure = "not-allowed">
- <doc>
- Queue names starting with "amq." are reserved for predeclared and
- standardised server queues. A client MAY NOT attempt to declare a queue with a
- name that starts with "amq." and the passive option set to zero.
- </doc>
- <doc type = "scenario">
- A client attempts to create a queue with a name starting with "amq." and with
- the passive option set to zero.
- </doc>
- </rule>
- <assert check = "regexp" value = "^[a-zA-Z0-9-_.:]*$" />
- </field>
-
- <field name = "passive" domain = "bit" label = "do not create queue">
- <doc>
- If set, the server will not create the queue. This field allows the client
- to assert the presence of a queue without modifying the server state.
- </doc>
- <rule name = "passive" on-failure = "not-found">
- <doc>
- The client MAY ask the server to assert that a queue exists without
- creating the queue if not. If the queue does not exist, the server
- treats this as a failure.
- </doc>
- <doc type = "scenario">
- Client declares an existing queue with the passive option and expects
- the server to respond with a declare-ok. Client then attempts to declare
- a non-existent queue with the passive option, and the server must close
- the channel with the correct reply-code.
- </doc>
- </rule>
- </field>
-
- <field name = "durable" domain = "bit" label = "request a durable queue">
- <doc>
- If set when creating a new queue, the queue will be marked as durable. Durable
- queues remain active when a server restarts. Non-durable queues (transient
- queues) are purged if/when a server restarts. Note that durable queues do not
- necessarily hold persistent messages, although it does not make sense to send
- persistent messages to a transient queue.
- </doc>
- <!-- Rule test name: was "amq_queue_03" -->
- <rule name = "persistence">
- <doc>The server MUST recreate the durable queue after a restart.</doc>
-
- <!-- TODO: use 'client does something' rather than 'a client does something'. -->
- <doc type = "scenario">
- A client creates a durable queue. The server is then restarted. The client
- then attempts to send a message to the queue. The message should be successfully
- delivered.
- </doc>
- </rule>
- <!-- Rule test name: was "amq_queue_36" -->
- <rule name = "types">
- <doc>The server MUST support both durable and transient queues.</doc>
- <doc type = "scenario">
- A client creates two named queues, one durable and one transient.
- </doc>
- </rule>
- <!-- Rule test name: was "amq_queue_37" -->
- <rule name = "pre-existence">
- <doc>The server MUST ignore the durable field if the queue already exists.</doc>
- <doc type = "scenario">
- A client creates two named queues, one durable and one transient. The client
- then attempts to declare the two queues using the same names again, but reversing
- the value of the durable flag in each case. Verify that the queues still exist
- with the original durable flag values.
- <!-- TODO: but how? -->
- </doc>
- </rule>
- </field>
-
- <field name = "exclusive" domain = "bit" label = "request an exclusive queue">
- <doc>
- Exclusive queues may only be consumed from by the current connection. Setting
- the 'exclusive' flag always implies 'auto-delete'.
- </doc>
-
- <!-- Rule test name: was "amq_queue_38" -->
- <rule name = "types">
- <doc>
- The server MUST support both exclusive (private) and non-exclusive (shared)
- queues.
- </doc>
- <doc type = "scenario">
- A client creates two named queues, one exclusive and one non-exclusive.
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_queue_04" -->
- <rule name = "02" on-failure = "channel-error">
- <doc>
- The client MAY NOT attempt to declare any existing and exclusive queue
- on multiple connections.
- </doc>
- <doc type = "scenario">
- A client declares an exclusive named queue. A second client on a different
- connection attempts to declare a queue of the same name.
- </doc>
- </rule>
- </field>
-
- <field name = "auto-delete" domain = "bit" label = "auto-delete queue when unused">
- <doc>
- If set, the queue is deleted when all consumers have finished using it. The last
- consumer can be cancelled either explicitly or because its channel is closed. If
- there was never a consumer on the queue, it will not be deleted.
- </doc>
-
- <!-- Rule test name: was "amq_queue_31" -->
- <rule name = "pre-existence">
- <doc>
- The server MUST ignore the auto-delete field if the queue already exists.
- </doc>
- <doc type = "scenario">
- A client creates two named queues, one as auto-delete and one explicit-delete.
- The client then attempts to declare the two queues using the same names again,
- but reversing the value of the auto-delete field in each case. Verify that the
- queues still exist with the original auto-delete flag values.
- <!-- TODO: but how? -->
- </doc>
- </rule>
- </field>
-
- <field name = "arguments" domain = "table" label = "arguments for declaration">
- <doc>
- A set of arguments for the declaration. The syntax and semantics of these
- arguments depends on the server implementation. This field is ignored if passive
- is 1.
- </doc>
- </field>
- </method>
-
- <method name = "declare-ok" synchronous = "1" index = "11" label = "confirms a queue definition">
- <doc>
- This method confirms a Declare method and confirms the name of the queue, essential
- for automatically-named queues.
- </doc>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "queue" domain = "queue-name">
- <doc>
- Reports the name of the queue. If the server generated a queue name, this field
- contains that name.
- </doc>
- <assert check = "notnull" />
- </field>
-
- <field name = "message-count" domain = "long" label = "number of messages in queue">
- <doc>
- Reports the number of messages in the queue, which will be zero for
- newly-created queues.
- </doc>
- </field>
-
- <field name = "consumer-count" domain = "long" label = "number of consumers">
- <doc>
- Reports the number of active consumers for the queue. Note that consumers can
- suspend activity (Channel.Flow) in which case they do not appear in this count.
- </doc>
- </field>
- </method>
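
A sketch of Queue.Declare and the Declare-Ok reply fields in pika terms (queue names are hypothetical; no ticket argument for the reason given earlier):

    # empty queue name: the server generates a unique name and returns it
    result = channel.queue_declare(queue='', exclusive=True, auto_delete=True)
    print(result.method.queue)            # server-generated name from Declare-Ok
    print(result.method.message_count)    # zero for a newly created queue
    print(result.method.consumer_count)

    # passive declare: assert that 'work' exists without modifying server state;
    # the broker closes the channel with reply code 404 if it does not
    channel.queue_declare(queue='work', passive=True)
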
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "bind" synchronous = "1" index = "20" label = "bind queue to an exchange">
- <doc>
- This method binds a queue to an exchange. Until a queue is bound it will not receive
- any messages. In a classic messaging model, store-and-forward queues are bound to a
- dest exchange and subscription queues are bound to a dest_wild exchange.
- </doc>
-
- <!-- Rule test name: was "amq_queue_25" -->
- <rule name = "duplicates">
- <doc>
- A server MUST allow duplicate bindings - that is, two or more bind
- methods for a specific queue, with identical arguments - without treating these
- as an error.
- </doc>
- <doc type = "scenario">
- A client binds a named queue to an exchange. The client then repeats the bind
- (with identical arguments).
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_queue_39" -->
- <rule name = "failure" on-failure = "??????">
- <!--
- TODO: Find correct code. The on-failure code returned should depend on why the bind
- failed. Assuming that failures owing to bad parameters are covered in the rules relating
- to those parameters, the only remaining reason for a failure would be the lack of
- server resources or some internal error - such as too many queues open. Would these
- cases qualify as "resource error" 506 or "internal error" 541?
- -->
- <doc>If a bind fails, the server MUST raise a connection exception.</doc>
- <doc type = "scenario">
- TODO
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_queue_12" -->
- <rule name = "transient-exchange" on-failure = "not-allowed">
- <doc>
- The server MUST NOT allow a durable queue to bind to a transient exchange.
- </doc>
- <doc type = "scenario">
- A client creates a transient exchange. The client then declares a named durable
- queue and then attempts to bind the durable queue to the transient exchange.
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_queue_13" -->
- <rule name = "durable-exchange">
- <doc>
- Bindings for durable queues are automatically durable and the server SHOULD
- restore such bindings after a server restart.
- </doc>
- <doc type = "scenario">
- A client creates a named durable queue and binds it to a durable exchange. The
- server is restarted. The client then attempts to use the queue/exchange combination.
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_queue_17" -->
- <rule name = "internal-exchange">
- <doc>
- If the client attempts to bind to an exchange that was declared as internal, the server
- MUST raise a connection exception with reply code 530 (not allowed).
- </doc>
- <doc type = "scenario">
- A client attempts to bind a named queue to an internal exchange.
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_queue_40" -->
- <rule name = "binding-count">
- <doc>
- The server SHOULD support at least 4 bindings per queue, and ideally, impose no
- limit except as defined by available resources.
- </doc>
- <doc type = "scenario">
- A client creates a named queue and attempts to bind it to 4 different non-internal
- exchanges.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
-
- <response name = "bind-ok" />
-
- <field name = "ticket" domain = "access-ticket">
- <doc>
- The client provides a valid access ticket giving "active" access rights to the
- queue's access realm.
- </doc>
- </field>
-
- <field name = "queue" domain = "queue-name">
- <doc>
- Specifies the name of the queue to bind. If the queue name is empty, refers to
- the current queue for the channel, which is the last declared queue.
- </doc>
-
- <rule name = "empty-queue" on-failure = "not-allowed">
- <doc>
- A client MUST NOT be allowed to bind a non-existent and unnamed queue (i.e.
- empty queue name) to an exchange.
- </doc>
- <doc type = "scenario">
- A client attempts to bind with an unnamed (empty) queue name to an exchange.
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_queue_26" -->
- <rule name = "queue-existence" on-failure = "not-found">
- <doc>
- A client MUST NOT be allowed to bind a non-existent queue (i.e. not previously
- declared) to an exchange.
- </doc>
- <doc type = "scenario">
- A client attempts to bind an undeclared queue name to an exchange.
- </doc>
- </rule>
- </field>
-
- <field name = "exchange" domain = "exchange-name" label = "name of the exchange to bind to">
- <!-- Rule test name: was "amq_queue_14" -->
- <rule name = "exchange-existence" on-failure = "not-found">
- <doc>
- A client MUST NOT be allowed to bind a queue to a non-existent exchange.
- </doc>
- <doc type = "scenario">
- A client attempts to bind a named queue to an undeclared exchange.
- </doc>
- </rule>
- </field>
-
- <field name = "routing-key" domain = "shortstr" label = "message routing key">
- <doc>
- Specifies the routing key for the binding. The routing key is used for routing
- messages depending on the exchange configuration. Not all exchanges use a
- routing key - refer to the specific exchange documentation. If the queue name
- is empty, the server uses the last queue declared on the channel. If the
- routing key is also empty, the server uses this queue name for the routing
- key as well. If the queue name is provided but the routing key is empty, the
- server does the binding with that empty routing key. The meaning of empty
- routing keys depends on the exchange implementation.
- </doc>
- </field>
-
- <field name = "arguments" domain = "table" label = "arguments for binding">
- <doc>
- A set of arguments for the binding. The syntax and semantics of these arguments
- depends on the exchange class.
- </doc>
- </field>
- </method>
-
- <method name = "bind-ok" synchronous = "1" index = "21" label = "confirm bind successful">
- <doc>This method confirms that the bind was successful.</doc>
-
- <chassis name = "client" implement = "MUST" />
- </method>
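
The bind itself is a one-liner from the client side, and per the "duplicates" rule repeating it with identical arguments is not an error (sketch, hypothetical names):

    channel.queue_bind(queue='work', exchange='app.events', routing_key='task.created')
    # a second, identical bind is accepted without complaint
    channel.queue_bind(queue='work', exchange='app.events', routing_key='task.created')
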
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "purge" synchronous = "1" index = "30" label = "purge a queue">
- <doc>
- This method removes all messages from a queue. It does not cancel consumers. Purged
- messages are deleted without any formal "undo" mechanism.
- </doc>
-
- <!-- Rule test name: was "amq_queue_15" -->
- <rule name = "01">
- <doc>A call to purge MUST result in an empty queue.</doc>
- </rule>
-
- <!-- Rule test name: was "amq_queue_41" -->
- <rule name = "02">
- <doc>
- On transacted channels the server MUST NOT purge messages that have already been
- sent to a client but not yet acknowledged.
- </doc>
- </rule>
-
- <!-- TODO: Rule split? -->
-
- <!-- Rule test name: was "amq_queue_42" -->
- <rule name = "03">
- <doc>
- The server MAY implement a purge queue or log that allows system administrators
- to recover accidentally-purged messages. The server SHOULD NOT keep purged
- messages in the same storage spaces as the live messages since the volumes of
- purged messages may get very large.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
-
- <response name = "purge-ok" />
-
- <field name = "ticket" domain = "access-ticket">
- <doc>The access ticket must be for the access realm that holds the queue.</doc>
-
- <rule name = "01">
- <doc>
- The client MUST provide a valid access ticket giving "read" access rights to
- the queue's access realm. Note that purging a queue is equivalent to reading
- all messages and discarding them.
- </doc>
- </rule>
- </field>
-
- <field name = "queue" domain = "queue-name">
- <doc>
- Specifies the name of the queue to purge. If the queue name is empty, refers to
- the current queue for the channel, which is the last declared queue.
- </doc>
-
- <rule name = "01">
- <doc>
- If the client did not previously declare a queue, and the queue name in this
- method is empty, the server MUST raise a connection exception with reply
- code 530 (not allowed).
- </doc>
- </rule>
-
- <!-- TODO Rule split? -->
-
- <!-- Rule test name: was "amq_queue_16" -->
- <rule name = "02">
- <doc>
- The queue MUST exist. Attempting to purge a non-existing queue MUST cause a
- channel exception.
- </doc>
- </rule>
- </field>
- </method>
-
- <method name = "purge-ok" synchronous = "1" index = "31" label = "confirms a queue purge">
- <doc>This method confirms the purge of a queue.</doc>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "message-count" domain = "long" label = "number of messages purged">
- <doc>Reports the number of messages purged.</doc>
- </field>
- </method>
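
Queue.Purge and the message count returned in Purge-Ok, sketched under the same assumptions:

    result = channel.queue_purge(queue='work')
    print(result.method.message_count)    # number of messages removed by the purge
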
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "delete" synchronous = "1" index = "40" label = "delete a queue">
- <doc>
- This method deletes a queue. When a queue is deleted any pending messages are sent
- to a dead-letter queue if this is defined in the server configuration, and all
- consumers on the queue are cancelled.
- </doc>
-
- <!-- TODO: Rule split? -->
-
- <!-- Rule test name: was "amq_queue_43" -->
- <rule name = "01">
- <doc>
- The server SHOULD use a dead-letter queue to hold messages that were pending on
- a deleted queue, and MAY provide facilities for a system administrator to move
- these messages back to an active queue.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
-
- <response name = "delete-ok" />
-
- <field name = "ticket" domain = "access-ticket">
- <doc>
- The client provides a valid access ticket giving "active" access rights to the
- queue's access realm.
- </doc>
- </field>
-
- <field name = "queue" domain = "queue-name">
- <doc>
- Specifies the name of the queue to delete. If the queue name is empty, refers to
- the current queue for the channel, which is the last declared queue.
- </doc>
-
- <rule name = "01">
- <doc>
- If the client did not previously declare a queue, and the queue name in this
- method is empty, the server MUST raise a connection exception with reply
- code 530 (not allowed).
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_queue_21" -->
- <rule name = "02">
- <doc>
- The queue must exist. If the client attempts to delete a non-existing queue
- the server MUST raise a channel exception with reply code 404 (not found).
- </doc>
- </rule>
- </field>
-
- <field name = "if-unused" domain = "bit" label = "delete only if unused">
- <doc>
- If set, the server will only delete the queue if it has no consumers. If the
- queue has consumers the server does not delete it but raises a channel
- exception instead.
- </doc>
-
- <!-- Rule test name: was "amq_queue_29" and "amq_queue_30" -->
- <rule name = "01">
- <doc>The server MUST respect the if-unused flag when deleting a queue.</doc>
- </rule>
- </field>
-
- <field name = "if-empty" domain = "bit" label = "delete only if empty">
- <doc>
- If set, the server will only delete the queue if it has no messages.
- </doc>
- <rule name = "01">
- <doc>
- If the queue is not empty the server MUST raise a channel exception with
- reply code 406 (precondition failed).
- </doc>
- </rule>
- </field>
- </method>
-
- <method name = "delete-ok" synchronous = "1" index = "41" label = "confirm deletion of a queue">
- <doc>This method confirms the deletion of a queue.</doc>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "message-count" domain = "long" label = "number of messages purged">
- <doc>Reports the number of messages purged.</doc>
- </field>
- </method>
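
Queue.Delete with the two conditional flags described above; violating either condition surfaces as a channel exception (sketch):

    # delete only if the queue has no consumers and no messages
    result = channel.queue_delete(queue='work', if_unused=True, if_empty=True)
    print(result.method.message_count)    # messages discarded along with the queue
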
- </class>
-
- <!-- == BASIC ============================================================ -->
-
- <class name = "basic" handler = "channel" index = "60" label = "work with basic content">
- <doc>
- The Basic class provides methods that support an industry-standard messaging model.
- </doc>
-
- <doc type = "grammar">
- basic = C:QOS S:QOS-OK
- / C:CONSUME S:CONSUME-OK
- / C:CANCEL S:CANCEL-OK
- / C:PUBLISH content
- / S:RETURN content
- / S:DELIVER content
- / C:GET ( S:GET-OK content / S:GET-EMPTY )
- / C:ACK
- / C:REJECT
- </doc>
-
- <chassis name = "server" implement = "MUST" />
- <chassis name = "client" implement = "MAY" />
-
- <!-- Rule test name: was "amq_basic_08" -->
- <rule name = "01">
- <doc>
- The server SHOULD respect the persistent property of basic messages and
- SHOULD make a best-effort to hold persistent basic messages on a reliable
- storage mechanism.
- </doc>
- <doc type = "scenario">
- Send a persistent message to queue, stop server, restart server and then
- verify whether message is still present. Assumes that queues are durable.
- Persistence without durable queues makes no sense.
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_basic_09" -->
- <rule name = "02">
- <doc>
- The server MUST NOT discard a persistent basic message in case of a queue
- overflow.
- </doc>
- <doc type = "scenario">
- Create a queue overflow situation with persistent messages and verify that
- messages do not get lost (presumably the server will write them to disk).
- </doc>
- </rule>
-
- <rule name = "03">
- <doc>
- The server MAY use the Channel.Flow method to slow or stop a basic message
- publisher when necessary.
- </doc>
- <doc type = "scenario">
- Create a queue overflow situation with non-persistent messages and verify
- whether the server responds with Channel.Flow or not. Repeat with persistent
- messages.
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_basic_10" -->
- <rule name = "04">
- <doc>
- The server MAY overflow non-persistent basic messages to persistent
- storage.
- </doc>
- <!-- Test scenario: untestable -->
- </rule>
-
- <rule name = "05">
- <doc>
- The server MAY discard or dead-letter non-persistent basic messages on a
- priority basis if the queue size exceeds some configured limit.
- </doc>
- <!-- Test scenario: untestable -->
- </rule>
-
- <!-- Rule test name: was "amq_basic_11" -->
- <rule name = "06">
- <doc>
- The server MUST implement at least 2 priority levels for basic messages,
- where priorities 0-4 and 5-9 are treated as two distinct levels.
- </doc>
- <doc type = "scenario">
- Send a number of priority 0 messages to a queue. Send one priority 9
- message. Consume messages from the queue and verify that the first message
- received was priority 9.
- </doc>
- </rule>
-
- <rule name = "07">
- <doc>
- The server MAY implement up to 10 priority levels.
- </doc>
- <doc type = "scenario">
- Send a number of messages with mixed priorities to a queue, so that all
- priority values from 0 to 9 are exercised. A good scenario would be ten
- messages in low-to-high priority. Consume from queue and verify how many
- priority levels emerge.
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_basic_12" -->
- <rule name = "08">
- <doc>
- The server MUST deliver messages of the same priority in order irrespective of
- their individual persistence.
- </doc>
- <doc type = "scenario">
- Send a set of messages with the same priority but different persistence
- settings to a queue. Consume and verify that messages arrive in same order
- as originally published.
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_basic_13" -->
- <rule name = "09">
- <doc>
- The server MUST support automatic acknowledgements on Basic content, i.e.
- consumers with the no-ack field set to TRUE.
- </doc>
- <doc type = "scenario">
- Create a queue and a consumer using automatic acknowledgements. Publish
- a set of messages to the queue. Consume the messages and verify that all
- messages are received.
- </doc>
- </rule>
-
- <rule name = "10">
- <doc>
- The server MUST support explicit acknowledgements on Basic content, i.e.
- consumers with the no-ack field set to FALSE.
- </doc>
- <doc type = "scenario">
- Create a queue and a consumer using explicit acknowledgements. Publish a
- set of messages to the queue. Consume the messages but acknowledge only
- half of them. Disconnect and reconnect, and consume from the queue.
- Verify that the remaining messages are received.
- </doc>
- </rule>
-
- <!-- These are the properties for a Basic content -->
-
- <field name = "content-type" domain = "shortstr" label = "MIME content type" />
- <field name = "content-encoding" domain = "shortstr" label = "MIME content encoding" />
- <field name = "headers" domain = "table" label = "message header field table" />
- <field name = "delivery-mode" domain = "octet" label = "non-persistent (1) or persistent (2)" />
- <field name = "priority" domain = "short" label = "message priority, 0 to 9" />
- <field name = "correlation-id" domain = "shortstr" label = "application correlation identifier" />
- <field name = "reply-to" domain = "shortstr" label = "destination to reply to" />
- <field name = "expiration" domain = "shortstr" label = "message expiration specification" />
- <field name = "timestamp" domain = "timestamp" label = "message timestamp" />
- <field name = "message-id" domain = "shortstr" label = "application message identifier" />
- <field name = "type" domain = "shortstr" label = "message type name" />
- <field name = "user-id" domain = "shortstr" label = "creating user id" />
- <field name = "app-id" domain = "shortstr" label = "creating application id" />
- <!-- This field is deprecated pending review -->
- <field name = "cluster-id" domain = "shortstr" label = "intra-cluster routing identifier" />
-
- <!-- Type diversity test -->
- <field name = "property-bit" domain = "bit" label = "Extra property for testing only" />
- <field name = "property-octet" domain = "octet" label = "Extra property for testing only" />
- <field name = "property-short" domain = "short" label = "Extra property for testing only" />
- <field name = "property-long" domain = "long" label = "Extra property for testing only" />
- <field name = "property-longlong" domain = "longlong" label = "Extra property for testing only" />
- <field name = "property-shortstr" domain = "shortstr" label = "Extra property for testing only" />
- <field name = "property-longstr" domain = "longstr" label = "Extra property for testing only" />
- <field name = "property-timestamp" domain = "timestamp" label = "Extra property for testing only" />
- <field name = "property-table" domain = "table" label = "Extra property for testing only" />
- <field name = "property-access-ticket" domain = "access-ticket" label = "Extra property for testing only" />
- <field name = "property-class-id" domain = "class-id" label = "Extra property for testing only" />
- <field name = "property-consumer-tag" domain = "consumer-tag" label = "Extra property for testing only" />
- <field name = "property-delivery-tag" domain = "delivery-tag" label = "Extra property for testing only" />
- <field name = "property-exchange-name" domain = "exchange-name" label = "Extra property for testing only" />
- <field name = "property-known-hosts" domain = "known-hosts" label = "Extra property for testing only" />
- <field name = "property-method-id" domain = "method-id" label = "Extra property for testing only" />
- <field name = "property-no-ack" domain = "no-ack" label = "Extra property for testing only" />
- <field name = "property-no-local" domain = "no-local" label = "Extra property for testing only" />
- <field name = "property-path" domain = "path" label = "Extra property for testing only" />
- <field name = "property-peer-properties" domain = "peer-properties" label = "Extra property for testing only" />
- <field name = "property-queue-name" domain = "queue-name" label = "Extra property for testing only" />
- <field name = "property-redelivered" domain = "redelivered" label = "Extra property for testing only" />
- <field name = "property-reply-code" domain = "reply-code" label = "Extra property for testing only" />
- <field name = "property-reply-text" domain = "reply-text" label = "Extra property for testing only" />
-
- <!-- Bit field test -->
- <field name = "property-long-A" domain = "long" label = "Extra property for testing only" />
- <field name = "property-bit-B" domain = "bit" label = "Extra property for testing only" />
- <field name = "property-bit-C" domain = "bit" label = "Extra property for testing only" />
- <field name = "property-bit-D" domain = "bit" label = "Extra property for testing only" />
- <field name = "property-bit-E" domain = "bit" label = "Extra property for testing only" />
- <field name = "property-bit-F" domain = "bit" label = "Extra property for testing only" />
- <field name = "property-shortstr-G" domain = "shortstr" label = "Extra property for testing only" />
-
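
The standard properties above (content-type through cluster-id) map onto the properties object a client attaches when publishing; the "Extra property ... testing only" fields are additions of this test spec with no client-side equivalent. A sketch with pika, values hypothetical:

    properties = pika.BasicProperties(
        content_type='application/json',
        delivery_mode=2,            # 2 = persistent, 1 = non-persistent
        priority=5,
        correlation_id='req-42',
        reply_to='reply-queue',
        message_id='msg-0001',
        app_id='example-app',
        headers={'retry-count': 0},
    )
    channel.basic_publish(exchange='app.events',
                          routing_key='task.created',
                          body=b'{"id": 42}',
                          properties=properties,
                          mandatory=True)   # unroutable messages come back via Basic.Return
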
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "qos" synchronous = "1" index = "10" label = "specify quality of service">
- <doc>
- This method requests a specific quality of service. The QoS can be specified for the
- current channel or for all channels on the connection. The particular properties and
- semantics of a qos method always depend on the content class semantics. Though the
- qos method could in principle apply to both peers, it is currently meaningful only
- for the server.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
- <response name = "qos-ok" />
-
- <field name = "prefetch-size" domain = "long" label = "prefetch window in octets">
- <doc>
- The client can request that messages be sent in advance so that when the client
- finishes processing a message, the following message is already held locally,
- rather than needing to be sent down the channel. Prefetching gives a performance
- improvement. This field specifies the prefetch window size in octets. The server
- will send a message in advance if it is equal to or smaller in size than the
- available prefetch size (and also falls into other prefetch limits). May be set
- to zero, meaning "no specific limit", although other prefetch limits may still
- apply. The prefetch-size is ignored if the no-ack option is set.
- </doc>
- <!-- Rule test name: was "amq_basic_17" -->
- <rule name = "01">
- <doc>
- The server MUST ignore this setting when the client is not processing any
- messages - i.e. the prefetch size does not limit the transfer of single
- messages to a client, only the sending in advance of more messages while
- the client still has one or more unacknowledged messages.
- </doc>
- <doc type = "scenario">
- Define a QoS prefetch-size limit and send a single message that exceeds
- that limit. Verify that the message arrives correctly.
- </doc>
- </rule>
- </field>
-
- <field name = "prefetch-count" domain = "short" label = "prefetch window in messages">
- <doc>
- Specifies a prefetch window in terms of whole messages. This field may be used
- in combination with the prefetch-size field; a message will only be sent in
- advance if both prefetch windows (and those at the channel and connection level)
- allow it. The prefetch-count is ignored if the no-ack option is set.
- </doc>
- <!-- Rule test name: was "amq_basic_18" -->
- <rule name = "01">
- <doc>
- The server MAY send less data in advance than allowed by the client's
- specified prefetch windows but it MUST NOT send more.
- </doc>
- <doc type = "scenario">
- Define a QoS prefetch-size limit and a prefetch-count limit greater than
- one. Send multiple messages that exceed the prefetch size. Verify that
- no more than one message arrives at once.
- </doc>
- </rule>
- </field>
-
- <field name = "global" domain = "bit" label = "apply to entire connection">
- <doc>
- By default the QoS settings apply to the current channel only. If this field is
- set, they are applied to the entire connection.
- </doc>
- </field>
- </method>
-
- <method name = "qos-ok" synchronous = "1" index = "11" label = "confirm the requested qos">
- <doc>
- This method tells the client that the requested QoS levels could be handled by the
- server. The requested QoS applies to all active consumers until a new QoS is
- defined.
- </doc>
- <chassis name = "client" implement = "MUST" />
- </method>
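
Basic.Qos from the client side; in practice the message-count window is the one that matters, and it only constrains consumers that use explicit acknowledgements (sketch):

    # at most 10 unacknowledged messages in flight on this channel
    channel.basic_qos(prefetch_count=10)
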
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "consume" synchronous = "1" index = "20" label = "start a queue consumer">
- <doc>
- This method asks the server to start a "consumer", which is a transient request for
- messages from a specific queue. Consumers last as long as the channel they were
- created on, or until the client cancels them.
- </doc>
-
- <!-- Rule test name: was "amq_basic_01" -->
- <rule name = "01">
- <doc>
- The server SHOULD support at least 16 consumers per queue, and ideally, impose
- no limit except as defined by available resources.
- </doc>
- <doc type = "scenario">
- Create a queue and create consumers on that queue until the server closes the
- connection. Verify that the number of consumers created was at least sixteen
- and report the total number.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
- <response name = "consume-ok" />
-
- <field name = "ticket" domain = "access-ticket">
- <rule name = "01" on-failure = "access-refused">
- <doc>
- The client MUST provide a valid access ticket giving "read" access rights to
- the realm for the queue.
- </doc>
- <doc type = "scenario">
- Attempt to create a consumer with an invalid (non-zero) access ticket.
- </doc>
- </rule>
- </field>
-
- <field name = "queue" domain = "queue-name">
- <doc>
- Specifies the name of the queue to consume from. If the queue name is null,
- refers to the current queue for the channel, which is the last declared queue.
- </doc>
- <rule name = "01" on-failure = "not-allowed">
- <doc>
- If the queue name is empty the client MUST have previously declared a
- queue using this channel.
- </doc>
- <doc type = "scenario">
- Attempt to create a consumer with an empty queue name and no previously
- declared queue on the channel.
- </doc>
- </rule>
- </field>
-
- <field name = "consumer-tag" domain = "consumer-tag">
- <doc>
- Specifies the identifier for the consumer. The consumer tag is local to a
- connection, so two clients can use the same consumer tags. If this field is
- empty the server will generate a unique tag.
- </doc>
- <rule name = "01" on-failure = "not-allowed">
- <doc>
- The client MUST NOT specify a tag that refers to an existing consumer.
- </doc>
- <doc type = "scenario">
- Attempt to create two consumers with the same non-empty tag.
- </doc>
- </rule>
- <rule name = "02" on-failure = "not-allowed">
- <doc>
- The consumer tag is valid only within the channel from which the
- consumer was created. I.e. a client MUST NOT create a consumer in one
- channel and then use it in another.
- </doc>
- <doc type = "scenario">
- Attempt to create a consumer in one channel, then use in another channel,
- in which consumers have also been created (to test that the server uses
- unique consumer tags).
- </doc>
- </rule>
- </field>
-
- <field name = "no-local" domain = "no-local" />
-
- <field name = "nowait" domain = "bit" label = "do not send a reply method">
- <doc>
- If set, the server will not respond to the method. The client should not wait
- for a reply method. If the server could not complete the method it will raise
- a channel or connection exception.
- </doc>
- </field>
-
- <field name = "bit-test-1" domain = "bit" />
- <field name = "bit-test-2" domain = "bit" />
- <field name = "bit-test-3" domain = "bit" />
- <field name = "bit-test-4" domain = "bit" />
- <field name = "bit-test-5" domain = "bit" />
- <field name = "bit-test-6" domain = "bit" />
- <field name = "bit-test-7" domain = "bit" />
- <field name = "bit-test-8" domain = "bit" />
- <field name = "bit-test-9" domain = "bit" />
-
- <field name = "no-ack" domain = "short" />
-
- <field name = "exclusive" domain = "bit" label = "request exclusive access">
- <doc>
- Request exclusive consumer access, meaning only this consumer can access the
- queue.
- </doc>
- <!-- Rule test name: was "amq_basic_02" -->
- <rule name = "01" on-failure = "access-refused">
- <doc>
- The client MAY NOT gain exclusive access to a queue that already has
- active consumers.
- </doc>
- <doc type = "scenario">
- Open two connections to a server, and in one connection create a shared
- (non-exclusive) queue and then consume from the queue. In the second
- connection attempt to consume from the same queue using the exclusive
- option.
- </doc>
- </rule>
- </field>
-
- <field name = "priority" domain = "short" label = "consume priority"/>
- </method>
-
- <method name = "consume-ok" synchronous = "1" index = "21" label = "confirm a new consumer">
- <doc>
- The server provides the client with a consumer tag, which is used by the client
- for methods called on the consumer at a later stage.
- </doc>
- <chassis name = "client" implement = "MUST" />
- <field name = "consumer-tag" domain = "consumer-tag">
- <doc>
- Holds the consumer tag specified by the client or provided by the server.
- </doc>
- </field>
- </method>
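
A consume/acknowledge round trip sketched with pika 1.x names; auto_ack corresponds to the no-ack field, and the queue name is hypothetical:

    def on_message(ch, method, properties, body):
        print(method.consumer_tag, method.delivery_tag, body)
        ch.basic_ack(delivery_tag=method.delivery_tag)    # explicit acknowledgement

    consumer_tag = channel.basic_consume(queue='work',
                                         on_message_callback=on_message,
                                         auto_ack=False,   # no-ack off: acks required
                                         exclusive=False)
    channel.start_consuming()   # blocks, dispatching deliveries to on_message
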
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "cancel" synchronous = "1" index = "30" label = "end a queue consumer">
- <doc>
- This method cancels a consumer. This does not affect already delivered
- messages, but it does mean the server will not send any more messages for
- that consumer. The client may receive an arbitrary number of messages in
- between sending the cancel method and receiving the cancel-ok reply.
- </doc>
-
- <rule name = "01">
- <doc>
- If the queue does not exist the server MUST ignore the cancel method, so
- long as the consumer tag is valid for that channel.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
- <response name = "cancel-ok" />
-
- <field name = "consumer-tag" domain = "consumer-tag" />
- </method>
-
- <method name = "cancel-ok" synchronous = "1" index = "31" label = "confirm a cancelled consumer">
- <doc>
- This method confirms that the cancellation was completed.
- </doc>
- <chassis name = "client" implement = "MUST" />
- <field name = "consumer-tag" domain = "consumer-tag" />
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "publish" content = "1" index = "40" label = "publish a message">
- <doc>
- This method publishes a message to a specific exchange. The message will be routed
- to queues as defined by the exchange configuration and distributed to any active
- consumers when the transaction, if any, is committed.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
-
- <field name = "ticket" domain = "access-ticket">
- <rule name = "01">
- <doc>
- The client MUST provide a valid access ticket giving "write" access rights
- to the access realm for the exchange.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- </field>
-
- <field name = "exchange" domain = "exchange-name">
- <doc>
- Specifies the name of the exchange to publish to. The exchange name can be
- empty, meaning the default exchange. If the exchange name is specified, and that
- exchange does not exist, the server will raise a channel exception.
- </doc>
-
- <!-- Rule test name: was "amq_basic_06" -->
- <rule name = "01">
- <doc>
- The server MUST accept a blank exchange name to mean the default exchange.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_basic_14" -->
- <rule name = "02">
- <doc>
- If the exchange was declared as an internal exchange, the server MUST raise
- a channel exception with a reply code 403 (access refused).
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_basic_15" -->
- <rule name = "03">
- <doc>
- The exchange MAY refuse basic content in which case it MUST raise a channel
- exception with reply code 540 (not implemented).
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- </field>
-
- <field name = "routing-key" domain = "shortstr" label = "Message routing key">
- <doc>
- Specifies the routing key for the message. The routing key is used for routing
- messages depending on the exchange configuration.
- </doc>
- </field>
-
- <field name = "mandatory" domain = "bit" label = "indicate mandatory routing">
- <doc>
- This flag tells the server how to react if the message cannot be routed to a
- queue. If this flag is set, the server will return an unroutable message with a
- Return method. If this flag is zero, the server silently drops the message.
- </doc>
- <!-- Rule test name: was "amq_basic_07" -->
- <rule name = "01">
- <doc>
- The server SHOULD implement the mandatory flag.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- </field>
-
- <field name = "immediate" domain = "bit" label = "request immediate delivery">
- <doc>
- This flag tells the server how to react if the message cannot be routed to a
- queue consumer immediately. If this flag is set, the server will return an
- undeliverable message with a Return method. If this flag is zero, the server
- will queue the message, but with no guarantee that it will ever be consumed.
- </doc>
- <!-- Rule test name: was "amq_basic_16" -->
- <rule name = "01">
- <doc>
- The server SHOULD implement the immediate flag.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- </field>
- </method>
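-
-    <!-- Illustrative sketch, not part of the specification: publishing to the default
-         exchange with the mandatory flag, using the Python pika client (AMQP 0-9-1,
-         which mirrors this method; current pika versions do not expose the immediate
-         flag). The queue name is an assumption.
-
-         import pika
-
-         connection = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
-         channel = connection.channel()
-
-         # an empty exchange name means the default exchange; the routing key is
-         # then interpreted as a queue name
-         channel.basic_publish(exchange="",
-                               routing_key="demo-queue",
-                               body=b"hello",
-                               mandatory=True)
-    -->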
-
- <method name = "return" content = "1" index = "50" label = "return a failed message">
- <doc>
- This method returns an undeliverable message that was published with the "immediate"
- flag set, or an unroutable message published with the "mandatory" flag set. The
- reply code and text provide information about the reason that the message was
- undeliverable.
- </doc>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "reply-code" domain = "reply-code" />
-
- <field name = "reply-text" domain = "reply-text" />
-
- <field name = "exchange" domain = "exchange-name">
- <doc>
- Specifies the name of the exchange that the message was originally published to.
- </doc>
- </field>
-
- <field name = "routing-key" domain = "shortstr" label = "Message routing key">
- <doc>
- Specifies the routing key name specified when the message was published.
- </doc>
- </field>
- </method>
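-
-    <!-- Illustrative sketch, not part of the specification: observing a Return with
-         the Python pika client. With publisher confirms enabled, pika's blocking
-         channel surfaces a returned mandatory message as an UnroutableError; the
-         reply code and text described above accompany the returned message. The
-         routing key is an assumption.
-
-         import pika
-         from pika.exceptions import UnroutableError
-
-         connection = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
-         channel = connection.channel()
-         channel.confirm_delivery()
-
-         try:
-             channel.basic_publish(exchange="", routing_key="no-such-queue",
-                                   body=b"hello", mandatory=True)
-         except UnroutableError:
-             # the message could not be routed and was returned by the server
-             pass
-    -->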
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "deliver" content = "1" index = "60"
- label = "notify the client of a consumer message">
- <doc>
- This method delivers a message to the client, via a consumer. In the asynchronous
- message delivery model, the client starts a consumer using the Consume method, then
- the server responds with Deliver methods as and when messages arrive for that
- consumer.
- </doc>
-
- <!-- Rule test name: was "amq_basic_19" -->
- <rule name = "01">
- <!-- TODO: Rule split? -->
- <doc>
- The server SHOULD track the number of times a message has been delivered to
- clients and when a message is redelivered a certain number of times - e.g. 5
- times - without being acknowledged, the server SHOULD consider the message to be
- unprocessable (possibly causing client applications to abort), and move the
- message to a dead letter queue.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "consumer-tag" domain = "consumer-tag" />
-
- <field name = "delivery-tag" domain = "delivery-tag" />
-
- <field name = "redelivered" domain = "redelivered" />
-
- <field name = "exchange" domain = "exchange-name">
- <doc>
- Specifies the name of the exchange that the message was originally published to.
- </doc>
- </field>
-
- <field name = "routing-key" domain = "shortstr" label = "Message routing key">
- <doc>Specifies the routing key name specified when the message was published.</doc>
- </field>
- </method>
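-
-    <!-- Illustrative sketch, not part of the specification: the fields carried by a
-         Deliver method as they appear in a Python pika consumer callback (AMQP 0-9-1,
-         whose basic.deliver mirrors this method).
-
-         def on_message(ch, method, properties, body):
-             # fields of the Deliver method described above
-             print(method.consumer_tag, method.delivery_tag,
-                   method.redelivered, method.exchange, method.routing_key)
-             ch.basic_ack(delivery_tag=method.delivery_tag)
-    -->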
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "get" synchronous = "1" index = "70" label = "direct access to a queue">
- <doc>
- This method provides direct access to the messages in a queue using a synchronous
- dialogue that is designed for specific types of application where synchronous
- functionality is more important than performance.
- </doc>
-
- <response name = "get-ok" />
- <response name = "get-empty" />
- <chassis name = "server" implement = "MUST" />
-
- <field name = "ticket" domain = "access-ticket">
- <rule name = "01">
- <doc>
- The client MUST provide a valid access ticket giving "read" access rights to
- the realm for the queue.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- </field>
-
- <field name = "queue" domain = "queue-name">
- <doc>
- Specifies the name of the queue to consume from. If the queue name is null,
- it refers to the current queue for the channel, which is the last declared queue.
- </doc>
- <rule name = "01">
- <doc>
- If the client did not previously declare a queue, and the queue name in this
- method is empty, the server MUST raise a connection exception with reply
- code 530 (not allowed).
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- </field>
-
- <field name = "no-ack" domain = "no-ack" />
- </method>
-
- <method name = "get-ok" synchronous = "1" content = "1" index = "71"
- label = "provide client with a message">
- <doc>
- This method delivers a message to the client following a get method. A message
- delivered by 'get-ok' must be acknowledged unless the no-ack option was set in the
- get method.
- </doc>
-
- <chassis name = "client" implement = "MAY" />
-
- <field name = "delivery-tag" domain = "delivery-tag" />
-
- <field name = "redelivered" domain = "redelivered" />
-
- <field name = "exchange" domain = "exchange-name">
- <doc>
- Specifies the name of the exchange that the message was originally published to.
- If empty, the message was published to the default exchange.
- </doc>
- </field>
-
- <field name = "routing-key" domain = "shortstr" label = "Message routing key">
- <doc>Specifies the routing key name specified when the message was published.</doc>
- </field>
-
- <field name = "message-count" domain = "long" label = "number of messages pending">
- <doc>
- This field reports the number of messages pending on the queue, excluding the
- message being delivered. Note that this figure is indicative, not reliable, and
- can change arbitrarily as messages are added to the queue and removed by other
- clients.
- </doc>
- </field>
- </method>
-
- <method name = "get-empty" synchronous = "1" index = "72"
- label = "indicate no messages available">
- <doc>
- This method tells the client that the queue has no messages available for the
- client.
- </doc>
-
- <chassis name = "client" implement = "MAY" />
-
- <!-- This field is deprecated pending review -->
- <field name = "cluster-id" domain = "shortstr" label = "Cluster id">
- <doc>
- For use by cluster applications, should not be used by client applications.
- </doc>
- </field>
- </method>
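-
-    <!-- Illustrative sketch, not part of the specification: the synchronous Get
-         dialogue using the Python pika client. basic_get answers with either a
-         message (Get-Ok, including the pending message count) or (None, None, None)
-         when the queue is empty (Get-Empty). The queue name is an assumption.
-
-         method, properties, body = channel.basic_get(queue="demo-queue",
-                                                      auto_ack=False)
-         if method is None:
-             print("queue is empty")
-         else:
-             # method.message_count reports the messages still pending on the queue
-             print(method.message_count, body)
-             channel.basic_ack(delivery_tag=method.delivery_tag)
-    -->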
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "ack" index = "80" label = "acknowledge one or more messages">
- <doc>
- This method acknowledges one or more messages delivered via the Deliver or Get-Ok
- methods. The client can ask to confirm a single message or a set of messages up to
- and including a specific message.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
-
- <field name = "delivery-tag" domain = "delivery-tag" />
-
- <field name = "multiple" domain = "bit" label = "acknowledge multiple messages">
- <doc>
- If set to 1, the delivery tag is treated as "up to and including", so that the
- client can acknowledge multiple messages with a single method. If set to zero,
- the delivery tag refers to a single message. If the multiple field is 1 and the
- delivery tag is zero, this tells the server to acknowledge all outstanding messages.
- </doc>
-
- <!-- Rule test name: was "amq_basic_20" -->
- <rule name = "01">
- <doc>
- The server MUST validate that a non-zero delivery-tag refers to a delivered
- message, and raise a channel exception if this is not the case.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- </field>
- </method>
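-
-    <!-- Illustrative sketch, not part of the specification: acknowledging a batch of
-         messages up to and including a given delivery tag with the Python pika client.
-
-         # acknowledge every unacknowledged message up to and including this tag
-         channel.basic_ack(delivery_tag=method.delivery_tag, multiple=True)
-    -->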
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "reject" index = "90" label = "reject an incoming message">
- <doc>
- This method allows a client to reject a message. It can be used to interrupt and
- cancel large incoming messages, or return untreatable messages to their original
- queue.
- </doc>
-
- <!-- Rule test name: was "amq_basic_21" -->
- <rule name = "01">
- <doc>
- The server SHOULD be capable of accepting and processing the Reject method while
- sending message content with a Deliver or Get-Ok method. I.e. the server should
- read and process incoming methods while sending output frames. To cancel
- partially-sent content, the server sends a content body frame of size 1 (i.e.
- with no data except the frame-end octet).
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_basic_22" -->
- <rule name = "02">
- <doc>
- The server SHOULD interpret this method as meaning that the client is unable to
- process the message at this time.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
-
- <rule name = "03">
- <!-- TODO: Rule split? -->
- <doc>
- A client MUST NOT use this method as a means of selecting messages to process. A
- rejected message MAY be discarded or dead-lettered, not necessarily passed to
- another client.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
-
- <field name = "delivery-tag" domain = "delivery-tag" />
-
- <field name = "requeue" domain = "bit" label = "requeue the message">
- <doc>
- If this field is zero, the message will be discarded. If this bit is 1, the
- server will attempt to requeue the message.
- </doc>
-
- <!-- Rule test name: was "amq_basic_23" -->
- <rule name = "01">
- <!-- TODO: Rule split? -->
- <doc>
- The server MUST NOT deliver the message to the same client within the
- context of the current channel. The recommended strategy is to attempt to
- deliver the message to an alternative consumer, and if that is not possible,
- to move the message to a dead-letter queue. The server MAY use more
- sophisticated tracking to hold the message on the queue and redeliver it to
- the same client at a later stage.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- </field>
- </method>
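-
-    <!-- Illustrative sketch, not part of the specification: rejecting a message with
-         the Python pika client, either discarding it or asking the server to requeue
-         it for another consumer.
-
-         # requeue=False discards (or dead-letters) the message;
-         # requeue=True asks the server to requeue it
-         channel.basic_reject(delivery_tag=method.delivery_tag, requeue=False)
-    -->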
-
- <method name = "recover" index = "100" label = "redeliver unacknowledged messages">
- <doc>
- This method asks the broker to redeliver all unacknowledged messages on a specified
- channel. Zero or more messages may be redelivered. This method is only allowed on
- non-transacted channels.
- </doc>
-
- <rule name = "01">
- <doc>
- The server MUST set the redelivered flag on all messages that are resent.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
-
- <rule name = "02">
- <doc>
- The server MUST raise a channel exception if this is called on a transacted
- channel.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
-
- <field name = "requeue" domain = "bit" label = "requeue the message">
- <doc>
- If this field is zero, the message will be redelivered to the original
- recipient. If this bit is 1, the server will attempt to requeue the message,
- potentially then delivering it to an alternative subscriber.
- </doc>
- </field>
- </method>
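-
-    <!-- Illustrative sketch, not part of the specification: asking the broker to
-         redeliver unacknowledged messages with the Python pika client. Redelivered
-         messages arrive with the redelivered flag set, as the rule above requires.
-
-         # requeue=True lets the server requeue, possibly to other consumers;
-         # with requeue=False messages go back to the original recipient
-         channel.basic_recover(requeue=True)
-    -->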
- </class>
-
- <!-- == FILE ============================================================= -->
-
- <class name = "file" handler = "channel" index = "70" label = "work with file content">
- <doc>
- The file class provides methods that support reliable file transfer. File
- messages have a specific set of properties that are required for interoperability
- with file transfer applications. File messages and acknowledgements are subject to
- channel transactions. Note that the file class does not provide message browsing
- methods; these are not compatible with the staging model. Applications that need
- browsable file transfer should use Basic content and the Basic class.
- </doc>
-
- <doc type = "grammar">
- file = C:QOS S:QOS-OK
- / C:CONSUME S:CONSUME-OK
- / C:CANCEL S:CANCEL-OK
- / C:OPEN S:OPEN-OK C:STAGE content
- / S:OPEN C:OPEN-OK S:STAGE content
- / C:PUBLISH
- / S:DELIVER
- / S:RETURN
- / C:ACK
- / C:REJECT
- </doc>
-
- <chassis name = "server" implement = "MAY" />
- <chassis name = "client" implement = "MAY" />
-
- <rule name = "01">
- <doc>
- The server MUST make a best-effort to hold file messages on a reliable storage
- mechanism.
- </doc>
- </rule>
-
- <!-- TODO Rule implement attr inverse? -->
-
- <!-- TODO: Rule split? -->
-
- <rule name = "02">
- <doc>
- The server MUST NOT discard a file message in case of a queue overflow. The server
- MUST use the Channel.Flow method to slow or stop a file message publisher when
- necessary.
- </doc>
- </rule>
-
- <!-- TODO: Rule split? -->
-
- <rule name = "03">
- <doc>
- The server MUST implement at least 2 priority levels for file messages, where
- priorities 0-4 and 5-9 are treated as two distinct levels. The server MAY implement
- up to 10 priority levels.
- </doc>
- </rule>
-
- <rule name = "04">
- <doc>
- The server MUST support both automatic and explicit acknowledgements on file
- content.
- </doc>
- </rule>
-
- <!-- These are the properties for a File content -->
-
- <field name = "content-type" domain = "shortstr" label = "MIME content type" />
- <field name = "content-encoding" domain = "shortstr" label = "MIME content encoding" />
- <field name = "headers" domain = "table" label = "message header field table" />
- <field name = "priority" domain = "octet" label = "message priority, 0 to 9" />
- <field name = "reply-to" domain = "shortstr" label = "destination to reply to" />
- <field name = "message-id" domain = "shortstr" label = "application message identifier" />
- <field name = "filename" domain = "shortstr" label = "message filename" />
- <field name = "timestamp" domain = "timestamp" label = "message timestamp" />
- <!-- This field is deprecated pending review -->
- <field name = "cluster-id" domain = "shortstr" label = "intra-cluster routing identifier" />
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "qos" synchronous = "1" index = "10" label = "specify quality of service">
- <doc>
- This method requests a specific quality of service. The QoS can be specified for the
- current channel or for all channels on the connection. The particular properties and
- semantics of a qos method always depend on the content class semantics. Though the
- qos method could in principle apply to both peers, it is currently meaningful only
- for the server.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
-
- <response name = "qos-ok" />
-
- <field name = "prefetch-size" domain = "long" label = "prefetch window in octets">
- <doc>
- The client can request that messages be sent in advance so that when the client
- finishes processing a message, the following message is already held locally,
- rather than needing to be sent down the channel. Prefetching gives a performance
- improvement. This field specifies the prefetch window size in octets. May be set
- to zero, meaning "no specific limit". Note that other prefetch limits may still
- apply. The prefetch-size is ignored if the no-ack option is set.
- </doc>
- </field>
-
- <field name = "prefetch-count" domain = "short" label = "prefetch window in messages">
- <doc>
- Specifies a prefetch window in terms of whole messages. This is compatible with
- some file API implementations. This field may be used in combination with the
- prefetch-size field; a message will only be sent in advance if both prefetch
- windows (and those at the channel and connection level) allow it. The
- prefetch-count is ignored if the no-ack option is set.
- </doc>
-
- <rule name = "01">
- <!-- TODO: Rule split? -->
- <doc>
- The server MAY send less data in advance than allowed by the client's
- specified prefetch windows but it MUST NOT send more.
- </doc>
- </rule>
- </field>
-
- <field name = "global" domain = "bit" label = "apply to entire connection">
- <doc>
- By default the QoS settings apply to the current channel only. If this field is
- set, they are applied to the entire connection.
- </doc>
- </field>
- </method>
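-
-    <!-- Illustrative sketch, not part of the specification: the file class is rarely
-         implemented by modern brokers, but the prefetch window described here has the
-         same shape as basic.qos, shown with the Python pika client. The counts are
-         assumptions.
-
-         # prefetch_size=0 means "no specific limit" in octets;
-         # prefetch_count limits the number of unacknowledged messages in flight
-         channel.basic_qos(prefetch_size=0, prefetch_count=16)
-    -->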
-
- <method name = "qos-ok" synchronous = "1" index = "11" label = "confirm the requested qos">
- <doc>
- This method tells the client that the requested QoS levels could be handled by the
- server. The requested QoS applies to all active consumers until a new QoS is
- defined.
- </doc>
-
- <chassis name = "client" implement = "MUST" />
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "consume" synchronous = "1" index = "20" label = "start a queue consumer">
- <doc>
- This method asks the server to start a "consumer", which is a transient request for
- messages from a specific queue. Consumers last as long as the channel they were
- created on, or until the client cancels them.
- </doc>
-
- <rule name = "01">
- <doc>
- The server SHOULD support at least 16 consumers per queue, unless the queue was
- declared as private, and ideally, impose no limit except as defined by available
- resources.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
-
- <response name = "consume-ok" />
-
- <field name = "ticket" domain = "access-ticket">
- <rule name = "01">
- <doc>
- The client MUST provide a valid access ticket giving "read" access rights to
- the realm for the queue.
- </doc>
- </rule>
- </field>
-
- <field name = "queue" domain = "queue-name">
- <doc>
- Specifies the name of the queue to consume from. If the queue name is null,
- it refers to the current queue for the channel, which is the last declared queue.
- </doc>
-
- <rule name = "01">
- <doc>
- If the client did not previously declare a queue, and the queue name in this
- method is empty, the server MUST raise a connection exception with reply
- code 530 (not allowed).
- </doc>
- </rule>
- </field>
-
- <field name = "consumer-tag" domain = "consumer-tag">
- <doc>
- Specifies the identifier for the consumer. The consumer tag is local to a
- connection, so two clients can use the same consumer tags. If this field is
- empty the server will generate a unique tag.
- </doc>
-
- <rule name = "01">
- <!-- TODO: Rule split? -->
- <doc>
- The tag MUST NOT refer to an existing consumer. If the client attempts to
- create two consumers with the same non-empty tag the server MUST raise a
- connection exception with reply code 530 (not allowed).
- </doc>
- </rule>
- </field>
-
- <field name = "no-local" domain = "no-local" />
-
- <field name = "no-ack" domain = "no-ack" />
-
- <field name = "exclusive" domain = "bit" label = "request exclusive access">
- <doc>
- Request exclusive consumer access, meaning only this consumer can access the
- queue.
- </doc>
-
- <!-- Rule test name: was "amq_file_00" -->
- <rule name = "01">
- <doc>
- If the server cannot grant exclusive access to the queue when asked -
- because there are other consumers active - it MUST raise a channel exception
- with return code 405 (resource locked).
- </doc>
- </rule>
- </field>
- </method>
-
- <method name = "consume-ok" synchronous = "1" index = "21" label = "confirm a new consumer">
- <doc>
- This method provides the client with a consumer tag which it MUST use in methods
- that work with the consumer.
- </doc>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "consumer-tag" domain = "consumer-tag">
- <doc>Holds the consumer tag specified by the client or provided by the server.</doc>
- </field>
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "cancel" synchronous = "1" index = "30" label = "end a queue consumer">
- <doc>
- This method cancels a consumer. This does not affect already delivered messages, but
- it does mean the server will not send any more messages for that consumer.
- </doc>
-
- <response name = "cancel-ok" />
-
- <chassis name = "server" implement = "MUST" />
-
- <field name = "consumer-tag" domain = "consumer-tag" />
- </method>
-
- <method name = "cancel-ok" synchronous = "1" index = "31" label = "confirm a cancelled consumer">
- <doc>This method confirms that the cancellation was completed.</doc>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "consumer-tag" domain = "consumer-tag" />
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "open" synchronous = "1" index = "40" label = "request to start staging">
- <doc>
- This method requests permission to start staging a message. Staging means sending
- the message into a temporary area at the recipient end and then delivering the
- message by referring to this temporary area. Staging is how the protocol handles
- partial file transfers - if a message is partially staged and the connection breaks,
- the next time the sender starts to stage it, it can restart from where it left off.
- </doc>
-
- <response name = "open-ok" />
-
- <chassis name = "server" implement = "MUST" />
- <chassis name = "client" implement = "MUST" />
-
- <field name = "identifier" domain = "shortstr" label = "staging identifier">
- <doc>
- This is the staging identifier. This is an arbitrary string chosen by the
- sender. For staging to work correctly the sender must use the same staging
- identifier when staging the same message a second time after recovery from a
- failure. A good choice for the staging identifier would be the SHA1 hash of the
- message properties data (including the original filename, revised time, etc.).
- </doc>
- </field>
-
- <field name = "content-size" domain = "longlong" label = "message content size">
- <doc>
- The size of the content in octets. The recipient may use this information to
- allocate or check available space in advance, to avoid "disk full" errors during
- staging of very large messages.
- </doc>
-
- <rule name = "01">
- <doc>
- The sender MUST accurately fill the content-size field. Zero-length content
- is permitted.
- </doc>
- </rule>
- </field>
- </method>
-
- <method name = "open-ok" synchronous = "1" index = "41" label = "confirm staging ready">
- <doc>
- This method confirms that the recipient is ready to accept staged data. If the
- message was already partially-staged at a previous time the recipient will report
- the number of octets already staged.
- </doc>
-
- <response name = "stage" />
-
- <chassis name = "server" implement = "MUST" />
- <chassis name = "client" implement = "MUST" />
-
- <field name = "staged-size" domain = "longlong" label = "already staged amount">
- <doc>
- The amount of previously-staged content in octets. For a new message this will
- be zero.
- </doc>
-
- <rule name = "01">
- <doc>
- The sender MUST start sending data from this octet offset in the message,
- counting from zero.
- </doc>
- </rule>
-
- <rule name = "02">
- <!-- TODO: Rule split? -->
- <doc>
- The recipient MAY decide how long to hold partially-staged content and MAY
- implement staging by always discarding partially-staged content. However if
- it uses the file content type it MUST support the staging methods.
- </doc>
- </rule>
- </field>
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "stage" content = "1" index = "50" label = "stage message content">
- <doc>
- This method stages the message, sending the message content to the recipient from
- the octet offset specified in the Open-Ok method.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
- <chassis name = "client" implement = "MUST" />
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "publish" index = "60" label = "publish a message">
- <doc>
- This method publishes a staged file message to a specific exchange. The file message
- will be routed to queues as defined by the exchange configuration and distributed to
- any active consumers when the transaction, if any, is committed.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
-
- <field name = "ticket" domain = "access-ticket">
- <rule name = "01">
- <doc>
- The client MUST provide a valid access ticket giving "write" access rights
- to the access realm for the exchange.
- </doc>
- </rule>
- </field>
-
- <field name = "exchange" domain = "exchange-name">
- <doc>
- Specifies the name of the exchange to publish to. The exchange name can be
- empty, meaning the default exchange. If the exchange name is specified, and that
- exchange does not exist, the server will raise a channel exception.
- </doc>
-
- <rule name = "01">
- <doc>
- The server MUST accept a blank exchange name to mean the default exchange.
- </doc>
- </rule>
-
- <rule name = "02">
- <doc>
- If the exchange was declared as an internal exchange, the server MUST
- respond with a reply code 403 (access refused) and raise a channel
- exception.
- </doc>
- </rule>
-
- <!-- TODO: Rule split? -->
-
- <rule name = "03">
- <doc>
- The exchange MAY refuse file content in which case it MUST respond with a
- reply code 540 (not implemented) and raise a channel exception.
- </doc>
- </rule>
- </field>
-
- <field name = "routing-key" domain = "shortstr" label = "Message routing key">
- <doc>
- Specifies the routing key for the message. The routing key is used for routing
- messages depending on the exchange configuration.
- </doc>
- </field>
-
- <field name = "mandatory" domain = "bit" label = "indicate mandatory routing">
- <doc>
- This flag tells the server how to react if the message cannot be routed to a
- queue. If this flag is set, the server will return an unroutable message with a
- Return method. If this flag is zero, the server silently drops the message.
- </doc>
-
- <!-- Rule test name: was "amq_file_00" -->
- <rule name = "01">
- <doc>The server SHOULD implement the mandatory flag.</doc>
- </rule>
- </field>
-
- <field name = "immediate" domain = "bit" label = "request immediate delivery">
- <doc>
- This flag tells the server how to react if the message cannot be routed to a
- queue consumer immediately. If this flag is set, the server will return an
- undeliverable message with a Return method. If this flag is zero, the server
- will queue the message, but with no guarantee that it will ever be consumed.
- </doc>
-
- <!-- Rule test name: was "amq_file_00" -->
- <rule name = "01">
- <doc>The server SHOULD implement the immediate flag.</doc>
- </rule>
- </field>
-
- <field name = "identifier" domain = "shortstr" label = "staging identifier">
- <doc>
- This is the staging identifier of the message to publish. The message must have
- been staged. Note that a client can send the Publish method asynchronously
- without waiting for staging to finish.
- </doc>
- </field>
- </method>
-
- <method name = "return" content = "1" index = "70" label = "return a failed message">
- <doc>
- This method returns an undeliverable message that was published with the "immediate"
- flag set, or an unroutable message published with the "mandatory" flag set. The
- reply code and text provide information about the reason that the message was
- undeliverable.
- </doc>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "reply-code" domain = "reply-code" />
-
- <field name = "reply-text" domain = "reply-text" />
-
- <field name = "exchange" domain = "exchange-name">
- <doc>
- Specifies the name of the exchange that the message was originally published to.
- </doc>
- </field>
-
- <field name = "routing-key" domain = "shortstr" label = "Message routing key">
- <doc>Specifies the routing key name specified when the message was published.</doc>
- </field>
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "deliver" index = "80" label = "notify the client of a consumer message">
- <doc>
- This method delivers a staged file message to the client, via a consumer. In the
- asynchronous message delivery model, the client starts a consumer using the Consume
- method, then the server responds with Deliver methods as and when messages arrive
- for that consumer.
- </doc>
-
- <rule name = "01">
- <!-- TODO: Rule split? -->
- <doc>
- The server SHOULD track the number of times a message has been delivered to
- clients and when a message is redelivered a certain number of times - e.g. 5
- times - without being acknowledged, the server SHOULD consider the message to be
- unprocessable (possibly causing client applications to abort), and move the
- message to a dead letter queue.
- </doc>
- </rule>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "consumer-tag" domain = "consumer-tag" />
-
- <field name = "delivery-tag" domain = "delivery-tag" />
-
- <field name = "redelivered" domain = "redelivered" />
-
- <field name = "exchange" domain = "exchange-name">
- <doc>
- Specifies the name of the exchange that the message was originally published to.
- </doc>
- </field>
-
- <field name = "routing-key" domain = "shortstr" label = "Message routing key">
- <doc>Specifies the routing key name specified when the message was published.</doc>
- </field>
-
- <field name = "identifier" domain = "shortstr" label = "staging identifier">
- <doc>
- This is the staging identifier of the message to deliver. The message must have
- been staged. Note that a server can send the Deliver method asynchronously
- without waiting for staging to finish.
- </doc>
- </field>
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "ack" index = "90" label = "acknowledge one or more messages">
- <doc>
- This method acknowledges one or more messages delivered via the Deliver method. The
- client can ask to confirm a single message or a set of messages up to and including
- a specific message.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
-
- <field name = "delivery-tag" domain = "delivery-tag" />
-
- <field name = "multiple" domain = "bit" label = "acknowledge multiple messages">
- <doc>
- If set to 1, the delivery tag is treated as "up to and including", so that the
- client can acknowledge multiple messages with a single method. If set to zero,
- the delivery tag refers to a single message. If the multiple field is 1 and the
- delivery tag is zero, this tells the server to acknowledge all outstanding messages.
- </doc>
-
- <rule name = "01">
- <doc>
- The server MUST validate that a non-zero delivery-tag refers to a delivered
- message, and raise a channel exception if this is not the case.
- </doc>
- </rule>
- </field>
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "reject" index = "100" label = "reject an incoming message">
- <doc>
- This method allows a client to reject a message. It can be used to return
- untreatable messages to their original queue. Note that file content is staged
- before delivery, so the client will not use this method to interrupt delivery of a
- large message.
- </doc>
-
- <rule name = "01">
- <doc>
- The server SHOULD interpret this method as meaning that the client is unable to
- process the message at this time.
- </doc>
- </rule>
-
- <!-- TODO: Rule split? -->
-
- <rule name = "02">
- <doc>
- A client MUST NOT use this method as a means of selecting messages to process. A
- rejected message MAY be discarded or dead-lettered, not necessarily passed to
- another client.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
-
- <field name = "delivery-tag" domain = "delivery-tag" />
-
- <field name = "requeue" domain = "bit" label = "requeue the message">
- <doc>
- If this field is zero, the message will be discarded. If this bit is 1, the
- server will attempt to requeue the message.
- </doc>
-
- <rule name = "01">
- <!-- TODO: Rule split? -->
- <doc>
- The server MUST NOT deliver the message to the same client within the
- context of the current channel. The recommended strategy is to attempt to
- deliver the message to an alternative consumer, and if that is not possible,
- to move the message to a dead-letter queue. The server MAY use more
- sophisticated tracking to hold the message on the queue and redeliver it to
- the same client at a later stage.
- </doc>
- </rule>
- </field>
- </method>
- </class>
-
- <!-- == STREAM =========================================================== -->
-
- <class name = "stream" handler = "channel" index = "80" label = "work with streaming content">
- <doc>
- The stream class provides methods that support multimedia streaming. The stream class
- uses the following semantics: one message is one packet of data; delivery is
- unacknowledged and unreliable; the consumer can specify quality of service parameters
- that the server can try to adhere to; lower-priority messages may be discarded in favour
- of high priority messages.
- </doc>
-
- <doc type = "grammar">
- stream = C:QOS S:QOS-OK
- / C:CONSUME S:CONSUME-OK
- / C:CANCEL S:CANCEL-OK
- / C:PUBLISH content
- / S:RETURN
- / S:DELIVER content
- </doc>
-
- <chassis name = "server" implement = "MAY" />
- <chassis name = "client" implement = "MAY" />
-
- <rule name = "01">
- <doc>
- The server SHOULD discard stream messages on a priority basis if the queue size
- exceeds some configured limit.
- </doc>
- </rule>
-
- <rule name = "02">
- <!-- TODO: Rule split? -->
- <doc>
- The server MUST implement at least 2 priority levels for stream messages, where
- priorities 0-4 and 5-9 are treated as two distinct levels. The server MAY implement
- up to 10 priority levels.
- </doc>
- </rule>
-
- <rule name = "03">
- <doc>
- The server MUST implement automatic acknowledgements on stream content. That is, as
- soon as a message is delivered to a client via a Deliver method, the server must
- remove it from the queue.
- </doc>
- </rule>
-
- <!-- These are the properties for a Stream content -->
-
- <field name = "content-type" domain = "shortstr" label = "MIME content type" />
- <field name = "content-encoding" domain = "shortstr" label = "MIME content encoding" />
- <field name = "headers" domain = "table" label = "message header field table" />
- <field name = "priority" domain = "octet" label = "message priority, 0 to 9" />
- <field name = "timestamp" domain = "timestamp" label = "message timestamp" />
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "qos" synchronous = "1" index = "10" label = "specify quality of service">
- <doc>
- This method requests a specific quality of service. The QoS can be specified for the
- current channel or for all channels on the connection. The particular properties and
- semantics of a qos method always depend on the content class semantics. Though the
- qos method could in principle apply to both peers, it is currently meaningful only
- for the server.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
-
- <response name = "qos-ok" />
-
- <field name = "prefetch-size" domain = "long" label = "prefetch window in octets">
- <doc>
- The client can request that messages be sent in advance so that when the client
- finishes processing a message, the following message is already held locally,
- rather than needing to be sent down the channel. Prefetching gives a performance
- improvement. This field specifies the prefetch window size in octets. May be set
- to zero, meaning "no specific limit". Note that other prefetch limits may still
- apply.
- </doc>
- </field>
-
- <field name = "prefetch-count" domain = "short" label = "prefetch window in messages">
- <doc>
- Specifies a prefetch window in terms of whole messages. This field may be used
- in combination with the prefetch-size field; a message will only be sent in
- advance if both prefetch windows (and those at the channel and connection level)
- allow it.
- </doc>
- </field>
-
- <field name = "consume-rate" domain = "long" label = "transfer rate in octets/second">
- <doc>
- Specifies a desired transfer rate in octets per second. This is usually
- determined by the application that uses the streaming data. A value of zero
- means "no limit", i.e. as rapidly as possible.
- </doc>
-
- <rule name = "01">
- <!-- TODO: Rule split? -->
- <doc>
- The server MAY ignore the prefetch values and consume rates, depending on
- the type of stream and the ability of the server to queue and/or replay it.
- The server MAY drop low-priority messages in favour of high-priority
- messages.
- </doc>
- </rule>
- </field>
-
- <field name = "global" domain = "bit" label = "apply to entire connection">
- <doc>
- By default the QoS settings apply to the current channel only. If this field is
- set, they are applied to the entire connection.
- </doc>
- </field>
- </method>
-
- <method name = "qos-ok" synchronous = "1" index = "11" label = "confirm the requested qos">
- <doc>
- This method tells the client that the requested QoS levels could be handled by the
- server. The requested QoS applies to all active consumers until a new QoS is
- defined.
- </doc>
-
- <chassis name = "client" implement = "MUST" />
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "consume" synchronous = "1" index = "20" label = "start a queue consumer">
- <doc>
- This method asks the server to start a "consumer", which is a transient request for
- messages from a specific queue. Consumers last as long as the channel they were
- created on, or until the client cancels them.
- </doc>
-
- <rule name = "01">
- <doc>
- The server SHOULD support at least 16 consumers per queue, unless the queue was
- declared as private, and ideally, impose no limit except as defined by available
- resources.
- </doc>
- </rule>
-
- <rule name = "02">
- <doc>
- Streaming applications SHOULD use different channels to select different
- streaming resolutions. AMQP makes no provision for filtering and/or transforming
- streams except on the basis of priority-based selective delivery of individual
- messages.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
- <response name = "consume-ok" />
-
- <field name = "ticket" domain = "access-ticket">
- <rule name = "01">
- <doc>
- The client MUST provide a valid access ticket giving "read" access rights to
- the realm for the queue.
- </doc>
- </rule>
- </field>
-
- <field name = "queue" domain = "queue-name">
- <doc>
- Specifies the name of the queue to consume from. If the queue name is null,
- it refers to the current queue for the channel, which is the last declared queue.
- </doc>
-
- <rule name = "01">
- <doc>
- If the client did not previously declare a queue, and the queue name in this
- method is empty, the server MUST raise a connection exception with reply
- code 530 (not allowed).
- </doc>
- </rule>
- </field>
-
- <field name = "consumer-tag" domain = "consumer-tag">
- <doc>
- Specifies the identifier for the consumer. The consumer tag is local to a
- connection, so two clients can use the same consumer tags. If this field is
- empty the server will generate a unique tag.
- </doc>
-
- <rule name = "01">
- <!-- TODO: Rule split? -->
- <doc>
- The tag MUST NOT refer to an existing consumer. If the client attempts to
- create two consumers with the same non-empty tag the server MUST raise a
- connection exception with reply code 530 (not allowed).
- </doc>
- </rule>
- </field>
-
- <field name = "no-local" domain = "no-local" />
-
- <field name = "exclusive" domain = "bit" label = "request exclusive access">
- <doc>
- Request exclusive consumer access, meaning only this consumer can access the
- queue.
- </doc>
-
-
- <!-- Rule test name: was "amq_file_00" -->
- <rule name = "01">
- <doc>
- If the server cannot grant exclusive access to the queue when asked -
- because there are other consumers active - it MUST raise a channel exception
- with return code 405 (resource locked).
- </doc>
- </rule>
- </field>
- </method>
-
- <method name = "consume-ok" synchronous = "1" index = "21" label = "confirm a new consumer">
- <doc>
- This method provides the client with a consumer tag which it may use in methods that
- work with the consumer.
- </doc>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "consumer-tag" domain = "consumer-tag">
- <doc>Holds the consumer tag specified by the client or provided by the server.</doc>
- </field>
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "cancel" synchronous = "1" index = "30" label = "end a queue consumer">
- <doc>
- This method cancels a consumer. Since message delivery is asynchronous the client
- may continue to receive messages for a short while after canceling a consumer. It
- may process or discard these as appropriate.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
-
- <response name = "cancel-ok" />
-
- <field name = "consumer-tag" domain = "consumer-tag" />
- </method>
-
- <method name = "cancel-ok" synchronous = "1" index = "31" label = "confirm a cancelled consumer">
- <doc>This method confirms that the cancellation was completed.</doc>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "consumer-tag" domain = "consumer-tag" />
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "publish" content = "1" index = "40" label = "publish a message">
- <doc>
- This method publishes a message to a specific exchange. The message will be routed
- to queues as defined by the exchange configuration and distributed to any active
- consumers as appropriate.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
-
- <field name = "ticket" domain = "access-ticket">
- <rule name = "01">
- <doc>
- The client MUST provide a valid access ticket giving "write" access rights
- to the access realm for the exchange.
- </doc>
- </rule>
- </field>
-
- <field name = "exchange" domain = "exchange-name">
- <doc>
- Specifies the name of the exchange to publish to. The exchange name can be
- empty, meaning the default exchange. If the exchange name is specified, and that
- exchange does not exist, the server will raise a channel exception.
- </doc>
-
- <rule name = "01">
- <doc>
- The server MUST accept a blank exchange name to mean the default exchange.
- </doc>
- </rule>
-
- <rule name = "02">
- <doc>
- If the exchange was declared as an internal exchange, the server MUST
- respond with a reply code 403 (access refused) and raise a channel
- exception.
- </doc>
- </rule>
-
- <rule name = "03">
- <doc>
- The exchange MAY refuse stream content in which case it MUST respond with a
- reply code 540 (not implemented) and raise a channel exception.
- </doc>
- </rule>
- </field>
-
- <field name = "routing-key" domain = "shortstr" label = "Message routing key">
- <doc>
- Specifies the routing key for the message. The routing key is used for routing
- messages depending on the exchange configuration.
- </doc>
- </field>
-
- <field name = "mandatory" domain = "bit" label = "indicate mandatory routing">
- <doc>
- This flag tells the server how to react if the message cannot be routed to a
- queue. If this flag is set, the server will return an unroutable message with a
- Return method. If this flag is zero, the server silently drops the message.
- </doc>
-
- <!-- Rule test name: was "amq_stream_00" -->
- <rule name = "01">
- <doc>The server SHOULD implement the mandatory flag.</doc>
- </rule>
- </field>
-
- <field name = "immediate" domain = "bit" label = "request immediate delivery">
- <doc>
- This flag tells the server how to react if the message cannot be routed to a
- queue consumer immediately. If this flag is set, the server will return an
- undeliverable message with a Return method. If this flag is zero, the server
- will queue the message, but with no guarantee that it will ever be consumed.
- </doc>
-
- <!-- Rule test name: was "amq_stream_00" -->
- <rule name = "01">
- <doc>The server SHOULD implement the immediate flag.</doc>
- </rule>
- </field>
- </method>
-
- <method name = "return" content = "1" index = "50" label = "return a failed message">
- <doc>
- This method returns an undeliverable message that was published with the "immediate"
- flag set, or an unroutable message published with the "mandatory" flag set. The
- reply code and text provide information about the reason that the message was
- undeliverable.
- </doc>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "reply-code" domain = "reply-code" />
-
- <field name = "reply-text" domain = "reply-text" />
-
- <field name = "exchange" domain = "exchange-name">
- <doc>
- Specifies the name of the exchange that the message was originally published to.
- </doc>
- </field>
-
- <field name = "routing-key" domain = "shortstr" label = "Message routing key">
- <doc>Specifies the routing key name specified when the message was published.</doc>
- </field>
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "deliver" content = "1" index = "60"
- label = "notify the client of a consumer message">
- <doc>
- This method delivers a message to the client, via a consumer. In the asynchronous
- message delivery model, the client starts a consumer using the Consume method, then
- the server responds with Deliver methods as and when messages arrive for that
- consumer.
- </doc>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "consumer-tag" domain = "consumer-tag" />
-
- <field name = "delivery-tag" domain = "delivery-tag" />
-
- <field name = "exchange" domain = "exchange-name">
- <doc>
- Specifies the name of the exchange that the message was originally published to.
- </doc>
- </field>
-
- <field name = "queue" domain = "queue-name">
- <doc>
- Specifies the name of the queue that the message came from. Note that a single
- channel can start many consumers on different queues.
- </doc>
- <assert check = "notnull" />
- </field>
- </method>
- </class>
-
- <!-- == TX =============================================================== -->
-
- <class name = "tx" handler = "channel" index = "90" label = "work with standard transactions">
- <doc>
- Standard transactions provide so-called "1.5 phase commit". We can ensure that work is
- never lost, but there is a chance of confirmations being lost, so that messages may be
- resent. Applications that use standard transactions must be able to detect and ignore
- duplicate messages.
- </doc>
-
- <!-- TODO: Rule split? -->
-
- <rule name = "01">
- <doc>
- A client using standard transactions SHOULD be able to track all messages received
- within a reasonable period, and thus detect and reject duplicates of the same
- message. It SHOULD NOT pass these to the application layer.
- </doc>
- </rule>
-
- <doc type = "grammar">
- tx = C:SELECT S:SELECT-OK
- / C:COMMIT S:COMMIT-OK
- / C:ROLLBACK S:ROLLBACK-OK
- </doc>
-
- <chassis name = "server" implement = "SHOULD" />
- <chassis name = "client" implement = "MAY" />
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "select" synchronous = "1" index = "10" label = "select standard transaction mode">
- <doc>
- This method sets the channel to use standard transactions. The client must use this
- method at least once on a channel before using the Commit or Rollback methods.
- </doc>
- <chassis name = "server" implement = "MUST" />
- <response name = "select-ok" />
- </method>
-
- <method name = "select-ok" synchronous = "1" index = "11" label = "confirm transaction mode">
- <doc>
- This method confirms to the client that the channel was successfully set to use
- standard transactions.
- </doc>
- <chassis name = "client" implement = "MUST" />
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "commit" synchronous = "1" index = "20" label = "commit the current transaction">
- <doc>
- This method commits all messages published and acknowledged in the current
- transaction. A new transaction starts immediately after a commit.
- </doc>
- <chassis name = "server" implement = "MUST" />
- <response name = "commit-ok" />
- </method>
-
- <method name = "commit-ok" synchronous = "1" index = "21" label = "confirm a successful commit">
- <doc>
- This method confirms to the client that the commit succeeded. Note that if a commit
- fails, the server raises a channel exception.
- </doc>
- <chassis name = "client" implement = "MUST" />
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "rollback" synchronous = "1" index = "30"
- label = "abandon the current transaction">
- <doc>
- This method abandons all messages published and acknowledged in the current
- transaction. A new transaction starts immediately after a rollback.
- </doc>
- <chassis name = "server" implement = "MUST" />
- <response name = "rollback-ok" />
- </method>
-
- <method name = "rollback-ok" synchronous = "1" index = "31" label = "confirm successful rollback">
- <doc>
- This method confirms to the client that the rollback succeeded. Note that if a
- rollback fails, the server raises a channel exception.
- </doc>
- <chassis name = "client" implement = "MUST" />
- </method>
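-
-    <!-- Illustrative sketch, not part of the specification: the standard transaction
-         flow with the Python pika client (AMQP 0-9-1). The queue name is an assumption.
-
-         channel.tx_select()
-         try:
-             channel.basic_publish(exchange="", routing_key="demo-queue",
-                                   body=b"inside a transaction")
-             channel.tx_commit()
-         except Exception:
-             # abandon the work published in the current transaction
-             channel.tx_rollback()
-             raise
-    -->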
- </class>
-
- <!-- == DTX ============================================================== -->
-
- <class name = "dtx" handler = "channel" index = "100" label = "work with distributed transactions">
- <doc>
- Distributed transactions provide so-called "2-phase commit". The AMQP distributed
- transaction model supports the X-Open XA architecture and other distributed transaction
- implementations. The Dtx class assumes that the server has a private communications
- channel (not AMQP) to a distributed transaction coordinator.
- </doc>
-
- <doc type = "grammar">
- dtx = C:SELECT S:SELECT-OK
- C:START S:START-OK
- </doc>
-
- <chassis name = "server" implement = "MAY" />
- <chassis name = "client" implement = "MAY" />
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "select" synchronous = "1" index = "10" label = "select standard transaction mode">
- <doc>
- This method sets the channel to use distributed transactions. The client must use
- this method at least once on a channel before using the Start method.
- </doc>
- <chassis name = "server" implement = "MUST" />
- <response name = "select-ok" />
- </method>
-
- <method name = "select-ok" synchronous = "1" index = "11" label = "confirm transaction mode">
- <doc>
- This method confirms to the client that the channel was successfully set to use
- distributed transactions.
- </doc>
- <chassis name = "client" implement = "MUST" />
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "start" synchronous = "1" index = "20"
- label = "start a new distributed transaction">
- <doc>
- This method starts a new distributed transaction. This must be the first method on a
- new channel that uses the distributed transaction mode, before any methods that
- publish or consume messages.
- </doc>
- <chassis name = "server" implement = "MAY" />
- <response name = "start-ok" />
- <field name = "dtx-identifier" domain = "shortstr" label = "transaction identifier">
- <doc>
- The distributed transaction key. This identifies the transaction so that the
- AMQP server can coordinate with the distributed transaction coordinator.
- </doc>
- <assert check = "notnull" />
- </field>
- </method>
-
- <method name = "start-ok" synchronous = "1" index = "21"
- label = "confirm the start of a new distributed transaction">
- <doc>
- This method confirms to the client that the transaction started. Note that if a
- start fails, the server raises a channel exception.
- </doc>
- <chassis name = "client" implement = "MUST" />
- </method>
- </class>
-
- <!-- == TUNNEL =========================================================== -->
-
- <class name = "tunnel" handler = "tunnel" index = "110" label = "methods for protocol tunneling">
- <doc>
- The tunnel methods are used to send blocks of binary data - which can be serialised AMQP
- methods or other protocol frames - between AMQP peers.
- </doc>
-
- <doc type = "grammar">
- tunnel = C:REQUEST
- / S:REQUEST
- </doc>
-
- <chassis name = "server" implement = "MAY" />
- <chassis name = "client" implement = "MAY" />
-
- <field name = "headers" domain = "table" label = "message header field table" />
- <field name = "proxy-name" domain = "shortstr" label = "identity of tunnelling proxy" />
- <field name = "data-name" domain = "shortstr" label = "name or type of message being tunnelled" />
- <field name = "durable" domain = "octet" label = "message durability indicator" />
- <field name = "broadcast" domain = "octet" label = "message broadcast mode" />
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "request" content = "1" index = "10" label = "sends a tunnelled method">
- <doc>
- This method tunnels a block of binary data, which can be an encoded
- AMQP method or other data. The binary data is sent as the content for
- the Tunnel.Request method.
- </doc>
- <chassis name = "server" implement = "MUST" />
- <field name = "meta-data" domain = "table" label = "meta data for the tunnelled block">
- <doc>
- This field table holds arbitrary meta-data that the sender needs to
- pass to the recipient.
- </doc>
- </field>
- </method>
- </class>
-</amqp>
diff --git a/qpid/gentools/xml-src/amqp-0.8.test.xml b/qpid/gentools/xml-src/amqp-0.8.test.xml
deleted file mode 100644
index b0adf31828..0000000000
--- a/qpid/gentools/xml-src/amqp-0.8.test.xml
+++ /dev/null
@@ -1,3959 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
- Copyright Notice
- ================
- (c) Copyright JPMorgan Chase Bank & Co., Cisco Systems, Inc., Envoy Technologies Inc.,
- iMatix Corporation, IONA Technologies, Red Hat, Inc.,
- TWIST Process Innovations, and 29West Inc. 2006. All rights reserved.
-
- License
- =======
- JPMorgan Chase Bank & Co., Cisco Systems, Inc., Envoy Technologies Inc., iMatix
- Corporation, IONA Technologies, Red Hat, Inc., TWIST Process Innovations, and
- 29West Inc. (collectively, the "Authors") each hereby grants to you a worldwide,
- perpetual, royalty-free, nontransferable, nonexclusive license to
- (i) copy, display, and implement the Advanced Messaging Queue Protocol
- ("AMQP") Specification and (ii) the Licensed Claims that are held by
- the Authors, all for the purpose of implementing the Advanced Messaging
- Queue Protocol Specification. Your license and any rights under this
- Agreement will terminate immediately without notice from
- any Author if you bring any claim, suit, demand, or action related to
- the Advanced Messaging Queue Protocol Specification against any Author.
- Upon termination, you shall destroy all copies of the Advanced Messaging
- Queue Protocol Specification in your possession or control.
-
- As used hereunder, "Licensed Claims" means those claims of a patent or
- patent application, throughout the world, excluding design patents and
- design registrations, owned or controlled, or that can be sublicensed
- without fee and in compliance with the requirements of this
- Agreement, by an Author or its affiliates now or at any
- future time and which would necessarily be infringed by implementation
- of the Advanced Messaging Queue Protocol Specification. A claim is
- necessarily infringed hereunder only when it is not possible to avoid
- infringing it because there is no plausible non-infringing alternative
- for implementing the required portions of the Advanced Messaging Queue
- Protocol Specification. Notwithstanding the foregoing, Licensed Claims
- shall not include any claims other than as set forth above even if
- contained in the same patent as Licensed Claims; or that read solely
- on any implementations of any portion of the Advanced Messaging Queue
- Protocol Specification that are not required by the Advanced Messaging
- Queue Protocol Specification, or that, if licensed, would require a
- payment of royalties by the licensor to unaffiliated third parties.
- Moreover, Licensed Claims shall not include (i) any enabling technologies
- that may be necessary to make or use any Licensed Product but are not
- themselves expressly set forth in the Advanced Messaging Queue Protocol
- Specification (e.g., semiconductor manufacturing technology, compiler
- technology, object oriented technology, networking technology, operating
- system technology, and the like); or (ii) the implementation of other
- published standards developed elsewhere and merely referred to in the
- body of the Advanced Messaging Queue Protocol Specification, or
- (iii) any Licensed Product and any combinations thereof the purpose or
- function of which is not required for compliance with the Advanced
- Messaging Queue Protocol Specification. For purposes of this definition,
- the Advanced Messaging Queue Protocol Specification shall be deemed to
- include both architectural and interconnection requirements essential
- for interoperability and may also include supporting source code artifacts
- where such architectural, interconnection requirements and source code
- artifacts are expressly identified as being required or documentation to
- achieve compliance with the Advanced Messaging Queue Protocol Specification.
-
- As used hereunder, "Licensed Products" means only those specific portions
- of products (hardware, software or combinations thereof) that implement
- and are compliant with all relevant portions of the Advanced Messaging
- Queue Protocol Specification.
-
- The following disclaimers, which you hereby also acknowledge as to any
- use you may make of the Advanced Messaging Queue Protocol Specification:
-
- THE ADVANCED MESSAGING QUEUE PROTOCOL SPECIFICATION IS PROVIDED "AS IS,"
- AND THE AUTHORS MAKE NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
- IMPLIED, INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, OR TITLE; THAT THE
- CONTENTS OF THE ADVANCED MESSAGING QUEUE PROTOCOL SPECIFICATION ARE
- SUITABLE FOR ANY PURPOSE; NOR THAT THE IMPLEMENTATION OF THE ADVANCED
- MESSAGING QUEUE PROTOCOL SPECIFICATION WILL NOT INFRINGE ANY THIRD PARTY
- PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS.
-
- THE AUTHORS WILL NOT BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL,
- INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR RELATING TO ANY
- USE, IMPLEMENTATION OR DISTRIBUTION OF THE ADVANCED MESSAGING QUEUE
- PROTOCOL SPECIFICATION.
-
- The name and trademarks of the Authors may NOT be used in any manner,
- including advertising or publicity pertaining to the Advanced Messaging
- Queue Protocol Specification or its contents without specific, written
- prior permission. Title to copyright in the Advanced Messaging Queue
- Protocol Specification will at all times remain with the Authors.
-
- No other rights are granted by implication, estoppel or otherwise.
-
- Upon termination of your license or rights under this Agreement, you
- shall destroy all copies of the Advanced Messaging Queue Protocol
- Specification in your possession or control.
-
- Trademarks
- ==========
- "JPMorgan", "JPMorgan Chase", "Chase", the JPMorgan Chase logo and the
- Octagon Symbol are trademarks of JPMorgan Chase & Co.
-
- IMATIX and the iMatix logo are trademarks of iMatix Corporation sprl.
-
- IONA, IONA Technologies, and the IONA logos are trademarks of IONA
- Technologies PLC and/or its subsidiaries.
-
- LINUX is a trademark of Linus Torvalds. RED HAT and JBOSS are registered
- trademarks of Red Hat, Inc. in the US and other countries.
-
- Java, all Java-based trademarks and OpenOffice.org are trademarks of
- Sun Microsystems, Inc. in the United States, other countries, or both.
-
- Other company, product, or service names may be trademarks or service
- marks of others.
-
- Links to full AMQP specification:
- =================================
- http://www.envoytech.org/spec/amq/
- http://www.iona.com/opensource/amqp/
- http://www.redhat.com/solutions/specifications/amqp/
- http://www.twiststandards.org/tiki-index.php?page=AMQ
- http://www.imatix.com/amqp
-
--->
-
-<!--
-========================================================
-EDITORS: (PH) Pieter Hintjens <ph@imatix.com>
- (KvdR) Kim van der Riet <kim.vdriet@redhat.com>
-
-NOTE: These editors have been assigned by the AMQP working group. Please do not
-edit/commit this file without consulting with one of the above editors.
-========================================================
-
-Revision history:
- 2006-06-07 (PH) - version number changed to 0.8 to conform to public
- release documentation.
-
- 2006-05-15 (PH) - fixed comments on queue name in basic.get to clarify
- use of current queue in this method.
-
- 2006-05-15 (PH) - fixed comments on routing key in queue.bind to clarify
- how routing key is filled when empty (to allow asynch queue.declare).
-
- 2006-05-11 (PH) - reset version to 0.70 so that putative standards
- group can release 2-3 major new versions before hitting 1.0 (again).
-
- 2006-05-11 (PH) - TODO in documentation: cycle field in frame header
- has been removed.
-
- 2006-05-11 (PH) - added nowait option to exchange.declare, delete,
- queue.declare, delete, bind, purge, basic.consume, cancel,
- file.consume, cancel, stream.consume and cancel methods.
-
- 2006-05-11 (PH) - removed notnull rule and added explanations on queue
- name in queue.bind, purge, delete, basic.consume, cancel, file.consume,
- cancel, stream.consume and cancel methods.
-
- 2006-05-11 (PH) - added basic.qos, file.qos, and stream.qos methods that
- regroup all prefetch options from the consume methods. Also removed the
- prefetch option from channel.open.
-
- 2006-05-11 (PH) - renumbered method indexes to show request-response
- nature of methods; requests are 10, 20, 30 while responses are 11, 21,
- etc.
-
- 2006-05-11 (PH) - removed OpenAMQ extension methods from this definition
- since these are maintained separately.
-
- 2006-05-26 (RG) - added Basic.Recover method to allow replay of
- unacknowledged messages on a channel.
-
- 2006-07-03 (PH) - cosmetic clean-up of Basic.Recover comments.
--->
-
-<amqp major="8" minor="0" port="5672" comment="AMQ protocol 0.80">
- AMQ Protocol 0.80
-
-<!--
-======================================================
-== CONSTANTS
-======================================================
--->
- <constant name="frame method" value="1"/>
- <constant name="frame header" value="2"/>
- <constant name="frame body" value="3"/>
- <constant name="frame oob method" value="4"/>
- <constant name="frame oob header" value="5"/>
- <constant name="frame oob body" value="6"/>
- <constant name="frame trace" value="7"/>
- <constant name="frame heartbeat" value="8"/>
- <constant name="frame min size" value="4096"/>
- <constant name="frame end" value="206"/>
- <constant name="reply success" value="200">
- Indicates that the method completed successfully. This reply code is
- reserved for future use - the current protocol design does not use
- positive confirmation and reply codes are sent only in case of an
- error.
-</constant>
- <constant name="not delivered" value="310" class="soft error">
- The client asked for a specific message that is no longer available.
- The message was delivered to another client, or was purged from the
- queue for some other reason.
-</constant>
- <constant name="content too large" value="311" class="soft error">
- The client attempted to transfer content larger than the server
- could accept at the present time. The client may retry at a later
- time.
-</constant>
- <constant name="connection forced" value="320" class="hard error">
- An operator intervened to close the connection for some reason.
- The client may retry at some later date.
-</constant>
- <constant name="invalid path" value="402" class="hard error">
- The client tried to work with an unknown virtual host or cluster.
-</constant>
- <constant name="access refused" value="403" class="soft error">
- The client attempted to work with a server entity to which it has
- no access due to security settings.
-</constant>
- <constant name="not found" value="404" class="soft error">
- The client attempted to work with a server entity that does not exist.
-</constant>
- <constant name="resource locked" value="405" class="soft error">
- The client attempted to work with a server entity to which it has
- no access because another client is working with it.
-</constant>
- <constant name="frame error" value="501" class="hard error">
- The client sent a malformed frame that the server could not decode.
- This strongly implies a programming error in the client.
-</constant>
- <constant name="syntax error" value="502" class="hard error">
- The client sent a frame that contained illegal values for one or more
- fields. This strongly implies a programming error in the client.
-</constant>
- <constant name="command invalid" value="503" class="hard error">
- The client sent an invalid sequence of frames, attempting to perform
- an operation that was considered invalid by the server. This usually
- implies a programming error in the client.
-</constant>
- <constant name="channel error" value="504" class="hard error">
- The client attempted to work with a channel that had not been
- correctly opened. This most likely indicates a fault in the client
- layer.
-</constant>
- <constant name="resource error" value="506" class="hard error">
- The server could not complete the method because it lacked sufficient
- resources. This may be due to the client creating too many of some
- type of entity.
-</constant>
- <constant name="not allowed" value="530" class="hard error">
- The client tried to work with some entity in a manner that is
- prohibited by the server, due to security settings or by some other
- criteria.
-</constant>
- <constant name="not implemented" value="540" class="hard error">
- The client tried to use functionality that is not implemented in the
- server.
-</constant>
- <constant name="internal error" value="541" class="hard error">
- The server could not complete the method because of an internal error.
- The server may require intervention by an operator in order to resume
- normal operations.
-</constant>
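The reply codes above split into soft errors, which close the channel, and hard errors, which close the whole connection. A small Python lookup table built directly from that list may make the distinction easier to scan; the dictionary and helper names are illustrative only:

    # Reply codes and severity classes taken from the constants above.
    REPLY_CODES = {
        200: ("reply-success", None),
        310: ("not-delivered", "soft"),
        311: ("content-too-large", "soft"),
        320: ("connection-forced", "hard"),
        402: ("invalid-path", "hard"),
        403: ("access-refused", "soft"),
        404: ("not-found", "soft"),
        405: ("resource-locked", "soft"),
        501: ("frame-error", "hard"),
        502: ("syntax-error", "hard"),
        503: ("command-invalid", "hard"),
        504: ("channel-error", "hard"),
        506: ("resource-error", "hard"),
        530: ("not-allowed", "hard"),
        540: ("not-implemented", "hard"),
        541: ("internal-error", "hard"),
    }

    def is_hard_error(reply_code):
        """Hard errors end the connection; soft errors end only the channel."""
        return REPLY_CODES.get(reply_code, ("unknown", "hard"))[1] == "hard"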
- <!--
-======================================================
-== DOMAIN TYPES
-======================================================
--->
- <domain name="access ticket" type="short">
- access ticket granted by server
- <doc>
- An access ticket granted by the server for a certain set of access
- rights within a specific realm. Access tickets are valid within the
- channel where they were created, and expire when the channel closes.
- </doc>
- <assert check="ne" value="0"/>
- </domain>
- <domain name="class id" type="short"/>
- <domain name="consumer tag" type="shortstr">
- consumer tag
- <doc>
- Identifier for the consumer, valid within the current connection.
- </doc>
- <rule implement="MUST">
- The consumer tag is valid only within the channel from which the
- consumer was created. I.e. a client MUST NOT create a consumer in
- one channel and then use it in another.
- </rule>
- </domain>
- <domain name="delivery tag" type="longlong">
- server-assigned delivery tag
- <doc>
- The server-assigned and channel-specific delivery tag
- </doc>
- <rule implement="MUST">
- The delivery tag is valid only within the channel from which the
- message was received. I.e. a client MUST NOT receive a message on
- one channel and then acknowledge it on another.
- </rule>
- <rule implement="MUST">
- The server MUST NOT use a zero value for delivery tags. Zero is
- reserved for client use, meaning "all messages so far received".
- </rule>
- </domain>
- <domain name="exchange name" type="shortstr">
- exchange name
- <doc>
- The exchange name is a client-selected string that identifies
- the exchange for publish methods. Exchange names may consist
- of any mixture of digits, letters, and underscores. Exchange
- names are scoped by the virtual host.
- </doc>
- <assert check="length" value="127"/>
- </domain>
- <domain name="known hosts" type="shortstr">
-list of known hosts
-<doc>
-Specifies the list of equivalent or alternative hosts that the server
-knows about, which will normally include the current server itself.
-Clients can cache this information and use it when reconnecting to a
-server after a failure.
-</doc>
- <rule implement="MAY">
-The server MAY leave this field empty if it knows of no other
-hosts than itself.
-</rule>
- </domain>
- <domain name="method id" type="short"/>
- <domain name="no ack" type="bit">
- no acknowledgement needed
- <doc>
- If this field is set the server does not expect acknowledgments
- for messages. That is, when a message is delivered to the client
- the server automatically and silently acknowledges it on behalf
- of the client. This functionality increases performance but at
- the cost of reliability. Messages can get lost if a client dies
- before it can deliver them to the application.
- </doc>
- </domain>
- <domain name="no local" type="bit">
- do not deliver own messages
- <doc>
- If the no-local field is set the server will not send messages to
- the client that published them.
- </doc>
- </domain>
- <domain name="path" type="shortstr">
- <doc>
- Must start with a slash "/" and continue with path names
- separated by slashes. A path name consists of any combination
- of at least one of [A-Za-z0-9] plus zero or more of [.-_+!=:].
-</doc>
- <assert check="notnull"/>
- <assert check="syntax" rule="path"/>
- <assert check="length" value="127"/>
- </domain>
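The path domain combines three checks: not null, a syntax rule, and a 127-octet length limit. Below is a hedged Python approximation of the prose rule; the regular expression is an interpretation of the wording, not a normative grammar:

    import re

    # Starts with "/", path names separated by "/", each name containing at least
    # one of [A-Za-z0-9] plus zero or more of [.-_+!=:], 127 octets at most.
    _PATH_NAME = r"[A-Za-z0-9.\-_+!=:]*[A-Za-z0-9][A-Za-z0-9.\-_+!=:]*"
    _PATH_RE = re.compile(r"^(/%s)+$" % _PATH_NAME)

    def is_valid_path(value):
        return (bool(value)
                and len(value.encode("utf-8")) <= 127
                and _PATH_RE.match(value) is not None)

For example, is_valid_path("/data") and is_valid_path("/data/sub-realm") hold, while is_valid_path("") and is_valid_path("data") do not.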
- <domain name="peer properties" type="table">
- <doc>
-This table provides a set of peer properties, used for
-identification, debugging, and general information.
-</doc>
- <rule implement="SHOULD">
-The properties SHOULD contain these fields:
-"product", giving the name of the peer product, "version", giving
-the name of the peer version, "platform", giving the name of the
-operating system, "copyright", if appropriate, and "information",
-giving other general information.
-</rule>
- </domain>
- <domain name="queue name" type="shortstr">
- queue name
- <doc>
- The queue name identifies the queue within the vhost. Queue
- names may consist of any mixture of digits, letters, and
- underscores.
- </doc>
- <assert check="length" value="127"/>
- </domain>
- <domain name="redelivered" type="bit">
- message is being redelivered
- <doc>
- This indicates that the message has been previously delivered to
- this or another client.
- </doc>
- <rule implement="SHOULD">
- The server SHOULD try to signal redelivered messages when it can.
- When redelivering a message that was not successfully acknowledged,
- the server SHOULD deliver it to the original client if possible.
- </rule>
- <rule implement="MUST">
- The client MUST NOT rely on the redelivered field but MUST take it
- as a hint that the message may already have been processed. A
- fully robust client must be able to track duplicate received messages
- on non-transacted and locally-transacted channels.
- </rule>
- </domain>
- <domain name="reply code" type="short">
-reply code from server
-<doc>
- The reply code. The AMQ reply codes are defined in AMQ RFC 011.
-</doc>
- <assert check="notnull"/>
- </domain>
- <domain name="reply text" type="shortstr">
-localised reply text
-<doc>
- The localised reply text. This text can be logged as an aid to
- resolving issues.
-</doc>
- <assert check="notnull"/>
- </domain>
- <class name="connection" handler="connection" index="10">
- <!--
-======================================================
-== CONNECTION
-======================================================
--->
- work with socket connections
-<doc>
- The connection class provides methods for a client to establish a
- network connection to a server, and for both peers to operate the
- connection thereafter.
-</doc>
- <doc name="grammar">
- connection = open-connection *use-connection close-connection
- open-connection = C:protocol-header
- S:START C:START-OK
- *challenge
- S:TUNE C:TUNE-OK
- C:OPEN S:OPEN-OK | S:REDIRECT
- challenge = S:SECURE C:SECURE-OK
- use-connection = *channel
- close-connection = C:CLOSE S:CLOSE-OK
- / S:CLOSE C:CLOSE-OK
-</doc>
- <chassis name="server" implement="MUST"/>
- <chassis name="client" implement="MUST"/>
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="start" synchronous="1" index="10">
- start connection negotiation
- <doc>
- This method starts the connection negotiation process by telling
- the client the protocol version that the server proposes, along
- with a list of security mechanisms which the client can use for
- authentication.
- </doc>
- <rule implement="MUST">
- If the client cannot handle the protocol version suggested by the
- server it MUST close the socket connection.
- </rule>
- <rule implement="MUST">
- The server MUST provide a protocol version that is lower than or
- equal to that requested by the client in the protocol header. If
- the server cannot support the specified protocol it MUST NOT send
- this method, but MUST close the socket connection.
- </rule>
- <chassis name="client" implement="MUST"/>
- <response name="start-ok"/>
- <field name="version major" type="octet">
- protocol major version
- <doc>
- The protocol major version that the server agrees to use, which
- cannot be higher than the client's major version.
- </doc>
- </field>
- <field name="version minor" type="octet">
- protocol minor version
- <doc>
- The protocol minor version that the server agrees to use, which
- cannot be higher than the client's minor version.
- </doc>
- </field>
- <field name="server properties" domain="peer properties">
- server properties
- </field>
- <field name="mechanisms" type="longstr">
- available security mechanisms
- <doc>
- A list of the security mechanisms that the server supports, delimited
- by spaces. Currently ASL supports these mechanisms: PLAIN.
- </doc>
- <see name="security mechanisms"/>
- <assert check="notnull"/>
- </field>
- <field name="locales" type="longstr">
- available message locales
- <doc>
- A list of the message locales that the server supports, delimited
- by spaces. The locale defines the language in which the server
- will send reply texts.
- </doc>
- <rule implement="MUST">
- All servers MUST support at least the en_US locale.
- </rule>
- <assert check="notnull"/>
- </field>
- </method>
- <method name="start-ok" synchronous="1" index="11">
- select security mechanism and locale
- <doc>
- This method selects a SASL security mechanism. ASL uses SASL
- (RFC2222) to negotiate authentication and encryption.
- </doc>
- <chassis name="server" implement="MUST"/>
- <field name="client properties" domain="peer properties">
- client properties
- </field>
- <field name="mechanism" type="shortstr">
- selected security mechanism
- <doc>
- A single security mechanism selected by the client, which must be
- one of those specified by the server.
- </doc>
- <rule implement="SHOULD">
- The client SHOULD authenticate using the highest-level security
- profile it can handle from the list provided by the server.
- </rule>
- <rule implement="MUST">
- The mechanism field MUST contain one of the security mechanisms
- proposed by the server in the Start method. If it doesn't, the
- server MUST close the socket.
- </rule>
- <assert check="notnull"/>
- </field>
- <field name="response" type="longstr">
- security response data
- <doc>
- A block of opaque data passed to the security mechanism. The contents
- of this data are defined by the SASL security mechanism. For the
- PLAIN security mechanism this is defined as a field table holding
- two fields, LOGIN and PASSWORD.
- </doc>
- <assert check="notnull"/>
- </field>
- <field name="locale" type="shortstr">
- selected message locale
- <doc>
- A single message locale selected by the client, which must be one
- of those specified by the server.
- </doc>
- <assert check="notnull"/>
- </field>
- </method>
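To make the Start/Start-Ok exchange concrete, here is a minimal Python sketch of how a client might assemble the Start-Ok fields from the server's space-delimited mechanism and locale lists. The function name, the "example-client" properties, and the dictionary representation of the PLAIN response are assumptions; the wire-level field-table and longstr encodings are left to the client library:

    def build_start_ok(server_mechanisms, server_locales, username, password,
                       preferred_mechanisms=("PLAIN",), preferred_locale="en_US"):
        """Pick a mutually supported mechanism and locale, then fill the Start-Ok fields."""
        offered = server_mechanisms.split()
        mechanism = next((m for m in preferred_mechanisms if m in offered), None)
        if mechanism is None:
            raise ValueError("no mutually supported security mechanism in %r" % offered)
        locales = server_locales.split()
        locale = preferred_locale if preferred_locale in locales else locales[0]
        # For PLAIN the text above describes the response as a table with LOGIN and PASSWORD.
        response = {"LOGIN": username, "PASSWORD": password}
        client_properties = {"product": "example-client", "version": "0.0"}
        return {"client properties": client_properties,
                "mechanism": mechanism,
                "response": response,
                "locale": locale}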
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="secure" synchronous="1" index="20">
- security mechanism challenge
- <doc>
- The SASL protocol works by exchanging challenges and responses until
- both peers have received sufficient information to authenticate each
- other. This method challenges the client to provide more information.
- </doc>
- <chassis name="client" implement="MUST"/>
- <response name="secure-ok"/>
- <field name="challenge" type="longstr">
- security challenge data
- <doc>
- Challenge information, a block of opaque binary data passed to
- the security mechanism.
- </doc>
- <see name="security mechanisms"/>
- </field>
- </method>
- <method name="secure-ok" synchronous="1" index="21">
- security mechanism response
- <doc>
- This method attempts to authenticate, passing a block of SASL data
- for the security mechanism at the server side.
- </doc>
- <chassis name="server" implement="MUST"/>
- <field name="response" type="longstr">
- security response data
- <doc>
- A block of opaque data passed to the security mechanism. The contents
- of this data are defined by the SASL security mechanism.
- </doc>
- <assert check="notnull"/>
- </field>
- </method>
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="tune" synchronous="1" index="30">
- propose connection tuning parameters
- <doc>
- This method proposes a set of connection configuration values
- to the client. The client can accept and/or adjust these.
- </doc>
- <chassis name="client" implement="MUST"/>
- <response name="tune-ok"/>
- <field name="channel max" type="short">
- proposed maximum channels
- <doc>
- The maximum total number of channels that the server allows
- per connection. Zero means that the server does not impose a
- fixed limit, but the number of allowed channels may be limited
- by available server resources.
- </doc>
- </field>
- <field name="frame max" type="long">
- proposed maximum frame size
- <doc>
- The largest frame size that the server proposes for the
- connection. The client can negotiate a lower value. Zero means
- that the server does not impose any specific limit but may reject
- very large frames if it cannot allocate resources for them.
- </doc>
- <rule implement="MUST">
- Until the frame-max has been negotiated, both peers MUST accept
- frames of up to 4096 octets. The minimum non-zero value for
- the frame-max field is 4096.
- </rule>
- </field>
- <field name="heartbeat" type="short">
- desired heartbeat delay
- <doc>
- The delay, in seconds, of the connection heartbeat that the server
- wants. Zero means the server does not want a heartbeat.
- </doc>
- </field>
- </method>
- <method name="tune-ok" synchronous="1" index="31">
- negotiate connection tuning parameters
- <doc>
- This method sends the client's connection tuning parameters to the
- server. Certain fields are negotiated, others provide capability
- information.
- </doc>
- <chassis name="server" implement="MUST"/>
- <field name="channel max" type="short">
- negotiated maximum channels
- <doc>
- The maximum total number of channels that the client will use
- per connection. May not be higher than the value specified by
- the server.
- </doc>
- <rule implement="MAY">
- The server MAY ignore the channel-max value or MAY use it for
- tuning its resource allocation.
- </rule>
- <assert check="notnull"/>
- <assert check="le" method="tune" field="channel max"/>
- </field>
- <field name="frame max" type="long">
- negotiated maximum frame size
- <doc>
- The largest frame size that the client and server will use for
- the connection. Zero means that the client does not impose any
- specific limit but may reject very large frames if it cannot
- allocate resources for them. Note that the frame-max limit
- applies principally to content frames, where large contents
- can be broken into frames of arbitrary size.
- </doc>
- <rule implement="MUST">
- Until the frame-max has been negotiated, both peers must accept
- frames of up to 4096 octets. The minimum non-zero value for
- the frame-max field is 4096.
- </rule>
- </field>
- <field name="heartbeat" type="short">
- desired heartbeat delay
- <doc>
- The delay, in seconds, of the connection heartbeat that the client
- wants. Zero means the client does not want a heartbeat.
- </doc>
- </field>
- </method>
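The Tune/Tune-Ok pair is a simple negotiation: zero means "no specific limit", otherwise the smaller of the two proposals wins, and frame-max may never drop below the 4096-octet pre-negotiation minimum. A hedged Python sketch of that logic follows; the field names mirror the XML above and the default client values are arbitrary:

    FRAME_MIN_SIZE = 4096   # both peers must accept frames up to this size until tuning completes

    def negotiate_limit(server_value, client_value):
        """Zero means no specific limit; otherwise the smaller proposal wins."""
        if server_value == 0:
            return client_value
        if client_value == 0:
            return server_value
        return min(server_value, client_value)

    def build_tune_ok(tune, client_channel_max=256, client_frame_max=131072, wanted_heartbeat=0):
        frame_max = negotiate_limit(tune["frame max"], client_frame_max)
        if frame_max != 0 and frame_max < FRAME_MIN_SIZE:
            raise ValueError("frame-max may not be below the 4096-octet minimum")
        return {"channel max": negotiate_limit(tune["channel max"], client_channel_max),
                "frame max": frame_max,
                "heartbeat": wanted_heartbeat}   # the delay this client wants; 0 disables it

For example, build_tune_ok({"channel max": 0, "frame max": 65536, "heartbeat": 120}) yields channel-max 256 and frame-max 65536.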
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="open" synchronous="1" index="40">
- open connection to virtual host
- <doc>
- This method opens a connection to a virtual host, which is a
- collection of resources, and acts to separate multiple application
- domains within a server.
- </doc>
- <rule implement="MUST">
- The client MUST open the context before doing any work on the
- connection.
- </rule>
- <chassis name="server" implement="MUST"/>
- <response name="open-ok"/>
- <response name="redirect"/>
- <field name="virtual host" domain="path">
- virtual host name
- <assert check="regexp" value="^[a-zA-Z0-9/-_]+$"/>
- <doc>
- The name of the virtual host to work with.
- </doc>
- <rule implement="MUST">
- If the server supports multiple virtual hosts, it MUST enforce a
- full separation of exchanges, queues, and all associated entities
- per virtual host. An application, connected to a specific virtual
- host, MUST NOT be able to access resources of another virtual host.
- </rule>
- <rule implement="SHOULD">
- The server SHOULD verify that the client has permission to access
- the specified virtual host.
- </rule>
- <rule implement="MAY">
- The server MAY configure arbitrary limits per virtual host, such
- as the number of each type of entity that may be used, per
- connection and/or in total.
- </rule>
- </field>
- <field name="capabilities" type="shortstr">
- required capabilities
- <doc>
- The client may specify a number of capability names, delimited by
- spaces. The server can use this string to decide how to process the
- client's connection request.
- </doc>
- </field>
- <field name="insist" type="bit">
- insist on connecting to server
- <doc>
- In a configuration with multiple load-sharing servers, the server
- may respond to a Connection.Open method with a Connection.Redirect.
- The insist option tells the server that the client is insisting on
- a connection to the specified server.
- </doc>
- <rule implement="SHOULD">
- When the client uses the insist option, the server SHOULD accept
- the client connection unless it is technically unable to do so.
- </rule>
- </field>
- </method>
- <method name="open-ok" synchronous="1" index="41">
- signal that the connection is ready
- <doc>
- This method signals to the client that the connection is ready for
- use.
- </doc>
- <chassis name="client" implement="MUST"/>
- <field name="known hosts" domain="known hosts"/>
- </method>
- <method name="redirect" synchronous="1" index="50">
- asks the client to use a different server
- <doc>
- This method redirects the client to another server, based on the
- requested virtual host and/or capabilities.
- </doc>
- <rule implement="SHOULD">
- When getting the Connection.Redirect method, the client SHOULD
- reconnect to the host specified, and if that host is not present,
- to any of the hosts specified in the known-hosts list.
- </rule>
- <chassis name="client" implement="MAY"/>
- <field name="host" type="shortstr">
- server to connect to
- <doc>
- Specifies the server to connect to. This is an IP address or a
- DNS name, optionally followed by a colon and a port number. If
- no port number is specified, the client should use the default
- port number for the protocol.
- </doc>
- <assert check="notnull"/>
- </field>
- <field name="known hosts" domain="known hosts"/>
- </method>
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="close" synchronous="1" index="60">
- request a connection close
- <doc>
- This method indicates that the sender wants to close the connection.
- This may be due to internal conditions (e.g. a forced shut-down) or
- due to an error handling a specific method, i.e. an exception. When
- a close is due to an exception, the sender provides the class and
- method id of the method which caused the exception.
- </doc>
- <rule implement="MUST">
- After sending this method any received method except the Close-OK
- method MUST be discarded.
- </rule>
- <rule implement="MAY">
- The peer sending this method MAY use a counter or timeout to
- detect failure of the other peer to respond correctly with
- the Close-OK method.
- </rule>
- <rule implement="MUST">
- When a server receives the Close method from a client it MUST
- delete all server-side resources associated with the client's
- context. A client CANNOT reconnect to a context after sending
- or receiving a Close method.
- </rule>
- <chassis name="client" implement="MUST"/>
- <chassis name="server" implement="MUST"/>
- <response name="close-ok"/>
- <field name="reply code" domain="reply code"/>
- <field name="reply text" domain="reply text"/>
- <field name="class id" domain="class id">
- failing method class
- <doc>
- When the close is provoked by a method exception, this is the
- class of the method.
- </doc>
- </field>
- <field name="method id" domain="class id">
- failing method ID
- <doc>
- When the close is provoked by a method exception, this is the
- ID of the method.
- </doc>
- </field>
- </method>
- <method name="close-ok" synchronous="1" index="61">
- confirm a connection close
- <doc>
- This method confirms a Connection.Close method and tells the
- recipient that it is safe to release resources for the connection
- and close the socket.
- </doc>
- <rule implement="SHOULD">
- A peer that detects a socket closure without having received a
- Close-Ok handshake method SHOULD log the error.
- </rule>
- <chassis name="client" implement="MUST"/>
- <chassis name="server" implement="MUST"/>
- </method>
- </class>
- <class name="channel" handler="channel" index="20">
- <!--
-======================================================
-== CHANNEL
-======================================================
--->
- work with channels
-<doc>
- The channel class provides methods for a client to establish a virtual
- connection - a channel - to a server and for both peers to operate the
- virtual connection thereafter.
-</doc>
- <doc name="grammar">
- channel = open-channel *use-channel close-channel
- open-channel = C:OPEN S:OPEN-OK
- use-channel = C:FLOW S:FLOW-OK
- / S:FLOW C:FLOW-OK
- / S:ALERT
- / functional-class
- close-channel = C:CLOSE S:CLOSE-OK
- / S:CLOSE C:CLOSE-OK
-</doc>
- <chassis name="server" implement="MUST"/>
- <chassis name="client" implement="MUST"/>
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="open" synchronous="1" index="10">
- open a channel for use
- <doc>
- This method opens a virtual connection (a channel).
- </doc>
- <rule implement="MUST">
- This method MUST NOT be called when the channel is already open.
- </rule>
- <chassis name="server" implement="MUST"/>
- <response name="open-ok"/>
- <field name="out of band" type="shortstr">
- out-of-band settings
- <doc>
- Configures out-of-band transfers on this channel. The syntax and
- meaning of this field will be formally defined at a later date.
- </doc>
- <assert check="null"/>
- </field>
- </method>
- <method name="open-ok" synchronous="1" index="11">
- signal that the channel is ready
- <doc>
- This method signals to the client that the channel is ready for use.
- </doc>
- <chassis name="client" implement="MUST"/>
- </method>
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="flow" synchronous="1" index="20">
- enable/disable flow from peer
- <doc>
- This method asks the peer to pause or restart the flow of content
- data. This is a simple flow-control mechanism that a peer can use
- to avoid overflowing its queues or otherwise finding itself receiving
- more messages than it can process. Note that this method is not
- intended for window control. The peer that receives a request to
- stop sending content should finish sending the current content, if
- any, and then wait until it receives a Flow restart method.
- </doc>
- <rule implement="MAY">
- When a new channel is opened, it is active. Some applications
- assume that channels are inactive until started. To emulate this
- behaviour a client MAY open the channel, then pause it.
- </rule>
- <rule implement="SHOULD">
- When sending content data in multiple frames, a peer SHOULD monitor
- the channel for incoming methods and respond to a Channel.Flow as
- rapidly as possible.
- </rule>
- <rule implement="MAY">
- A peer MAY use the Channel.Flow method to throttle incoming content
- data for internal reasons, for example, when exchanging data over a
- slower connection.
- </rule>
- <rule implement="MAY">
- The peer that requests a Channel.Flow method MAY disconnect and/or
- ban a peer that does not respect the request.
- </rule>
- <chassis name="server" implement="MUST"/>
- <chassis name="client" implement="MUST"/>
- <response name="flow-ok"/>
- <field name="active" type="bit">
- start/stop content frames
- <doc>
- If 1, the peer starts sending content frames. If 0, the peer
- stops sending content frames.
- </doc>
- </field>
- </method>
- <method name="flow-ok" index="21">
- confirm a flow method
- <doc>
- Confirms to the peer that a flow command was received and processed.
- </doc>
- <chassis name="server" implement="MUST"/>
- <chassis name="client" implement="MUST"/>
- <field name="active" type="bit">
- current flow setting
- <doc>
- Confirms the setting of the processed flow method: 1 means the
- peer will start sending or continue to send content frames; 0
- means it will not.
- </doc>
- </field>
- </method>
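Channel.Flow is a coarse pause/resume switch rather than a sliding window. A small, illustrative Python helper (names are ad hoc, not from any Qpid API) shows how a publisher might honour it, finishing the content already in flight and then blocking until the peer re-activates the channel:

    import threading

    class ChannelFlow:
        """Tracks the Channel.Flow 'active' flag for one channel."""

        def __init__(self):
            self._active = threading.Event()
            self._active.set()              # a freshly opened channel is active

        def on_flow(self, active):
            """Handle an incoming Channel.Flow; return the value to echo in Flow-Ok."""
            if active:
                self._active.set()
            else:
                self._active.clear()
            return active

        def wait_until_active(self, timeout=None):
            """Publishers call this before starting the next content."""
            return self._active.wait(timeout)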
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="alert" index="30">
- send a non-fatal warning message
- <doc>
- This method allows the server to send a non-fatal warning to the
- client. This is used for methods that are normally asynchronous
- and thus do not have confirmations, and for which the server may
- detect errors that need to be reported. Fatal errors are handled
- as channel or connection exceptions; non-fatal errors are sent
- through this method.
- </doc>
- <chassis name="client" implement="MUST"/>
- <field name="reply code" domain="reply code"/>
- <field name="reply text" domain="reply text"/>
- <field name="details" type="table">
- detailed information for warning
- <doc>
- A set of fields that provide more information about the
- problem. The meaning of these fields is defined on a
- per-reply-code basis (TO BE DEFINED).
- </doc>
- </field>
- </method>
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="close" synchronous="1" index="40">
- request a channel close
- <doc>
- This method indicates that the sender wants to close the channel.
- This may be due to internal conditions (e.g. a forced shut-down) or
- due to an error handling a specific method, i.e. an exception. When
- a close is due to an exception, the sender provides the class and
- method id of the method which caused the exception.
- </doc>
- <rule implement="MUST">
- After sending this method any received method except
- Channel.Close-OK MUST be discarded.
- </rule>
- <rule implement="MAY">
- The peer sending this method MAY use a counter or timeout to detect
- failure of the other peer to respond correctly with Channel.Close-OK.
- </rule>
- <chassis name="client" implement="MUST"/>
- <chassis name="server" implement="MUST"/>
- <response name="close-ok"/>
- <field name="reply code" domain="reply code"/>
- <field name="reply text" domain="reply text"/>
- <field name="class id" domain="class id">
- failing method class
- <doc>
- When the close is provoked by a method exception, this is the
- class of the method.
- </doc>
- </field>
- <field name="method id" domain="method id">
- failing method ID
- <doc>
- When the close is provoked by a method exception, this is the
- ID of the method.
- </doc>
- </field>
- </method>
- <method name="close-ok" synchronous="1" index="41">
- confirm a channel close
- <doc>
- This method confirms a Channel.Close method and tells the recipient
- that it is safe to release resources for the channel and close the
- socket.
- </doc>
- <rule implement="SHOULD">
- A peer that detects a socket closure without having received a
- Channel.Close-Ok handshake method SHOULD log the error.
- </rule>
- <chassis name="client" implement="MUST"/>
- <chassis name="server" implement="MUST"/>
- </method>
- </class>
- <class name="access" handler="connection" index="30">
- <!--
-======================================================
-== ACCESS CONTROL
-======================================================
--->
- work with access tickets
-<doc>
- The protocol controls access to server resources using access tickets.
- A client must explicitly request access tickets before doing work.
- An access ticket grants a client the right to use a specific set of
- resources - called a "realm" - in specific ways.
-</doc>
- <doc name="grammar">
- access = C:REQUEST S:REQUEST-OK
-</doc>
- <chassis name="server" implement="MUST"/>
- <chassis name="client" implement="MUST"/>
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="request" synchronous="1" index="10">
- request an access ticket
- <doc>
- This method requests an access ticket for an access realm.
- The server responds by granting the access ticket. If the
- client does not have access rights to the requested realm
- this causes a connection exception. Access tickets are a
- per-channel resource.
- </doc>
- <rule implement="MUST">
- The realm name MUST start with either "/data" (for application
- resources) or "/admin" (for server administration resources).
- If the realm starts with any other path, the server MUST raise
- a connection exception with reply code 403 (access refused).
- </rule>
- <rule implement="MUST">
- The server MUST implement the /data realm and MAY implement the
- /admin realm. The mapping of resources to realms is not
- defined in the protocol - this is a server-side configuration
- issue.
- </rule>
- <chassis name="server" implement="MUST"/>
- <response name="request-ok"/>
- <field name="realm" domain="path">
- name of requested realm
- <rule implement="MUST">
- If the specified realm is not known to the server, the server
- must raise a channel exception with reply code 402 (invalid
- path).
- </rule>
- </field>
- <field name="exclusive" type="bit">
- request exclusive access
- <doc>
- Request exclusive access to the realm. If the server cannot grant
- this - because there are other active tickets for the realm - it
- raises a channel exception.
- </doc>
- </field>
- <field name="passive" type="bit">
- request passive access
- <doc>
- Request passive access to the specified access realm.
- Passive access lets a client get information about resources in
- the realm but not to make any changes to them.
- </doc>
- </field>
- <field name="active" type="bit">
- request active access
- <doc>
- Request active access to the specified access realm.
- Active access lets a client create and delete resources in
- the realm.
- </doc>
- </field>
- <field name="write" type="bit">
- request write access
- <doc>
- Request write access to the specified access realm. Write access
- lets a client publish messages to all exchanges in the realm.
- </doc>
- </field>
- <field name="read" type="bit">
- request read access
- <doc>
- Request read access to the specified access realm. Read access
- lets a client consume messages from queues in the realm.
- </doc>
- </field>
- </method>
- <method name="request-ok" synchronous="1" index="11">
- grant access to server resources
- <doc>
- This method provides the client with an access ticket. The access
- ticket is valid within the current channel and for the lifespan of
- the channel.
- </doc>
- <rule implement="MUST">
- The client MUST NOT use access tickets except within the same
- channel as originally granted.
- </rule>
- <rule implement="MUST">
- The server MUST isolate access tickets per channel and treat an
- attempt by a client to mix these as a connection exception.
- </rule>
- <chassis name="client" implement="MUST"/>
- <field name="ticket" domain="access ticket"/>
- </method>
- </class>
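The realm rules above reduce to a prefix check on the requested path: realms must live under /data or /admin, anything else earns reply code 403 (access refused), and a realm the server does not know earns 402 (invalid path). A small Python sketch of that check; the function name and the known_realms argument are illustrative assumptions:

    VALID_REALM_PREFIXES = ("/data", "/admin")

    def check_realm(realm, known_realms=("/data",)):
        """Return the reply code suggested by the rules above, or None if the request is acceptable."""
        if not realm.startswith(VALID_REALM_PREFIXES):
            return 403          # access refused: realm outside /data and /admin
        if realm not in known_realms:
            return 402          # invalid path: realm unknown to this server
        return None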
- <class name="exchange" handler="channel" index="40">
- <!--
-======================================================
-== EXCHANGES (or "routers", if you prefer)
-== (Or matchers, plugins, extensions, agents,... Routing is just one of
-== the many fun things an exchange can do.)
-======================================================
--->
- work with exchanges
-<doc>
- Exchanges match and distribute messages across queues. Exchanges can be
- configured in the server or created at runtime.
-</doc>
- <doc name="grammar">
- exchange = C:DECLARE S:DECLARE-OK
- / C:DELETE S:DELETE-OK
-</doc>
- <chassis name="server" implement="MUST"/>
- <chassis name="client" implement="MUST"/>
- <rule implement="MUST">
- <test>amq_exchange_19</test>
- The server MUST implement the direct and fanout exchange types, and
- predeclare the corresponding exchanges named amq.direct and amq.fanout
- in each virtual host. The server MUST also predeclare a direct
- exchange to act as the default exchange for content Publish methods
- and for default queue bindings.
-</rule>
- <rule implement="SHOULD">
- <test>amq_exchange_20</test>
- The server SHOULD implement the topic exchange type, and predeclare
- the corresponding exchange named amq.topic in each virtual host.
-</rule>
- <rule implement="MAY">
- <test>amq_exchange_21</test>
- The server MAY implement the system exchange type, and predeclare the
- corresponding exchanges named amq.system in each virtual host. If the
- client attempts to bind a queue to the system exchange, the server
- MUST raise a connection exception with reply code 530 (not allowed).
-</rule>
- <rule implement="MUST">
- <test>amq_exchange_22</test>
- The default exchange MUST be defined as internal, and be inaccessible
- to the client except by specifying an empty exchange name in a content
- Publish method. That is, the server MUST NOT let clients make explicit
- bindings to this exchange.
-</rule>
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="declare" synchronous="1" index="10">
- declare exchange, create if needed
- <doc>
- This method creates an exchange if it does not already exist, and if the
- exchange exists, verifies that it is of the correct and expected class.
- </doc>
- <rule implement="SHOULD">
- <test>amq_exchange_23</test>
- The server SHOULD support a minimum of 16 exchanges per virtual host
- and ideally, impose no limit except as defined by available resources.
- </rule>
- <chassis name="server" implement="MUST"/>
- <response name="declare-ok"/>
- <field name="ticket" domain="access ticket">
- <doc>
- When a client defines a new exchange, this belongs to the access realm
- of the ticket used. All further work done with that exchange must be
- done with an access ticket for the same realm.
- </doc>
- <rule implement="MUST">
- The client MUST provide a valid access ticket giving "active" access
- to the realm in which the exchange exists or will be created, or
- "passive" access if the if-exists flag is set.
- </rule>
- </field>
- <field name="exchange" domain="exchange name">
- <rule implement="MUST">
- <test>amq_exchange_15</test>
- Exchange names starting with "amq." are reserved for predeclared
- and standardised exchanges. If the client attempts to create an
- exchange starting with "amq.", the server MUST raise a channel
- exception with reply code 403 (access refused).
- </rule>
- <assert check="regexp" value="^[a-zA-Z0-9-_.:]+$"/>
- </field>
- <field name="type" type="shortstr">
- exchange type
- <doc>
- Each exchange belongs to one of a set of exchange types implemented
- by the server. The exchange types define the functionality of the
- exchange - i.e. how messages are routed through it. It is not valid
- or meaningful to attempt to change the type of an existing exchange.
- </doc>
- <rule implement="MUST">
- <test>amq_exchange_16</test>
- If the exchange already exists with a different type, the server
- MUST raise a connection exception with a reply code 530 (not allowed).
- </rule>
- <rule implement="MUST">
- <test>amq_exchange_18</test>
- If the server does not support the requested exchange type it MUST
- raise a connection exception with a reply code 503 (command invalid).
- </rule>
- <assert check="regexp" value="^[a-zA-Z0-9-_.:]+$"/>
- </field>
- <field name="passive" type="bit">
- do not create exchange
- <doc>
- If set, the server will not create the exchange. The client can use
- this to check whether an exchange exists without modifying the server
- state.
- </doc>
- <rule implement="MUST">
- <test>amq_exchange_05</test>
- If set, and the exchange does not already exist, the server MUST
- raise a channel exception with reply code 404 (not found).
- </rule>
- </field>
- <field name="durable" type="bit">
- request a durable exchange
- <doc>
- If set when creating a new exchange, the exchange will be marked as
- durable. Durable exchanges remain active when a server restarts.
- Non-durable exchanges (transient exchanges) are purged if/when a
- server restarts.
- </doc>
- <rule implement="MUST">
- <test>amq_exchange_24</test>
- The server MUST support both durable and transient exchanges.
- </rule>
- <rule implement="MUST">
- The server MUST ignore the durable field if the exchange already
- exists.
- </rule>
- </field>
- <field name="auto delete" type="bit">
- auto-delete when unused
- <doc>
- If set, the exchange is deleted when all queues have finished
- using it.
- </doc>
- <rule implement="SHOULD">
- <test>amq_exchange_02</test>
- The server SHOULD allow for a reasonable delay between the point
- when it determines that an exchange is not being used (or no longer
- used), and the point when it deletes the exchange. At the least it
- must allow a client to create an exchange and then bind a queue to
- it, with a small but non-zero delay between these two actions.
- </rule>
- <rule implement="MUST">
- <test>amq_exchange_25</test>
- The server MUST ignore the auto-delete field if the exchange already
- exists.
- </rule>
- </field>
- <field name="internal" type="bit">
- create internal exchange
- <doc>
- If set, the exchange may not be used directly by publishers, but
- only when bound to other exchanges. Internal exchanges are used to
- construct wiring that is not visible to applications.
- </doc>
- </field>
-
- <field name = "nowait" type = "bit">
- do not send a reply method
- <doc>
- If set, the server will not respond to the method. The client should
- not wait for a reply method. If the server could not complete the
- method it will raise a channel or connection exception.
- </doc>
- </field>
-
- <field name="arguments" type="table">
- arguments for declaration
- <doc>
- A set of arguments for the declaration. The syntax and semantics
- of these arguments depends on the server implementation. This
- field is ignored if passive is 1.
- </doc>
- </field>
- </method>
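The naming rules in Exchange.Declare can be summarised as: at most 127 characters drawn from letters, digits and - _ . :, with the "amq." prefix reserved for predeclared exchanges. Below is a rough Python check of those rules; the function name and the returned strings are illustrative, and the hyphen is placed last in the character class so it is taken literally:

    import re

    EXCHANGE_NAME_RE = re.compile(r"^[a-zA-Z0-9_.:-]+$")

    def validate_exchange_name(name):
        """Return a description of the first naming problem found, or None if the name is acceptable."""
        if not name or len(name) > 127:
            return "exchange names must be 1..127 characters long"
        if not EXCHANGE_NAME_RE.match(name):
            return "exchange names may only use letters, digits and the characters - _ . :"
        if name.startswith("amq."):
            return "names starting with 'amq.' are reserved (reply code 403, access refused)"
        return None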
- <method name="declare-ok" synchronous="1" index="11">
- confirms an exchange declaration
- <doc>
- This method confirms a Declare method and confirms the name of the
- exchange, essential for automatically-named exchanges.
- </doc>
- <chassis name="client" implement="MUST"/>
- </method>
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="delete" synchronous="1" index="20">
- delete an exchange
- <doc>
- This method deletes an exchange. When an exchange is deleted all queue
- bindings on the exchange are cancelled.
- </doc>
- <chassis name="server" implement="MUST"/>
- <response name="delete-ok"/>
- <field name="ticket" domain="access ticket">
- <rule implement="MUST">
- The client MUST provide a valid access ticket giving "active"
- access rights to the exchange's access realm.
- </rule>
- </field>
- <field name="exchange" domain="exchange name">
- <rule implement="MUST">
- <test>amq_exchange_11</test>
- The exchange MUST exist. Attempting to delete a non-existing exchange
- causes a channel exception.
- </rule>
- <assert check="notnull"/>
- </field>
- <field name="if unused" type="bit">
- delete only if unused
- <doc>
- If set, the server will only delete the exchange if it has no queue
- bindings. If the exchange has queue bindings the server does not
- delete it but raises a channel exception instead.
- </doc>
- <rule implement="SHOULD">
- <test>amq_exchange_12</test>
- If set, the server SHOULD delete the exchange but only if it has
- no queue bindings.
- </rule>
- <rule implement="SHOULD">
- <test>amq_exchange_13</test>
- If set, the server SHOULD raise a channel exception if the exchange is in
- use.
- </rule>
- </field>
-
- <field name = "nowait" type = "bit">
- do not send a reply method
- <doc>
- If set, the server will not respond to the method. The client should
- not wait for a reply method. If the server could not complete the
- method it will raise a channel or connection exception.
- </doc>
- </field>
-
- </method>
- <method name="delete-ok" synchronous="1" index="21">
- confirm deletion of an exchange
- <doc>
- This method confirms the deletion of an exchange.
- </doc>
- <chassis name="client" implement="MUST"/>
- </method>
- </class>
- <class name="queue" handler="channel" index="50">
- <!--
-======================================================
-== QUEUES
-======================================================
--->
- work with queues
-
-<doc>
- Queues store and forward messages. Queues can be configured in the server
- or created at runtime. Queues must be attached to at least one exchange
- in order to receive messages from publishers.
-</doc>
- <doc name="grammar">
- queue = C:DECLARE S:DECLARE-OK
- / C:BIND S:BIND-OK
- / C:PURGE S:PURGE-OK
- / C:DELETE S:DELETE-OK
-</doc>
- <chassis name="server" implement="MUST"/>
- <chassis name="client" implement="MUST"/>
- <rule implement="MUST">
- <test>amq_queue_33</test>
- A server MUST allow any content class to be sent to any queue, in any
- mix, and queue and deliver these content classes independently. Note
- that all methods that fetch content off queues are specific to a given
- content class.
-</rule>
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="declare" synchronous="1" index="10">
- declare queue, create if needed
- <doc>
- This method creates or checks a queue. When creating a new queue
- the client can specify various properties that control the durability
- of the queue and its contents, and the level of sharing for the queue.
- </doc>
- <rule implement="MUST">
- <test>amq_queue_34</test>
- The server MUST create a default binding for a newly-created queue
- to the default exchange, which is an exchange of type 'direct'.
- </rule>
- <rule implement="SHOULD">
- <test>amq_queue_35</test>
- The server SHOULD support a minimum of 256 queues per virtual host
- and ideally, impose no limit except as defined by available resources.
- </rule>
- <chassis name="server" implement="MUST"/>
- <response name="declare-ok"/>
- <field name="ticket" domain="access ticket">
- <doc>
- When a client defines a new queue, this belongs to the access realm
- of the ticket used. All further work done with that queue must be
- done with an access ticket for the same realm.
- </doc>
- <doc>
- The client provides a valid access ticket giving "active" access
- to the realm in which the queue exists or will be created, or
- "passive" access if the if-exists flag is set.
- </doc>
- </field>
- <field name="queue" domain="queue name">
- <rule implement="MAY">
- <test>amq_queue_10</test>
- The queue name MAY be empty, in which case the server MUST create
- a new queue with a unique generated name and return this to the
- client in the Declare-Ok method.
- </rule>
- <rule implement="MUST">
- <test>amq_queue_32</test>
- Queue names starting with "amq." are reserved for predeclared and
- standardised server queues. If the queue name starts with "amq."
- and the passive option is zero, the server MUST raise a connection
- exception with reply code 403 (access refused).
- </rule>
- <assert check="regexp" value="^[a-zA-Z0-9-_.:]*$"/>
- </field>
- <field name="passive" type="bit">
- do not create queue
- <doc>
- If set, the server will not create the queue. The client can use
- this to check whether a queue exists without modifying the server
- state.
- </doc>
- <rule implement="MUST">
- <test>amq_queue_05</test>
- If set, and the queue does not already exist, the server MUST
- respond with a reply code 404 (not found) and raise a channel
- exception.
- </rule>
- </field>
- <field name="durable" type="bit">
- request a durable queue
- <doc>
- If set when creating a new queue, the queue will be marked as
- durable. Durable queues remain active when a server restarts.
- Non-durable queues (transient queues) are purged if/when a
- server restarts. Note that durable queues do not necessarily
- hold persistent messages, although it does not make sense to
- send persistent messages to a transient queue.
- </doc>
- <rule implement="MUST">
- <test>amq_queue_03</test>
- The server MUST recreate the durable queue after a restart.
- </rule>
- <rule implement="MUST">
- <test>amq_queue_36</test>
- The server MUST support both durable and transient queues.
- </rule>
- <rule implement="MUST">
- <test>amq_queue_37</test>
- The server MUST ignore the durable field if the queue already
- exists.
- </rule>
- </field>
- <field name="exclusive" type="bit">
- request an exclusive queue
- <doc>
- Exclusive queues may only be consumed from by the current connection.
- Setting the 'exclusive' flag always implies 'auto-delete'.
- </doc>
- <rule implement="MUST">
- <test>amq_queue_38</test>
- The server MUST support both exclusive (private) and non-exclusive
- (shared) queues.
- </rule>
- <rule implement="MUST">
- <test>amq_queue_04</test>
- The server MUST raise a channel exception if 'exclusive' is specified
- and the queue already exists and is owned by a different connection.
- </rule>
- </field>
- <field name="auto delete" type="bit">
- auto-delete queue when unused
- <doc>
- If set, the queue is deleted when all consumers have finished
- using it. The last consumer can be cancelled either explicitly or
- because its channel is closed. If there was never a consumer on the
- queue, it will not be deleted.
- </doc>
- <rule implement="SHOULD">
- <test>amq_queue_02</test>
- The server SHOULD allow for a reasonable delay between the point
- when it determines that a queue is not being used (or no longer
- used), and the point when it deletes the queue. At the least it
- must allow a client to create a queue and then create a consumer
- to read from it, with a small but non-zero delay between these
- two actions. The server should equally allow for clients that may
- be disconnected prematurely, and wish to re-consume from the same
- queue without losing messages. We would recommend a configurable
- timeout, with a suitable default value being one minute.
- </rule>
- <rule implement="MUST">
- <test>amq_queue_31</test>
- The server MUST ignore the auto-delete field if the queue already
- exists.
- </rule>
- </field>
- <field name = "nowait" type = "bit">
- do not send a reply method
- <doc>
- If set, the server will not respond to the method. The client should
- not wait for a reply method. If the server could not complete the
- method it will raise a channel or connection exception.
- </doc>
- </field>
-
- <field name="arguments" type="table">
- arguments for declaration
- <doc>
- A set of arguments for the declaration. The syntax and semantics
- of these arguments depends on the server implementation. This
- field is ignored if passive is 1.
- </doc>
- </field>
- </method>
- <method name="declare-ok" synchronous="1" index="11">
- confirms a queue definition
- <doc>
- This method confirms a Declare method and confirms the name of the
- queue, essential for automatically-named queues.
- </doc>
- <chassis name="client" implement="MUST"/>
- <field name="queue" domain="queue name">
- <doc>
- Reports the name of the queue. If the server generated a queue
- name, this field contains that name.
- </doc>
- <assert check="notnull"/>
- </field>
- <field name="message count" type="long">
- number of messages in queue
- <doc>
- Reports the number of messages in the queue, which will be zero
- for newly-created queues.
- </doc>
- </field>
- <field name="consumer count" type="long">
- number of consumers
- <doc>
- Reports the number of active consumers for the queue. Note that
- consumers can suspend activity (Channel.Flow) in which case they
- do not appear in this count.
- </doc>
- </field>
- </method>
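
  [Editor's illustration, not part of the specification: the passive, durable,
  exclusive, auto-delete and nowait flags above, and the counters in Declare-Ok,
  map directly onto the queue-declare call of modern AMQP client libraries. A
  minimal sketch using the Python pika client (an AMQP 0-9-1 library, so the
  access-ticket field has no counterpart; the broker address and the queue name
  "orders" are assumptions):

      import pika  # third-party AMQP client, assumed installed and broker running locally

      channel = pika.BlockingConnection(pika.ConnectionParameters("localhost")).channel()

      # Declare (or, with passive=True, merely check) a durable queue.
      result = channel.queue_declare(queue="orders", durable=True,
                                     exclusive=False, auto_delete=False)

      # Declare-Ok carries the (possibly server-generated) queue name and the
      # message/consumer counters described above.
      ok = result.method
      print(ok.queue, ok.message_count, ok.consumer_count)
  ]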
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="bind" synchronous="1" index="20">
- bind queue to an exchange
- <doc>
- This method binds a queue to an exchange. Until a queue is
- bound it will not receive any messages. In a classic messaging
- model, store-and-forward queues are bound to a dest exchange
- and subscription queues are bound to a dest_wild exchange.
- </doc>
- <rule implement="MUST">
- <test>amq_queue_25</test>
-          A server MUST ignore duplicate bindings - that is, two or more
-          bind methods for a specific queue, with identical arguments -
-          without treating these as an error.
- </rule>
- <rule implement="MUST">
- <test>amq_queue_39</test>
- If a bind fails, the server MUST raise a connection exception.
- </rule>
- <rule implement="MUST">
- <test>amq_queue_12</test>
- The server MUST NOT allow a durable queue to bind to a transient
- exchange. If the client attempts this the server MUST raise a
- channel exception.
- </rule>
- <rule implement="SHOULD">
- <test>amq_queue_13</test>
- Bindings for durable queues are automatically durable and the
- server SHOULD restore such bindings after a server restart.
- </rule>
- <rule implement="MUST">
- <test>amq_queue_17</test>
-          If the client attempts to bind to an exchange that was declared as internal,
- the server MUST raise a connection exception with reply code 530
- (not allowed).
- </rule>
- <rule implement="SHOULD">
- <test>amq_queue_40</test>
- The server SHOULD support at least 4 bindings per queue, and
- ideally, impose no limit except as defined by available resources.
- </rule>
- <chassis name="server" implement="MUST"/>
- <response name="bind-ok"/>
- <field name="ticket" domain="access ticket">
- <doc>
- The client provides a valid access ticket giving "active"
- access rights to the queue's access realm.
- </doc>
- </field>
-
- <field name = "queue" domain = "queue name">
- <doc>
- Specifies the name of the queue to bind. If the queue name is
- empty, refers to the current queue for the channel, which is
- the last declared queue.
- </doc>
- <doc name = "rule">
- If the client did not previously declare a queue, and the queue
- name in this method is empty, the server MUST raise a connection
- exception with reply code 530 (not allowed).
- </doc>
- <doc name = "rule" test = "amq_queue_26">
- If the queue does not exist the server MUST raise a channel exception
- with reply code 404 (not found).
- </doc>
- </field>
-
- <field name="exchange" domain="exchange name">
- The name of the exchange to bind to.
- <rule implement="MUST">
- <test>amq_queue_14</test>
- If the exchange does not exist the server MUST raise a channel
- exception with reply code 404 (not found).
- </rule>
- </field>
- <field name="routing key" type="shortstr">
- message routing key
- <doc>
- Specifies the routing key for the binding. The routing key is
- used for routing messages depending on the exchange configuration.
- Not all exchanges use a routing key - refer to the specific
- exchange documentation. If the routing key is empty and the queue
- name is empty, the routing key will be the current queue for the
- channel, which is the last declared queue.
- </doc>
- </field>
-
- <field name = "nowait" type = "bit">
- do not send a reply method
- <doc>
- If set, the server will not respond to the method. The client should
- not wait for a reply method. If the server could not complete the
- method it will raise a channel or connection exception.
- </doc>
- </field>
-
- <field name="arguments" type="table">
- arguments for binding
- <doc>
- A set of arguments for the binding. The syntax and semantics of
- these arguments depends on the exchange class.
- </doc>
- </field>
- </method>
- <method name="bind-ok" synchronous="1" index="21">
- confirm bind successful
- <doc>
- This method confirms that the bind was successful.
- </doc>
- <chassis name="client" implement="MUST"/>
- </method>
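
  [Editor's illustration, not part of the specification: the same pika sketch
  can express the binding described above; the exchange name "amq.direct" and
  the routing key are assumptions, and the access ticket again has no 0-9-1
  counterpart:

      import pika

      channel = pika.BlockingConnection(pika.ConnectionParameters("localhost")).channel()

      # Bind the queue to an existing exchange; how the routing key is used
      # depends on the exchange type, as the field documentation notes.
      channel.queue_bind(queue="orders", exchange="amq.direct", routing_key="orders")
  ]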
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="purge" synchronous="1" index="30">
- purge a queue
- <doc>
- This method removes all messages from a queue. It does not cancel
- consumers. Purged messages are deleted without any formal "undo"
- mechanism.
- </doc>
- <rule implement="MUST">
- <test>amq_queue_15</test>
- A call to purge MUST result in an empty queue.
- </rule>
- <rule implement="MUST">
- <test>amq_queue_41</test>
-          On transacted channels the server MUST NOT purge messages that have
- already been sent to a client but not yet acknowledged.
- </rule>
- <rule implement="MAY">
- <test>amq_queue_42</test>
- The server MAY implement a purge queue or log that allows system
- administrators to recover accidentally-purged messages. The server
- SHOULD NOT keep purged messages in the same storage spaces as the
- live messages since the volumes of purged messages may get very
- large.
- </rule>
- <chassis name="server" implement="MUST"/>
- <response name="purge-ok"/>
- <field name="ticket" domain="access ticket">
- <doc>
- The access ticket must be for the access realm that holds the
- queue.
- </doc>
- <rule implement="MUST">
- The client MUST provide a valid access ticket giving "read" access
- rights to the queue's access realm. Note that purging a queue is
- equivalent to reading all messages and discarding them.
- </rule>
- </field>
- <field name = "queue" domain = "queue name">
- <doc>
- Specifies the name of the queue to purge. If the queue name is
- empty, refers to the current queue for the channel, which is
- the last declared queue.
- </doc>
- <doc name = "rule">
- If the client did not previously declare a queue, and the queue
- name in this method is empty, the server MUST raise a connection
- exception with reply code 530 (not allowed).
- </doc>
- <doc name = "rule" test = "amq_queue_16">
- The queue must exist. Attempting to purge a non-existing queue
- causes a channel exception.
- </doc>
- </field>
-
- <field name = "nowait" type = "bit">
- do not send a reply method
- <doc>
- If set, the server will not respond to the method. The client should
- not wait for a reply method. If the server could not complete the
- method it will raise a channel or connection exception.
- </doc>
- </field>
- </method>
- <method name="purge-ok" synchronous="1" index="31">
- confirms a queue purge
- <doc>
- This method confirms the purge of a queue.
- </doc>
- <chassis name="client" implement="MUST"/>
- <field name="message count" type="long">
- number of messages purged
- <doc>
- Reports the number of messages purged.
- </doc>
- </field>
- </method>
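
  [Editor's illustration, not part of the specification: a hedged pika sketch
  of the purge round-trip (queue name assumed); Purge-Ok surfaces as the
  returned method frame:

      import pika

      channel = pika.BlockingConnection(pika.ConnectionParameters("localhost")).channel()

      # Remove all ready messages; unacknowledged deliveries are not purged.
      result = channel.queue_purge(queue="orders")
      print("messages purged:", result.method.message_count)
  ]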
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="delete" synchronous="1" index="40">
- delete a queue
- <doc>
- This method deletes a queue. When a queue is deleted any pending
- messages are sent to a dead-letter queue if this is defined in the
- server configuration, and all consumers on the queue are cancelled.
- </doc>
- <rule implement="SHOULD">
- <test>amq_queue_43</test>
- The server SHOULD use a dead-letter queue to hold messages that
- were pending on a deleted queue, and MAY provide facilities for
- a system administrator to move these messages back to an active
- queue.
- </rule>
- <chassis name="server" implement="MUST"/>
- <response name="delete-ok"/>
- <field name="ticket" domain="access ticket">
- <doc>
- The client provides a valid access ticket giving "active"
- access rights to the queue's access realm.
- </doc>
- </field>
-
- <field name = "queue" domain = "queue name">
- <doc>
- Specifies the name of the queue to delete. If the queue name is
- empty, refers to the current queue for the channel, which is the
- last declared queue.
- </doc>
- <doc name = "rule">
- If the client did not previously declare a queue, and the queue
- name in this method is empty, the server MUST raise a connection
- exception with reply code 530 (not allowed).
- </doc>
- <doc name = "rule" test = "amq_queue_21">
- The queue must exist. Attempting to delete a non-existing queue
- causes a channel exception.
- </doc>
- </field>
-
- <field name="if unused" type="bit">
- delete only if unused
- <doc>
- If set, the server will only delete the queue if it has no
-          consumers. If the queue has consumers the server does not delete
-          it but raises a channel exception instead.
- </doc>
- <rule implement="MUST">
- <test>amq_queue_29</test>
- <test>amq_queue_30</test>
- The server MUST respect the if-unused flag when deleting a queue.
- </rule>
- </field>
- <field name="if empty" type="bit">
- delete only if empty
- <test>amq_queue_27</test>
- <doc>
- If set, the server will only delete the queue if it has no
- messages. If the queue is not empty the server raises a channel
- exception.
- </doc>
- </field>
- <field name = "nowait" type = "bit">
- do not send a reply method
- <doc>
- If set, the server will not respond to the method. The client should
- not wait for a reply method. If the server could not complete the
- method it will raise a channel or connection exception.
- </doc>
- </field>
- </method>
-
- <method name="delete-ok" synchronous="1" index="41">
- confirm deletion of a queue
- <doc>
- This method confirms the deletion of a queue.
- </doc>
- <chassis name="client" implement="MUST"/>
- <field name="message count" type="long">
- number of messages purged
- <doc>
- Reports the number of messages purged.
- </doc>
- </field>
- </method>
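
  [Editor's illustration, not part of the specification: sketched with pika
  under the same assumptions; the if-unused and if-empty flags map one-to-one,
  and a violated condition surfaces as a channel-level error in the client:

      import pika

      channel = pika.BlockingConnection(pika.ConnectionParameters("localhost")).channel()

      # Delete only if the queue has no consumers and no messages.
      result = channel.queue_delete(queue="orders", if_unused=True, if_empty=True)
      print("messages removed:", result.method.message_count)
  ]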
- </class>
- <class name="basic" handler="channel" index="60">
- <!--
-======================================================
-== BASIC MIDDLEWARE
-======================================================
--->
- work with basic content
-<doc>
- The Basic class provides methods that support an industry-standard
- messaging model.
-</doc>
-
-<doc name = "grammar">
- basic = C:QOS S:QOS-OK
- / C:CONSUME S:CONSUME-OK
- / C:CANCEL S:CANCEL-OK
- / C:PUBLISH content
- / S:RETURN content
- / S:DELIVER content
- / C:GET ( S:GET-OK content / S:GET-EMPTY )
- / C:ACK
- / C:REJECT
-</doc>
-
-<chassis name = "server" implement = "MUST" />
-<chassis name = "client" implement = "MAY" />
-
-<doc name = "rule" test = "amq_basic_08">
- The server SHOULD respect the persistent property of basic messages
- and SHOULD make a best-effort to hold persistent basic messages on a
- reliable storage mechanism.
-</doc>
-<doc name = "rule" test = "amq_basic_09">
- The server MUST NOT discard a persistent basic message in case of a
- queue overflow. The server MAY use the Channel.Flow method to slow
- or stop a basic message publisher when necessary.
-</doc>
-<doc name = "rule" test = "amq_basic_10">
- The server MAY overflow non-persistent basic messages to persistent
- storage and MAY discard or dead-letter non-persistent basic messages
- on a priority basis if the queue size exceeds some configured limit.
-</doc>
-<doc name = "rule" test = "amq_basic_11">
- The server MUST implement at least 2 priority levels for basic
- messages, where priorities 0-4 and 5-9 are treated as two distinct
- levels. The server MAY implement up to 10 priority levels.
-</doc>
-<doc name = "rule" test = "amq_basic_12">
- The server MUST deliver messages of the same priority in order
- irrespective of their individual persistence.
-</doc>
-<doc name = "rule" test = "amq_basic_13">
- The server MUST support both automatic and explicit acknowledgements
- on Basic content.
-</doc>
-
-<!-- These are the properties for a Basic content -->
-
-<field name = "content type" type = "shortstr">
- MIME content type
-</field>
-<field name = "content encoding" type = "shortstr">
- MIME content encoding
-</field>
-<field name = "headers" type = "table">
- Message header field table
-</field>
-<field name = "delivery mode" type = "octet">
- Non-persistent (1) or persistent (2)
-</field>
-<field name = "priority" type = "octet">
- The message priority, 0 to 9
-</field>
-<field name = "correlation id" type = "shortstr">
- The application correlation identifier
-</field>
-<field name = "reply to" type = "shortstr">
- The destination to reply to
-</field>
-<field name = "expiration" type = "shortstr">
- Message expiration specification
-</field>
-<field name = "message id" type = "shortstr">
- The application message identifier
-</field>
-<field name = "timestamp" type = "timestamp">
- The message timestamp
-</field>
-<field name = "type" type = "shortstr">
- The message type name
-</field>
-<field name = "user id" type = "shortstr">
- The creating user id
-</field>
-<field name = "app id" type = "shortstr">
- The creating application id
-</field>
-<field name = "cluster id" type = "shortstr">
- Intra-cluster routing identifier
-</field>
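
  [Editor's illustration, not part of the specification: the property list above
  is exposed essentially verbatim by client libraries as a basic-properties
  structure. A sketch of populating it with pika (all values are placeholders;
  cluster-id is reserved for broker use and left unset here):

      import time
      import pika

      props = pika.BasicProperties(
          content_type="application/json",
          content_encoding="utf-8",
          headers={"x-example": "value"},
          delivery_mode=2,             # 1 = non-persistent, 2 = persistent
          priority=5,                  # 0-9, as described above
          correlation_id="req-42",
          reply_to="reply.queue",
          expiration="60000",          # expiration specification (a short string)
          message_id="msg-0001",
          timestamp=int(time.time()),
          type="order.created",
          app_id="example-app",
      )
  ]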
-
-
-<!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
-<method name = "qos" synchronous = "1" index = "10">
- specify quality of service
- <doc>
- This method requests a specific quality of service. The QoS can
- be specified for the current channel or for all channels on the
- connection. The particular properties and semantics of a qos method
- always depend on the content class semantics. Though the qos method
- could in principle apply to both peers, it is currently meaningful
- only for the server.
- </doc>
- <chassis name = "server" implement = "MUST" />
- <response name = "qos-ok" />
-
- <field name = "prefetch size" type = "long">
- prefetch window in octets
- <doc>
- The client can request that messages be sent in advance so that
- when the client finishes processing a message, the following
- message is already held locally, rather than needing to be sent
- down the channel. Prefetching gives a performance improvement.
- This field specifies the prefetch window size in octets. The
- server will send a message in advance if it is equal to or
- smaller in size than the available prefetch size (and also falls
- into other prefetch limits). May be set to zero, meaning "no
- specific limit", although other prefetch limits may still apply.
- The prefetch-size is ignored if the no-ack option is set.
- </doc>
- <doc name = "rule" test = "amq_basic_17">
- The server MUST ignore this setting when the client is not
- processing any messages - i.e. the prefetch size does not limit
- the transfer of single messages to a client, only the sending in
- advance of more messages while the client still has one or more
- unacknowledged messages.
- </doc>
- </field>
-
- <field name = "prefetch count" type = "short">
- prefetch window in messages
- <doc>
- Specifies a prefetch window in terms of whole messages. This
- field may be used in combination with the prefetch-size field;
- a message will only be sent in advance if both prefetch windows
- (and those at the channel and connection level) allow it.
- The prefetch-count is ignored if the no-ack option is set.
- </doc>
- <doc name = "rule" test = "amq_basic_18">
- The server MAY send less data in advance than allowed by the
- client's specified prefetch windows but it MUST NOT send more.
- </doc>
- </field>
-
- <field name = "global" type = "bit">
- apply to entire connection
- <doc>
- By default the QoS settings apply to the current channel only. If
- this field is set, they are applied to the entire connection.
- </doc>
- </field>
-</method>
-
-<method name = "qos-ok" synchronous = "1" index = "11">
- confirm the requested qos
- <doc>
- This method tells the client that the requested QoS levels could
- be handled by the server. The requested QoS applies to all active
- consumers until a new QoS is defined.
- </doc>
- <chassis name = "client" implement = "MUST" />
-</method>
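
  [Editor's illustration, not part of the specification: a minimal pika sketch
  of Basic.Qos (values are assumptions). Prefetch-count limits unacknowledged
  deliveries on the channel; the call also accepts a prefetch-size in octets and
  a connection-wide flag corresponding to the global bit:

      import pika

      channel = pika.BlockingConnection(pika.ConnectionParameters("localhost")).channel()

      # Allow at most 10 unacknowledged messages in flight on this channel.
      channel.basic_qos(prefetch_count=10)
  ]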
-
-<!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
-<method name = "consume" synchronous = "1" index = "20">
- start a queue consumer
- <doc>
- This method asks the server to start a "consumer", which is a
- transient request for messages from a specific queue. Consumers
- last as long as the channel they were created on, or until the
- client cancels them.
- </doc>
- <doc name = "rule" test = "amq_basic_01">
- The server SHOULD support at least 16 consumers per queue, unless
- the queue was declared as private, and ideally, impose no limit
- except as defined by available resources.
- </doc>
- <chassis name = "server" implement = "MUST" />
- <response name = "consume-ok" />
-
- <field name = "ticket" domain = "access ticket">
- <doc name = "rule">
- The client MUST provide a valid access ticket giving "read" access
- rights to the realm for the queue.
- </doc>
- </field>
-
- <field name = "queue" domain = "queue name">
- <doc>
- Specifies the name of the queue to consume from. If the queue name
- is null, refers to the current queue for the channel, which is the
- last declared queue.
- </doc>
- <doc name = "rule">
- If the client did not previously declare a queue, and the queue name
- in this method is empty, the server MUST raise a connection exception
- with reply code 530 (not allowed).
- </doc>
- </field>
-
- <field name = "consumer tag" domain = "consumer tag">
- <doc>
- Specifies the identifier for the consumer. The consumer tag is
- local to a connection, so two clients can use the same consumer
- tags. If this field is empty the server will generate a unique
- tag.
- </doc>
- <doc name = "rule" test = "todo">
- The tag MUST NOT refer to an existing consumer. If the client
- attempts to create two consumers with the same non-empty tag
- the server MUST raise a connection exception with reply code
- 530 (not allowed).
- </doc>
- </field>
-
- <field name = "no local" domain = "no local" />
-
- <field name = "no ack" domain = "no ack" />
-
- <field name = "exclusive" type = "bit">
- request exclusive access
- <doc>
- Request exclusive consumer access, meaning only this consumer can
- access the queue.
- </doc>
- <doc name = "rule" test = "amq_basic_02">
-    If the server cannot grant exclusive access to the queue when asked
-    - because there are other consumers active - it MUST raise a channel
- exception with return code 403 (access refused).
- </doc>
- </field>
-
- <field name = "nowait" type = "bit">
- do not send a reply method
- <doc>
- If set, the server will not respond to the method. The client should
- not wait for a reply method. If the server could not complete the
- method it will raise a channel or connection exception.
- </doc>
- </field>
-</method>
-
-<method name = "consume-ok" synchronous = "1" index = "21">
- confirm a new consumer
- <doc>
- The server provides the client with a consumer tag, which is used
- by the client for methods called on the consumer at a later stage.
- </doc>
- <chassis name = "client" implement = "MUST" />
-
- <field name = "consumer tag" domain = "consumer tag">
- <doc>
- Holds the consumer tag specified by the client or provided by
- the server.
- </doc>
- </field>
-</method>
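
  [Editor's illustration, not part of the specification: the Consume /
  Consume-Ok / Deliver flow looks roughly like this with pika (queue name
  assumed; auto_ack=False corresponds to the no-ack flag being unset):

      import pika

      channel = pika.BlockingConnection(pika.ConnectionParameters("localhost")).channel()

      def on_message(ch, method, properties, body):
          # 'method' is the Deliver method frame; acknowledge explicitly.
          print(method.delivery_tag, body)
          ch.basic_ack(delivery_tag=method.delivery_tag)

      # Consume-Ok yields the (possibly server-generated) consumer tag.
      tag = channel.basic_consume(queue="orders", on_message_callback=on_message,
                                  auto_ack=False, exclusive=False)
      channel.start_consuming()  # blocks, dispatching Deliver methods to on_message
  ]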
-
-
-<!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
-<method name = "cancel" synchronous = "1" index = "30">
- end a queue consumer
- <doc test = "amq_basic_04">
- This method cancels a consumer. This does not affect already
- delivered messages, but it does mean the server will not send any
- more messages for that consumer. The client may receive an
-    arbitrary number of messages in between sending the cancel method
- and receiving the cancel-ok reply.
- </doc>
- <doc name = "rule" test = "todo">
- If the queue no longer exists when the client sends a cancel command,
- or the consumer has been cancelled for other reasons, this command
- has no effect.
- </doc>
- <chassis name = "server" implement = "MUST" />
- <response name = "cancel-ok" />
-
- <field name = "consumer tag" domain = "consumer tag" />
-
- <field name = "nowait" type = "bit">
- do not send a reply method
- <doc>
- If set, the server will not respond to the method. The client should
- not wait for a reply method. If the server could not complete the
- method it will raise a channel or connection exception.
- </doc>
- </field>
-</method>
-
-<method name = "cancel-ok" synchronous = "1" index = "31">
- confirm a cancelled consumer
- <doc>
- This method confirms that the cancellation was completed.
- </doc>
- <chassis name = "client" implement = "MUST" />
-
- <field name = "consumer tag" domain = "consumer tag" />
-</method>
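
  [Editor's illustration, not part of the specification: cancelling the consumer
  created above is a single call in the pika sketch; as the rule notes,
  deliveries already in flight may still arrive before Cancel-Ok:

      import pika

      channel = pika.BlockingConnection(pika.ConnectionParameters("localhost")).channel()

      tag = channel.basic_consume(queue="orders",
                                  on_message_callback=lambda ch, m, p, b: None)
      channel.basic_cancel(consumer_tag=tag)
  ]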
-
-
-<!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
-<method name = "publish" content = "1" index = "40">
- publish a message
- <doc>
- This method publishes a message to a specific exchange. The message
- will be routed to queues as defined by the exchange configuration
- and distributed to any active consumers when the transaction, if any,
- is committed.
- </doc>
- <chassis name = "server" implement = "MUST" />
-
- <field name = "ticket" domain = "access ticket">
- <doc name = "rule">
- The client MUST provide a valid access ticket giving "write"
- access rights to the access realm for the exchange.
- </doc>
- </field>
-
- <field name = "exchange" domain = "exchange name">
- <doc>
- Specifies the name of the exchange to publish to. The exchange
- name can be empty, meaning the default exchange. If the exchange
- name is specified, and that exchange does not exist, the server
- will raise a channel exception.
- </doc>
- <doc name = "rule" test = "amq_basic_06">
- The server MUST accept a blank exchange name to mean the default
- exchange.
- </doc>
- <doc name = "rule" test = "amq_basic_14">
- If the exchange was declared as an internal exchange, the server
- MUST raise a channel exception with a reply code 403 (access
- refused).
- </doc>
- <doc name = "rule" test = "amq_basic_15">
- The exchange MAY refuse basic content in which case it MUST raise
- a channel exception with reply code 540 (not implemented).
- </doc>
- </field>
-
- <field name = "routing key" type = "shortstr">
- Message routing key
- <doc>
- Specifies the routing key for the message. The routing key is
- used for routing messages depending on the exchange configuration.
- </doc>
- </field>
-
- <field name = "mandatory" type = "bit">
- indicate mandatory routing
- <doc>
- This flag tells the server how to react if the message cannot be
- routed to a queue. If this flag is set, the server will return an
- unroutable message with a Return method. If this flag is zero, the
- server silently drops the message.
- </doc>
- <doc name = "rule" test = "amq_basic_07">
- The server SHOULD implement the mandatory flag.
- </doc>
- </field>
-
- <field name = "immediate" type = "bit">
- request immediate delivery
- <doc>
- This flag tells the server how to react if the message cannot be
- routed to a queue consumer immediately. If this flag is set, the
- server will return an undeliverable message with a Return method.
- If this flag is zero, the server will queue the message, but with
- no guarantee that it will ever be consumed.
- </doc>
- <doc name = "rule" test = "amq_basic_16">
- The server SHOULD implement the immediate flag.
- </doc>
- </field>
-</method>
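
  [Editor's illustration, not part of the specification: a pika sketch of
  Basic.Publish (exchange, routing key and body are placeholders). The mandatory
  bit maps directly; the immediate bit is not exposed by that client, since
  later protocol revisions dropped it:

      import pika

      channel = pika.BlockingConnection(pika.ConnectionParameters("localhost")).channel()

      channel.basic_publish(exchange="amq.direct", routing_key="orders",
                            body=b'{"id": 42}',
                            properties=pika.BasicProperties(delivery_mode=2),
                            mandatory=True)
  ]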
-
-<method name = "return" content = "1" index = "50">
- return a failed message
- <doc>
- This method returns an undeliverable message that was published
- with the "immediate" flag set, or an unroutable message published
- with the "mandatory" flag set. The reply code and text provide
- information about the reason that the message was undeliverable.
- </doc>
- <chassis name = "client" implement = "MUST" />
-
- <field name = "reply code" domain = "reply code" />
- <field name = "reply text" domain = "reply text" />
-
- <field name = "exchange" domain = "exchange name">
- <doc>
- Specifies the name of the exchange that the message was
- originally published to.
- </doc>
- </field>
-
- <field name = "routing key" type = "shortstr">
- Message routing key
- <doc>
- Specifies the routing key name specified when the message was
- published.
- </doc>
- </field>
-</method>
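
  [Editor's illustration, not part of the specification: returned messages are
  delivered asynchronously to the publisher; in the pika sketch they arrive
  through the client's return-callback hook (an assumption about that client;
  exchange and routing key are placeholders):

      import pika

      connection = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
      channel = connection.channel()

      def on_return(ch, method, properties, body):
          # 'method' carries the reply code/text and the original exchange/key.
          print("returned:", method.reply_code, method.reply_text,
                method.exchange, method.routing_key)

      channel.add_on_return_callback(on_return)
      channel.basic_publish(exchange="amq.direct", routing_key="no.such.binding",
                            body=b"probe", mandatory=True)
      connection.process_data_events(time_limit=1)  # let any Return frame arrive
  ]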
-
-
-<!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
-<method name = "deliver" content = "1" index = "60">
- notify the client of a consumer message
- <doc>
- This method delivers a message to the client, via a consumer. In
- the asynchronous message delivery model, the client starts a
- consumer using the Consume method, then the server responds with
- Deliver methods as and when messages arrive for that consumer.
- </doc>
- <doc name = "rule" test = "amq_basic_19">
- The server SHOULD track the number of times a message has been
- delivered to clients and when a message is redelivered a certain
- number of times - e.g. 5 times - without being acknowledged, the
- server SHOULD consider the message to be unprocessable (possibly
- causing client applications to abort), and move the message to a
- dead letter queue.
- </doc>
- <chassis name = "client" implement = "MUST" />
-
- <field name = "consumer tag" domain = "consumer tag" />
-
- <field name = "delivery tag" domain = "delivery tag" />
-
- <field name = "redelivered" domain = "redelivered" />
-
- <field name = "exchange" domain = "exchange name">
- <doc>
- Specifies the name of the exchange that the message was
- originally published to.
- </doc>
- </field>
-
- <field name = "routing key" type = "shortstr">
- Message routing key
- <doc>
- Specifies the routing key name specified when the message was
- published.
- </doc>
- </field>
-</method>
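
  [Editor's illustration, not part of the specification: in client code the
  fields of the Deliver method surface on the frame handed to the consumer
  callback. A fragment of the earlier pika sketch, shown only to name those
  fields:

      def on_message(ch, method, properties, body):
          # consumer-tag, delivery-tag, redelivered, exchange, routing-key
          print(method.consumer_tag, method.delivery_tag, method.redelivered,
                method.exchange, method.routing_key)
          ch.basic_ack(delivery_tag=method.delivery_tag)
  ]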
-
-
-<!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
-<method name = "get" synchronous = "1" index = "70">
- direct access to a queue
- <doc>
- This method provides a direct access to the messages in a queue
- using a synchronous dialogue that is designed for specific types of
- application where synchronous functionality is more important than
- performance.
- </doc>
- <response name = "get-ok" />
- <response name = "get-empty" />
- <chassis name = "server" implement = "MUST" />
-
- <field name = "ticket" domain = "access ticket">
- <doc name = "rule">
- The client MUST provide a valid access ticket giving "read"
- access rights to the realm for the queue.
- </doc>
- </field>
-
- <field name = "queue" domain = "queue name">
- <doc>
- Specifies the name of the queue to consume from. If the queue name
- is null, refers to the current queue for the channel, which is the
- last declared queue.
- </doc>
- <doc name = "rule">
- If the client did not previously declare a queue, and the queue name
- in this method is empty, the server MUST raise a connection exception
- with reply code 530 (not allowed).
- </doc>
- </field>
-
- <field name = "no ack" domain = "no ack" />
-</method>
-
-<method name = "get-ok" synchronous = "1" content = "1" index = "71">
- provide client with a message
- <doc>
- This method delivers a message to the client following a get
- method. A message delivered by 'get-ok' must be acknowledged
- unless the no-ack option was set in the get method.
- </doc>
- <chassis name = "client" implement = "MAY" />
-
- <field name = "delivery tag" domain = "delivery tag" />
-
- <field name = "redelivered" domain = "redelivered" />
-
- <field name = "exchange" domain = "exchange name">
- <doc>
- Specifies the name of the exchange that the message was originally
- published to. If empty, the message was published to the default
- exchange.
- </doc>
- </field>
-
- <field name = "routing key" type = "shortstr">
- Message routing key
- <doc>
- Specifies the routing key name specified when the message was
- published.
- </doc>
- </field>
-
- <field name = "message count" type = "long" >
- number of messages pending
- <doc>
- This field reports the number of messages pending on the queue,
- excluding the message being delivered. Note that this figure is
- indicative, not reliable, and can change arbitrarily as messages
- are added to the queue and removed by other clients.
- </doc>
- </field>
-</method>
-
-
-<method name = "get-empty" synchronous = "1" index = "72">
- indicate no messages available
- <doc>
- This method tells the client that the queue has no messages
- available for the client.
- </doc>
- <chassis name = "client" implement = "MAY" />
-
- <field name = "cluster id" type = "shortstr">
- Cluster id
- <doc>
- For use by cluster applications, should not be used by
- client applications.
- </doc>
- </field>
-</method>
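
  [Editor's illustration, not part of the specification: the Get / Get-Ok /
  Get-Empty dialogue is a simple synchronous pull in the pika sketch; Get-Empty
  surfaces as a tuple of None values:

      import pika

      channel = pika.BlockingConnection(pika.ConnectionParameters("localhost")).channel()

      method, properties, body = channel.basic_get(queue="orders", auto_ack=False)
      if method is None:
          print("queue empty")               # Get-Empty
      else:
          print(method.message_count, "still pending;", body)
          channel.basic_ack(delivery_tag=method.delivery_tag)
  ]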
-
-<!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
-<method name = "ack" index = "80">
- acknowledge one or more messages
- <doc>
- This method acknowledges one or more messages delivered via the
- Deliver or Get-Ok methods. The client can ask to confirm a
- single message or a set of messages up to and including a specific
- message.
- </doc>
- <chassis name = "server" implement = "MUST" />
- <field name = "delivery tag" domain = "delivery tag" />
-
- <field name = "multiple" type = "bit">
- acknowledge multiple messages
- <doc>
- If set to 1, the delivery tag is treated as "up to and including",
- so that the client can acknowledge multiple messages with a single
- method. If set to zero, the delivery tag refers to a single
-      message. If the multiple field is 1, and the delivery tag is zero,
-      this tells the server to acknowledge all outstanding messages.
- </doc>
- <doc name = "rule" test = "amq_basic_20">
-      The server MUST validate that a non-zero delivery-tag refers to a
-      delivered message, and raise a channel exception if this is not the
- case.
- </doc>
- </field>
-</method>
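
  [Editor's illustration, not part of the specification: acknowledging up to and
  including a delivery tag, sketched with pika under the same assumptions:

      import pika

      channel = pika.BlockingConnection(pika.ConnectionParameters("localhost")).channel()

      method, properties, body = channel.basic_get(queue="orders")
      if method is not None:
          # multiple=True acknowledges this and every earlier outstanding delivery.
          channel.basic_ack(delivery_tag=method.delivery_tag, multiple=True)
  ]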
-
-<!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
-<method name = "reject" index = "90">
- reject an incoming message
- <doc>
- This method allows a client to reject a message. It can be used to
- interrupt and cancel large incoming messages, or return untreatable
- messages to their original queue.
- </doc>
- <doc name = "rule" test = "amq_basic_21">
-    The server SHOULD be capable of accepting and processing the Reject
- method while sending message content with a Deliver or Get-Ok
- method. I.e. the server should read and process incoming methods
-    while sending output frames. To cancel partially-sent content,
- the server sends a content body frame of size 1 (i.e. with no data
- except the frame-end octet).
- </doc>
- <doc name = "rule" test = "amq_basic_22">
- The server SHOULD interpret this method as meaning that the client
- is unable to process the message at this time.
- </doc>
- <doc name = "rule">
- A client MUST NOT use this method as a means of selecting messages
- to process. A rejected message MAY be discarded or dead-lettered,
- not necessarily passed to another client.
- </doc>
- <chassis name = "server" implement = "MUST" />
-
- <field name = "delivery tag" domain = "delivery tag" />
-
- <field name = "requeue" type = "bit">
- requeue the message
- <doc>
- If this field is zero, the message will be discarded. If this bit
- is 1, the server will attempt to requeue the message.
- </doc>
- <doc name = "rule" test = "amq_basic_23">
- The server MUST NOT deliver the message to the same client within
- the context of the current channel. The recommended strategy is
- to attempt to deliver the message to an alternative consumer, and
- if that is not possible, to move the message to a dead-letter
- queue. The server MAY use more sophisticated tracking to hold
- the message on the queue and redeliver it to the same client at
- a later stage.
- </doc>
- </field>
-</method>
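
  [Editor's illustration, not part of the specification: rejecting a delivery
  with a requeue request, in the same hedged pika sketch:

      import pika

      channel = pika.BlockingConnection(pika.ConnectionParameters("localhost")).channel()

      method, properties, body = channel.basic_get(queue="orders")
      if method is not None:
          # requeue=True asks the server to requeue (or dead-letter) the message.
          channel.basic_reject(delivery_tag=method.delivery_tag, requeue=True)
  ]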
-
-<method name = "recover" index = "100">
- redeliver unacknowledged messages
- <doc>
- This method asks the broker to redeliver all unacknowledged messages on a
- specified channel. Zero or more messages may be redelivered. This method
- is only allowed on non-transacted channels.
- </doc>
- <chassis name = "server" implement = "MUST" />
-
- <field name = "requeue" type = "bit">
- requeue the message
- <doc>
- If this field is zero, the message will be redelivered to the original
- recipient. If this bit is 1, the server will attempt to requeue the
- message, potentially then delivering it to an alternative subscriber.
- </doc>
- </field>
- <doc name="rule">
- The server MUST set the redelivered flag on all messages that are resent.
- </doc>
- <doc name="rule">
- The server MUST raise a channel exception if this is called on a
- transacted channel.
- </doc>
-</method>
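
  [Editor's illustration, not part of the specification: Basic.Recover is a
  single call in the pika sketch; with requeue set the broker may hand the
  messages to other consumers and must mark them redelivered:

      import pika

      channel = pika.BlockingConnection(pika.ConnectionParameters("localhost")).channel()

      channel.basic_recover(requeue=True)
  ]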
-
-</class>
-
-
- <class name="file" handler="channel" index="70">
- <!--
-======================================================
-== FILE TRANSFER
-======================================================
--->
- work with file content
-<doc>
- The file class provides methods that support reliable file transfer.
- File messages have a specific set of properties that are required for
- interoperability with file transfer applications. File messages and
- acknowledgements are subject to channel transactions. Note that the
- file class does not provide message browsing methods; these are not
- compatible with the staging model. Applications that need browsable
- file transfer should use Basic content and the Basic class.
-</doc>
-
-<doc name = "grammar">
- file = C:QOS S:QOS-OK
- / C:CONSUME S:CONSUME-OK
- / C:CANCEL S:CANCEL-OK
- / C:OPEN S:OPEN-OK C:STAGE content
- / S:OPEN C:OPEN-OK S:STAGE content
- / C:PUBLISH
- / S:DELIVER
- / S:RETURN
- / C:ACK
- / C:REJECT
-</doc>
-
-<chassis name = "server" implement = "MAY" />
-<chassis name = "client" implement = "MAY" />
-
-<doc name = "rule">
- The server MUST make a best-effort to hold file messages on a
- reliable storage mechanism.
-</doc>
-<doc name = "rule">
- The server MUST NOT discard a file message in case of a queue
- overflow. The server MUST use the Channel.Flow method to slow or stop
- a file message publisher when necessary.
-</doc>
-<doc name = "rule">
- The server MUST implement at least 2 priority levels for file
- messages, where priorities 0-4 and 5-9 are treated as two distinct
- levels. The server MAY implement up to 10 priority levels.
-</doc>
-<doc name = "rule">
- The server MUST support both automatic and explicit acknowledgements
- on file content.
-</doc>
-
-<!-- These are the properties for a File content -->
-
-<field name = "content type" type = "shortstr">
- MIME content type
-</field>
-<field name = "content encoding" type = "shortstr">
- MIME content encoding
-</field>
-<field name = "headers" type = "table">
- Message header field table
-</field>
-<field name = "priority" type = "octet">
- The message priority, 0 to 9
-</field>
-<field name = "reply to" type = "shortstr">
- The destination to reply to
-</field>
-<field name = "message id" type = "shortstr">
- The application message identifier
-</field>
-<field name = "filename" type = "shortstr">
- The message filename
-</field>
-<field name = "timestamp" type = "timestamp">
- The message timestamp
-</field>
-<field name = "cluster id" type = "shortstr">
- Intra-cluster routing identifier
-</field>
-
-
-<!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
-<method name = "qos" synchronous = "1" index = "10">
- specify quality of service
- <doc>
- This method requests a specific quality of service. The QoS can
- be specified for the current channel or for all channels on the
- connection. The particular properties and semantics of a qos method
- always depend on the content class semantics. Though the qos method
- could in principle apply to both peers, it is currently meaningful
- only for the server.
- </doc>
- <chassis name = "server" implement = "MUST" />
- <response name = "qos-ok" />
-
- <field name = "prefetch size" type = "long">
- prefetch window in octets
- <doc>
- The client can request that messages be sent in advance so that
- when the client finishes processing a message, the following
- message is already held locally, rather than needing to be sent
- down the channel. Prefetching gives a performance improvement.
- This field specifies the prefetch window size in octets. May be
- set to zero, meaning "no specific limit". Note that other
- prefetch limits may still apply. The prefetch-size is ignored
- if the no-ack option is set.
- </doc>
- </field>
-
- <field name = "prefetch count" type = "short">
- prefetch window in messages
- <doc>
- Specifies a prefetch window in terms of whole messages. This
- is compatible with some file API implementations. This field
- may be used in combination with the prefetch-size field; a
- message will only be sent in advance if both prefetch windows
- (and those at the channel and connection level) allow it.
- The prefetch-count is ignored if the no-ack option is set.
- </doc>
- <doc name = "rule">
- The server MAY send less data in advance than allowed by the
- client's specified prefetch windows but it MUST NOT send more.
- </doc>
- </field>
-
- <field name = "global" type = "bit">
- apply to entire connection
- <doc>
- By default the QoS settings apply to the current channel only. If
- this field is set, they are applied to the entire connection.
- </doc>
- </field>
-</method>
-
-<method name = "qos-ok" synchronous = "1" index = "11">
- confirm the requested qos
- <doc>
- This method tells the client that the requested QoS levels could
- be handled by the server. The requested QoS applies to all active
- consumers until a new QoS is defined.
- </doc>
- <chassis name = "client" implement = "MUST" />
-</method>
-
-<!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
-<method name = "consume" synchronous = "1" index = "20">
- start a queue consumer
- <doc>
- This method asks the server to start a "consumer", which is a
- transient request for messages from a specific queue. Consumers
- last as long as the channel they were created on, or until the
- client cancels them.
- </doc>
- <doc name = "rule">
- The server SHOULD support at least 16 consumers per queue, unless
- the queue was declared as private, and ideally, impose no limit
- except as defined by available resources.
- </doc>
- <chassis name = "server" implement = "MUST" />
- <response name = "consume-ok" />
-
- <field name = "ticket" domain = "access ticket">
- <doc name = "rule">
- The client MUST provide a valid access ticket giving "read" access
- rights to the realm for the queue.
- </doc>
- </field>
-
- <field name = "queue" domain = "queue name">
- <doc>
- Specifies the name of the queue to consume from. If the queue name
- is null, refers to the current queue for the channel, which is the
- last declared queue.
- </doc>
- <doc name = "rule">
- If the client did not previously declare a queue, and the queue name
- in this method is empty, the server MUST raise a connection exception
- with reply code 530 (not allowed).
- </doc>
- </field>
-
- <field name = "consumer tag" domain = "consumer tag">
- <doc>
- Specifies the identifier for the consumer. The consumer tag is
- local to a connection, so two clients can use the same consumer
- tags. If this field is empty the server will generate a unique
- tag.
- </doc>
- <doc name = "rule" test = "todo">
- The tag MUST NOT refer to an existing consumer. If the client
- attempts to create two consumers with the same non-empty tag
- the server MUST raise a connection exception with reply code
- 530 (not allowed).
- </doc>
- </field>
-
- <field name = "no local" domain = "no local" />
-
- <field name = "no ack" domain = "no ack" />
-
- <field name = "exclusive" type = "bit">
- request exclusive access
- <doc>
- Request exclusive consumer access, meaning only this consumer can
- access the queue.
- </doc>
- <doc name = "rule" test = "amq_file_00">
-      If the server cannot grant exclusive access to the queue when asked
-      - because there are other consumers active - it MUST raise a channel
- exception with return code 405 (resource locked).
- </doc>
- </field>
-
- <field name = "nowait" type = "bit">
- do not send a reply method
- <doc>
- If set, the server will not respond to the method. The client should
- not wait for a reply method. If the server could not complete the
- method it will raise a channel or connection exception.
- </doc>
- </field>
-</method>
-
-<method name = "consume-ok" synchronous = "1" index = "21">
- confirm a new consumer
- <doc>
- This method provides the client with a consumer tag which it MUST
- use in methods that work with the consumer.
- </doc>
- <chassis name = "client" implement = "MUST" />
-
- <field name = "consumer tag" domain = "consumer tag">
- <doc>
- Holds the consumer tag specified by the client or provided by
- the server.
- </doc>
- </field>
-</method>
-
-
-<!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
-<method name = "cancel" synchronous = "1" index = "30">
- end a queue consumer
- <doc>
- This method cancels a consumer. This does not affect already
- delivered messages, but it does mean the server will not send any
- more messages for that consumer.
- </doc>
- <chassis name = "server" implement = "MUST" />
- <response name = "cancel-ok" />
-
- <field name = "consumer tag" domain = "consumer tag" />
-
- <field name = "nowait" type = "bit">
- do not send a reply method
- <doc>
- If set, the server will not respond to the method. The client should
- not wait for a reply method. If the server could not complete the
- method it will raise a channel or connection exception.
- </doc>
- </field>
-</method>
-
-<method name = "cancel-ok" synchronous = "1" index = "31">
- confirm a cancelled consumer
- <doc>
- This method confirms that the cancellation was completed.
- </doc>
- <chassis name = "client" implement = "MUST" />
-
- <field name = "consumer tag" domain = "consumer tag" />
-</method>
-
-
-<!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
-<method name = "open" synchronous = "1" index = "40">
- request to start staging
- <doc>
- This method requests permission to start staging a message. Staging
- means sending the message into a temporary area at the recipient end
- and then delivering the message by referring to this temporary area.
- Staging is how the protocol handles partial file transfers - if a
- message is partially staged and the connection breaks, the next time
- the sender starts to stage it, it can restart from where it left off.
- </doc>
- <response name = "open-ok" />
- <chassis name = "server" implement = "MUST" />
- <chassis name = "client" implement = "MUST" />
-
- <field name = "identifier" type = "shortstr">
- staging identifier
- <doc>
- This is the staging identifier. This is an arbitrary string chosen
- by the sender. For staging to work correctly the sender must use
- the same staging identifier when staging the same message a second
- time after recovery from a failure. A good choice for the staging
- identifier would be the SHA1 hash of the message properties data
- (including the original filename, revised time, etc.).
- </doc>
- </field>
-
- <field name = "content size" type = "longlong">
- message content size
- <doc>
- The size of the content in octets. The recipient may use this
- information to allocate or check available space in advance, to
- avoid "disk full" errors during staging of very large messages.
- </doc>
- <doc name = "rule">
- The sender MUST accurately fill the content-size field.
- Zero-length content is permitted.
- </doc>
- </field>
-</method>
-
-<method name = "open-ok" synchronous = "1" index = "41">
- confirm staging ready
- <doc>
- This method confirms that the recipient is ready to accept staged
- data. If the message was already partially-staged at a previous
- time the recipient will report the number of octets already staged.
- </doc>
- <response name = "stage" />
- <chassis name = "server" implement = "MUST" />
- <chassis name = "client" implement = "MUST" />
-
- <field name = "staged size" type = "longlong">
- already staged amount
- <doc>
- The amount of previously-staged content in octets. For a new
- message this will be zero.
- </doc>
- <doc name = "rule">
- The sender MUST start sending data from this octet offset in the
- message, counting from zero.
- </doc>
- <doc name = "rule">
- The recipient MAY decide how long to hold partially-staged content
- and MAY implement staging by always discarding partially-staged
- content. However if it uses the file content type it MUST support
- the staging methods.
- </doc>
- </field>
-</method>
-
-<!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
-<method name = "stage" content = "1" index = "50">
- stage message content
- <doc>
- This method stages the message, sending the message content to the
- recipient from the octet offset specified in the Open-Ok method.
- </doc>
- <chassis name = "server" implement = "MUST" />
- <chassis name = "client" implement = "MUST" />
-</method>
-
-
-<!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
-<method name = "publish" index = "60">
- publish a message
- <doc>
- This method publishes a staged file message to a specific exchange.
- The file message will be routed to queues as defined by the exchange
- configuration and distributed to any active consumers when the
- transaction, if any, is committed.
- </doc>
- <chassis name = "server" implement = "MUST" />
-
- <field name = "ticket" domain = "access ticket">
- <doc name = "rule">
- The client MUST provide a valid access ticket giving "write"
- access rights to the access realm for the exchange.
- </doc>
- </field>
-
- <field name = "exchange" domain = "exchange name">
- <doc>
- Specifies the name of the exchange to publish to. The exchange
- name can be empty, meaning the default exchange. If the exchange
- name is specified, and that exchange does not exist, the server
- will raise a channel exception.
- </doc>
- <doc name = "rule">
- The server MUST accept a blank exchange name to mean the default
- exchange.
- </doc>
- <doc name = "rule">
- If the exchange was declared as an internal exchange, the server
- MUST respond with a reply code 403 (access refused) and raise a
- channel exception.
- </doc>
- <doc name = "rule">
- The exchange MAY refuse file content in which case it MUST respond
- with a reply code 540 (not implemented) and raise a channel
- exception.
- </doc>
- </field>
-
- <field name = "routing key" type = "shortstr">
- Message routing key
- <doc>
- Specifies the routing key for the message. The routing key is
- used for routing messages depending on the exchange configuration.
- </doc>
- </field>
-
- <field name = "mandatory" type = "bit">
- indicate mandatory routing
- <doc>
- This flag tells the server how to react if the message cannot be
- routed to a queue. If this flag is set, the server will return an
- unroutable message with a Return method. If this flag is zero, the
- server silently drops the message.
- </doc>
- <doc name = "rule" test = "amq_file_00">
- The server SHOULD implement the mandatory flag.
- </doc>
- </field>
-
- <field name = "immediate" type = "bit">
- request immediate delivery
- <doc>
- This flag tells the server how to react if the message cannot be
- routed to a queue consumer immediately. If this flag is set, the
- server will return an undeliverable message with a Return method.
- If this flag is zero, the server will queue the message, but with
- no guarantee that it will ever be consumed.
- </doc>
- <doc name = "rule" test = "amq_file_00">
- The server SHOULD implement the immediate flag.
- </doc>
- </field>
-
- <field name = "identifier" type = "shortstr">
- staging identifier
- <doc>
- This is the staging identifier of the message to publish. The
- message must have been staged. Note that a client can send the
- Publish method asynchronously without waiting for staging to
- finish.
- </doc>
- </field>
-</method>
-
-<method name = "return" content = "1" index = "70">
- return a failed message
- <doc>
- This method returns an undeliverable message that was published
- with the "immediate" flag set, or an unroutable message published
- with the "mandatory" flag set. The reply code and text provide
- information about the reason that the message was undeliverable.
- </doc>
- <chassis name = "client" implement = "MUST" />
-
- <field name = "reply code" domain = "reply code" />
- <field name = "reply text" domain = "reply text" />
-
- <field name = "exchange" domain = "exchange name">
- <doc>
- Specifies the name of the exchange that the message was
- originally published to.
- </doc>
- </field>
-
- <field name = "routing key" type = "shortstr">
- Message routing key
- <doc>
- Specifies the routing key name specified when the message was
- published.
- </doc>
- </field>
-</method>
-
-
-<!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
-<method name = "deliver" index = "80">
- notify the client of a consumer message
- <doc>
- This method delivers a staged file message to the client, via a
- consumer. In the asynchronous message delivery model, the client
- starts a consumer using the Consume method, then the server
- responds with Deliver methods as and when messages arrive for
- that consumer.
- </doc>
- <doc name = "rule">
- The server SHOULD track the number of times a message has been
- delivered to clients and when a message is redelivered a certain
- number of times - e.g. 5 times - without being acknowledged, the
- server SHOULD consider the message to be unprocessable (possibly
- causing client applications to abort), and move the message to a
- dead letter queue.
- </doc>
- <chassis name = "client" implement = "MUST" />
-
- <field name = "consumer tag" domain = "consumer tag" />
-
- <field name = "delivery tag" domain = "delivery tag" />
-
- <field name = "redelivered" domain = "redelivered" />
-
- <field name = "exchange" domain = "exchange name">
- <doc>
- Specifies the name of the exchange that the message was originally
- published to.
- </doc>
- </field>
-
- <field name = "routing key" type = "shortstr">
- Message routing key
- <doc>
- Specifies the routing key name specified when the message was
- published.
- </doc>
- </field>
-
- <field name = "identifier" type = "shortstr">
- staging identifier
- <doc>
- This is the staging identifier of the message to deliver. The
- message must have been staged. Note that a server can send the
- Deliver method asynchronously without waiting for staging to
- finish.
- </doc>
- </field>
-</method>
-
-
-<!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
-<method name = "ack" index = "90">
- acknowledge one or more messages
- <doc>
- This method acknowledges one or more messages delivered via the
- Deliver method. The client can ask to confirm a single message or
- a set of messages up to and including a specific message.
- </doc>
- <chassis name = "server" implement = "MUST" />
- <field name = "delivery tag" domain = "delivery tag" />
-
- <field name = "multiple" type = "bit">
- acknowledge multiple messages
- <doc>
- If set to 1, the delivery tag is treated as "up to and including",
- so that the client can acknowledge multiple messages with a single
- method. If set to zero, the delivery tag refers to a single
-      message. If the multiple field is 1, and the delivery tag is zero,
-      this tells the server to acknowledge all outstanding messages.
- </doc>
- <doc name = "rule">
-      The server MUST validate that a non-zero delivery-tag refers to a
- delivered message, and raise a channel exception if this is not the
- case.
- </doc>
- </field>
-</method>
-
-
-<!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
-<method name = "reject" index = "100">
- reject an incoming message
- <doc>
- This method allows a client to reject a message. It can be used to
- return untreatable messages to their original queue. Note that file
- content is staged before delivery, so the client will not use this
- method to interrupt delivery of a large message.
- </doc>
- <doc name = "rule">
- The server SHOULD interpret this method as meaning that the client
- is unable to process the message at this time.
- </doc>
- <doc name = "rule">
- A client MUST NOT use this method as a means of selecting messages
- to process. A rejected message MAY be discarded or dead-lettered,
- not necessarily passed to another client.
- </doc>
- <chassis name = "server" implement = "MUST" />
-
- <field name = "delivery tag" domain = "delivery tag" />
-
- <field name = "requeue" type = "bit">
- requeue the message
- <doc>
- If this field is zero, the message will be discarded. If this bit
- is 1, the server will attempt to requeue the message.
- </doc>
- <doc name = "rule">
- The server MUST NOT deliver the message to the same client within
- the context of the current channel. The recommended strategy is
- to attempt to deliver the message to an alternative consumer, and
- if that is not possible, to move the message to a dead-letter
- queue. The server MAY use more sophisticated tracking to hold
- the message on the queue and redeliver it to the same client at
- a later stage.
- </doc>
- </field>
-</method>
-
-</class>
-
- <class name="stream" handler="channel" index="80">
- <!--
-======================================================
-== STREAMING
-======================================================
--->
- work with streaming content
-
-<doc>
- The stream class provides methods that support multimedia streaming.
- The stream class uses the following semantics: one message is one
-  packet of data; delivery is unacknowledged and unreliable; the consumer
- can specify quality of service parameters that the server can try to
- adhere to; lower-priority messages may be discarded in favour of high
- priority messages.
-</doc>
-
-<doc name = "grammar">
- stream = C:QOS S:QOS-OK
- / C:CONSUME S:CONSUME-OK
- / C:CANCEL S:CANCEL-OK
- / C:PUBLISH content
- / S:RETURN
- / S:DELIVER content
-</doc>
-
-<chassis name = "server" implement = "MAY" />
-<chassis name = "client" implement = "MAY" />
-
-<doc name = "rule">
- The server SHOULD discard stream messages on a priority basis if
- the queue size exceeds some configured limit.
-</doc>
-<doc name = "rule">
- The server MUST implement at least 2 priority levels for stream
- messages, where priorities 0-4 and 5-9 are treated as two distinct
- levels. The server MAY implement up to 10 priority levels.
-</doc>
-<doc name = "rule">
- The server MUST implement automatic acknowledgements on stream
- content. That is, as soon as a message is delivered to a client
- via a Deliver method, the server must remove it from the queue.
-</doc>
-
-
-<!-- These are the properties for a Stream content -->
-
-<field name = "content type" type = "shortstr">
- MIME content type
-</field>
-<field name = "content encoding" type = "shortstr">
- MIME content encoding
-</field>
-<field name = "headers" type = "table">
- Message header field table
-</field>
-<field name = "priority" type = "octet">
- The message priority, 0 to 9
-</field>
-<field name = "timestamp" type = "timestamp">
- The message timestamp
-</field>
-
-
-<!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
-<method name = "qos" synchronous = "1" index = "10">
- specify quality of service
- <doc>
- This method requests a specific quality of service. The QoS can
- be specified for the current channel or for all channels on the
- connection. The particular properties and semantics of a qos method
- always depend on the content class semantics. Though the qos method
- could in principle apply to both peers, it is currently meaningful
- only for the server.
- </doc>
- <chassis name = "server" implement = "MUST" />
- <response name = "qos-ok" />
-
- <field name = "prefetch size" type = "long">
- prefetch window in octets
- <doc>
- The client can request that messages be sent in advance so that
- when the client finishes processing a message, the following
- message is already held locally, rather than needing to be sent
- down the channel. Prefetching gives a performance improvement.
- This field specifies the prefetch window size in octets. May be
- set to zero, meaning "no specific limit". Note that other
- prefetch limits may still apply.
- </doc>
- </field>
-
- <field name = "prefetch count" type = "short">
- prefetch window in messages
- <doc>
- Specifies a prefetch window in terms of whole messages. This
- field may be used in combination with the prefetch-size field;
- a message will only be sent in advance if both prefetch windows
- (and those at the channel and connection level) allow it.
- </doc>
- </field>
-
- <field name = "consume rate" type = "long">
- transfer rate in octets/second
- <doc>
- Specifies a desired transfer rate in octets per second. This is
- usually determined by the application that uses the streaming
- data. A value of zero means "no limit", i.e. as rapidly as
- possible.
- </doc>
- <doc name = "rule">
- The server MAY ignore the prefetch values and consume rates,
- depending on the type of stream and the ability of the server
- to queue and/or replay it. The server MAY drop low-priority
- messages in favour of high-priority messages.
- </doc>
- </field>
-
- <field name = "global" type = "bit">
- apply to entire connection
- <doc>
- By default the QoS settings apply to the current channel only. If
- this field is set, they are applied to the entire connection.
- </doc>
- </field>
-</method>
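
The qos method above defines two independent prefetch windows: a size window in octets and a count window in whole messages, with zero meaning "no specific limit". A message is only sent ahead of need when both windows still have room. The following is a minimal Python sketch of that combined check; the function name, argument shapes and example numbers are illustrative assumptions, not broker code.

    def can_prefetch(next_msg_octets, unacked_sizes,
                     prefetch_size=0, prefetch_count=0):
        # True if one more message fits in both prefetch windows.
        # A window of zero means "no specific limit", as in the qos method.
        # unacked_sizes lists the octet sizes of messages already sent
        # ahead but not yet processed by the client (an assumed shape).
        if prefetch_count and len(unacked_sizes) + 1 > prefetch_count:
            return False        # message-count window exhausted
        if prefetch_size and sum(unacked_sizes) + next_msg_octets > prefetch_size:
            return False        # octet window exhausted
        return True

    # Example: an 8 KiB / 2-message window with one 5 KiB message outstanding.
    print(can_prefetch(2048, [5120], prefetch_size=8192, prefetch_count=2))  # True
    print(can_prefetch(4096, [5120], prefetch_size=8192, prefetch_count=2))  # False
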
-
-<method name = "qos-ok" synchronous = "1" index = "11">
- confirm the requested qos
- <doc>
- This method tells the client that the requested QoS levels could
- be handled by the server. The requested QoS applies to all active
- consumers until a new QoS is defined.
- </doc>
- <chassis name = "client" implement = "MUST" />
-</method>
-
-<!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
-<method name = "consume" synchronous = "1" index = "20">
- start a queue consumer
- <doc>
- This method asks the server to start a "consumer", which is a
- transient request for messages from a specific queue. Consumers
- last as long as the channel they were created on, or until the
- client cancels them.
- </doc>
- <doc name = "rule">
- The server SHOULD support at least 16 consumers per queue, unless
- the queue was declared as private, and ideally, impose no limit
- except as defined by available resources.
- </doc>
- <doc name = "rule">
- Streaming applications SHOULD use different channels to select
- different streaming resolutions. AMQP makes no provision for
- filtering and/or transforming streams except on the basis of
- priority-based selective delivery of individual messages.
- </doc>
- <chassis name = "server" implement = "MUST" />
- <response name = "consume-ok" />
-
- <field name = "ticket" domain = "access ticket">
- <doc name = "rule">
- The client MUST provide a valid access ticket giving "read" access
- rights to the realm for the queue.
- </doc>
- </field>
-
- <field name = "queue" domain = "queue name">
- <doc>
- Specifies the name of the queue to consume from. If the queue name
- is null, it refers to the current queue for the channel, which is the
- last declared queue.
- </doc>
- <doc name = "rule">
- If the client did not previously declare a queue, and the queue name
- in this method is empty, the server MUST raise a connection exception
- with reply code 530 (not allowed).
- </doc>
- </field>
-
- <field name = "consumer tag" domain = "consumer tag">
- <doc>
- Specifies the identifier for the consumer. The consumer tag is
- local to a connection, so two clients can use the same consumer
- tags. If this field is empty the server will generate a unique
- tag.
- </doc>
- <doc name = "rule" test = "todo">
- The tag MUST NOT refer to an existing consumer. If the client
- attempts to create two consumers with the same non-empty tag
- the server MUST raise a connection exception with reply code
- 530 (not allowed).
- </doc>
- </field>
-
- <field name = "no local" domain = "no local" />
-
- <field name = "exclusive" type = "bit">
- request exclusive access
- <doc>
- Request exclusive consumer access, meaning only this consumer can
- access the queue.
- </doc>
- <doc name = "rule" test = "amq_file_00">
- If the server cannot grant exclusive access to the queue when asked,
- - because there are other consumers active - it MUST raise a channel
- exception with return code 405 (resource locked).
- </doc>
- </field>
-
- <field name = "nowait" type = "bit">
- do not send a reply method
- <doc>
- If set, the server will not respond to the method. The client should
- not wait for a reply method. If the server could not complete the
- method it will raise a channel or connection exception.
- </doc>
- </field>
-</method>
-
-
-<method name = "consume-ok" synchronous = "1" index = "21">
- confirm a new consumer
- <doc>
- This method provides the client with a consumer tag which it may
- use in methods that work with the consumer.
- </doc>
- <chassis name = "client" implement = "MUST" />
-
- <field name = "consumer tag" domain = "consumer tag">
- <doc>
- Holds the consumer tag specified by the client or provided by
- the server.
- </doc>
- </field>
-</method>
-
-<!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
-<method name = "cancel" synchronous = "1" index = "30">
- end a queue consumer
- <doc>
- This method cancels a consumer. Since message delivery is
- asynchronous the client may continue to receive messages for
- a short while after cancelling a consumer. It may process or
- discard these as appropriate.
- </doc>
- <chassis name = "server" implement = "MUST" />
- <response name = "cancel-ok" />
-
- <field name = "consumer tag" domain = "consumer tag" />
-
- <field name = "nowait" type = "bit">
- do not send a reply method
- <doc>
- If set, the server will not respond to the method. The client should
- not wait for a reply method. If the server could not complete the
- method it will raise a channel or connection exception.
- </doc>
- </field>
-</method>
-
-<method name = "cancel-ok" synchronous = "1" index = "31">
- confirm a cancelled consumer
- <doc>
- This method confirms that the cancellation was completed.
- </doc>
- <chassis name = "client" implement = "MUST" />
-
- <field name = "consumer tag" domain = "consumer tag" />
-</method>
-
-
-<!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
-<method name = "publish" content = "1" index = "40">
- publish a message
- <doc>
- This method publishes a message to a specific exchange. The message
- will be routed to queues as defined by the exchange configuration
- and distributed to any active consumers as appropriate.
- </doc>
- <chassis name = "server" implement = "MUST" />
-
- <field name = "ticket" domain = "access ticket">
- <doc name = "rule">
- The client MUST provide a valid access ticket giving "write"
- access rights to the access realm for the exchange.
- </doc>
- </field>
-
- <field name = "exchange" domain = "exchange name">
- <doc>
- Specifies the name of the exchange to publish to. The exchange
- name can be empty, meaning the default exchange. If the exchange
- name is specified, and that exchange does not exist, the server
- will raise a channel exception.
- </doc>
- <doc name = "rule">
- The server MUST accept a blank exchange name to mean the default
- exchange.
- </doc>
- <doc name = "rule">
- If the exchange was declared as an internal exchange, the server
- MUST respond with a reply code 403 (access refused) and raise a
- channel exception.
- </doc>
- <doc name = "rule">
- The exchange MAY refuse stream content in which case it MUST
- respond with a reply code 540 (not implemented) and raise a
- channel exception.
- </doc>
- </field>
-
- <field name = "routing key" type = "shortstr">
- Message routing key
- <doc>
- Specifies the routing key for the message. The routing key is
- used for routing messages depending on the exchange configuration.
- </doc>
- </field>
-
- <field name = "mandatory" type = "bit">
- indicate mandatory routing
- <doc>
- This flag tells the server how to react if the message cannot be
- routed to a queue. If this flag is set, the server will return an
- unroutable message with a Return method. If this flag is zero, the
- server silently drops the message.
- </doc>
- <doc name = "rule" test = "amq_stream_00">
- The server SHOULD implement the mandatory flag.
- </doc>
- </field>
-
- <field name = "immediate" type = "bit">
- request immediate delivery
- <doc>
- This flag tells the server how to react if the message cannot be
- routed to a queue consumer immediately. If this flag is set, the
- server will return an undeliverable message with a Return method.
- If this flag is zero, the server will queue the message, but with
- no guarantee that it will ever be consumed.
- </doc>
- <doc name = "rule" test = "amq_stream_00">
- The server SHOULD implement the immediate flag.
- </doc>
- </field>
-</method>
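
The mandatory and immediate flags above only matter when routing or immediate delivery fails: an unroutable message published as mandatory, or an undeliverable message published as immediate, comes back to the publisher via the Return method; otherwise the message is dropped silently or queued with no consumption guarantee. A small Python decision sketch of those rules, with a hypothetical helper name and return strings:

    def handle_publish(routed_to_queue, has_active_consumer, mandatory, immediate):
        # Return the server action implied by the mandatory/immediate rules.
        if not routed_to_queue:
            # Unroutable: Return it if mandatory, otherwise drop silently.
            return "return" if mandatory else "drop"
        if immediate and not has_active_consumer:
            # Routable but not deliverable right now: Return it.
            return "return"
        # Otherwise deliver now, or queue with no guarantee of consumption.
        return "deliver" if has_active_consumer else "queue"

    assert handle_publish(False, False, mandatory=True,  immediate=False) == "return"
    assert handle_publish(False, False, mandatory=False, immediate=False) == "drop"
    assert handle_publish(True,  False, mandatory=False, immediate=True)  == "return"
    assert handle_publish(True,  False, mandatory=False, immediate=False) == "queue"
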
-
-<method name = "return" content = "1" index = "50">
- return a failed message
- <doc>
- This method returns an undeliverable message that was published
- with the "immediate" flag set, or an unroutable message published
- with the "mandatory" flag set. The reply code and text provide
- information about the reason that the message was undeliverable.
- </doc>
- <chassis name = "client" implement = "MUST" />
-
- <field name = "reply code" domain = "reply code" />
- <field name = "reply text" domain = "reply text" />
-
- <field name = "exchange" domain = "exchange name">
- <doc>
- Specifies the name of the exchange that the message was
- originally published to.
- </doc>
- </field>
-
- <field name = "routing key" type = "shortstr">
- Message routing key
- <doc>
- Specifies the routing key name specified when the message was
- published.
- </doc>
- </field>
-</method>
-
-
-<!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
-<method name = "deliver" content = "1" index = "60">
- notify the client of a consumer message
- <doc>
- This method delivers a message to the client, via a consumer. In
- the asynchronous message delivery model, the client starts a
- consumer using the Consume method, then the server responds with
- Deliver methods as and when messages arrive for that consumer.
- </doc>
- <chassis name = "client" implement = "MUST" />
-
- <field name = "consumer tag" domain = "consumer tag" />
-
- <field name = "delivery tag" domain = "delivery tag" />
-
- <field name = "exchange" domain = "exchange name">
- <doc>
- Specifies the name of the exchange that the message was originally
- published to.
- </doc>
- </field>
-
- <field name = "queue" domain = "queue name">
- <doc>
- Specifies the name of the queue that the message came from. Note
- that a single channel can start many consumers on different
- queues.
- </doc>
- <assert check = "notnull" />
- </field>
-</method>
- </class>
-
- <class name="tx" handler="channel" index="90">
- <!--
-======================================================
-== TRANSACTIONS
-======================================================
--->
- work with standard transactions
-
-<doc>
- Standard transactions provide so-called "1.5 phase commit". We can
- ensure that work is never lost, but there is a chance of confirmations
- being lost, so that messages may be resent. Applications that use
- standard transactions must be able to detect and ignore duplicate
- messages.
-</doc>
- <rule implement="SHOULD">
- A client using standard transactions SHOULD be able to track all
- messages received within a reasonable period, and thus detect and
- reject duplicates of the same message. It SHOULD NOT pass these to
- the application layer.
-</rule>
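
The rule above leaves duplicate suppression to the client, since with "1.5 phase commit" a lost confirmation can cause the same message to be resent. One way a client might track recently received messages is a bounded set keyed on an application-level message id; both the id field and the window size below are assumptions for illustration.

    from collections import OrderedDict

    class DuplicateFilter:
        # Remember recently seen message ids and drop repeats.

        def __init__(self, window=10000):
            self.window = window       # the "reasonable period", here a count
            self.seen = OrderedDict()

        def accept(self, message_id):
            # True if the message should be passed to the application layer.
            if message_id in self.seen:
                return False                   # duplicate: do not pass it up
            self.seen[message_id] = True
            if len(self.seen) > self.window:
                self.seen.popitem(last=False)  # forget the oldest id
            return True

    f = DuplicateFilter(window=3)
    print([f.accept(m) for m in ["a", "b", "a", "c"]])  # [True, True, False, True]
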
- <doc name="grammar">
- tx = C:SELECT S:SELECT-OK
- / C:COMMIT S:COMMIT-OK
- / C:ROLLBACK S:ROLLBACK-OK
-</doc>
- <chassis name="server" implement="SHOULD"/>
- <chassis name="client" implement="MAY"/>
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="select" synchronous="1" index="10">
-select standard transaction mode
- <doc>
- This method sets the channel to use standard transactions. The
- client must use this method at least once on a channel before
- using the Commit or Rollback methods.
- </doc>
- <chassis name="server" implement="MUST"/>
- <response name="select-ok"/>
- </method>
- <method name="select-ok" synchronous="1" index="11">
-confirm transaction mode
- <doc>
- This method confirms to the client that the channel was successfully
- set to use standard transactions.
- </doc>
- <chassis name="client" implement="MUST"/>
- </method>
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="commit" synchronous="1" index="20">
-commit the current transaction
- <doc>
- This method commits all messages published and acknowledged in
- the current transaction. A new transaction starts immediately
- after a commit.
- </doc>
- <chassis name="server" implement="MUST"/>
- <response name="commit-ok"/>
- </method>
- <method name="commit-ok" synchronous="1" index="21">
-confirm a successful commit
- <doc>
- This method confirms to the client that the commit succeeded.
- Note that if a commit fails, the server raises a channel exception.
- </doc>
- <chassis name="client" implement="MUST"/>
- </method>
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="rollback" synchronous="1" index="30">
-abandon the current transaction
- <doc>
- This method abandons all messages published and acknowledged in
- the current transaction. A new transaction starts immediately
- after a rollback.
- </doc>
- <chassis name="server" implement="MUST"/>
- <response name="rollback-ok"/>
- </method>
- <method name="rollback-ok" synchronous="1" index="31">
-confirm a successful rollback
- <doc>
- This method confirms to the client that the rollback succeeded.
- Note that if a rollback fails, the server raises a channel exception.
- </doc>
- <chassis name="client" implement="MUST"/>
- </method>
- </class>
- <class name="dtx" handler="channel" index="100">
- <!--
-======================================================
-== DISTRIBUTED TRANSACTIONS
-======================================================
--->
- work with distributed transactions
-
-<doc>
- Distributed transactions provide so-called "2-phase commit". The
- AMQP distributed transaction model supports the X/Open XA
- architecture and other distributed transaction implementations.
- The Dtx class assumes that the server has a private communications
- channel (not AMQP) to a distributed transaction coordinator.
-</doc>
- <doc name="grammar">
- dtx = C:SELECT S:SELECT-OK
- C:START S:START-OK
-</doc>
- <chassis name="server" implement="MAY"/>
- <chassis name="client" implement="MAY"/>
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="select" synchronous="1" index="10">
-select distributed transaction mode
- <doc>
- This method sets the channel to use distributed transactions. The
- client must use this method at least once on a channel before
- using the Start method.
- </doc>
- <chassis name="server" implement="MUST"/>
- <response name="select-ok"/>
- </method>
- <method name="select-ok" synchronous="1" index="11">
-confirm transaction mode
- <doc>
- This method confirms to the client that the channel was successfully
- set to use distributed transactions.
- </doc>
- <chassis name="client" implement="MUST"/>
- </method>
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="start" synchronous="1" index="20">
- start a new distributed transaction
- <doc>
- This method starts a new distributed transaction. This must be
- the first method on a new channel that uses the distributed
- transaction mode, before any methods that publish or consume
- messages.
- </doc>
- <chassis name="server" implement="MAY"/>
- <response name="start-ok"/>
- <field name="dtx identifier" type="shortstr">
- transaction identifier
- <doc>
- The distributed transaction key. This identifies the transaction
- so that the AMQP server can coordinate with the distributed
- transaction coordinator.
- </doc>
- <assert check="notnull"/>
- </field>
- </method>
- <method name="start-ok" synchronous="1" index="21">
- confirm the start of a new distributed transaction
- <doc>
- This method confirms to the client that the transaction started.
- Note that if a start fails, the server raises a channel exception.
- </doc>
- <chassis name="client" implement="MUST"/>
- </method>
- </class>
- <class name="tunnel" handler="tunnel" index="110">
- <!--
-======================================================
-== TUNNEL
-======================================================
--->
- methods for protocol tunnelling
-
-<doc>
- The tunnel methods are used to send blocks of binary data - which
- can be serialised AMQP methods or other protocol frames - between
- AMQP peers.
-</doc>
- <doc name="grammar">
- tunnel = C:REQUEST
- / S:REQUEST
-</doc>
- <chassis name="server" implement="MAY"/>
- <chassis name="client" implement="MAY"/>
- <field name="headers" type="table">
- Message header field table
-</field>
- <field name="proxy name" type="shortstr">
- The identity of the tunnelling proxy
-</field>
- <field name="data name" type="shortstr">
- The name or type of the message being tunnelled
-</field>
- <field name="durable" type="octet">
- The message durability indicator
-</field>
- <field name="broadcast" type="octet">
- The message broadcast mode
-</field>
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="request" content="1" index="10">
- sends a tunnelled method
- <doc>
- This method tunnels a block of binary data, which can be an
- encoded AMQP method or other data. The binary data is sent
- as the content for the Tunnel.Request method.
- </doc>
- <chassis name="server" implement="MUST"/>
- <field name="meta data" type="table">
- meta data for the tunnelled block
- <doc>
- This field table holds arbitrary meta-data that the sender needs
- to pass to the recipient.
- </doc>
- </field>
- </method>
- </class>
- <class name="test" handler="channel" index="120">
- <!--
-======================================================
-== TEST - CHECK FUNCTIONAL CAPABILITIES OF AN IMPLEMENTATION
-======================================================
--->
- test functional primitives of the implementation
-
-<doc>
- The test class provides methods for a peer to test the basic
- operational correctness of another peer. The test methods are
- intended to ensure that all peers respect at least the basic
- elements of the protocol, such as frame and content organisation
- and field types. We assume that a specially-designed peer, a
- "monitor client" would perform such tests.
-</doc>
- <doc name="grammar">
- test = C:INTEGER S:INTEGER-OK
- / S:INTEGER C:INTEGER-OK
- / C:STRING S:STRING-OK
- / S:STRING C:STRING-OK
- / C:TABLE S:TABLE-OK
- / S:TABLE C:TABLE-OK
- / C:CONTENT S:CONTENT-OK
- / S:CONTENT C:CONTENT-OK
-</doc>
- <chassis name="server" implement="MUST"/>
- <chassis name="client" implement="SHOULD"/>
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="integer" synchronous="1" index="10">
- test integer handling
- <doc>
- This method tests the peer's capability to correctly marshal integer
- data.
- </doc>
- <chassis name="client" implement="MUST"/>
- <chassis name="server" implement="MUST"/>
- <response name="integer-ok"/>
- <field name="integer 1" type="octet">
- octet test value
- <doc>
- An octet integer test value.
- </doc>
- </field>
- <field name="integer 2" type="short">
- short test value
- <doc>
- A short integer test value.
- </doc>
- </field>
- <field name="integer 3" type="long">
- long test value
- <doc>
- A long integer test value.
- </doc>
- </field>
- <field name="integer 4" type="longlong">
- long-long test value
- <doc>
- A long long integer test value.
- </doc>
- </field>
- <field name="operation" type="octet">
- operation to test
- <doc>
- The client must execute this operation on the provided integer
- test fields and return the result.
- </doc>
- <assert check="enum">
- <value name="add">return sum of test values</value>
- <value name="min">return lowest of test values</value>
- <value name="max">return highest of test values</value>
- </assert>
- </field>
- </method>
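
The operation field above asks the peer to combine the four integer test fields with add, min or max and report the outcome in Integer-Ok. A trivial Python sketch of that evaluation (function name assumed):

    def integer_result(integer_1, integer_2, integer_3, integer_4, operation):
        # Apply the requested test operation to the four integer fields.
        values = (integer_1, integer_2, integer_3, integer_4)
        ops = {"add": sum, "min": min, "max": max}
        return ops[operation](values)

    print(integer_result(1, 2, 3, 4, "add"))  # 10
    print(integer_result(1, 2, 3, 4, "max"))  # 4
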
- <method name="integer-ok" synchronous="1" index="11">
- report integer test result
- <doc>
- This method reports the result of an Integer method.
- </doc>
- <chassis name="client" implement="MUST"/>
- <chassis name="server" implement="MUST"/>
- <field name="result" type="longlong">
- result value
- <doc>
- The result of the tested operation.
- </doc>
- </field>
- </method>
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="string" synchronous="1" index="20">
- test string handling
- <doc>
- This method tests the peer's capability to correctly marshal string
- data.
- </doc>
- <chassis name="client" implement="MUST"/>
- <chassis name="server" implement="MUST"/>
- <response name="string-ok"/>
- <field name="string 1" type="shortstr">
- short string test value
- <doc>
- A short string test value.
- </doc>
- </field>
- <field name="string 2" type="longstr">
- long string test value
- <doc>
- A long string test value.
- </doc>
- </field>
- <field name="operation" type="octet">
- operation to test
- <doc>
- The client must execute this operation on the provided string
- test fields and return the result.
- </doc>
- <assert check="enum">
- <value name="add">return concatentation of test strings</value>
- <value name="min">return shortest of test strings</value>
- <value name="max">return longest of test strings</value>
- </assert>
- </field>
- </method>
- <method name="string-ok" synchronous="1" index="21">
- report string test result
- <doc>
- This method reports the result of a String method.
- </doc>
- <chassis name="client" implement="MUST"/>
- <chassis name="server" implement="MUST"/>
- <field name="result" type="longstr">
- result value
- <doc>
- The result of the tested operation.
- </doc>
- </field>
- </method>
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="table" synchronous="1" index="30">
- test field table handling
- <doc>
- This method tests the peer's capability to correctly marshal field
- table data.
- </doc>
- <chassis name="client" implement="MUST"/>
- <chassis name="server" implement="MUST"/>
- <response name="table-ok"/>
- <field name="table" type="table">
- field table of test values
- <doc>
- A field table of test values.
- </doc>
- </field>
- <field name="integer op" type="octet">
- operation to test on integers
- <doc>
- The client must execute this operation on the provided field
- table integer values and return the result.
- </doc>
- <assert check="enum">
- <value name="add">return sum of numeric field values</value>
- <value name="min">return min of numeric field values</value>
- <value name="max">return max of numeric field values</value>
- </assert>
- </field>
- <field name="string op" type="octet">
- operation to test on strings
- <doc>
- The client must execute this operation on the provided field
- table string values and return the result.
- </doc>
- <assert check="enum">
- <value name="add">return concatenation of string field values</value>
- <value name="min">return shortest of string field values</value>
- <value name="max">return longest of string field values</value>
- </assert>
- </field>
- </method>
- <method name="table-ok" synchronous="1" index="31">
- report table test result
- <doc>
- This method reports the result of a Table method.
- </doc>
- <chassis name="client" implement="MUST"/>
- <chassis name="server" implement="MUST"/>
- <field name="integer result" type="longlong">
- integer result value
- <doc>
- The result of the tested integer operation.
- </doc>
- </field>
- <field name="string result" type="longstr">
- string result value
- <doc>
- The result of the tested string operation.
- </doc>
- </field>
- </method>
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <method name="content" synchronous="1" content="1" index="40">
- test content handling
- <doc>
- This method tests the peer's capability to correctly marshal content.
- </doc>
- <chassis name="client" implement="MUST"/>
- <chassis name="server" implement="MUST"/>
- <response name="content-ok"/>
- </method>
- <method name="content-ok" synchronous="1" content="1" index="41">
- report content test result
- <doc>
- This method reports the result of a Content method. It contains the
- content checksum and echoes the original content as provided.
- </doc>
- <chassis name="client" implement="MUST"/>
- <chassis name="server" implement="MUST"/>
- <field name="content checksum" type="long">
- content hash
- <doc>
- The 32-bit checksum of the content, calculated by adding the
- content into a 32-bit accumulator.
- </doc>
- </field>
- </method>
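
The checksum described above is simply every content octet added into a 32-bit accumulator. A one-function Python sketch of that calculation:

    def content_checksum(content):
        # Add each octet of the content (a bytes value) into a 32-bit
        # accumulator, truncating to 32 bits on every step.
        total = 0
        for octet in content:
            total = (total + octet) & 0xFFFFFFFF
        return total

    print(content_checksum(b"hello"))  # 532
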
- </class>
-</amqp>
diff --git a/qpid/gentools/xml-src/amqp-0.9.test.xml b/qpid/gentools/xml-src/amqp-0.9.test.xml
deleted file mode 100644
index e12e9c787a..0000000000
--- a/qpid/gentools/xml-src/amqp-0.9.test.xml
+++ /dev/null
@@ -1,4282 +0,0 @@
-<?xml version = "1.0"?>
-
-<!--
- EDITORS: (PH) Pieter Hintjens <ph@imatix.com>
- (KvdR) Kim van der Riet <kim.vdriet@redhat.com>
-
- These editors have been assigned by the AMQP working group.
- Please do not edit/commit this file without consulting with
- one of the above editors.
- ========================================================
-
- TODOs
- - see TODO comments in the text
--->
-
-<!--
- Copyright Notice
- ================
- (c) Copyright JPMorgan Chase Bank & Co., Cisco Systems, Inc., Envoy Technologies Inc.,
- iMatix Corporation, IONA® Technologies, Red Hat, Inc.,
- TWIST Process Innovations, and 29West Inc. 2006. All rights reserved.
-
- License
- =======
- JPMorgan Chase Bank & Co., Cisco Systems, Inc., Envoy Technologies Inc., iMatix
- Corporation, IONA® Technologies, Red Hat, Inc., TWIST Process Innovations, and
- 29West Inc. (collectively, the "Authors") each hereby grants to you a worldwide,
- perpetual, royalty-free, nontransferable, nonexclusive license to
- (i) copy, display, and implement the Advanced Messaging Queue Protocol
- ("AMQP") Specification and (ii) the Licensed Claims that are held by
- the Authors, all for the purpose of implementing the Advanced Messaging
- Queue Protocol Specification. Your license and any rights under this
- Agreement will terminate immediately without notice from
- any Author if you bring any claim, suit, demand, or action related to
- the Advanced Messaging Queue Protocol Specification against any Author.
- Upon termination, you shall destroy all copies of the Advanced Messaging
- Queue Protocol Specification in your possession or control.
-
- As used hereunder, "Licensed Claims" means those claims of a patent or
- patent application, throughout the world, excluding design patents and
- design registrations, owned or controlled, or that can be sublicensed
- without fee and in compliance with the requirements of this
- Agreement, by an Author or its affiliates now or at any
- future time and which would necessarily be infringed by implementation
- of the Advanced Messaging Queue Protocol Specification. A claim is
- necessarily infringed hereunder only when it is not possible to avoid
- infringing it because there is no plausible non-infringing alternative
- for implementing the required portions of the Advanced Messaging Queue
- Protocol Specification. Notwithstanding the foregoing, Licensed Claims
- shall not include any claims other than as set forth above even if
- contained in the same patent as Licensed Claims; or that read solely
- on any implementations of any portion of the Advanced Messaging Queue
- Protocol Specification that are not required by the Advanced Messaging
- Queue Protocol Specification, or that, if licensed, would require a
- payment of royalties by the licensor to unaffiliated third parties.
- Moreover, Licensed Claims shall not include (i) any enabling technologies
- that may be necessary to make or use any Licensed Product but are not
- themselves expressly set forth in the Advanced Messaging Queue Protocol
- Specification (e.g., semiconductor manufacturing technology, compiler
- technology, object oriented technology, networking technology, operating
- system technology, and the like); or (ii) the implementation of other
- published standards developed elsewhere and merely referred to in the
- body of the Advanced Messaging Queue Protocol Specification, or
- (iii) any Licensed Product and any combinations thereof the purpose or
- function of which is not required for compliance with the Advanced
- Messaging Queue Protocol Specification. For purposes of this definition,
- the Advanced Messaging Queue Protocol Specification shall be deemed to
- include both architectural and interconnection requirements essential
- for interoperability and may also include supporting source code artifacts
- where such architectural, interconnection requirements and source code
- artifacts are expressly identified as being required or documentation to
- achieve compliance with the Advanced Messaging Queue Protocol Specification.
-
- As used hereunder, "Licensed Products" means only those specific portions
- of products (hardware, software or combinations thereof) that implement
- and are compliant with all relevant portions of the Advanced Messaging
- Queue Protocol Specification.
-
- The following disclaimers, which you hereby also acknowledge as to any
- use you may make of the Advanced Messaging Queue Protocol Specification:
-
- THE ADVANCED MESSAGING QUEUE PROTOCOL SPECIFICATION IS PROVIDED "AS IS,"
- AND THE AUTHORS MAKE NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
- IMPLIED, INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, OR TITLE; THAT THE
- CONTENTS OF THE ADVANCED MESSAGING QUEUE PROTOCOL SPECIFICATION ARE
- SUITABLE FOR ANY PURPOSE; NOR THAT THE IMPLEMENTATION OF THE ADVANCED
- MESSAGING QUEUE PROTOCOL SPECIFICATION WILL NOT INFRINGE ANY THIRD PARTY
- PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS.
-
- THE AUTHORS WILL NOT BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL,
- INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR RELATING TO ANY
- USE, IMPLEMENTATION OR DISTRIBUTION OF THE ADVANCED MESSAGING QUEUE
- PROTOCOL SPECIFICATION.
-
- The name and trademarks of the Authors may NOT be used in any manner,
- including advertising or publicity pertaining to the Advanced Messaging
- Queue Protocol Specification or its contents without specific, written
- prior permission. Title to copyright in the Advanced Messaging Queue
- Protocol Specification will at all times remain with the Authors.
-
- No other rights are granted by implication, estoppel or otherwise.
-
- Upon termination of your license or rights under this Agreement, you
- shall destroy all copies of the Advanced Messaging Queue Protocol
- Specification in your possession or control.
-
- Trademarks
- ==========
- "JPMorgan", "JPMorgan Chase", "Chase", the JPMorgan Chase logo and the
- Octagon Symbol are trademarks of JPMorgan Chase & Co.
-
- IMATIX and the iMatix logo are trademarks of iMatix Corporation sprl.
-
- IONA, IONA Technologies, and the IONA logos are trademarks of IONA
- Technologies PLC and/or its subsidiaries.
-
- LINUX is a trademark of Linus Torvalds. RED HAT and JBOSS are registered
- trademarks of Red Hat, Inc. in the US and other countries.
-
- Java, all Java-based trademarks and OpenOffice.org are trademarks of
- Sun Microsystems, Inc. in the United States, other countries, or both.
-
- Other company, product, or service names may be trademarks or service
- marks of others.
-
- Links to full AMQP specification:
- =================================
- http://www.envoytech.org/spec/amq/
- http://www.iona.com/opensource/amqp/
- http://www.redhat.com/solutions/specifications/amqp/
- http://www.twiststandards.org/tiki-index.php?page=AMQ
- http://www.imatix.com/amqp
--->
-
-<!--
- <!DOCTYPE amqp SYSTEM "amqp.dtd">
--->
-
-<!-- XML Notes
-
- We use entities to indicate repetition; attributes to indicate properties.
-
- We use the 'name' attribute as an identifier, usually within the context
- of the surrounding entities.
-
- We use spaces to separate words in names, so that we can print names in
- their natural form depending on the context - underlines for source code,
- hyphens for written text, etc.
-
- We do not enforce any particular validation mechanism but we support all
- mechanisms. The protocol definition conforms to a formal grammar that is
- published separately in several technologies.
-
- -->
-
-<amqp major = "0" minor = "9" port = "5672" comment = "AMQ Protocol">
- <!--
- ======================================================
- == CONSTANTS
- ======================================================
- -->
- <!-- Frame types -->
- <constant name = "frame-method" value = "1" />
- <constant name = "frame-header" value = "2" />
- <constant name = "frame-body" value = "3" />
- <constant name = "frame-oob-method" value = "4" />
- <constant name = "frame-oob-header" value = "5" />
- <constant name = "frame-oob-body" value = "6" />
- <constant name = "frame-trace" value = "7" />
- <constant name = "frame-heartbeat" value = "8" />
-
- <!-- Protocol constants -->
- <constant name = "frame-min-size" value = "4096" />
- <constant name = "frame-end" value = "206" />
-
- <!-- Reply codes -->
- <constant name = "reply-success" value = "200">
- <doc>
- Indicates that the method completed successfully. This reply code is
- reserved for future use - the current protocol design does not use positive
- confirmation and reply codes are sent only in case of an error.
- </doc>
- </constant>
-
- <constant name = "not-delivered" value = "310" class = "soft-error">
- <doc>
- The client asked for a specific message that is no longer available.
- The message was delivered to another client, or was purged from the queue
- for some other reason.
- </doc>
- </constant>
-
- <constant name = "content-too-large" value = "311" class = "soft-error">
- <doc>
- The client attempted to transfer content larger than the server could accept
- at the present time. The client may retry at a later time.
- </doc>
- </constant>
-
- <constant name = "connection-forced" value = "320" class = "hard-error">
- <doc>
- An operator intervened to close the connection for some reason. The client
- may retry at some later date.
- </doc>
- </constant>
-
- <constant name = "invalid-path" value = "402" class = "hard-error">
- <doc>
- The client tried to work with an unknown virtual host.
- </doc>
- </constant>
-
- <constant name = "access-refused" value = "403" class = "soft-error">
- <doc>
- The client attempted to work with a server entity to which it has no
- access due to security settings.
- </doc>
- </constant>
-
- <constant name = "not-found" value = "404" class = "soft-error">
- <doc>The client attempted to work with a server entity that does not exist.</doc>
- </constant>
-
- <constant name = "resource-locked" value = "405" class = "soft-error">
- <doc>
- The client attempted to work with a server entity to which it has no
- access because another client is working with it.
- </doc>
- </constant>
-
- <constant name = "precondition-failed" value = "406" class = "soft-error">
- <doc>
- The client requested a method that was not allowed because some precondition
- failed.
- </doc>
- </constant>
-
- <constant name = "frame-error" value = "501" class = "hard-error">
- <doc>
- The client sent a malformed frame that the server could not decode. This
- strongly implies a programming error in the client.
- </doc>
- </constant>
-
- <constant name = "syntax-error" value = "502" class = "hard-error">
- <doc>
- The client sent a frame that contained illegal values for one or more
- fields. This strongly implies a programming error in the client.
- </doc>
- </constant>
-
- <constant name = "command-invalid" value = "503" class = "hard-error">
- <doc>
- The client sent an invalid sequence of frames, attempting to perform an
- operation that was considered invalid by the server. This usually implies
- a programming error in the client.
- </doc>
- </constant>
-
- <constant name = "channel-error" value = "504" class = "hard-error">
- <doc>
- The client attempted to work with a channel that had not been correctly
- opened. This most likely indicates a fault in the client layer.
- </doc>
- </constant>
-
- <constant name = "resource-error" value = "506" class = "hard-error">
- <doc>
- The server could not complete the method because it lacked sufficient
- resources. This may be due to the client creating too many of some type
- of entity.
- </doc>
- </constant>
-
- <constant name = "not-allowed" value = "530" class = "hard-error">
- <doc>
- The client tried to work with some entity in a manner that is prohibited
- by the server, due to security settings or by some other criteria.
- </doc>
- </constant>
-
- <constant name = "not-implemented" value = "540" class = "hard-error">
- <doc>
- The client tried to use functionality that is not implemented in the
- server.
- </doc>
- </constant>
-
- <constant name = "internal-error" value = "541" class = "hard-error">
- <doc>
- The server could not complete the method because of an internal error.
- The server may require intervention by an operator in order to resume
- normal operations.
- </doc>
- </constant>
-
- <constant name = "test-str2" value = "1.2.3.3"/>
-
- <!--
- ======================================================
- == DOMAIN TYPES
- ======================================================
- -->
-
- <domain name = "access-ticket" type = "short" label = "access ticket granted by server">
- <doc>
- An access ticket granted by the server for a certain set of access rights
- within a specific realm. Access tickets are valid within the channel where
- they were created, and expire when the channel closes.
- </doc>
- <assert check = "ne" value = "0" />
- </domain>
-
- <domain name = "class-id" type = "short" />
-
- <domain name = "consumer-tag" type = "shortstr" label = "consumer tag">
- <doc>
- Identifier for the consumer, valid within the current connection.
- </doc>
- </domain>
-
- <domain name = "delivery-tag" type = "longlong" label = "server-assigned delivery tag">
- <doc>
- The server-assigned and channel-specific delivery tag
- </doc>
- <rule name = "channel-local">
- <doc>
- The delivery tag is valid only within the channel from which the message was
- received. I.e. a client MUST NOT receive a message on one channel and then
- acknowledge it on another.
- </doc>
- </rule>
- <rule name = "non-zero">
- <doc>
- The server MUST NOT use a zero value for delivery tags. Zero is reserved
- for client use, meaning "all messages so far received".
- </doc>
- </rule>
- </domain>
-
- <domain name = "exchange-name" type = "shortstr" label = "exchange name">
- <doc>
- The exchange name is a client-selected string that identifies the exchange for publish
- methods. Exchange names may consist of any mixture of digits, letters, and underscores.
- Exchange names are scoped by the virtual host.
- </doc>
- <assert check = "length" value = "127" />
- </domain>
-
- <domain name = "known-hosts" type = "shortstr" label = "list of known hosts">
- <doc>
- Specifies the list of equivalent or alternative hosts that the server knows about,
- which will normally include the current server itself. Clients can cache this
- information and use it when reconnecting to a server after a failure. This field
- may be empty.
- </doc>
- </domain>
-
- <domain name = "method-id" type = "short" />
-
- <domain name = "no-ack" type = "bit" label = "no acknowledgement needed">
- <doc>
- If this field is set the server does not expect acknowledgments for
- messages. That is, when a message is delivered to the client the server
- automatically and silently acknowledges it on behalf of the client. This
- functionality increases performance but at the cost of reliability.
- Messages can get lost if a client dies before it can deliver them to the
- application.
- </doc>
- </domain>
-
- <domain name = "no-local" type = "bit" label = "do not deliver own messages">
- <doc>
- If the no-local field is set the server will not send messages to the client that
- published them.
- </doc>
- </domain>
-
- <domain name = "path" type = "shortstr">
- <doc>
- Must start with a slash "/" and continue with path names separated by slashes. A path
- name consists of any combination of at least one of [A-Za-z0-9] plus zero or more of
- [.-_+!=:].
- </doc>
-
- <assert check = "notnull" />
- <assert check = "syntax" rule = "path" />
- <assert check = "length" value = "127" />
- </domain>
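
The path domain above combines three asserts: the value must be non-empty, must follow the path syntax (a leading slash, then slash-separated names built from at least one of [A-Za-z0-9] plus zero or more of [.-_+!=:]), and must fit in 127 octets. A Python sketch of an equivalent validator; the regular expression is one reading of the prose rule, not a normative definition.

    import re

    # One path name: allowed characters only, containing at least one
    # letter or digit (an interpretation of the prose above).
    NAME = r"(?=[A-Za-z0-9.\-_+!=:]*[A-Za-z0-9])[A-Za-z0-9.\-_+!=:]+"
    PATH = re.compile(r"^(/%s)+$" % NAME)

    def valid_path(value):
        # Mirror the notnull, syntax and length asserts of the domain.
        return (bool(value) and len(value.encode()) <= 127
                and PATH.match(value) is not None)

    print(valid_path("/dev/queue.1"))  # True
    print(valid_path("data"))          # False: must start with a slash
    print(valid_path("/"))             # False: no path name after the slash
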
-
- <domain name = "peer-properties" type = "table">
- <doc>
- This string provides a set of peer properties, used for identification, debugging, and
- general information.
- </doc>
- </domain>
-
- <domain name = "queue-name" type = "shortstr" label = "queue name">
- <doc>
- The queue name identifies the queue within the vhost. Queue names may consist of any
- mixture of digits, letters, and underscores.
- </doc>
- <assert check = "length" value = "127" />
- </domain>
-
- <domain name = "redelivered" type = "bit" label = "message is being redelivered">
- <doc>
- This indicates that the message has been previously delivered to this or
- another client.
- </doc>
- <rule name = "implementation">
- <doc>
- The server SHOULD try to signal redelivered messages when it can. When
- redelivering a message that was not successfully acknowledged, the server
- SHOULD deliver it to the original client if possible.
- </doc>
- <doc type = "scenario">
- Create a shared queue and publish a message to the queue. Consume the
- message using explicit acknowledgements, but do not acknowledge the
- message. Close the connection, reconnect, and consume from the queue
- again. The message should arrive with the redelivered flag set.
- </doc>
- </rule>
- <rule name = "hinting">
- <doc>
- The client MUST NOT rely on the redelivered field but should take it as a
- hint that the message may already have been processed. A fully robust
- client must be able to track duplicate received messages on non-transacted,
- and locally-transacted channels.
- </doc>
- </rule>
- </domain>
-
- <domain name = "reply-code" type = "short" label = "reply code from server">
- <doc>
- The reply code. The AMQ reply codes are defined as constants at the start
- of this formal specification.
- </doc>
- <assert check = "notnull" />
- </domain>
-
- <domain name = "reply-text" type = "shortstr" label = "localised reply text">
- <doc>
- The localised reply text. This text can be logged as an aid to resolving
- issues.
- </doc>
- <assert check = "notnull" />
- </domain>
-
- <!-- Elementary domains -->
- <domain name = "bit" type = "bit" label = "single bit" />
- <domain name = "octet" type = "octet" label = "single octet" />
- <domain name = "short" type = "short" label = "16-bit integer" />
- <domain name = "long" type = "long" label = "32-bit integer" />
- <domain name = "longlong" type = "longlong" label = "64-bit integer" />
- <domain name = "shortstr" type = "shortstr" label = "short string" />
- <domain name = "longstr" type = "longstr" label = "long string" />
- <domain name = "timestamp" type = "timestamp" label = "64-bit timestamp" />
- <domain name = "table" type = "table" label = "field table" />
-
- <!-- == CONNECTION ======================================================= -->
-
- <!-- TODO 0.81 - the 'handler' attribute of methods needs to be reviewed, and if
- no current implementations use it, removed. /PH 2006/07/20
- -->
-
- <class name = "connection" handler = "connection" index = "10" label = "work with socket connections">
- <doc>
- The connection class provides methods for a client to establish a network connection to
- a server, and for both peers to operate the connection thereafter.
- </doc>
-
- <doc type = "grammar">
- connection = open-connection *use-connection close-connection
- open-connection = C:protocol-header
- S:START C:START-OK
- *challenge
- S:TUNE C:TUNE-OK
- C:OPEN S:OPEN-OK | S:REDIRECT
- challenge = S:SECURE C:SECURE-OK
- use-connection = *channel
- close-connection = C:CLOSE S:CLOSE-OK
- / S:CLOSE C:CLOSE-OK
- </doc>
-
- <chassis name = "server" implement = "MUST" />
- <chassis name = "client" implement = "MUST" />
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "start" synchronous = "1" index = "10" label = "start connection negotiation">
- <doc>
- This method starts the connection negotiation process by telling the client the
- protocol version that the server proposes, along with a list of security mechanisms
- which the client can use for authentication.
- </doc>
-
- <rule name = "protocol-name">
- <doc>
- If the server cannot support the protocol specified in the protocol header,
- it MUST close the socket connection without sending any response method.
- </doc>
- <doc type = "scenario">
- The client sends a protocol header containing an invalid protocol name.
- The server must respond by closing the connection.
- </doc>
- </rule>
- <rule name = "server-support">
- <doc>
- The server MUST provide a protocol version that is lower than or equal to
- that requested by the client in the protocol header.
- </doc>
- <doc type = "scenario">
- The client requests a protocol version that is higher than any valid
- implementation, e.g. 9.0. The server must respond with a current
- protocol version, e.g. 1.0.
- </doc>
- </rule>
- <rule name = "client-support">
- <doc>
- If the client cannot handle the protocol version suggested by the server
- it MUST close the socket connection.
- </doc>
- <doc type = "scenario">
- The server sends a protocol version that is lower than any valid
- implementation, e.g. 0.1. The client must respond by closing the
- connection.
- </doc>
- </rule>
-
- <chassis name = "client" implement = "MUST" />
- <response name = "start-ok" />
-
- <field name = "version-major" domain = "octet" label = "protocol major version">
- <doc>
- The version of the protocol, expressed in protocol units of 0.1 public
- versions and properly printed as two digits with a leading zero. I.e. a
- protocol version of "09" represents a public version "0.9". The decimal
- shift allows the correct expression of pre-1.0 protocol releases.
- </doc>
- <doc type = "todo">
- This field should be renamed to "protocol version".
- </doc>
- </field>
-
- <field name = "version-minor" domain = "octet" label = "protocol major version">
- <doc>
- The protocol revision, expressed as an integer from 0 to 9. The use of more
- than ten revisions is discouraged. The public version string is constructed
- from the protocol version and revision as follows: we print the protocol
- version with one decimal position, and we append the protocol revision. A
- version=10 and revision=2 are printed as "1.02".
- </doc>
- <doc type = "todo">
- This field should be renamed to "protocol revision".
- </doc>
- </field>
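
Taken together, the two version fields above build the public version string by shifting the decimal point: the version field counts 0.1 units and is printed with one decimal position, then the revision digit is appended, so version=10 and revision=2 give "1.02". A tiny Python sketch of that formatting (function name assumed):

    def public_version(version, revision):
        # The version field counts 0.1 public-version units; print it with
        # one decimal position and append the revision digit.
        return "%.1f%d" % (version / 10.0, revision)

    print(public_version(10, 2))  # 1.02, the example given above
    print(public_version(9, 1))   # 0.91
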
-
- <field name = "server-properties" domain = "peer-properties" label = "server properties">
- <rule name = "required-fields">
- <doc>
- The properties SHOULD contain at least these fields: "host", specifying the
- server host name or address, "product", giving the name of the server product,
- "version", giving the name of the server version, "platform", giving the name
- of the operating system, "copyright", if appropriate, and "information", giving
- other general information.
- </doc>
- <doc type = "scenario">
- Client connects to server and inspects the server properties. It checks for
- the presence of the required fields.
- </doc>
- </rule>
- </field>
-
- <field name = "mechanisms" domain = "longstr" label = "available security mechanisms">
- <doc>
- A list of the security mechanisms that the server supports, delimited by spaces.
- Currently ASL supports these mechanisms: PLAIN.
- </doc>
- <assert check = "notnull" />
- </field>
-
- <field name = "locales" domain = "longstr" label = "available message locales">
- <doc>
- A list of the message locales that the server supports, delimited by spaces. The
- locale defines the language in which the server will send reply texts.
- </doc>
- <rule name = "required-support">
- <doc>
- The server MUST support at least the en_US locale.
- </doc>
- <doc type = "scenario">
- Client connects to server and inspects the locales field. It checks for
- the presence of the required locale(s).
- </doc>
- </rule>
- <assert check = "notnull" />
- </field>
- </method>
-
- <method name = "start-ok" synchronous = "1" index = "11"
- label = "select security mechanism and locale">
- <doc>
- This method selects a SASL security mechanism. ASL uses SASL (RFC2222) to
- negotiate authentication and encryption.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
-
- <field name = "client-properties" domain = "peer-properties" label = "client properties">
- <rule name = "required-fields">
- <!-- This rule is not testable from the client side -->
- <doc>
- The properties SHOULD contain at least these fields: "product", giving the name
- of the client product, "version", giving the name of the client version, "platform",
- giving the name of the operating system, "copyright", if appropriate, and
- "information", giving other general information.
- </doc>
- </rule>
- </field>
-
- <field name = "mechanism" domain = "shortstr" label = "selected security mechanism">
- <doc>
- A single security mechanism selected by the client, which must be one of those
- specified by the server.
- </doc>
- <rule name = "security">
- <doc>
- The client SHOULD authenticate using the highest-level security profile it
- can handle from the list provided by the server.
- </doc>
- </rule>
- <rule name = "validity">
- <doc>
- If the mechanism field does not contain one of the security mechanisms
- proposed by the server in the Start method, the server MUST close the
- connection without sending any further data.
- </doc>
- <doc type = "scenario">
- Client connects to server and sends an invalid security mechanism. The
- server must respond by closing the connection (a socket close, with no
- connection close negotiation).
- </doc>
- </rule>
- <assert check = "notnull" />
- </field>
-
- <field name = "response" domain = "longstr" label = "security response data">
- <doc>
- A block of opaque data passed to the security mechanism. The contents of this
- data are defined by the SASL security mechanism.
- </doc>
- <assert check = "notnull" />
- </field>
-
- <field name = "locale" domain = "shortstr" label = "selected message locale">
- <doc>
- A single message locale selected by the client, which must be one of those
- specified by the server.
- </doc>
- <assert check = "notnull" />
- </field>
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "secure" synchronous = "1" index = "20" label = "security mechanism challenge">
- <doc>
- The SASL protocol works by exchanging challenges and responses until both peers have
- received sufficient information to authenticate each other. This method challenges
- the client to provide more information.
- </doc>
-
- <chassis name = "client" implement = "MUST" />
- <response name = "secure-ok" />
-
- <field name = "challenge" domain = "longstr" label = "security challenge data">
- <doc>
- Challenge information, a block of opaque binary data passed to the security
- mechanism.
- </doc>
- </field>
- </method>
-
- <method name = "secure-ok" synchronous = "1" index = "21" label = "security mechanism response">
- <doc>
- This method attempts to authenticate, passing a block of SASL data for the security
- mechanism at the server side.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
-
- <field name = "response" domain = "longstr" label = "security response data">
- <doc>
- A block of opaque data passed to the security mechanism. The contents of this
- data are defined by the SASL security mechanism.
- </doc>
- <assert check = "notnull" />
- </field>
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "tune" synchronous = "1" index = "30"
- label = "propose connection tuning parameters">
- <doc>
- This method proposes a set of connection configuration values to the client. The
- client can accept and/or adjust these.
- </doc>
-
- <chassis name = "client" implement = "MUST" />
-
- <response name = "tune-ok" />
-
- <field name = "channel-max" domain = "short" label = "proposed maximum channels">
- <doc>
- The maximum total number of channels that the server allows per connection. Zero
- means that the server does not impose a fixed limit, but the number of allowed
- channels may be limited by available server resources.
- </doc>
- </field>
-
- <field name = "frame-max" domain = "long" label = "proposed maximum frame size">
- <doc>
- The largest frame size that the server proposes for the connection. The client
- can negotiate a lower value. Zero means that the server does not impose any
- specific limit but may reject very large frames if it cannot allocate resources
- for them.
- </doc>
- <rule name = "minimum">
- <doc>
- Until the frame-max has been negotiated, both peers MUST accept frames of up
- to frame-min-size octets large, and the minimum negotiated value for frame-max
- is also frame-min-size.
- </doc>
- <doc type = "scenario">
- Client connects to server and sends a large properties field, creating a frame
- of frame-min-size octets. The server must accept this frame.
- </doc>
- </rule>
- </field>
-
- <field name = "heartbeat" domain = "short" label = "desired heartbeat delay">
- <!-- TODO 0.82 - the heartbeat negotiation mechanism was changed during
- implementation because the model documented here does not actually
- work properly. The best model we found is that the server proposes
- a heartbeat value to the client; the client can reply with zero, meaning
- 'do not use heartbeats (as documented here), or can propose its own
- heartbeat value, which the server should then accept. This is different
- from the model here which is disconnected - e.g. each side requests a
- heartbeat independently. Basically a connection is heartbeated in
- both ways, or not at all, depending on whether both peers support
- heartbeating or not, and the heartbeat value should itself be chosen
- by the client so that remote links can get a higher value. Also, the
- actual heartbeat mechanism needs documentation, and is as follows: so
- long as there is activity on a connection - in or out - both peers
- assume the connection is active. When there is no activity, each peer
- must send heartbeat frames. When no heartbeat frame is received after
- N cycles (where N is at least 2), the connection can be considered to
- have died. /PH 2006/07/19
- -->
- <doc>
- The delay, in seconds, of the connection heartbeat that the server wants.
- Zero means the server does not want a heartbeat.
- </doc>
- </field>
- </method>
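
The editor's note inside the heartbeat field above describes the intended mechanism: traffic in either direction counts as liveness, heartbeat frames are only needed once a side has been idle for one interval, and a peer that receives nothing for at least two intervals may treat the connection as dead. A small Python timer sketch of those rules; the class and method names are illustrative, and only the "N is at least 2" threshold comes from the note.

    import time

    class HeartbeatMonitor:
        # Track connection activity and apply the "N cycles, N >= 2" rule.

        def __init__(self, interval_s, cycles=2):
            self.interval = interval_s       # negotiated heartbeat delay
            self.cycles = cycles             # at least 2, per the note above
            now = time.monotonic()
            self.last_recv = self.last_send = now

        def on_frame_received(self):
            self.last_recv = time.monotonic()    # any inbound frame counts

        def on_frame_sent(self):
            self.last_send = time.monotonic()    # any outbound frame counts

        def should_send_heartbeat(self):
            # Heartbeat only when we have been silent for a full interval.
            return time.monotonic() - self.last_send >= self.interval

        def peer_is_dead(self):
            # Nothing received for N intervals: treat the connection as dead.
            return time.monotonic() - self.last_recv >= self.cycles * self.interval
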
-
- <method name = "tune-ok" synchronous = "1" index = "31"
- label = "negotiate connection tuning parameters">
- <doc>
- This method sends the client's connection tuning parameters to the server.
- Certain fields are negotiated, others provide capability information.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
-
- <field name = "channel-max" domain = "short" label = "negotiated maximum channels">
- <doc>
- The maximum total number of channels that the client will use per connection.
- </doc>
- <rule name = "upper-limit">
- <doc>
- If the client specifies a channel max that is higher than the value provided
- by the server, the server MUST close the connection without attempting a
- negotiated close. The server may report the error in some fashion to assist
- implementors.
- </doc>
- </rule>
- <assert check = "notnull" />
- <assert check = "le" method = "tune" field = "channel-max" />
- </field>
-
- <field name = "frame-max" domain = "long" label = "negotiated maximum frame size">
- <doc>
- The largest frame size that the client and server will use for the connection.
- Zero means that the client does not impose any specific limit but may reject
- very large frames if it cannot allocate resources for them. Note that the
- frame-max limit applies principally to content frames, where large contents can
- be broken into frames of arbitrary size.
- </doc>
- <rule name = "minimum">
- <doc>
-            Until the frame-max has been negotiated, both peers MUST accept frames of up
-            to frame-min-size octets in size, and the minimum negotiated value for
-            frame-max is also frame-min-size.
- </doc>
- </rule>
- <rule name = "upper-limit">
- <doc>
- If the client specifies a frame max that is higher than the value provided
- by the server, the server MUST close the connection without attempting a
- negotiated close. The server may report the error in some fashion to assist
- implementors.
- </doc>
- </rule>
- </field>
-
- <field name = "heartbeat" domain = "short" label = "desired heartbeat delay">
- <doc>
- The delay, in seconds, of the connection heartbeat that the client wants. Zero
- means the client does not want a heartbeat.
- </doc>
- </field>
- </method>
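-
-    <!-- Editor's note (illustrative Python sketch, not part of the original XML):
-         the Tune/Tune-Ok negotiation above boils down to: zero means "no specific
-         limit" on either side, otherwise the lower value wins, and frame-max never
-         drops below frame-min-size. The numeric values are invented for the example.
-
-           FRAME_MIN_SIZE = 4096   # stand-in for the spec's frame-min-size constant
-
-           def negotiate(server_value, client_value):
-               # 0 means "no specific limit"; otherwise take the smaller proposal
-               if server_value == 0:
-                   return client_value
-               if client_value == 0:
-                   return server_value
-               return min(server_value, client_value)
-
-           # server proposes 256 channels and 128 KiB frames; client wants 64 and 32 KiB
-           channel_max = negotiate(256, 64)
-           frame_max = max(negotiate(131072, 32768), FRAME_MIN_SIZE)
-           print(channel_max, frame_max)   # 64 32768
-    -->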
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "open" synchronous = "1" index = "40" label = "open connection to virtual host">
- <doc>
- This method opens a connection to a virtual host, which is a collection of
- resources, and acts to separate multiple application domains within a server.
- The server may apply arbitrary limits per virtual host, such as the number
- of each type of entity that may be used, per connection and/or in total.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
- <response name = "open-ok" />
- <response name = "redirect" />
-
- <field name = "virtual-host" domain = "path" label = "virtual host name">
- <!-- TODO 0.82 - the entire vhost model needs review. This concept was
- prompted by the HTTP vhost concept but does not fit very well into
- AMQP. Currently we use the vhost as a "cluster identifier" which is
- inaccurate usage. /PH 2006/07/19
- -->
- <assert check = "regexp" value = "^[a-zA-Z0-9/-_]+$" />
- <doc>
- The name of the virtual host to work with.
- </doc>
- <rule name = "separation">
- <doc>
- If the server supports multiple virtual hosts, it MUST enforce a full
- separation of exchanges, queues, and all associated entities per virtual
- host. An application, connected to a specific virtual host, MUST NOT be able
- to access resources of another virtual host.
- </doc>
- </rule>
- <rule name = "security">
- <doc>
- The server SHOULD verify that the client has permission to access the
- specified virtual host.
- </doc>
- </rule>
- </field>
-
- <field name = "capabilities" domain = "shortstr" label = "required capabilities">
- <doc>
- The client can specify zero or more capability names, delimited by spaces.
-          The server can use this string to decide how to process the client's
-          connection request.
- </doc>
- </field>
-
- <field name = "insist" domain = "bit" label = "insist on connecting to server">
- <doc>
- In a configuration with multiple collaborating servers, the server may respond
- to a Connection.Open method with a Connection.Redirect. The insist option tells
- the server that the client is insisting on a connection to the specified server.
- </doc>
- <rule name = "behaviour">
- <doc>
- When the client uses the insist option, the server MUST NOT respond with a
- Connection.Redirect method. If it cannot accept the client's connection
- request it should respond by closing the connection with a suitable reply
- code.
- </doc>
- </rule>
- </field>
- </method>
-
- <method name = "open-ok" synchronous = "1" index = "41" label = "signal that connection is ready">
- <doc>
- This method signals to the client that the connection is ready for use.
- </doc>
- <chassis name = "client" implement = "MUST" />
- <field name = "known-hosts" domain = "known-hosts" />
- </method>
-
- <method name = "redirect" synchronous = "1" index = "42" label = "redirects client to other server">
- <doc>
- This method redirects the client to another server, based on the requested virtual
- host and/or capabilities.
- </doc>
- <rule name = "usage">
- <doc>
- When getting the Connection.Redirect method, the client SHOULD reconnect to
- the host specified, and if that host is not present, to any of the hosts
- specified in the known-hosts list.
- </doc>
- </rule>
- <chassis name = "client" implement = "MUST" />
- <field name = "host" domain = "shortstr" label = "server to connect to">
- <doc>
- Specifies the server to connect to. This is an IP address or a DNS name,
- optionally followed by a colon and a port number. If no port number is
- specified, the client should use the default port number for the protocol.
- </doc>
- <assert check = "notnull" />
- </field>
- <field name = "known-hosts" domain = "known-hosts" />
- </method>
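-
-    <!-- Editor's note (illustrative Python sketch, not part of the original XML):
-         handling a Redirect amounts to "try the host field first, then the entries in
-         known-hosts", each entry being a host with an optional :port. A sketch that
-         assumes the comma-separated form of the known-hosts value; the port constant
-         and host names are made up.
-
-           DEFAULT_PORT = 5672
-
-           def parse_host(entry):
-               host, sep, port = entry.partition(":")
-               return host, int(port) if sep else DEFAULT_PORT
-
-           def candidate_hosts(redirect_host, known_hosts):
-               # redirected host first, then the known-hosts list, without duplicates
-               ordered = []
-               for entry in [redirect_host] + known_hosts.split(","):
-                   entry = entry.strip()
-                   if entry and entry not in ordered:
-                       ordered.append(entry)
-               return [parse_host(e) for e in ordered]
-
-           print(candidate_hosts("b2.example.com:5673", "b1.example.com,b3.example.com"))
-    -->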
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "close" synchronous = "1" index = "50" label = "request a connection close">
- <doc>
- This method indicates that the sender wants to close the connection. This may be
- due to internal conditions (e.g. a forced shut-down) or due to an error handling
- a specific method, i.e. an exception. When a close is due to an exception, the
- sender provides the class and method id of the method which caused the exception.
- </doc>
- <!-- TODO: the connection close mechanism needs to be reviewed from the ODF
- documentation and better expressed as rules here. /PH 2006/07/20
- -->
- <rule name = "stability">
- <doc>
- After sending this method any received method except the Close-OK method MUST
- be discarded.
- </doc>
- </rule>
-
- <chassis name = "client" implement = "MUST" />
- <chassis name = "server" implement = "MUST" />
- <response name = "close-ok" />
-
- <field name = "reply-code" domain = "reply-code" />
- <field name = "reply-text" domain = "reply-text" />
-
- <field name = "class-id" domain = "class-id" label = "failing method class">
- <doc>
- When the close is provoked by a method exception, this is the class of the
- method.
- </doc>
- </field>
-
- <field name = "method-id" domain = "method-id" label = "failing method ID">
- <doc>
- When the close is provoked by a method exception, this is the ID of the method.
- </doc>
- </field>
- </method>
-
- <method name = "close-ok" synchronous = "1" index = "51" label = "confirm a connection close">
- <doc>
- This method confirms a Connection.Close method and tells the recipient that it is
- safe to release resources for the connection and close the socket.
- </doc>
- <rule name = "reporting">
- <doc>
- A peer that detects a socket closure without having received a Close-Ok
- handshake method SHOULD log the error.
- </doc>
- </rule>
- <chassis name = "client" implement = "MUST" />
- <chassis name = "server" implement = "MUST" />
- </method>
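-
-    <!-- Editor's note (illustrative Python sketch, not part of the original XML):
-         the stability rule for Close/Close-Ok means that once Close has been sent,
-         everything except Close-Ok is discarded until the handshake completes.
-         recv_method stands for whatever the implementation uses to read the next
-         method; it is hypothetical.
-
-           def await_close_ok(recv_method):
-               while True:
-                   method = recv_method()
-                   if method == "connection.close-ok":
-                       return True
-                   # any other method arriving after Close was sent is discarded
-    -->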
- </class>
-
- <!-- == CHANNEL ========================================================== -->
-
- <class name = "channel" handler = "channel" index = "20" label = "work with channels">
- <doc>
- The channel class provides methods for a client to establish a channel to a
- server and for both peers to operate the channel thereafter.
- </doc>
-
- <doc type = "grammar">
- channel = open-channel *use-channel close-channel
- open-channel = C:OPEN S:OPEN-OK
- use-channel = C:FLOW S:FLOW-OK
- / S:FLOW C:FLOW-OK
- / S:ALERT
- / functional-class
- close-channel = C:CLOSE S:CLOSE-OK
- / S:CLOSE C:CLOSE-OK
- </doc>
-
- <chassis name = "server" implement = "MUST" />
- <chassis name = "client" implement = "MUST" />
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "open" synchronous = "1" index = "10" label = "open a channel for use">
- <doc>
- This method opens a channel to the server.
- </doc>
- <rule name = "state" on-failure = "channel-error">
- <doc>
-          The client MUST NOT use this method on an already-opened channel.
- </doc>
- <doc type = "scenario">
- Client opens a channel and then reopens the same channel.
- </doc>
- </rule>
- <chassis name = "server" implement = "MUST" />
- <response name = "open-ok" />
- <field name = "out of band" domain = "shortstr" label = "out-of-band settings">
- <doc>
- Configures out-of-band transfers on this channel. The syntax and meaning of this
- field will be formally defined at a later date.
- </doc>
- <assert check = "null" />
- </field>
- </method>
-
- <method name = "open-ok" synchronous = "1" index = "11" label = "signal that the channel is ready">
- <doc>
- This method signals to the client that the channel is ready for use.
- </doc>
- <chassis name = "client" implement = "MUST" />
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "flow" synchronous = "1" index = "20" label = "enable/disable flow from peer">
- <doc>
- This method asks the peer to pause or restart the flow of content data. This is a
-        simple flow-control mechanism that a peer can use to avoid overflowing its queues or
- otherwise finding itself receiving more messages than it can process. Note that this
- method is not intended for window control. The peer that receives a disable flow
- method should finish sending the current content frame, if any, then pause.
- </doc>
-
- <rule name = "initial-state">
- <doc>
- When a new channel is opened, it is active (flow is active). Some applications
- assume that channels are inactive until started. To emulate this behaviour a
- client MAY open the channel, then pause it.
- </doc>
- </rule>
-
- <rule name = "bidirectional">
- <doc>
- When sending content frames, a peer SHOULD monitor the channel for incoming
- methods and respond to a Channel.Flow as rapidly as possible.
- </doc>
- </rule>
-
- <rule name = "throttling">
- <doc>
- A peer MAY use the Channel.Flow method to throttle incoming content data for
- internal reasons, for example, when exchanging data over a slower connection.
- </doc>
- </rule>
-
- <rule name = "expected-behaviour">
- <doc>
- The peer that requests a Channel.Flow method MAY disconnect and/or ban a peer
- that does not respect the request. This is to prevent badly-behaved clients
- from overwhelming a broker.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
- <chassis name = "client" implement = "MUST" />
-
- <response name = "flow-ok" />
-
- <field name = "active" domain = "bit" label = "start/stop content frames">
- <doc>
- If 1, the peer starts sending content frames. If 0, the peer stops sending
- content frames.
- </doc>
- </field>
- </method>
-
- <method name = "flow-ok" index = "21" label = "confirm a flow method">
- <doc>
- Confirms to the peer that a flow command was received and processed.
- </doc>
- <chassis name = "server" implement = "MUST" />
- <chassis name = "client" implement = "MUST" />
- <field name = "active" domain = "bit" label = "current flow setting">
- <doc>
- Confirms the setting of the processed flow method: 1 means the peer will start
- sending or continue to send content frames; 0 means it will not.
- </doc>
- </field>
- </method>
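-
-    <!-- Editor's note (illustrative Python sketch, not part of the original XML):
-         the Channel.Flow contract above: stop emitting content frames while flow is
-         inactive, confirm each Flow with a Flow-Ok carrying the same setting, and
-         resume where sending left off. All names are invented for the example.
-
-           class ContentSender:
-               def __init__(self):
-                   self.active = True     # a newly opened channel starts with flow active
-                   self.pending = []
-
-               def on_flow(self, active):
-                   self.active = active
-                   self.pump()
-                   return ("flow-ok", active)   # confirm the setting back to the peer
-
-               def send(self, frame):
-                   self.pending.append(frame)
-                   self.pump()
-
-               def pump(self):
-                   while self.active and self.pending:
-                       print("sending", self.pending.pop(0))
-
-           s = ContentSender()
-           s.send("content-frame-1")    # goes out immediately
-           s.on_flow(False)
-           s.send("content-frame-2")    # held back while flow is inactive
-           s.on_flow(True)              # flow restored, the held frame goes out
-    -->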
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
- <!-- TODO 0.82 - remove this method entirely
- /PH 2006/07/20
- -->
- <method name = "alert" index = "30" label = "send a non-fatal warning message">
- <doc>
- This method allows the server to send a non-fatal warning to the client. This is
- used for methods that are normally asynchronous and thus do not have confirmations,
- and for which the server may detect errors that need to be reported. Fatal errors
- are handled as channel or connection exceptions; non-fatal errors are sent through
- this method.
- </doc>
- <chassis name = "client" implement = "MUST" />
- <field name = "reply-code" domain = "reply-code" />
- <field name = "reply-text" domain = "reply-text" />
- <field name = "details" domain = "table" label = "detailed information for warning">
- <doc>
- A set of fields that provide more information about the problem. The meaning of
- these fields are defined on a per-reply-code basis (TO BE DEFINED).
- </doc>
- </field>
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "close" synchronous = "1" index = "40" label = "request a channel close">
- <doc>
- This method indicates that the sender wants to close the channel. This may be due to
- internal conditions (e.g. a forced shut-down) or due to an error handling a specific
- method, i.e. an exception. When a close is due to an exception, the sender provides
- the class and method id of the method which caused the exception.
- </doc>
-
- <!-- TODO: the channel close behaviour needs to be reviewed from the ODF
- documentation and better expressed as rules here. /PH 2006/07/20
- -->
- <rule name = "stability">
- <doc>
- After sending this method any received method except the Close-OK method MUST
- be discarded.
- </doc>
- </rule>
-
- <chassis name = "client" implement = "MUST" />
- <chassis name = "server" implement = "MUST" />
- <response name = "close-ok" />
-
- <field name = "reply-code" domain = "reply-code" />
- <field name = "reply-text" domain = "reply-text" />
-
- <field name = "class-id" domain = "class-id" label = "failing method class">
- <doc>
- When the close is provoked by a method exception, this is the class of the
- method.
- </doc>
- </field>
-
- <field name = "method-id" domain = "method-id" label = "failing method ID">
- <doc>
- When the close is provoked by a method exception, this is the ID of the method.
- </doc>
- </field>
- </method>
-
- <method name = "close-ok" synchronous = "1" index = "41" label = "confirm a channel close">
- <doc>
- This method confirms a Channel.Close method and tells the recipient that it is safe
- to release resources for the channel and close the socket.
- </doc>
- <rule name = "reporting">
- <doc>
- A peer that detects a socket closure without having received a Channel.Close-Ok
- handshake method SHOULD log the error.
- </doc>
- </rule>
- <chassis name = "client" implement = "MUST" />
- <chassis name = "server" implement = "MUST" />
- </method>
- </class>
-
- <!-- == ACCESS =========================================================== -->
-
- <!-- TODO 0.82 - this class must be implemented by two teams before we can
- consider it matured.
- -->
-
- <class name = "access" handler = "connection" index = "30" label = "work with access tickets">
- <doc>
-      The protocol controls access to server resources using access tickets. A
- client must explicitly request access tickets before doing work. An access
- ticket grants a client the right to use a specific set of resources -
- called a "realm" - in specific ways.
- </doc>
-
- <doc type = "grammar">
- access = C:REQUEST S:REQUEST-OK
- </doc>
-
- <chassis name = "server" implement = "MUST" />
- <chassis name = "client" implement = "MUST" />
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "request" synchronous = "1" index = "10" label = "request an access ticket">
- <doc>
- This method requests an access ticket for an access realm. The server
- responds by granting the access ticket. If the client does not have
- access rights to the requested realm this causes a connection exception.
- Access tickets are a per-channel resource.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
- <response name = "request-ok" />
-
- <field name = "realm" domain = "shortstr" label = "name of requested realm">
- <doc>
- Specifies the name of the realm to which the client is requesting access.
- The realm is a configured server-side object that collects a set of
- resources (exchanges, queues, etc.). If the channel has already requested
- an access ticket onto this realm, the previous ticket is destroyed and a
- new ticket is created with the requested access rights, if allowed.
- </doc>
- <rule name = "validity" on-failure = "access-refused">
- <doc>
- The client MUST specify a realm that is known to the server. The server
- makes an identical response for undefined realms as it does for realms
- that are defined but inaccessible to this client.
- </doc>
- <doc type = "scenario">
- Client specifies an undefined realm.
- </doc>
- </rule>
- </field>
-
- <field name = "exclusive" domain = "bit" label = "request exclusive access">
- <doc>
- Request exclusive access to the realm, meaning that this will be the only
- channel that uses the realm's resources.
- </doc>
- <rule name = "validity" on-failure = "access-refused">
- <doc>
- The client MAY NOT request exclusive access to a realm that has active
- access tickets, unless the same channel already had the only access
- ticket onto that realm.
- </doc>
- <doc type = "scenario">
- Client opens two channels and requests exclusive access to the same realm.
- </doc>
- </rule>
- </field>
- <field name = "passive" domain = "bit" label = "request passive access">
- <doc>
-          Request passive access to the specified access realm. Passive
- access lets a client get information about resources in the realm but
- not to make any changes to them.
- </doc>
- </field>
- <field name = "active" domain = "bit" label = "request active access">
- <doc>
-          Request active access to the specified access realm. Active access lets
-          a client create and delete resources in the realm.
- </doc>
- </field>
- <field name = "write" domain = "bit" label = "request write access">
- <doc>
- Request write access to the specified access realm. Write access lets a client
- publish messages to all exchanges in the realm.
- </doc>
- </field>
- <field name = "read" domain = "bit" label = "request read access">
- <doc>
- Request read access to the specified access realm. Read access lets a client
- consume messages from queues in the realm.
- </doc>
- </field>
- </method>
-
- <method name = "request-ok" synchronous = "1" index = "11" label = "grant access to server resources">
- <doc>
- This method provides the client with an access ticket. The access ticket is valid
- within the current channel and for the lifespan of the channel.
- </doc>
- <rule name = "per-channel" on-failure = "not-allowed">
- <doc>
- The client MUST NOT use access tickets except within the same channel as
- originally granted.
- </doc>
- <doc type = "scenario">
- Client opens two channels, requests a ticket on one channel, and then
-          tries to use that ticket in a second channel.
- </doc>
- </rule>
- <chassis name = "client" implement = "MUST" />
- <field name = "ticket" domain = "access-ticket" />
- </method>
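-
-    <!-- Editor's note (illustrative Python sketch, not part of the original XML):
-         access tickets were removed in later protocol revisions, so no current client
-         library exposes them; the sketch below only illustrates the per-channel
-         bookkeeping described above (re-requesting a realm replaces the earlier
-         ticket, and tickets mean nothing outside their channel). Everything here is
-         hypothetical.
-
-           class ChannelTickets:
-               def __init__(self):
-                   self._next = 1
-                   self._by_realm = {}   # realm name: (ticket id, granted rights)
-
-               def request(self, realm, rights):
-                   ticket = self._next
-                   self._next += 1
-                   self._by_realm[realm] = (ticket, frozenset(rights))  # replaces any old ticket
-                   return ticket
-
-               def allows(self, ticket, right):
-                   return any(t == ticket and right in rights
-                              for t, rights in self._by_realm.values())
-
-           chan = ChannelTickets()
-           t1 = chan.request("/data", {"active", "read"})
-           t2 = chan.request("/data", {"read"})   # t1 is no longer valid for /data
-           print(chan.allows(t1, "read"), chan.allows(t2, "read"))   # False True
-    -->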
- </class>
-
- <!-- == EXCHANGE ========================================================= -->
-
- <class name = "exchange" handler = "channel" index = "40" label = "work with exchanges">
- <doc>
- Exchanges match and distribute messages across queues. Exchanges can be configured in
- the server or created at runtime.
- </doc>
-
- <doc type = "grammar">
- exchange = C:DECLARE S:DECLARE-OK
- / C:DELETE S:DELETE-OK
- </doc>
-
- <chassis name = "server" implement = "MUST" />
- <chassis name = "client" implement = "MUST" />
-
- <rule name = "required-types">
- <doc>
- The server MUST implement these standard exchange types: fanout, direct.
- </doc>
- <doc type = "scenario">
- Client attempts to declare an exchange with each of these standard types.
- </doc>
- </rule>
- <rule name = "recommended-types">
- <doc>
- The server SHOULD implement these standard exchange types: topic, headers.
- </doc>
- <doc type = "scenario">
- Client attempts to declare an exchange with each of these standard types.
- </doc>
- </rule>
- <rule name = "required-instances">
- <doc>
- The server MUST, in each virtual host, pre-declare an exchange instance
- for each standard exchange type that it implements, where the name of the
- exchange instance is "amq." followed by the exchange type name.
- </doc>
- <doc type = "scenario">
- Client creates a temporary queue and attempts to bind to each required
-        exchange instance (amq.fanout, amq.direct, and, if those types are
-        implemented, amq.topic and amq.headers).
- </doc>
- </rule>
- <rule name = "default-exchange">
- <doc>
- The server MUST predeclare a direct exchange to act as the default exchange
- for content Publish methods and for default queue bindings.
- </doc>
- <doc type = "scenario">
- Client checks that the default exchange is active by specifying a queue
- binding with no exchange name, and publishing a message with a suitable
- routing key but without specifying the exchange name, then ensuring that
- the message arrives in the queue correctly.
- </doc>
- </rule>
- <rule name = "default-access">
- <doc>
- The server MUST NOT allow clients to access the default exchange except
- by specifying an empty exchange name in the Queue.Bind and content Publish
- methods.
- </doc>
- </rule>
- <rule name = "extensions">
- <doc>
- The server MAY implement other exchange types as wanted.
- </doc>
- </rule>
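-
-    <!-- Editor's note (illustrative Python sketch, not part of the original XML):
-         a small illustration of the default-exchange rules above, using the
-         third-party pika client (which implements AMQP 0-9-1, a later revision of
-         this spec, so there is no ticket field). Broker location and queue name are
-         invented.
-
-           import pika
-
-           conn = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
-           ch = conn.channel()
-
-           ch.queue_declare(queue="task-queue")
-           # an empty exchange name selects the pre-declared default (direct) exchange;
-           # the routing key matches the queue's default binding under its own name
-           ch.basic_publish(exchange="", routing_key="task-queue", body=b"hello")
-           conn.close()
-    -->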
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "declare" synchronous = "1" index = "10" label = "declare exchange, create if needed">
- <doc>
- This method creates an exchange if it does not already exist, and if the exchange
- exists, verifies that it is of the correct and expected class.
- </doc>
- <rule name = "minimum">
- <doc>
- The server SHOULD support a minimum of 16 exchanges per virtual host and
- ideally, impose no limit except as defined by available resources.
- </doc>
- <doc type = "scenario">
- The client creates as many exchanges as it can until the server reports
-          an error; the number of exchanges successfully created must be at least
- sixteen.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
- <response name = "declare-ok" />
-
- <field name = "ticket" domain = "access-ticket">
- <doc>
- When a client defines a new exchange, this belongs to the access realm of the
- ticket used. All further work done with that exchange must be done with an
- access ticket for the same realm.
- </doc>
- <rule name = "validity" on-failure = "access-refused">
- <doc>
- The client MUST provide a valid access ticket giving "active" access to
- the realm in which the exchange exists or will be created, or "passive"
-            access if the passive flag is set.
- </doc>
- <doc type = "scenario">
- Client creates access ticket with wrong access rights and attempts to use
- in this method.
- </doc>
- </rule>
- </field>
-
- <field name = "exchange" domain = "exchange-name">
- <rule name = "reserved" on-failure = "access-refused">
- <doc>
- Exchange names starting with "amq." are reserved for predeclared and
- standardised exchanges. The client MUST NOT attempt to create an exchange
- starting with "amq.".
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- <assert check = "regexp" value = "^[a-zA-Z0-9-_.:]+$" />
- </field>
-
- <field name = "type" domain = "shortstr" label = "exchange type">
- <doc>
- Each exchange belongs to one of a set of exchange types implemented by the
- server. The exchange types define the functionality of the exchange - i.e. how
- messages are routed through it. It is not valid or meaningful to attempt to
- change the type of an existing exchange.
- </doc>
- <rule name = "typed" on-failure = "not-allowed">
- <doc>
-            Exchanges cannot be redeclared with different types. The client MUST NOT
- attempt to redeclare an existing exchange with a different type than used
- in the original Exchange.Declare method.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- <rule name = "support" on-failure = "command-invalid">
- <doc>
- The client MUST NOT attempt to create an exchange with a type that the
- server does not support.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- <assert check = "regexp" value = "^[a-zA-Z0-9-_.:]+$" />
- </field>
-
- <field name = "passive" domain = "bit" label = "do not create exchange">
- <doc>
- If set, the server will not create the exchange. The client can use this to
- check whether an exchange exists without modifying the server state.
- </doc>
- <rule name = "not-found">
- <doc>
- If set, and the exchange does not already exist, the server MUST raise a
- channel exception with reply code 404 (not found).
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- </field>
-
- <field name = "durable" domain = "bit" label = "request a durable exchange">
- <doc>
- If set when creating a new exchange, the exchange will be marked as durable.
- Durable exchanges remain active when a server restarts. Non-durable exchanges
- (transient exchanges) are purged if/when a server restarts.
- </doc>
- <rule name = "support">
- <doc>
- The server MUST support both durable and transient exchanges.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- <rule name = "sticky">
- <doc>
- The server MUST ignore the durable field if the exchange already exists.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- </field>
-
- <!-- TODO 0.82 - clarify how this works; there is no way to cancel a binding
- except by deleting a queue.
- -->
- <field name = "auto-delete" domain = "bit" label = "auto-delete when unused">
- <doc>
- If set, the exchange is deleted when all queues have finished using it.
- </doc>
- <rule name = "sticky">
- <doc>
- The server MUST ignore the auto-delete field if the exchange already
- exists.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- </field>
-
- <field name = "internal" domain = "bit" label = "create internal exchange">
- <doc>
- If set, the exchange may not be used directly by publishers, but only when bound
- to other exchanges. Internal exchanges are used to construct wiring that is not
- visible to applications.
- </doc>
- </field>
-
- <field name = "nowait" domain = "bit" label = "do not send reply method">
- <doc>
- If set, the server will not respond to the method. The client should not wait
- for a reply method. If the server could not complete the method it will raise a
- channel or connection exception.
- </doc>
- </field>
-
- <field name = "arguments" domain = "table" label = "arguments for declaration">
- <doc>
- A set of arguments for the declaration. The syntax and semantics of these
- arguments depends on the server implementation. This field is ignored if passive
- is 1.
- </doc>
- </field>
- </method>
-
- <method name = "declare-ok" synchronous = "1" index = "11" label = "confirm exchange declaration">
- <doc>
- This method confirms a Declare method and confirms the name of the exchange,
- essential for automatically-named exchanges.
- </doc>
- <chassis name = "client" implement = "MUST" />
- </method>
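-
-    <!-- Editor's note (illustrative Python sketch, not part of the original XML):
-         how Exchange.Declare is typically driven from a client, sketched with the
-         0-9-1 pika library (which has no ticket field); the exchange name and type
-         are arbitrary examples.
-
-           import pika
-
-           conn = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
-           ch = conn.channel()
-
-           # create (or verify) a durable topic exchange; names must not use the
-           # reserved "amq." prefix
-           ch.exchange_declare(exchange="app.events", exchange_type="topic", durable=True)
-
-           # passive=True only checks existence; a missing exchange raises a channel
-           # exception with reply code 404 (not found)
-           ch.exchange_declare(exchange="app.events", passive=True)
-           conn.close()
-    -->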
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "delete" synchronous = "1" index = "20" label = "delete an exchange">
- <doc>
- This method deletes an exchange. When an exchange is deleted all queue bindings on
- the exchange are cancelled.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
- <response name = "delete-ok" />
-
- <field name = "ticket" domain = "access-ticket">
- <rule name = "validity" on-failure = "access-refused">
- <doc>
- The client MUST provide a valid access ticket giving "active" access
- rights to the exchange's access realm.
- </doc>
- <doc type = "scenario">
- Client creates access ticket with wrong access rights and attempts to use
- in this method.
- </doc>
- </rule>
- </field>
-
- <field name = "exchange" domain = "exchange-name">
- <rule name = "exists" on-failure = "not-found">
- <doc>
- The client MUST NOT attempt to delete an exchange that does not exist.
- </doc>
- </rule>
- <assert check = "notnull" />
- </field>
-
- <!-- TODO 0.82 - discuss whether this option is useful or not. I don't have
- any real use case for it. /PH 2006-07-23.
- -->
- <field name = "if-unused" domain = "bit" label = "delete only if unused">
- <doc>
- If set, the server will only delete the exchange if it has no queue bindings. If
- the exchange has queue bindings the server does not delete it but raises a
- channel exception instead.
- </doc>
- </field>
-
- <field name = "nowait" domain = "bit" label = "do not send a reply method">
- <doc>
- If set, the server will not respond to the method. The client should not wait
- for a reply method. If the server could not complete the method it will raise a
- channel or connection exception.
- </doc>
- </field>
- </method>
-
- <method name = "delete-ok" synchronous = "1" index = "21"
- label = "confirm deletion of an exchange">
- <doc>This method confirms the deletion of an exchange.</doc>
- <chassis name = "client" implement = "MUST" />
- </method>
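-
-    <!-- Editor's note (illustrative Python sketch, not part of the original XML):
-         Exchange.Delete with the if-unused guard, sketched with pika (0-9-1); if
-         bindings remain, the server raises a channel exception instead of deleting.
-
-           import pika
-
-           conn = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
-           ch = conn.channel()
-           ch.exchange_delete(exchange="app.events", if_unused=True)
-           conn.close()
-    -->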
- </class>
-
- <!-- == QUEUE ============================================================ -->
-
- <class name = "queue" handler = "channel" index = "50" label = "work with queues">
- <doc>
- Queues store and forward messages. Queues can be configured in the server or created at
- runtime. Queues must be attached to at least one exchange in order to receive messages
- from publishers.
- </doc>
-
- <doc type = "grammar">
- queue = C:DECLARE S:DECLARE-OK
- / C:BIND S:BIND-OK
- / C:PURGE S:PURGE-OK
- / C:DELETE S:DELETE-OK
- </doc>
-
- <chassis name = "server" implement = "MUST" />
- <chassis name = "client" implement = "MUST" />
-
- <rule name = "any-content">
- <doc>
- A server MUST allow any content class to be sent to any queue, in any mix, and
- queue and deliver these content classes independently. Note that all methods
- that fetch content off queues are specific to a given content class.
- </doc>
- <doc type = "scenario">
- Client creates an exchange of each standard type and several queues that
-        it binds to each exchange. It must then successfully send each of the standard
- content types to each of the available queues.
- </doc>
- </rule>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "declare" synchronous = "1" index = "10" label = "declare queue, create if needed">
- <doc>
- This method creates or checks a queue. When creating a new queue the client can
- specify various properties that control the durability of the queue and its
- contents, and the level of sharing for the queue.
- </doc>
-
- <rule name = "default-binding">
- <doc>
- The server MUST create a default binding for a newly-created queue to the
- default exchange, which is an exchange of type 'direct'.
- </doc>
- <doc type = "scenario">
- Client creates a new queue, and then without explicitly binding it to an
- exchange, attempts to send a message through the default exchange binding,
- i.e. publish a message to the empty exchange, with the queue name as routing
- key.
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_queue_35" -->
- <rule name = "minimum-queues">
- <doc>
- The server SHOULD support a minimum of 256 queues per virtual host and ideally,
- impose no limit except as defined by available resources.
- </doc>
- <doc type = "scenario">
- Client attempts to create as many queues as it can until the server reports
- an error. The resulting count must at least be 256.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
- <response name = "declare-ok" />
-
- <field name = "ticket" domain = "access-ticket">
- <doc>
- When a client defines a new queue, this belongs to the access realm of the
- ticket used. All further work done with that queue must be done with an access
- ticket for the same realm.
- </doc>
- <rule name = "validity" on-failure = "access-refused">
- <doc>
- The client MUST provide a valid access ticket giving "active" access to
- the realm in which the queue exists or will be created.
- </doc>
- <doc type = "scenario">
- Client creates access ticket with wrong access rights and attempts to use
- in this method.
- </doc>
- </rule>
- </field>
-
- <field name = "queue" domain = "queue-name">
- <rule name = "default-name">
- <doc>
- The queue name MAY be empty, in which case the server MUST create a new
- queue with a unique generated name and return this to the client in the
- Declare-Ok method.
- </doc>
- <doc type = "scenario">
- Client attempts to create several queues with an empty name. The client then
- verifies that the server-assigned names are unique and different.
- </doc>
- </rule>
- <rule name = "reserved-prefix" on-failure = "not-allowed">
- <doc>
- Queue names starting with "amq." are reserved for predeclared and
- standardised server queues. A client MAY NOT attempt to declare a queue with a
- name that starts with "amq." and the passive option set to zero.
- </doc>
- <doc type = "scenario">
- A client attempts to create a queue with a name starting with "amq." and with
- the passive option set to zero.
- </doc>
- </rule>
- <assert check = "regexp" value = "^[a-zA-Z0-9-_.:]*$" />
- </field>
-
- <field name = "passive" domain = "bit" label = "do not create queue">
- <doc>
- If set, the server will not create the queue. This field allows the client
- to assert the presence of a queue without modifying the server state.
- </doc>
- <rule name = "passive" on-failure = "not-found">
- <doc>
-            The client MAY ask the server to assert that a queue exists without
-            creating it. If the queue does not exist, the server treats this
-            as a failure.
- </doc>
- <doc type = "scenario">
- Client declares an existing queue with the passive option and expects
- the server to respond with a declare-ok. Client then attempts to declare
- a non-existent queue with the passive option, and the server must close
- the channel with the correct reply-code.
- </doc>
- </rule>
- </field>
-
- <field name = "durable" domain = "bit" label = "request a durable queue">
- <doc>
- If set when creating a new queue, the queue will be marked as durable. Durable
- queues remain active when a server restarts. Non-durable queues (transient
- queues) are purged if/when a server restarts. Note that durable queues do not
- necessarily hold persistent messages, although it does not make sense to send
- persistent messages to a transient queue.
- </doc>
- <!-- Rule test name: was "amq_queue_03" -->
- <rule name = "persistence">
- <doc>The server MUST recreate the durable queue after a restart.</doc>
-
- <!-- TODO: use 'client does something' rather than 'a client does something'. -->
- <doc type = "scenario">
- A client creates a durable queue. The server is then restarted. The client
- then attempts to send a message to the queue. The message should be successfully
- delivered.
- </doc>
- </rule>
- <!-- Rule test name: was "amq_queue_36" -->
- <rule name = "types">
- <doc>The server MUST support both durable and transient queues.</doc>
- <doc type = "scenario">
- A client creates two named queues, one durable and one transient.
- </doc>
- </rule>
- <!-- Rule test name: was "amq_queue_37" -->
- <rule name = "pre-existence">
- <doc>The server MUST ignore the durable field if the queue already exists.</doc>
- <doc type = "scenario">
- A client creates two named queues, one durable and one transient. The client
- then attempts to declare the two queues using the same names again, but reversing
- the value of the durable flag in each case. Verify that the queues still exist
- with the original durable flag values.
- <!-- TODO: but how? -->
- </doc>
- </rule>
- </field>
-
- <field name = "exclusive" domain = "bit" label = "request an exclusive queue">
- <doc>
- Exclusive queues may only be consumed from by the current connection. Setting
- the 'exclusive' flag always implies 'auto-delete'.
- </doc>
-
- <!-- Rule test name: was "amq_queue_38" -->
- <rule name = "types">
- <doc>
- The server MUST support both exclusive (private) and non-exclusive (shared)
- queues.
- </doc>
- <doc type = "scenario">
- A client creates two named queues, one exclusive and one non-exclusive.
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_queue_04" -->
- <rule name = "02" on-failure = "channel-error">
- <doc>
- The client MAY NOT attempt to declare any existing and exclusive queue
- on multiple connections.
- </doc>
- <doc type = "scenario">
- A client declares an exclusive named queue. A second client on a different
- connection attempts to declare a queue of the same name.
- </doc>
- </rule>
- </field>
-
- <field name = "auto-delete" domain = "bit" label = "auto-delete queue when unused">
- <doc>
-          If set, the queue is deleted when all consumers have finished using it. The
-          last consumer can be cancelled either explicitly or because its channel is
-          closed. If the queue never had a consumer, it is not deleted.
- </doc>
-
- <!-- Rule test name: was "amq_queue_31" -->
- <rule name = "pre-existence">
- <doc>
- The server MUST ignore the auto-delete field if the queue already exists.
- </doc>
- <doc type = "scenario">
- A client creates two named queues, one as auto-delete and one explicit-delete.
- The client then attempts to declare the two queues using the same names again,
- but reversing the value of the auto-delete field in each case. Verify that the
- queues still exist with the original auto-delete flag values.
- <!-- TODO: but how? -->
- </doc>
- </rule>
- </field>
-
- <field name = "nowait" domain = "bit" label = "do not send a reply method">
- <doc>
- If set, the server will not respond to the method. The client should not wait
- for a reply method. If the server could not complete the method it will raise a
- channel or connection exception.
- </doc>
- </field>
-
- <field name = "arguments" domain = "table" label = "arguments for declaration">
- <doc>
- A set of arguments for the declaration. The syntax and semantics of these
- arguments depends on the server implementation. This field is ignored if passive
- is 1.
- </doc>
- </field>
- </method>
-
- <method name = "declare-ok" synchronous = "1" index = "11" label = "confirms a queue definition">
- <doc>
- This method confirms a Declare method and confirms the name of the queue, essential
- for automatically-named queues.
- </doc>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "queue" domain = "queue-name">
- <doc>
- Reports the name of the queue. If the server generated a queue name, this field
- contains that name.
- </doc>
- <assert check = "notnull" />
- </field>
-
- <field name = "message-count" domain = "long" label = "number of messages in queue">
- <doc>
- Reports the number of messages in the queue, which will be zero for
- newly-created queues.
- </doc>
- </field>
-
- <field name = "consumer-count" domain = "long" label = "number of consumers">
- <doc>
- Reports the number of active consumers for the queue. Note that consumers can
- suspend activity (Channel.Flow) in which case they do not appear in this count.
- </doc>
- </field>
- </method>
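-
-    <!-- Editor's note (illustrative Python sketch, not part of the original XML):
-         Queue.Declare and Declare-Ok, sketched with pika (0-9-1, so no ticket field):
-         an empty queue name asks the server to generate one, and the Declare-Ok
-         fields come back on the returned method frame.
-
-           import pika
-
-           conn = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
-           ch = conn.channel()
-
-           ok = ch.queue_declare(queue="", exclusive=True)   # server picks a unique name
-           print(ok.method.queue, ok.method.message_count, ok.method.consumer_count)
-
-           # passive=True asserts existence without creating; a missing queue closes
-           # the channel with reply code 404 (not found)
-           ch.queue_declare(queue=ok.method.queue, passive=True)
-           conn.close()
-    -->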
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "bind" synchronous = "1" index = "20" label = "bind queue to an exchange">
- <doc>
- This method binds a queue to an exchange. Until a queue is bound it will not receive
- any messages. In a classic messaging model, store-and-forward queues are bound to a
- dest exchange and subscription queues are bound to a dest_wild exchange.
- </doc>
-
- <!-- Rule test name: was "amq_queue_25" -->
- <rule name = "duplicates">
- <doc>
-          A server MUST ignore duplicate bindings - that is, two or more bind
- methods for a specific queue, with identical arguments - without treating these
- as an error.
- </doc>
- <doc type = "scenario">
- A client binds a named queue to an exchange. The client then repeats the bind
- (with identical arguments).
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_queue_39" -->
- <rule name = "failure" on-failure = "??????">
- <!--
- TODO: Find correct code. The on-failure code returned should depend on why the bind
- failed. Assuming that failures owing to bad parameters are covered in the rules relating
- to those parameters, the only remaining reason for a failure would be the lack of
-          server resources or some internal error - such as too many queues open. Would these
- cases qualify as "resource error" 506 or "internal error" 541?
- -->
- <doc>If a bind fails, the server MUST raise a connection exception.</doc>
- <doc type = "scenario">
- TODO
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_queue_12" -->
- <rule name = "transient-exchange" on-failure = "not-allowed">
- <doc>
- The server MUST NOT allow a durable queue to bind to a transient exchange.
- </doc>
- <doc type = "scenario">
- A client creates a transient exchange. The client then declares a named durable
- queue and then attempts to bind the transient exchange to the durable queue.
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_queue_13" -->
- <rule name = "durable-exchange">
- <doc>
- Bindings for durable queues are automatically durable and the server SHOULD
- restore such bindings after a server restart.
- </doc>
- <doc type = "scenario">
- A server creates a named durable queue and binds it to a durable exchange. The
- server is restarted. The client then attempts to use the queue/exchange combination.
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_queue_17" -->
- <rule name = "internal-exchange">
- <doc>
- If the client attempts to bind to an exchange that was declared as internal, the server
- MUST raise a connection exception with reply code 530 (not allowed).
- </doc>
- <doc type = "scenario">
- A client attempts to bind a named queue to an internal exchange.
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_queue_40" -->
- <rule name = "binding-count">
- <doc>
- The server SHOULD support at least 4 bindings per queue, and ideally, impose no
- limit except as defined by available resources.
- </doc>
- <doc type = "scenario">
- A client creates a named queue and attempts to bind it to 4 different non-internal
- exchanges.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
-
- <response name = "bind-ok" />
-
- <field name = "ticket" domain = "access-ticket">
- <doc>
- The client provides a valid access ticket giving "active" access rights to the
- queue's access realm.
- </doc>
- </field>
-
- <field name = "queue" domain = "queue-name">
- <doc>
- Specifies the name of the queue to bind. If the queue name is empty, refers to
- the current queue for the channel, which is the last declared queue.
- </doc>
-
- <rule name = "empty-queue" on-failure = "not-allowed">
- <doc>
- A client MUST NOT be allowed to bind a non-existent and unnamed queue (i.e.
- empty queue name) to an exchange.
- </doc>
- <doc type = "scenario">
- A client attempts to bind with an unnamed (empty) queue name to an exchange.
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_queue_26" -->
- <rule name = "queue-existence" on-failure = "not-found">
- <doc>
- A client MUST NOT be allowed to bind a non-existent queue (i.e. not previously
- declared) to an exchange.
- </doc>
- <doc type = "scenario">
- A client attempts to bind an undeclared queue name to an exchange.
- </doc>
- </rule>
- </field>
-
- <field name = "exchange" domain = "exchange-name" label = "name of the exchange to bind to">
- <!-- Rule test name: was "amq_queue_14" -->
- <rule name = "exchange-existence" on-failure = "not-found">
- <doc>
- A client MUST NOT be allowed to bind a queue to a non-existent exchange.
- </doc>
- <doc type = "scenario">
-            A client attempts to bind a named queue to an undeclared exchange.
- </doc>
- </rule>
- </field>
-
- <field name = "routing-key" domain = "shortstr" label = "message routing key">
- <doc>
- Specifies the routing key for the binding. The routing key is used for routing
- messages depending on the exchange configuration. Not all exchanges use a
- routing key - refer to the specific exchange documentation. If the queue name
- is empty, the server uses the last queue declared on the channel. If the
- routing key is also empty, the server uses this queue name for the routing
- key as well. If the queue name is provided but the routing key is empty, the
- server does the binding with that empty routing key. The meaning of empty
- routing keys depends on the exchange implementation.
- </doc>
- </field>
-
- <field name = "nowait" domain = "bit" label = "do not send a reply method">
- <doc>
- If set, the server will not respond to the method. The client should not wait
- for a reply method. If the server could not complete the method it will raise a
- channel or connection exception.
- </doc>
- </field>
-
- <field name = "arguments" domain = "table" label = "arguments for binding">
- <doc>
- A set of arguments for the binding. The syntax and semantics of these arguments
- depends on the exchange class.
- </doc>
- </field>
- </method>
-
- <method name = "bind-ok" synchronous = "1" index = "21" label = "confirm bind successful">
- <doc>This method confirms that the bind was successful.</doc>
-
- <chassis name = "client" implement = "MUST" />
- </method>
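-
-    <!-- Editor's note (illustrative Python sketch, not part of the original XML):
-         Queue.Bind in practice, sketched with pika (0-9-1): a durable queue bound to
-         a durable exchange, with the same bind repeated to show that identical
-         duplicate bindings are accepted silently. Names and the routing pattern are
-         invented.
-
-           import pika
-
-           conn = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
-           ch = conn.channel()
-           ch.exchange_declare(exchange="app.events", exchange_type="topic", durable=True)
-           ch.queue_declare(queue="audit", durable=True)
-
-           ch.queue_bind(queue="audit", exchange="app.events", routing_key="order.#")
-           ch.queue_bind(queue="audit", exchange="app.events", routing_key="order.#")  # no error
-           conn.close()
-    -->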
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "purge" synchronous = "1" index = "30" label = "purge a queue">
- <doc>
- This method removes all messages from a queue. It does not cancel consumers. Purged
- messages are deleted without any formal "undo" mechanism.
- </doc>
-
- <!-- Rule test name: was "amq_queue_15" -->
- <rule name = "01">
- <doc>A call to purge MUST result in an empty queue.</doc>
- </rule>
-
- <!-- Rule test name: was "amq_queue_41" -->
- <rule name = "02">
- <doc>
-          On transacted channels the server MUST NOT purge messages that have already been
- sent to a client but not yet acknowledged.
- </doc>
- </rule>
-
- <!-- TODO: Rule split? -->
-
- <!-- Rule test name: was "amq_queue_42" -->
- <rule name = "03">
- <doc>
- The server MAY implement a purge queue or log that allows system administrators
- to recover accidentally-purged messages. The server SHOULD NOT keep purged
- messages in the same storage spaces as the live messages since the volumes of
- purged messages may get very large.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
-
- <response name = "purge-ok" />
-
- <field name = "ticket" domain = "access-ticket">
- <doc>The access ticket must be for the access realm that holds the queue.</doc>
-
- <rule name = "01">
- <doc>
- The client MUST provide a valid access ticket giving "read" access rights to
- the queue's access realm. Note that purging a queue is equivalent to reading
- all messages and discarding them.
- </doc>
- </rule>
- </field>
-
- <field name = "queue" domain = "queue-name">
- <doc>
- Specifies the name of the queue to purge. If the queue name is empty, refers to
- the current queue for the channel, which is the last declared queue.
- </doc>
-
- <rule name = "01">
- <doc>
- If the client did not previously declare a queue, and the queue name in this
- method is empty, the server MUST raise a connection exception with reply
- code 530 (not allowed).
- </doc>
- </rule>
-
- <!-- TODO Rule split? -->
-
- <!-- Rule test name: was "amq_queue_16" -->
- <rule name = "02">
- <doc>
- The queue MUST exist. Attempting to purge a non-existing queue MUST cause a
- channel exception.
- </doc>
- </rule>
- </field>
-
- <field name = "nowait" domain = "bit" label = "do not send a reply method">
- <doc>
- If set, the server will not respond to the method. The client should not wait
- for a reply method. If the server could not complete the method it will raise a
- channel or connection exception.
- </doc>
- </field>
- </method>
-
- <method name = "purge-ok" synchronous = "1" index = "31" label = "confirms a queue purge">
- <doc>This method confirms the purge of a queue.</doc>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "message-count" domain = "long" label = "number of messages purged">
- <doc>Reports the number of messages purged.</doc>
- </field>
- </method>
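-
-    <!-- Editor's note (illustrative Python sketch, not part of the original XML):
-         Queue.Purge and Purge-Ok, sketched with pika (0-9-1); the purged-message
-         count from Purge-Ok is available on the returned frame.
-
-           import pika
-
-           conn = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
-           ch = conn.channel()
-           ch.queue_declare(queue="audit", durable=True)
-
-           ok = ch.queue_purge(queue="audit")
-           print("purged", ok.method.message_count, "messages")
-           conn.close()
-    -->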
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "delete" synchronous = "1" index = "40" label = "delete a queue">
- <doc>
- This method deletes a queue. When a queue is deleted any pending messages are sent
- to a dead-letter queue if this is defined in the server configuration, and all
- consumers on the queue are cancelled.
- </doc>
-
- <!-- TODO: Rule split? -->
-
- <!-- Rule test name: was "amq_queue_43" -->
- <rule name = "01">
- <doc>
- The server SHOULD use a dead-letter queue to hold messages that were pending on
- a deleted queue, and MAY provide facilities for a system administrator to move
- these messages back to an active queue.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
-
- <response name = "delete-ok" />
-
- <field name = "ticket" domain = "access-ticket">
- <doc>
- The client provides a valid access ticket giving "active" access rights to the
- queue's access realm.
- </doc>
- </field>
-
- <field name = "queue" domain = "queue-name">
- <doc>
- Specifies the name of the queue to delete. If the queue name is empty, refers to
- the current queue for the channel, which is the last declared queue.
- </doc>
-
- <rule name = "01">
- <doc>
- If the client did not previously declare a queue, and the queue name in this
- method is empty, the server MUST raise a connection exception with reply
- code 530 (not allowed).
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_queue_21" -->
- <rule name = "02">
- <doc>
- The queue must exist. If the client attempts to delete a non-existing queue
- the server MUST raise a channel exception with reply code 404 (not found).
- </doc>
- </rule>
- </field>
-
- <field name = "if-unused" domain = "bit" label = "delete only if unused">
- <doc>
- If set, the server will only delete the queue if it has no consumers. If the
-          queue has consumers the server does not delete it but raises a channel
- exception instead.
- </doc>
-
- <!-- Rule test name: was "amq_queue_29" and "amq_queue_30" -->
- <rule name = "01">
- <doc>The server MUST respect the if-unused flag when deleting a queue.</doc>
- </rule>
- </field>
-
- <field name = "if-empty" domain = "bit" label = "delete only if empty">
- <doc>
- If set, the server will only delete the queue if it has no messages.
- </doc>
- <rule name = "01">
- <doc>
- If the queue is not empty the server MUST raise a channel exception with
- reply code 406 (precondition failed).
- </doc>
- </rule>
- </field>
-
- <field name = "nowait" domain = "bit" label = "do not send a reply method">
- <doc>
- If set, the server will not respond to the method. The client should not wait
- for a reply method. If the server could not complete the method it will raise a
- channel or connection exception.
- </doc>
- </field>
- </method>
-
- <method name = "delete-ok" synchronous = "1" index = "41" label = "confirm deletion of a queue">
- <doc>This method confirms the deletion of a queue.</doc>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "message-count" domain = "long" label = "number of messages purged">
- <doc>Reports the number of messages purged.</doc>
- </field>
- </method>
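-
-    <!-- Editor's note (illustrative Python sketch, not part of the original XML):
-         Queue.Delete with the if-unused and if-empty guards, sketched with pika
-         (0-9-1); when either precondition fails, the server raises a channel
-         exception instead of deleting.
-
-           import pika
-
-           conn = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
-           ch = conn.channel()
-           ch.queue_declare(queue="audit", durable=True)
-
-           ok = ch.queue_delete(queue="audit", if_unused=True, if_empty=True)
-           print("deleted; dropped", ok.method.message_count, "pending messages")
-           conn.close()
-    -->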
- </class>
-
- <!-- == BASIC ============================================================ -->
-
- <class name = "basic" handler = "channel" index = "60" label = "work with basic content">
- <doc>
- The Basic class provides methods that support an industry-standard messaging model.
- </doc>
-
- <doc type = "grammar">
- basic = C:QOS S:QOS-OK
- / C:CONSUME S:CONSUME-OK
- / C:CANCEL S:CANCEL-OK
- / C:PUBLISH content
- / S:RETURN content
- / S:DELIVER content
- / C:GET ( S:GET-OK content / S:GET-EMPTY )
- / C:ACK
- / C:REJECT
- </doc>
-
- <chassis name = "server" implement = "MUST" />
- <chassis name = "client" implement = "MAY" />
-
- <!-- Rule test name: was "amq_basic_08" -->
- <rule name = "01">
- <doc>
- The server SHOULD respect the persistent property of basic messages and
- SHOULD make a best-effort to hold persistent basic messages on a reliable
- storage mechanism.
- </doc>
- <doc type = "scenario">
- Send a persistent message to queue, stop server, restart server and then
- verify whether message is still present. Assumes that queues are durable.
- Persistence without durable queues makes no sense.
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_basic_09" -->
- <rule name = "02">
- <doc>
- The server MUST NOT discard a persistent basic message in case of a queue
- overflow.
- </doc>
- <doc type = "scenario">
- Create a queue overflow situation with persistent messages and verify that
- messages do not get lost (presumably the server will write them to disk).
- </doc>
- </rule>
-
- <rule name = "03">
- <doc>
- The server MAY use the Channel.Flow method to slow or stop a basic message
- publisher when necessary.
- </doc>
- <doc type = "scenario">
- Create a queue overflow situation with non-persistent messages and verify
- whether the server responds with Channel.Flow or not. Repeat with persistent
- messages.
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_basic_10" -->
- <rule name = "04">
- <doc>
- The server MAY overflow non-persistent basic messages to persistent
- storage.
- </doc>
- <!-- Test scenario: untestable -->
- </rule>
-
- <rule name = "05">
- <doc>
- The server MAY discard or dead-letter non-persistent basic messages on a
- priority basis if the queue size exceeds some configured limit.
- </doc>
- <!-- Test scenario: untestable -->
- </rule>
-
- <!-- Rule test name: was "amq_basic_11" -->
- <rule name = "06">
- <doc>
- The server MUST implement at least 2 priority levels for basic messages,
- where priorities 0-4 and 5-9 are treated as two distinct levels.
- </doc>
- <doc type = "scenario">
- Send a number of priority 0 messages to a queue. Send one priority 9
- message. Consume messages from the queue and verify that the first message
- received was priority 9.
- </doc>
- </rule>
-
- <rule name = "07">
- <doc>
- The server MAY implement up to 10 priority levels.
- </doc>
- <doc type = "scenario">
- Send a number of messages with mixed priorities to a queue, so that all
- priority values from 0 to 9 are exercised. A good scenario would be ten
- messages in low-to-high priority. Consume from queue and verify how many
- priority levels emerge.
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_basic_12" -->
- <rule name = "08">
- <doc>
- The server MUST deliver messages of the same priority in order irrespective of
- their individual persistence.
- </doc>
- <doc type = "scenario">
- Send a set of messages with the same priority but different persistence
- settings to a queue. Consume and verify that messages arrive in same order
- as originally published.
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_basic_13" -->
- <rule name = "09">
- <doc>
-        The server MUST support automatic acknowledgements on Basic content, i.e.
-        consumers with the no-ack field set to TRUE.
- </doc>
- <doc type = "scenario">
- Create a queue and a consumer using automatic acknowledgements. Publish
- a set of messages to the queue. Consume the messages and verify that all
- messages are received.
- </doc>
- </rule>
-
- <rule name = "10">
- <doc>
-        The server MUST support explicit acknowledgements on Basic content, i.e.
-        consumers with the no-ack field set to FALSE.
- </doc>
- <doc type = "scenario">
- Create a queue and a consumer using explicit acknowledgements. Publish a
- set of messages to the queue. Consume the messages but acknowledge only
- half of them. Disconnect and reconnect, and consume from the queue.
- Verify that the remaining messages are received.
- </doc>
- </rule>
-
- <!-- These are the properties for a Basic content -->
-
- <field name = "content-type" domain = "shortstr" label = "MIME content type" />
- <field name = "content-encoding" domain = "shortstr" label = "MIME content encoding" />
- <field name = "headers" domain = "table" label = "message header field table" />
- <field name = "delivery-mode" domain = "octet" label = "non-persistent (1) or persistent (2)" />
- <field name = "priority" domain = "octet" label = "message priority, 0 to 9" />
- <field name = "correlation-id" domain = "shortstr" label = "application correlation identifier" />
- <field name = "reply-to" domain = "shortstr" label = "destination to reply to" />
- <field name = "expiration" domain = "shortstr" label = "message expiration specification" />
- <field name = "message-id" domain = "shortstr" label = "application message identifier" />
- <field name = "timestamp" domain = "timestamp" label = "message timestamp" />
- <field name = "type" domain = "shortstr" label = "message type name" />
- <field name = "user-id" domain = "shortstr" label = "creating user id" />
- <field name = "app-id" domain = "shortstr" label = "creating application id" />
- <!-- This field is deprecated pending review -->
- <field name = "cluster-id" domain = "shortstr" label = "intra-cluster routing identifier" />
-
- <field name = "property-one" domain = "shortstr" label = "Extra property for testing only" />
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "qos" synchronous = "1" index = "10" label = "specify quality of service">
- <doc>
- This method requests a specific quality of service. The QoS can be specified for the
- current channel or for all channels on the connection. The particular properties and
- semantics of a qos method always depend on the content class semantics. Though the
- qos method could in principle apply to both peers, it is currently meaningful only
- for the server.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
- <response name = "qos-ok" />
-
- <field name = "prefetch-size" domain = "long" label = "prefetch window in octets">
- <doc>
- The client can request that messages be sent in advance so that when the client
- finishes processing a message, the following message is already held locally,
- rather than needing to be sent down the channel. Prefetching gives a performance
- improvement. This field specifies the prefetch window size in octets. The server
- will send a message in advance if it is equal to or smaller in size than the
- available prefetch size (and also falls into other prefetch limits). May be set
- to zero, meaning "no specific limit", although other prefetch limits may still
- apply. The prefetch-size is ignored if the no-ack option is set.
- </doc>
- <!-- Rule test name: was "amq_basic_17" -->
- <rule name = "01">
- <doc>
- The server MUST ignore this setting when the client is not processing any
- messages - i.e. the prefetch size does not limit the transfer of single
- messages to a client, only the sending in advance of more messages while
- the client still has one or more unacknowledged messages.
- </doc>
- <doc type = "scenario">
- Define a QoS prefetch-size limit and send a single message that exceeds
- that limit. Verify that the message arrives correctly.
- </doc>
- </rule>
- </field>
-
- <field name = "prefetch-count" domain = "short" label = "prefetch window in messages">
- <doc>
- Specifies a prefetch window in terms of whole messages. This field may be used
- in combination with the prefetch-size field; a message will only be sent in
- advance if both prefetch windows (and those at the channel and connection level)
- allow it. The prefetch-count is ignored if the no-ack option is set.
- </doc>
- <!-- Rule test name: was "amq_basic_18" -->
- <rule name = "01">
- <doc>
- The server MAY send less data in advance than allowed by the client's
- specified prefetch windows but it MUST NOT send more.
- </doc>
- <doc type = "scenario">
- Define a QoS prefetch-size limit and a prefetch-count limit greater than
- one. Send multiple messages that exceed the prefetch size. Verify that
- no more than one message arrives at once.
- </doc>
- </rule>
- </field>
-
- <field name = "global" domain = "bit" label = "apply to entire connection">
- <doc>
- By default the QoS settings apply to the current channel only. If this field is
- set, they are applied to the entire connection.
- </doc>
- </field>
- </method>
-
- <method name = "qos-ok" synchronous = "1" index = "11" label = "confirm the requested qos">
- <doc>
- This method tells the client that the requested QoS levels could be handled by the
- server. The requested QoS applies to all active consumers until a new QoS is
- defined.
- </doc>
- <chassis name = "client" implement = "MUST" />
- </method>
-
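- <!-- Non-normative sketch: how a client might issue the qos method above, assuming a
-      Python AMQP 0-9-1 client library such as pika (an illustration only; the library,
-      host and values below are assumptions, not part of this specification).
-
-      import pika
-
-      # Open a connection and a channel to a broker assumed to be on localhost.
-      connection = pika.BlockingConnection(pika.ConnectionParameters(host="localhost"))
-      channel = connection.channel()
-
-      # Ask the server to send at most 10 unacknowledged messages in advance
-      # (prefetch-count); prefetch_size=0 means "no specific octet limit".
-      channel.basic_qos(prefetch_count=10, prefetch_size=0)
- -->
-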
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "consume" synchronous = "1" index = "30" label = "start a queue consumer">
- <doc>
- This method asks the server to start a "consumer", which is a transient request for
- messages from a specific queue. Consumers last as long as the channel they were
- created on, or until the client cancels them.
- </doc>
-
- <!-- Rule test name: was "amq_basic_01" -->
- <rule name = "01">
- <doc>
- The server SHOULD support at least 16 consumers per queue, and ideally, impose
- no limit except as defined by available resources.
- </doc>
- <doc type = "scenario">
- Create a queue and create consumers on that queue until the server closes the
- connection. Verify that the number of consumers created was at least sixteen
- and report the total number.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
- <response name = "consume-ok" />
-
- <field name = "ticket" domain = "access-ticket">
- <rule name = "01" on-failure = "access-refused">
- <doc>
- The client MUST provide a valid access ticket giving "read" access rights to
- the realm for the queue.
- </doc>
- <doc type = "scenario">
- Attempt to create a consumer with an invalid (non-zero) access ticket.
- </doc>
- </rule>
- </field>
-
- <field name = "queue" domain = "queue-name">
- <doc>
- Specifies the name of the queue to consume from. If the queue name is null,
- refers to the current queue for the channel, which is the last declared queue.
- </doc>
- <rule name = "01" on-failure = "not-allowed">
- <doc>
- If the queue name is empty the client MUST have previously declared a
- queue using this channel.
- </doc>
- <doc type = "scenario">
- Attempt to create a consumer with an empty queue name and no previously
- declared queue on the channel.
- </doc>
- </rule>
- </field>
-
- <field name = "consumer-tag" domain = "consumer-tag">
- <doc>
- Specifies the identifier for the consumer. The consumer tag is local to a
- connection, so two clients can use the same consumer tags. If this field is
- empty the server will generate a unique tag.
- </doc>
- <rule name = "01" on-failure = "not-allowed">
- <doc>
- The client MUST NOT specify a tag that refers to an existing consumer.
- </doc>
- <doc type = "scenario">
- Attempt to create two consumers with the same non-empty tag.
- </doc>
- </rule>
- <rule name = "02" on-failure = "not-allowed">
- <doc>
- The consumer tag is valid only within the channel from which the
- consumer was created. I.e. a client MUST NOT create a consumer in one
- channel and then use it in another.
- </doc>
- <doc type = "scenario">
- Attempt to create a consumer in one channel, then use in another channel,
- in which consumers have also been created (to test that the server uses
- unique consumer tags).
- </doc>
- </rule>
- </field>
-
- <field name = "no-local" domain = "no-local" />
-
- <field name = "no-ack" domain = "no-ack" />
-
- <field name = "exclusive" domain = "bit" label = "request exclusive access">
- <doc>
- Request exclusive consumer access, meaning only this consumer can access the
- queue.
- </doc>
- <!-- Rule test name: was "amq_basic_02" -->
- <rule name = "01" on-failure = "access-refused">
- <doc>
- The client MAY NOT gain exclusive access to a queue that already has
- active consumers.
- </doc>
- <doc type = "scenario">
- Open two connections to a server, and in one connection create a shared
- (non-exclusive) queue and then consume from the queue. In the second
- connection attempt to consume from the same queue using the exclusive
- option.
- </doc>
- </rule>
- </field>
-
- <field name = "nowait" domain = "bit" label = "do not send a reply method">
- <doc>
- If set, the server will not respond to the method. The client should not wait
- for a reply method. If the server could not complete the method it will raise
- a channel or connection exception.
- </doc>
- </field>
- </method>
-
- <method name = "consume-ok" synchronous = "1" index = "31" label = "confirm a new consumer">
- <doc>
- The server provides the client with a consumer tag, which is used by the client
- for methods called on the consumer at a later stage.
- </doc>
- <chassis name = "client" implement = "MUST" />
- <field name = "consumer-tag" domain = "consumer-tag">
- <doc>
- Holds the consumer tag specified by the client or provided by the server.
- </doc>
- </field>
- </method>
-
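- <!-- Non-normative sketch: starting a consumer as described by the consume method above,
-      assuming a Python AMQP 0-9-1 client such as pika; the queue name and callback are
-      placeholders, not part of this specification.
-
-      import pika
-
-      connection = pika.BlockingConnection(pika.ConnectionParameters(host="localhost"))
-      channel = connection.channel()
-      channel.queue_declare(queue="example-queue")
-
-      def on_message(ch, method, properties, body):
-          # method.consumer_tag and method.delivery_tag carry the consumer-tag and
-          # delivery-tag fields of the corresponding deliver method.
-          print(body)
-          ch.basic_ack(delivery_tag=method.delivery_tag)
-
-      # auto_ack=False selects explicit acknowledgements (the no-ack field unset);
-      # exclusive=False leaves the queue open to other consumers. The returned tag
-      # plays the role of the consumer-tag in consume-ok.
-      tag = channel.basic_consume(queue="example-queue",
-                                  on_message_callback=on_message,
-                                  auto_ack=False,
-                                  exclusive=False)
-      channel.start_consuming()   # the server now pushes deliver methods to on_message
- -->
-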
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "cancel" synchronous = "1" index = "20" label = "end a queue consumer">
- <doc>
- This method cancels a consumer. This does not affect already delivered
- messages, but it does mean the server will not send any more messages for
- that consumer. The client may receive an arbitrary number of messages in
- between sending the cancel method and receiving the cancel-ok reply.
- </doc>
-
- <rule name = "01">
- <doc>
- If the queue does not exist the server MUST ignore the cancel method, so
- long as the consumer tag is valid for that channel.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
- <response name = "cancel-ok" />
-
- <field name = "consumer-tag" domain = "consumer-tag" />
-
- <field name = "nowait" domain = "bit" label = "do not send a reply method">
- <doc>
- If set, the server will not respond to the method. The client should not wait
- for a reply method. If the server could not complete the method it will raise a
- channel or connection exception.
- </doc>
- </field>
- </method>
-
- <method name = "cancel-ok" synchronous = "1" index = "21" label = "confirm a cancelled consumer">
- <doc>
- This method confirms that the cancellation was completed.
- </doc>
- <chassis name = "client" implement = "MUST" />
- <field name = "consumer-tag" domain = "consumer-tag" />
- </method>
-
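- <!-- Non-normative sketch: cancelling the consumer started in the sketch above, assuming
-      the same pika channel and the tag returned by basic_consume.
-
-      # Stop further deliveries for this consumer; deliveries already in flight may
-      # still arrive before the cancel-ok is processed, as noted above.
-      channel.basic_cancel(consumer_tag=tag)
- -->
-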
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "publish" content = "1" index = "40" label = "publish a message">
- <doc>
- This method publishes a message to a specific exchange. The message will be routed
- to queues as defined by the exchange configuration and distributed to any active
- consumers when the transaction, if any, is committed.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
-
- <field name = "ticket" domain = "access-ticket">
- <rule name = "01">
- <doc>
- The client MUST provide a valid access ticket giving "write" access rights
- to the access realm for the exchange.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- </field>
-
- <field name = "exchange" domain = "exchange-name">
- <doc>
- Specifies the name of the exchange to publish to. The exchange name can be
- empty, meaning the default exchange. If the exchange name is specified, and that
- exchange does not exist, the server will raise a channel exception.
- </doc>
-
- <!-- Rule test name: was "amq_basic_06" -->
- <rule name = "01">
- <doc>
- The server MUST accept a blank exchange name to mean the default exchange.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_basic_14" -->
- <rule name = "02">
- <doc>
- If the exchange was declared as an internal exchange, the server MUST raise
- a channel exception with a reply code 403 (access refused).
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_basic_15" -->
- <rule name = "03">
- <doc>
- The exchange MAY refuse basic content in which case it MUST raise a channel
- exception with reply code 540 (not implemented).
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- </field>
-
- <field name = "routing-key" domain = "shortstr" label = "Message routing key">
- <doc>
- Specifies the routing key for the message. The routing key is used for routing
- messages depending on the exchange configuration.
- </doc>
- </field>
-
- <field name = "mandatory" domain = "bit" label = "indicate mandatory routing">
- <doc>
- This flag tells the server how to react if the message cannot be routed to a
- queue. If this flag is set, the server will return an unroutable message with a
- Return method. If this flag is zero, the server silently drops the message.
- </doc>
- <!-- Rule test name: was "amq_basic_07" -->
- <rule name = "01">
- <doc>
- The server SHOULD implement the mandatory flag.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- </field>
-
- <field name = "immediate" domain = "bit" label = "request immediate delivery">
- <doc>
- This flag tells the server how to react if the message cannot be routed to a
- queue consumer immediately. If this flag is set, the server will return an
- undeliverable message with a Return method. If this flag is zero, the server
- will queue the message, but with no guarantee that it will ever be consumed.
- </doc>
- <!-- Rule test name: was "amq_basic_16" -->
- <rule name = "01">
- <doc>
- The server SHOULD implement the immediate flag.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- </field>
- </method>
-
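- <!-- Non-normative sketch: publishing as described above, assuming a Python AMQP 0-9-1
-      client such as pika (queue name and payload are placeholders; such clients do not
-      expose the ticket or immediate fields of this method).
-
-      import pika
-
-      connection = pika.BlockingConnection(pika.ConnectionParameters(host="localhost"))
-      channel = connection.channel()
-      channel.queue_declare(queue="example-queue")
-
-      # delivery_mode=2 marks the message persistent; priority uses the 0 to 9 range
-      # described by the priority property above.
-      props = pika.BasicProperties(content_type="text/plain", delivery_mode=2, priority=5)
-
-      # An empty exchange name means the default exchange. mandatory=True asks the
-      # server to hand back an unroutable message with the return method rather than
-      # silently dropping it.
-      channel.basic_publish(exchange="",
-                            routing_key="example-queue",
-                            body=b"hello",
-                            properties=props,
-                            mandatory=True)
- -->
-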
- <method name = "return" content = "1" index = "50" label = "return a failed message">
- <doc>
- This method returns an undeliverable message that was published with the "immediate"
- flag set, or an unroutable message published with the "mandatory" flag set. The
- reply code and text provide information about the reason that the message was
- undeliverable.
- </doc>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "reply-code" domain = "reply-code" />
-
- <field name = "reply-text" domain = "reply-text" />
-
- <field name = "exchange" domain = "exchange-name">
- <doc>
- Specifies the name of the exchange that the message was originally published to.
- </doc>
- </field>
-
- <field name = "routing-key" domain = "shortstr" label = "Message routing key">
- <doc>
- Specifies the routing key name specified when the message was published.
- </doc>
- </field>
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "deliver" content = "1" index = "60"
- label = "notify the client of a consumer message">
- <doc>
- This method delivers a message to the client, via a consumer. In the asynchronous
- message delivery model, the client starts a consumer using the Consume method, then
- the server responds with Deliver methods as and when messages arrive for that
- consumer.
- </doc>
-
- <!-- Rule test name: was "amq_basic_19" -->
- <rule name = "01">
- <!-- TODO: Rule split? -->
- <doc>
- The server SHOULD track the number of times a message has been delivered to
- clients and when a message is redelivered a certain number of times - e.g. 5
- times - without being acknowledged, the server SHOULD consider the message to be
- unprocessable (possibly causing client applications to abort), and move the
- message to a dead letter queue.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "consumer-tag" domain = "consumer-tag" />
-
- <field name = "delivery-tag" domain = "delivery-tag" />
-
- <field name = "redelivered" domain = "redelivered" />
-
- <field name = "exchange" domain = "exchange-name">
- <doc>
- Specifies the name of the exchange that the message was originally published to.
- </doc>
- </field>
-
- <field name = "routing-key" domain = "shortstr" label = "Message routing key">
- <doc>Specifies the routing key name specified when the message was published.</doc>
- </field>
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "get" synchronous = "1" index = "70" label = "direct access to a queue">
- <doc>
- This method provides direct access to the messages in a queue using a synchronous
- dialogue that is designed for specific types of application where synchronous
- functionality is more important than performance.
- </doc>
-
- <response name = "get-ok" />
- <response name = "get-empty" />
- <chassis name = "server" implement = "MUST" />
-
- <field name = "ticket" domain = "access-ticket">
- <rule name = "01">
- <doc>
- The client MUST provide a valid access ticket giving "read" access rights to
- the realm for the queue.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- </field>
-
- <field name = "queue" domain = "queue-name">
- <doc>
- Specifies the name of the queue to consume from. If the queue name is null,
- refers to the current queue for the channel, which is the last declared queue.
- </doc>
- <rule name = "01">
- <doc>
- If the client did not previously declare a queue, and the queue name in this
- method is empty, the server MUST raise a connection exception with reply
- code 530 (not allowed).
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- </field>
-
- <field name = "no-ack" domain = "no-ack" />
- </method>
-
- <method name = "get-ok" synchronous = "1" content = "1" index = "71"
- label = "provide client with a message">
- <doc>
- This method delivers a message to the client following a get method. A message
- delivered by 'get-ok' must be acknowledged unless the no-ack option was set in the
- get method.
- </doc>
-
- <chassis name = "client" implement = "MAY" />
-
- <field name = "delivery-tag" domain = "delivery-tag" />
-
- <field name = "redelivered" domain = "redelivered" />
-
- <field name = "exchange" domain = "exchange-name">
- <doc>
- Specifies the name of the exchange that the message was originally published to.
- If empty, the message was published to the default exchange.
- </doc>
- </field>
-
- <field name = "routing-key" domain = "shortstr" label = "Message routing key">
- <doc>Specifies the routing key name specified when the message was published.</doc>
- </field>
-
- <field name = "message-count" domain = "long" label = "number of messages pending">
- <doc>
- This field reports the number of messages pending on the queue, excluding the
- message being delivered. Note that this figure is indicative, not reliable, and
- can change arbitrarily as messages are added to the queue and removed by other
- clients.
- </doc>
- </field>
- </method>
-
- <method name = "get-empty" synchronous = "1" index = "72"
- label = "indicate no messages available">
- <doc>
- This method tells the client that the queue has no messages available for the
- client.
- </doc>
-
- <chassis name = "client" implement = "MAY" />
-
- <!-- This field is deprecated pending review -->
- <field name = "cluster-id" domain = "shortstr" label = "Cluster id">
- <doc>
- For use by cluster applications, should not be used by client applications.
- </doc>
- </field>
- </method>
-
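- <!-- Non-normative sketch: the synchronous get dialogue above, assuming a Python
-      AMQP 0-9-1 client such as pika; the queue name is a placeholder.
-
-      import pika
-
-      connection = pika.BlockingConnection(pika.ConnectionParameters(host="localhost"))
-      channel = connection.channel()
-      channel.queue_declare(queue="example-queue")
-
-      # basic_get returns a (method, properties, body) triple for get-ok, or
-      # (None, None, None) when the server answers with get-empty.
-      method, properties, body = channel.basic_get(queue="example-queue", auto_ack=False)
-      if method is None:
-          print("queue is empty")
-      else:
-          # method.message_count mirrors the indicative message-count field of get-ok.
-          print(body, method.message_count)
-          channel.basic_ack(delivery_tag=method.delivery_tag)
- -->
-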
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "ack" index = "80" label = "acknowledge one or more messages">
- <doc>
- This method acknowledges one or more messages delivered via the Deliver or Get-Ok
- methods. The client can ask to confirm a single message or a set of messages up to
- and including a specific message.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
-
- <field name = "delivery-tag" domain = "delivery-tag" />
-
- <field name = "multiple" domain = "bit" label = "acknowledge multiple messages">
- <doc>
- If set to 1, the delivery tag is treated as "up to and including", so that the
- client can acknowledge multiple messages with a single method. If set to zero,
- the delivery tag refers to a single message. If the multiple field is 1, and the
- delivery tag is zero, this tells the server to acknowledge all outstanding messages.
- </doc>
-
- <!-- Rule test name: was "amq_basic_20" -->
- <rule name = "01">
- <doc>
- The server MUST validate that a non-zero delivery-tag refers to a delivered
- message, and raise a channel exception if this is not the case.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- </field>
- </method>
-
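- <!-- Non-normative sketch: acknowledging with the multiple flag, assuming the pika
-      channel and the method frame from the get sketch above.
-
-      # multiple=True treats the delivery tag as "up to and including", confirming
-      # this message and every earlier unacknowledged one on the channel.
-      channel.basic_ack(delivery_tag=method.delivery_tag, multiple=True)
- -->
-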
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "reject" index = "90" label = "reject an incoming message">
- <doc>
- This method allows a client to reject a message. It can be used to interrupt and
- cancel large incoming messages, or return untreatable messages to their original
- queue.
- </doc>
-
- <!-- Rule test name: was "amq_basic_21" -->
- <rule name = "01">
- <doc>
- The server SHOULD be capable of accepting and processing the Reject method while
- sending message content with a Deliver or Get-Ok method. I.e. the server should
- read and process incoming methods while sending output frames. To cancel a
- partially-sent content, the server sends a content body frame of size 1 (i.e.
- with no data except the frame-end octet).
- </doc>
- </rule>
-
- <!-- Rule test name: was "amq_basic_22" -->
- <rule name = "02">
- <doc>
- The server SHOULD interpret this method as meaning that the client is unable to
- process the message at this time.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
-
- <rule name = "03">
- <!-- TODO: Rule split? -->
- <doc>
- A client MUST NOT use this method as a means of selecting messages to process. A
- rejected message MAY be discarded or dead-lettered, not necessarily passed to
- another client.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
-
- <field name = "delivery-tag" domain = "delivery-tag" />
-
- <field name = "requeue" domain = "bit" label = "requeue the message">
- <doc>
- If this field is zero, the message will be discarded. If this bit is 1, the
- server will attempt to requeue the message.
- </doc>
-
- <!-- Rule test name: was "amq_basic_23" -->
- <rule name = "01">
- <!-- TODO: Rule split? -->
- <doc>
- The server MUST NOT deliver the message to the same client within the
- context of the current channel. The recommended strategy is to attempt to
- deliver the message to an alternative consumer, and if that is not possible,
- to move the message to a dead-letter queue. The server MAY use more
- sophisticated tracking to hold the message on the queue and redeliver it to
- the same client at a later stage.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
- </field>
- </method>
-
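- <!-- Non-normative sketch: rejecting a delivery, assuming the pika channel and method
-      frame from the get sketch above.
-
-      # requeue=True asks the server to requeue the message (for another consumer, per
-      # the rule above); requeue=False lets the server discard or dead-letter it.
-      channel.basic_reject(delivery_tag=method.delivery_tag, requeue=True)
- -->
-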
- <method name = "recover" index = "100" label = "redeliver unacknowledged messages">
- <doc>
- This method asks the broker to redeliver all unacknowledged messages on a specified
- channel. Zero or more messages may be redelivered. This method is only allowed on
- non-transacted channels.
- </doc>
-
- <rule name = "01">
- <doc>
- The server MUST set the redelivered flag on all messages that are resent.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
-
- <rule name = "02">
- <doc>
- The server MUST raise a channel exception if this is called on a transacted
- channel.
- </doc>
- <doc type = "scenario">
- TODO.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
-
- <field name = "requeue" domain = "bit" label = "requeue the message">
- <doc>
- If this field is zero, the message will be redelivered to the original
- recipient. If this bit is 1, the server will attempt to requeue the message,
- potentially then delivering it to an alternative subscriber.
- </doc>
- </field>
- </method>
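- <!-- Non-normative sketch: recovering unacknowledged messages, assuming the pika channel
-      from the sketches above and a non-transacted channel.
-
-      # Ask the broker to redeliver every unacknowledged message on this channel;
-      # with requeue=True they may be requeued and delivered to other consumers.
-      channel.basic_recover(requeue=True)
- -->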
- </class>
-
- <!-- == FILE ============================================================= -->
-
- <class name = "file" handler = "channel" index = "70" label = "work with file content">
- <doc>
- The file class provides methods that support reliable file transfer. File
- messages have a specific set of properties that are required for interoperability
- with file transfer applications. File messages and acknowledgements are subject to
- channel transactions. Note that the file class does not provide message browsing
- methods; these are not compatible with the staging model. Applications that need
- browsable file transfer should use Basic content and the Basic class.
- </doc>
-
- <doc type = "grammar">
- file = C:QOS S:QOS-OK
- / C:CONSUME S:CONSUME-OK
- / C:CANCEL S:CANCEL-OK
- / C:OPEN S:OPEN-OK C:STAGE content
- / S:OPEN C:OPEN-OK S:STAGE content
- / C:PUBLISH
- / S:DELIVER
- / S:RETURN
- / C:ACK
- / C:REJECT
- </doc>
-
- <chassis name = "server" implement = "MAY" />
- <chassis name = "client" implement = "MAY" />
-
- <rule name = "01">
- <doc>
- The server MUST make a best-effort to hold file messages on a reliable storage
- mechanism.
- </doc>
- </rule>
-
- <!-- TODO Rule implement attr inverse? -->
-
- <!-- TODO: Rule split? -->
-
- <rule name = "02">
- <doc>
- The server MUST NOT discard a file message in case of a queue overflow. The server
- MUST use the Channel.Flow method to slow or stop a file message publisher when
- necessary.
- </doc>
- </rule>
-
- <!-- TODO: Rule split? -->
-
- <rule name = "03">
- <doc>
- The server MUST implement at least 2 priority levels for file messages, where
- priorities 0-4 and 5-9 are treated as two distinct levels. The server MAY implement
- up to 10 priority levels.
- </doc>
- </rule>
-
- <rule name = "04">
- <doc>
- The server MUST support both automatic and explicit acknowledgements on file
- content.
- </doc>
- </rule>
-
- <!-- These are the properties for a File content -->
-
- <field name = "content-type" domain = "shortstr" label = "MIME content type" />
- <field name = "content-encoding" domain = "shortstr" label = "MIME content encoding" />
- <field name = "headers" domain = "table" label = "message header field table" />
- <field name = "priority" domain = "octet" label = "message priority, 0 to 9" />
- <field name = "reply-to" domain = "shortstr" label = "destination to reply to" />
- <field name = "message-id" domain = "shortstr" label = "application message identifier" />
- <field name = "filename" domain = "shortstr" label = "message filename" />
- <field name = "timestamp" domain = "timestamp" label = "message timestamp" />
- <!-- This field is deprecated pending review -->
- <field name = "cluster-id" domain = "shortstr" label = "intra-cluster routing identifier" />
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "qos" synchronous = "1" index = "10" label = "specify quality of service">
- <doc>
- This method requests a specific quality of service. The QoS can be specified for the
- current channel or for all channels on the connection. The particular properties and
- semantics of a qos method always depend on the content class semantics. Though the
- qos method could in principle apply to both peers, it is currently meaningful only
- for the server.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
-
- <response name = "qos-ok" />
-
- <field name = "prefetch-size" domain = "long" label = "prefetch window in octets">
- <doc>
- The client can request that messages be sent in advance so that when the client
- finishes processing a message, the following message is already held locally,
- rather than needing to be sent down the channel. Prefetching gives a performance
- improvement. This field specifies the prefetch window size in octets. May be set
- to zero, meaning "no specific limit". Note that other prefetch limits may still
- apply. The prefetch-size is ignored if the no-ack option is set.
- </doc>
- </field>
-
- <field name = "prefetch-count" domain = "short" label = "prefetch window in messages">
- <doc>
- Specifies a prefetch window in terms of whole messages. This is compatible with
- some file API implementations. This field may be used in combination with the
- prefetch-size field; a message will only be sent in advance if both prefetch
- windows (and those at the channel and connection level) allow it. The
- prefetch-count is ignored if the no-ack option is set.
- </doc>
-
- <rule name = "01">
- <!-- TODO: Rule split? -->
- <doc>
- The server MAY send less data in advance than allowed by the client's
- specified prefetch windows but it MUST NOT send more.
- </doc>
- </rule>
- </field>
-
- <field name = "global" domain = "bit" label = "apply to entire connection">
- <doc>
- By default the QoS settings apply to the current channel only. If this field is
- set, they are applied to the entire connection.
- </doc>
- </field>
- </method>
-
- <method name = "qos-ok" synchronous = "1" index = "11" label = "confirm the requested qos">
- <doc>
- This method tells the client that the requested QoS levels could be handled by the
- server. The requested QoS applies to all active consumers until a new QoS is
- defined.
- </doc>
-
- <chassis name = "client" implement = "MUST" />
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "consume" synchronous = "1" index = "20" label = "start a queue consumer">
- <doc>
- This method asks the server to start a "consumer", which is a transient request for
- messages from a specific queue. Consumers last as long as the channel they were
- created on, or until the client cancels them.
- </doc>
-
- <rule name = "01">
- <doc>
- The server SHOULD support at least 16 consumers per queue, unless the queue was
- declared as private, and ideally, impose no limit except as defined by available
- resources.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
-
- <response name = "consume-ok" />
-
- <field name = "ticket" domain = "access-ticket">
- <rule name = "01">
- <doc>
- The client MUST provide a valid access ticket giving "read" access rights to
- the realm for the queue.
- </doc>
- </rule>
- </field>
-
- <field name = "queue" domain = "queue-name">
- <doc>
- Specifies the name of the queue to consume from. If the queue name is null,
- refers to the current queue for the channel, which is the last declared queue.
- </doc>
-
- <rule name = "01">
- <doc>
- If the client did not previously declare a queue, and the queue name in this
- method is empty, the server MUST raise a connection exception with reply
- code 530 (not allowed).
- </doc>
- </rule>
- </field>
-
- <field name = "consumer-tag" domain = "consumer-tag">
- <doc>
- Specifies the identifier for the consumer. The consumer tag is local to a
- connection, so two clients can use the same consumer tags. If this field is
- empty the server will generate a unique tag.
- </doc>
-
- <rule name = "01">
- <!-- TODO: Rule split? -->
- <doc>
- The tag MUST NOT refer to an existing consumer. If the client attempts to
- create two consumers with the same non-empty tag the server MUST raise a
- connection exception with reply code 530 (not allowed).
- </doc>
- </rule>
- </field>
-
- <field name = "no-local" domain = "no-local" />
-
- <field name = "no-ack" domain = "no-ack" />
-
- <field name = "exclusive" domain = "bit" label = "request exclusive access">
- <doc>
- Request exclusive consumer access, meaning only this consumer can access the
- queue.
- </doc>
-
- <!-- Rule test name: was "amq_file_00" -->
- <rule name = "01">
- <doc>
- If the server cannot grant exclusive access to the queue when asked -
- because there are other consumers active - it MUST raise a channel exception
- with return code 405 (resource locked).
- </doc>
- </rule>
- </field>
-
- <field name = "nowait" domain = "bit" label = "do not send a reply method">
- <doc>
- If set, the server will not respond to the method. The client should not wait
- for a reply method. If the server could not complete the method it will raise a
- channel or connection exception.
- </doc>
- </field>
- </method>
-
- <method name = "consume-ok" synchronous = "1" index = "21" label = "confirm a new consumer">
- <doc>
- This method provides the client with a consumer tag which it MUST use in methods
- that work with the consumer.
- </doc>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "consumer-tag" domain = "consumer-tag">
- <doc>Holds the consumer tag specified by the client or provided by the server.</doc>
- </field>
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "cancel" synchronous = "1" index = "30" label = "end a queue consumer">
- <doc>
- This method cancels a consumer. This does not affect already delivered messages, but
- it does mean the server will not send any more messages for that consumer.
- </doc>
-
- <response name = "cancel-ok" />
-
- <chassis name = "server" implement = "MUST" />
-
- <field name = "consumer-tag" domain = "consumer-tag" />
-
- <field name = "nowait" domain = "bit" label = "do not send a reply method">
- <doc>
- If set, the server will not respond to the method. The client should not wait
- for a reply method. If the server could not complete the method it will raise a
- channel or connection exception.
- </doc>
- </field>
- </method>
-
- <method name = "cancel-ok" synchronous = "1" index = "31" label = "confirm a cancelled consumer">
- <doc>This method confirms that the cancellation was completed.</doc>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "consumer-tag" domain = "consumer-tag" />
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "open" synchronous = "1" index = "40" label = "request to start staging">
- <doc>
- This method requests permission to start staging a message. Staging means sending
- the message into a temporary area at the recipient end and then delivering the
- message by referring to this temporary area. Staging is how the protocol handles
- partial file transfers - if a message is partially staged and the connection breaks,
- the next time the sender starts to stage it, it can restart from where it left off.
- </doc>
-
- <response name = "open-ok" />
-
- <chassis name = "server" implement = "MUST" />
- <chassis name = "client" implement = "MUST" />
-
- <field name = "identifier" domain = "shortstr" label = "staging identifier">
- <doc>
- This is the staging identifier. This is an arbitrary string chosen by the
- sender. For staging to work correctly the sender must use the same staging
- identifier when staging the same message a second time after recovery from a
- failure. A good choice for the staging identifier would be the SHA1 hash of the
- message properties data (including the original filename, revised time, etc.).
- </doc>
- </field>
-
- <field name = "content-size" domain = "longlong" label = "message content size">
- <doc>
- The size of the content in octets. The recipient may use this information to
- allocate or check available space in advance, to avoid "disk full" errors during
- staging of very large messages.
- </doc>
-
- <rule name = "01">
- <doc>
- The sender MUST accurately fill the content-size field. Zero-length content
- is permitted.
- </doc>
- </rule>
- </field>
- </method>
-
- <method name = "open-ok" synchronous = "1" index = "41" label = "confirm staging ready">
- <doc>
- This method confirms that the recipient is ready to accept staged data. If the
- message was already partially-staged at a previous time the recipient will report
- the number of octets already staged.
- </doc>
-
- <response name = "stage" />
-
- <chassis name = "server" implement = "MUST" />
- <chassis name = "client" implement = "MUST" />
-
- <field name = "staged-size" domain = "longlong" label = "already staged amount">
- <doc>
- The amount of previously-staged content in octets. For a new message this will
- be zero.
- </doc>
-
- <rule name = "01">
- <doc>
- The sender MUST start sending data from this octet offset in the message,
- counting from zero.
- </doc>
- </rule>
-
- <rule name = "02">
- <!-- TODO: Rule split? -->
- <doc>
- The recipient MAY decide how long to hold partially-staged content and MAY
- implement staging by always discarding partially-staged content. However if
- it uses the file content type it MUST support the staging methods.
- </doc>
- </rule>
- </field>
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "stage" content = "1" index = "50" label = "stage message content">
- <doc>
- This method stages the message, sending the message content to the recipient from
- the octet offset specified in the Open-Ok method.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
- <chassis name = "client" implement = "MUST" />
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "publish" index = "60" label = "publish a message">
- <doc>
- This method publishes a staged file message to a specific exchange. The file message
- will be routed to queues as defined by the exchange configuration and distributed to
- any active consumers when the transaction, if any, is committed.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
-
- <field name = "ticket" domain = "access-ticket">
- <rule name = "01">
- <doc>
- The client MUST provide a valid access ticket giving "write" access rights
- to the access realm for the exchange.
- </doc>
- </rule>
- </field>
-
- <field name = "exchange" domain = "exchange-name">
- <doc>
- Specifies the name of the exchange to publish to. The exchange name can be
- empty, meaning the default exchange. If the exchange name is specified, and that
- exchange does not exist, the server will raise a channel exception.
- </doc>
-
- <rule name = "01">
- <doc>
- The server MUST accept a blank exchange name to mean the default exchange.
- </doc>
- </rule>
-
- <rule name = "02">
- <doc>
- If the exchange was declared as an internal exchange, the server MUST
- respond with a reply code 403 (access refused) and raise a channel
- exception.
- </doc>
- </rule>
-
- <!-- TODO: Rule split? -->
-
- <rule name = "03">
- <doc>
- The exchange MAY refuse file content in which case it MUST respond with a
- reply code 540 (not implemented) and raise a channel exception.
- </doc>
- </rule>
- </field>
-
- <field name = "routing-key" domain = "shortstr" label = "Message routing key">
- <doc>
- Specifies the routing key for the message. The routing key is used for routing
- messages depending on the exchange configuration.
- </doc>
- </field>
-
- <field name = "mandatory" domain = "bit" label = "indicate mandatory routing">
- <doc>
- This flag tells the server how to react if the message cannot be routed to a
- queue. If this flag is set, the server will return an unroutable message with a
- Return method. If this flag is zero, the server silently drops the message.
- </doc>
-
- <!-- Rule test name: was "amq_file_00" -->
- <rule name = "01">
- <doc>The server SHOULD implement the mandatory flag.</doc>
- </rule>
- </field>
-
- <field name = "immediate" domain = "bit" label = "request immediate delivery">
- <doc>
- This flag tells the server how to react if the message cannot be routed to a
- queue consumer immediately. If this flag is set, the server will return an
- undeliverable message with a Return method. If this flag is zero, the server
- will queue the message, but with no guarantee that it will ever be consumed.
- </doc>
-
- <!-- Rule test name: was "amq_file_00" -->
- <rule name = "01">
- <doc>The server SHOULD implement the immediate flag.</doc>
- </rule>
- </field>
-
- <field name = "identifier" domain = "shortstr" label = "staging identifier">
- <doc>
- This is the staging identifier of the message to publish. The message must have
- been staged. Note that a client can send the Publish method asynchronously
- without waiting for staging to finish.
- </doc>
- </field>
- </method>
-
- <method name = "return" content = "1" index = "70" label = "return a failed message">
- <doc>
- This method returns an undeliverable message that was published with the "immediate"
- flag set, or an unroutable message published with the "mandatory" flag set. The
- reply code and text provide information about the reason that the message was
- undeliverable.
- </doc>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "reply-code" domain = "reply-code" />
-
- <field name = "reply-text" domain = "reply-text" />
-
- <field name = "exchange" domain = "exchange-name">
- <doc>
- Specifies the name of the exchange that the message was originally published to.
- </doc>
- </field>
-
- <field name = "routing-key" domain = "shortstr" label = "Message routing key">
- <doc>Specifies the routing key name specified when the message was published.</doc>
- </field>
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "deliver" index = "80" label = "notify the client of a consumer message">
- <doc>
- This method delivers a staged file message to the client, via a consumer. In the
- asynchronous message delivery model, the client starts a consumer using the Consume
- method, then the server responds with Deliver methods as and when messages arrive
- for that consumer.
- </doc>
-
- <rule name = "01">
- <!-- TODO: Rule split? -->
- <doc>
- The server SHOULD track the number of times a message has been delivered to
- clients and when a message is redelivered a certain number of times - e.g. 5
- times - without being acknowledged, the server SHOULD consider the message to be
- unprocessable (possibly causing client applications to abort), and move the
- message to a dead letter queue.
- </doc>
- </rule>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "consumer-tag" domain = "consumer-tag" />
-
- <field name = "delivery-tag" domain = "delivery-tag" />
-
- <field name = "redelivered" domain = "redelivered" />
-
- <field name = "exchange" domain = "exchange-name">
- <doc>
- Specifies the name of the exchange that the message was originally published to.
- </doc>
- </field>
-
- <field name = "routing-key" domain = "shortstr" label = "Message routing key">
- <doc>Specifies the routing key name specified when the message was published.</doc>
- </field>
-
- <field name = "identifier" domain = "shortstr" label = "staging identifier">
- <doc>
- This is the staging identifier of the message to deliver. The message must have
- been staged. Note that a server can send the Deliver method asynchronously
- without waiting for staging to finish.
- </doc>
- </field>
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "ack" index = "90" label = "acknowledge one or more messages">
- <doc>
- This method acknowledges one or more messages delivered via the Deliver method. The
- client can ask to confirm a single message or a set of messages up to and including
- a specific message.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
-
- <field name = "delivery-tag" domain = "delivery-tag" />
-
- <field name = "multiple" domain = "bit" label = "acknowledge multiple messages">
- <doc>
- If set to 1, the delivery tag is treated as "up to and including", so that the
- client can acknowledge multiple messages with a single method. If set to zero,
- the delivery tag refers to a single message. If the multiple field is 1, and the
- delivery tag is zero, this tells the server to acknowledge all outstanding messages.
- </doc>
-
- <rule name = "01">
- <doc>
- The server MUST validate that a non-zero delivery-tag refers to a delivered
- message, and raise a channel exception if this is not the case.
- </doc>
- </rule>
- </field>
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "reject" index = "100" label = "reject an incoming message">
- <doc>
- This method allows a client to reject a message. It can be used to return
- untreatable messages to their original queue. Note that file content is staged
- before delivery, so the client will not use this method to interrupt delivery of a
- large message.
- </doc>
-
- <rule name = "01">
- <doc>
- The server SHOULD interpret this method as meaning that the client is unable to
- process the message at this time.
- </doc>
- </rule>
-
- <!-- TODO: Rule split? -->
-
- <rule name = "02">
- <doc>
- A client MUST NOT use this method as a means of selecting messages to process. A
- rejected message MAY be discarded or dead-lettered, not necessarily passed to
- another client.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
-
- <field name = "delivery-tag" domain = "delivery-tag" />
-
- <field name = "requeue" domain = "bit" label = "requeue the message">
- <doc>
- If this field is zero, the message will be discarded. If this bit is 1, the
- server will attempt to requeue the message.
- </doc>
-
- <rule name = "01">
- <!-- TODO: Rule split? -->
- <doc>
- The server MUST NOT deliver the message to the same client within the
- context of the current channel. The recommended strategy is to attempt to
- deliver the message to an alternative consumer, and if that is not possible,
- to move the message to a dead-letter queue. The server MAY use more
- sophisticated tracking to hold the message on the queue and redeliver it to
- the same client at a later stage.
- </doc>
- </rule>
- </field>
- </method>
- </class>
-
- <!-- == STREAM =========================================================== -->
-
- <class name = "stream" handler = "channel" index = "80" label = "work with streaming content">
- <doc>
- The stream class provides methods that support multimedia streaming. The stream class
- uses the following semantics: one message is one packet of data; delivery is
- unacknowledged and unreliable; the consumer can specify quality of service parameters
- that the server can try to adhere to; lower-priority messages may be discarded in favour
- of high priority messages.
- </doc>
-
- <doc type = "grammar">
- stream = C:QOS S:QOS-OK
- / C:CONSUME S:CONSUME-OK
- / C:CANCEL S:CANCEL-OK
- / C:PUBLISH content
- / S:RETURN
- / S:DELIVER content
- </doc>
-
- <chassis name = "server" implement = "MAY" />
- <chassis name = "client" implement = "MAY" />
-
- <rule name = "01">
- <doc>
- The server SHOULD discard stream messages on a priority basis if the queue size
- exceeds some configured limit.
- </doc>
- </rule>
-
- <rule name = "02">
- <!-- TODO: Rule split? -->
- <doc>
- The server MUST implement at least 2 priority levels for stream messages, where
- priorities 0-4 and 5-9 are treated as two distinct levels. The server MAY implement
- up to 10 priority levels.
- </doc>
- </rule>
-
- <rule name = "03">
- <doc>
- The server MUST implement automatic acknowledgements on stream content. That is, as
- soon as a message is delivered to a client via a Deliver method, the server must
- remove it from the queue.
- </doc>
- </rule>
-
- <!-- These are the properties for a Stream content -->
-
- <field name = "content-type" domain = "shortstr" label = "MIME content type" />
- <field name = "content-encoding" domain = "shortstr" label = "MIME content encoding" />
- <field name = "headers" domain = "table" label = "message header field table" />
- <field name = "priority" domain = "octet" label = "message priority, 0 to 9" />
- <field name = "timestamp" domain = "timestamp" label = "message timestamp" />
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "qos" synchronous = "1" index = "10" label = "specify quality of service">
- <doc>
- This method requests a specific quality of service. The QoS can be specified for the
- current channel or for all channels on the connection. The particular properties and
- semantics of a qos method always depend on the content class semantics. Though the
- qos method could in principle apply to both peers, it is currently meaningful only
- for the server.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
-
- <response name = "qos-ok" />
-
- <field name = "prefetch-size" domain = "long" label = "prefetch window in octets">
- <doc>
- The client can request that messages be sent in advance so that when the client
- finishes processing a message, the following message is already held locally,
- rather than needing to be sent down the channel. Prefetching gives a performance
- improvement. This field specifies the prefetch window size in octets. May be set
- to zero, meaning "no specific limit". Note that other prefetch limits may still
- apply.
- </doc>
- </field>
-
- <field name = "prefetch-count" domain = "short" label = "prefetch window in messages">
- <doc>
- Specifies a prefetch window in terms of whole messages. This field may be used
- in combination with the prefetch-size field; a message will only be sent in
- advance if both prefetch windows (and those at the channel and connection level)
- allow it.
- </doc>
- </field>
-
- <field name = "consume-rate" domain = "long" label = "transfer rate in octets/second">
- <doc>
- Specifies a desired transfer rate in octets per second. This is usually
- determined by the application that uses the streaming data. A value of zero
- means "no limit", i.e. as rapidly as possible.
- </doc>
-
- <rule name = "01">
- <!-- TODO: Rule split? -->
- <doc>
- The server MAY ignore the prefetch values and consume rates, depending on
- the type of stream and the ability of the server to queue and/or replay it.
- The server MAY drop low-priority messages in favour of high-priority
- messages.
- </doc>
- </rule>
- </field>
-
- <field name = "global" domain = "bit" label = "apply to entire connection">
- <doc>
- By default the QoS settings apply to the current channel only. If this field is
- set, they are applied to the entire connection.
- </doc>
- </field>
- </method>
-
- <method name = "qos-ok" synchronous = "1" index = "11" label = "confirm the requested qos">
- <doc>
- This method tells the client that the requested QoS levels could be handled by the
- server. The requested QoS applies to all active consumers until a new QoS is
- defined.
- </doc>
-
- <chassis name = "client" implement = "MUST" />
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "consume" synchronous = "1" index = "20" label = "start a queue consumer">
- <doc>
- This method asks the server to start a "consumer", which is a transient request for
- messages from a specific queue. Consumers last as long as the channel they were
- created on, or until the client cancels them.
- </doc>
-
- <rule name = "01">
- <doc>
- The server SHOULD support at least 16 consumers per queue, unless the queue was
- declared as private, and ideally, impose no limit except as defined by available
- resources.
- </doc>
- </rule>
-
- <rule name = "02">
- <doc>
- Streaming applications SHOULD use different channels to select different
- streaming resolutions. AMQP makes no provision for filtering and/or transforming
- streams except on the basis of priority-based selective delivery of individual
- messages.
- </doc>
- </rule>
-
- <chassis name = "server" implement = "MUST" />
- <response name = "consume-ok" />
-
- <field name = "ticket" domain = "access-ticket">
- <rule name = "01">
- <doc>
- The client MUST provide a valid access ticket giving "read" access rights to
- the realm for the queue.
- </doc>
- </rule>
- </field>
-
- <field name = "queue" domain = "queue-name">
- <doc>
- Specifies the name of the queue to consume from. If the queue name is null,
- refers to the current queue for the channel, which is the last declared queue.
- </doc>
-
- <rule name = "01">
- <doc>
- If the client did not previously declare a queue, and the queue name in this
- method is empty, the server MUST raise a connection exception with reply
- code 530 (not allowed).
- </doc>
- </rule>
- </field>
-
- <field name = "consumer-tag" domain = "consumer-tag">
- <doc>
- Specifies the identifier for the consumer. The consumer tag is local to a
- connection, so two clients can use the same consumer tags. If this field is
- empty the server will generate a unique tag.
- </doc>
-
- <rule name = "01">
- <!-- TODO: Rule split? -->
- <doc>
- The tag MUST NOT refer to an existing consumer. If the client attempts to
- create two consumers with the same non-empty tag the server MUST raise a
- connection exception with reply code 530 (not allowed).
- </doc>
- </rule>
- </field>
-
- <field name = "no-local" domain = "no-local" />
-
- <field name = "exclusive" domain = "bit" label = "request exclusive access">
- <doc>
- Request exclusive consumer access, meaning only this consumer can access the
- queue.
- </doc>
-
-
- <!-- Rule test name: was "amq_file_00" -->
- <rule name = "01">
- <doc>
- If the server cannot grant exclusive access to the queue when asked -
- because there are other consumers active - it MUST raise a channel exception
- with return code 405 (resource locked).
- </doc>
- </rule>
- </field>
-
- <field name = "nowait" domain = "bit" label = "do not send a reply method">
- <doc>
- If set, the server will not respond to the method. The client should not wait
- for a reply method. If the server could not complete the method it will raise a
- channel or connection exception.
- </doc>
- </field>
- </method>
-
- <method name = "consume-ok" synchronous = "1" index = "21" label = "confirm a new consumer">
- <doc>
- This method provides the client with a consumer tag which it may use in methods that
- work with the consumer.
- </doc>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "consumer-tag" domain = "consumer-tag">
- <doc>Holds the consumer tag specified by the client or provided by the server.</doc>
- </field>
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "cancel" synchronous = "1" index = "30" label = "end a queue consumer">
- <doc>
- This method cancels a consumer. Since message delivery is asynchronous the client
- may continue to receive messages for a short while after canceling a consumer. It
- may process or discard these as appropriate.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
-
- <response name = "cancel-ok" />
-
- <field name = "consumer-tag" domain = "consumer-tag" />
-
- <field name = "nowait" domain = "bit" label = "do not send a reply method">
- <doc>
- If set, the server will not respond to the method. The client should not wait
- for a reply method. If the server could not complete the method it will raise a
- channel or connection exception.
- </doc>
- </field>
- </method>
-
- <method name = "cancel-ok" synchronous = "1" index = "31" label = "confirm a cancelled consumer">
- <doc>This method confirms that the cancellation was completed.</doc>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "consumer-tag" domain = "consumer-tag" />
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "publish" content = "1" index = "40" label = "publish a message">
- <doc>
- This method publishes a message to a specific exchange. The message will be routed
- to queues as defined by the exchange configuration and distributed to any active
- consumers as appropriate.
- </doc>
-
- <chassis name = "server" implement = "MUST" />
-
- <field name = "ticket" domain = "access-ticket">
- <rule name = "01">
- <doc>
- The client MUST provide a valid access ticket giving "write" access rights
- to the access realm for the exchange.
- </doc>
- </rule>
- </field>
-
- <field name = "exchange" domain = "exchange-name">
- <doc>
- Specifies the name of the exchange to publish to. The exchange name can be
- empty, meaning the default exchange. If the exchange name is specified, and that
- exchange does not exist, the server will raise a channel exception.
- </doc>
-
- <rule name = "01">
- <doc>
- The server MUST accept a blank exchange name to mean the default exchange.
- </doc>
- </rule>
-
- <rule name = "02">
- <doc>
- If the exchange was declared as an internal exchange, the server MUST
- respond with a reply code 403 (access refused) and raise a channel
- exception.
- </doc>
- </rule>
-
- <rule name = "03">
- <doc>
- The exchange MAY refuse stream content in which case it MUST respond with a
- reply code 540 (not implemented) and raise a channel exception.
- </doc>
- </rule>
- </field>
-
- <field name = "routing-key" domain = "shortstr" label = "Message routing key">
- <doc>
- Specifies the routing key for the message. The routing key is used for routing
- messages depending on the exchange configuration.
- </doc>
- </field>
-
- <field name = "mandatory" domain = "bit" label = "indicate mandatory routing">
- <doc>
- This flag tells the server how to react if the message cannot be routed to a
- queue. If this flag is set, the server will return an unroutable message with a
- Return method. If this flag is zero, the server silently drops the message.
- </doc>
-
- <!-- Rule test name: was "amq_stream_00" -->
- <rule name = "01">
- <doc>The server SHOULD implement the mandatory flag.</doc>
- </rule>
- </field>
-
- <field name = "immediate" domain = "bit" label = "request immediate delivery">
- <doc>
- This flag tells the server how to react if the message cannot be routed to a
- queue consumer immediately. If this flag is set, the server will return an
- undeliverable message with a Return method. If this flag is zero, the server
- will queue the message, but with no guarantee that it will ever be consumed.
- </doc>
-
- <!-- Rule test name: was "amq_stream_00" -->
- <rule name = "01">
- <doc>The server SHOULD implement the immediate flag.</doc>
- </rule>
- </field>
- </method>
-
- <method name = "return" content = "1" index = "50" label = "return a failed message">
- <doc>
- This method returns an undeliverable message that was published with the "immediate"
- flag set, or an unroutable message published with the "mandatory" flag set. The
- reply code and text provide information about the reason that the message was
- undeliverable.
- </doc>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "reply-code" domain = "reply-code" />
-
- <field name = "reply-text" domain = "reply-text" />
-
- <field name = "exchange" domain = "exchange-name">
- <doc>
- Specifies the name of the exchange that the message was originally published to.
- </doc>
- </field>
-
- <field name = "routing-key" domain = "shortstr" label = "Message routing key">
- <doc>Specifies the routing key name specified when the message was published.</doc>
- </field>
- </method>
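
For orientation, a minimal sketch of how the mandatory flag and the Return method are typically handled on the client side. It uses the RabbitMQ Java client and its Basic-class publish (which carries the same mandatory and immediate fields) purely as an example of an AMQP 0-9 client exposing these flags directly; it is not part of this specification or of Qpid, and the exchange and routing key below are invented for illustration.

    import java.io.IOException;

    import com.rabbitmq.client.AMQP;
    import com.rabbitmq.client.Channel;
    import com.rabbitmq.client.Connection;
    import com.rabbitmq.client.ConnectionFactory;
    import com.rabbitmq.client.ReturnListener;

    public class MandatoryPublishSketch
    {
        public static void main(String[] args) throws Exception
        {
            Connection connection = new ConnectionFactory().newConnection();
            Channel channel = connection.createChannel();

            // With mandatory set, an unroutable message comes back via a Return
            // method instead of being silently dropped by the server.
            channel.addReturnListener(new ReturnListener()
            {
                @Override
                public void handleReturn(int replyCode, String replyText, String exchange,
                                         String routingKey, AMQP.BasicProperties properties,
                                         byte[] body) throws IOException
                {
                    System.out.println("Returned " + replyCode + " " + replyText
                            + " (exchange=" + exchange + ", routing key=" + routingKey + ")");
                }
            });

            // mandatory = true, immediate = false
            channel.basicPublish("amq.direct", "no.such.binding", true, false, null, "hello".getBytes());

            Thread.sleep(1000);   // give the asynchronous Return a moment to arrive
            channel.close();
            connection.close();
        }
    }
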
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "deliver" content = "1" index = "60"
- label = "notify the client of a consumer message">
- <doc>
- This method delivers a message to the client, via a consumer. In the asynchronous
- message delivery model, the client starts a consumer using the Consume method, then
- the server responds with Deliver methods as and when messages arrive for that
- consumer.
- </doc>
-
- <chassis name = "client" implement = "MUST" />
-
- <field name = "consumer-tag" domain = "consumer-tag" />
-
- <field name = "delivery-tag" domain = "delivery-tag" />
-
- <field name = "exchange" domain = "exchange-name">
- <doc>
- Specifies the name of the exchange that the message was originally published to.
- </doc>
- </field>
-
- <field name = "queue" domain = "queue-name">
- <doc>
- Specifies the name of the queue that the message came from. Note that a single
- channel can start many consumers on different queues.
- </doc>
- <assert check = "notnull" />
- </field>
- </method>
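
The consume/deliver flow described by this method is usually surfaced to applications as a callback per delivery. A minimal sketch, again using the RabbitMQ Java client's Basic-class operations as a stand-in, which follow the same consume/deliver pattern (the queue name is invented and assumed to exist already):

    import java.io.IOException;

    import com.rabbitmq.client.AMQP;
    import com.rabbitmq.client.Channel;
    import com.rabbitmq.client.Connection;
    import com.rabbitmq.client.ConnectionFactory;
    import com.rabbitmq.client.DefaultConsumer;
    import com.rabbitmq.client.Envelope;

    public class ConsumeSketch
    {
        public static void main(String[] args) throws Exception
        {
            Connection connection = new ConnectionFactory().newConnection();
            final Channel channel = connection.createChannel();

            // Consume starts the consumer; the server then pushes Deliver methods,
            // surfaced here as handleDelivery callbacks, as messages arrive.
            channel.basicConsume("my-queue", false, new DefaultConsumer(channel)
            {
                @Override
                public void handleDelivery(String consumerTag, Envelope envelope,
                                           AMQP.BasicProperties properties, byte[] body) throws IOException
                {
                    System.out.println("Delivered to " + consumerTag + ": " + new String(body, "UTF-8"));
                    channel.basicAck(envelope.getDeliveryTag(), false);
                }
            });
            // The connection is left open: deliveries keep arriving on the client's
            // consumer thread until the consumer is cancelled.
        }
    }
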
- </class>
-
- <!-- == TX =============================================================== -->
-
- <class name = "tx" handler = "channel" index = "90" label = "work with standard transactions">
- <doc>
- Standard transactions provide so-called "1.5 phase commit". We can ensure that work is
- never lost, but there is a chance of confirmations being lost, so that messages may be
- resent. Applications that use standard transactions must be able to detect and ignore
- duplicate messages.
- </doc>
-
- <!-- TODO: Rule split? -->
-
- <rule name = "01">
- <doc>
- A client using standard transactions SHOULD be able to track all messages received
- within a reasonable period, and thus detect and reject duplicates of the same
- message. It SHOULD NOT pass these to the application layer.
- </doc>
- </rule>
-
- <doc type = "grammar">
- tx = C:SELECT S:SELECT-OK
- / C:COMMIT S:COMMIT-OK
- / C:ROLLBACK S:ROLLBACK-OK
- </doc>
-
- <chassis name = "server" implement = "SHOULD" />
- <chassis name = "client" implement = "MAY" />
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "select" synchronous = "1" index = "10" label = "select standard transaction mode">
- <doc>
- This method sets the channel to use standard transactions. The client must use this
- method at least once on a channel before using the Commit or Rollback methods.
- </doc>
- <chassis name = "server" implement = "MUST" />
- <response name = "select-ok" />
- </method>
-
- <method name = "select-ok" synchronous = "1" index = "11" label = "confirm transaction mode">
- <doc>
- This method confirms to the client that the channel was successfully set to use
- standard transactions.
- </doc>
- <chassis name = "client" implement = "MUST" />
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "commit" synchronous = "1" index = "20" label = "commit the current transaction">
- <doc>
- This method commits all messages published and acknowledged in the current
- transaction. A new transaction starts immediately after a commit.
- </doc>
- <chassis name = "server" implement = "MUST" />
- <response name = "commit-ok" />
- </method>
-
- <method name = "commit-ok" synchronous = "1" index = "21" label = "confirm a successful commit">
- <doc>
- This method confirms to the client that the commit succeeded. Note that if a commit
- fails, the server raises a channel exception.
- </doc>
- <chassis name = "client" implement = "MUST" />
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "rollback" synchronous = "1" index = "30"
- label = "abandon the current transaction">
- <doc>
- This method abandons all messages published and acknowledged in the current
- transaction. A new transaction starts immediately after a rollback.
- </doc>
- <chassis name = "server" implement = "MUST" />
- <response name = "rollback-ok" />
- </method>
-
- <method name = "rollback-ok" synchronous = "1" index = "31" label = "confirm successful rollback">
- <doc>
- This method confirms to the client that the rollback succeeded. Note that if a
- rollback fails, the server raises a channel exception.
- </doc>
- <chassis name = "client" implement = "MUST" />
- </method>
- </class>
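
From an application's perspective this class usually appears through a transacted session. A minimal JMS sketch, assuming the common client mapping in which creating a transacted session issues Tx.Select and commit()/rollback() issue Tx.Commit/Tx.Rollback (that mapping is an assumption about typical 0-9 clients, not something the JMS API itself states):

    import javax.jms.Connection;
    import javax.jms.ConnectionFactory;
    import javax.jms.Destination;
    import javax.jms.JMSException;
    import javax.jms.MessageProducer;
    import javax.jms.Session;

    public class TxSketch
    {
        public static void sendInTransaction(ConnectionFactory factory, Destination destination, String text)
                throws JMSException
        {
            Connection connection = factory.createConnection();
            try
            {
                // Creating a transacted session corresponds to Tx.Select on the channel.
                Session session = connection.createSession(true, Session.SESSION_TRANSACTED);
                MessageProducer producer = session.createProducer(destination);
                producer.send(session.createTextMessage(text));
                try
                {
                    // Tx.Commit; a new transaction starts immediately afterwards.
                    // Because the commit confirmation can be lost ("1.5 phase commit"),
                    // consumers must be prepared to see redelivered duplicates.
                    session.commit();
                }
                catch (JMSException e)
                {
                    session.rollback();   // Tx.Rollback abandons the uncommitted work
                    throw e;
                }
            }
            finally
            {
                connection.close();
            }
        }
    }
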
-
- <!-- == DTX ============================================================== -->
-
- <class name = "dtx" handler = "channel" index = "100" label = "work with distributed transactions">
- <doc>
- Distributed transactions provide so-called "2-phase commit". The AMQP distributed
- transaction model supports the X-Open XA architecture and other distributed transaction
- implementations. The Dtx class assumes that the server has a private communications
- channel (not AMQP) to a distributed transaction coordinator.
- </doc>
-
- <doc type = "grammar">
- dtx = C:SELECT S:SELECT-OK
- C:START S:START-OK
- </doc>
-
- <chassis name = "server" implement = "MAY" />
- <chassis name = "client" implement = "MAY" />
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "select" synchronous = "1" index = "10" label = "select standard transaction mode">
- <doc>
- This method sets the channel to use distributed transactions. The client must use
- this method at least once on a channel before using the Start method.
- </doc>
- <chassis name = "server" implement = "MUST" />
- <response name = "select-ok" />
- </method>
-
- <method name = "select-ok" synchronous = "1" index = "11" label = "confirm transaction mode">
- <doc>
- This method confirms to the client that the channel was successfully set to use
- distributed transactions.
- </doc>
- <chassis name = "client" implement = "MUST" />
- </method>
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "start" synchronous = "1" index = "20"
- label = "start a new distributed transaction">
- <doc>
- This method starts a new distributed transaction. This must be the first method on a
- new channel that uses the distributed transaction mode, before any methods that
- publish or consume messages.
- </doc>
- <chassis name = "server" implement = "MAY" />
- <response name = "start-ok" />
- <field name = "dtx-identifier" domain = "shortstr" label = "transaction identifier">
- <doc>
- The distributed transaction key. This identifies the transaction so that the
- AMQP server can coordinate with the distributed transaction coordinator.
- </doc>
- <assert check = "notnull" />
- </field>
- </method>
-
- <method name = "start-ok" synchronous = "1" index = "21"
- label = "confirm the start of a new distributed transaction">
- <doc>
- This method confirms to the client that the transaction started. Note that if a
- start fails, the server raises a channel exception.
- </doc>
- <chassis name = "client" implement = "MUST" />
- </method>
- </class>
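
For context, the X/Open XA demarcation sequence that such a coordinator drives is sketched below using the standard javax.transaction.xa API. Note that the Dtx class defined here only carries Select and Start; per the class description, the remaining phases are assumed to travel over the coordinator's private (non-AMQP) channel to the server, so this is background rather than an AMQP method mapping.

    import javax.transaction.xa.XAException;
    import javax.transaction.xa.XAResource;
    import javax.transaction.xa.Xid;

    public class DtxSketch
    {
        // resource: an XAResource obtained from an XA-capable messaging client;
        // xid: the transaction identifier handed out by the coordinator
        // (conceptually the same identifier as the dtx-identifier field above).
        public static void runTwoPhase(XAResource resource, Xid xid) throws XAException
        {
            resource.start(xid, XAResource.TMNOFLAGS);    // begin work on behalf of xid
            // ... publish and acknowledge messages on the associated session ...
            resource.end(xid, XAResource.TMSUCCESS);      // work for xid is complete

            if (resource.prepare(xid) == XAResource.XA_OK)    // phase one
            {
                resource.commit(xid, false);                  // phase two
            }
            // XA_RDONLY from prepare() means there is nothing left to commit.
        }
    }
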
-
- <!-- == TUNNEL =========================================================== -->
-
- <class name = "tunnel" handler = "tunnel" index = "110" label = "methods for protocol tunneling">
- <doc>
- The tunnel methods are used to send blocks of binary data - which can be serialised AMQP
- methods or other protocol frames - between AMQP peers.
- </doc>
-
- <doc type = "grammar">
- tunnel = C:REQUEST
- / S:REQUEST
- </doc>
-
- <chassis name = "server" implement = "MAY" />
- <chassis name = "client" implement = "MAY" />
-
- <field name = "headers" domain = "table" label = "message header field table" />
- <field name = "proxy-name" domain = "shortstr" label = "identity of tunnelling proxy" />
- <field name = "data-name" domain = "shortstr" label = "name or type of message being tunnelled" />
- <field name = "durable" domain = "octet" label = "message durability indicator" />
- <field name = "broadcast" domain = "octet" label = "message broadcast mode" />
-
- <!-- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -->
-
- <method name = "request" content = "1" index = "10" label = "sends a tunnelled method">
- <doc>
- This method tunnels a block of binary data, which can be an encoded
- AMQP method or other data. The binary data is sent as the content for
- the Tunnel.Request method.
- </doc>
- <chassis name = "server" implement = "MUST" />
- <field name = "meta-data" domain = "table" label = "meta data for the tunnelled block">
- <doc>
- This field table holds arbitrary meta-data that the sender needs to
- pass to the recipient.
- </doc>
- </field>
- </method>
- </class>
-</amqp>
diff --git a/qpid/gentools/xml-src/cluster-0.9.test.xml b/qpid/gentools/xml-src/cluster-0.9.test.xml
deleted file mode 100644
index 142e6c9380..0000000000
--- a/qpid/gentools/xml-src/cluster-0.9.test.xml
+++ /dev/null
@@ -1,59 +0,0 @@
-<?xml version="1.0"?>
-<!--
- -
- - Licensed to the Apache Software Foundation (ASF) under one
- - or more contributor license agreements. See the NOTICE file
- - distributed with this work for additional information
- - regarding copyright ownership. The ASF licenses this file
- - to you under the Apache License, Version 2.0 (the
- - "License"); you may not use this file except in compliance
- - with the License. You may obtain a copy of the License at
- -
- - http://www.apache.org/licenses/LICENSE-2.0
- -
- - Unless required by applicable law or agreed to in writing,
- - software distributed under the License is distributed on an
- - "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- - KIND, either express or implied. See the License for the
- - specific language governing permissions and limitations
- - under the License.
- -
- -->
-
-<amqp major="0" minor="9" port="5672" comment="AMQ protocol 0.80">
-
-<class name = "cluster" index = "101">
-
-<doc>
- An extension that allows brokers to communicate in order to
- provide a clustered service to clients.
-</doc>
-
-<method name = "join" index="10">
- <field name = "broker" type = "shortstr" />
-</method>
-
-<method name = "membership" index="20">
- <field name = "members" type = "longstr" />
-</method>
-
-<method name = "synch" index="30">
-</method>
-
-<method name = "leave" index="40">
- <field name = "broker" type = "shortstr" />
-</method>
-
-<method name = "suspect" index="50">
- <field name = "broker" type = "shortstr" />
-</method>
-
-<method name = "ping" index="60">
- <field name = "broker" type = "shortstr" />
- <field name = "load" type = "long" />
- <field name = "response required" type = "bit" />
-</method>
-
-</class>
-
-</amqp>
diff --git a/qpid/java/bdbstore/src/main/java/org/apache/qpid/server/store/berkeleydb/AbstractBDBMessageStore.java b/qpid/java/bdbstore/src/main/java/org/apache/qpid/server/store/berkeleydb/AbstractBDBMessageStore.java
index 851038c6de..eb8723461e 100644
--- a/qpid/java/bdbstore/src/main/java/org/apache/qpid/server/store/berkeleydb/AbstractBDBMessageStore.java
+++ b/qpid/java/bdbstore/src/main/java/org/apache/qpid/server/store/berkeleydb/AbstractBDBMessageStore.java
@@ -902,7 +902,7 @@ public abstract class AbstractBDBMessageStore implements MessageStore
{
LOGGER.debug("Enqueuing message " + messageId + " on queue "
+ (queue instanceof AMQQueue ? ((AMQQueue) queue).getName() + " with id " : "") + queue.getId()
- + " [Transaction" + tx + "]");
+ + " in transaction " + tx);
}
_deliveryDb.put(tx, key, value);
}
@@ -1056,7 +1056,8 @@ public abstract class AbstractBDBMessageStore implements MessageStore
if (LOGGER.isDebugEnabled())
{
- LOGGER.debug("commitTranImpl completed for [Transaction:" + tx + "]");
+ String transactionType = syncCommit ? "synchronous" : "asynchronous";
+ LOGGER.debug("commitTranImpl completed " + transactionType + " transaction " + tx);
}
}
catch (DatabaseException e)
@@ -1078,7 +1079,7 @@ public abstract class AbstractBDBMessageStore implements MessageStore
{
if (LOGGER.isDebugEnabled())
{
- LOGGER.debug("abortTran called for [Transaction:" + tx + "]");
+ LOGGER.debug("abortTran called for transaction " + tx);
}
try
@@ -1190,7 +1191,7 @@ public abstract class AbstractBDBMessageStore implements MessageStore
if (LOGGER.isDebugEnabled())
{
- LOGGER.debug("Storing content for message " + messageId + "[Transaction" + tx + "]");
+ LOGGER.debug("Storing content for message " + messageId + " in transaction " + tx);
}
}
@@ -1215,8 +1216,9 @@ public abstract class AbstractBDBMessageStore implements MessageStore
{
if (LOGGER.isDebugEnabled())
{
- LOGGER.debug("public void storeMetaData(Txn tx = " + tx + ", Long messageId = "
- + messageId + ", MessageMetaData messageMetaData = " + messageMetaData + "): called");
+ LOGGER.debug("storeMetaData called for transaction " + tx
+ + ", messageId " + messageId
+ + ", messageMetaData " + messageMetaData);
}
DatabaseEntry key = new DatabaseEntry();
@@ -1230,7 +1232,7 @@ public abstract class AbstractBDBMessageStore implements MessageStore
_messageMetaDataDb.put(tx, key, value);
if (LOGGER.isDebugEnabled())
{
- LOGGER.debug("Storing message metadata for message id " + messageId + "[Transaction" + tx + "]");
+ LOGGER.debug("Storing message metadata for message id " + messageId + " in transaction " + tx);
}
}
catch (DatabaseException e)
diff --git a/qpid/java/bdbstore/src/main/java/org/apache/qpid/server/store/berkeleydb/CommitThreadWrapper.java b/qpid/java/bdbstore/src/main/java/org/apache/qpid/server/store/berkeleydb/CommitThreadWrapper.java
index fe1556b5a6..598d20146c 100644
--- a/qpid/java/bdbstore/src/main/java/org/apache/qpid/server/store/berkeleydb/CommitThreadWrapper.java
+++ b/qpid/java/bdbstore/src/main/java/org/apache/qpid/server/store/berkeleydb/CommitThreadWrapper.java
@@ -80,7 +80,7 @@ public class CommitThreadWrapper
{
if (LOGGER.isDebugEnabled())
{
- LOGGER.debug("public synchronized void complete(): called (Transaction = " + _tx + ")");
+ LOGGER.debug("complete() called for transaction " + _tx);
}
_complete = true;
@@ -101,7 +101,10 @@ public class CommitThreadWrapper
if(!_syncCommit)
{
- LOGGER.debug("CommitAsync was requested, returning immediately.");
+ if(LOGGER.isDebugEnabled())
+ {
+ LOGGER.debug("CommitAsync was requested, returning immediately.");
+ }
return;
}
@@ -121,6 +124,12 @@ public class CommitThreadWrapper
public synchronized void waitForCompletion()
{
+ long startTime = 0;
+ if(LOGGER.isDebugEnabled())
+ {
+ startTime = System.currentTimeMillis();
+ }
+
while (!isComplete())
{
_commitThread.explicitNotify();
@@ -133,6 +142,12 @@ public class CommitThreadWrapper
throw new RuntimeException(e);
}
}
+
+ if(LOGGER.isDebugEnabled())
+ {
+ long duration = System.currentTimeMillis() - startTime;
+ LOGGER.debug("waitForCompletion returning after " + duration + " ms for transaction " + _tx);
+ }
}
}
@@ -198,8 +213,20 @@ public class CommitThreadWrapper
try
{
+ long startTime = 0;
+ if(LOGGER.isDebugEnabled())
+ {
+ startTime = System.currentTimeMillis();
+ }
+
_environment.flushLog(true);
+ if(LOGGER.isDebugEnabled())
+ {
+ long duration = System.currentTimeMillis() - startTime;
+ LOGGER.debug("flushLog completed in " + duration + " ms");
+ }
+
for(int i = 0; i < size; i++)
{
BDBCommitFuture commit = _jobQueue.poll();
diff --git a/qpid/java/broker-plugins/management-jmx/src/main/java/org/apache/qpid/server/jmx/mbeans/QueueMBean.java b/qpid/java/broker-plugins/management-jmx/src/main/java/org/apache/qpid/server/jmx/mbeans/QueueMBean.java
index 5c8b0f7194..94fac218ff 100644
--- a/qpid/java/broker-plugins/management-jmx/src/main/java/org/apache/qpid/server/jmx/mbeans/QueueMBean.java
+++ b/qpid/java/broker-plugins/management-jmx/src/main/java/org/apache/qpid/server/jmx/mbeans/QueueMBean.java
@@ -513,7 +513,6 @@ public class QueueMBean extends AMQManagedObject implements ManagedQueue, QueueN
{
_queue.visit(new QueueEntryVisitor()
{
-
public boolean visit(final QueueEntry entry)
{
final ServerMessage message = entry.getMessage();
@@ -525,11 +524,9 @@ public class QueueMBean extends AMQManagedObject implements ManagedQueue, QueueN
&& (messageId <= toMessageId))
{
txn.dequeue(entry);
- return true;
}
- return false;
}
- return true;
+ return false;
}
});
}
diff --git a/qpid/java/broker-plugins/management-jmx/src/main/java/org/apache/qpid/server/jmx/mbeans/VirtualHostManagerMBean.java b/qpid/java/broker-plugins/management-jmx/src/main/java/org/apache/qpid/server/jmx/mbeans/VirtualHostManagerMBean.java
index b3dbbc424a..67ac1bdc7c 100644
--- a/qpid/java/broker-plugins/management-jmx/src/main/java/org/apache/qpid/server/jmx/mbeans/VirtualHostManagerMBean.java
+++ b/qpid/java/broker-plugins/management-jmx/src/main/java/org/apache/qpid/server/jmx/mbeans/VirtualHostManagerMBean.java
@@ -229,10 +229,9 @@ public class VirtualHostManagerMBean extends AbstractStatisticsGatheringMBean<Vi
return getObjectNameForSingleInstanceMBean();
}
- public synchronized boolean isStatisticsEnabled()
+ public boolean isStatisticsEnabled()
{
- updateStats();
- return false; //TODO - implement isStatisticsEnabled
+ return true;
}
}
diff --git a/qpid/java/broker/build.xml b/qpid/java/broker/build.xml
index 3c4a7d81c6..8581b7c639 100644
--- a/qpid/java/broker/build.xml
+++ b/qpid/java/broker/build.xml
@@ -56,7 +56,7 @@
<target name="release-bin-copy-broker-plugins" description="copy broker-plugins into module release">
<copy todir="${module.release}/lib/plugins" failonerror="true">
- <fileset dir="${build.lib.broker.plugins}"/>
+ <fileset dir="${build.scratch.broker.plugins.lib}"/>
</copy>
</target>
diff --git a/qpid/java/broker/etc/log4j.xml b/qpid/java/broker/etc/log4j.xml
index 7392260a0a..71a13875a1 100644
--- a/qpid/java/broker/etc/log4j.xml
+++ b/qpid/java/broker/etc/log4j.xml
@@ -68,7 +68,7 @@
<param name="backupFilesToPath" value="${QPID_WORK}/backup/log"/>
<layout class="org.apache.log4j.PatternLayout">
- <param name="ConversionPattern" value="%d %-5p [%t] (%F:%L) - %m%n"/>
+ <param name="ConversionPattern" value="%d %-5p [%t] (%c{2}) - %m%n"/>
</layout>
</appender>
@@ -77,13 +77,13 @@
<param name="Append" value="false"/>
<layout class="org.apache.log4j.PatternLayout">
- <param name="ConversionPattern" value="%d %-5p [%t] (%F:%L) - %m%n"/>
+ <param name="ConversionPattern" value="%d %-5p [%t] (%c{2}) - %m%n"/>
</layout>
</appender>
<appender class="org.apache.log4j.ConsoleAppender" name="STDOUT">
<layout class="org.apache.log4j.PatternLayout">
- <param name="ConversionPattern" value="%d %-5p [%t] (%F:%L) - %m%n"/>
+ <param name="ConversionPattern" value="%d %-5p [%t] (%c{2}) - %m%n"/>
</layout>
</appender>
diff --git a/qpid/java/broker/src/main/java/org/apache/qpid/server/AMQChannel.java b/qpid/java/broker/src/main/java/org/apache/qpid/server/AMQChannel.java
index ac4fda2985..ab4ca81d05 100644
--- a/qpid/java/broker/src/main/java/org/apache/qpid/server/AMQChannel.java
+++ b/qpid/java/broker/src/main/java/org/apache/qpid/server/AMQChannel.java
@@ -51,6 +51,7 @@ import org.apache.qpid.framing.MethodRegistry;
import org.apache.qpid.framing.abstraction.ContentChunk;
import org.apache.qpid.framing.abstraction.MessagePublishInfo;
import org.apache.qpid.protocol.AMQConstant;
+import org.apache.qpid.server.TransactionTimeoutHelper.CloseAction;
import org.apache.qpid.server.ack.UnacknowledgedMessageMap;
import org.apache.qpid.server.ack.UnacknowledgedMessageMapImpl;
import org.apache.qpid.server.configuration.BrokerProperties;
@@ -87,6 +88,7 @@ import org.apache.qpid.server.subscription.Subscription;
import org.apache.qpid.server.subscription.SubscriptionFactoryImpl;
import org.apache.qpid.server.txn.AsyncAutoCommitTransaction;
import org.apache.qpid.server.txn.LocalTransaction;
+import org.apache.qpid.server.txn.LocalTransaction.ActivityTimeAccessor;
import org.apache.qpid.server.txn.ServerTransaction;
import org.apache.qpid.server.virtualhost.VirtualHost;
import org.apache.qpid.transport.TransportException;
@@ -144,7 +146,6 @@ public class AMQChannel implements AMQSessionModel, AsyncAutoCommitTransaction.F
private final AtomicLong _txnCommits = new AtomicLong(0);
private final AtomicLong _txnRejects = new AtomicLong(0);
private final AtomicLong _txnCount = new AtomicLong(0);
- private final AtomicLong _txnUpdateTime = new AtomicLong(0);
private final AMQProtocolSession _session;
private AtomicBoolean _closing = new AtomicBoolean(false);
@@ -184,15 +185,29 @@ public class AMQChannel implements AMQSessionModel, AsyncAutoCommitTransaction.F
// by default the session is non-transactional
_transaction = new AsyncAutoCommitTransaction(_messageStore, this);
- _clientDeliveryMethod = session.createDeliveryMethod(_channelId);
+ _clientDeliveryMethod = session.createDeliveryMethod(_channelId);
- _transactionTimeoutHelper = new TransactionTimeoutHelper(_logSubject);
+ _transactionTimeoutHelper = new TransactionTimeoutHelper(_logSubject, new CloseAction()
+ {
+ @Override
+ public void doTimeoutAction(String reason) throws AMQException
+ {
+ closeConnection(reason);
+ }
+ });
}
/** Sets this channel to be part of a local transaction */
public void setLocalTransactional()
{
- _transaction = new LocalTransaction(_messageStore);
+ _transaction = new LocalTransaction(_messageStore, new ActivityTimeAccessor()
+ {
+ @Override
+ public long getActivityTime()
+ {
+ return _session.getLastReceivedTime();
+ }
+ });
_txnStarts.incrementAndGet();
}
@@ -206,8 +221,6 @@ public class AMQChannel implements AMQSessionModel, AsyncAutoCommitTransaction.F
sync();
}
-
-
private void incrementOutstandingTxnsIfNecessary()
{
if(isTransactional())
@@ -228,11 +241,6 @@ public class AMQChannel implements AMQSessionModel, AsyncAutoCommitTransaction.F
}
}
- public Long getTxnStarts()
- {
- return _txnStarts.get();
- }
-
public Long getTxnCommits()
{
return _txnCommits.get();
@@ -350,9 +358,8 @@ public class AMQChannel implements AMQSessionModel, AsyncAutoCommitTransaction.F
}
});
- _transaction.enqueue(destinationQueues, _currentMessage, new MessageDeliveryAction(_currentMessage, destinationQueues), getProtocolSession().getLastReceivedTime());
+ _transaction.enqueue(destinationQueues, _currentMessage, new MessageDeliveryAction(_currentMessage, destinationQueues));
incrementOutstandingTxnsIfNecessary();
- updateTransactionalActivity();
_currentMessage.getStoredMessage().flushToStore();
}
}
@@ -377,7 +384,7 @@ public class AMQChannel implements AMQSessionModel, AsyncAutoCommitTransaction.F
if (_logger.isDebugEnabled())
{
- _logger.debug(debugIdentity() + "Content body received on channel " + _channelId);
+ _logger.debug(debugIdentity() + " content body received on channel " + _channelId);
}
try
@@ -838,7 +845,6 @@ public class AMQChannel implements AMQSessionModel, AsyncAutoCommitTransaction.F
{
Collection<QueueEntry> ackedMessages = getAckedMessages(deliveryTag, multiple);
_transaction.dequeue(ackedMessages, new MessageAcknowledgeAction(ackedMessages));
- updateTransactionalActivity();
}
private Collection<QueueEntry> getAckedMessages(long deliveryTag, boolean multiple)
@@ -1032,19 +1038,6 @@ public class AMQChannel implements AMQSessionModel, AsyncAutoCommitTransaction.F
}
}
-
-
- }
-
- /**
- * Update last transaction activity timestamp
- */
- private void updateTransactionalActivity()
- {
- if (isTransactional())
- {
- _txnUpdateTime.set(getProtocolSession().getLastReceivedTime());
- }
}
public String toString()
@@ -1211,11 +1204,6 @@ public class AMQChannel implements AMQSessionModel, AsyncAutoCommitTransaction.F
// TODO
throw new RuntimeException(e);
}
-
-
-
-
-
}
public void onRollback()
@@ -1365,7 +1353,6 @@ public class AMQChannel implements AMQSessionModel, AsyncAutoCommitTransaction.F
public void onRollback()
{
- //To change body of implemented methods use File | Settings | File Templates.
}
}
@@ -1474,37 +1461,9 @@ public class AMQChannel implements AMQSessionModel, AsyncAutoCommitTransaction.F
return _createTime;
}
- public void mgmtClose() throws AMQException
- {
- _session.mgmtCloseChannel(_channelId);
- }
-
public void checkTransactionStatus(long openWarn, long openClose, long idleWarn, long idleClose) throws AMQException
{
- final long transactionStartTime = _transaction.getTransactionStartTime();
- final long transactionUpdateTime = _txnUpdateTime.get();
- if (isTransactional() && transactionUpdateTime > 0 && transactionStartTime > 0)
- {
- long currentTime = System.currentTimeMillis();
- long openTime = currentTime - transactionStartTime;
- long idleTime = currentTime - transactionUpdateTime;
-
- _transactionTimeoutHelper.logIfNecessary(idleTime, idleWarn, ChannelMessages.IDLE_TXN(idleTime),
- TransactionTimeoutHelper.IDLE_TRANSACTION_ALERT);
- if (_transactionTimeoutHelper.isTimedOut(idleTime, idleClose))
- {
- closeConnection("Idle transaction timed out");
- return;
- }
-
- _transactionTimeoutHelper.logIfNecessary(openTime, openWarn, ChannelMessages.OPEN_TXN(openTime),
- TransactionTimeoutHelper.OPEN_TRANSACTION_ALERT);
- if (_transactionTimeoutHelper.isTimedOut(openTime, openClose))
- {
- closeConnection("Open transaction timed out");
- return;
- }
- }
+ _transactionTimeoutHelper.checkIdleOrOpenTimes(_transaction, openWarn, openClose, idleWarn, idleClose);
}
/**
@@ -1582,6 +1541,11 @@ public class AMQChannel implements AMQSessionModel, AsyncAutoCommitTransaction.F
public void sync()
{
+ if(_logger.isDebugEnabled())
+ {
+ _logger.debug("sync() called on channel " + debugIdentity());
+ }
+
AsyncCommand cmd;
while((cmd = _unfinishedCommandsQueue.poll()) != null)
{
@@ -1619,14 +1583,8 @@ public class AMQChannel implements AMQSessionModel, AsyncAutoCommitTransaction.F
_action.postCommit();
_action = null;
}
-
- boolean isReadyForCompletion()
- {
- return _future.isComplete();
- }
}
-
@Override
public int getConsumerCount()
{
diff --git a/qpid/java/broker/src/main/java/org/apache/qpid/server/TransactionTimeoutHelper.java b/qpid/java/broker/src/main/java/org/apache/qpid/server/TransactionTimeoutHelper.java
index 0c474cca13..b7007bf768 100644
--- a/qpid/java/broker/src/main/java/org/apache/qpid/server/TransactionTimeoutHelper.java
+++ b/qpid/java/broker/src/main/java/org/apache/qpid/server/TransactionTimeoutHelper.java
@@ -18,46 +18,85 @@
*/
package org.apache.qpid.server;
-import org.apache.log4j.Logger;
+import org.apache.qpid.AMQException;
import org.apache.qpid.server.logging.LogActor;
import org.apache.qpid.server.logging.LogMessage;
import org.apache.qpid.server.logging.LogSubject;
import org.apache.qpid.server.logging.actors.CurrentActor;
import org.apache.qpid.server.logging.messages.ChannelMessages;
+import org.apache.qpid.server.txn.ServerTransaction;
public class TransactionTimeoutHelper
{
- private static final Logger LOGGER = Logger.getLogger(TransactionTimeoutHelper.class);
-
- public static final String IDLE_TRANSACTION_ALERT = "IDLE TRANSACTION ALERT";
- public static final String OPEN_TRANSACTION_ALERT = "OPEN TRANSACTION ALERT";
+ private static final String OPEN_TRANSACTION_TIMEOUT_ERROR = "Open transaction timed out";
+ private static final String IDLE_TRANSACTION_TIMEOUT_ERROR = "Idle transaction timed out";
private final LogSubject _logSubject;
- public TransactionTimeoutHelper(final LogSubject logSubject)
+ private final CloseAction _closeAction;
+
+ public TransactionTimeoutHelper(final LogSubject logSubject, final CloseAction closeAction)
{
_logSubject = logSubject;
+ _closeAction = closeAction;
}
- public void logIfNecessary(final long timeSoFar, final long warnTimeout,
- final LogMessage message, final String alternateLogPrefix)
+ public void checkIdleOrOpenTimes(ServerTransaction transaction, long openWarn, long openClose, long idleWarn, long idleClose) throws AMQException
{
- if (isTimedOut(timeSoFar, warnTimeout))
+ if (transaction.isTransactional())
{
- LogActor logActor = CurrentActor.get();
- if(logActor.getRootMessageLogger().isMessageEnabled(logActor, _logSubject, message.getLogHierarchy()))
+ final long transactionUpdateTime = transaction.getTransactionUpdateTime();
+ if(transactionUpdateTime > 0)
{
- logActor.message(_logSubject, message);
+ long idleTime = System.currentTimeMillis() - transactionUpdateTime;
+ boolean closed = logAndCloseIfNecessary(idleTime, idleWarn, idleClose, ChannelMessages.IDLE_TXN(idleTime), IDLE_TRANSACTION_TIMEOUT_ERROR);
+ if (closed)
+ {
+ return; // no point proceeding to check the open time
+ }
}
- else
+
+ final long transactionStartTime = transaction.getTransactionStartTime();
+ if(transactionStartTime > 0)
{
- LOGGER.warn(alternateLogPrefix + " " + _logSubject.toLogString() + " " + timeSoFar + " ms");
+ long openTime = System.currentTimeMillis() - transactionStartTime;
+ logAndCloseIfNecessary(openTime, openWarn, openClose, ChannelMessages.OPEN_TXN(openTime), OPEN_TRANSACTION_TIMEOUT_ERROR);
}
}
}
- public boolean isTimedOut(long timeSoFar, long timeout)
+ /**
+ * @return true iff closeTimeout was exceeded
+ */
+ private boolean logAndCloseIfNecessary(final long timeSoFar,
+ final long warnTimeout, final long closeTimeout,
+ final LogMessage warnMessage, final String closeMessage) throws AMQException
+ {
+ if (isTimedOut(timeSoFar, warnTimeout))
+ {
+ LogActor logActor = CurrentActor.get();
+ logActor.message(_logSubject, warnMessage);
+ }
+
+ if(isTimedOut(timeSoFar, closeTimeout))
+ {
+ _closeAction.doTimeoutAction(closeMessage);
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+ }
+
+ private boolean isTimedOut(long timeSoFar, long timeout)
{
return timeout > 0L && timeSoFar > timeout;
}
+
+ public interface CloseAction
+ {
+ void doTimeoutAction(String reason) throws AMQException;
+ }
+
}
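
A short sketch of how a session wires the reworked helper up after this change, mirroring the AMQChannel and ServerSession hunks elsewhere in this patch (the TimeoutCheckSketch class and periodicCheck method are invented for illustration; TransactionTimeoutHelper, CloseAction, ServerTransaction and LogSubject are the real types shown above):

    import org.apache.qpid.AMQException;
    import org.apache.qpid.server.TransactionTimeoutHelper;
    import org.apache.qpid.server.TransactionTimeoutHelper.CloseAction;
    import org.apache.qpid.server.logging.LogSubject;
    import org.apache.qpid.server.txn.ServerTransaction;

    public class TimeoutCheckSketch
    {
        private final TransactionTimeoutHelper _helper;

        public TimeoutCheckSketch(LogSubject logSubject)
        {
            // The close behaviour is supplied once, at construction time.
            _helper = new TransactionTimeoutHelper(logSubject, new CloseAction()
            {
                @Override
                public void doTimeoutAction(String reason) throws AMQException
                {
                    // e.g. close the owning channel or session, quoting the reason
                }
            });
        }

        // Housekeeping then periodically asks the helper to evaluate the
        // transaction's open and idle times: an exceeded warn threshold produces
        // the CHN-1007/CHN-1008 operational log message, an exceeded close
        // threshold triggers the CloseAction, and a threshold of 0 disables
        // the corresponding check.
        public void periodicCheck(ServerTransaction transaction,
                                  long openWarn, long openClose,
                                  long idleWarn, long idleClose) throws AMQException
        {
            _helper.checkIdleOrOpenTimes(transaction, openWarn, openClose, idleWarn, idleClose);
        }
    }
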
diff --git a/qpid/java/broker/src/main/java/org/apache/qpid/server/output/ProtocolOutputConverterImpl.java b/qpid/java/broker/src/main/java/org/apache/qpid/server/output/ProtocolOutputConverterImpl.java
index a68ac5439c..917215a42f 100644
--- a/qpid/java/broker/src/main/java/org/apache/qpid/server/output/ProtocolOutputConverterImpl.java
+++ b/qpid/java/broker/src/main/java/org/apache/qpid/server/output/ProtocolOutputConverterImpl.java
@@ -218,55 +218,71 @@ class ProtocolOutputConverterImpl implements ProtocolOutputConverter
final boolean isRedelivered = entry.isRedelivered();
- final AMQBody returnBlock = new AMQBody()
- {
-
- private AMQBody _underlyingBody;
-
- public AMQBody createAMQBody()
- {
- return _methodRegistry.createBasicDeliverBody(consumerTag,
- deliveryTag,
- isRedelivered,
- exchangeName,
- routingKey);
-
-
+ final AMQBody returnBlock = new EncodedDeliveryBody(deliveryTag, routingKey, exchangeName, consumerTag, isRedelivered);
+ return returnBlock;
+ }
+ private class EncodedDeliveryBody implements AMQBody
+ {
+ private final long _deliveryTag;
+ private final AMQShortString _routingKey;
+ private final AMQShortString _exchangeName;
+ private final AMQShortString _consumerTag;
+ private final boolean _isRedelivered;
+ private AMQBody _underlyingBody;
+
+ private EncodedDeliveryBody(long deliveryTag, AMQShortString routingKey, AMQShortString exchangeName, AMQShortString consumerTag, boolean isRedelivered)
+ {
+ _deliveryTag = deliveryTag;
+ _routingKey = routingKey;
+ _exchangeName = exchangeName;
+ _consumerTag = consumerTag;
+ _isRedelivered = isRedelivered;
+ }
+ public AMQBody createAMQBody()
+ {
+ return _methodRegistry.createBasicDeliverBody(_consumerTag,
+ _deliveryTag,
+ _isRedelivered,
+ _exchangeName,
+ _routingKey);
+ }
- }
+ public byte getFrameType()
+ {
+ return AMQMethodBody.TYPE;
+ }
- public byte getFrameType()
+ public int getSize()
+ {
+ if(_underlyingBody == null)
{
- return AMQMethodBody.TYPE;
+ _underlyingBody = createAMQBody();
}
+ return _underlyingBody.getSize();
+ }
- public int getSize()
+ public void writePayload(DataOutput buffer) throws IOException
+ {
+ if(_underlyingBody == null)
{
- if(_underlyingBody == null)
- {
- _underlyingBody = createAMQBody();
- }
- return _underlyingBody.getSize();
+ _underlyingBody = createAMQBody();
}
+ _underlyingBody.writePayload(buffer);
+ }
- public void writePayload(DataOutput buffer) throws IOException
- {
- if(_underlyingBody == null)
- {
- _underlyingBody = createAMQBody();
- }
- _underlyingBody.writePayload(buffer);
- }
+ public void handle(final int channelId, final AMQVersionAwareProtocolSession amqMinaProtocolSession)
+ throws AMQException
+ {
+ throw new AMQException("This block should never be dispatched!");
+ }
- public void handle(final int channelId, final AMQVersionAwareProtocolSession amqMinaProtocolSession)
- throws AMQException
- {
- throw new AMQException("This block should never be dispatched!");
- }
- };
- return returnBlock;
+ @Override
+ public String toString()
+ {
+ return "[" + getClass().getSimpleName() + " underlyingBody: " + String.valueOf(_underlyingBody) + "]";
+ }
}
private AMQBody createEncodedGetOkBody(QueueEntry entry, long deliveryTag, int queueSize)
@@ -368,7 +384,6 @@ class ProtocolOutputConverterImpl implements ProtocolOutputConverter
_methodBody = methodBody;
_headerBody = headerBody;
_contentBody = contentBody;
-
}
public long getSize()
@@ -380,6 +395,19 @@ class ProtocolOutputConverterImpl implements ProtocolOutputConverter
{
AMQFrame.writeFrames(buffer, _channel, _methodBody, _headerBody, _contentBody);
}
+
+ @Override
+ public String toString()
+ {
+ StringBuilder builder = new StringBuilder();
+ builder.append("[").append(getClass().getSimpleName())
+ .append(" methodBody=").append(_methodBody)
+ .append(", headerBody=").append(_headerBody)
+ .append(", contentBody=").append(_contentBody)
+ .append(", channel=").append(_channel).append("]");
+ return builder.toString();
+ }
+
}
public static final class SmallCompositeAMQBodyBlock extends AMQDataBlock
@@ -408,6 +436,17 @@ class ProtocolOutputConverterImpl implements ProtocolOutputConverter
{
AMQFrame.writeFrames(buffer, _channel, _methodBody, _headerBody);
}
+
+ @Override
+ public String toString()
+ {
+ StringBuilder builder = new StringBuilder();
+ builder.append("[").append(getClass().getSimpleName())
+ .append(" methodBody=").append(_methodBody)
+ .append(", headerBody=").append(_headerBody)
+ .append(", channel=").append(_channel).append("]");
+ return builder.toString();
+ }
}
}
\ No newline at end of file
diff --git a/qpid/java/broker/src/main/java/org/apache/qpid/server/protocol/AMQProtocolEngine.java b/qpid/java/broker/src/main/java/org/apache/qpid/server/protocol/AMQProtocolEngine.java
index 7bdbbbb5cb..ee1ef2418a 100644
--- a/qpid/java/broker/src/main/java/org/apache/qpid/server/protocol/AMQProtocolEngine.java
+++ b/qpid/java/broker/src/main/java/org/apache/qpid/server/protocol/AMQProtocolEngine.java
@@ -306,9 +306,13 @@ public class AMQProtocolEngine implements ServerProtocolEngine, AMQProtocolSessi
try
{
+ long startTime = 0;
+ String frameToString = null;
if (_logger.isDebugEnabled())
{
- _logger.debug("Frame Received: " + frame);
+ startTime = System.currentTimeMillis();
+ frameToString = frame.toString();
+ _logger.debug("RECV: " + frame);
}
// Check that this channel is not closing
@@ -343,6 +347,11 @@ public class AMQProtocolEngine implements ServerProtocolEngine, AMQProtocolSessi
closeChannel(channelId);
throw e;
}
+
+ if(_logger.isDebugEnabled())
+ {
+ _logger.debug("Frame handled in " + (System.currentTimeMillis() - startTime) + " ms. Frame: " + frameToString);
+ }
}
finally
{
@@ -546,6 +555,12 @@ public class AMQProtocolEngine implements ServerProtocolEngine, AMQProtocolSessi
final ByteBuffer buf = asByteBuffer(frame);
_writtenBytes += buf.remaining();
+
+ if(_logger.isDebugEnabled())
+ {
+ _logger.debug("SEND: " + frame);
+ }
+
_sender.send(buf);
final long time = System.currentTimeMillis();
_lastIoTime = time;
diff --git a/qpid/java/broker/src/main/java/org/apache/qpid/server/protocol/v1_0/ExchangeDestination.java b/qpid/java/broker/src/main/java/org/apache/qpid/server/protocol/v1_0/ExchangeDestination.java
index ba1a1ca45c..2cef27267b 100644
--- a/qpid/java/broker/src/main/java/org/apache/qpid/server/protocol/v1_0/ExchangeDestination.java
+++ b/qpid/java/broker/src/main/java/org/apache/qpid/server/protocol/v1_0/ExchangeDestination.java
@@ -80,7 +80,7 @@ public class ExchangeDestination implements ReceivingDestination, SendingDestina
{
// NO-OP
}
- }, System.currentTimeMillis());
+ });
return ACCEPTED;
}
diff --git a/qpid/java/broker/src/main/java/org/apache/qpid/server/queue/InboundMessageAdapter.java b/qpid/java/broker/src/main/java/org/apache/qpid/server/queue/InboundMessageAdapter.java
index bbc33ca846..d7dbd58537 100755
--- a/qpid/java/broker/src/main/java/org/apache/qpid/server/queue/InboundMessageAdapter.java
+++ b/qpid/java/broker/src/main/java/org/apache/qpid/server/queue/InboundMessageAdapter.java
@@ -47,7 +47,7 @@ public class InboundMessageAdapter implements InboundMessage
public AMQShortString getRoutingKeyShortString()
{
- return AMQShortString.valueOf(_entry.getMessage());
+ return AMQShortString.valueOf(_entry.getMessage().getRoutingKey());
}
public String getRoutingKey()
diff --git a/qpid/java/broker/src/main/java/org/apache/qpid/server/queue/QueueEntryImpl.java b/qpid/java/broker/src/main/java/org/apache/qpid/server/queue/QueueEntryImpl.java
index 25e771a9cf..9aa8d1da83 100644
--- a/qpid/java/broker/src/main/java/org/apache/qpid/server/queue/QueueEntryImpl.java
+++ b/qpid/java/broker/src/main/java/org/apache/qpid/server/queue/QueueEntryImpl.java
@@ -454,7 +454,7 @@ public abstract class QueueEntryImpl implements QueueEntry
{
}
- }, 0L);
+ });
txn.dequeue(currentQueue, message, new ServerTransaction.Action()
{
diff --git a/qpid/java/broker/src/main/java/org/apache/qpid/server/queue/SimpleAMQQueue.java b/qpid/java/broker/src/main/java/org/apache/qpid/server/queue/SimpleAMQQueue.java
index abf18fc1aa..73c2870b9b 100644
--- a/qpid/java/broker/src/main/java/org/apache/qpid/server/queue/SimpleAMQQueue.java
+++ b/qpid/java/broker/src/main/java/org/apache/qpid/server/queue/SimpleAMQQueue.java
@@ -1397,7 +1397,7 @@ public class SimpleAMQQueue implements AMQQueue, Subscription.StateListener, Mes
{
}
- }, 0L);
+ });
txn.dequeue(this, entry.getMessage(),
new ServerTransaction.Action()
{
diff --git a/qpid/java/broker/src/main/java/org/apache/qpid/server/transport/ServerSession.java b/qpid/java/broker/src/main/java/org/apache/qpid/server/transport/ServerSession.java
index 075ed2a87c..6152ddd228 100644
--- a/qpid/java/broker/src/main/java/org/apache/qpid/server/transport/ServerSession.java
+++ b/qpid/java/broker/src/main/java/org/apache/qpid/server/transport/ServerSession.java
@@ -43,6 +43,7 @@ import org.apache.qpid.AMQException;
import org.apache.qpid.AMQStoreException;
import org.apache.qpid.protocol.AMQConstant;
import org.apache.qpid.server.TransactionTimeoutHelper;
+import org.apache.qpid.server.TransactionTimeoutHelper.CloseAction;
import org.apache.qpid.server.logging.LogActor;
import org.apache.qpid.server.logging.LogSubject;
import org.apache.qpid.server.logging.actors.CurrentActor;
@@ -132,7 +133,6 @@ public class ServerSession extends Session
private final AtomicLong _txnCommits = new AtomicLong(0);
private final AtomicLong _txnRejects = new AtomicLong(0);
private final AtomicLong _txnCount = new AtomicLong(0);
- private final AtomicLong _txnUpdateTime = new AtomicLong(0);
private Map<String, Subscription_0_10> _subscriptions = new ConcurrentHashMap<String, Subscription_0_10>();
@@ -147,7 +147,14 @@ public class ServerSession extends Session
_transaction = new AsyncAutoCommitTransaction(this.getMessageStore(),this);
_logSubject = new ChannelLogSubject(this);
- _transactionTimeoutHelper = new TransactionTimeoutHelper(_logSubject);
+ _transactionTimeoutHelper = new TransactionTimeoutHelper(_logSubject, new CloseAction()
+ {
+ @Override
+ public void doTimeoutAction(String reason) throws AMQException
+ {
+ getConnectionModel().closeSession(ServerSession.this, AMQConstant.RESOURCE_ERROR, reason);
+ }
+ });
}
protected void setState(State state)
@@ -186,9 +193,8 @@ public class ServerSession extends Session
}
getConnectionModel().registerMessageReceived(message.getSize(), message.getArrivalTime());
PostEnqueueAction postTransactionAction = new PostEnqueueAction(queues, message, isTransactional()) ;
- _transaction.enqueue(queues,message, postTransactionAction, 0L);
+ _transaction.enqueue(queues,message, postTransactionAction);
incrementOutstandingTxnsIfNecessary();
- updateTransactionalActivity();
}
@@ -402,7 +408,6 @@ public class ServerSession extends Session
entry.release();
}
});
- updateTransactionalActivity();
}
public Collection<Subscription_0_10> getSubscriptions()
@@ -582,22 +587,6 @@ public class ServerSession extends Session
}
}
- /**
- * Update last transaction activity timestamp
- */
- private void updateTransactionalActivity()
- {
- if (isTransactional())
- {
- _txnUpdateTime.set(System.currentTimeMillis());
- }
- }
-
- public Long getTxnStarts()
- {
- return _txnStarts.get();
- }
-
public Long getTxnCommits()
{
return _txnCommits.get();
@@ -703,30 +692,7 @@ public class ServerSession extends Session
public void checkTransactionStatus(long openWarn, long openClose, long idleWarn, long idleClose) throws AMQException
{
- final long transactionStartTime = _transaction.getTransactionStartTime();
- final long transactionUpdateTime = _txnUpdateTime.get();
- if (isTransactional() && transactionUpdateTime > 0 && transactionStartTime > 0)
- {
- long currentTime = System.currentTimeMillis();
- long openTime = currentTime - transactionStartTime;
- long idleTime = currentTime - transactionUpdateTime;
-
- _transactionTimeoutHelper.logIfNecessary(idleTime, idleWarn, ChannelMessages.IDLE_TXN(idleTime),
- TransactionTimeoutHelper.IDLE_TRANSACTION_ALERT);
- if (_transactionTimeoutHelper.isTimedOut(idleTime, idleClose))
- {
- getConnectionModel().closeSession(this, AMQConstant.RESOURCE_ERROR, "Idle transaction timed out");
- return;
- }
-
- _transactionTimeoutHelper.logIfNecessary(openTime, openWarn, ChannelMessages.OPEN_TXN(openTime),
- TransactionTimeoutHelper.OPEN_TRANSACTION_ALERT);
- if (_transactionTimeoutHelper.isTimedOut(openTime, openClose))
- {
- getConnectionModel().closeSession(this, AMQConstant.RESOURCE_ERROR, "Open transaction timed out");
- return;
- }
- }
+ _transactionTimeoutHelper.checkIdleOrOpenTimes(_transaction, openWarn, openClose, idleWarn, idleClose);
}
public void block(AMQQueue queue)
diff --git a/qpid/java/broker/src/main/java/org/apache/qpid/server/txn/AsyncAutoCommitTransaction.java b/qpid/java/broker/src/main/java/org/apache/qpid/server/txn/AsyncAutoCommitTransaction.java
index efd7850a49..43e60c8e13 100755
--- a/qpid/java/broker/src/main/java/org/apache/qpid/server/txn/AsyncAutoCommitTransaction.java
+++ b/qpid/java/broker/src/main/java/org/apache/qpid/server/txn/AsyncAutoCommitTransaction.java
@@ -66,11 +66,18 @@ public class AsyncAutoCommitTransaction implements ServerTransaction
_futureRecorder = recorder;
}
+ @Override
public long getTransactionStartTime()
{
return 0L;
}
+ @Override
+ public long getTransactionUpdateTime()
+ {
+ return 0L;
+ }
+
/**
* Since AutoCommitTransaction has no concept of a long-lived transaction, any Actions registered
* by the caller are executed immediately.
@@ -241,7 +248,7 @@ public class AsyncAutoCommitTransaction implements ServerTransaction
}
- public void enqueue(List<? extends BaseQueue> queues, EnqueableMessage message, Action postTransactionAction, long currentTime)
+ public void enqueue(List<? extends BaseQueue> queues, EnqueableMessage message, Action postTransactionAction)
{
Transaction txn = null;
try
diff --git a/qpid/java/broker/src/main/java/org/apache/qpid/server/txn/AutoCommitTransaction.java b/qpid/java/broker/src/main/java/org/apache/qpid/server/txn/AutoCommitTransaction.java
index e5a7df6880..8a9479a2d4 100755
--- a/qpid/java/broker/src/main/java/org/apache/qpid/server/txn/AutoCommitTransaction.java
+++ b/qpid/java/broker/src/main/java/org/apache/qpid/server/txn/AutoCommitTransaction.java
@@ -52,11 +52,18 @@ public class AutoCommitTransaction implements ServerTransaction
_messageStore = transactionLog;
}
+ @Override
public long getTransactionStartTime()
{
return 0L;
}
+ @Override
+ public long getTransactionUpdateTime()
+ {
+ return 0L;
+ }
+
/**
* Since AutoCommitTransaction has no concept of a long-lived transaction, any Actions registered
* by the caller are executed immediately.
@@ -178,7 +185,7 @@ public class AutoCommitTransaction implements ServerTransaction
}
- public void enqueue(List<? extends BaseQueue> queues, EnqueableMessage message, Action postTransactionAction, long currentTime)
+ public void enqueue(List<? extends BaseQueue> queues, EnqueableMessage message, Action postTransactionAction)
{
Transaction txn = null;
try
@@ -270,4 +277,6 @@ public class AutoCommitTransaction implements ServerTransaction
}
}
+
+
}
diff --git a/qpid/java/broker/src/main/java/org/apache/qpid/server/txn/DistributedTransaction.java b/qpid/java/broker/src/main/java/org/apache/qpid/server/txn/DistributedTransaction.java
index 05d0110e9b..ab987f0fb9 100644
--- a/qpid/java/broker/src/main/java/org/apache/qpid/server/txn/DistributedTransaction.java
+++ b/qpid/java/broker/src/main/java/org/apache/qpid/server/txn/DistributedTransaction.java
@@ -26,7 +26,6 @@ import org.apache.qpid.server.protocol.AMQSessionModel;
import org.apache.qpid.server.queue.BaseQueue;
import org.apache.qpid.server.queue.QueueEntry;
import org.apache.qpid.server.store.MessageStore;
-import org.apache.qpid.server.store.Transaction;
import org.apache.qpid.server.virtualhost.VirtualHost;
import org.apache.qpid.transport.Xid;
@@ -39,10 +38,6 @@ public class DistributedTransaction implements ServerTransaction
private final AutoCommitTransaction _autoCommitTransaction;
- private volatile Transaction _transaction;
-
- private long _txnStartTime = 0L;
-
private DtxBranch _branch;
private AMQSessionModel _session;
private VirtualHost _vhost;
@@ -55,9 +50,16 @@ public class DistributedTransaction implements ServerTransaction
_autoCommitTransaction = new AutoCommitTransaction(vhost.getMessageStore());
}
+ @Override
public long getTransactionStartTime()
{
- return _txnStartTime;
+ return 0;
+ }
+
+ @Override
+ public long getTransactionUpdateTime()
+ {
+ return 0;
}
public void addPostTransactionAction(Action postTransactionAction)
@@ -107,7 +109,7 @@ public class DistributedTransaction implements ServerTransaction
{
_branch.enqueue(queue, message);
_branch.addPostTransactionAcion(postTransactionAction);
- enqueue(Collections.singletonList(queue), message, postTransactionAction, System.currentTimeMillis());
+ enqueue(Collections.singletonList(queue), message, postTransactionAction);
}
else
{
@@ -116,7 +118,7 @@ public class DistributedTransaction implements ServerTransaction
}
public void enqueue(List<? extends BaseQueue> queues, EnqueableMessage message,
- Action postTransactionAction, long currentTime)
+ Action postTransactionAction)
{
if(_branch != null)
{
@@ -128,7 +130,7 @@ public class DistributedTransaction implements ServerTransaction
}
else
{
- _autoCommitTransaction.enqueue(queues, message, postTransactionAction, currentTime);
+ _autoCommitTransaction.enqueue(queues, message, postTransactionAction);
}
}
diff --git a/qpid/java/broker/src/main/java/org/apache/qpid/server/txn/LocalTransaction.java b/qpid/java/broker/src/main/java/org/apache/qpid/server/txn/LocalTransaction.java
index f11fb1086e..afa7cb0fb4 100755
--- a/qpid/java/broker/src/main/java/org/apache/qpid/server/txn/LocalTransaction.java
+++ b/qpid/java/broker/src/main/java/org/apache/qpid/server/txn/LocalTransaction.java
@@ -49,25 +49,42 @@ public class LocalTransaction implements ServerTransaction
private final List<Action> _postTransactionActions = new ArrayList<Action>();
private volatile Transaction _transaction;
- private MessageStore _transactionLog;
+ private final ActivityTimeAccessor _activityTime;
+ private final MessageStore _transactionLog;
private volatile long _txnStartTime = 0L;
+ private volatile long _txnUpdateTime = 0l;
private StoreFuture _asyncTran;
public LocalTransaction(MessageStore transactionLog)
{
- _transactionLog = transactionLog;
+ this(transactionLog, new ActivityTimeAccessor()
+ {
+ @Override
+ public long getActivityTime()
+ {
+ return System.currentTimeMillis();
+ }
+ });
}
-
- public boolean inTransaction()
+
+ public LocalTransaction(MessageStore transactionLog, ActivityTimeAccessor activityTime)
{
- return _transaction != null;
+ _transactionLog = transactionLog;
+ _activityTime = activityTime;
}
+ @Override
public long getTransactionStartTime()
{
return _txnStartTime;
}
+ @Override
+ public long getTransactionUpdateTime()
+ {
+ return _txnUpdateTime;
+ }
+
public void addPostTransactionAction(Action postTransactionAction)
{
sync();
@@ -78,6 +95,7 @@ public class LocalTransaction implements ServerTransaction
{
sync();
_postTransactionActions.add(postTransactionAction);
+ initTransactionStartTimeIfNecessaryAndAdvanceUpdateTime();
if(message.isPersistent() && queue.isDurable())
{
@@ -104,6 +122,7 @@ public class LocalTransaction implements ServerTransaction
{
sync();
_postTransactionActions.add(postTransactionAction);
+ initTransactionStartTimeIfNecessaryAndAdvanceUpdateTime();
try
{
@@ -180,6 +199,7 @@ public class LocalTransaction implements ServerTransaction
{
sync();
_postTransactionActions.add(postTransactionAction);
+ initTransactionStartTimeIfNecessaryAndAdvanceUpdateTime();
if(message.isPersistent() && queue.isDurable())
{
@@ -189,7 +209,7 @@ public class LocalTransaction implements ServerTransaction
{
_logger.debug("Enqueue of message number " + message.getMessageNumber() + " to transaction log. Queue : " + queue.getNameShortString());
}
-
+
beginTranIfNecessary();
_transaction.enqueueMessage(queue, message);
}
@@ -202,15 +222,11 @@ public class LocalTransaction implements ServerTransaction
}
}
- public void enqueue(List<? extends BaseQueue> queues, EnqueableMessage message, Action postTransactionAction, long currentTime)
+ public void enqueue(List<? extends BaseQueue> queues, EnqueableMessage message, Action postTransactionAction)
{
sync();
_postTransactionActions.add(postTransactionAction);
-
- if (_txnStartTime == 0L)
- {
- _txnStartTime = currentTime == 0L ? System.currentTimeMillis() : currentTime;
- }
+ initTransactionStartTimeIfNecessaryAndAdvanceUpdateTime();
if(message.isPersistent())
{
@@ -224,8 +240,7 @@ public class LocalTransaction implements ServerTransaction
{
_logger.debug("Enqueue of message number " + message.getMessageNumber() + " to transaction log. Queue : " + queue.getNameShortString() );
}
-
-
+
beginTranIfNecessary();
_transaction.enqueueMessage(queue, message);
}
@@ -378,16 +393,24 @@ public class LocalTransaction implements ServerTransaction
}
throw new RuntimeException("Failed to commit transaction", e);
}
-
-
}
private void doPostTransactionActions()
{
+ if(_logger.isDebugEnabled())
+ {
+ _logger.debug("Beginning " + _postTransactionActions.size() + " post transaction actions");
+ }
+
for(int i = 0; i < _postTransactionActions.size(); i++)
{
_postTransactionActions.get(i).postCommit();
}
+
+ if(_logger.isDebugEnabled())
+ {
+ _logger.debug("Completed post transaction actions");
+ }
}
public void rollback()
@@ -427,16 +450,34 @@ public class LocalTransaction implements ServerTransaction
}
}
+ private void initTransactionStartTimeIfNecessaryAndAdvanceUpdateTime()
+ {
+ long currentTime = _activityTime.getActivityTime();
+
+ if (_txnStartTime == 0)
+ {
+ _txnStartTime = currentTime;
+ }
+ _txnUpdateTime = currentTime;
+ }
+
private void resetDetails()
{
_asyncTran = null;
_transaction = null;
- _postTransactionActions.clear();
+ _postTransactionActions.clear();
_txnStartTime = 0L;
+ _txnUpdateTime = 0;
}
public boolean isTransactional()
{
return true;
}
+
+ public interface ActivityTimeAccessor
+ {
+ long getActivityTime();
+ }
+
}
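
A brief sketch of the new constructor in use: the owning session decides what counts as activity time, as AMQChannel does earlier in this patch with the connection's last-received time, while the single-argument constructor falls back to System.currentTimeMillis(). The SessionClock interface and newTransactionFor method below are invented for illustration; LocalTransaction, ActivityTimeAccessor and MessageStore are the real types.

    import org.apache.qpid.server.store.MessageStore;
    import org.apache.qpid.server.txn.LocalTransaction;
    import org.apache.qpid.server.txn.LocalTransaction.ActivityTimeAccessor;

    public class LocalTransactionSketch
    {
        /** Hypothetical stand-in for a session's notion of "last activity". */
        public interface SessionClock
        {
            long getLastReceivedTime();
        }

        // The first unit of work stamps the transaction start time, later work
        // advances only the update time, and both are cleared when the transaction
        // details are reset, which is what getTransactionStartTime() and
        // getTransactionUpdateTime() report to the timeout checks.
        public static LocalTransaction newTransactionFor(MessageStore store, final SessionClock clock)
        {
            return new LocalTransaction(store, new ActivityTimeAccessor()
            {
                @Override
                public long getActivityTime()
                {
                    return clock.getLastReceivedTime();
                }
            });
        }
    }
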
diff --git a/qpid/java/broker/src/main/java/org/apache/qpid/server/txn/ServerTransaction.java b/qpid/java/broker/src/main/java/org/apache/qpid/server/txn/ServerTransaction.java
index c568ae67aa..8acac00479 100755
--- a/qpid/java/broker/src/main/java/org/apache/qpid/server/txn/ServerTransaction.java
+++ b/qpid/java/broker/src/main/java/org/apache/qpid/server/txn/ServerTransaction.java
@@ -55,11 +55,18 @@ public interface ServerTransaction
/**
* Return the time the current transaction started.
- *
+ *
* @return the time this transaction started or 0 if not in a transaction
*/
long getTransactionStartTime();
+ /**
+ * Return the time of the last activity on the current transaction.
+ *
+ * @return the time of the last activity or 0 if not in a transaction
+ */
+ long getTransactionUpdateTime();
+
/**
* Register an Action for execution after transaction commit or rollback. Actions
* will be executed in the order in which they are registered.
@@ -92,7 +99,7 @@ public interface ServerTransaction
*
* Store operations will result only for persistent messages on durable queues.
*/
- void enqueue(List<? extends BaseQueue> queues, EnqueableMessage message, Action postTransactionAction, long currentTime);
+ void enqueue(List<? extends BaseQueue> queues, EnqueableMessage message, Action postTransactionAction);
/**
* Commit the transaction represented by this object.
diff --git a/qpid/java/broker/src/test/java/org/apache/qpid/server/TransactionTimeoutHelperTest.java b/qpid/java/broker/src/test/java/org/apache/qpid/server/TransactionTimeoutHelperTest.java
index af06c517db..1ce3c87de6 100644
--- a/qpid/java/broker/src/test/java/org/apache/qpid/server/TransactionTimeoutHelperTest.java
+++ b/qpid/java/broker/src/test/java/org/apache/qpid/server/TransactionTimeoutHelperTest.java
@@ -18,67 +18,131 @@
*/
package org.apache.qpid.server;
-import static org.mockito.Matchers.any;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.apache.qpid.server.logging.messages.ChannelMessages.IDLE_TXN_LOG_HIERARCHY;
+import static org.apache.qpid.server.logging.messages.ChannelMessages.OPEN_TXN_LOG_HIERARCHY;
+import static org.mockito.Matchers.argThat;
import static org.mockito.Matchers.same;
import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyZeroInteractions;
import static org.mockito.Mockito.when;
+import org.apache.qpid.server.TransactionTimeoutHelper.CloseAction;
import org.apache.qpid.server.logging.LogActor;
import org.apache.qpid.server.logging.LogMessage;
import org.apache.qpid.server.logging.LogSubject;
-import org.apache.qpid.server.logging.RootMessageLogger;
import org.apache.qpid.server.logging.actors.CurrentActor;
+import org.apache.qpid.server.txn.ServerTransaction;
import org.apache.qpid.test.utils.QpidTestCase;
+import org.hamcrest.Description;
+import org.mockito.ArgumentMatcher;
public class TransactionTimeoutHelperTest extends QpidTestCase
{
- private final LogMessage _logMessage = mock(LogMessage.class);
private final LogActor _logActor = mock(LogActor.class);
private final LogSubject _logSubject = mock(LogSubject.class);
+ private final ServerTransaction _transaction = mock(ServerTransaction.class);
+ private final CloseAction _closeAction = mock(CloseAction.class);
private TransactionTimeoutHelper _transactionTimeoutHelper;
- private RootMessageLogger _rootMessageLogger;
+ private long _now;
- public void testLogIfNecessary()
+ public void testNotTransactional() throws Exception
{
- _transactionTimeoutHelper.logIfNecessary(99, 100, _logMessage, "");
- verifyZeroInteractions(_logActor, _logMessage);
+ when(_transaction.isTransactional()).thenReturn(false);
- _transactionTimeoutHelper.logIfNecessary(101, 100, _logMessage, "");
- verify(_logActor).message(_logSubject, _logMessage);
+ _transactionTimeoutHelper.checkIdleOrOpenTimes(_transaction, 5, 10, 5, 10);
+
+ verifyZeroInteractions(_logActor, _closeAction);
}
- public void testLogIfNecessaryWhenOperationalLoggingDisabled()
+ public void testOpenTransactionProducesWarningOnly() throws Exception
{
- //disable the operational logging
- when(_rootMessageLogger.isMessageEnabled(
- same(_logActor), any(LogSubject.class), any(String.class)))
- .thenReturn(false);
-
- //verify the actor is never asked to log a message
- _transactionTimeoutHelper.logIfNecessary(101, 100, _logMessage, "");
- verify(_logActor, never()).message(any(LogMessage.class));
- verify(_logActor, never()).message(any(LogSubject.class), any(LogMessage.class));
+ final long sixtyOneSecondsAgo = _now - SECONDS.toMillis(61);
+
+ configureMockTransaction(sixtyOneSecondsAgo, sixtyOneSecondsAgo);
+
+ _transactionTimeoutHelper.checkIdleOrOpenTimes(_transaction, SECONDS.toMillis(30), 0, 0, 0);
+
+ verify(_logActor).message(same(_logSubject), isLogMessage(OPEN_TXN_LOG_HIERARCHY, "CHN-1007 : Open Transaction : 61,\\d{3} ms"));
+ verifyZeroInteractions(_closeAction);
}
- public void testIsTimedOut()
+ public void testOpenTransactionProducesTimeoutActionOnly() throws Exception
{
- assertFalse("Shouldn't have timed out", _transactionTimeoutHelper.isTimedOut(199,200));
- assertTrue("Should have timed out", _transactionTimeoutHelper.isTimedOut(201,200));
+ final long sixtyOneSecondsAgo = _now - SECONDS.toMillis(61);
+
+ configureMockTransaction(sixtyOneSecondsAgo, sixtyOneSecondsAgo);
+
+ _transactionTimeoutHelper.checkIdleOrOpenTimes(_transaction, 0, SECONDS.toMillis(30), 0, 0);
+
+ verify(_closeAction).doTimeoutAction("Open transaction timed out");
+ verifyZeroInteractions(_logActor);
}
- /**
- * If TransactionTimeout is disabled, the timeout will be 0. This test verifies
- * that the helper methods respond negatively in this scenario.
- */
- public void testTransactionTimeoutDisabled()
+ public void testOpenTransactionProducesWarningAndTimeoutAction() throws Exception
{
- assertFalse("Shouldn't have timed out", _transactionTimeoutHelper.isTimedOut(201,0));
+ final long sixtyOneSecondsAgo = _now - SECONDS.toMillis(61);
+
+ configureMockTransaction(sixtyOneSecondsAgo, sixtyOneSecondsAgo);
+
+ _transactionTimeoutHelper.checkIdleOrOpenTimes(_transaction, SECONDS.toMillis(15), SECONDS.toMillis(30), 0, 0);
- _transactionTimeoutHelper.logIfNecessary(99, 0, _logMessage, "");
- verifyZeroInteractions(_logActor, _logMessage);
+ verify(_logActor).message(same(_logSubject), isLogMessage(OPEN_TXN_LOG_HIERARCHY, "CHN-1007 : Open Transaction : 61,\\d{3} ms"));
+ verify(_closeAction).doTimeoutAction("Open transaction timed out");
+ }
+
+ public void testIdleTransactionProducesWarningOnly() throws Exception
+ {
+ final long sixtyOneSecondsAgo = _now - SECONDS.toMillis(61);
+ final long thirtyOneSecondsAgo = _now - SECONDS.toMillis(31);
+
+ configureMockTransaction(sixtyOneSecondsAgo, thirtyOneSecondsAgo);
+
+ _transactionTimeoutHelper.checkIdleOrOpenTimes(_transaction, 0, 0, SECONDS.toMillis(30), 0);
+
+ verify(_logActor).message(same(_logSubject), isLogMessage(IDLE_TXN_LOG_HIERARCHY, "CHN-1008 : Idle Transaction : 31,\\d{3} ms"));
+ verifyZeroInteractions(_closeAction);
+ }
+
+ public void testIdleTransactionProducesTimeoutActionOnly() throws Exception
+ {
+ final long sixtyOneSecondsAgo = _now - SECONDS.toMillis(61);
+ final long thirtyOneSecondsAgo = _now - SECONDS.toMillis(31);
+
+ configureMockTransaction(sixtyOneSecondsAgo, thirtyOneSecondsAgo);
+
+ _transactionTimeoutHelper.checkIdleOrOpenTimes(_transaction, 0, 0, 0, SECONDS.toMillis(30));
+
+ verify(_closeAction).doTimeoutAction("Idle transaction timed out");
+ verifyZeroInteractions(_logActor);
+ }
+
+ public void testIdleTransactionProducesWarningAndTimeoutAction() throws Exception
+ {
+ final long sixtyOneSecondsAgo = _now - SECONDS.toMillis(61);
+ final long thirtyOneSecondsAgo = _now - SECONDS.toMillis(31);
+
+ configureMockTransaction(sixtyOneSecondsAgo, thirtyOneSecondsAgo);
+
+ _transactionTimeoutHelper.checkIdleOrOpenTimes(_transaction, 0, 0, SECONDS.toMillis(15), SECONDS.toMillis(30));
+
+ verify(_logActor).message(same(_logSubject), isLogMessage(IDLE_TXN_LOG_HIERARCHY, "CHN-1008 : Idle Transaction : 31,\\d{3} ms"));
+ verify(_closeAction).doTimeoutAction("Idle transaction timed out");
+ }
+
+ public void testIdleAndOpenWarnings() throws Exception
+ {
+ final long sixtyOneSecondsAgo = _now - SECONDS.toMillis(61);
+ final long thirtyOneSecondsAgo = _now - SECONDS.toMillis(31);
+
+ configureMockTransaction(sixtyOneSecondsAgo, thirtyOneSecondsAgo);
+
+ _transactionTimeoutHelper.checkIdleOrOpenTimes(_transaction, SECONDS.toMillis(60), 0, SECONDS.toMillis(30), 0);
+
+ verify(_logActor).message(same(_logSubject), isLogMessage(IDLE_TXN_LOG_HIERARCHY, "CHN-1008 : Idle Transaction : 31,\\d{3} ms"));
+ verify(_logActor).message(same(_logSubject), isLogMessage(OPEN_TXN_LOG_HIERARCHY, "CHN-1007 : Open Transaction : 61,\\d{3} ms"));
+ verifyZeroInteractions(_closeAction);
}
@Override
@@ -88,20 +152,80 @@ public class TransactionTimeoutHelperTest extends QpidTestCase
CurrentActor.set(_logActor);
- _rootMessageLogger = mock(RootMessageLogger.class);
- when(_logActor.getRootMessageLogger()).thenReturn(_rootMessageLogger);
-
- when(_rootMessageLogger.isMessageEnabled(
- same(_logActor), any(LogSubject.class), any(String.class)))
- .thenReturn(true);
-
- _transactionTimeoutHelper = new TransactionTimeoutHelper(_logSubject);
+ _transactionTimeoutHelper = new TransactionTimeoutHelper(_logSubject, _closeAction);
+ _now = System.currentTimeMillis();
}
@Override
protected void tearDown() throws Exception
{
- CurrentActor.remove();
- super.tearDown();
+ try
+ {
+ super.tearDown();
+ }
+ finally
+ {
+ CurrentActor.remove();
+ }
+ }
+
+ private void configureMockTransaction(final long startTime, final long updateTime)
+ {
+ when(_transaction.isTransactional()).thenReturn(true);
+ when(_transaction.getTransactionStartTime()).thenReturn(startTime);
+ when(_transaction.getTransactionUpdateTime()).thenReturn(updateTime);
}
+
+ private LogMessage isLogMessage(String expectedLogHierarchy, String expectedText)
+ {
+ return argThat(new IsLogMessage(expectedLogHierarchy, expectedText));
+ }
+
+ class IsLogMessage extends ArgumentMatcher<LogMessage>
+ {
+ private final String _expectedLogHierarchy;
+ private final String _expectedLogMessageMatches;
+ private String _hierarchyMatchesFailure;
+ private String _logMessageMatchesFailure;
+
+ public IsLogMessage(String expectedLogHierarchy, String expectedLogMessageMatches)
+ {
+ _expectedLogHierarchy = expectedLogHierarchy;
+ _expectedLogMessageMatches = expectedLogMessageMatches;
+ }
+
+ public boolean matches(Object arg)
+ {
+ LogMessage logMessage = (LogMessage)arg;
+
+ boolean hierarchyMatches = logMessage.getLogHierarchy().equals(_expectedLogHierarchy);
+ boolean logMessageMatches = logMessage.toString().matches(_expectedLogMessageMatches);
+
+ if (!hierarchyMatches)
+ {
+ _hierarchyMatchesFailure = "LogHierarchy does not match. Expected " + _expectedLogHierarchy + " actual " + logMessage.getLogHierarchy();
+ }
+
+ if (!logMessageMatches)
+ {
+ _logMessageMatchesFailure = "LogMessage does not match. Expected " + _expectedLogMessageMatches + " actual " + logMessage.toString();
+ }
+
+ return hierarchyMatches && logMessageMatches;
+ }
+
+ @Override
+ public void describeTo(Description description)
+ {
+ if (_hierarchyMatchesFailure != null)
+ {
+ description.appendText(_hierarchyMatchesFailure);
+ }
+ if (_logMessageMatchesFailure != null)
+ {
+ description.appendText(_logMessageMatchesFailure);
+ }
+ }
+ }
+
}
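Taken together, the rewritten tests pin down the contract of checkIdleOrOpenTimes(transaction, openWarn, openClose, idleWarn, idleClose): a zero threshold disables that check, warnings and the close action fire independently, open time is measured from the transaction start time and idle time from the update time. A rough sketch of logic consistent with those assertions follows; it is not the real TransactionTimeoutHelper, and the _logSubject/_closeAction fields plus the ChannelMessages.OPEN_TXN/IDLE_TXN factory calls are assumptions.

// Sketch consistent with the test expectations above; not the patch's implementation.
void checkIdleOrOpenTimesSketch(ServerTransaction transaction,
                                long openWarn, long openClose,
                                long idleWarn, long idleClose)
{
    if (!transaction.isTransactional())
    {
        return; // auto-commit work is never warned about or closed
    }

    long now = System.currentTimeMillis();
    long openTime = now - transaction.getTransactionStartTime();
    long idleTime = now - transaction.getTransactionUpdateTime();

    // warnings and close actions are independent: the tests configure both and expect both
    if (idleWarn > 0 && idleTime > idleWarn)
    {
        CurrentActor.get().message(_logSubject, ChannelMessages.IDLE_TXN(idleTime)); // assumed message factory
    }
    if (idleClose > 0 && idleTime > idleClose)
    {
        _closeAction.doTimeoutAction("Idle transaction timed out");
    }
    if (openWarn > 0 && openTime > openWarn)
    {
        CurrentActor.get().message(_logSubject, ChannelMessages.OPEN_TXN(openTime)); // assumed message factory
    }
    if (openClose > 0 && openTime > openClose)
    {
        _closeAction.doTimeoutAction("Open transaction timed out");
    }
}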
diff --git a/qpid/java/broker/src/test/java/org/apache/qpid/server/queue/InboundMessageAdapterTest.java b/qpid/java/broker/src/test/java/org/apache/qpid/server/queue/InboundMessageAdapterTest.java
new file mode 100644
index 0000000000..2f160678ba
--- /dev/null
+++ b/qpid/java/broker/src/test/java/org/apache/qpid/server/queue/InboundMessageAdapterTest.java
@@ -0,0 +1,97 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+package org.apache.qpid.server.queue;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import org.apache.qpid.framing.AMQShortString;
+import org.apache.qpid.server.message.AMQMessageHeader;
+import org.apache.qpid.server.message.ServerMessage;
+import org.apache.qpid.test.utils.QpidTestCase;
+
+public class InboundMessageAdapterTest extends QpidTestCase
+{
+ private ServerMessage<?> _mockMessage;
+ private QueueEntry _mockQueueEntry;
+ private InboundMessageAdapter _inboundMessageAdapter;
+
+ @Override
+ protected void setUp() throws Exception
+ {
+ super.setUp();
+ _mockMessage = mock(ServerMessage.class);
+ _mockQueueEntry = mock(QueueEntry.class);
+ when(_mockQueueEntry.getMessage()).thenReturn(_mockMessage);
+
+ _inboundMessageAdapter = new InboundMessageAdapter(_mockQueueEntry);
+ }
+
+ public void testGetRoutingKey() throws Exception
+ {
+ String routingKey = getTestName();
+ when(_mockMessage.getRoutingKey()).thenReturn(routingKey);
+
+ assertEquals("Unexpected value for routing key", routingKey, _inboundMessageAdapter.getRoutingKey());
+ }
+
+ public void testGetRoutingKeyShortString() throws Exception
+ {
+ String routingKey = getTestName();
+ when(_mockMessage.getRoutingKey()).thenReturn(routingKey);
+
+ AMQShortString routingKeyShortString = AMQShortString.valueOf(routingKey);
+ assertEquals("Unexpected value for routing key short string", routingKeyShortString, _inboundMessageAdapter.getRoutingKeyShortString());
+ }
+
+ public void testGetMessageHeader() throws Exception
+ {
+ AMQMessageHeader mockMessageHeader = mock(AMQMessageHeader.class);
+ when(_mockQueueEntry.getMessageHeader()).thenReturn(mockMessageHeader);
+
+ assertSame("unexpected message header", mockMessageHeader, _inboundMessageAdapter.getMessageHeader());
+ }
+
+ public void testIsRedelivered() throws Exception
+ {
+ when(_mockQueueEntry.isRedelivered()).thenReturn(true);
+ assertTrue("unexpected isRedelivered value", _inboundMessageAdapter.isRedelivered());
+
+ when(_mockQueueEntry.isRedelivered()).thenReturn(false);
+ assertFalse("unexpected isRedelivered value", _inboundMessageAdapter.isRedelivered());
+ }
+
+ public void testIsPersistent() throws Exception
+ {
+ when(_mockQueueEntry.isPersistent()).thenReturn(true);
+ assertTrue("unexpected isPersistent value", _inboundMessageAdapter.isPersistent());
+
+ when(_mockQueueEntry.isPersistent()).thenReturn(false);
+ assertFalse("unexpected isPersistent value", _inboundMessageAdapter.isPersistent());
+ }
+
+ public void testGetSize() throws Exception
+ {
+ long size = 32526215;
+ when(_mockQueueEntry.getSize()).thenReturn(size);
+ assertEquals("unexpected getSize value", size, _inboundMessageAdapter.getSize());
+ }
+}
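The new test checks plain delegation. As a reading aid, an adapter consistent with these expectations would look roughly like the sketch below; it is inferred from the test rather than copied from InboundMessageAdapter, and the constructor and field names are assumptions.

// Delegation sketch inferred from InboundMessageAdapterTest; not the real class.
class InboundMessageAdapterSketch
{
    private final QueueEntry _entry;

    InboundMessageAdapterSketch(QueueEntry entry)
    {
        _entry = entry;
    }

    String getRoutingKey()
    {
        return _entry.getMessage().getRoutingKey();
    }

    AMQShortString getRoutingKeyShortString()
    {
        return AMQShortString.valueOf(_entry.getMessage().getRoutingKey());
    }

    AMQMessageHeader getMessageHeader()
    {
        return _entry.getMessageHeader();
    }

    boolean isRedelivered()
    {
        return _entry.isRedelivered();
    }

    boolean isPersistent()
    {
        return _entry.isPersistent();
    }

    long getSize()
    {
        return _entry.getSize();
    }
}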
diff --git a/qpid/java/broker/src/test/java/org/apache/qpid/server/queue/SimpleAMQQueueTest.java b/qpid/java/broker/src/test/java/org/apache/qpid/server/queue/SimpleAMQQueueTest.java
index 2b60aea208..ece42f7de3 100644
--- a/qpid/java/broker/src/test/java/org/apache/qpid/server/queue/SimpleAMQQueueTest.java
+++ b/qpid/java/broker/src/test/java/org/apache/qpid/server/queue/SimpleAMQQueueTest.java
@@ -659,7 +659,7 @@ public class SimpleAMQQueueTest extends QpidTestCase
public void onRollback()
{
}
- }, 0L);
+ });
// Check that it is enqueued
AMQQueue data = store.getMessages().get(1L);
diff --git a/qpid/java/broker/src/test/java/org/apache/qpid/server/store/MessageStoreTest.java b/qpid/java/broker/src/test/java/org/apache/qpid/server/store/MessageStoreTest.java
index a957d6bf6c..ffd777243b 100644
--- a/qpid/java/broker/src/test/java/org/apache/qpid/server/store/MessageStoreTest.java
+++ b/qpid/java/broker/src/test/java/org/apache/qpid/server/store/MessageStoreTest.java
@@ -661,7 +661,7 @@ public class MessageStoreTest extends QpidTestCase
{
//To change body of implemented methods use File | Settings | File Templates.
}
- }, 0L);
+ });
}
}
diff --git a/qpid/java/broker/src/test/java/org/apache/qpid/server/txn/AsyncAutoCommitTransactionTest.java b/qpid/java/broker/src/test/java/org/apache/qpid/server/txn/AsyncAutoCommitTransactionTest.java
index 1aa91fa98a..5c1012d50b 100644
--- a/qpid/java/broker/src/test/java/org/apache/qpid/server/txn/AsyncAutoCommitTransactionTest.java
+++ b/qpid/java/broker/src/test/java/org/apache/qpid/server/txn/AsyncAutoCommitTransactionTest.java
@@ -82,7 +82,7 @@ public class AsyncAutoCommitTransactionTest extends QpidTestCase
AsyncAutoCommitTransaction asyncAutoCommitTransaction =
new AsyncAutoCommitTransaction(_messageStore, _futureRecorder);
- asyncAutoCommitTransaction.enqueue(Collections.singletonList(_queue), _message, _postTransactionAction, System.currentTimeMillis());
+ asyncAutoCommitTransaction.enqueue(Collections.singletonList(_queue), _message, _postTransactionAction);
verify(_storeTransaction).enqueueMessage(_queue, _message);
verify(_futureRecorder).recordFuture(_future, _postTransactionAction);
diff --git a/qpid/java/broker/src/test/java/org/apache/qpid/server/txn/AutoCommitTransactionTest.java b/qpid/java/broker/src/test/java/org/apache/qpid/server/txn/AutoCommitTransactionTest.java
index cd3fe3c473..06b8539eb1 100644
--- a/qpid/java/broker/src/test/java/org/apache/qpid/server/txn/AutoCommitTransactionTest.java
+++ b/qpid/java/broker/src/test/java/org/apache/qpid/server/txn/AutoCommitTransactionTest.java
@@ -137,7 +137,7 @@ public class AutoCommitTransactionTest extends QpidTestCase
_message = createTestMessage(false);
_queues = createTestBaseQueues(new boolean[] {false, false, false});
- _transaction.enqueue(_queues, _message, _action, 0L);
+ _transaction.enqueue(_queues, _message, _action);
assertEquals("Enqueue of non-persistent message must not cause message to be enqueued", 0, _storeTransaction.getNumberOfEnqueuedMessages());
assertEquals("Unexpected transaction state", TransactionState.NOT_STARTED, _storeTransaction.getState());
@@ -157,7 +157,7 @@ public class AutoCommitTransactionTest extends QpidTestCase
_message = createTestMessage(true);
_queues = createTestBaseQueues(new boolean[] {false, false, false});
- _transaction.enqueue(_queues, _message, _action, 0L);
+ _transaction.enqueue(_queues, _message, _action);
assertEquals("Enqueue of persistent message to non-durable queues must not cause message to be enqueued", 0, _storeTransaction.getNumberOfEnqueuedMessages());
assertEquals("Unexpected transaction state", TransactionState.NOT_STARTED, _storeTransaction.getState());
@@ -175,7 +175,7 @@ public class AutoCommitTransactionTest extends QpidTestCase
_message = createTestMessage(true);
_queues = createTestBaseQueues(new boolean[] {false, true, false, true});
- _transaction.enqueue(_queues, _message, _action, 0L);
+ _transaction.enqueue(_queues, _message, _action);
assertEquals("Enqueue of persistent message to durable/non-durable queues must cause messages to be enqueued", 2, _storeTransaction.getNumberOfEnqueuedMessages());
assertEquals("Unexpected transaction state", TransactionState.COMMITTED, _storeTransaction.getState());
@@ -198,7 +198,7 @@ public class AutoCommitTransactionTest extends QpidTestCase
try
{
- _transaction.enqueue(_queues, _message, _action, 0L);
+ _transaction.enqueue(_queues, _message, _action);
fail("Exception not thrown");
}
catch (RuntimeException re)
diff --git a/qpid/java/broker/src/test/java/org/apache/qpid/server/txn/LocalTransactionTest.java b/qpid/java/broker/src/test/java/org/apache/qpid/server/txn/LocalTransactionTest.java
index 5992829f37..4904cbc6fb 100644
--- a/qpid/java/broker/src/test/java/org/apache/qpid/server/txn/LocalTransactionTest.java
+++ b/qpid/java/broker/src/test/java/org/apache/qpid/server/txn/LocalTransactionTest.java
@@ -140,7 +140,7 @@ public class LocalTransactionTest extends QpidTestCase
_message = createTestMessage(false);
_queues = createTestBaseQueues(new boolean[] {false, false, false});
- _transaction.enqueue(_queues, _message, _action1, 0L);
+ _transaction.enqueue(_queues, _message, _action1);
assertEquals("Enqueue of non-persistent message must not cause message to be enqueued", 0, _storeTransaction.getNumberOfEnqueuedMessages());
assertEquals("Unexpected transaction state", TransactionState.NOT_STARTED, _storeTransaction.getState());
@@ -156,7 +156,7 @@ public class LocalTransactionTest extends QpidTestCase
_message = createTestMessage(true);
_queues = createTestBaseQueues(new boolean[] {false, false, false});
- _transaction.enqueue(_queues, _message, _action1, 0L);
+ _transaction.enqueue(_queues, _message, _action1);
assertEquals("Enqueue of persistent message to non-durable queues must not cause message to be enqueued", 0, _storeTransaction.getNumberOfEnqueuedMessages());
assertEquals("Unexpected transaction state", TransactionState.NOT_STARTED, _storeTransaction.getState());
@@ -173,7 +173,7 @@ public class LocalTransactionTest extends QpidTestCase
_message = createTestMessage(true);
_queues = createTestBaseQueues(new boolean[] {false, true, false, true});
- _transaction.enqueue(_queues, _message, _action1, 0L);
+ _transaction.enqueue(_queues, _message, _action1);
assertEquals("Enqueue of persistent message to durable/non-durable queues must cause messages to be enqueued", 2, _storeTransaction.getNumberOfEnqueuedMessages());
assertEquals("Unexpected transaction state", TransactionState.STARTED, _storeTransaction.getState());
@@ -196,7 +196,7 @@ public class LocalTransactionTest extends QpidTestCase
try
{
- _transaction.enqueue(_queues, _message, _action1, 0L);
+ _transaction.enqueue(_queues, _message, _action1);
fail("Exception not thrown");
}
catch (RuntimeException re)
@@ -217,7 +217,7 @@ public class LocalTransactionTest extends QpidTestCase
{
_message = createTestMessage(false);
_queue = createTestAMQQueue(false);
-
+
_transaction.dequeue(_queue, _message, _action1);
assertEquals("Dequeue of non-persistent message must not cause message to be enqueued", 0, _storeTransaction.getNumberOfEnqueuedMessages());
@@ -465,7 +465,6 @@ public class LocalTransactionTest extends QpidTestCase
*/
public void testRollbackWorkWithAdditionalPostAction() throws Exception
{
-
_message = createTestMessage(true);
_queue = createTestAMQQueue(true);
@@ -482,6 +481,122 @@ public class LocalTransactionTest extends QpidTestCase
assertTrue("Rollback action2 must be fired", _action1.isRollbackActionFired());
}
+ public void testFirstEnqueueRecordsTransactionStartAndUpdateTime() throws Exception
+ {
+ assertEquals("Unexpected transaction start time before test", 0, _transaction.getTransactionStartTime());
+ assertEquals("Unexpected transaction update time before test", 0, _transaction.getTransactionUpdateTime());
+
+ _message = createTestMessage(true);
+ _queue = createTestAMQQueue(true);
+
+ long startTime = System.currentTimeMillis();
+ _transaction.enqueue(_queue, _message, _action1);
+
+ assertTrue("Transaction start time should have been recorded", _transaction.getTransactionStartTime() >= startTime);
+ assertEquals("Transaction update time should be the same as transaction start time", _transaction.getTransactionStartTime(), _transaction.getTransactionUpdateTime());
+ }
+
+ public void testSubsequentEnqueueAdvancesTransactionUpdateTimeOnly() throws Exception
+ {
+ assertEquals("Unexpected transaction start time before test", 0, _transaction.getTransactionStartTime());
+ assertEquals("Unexpected transaction update time before test", 0, _transaction.getTransactionUpdateTime());
+
+ _message = createTestMessage(true);
+ _queue = createTestAMQQueue(true);
+
+ _transaction.enqueue(_queue, _message, _action1);
+
+ final long transactionStartTimeAfterFirstEnqueue = _transaction.getTransactionStartTime();
+ final long transactionUpdateTimeAfterFirstEnqueue = _transaction.getTransactionUpdateTime();
+
+ Thread.sleep(1);
+ _transaction.enqueue(_queue, _message, _action2);
+
+ final long transactionStartTimeAfterSecondEnqueue = _transaction.getTransactionStartTime();
+ final long transactionUpdateTimeAfterSecondEnqueue = _transaction.getTransactionUpdateTime();
+
+ assertEquals("Transaction start time after second enqueue should be unchanged", transactionStartTimeAfterFirstEnqueue, transactionStartTimeAfterSecondEnqueue);
+ assertTrue("Transaction update time after second enqueue should be greater than first update time", transactionUpdateTimeAfterSecondEnqueue > transactionUpdateTimeAfterFirstEnqueue);
+ }
+
+ public void testFirstDequeueRecordsTransactionStartAndUpdateTime() throws Exception
+ {
+ assertEquals("Unexpected transaction start time before test", 0, _transaction.getTransactionStartTime());
+ assertEquals("Unexpected transaction update time before test", 0, _transaction.getTransactionUpdateTime());
+
+ _message = createTestMessage(true);
+ _queue = createTestAMQQueue(true);
+
+ long startTime = System.currentTimeMillis();
+ _transaction.dequeue(_queue, _message, _action1);
+
+ assertTrue("Transaction start time should have been recorded", _transaction.getTransactionStartTime() >= startTime);
+ assertEquals("Transaction update time should be the same as transaction start time", _transaction.getTransactionStartTime(), _transaction.getTransactionUpdateTime());
+ }
+
+ public void testMixedEnqueuesAndDequeuesAdvancesTransactionUpdateTimeOnly() throws Exception
+ {
+ assertEquals("Unexpected transaction start time before test", 0, _transaction.getTransactionStartTime());
+ assertEquals("Unexpected transaction update time before test", 0, _transaction.getTransactionUpdateTime());
+
+ _message = createTestMessage(true);
+ _queue = createTestAMQQueue(true);
+
+ _transaction.enqueue(_queue, _message, _action1);
+
+ final long transactionStartTimeAfterFirstEnqueue = _transaction.getTransactionStartTime();
+ final long transactionUpdateTimeAfterFirstEnqueue = _transaction.getTransactionUpdateTime();
+
+ Thread.sleep(1);
+ _transaction.dequeue(_queue, _message, _action2);
+
+ final long transactionStartTimeAfterFirstDequeue = _transaction.getTransactionStartTime();
+ final long transactionUpdateTimeAfterFirstDequeue = _transaction.getTransactionUpdateTime();
+
+ assertEquals("Transaction start time after first dequeue should be unchanged", transactionStartTimeAfterFirstEnqueue, transactionStartTimeAfterFirstDequeue);
+ assertTrue("Transaction update time after first dequeue should be greater than first update time", transactionUpdateTimeAfterFirstDequeue > transactionUpdateTimeAfterFirstEnqueue);
+ }
+
+ public void testCommitResetsTransactionStartAndUpdateTime() throws Exception
+ {
+ assertEquals("Unexpected transaction start time before test", 0, _transaction.getTransactionStartTime());
+ assertEquals("Unexpected transaction update time before test", 0, _transaction.getTransactionUpdateTime());
+
+ _message = createTestMessage(true);
+ _queue = createTestAMQQueue(true);
+
+ long startTime = System.currentTimeMillis();
+ _transaction.enqueue(_queue, _message, _action1);
+
+ assertTrue(_transaction.getTransactionStartTime() >= startTime);
+ assertTrue(_transaction.getTransactionUpdateTime() >= startTime);
+
+ _transaction.commit();
+
+ assertEquals("Transaction start time should be reset after commit", 0, _transaction.getTransactionStartTime());
+ assertEquals("Transaction update time should be reset after commit", 0, _transaction.getTransactionUpdateTime());
+ }
+
+ public void testRollbackResetsTransactionStartAndUpdateTime() throws Exception
+ {
+ assertEquals("Unexpected transaction start time before test", 0, _transaction.getTransactionStartTime());
+ assertEquals("Unexpected transaction update time before test", 0, _transaction.getTransactionUpdateTime());
+
+ _message = createTestMessage(true);
+ _queue = createTestAMQQueue(true);
+
+ long startTime = System.currentTimeMillis();
+ _transaction.enqueue(_queue, _message, _action1);
+
+ assertTrue(_transaction.getTransactionStartTime() >= startTime);
+ assertTrue(_transaction.getTransactionUpdateTime() >= startTime);
+
+ _transaction.rollback();
+
+ assertEquals("Transaction start time should be reset after rollback", 0, _transaction.getTransactionStartTime());
+ assertEquals("Transaction update time should be reset after rollback", 0, _transaction.getTransactionUpdateTime());
+ }
+
private Collection<QueueEntry> createTestQueueEntries(boolean[] queueDurableFlags, boolean[] messagePersistentFlags)
{
Collection<QueueEntry> queueEntries = new ArrayList<QueueEntry>();
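The new LocalTransactionTest cases fix the expected lifecycle of the transaction timestamps: the first enqueue or dequeue records both the start and update time, later work only advances the update time, and commit or rollback resets both to zero. A minimal sketch of bookkeeping that would satisfy those tests (the field and method names are assumptions, not the actual LocalTransaction members):

// Sketch only: illustrates the timestamp bookkeeping the new tests require.
private long _txnStartTime = 0L;
private long _txnUpdateTime = 0L;

private void recordTransactionActivity()
{
    long now = System.currentTimeMillis();
    if (_txnStartTime == 0L)
    {
        _txnStartTime = now;   // first enqueue/dequeue starts the open-transaction clock
    }
    _txnUpdateTime = now;      // every enqueue/dequeue refreshes the idle clock
}

private void resetTransactionTimes()
{
    // commit() and rollback() return both values to 0, as the tests assert
    _txnStartTime = 0L;
    _txnUpdateTime = 0L;
}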
diff --git a/qpid/java/build.deps b/qpid/java/build.deps
index 4775fcbf33..32f5e2f1e2 100644
--- a/qpid/java/build.deps
+++ b/qpid/java/build.deps
@@ -122,3 +122,5 @@ perftests-visualisation-jfc.test.libs=${test.libs}
bnd=lib/required/bnd-0.0.384.jar
jython=lib/required/jython-standalone-2.5.2.jar
maven-ant-tasks=lib/required/maven-ant-tasks-2.1.1.jar
+velocity.jar=lib/required/velocity-1.4.jar
+velocity-dep.jar=lib/required/velocity-dep-1.4.jar
diff --git a/qpid/java/client/src/main/java/org/apache/qpid/client/AMQSession_0_10.java b/qpid/java/client/src/main/java/org/apache/qpid/client/AMQSession_0_10.java
index 9a7f5241a5..85c96bc3bb 100644
--- a/qpid/java/client/src/main/java/org/apache/qpid/client/AMQSession_0_10.java
+++ b/qpid/java/client/src/main/java/org/apache/qpid/client/AMQSession_0_10.java
@@ -1575,6 +1575,10 @@ public class AMQSession_0_10 extends AMQSession<BasicMessageConsumer_0_10, Basic
getQpidSession().exchangeUnbind(queue, exchange,
binding.getBindingKey());
}
+ }
+
+ void deleteSubscriptionQueue(AMQDestination dest) throws AMQException
+ {
// We need to delete the subscription queue.
if (dest.getAddressType() == AMQDestination.TOPIC_TYPE &&
dest.getLink().getSubscriptionQueue().isExclusive() &&
diff --git a/qpid/java/client/src/main/java/org/apache/qpid/client/BasicMessageConsumer_0_10.java b/qpid/java/client/src/main/java/org/apache/qpid/client/BasicMessageConsumer_0_10.java
index 902770d901..ef7b8cc217 100644
--- a/qpid/java/client/src/main/java/org/apache/qpid/client/BasicMessageConsumer_0_10.java
+++ b/qpid/java/client/src/main/java/org/apache/qpid/client/BasicMessageConsumer_0_10.java
@@ -510,6 +510,7 @@ public class BasicMessageConsumer_0_10 extends BasicMessageConsumer<UnprocessedM
dest.getDelete() == AddressOption.RECEIVER )
{
((AMQSession_0_10) getSession()).handleNodeDelete(dest);
+ ((AMQSession_0_10) getSession()).deleteSubscriptionQueue(dest);
}
// Subscription queue is handled as part of linkDelete method.
((AMQSession_0_10) getSession()).handleLinkDelete(dest);
diff --git a/qpid/java/client/src/main/java/org/apache/qpid/client/security/DynamicSaslRegistrar.java b/qpid/java/client/src/main/java/org/apache/qpid/client/security/DynamicSaslRegistrar.java
index 9198903408..b43229292f 100644
--- a/qpid/java/client/src/main/java/org/apache/qpid/client/security/DynamicSaslRegistrar.java
+++ b/qpid/java/client/src/main/java/org/apache/qpid/client/security/DynamicSaslRegistrar.java
@@ -28,8 +28,10 @@ import org.apache.qpid.util.FileUtils;
import javax.security.sasl.SaslClientFactory;
import java.io.IOException;
import java.io.InputStream;
+import java.security.Provider;
import java.security.Security;
import java.util.Enumeration;
+import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.TreeMap;
@@ -67,10 +69,10 @@ public class DynamicSaslRegistrar
}
/** Reads the properties file, and creates a dynamic security provider to register the SASL implementations with. */
- public static void registerSaslProviders()
+ public static ProviderRegistrationResult registerSaslProviders()
{
_logger.debug("public static void registerSaslProviders(): called");
-
+ ProviderRegistrationResult result = ProviderRegistrationResult.FAILED;
// Open the SASL properties file, using the default name if one is not specified.
String filename = System.getProperty(FILE_PROPERTY);
InputStream is =
@@ -89,22 +91,45 @@ public class DynamicSaslRegistrar
if (factories.size() > 0)
{
// Ensure we are used before the defaults
- if (Security.insertProviderAt(new JCAProvider(factories), 1) == -1)
+ JCAProvider qpidProvider = new JCAProvider(factories);
+ if (Security.insertProviderAt(qpidProvider, 1) == -1)
{
- _logger.error("Unable to load custom SASL providers.");
+ Provider registeredProvider = findProvider(JCAProvider.QPID_CLIENT_SASL_PROVIDER_NAME);
+ if (registeredProvider == null)
+ {
+ result = ProviderRegistrationResult.FAILED;
+ _logger.error("Unable to load custom SASL providers.");
+ }
+ else if (registeredProvider.equals(qpidProvider))
+ {
+ result = ProviderRegistrationResult.EQUAL_ALREADY_REGISTERED;
+ _logger.debug("Custom SASL provider is already registered with equal properties.");
+ }
+ else
+ {
+ result = ProviderRegistrationResult.DIFFERENT_ALREADY_REGISTERED;
+ _logger.warn("Custom SASL provider was already registered with different properties.");
+ if (_logger.isDebugEnabled())
+ {
+ _logger.debug("Custom SASL provider " + registeredProvider + " properties: " + new HashMap<Object, Object>(registeredProvider));
+ }
+ }
}
else
{
+ result = ProviderRegistrationResult.SUCCEEDED;
_logger.info("Additional SASL providers successfully registered.");
}
}
else
{
- _logger.warn("No additional SASL providers registered.");
+ result = ProviderRegistrationResult.NO_SASL_FACTORIES;
+ _logger.warn("No additional SASL factories found to register.");
}
}
catch (IOException e)
{
+ result = ProviderRegistrationResult.FAILED;
_logger.error("Error reading properties: " + e, e);
}
finally
@@ -122,6 +147,22 @@ public class DynamicSaslRegistrar
}
}
}
+ return result;
+ }
+
+ static Provider findProvider(String name)
+ {
+ Provider[] providers = Security.getProviders();
+ Provider registeredProvider = null;
+ for (Provider provider : providers)
+ {
+ if (name.equals(provider.getName()))
+ {
+ registeredProvider = provider;
+ break;
+ }
+ }
+ return registeredProvider;
}
/**
@@ -158,15 +199,24 @@ public class DynamicSaslRegistrar
continue;
}
- _logger.debug("Registering class "+ clazz.getName() +" for mechanism "+mechanism);
+ _logger.debug("Found class "+ clazz.getName() +" for mechanism "+mechanism);
factoriesToRegister.put(mechanism, (Class<? extends SaslClientFactory>) clazz);
}
catch (Exception ex)
{
- _logger.error("Error instantiating SaslClientFactory calss " + className + " - skipping");
+ _logger.error("Error instantiating SaslClientFactory class " + className + " - skipping");
}
}
return factoriesToRegister;
}
+
+ public static enum ProviderRegistrationResult
+ {
+ SUCCEEDED,
+ EQUAL_ALREADY_REGISTERED,
+ DIFFERENT_ALREADY_REGISTERED,
+ NO_SASL_FACTORIES,
+ FAILED;
+ }
}
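registerSaslProviders() now reports what happened instead of only logging it. For illustration, a hypothetical caller (the class below is not part of the patch) could branch on the returned ProviderRegistrationResult like this:

import org.apache.qpid.client.security.DynamicSaslRegistrar;
import org.apache.qpid.client.security.DynamicSaslRegistrar.ProviderRegistrationResult;

// Hypothetical caller of the new return value.
class SaslBootstrapSketch
{
    void ensureSaslProvidersRegistered()
    {
        ProviderRegistrationResult result = DynamicSaslRegistrar.registerSaslProviders();
        switch (result)
        {
            case SUCCEEDED:
            case EQUAL_ALREADY_REGISTERED:
                // a usable Qpid SASL provider is in place
                break;
            case DIFFERENT_ALREADY_REGISTERED:
                // another provider owns the name; its mechanisms may differ from ours
                break;
            case NO_SASL_FACTORIES:
            case FAILED:
                throw new IllegalStateException("Qpid client SASL provider not registered: " + result);
        }
    }
}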
diff --git a/qpid/java/client/src/main/java/org/apache/qpid/client/security/JCAProvider.java b/qpid/java/client/src/main/java/org/apache/qpid/client/security/JCAProvider.java
index 4a91f805f6..c9bcaf0d15 100644
--- a/qpid/java/client/src/main/java/org/apache/qpid/client/security/JCAProvider.java
+++ b/qpid/java/client/src/main/java/org/apache/qpid/client/security/JCAProvider.java
@@ -39,6 +39,11 @@ import java.util.Map;
*/
public class JCAProvider extends Provider
{
+ static final String QPID_CLIENT_SASL_PROVIDER_NAME = "AMQSASLProvider-Client";
+ static final String QPID_CLIENT_SASL_PROVIDER_INFO = "A JCA provider that registers all "
+ + "AMQ SASL providers that want to be registered";
+ static final double QPID_CLIENT_SASL_PROVIDER_VERSION = 1.0;
+
private static final Logger log = LoggerFactory.getLogger(JCAProvider.class);
/**
@@ -48,8 +53,7 @@ public class JCAProvider extends Provider
*/
public JCAProvider(Map<String, Class<? extends SaslClientFactory>> providerMap)
{
- super("AMQSASLProvider-Client", 1.0, "A JCA provider that registers all "
- + "AMQ SASL providers that want to be registered");
+ super(QPID_CLIENT_SASL_PROVIDER_NAME, QPID_CLIENT_SASL_PROVIDER_VERSION, QPID_CLIENT_SASL_PROVIDER_INFO);
register(providerMap);
}
@@ -63,7 +67,7 @@ public class JCAProvider extends Provider
for (Map.Entry<String, Class<? extends SaslClientFactory>> me : providerMap.entrySet())
{
put( "SaslClientFactory."+me.getKey(), me.getValue().getName());
- log.debug("Registered SASL Client factory for " + me.getKey() + " as " + me.getValue().getName());
+ log.debug("Recording SASL Client factory for " + me.getKey() + " as " + me.getValue().getName());
}
}
}
diff --git a/qpid/java/client/src/main/java/org/apache/qpid/client/state/AMQStateManager.java b/qpid/java/client/src/main/java/org/apache/qpid/client/state/AMQStateManager.java
index 0b6217ffce..ed75e1f4c3 100644
--- a/qpid/java/client/src/main/java/org/apache/qpid/client/state/AMQStateManager.java
+++ b/qpid/java/client/src/main/java/org/apache/qpid/client/state/AMQStateManager.java
@@ -157,12 +157,15 @@ public class AMQStateManager implements AMQMethodListener
if (_waiters.size() == 0)
{
- _logger.error("No Waiters for error saving as last error:" + error.getMessage());
+ _logger.info("No Waiters for error. Saving as last error:" + error.getMessage());
_lastException = error;
}
for (StateWaiter waiter : _waiters)
{
- _logger.error("Notifying Waiters(" + _waiters + ") for error:" + error.getMessage());
+ if(_logger.isDebugEnabled())
+ {
+ _logger.debug("Notifying waiter " + waiter + " for error:" + error.getMessage());
+ }
waiter.error(error);
}
}
diff --git a/qpid/java/client/src/test/java/org/apache/qpid/client/security/DynamicSaslRegistrarTest.java b/qpid/java/client/src/test/java/org/apache/qpid/client/security/DynamicSaslRegistrarTest.java
new file mode 100644
index 0000000000..4281984212
--- /dev/null
+++ b/qpid/java/client/src/test/java/org/apache/qpid/client/security/DynamicSaslRegistrarTest.java
@@ -0,0 +1,140 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+package org.apache.qpid.client.security;
+
+import java.io.File;
+import java.security.Provider;
+import java.security.Security;
+
+import org.apache.qpid.client.security.DynamicSaslRegistrar.ProviderRegistrationResult;
+import org.apache.qpid.test.utils.QpidTestCase;
+import org.apache.qpid.test.utils.TestFileUtils;
+
+public class DynamicSaslRegistrarTest extends QpidTestCase
+{
+ private Provider _registeredProvider;
+
+ public void setUp() throws Exception
+ {
+ super.setUp();
+
+ //If the client provider is already registered, remove it for the duration of the test
+ _registeredProvider = DynamicSaslRegistrar.findProvider(JCAProvider.QPID_CLIENT_SASL_PROVIDER_NAME);
+ if (_registeredProvider != null)
+ {
+ Security.removeProvider(JCAProvider.QPID_CLIENT_SASL_PROVIDER_NAME);
+ }
+ }
+
+ public void tearDown() throws Exception
+ {
+ //Remove any provider left behind by the test.
+ Security.removeProvider(JCAProvider.QPID_CLIENT_SASL_PROVIDER_NAME);
+ try
+ {
+ //If the client provider was already registered before the test, restore it.
+ if (_registeredProvider != null)
+ {
+ Security.insertProviderAt(_registeredProvider, 1);
+ }
+ }
+ finally
+ {
+ super.tearDown();
+ }
+ }
+
+ public void testRegisterDefaultProvider()
+ {
+ assertNull("Provider should not yet be registered", DynamicSaslRegistrar.findProvider(JCAProvider.QPID_CLIENT_SASL_PROVIDER_NAME));
+
+ ProviderRegistrationResult firstRegistrationResult = DynamicSaslRegistrar.registerSaslProviders();
+ assertEquals("Unexpected registration result", ProviderRegistrationResult.SUCCEEDED, firstRegistrationResult);
+ assertNotNull("Providers should now be registered", DynamicSaslRegistrar.findProvider(JCAProvider.QPID_CLIENT_SASL_PROVIDER_NAME));
+ }
+
+ public void testRegisterDefaultProviderTwice()
+ {
+ assertNull("Provider should not yet be registered", DynamicSaslRegistrar.findProvider(JCAProvider.QPID_CLIENT_SASL_PROVIDER_NAME));
+
+ DynamicSaslRegistrar.registerSaslProviders();
+ assertNotNull("Providers should now be registered", DynamicSaslRegistrar.findProvider(JCAProvider.QPID_CLIENT_SASL_PROVIDER_NAME));
+
+ ProviderRegistrationResult result = DynamicSaslRegistrar.registerSaslProviders();
+ assertEquals("Unexpected registration result when trying to re-register", ProviderRegistrationResult.EQUAL_ALREADY_REGISTERED, result);
+ assertNotNull("Providers should still be registered", DynamicSaslRegistrar.findProvider(JCAProvider.QPID_CLIENT_SASL_PROVIDER_NAME));
+ }
+
+ @SuppressWarnings("serial")
+ public void testRegisterDefaultProviderWhenAnotherIsAlreadyPresentWithDifferentFactories()
+ {
+ assertNull("Provider should not be registered", DynamicSaslRegistrar.findProvider(JCAProvider.QPID_CLIENT_SASL_PROVIDER_NAME));
+
+ //Add a test provider with the same name, version, and info as the default client provider, but with different factory properties (none).
+ Provider testProvider = new Provider(JCAProvider.QPID_CLIENT_SASL_PROVIDER_NAME,
+ JCAProvider.QPID_CLIENT_SASL_PROVIDER_VERSION,
+ JCAProvider.QPID_CLIENT_SASL_PROVIDER_INFO){};
+ Security.addProvider(testProvider);
+ assertSame("Test provider should be registered", testProvider, DynamicSaslRegistrar.findProvider(JCAProvider.QPID_CLIENT_SASL_PROVIDER_NAME));
+
+ //Try to register the default provider now that another with the same name etc (but different factories)
+ //is already registered, expect it not to be registered as a result.
+ ProviderRegistrationResult result = DynamicSaslRegistrar.registerSaslProviders();
+ assertEquals("Unexpected registration result", ProviderRegistrationResult.DIFFERENT_ALREADY_REGISTERED, result);
+
+ //Verify the test provider is still registered
+ assertSame("Test provider should still be registered", testProvider, DynamicSaslRegistrar.findProvider(JCAProvider.QPID_CLIENT_SASL_PROVIDER_NAME));
+ }
+
+ public void testRegisterWithNoFactories()
+ {
+ File emptyTempFile = TestFileUtils.createTempFile(this);
+
+ assertNull("Provider should not be registered", DynamicSaslRegistrar.findProvider(JCAProvider.QPID_CLIENT_SASL_PROVIDER_NAME));
+
+ //Adjust the location of the properties file to point at an empty file, so no factories are found to register.
+ setTestSystemProperty("amq.dynamicsaslregistrar.properties", emptyTempFile.getPath());
+
+ //Try to register the default provider, expect it not to be registered because there were no factories.
+ ProviderRegistrationResult result = DynamicSaslRegistrar.registerSaslProviders();
+ assertEquals("Unexpected registration result", ProviderRegistrationResult.NO_SASL_FACTORIES, result);
+
+ assertNull("Provider should not be registered", DynamicSaslRegistrar.findProvider(JCAProvider.QPID_CLIENT_SASL_PROVIDER_NAME));
+ }
+
+ public void testRegisterWithMissingFileGetsDefault()
+ {
+ //Create a temp file and then delete it, such that we get a path which doesn't exist
+ File tempFile = TestFileUtils.createTempFile(this);
+ assertTrue("Failed to delete file", tempFile.delete());
+
+ assertNull("Provider should not be registered", DynamicSaslRegistrar.findProvider(JCAProvider.QPID_CLIENT_SASL_PROVIDER_NAME));
+
+ //Adjust the location of the properties file to point at a non-existent file.
+ setTestSystemProperty("amq.dynamicsaslregistrar.properties", tempFile.getPath());
+
+ //Try to register the default provider, expect it to fall back to the default in the jar and succeed.
+ ProviderRegistrationResult result = DynamicSaslRegistrar.registerSaslProviders();
+ assertEquals("Unexpected registration result", ProviderRegistrationResult.SUCCEEDED, result);
+
+ assertNotNull("Provider should be registered", DynamicSaslRegistrar.findProvider(JCAProvider.QPID_CLIENT_SASL_PROVIDER_NAME));
+ }
+}
diff --git a/qpid/java/common.xml b/qpid/java/common.xml
index 8a4e0b81a7..ce5693fd28 100644
--- a/qpid/java/common.xml
+++ b/qpid/java/common.xml
@@ -45,7 +45,7 @@
<property name="build.report" location="${build}/report"/>
<property name="build.release" location="${build}/release"/>
<property name="build.release.prepare" location="${build.release}/prepare"/>
- <property name="build.lib.broker.plugins" location="${build}/lib/broker-plugins"/>
+ <property name="build.scratch.broker.plugins.lib" location="${build.scratch}/broker-plugins/lib"/>
<property name="build.coverage.report" location="${build}/coverage/report"/>
<property name="build.coverage.src" location="${build}/coverage/src"/>
<property name="build.findbugs" location="${build}/findbugs"/>
diff --git a/qpid/java/common/build.xml b/qpid/java/common/build.xml
index 9caf93c026..e599c840db 100644
--- a/qpid/java/common/build.xml
+++ b/qpid/java/common/build.xml
@@ -53,7 +53,15 @@
</target>
<target name="compile_gentools">
- <ant dir="${gentools.home}" />
+ <mkdir dir="${gentools.build}/classes"/>
+ <javac srcdir="${gentools.home}/src" destdir="${gentools.build}/classes" source="${java.source}" target="${java.target}" fork="true" includeantruntime="false">
+ <classpath>
+ <fileset dir="${project.root}">
+ <include name="${velocity.jar}"/>
+ <include name="${velocity-dep.jar}"/>
+ </fileset>
+ </classpath>
+ </javac>
</target>
<target name="check_gentool_deps">
@@ -64,15 +72,12 @@
<target name="gentools" depends="compile_gentools,check_gentool_deps" unless="gentools.notRequired">
<mkdir dir="${framing.generated.dir}"/>
- <java classname="org.apache.qpid.gentools.Main" fork="true" dir="${gentools.home}/src" failonerror="true">
+ <java classname="org.apache.qpid.gentools.Main" fork="true" dir="${gentools.build}/classes" failonerror="true">
<arg line='-j -o "${framing.generated.dir}" -t "${project.root}/common/templates" ${xml.spec.list}'/>
<classpath>
- <pathelement path="${gentools.home}/src" />
- <fileset dir="${gentools.home}/lib">
- <include name="**/*.jar"/>
- </fileset>
- <pathelement path="${gentools.home}/lib/velocity-1.4.jar" />
- <pathelement path="${gentools.home}/lib/velocity-dep-1.4.jar" />
+ <pathelement path="${gentools.build}/classes" />
+ <pathelement path="${project.root}/${velocity.jar}" />
+ <pathelement path="${project.root}/${velocity-dep.jar}" />
</classpath>
</java>
<touch file="${gentools.timestamp}" />
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/AmqpClass.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpClass.java
index 26195da2e3..26195da2e3 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/AmqpClass.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpClass.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/AmqpClassMap.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpClassMap.java
index a27a50d07e..a27a50d07e 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/AmqpClassMap.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpClassMap.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/AmqpConstant.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpConstant.java
index df5bc6c362..df5bc6c362 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/AmqpConstant.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpConstant.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/AmqpConstantSet.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpConstantSet.java
index ab8b8be61e..ab8b8be61e 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/AmqpConstantSet.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpConstantSet.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/AmqpDomain.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpDomain.java
index ba8552a6a6..ba8552a6a6 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/AmqpDomain.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpDomain.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/AmqpDomainMap.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpDomainMap.java
index 0cd9d214bd..0cd9d214bd 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/AmqpDomainMap.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpDomainMap.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/AmqpDomainVersionMap.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpDomainVersionMap.java
index e39550b96f..e39550b96f 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/AmqpDomainVersionMap.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpDomainVersionMap.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/AmqpField.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpField.java
index 7c721cf913..7c721cf913 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/AmqpField.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpField.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/AmqpFieldMap.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpFieldMap.java
index 0bb5e03a61..0bb5e03a61 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/AmqpFieldMap.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpFieldMap.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/AmqpFlagMap.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpFlagMap.java
index 5993a1b715..5993a1b715 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/AmqpFlagMap.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpFlagMap.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/AmqpMethod.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpMethod.java
index 4ec39b209e..4ec39b209e 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/AmqpMethod.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpMethod.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/AmqpMethodMap.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpMethodMap.java
index d98dab4a39..d98dab4a39 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/AmqpMethodMap.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpMethodMap.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/AmqpModel.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpModel.java
index 45f0adb18d..45f0adb18d 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/AmqpModel.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpModel.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/AmqpOrdinalFieldMap.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpOrdinalFieldMap.java
index 0633eff1e1..0633eff1e1 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/AmqpOrdinalFieldMap.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpOrdinalFieldMap.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/AmqpOrdinalVersionMap.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpOrdinalVersionMap.java
index fede88631a..fede88631a 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/AmqpOrdinalVersionMap.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpOrdinalVersionMap.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/AmqpOverloadedParameterMap.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpOverloadedParameterMap.java
index 10978d0e4a..10978d0e4a 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/AmqpOverloadedParameterMap.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpOverloadedParameterMap.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/AmqpParseException.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpParseException.java
index 3f3d4611fc..3f3d4611fc 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/AmqpParseException.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpParseException.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/AmqpTemplateException.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpTemplateException.java
index 1ac09ea453..1ac09ea453 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/AmqpTemplateException.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpTemplateException.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/AmqpTypeMappingException.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpTypeMappingException.java
index 127a8835b0..127a8835b0 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/AmqpTypeMappingException.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpTypeMappingException.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/AmqpVersion.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpVersion.java
index dbeef1b895..dbeef1b895 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/AmqpVersion.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpVersion.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/AmqpVersionSet.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpVersionSet.java
index 6419e23a1e..6419e23a1e 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/AmqpVersionSet.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/AmqpVersionSet.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/BitFieldGenerateMethod.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/BitFieldGenerateMethod.java
index d85510ee98..d85510ee98 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/BitFieldGenerateMethod.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/BitFieldGenerateMethod.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/CommandGenerateMethod.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/CommandGenerateMethod.java
index 641f50c3f8..641f50c3f8 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/CommandGenerateMethod.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/CommandGenerateMethod.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/ConsolidatedField.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/ConsolidatedField.java
index 9ab7eb178b..9ab7eb178b 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/ConsolidatedField.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/ConsolidatedField.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/CppGenerator.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/CppGenerator.java
index 4f58cba34e..4f58cba34e 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/CppGenerator.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/CppGenerator.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/DotnetGenerator.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/DotnetGenerator.java
index 9fc81dd428..9fc81dd428 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/DotnetGenerator.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/DotnetGenerator.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/GenerateMethod.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/GenerateMethod.java
index 8b0bb99b41..8b0bb99b41 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/GenerateMethod.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/GenerateMethod.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/Generator.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/Generator.java
index 5d6e7be527..5d6e7be527 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/Generator.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/Generator.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/JavaGenerator.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/JavaGenerator.java
index 7730fca1bd..7730fca1bd 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/JavaGenerator.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/JavaGenerator.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/LanguageConverter.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/LanguageConverter.java
index 5e692d86e7..5e692d86e7 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/LanguageConverter.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/LanguageConverter.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/Main.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/Main.java
index c0584f7ca7..c0584f7ca7 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/Main.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/Main.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/MangledGenerateMethod.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/MangledGenerateMethod.java
index ffeefed900..ffeefed900 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/MangledGenerateMethod.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/MangledGenerateMethod.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/NodeAware.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/NodeAware.java
index f832da75ad..f832da75ad 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/NodeAware.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/NodeAware.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/Printable.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/Printable.java
index aa13df7b68..aa13df7b68 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/Printable.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/Printable.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/SingleVersionClass.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/SingleVersionClass.java
index 8e1af1c551..8e1af1c551 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/SingleVersionClass.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/SingleVersionClass.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/SingleVersionField.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/SingleVersionField.java
index b795663d15..b795663d15 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/SingleVersionField.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/SingleVersionField.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/SingleVersionMethod.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/SingleVersionMethod.java
index 59a6d9e28a..59a6d9e28a 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/SingleVersionMethod.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/SingleVersionMethod.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/SingleVersionModel.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/SingleVersionModel.java
index 22b416e45a..22b416e45a 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/SingleVersionModel.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/SingleVersionModel.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/TargetDirectoryException.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/TargetDirectoryException.java
index 39ce666288..39ce666288 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/TargetDirectoryException.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/TargetDirectoryException.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/Utils.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/Utils.java
index 1cedaeea12..1cedaeea12 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/Utils.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/Utils.java
diff --git a/qpid/gentools/src/org/apache/qpid/gentools/VersionConsistencyCheck.java b/qpid/java/common/gentools/src/org/apache/qpid/gentools/VersionConsistencyCheck.java
index a9cdd56e88..a9cdd56e88 100644
--- a/qpid/gentools/src/org/apache/qpid/gentools/VersionConsistencyCheck.java
+++ b/qpid/java/common/gentools/src/org/apache/qpid/gentools/VersionConsistencyCheck.java
diff --git a/qpid/java/common/protocol-version.xml b/qpid/java/common/protocol-version.xml
deleted file mode 100644
index 5435a0a582..0000000000
--- a/qpid/java/common/protocol-version.xml
+++ /dev/null
@@ -1,70 +0,0 @@
-<!--
- -
- - Licensed to the Apache Software Foundation (ASF) under one
- - or more contributor license agreements. See the NOTICE file
- - distributed with this work for additional information
- - regarding copyright ownership. The ASF licenses this file
- - to you under the Apache License, Version 2.0 (the
- - "License"); you may not use this file except in compliance
- - with the License. You may obtain a copy of the License at
- -
- - http://www.apache.org/licenses/LICENSE-2.0
- -
- - Unless required by applicable law or agreed to in writing,
- - software distributed under the License is distributed on an
- - "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- - KIND, either express or implied. See the License for the
- - specific language governing permissions and limitations
- - under the License.
- -
- -->
-<project name="Qpid Common Protocol Versions" default="generate">
- <property name="topDirectoryLocation" location=".." />
- <property name="project.build.directory" location="target" />
- <property name="gentools.home" location="${topDirectoryLocation}/../gentools" />
- <property name="generated.path" location="${project.build.directory}/generated-sources/gentools" />
- <property name="generated.package" value="org/apache/qpid/framing" />
- <property name="generated.dir" location="${generated.path}/${generated.package}" />
- <property name="generated.timestamp" location="${generated.dir}/timestamp" />
- <property name="xml.spec.dir" location="${topDirectoryLocation}/../specs" />
- <property name="xml.spec.deps" value="amqp.0-8.xml amqp.0-9.xml amqp0-9-1.stripped.xml" />
- <property name="xml.spec.list" value="${xml.spec.dir}/amqp.0-8.xml ${xml.spec.dir}/amqp.0-9.xml ${xml.spec.dir}/amqp0-9-1.stripped.xml" />
- <property name="template.dir" value="${topDirectoryLocation}/common/templates" />
-
-
- <!--<target name="generate" depends="compile_generator,check_generate_deps" unless="generation.notRequired">-->
- <target name="generate" depends="compile_generator" unless="generation.notRequired">
- <mkdir dir="${generated.dir}"/>
- <java classname="org.apache.qpid.gentools.Main" fork="true" dir="${gentools.home}/src" failonerror="true">
- <arg line="-j -o ${generated.dir} -t ${template.dir} ${xml.spec.list}" />
- <classpath>
- <pathelement path="${gentools.home}/src" />
- <fileset dir="${gentools.home}/lib">
- <include name="**/*.jar"/>
- </fileset>
- <pathelement path="${gentools.home}/lib/velocity-1.4.jar" />
- <pathelement path="${gentools.home}/lib/velocity-dep-1.4.jar" />
- </classpath>
- </java>
- <touch file="${generated.timestamp}" />
- </target>
-
- <target name="check_generate_deps">
- <uptodate property="generation.notRequired" targetfile="${generated.timestamp}">
- <srcfiles dir="${xml.spec.dir}" includes="${xml.spec.deps}" />
- <srcfiles dir="${template.dir}" includes="**/*.vm **/*.tmpl" />
- </uptodate>
- </target>
-
- <target name="compile_generator">
- <ant dir="${gentools.home}" />
- </target>
-
- <target name="precompile" depends="generate"/>
-
- <target name="clean">
- <delete dir="${generated.path}" />
- </target>
-
-</project>
-
diff --git a/qpid/java/common/src/main/java/org/apache/qpid/transport/Session.java b/qpid/java/common/src/main/java/org/apache/qpid/transport/Session.java
index 95c3e4669f..d66415c659 100644
--- a/qpid/java/common/src/main/java/org/apache/qpid/transport/Session.java
+++ b/qpid/java/common/src/main/java/org/apache/qpid/transport/Session.java
@@ -25,7 +25,6 @@ import org.apache.qpid.configuration.ClientProperties;
import org.apache.qpid.transport.network.Frame;
import org.apache.qpid.transport.util.Logger;
import org.apache.qpid.transport.util.Waiter;
-
import static org.apache.qpid.transport.Option.COMPLETED;
import static org.apache.qpid.transport.Option.SYNC;
import static org.apache.qpid.transport.Option.TIMELY_REPLY;
@@ -414,7 +413,7 @@ public class Session extends SessionInvoker
if(log.isDebugEnabled())
{
- log.debug("ID: [%s] %s", this.channel, id);
+ log.debug("identify: ch=%s, commandId=%s", this.channel, id);
}
if ((id & 0xff) == 0)
@@ -443,7 +442,7 @@ public class Session extends SessionInvoker
{
if(log.isDebugEnabled())
{
- log.debug("%s processed([%d,%d]) %s %s", this, lower, upper, syncPoint, maxProcessed);
+ log.debug("%s ch=%s processed([%d,%d]) %s %s", this, channel, lower, upper, syncPoint, maxProcessed);
}
boolean flush;
@@ -451,7 +450,7 @@ public class Session extends SessionInvoker
{
if(log.isDebugEnabled())
{
- log.debug("%s", processed);
+ log.debug("%s processed: %s", this, processed);
}
if (ge(upper, commandsIn))
diff --git a/qpid/java/common/src/test/java/org/apache/qpid/test/utils/QpidTestCase.java b/qpid/java/common/src/test/java/org/apache/qpid/test/utils/QpidTestCase.java
index eeff291f9e..08f7387b75 100644
--- a/qpid/java/common/src/test/java/org/apache/qpid/test/utils/QpidTestCase.java
+++ b/qpid/java/common/src/test/java/org/apache/qpid/test/utils/QpidTestCase.java
@@ -41,6 +41,7 @@ public class QpidTestCase extends TestCase
public static final String QPID_HOME = System.getProperty("QPID_HOME");
public static final String TEST_RESOURCES_DIR = QPID_HOME + "/../test-profiles/test_resources/";
public static final String TMP_FOLDER = System.getProperty("java.io.tmpdir");
+ public static final String LOG4J_CONFIG_FILE_PATH = System.getProperty("log4j.configuration.file");
private static final Logger _logger = Logger.getLogger(QpidTestCase.class);
@@ -199,6 +200,8 @@ public class QpidTestCase extends TestCase
{
System.setProperty(property, value);
}
+
+ _logger.info("Set system property \"" + property + "\" to: \"" + value + "\"");
}
/**
diff --git a/qpid/java/ivy.retrieve.xml b/qpid/java/ivy.retrieve.xml
index 3ca38e39f8..5998a3e78e 100644
--- a/qpid/java/ivy.retrieve.xml
+++ b/qpid/java/ivy.retrieve.xml
@@ -69,6 +69,8 @@
<dependency org="org.eclipse.jetty" name="jetty-util" rev="7.6.3.v20120416" transitive="false"/>
<dependency org="org.dojotoolkit" name="dojo-war" rev="1.7.2" transitive="false"/>
<dependency org="xalan" name="xalan" rev="2.7.0" transitive="false"/>
+ <dependency org="velocity" name="velocity" rev="1.4" transitive="false"/>
+ <dependency org="velocity" name="velocity-dep" rev="1.4" transitive="false"/>
<!-- The following are optional dependencies, for modules providing optional functionlity or
for use in optional build/test steps. Their optional status is usually indicative of licences
diff --git a/qpid/java/jca/README-JBOSS-EAP6.txt b/qpid/java/jca/README-JBOSS-EAP6.txt
new file mode 100644
index 0000000000..219bfb6468
--- /dev/null
+++ b/qpid/java/jca/README-JBOSS-EAP6.txt
@@ -0,0 +1,183 @@
+Qpid JCA Resource Adapter
+
+JBoss EAP 6.x Installation and Configuration Instructions
+
+Overview
+========
+The Qpid Resource Adapter is a JCA 1.5 compliant resource adapter that allows
+Java EE applications to integrate with AMQP 0.10 message brokers.
+
+The adapter provides both outbound and inbound connectivity and
+exposes a variety of options to fine-tune your messaging applications.
+Currently the adapter only supports C++ based brokers and has only been tested with the Apache Qpid C++ broker.
+
+The following document explains how to configure the resource adapter for deployment in JBoss EAP 6.x.
+
+Deployment
+==========
+To deploy the Qpid JCA adapter in the JBoss EAP 6 environment, copy the qpid-ra-<version>.rar file
+to your JBoss deployment directory. By default this can be found at
+
+JBOSS_ROOT/<server-config>/deployments
+
+where JBOSS_ROOT denotes the root directory of your JBoss EAP 6.x installation and <server-config> denotes the
+particular server configuration for your application development. Currently, JBoss EAP 6 provides two configurations
+by default: standalone and domain. This documentation assumes the standalone server configuration, though the process
+to configure and deploy the Qpid JCA adapter is largely the same between the two. Assuming the standalone configuration,
+the deployment location above would be
+
+JBOSS_ROOT/standalone/deployments
+
+Note that, unlike prior versions of EAP, copying a RAR file to the deployment location does not automatically
+start and deploy the adapter. A separate manual configuration step is required, which is explained in the following
+section.
+
+Configuration
+=============
+The EAP 6.x environment uses an XML based configuration scheme that is fundamentally different from that of prior versions
+of EAP. As previously mentioned, EAP 6.x provides two server configuration types, standalone and domain. Each comes with
+a different set of configuration files that are tailored to varying types of server environments. Configuration locations
+can be found at
+
+JBOSS_ROOT/<server-config>/configuration
+
+The varying XML files are named
+
+<server-config>-full.xml
+<server-config>-full-ha.xml
+<server-config>.xml
+
+where each XML file denotes the capabilities of the server. This document assumes a minimal server configuration in
+the standalone server environment. While each configuration file provides a variety of options, this document is
+only concerned with the configuration of the JCA adapter. Please consult the EAP 6.x documentation for other
+options and configuration scenarios.
+
+The EAP 6.x infrastructure is built upon the notion of subsystems, where a subsystem generally corresponds
+to one particular piece of functionality. Typical examples are EJB, JAXR, etc. In order to configure the Qpid JCA adapter
+we need to modify the ejb and resource-adapters subsystems to tell EAP 6.x about our RAR deployment as well
+as which RAR will provide JMS provider functionality.
+
+Note, JCA in EAP 6.x involves two subsystems, jca and resource-adapters. The former subsystem provides capabilities for
+all JCA deployments (the JCA runtime environment), whereas the resource-adapters subsystem is particular to an individual JCA
+deployment. Here we are only concerned with the latter. Please consult the EAP 6.x documentation for more general JCA
+configuration options as well as other subsystems.
+
+Each subsystem is configured in an XML fragment and is versioned separately. Subsystem versions will change over
+time, so this document may not reflect the most current version, but the Qpid JCA configuration options remain
+unchanged regardless of subsystem version or release date.
+
+The following XML fragment replaces the default messaging provider in the EAP 6.x environment
+
+ <subsystem xmlns="urn:jboss:domain:ejb3:1.2">
+ <session-bean>
+ <stateless>
+ <bean-instance-pool-ref pool-name="slsb-strict-max-pool"/>
+ </stateless>
+ <stateful default-access-timeout="5000" cache-ref="simple"/>
+ <singleton default-access-timeout="5000"/>
+ </session-bean>
+ <mdb>
+ <resource-adapter-ref resource-adapter-name="qpid-ra-<rar-version>.rar"/>
+ <bean-instance-pool-ref pool-name="mdb-strict-max-pool"/>
+ </mdb>
+ <pools>
+ <bean-instance-pools>
+ <strict-max-pool name="slsb-strict-max-pool" max-pool-size="20" instance-acquisition-timeout="5" instance-acquisition-timeout-unit="MINUTES"/>
+ <strict-max-pool name="mdb-strict-max-pool" max-pool-size="20" instance-acquisition-timeout="5" instance-acquisition-timeout-unit="MINUTES"/>
+ </bean-instance-pools>
+ </pools>
+ <caches>
+ <cache name="simple" aliases="NoPassivationCache"/>
+ <cache name="passivating" passivation-store-ref="file" aliases="SimpleStatefulCache"/>
+ </caches>
+ <passivation-stores>
+ <file-passivation-store name="file"/>
+ </passivation-stores>
+ <async thread-pool-name="default"/>
+ <timer-service thread-pool-name="default">
+ <data-store path="timer-service-data" relative-to="jboss.server.data.dir"/>
+ </timer-service>
+ <remote connector-ref="remoting-connector" thread-pool-name="default"/>
+ <thread-pools>
+ <thread-pool name="default">
+ <max-threads count="10"/>
+ <keepalive-time time="100" unit="milliseconds"/>
+ </thread-pool>
+ </thread-pools>
+ </subsystem>
+
+The only real lines we are concerned with are
+
+ <mdb>
+ <resource-adapter-ref resource-adapter-name="qpid-ra-<rar-version>.rar"/>
+ <bean-instance-pool-ref pool-name="mdb-strict-max-pool"/>
+ </mdb>
+
+however, the complete fragment is provided for clarity.
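+
+To illustrate why the mdb element matters, a message-driven bean deployed to this server might look roughly like the
+following sketch. The class name, destination name and activation-config property names here are purely illustrative
+and must match your own destinations and the activation spec actually exposed by the Qpid RA:
+
+    import javax.ejb.ActivationConfigProperty;
+    import javax.ejb.MessageDriven;
+    import javax.jms.Message;
+    import javax.jms.MessageListener;
+
+    @MessageDriven(activationConfig = {
+        // illustrative property names/values; check the Qpid RA activation spec for the exact ones
+        @ActivationConfigProperty(propertyName = "destinationType", propertyValue = "javax.jms.Queue"),
+        @ActivationConfigProperty(propertyName = "destination", propertyValue = "hello.Queue")
+    })
+    public class HelloMDB implements MessageListener
+    {
+        public void onMessage(Message message)
+        {
+            // process the incoming AMQP message delivered through the Qpid JCA adapter
+        }
+    }
+
+Because the resource-adapter-ref element above makes the Qpid RAR the default adapter for MDBs, no per-bean resource
+adapter binding is needed in a sketch like this.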
+
+The following XML fragment provides a minimal example configuration in the EAP 6 environment. Here we are configuring
+an XA-aware ManagedConnectionFactory and two JMS destinations (a queue and a topic)
+
+ <subsystem xmlns="urn:jboss:domain:resource-adapters:1.0">
+ <resource-adapters>
+ <resource-adapter>
+ <archive>
+ qpid-ra-<rar-version>.rar
+ </archive>
+ <transaction-support>
+ XATransaction
+ </transaction-support>
+ <config-property name="connectionURL">
+ amqp://anonymous:passwd@client/test?brokerlist='tcp://localhost?sasl_mechs='PLAIN''
+ </config-property>
+ <config-property name="TransactionManagerLocatorClass">
+ org.apache.qpid.ra.tm.JBoss7TransactionManagerLocator
+ </config-property>
+ <config-property name="TransactionManagerLocatorMethod">
+ getTm
+ </config-property>
+ <connection-definitions>
+ <connection-definition class-name="org.apache.qpid.ra.QpidRAManagedConnectionFactory" jndi-name="QpidJMSXA" pool-name="QpidJMSXA">
+ <config-property name="connectionURL">
+ amqp://anonymous:passwd@client/test?brokerlist='tcp://localhost?sasl_mechs='PLAIN''
+ </config-property>
+ <config-property name="SessionDefaultType">
+ javax.jms.Queue
+ </config-property>
+ </connection-definition>
+ </connection-definitions>
+ <admin-objects>
+ <admin-object class-name="org.apache.qpid.ra.admin.QpidTopicImpl" jndi-name="java:jboss/exported/GoodByeTopic" use-java-context="false" pool-name="GoodByeTopic">
+ <config-property name="DestinationAddress">
+ amq.topic/hello.Topic
+ </config-property>
+ </admin-object>
+ <admin-object class-name="org.apache.qpid.ra.admin.QpidQueueImpl" jndi-name="java:jboss/exported/HelloQueue" use-java-context="false" pool-name="HelloQueue">
+ <config-property name="DestinationAddress">
+ hello.Queue;{create:always, node:{type:queue, x-declare:{auto-delete:true}}}
+ </config-property>
+ </admin-object>
+ </admin-objects>
+ </resource-adapter>
+ </resource-adapters>
+ </subsystem>
+
+
+
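+As a usage sketch (not part of the adapter itself), server-side code such as a servlet or session bean could then look
+up the connection factory and destination configured above and send a message. The JNDI names below assume the
+QpidJMSXA and HelloQueue entries from the fragment; the exact java:/ or java:jboss/ prefix to use depends on how your
+server binds them:
+
+    import javax.jms.Connection;
+    import javax.jms.ConnectionFactory;
+    import javax.jms.MessageProducer;
+    import javax.jms.Queue;
+    import javax.jms.Session;
+    import javax.naming.InitialContext;
+
+    public class HelloSender
+    {
+        public void send(String text) throws Exception
+        {
+            InitialContext ctx = new InitialContext();
+            // JNDI names taken from the resource-adapter fragment above; adjust prefixes to your bindings
+            ConnectionFactory cf = (ConnectionFactory) ctx.lookup("QpidJMSXA");
+            Queue queue = (Queue) ctx.lookup("java:jboss/exported/HelloQueue");
+
+            Connection connection = cf.createConnection();
+            try
+            {
+                Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
+                MessageProducer producer = session.createProducer(queue);
+                producer.send(session.createTextMessage(text));
+            }
+            finally
+            {
+                connection.close();
+            }
+        }
+    }
+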
+Note that while this document assumes you are modifying the standalone.xml file directly, an alternative approach would be
+to make a copy of the file, apply the modifications above, and start the EAP instance with the new configuration
+
+JBOSS_HOME/bin/standalone.sh -c your-modified-config.xml
+
+Regardless of the approach you use, once the modifications have been made you can start your EAP 6.x instance and the Qpid JCA
+adapter will be deployed and ready for use. If properly deployed and configured, you should see something in the log files or console
+resembling the following:
+
+INFO [org.apache.qpid.ra.QpidResourceAdapter] (MSC service thread 1-4) Qpid resource adapter started
+
+
+Notes
+=====
+While the differences between the EAP 5.x and 6.x environments may appear to be dramatic, the configuration options and functionality of the Qpid
+JCA adapter are not. The README.txt file outlines general configuration options that remain unchanged between the respective EAP environments.
+
diff --git a/qpid/java/jca/README-JBOSS.txt b/qpid/java/jca/README-JBOSS.txt
index 77bf91e6dd..e88643e0f2 100644
--- a/qpid/java/jca/README-JBOSS.txt
+++ b/qpid/java/jca/README-JBOSS.txt
@@ -61,7 +61,7 @@ XA ConnectionFactory
<xa-transaction/>
<rar-name>qpid-ra-<ra-version>.rar</rar-name>
<connection-definition>org.apache.qpid.ra.QpidRAConnectionFactory</connection-definition>
- <config-property name="connectionURL">amqp://guest:guest@/test?brokerlist='tcp://localhost:5672?sasl_mechs='ANONYMOUS''</config-property>
+ <config-property name="ConnectionURL">amqp://guest:guest@/test?brokerlist='tcp://localhost:5672?sasl_mechs='PLAIN''</config-property>
<max-pool-size>20</max-pool-size>
</tx-connection-factory>
@@ -79,11 +79,11 @@ Local ConnectionFactory
=======================
<tx-connection-factory>
<jndi-name>QpidJMS</jndi-name>
- <rar-name>qpid-ra-0.10.rar</rar-name>
+ <rar-name>qpid-ra-<ra-version>.rar</rar-name>
<local-transaction/>
- <config-property name="useLocalTx" type="java.lang.Boolean">true</config-property>
- <config-property name="connectionURL">amqp://anonymous:@client/test?brokerlist='tcp://localhost:5672?sasl_mechs='ANONYMOUS''</config-property>
- <connection-definition>org.apache.qpid.ra.QpidRAConnectionFactory</connection-definition>
+ <config-property name="UseLocalTx" type="java.lang.Boolean">true</config-property>
+    <config-property name="ConnectionURL">amqp://anonymous:@client/test?brokerlist='tcp://localhost:5672?sasl_mechs='PLAIN''</config-property>
+    <connection-definition>org.apache.qpid.ra.QpidRAConnectionFactory</connection-definition>
<max-pool-size>20</max-pool-size>
</tx-connection-factory>
@@ -100,11 +100,10 @@ provides two such objects
<mbean code="org.jboss.resource.deployment.AdminObject"
name="qpid.jca:name=HelloQueue">
<attribute name="JNDIName">Hello</attribute>
- <depends optional-attribute-name="RARName">jboss.jca:service=RARDeployment,name='qpid-ra-0.10.rar'</depends>
+ <depends optional-attribute-name="RARName">jboss.jca:service=RARDeployment,name='qpid-ra-<ra-version>.rar'</depends>
<attribute name="Type">javax.jms.Destination</attribute>
<attribute name="Properties">
- destinationType=QUEUE
- destinationAddress=amq.direct
+ DestinationAddress=amq.direct
</attribute>
</mbean>
@@ -113,16 +112,15 @@ The above XML defines a JMS Queue which is bound into JNDI as
queue/HelloQueue
This destination can be retrieved from JNDI and be used for the consumption or production of messages. The desinationAddress property
-can be customized for your environment. Please see the Qpid Java Client documentation for specific configuration options.
+can be customized for your environment. Please see the Qpid Java Client documentation for specific configuration options.
<mbean code="org.jboss.resource.deployment.AdminObject"
name="qpid.jca:name=HelloTopic">
<attribute name="JNDIName">HelloTopic</attribute>
- <depends optional-attribute-name="RARName">jboss.jca:service=RARDeployment,name='qpid-ra-0.10.rar'</depends>
+ <depends optional-attribute-name="RARName">jboss.jca:service=RARDeployment,name='qpid-ra-<ra-version>.rar'</depends>
<attribute name="Type">javax.jms.Destination</attribute>
<attribute name="Properties">
- destinationType=TOPIC
- destinationAddress=amq.topic
+ DestinationAddress=amq.topic
</attribute>
</mbean>
@@ -138,10 +136,10 @@ can be customized for your environment. Please see the Qpid Java Client document
<mbean code="org.jboss.resource.deployment.AdminObject"
name="qpid.jca:name=QpidConnectionFactory">
<attribute name="JNDIName">QpidConnectionFactory</attribute>
- <depends optional-attribute-name="RARName">jboss.jca:service=RARDeployment,name='qpid-ra-0.10.rar'</depends>
+ <depends optional-attribute-name="RARName">jboss.jca:service=RARDeployment,name='qpid-ra-<ra-version>.rar'</depends>
<attribute name="Type">javax.jms.ConnectionFactory</attribute>
<attribute name="Properties">
- connectionURL=amqp://anonymous:@client/test?brokerlist='tcp://localhost:5672?sasl_mechs='ANONYMOUS''
+ ConnectionURL=amqp://anonymous:@client/test?brokerlist='tcp://localhost:5672?sasl_mechs='PLAIN''
</attribute>
</mbean>
diff --git a/qpid/java/jca/build.xml b/qpid/java/jca/build.xml
index 768ff54bff..42a19ff83a 100644
--- a/qpid/java/jca/build.xml
+++ b/qpid/java/jca/build.xml
@@ -25,26 +25,23 @@
<!-- Hack to make the renamed module jars available on the module test classpath -->
<property name="module.test.depends" value="ra ra/tests"/>
- <import file="../module.xml"/>
-
- <property name="module.rar" value="${build.lib}/${project.name}-ra-${project.version}.rar"/>
-
- <property name="module.resources" value="src/main/resources"/>
+ <!-- Import common.xml to make the properties it defines available before importing module.xml -->
+ <import file="../common.xml"/>
- <target name="rar" depends="jar.nomanifest">
+ <!-- Override the standard output jar names before importing module.xml, to produce
+ artifacts that use ra in the name instead of jca like the module should -->
+ <property name="module.test.jar" value="${build.lib}/${project.name}-ra-tests-${project.version}.jar"/>
+ <property name="module.jar" value="${build.lib}/${project.name}-ra-${project.version}.jar"/>
+ <property name="module.source.jar" value="${build.lib}/${project.name}-ra-${project.version}-sources.jar"/>
- <!--Note we need to do this as we need to keep the ra in the name of the artificats but we can't override the module.jar property which is based on the directory name. Also, we re-jar to avoid duplicate *.xml files in the jar and the rar which is causing EAP deployment issues in later versions.-->
- <delete file="${build.lib}/${project.name}-${module.name}-${project.version}.jar"/>
+ <import file="../module.xml"/>
- <jar destfile="${module.jar}" basedir="${module.classes}">
- <metainf dir="${module.metainf}" >
- <exclude name="**/*.xml"/>
- </metainf>
- </jar>
- <move file="${build.lib}/${project.name}-${module.name}-${project.version}.jar" tofile="${build.lib}/${project.name}-ra-${project.version}.jar"/>
+ <property name="module.rar" value="${build.lib}/${project.name}-ra-${project.version}.rar"/>
+ <property name="rar.resources" value="rar/src/main/resources"/>
+ <target name="rar" depends="jar" description="creates a rar file containing the module jar, client jars, etc">
<jar destfile="${module.rar}">
- <fileset dir="${module.resources}">
+ <fileset dir="${rar.resources}">
<include name="**/*.xml"/>
</fileset>
<fileset dir="${build.lib}">
@@ -78,11 +75,9 @@
<target name="examples" depends="example-properties-file, example-jars"/>
- <target name="build" depends="rar, examples, jar-tests, jar-sources, postbuild"/>
+ <target name="postbuild" depends="rar, examples"/>
- <target name="postbuild">
- <!-- Hack to make the tests/sources jar names match the renamed main module jar/rar -->
- <move file="${module.test.jar}" tofile="${build.lib}/${project.name}-ra-tests-${project.version}.jar"/>
- <move file="${module.source.jar}" tofile="${build.lib}/${project.name}-ra-${project.version}-sources.jar"/>
- </target>
+ <!-- Override module.xml 'libs' target to avoid copying the jar files dependencies
+ into the 'build/lib' dir, since they will be supplied by the app server -->
+ <target name="libs"/>
</project>
diff --git a/qpid/java/jca/src/main/resources/META-INF/jboss-ra.xml b/qpid/java/jca/rar/src/main/resources/META-INF/jboss-ra.xml
index f459b1efc1..f459b1efc1 100644
--- a/qpid/java/jca/src/main/resources/META-INF/jboss-ra.xml
+++ b/qpid/java/jca/rar/src/main/resources/META-INF/jboss-ra.xml
diff --git a/qpid/java/jca/src/main/resources/META-INF/ra.xml b/qpid/java/jca/rar/src/main/resources/META-INF/ra.xml
index a9374f52d7..a9374f52d7 100755
--- a/qpid/java/jca/src/main/resources/META-INF/ra.xml
+++ b/qpid/java/jca/rar/src/main/resources/META-INF/ra.xml
diff --git a/qpid/java/jca/src/main/java/org/apache/qpid/ra/admin/QpidConnectionFactoryProxy.java b/qpid/java/jca/src/main/java/org/apache/qpid/ra/admin/QpidConnectionFactoryProxy.java
index a948948d6a..d7ca29e04a 100644
--- a/qpid/java/jca/src/main/java/org/apache/qpid/ra/admin/QpidConnectionFactoryProxy.java
+++ b/qpid/java/jca/src/main/java/org/apache/qpid/ra/admin/QpidConnectionFactoryProxy.java
@@ -27,13 +27,17 @@ import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.io.Serializable;
-import javax.jms.ConnectionFactory;
import javax.jms.Connection;
+import javax.jms.ConnectionFactory;
import javax.jms.JMSException;
import javax.naming.NamingException;
+import javax.jms.QueueConnection;
+import javax.jms.QueueConnectionFactory;
import javax.naming.Reference;
import javax.naming.Referenceable;
import javax.naming.spi.ObjectFactory;
+import javax.jms.TopicConnection;
+import javax.jms.TopicConnectionFactory;
import org.apache.qpid.client.AMQConnectionFactory;
@@ -44,7 +48,7 @@ import org.slf4j.LoggerFactory;
*
*
*/
-public class QpidConnectionFactoryProxy implements Externalizable, Referenceable, ConnectionFactory, Serializable
+public class QpidConnectionFactoryProxy implements QueueConnectionFactory, TopicConnectionFactory, Externalizable, Referenceable, Serializable
{
private static final Logger _log = LoggerFactory.getLogger(QpidDestinationProxy.class);
@@ -100,13 +104,6 @@ public class QpidConnectionFactoryProxy implements Externalizable, Referenceable
try
{
_delegate = new AMQConnectionFactory(getConnectionURL());
- /*
- QpidResourceAdapter ra = new QpidResourceAdapter();
- QpidRAManagedConnectionFactory mcf = new QpidRAManagedConnectionFactory();
- mcf.setResourceAdapter(ra);
- mcf.setConnectionURL(getConnectionURL());
- delegate = new QpidRAConnectionFactoryImpl(mcf, null);
- */
return ((Referenceable) _delegate).getReference();
}
catch(Exception e)
@@ -162,7 +159,63 @@ public class QpidConnectionFactoryProxy implements Externalizable, Referenceable
*/
public Connection createConnection(final String userName, final String password) throws JMSException
{
- return _delegate.createConnection(userName, password);
+ try
+ {
+ if(_delegate == null)
+ {
+ getReference();
+ }
+
+ return _delegate.createConnection(userName, password);
+ }
+ catch(Exception e)
+ {
+ throw new JMSException(e.getMessage());
+ }
+ }
+
+ /**
+ * Create a queue connection
+ * @return The queue connection
+ * @exception JMSException Thrown if the operation fails
+ */
+ public QueueConnection createQueueConnection() throws JMSException
+ {
+ return (QueueConnection)createConnection();
+ }
+
+ /**
+ * Create a queue connection
+ * @param userName The user name
+ * @param password The password
+ * @return The connection
+ * @exception JMSException Thrown if the operation fails
+ */
+ public QueueConnection createQueueConnection(final String userName, final String password) throws JMSException
+ {
+ return (QueueConnection)createConnection(userName, password);
+ }
+
+ /**
+ * Create a topic connection
+ * @return The topic connection
+ * @exception JMSException Thrown if the operation fails
+ */
+ public TopicConnection createTopicConnection() throws JMSException
+ {
+ return (TopicConnection)createConnection();
+ }
+
+ /**
+ * Create a topic connection
+ * @param userName The user name
+ * @param password The password
+ * @return The topic connection
+ * @exception JMSException Thrown if the operation fails
+ */
+ public TopicConnection createTopicConnection(final String userName, final String password) throws JMSException
+ {
+ return (TopicConnection)createConnection(userName, password);
}
}
diff --git a/qpid/java/module.xml b/qpid/java/module.xml
index 57797d0da1..9146403d04 100644
--- a/qpid/java/module.xml
+++ b/qpid/java/module.xml
@@ -55,9 +55,11 @@
<property name="module.test.src" location="src/test/java"/>
<property name="module.bin" location="bin"/>
<property name="module.etc" location="etc"/>
+
<property name="module.src.resources" location="src/main/resources"/>
<property name="module.src.resources.metainf" location="${module.src.resources}/META-INF"/>
- <property name="module.metainf" location="${module.build}/META-INF"/>
+ <property name="module.resources.dir" location="resources/"/>
+ <property name="module.metainf" location="${module.build}/META-INF"/>
<property name="module.namever" value="${project.name}-${module.name}-${project.version}"/>
<property name="module.namever.osgi" value="${project.name}-${module.name}_${project.version}.0.osgi"/>
@@ -89,6 +91,7 @@
<available property="module.src.resources.exists" file="${module.src.resources}"/>
<available property="module.test.src.resources.exists" file="${module.test.resources}"/>
<available property="module.src.resources.metainf.exists" file="${module.src.resources.metainf}"/>
+ <available property="module.resources.dir.exists" file="${module.resources.dir}"/>
<property name="module.source.jar"
location="${build.lib}/${project.name}-${module.name}-${project.version}-sources.jar"/>
@@ -241,7 +244,7 @@
<mkdir dir="${build.etc}"/>
<mkdir dir="${build.lib}"/>
<mkdir dir="${build.results}"/>
- <mkdir dir="${build.lib.broker.plugins}"/>
+ <mkdir dir="${build.scratch.broker.plugins.lib}"/>
<mkdir dir="${module.classes}"/>
<mkdir dir="${module.precompiled}"/>
<mkdir dir="${module.api}"/>
@@ -369,9 +372,8 @@
<map property="_profile_files" value="${profiles}" join=" ">
<globmapper from="*" to="*.testprofile"/>
</map>
-
- <delete file="${build.scratch}/test-${profile}.properties" quiet="true"/>
- <concat destfile="${build.scratch}/test-${profile}.properties" force="no" fixlastline="yes">
+
+ <concat destfile="${build.scratch}/test-${profile}.properties" force="yes" append="no" fixlastline="yes">
<filelist dir="${test.profiles}" files="testprofile.defaults"/>
<filelist dir="${test.profiles}" files="${_profile_files}"/>
</concat>
@@ -406,6 +408,7 @@
<propertyref name="log4j.debug"/>
<propertyref name="log4j.configuration"/>
+ <propertyref name="log4j.configuration.file"/>
<propertyref name="root.logging.level"/>
<propertyref name="java.naming.factory.initial"/>
@@ -502,25 +505,27 @@
<target name="build" depends="jar,jar-tests,jar-sources,libs,copy-bin,copy-etc,postbuild,copy-broker-plugin-jars" description="compile and copy resources into build tree"/>
- <target name="jar.manifest" depends="compile, copy-resources, copy-module-metainf" if="module.manifest">
+ <target name="jar.manifest" depends="compile,copy-resources,copy-files-to-module-metainf" if="module.manifest">
<jar destfile="${module.jar}" basedir="${module.classes}" manifest="${module.manifest}">
<metainf dir="${module.metainf}" />
</jar>
</target>
- <target name="jar.nomanifest" depends="compile, copy-resources, copy-module-metainf" unless="module.manifest">
+ <target name="jar.nomanifest" depends="compile,copy-resources,copy-files-to-module-metainf" unless="module.manifest">
<jar destfile="${module.jar}" basedir="${module.classes}">
<metainf dir="${module.metainf}" />
</jar>
</target>
- <target name="copy-broker-plugin-jars" if="${broker.plugin}" description="copy broker plugins for use in release packaging">
- <copy file="${module.jar}" todir="${build.lib.broker.plugins}" failonerror="true"/>
+ <target name="copy-broker-plugin-jars" if="broker.plugin" description="copy broker plugins for use in release packaging">
+ <copy file="${module.jar}" todir="${build.scratch.broker.plugins.lib}" failonerror="true"/>
</target>
- <target name="copy-module-metainf" depends="copy-metainf-resources" if="module.src.resources.metainf.exists">
+ <target name="copy-files-to-module-metainf" depends="copy-project-resources-metainf, copy-module-resources-metainf, copy-module-src-resources-metainf"/>
+
+ <target name="copy-project-resources-metainf">
<copy todir="${module.metainf}" failonerror="true">
- <fileset dir="${module.src.resources.metainf}"/>
+ <fileset dir="${project.root}/resources/"/>
</copy>
</target>
@@ -531,12 +536,15 @@
</copy>
</target>
- <target name="copy-metainf-resources">
- <copy todir="${module.metainf}" failonerror="true">
- <fileset dir="${project.root}/resources/"/>
- </copy>
+ <target name="copy-module-resources-metainf" if="module.resources.dir.exists">
<copy todir="${module.metainf}" failonerror="false" overwrite="true">
- <fileset dir="resources/"/>
+ <fileset dir="${module.resources.dir}"/>
+ </copy>
+ </target>
+
+ <target name="copy-module-src-resources-metainf" if="module.src.resources.metainf.exists">
+ <copy todir="${module.metainf}" failonerror="true">
+ <fileset dir="${module.src.resources.metainf}"/>
</copy>
</target>
@@ -809,7 +817,8 @@ qpid.name=${project.name}
-->
- <property name="gentools.home" location="${project.root}/../gentools" />
+ <property name="gentools.home" location="${project.root}/common/gentools" />
+ <property name="gentools.build" location="${build.scratch}/common/gentools" />
<property name="generated.dir" location="${module.precompiled}" />
<property name="velocity.compile.dir" value="${build.scratch}/broker/velocity"/>
<property name="velocity.timestamp" location="${generated.dir}/velocity.timestamp" />
@@ -822,7 +831,7 @@ qpid.name=${project.name}
deprecation="${javac.deprecation}"
srcdir="${project.root}/broker/src/velocity/java" >
<classpath>
- <pathelement path="${gentools.home}/lib/velocity-1.4.jar" />
+ <pathelement path="${project.root}/${velocity.jar}" />
</classpath>
<compilerarg line="${javac.compiler.args}"/>
</javac>
@@ -852,7 +861,7 @@ qpid.name=${project.name}
<echo message="logmessages is ${logmessages}"/>
- <java classname="org.apache.qpid.server.logging.GenerateLogMessages" fork="true" dir="${gentools.home}/src" failonerror="true">
+ <java classname="org.apache.qpid.server.logging.GenerateLogMessages" fork="true" dir="${gentools.build}/classes" failonerror="true">
<arg line="'${logmessages}'"/>
<arg value="-j"/>
<arg value="-o"/>
@@ -869,7 +878,7 @@ qpid.name=${project.name}
<fileset dir="${project.root}/lib/required">
<include name="**/*.jar"/>
</fileset>
- <pathelement path="${gentools.home}/lib/velocity-1.4.jar" />
+ <pathelement path="${project.root}/${velocity.jar}" />
</classpath>
</java>
<touch file="${velocity.timestamp}" />
diff --git a/qpid/java/perftests/etc/chartdefs/1030-BatchSize.chartdef b/qpid/java/perftests/etc/chartdefs/1030-BatchSize-Equal.chartdef
index 17d15fbf81..97b712e027 100644
--- a/qpid/java/perftests/etc/chartdefs/1030-BatchSize.chartdef
+++ b/qpid/java/perftests/etc/chartdefs/1030-BatchSize-Equal.chartdef
@@ -4,35 +4,34 @@
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
+# 'License'); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
chartType=XYLINE
-chartTitle=Transaction Batch Size
+chartTitle=Transaction Batch Sizes Equal
chartSubtitle=Persistent 1KB messages
-chartDescription=1P 1C, persistent, transacted with message payload 1KB and batch sizes 1-100 messages
+chartDescription=1P 1C, persistent, transacted with message payload 1KB with producer/consumer batch size varying between 1-400 messages for both P and C
xAxisTitle=Batch Size
yAxisTitle=Throughput (messages/s)
series.1.statement=SELECT batchSize, throughputMessagesPerS FROM BatchSize WHERE participantName = 'All'
-series.1.legend=Current
+series.1.legend=Equal Producer/Consumer
series.1.dir=${csvCurrentDir}
series.1.colourName=red
series.2.statement=SELECT batchSize, throughputMessagesPerS FROM BatchSize WHERE participantName = 'All'
-series.2.legend=Baseline
+series.2.legend=Equal Producer/Consumer (Baseline)
series.2.dir=${csvBaselineDir}
series.2.colourName=dark_red
series.2.strokeWidth=-1
-
diff --git a/qpid/java/perftests/etc/chartdefs/1031-BatchSize-Unequal.chartdef b/qpid/java/perftests/etc/chartdefs/1031-BatchSize-Unequal.chartdef
new file mode 100644
index 0000000000..51b3bb2144
--- /dev/null
+++ b/qpid/java/perftests/etc/chartdefs/1031-BatchSize-Unequal.chartdef
@@ -0,0 +1,53 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# 'License'); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+chartType=XYLINE
+chartTitle=Transaction Batch Size Unequal
+chartSubtitle=Persistent 1KB messages
+chartDescription=1P 1C, persistent, transacted with message payload 1KB with fixed batch size 1 for one party whilst the other varies between 1-400 messages
+
+xAxisTitle=Batch Size
+yAxisTitle=Throughput (messages/s)
+
+#
+# If csvjdbc could do sub-selects (allowing us to extract the consumer/producer batch size from the All Consumers/All Producers rows),
+# we would not need the workaround where we have testdef place the consumer/producer batch size into testName field
+#
+
+series.1.statement=SELECT testName, throughputMessagesPerS FROM BatchSizeProducerVaries WHERE participantName = 'All'
+series.1.legend=Variable Producer, Fixed Consumer
+series.1.dir=${csvCurrentDir}
+series.1.colourName=red
+
+series.2.statement=SELECT testName, throughputMessagesPerS FROM BatchSizeProducerVaries WHERE participantName = 'All'
+series.2.legend=Variable Producer, Fixed Consumer (Baseline)
+series.2.dir=${csvBaselineDir}
+series.2.colourName=dark_red
+series.2.strokeWidth=-1
+
+series.3.statement=SELECT testName, throughputMessagesPerS FROM BatchSizeConsumerVaries WHERE participantName = 'All'
+series.3.legend=Fixed Producer, Variable Consumer
+series.3.dir=${csvCurrentDir}
+series.3.colourName=blue
+
+series.4.statement=SELECT testName, throughputMessagesPerS FROM BatchSizeConsumerVaries WHERE participantName = 'All'
+series.4.legend=Fixed Producer, Variable Consumer (Baseline)
+series.4.dir=${csvBaselineDir}
+series.4.colourName=dark_blue
+series.4.strokeWidth=-1
diff --git a/qpid/java/perftests/etc/testdefs/BatchSize.js b/qpid/java/perftests/etc/testdefs/BatchSize.js
new file mode 100644
index 0000000000..f17751b7b5
--- /dev/null
+++ b/qpid/java/perftests/etc/testdefs/BatchSize.js
@@ -0,0 +1,102 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+var jsonObject = {
+ _tests:[]
+};
+
+var duration = 30000;
+
+var txBatchSizes = [[1,1], [2,2], [5,5], [10,10], [20,20], [50,50], [100,100], [200,200], [400,400]];
+
+var acknowledgeMode = 0;
+var deliveryMode = 2;
+var messageSize = 1024;
+
+for(var i=0; i < txBatchSizes.length; i++)
+{
+ var producerBatchSize = txBatchSizes[i][0];
+ var consumerBatchSize = txBatchSizes[i][1];
+ var queueName = "txBatchSize" + producerBatchSize + "_" + consumerBatchSize;
+ var destination = "direct://amq.direct//" + queueName + "?durable='true'";
+
+ var test = {
+ "_name": "Batch Size " + producerBatchSize + "-" + consumerBatchSize + " - PERSISTENT",
+ "_queues":[
+ {
+ "_name": queueName,
+ "_durable": true
+ }
+ ],
+ "_clients":[
+ {
+ "_name": "producingClient",
+ "_connections":[
+ {
+ "_name": "connection1",
+ "_factory": "connectionfactory",
+ "_sessions": [
+ {
+ "_sessionName": "session1",
+ "_acknowledgeMode": acknowledgeMode,
+ "_producers": [
+ {
+ "_name": "Producer1",
+ "_destinationName": destination,
+ "_messageSize": messageSize,
+ "_deliveryMode": deliveryMode,
+ "_batchSize": producerBatchSize,
+ "_maximumDuration": duration
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "_name": "consumingClient",
+ "_connections":[
+ {
+ "_name": "connection1",
+ "_factory": "connectionfactory",
+ "_sessions": [
+ {
+ "_sessionName": "session1",
+ "_acknowledgeMode": acknowledgeMode,
+ "_consumers": [
+ {
+ "_name": "Consumer1",
+ "_destinationName": destination,
+ "_batchSize": consumerBatchSize,
+ "_maximumDuration": duration
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ };
+
+ jsonObject._tests= jsonObject._tests.concat(test);
+}
diff --git a/qpid/java/perftests/etc/testdefs/BatchSize.json b/qpid/java/perftests/etc/testdefs/BatchSize.json
deleted file mode 100644
index eeb446bad6..0000000000
--- a/qpid/java/perftests/etc/testdefs/BatchSize.json
+++ /dev/null
@@ -1,84 +0,0 @@
-{
- "_tests":[
- {
- "_name": "Batch Size- PERSISTENT";
- "_iterations":[
- {
- "_batchSize": 1
- },
- {
- "_batchSize": 2
- },
- {
- "_batchSize": 5
- },
- {
- "_batchSize": 10
- },
- {
- "_batchSize": 20
- },
- {
- "_batchSize": 50
- },
- {
- "_batchSize": 100
- }
- ],
- "_queues":[
- {
- "_name": "direct://amq.direct//batchSize?durable='true'",
- "_durable": true
- }
- ],
- "_clients":[
- {
- "_name": "producingClient",
- "_connections":[
- {
- "_name": "connection1",
- "_factory": "connectionfactory",
- "_sessions": [
- {
- "_sessionName": "session1",
- "_acknowledgeMode": 0,
- "_producers": [
- {
- "_name": "Producer1",
- "_destinationName": "direct://amq.direct//batchSize?durable='true'",
- "_messageSize": 1024,
- "_maximumDuration": 30000,
- "_deliveryMode": 2
- }
- ]
- }
- ]
- }
- ]
- },
- {
- "_name": "consumingClient",
- "_connections":[
- {
- "_name": "connection1",
- "_factory": "connectionfactory",
- "_sessions": [
- {
- "_sessionName": "session1",
- "_acknowledgeMode": 0,
- "_consumers": [
- {
- "_name": "Consumer1",
- "_destinationName": "direct://amq.direct//batchSize?durable='true'",
- "_maximumDuration": 30000
- }
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- ]
-}
diff --git a/qpid/java/perftests/etc/testdefs/BatchSizeConsumerVaries.js b/qpid/java/perftests/etc/testdefs/BatchSizeConsumerVaries.js
new file mode 100644
index 0000000000..b491f431c9
--- /dev/null
+++ b/qpid/java/perftests/etc/testdefs/BatchSizeConsumerVaries.js
@@ -0,0 +1,102 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+var jsonObject = {
+ _tests:[]
+};
+
+var duration = 30000;
+
+var txBatchSizes = [[1,1], [1,2], [1,5], [1,10], [1,20], [1,50], [1,100], [1,200], [1,400]];
+
+var acknowledgeMode = 0;
+var deliveryMode = 2;
+var messageSize = 1024;
+
+for(var i=0; i < txBatchSizes.length; i++)
+{
+ var producerBatchSize = txBatchSizes[i][0];
+ var consumerBatchSize = txBatchSizes[i][1];
+ var queueName = "txBatchSize" + producerBatchSize + "_" + consumerBatchSize;
+ var destination = "direct://amq.direct//" + queueName + "?durable='true'";
+
+ var test = {
+ "_name": consumerBatchSize,// hack - use test name to expose the consumer batch size on the All result rows
+ "_queues":[
+ {
+ "_name": queueName,
+ "_durable": true
+ }
+ ],
+ "_clients":[
+ {
+ "_name": "producingClient",
+ "_connections":[
+ {
+ "_name": "connection1",
+ "_factory": "connectionfactory",
+ "_sessions": [
+ {
+ "_sessionName": "session1",
+ "_acknowledgeMode": acknowledgeMode,
+ "_producers": [
+ {
+ "_name": "Producer1",
+ "_destinationName": destination,
+ "_messageSize": messageSize,
+ "_deliveryMode": deliveryMode,
+ "_batchSize": producerBatchSize,
+ "_maximumDuration": duration
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "_name": "consumingClient",
+ "_connections":[
+ {
+ "_name": "connection1",
+ "_factory": "connectionfactory",
+ "_sessions": [
+ {
+ "_sessionName": "session1",
+ "_acknowledgeMode": acknowledgeMode,
+ "_consumers": [
+ {
+ "_name": "Consumer1",
+ "_destinationName": destination,
+ "_batchSize": consumerBatchSize,
+ "_maximumDuration": duration
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ };
+
+ jsonObject._tests= jsonObject._tests.concat(test);
+}
diff --git a/qpid/java/perftests/etc/testdefs/BatchSizeProducerVaries.js b/qpid/java/perftests/etc/testdefs/BatchSizeProducerVaries.js
new file mode 100644
index 0000000000..ac23c52c9e
--- /dev/null
+++ b/qpid/java/perftests/etc/testdefs/BatchSizeProducerVaries.js
@@ -0,0 +1,102 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+var jsonObject = {
+ _tests:[]
+};
+
+var duration = 30000;
+
+var txBatchSizes = [[1,1], [2,1], [5,1], [10,1], [20,1], [50,1], [100,1], [200,1], [400,1]];
+
+var acknowledgeMode = 0;
+var deliveryMode = 2;
+var messageSize = 1024;
+
+for(var i=0; i < txBatchSizes.length; i++)
+{
+ var producerBatchSize = txBatchSizes[i][0];
+ var consumerBatchSize = txBatchSizes[i][1];
+ var queueName = "txBatchSize" + producerBatchSize + "_" + consumerBatchSize;
+ var destination = "direct://amq.direct//" + queueName + "?durable='true'";
+
+ var test = {
+ "_name": producerBatchSize,// hack - use test name to expose the producer batch size on the All result rows
+ "_queues":[
+ {
+ "_name": queueName,
+ "_durable": true
+ }
+ ],
+ "_clients":[
+ {
+ "_name": "producingClient",
+ "_connections":[
+ {
+ "_name": "connection1",
+ "_factory": "connectionfactory",
+ "_sessions": [
+ {
+ "_sessionName": "session1",
+ "_acknowledgeMode": acknowledgeMode,
+ "_producers": [
+ {
+ "_name": "Producer1",
+ "_destinationName": destination,
+ "_messageSize": messageSize,
+ "_deliveryMode": deliveryMode,
+ "_batchSize": producerBatchSize,
+ "_maximumDuration": duration
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "_name": "consumingClient",
+ "_connections":[
+ {
+ "_name": "connection1",
+ "_factory": "connectionfactory",
+ "_sessions": [
+ {
+ "_sessionName": "session1",
+ "_acknowledgeMode": acknowledgeMode,
+ "_consumers": [
+ {
+ "_name": "Consumer1",
+ "_destinationName": destination,
+ "_batchSize": consumerBatchSize,
+ "_maximumDuration": duration
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ };
+
+ jsonObject._tests= jsonObject._tests.concat(test);
+}
diff --git a/qpid/java/perftests/example/brokerconfig/log4j.xml b/qpid/java/perftests/example/brokerconfig/log4j.xml
index 7dbb1bc87d..7e53d2667b 100644
--- a/qpid/java/perftests/example/brokerconfig/log4j.xml
+++ b/qpid/java/perftests/example/brokerconfig/log4j.xml
@@ -68,7 +68,7 @@
<param name="backupFilesToPath" value="${QPID_WORK}/backup/log"/>
<layout class="org.apache.log4j.PatternLayout">
- <param name="ConversionPattern" value="%d %-5p [%t] (%F:%L) - %m%n"/>
+ <param name="ConversionPattern" value="%d %-5p [%t] (%c{2}) - %m%n"/>
</layout>
</appender>
diff --git a/qpid/java/perftests/src/main/java/org/apache/qpid/disttest/client/ConsumerParticipant.java b/qpid/java/perftests/src/main/java/org/apache/qpid/disttest/client/ConsumerParticipant.java
index 8c69e5694b..8d25f86b77 100644
--- a/qpid/java/perftests/src/main/java/org/apache/qpid/disttest/client/ConsumerParticipant.java
+++ b/qpid/java/perftests/src/main/java/org/apache/qpid/disttest/client/ConsumerParticipant.java
@@ -103,16 +103,22 @@ public class ConsumerParticipant implements Participant
}
Date end = new Date();
- int numberOfMessagesSent = _totalNumberOfMessagesReceived.get();
+ int numberOfMessagesReceived = _totalNumberOfMessagesReceived.get();
long totalPayloadSize = _totalPayloadSizeOfAllMessagesReceived.get();
int payloadSize = getPayloadSizeForResultIfConstantOrZeroOtherwise(_allConsumedPayloadSizes);
+ if (LOGGER.isInfoEnabled())
+ {
+ LOGGER.info("Consumer {} finished consuming. Number of messages consumed: {}",
+ getName(), numberOfMessagesReceived);
+ }
+
ConsumerParticipantResult result = _resultFactory.createForConsumer(
getName(),
registeredClientName,
_command,
acknowledgeMode,
- numberOfMessagesSent,
+ numberOfMessagesReceived,
payloadSize,
totalPayloadSize,
start, end, _messageLatencies);
diff --git a/qpid/java/perftests/src/main/java/org/apache/qpid/disttest/jms/QpidQueueCreator.java b/qpid/java/perftests/src/main/java/org/apache/qpid/disttest/jms/QpidQueueCreator.java
index 0b906d228f..ef2cfb6cd4 100644
--- a/qpid/java/perftests/src/main/java/org/apache/qpid/disttest/jms/QpidQueueCreator.java
+++ b/qpid/java/perftests/src/main/java/org/apache/qpid/disttest/jms/QpidQueueCreator.java
@@ -25,7 +25,6 @@ import javax.jms.JMSException;
import javax.jms.MessageConsumer;
import javax.jms.Session;
-import org.apache.qpid.AMQException;
import org.apache.qpid.client.AMQDestination;
import org.apache.qpid.client.AMQSession;
import org.apache.qpid.disttest.DistributedTestException;
@@ -34,12 +33,13 @@ import org.apache.qpid.framing.AMQShortString;
import org.apache.qpid.framing.FieldTable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+
public class QpidQueueCreator implements QueueCreator
{
private static final Logger LOGGER = LoggerFactory.getLogger(QpidQueueCreator.class);
private static final FieldTable EMPTY_QUEUE_BIND_ARGUMENTS = new FieldTable();
private static final String QUEUE_CREATOR_DRAIN_POLL_TIMEOUT = "qpid.disttest.queue.creator.drainPollTime";
- private static int _drainPollTimeout = Integer.getInteger(QUEUE_CREATOR_DRAIN_POLL_TIMEOUT, 5000);
+ private static int _drainPollTimeout = Integer.getInteger(QUEUE_CREATOR_DRAIN_POLL_TIMEOUT, 500);
@Override
public void createQueues(Connection connection, Session session, List<QueueConfig> configs)
@@ -61,10 +61,8 @@ public class QpidQueueCreator implements QueueCreator
// drainQueue method is added because deletion of queue with a lot
// of messages takes time and might cause the timeout exception
- if (queueHasMessages(amqSession, destination))
- {
- drainQueue(connection, destination);
- }
+ drainQueue(connection, destination);
+
deleteQueue(amqSession, destination.getAMQQueueName());
}
}
@@ -81,13 +79,12 @@ public class QpidQueueCreator implements QueueCreator
}
}
- private boolean queueHasMessages(AMQSession<?, ?> amqSession, AMQDestination destination)
+ private long getQueueDepth(AMQSession<?, ?> amqSession, AMQDestination destination)
{
try
{
long queueDepth = amqSession.getQueueDepth(destination);
- LOGGER.info("Queue {} has {} message(s)", destination.getQueueName(), queueDepth);
- return queueDepth > 0;
+ return queueDepth;
}
catch (Exception e)
{
@@ -103,10 +100,19 @@ public class QpidQueueCreator implements QueueCreator
LOGGER.debug("About to drain the queue {}", destination.getQueueName());
noAckSession = connection.createSession(false, org.apache.qpid.jms.Session.NO_ACKNOWLEDGE);
MessageConsumer messageConsumer = noAckSession.createConsumer(destination);
+
+ long currentQueueDepth = getQueueDepth((AMQSession<?,?>)noAckSession, destination);
int counter = 0;
- while(messageConsumer.receive(_drainPollTimeout) != null)
+ while (currentQueueDepth > 0)
{
- counter++;
+ LOGGER.info("Queue {} has {} message(s)", destination.getQueueName(), currentQueueDepth);
+
+ while(messageConsumer.receive(_drainPollTimeout) != null)
+ {
+ counter++;
+ }
+
+ currentQueueDepth = getQueueDepth((AMQSession<?,?>)noAckSession, destination);
}
LOGGER.info("Drained {} message(s) from queue {} ", counter, destination.getQueueName());
messageConsumer.close();
diff --git a/qpid/java/perftests/visualisation-jfc/src/main/java/org/apache/qpid/disttest/charting/ChartingUtil.java b/qpid/java/perftests/visualisation-jfc/src/main/java/org/apache/qpid/disttest/charting/ChartingUtil.java
index 82a3a8c140..a329f33b00 100644
--- a/qpid/java/perftests/visualisation-jfc/src/main/java/org/apache/qpid/disttest/charting/ChartingUtil.java
+++ b/qpid/java/perftests/visualisation-jfc/src/main/java/org/apache/qpid/disttest/charting/ChartingUtil.java
@@ -58,10 +58,14 @@ public class ChartingUtil
public static final String CHART_DEFINITIONS_PROP = "chart-defs";
public static final String CHART_DEFINITIONS_DEFAULT = ".";
+ public static final String SUMMARY_TITLE_PROP = "summary-title";
+ public static final String SUMMARY_TITLE_DEFAULT = "Performance Charts";
+
private Map<String,String> _cliOptions = new HashMap<String, String>();
{
_cliOptions.put(OUTPUT_DIR_PROP, OUTPUT_DIR_DEFAULT);
_cliOptions.put(CHART_DEFINITIONS_PROP, CHART_DEFINITIONS_DEFAULT);
+ _cliOptions.put(SUMMARY_TITLE_PROP, SUMMARY_TITLE_DEFAULT);
}
public static void main(String[] args) throws Exception
@@ -101,7 +105,8 @@ public class ChartingUtil
writer.writeChartToFileSystem(chart, chartingDefinition);
}
- writer.writeHtmlSummaryToFileSystem();
+ final String summaryChartTitle = _cliOptions.get(SUMMARY_TITLE_PROP);
+ writer.writeHtmlSummaryToFileSystem(summaryChartTitle);
}
private List<ChartingDefinition> loadChartDefinitions(String chartingDefsDir)
diff --git a/qpid/java/perftests/visualisation-jfc/src/main/java/org/apache/qpid/disttest/charting/writer/ChartWriter.java b/qpid/java/perftests/visualisation-jfc/src/main/java/org/apache/qpid/disttest/charting/writer/ChartWriter.java
index 69997a051c..888d7dc3d7 100644
--- a/qpid/java/perftests/visualisation-jfc/src/main/java/org/apache/qpid/disttest/charting/writer/ChartWriter.java
+++ b/qpid/java/perftests/visualisation-jfc/src/main/java/org/apache/qpid/disttest/charting/writer/ChartWriter.java
@@ -79,7 +79,7 @@ public class ChartWriter
}
}
- public void writeHtmlSummaryToFileSystem()
+ public void writeHtmlSummaryToFileSystem(String summaryPageTitle)
{
if(_chartFilesToChartDef.size() < 2)
{
@@ -87,13 +87,13 @@ public class ChartWriter
return;
}
- String htmlHeader =
+ String htmlHeader = String.format(
"<html>\n" +
" <head>\n" +
- " <title>Performance Charts</title>\n" +
+ " <title>%s</title>\n" +
" <style type='text/css'>figure { float: left; display: table; width: 87px;}</style>\n" +
" </head>\n" +
- " <body>\n";
+ " <body>\n", summaryPageTitle);
String htmlFooter =
" </body>\n" +
diff --git a/qpid/java/perftests/visualisation-jfc/src/test/java/org/apache/qpid/disttest/charting/writer/ChartWriterTest.java b/qpid/java/perftests/visualisation-jfc/src/test/java/org/apache/qpid/disttest/charting/writer/ChartWriterTest.java
index b515e70f2c..4a249e252e 100644
--- a/qpid/java/perftests/visualisation-jfc/src/test/java/org/apache/qpid/disttest/charting/writer/ChartWriterTest.java
+++ b/qpid/java/perftests/visualisation-jfc/src/test/java/org/apache/qpid/disttest/charting/writer/ChartWriterTest.java
@@ -91,7 +91,7 @@ public class ChartWriterTest extends TestCase
_writer.writeChartToFileSystem(_chart2, chartDef2);
_writer.writeChartToFileSystem(_chart1, chartDef1);
- _writer.writeHtmlSummaryToFileSystem();
+ _writer.writeHtmlSummaryToFileSystem("Performance Charts");
InputStream expectedSummaryFileInputStream = getClass().getResourceAsStream("expected-chart-summary.html");
String expectedSummaryContent = new Scanner(expectedSummaryFileInputStream).useDelimiter("\\A").next();
@@ -110,7 +110,7 @@ public class ChartWriterTest extends TestCase
_writer.writeChartToFileSystem(_chart1, chartDef1);
- _writer.writeHtmlSummaryToFileSystem();
+ _writer.writeHtmlSummaryToFileSystem("Performance Charts");
assertFalse("Only one chart generated so no summary file should have been written",
summaryFile.exists());
diff --git a/qpid/java/systests/src/main/java/org/apache/qpid/ra/admin/QpidConnectionFactoryProxyTest.java b/qpid/java/systests/src/main/java/org/apache/qpid/ra/admin/QpidConnectionFactoryProxyTest.java
new file mode 100644
index 0000000000..80001099a8
--- /dev/null
+++ b/qpid/java/systests/src/main/java/org/apache/qpid/ra/admin/QpidConnectionFactoryProxyTest.java
@@ -0,0 +1,120 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+package org.apache.qpid.ra.admin;
+
+import javax.jms.Connection;
+import javax.jms.ConnectionFactory;
+import javax.jms.QueueConnection;
+import javax.jms.QueueConnectionFactory;
+import javax.jms.TopicConnection;
+import javax.jms.TopicConnectionFactory;
+
+import org.apache.qpid.test.utils.QpidBrokerTestCase;
+
+public class QpidConnectionFactoryProxyTest extends QpidBrokerTestCase
+{
+ private static final String BROKER_PORT = "15672";
+
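+ // Connection URL handed to each factory proxy under test; the brokerlist targets localhost on BROKER_PORT using the PLAIN SASL mechanism.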
+ private static final String URL = "amqp://guest:guest@client/test?brokerlist='tcp://localhost:" + BROKER_PORT + "?sasl_mechs='PLAIN''";
+
+ public void testQueueConnectionFactory() throws Exception
+ {
+ QueueConnectionFactory cf = null;
+ QueueConnection c = null;
+
+ try
+ {
+ cf = new QpidConnectionFactoryProxy();
+ ((QpidConnectionFactoryProxy)cf).setConnectionURL(URL);
+ c = cf.createQueueConnection();
+ assertTrue(c instanceof QueueConnection);
+
+ }
+ finally
+ {
+ if(c != null)
+ {
+ c.close();
+ }
+ }
+ }
+
+ public void testTopicConnectionFactory() throws Exception
+ {
+ TopicConnectionFactory cf = null;
+ TopicConnection c = null;
+
+ try
+ {
+ cf = new QpidConnectionFactoryProxy();
+ ((QpidConnectionFactoryProxy)cf).setConnectionURL(URL);
+ c = cf.createTopicConnection();
+ assertTrue(c instanceof TopicConnection);
+
+ }
+ finally
+ {
+ if(c != null)
+ {
+ c.close();
+ }
+ }
+ }
+
+ public void testConnectionFactory() throws Exception
+ {
+ ConnectionFactory cf = null;
+ Connection c = null;
+
+ try
+ {
+ cf = new QpidConnectionFactoryProxy();
+ ((QpidConnectionFactoryProxy)cf).setConnectionURL(URL);
+ c = cf.createConnection();
+ assertTrue(c instanceof Connection);
+
+ }
+ finally
+ {
+ if(c != null)
+ {
+ c.close();
+ }
+
+ }
+ }
+}
+
diff --git a/qpid/java/systests/src/main/java/org/apache/qpid/server/BrokerStartupTest.java b/qpid/java/systests/src/main/java/org/apache/qpid/server/BrokerStartupTest.java
index 9f3994fc91..eba2a638c0 100644
--- a/qpid/java/systests/src/main/java/org/apache/qpid/server/BrokerStartupTest.java
+++ b/qpid/java/systests/src/main/java/org/apache/qpid/server/BrokerStartupTest.java
@@ -30,6 +30,7 @@ import org.apache.qpid.util.LogMonitor;
import javax.jms.Connection;
import javax.jms.Queue;
import javax.jms.Session;
+import java.io.File;
import java.util.List;
/**
@@ -72,10 +73,7 @@ public class BrokerStartupTest extends AbstractTestLogging
if (isJavaBroker() && isExternalBroker() && !isInternalBroker())
{
//Replace the test Log4j config on the command line with an invalid file
- _brokerCommand = _brokerCommand.substring(0, _brokerCommand.indexOf("-l"));
-
- // Add an invalid value
- _brokerCommand += " -l invalid";
+ setBrokerCommandLog4JFile(new File("invalid file"));
// The broker has a built in default log4j configuration set up
// so if the broker cannot load the -l value it will use default
@@ -145,4 +143,4 @@ public class BrokerStartupTest extends AbstractTestLogging
}
}
-} \ No newline at end of file
+}
diff --git a/qpid/java/systests/src/main/java/org/apache/qpid/server/logging/AbstractTestLogging.java b/qpid/java/systests/src/main/java/org/apache/qpid/server/logging/AbstractTestLogging.java
index d823cb5856..84017b6850 100644
--- a/qpid/java/systests/src/main/java/org/apache/qpid/server/logging/AbstractTestLogging.java
+++ b/qpid/java/systests/src/main/java/org/apache/qpid/server/logging/AbstractTestLogging.java
@@ -122,7 +122,7 @@ public class AbstractTestLogging extends QpidBrokerTestCase
}
protected String fromMessage(String log)
{
int startSubject = log.indexOf("]") + 1;
int start = log.indexOf("]", startSubject) + 1;
diff --git a/qpid/java/systests/src/main/java/org/apache/qpid/server/logging/BrokerLoggingTest.java b/qpid/java/systests/src/main/java/org/apache/qpid/server/logging/BrokerLoggingTest.java
index 7d3b946b6d..c5f5e06ae1 100644
--- a/qpid/java/systests/src/main/java/org/apache/qpid/server/logging/BrokerLoggingTest.java
+++ b/qpid/java/systests/src/main/java/org/apache/qpid/server/logging/BrokerLoggingTest.java
@@ -171,8 +171,7 @@ public class BrokerLoggingTest extends AbstractTestLogging
{
String TESTID = "BRK-1007";
- //Remove test Log4j config from the commandline
- _brokerCommand = _brokerCommand.substring(0, _brokerCommand.indexOf("-l"));
+ _brokerCommandHelper.removeBrokerCommandLog4JFile();
startBroker();
@@ -259,9 +258,7 @@ public class BrokerLoggingTest extends AbstractTestLogging
// This logging startup code only occurs when you run a Java broker
if (isJavaBroker() && isExternalBroker())
{
- // Get custom -l value used during testing for the broker startup
- String brokerCommand = getBrokerCommand(DEFAULT_PORT, "");
- String customLog4j = brokerCommand.substring(brokerCommand.indexOf("-l") + 2).trim();
+ String customLog4j = getBrokerCommandLog4JFile().getAbsolutePath();
String TESTID = "BRK-1007";
diff --git a/qpid/java/systests/src/main/java/org/apache/qpid/systest/management/jmx/LoggingManagementTest.java b/qpid/java/systests/src/main/java/org/apache/qpid/systest/management/jmx/LoggingManagementTest.java
index 2a112eab35..3c3bbdca41 100644
--- a/qpid/java/systests/src/main/java/org/apache/qpid/systest/management/jmx/LoggingManagementTest.java
+++ b/qpid/java/systests/src/main/java/org/apache/qpid/systest/management/jmx/LoggingManagementTest.java
@@ -55,9 +55,8 @@ public class LoggingManagementTest extends QpidBrokerTestCase
File tmpLogFile = File.createTempFile("log4j" + "." + getName(), ".xml");
tmpLogFile.deleteOnExit();
- FileUtils.copy(_logConfigFile, tmpLogFile);
-
- _logConfigFile = tmpLogFile;
+ FileUtils.copy(getBrokerCommandLog4JFile(), tmpLogFile);
+ setBrokerCommandLog4JFile(tmpLogFile);
super.setUp();
_jmxUtils.open();
@@ -105,7 +104,7 @@ public class LoggingManagementTest extends QpidBrokerTestCase
_monitor.markDiscardPoint();
_loggingManagement.setRuntimeLoggerLevel(logger, "OFF");
- List<String> matches = _monitor.findMatches("Setting level to OFF for logger 'org.apache.qpid'");
+ List<String> matches = _monitor.waitAndFindMatches("Setting level to OFF for logger 'org.apache.qpid'", 5000);
assertEquals(1, matches.size());
TabularData table = _loggingManagement.viewEffectiveRuntimeLoggerLevels();
@@ -121,7 +120,7 @@ public class LoggingManagementTest extends QpidBrokerTestCase
_monitor.markDiscardPoint();
_loggingManagement.setConfigFileLoggerLevel(operationalLoggingLogger, "OFF");
- List<String> matches = _monitor.findMatches("Setting level to OFF for logger 'qpid.message'");
+ List<String> matches = _monitor.waitAndFindMatches("Setting level to OFF for logger 'qpid.message'", 5000);
assertEquals(1, matches.size());
assertEffectiveLoggingLevel(operationalLoggingLogger, "INFO");
diff --git a/qpid/java/systests/src/main/java/org/apache/qpid/systest/management/jmx/QueueManagementTest.java b/qpid/java/systests/src/main/java/org/apache/qpid/systest/management/jmx/QueueManagementTest.java
index 10a2f954d0..0d3289d1bd 100644
--- a/qpid/java/systests/src/main/java/org/apache/qpid/systest/management/jmx/QueueManagementTest.java
+++ b/qpid/java/systests/src/main/java/org/apache/qpid/systest/management/jmx/QueueManagementTest.java
@@ -580,6 +580,60 @@ public class QueueManagementTest extends QpidBrokerTestCase
assertMessageIndicesOn(_destinationQueue, 0);
}
+ /**
+ * Tests {@link ManagedQueue#deleteMessages(long, long)} interface.
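+ * Messages are deleted singly and in ranges, including ranges whose endpoints refer to already-deleted messages.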
+ */
+ public void testDeleteMessages() throws Exception
+ {
+ final int numberOfMessagesToSend = 15;
+
+ sendMessage(_session, _sourceQueue, numberOfMessagesToSend);
+ syncSession(_session);
+ assertEquals("Unexpected queue depth after send", numberOfMessagesToSend, _managedSourceQueue.getMessageCount().intValue());
+ List<Long> amqMessagesIds = getAMQMessageIdsOn(_managedSourceQueue, 1, numberOfMessagesToSend);
+ // Current expected queue state, in terms of message header indices: [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+
+ // Delete the first message (remember that both the amqMessagesIds list and the message indices added as a property when sending are 0-based)
+ long fromMessageId = amqMessagesIds.get(0);
+ long toMessageId = fromMessageId;
+ _managedSourceQueue.deleteMessages(fromMessageId, toMessageId);
+ assertEquals("Unexpected message count after first deletion", numberOfMessagesToSend - 1, _managedSourceQueue.getMessageCount().intValue());
+ // Current expected queue state, in terms of message header indices: [X,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
+
+ // Delete the 9th-10th messages, in the middle of the queue
+ fromMessageId = amqMessagesIds.get(8);
+ toMessageId = amqMessagesIds.get(9);
+ _managedSourceQueue.deleteMessages(fromMessageId, toMessageId);
+ assertEquals("Unexpected message count after third deletion", numberOfMessagesToSend - 3, _managedSourceQueue.getMessageCount().intValue());
+ // Current expected queue state, in terms of message header indices: [X,1,2,3,4,5,6,7,X,X,10,11,12,13,14]
+
+ // Delete the 11th and 12th messages, but still include the IDs for the 9th and 10th messages in the
+ // range to ensure their IDs are 'skipped' until the matching messages are found
+ fromMessageId = amqMessagesIds.get(8);
+ toMessageId = amqMessagesIds.get(11);
+ _managedSourceQueue.deleteMessages(fromMessageId, toMessageId);
+ assertEquals("Unexpected message count after fourth deletion", numberOfMessagesToSend - 5, _managedSourceQueue.getMessageCount().intValue());
+ // Current expected queue state, in terms of message header indices: [X,1,2,3,4,5,6,7,X,X,X,X,12,13,14]
+
+ // Delete the 8th message and the 13th message, including the IDs for the 9th-12th messages in the
+ // range to ensure their IDs are 'skipped' and the other matching message is found
+ fromMessageId = amqMessagesIds.get(7);
+ toMessageId = amqMessagesIds.get(12);
+ _managedSourceQueue.deleteMessages(fromMessageId, toMessageId);
+ assertEquals("Unexpected message count after fourth deletion", numberOfMessagesToSend - 7, _managedSourceQueue.getMessageCount().intValue());
+ // Current expected queue state, in terms of message header indices: [X,1,2,3,4,5,6,X,X,X,X,X,X,13,14]
+
+ // Delete the last message
+ fromMessageId = amqMessagesIds.get(numberOfMessagesToSend - 1);
+ toMessageId = fromMessageId;
+ _managedSourceQueue.deleteMessages(fromMessageId, toMessageId);
+ assertEquals("Unexpected message count after second deletion", numberOfMessagesToSend - 8, _managedSourceQueue.getMessageCount().intValue());
+ // Current expected queue state, in terms of message header indices: [X,1,2,3,4,5,6,X,X,X,X,X,X,13,X]
+
+ // Verify the message indices with a consumer
+ assertMessageIndicesOn(_sourceQueue, 1,2,3,4,5,6,13);
+ }
+
@Override
public Message createNextMessage(Session session, int messageNumber) throws JMSException
{
diff --git a/qpid/java/systests/src/main/java/org/apache/qpid/test/client/destination/AddressBasedDestinationTest.java b/qpid/java/systests/src/main/java/org/apache/qpid/test/client/destination/AddressBasedDestinationTest.java
index 6041600364..371b40bfc8 100644
--- a/qpid/java/systests/src/main/java/org/apache/qpid/test/client/destination/AddressBasedDestinationTest.java
+++ b/qpid/java/systests/src/main/java/org/apache/qpid/test/client/destination/AddressBasedDestinationTest.java
@@ -1407,10 +1407,16 @@ public class AddressBasedDestinationTest extends QpidBrokerTestCase
"}";
String addr = "ADDR:amq.topic/test; {link: {name:my-queue, durable:true," + xDeclareArgs + "}}";
- MessageConsumer cons = ssn.createConsumer(ssn.createTopic(addr));
+ Destination dest = ssn.createTopic(addr);
+ MessageConsumer cons = ssn.createConsumer(dest);
String verifyAddr = "ADDR:my-queue;{ node: {durable:true, " + xDeclareArgs + "}}";
AMQDestination verifyDest = (AMQDestination)ssn.createQueue(verifyAddr);
((AMQSession_0_10)ssn).isQueueExist(verifyDest, true);
+
+ // Verify that the producer does not delete the subscription queue.
+ MessageProducer prod = ssn.createProducer(dest);
+ prod.close();
+ ((AMQSession_0_10)ssn).isQueueExist(verifyDest, true);
}
}
diff --git a/qpid/java/systests/src/main/java/org/apache/qpid/test/unit/client/MaxDeliveryCountTest.java b/qpid/java/systests/src/main/java/org/apache/qpid/test/unit/client/MaxDeliveryCountTest.java
index da75bb6556..40db17f799 100644
--- a/qpid/java/systests/src/main/java/org/apache/qpid/test/unit/client/MaxDeliveryCountTest.java
+++ b/qpid/java/systests/src/main/java/org/apache/qpid/test/unit/client/MaxDeliveryCountTest.java
@@ -84,8 +84,10 @@ public class MaxDeliveryCountTest extends QpidBrokerTestCase
// Set client-side flag to allow the server to determine if messages
// dead-lettered or requeued.
- setTestClientSystemProperty(ClientProperties.REJECT_BEHAVIOUR_PROP_NAME, "server");
-
+ if (!isBroker010())
+ {
+ setTestClientSystemProperty(ClientProperties.REJECT_BEHAVIOUR_PROP_NAME, "server");
+ }
super.setUp();
boolean durableSub = isDurSubTest();
diff --git a/qpid/java/systests/src/main/java/org/apache/qpid/test/unit/client/channelclose/ChannelCloseOkTest.java b/qpid/java/systests/src/main/java/org/apache/qpid/test/unit/client/channelclose/ChannelCloseOkTest.java
deleted file mode 100644
index 1c9ee27b94..0000000000
--- a/qpid/java/systests/src/main/java/org/apache/qpid/test/unit/client/channelclose/ChannelCloseOkTest.java
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-package org.apache.qpid.test.unit.client.channelclose;
-
-import junit.textui.TestRunner;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.qpid.client.AMQConnection;
-import org.apache.qpid.client.AMQQueue;
-import org.apache.qpid.test.utils.QpidBrokerTestCase;
-
-import javax.jms.Destination;
-import javax.jms.ExceptionListener;
-import javax.jms.JMSException;
-import javax.jms.Message;
-import javax.jms.MessageListener;
-import javax.jms.MessageProducer;
-import javax.jms.Session;
-import javax.jms.TextMessage;
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Due to bizarre exception handling all sessions are closed if you get
- * a channel close request and no exception listener is registered.
- * <p/>
- * JIRA issue IBTBLZ-10.
- * <p/>
- * Simulate by:
- * <p/>
- * 0. Create two sessions with no exception listener.
- * 1. Publish message to queue/topic that does not exist (wrong routing key).
- * 2. This will cause a channel close.
- * 3. Since client does not have an exception listener, currently all sessions are
- * closed.
- */
-public class ChannelCloseOkTest extends QpidBrokerTestCase
-{
- private AMQConnection _connection;
- private Destination _destination1;
- private Destination _destination2;
- private Session _session1;
- private Session _session2;
- private final List<Message> _received1 = new ArrayList<Message>();
- private final List<Message> _received2 = new ArrayList<Message>();
-
- private static final Logger _log = LoggerFactory.getLogger(ChannelCloseOkTest.class);
-
- protected void setUp() throws Exception
- {
- super.setUp();
-
- _connection = (AMQConnection) getConnection("guest", "guest");
-
- _destination1 = new AMQQueue(_connection, "q1", true);
- _destination2 = new AMQQueue(_connection, "q2", true);
- _session1 = _connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
- _session1.createConsumer(_destination1).setMessageListener(new MessageListener()
- {
- public void onMessage(Message message)
- {
- _log.debug("consumer 1 got message [" + getTextMessage(message) + "]");
- synchronized (_received1)
- {
- _received1.add(message);
- _received1.notify();
- }
- }
- });
- _session2 = _connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
- _session2.createConsumer(_destination2).setMessageListener(new MessageListener()
- {
- public void onMessage(Message message)
- {
- _log.debug("consumer 2 got message [" + getTextMessage(message) + "]");
- synchronized (_received2)
- {
- _received2.add(message);
- _received2.notify();
- }
- }
- });
-
- _connection.start();
- }
-
- private String getTextMessage(Message message)
- {
- TextMessage tm = (TextMessage) message;
- try
- {
- return tm.getText();
- }
- catch (JMSException e)
- {
- return "oops " + e;
- }
- }
-
- protected void tearDown() throws Exception
- {
- closeConnection();
- super.tearDown();
- }
-
- public void closeConnection() throws JMSException
- {
- if (_connection != null)
- {
- _log.info(">>>>>>>>>>>>>>.. closing");
- _connection.close();
- }
- }
-
- public void testWithoutExceptionListener() throws Exception
- {
- doTest();
- }
-
- public void testWithExceptionListener() throws Exception
- {
- _connection.setExceptionListener(new ExceptionListener()
- {
- public void onException(JMSException jmsException)
- {
- _log.warn("onException - " + jmsException.getMessage());
- }
- });
-
- doTest();
- }
-
- public void doTest() throws Exception
- {
- // Check both sessions are ok.
- sendAndWait(_session1, _destination1, "first", _received1, 1);
- sendAndWait(_session2, _destination2, "second", _received2, 1);
- assertEquals(1, _received1.size());
- assertEquals(1, _received2.size());
-
- // Now send message to incorrect destination on session 1.
- Destination destination = new AMQQueue(_connection, "incorrect");
- send(_session1, destination, "third"); // no point waiting as message will never be received.
-
- // Ensure both sessions are still ok.
- // Send a bunch of messages as this give time for the sessions to be erroneously closed.
- final int num = 300;
- for (int i = 0; i < num; ++i)
- {
- send(_session1, _destination1, "" + i);
- send(_session2, _destination2, "" + i);
- }
-
- waitFor(_received1, num + 1);
- waitFor(_received2, num + 1);
-
- // Note that the third message is never received as it is sent to an incorrect destination.
- assertEquals(num + 1, _received1.size());
- assertEquals(num + 1, _received2.size());
- }
-
- private void sendAndWait(Session session, Destination destination, String message, List<Message> received, int count)
- throws JMSException, InterruptedException
- {
- send(session, destination, message);
- waitFor(received, count);
- }
-
- private void send(Session session, Destination destination, String message) throws JMSException
- {
- _log.debug("sending message " + message);
- MessageProducer producer1 = session.createProducer(destination);
- producer1.send(session.createTextMessage(message));
- }
-
- private void waitFor(List<Message> received, int count) throws InterruptedException
- {
- long timeout = 20000;
-
- synchronized (received)
- {
- long start = System.currentTimeMillis();
- while (received.size() < count)
- {
- if (System.currentTimeMillis() - start > timeout)
- {
- fail("timeout expired waiting for messages");
- }
- try
- {
- received.wait(timeout);
- }
- catch (InterruptedException e)
- {
- _log.info("Interrupted: " + e);
- throw e;
- }
-
- }
- }
- }
-
- private static String randomize(String in)
- {
- return in + System.currentTimeMillis();
- }
-
- public static void main(String[] args)
- {
- TestRunner.run(ChannelCloseOkTest.class);
- }
-
- public static junit.framework.Test suite()
- {
- return new junit.framework.TestSuite(ChannelCloseOkTest.class);
- }
-}
diff --git a/qpid/java/systests/src/main/java/org/apache/qpid/test/unit/client/channelclose/ChannelCloseTest.java b/qpid/java/systests/src/main/java/org/apache/qpid/test/unit/client/channelclose/ChannelCloseTest.java
deleted file mode 100644
index c20eefd987..0000000000
--- a/qpid/java/systests/src/main/java/org/apache/qpid/test/unit/client/channelclose/ChannelCloseTest.java
+++ /dev/null
@@ -1,399 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- *
- */
-package org.apache.qpid.test.unit.client.channelclose;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.qpid.AMQException;
-import org.apache.qpid.client.AMQConnection;
-import org.apache.qpid.client.failover.FailoverException;
-import org.apache.qpid.client.protocol.AMQProtocolHandler;
-import org.apache.qpid.framing.AMQFrame;
-import org.apache.qpid.framing.AMQShortString;
-import org.apache.qpid.framing.ChannelCloseOkBody;
-import org.apache.qpid.framing.ChannelOpenBody;
-import org.apache.qpid.framing.ChannelOpenOkBody;
-import org.apache.qpid.framing.ExchangeDeclareBody;
-import org.apache.qpid.framing.ExchangeDeclareOkBody;
-import org.apache.qpid.jms.ConnectionListener;
-import org.apache.qpid.protocol.AMQConstant;
-import org.apache.qpid.test.utils.QpidBrokerTestCase;
-
-import javax.jms.Connection;
-import javax.jms.ExceptionListener;
-import javax.jms.JMSException;
-import javax.jms.Message;
-import javax.jms.MessageConsumer;
-import javax.jms.MessageProducer;
-import javax.jms.Queue;
-import javax.jms.Session;
-import javax.jms.TextMessage;
-
-public class ChannelCloseTest extends QpidBrokerTestCase implements ExceptionListener, ConnectionListener
-{
- private static final Logger _logger = LoggerFactory.getLogger(ChannelCloseTest.class);
-
- private Connection _connection;
- private Session _session;
- private static final long SYNC_TIMEOUT = 500;
- private int TEST = 0;
-
- /**
- * Close channel, use chanel with same id ensure error.
- *
- * This test is only valid for non 0-10 connection .
- */
- public void testReusingChannelAfterFullClosure() throws Exception
- {
- _connection=newConnection();
-
- // Create Producer
- try
- {
- _connection.start();
-
- createChannelAndTest(1);
-
- // Cause it to close
- try
- {
- _logger.info("Testing invalid exchange");
- declareExchange(1, "", "name_that_will_lookup_to_null", false);
- fail("Exchange name is empty so this should fail ");
- }
- catch (AMQException e)
- {
- assertEquals("Exchange should not be found", AMQConstant.NOT_FOUND, e.getErrorCode());
- }
-
- // Check that
- try
- {
- _logger.info("Testing valid exchange should fail");
- declareExchange(1, "topic", "amq.topic", false);
- fail("This should not succeed as the channel should be closed ");
- }
- catch (AMQException e)
- {
- if (_logger.isInfoEnabled())
- {
- _logger.info("Exception occured was:" + e.getErrorCode());
- }
-
- assertEquals("Connection should be closed", AMQConstant.CHANNEL_ERROR, e.getErrorCode());
-
- _connection=newConnection();
- }
-
- checkSendingMessage();
-
- _session.close();
- _connection.close();
-
- }
- catch (JMSException e)
- {
- e.printStackTrace();
- fail(e.getMessage());
- }
- }
-
- /*
- close channel and send guff then send ok no errors
- REMOVE TEST - The behaviour after server has sent close is undefined.
- the server should be free to fail as it may wish to reclaim its resources
- immediately after close.
- */
- /*public void testSendingMethodsAfterClose() throws Exception
- {
- // this is testing an 0.8 connection
- if(isBroker08())
- {
- try
- {
- _connection=new AMQConnection("amqp://guest:guest@CCTTest/test?brokerlist='" + _brokerlist + "'");
-
- ((AMQConnection) _connection).setConnectionListener(this);
-
- _connection.setExceptionListener(this);
-
- // Change the StateManager for one that doesn't respond with Close-OKs
- AMQStateManager oldStateManager=((AMQConnection) _connection).getProtocolHandler().getStateManager();
-
- _session=_connection.createSession(false, Session.CLIENT_ACKNOWLEDGE);
-
- _connection.start();
-
- // Test connection
- checkSendingMessage();
-
- // Set StateManager to manager that ignores Close-oks
- AMQProtocolSession protocolSession=
- ((AMQConnection) _connection).getProtocolHandler().getProtocolSession();
-
- MethodDispatcher d = protocolSession.getMethodDispatcher();
-
- MethodDispatcher wrappedDispatcher = (MethodDispatcher)
- Proxy.newProxyInstance(d.getClass().getClassLoader(),
- d.getClass().getInterfaces(),
- new MethodDispatcherProxyHandler(
- (ClientMethodDispatcherImpl) d));
-
- protocolSession.setMethodDispatcher(wrappedDispatcher);
-
-
- AMQStateManager newStateManager=new NoCloseOKStateManager(protocolSession);
- newStateManager.changeState(oldStateManager.getCurrentState());
-
- ((AMQConnection) _connection).getProtocolHandler().setStateManager(newStateManager);
-
- final int TEST_CHANNEL=1;
- _logger.info("Testing Channel(" + TEST_CHANNEL + ") Creation");
-
- createChannelAndTest(TEST_CHANNEL);
-
- // Cause it to close
- try
- {
- _logger.info("Closing Channel - invalid exchange");
- declareExchange(TEST_CHANNEL, "", "name_that_will_lookup_to_null", false);
- fail("Exchange name is empty so this should fail ");
- }
- catch (AMQException e)
- {
- assertEquals("Exchange should not be found", AMQConstant.NOT_FOUND, e.getErrorCode());
- }
-
- try
- {
- // Send other methods that should be ignored
- // send them no wait as server will ignore them
- _logger.info("Tested known exchange - should ignore");
- declareExchange(TEST_CHANNEL, "topic", "amq.topic", true);
-
- _logger.info("Tested known invalid exchange - should ignore");
- declareExchange(TEST_CHANNEL, "", "name_that_will_lookup_to_null", true);
-
- _logger.info("Tested known invalid exchange - should ignore");
- declareExchange(TEST_CHANNEL, "", "name_that_will_lookup_to_null", true);
-
- // Send sync .. server will igore and timy oue
- _logger.info("Tested known invalid exchange - should ignore");
- declareExchange(TEST_CHANNEL, "", "name_that_will_lookup_to_null", false);
- }
- catch (AMQTimeoutException te)
- {
- assertEquals("Request should timeout", AMQConstant.REQUEST_TIMEOUT, te.getErrorCode());
- }
- catch (AMQException e)
- {
- fail("This should not fail as all requests should be ignored");
- }
-
- _logger.info("Sending Close");
- // Send Close-ok
- sendClose(TEST_CHANNEL);
-
- _logger.info("Re-opening channel");
-
- createChannelAndTest(TEST_CHANNEL);
-
- // Test connection is still ok
-
- checkSendingMessage();
-
- }
- catch (JMSException e)
- {
- e.printStackTrace();
- fail(e.getMessage());
- }
- catch (AMQException e)
- {
- fail(e.getMessage());
-
- }
- catch (URLSyntaxException e)
- {
- fail(e.getMessage());
- }
- finally
- {
- try
- {
- _session.close();
- _connection.close();
- }
- catch (JMSException e)
- {
- e.printStackTrace();
- fail(e.getMessage());
- }
- }
- }
- }
-*/
- private void createChannelAndTest(int channel) throws FailoverException
- {
- // Create A channel
- try
- {
- createChannel(channel);
- }
- catch (AMQException e)
- {
- fail(e.getMessage());
- }
-
- // Test it is ok
- try
- {
- declareExchange(channel, "topic", "amq.topic", false);
- _logger.info("Tested known exchange");
- }
- catch (AMQException e)
- {
- fail("This should not fail as this is the default exchange details");
- }
- }
-
- private void sendClose(int channel)
- {
- ChannelCloseOkBody body =
- ((AMQConnection) _connection).getProtocolHandler().getMethodRegistry().createChannelCloseOkBody();
- AMQFrame frame = body.generateFrame(channel);
-
- ((AMQConnection) _connection).getProtocolHandler().writeFrame(frame);
- }
-
- private void checkSendingMessage() throws JMSException
- {
- TEST++;
- _logger.info("Test creating producer which will use channel id 1");
-
- Queue queue = _session.createQueue("CCT_test_validation_queue" + TEST);
-
- MessageConsumer consumer = _session.createConsumer(queue);
-
- MessageProducer producer = _session.createProducer(queue);
-
- final String MESSAGE = "CCT_Test_Message";
- producer.send(_session.createTextMessage(MESSAGE));
-
- Message msg = consumer.receive(2000);
-
- assertNotNull("Received messages should not be null.", msg);
- assertEquals("Message received not what we sent", MESSAGE, ((TextMessage) msg).getText());
- }
-
- private Connection newConnection()
- {
- Connection connection = null;
- try
- {
- connection = getConnection();
-
- ((AMQConnection) connection).setConnectionListener(this);
-
- _session = connection.createSession(false, Session.CLIENT_ACKNOWLEDGE);
-
- connection.start();
-
- }
- catch (Exception e)
- {
- fail("Creating new connection when:" + e.getMessage());
- }
-
- return connection;
- }
-
- private void declareExchange(int channelId, String _type, String _name, boolean nowait)
- throws AMQException, FailoverException
- {
- ExchangeDeclareBody body =
- ((AMQConnection) _connection).getProtocolHandler()
- .getMethodRegistry()
- .createExchangeDeclareBody(0,
- new AMQShortString(_name),
- new AMQShortString(_type),
- true,
- false,
- false,
- false,
- nowait,
- null);
- AMQFrame exchangeDeclare = body.generateFrame(channelId);
- AMQProtocolHandler protocolHandler = ((AMQConnection) _connection).getProtocolHandler();
-
-
- if (nowait)
- {
- protocolHandler.writeFrame(exchangeDeclare);
- }
- else
- {
- protocolHandler.syncWrite(exchangeDeclare, ExchangeDeclareOkBody.class, SYNC_TIMEOUT);
- }
-
-// return null;
-// }
-// }, (AMQConnection)_connection).execute();
-
- }
-
- private void createChannel(int channelId) throws AMQException, FailoverException
- {
- ChannelOpenBody body =
- ((AMQConnection) _connection).getProtocolHandler().getMethodRegistry().createChannelOpenBody(null);
-
- ((AMQConnection) _connection).getProtocolHandler().syncWrite(body.generateFrame(channelId), // outOfBand
- ChannelOpenOkBody.class);
-
- }
-
- public void onException(JMSException jmsException)
- {
- // _logger.info("CCT" + jmsException);
- fail(jmsException.getMessage());
- }
-
- public void bytesSent(long count)
- { }
-
- public void bytesReceived(long count)
- { }
-
- public boolean preFailover(boolean redirect)
- {
- return false;
- }
-
- public boolean preResubscribe()
- {
- return false;
- }
-
- public void failoverComplete()
- { }
-
-}
diff --git a/qpid/java/systests/src/main/java/org/apache/qpid/test/unit/transacted/TransactionTimeoutTest.java b/qpid/java/systests/src/main/java/org/apache/qpid/test/unit/transacted/TransactionTimeoutTest.java
index 89835955cb..4dc26847da 100644
--- a/qpid/java/systests/src/main/java/org/apache/qpid/test/unit/transacted/TransactionTimeoutTest.java
+++ b/qpid/java/systests/src/main/java/org/apache/qpid/test/unit/transacted/TransactionTimeoutTest.java
@@ -39,7 +39,7 @@ public class TransactionTimeoutTest extends TransactionTimeoutTestCase
protected void configure() throws Exception
{
- // Setup housekeeping every second
+ // Set up housekeeping to run every 100ms
setVirtualHostConfigurationProperty("virtualhosts.virtualhost." + VIRTUALHOST + ".housekeeping.checkPeriod", "100");
if (getName().contains("ProducerIdle"))
diff --git a/qpid/java/systests/src/main/java/org/apache/qpid/test/unit/transacted/TransactionTimeoutTestCase.java b/qpid/java/systests/src/main/java/org/apache/qpid/test/unit/transacted/TransactionTimeoutTestCase.java
index e2b0f00ee4..721dc027c6 100644
--- a/qpid/java/systests/src/main/java/org/apache/qpid/test/unit/transacted/TransactionTimeoutTestCase.java
+++ b/qpid/java/systests/src/main/java/org/apache/qpid/test/unit/transacted/TransactionTimeoutTestCase.java
@@ -23,17 +23,13 @@ package org.apache.qpid.test.unit.transacted;
import junit.framework.TestCase;
import org.apache.qpid.AMQException;
-import org.apache.qpid.client.AMQConnection;
-import org.apache.qpid.client.AMQConnectionURL;
-import org.apache.qpid.client.AMQQueue;
import org.apache.qpid.client.AMQSession;
-import org.apache.qpid.framing.AMQShortString;
-import org.apache.qpid.jms.ConnectionURL;
-import org.apache.qpid.jms.Session;
+import org.apache.qpid.configuration.ClientProperties;
import org.apache.qpid.protocol.AMQConstant;
import org.apache.qpid.test.utils.QpidBrokerTestCase;
import org.apache.qpid.util.LogMonitor;
+import javax.jms.Connection;
import javax.jms.DeliveryMode;
import javax.jms.ExceptionListener;
import javax.jms.JMSException;
@@ -41,6 +37,7 @@ import javax.jms.Message;
import javax.jms.MessageConsumer;
import javax.jms.MessageProducer;
import javax.jms.Queue;
+import javax.jms.Session;
import javax.jms.TextMessage;
import java.util.List;
import java.util.concurrent.CountDownLatch;
@@ -61,7 +58,7 @@ public abstract class TransactionTimeoutTestCase extends QpidBrokerTestCase impl
public static final String OPEN = "Open";
protected LogMonitor _monitor;
- protected AMQConnection _con;
+ protected Connection _con;
protected Session _psession, _csession;
protected Queue _queue;
protected MessageConsumer _consumer;
@@ -89,16 +86,14 @@ public abstract class TransactionTimeoutTestCase extends QpidBrokerTestCase impl
super.setUp();
// Connect to broker
- String broker = ("tcp://localhost:" + DEFAULT_PORT);
- ConnectionURL url = new AMQConnectionURL("amqp://guest:guest@clientid/test?brokerlist='" + broker + "'&maxprefetch='1'");
- _con = (AMQConnection) getConnection(url);
+ setTestClientSystemProperty(ClientProperties.MAX_PREFETCH_PROP_NAME, String.valueOf(1));
+ _con = getConnection();
_con.setExceptionListener(this);
_con.start();
// Create queue
Session qsession = _con.createSession(true, Session.SESSION_TRANSACTED);
- AMQShortString queueName = new AMQShortString("test");
- _queue = new AMQQueue(qsession.getDefaultQueueExchangeName(), queueName, queueName, false, true);
+ _queue = qsession.createQueue(getTestQueueName());
qsession.close();
// Create producer and consumer
diff --git a/qpid/java/systests/src/main/java/org/apache/qpid/test/utils/BrokerCommandHelper.java b/qpid/java/systests/src/main/java/org/apache/qpid/test/utils/BrokerCommandHelper.java
new file mode 100644
index 0000000000..81b53e3dac
--- /dev/null
+++ b/qpid/java/systests/src/main/java/org/apache/qpid/test/utils/BrokerCommandHelper.java
@@ -0,0 +1,52 @@
+/* Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.qpid.test.utils;
+
+import java.io.File;
+
+/**
+ * Generates the command to start a broker by substituting the tokens
+ * in the provided broker command.
+ *
+ * The command is returned as a single string; callers split it on whitespace
+ * before passing it to a {@link java.lang.ProcessBuilder}.
+ */
+public class BrokerCommandHelper
+{
+ private String _brokerCommandTemplate;
+
+ public BrokerCommandHelper(String brokerCommandTemplate)
+ {
+ _brokerCommandTemplate = brokerCommandTemplate;
+ }
+
+ public String getBrokerCommand(int port, String storePath, String storeType, File logConfigFile)
+ {
+ return _brokerCommandTemplate
+ .replace("@PORT", "" + port)
+ .replace("@STORE_PATH", storePath)
+ .replace("@STORE_TYPE", storeType)
+ .replace("@LOG_CONFIG_FILE", '"' + logConfigFile.getAbsolutePath() + '"');
+ }
+
+ public void removeBrokerCommandLog4JFile()
+ {
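+ // Assumes the log4j option ("-l <file>") is the final part of the template; the command is truncated just before "-l", also dropping the preceding space.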
+ int logArgumentPosition = _brokerCommandTemplate.indexOf("-l");
+ _brokerCommandTemplate = _brokerCommandTemplate.substring(0, logArgumentPosition - 1);
+ }
+}
diff --git a/qpid/java/systests/src/main/java/org/apache/qpid/test/utils/BrokerCommandHelperTest.java b/qpid/java/systests/src/main/java/org/apache/qpid/test/utils/BrokerCommandHelperTest.java
new file mode 100644
index 0000000000..663e7d92b4
--- /dev/null
+++ b/qpid/java/systests/src/main/java/org/apache/qpid/test/utils/BrokerCommandHelperTest.java
@@ -0,0 +1,50 @@
+/* Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.qpid.test.utils;
+
+import static org.mockito.Mockito.*;
+
+import java.io.File;
+
+public class BrokerCommandHelperTest extends QpidTestCase
+{
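+ // Template uses the same @PORT, @STORE_PATH, @STORE_TYPE and @LOG_CONFIG_FILE tokens that BrokerCommandHelper.getBrokerCommand() substitutes.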
+ private BrokerCommandHelper _brokerCommandHelper = new BrokerCommandHelper("qpid -p @PORT -sp @STORE_PATH -st @STORE_TYPE -l @LOG_CONFIG_FILE");
+
+ private File logConfigFile = mock(File.class);
+
+ @Override
+ public void setUp()
+ {
+ when(logConfigFile.getAbsolutePath()).thenReturn("logConfigFile");
+ }
+
+ public void testGetBrokerCommand()
+ {
+ String brokerCommand = _brokerCommandHelper.getBrokerCommand(1, "configFile", "json", logConfigFile);
+ assertEquals("Unexpected broker command", "qpid -p 1 -sp configFile -st json -l \"logConfigFile\"", brokerCommand);
+ }
+
+ public void testRemoveBrokerCommandLog4JFile()
+ {
+ _brokerCommandHelper.removeBrokerCommandLog4JFile();
+ String brokerCommand = _brokerCommandHelper.getBrokerCommand(1, "configFile", "json", logConfigFile);
+
+ assertEquals("The broker command list should not contain a log4j config option",
+ "qpid -p 1 -sp configFile -st json", brokerCommand );
+ }
+}
diff --git a/qpid/java/systests/src/main/java/org/apache/qpid/test/utils/QpidBrokerTestCase.java b/qpid/java/systests/src/main/java/org/apache/qpid/test/utils/QpidBrokerTestCase.java
index 8b67a5bbe4..3424520de6 100644..100755
--- a/qpid/java/systests/src/main/java/org/apache/qpid/test/utils/QpidBrokerTestCase.java
+++ b/qpid/java/systests/src/main/java/org/apache/qpid/test/utils/QpidBrokerTestCase.java
@@ -20,7 +20,8 @@ package org.apache.qpid.test.utils;
import java.io.File;
import java.io.FileOutputStream;
import java.io.PrintStream;
-import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
@@ -43,6 +44,7 @@ import javax.jms.Session;
import javax.jms.StreamMessage;
import javax.jms.TextMessage;
import javax.jms.Topic;
+import javax.naming.Context;
import javax.naming.InitialContext;
import javax.naming.NamingException;
@@ -77,7 +79,7 @@ public class QpidBrokerTestCase extends QpidTestCase
public enum BrokerType
{
EXTERNAL /** Test case relies on a Broker started independently of the test-suite */,
- INTERNAL /** Test case starts an embedded broker within this JVM */,
+ INTERNAL /** Test case starts an embedded broker within this JVM */,
SPAWNED /** Test case spawns a new broker as a separate process */
}
@@ -86,7 +88,7 @@ public class QpidBrokerTestCase extends QpidTestCase
protected final static String QpidHome = System.getProperty("QPID_HOME");
private final File _configFile = new File(System.getProperty("broker.config"));
- protected File _logConfigFile = new File(System.getProperty("log4j.configuration"));
+ private File _logConfigFile;
protected final String _brokerStoreType = System.getProperty("broker.config-store-type", "json");
protected static final Logger _logger = Logger.getLogger(QpidBrokerTestCase.class);
protected static final int LOGMONITOR_TIMEOUT = 5000;
@@ -105,11 +107,11 @@ public class QpidBrokerTestCase extends QpidTestCase
static
{
- String initialContext = System.getProperty(InitialContext.INITIAL_CONTEXT_FACTORY);
+ String initialContext = System.getProperty(Context.INITIAL_CONTEXT_FACTORY);
if (initialContext == null || initialContext.length() == 0)
{
- System.setProperty(InitialContext.INITIAL_CONTEXT_FACTORY, DEFAULT_INITIAL_CONTEXT);
+ System.setProperty(Context.INITIAL_CONTEXT_FACTORY, DEFAULT_INITIAL_CONTEXT);
}
}
@@ -141,7 +143,8 @@ public class QpidBrokerTestCase extends QpidTestCase
protected String _brokerLanguage = System.getProperty(BROKER_LANGUAGE, JAVA);
protected BrokerType _brokerType = BrokerType.valueOf(System.getProperty(BROKER_TYPE, "").toUpperCase());
- protected String _brokerCommand = System.getProperty(BROKER_COMMAND);
+
+ protected BrokerCommandHelper _brokerCommandHelper = new BrokerCommandHelper(System.getProperty(BROKER_COMMAND));
private Boolean _brokerCleanBetweenTests = Boolean.getBoolean(BROKER_CLEAN_BETWEEN_TESTS);
private final AmqpProtocolVersion _brokerVersion = AmqpProtocolVersion.valueOf(System.getProperty(BROKER_VERSION, ""));
protected String _output = System.getProperty(TEST_OUTPUT, System.getProperty("java.io.tmpdir"));
@@ -183,12 +186,11 @@ public class QpidBrokerTestCase extends QpidTestCase
}
private MessageType _messageType = MessageType.TEXT;
-
public QpidBrokerTestCase()
{
super();
-
_brokerConfigurations = new HashMap<Integer, TestBrokerConfiguration>();
+ initialiseLogConfigFile();
}
public TestBrokerConfiguration getBrokerConfiguration(int port)
@@ -228,6 +230,25 @@ public class QpidBrokerTestCase extends QpidTestCase
return configuration;
}
+ private void initialiseLogConfigFile()
+ {
+ try
+ {
+ _logger.info("About to initialise log config file from system property: " + LOG4J_CONFIG_FILE_PATH);
+
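+ // Use the multi-argument URI constructor so that characters such as spaces in the path are quoted correctly.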
+ URI uri = new URI("file", LOG4J_CONFIG_FILE_PATH, null);
+ _logConfigFile = new File(uri);
+ if(!_logConfigFile.exists())
+ {
+ throw new RuntimeException("Log config file " + _logConfigFile.getAbsolutePath() + " does not exist");
+ }
+ }
+ catch (URISyntaxException e)
+ {
+ throw new RuntimeException("Couldn't create URI from log4.configuration: " + LOG4J_CONFIG_FILE_PATH, e);
+ }
+ }
+
public Logger getLogger()
{
return QpidBrokerTestCase._logger;
@@ -372,16 +393,6 @@ public class QpidBrokerTestCase extends QpidTestCase
}
}
- protected String getBrokerCommand(int port, String configPath) throws MalformedURLException
- {
- return _brokerCommand
- .replace("@STORE_PATH", configPath)
- .replace("@STORE_TYPE", "json")
- .replace("@LOG_CONFIG_FILE", _logConfigFile.toString())
- // for cpp profile only
- .replace("@PORT", "" + port);
- }
-
public void startBroker() throws Exception
{
startBroker(0);
@@ -394,13 +405,23 @@ public class QpidBrokerTestCase extends QpidTestCase
startBroker(actualPort, configuration, _testVirtualhosts);
}
+
+ protected File getBrokerCommandLog4JFile()
+ {
+ return _logConfigFile;
+ }
+
+ protected void setBrokerCommandLog4JFile(File file)
+ {
+ _logConfigFile = file;
+ _logger.info("Modified log config file to: " + file);
+ }
+
public void startBroker(int port, TestBrokerConfiguration testConfiguration, XMLConfiguration virtualHosts) throws Exception
{
port = getPort(port);
String testConfig = saveTestConfiguration(port, testConfiguration);
String virtualHostsConfig = saveTestVirtualhosts(port, virtualHosts);
- _logger.info("Set test.virtualhosts property to: " + virtualHostsConfig);
- setSystemProperty(TEST_VIRTUALHOSTS, virtualHostsConfig);
if(_brokers.get(port) != null)
{
@@ -411,8 +432,9 @@ public class QpidBrokerTestCase extends QpidTestCase
if (_brokerType.equals(BrokerType.INTERNAL) && !existingInternalBroker())
{
+ _logger.info("Set test.virtualhosts property to: " + virtualHostsConfig);
+ setSystemProperty(TEST_VIRTUALHOSTS, virtualHostsConfig);
setSystemProperty(BrokerProperties.PROPERTY_USE_CUSTOM_RMI_SOCKET_FACTORY, "false");
-
BrokerOptions options = new BrokerOptions();
options.setConfigurationStoreType(_brokerStoreType);
@@ -426,7 +448,7 @@ public class QpidBrokerTestCase extends QpidTestCase
options.setLogConfigFile(_logConfigFile.getAbsolutePath());
BrokerLauncher broker = new BrokerLauncher();
- _logger.info("starting internal broker (same JVM)");
+ _logger.info("Starting internal broker (same JVM)");
broker.startup(options);
_brokers.put(port, new InternalBrokerHolder(broker, System.getProperty("QPID_WORK"), portsUsedByBroker));
@@ -435,8 +457,9 @@ public class QpidBrokerTestCase extends QpidTestCase
{
// Add the port to QPID_WORK to ensure unique working dirs for multi broker tests
final String qpidWork = getQpidWork(_brokerType, port);
- String cmd = getBrokerCommand(port, testConfig);
- _logger.info("starting external broker: " + cmd);
+
+ String cmd = _brokerCommandHelper.getBrokerCommand(port, testConfig, _brokerStoreType, _logConfigFile);
+ _logger.info("Starting spawn broker using command: " + cmd);
ProcessBuilder pb = new ProcessBuilder(cmd.split("\\s+"));
pb.redirectErrorStream(true);
Map<String, String> processEnv = pb.environment();
@@ -467,24 +490,28 @@ public class QpidBrokerTestCase extends QpidTestCase
}
}
+ String qpidOpts = "";
- // Add default test logging levels that are used by the log4j-test
- // Use the convenience methods to push the current logging setting
- // in to the external broker's QPID_OPTS string.
- setSystemProperty("amqj.protocol.logging.level");
- setSystemProperty("root.logging.level");
- setSystemProperty(BrokerProperties.PROPERTY_BROKER_DEFAULT_AMQP_PROTOCOL_EXCLUDES);
- setSystemProperty(BrokerProperties.PROPERTY_BROKER_DEFAULT_AMQP_PROTOCOL_INCLUDES);
-
- // set test.config and test.virtualhosts
- String qpidOpts = " -D" + TEST_VIRTUALHOSTS + "=" + virtualHostsConfig;
-
- // Add all the specified system properties to QPID_OPTS
- if (!_propertiesSetForBroker.isEmpty())
+ // Synchronize to avoid adding into QPID_OPTS the values of the JVM properties
+ // "test.virtualhosts" and "test.config" set by a concurrently starting broker
+ synchronized (_propertiesSetForBroker)
{
- for (String key : _propertiesSetForBroker.keySet())
+ // Add default test logging levels that are used by the log4j-test
+ // Use the convenience methods to push the current logging setting
+ // in to the external broker's QPID_OPTS string.
+ setSystemProperty("amqj.protocol.logging.level");
+ setSystemProperty("root.logging.level");
+ setSystemProperty(BrokerProperties.PROPERTY_BROKER_DEFAULT_AMQP_PROTOCOL_EXCLUDES);
+ setSystemProperty(BrokerProperties.PROPERTY_BROKER_DEFAULT_AMQP_PROTOCOL_INCLUDES);
+ setSystemProperty(TEST_VIRTUALHOSTS, virtualHostsConfig);
+
+ // Add all the specified system properties to QPID_OPTS
+ if (!_propertiesSetForBroker.isEmpty())
{
- qpidOpts += " -D" + key + "=" + _propertiesSetForBroker.get(key);
+ for (String key : _propertiesSetForBroker.keySet())
+ {
+ qpidOpts += " -D" + key + "=" + _propertiesSetForBroker.get(key);
+ }
}
}
if (processEnv.containsKey("QPID_OPTS"))
@@ -600,6 +627,8 @@ public class QpidBrokerTestCase extends QpidTestCase
String testVirtualhosts = getTestVirtualhostsFile(port);
String relative = relativeToQpidHome(testVirtualhosts);
+ _logger.info("Path to virtualhosts configuration: " + testVirtualhosts);
+
// Create the file if configuration does not exist
if (virtualHostConfiguration.isEmpty())
{
@@ -824,11 +853,13 @@ public class QpidBrokerTestCase extends QpidTestCase
*/
protected void setBrokerOnlySystemProperty(String property, String value)
{
- if (!_propertiesSetForBroker.containsKey(property))
+ synchronized (_propertiesSetForBroker)
{
- _propertiesSetForBroker.put(property, value);
+ if (!_propertiesSetForBroker.containsKey(property))
+ {
+ _propertiesSetForBroker.put(property, value);
+ }
}
-
}
/**
@@ -853,7 +884,7 @@ public class QpidBrokerTestCase extends QpidTestCase
*
* When the test run is complete the value will be reverted.
*
- * The values set using this method will also be propogated to the external
+ * The values set using this method will also be propagated to the external
* Java Broker via a -D value defined in QPID_OPTS.
*
* If the value should not be set on the broker then use
@@ -864,16 +895,18 @@ public class QpidBrokerTestCase extends QpidTestCase
*/
protected void setSystemProperty(String property, String value)
{
- // Record the value for the external broker
- if (value == null)
+ synchronized(_propertiesSetForBroker)
{
- _propertiesSetForBroker.remove(property);
- }
- else
- {
- _propertiesSetForBroker.put(property, value);
+ // Record the value for the external broker
+ if (value == null)
+ {
+ _propertiesSetForBroker.remove(property);
+ }
+ else
+ {
+ _propertiesSetForBroker.put(property, value);
+ }
}
-
//Set the value for the test client VM as well.
setTestClientSystemProperty(property, value);
}
diff --git a/qpid/java/systests/src/main/java/org/apache/qpid/util/LogMonitor.java b/qpid/java/systests/src/main/java/org/apache/qpid/util/LogMonitor.java
index 2b99289cd1..d6b8a8400c 100644
--- a/qpid/java/systests/src/main/java/org/apache/qpid/util/LogMonitor.java
+++ b/qpid/java/systests/src/main/java/org/apache/qpid/util/LogMonitor.java
@@ -42,6 +42,8 @@ import java.util.List;
*/
public class LogMonitor
{
+ private static final Logger _logger = Logger.getLogger(LogMonitor.class);
+
// The file that the log statements will be written to.
private final File _logfile;
@@ -90,6 +92,8 @@ public class LogMonitor
_appender.setImmediateFlush(true);
Logger.getRootLogger().addAppender(_appender);
}
+
+ _logger.info("Created LogMonitor. Monitoring file: " + _logfile.getAbsolutePath());
}
/**
diff --git a/qpid/java/test-profiles/CPPExcludes b/qpid/java/test-profiles/CPPExcludes
index bfcc06a1bc..8b74a19b8e 100755
--- a/qpid/java/test-profiles/CPPExcludes
+++ b/qpid/java/test-profiles/CPPExcludes
@@ -23,8 +23,6 @@ org.apache.qpid.test.client.destination.AddressBasedDestinationTest#testCreateEx
// QPID-3576: Java client issue. MessageConsumer#close() time-out.
org.apache.qpid.test.client.destination.AddressBasedDestinationTest#testDeleteOptions
-org.apache.qpid.test.unit.client.channelclose.ChannelCloseTest#*
-
// Those tests are testing 0.8 specific semantics
org.apache.qpid.test.client.ImmediateAndMandatoryPublishingTest#*
diff --git a/qpid/java/test-profiles/Java010Excludes b/qpid/java/test-profiles/Java010Excludes
index 3f12076dbe..c4b3ac8d66 100755
--- a/qpid/java/test-profiles/Java010Excludes
+++ b/qpid/java/test-profiles/Java010Excludes
@@ -37,9 +37,6 @@ org.apache.qpid.test.unit.close.JavaServerCloseRaceConditionTest#*
//QPID-1864: rollback with subscriptions does not work in 0-10 yet
org.apache.qpid.test.client.RollbackOrderTest#testOrderingAfterRollbackOnMessage
-// This test uses 0-8 channel frames
-org.apache.qpid.test.unit.client.channelclose.ChannelCloseTest#*
-
//QPID-3422: test fails because ring queue is not implemented on java broker
org.apache.qpid.test.client.destination.AddressBasedDestinationTest#testBrowseMode
diff --git a/qpid/java/test-profiles/testprofile.defaults b/qpid/java/test-profiles/testprofile.defaults
index a1e9ff0cbb..033da12a97 100644
--- a/qpid/java/test-profiles/testprofile.defaults
+++ b/qpid/java/test-profiles/testprofile.defaults
@@ -33,7 +33,12 @@ amqj.logging.level=${log}
amqj.server.logging.level=${log}
amqj.protocol.logging.level=${log}
root.logging.level=warn
-log4j.configuration=test-profiles/log4j-test.xml
+
+# System property log4j.configuration is used by log4j.
+# QpidBrokerTestCase uses log4j.configuration.file to construct a java.io.File, e.g. for the log configuration of spawned brokers.
+log4j.configuration.file=${test.profiles}/log4j-test.xml
+log4j.configuration=file:///${log4j.configuration.file}
+
log4j.debug=false
# Note test-provider.properties also has variables of same name.
diff --git a/qpid/packaging/windows/qpidc.wxs b/qpid/packaging/windows/qpidc.wxs
index 8fd9f9a396..615ee04ed6 100644
--- a/qpid/packaging/windows/qpidc.wxs
+++ b/qpid/packaging/windows/qpidc.wxs
@@ -42,11 +42,18 @@
<PropertyRef Id="NETFRAMEWORK35"/>
<!-- Allow 64-bit builds to pick ProgramFiles64Folder instead -->
- <?Define ProgramFiles = "ProgramFilesFolder"?>
+ <?define ProgramFiles = "ProgramFilesFolder"?>
+
+ <!-- Pick up Program Files for 32-bit from the correct place -->
+ <?ifdef env.ProgramFiles(x86)?>
+ <?define ProgramFilesx86 = "$(env.ProgramFiles(x86))"?>
+ <?else?>
+ <?define ProgramFilesx86 = "$(env.ProgramFiles)"?>
+ <?endif?>
<Directory Id="TARGETDIR" Name="SourceDir">
- <Merge Id="VCRedist" SourceFile="C:\Program Files\Common Files\Merge Modules\Microsoft_VC90_CRT_x86.msm" DiskId="1" Language="0"/>
- <Merge Id="VCRedistPolicy" SourceFile="C:\Program Files\Common Files\Merge Modules\policy_9_0_Microsoft_VC90_CRT_x86.msm" DiskId="1" Language="0"/>
+ <Merge Id="VCRedist" SourceFile="$(var.ProgramFilesx86)\Common Files\Merge Modules\Microsoft_VC90_CRT_x86.msm" DiskId="1" Language="0"/>
+ <Merge Id="VCRedistPolicy" SourceFile="$(var.ProgramFilesx86)\Common Files\Merge Modules\policy_9_0_Microsoft_VC90_CRT_x86.msm" DiskId="1" Language="0"/>
<Directory Id="$(var.ProgramFiles)">
<Directory Id="ApacheTop" Name="Apache">
<Directory Id="INSTALLLOCATION" Name="qpidc-$(var.qpidc_version)">
@@ -269,7 +276,6 @@
<ComponentRef Id="TopStorePlugin"/>
<ComponentRef Id="SQLPersistence"/>
<ComponentRef Id="CLFSPersistence"/>
- <ComponentGroupRef Id="group_BoostDlls"/>
</Feature>
<Feature Id="ClientLib" Title="Client Libraries and Headers to develop and run programs" Level="1">
@@ -293,8 +299,6 @@
<ComponentRef Id="WCFInteropDLL"/>
<ComponentRef Id="WCFXADLL"/>
<ComponentGroupRef Id="group_QpidHeaders"/>
- <ComponentGroupRef Id="group_BoostHeaders"/>
- <ComponentGroupRef Id="group_BoostDlls"/>
<Feature Id="Examples" Title="Client Programming Examples" Level="1">
<ComponentGroupRef Id="group_Examples"/>
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_10/management.py b/qpid/tests/src/py/qpid_tests/broker_0_10/management.py
index 4ec3e0dd03..126b96853b 100644
--- a/qpid/tests/src/py/qpid_tests/broker_0_10/management.py
+++ b/qpid/tests/src/py/qpid_tests/broker_0_10/management.py
@@ -140,6 +140,30 @@ class ManagementTest (TestBase010):
return exchange
return None
+ def test_move_queued_messages_empty(self):
+ """
+ Test that moving messages from an empty queue does not cause an error.
+ """
+ self.startQmf()
+ session = self.session
+ "Set up source queue"
+ session.queue_declare(queue="src-queue-empty", exclusive=True, auto_delete=True)
+
+ "Set up destination queue"
+ session.queue_declare(queue="dest-queue-empty", exclusive=True, auto_delete=True)
+
+ queues = self.qmf.getObjects(_class="queue")
+
+ "Move all messages from src-queue-empty to dest-queue-empty"
+ result = self.qmf.getObjects(_class="broker")[0].queueMoveMessages("src-queue-empty", "dest-queue-empty", 0, {})
+ self.assertEqual (result.status, 0)
+
+ sq = self.qmf.getObjects(_class="queue", name="src-queue-empty")[0]
+ dq = self.qmf.getObjects(_class="queue", name="dest-queue-empty")[0]
+
+ self.assertEqual (sq.msgDepth,0)
+ self.assertEqual (dq.msgDepth,0)
+
def test_move_queued_messages(self):
"""
Test ability to move messages from the head of one queue to another.
diff --git a/qpid/tools/src/py/qpid-stat b/qpid/tools/src/py/qpid-stat
index 0506c084a4..a448bb9881 100755
--- a/qpid/tools/src/py/qpid-stat
+++ b/qpid/tools/src/py/qpid-stat
@@ -156,7 +156,7 @@ class BrokerManager:
shutting down.
"""
try:
- connection.close()
+ self.connection.close()
except:
pass