author     Sage Weil <sage@inktank.com>    2013-05-01 21:46:50 -0700
committer  Sage Weil <sage@inktank.com>    2013-05-01 21:46:50 -0700
commit     6a91ecb18e8bb3a0c27d057cef3da43918e2a3e1 (patch)
tree       c1f1b91860877dd444e78450b14d1e9cdeae5cbc
parent     fdbab85ff3cb942555814dc0f8100a34bd3137a0 (diff)
parent     17c14b251d0b81a7443ce669d86a04dbfeae962e (diff)
download   ceph-6a91ecb18e8bb3a0c27d057cef3da43918e2a3e1.tar.gz
Merge branch 'next'
-rw-r--r--  doc/install/upgrading-ceph.rst    294
-rw-r--r--  src/.gitignore                      3
-rw-r--r--  src/Makefile.am                     9
-rw-r--r--  src/mds/Dumper.cc                  25
-rw-r--r--  src/mds/Dumper.h                    1
-rw-r--r--  src/os/DBObjectMap.cc              13
-rw-r--r--  src/os/DBObjectMap.h                4
-rw-r--r--  src/osd/OSD.cc                     36
-rw-r--r--  src/osd/OSD.h                       1
-rw-r--r--  src/tools/ceph-osdomap-tool.cc    154
10 files changed, 433 insertions, 107 deletions
diff --git a/doc/install/upgrading-ceph.rst b/doc/install/upgrading-ceph.rst
index b8a4c89c0cf..667a9e235b9 100644
--- a/doc/install/upgrading-ceph.rst
+++ b/doc/install/upgrading-ceph.rst
@@ -23,38 +23,195 @@ daemons in this order:
As a general rule, we recommend upgrading all the daemons of a specific type
(e.g., all ``ceph-osd`` daemons, all ``ceph-mon`` daemons, etc.) to ensure that
they are all on the same release. We also recommend that you upgrade all the
-daemons in your cluster before you try to excercise new functionality in a
-release.
+daemons in your cluster before you try to exercise new functionality in a
+release.
-The following sections describe the upgrade process.
+Each release of Ceph may have some additional steps. Refer to the following
+sections to identify release-specific procedures for your cluster before
+using the upgrade procedures.
-.. important:: Each release of Ceph may have some additional steps. Refer to
- release-specific sections for details BEFORE you begin upgrading daemons.
-Upgrading an OSD
-================
+Argonaut to Bobtail
+===================
-To upgrade an OSD peform the following steps:
+When upgrading from Argonaut to Bobtail, you need to be aware of several things:
-#. Upgrade the OSD package::
+#. Authentication now defaults to **ON**, but used to default to **OFF**.
+#. Monitors use a new internal on-wire protocol.
+#. RBD ``format 2`` images require upgrading all OSDs before you use them.
- ssh {osd-host}
- sudo apt-get update && sudo apt-get install ceph
+Ensure that you update package repository paths. For example::
-#. Restart the OSD, where ``N`` is the OSD number::
+ sudo rm /etc/apt/sources.list.d/ceph.list
+ echo deb http://ceph.com/debian-bobtail/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list
- service ceph restart osd.N
+See the following sections for additional details.
-#. Ensure the upgraded OSD has rejoined the cluster::
+Authentication
+--------------
- ceph osd stat
+The Ceph Bobtail release enables authentication by default. Bobtail also has
+finer-grained authentication configuration settings. In previous versions of
+Ceph (i.e., v0.55 and earlier), you could simply specify::
-Once you have successfully upgraded an OSD, you may upgrade another OSD until
-you have completed the upgrade cycle for all of your OSDs.
+ auth supported = [cephx | none]
+
+This option still works, but is deprecated. New releases support
+``cluster``, ``service`` and ``client`` authentication settings as
+follows::
+
+ auth cluster required = [cephx | none] # default cephx
+ auth service required = [cephx | none] # default cephx
+ auth client required = [cephx | none] # default cephx,none
+
+.. important:: If your cluster does not currently have an ``auth
+ supported`` line that enables authentication, you must explicitly
+ turn it off in Bobtail using the settings below::
+
+ auth cluster required = none
+ auth service required = none
+
+ This will disable authentication on the cluster, but still leave
+ clients with the default configuration, where they can talk to a
+ cluster that enables authentication but do not require it themselves.
+
+.. important:: If your cluster already has an ``auth supported`` option defined in
+ the configuration file, no changes are necessary.
+
+See `Ceph Authentication - Backward Compatibility`_ for details.
+
+
+Monitor On-wire Protocol
+------------------------
+
+We recommend upgrading all monitors to Bobtail. A mixture of Bobtail and
+Argonaut monitors will not be able to use the new on-wire protocol, as the
+protocol requires all monitors to be Bobtail or greater. Upgrading only a
+majority of the nodes (e.g., two out of three) may expose the cluster to a
+situation where a single additional failure may compromise availability (because
+the non-upgraded daemon cannot participate in the new protocol). We recommend
+not waiting for an extended period of time between ``ceph-mon`` upgrades.
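+
+For example, assuming the ``ceph`` CLI is installed on one of the upgraded
+nodes, you can confirm that the monitors re-form a quorum after each restart
+with::
+
+ ceph mon stat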
+
+
+RBD Images
+----------
+
+The Bobtail release supports ``format 2`` images! However, you should not create
+or use ``format 2`` RBD images until after all ``ceph-osd`` daemons have been
+upgraded. Note that ``format 1`` is still the default. You can use the new
+``ceph osd ls`` and ``ceph tell osd.N version`` commands to double-check your
+cluster. ``ceph osd ls`` will give a list of all OSD IDs that are part of the
+cluster, and you can use that to write a simple shell loop to display all the
+OSD version strings: ::
+
+ for i in $(ceph osd ls); do
+ ceph tell osd.${i} version
+ done
+
+
+Argonaut to Cuttlefish
+======================
+
+To upgrade your cluster from Argonaut to Cuttlefish, please read this section,
+and the sections on upgrading from Argonaut to Bobtail and upgrading from
+Bobtail to Cuttlefish carefully. When upgrading from Argonaut to Cuttlefish,
+**YOU MUST UPGRADE YOUR MONITORS FROM ARGONAUT TO BOBTAIL FIRST**. All other
+Ceph daemons can upgrade from Argonaut to Cuttlefish without the intermediate
+upgrade to Bobtail.
+
+.. important:: Ensure that the repository specified points to Bobtail, not
+ Cuttlefish.
+
+For example::
+
+ sudo rm /etc/apt/sources.list.d/ceph.list
+ echo deb http://ceph.com/debian-bobtail/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list
+
+We recommend upgrading all monitors to Bobtail before proceeding with the
+upgrade of the monitors to Cuttlefish. A mixture of Bobtail and Argonaut
+monitors will not be able to use the new on-wire protocol, as the protocol
+requires all monitors to be Bobtail or greater. Upgrading only a majority of the
+nodes (e.g., two out of three) may expose the cluster to a situation where a
+single additional failure may compromise availability (because the non-upgraded
+daemon cannot participate in the new protocol). We recommend not waiting for an
+extended period of time between ``ceph-mon`` upgrades. See `Upgrading a
+Monitor`_ for details.
+
+.. note:: See the `Authentication`_ section and
+ `Ceph Authentication - Backward Compatibility`_ for additional information
+ on authentication backward compatibility settings for Bobtail.
+
+Once you complete the upgrade of your monitors from Argonaut to Bobtail, you
+must upgrade the monitors from Bobtail to Cuttlefish. Ensure that you have
+a quorum before beginning this upgrade procedure. Before upgrading, remember
+to replace the reference to the Bobtail repository with a reference to the
+Cuttlefish repository. For example::
+
+ sudo rm /etc/apt/sources.list.d/ceph.list
+ echo deb http://ceph.com/debian-cuttlefish/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list
+
+See `Upgrading a Monitor`_ for details.
+
+The architecture of the monitors changed significantly from Argonaut to
+Cuttlefish. See `Monitor Config Reference`_ and `Joao's blog post`_ for details.
+Once you complete the monitor upgrade, you can upgrade the OSD daemons and the
+MDS daemons using the generic procedures. See `Upgrading an OSD`_ and `Upgrading
+a Metadata Server`_ for details.
+
+
+Bobtail to Cuttlefish
+=====================
+
+Upgrading your cluster from Bobtail to Cuttlefish has a few important
+considerations. First, the monitor uses a new architecture, so you should
+upgrade the full set of monitors to use Cuttlefish. Second, if you run multiple
+metadata servers in a cluster, ensure the metadata servers have unique names.
+See the following sections for details.
+
+Replace any ``apt`` reference to older repositories with a reference to the
+Cuttlefish repository. For example::
+
+ sudo rm /etc/apt/sources.list.d/ceph.list
+ echo deb http://ceph.com/debian-cuttlefish/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list
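+
+The package upgrade itself is covered by the generic procedures below; as a
+sketch, on Debian/Ubuntu systems each node is typically upgraded with::
+
+ sudo apt-get update && sudo apt-get install ceph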
+
+
+Monitor
+-------
+
+The architecture of the monitors changed significantly from Bobtail to
+Cuttlefish. See `Monitor Config Reference`_ and `Joao's blog post`_ for
+details. This means that v0.59 and pre-v0.59 monitors do not talk to each other
+(Cuttlefish is v0.61). When you upgrade each monitor, it will convert its
+local data store to the new format. Once you upgrade a majority of monitors,
+the monitors form a quorum using the new protocol and the old monitors will be
+blocked until they get upgraded. For this reason, we recommend upgrading the
+monitors in immediate succession.
+
+.. important:: Do not run a mixed-version cluster for an extended period.
+
+
+MDS Unique Names
+----------------
+
+The monitor now enforces that MDS names be unique. If you have multiple metadata
+server daemons that start with the same ID (e.g., ``mds.a``), the second
+metadata server will implicitly mark the first metadata server as ``failed``.
+Multi-MDS configurations with identical names must be adjusted to give each
+daemon a unique name. If you run your cluster with one metadata server,
+you can disregard this notice for now.
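+
+As a minimal sketch, a ``ceph.conf`` layout with unique metadata server names
+might look like this (the daemon IDs and hostnames are examples only)::
+
+ [mds.a]
+         host = mds-host-1
+
+ [mds.b]
+         host = mds-host-2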
+
+
+Upgrade Procedures
+==================
+
+The following sections describe the upgrade process.
+
+.. important:: Each release of Ceph may have some additional steps. Refer to
+ release-specific sections for details **BEFORE** you begin upgrading daemons.
Upgrading a Monitor
-===================
+-------------------
To upgrade a monitor, perform the following steps:
@@ -75,8 +232,30 @@ Once you have successfully upgraded a monitor, you may upgrade another monitor
until you have completed the upgrade cycle for all of your monitors.
+Upgrading an OSD
+----------------
+
+To upgrade an OSD, perform the following steps:
+
+#. Upgrade the OSD package::
+
+ ssh {osd-host}
+ sudo apt-get update && sudo apt-get install ceph
+
+#. Restart the OSD, where ``N`` is the OSD number::
+
+ service ceph restart osd.N
+
+#. Ensure the upgraded OSD has rejoined the cluster::
+
+ ceph osd stat
+
+Once you have successfully upgraded an OSD, you may upgrade another OSD until
+you have completed the upgrade cycle for all of your OSDs.
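+
+As with the RBD Images section above, you can reuse ``ceph osd ls`` and
+``ceph tell osd.N version`` in a small loop to confirm that every OSD reports
+the new version once the cycle is complete::
+
+ for i in $(ceph osd ls); do
+   ceph tell osd.${i} version
+ done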
+
+
Upgrading a Metadata Server
-===========================
+---------------------------
To upgrade an MDS, perform the following steps:
@@ -97,8 +276,9 @@ Once you have successfully upgraded a metadata, you may upgrade another metadata
server until you have completed the upgrade cycle for all of your metadata
servers.
+
Upgrading a Client
-==================
+------------------
Once you have upgraded the packages and restarted daemons on your Ceph
cluster, we recommend upgrading ``ceph-common`` and client libraries
@@ -114,77 +294,7 @@ cluster, we recommend upgrading ``ceph-common`` and client libraries
ceph --version
-Upgrading from Argonaut to Bobtail
-==================================
-
-When upgrading from Argonaut to Bobtail, you need to be aware of three things:
-
-#. Authentication now defaults to **ON**, but used to default to off.
-#. Monitors use a new internal on-wire protocol
-#. RBD ``format2`` images require updgrading all OSDs before using it.
-
-See the following sections for details.
-
-
-Authentication
---------------
-
-The Ceph Bobtail release enables authentication by default. Bobtail also has
-finer-grained authentication configuration settings. In previous versions of
-Ceph (i.e., actually v 0.55 and earlier), you could simply specify::
-
- auth supported = [cephx | none]
-
-This option still works, but is deprecated. New releases support
-``cluster``, ``service`` and ``client`` authentication settings as
-follows::
-
- auth cluster required = [cephx | none] # default cephx
- auth service required = [cephx | none] # default cephx
- auth client required = [cephx | none] # default cephx,none
-
-.. important:: If your cluster does not currently have an ``auth
- supported`` line that enables authentication, you must explicitly
- turn it off in Bobtail using the settings below.::
-
- auth cluster required = none
- auth service required = none
-
- This will disable authentication on the cluster, but still leave
- clients with the default configuration where they can talk to a
- cluster that does enable it, but do not require it.
-
-.. important:: If your cluster already has an ``auth supported`` option defined in
- the configuration file, no changes are necessary.
-
-See `Ceph Authentication - Backward Compatibility`_ for details.
-
+.. _Monitor Config Reference: ../../rados/configuration/mon-config-ref
+.. _Joao's blog post: http://ceph.com/dev-notes/cephs-new-monitor-changes
.. _Ceph Authentication: ../../rados/operations/authentication/
.. _Ceph Authentication - Backward Compatibility: ../../rados/operations/authentication/#backward-compatibility
-
-Monitor On-wire Protocol
-------------------------
-
-We recommend upgrading all monitors to Bobtail. A mixture of Bobtail and
-Argonaut monitors will not be able to use the new on-wire protocol, as the
-protocol requires all monitors to be Bobtail or greater. Upgrading only a
-majority of the nodes (e.g., two out of three) may expose the cluster to a
-situation where a single additional failure may compromise availability (because
-the non-upgraded daemon cannot participate in the new protocol). We recommend
-not waiting for an extended period of time between ``ceph-mon`` upgrades.
-
-
-RBD Images
-----------
-
-The Bobtail release supports ``format 2`` images! However, you should not create
-or use ``format 2`` RBD images until after all ``ceph-osd`` daemons have been
-upgraded. Note that ``format 1`` is still the default. You can use the new
-``ceph osd ls`` and ``ceph tell osd.N version`` commands to doublecheck your
-cluster. ``ceph osd ls`` will give a list of all OSD IDs that are part of the
-cluster, and you can use that to write a simple shell loop to display all the
-OSD version strings: ::
-
- for i in $(ceph osd ls); do
- ceph tell osd.${i} version
- done
diff --git a/src/.gitignore b/src/.gitignore
index 473af888080..fa8ab2f0526 100644
--- a/src/.gitignore
+++ b/src/.gitignore
@@ -40,7 +40,8 @@ Makefile
/ceph_smalliobenchdumb
/ceph_smalliobenchfs
/ceph_smalliobenchrbd
-/ceph_monstore_tool
+/ceph-monstore-tool
+/ceph-osdomap-tool
/ceph_ver.h
/dev
/init-ceph
diff --git a/src/Makefile.am b/src/Makefile.am
index 476c80e8027..13ea671fbfa 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -128,12 +128,19 @@ ceph_filestore_dump_LDADD += -ldl
endif
bin_PROGRAMS += ceph ceph-conf ceph-authtool ceph_filestore_dump
+ceph_osdomap_tool_SOURCES = tools/ceph-osdomap-tool.cc \
+ os/LevelDBStore.cc
+ceph_osdomap_tool_LDFLAGS = ${AM_LDFLAGS}
+ceph_osdomap_tool_LDADD = $(LIBOS_LDA) $(LIBGLOBAL_LDA) -lboost_program_options
+ceph_osdomap_tool_CXXFLAGS = ${AM_CXXFLAGS} ${UNITTEST_CXXFLAGS}
+bin_DEBUGPROGRAMS += ceph-osdomap-tool
+
ceph_monstore_tool_SOURCES = tools/ceph-monstore-tool.cc \
os/LevelDBStore.cc
ceph_monstore_tool_LDFLAGS = ${AM_LDFLAGS}
ceph_monstore_tool_LDADD = $(LIBOS_LDA) $(LIBGLOBAL_LDA) -lboost_program_options
ceph_monstore_tool_CXXFLAGS = ${AM_CXXFLAGS} ${UNITTEST_CXXFLAGS}
-bin_PROGRAMS += ceph_monstore_tool
+bin_DEBUGPROGRAMS += ceph-monstore-tool
monmaptool_SOURCES = monmaptool.cc
monmaptool_LDADD = $(LIBGLOBAL_LDA)
diff --git a/src/mds/Dumper.cc b/src/mds/Dumper.cc
index 917403ee208..9b83b4d9b00 100644
--- a/src/mds/Dumper.cc
+++ b/src/mds/Dumper.cc
@@ -25,6 +25,8 @@
#include "mon/MonClient.h"
#include "osdc/Journaler.h"
+#define dout_subsys ceph_subsys_mds
+
Dumper::~Dumper()
{
}
@@ -69,9 +71,9 @@ void Dumper::init(int rank)
objecter->init_unlocked();
lock.Lock();
objecter->init_locked();
+ lock.Unlock();
objecter->wait_for_osd_map();
timer.init();
- lock.Unlock();
}
void Dumper::shutdown()
@@ -91,11 +93,14 @@ void Dumper::dump(const char *dump_file)
int rank = strtol(g_conf->name.get_id().c_str(), 0, 0);
inodeno_t ino = MDS_INO_LOG_OFFSET + rank;
+ Mutex localLock("dump:lock");
lock.Lock();
- journaler->recover(new C_SafeCond(&lock, &cond, &done, &r));
- while (!done)
- cond.Wait(lock);
+ journaler->recover(new C_SafeCond(&localLock, &cond, &done, &r));
lock.Unlock();
+ localLock.Lock();
+ while (!done)
+ cond.Wait(localLock);
+ localLock.Unlock();
if (r < 0) { // Error
derr << "error on recovery: " << cpp_strerror(r) << dendl;
@@ -103,6 +108,8 @@ void Dumper::dump(const char *dump_file)
// wait for messenger to finish
messenger->wait();
shutdown();
+ } else {
+ dout(10) << "completed journal recovery" << dendl;
}
uint64_t start = journaler->get_read_pos();
@@ -112,12 +119,14 @@ void Dumper::dump(const char *dump_file)
Filer filer(objecter);
bufferlist bl;
- filer.read(ino, &journaler->get_layout(), CEPH_NOSNAP,
- start, len, &bl, 0, new C_SafeCond(&lock, &cond, &done));
lock.Lock();
- while (!done)
- cond.Wait(lock);
+ filer.read(ino, &journaler->get_layout(), CEPH_NOSNAP,
+ start, len, &bl, 0, new C_SafeCond(&localLock, &cond, &done));
lock.Unlock();
+ localLock.Lock();
+ while (!done)
+ cond.Wait(localLock);
+ localLock.Unlock();
cout << "read " << bl.length() << " bytes at offset " << start << std::endl;
diff --git a/src/mds/Dumper.h b/src/mds/Dumper.h
index 066ba5ccb1d..794ae0c8aa7 100644
--- a/src/mds/Dumper.h
+++ b/src/mds/Dumper.h
@@ -62,6 +62,7 @@ public:
virtual ~Dumper();
bool ms_dispatch(Message *m) {
+ Mutex::Locker locker(lock);
switch (m->get_type()) {
case CEPH_MSG_OSD_OPREPLY:
objecter->handle_osd_op_reply((MOSDOpReply *)m);
diff --git a/src/os/DBObjectMap.cc b/src/os/DBObjectMap.cc
index c3a4c3b9869..29cf8360991 100644
--- a/src/os/DBObjectMap.cc
+++ b/src/os/DBObjectMap.cc
@@ -1213,3 +1213,16 @@ bool DBObjectMap::check_spos(const hobject_t &hoid,
return true;
}
}
+
+int DBObjectMap::list_objects(vector<hobject_t> *out)
+{
+ KeyValueDB::Iterator iter = db->get_iterator(HOBJECT_TO_SEQ);
+ for (iter->seek_to_first(); iter->valid(); iter->next()) {
+ bufferlist bl = iter->value();
+ bufferlist::iterator bliter = bl.begin();
+ _Header header;
+ header.decode(bliter);
+ out->push_back(header.hoid);
+ }
+ return 0;
+}
diff --git a/src/os/DBObjectMap.h b/src/os/DBObjectMap.h
index 18c6ce402ff..ba05dff6c6f 100644
--- a/src/os/DBObjectMap.h
+++ b/src/os/DBObjectMap.h
@@ -164,6 +164,10 @@ public:
/// Ensure that all previous operations are durable
int sync(const hobject_t *hoid=0, const SequencerPosition *spos=0);
+ /// Utility: list all objects; there must be no other concurrent access
+ int list_objects(vector<hobject_t> *objs ///< [out] objects
+ );
+
ObjectMapIterator get_iterator(const hobject_t &hoid);
static const string USER_PREFIX;
diff --git a/src/osd/OSD.cc b/src/osd/OSD.cc
index e63361b8ddd..e25c472d782 100644
--- a/src/osd/OSD.cc
+++ b/src/osd/OSD.cc
@@ -234,6 +234,11 @@ void OSDService::mark_split_in_progress(pg_t parent, const set<pg_t> &children)
void OSDService::cancel_pending_splits_for_parent(pg_t parent)
{
Mutex::Locker l(in_progress_split_lock);
+ return _cancel_pending_splits_for_parent(parent);
+}
+
+void OSDService::_cancel_pending_splits_for_parent(pg_t parent)
+{
map<pg_t, set<pg_t> >::iterator piter = rev_pending_splits.find(parent);
if (piter == rev_pending_splits.end())
return;
@@ -244,6 +249,7 @@ void OSDService::cancel_pending_splits_for_parent(pg_t parent)
assert(pending_splits.count(*i));
assert(!in_progress_splits.count(*i));
pending_splits.erase(*i);
+ _cancel_pending_splits_for_parent(*i);
}
rev_pending_splits.erase(piter);
}
@@ -1738,12 +1744,32 @@ void OSD::load_pgs()
assert(i->second.empty());
}
- set<pg_t> split_pgs;
+ // First, check whether we can avoid this potentially expensive check
if (osdmap->have_pg_pool(pg->info.pgid.pool()) &&
- pg->info.pgid.is_split(pg->get_osdmap()->get_pg_num(pg->info.pgid.pool()),
- osdmap->get_pg_num(pg->info.pgid.pool()),
- &split_pgs)) {
- service.start_split(pg->info.pgid, split_pgs);
+ pg->info.pgid.is_split(
+ pg->get_osdmap()->get_pg_num(pg->info.pgid.pool()),
+ osdmap->get_pg_num(pg->info.pgid.pool()),
+ 0)) {
+ // Ok, a split happened, so we need to walk the osdmaps
+ set<pg_t> new_pgs; // pgs to scan on each map
+ new_pgs.insert(pg->info.pgid);
+ for (epoch_t e = pg->get_osdmap()->get_epoch() + 1;
+ e <= osdmap->get_epoch();
+ ++e) {
+ OSDMapRef curmap(get_map(e-1));
+ OSDMapRef nextmap(get_map(e));
+ set<pg_t> even_newer_pgs; // pgs added in this loop
+ for (set<pg_t>::iterator i = new_pgs.begin(); i != new_pgs.end(); ++i) {
+ set<pg_t> split_pgs;
+ if (i->is_split(curmap->get_pg_num(i->pool()),
+ nextmap->get_pg_num(i->pool()),
+ &split_pgs)) {
+ service.start_split(*i, split_pgs);
+ even_newer_pgs.insert(split_pgs.begin(), split_pgs.end());
+ }
+ }
+ new_pgs.insert(even_newer_pgs.begin(), even_newer_pgs.end());
+ }
}
pg->reg_next_scrub();
diff --git a/src/osd/OSD.h b/src/osd/OSD.h
index f894768fbe5..a0b05f0818b 100644
--- a/src/osd/OSD.h
+++ b/src/osd/OSD.h
@@ -398,6 +398,7 @@ public:
void mark_split_in_progress(pg_t parent, const set<pg_t> &pgs);
void complete_split(const set<pg_t> &pgs);
void cancel_pending_splits_for_parent(pg_t parent);
+ void _cancel_pending_splits_for_parent(pg_t parent);
bool splitting(pg_t pgid);
void expand_pg_num(OSDMapRef old_map,
OSDMapRef new_map);
diff --git a/src/tools/ceph-osdomap-tool.cc b/src/tools/ceph-osdomap-tool.cc
new file mode 100644
index 00000000000..28a407ca151
--- /dev/null
+++ b/src/tools/ceph-osdomap-tool.cc
@@ -0,0 +1,154 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+/*
+* Ceph - scalable distributed file system
+*
+* Copyright (C) 2012 Inktank, Inc.
+*
+* This is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License version 2.1, as published by the Free Software
+* Foundation. See file COPYING.
+*/
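+
+// Hypothetical usage (the path below is an example; point --omap-path at an
+// offline OSD's omap directory, typically <osd-data>/current/omap):
+//
+//   ceph-osdomap-tool --omap-path /var/lib/ceph/osd/ceph-0/current/omap dump-objects
+//   ceph-osdomap-tool --omap-path /var/lib/ceph/osd/ceph-0/current/omap check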
+#include <boost/scoped_ptr.hpp>
+#include <boost/lexical_cast.hpp>
+#include <boost/program_options/option.hpp>
+#include <boost/program_options/options_description.hpp>
+#include <boost/program_options/variables_map.hpp>
+#include <boost/program_options/cmdline.hpp>
+#include <boost/program_options/parsers.hpp>
+#include <iostream>
+#include <set>
+#include <sstream>
+#include <stdlib.h>
+#include <fstream>
+#include <string>
+#include <map>
+
+#include "global/global_init.h"
+#include "os/LevelDBStore.h"
+#include "mon/MonitorDBStore.h"
+#include "os/DBObjectMap.h"
+
+namespace po = boost::program_options;
+using namespace std;
+
+int main(int argc, char **argv) {
+ po::options_description desc("Allowed options");
+ string store_path, cmd, out_path;
+ desc.add_options()
+ ("help", "produce help message")
+ ("omap-path", po::value<string>(&store_path),
+ "path to mon directory, mandatory (current/omap usually)")
+ ("command", po::value<string>(&cmd),
+ "command")
+ ;
+ po::positional_options_description p;
+ p.add("command", 1);
+
+ po::variables_map vm;
+ po::parsed_options parsed =
+ po::command_line_parser(argc, argv).options(desc).positional(p).run();
+ po::store(
+ parsed,
+ vm);
+ try {
+ po::notify(vm);
+ } catch (...) {
+ cout << desc << std::endl;
+ return 1;
+ }
+
+ vector<const char *> ceph_options, def_args;
+ vector<string> ceph_option_strings = po::collect_unrecognized(
+ parsed.options, po::include_positional);
+ ceph_options.reserve(ceph_option_strings.size());
+ for (vector<string>::iterator i = ceph_option_strings.begin();
+ i != ceph_option_strings.end();
+ ++i) {
+ ceph_options.push_back(i->c_str());
+ }
+
+ global_init(
+ &def_args, ceph_options, CEPH_ENTITY_TYPE_OSD,
+ CODE_ENVIRONMENT_UTILITY, 0);
+ common_init_finish(g_ceph_context);
+ g_ceph_context->_conf->apply_changes(NULL);
+ g_conf = g_ceph_context->_conf;
+
+ if (vm.count("help")) {
+ std::cerr << desc << std::endl;
+ return 1;
+ }
+
+ LevelDBStore* store(new LevelDBStore(store_path));
+ DBObjectMap omap(store);
+ stringstream out;
+ int r = store->open(out);
+ if (r < 0) {
+ std::cerr << "Store open got: " << cpp_strerror(r) << std::endl;
+ std::cerr << "Output: " << out.str() << std::endl;
+ goto done;
+ }
+ r = 0;
+
+
+ if (cmd == "dump-raw-keys") {
+ KeyValueDB::WholeSpaceIterator i = store->get_iterator();
+ for (i->seek_to_first(); i->valid(); i->next()) {
+ std::cout << i->raw_key() << std::endl;
+ }
+ } else if (cmd == "dump-raw-key-vals") {
+ KeyValueDB::WholeSpaceIterator i = store->get_iterator();
+ for (i->seek_to_first(); i->valid(); i->next()) {
+ std::cout << i->raw_key() << std::endl;
+ i->value().hexdump(std::cout);
+ }
+ } else if (cmd == "dump-objects") {
+ vector<hobject_t> objects;
+ r = omap.list_objects(&objects);
+ if (r < 0) {
+ std::cerr << "list_objects got: " << cpp_strerror(r) << std::endl;
+ goto done;
+ }
+ for (vector<hobject_t>::iterator i = objects.begin();
+ i != objects.end();
+ ++i) {
+ std::cout << *i << std::endl;
+ }
+ r = 0;
+ } else if (cmd == "dump-objects-with-keys") {
+ vector<hobject_t> objects;
+ r = omap.list_objects(&objects);
+ if (r < 0) {
+ std::cerr << "list_objects got: " << cpp_strerror(r) << std::endl;
+ goto done;
+ }
+ for (vector<hobject_t>::iterator i = objects.begin();
+ i != objects.end();
+ ++i) {
+ std::cout << "Object: " << *i << std::endl;
+ ObjectMap::ObjectMapIterator j = omap.get_iterator(*i);
+ for (j->seek_to_first(); j->valid(); j->next()) {
+ std::cout << j->key() << std::endl;
+ j->value().hexdump(std::cout);
+ }
+ }
+ } else if (cmd == "check") {
+ r = omap.check(std::cout);
+ if (!r) {
+ std::cerr << "check got: " << cpp_strerror(r) << std::endl;
+ goto done;
+ }
+ std::cout << "check succeeded" << std::endl;
+ } else {
+ std::cerr << "Did not recognize command " << cmd << std::endl;
+ goto done;
+ }
+
+ done:
+ return r;
+}