summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--ceph.spec.in1
-rw-r--r--debian/ceph-fuse.install1
-rw-r--r--doc/architecture.rst185
-rw-r--r--doc/install/os-recommendations.rst23
-rw-r--r--doc/rados/configuration/filestore-config-ref.rst12
-rw-r--r--doc/rados/configuration/ms-ref.rst20
-rw-r--r--doc/rados/deployment/mkcephfs.rst23
-rw-r--r--doc/rados/operations/add-or-rm-osds.rst5
-rw-r--r--doc/rados/operations/control.rst6
-rw-r--r--doc/radosgw/admin/adminops.rst1507
-rw-r--r--doc/radosgw/config-ref.rst6
-rw-r--r--doc/radosgw/config.rst4
-rw-r--r--doc/radosgw/index.rst2
-rw-r--r--doc/radosgw/s3.rst2
-rw-r--r--doc/radosgw/s3/python.rst6
-rw-r--r--doc/radosgw/swift.rst74
-rw-r--r--doc/radosgw/swift/swift.rst (renamed from doc/radosgw/swift/index.rst)0
-rw-r--r--qa/run_xfstests.sh2
-rw-r--r--qa/run_xfstests_qemu.sh6
-rwxr-xr-xqa/workunits/rbd/map-unmap.sh41
-rw-r--r--src/Makefile.am9
-rw-r--r--src/auth/cephx/CephxProtocol.h9
-rwxr-xr-xsrc/ceph-disk-activate5
-rw-r--r--src/client/Client.cc6
-rw-r--r--src/common/config_opts.h3
-rw-r--r--src/common/lockdep.cc2
-rw-r--r--src/init-ceph.in2
-rw-r--r--src/mkcephfs.in2
-rw-r--r--src/mon/OSDMonitor.cc40
-rwxr-xr-xsrc/mount.fuse.ceph26
-rw-r--r--src/os/FileStore.cc6
-rw-r--r--src/osd/OSD.cc6
-rw-r--r--src/osd/ReplicatedPG.cc8
-rw-r--r--src/pybind/rbd.py31
-rw-r--r--src/rbd.cc14
-rw-r--r--src/rgw/rgw_cache.h5
-rw-r--r--src/rgw/rgw_main.cc2
-rw-r--r--src/rgw/rgw_rest.cc117
-rw-r--r--src/rgw/rgw_rest.h2
-rw-r--r--src/rgw/rgw_rest_s3.cc15
-rw-r--r--src/rgw/rgw_tools.cc19
-rw-r--r--src/sample.ceph.conf18
-rw-r--r--src/test/libcephfs/caps.cc50
-rw-r--r--src/upstart/ceph-mds-all-starter.conf4
-rw-r--r--src/upstart/ceph-mon-all-starter.conf2
-rw-r--r--src/upstart/ceph-osd-all-starter.conf3
46 files changed, 2050 insertions, 282 deletions
diff --git a/ceph.spec.in b/ceph.spec.in
index 3a369cf5359..ba1cce459bc 100644
--- a/ceph.spec.in
+++ b/ceph.spec.in
@@ -356,6 +356,7 @@ fi
%defattr(-,root,root,-)
%{_bindir}/ceph-fuse
%{_mandir}/man8/ceph-fuse.8*
+/sbin/mount.fuse.ceph
#################################################################################
%files devel
diff --git a/debian/ceph-fuse.install b/debian/ceph-fuse.install
index b8dd3b32d63..79588d2a629 100644
--- a/debian/ceph-fuse.install
+++ b/debian/ceph-fuse.install
@@ -1,2 +1,3 @@
usr/bin/ceph-fuse
usr/share/man/man8/ceph-fuse.8
+sbin/mount.fuse.ceph
diff --git a/doc/architecture.rst b/doc/architecture.rst
index e944192ef7e..116ec4110f3 100644
--- a/doc/architecture.rst
+++ b/doc/architecture.rst
@@ -146,15 +146,30 @@ appropriate placement groups in the secondary and tertiary OSDs (as many OSDs as
additional replicas), and responds to the client once it has confirmed the
object was stored successfully.
-.. ditaa:: +--------+ Write +--------------+ Replica 1 +----------------+
- | Client |*-------------->| Primary OSD |*---------------->| Secondary OSD |
- | |<--------------*| |<----------------*| |
- +--------+ Write Ack +--------------+ Replica 1 Ack +----------------+
- ^ *
- | | Replica 2 +----------------+
- | +----------------------->| Tertiary OSD |
- +--------------------------*| |
- Replica 2 Ack +----------------+
+
+.. ditaa::
+ +----------+
+ | Client |
+ | |
+ +----------+
+ * ^
+ Write (1) | | Ack (6)
+ | |
+ v *
+ +-------------+
+ | Primary OSD |
+ | |
+ +-------------+
+ * ^ ^ *
+ Write (2) | | | | Write (3)
+ +------+ | | +------+
+ | +------+ +------+ |
+ | | Ack (4) Ack (5)| |
+ v * * v
+ +---------------+ +---------------+
+ | Secondary OSD | | Tertiary OSD |
+ | | | |
+ +---------------+ +---------------+
Since any network device has a limit to the number of concurrent connections it
@@ -222,82 +237,84 @@ of striping:
If you anticipate large images sizes, large S3 or Swift objects (video), or
-large CephFS files, you may see considerable read/write performance improvements
-by striping client data over mulitple objects within an object set. Significant
-write performance occurs when the client writes the stripe units to their
-corresponding objects simultaneously. Since objects get mapped to different
-placement groups and further mapped to different OSDs, each write occurs
-simultaneously at the maximum write speed. So the stripe count may serve as a
-proxy for the multiple of the performance improvement. Read performance is
-similarly affected. However, setting up connections between the client and the
-OSDs and the network latency also play a role in the overall performance.
+large CephFS directories, you may see considerable read/write performance
+improvements by striping client data over multiple objects within an object set.
+Significant write performance occurs when the client writes the stripe units to
+their corresponding objects in parallel. Since objects get mapped to different
+placement groups and further mapped to different OSDs, each write occurs in
+parallel at the maximum write speed. A write to a single disk would be limited
+by the head movement (e.g. 6ms per seek) and bandwidth of that one device (e.g.
+100MB/s). By spreading that write over multiple objects (which map to different
+placement groups and OSDs) Ceph can reduce the number of seeks per drive and
+combine the throughput of multiple drives to achieve much faster write (or read)
+speeds.
In the following diagram, client data gets striped across an object set
(``object set 1`` in the following diagram) consisting of 4 objects, where the
-first stripe unit is ``stripe 0`` in ``object 0``, and the fourth stripe unit is
-``stripe 3`` in ``object 3``. After writing the fourth stripe, the client
-determines if the object set is full. If the object set is not full, the client
-begins writing a stripe to the first object again (``object 0`` in the following
-diagram). If the object set is full, the client creates a new object set
-(``object set 2`` in the following diagram), and begins writing to the first
-stripe (``stripe 4``) in the first object in the new object set (``object 4`` in
-the diagram below).
+first stripe unit is ``stripe unit 0`` in ``object 0``, and the fourth stripe
+unit is ``stripe unit 3`` in ``object 3``. After writing the fourth stripe, the
+client determines if the object set is full. If the object set is not full, the
+client begins writing a stripe to the first object again (``object 0`` in the
+following diagram). If the object set is full, the client creates a new object
+set (``object set 2`` in the following diagram), and begins writing to the first
+stripe (``stripe unit 16``) in the first object in the new object set (``object
+4`` in the diagram below).
.. ditaa::
- +---------------+
- | Client Data |
- | Format |
- | cCCC |
- +---------------+
- |
- +-----------------+--------+--------+-----------------+
- | | | | +--\
- v v v v |
- /-----------\ /-----------\ /-----------\ /-----------\ |
- | Begin cCCC| | Begin cCCC| | Begin cCCC| | Begin cCCC| |
- | Object 0 | | Object 1 | | Object 2 | | Object 3 | |
- +-----------+ +-----------+ +-----------+ +-----------+ |
- | stripe | | stripe | | stripe | | stripe | |
- | unit 0 | | unit 1 | | unit 2 | | unit 3 | |
- +-----------+ +-----------+ +-----------+ +-----------+ |
- | stripe | | stripe | | stripe | | stripe | +-\
- | unit 4 | | unit 5 | | unit 6 | | unit 7 | | Object
- +-----------+ +-----------+ +-----------+ +-----------+ +- Set
- | stripe | | stripe | | stripe | | stripe | | 1
- | unit 8 | | unit 9 | | unit 10 | | unit 11 | +-/
- +-----------+ +-----------+ +-----------+ +-----------+ |
- | stripe | | stripe | | stripe | | stripe | |
- | unit 12 | | unit 13 | | unit 14 | | unit 15 | |
- +-----------+ +-----------+ +-----------+ +-----------+ |
- | End cCCC | | End cCCC | | End cCCC | | End cCCC | |
- | Object 0 | | Object 1 | | Object 2 | | Object 3 | |
- \-----------/ \-----------/ \-----------/ \-----------/ |
- |
- +--/
+ +---------------+
+ | Client Data |
+ | Format |
+ | cCCC |
+ +---------------+
+ |
+ +-----------------+--------+--------+-----------------+
+ | | | | +--\
+ v v v v |
+ /-----------\ /-----------\ /-----------\ /-----------\ |
+ | Begin cCCC| | Begin cCCC| | Begin cCCC| | Begin cCCC| |
+ | Object 0 | | Object 1 | | Object 2 | | Object 3 | |
+ +-----------+ +-----------+ +-----------+ +-----------+ |
+ | stripe | | stripe | | stripe | | stripe | |
+ | unit 0 | | unit 1 | | unit 2 | | unit 3 | |
+ +-----------+ +-----------+ +-----------+ +-----------+ |
+ | stripe | | stripe | | stripe | | stripe | +-\
+ | unit 4 | | unit 5 | | unit 6 | | unit 7 | | Object
+ +-----------+ +-----------+ +-----------+ +-----------+ +- Set
+ | stripe | | stripe | | stripe | | stripe | | 1
+ | unit 8 | | unit 9 | | unit 10 | | unit 11 | +-/
+ +-----------+ +-----------+ +-----------+ +-----------+ |
+ | stripe | | stripe | | stripe | | stripe | |
+ | unit 12 | | unit 13 | | unit 14 | | unit 15 | |
+ +-----------+ +-----------+ +-----------+ +-----------+ |
+ | End cCCC | | End cCCC | | End cCCC | | End cCCC | |
+ | Object 0 | | Object 1 | | Object 2 | | Object 3 | |
+ \-----------/ \-----------/ \-----------/ \-----------/ |
+ |
+ +--/
- +--\
- |
- /-----------\ /-----------\ /-----------\ /-----------\ |
- | Begin cCCC| | Begin cCCC| | Begin cCCC| | Begin cCCC| |
- | Object 4 | | Object 5 | | Object 6 | | Object 7 | |
- +-----------+ +-----------+ +-----------+ +-----------+ |
- | stripe | | stripe | | stripe | | stripe | |
- | unit 15 | | unit 16 | | unit 17 | | unit 18 | |
- +-----------+ +-----------+ +-----------+ +-----------+ |
- | stripe | | stripe | | stripe | | stripe | +-\
- | unit 19 | | unit 20 | | unit 21 | | unit 22 | | Object
- +-----------+ +-----------+ +-----------+ +-----------+ +- Set
- | stripe | | stripe | | stripe | | stripe | | 2
- | unit 23 | | unit 24 | | unit 25 | | unit 26 | +-/
- +-----------+ +-----------+ +-----------+ +-----------+ |
- | stripe | | stripe | | stripe | | stripe | |
- | unit 27 | | unit 28 | | unit 29 | | unit 30 | |
- +-----------+ +-----------+ +-----------+ +-----------+ |
- | End cCCC | | End cCCC | | End cCCC | | End cCCC | |
- | Object 4 | | Object 5 | | Object 6 | | Object 7 | |
- \-----------/ \-----------/ \-----------/ \-----------/ |
- |
- +--/
+ +--\
+ |
+ /-----------\ /-----------\ /-----------\ /-----------\ |
+ | Begin cCCC| | Begin cCCC| | Begin cCCC| | Begin cCCC| |
+ | Object 4 | | Object 5 | | Object 6 | | Object 7 | |
+ +-----------+ +-----------+ +-----------+ +-----------+ |
+ | stripe | | stripe | | stripe | | stripe | |
+ | unit 16 | | unit 17 | | unit 18 | | unit 19 | |
+ +-----------+ +-----------+ +-----------+ +-----------+ |
+ | stripe | | stripe | | stripe | | stripe | +-\
+ | unit 20 | | unit 21 | | unit 22 | | unit 23 | | Object
+ +-----------+ +-----------+ +-----------+ +-----------+ +- Set
+ | stripe | | stripe | | stripe | | stripe | | 2
+ | unit 24 | | unit 25 | | unit 26 | | unit 27 | +-/
+ +-----------+ +-----------+ +-----------+ +-----------+ |
+ | stripe | | stripe | | stripe | | stripe | |
+ | unit 28 | | unit 29 | | unit 30 | | unit 31 | |
+ +-----------+ +-----------+ +-----------+ +-----------+ |
+ | End cCCC | | End cCCC | | End cCCC | | End cCCC | |
+ | Object 4 | | Object 5 | | Object 6 | | Object 7 | |
+ \-----------/ \-----------/ \-----------/ \-----------/ |
+ |
+ +--/
Three important variables determine how Ceph stripes data:
@@ -306,9 +323,9 @@ Three important variables determine how Ceph stripes data:
enough to accomodate many stripe units, and should be a multiple of
the stripe unit.
-- **Stripe Unit:** Stripes have a configurable unit size (e.g., 64kb).
+- **Stripe Width:** Stripes have a configurable unit size (e.g., 64kb).
The Ceph client divides the data it will write to objects into equally
- sized stripe units, except for the last stripe unit. A stripe unit,
+ sized stripe units, except for the last stripe unit. A stripe width,
should be a fraction of the Object Size so that an object may contain
many stripe units.
@@ -347,7 +364,11 @@ storage disk. See `How Ceph Scales`_ for details.
get mapped to placement groups in the same pool. So they use the same CRUSH
map and the same access controls.
-.. tip:: The objects Ceph stores in the Object Store are not striped.
+.. tip:: The objects Ceph stores in the Object Store are not striped. RGW, RBD
+ and CephFS automatically stripe their data over multiple RADOS objects.
+ Clients that write directly to the Object Store via ``librados`` must
+   perform the striping (and parallel I/O) for themselves to obtain these
+ benefits.
Data Consistency
diff --git a/doc/install/os-recommendations.rst b/doc/install/os-recommendations.rst
index 52e9c2fff89..2f101dc4271 100644
--- a/doc/install/os-recommendations.rst
+++ b/doc/install/os-recommendations.rst
@@ -24,7 +24,8 @@ glibc
- **syncfs(2)**: For non-btrfs filesystems such as XFS and ext4 where
more than one ``ceph-osd`` daemon is used on a single server, Ceph
performs signficantly better with the ``syncfs(2)`` system call
- (added in kernel 2.6.39 and glibc 2.14).
+ (added in kernel 2.6.39 and glibc 2.14). New versions of Ceph (v0.55 and
+ later) do not depend on glibc support.
Platforms
@@ -49,12 +50,12 @@ Argonaut (0.48)
+----------+----------+--------------------+--------------+---------+------------+
| Ubuntu | 12.10 | Quantal Quetzal | linux-3.5.4 | 2 | B |
+----------+----------+--------------------+--------------+---------+------------+
-| Debian | 6.0 | Squeeze | linux-2.6.32 | 1, 2 | B |
+| Debian | 6.0 | Squeeze | linux-2.6.32 | 1, 2, 3 | B |
+----------+----------+--------------------+--------------+---------+------------+
-| Debian | 7.0 | Wheezy | linux-3.2.0 | 1, 2 | B |
+| Debian | 7.0 | Wheezy | linux-3.2.0 | 1, 2, 3 | B |
+----------+----------+--------------------+--------------+---------+------------+
-Bobtail (0.55)
+Bobtail (0.56)
--------------
+----------+----------+--------------------+--------------+---------+------------+
@@ -62,17 +63,17 @@ Bobtail (0.55)
+==========+==========+====================+==============+=========+============+
| Ubuntu | 11.04 | Natty Narwhal | linux-2.6.38 | 1, 2, 3 | B |
+----------+----------+--------------------+--------------+---------+------------+
-| Ubuntu | 11.10 | Oneric Ocelot | linux-3.0.0 | 1, 2, 3 | B |
+| Ubuntu | 11.10 | Oneric Ocelot | linux-3.0.0 | 1, 2 | B |
+----------+----------+--------------------+--------------+---------+------------+
| Ubuntu | 12.04 | Precise Pangolin | linux-3.2.0 | 1, 2 | B, I, C |
+----------+----------+--------------------+--------------+---------+------------+
| Ubuntu | 12.10 | Quantal Quetzal | linux-3.5.4 | 2 | B |
+----------+----------+--------------------+--------------+---------+------------+
-| Debian | 6.0 | Squeeze | linux-2.6.32 | 1, 2 | B |
+| Debian | 6.0 | Squeeze | linux-2.6.32 | 1, 2, 3 | B |
+----------+----------+--------------------+--------------+---------+------------+
| Debian | 7.0 | Wheezy | linux-3.2.0 | 1, 2 | B |
+----------+----------+--------------------+--------------+---------+------------+
-| CentOS | 6.3 | N/A | linux-2.6.32 | 1, 2, 3 | B, I |
+| CentOS | 6.3 | N/A | linux-2.6.32 | 1, 2 | B, I |
+----------+----------+--------------------+--------------+---------+------------+
| Fedora | 17.0 | Beefy Miracle | linux-3.3.4 | 1, 2 | B |
+----------+----------+--------------------+--------------+---------+------------+
@@ -93,10 +94,10 @@ Notes
for kernel client (kernel RBD or the Ceph file system). Upgrade to a
recommended kernel.
-- **3**: The installed version of ``glibc`` does not support the
- ``syncfs(2)`` system call. Putting multiple ``ceph-osd`` daemons
- using ``XFS`` or ``ext4`` on the same host will not perform as well as
- they could.
+- **3**: The default kernel or installed version of ``glibc`` does not
+ support the ``syncfs(2)`` system call. Putting multiple
+ ``ceph-osd`` daemons using ``XFS`` or ``ext4`` on the same host will
+ not perform as well as they could.
Testing
-------
diff --git a/doc/rados/configuration/filestore-config-ref.rst b/doc/rados/configuration/filestore-config-ref.rst
index e45c0f7a9fc..80aed8e81a4 100644
--- a/doc/rados/configuration/filestore-config-ref.rst
+++ b/doc/rados/configuration/filestore-config-ref.rst
@@ -205,7 +205,7 @@ Journal
``filestore journal parallel``
-:Description:
+:Description: Enables parallel journaling, default for btrfs.
:Type: Boolean
:Required: No
:Default: ``false``
@@ -213,7 +213,7 @@ Journal
``filestore journal writeahead``
-:Description:
+:Description: Enables writeahead journaling, default for xfs.
:Type: Boolean
:Required: No
:Default: ``false``
@@ -221,7 +221,7 @@ Journal
``filestore journal trailing``
-:Description:
+:Description: Deprecated, never use.
:Type: Boolean
:Required: No
:Default: ``false``
@@ -233,7 +233,7 @@ Misc
``filestore merge threshold``
-:Description:
+:Description: Min number of files in a subdir before merging into parent
:Type: Integer
:Required: No
:Default: ``10``
@@ -241,7 +241,7 @@ Misc
``filestore split multiple``
-:Description:
+:Description: filestore_split_multiple*filestore_merge_threshold*16 is the max files in a subdir before splitting into child directories.
:Type: Integer
:Required: No
:Default: ``2``
@@ -249,7 +249,7 @@ Misc
``filestore update to``
-:Description:
+:Description: Limits filestore auto upgrade to specified version.
:Type: Integer
:Required: No
:Default: ``1000``
diff --git a/doc/rados/configuration/ms-ref.rst b/doc/rados/configuration/ms-ref.rst
index d76f7f3bb25..d0b1b172d7f 100644
--- a/doc/rados/configuration/ms-ref.rst
+++ b/doc/rados/configuration/ms-ref.rst
@@ -5,7 +5,7 @@
``ms tcp nodelay``
-:Description:
+:Description: Disables nagle's algorithm on messenger tcp sessions.
:Type: Boolean
:Required: No
:Default: ``true``
@@ -13,7 +13,7 @@
``ms initial backoff``
-:Description:
+:Description: The initial time to wait before reconnecting on a fault.
:Type: Double
:Required: No
:Default: ``.2``
@@ -21,7 +21,7 @@
``ms max backoff``
-:Description:
+:Description: The maximum time to wait before reconnecting on a fault.
:Type: Double
:Required: No
:Default: ``15.0``
@@ -29,7 +29,7 @@
``ms nocrc``
-:Description:
+:Description: Disables crc on network messages. May increase performance if cpu limited.
:Type: Boolean
:Required: No
:Default: ``false``
@@ -37,7 +37,7 @@
``ms die on bad msg``
-:Description:
+:Description: Debug option; do not configure.
:Type: Boolean
:Required: No
:Default: ``false``
@@ -45,7 +45,7 @@
``ms dispatch throttle bytes``
-:Description:
+:Description: Throttles total size of messages waiting to be dispatched.
:Type: 64-bit Unsigned Integer
:Required: No
:Default: ``100 << 20``
@@ -53,7 +53,7 @@
``ms bind ipv6``
-:Description:
+:Description: Enable if you want your daemons to bind to IPv6 address instead of IPv4 ones. (Not required if you specify a daemon or cluster IP.)
:Type: Boolean
:Required: No
:Default: ``false``
@@ -61,7 +61,7 @@
``ms rwthread stack bytes``
-:Description:
+:Description: Debug option for stack size; do not configure.
:Type: 64-bit Unsigned Integer
:Required: No
:Default: ``1024 << 10``
@@ -69,7 +69,7 @@
``ms tcp read timeout``
-:Description:
+:Description: Controls how long (in seconds) the messenger will wait before closing an idle connection.
:Type: 64-bit Unsigned Integer
:Required: No
:Default: ``900``
@@ -77,7 +77,7 @@
``ms inject socket failures``
-:Description:
+:Description: Debug option; do not configure.
:Type: 64-bit Unsigned Integer
:Required: No
:Default: ``0``
diff --git a/doc/rados/deployment/mkcephfs.rst b/doc/rados/deployment/mkcephfs.rst
index a6531ec84d7..fc37c7b7922 100644
--- a/doc/rados/deployment/mkcephfs.rst
+++ b/doc/rados/deployment/mkcephfs.rst
@@ -19,7 +19,7 @@ Enter a password for the root user.
On the admin host, generate an ``ssh`` key without specifying a passphrase
and use the default locations. ::
- sudo -s
+ sudo -i
ssh-keygen
Generating public/private key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
@@ -27,14 +27,8 @@ and use the default locations. ::
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
-
-You may use RSA or DSA keys. Once you generate your keys, copy them to each
-OSD host. For example::
-
- ssh-copy-id root@myserver01
- ssh-copy-id root@myserver02
-Modify your ``~/.ssh/config`` file to login as ``root``, as follows::
+Modify your ``/root/.ssh/config`` file to login as ``root``, as follows::
Host myserver01
Hostname myserver01.fully-qualified-domain.com
@@ -43,6 +37,11 @@ Modify your ``~/.ssh/config`` file to login as ``root``, as follows::
Hostname myserver02.fully-qualified-domain.com
User root
+You may use RSA or DSA keys. Once you generate your keys, copy them to each
+OSD host. For example::
+
+ ssh-copy-id root@myserver01
+ ssh-copy-id root@myserver02
Copy Configuration File to All Hosts
====================================
@@ -55,9 +54,9 @@ if you are using ``mkcephfs`` to deploy Ceph.
::
- ssh myserver01 sudo tee /etc/ceph/ceph.conf < /etc/ceph/ceph.conf
- ssh myserver02 sudo tee /etc/ceph/ceph.conf < /etc/ceph/ceph.conf
- ssh myserver03 sudo tee /etc/ceph/ceph.conf < /etc/ceph/ceph.conf
+ sudo ssh myserver01 tee /etc/ceph/ceph.conf < /etc/ceph/ceph.conf
+ sudo ssh myserver02 tee /etc/ceph/ceph.conf < /etc/ceph/ceph.conf
+ sudo ssh myserver03 tee /etc/ceph/ceph.conf < /etc/ceph/ceph.conf
Create the Default Directories
@@ -115,7 +114,7 @@ root password. See `Authentication`_ when running with ``cephx`` enabled.
When you start or stop your cluster, you will not have to use ``sudo`` or
provide passwords. For example::
- service ceph -a start
+ sudo service ceph -a start
See `Operating a Cluster`_ for details.
diff --git a/doc/rados/operations/add-or-rm-osds.rst b/doc/rados/operations/add-or-rm-osds.rst
index fa377b3c58f..f60ddc6970f 100644
--- a/doc/rados/operations/add-or-rm-osds.rst
+++ b/doc/rados/operations/add-or-rm-osds.rst
@@ -107,10 +107,9 @@ hard disks than older hosts in the cluster (i.e., they may have greater weight).
ssh {new-osd} sudo tee /etc/ceph/ceph.conf < /etc/ceph/ceph.conf
-#. Create the OSD. ::
+#. Create the OSD. If no UUID is given, it will be set automatically when the OSD starts up. ::
- ceph osd create {osd-num}
- ceph osd create 123 #for example
+ ceph osd create [{uuid}]
#. Initialize the OSD data directory. ::
diff --git a/doc/rados/operations/control.rst b/doc/rados/operations/control.rst
index 4a88955d3f4..ced336d376c 100644
--- a/doc/rados/operations/control.rst
+++ b/doc/rados/operations/control.rst
@@ -151,10 +151,10 @@ Mark an OSD as lost. This may result in permanent data loss. Use with caution. :
ceph osd lost [--yes-i-really-mean-it]
-Create a new OSD. If no ID is given, a new ID is automatically selected
-if possible. ::
+Create a new OSD. If no UUID is given, it will be set automatically when the OSD
+starts up. ::
- ceph osd create [{id}]
+ ceph osd create [{uuid}]
Remove the given OSD(s). ::
diff --git a/doc/radosgw/admin/adminops.rst b/doc/radosgw/admin/adminops.rst
index 211a8e21259..b90c8a93f86 100644
--- a/doc/radosgw/admin/adminops.rst
+++ b/doc/radosgw/admin/adminops.rst
@@ -67,17 +67,17 @@ If successful, the response contains the requested information.
``usage``
-:Description: A container for the usage information
+:Description: A container for the usage information.
:Type: Container
``entries``
-:Description: A container for the usage entries information
+:Description: A container for the usage entries information.
:Type: Container
``user``
-:Description: A container for the user data information
+:Description: A container for the user data information.
:Type: Container
``owner``
@@ -192,3 +192,1504 @@ Request Parameters
:Required: No
+Get User Info
+=============
+
+Get user information.
+
+Syntax
+~~~~~~
+
+::
+
+ GET /{admin}/user HTTP/1.1
+ Host: {fqdn}
+
+
+Request Parameters
+~~~~~~~~~~~~~~~~~~
+
+``uid``
+
+:Description: The user for which the information is requested.
+:Type: String
+:Required: Yes
+
+
+Response Entities
+~~~~~~~~~~~~~~~~~
+
+If successful, the response contains the user information.
+
+``user``
+
+:Description: A container for the user data information.
+:Type: Container
+
+``user_id``
+
+:Description: The user id.
+:Type: String
+
+``display_name``
+
+:Description: Display name for the user.
+:Type: String
+
+``suspended``
+
+:Description: True if the user is suspended.
+:Type: Boolean
+
+``max_buckets``
+
+:Description: The maximum number of buckets to be owned by the user.
+:Type: Integer
+
+``subusers``
+
+:Description: Subusers associated with this user account.
+:Type: Container
+
+``keys``
+
+:Description: S3 keys associated with this user account.
+:Type: Container
+
+``swift_keys``
+
+:Description: Swift keys associated with this user account.
+:Type: Container
+
+``caps``
+
+:Description: User capabilities.
+:Type: Container
+
+
+Create User
+===========
+
+Create a new user.
+
+Syntax
+~~~~~~
+
+::
+
+ PUT /{admin}/user HTTP/1.1
+ Host: {fqdn}
+
+
+
+Request Parameters
+~~~~~~~~~~~~~~~~~~
+
+``uid``
+
+:Description: The user ID to be created.
+:Type: String
+:Example: ``foo_user``
+:Required: Yes
+
+``display-name``
+
+:Description: The display name of the user to be created.
+:Type: String
+:Example: ``foo user``
+:Required: Yes
+
+
+``email``
+
+:Description: The email address associated with the user.
+:Type: String
+:Example: ``foo@bar.com``
+:Required: No
+
+``key-type``
+
+:Description: Key type to be generated, options are: swift, s3 (default)
+:Type: String
+:Example: ``s3``
+:Required: No
+
+``secret``
+
+:Description: Specify secret key
+:Type: String
+:Example: ``0AbCDEFg1h2i34JklM5nop6QrSTUV+WxyzaBC7D8``
+:Required: No
+
+
+Response Entities
+~~~~~~~~~~~~~~~~~
+
+If successful, the response contains the user information.
+
+``user``
+
+:Description: A container for the user data information.
+:Type: Container
+
+``user_id``
+
+:Description: The user id.
+:Type: String
+
+``display_name``
+
+:Description: Display name for the user.
+:Type: String
+
+``suspended``
+
+:Description: True if the user is suspended.
+:Type: Boolean
+
+``max_buckets``
+
+:Description: The maximum number of buckets to be owned by the user.
+:Type: Integer
+
+``subusers``
+
+:Description: Subusers associated with this user account.
+:Type: Container
+
+``keys``
+
+:Description: S3 keys associated with this user account.
+:Type: Container
+
+``swift_keys``
+
+:Description: Swift keys associated with this user account.
+:Type: Container
+
+``caps``
+
+:Description: User capabilities.
+:Type: Container
+
+Modify User
+===========
+
+Modify a user.
+
+Syntax
+~~~~~~
+
+::
+
+ POST /{admin}/user HTTP/1.1
+ Host: {fqdn}
+
+
+Request Parameters
+~~~~~~~~~~~~~~~~~~
+
+``uid``
+
+:Description: The user ID to be modified.
+:Type: String
+:Example: ``foo_user``
+:Required: No
+
+``display-name``
+
+:Description: The display name of the user to be modified.
+:Type: String
+:Example: ``foo user``
+:Required: No
+
+``email``
+
+:Description: The email address to be associated with the user.
+:Type: String
+:Example: ``foo@bar.com``
+:Required: No
+
+``gen-secret``
+
+:Description: Generate a new secret key.
+:Type: Boolean
+:Example: True
+:Required: No
+
+``key-type``
+
+:Description: Key type to be generated, options are: swift, s3 (default)
+:Type: String
+:Example: ``s3``
+:Required: No
+
+
+Response Entities
+~~~~~~~~~~~~~~~~~
+
+If successful, the response contains the user information.
+
+``user``
+
+:Description: A container for the user data information.
+:Type: Container
+
+``user_id``
+
+:Description: The user id.
+:Type: String
+
+``display_name``
+
+:Description: Display name for the user.
+:Type: String
+
+``suspended``
+
+:Description: True if the user is suspended.
+:Type: Boolean
+
+``max_buckets``
+
+:Description: The maximum number of buckets to be owned by the user.
+:Type: Integer
+
+``subusers``
+
+:Description: Subusers associated with this user account.
+:Type: Container
+
+``keys``
+
+:Description: S3 keys associated with this user account.
+:Type: Container
+
+``swift_keys``
+
+:Description: Swift keys associated with this user account.
+:Type: Container
+
+``caps``
+
+:Description: User capabilities.
+:Type: Container
+
+Remove User
+===========
+
+Remove an existing user.
+
+Syntax
+~~~~~~
+
+::
+
+ DELETE /{admin}/user HTTP/1.1
+ Host: {fqdn}
+
+
+Request Parameters
+~~~~~~~~~~~~~~~~~~
+
+``uid``
+
+:Description: The user ID to be removed.
+:Type: String
+:Example: ``foo_user``
+:Required: Yes
+
+``purge-data``
+
+:Description: When specified the buckets and objects belonging
+ to the user will also be removed.
+:Type: Boolean
+:Example: True
+:Required: No
+
+Response Entities
+~~~~~~~~~~~~~~~~~
+
+TBD.
+
+Create Subuser
+==============
+
+Create a new subuser (primarily useful for clients using the Swift API)
+
+Syntax
+~~~~~~
+
+::
+
+ PUT /{admin}/user/?subuser
+ Host {fqdn}
+
+
+Request Parameters
+~~~~~~~~~~~~~~~~~~
+
+``uid``
+
+:Description: The user ID under which a subuser is to be created.
+:Type: String
+:Example: ``foo_user``
+:Required: Yes
+
+
+``subuser``
+
+:Description: The subuser ID to be created
+:Type: String
+:Example: ``sub_foo``
+:Required: Yes
+
+``gen-secret``
+
+:Description: Generate a secret key for the subuser.
+:Type: Boolean
+:Example: True
+:Required: No
+
+``key-type``
+
+:Description: Key type to be generated, options are: swift, s3 (default)
+:Type: String
+:Example: ``swift``
+:Required: No
+
+``access``
+
+:Description: Set access permissions for sub-user, should be one
+ of read, write, readwrite, full
+:Type: String
+:Example: ``read``
+:Required: No
+
+
+Response Entities
+~~~~~~~~~~~~~~~~~
+
+If successful, the response contains the user information.
+
+``user``
+
+:Description: A container for the user data information.
+:Type: Container
+
+``user_id``
+
+:Description: The user id.
+:Type: String
+
+``display_name``
+
+:Description: Display name for the user.
+:Type: String
+
+``suspended``
+
+:Description: True if the user is suspended.
+:Type: Boolean
+
+``max_buckets``
+
+:Description: The maximum number of buckets to be owned by the user.
+:Type: Integer
+
+``subusers``
+
+:Description: Subusers associated with this user account.
+:Type: Container
+
+``keys``
+
+:Description: S3 keys associated with this user account.
+:Type: Container
+
+``swift_keys``
+
+:Description: Swift keys associated with this user account.
+:Type: Container
+
+``caps``
+
+:Description: User capabilities.
+:Type: Container
+
+Modify Subuser
+==============
+
+Modify an existing subuser
+
+Syntax
+~~~~~~
+
+::
+
+ POST /{admin}/user/?subuser
+ Host {fqdn}
+
+
+Request Parameters
+~~~~~~~~~~~~~~~~~~
+
+``uid``
+
+:Description: The user ID under which the subuser is to be modified.
+:Type: String
+:Example: ``foo_user``
+:Required: Yes
+
+
+``subuser``
+
+:Description: The subuser ID to be modified.
+:Type: String
+:Example: ``sub_foo``
+:Required: Yes
+
+``gen-secret``
+
+:Description: Generate a new secret key for the subuser.
+:Type: Boolean
+:Example: True
+:Required: No
+
+``key-type``
+
+:Description: Key type to be generated, options are: swift, s3 (default)
+:Type: String
+:Example: ``s3``
+:Required: No
+
+``access``
+
+:Description: Set access permissions for sub-user, should be one
+ of read, write, readwrite, full
+:Type: String
+:Example: ``read``
+:Required: No
+
+
+Response Entities
+~~~~~~~~~~~~~~~~~
+
+If successful, the response contains the user information.
+
+``user``
+
+:Description: A container for the user data information.
+:Type: Container
+
+``user_id``
+
+:Description: The user id.
+:Type: String
+
+``display_name``
+
+:Description: Display name for the user.
+:Type: String
+
+``suspended``
+
+:Description: True if the user is suspended.
+:Type: Boolean
+
+``max_buckets``
+
+:Description: The maximum number of buckets to be owned by the user.
+:Type: Integer
+
+``subusers``
+
+:Description: Subusers associated with this user account.
+:Type: Container
+
+``keys``
+
+:Description: S3 keys associated with this user account.
+:Type: Container
+
+``swift_keys``
+
+:Description: Swift keys associated with this user account.
+:Type: Container
+
+``caps``
+
+:Description: User capabilities.
+:Type: Container
+
+Remove Subuser
+==============
+
+Remove an existing subuser
+
+Syntax
+~~~~~~
+
+::
+
+ DELETE /{admin}/subuser
+ Host {fqdn}
+
+
+Request Parameters
+~~~~~~~~~~~~~~~~~~
+
+``uid``
+
+:Description: The user ID under which the subuser is to be removed.
+:Type: String
+:Example: ``foo_user``
+:Required: Yes
+
+
+``subuser``
+
+:Description: The subuser ID to be removed.
+:Type: String
+:Example: ``sub_foo``
+:Required: Yes
+
+``purge-keys``
+
+:Description: Remove keys belonging to the subuser.
+:Type: Boolean
+:Example: True
+:Required: No
+
+``purge-data``
+
+:Description: Remove data belonging to the subuser.
+:Type: Boolean
+:Example: True
+:Required: No
+
+Response Entities
+~~~~~~~~~~~~~~~~~
+
+If successful, the response contains the user information.
+
+``user``
+
+:Description: A container for the user data information.
+:Type: Container
+
+``user_id``
+
+:Description: The user id.
+:Type: String
+
+``display_name``
+
+:Description: Display name for the user.
+:Type: String
+
+``suspended``
+
+:Description: True if the user is suspended.
+:Type: Boolean
+
+``max_buckets``
+
+:Description: The maximum number of buckets to be owned by the user.
+:Type: Integer
+
+``subusers``
+
+:Description: Subusers associated with this user account.
+:Type: Container
+
+``keys``
+
+:Description: S3 keys associated with this user account.
+:Type: Container
+
+``swift_keys``
+
+:Description: Swift keys associated with this user account.
+:Type: Container
+
+``caps``
+
+:Description: User capabilities.
+:Type: Container
+
+Create Key
+==========
+
+Create a new key.
+
+Syntax
+~~~~~~
+
+::
+
+ PUT /{admin}/key
+ Host {fqdn}
+
+
+Request Parameters
+~~~~~~~~~~~~~~~~~~
+
+``uid``
+
+:Description: The user ID to receive the new key.
+:Type: String
+:Example: ``foo_user``
+:Required: Yes
+
+``subuser``
+
+:Description: The subuser ID to receive the new key.
+:Type: String
+:Example: ``sub_foo``
+:Required: No
+
+``key-type``
+
+:Description: Key type to be generated, options are: swift, s3 (default).
+:Type: String
+:Example: ``s3``
+:Required: No
+
+``secret``
+
+:Description: Specify the secret key
+:Type: String
+:Example: ``0ab/CdeFGhij1klmnopqRSTUv1WxyZabcDEFgHij``
+:Required: No
+
+Remove Key
+==========
+
+Remove an existing key.
+
+Syntax
+~~~~~~
+
+::
+
+ DELETE /{admin}/key
+ Host {fqdn}
+
+
+Request Parameters
+~~~~~~~~~~~~~~~~~~
+
+``access-key``
+
+:Description: The S3 access key belonging to the S3 keypair to remove.
+:Type: String
+:Example: ``AB01C2D3EF45G6H7IJ8K``
+:Required: Yes
+
+``uid``
+
+:Description: The user to remove the key from.
+:Type: String
+:Example: ``foo_user``
+:Required: No
+
+``subuser``
+
+:Description: The subuser to remove the key from.
+:Type: String
+:Example: ``sub_foo``
+:Required: No
+
+``key-type``
+
+:Description: Key type to be removed, options are: swift, s3.
+ NOTE: Required to remove swift key.
+:Type: String
+:Example: ``swift``
+:Required: No
+
+
+Get Bucket
+==========
+
+Get information for an existing bucket, if no request parameters are
+included lists buckets.
+
+Syntax
+~~~~~~
+
+::
+
+ GET /{admin}/bucket
+ Host {fqdn}
+
+
+Request Parameters
+~~~~~~~~~~~~~~~~~~
+
+``bucket``
+
+:Description: The bucket to return info on.
+:Type: String
+:Example: ``foo_bucket``
+:Required: Yes
+
+
+``list``
+
+:Description: Return list of buckets.
+:Type: Boolean
+:Example: True
+:Required: No
+
+``stats``
+
+:Description: Return bucket statistics.
+:Type: Boolean
+:Example: True
+:Required: No
+
+``check``
+
+:Description: Check bucket index.
+:Type: Boolean
+:Example: False
+:Required: No
+
+``fix``
+
+:Description: Also fix the bucket index when checking.
+:Type: Boolean
+:Example: False
+:Required: No
+
+Response Entities
+~~~~~~~~~~~~~~~~~
+
+If successful the request returns a buckets container containing
+the desired bucket information.
+
+``buckets``
+
+:Description: Contains a list of one or more bucket containers.
+:Type: Container
+
+``stats``
+
+:Description: Per bucket information.
+:Type: Container
+
+``bucket``
+
+:Description: The name of the bucket.
+:Type: String
+
+``pool``
+
+:Description: The pool the bucket is stored in.
+:Type: String
+
+``id``
+
+:Description: The unique bucket id.
+:Type: String
+
+``marker``
+
+:Description:
+:Type: String
+
+``owner``
+
+:Description: The user id of the bucket owner.
+:Type: String
+
+``usage``
+
+:Description: Storage usage information.
+:Type: Container
+
+Check Bucket Index
+==================
+
+Check the index of an existing bucket.
+
+Syntax
+~~~~~~
+
+::
+
+ GET /{admin}/bucket/?index
+ Host {fqdn}
+
+
+Request Parameters
+~~~~~~~~~~~~~~~~~~
+
+``bucket``
+
+:Description: The bucket to return info on.
+:Type: String
+:Example: ``foo_bucket``
+:Required: Yes
+
+``fix``
+
+:Description: Also fix the bucket index when checking.
+:Type: Boolean
+:Example: False
+:Required: No
+
+Response Entities
+~~~~~~~~~~~~~~~~~
+
+TBD
+
+Remove Bucket
+=============
+
+Delete an existing bucket.
+
+Syntax
+~~~~~~
+
+::
+
+ DELETE /{admin}/bucket
+ Host {fqdn}
+
+
+
+Request Parameters
+~~~~~~~~~~~~~~~~~~
+
+``bucket``
+
+:Description: The bucket to remove.
+:Type: String
+:Example: ``foo_bucket``
+:Required: Yes
+
+``delete``
+
+:Description: Parameter specifying the bucket is to be removed.
+:Type: Boolean
+:Example: True
+:Required: Yes
+
+``purge-objects``
+
+:Description: Remove a bucket's objects before deletion.
+:Type: Boolean
+:Example: True
+:Required: No
+
+
+Unlink Bucket
+=============
+
+Unlink a bucket from a specified user.
+
+Syntax
+~~~~~~
+
+::
+
+ DELETE /{admin}/bucket
+ Host {fqdn}
+
+
+Request Parameters
+~~~~~~~~~~~~~~~~~~
+
+``bucket``
+
+:Description: The bucket to unlink.
+:Type: String
+:Example: ``foo_bucket``
+:Required: Yes
+
+``unlink``
+
+:Description: Parameter specifying that the bucket is to
+ be unlinked, not removed.
+:Type: Boolean
+:Example: True
+:Required: Yes
+
+``uid``
+
+:Description: The user ID to unlink the bucket from.
+:Type: String
+:Example: ``foo_user``
+:Required: Yes
+
+Response Entities
+~~~~~~~~~~~~~~~~~
+
+TBD.
+
+Link Bucket
+===========
+
+Link a bucket to a specified user.
+
+Syntax
+~~~~~~
+
+::
+
+ PUT /{admin}/bucket
+ Host {fqdn}
+
+
+Request Parameters
+~~~~~~~~~~~~~~~~~~
+
+``bucket``
+
+:Description: The bucket to link.
+:Type: String
+:Example: ``foo_bucket``
+:Required: Yes
+
+``uid``
+
+:Description: The user ID to link the bucket to.
+:Type: String
+:Example: ``foo_user``
+:Required: Yes
+
+Response Entities
+~~~~~~~~~~~~~~~~~
+
+TBD.
+
+Get Object
+==========
+
+Get an existing object.
+
+Syntax
+~~~~~~
+
+::
+
+ GET /{admin}/object
+ Host {fqdn}
+
+Request Parameters
+~~~~~~~~~~~~~~~~~~
+
+``bucket``
+
+:Description: The bucket containing the object to be retrieved.
+:Type: String
+:Example: ``foo_bucket``
+:Required: Yes
+
+``object``
+
+:Description: The object to be retrieved.
+:Type: String
+:Example: ``foo.txt``
+:Required: Yes
+
+Response Entities
+~~~~~~~~~~~~~~~~~
+
+If successful, returns the desired object.
+
+``object``
+
+:Description: The desired object.
+:Type: Object
+
+Remove Object
+=============
+
+Remove an existing object.
+
+Syntax
+~~~~~~
+
+::
+
+ DELETE /{admin}/object
+ Host {fqdn}
+
+Request Parameters
+~~~~~~~~~~~~~~~~~~
+
+``bucket``
+
+:Description: The bucket containing the object to be removed.
+:Type: String
+:Example: ``foo_bucket``
+:Required: Yes
+
+``object``
+
+:Description: The object to remove.
+:Type: String
+:Example: ``foo.txt``
+:Required: Yes
+
+Response Entities
+~~~~~~~~~~~~~~~~~
+
+TBD.
+
+Get Cluster Info
+================
+
+Get cluster information.
+
+Syntax
+~~~~~~
+
+::
+
+ GET /{admin}/cluster
+ Host {fqdn}
+
+
+Response Entities
+~~~~~~~~~~~~~~~~~
+
+If successful, returns cluster pool configuration.
+
+``cluster``
+
+:Description: Contains current cluster pool configuration.
+:Type: Container
+
+
+Add Placement Pool
+==================
+
+Make a pool available for data placement.
+
+Syntax
+~~~~~~
+
+::
+
+ PUT /{admin}/pool
+ Host {fqdn}
+
+
+Request Parameters
+~~~~~~~~~~~~~~~~~~
+
+``pool``
+
+:Description: The pool to be made available for data placement.
+:Type: String
+:Example: ``foo_pool``
+:Required: Yes
+
+``create``
+
+:Description: Creates the data pool if it does not exist.
+:Type: Boolean
+:Example: False
+:Required: No
+
+Response Entities
+~~~~~~~~~~~~~~~~~
+
+TBD.
+
+Remove Placement Pool
+=====================
+
+Make a pool unavailable for data placement.
+
+Syntax
+~~~~~~
+
+::
+
+ DELETE /{admin}/pool
+ Host {fqdn}
+
+
+Request Parameters
+~~~~~~~~~~~~~~~~~~
+
+``pool``
+
+:Description: The existing pool to be made available for data placement.
+:Type: String
+:Example: ``foo_pool``
+:Required: Yes
+
+``destroy``
+
+:Description: Destroys the pool after removing it from the active set.
+:Type: Boolean
+:Example: False
+:Required: No
+
+Response Entities
+~~~~~~~~~~~~~~~~~
+
+TBD.
+
+List Available Data Placement Pools
+===================================
+
+List current pools available for data placement.
+
+Syntax
+~~~~~~
+
+::
+
+ GET /{admin}/pool
+ Host {fqdn}
+
+
+Response Entities
+~~~~~~~~~~~~~~~~~
+
+If successful, returns a list of pools available for data placement.
+
+``pools``
+
+:Description: Contains currently available pools for data placement.
+:Type: Container
+
+Get Bucket or Object Policy
+===========================
+
+Read the policy of an object or bucket.
+
+Syntax
+~~~~~~
+
+::
+
+ GET /{admin}/policy
+ Host {fqdn}
+
+
+Request Parameters
+~~~~~~~~~~~~~~~~~~
+
+``bucket``
+
+:Description: The bucket to read the policy from.
+:Type: String
+:Example: ``foo_bucket``
+:Required: No
+
+``object``
+
+:Description: The object to read the policy from.
+:Type: String
+:Example: ``foo.txt``
+:Required: No
+
+Response Entities
+~~~~~~~~~~~~~~~~~
+
+If successful, returns the object or bucket policy
+
+``policy``
+
+:Description: Access control policy.
+:Type: Container
+
+Add A User Capability
+=====================
+
+Add an administrative capability to a specified user.
+
+Syntax
+~~~~~~
+
+::
+
+ PUT /{admin}/caps
+ Host {fqdn}
+
+Request Parameters
+~~~~~~~~~~~~~~~~~~
+
+``uid``
+
+:Description: The user ID to add an administrative capability to.
+:Type: String
+:Example: ``foo_user``
+:Required: Yes
+
+``caps``
+
+:Description: The administrative capability to add to the user.
+:Type: String
+:Example: ``usage=read, write``
+:Required: Yes
+
+Response Entities
+~~~~~~~~~~~~~~~~~
+
+If successful, the response contains the user information.
+
+``user``
+
+:Description: A container for the user data information.
+:Type: Container
+
+``user_id``
+
+:Description: The user id.
+:Type: String
+
+``display_name``
+
+:Description: Display name for the user.
+:Type: String
+
+``suspended``
+
+:Description: True if the user is suspended.
+:Type: Boolean
+
+``max_buckets``
+
+:Description: The maximum number of buckets to be owned by the user.
+:Type: Integer
+
+``subusers``
+
+:Description: Subusers associated with this user account.
+:Type: Container
+
+``keys``
+
+:Description: S3 keys associated with this user account.
+:Type: Container
+
+``swift_keys``
+
+:Description: Swift keys associated with this user account.
+:Type: Container
+
+``caps``
+
+:Description: User capabilities.
+:Type: Container
+
+Remove A User Capability
+========================
+
+Remove an administrative capability from a specified user.
+
+Syntax
+~~~~~~
+
+::
+
+ DELETE /{admin}/caps
+ Host {fqdn}
+
+Request Parameters
+~~~~~~~~~~~~~~~~~~
+
+``uid``
+
+:Description: The user ID to remove an administrative capability from.
+:Type: String
+:Example: ``foo_user``
+:Required: Yes
+
+``caps``
+
+:Description: The administrative capabilities to remove from the user.
+:Type: String
+:Example: ``usage=read, write``
+:Required: Yes
+
+Response Entities
+~~~~~~~~~~~~~~~~~
+
+If successful, the response contains the user information.
+
+``user``
+
+:Description: A container for the user data information.
+:Type: Container
+
+``user_id``
+
+:Description: The user id.
+:Type: String
+
+``display_name``
+
+:Description: Display name for the user.
+:Type: String
+
+``suspended``
+
+:Description: True if the user is suspended.
+:Type: Boolean
+
+``max_buckets``
+
+:Description: The maximum number of buckets to be owned by the user.
+:Type: Integer
+
+``subusers``
+
+:Description: Subusers associated with this user account.
+:Type: Container
+
+``keys``
+
+:Description: S3 keys associated with this user account.
+:Type: Container
+
+``swift_keys``
+
+:Description: Swift keys associated with this user account.
+:Type: Container
+
+``caps``
+
+:Description: User capabilities.
+:Type: Container
+
+
+List Expired Garbage Collection Items
+=====================================
+
+List objects scheduled for garbage collection.
+
+Syntax
+~~~~~~
+
+::
+
+ GET /{admin}/garbage
+ Host {fqdn}
+
+Request Parameters
+~~~~~~~~~~~~~~~~~~
+
+None.
+
+Response Entities
+~~~~~~~~~~~~~~~~~
+
+If expired garbage collection items exist, a list of such objects
+will be returned.
+
+``garbage``
+
+:Description: Expired garbage collection items.
+:Type: Container
+
+``object``
+
+:Description: A container for garbage collection object information.
+:Type: Container
+
+``name``
+
+:Description: The name of the object.
+:Type: String
+
+``expired``
+
+:Description: The date at which the object expired.
+:Type: String
+
+Manually Processes Garbage Collection Items
+===========================================
+
+Manually process objects scheduled for garbage collection.
+
+Syntax
+~~~~~~
+
+::
+
+ DELETE /{admin}/garbage
+ Host {fqdn}
+
+Request Parameters
+~~~~~~~~~~~~~~~~~~
+
+None.
+
+Response Entities
+~~~~~~~~~~~~~~~~~
+
+If expired garbage collection items exist, a list of removed objects
+will be returned.
+
+``garbage``
+
+:Description: Expired garbage collection items.
+:Type: Container
+
+``object``
+
+:Description: A container for garbage collection object information.
+:Type: Container
+
+``name``
+
+:Description: The name of the object.
+:Type: String
+
+``expired``
+
+:Description: The date at which the object expired.
+:Type: String
+
+Show Log Objects
+================
+
+Show log objects
+
+Syntax
+~~~~~~
+
+::
+
+ GET /{admin}/log
+ Host {fqdn}
+
+Request Parameters
+~~~~~~~~~~~~~~~~~~
+
+``object``
+
+:Description: The log object to return.
+:Type: String
+:Example: ``2012-10-11-09-4165.2-foo_bucket``
+:Required: No
+
+Response Entities
+~~~~~~~~~~~~~~~~~
+
+If no object is specified, returns the full list of log objects.
+
+``log-objects``
+
+:Description: A list of log objects.
+:Type: Container
+
+``object``
+
+:Description: The name of the log object.
+:Type: String
+
+``log``
+
+:Description: The contents of the log object.
+:Type: Container
diff --git a/doc/radosgw/config-ref.rst b/doc/radosgw/config-ref.rst
index 564270aa602..b8530b428cb 100644
--- a/doc/radosgw/config-ref.rst
+++ b/doc/radosgw/config-ref.rst
@@ -228,3 +228,9 @@ set automatically.
:Description: Total backlog data size for unix domain socket operations logging
:Type: Integer
:Default: ``5ul << 20``
+
+``rgw extended http attrs``
+
+:Description: Add new set of attributes that could be set on an object. These extra attributes can be set through HTTP header fields when putting the objects. If set, these attributes will return as HTTP fields when doing GET/HEAD on the object.
+:Type: String
+:Default: N/A
+:Example: "content_foo, content_bar"
diff --git a/doc/radosgw/config.rst b/doc/radosgw/config.rst
index 32ce66d0426..12cca0168a9 100644
--- a/doc/radosgw/config.rst
+++ b/doc/radosgw/config.rst
@@ -113,7 +113,7 @@ See `<IfModule> Directive`_ for additional details.
.. _<IfModule> Directive: http://httpd.apache.org/docs/2.2/mod/core.html#ifmodule
Finally, you should configure Apache to allow encoded slashes, provide paths for
-log files and to trun off server signatures. ::
+log files and to turn off server signatures. ::
<VirtualHost *:80>
...
@@ -171,7 +171,7 @@ Add to Ceph Keyring Entries
Once you have created a keyring and key for RADOS GW, add it as an entry in
the Ceph keyring. For example::
- ceph -k /etc/ceph/ceph.keyring auth add client.radosgw.gateway -i /etc/ceph/keyring.radosgw.gateway
+ sudo ceph -k /etc/ceph/ceph.keyring auth add client.radosgw.gateway -i /etc/ceph/keyring.radosgw.gateway
Restart Services and Start the RADOS Gateway
diff --git a/doc/radosgw/index.rst b/doc/radosgw/index.rst
index d9c2e3579bd..7f76c588c2d 100644
--- a/doc/radosgw/index.rst
+++ b/doc/radosgw/index.rst
@@ -40,7 +40,7 @@ one API and retrieve it with the other.
Config Reference <config-ref>
Purging Temp Data <purge-temp>
S3 API <s3>
- Swift API <swift/index>
+ Swift API <swift>
Admin API <admin/index>
troubleshooting
Manpage radosgw <../../man/8/radosgw>
diff --git a/doc/radosgw/s3.rst b/doc/radosgw/s3.rst
index 73b3a5eb54b..25e9d977d9a 100644
--- a/doc/radosgw/s3.rst
+++ b/doc/radosgw/s3.rst
@@ -66,7 +66,7 @@ The following table describes the support status for current Amazon S3 functiona
+---------------------------------+-----------------+----------------------------------------+
| **Get Object Info (HEAD)** | Supported | |
+---------------------------------+-----------------+----------------------------------------+
-| **POST Object** | Not Supported | |
+| **POST Object** | Supported | |
+---------------------------------+-----------------+----------------------------------------+
| **Copy Object** | Supported | |
+---------------------------------+-----------------+----------------------------------------+
diff --git a/doc/radosgw/s3/python.rst b/doc/radosgw/s3/python.rst
index 35261694538..c5a8432485a 100644
--- a/doc/radosgw/s3/python.rst
+++ b/doc/radosgw/s3/python.rst
@@ -32,9 +32,9 @@ This also prints out the bucket name and creation date of each bucket.
.. code-block:: python
for bucket in conn.get_all_buckets():
- print "{name}\t{created}".format(
- name = bucket.name,
- created = bucket.creation_date,
+ print "{name}\t{created}".format(
+ name = bucket.name,
+ created = bucket.creation_date,
)
The output will look something like this::
diff --git a/doc/radosgw/swift.rst b/doc/radosgw/swift.rst
new file mode 100644
index 00000000000..d3fbcd1ca03
--- /dev/null
+++ b/doc/radosgw/swift.rst
@@ -0,0 +1,74 @@
+===============
+RADOS Swift API
+===============
+
+Ceph supports a RESTful API that is compatible with the basic data access model of the Swift API.
+
+API
+---
+
+.. toctree::
+ :maxdepth: 1
+
+ Common <swift/index>
+ Authentication <swift/auth>
+ Service Ops <swift/serviceops>
+ Container Ops <swift/containerops>
+ Object Ops <swift/objectops>
+ Tutorial <swift/tutorial>
+ Java <swift/java>
+ Python <swift/python>
+ Ruby <swift/ruby>
+
+
+Features Support
+----------------
+
+The following table describes the support status for current Swift functional features:
+
++---------------------------------+-----------------+----------------------------------------+
+| Feature | Status | Remarks |
++=================================+=================+========================================+
+| **Authentication** | Supported | |
++---------------------------------+-----------------+----------------------------------------+
+| **Get Account Metadata** | Supported | No custom metadata |
++---------------------------------+-----------------+----------------------------------------+
+| **Swift ACLs** | Supported | Supports a subset of Swift ACLs |
++---------------------------------+-----------------+----------------------------------------+
+| **List Containers** | Supported | |
++---------------------------------+-----------------+----------------------------------------+
+| **Delete Container** | Supported | |
++---------------------------------+-----------------+----------------------------------------+
+| **Create Container** | Supported | |
++---------------------------------+-----------------+----------------------------------------+
+| **Get Container Metadata** | Supported | |
++---------------------------------+-----------------+----------------------------------------+
+| **Update Container Metadata** | Supported | |
++---------------------------------+-----------------+----------------------------------------+
+| **Delete Container Metadata** | Supported | |
++---------------------------------+-----------------+----------------------------------------+
+| **List Objects** | Supported | |
++---------------------------------+-----------------+----------------------------------------+
+| **Static Website** | Not Supported | |
++---------------------------------+-----------------+----------------------------------------+
+| **Create Object** | Supported | |
++---------------------------------+-----------------+----------------------------------------+
+| **Create Large Object** | Supported | |
++---------------------------------+-----------------+----------------------------------------+
+| **Delete Object** | Supported | |
++---------------------------------+-----------------+----------------------------------------+
+| **Get Object** | Supported | |
++---------------------------------+-----------------+----------------------------------------+
+| **Copy Object** | Supported | |
++---------------------------------+-----------------+----------------------------------------+
+| **Get Object Metadata** | Supported | |
++---------------------------------+-----------------+----------------------------------------+
+| **Update Object Metadata** | Supported | |
++---------------------------------+-----------------+----------------------------------------+
+| **Expiring Objects** | Not Supported | |
++---------------------------------+-----------------+----------------------------------------+
+| **Object Versioning** | Not Supported | |
++---------------------------------+-----------------+----------------------------------------+
+| **CORS** | Not Supported | |
++---------------------------------+-----------------+----------------------------------------+
+
diff --git a/doc/radosgw/swift/index.rst b/doc/radosgw/swift/swift.rst
index 3b8531d7bd4..3b8531d7bd4 100644
--- a/doc/radosgw/swift/index.rst
+++ b/doc/radosgw/swift/swift.rst
diff --git a/qa/run_xfstests.sh b/qa/run_xfstests.sh
index 1eba38a248d..3bcd8b5a636 100644
--- a/qa/run_xfstests.sh
+++ b/qa/run_xfstests.sh
@@ -49,7 +49,7 @@ XFS_MKFS_OPTIONS="-l su=32k"
# until we can work through getting them all passing reliably.
TESTS="1-9 11-15 17 19-21 26-29 31-34 41 46-48 50-54 56 61 63-67 69-70 74-76"
TESTS="${TESTS} 78 79 84-89 91-92 100 103 105 108 110 116-121 124 126"
-TESTS="${TESTS} 129-135 137-141 164-167 179 182-184 186-190 192 194"
+TESTS="${TESTS} 129-135 137-141 164-167 182 184 186-190 192 194"
TESTS="${TESTS} 196 199 201 203 214-216 220-227 234 236-238 241 243-249"
TESTS="${TESTS} 253 257-259 261 262 269 273 275 277 278 280 285 286"
# 275 was the highest available test as of 4/10/12.
diff --git a/qa/run_xfstests_qemu.sh b/qa/run_xfstests_qemu.sh
index 0b5b86de090..d2df8a563ba 100644
--- a/qa/run_xfstests_qemu.sh
+++ b/qa/run_xfstests_qemu.sh
@@ -3,5 +3,7 @@
mkdir /tmp/cephtest
wget https://raw.github.com/ceph/ceph/master/qa/run_xfstests.sh
chmod +x run_xfstests.sh
-# tests excluded require extra packages for advanced acl and quota support
-./run_xfstests.sh -c 1 -f xfs -t /dev/vdb -s /dev/vdc 1-26 28-49 51-63 65-83 85-233 235-291
+# tests excluded fail in the current testing vm regardless of whether
+# rbd is used
+
+./run_xfstests.sh -c 1 -f xfs -t /dev/vdb -s /dev/vdc 1-17 19-26 28-49 51-61 63 66-79 83 85-105 108-110 112-135 137-170 172-204 206-217 220-227 230-231 233 235-241 243-249 251-262 264-278 281-286 288-289
diff --git a/qa/workunits/rbd/map-unmap.sh b/qa/workunits/rbd/map-unmap.sh
index 9ecc226e5f5..341a0be081c 100755
--- a/qa/workunits/rbd/map-unmap.sh
+++ b/qa/workunits/rbd/map-unmap.sh
@@ -1,4 +1,4 @@
-#!/bin/bash -e
+#!/bin/bash -ex
RUN_TIME=300 # approximate duration of run (seconds)
@@ -10,8 +10,6 @@ IMAGE_SIZE="1024" # MB
ID_TIMEOUT="10" # seconds to wait to get rbd id after mapping
ID_DELAY=".1" # floating-point seconds to delay before rescan
-MAP_DELAY=".25" # floating-point seconds to delay before unmap
-
function get_time() {
date '+%s'
}
@@ -22,26 +20,34 @@ function times_up() {
test $(get_time) -ge "${end_time}"
}
-function get_id() {
+function _get_id() {
[ $# -eq 1 ] || exit 99
local image_name="$1"
local id=""
- local end_time=$(expr $(get_time) + ${ID_TIMEOUT})
cd /sys/bus/rbd/devices
-
- while [ -z "${id}" ]; do
- if times_up "${end_time}"; then
- break;
+ for i in *; do
+ if [ "$(cat $i/name)" = "${image_name}" ]; then
+ id="$i"
+ break
fi
- for i in *; do
- if [ "$(cat $i/name)" = "${image_name}" ]; then
- id=$i
- break
- fi
- done
+ done
+ cd - >/dev/null
+
+ echo $id
+ test -n "${id}" # return code 0 if id was found
+}
+function get_id() {
+ [ $# -eq 1 ] || exit 99
+ local image_name="$1"
+ local id=""
+ local end_time=$(expr $(get_time) + ${ID_TIMEOUT})
+
+ while ! id=$(_get_id "${image_name}") && ! times_up "${end_time}"; do
+ echo "get_id: image not mapped; trying again after delay" >&2
sleep "${ID_DELAY}"
done
+
echo $id
test -n "${id}" # return code 0 if id was found
}
@@ -51,11 +57,12 @@ function map_unmap() {
local image_name="$1"
rbd map "${image_name}"
- RBD_ID=$(get_id "${image_name}")
+ udevadm settle
- sleep "${MAP_DELAY}"
+ RBD_ID=$(get_id "${image_name}")
rbd unmap "/dev/rbd${RBD_ID}"
+ udevadm settle
}
function setup() {
diff --git a/src/Makefile.am b/src/Makefile.am
index 64924d7bd50..1497b9dd341 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -29,7 +29,8 @@ sbin_PROGRAMS =
sbin_SCRIPTS = \
ceph-disk-prepare \
ceph-disk-activate \
- ceph-create-keys
+ ceph-create-keys \
+ mount.fuse.ceph
bin_SCRIPTS = ceph-run $(srcdir)/ceph-clsinfo ceph-debugpack ceph-rbdnamer
dist_bin_SCRIPTS =
# C/C++ tests to build will be appended to this
@@ -227,10 +228,12 @@ bin_DEBUGPROGRAMS += smalliobench
smalliobenchfs_SOURCES = test/bench/small_io_bench_fs.cc test/bench/filestore_backend.cc test/bench/detailed_stat_collector.cc test/bench/bencher.cc
smalliobenchfs_LDADD = librados.la -lboost_program_options $(LIBOS_LDA) $(LIBGLOBAL_LDA)
+smalliobenchfs_CXXFLAGS = ${CRYPTO_CXXFLAGS} ${AM_CXXFLAGS}
bin_DEBUGPROGRAMS += smalliobenchfs
smalliobenchdumb_SOURCES = test/bench/small_io_bench_dumb.cc test/bench/dumb_backend.cc test/bench/detailed_stat_collector.cc test/bench/bencher.cc
smalliobenchdumb_LDADD = librados.la -lboost_program_options $(LIBOS_LDA) $(LIBGLOBAL_LDA)
+smalliobenchdumb_CXXFLAGS = ${CRYPTO_CXXFLAGS} ${AM_CXXFLAGS}
bin_DEBUGPROGRAMS += smalliobenchdumb
tpbench_SOURCES = test/bench/tp_bench.cc test/bench/detailed_stat_collector.cc
@@ -861,6 +864,7 @@ test_mon_workloadgen_SOURCES = \
osdc/Objecter.cc \
osdc/Striper.cc
test_mon_workloadgen_LDADD = $(LIBOS_LDA) $(LIBGLOBAL_LDA)
+test_mon_workloadgen_CXXFLAGS = ${CRYPTO_CXXFLAGS} ${AM_CXXFLAGS}
bin_DEBUGPROGRAMS += test_mon_workloadgen
test_rados_api_io_SOURCES = test/librados/io.cc test/librados/test.cc
@@ -1097,7 +1101,8 @@ EXTRA_DIST += \
$(srcdir)/upstart/radosgw-all-starter.conf \
ceph-disk-prepare \
ceph-disk-activate \
- ceph-create-keys
+ ceph-create-keys \
+ mount.fuse.ceph
EXTRA_DIST += $(srcdir)/$(shell_scripts:%=%.in)
diff --git a/src/auth/cephx/CephxProtocol.h b/src/auth/cephx/CephxProtocol.h
index dfa2b2f0896..38e0616b501 100644
--- a/src/auth/cephx/CephxProtocol.h
+++ b/src/auth/cephx/CephxProtocol.h
@@ -470,8 +470,13 @@ int decode_decrypt(CephContext *cct, T& t, const CryptoKey key,
bufferlist::iterator& iter, std::string &error)
{
bufferlist bl_enc;
- ::decode(bl_enc, iter);
- decode_decrypt_enc_bl(cct, t, key, bl_enc, error);
+ try {
+ ::decode(bl_enc, iter);
+ decode_decrypt_enc_bl(cct, t, key, bl_enc, error);
+ }
+ catch (buffer::error e) {
+ error = "error decoding block for decryption";
+ }
if (!error.empty())
return CEPHX_CRYPT_ERR;
return 0;
diff --git a/src/ceph-disk-activate b/src/ceph-disk-activate
index 5fcc5bd177a..f78ae17ce88 100755
--- a/src/ceph-disk-activate
+++ b/src/ceph-disk-activate
@@ -474,6 +474,11 @@ def activate(
keyring=keyring,
)
+ # indicate this daemon is managed by upstart
+ if not os.path.exists(os.path.join(path, 'upstart')):
+ with file(os.path.join(path, 'upstart'), 'w'):
+ pass
+
if not os.path.exists(os.path.join(path, 'active')):
log.debug('Authorizing OSD key...')
auth_key(
diff --git a/src/client/Client.cc b/src/client/Client.cc
index 0ca174b4536..d876454f0f4 100644
--- a/src/client/Client.cc
+++ b/src/client/Client.cc
@@ -3656,6 +3656,12 @@ void Client::flush_cap_releases()
void Client::tick()
{
+ if (cct->_conf->client_debug_inject_tick_delay > 0) {
+ sleep(cct->_conf->client_debug_inject_tick_delay);
+ assert(0 == cct->_conf->set_val("client_debug_inject_tick_delay", "0"));
+ cct->_conf->apply_changes(NULL);
+ }
+
ldout(cct, 21) << "tick" << dendl;
tick_event = new C_C_Tick(this);
timer.add_event_after(cct->_conf->client_tick_interval, tick_event);
diff --git a/src/common/config_opts.h b/src/common/config_opts.h
index b04a28f3259..bd5b733bd64 100644
--- a/src/common/config_opts.h
+++ b/src/common/config_opts.h
@@ -183,6 +183,7 @@ OPTION(client_oc_target_dirty, OPT_INT, 1024*1024* 8) // target dirty (keep this
OPTION(client_oc_max_dirty_age, OPT_DOUBLE, 5.0) // max age in cache before writeback
OPTION(client_oc_max_objects, OPT_INT, 1000) // max objects in cache
OPTION(client_debug_force_sync_read, OPT_BOOL, false) // always read synchronously (go to osds)
+OPTION(client_debug_inject_tick_delay, OPT_INT, 0) // delay the client tick for a number of seconds
// note: the max amount of "in flight" dirty data is roughly (max - target)
OPTION(fuse_use_invalidate_cb, OPT_BOOL, false) // use fuse 2.8+ invalidate callback to keep page cache consistent
OPTION(fuse_big_writes, OPT_BOOL, true)
@@ -483,8 +484,10 @@ OPTION(rgw_gc_max_objs, OPT_INT, 32)
OPTION(rgw_gc_obj_min_wait, OPT_INT, 2 * 3600) // wait time before object may be handled by gc
OPTION(rgw_gc_processor_max_time, OPT_INT, 3600) // total run time for a single gc processor work
OPTION(rgw_gc_processor_period, OPT_INT, 3600) // gc processor cycle time
+OPTION(rgw_s3_success_create_obj_status, OPT_INT, 0) // alternative success status response for create-obj (0 - default)
OPTION(rgw_resolve_cname, OPT_BOOL, false) // should rgw try to resolve hostname as a dns cname record
OPTION(rgw_obj_stripe_size, OPT_INT, 4 << 20)
+OPTION(rgw_extended_http_attrs, OPT_STR, "") // list of extended attrs that can be set on objects (beyond the default)
OPTION(mutex_perf_counter, OPT_BOOL, false) // enable/disable mutex perf counter
diff --git a/src/common/lockdep.cc b/src/common/lockdep.cc
index 873f28e9023..7f67ae959fe 100644
--- a/src/common/lockdep.cc
+++ b/src/common/lockdep.cc
@@ -37,7 +37,7 @@ namespace __gnu_cxx {
#define DOUT_COND(cct, l) cct && l <= XDOUT_CONDVAR(cct, dout_subsys)
#define lockdep_dout(v) lsubdout(g_lockdep_ceph_ctx, lockdep, v)
#define MAX_LOCKS 1000 // increase me as needed
-#define BACKTRACE_SKIP 3
+#define BACKTRACE_SKIP 2
/******* Globals **********/
int g_lockdep = get_env_int("CEPH_LOCKDEP");
diff --git a/src/init-ceph.in b/src/init-ceph.in
index 788d7172979..b75f639a359 100644
--- a/src/init-ceph.in
+++ b/src/init-ceph.in
@@ -277,7 +277,7 @@ for name in $what; do
# try to fallback to to old keys
get_conf tmp_devs "" "btrfs devs"
if [ -n "$tmp_devs" ]; then
- fs_type = "btrfs"
+ fs_type="btrfs"
else
echo No filesystem type defined!
exit 0
diff --git a/src/mkcephfs.in b/src/mkcephfs.in
index 62d8c9fcbc8..3f271ce6c49 100644
--- a/src/mkcephfs.in
+++ b/src/mkcephfs.in
@@ -364,7 +364,7 @@ if [ -n "$prepareosdfs" ]; then
done
get_conf mkfs_opt "" "osd mkfs options $fs_type"
- if [ "$fs_type" == "xfs" ] && [ -z "$mkfs_opt" ]; then
+ if [ "$fs_type" = "xfs" ] && [ -z "$mkfs_opt" ]; then
echo Xfs filesystem found add missing -f mkfs option!
mkfs_opt="-f"
fi
diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc
index 476e5138b30..e893655dab6 100644
--- a/src/mon/OSDMonitor.cc
+++ b/src/mon/OSDMonitor.cc
@@ -2623,22 +2623,34 @@ bool OSDMonitor::prepare_command(MMonCommand *m)
}
int pg_num = 0;
int pgp_num = 0;
- const char *start = m->cmd[4].c_str();
- char *end = (char*)start;
- pgp_num = pg_num = strtol(start, &end, 10);
- if (*end != '\0') { // failed to parse
+
+ /* Don't allow over 65535 pgs in a single pool */
+ pg_num = parse_pos_long(m->cmd[4].c_str(), &ss);
+ if ((pg_num == 0) || (pg_num > 65535)) {
+ ss << "'pg_num' must be greater than 0 and lower or equal than 65535";
+ err = -ERANGE;
+ goto out;
+ }
+
+ if (pg_num < 0) {
err = -EINVAL;
- ss << "usage: osd pool create <poolname> <pg_num> [pgp_num]";
goto out;
- } else if (m->cmd.size() > 5) { // check for pgp_num too
- start = m->cmd[5].c_str();
- end = (char *)start;
- pgp_num = strtol(start, &end, 10);
- if (*end != '\0') { // failed to parse
- err = -EINVAL;
- ss << "usage: osd pool create <poolname> <pg_num> [pgp_num]";
- goto out;
- }
+ }
+
+ pgp_num = pg_num;
+ if (m->cmd.size() > 5) {
+ pgp_num = parse_pos_long(m->cmd[5].c_str(), &ss);
+ if (pgp_num < 0) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if ((pgp_num == 0) || (pgp_num > pg_num)) {
+ ss << "'pgp_num' must be greater than 0 and lower or equal than 'pg_num'"
+ << ", which in this case is " << pg_num;
+ err = -ERANGE;
+ goto out;
+ }
}
if (osdmap.name_pool.count(m->cmd[3])) {
diff --git a/src/mount.fuse.ceph b/src/mount.fuse.ceph
new file mode 100755
index 00000000000..cbf2874f3bd
--- /dev/null
+++ b/src/mount.fuse.ceph
@@ -0,0 +1,26 @@
+#!/bin/sh
+#
+# Helper to mount ceph-fuse from /etc/fstab. To use, add an entry
+# like:
+#
+# # DEVICE PATH TYPE OPTIONS
+# id=admin /mnt/ceph fuse.ceph defaults 0 0
+# id=myuser,conf=/etc/ceph/foo.conf /mnt/ceph2 fuse.ceph defaults 0 0
+#
+# where the device field is a comma-separated list of options to pass on
+# the command line. The examples above, for example, specify that
+# ceph-fuse will authenticate as client.admin and client.myuser
+# (respectively), and the second example also sets the 'conf' option to
+# '/etc/ceph/foo.conf' via the ceph-fuse command line. Any valid
+# ceph-fuse can be passed in this way.
+
+set -e
+
+# convert device string to options
+cephargs='--'`echo $1 | sed 's/,/ --/g'`
+
+# strip out 'noauto' option; libfuse doesn't like it
+opts=`echo $4 | sed 's/,noauto//' | sed 's/noauto,//'`
+
+# go
+exec ceph-fuse $cephargs $2 $3 $opts
diff --git a/src/os/FileStore.cc b/src/os/FileStore.cc
index 8cb8720738e..99cea7f22da 100644
--- a/src/os/FileStore.cc
+++ b/src/os/FileStore.cc
@@ -1227,16 +1227,14 @@ int FileStore::_detect_fs()
} else {
dout(0) << "mount syncfs(2) syscall supported by glibc BUT NOT the kernel" << dendl;
}
-#endif
-#ifdef SYS_syncfs
+#elif defined(SYS_syncfs)
if (syscall(SYS_syncfs, fd) == 0) {
dout(0) << "mount syscall(SYS_syncfs, fd) fully supported" << dendl;
have_syncfs = true;
} else {
dout(0) << "mount syscall(SYS_syncfs, fd) supported by libc BUT NOT the kernel" << dendl;
}
-#endif
-#ifdef __NR_syncfs
+#elif defined(__NR_syncfs)
if (syscall(__NR_syncfs, fd) == 0) {
dout(0) << "mount syscall(__NR_syncfs, fd) fully supported" << dendl;
have_syncfs = true;
diff --git a/src/osd/OSD.cc b/src/osd/OSD.cc
index c88dfea4818..217dd23b152 100644
--- a/src/osd/OSD.cc
+++ b/src/osd/OSD.cc
@@ -4770,9 +4770,9 @@ void OSD::handle_pg_create(OpRequestRef op)
continue;
}
if (up != acting) {
- dout(10) << "mkpg " << pgid << " up " << up << " != acting " << acting << dendl;
- clog.error() << "mkpg " << pgid << " up " << up << " != acting "
- << acting << "\n";
+ dout(10) << "mkpg " << pgid << " up " << up << " != acting " << acting << ", ignoring" << dendl;
+ // we'll get a query soon anyway, since we know the pg
+ // must exist. we can ignore this.
continue;
}
diff --git a/src/osd/ReplicatedPG.cc b/src/osd/ReplicatedPG.cc
index ba314118f90..4c6c481cc65 100644
--- a/src/osd/ReplicatedPG.cc
+++ b/src/osd/ReplicatedPG.cc
@@ -119,7 +119,7 @@ void ReplicatedPG::wait_for_missing_object(const hobject_t& soid, OpRequestRef o
}
else {
dout(7) << "missing " << soid << " v " << v << ", pulling." << dendl;
- pull(soid, v, op->request->get_priority());
+ pull(soid, v, g_conf->osd_client_op_priority);
}
waiting_for_missing_object[soid].push_back(op);
op->mark_delayed();
@@ -175,7 +175,7 @@ void ReplicatedPG::wait_for_degraded_object(const hobject_t& soid, OpRequestRef
break;
}
}
- recover_object_replicas(soid, v, op->request->get_priority());
+ recover_object_replicas(soid, v, g_conf->osd_client_op_priority);
}
waiting_for_degraded_object[soid].push_back(op);
op->mark_delayed();
@@ -6909,7 +6909,7 @@ int ReplicatedPG::recover_backfill(int max)
MOSDPGBackfill *m = NULL;
if (bound.is_max()) {
m = new MOSDPGBackfill(MOSDPGBackfill::OP_BACKFILL_FINISH, e, e, info.pgid);
- m->set_priority(g_conf->osd_recovery_op_priority);
+ // Use default priority here, must match sub_op priority
/* pinfo.stats might be wrong if we did log-based recovery on the
* backfilled portion in addition to continuing backfill.
*/
@@ -6917,7 +6917,7 @@ int ReplicatedPG::recover_backfill(int max)
start_recovery_op(hobject_t::get_max());
} else {
m = new MOSDPGBackfill(MOSDPGBackfill::OP_BACKFILL_PROGRESS, e, e, info.pgid);
- m->set_priority(g_conf->osd_recovery_op_priority);
+ // Use default priority here, must match sub_op priority
}
m->last_backfill = bound;
m->stats = pinfo.stats.stats;
diff --git a/src/pybind/rbd.py b/src/pybind/rbd.py
index da5bc69f229..81ebb57f73e 100644
--- a/src/pybind/rbd.py
+++ b/src/pybind/rbd.py
@@ -155,22 +155,41 @@ class RBD(object):
:param stripe_count: objects to stripe over before looping
:type stripe_count: int
:raises: :class:`ImageExists`
+ :raises: :class:`TypeError`
+ :raises: :class:`InvalidArgument`
+ :raises: :class:`FunctionNotSupported`
"""
if order is None:
order = 0
if not isinstance(name, str):
raise TypeError('name must be a string')
if old_format:
+ if features != 0 or stripe_unit != 0 or stripe_count != 0:
+ raise InvalidArgument('format 1 images do not support feature'
+ ' masks or non-default striping')
ret = self.librbd.rbd_create(ioctx.io, c_char_p(name),
c_uint64(size),
byref(c_int(order)))
else:
- ret = self.librbd.rbd_create3(ioctx.io, c_char_p(name),
- c_uint64(size),
- c_uint64(features),
- byref(c_int(order)),
- c_uint64(stripe_unit),
- c_uint64(stripe_count))
+ if not hasattr(self.librbd, 'rbd_create2'):
+ raise FunctionNotSupported('installed version of librbd does'
+ ' not support format 2 images')
+ has_create3 = hasattr(self.librbd, 'rbd_create3')
+ if (stripe_unit != 0 or stripe_count != 0) and not has_create3:
+ raise FunctionNotSupported('installed version of librbd does'
+ ' not support stripe unit or count')
+ if has_create3:
+ ret = self.librbd.rbd_create3(ioctx.io, c_char_p(name),
+ c_uint64(size),
+ c_uint64(features),
+ byref(c_int(order)),
+ c_uint64(stripe_unit),
+ c_uint64(stripe_count))
+ else:
+ ret = self.librbd.rbd_create2(ioctx.io, c_char_p(name),
+ c_uint64(size),
+ c_uint64(features),
+ byref(c_int(order)))
if ret < 0:
raise make_ex(ret, 'error creating image')
diff --git a/src/rbd.cc b/src/rbd.cc
index 3db89787126..3920d4b9d55 100644
--- a/src/rbd.cc
+++ b/src/rbd.cc
@@ -199,7 +199,19 @@ static int do_list(librbd::RBD &rbd, librados::IoCtx& io_ctx, bool lflag)
librbd::image_info_t info;
librbd::Image im;
- rbd.open(io_ctx, im, i->c_str());
+ r = rbd.open(io_ctx, im, i->c_str());
+ // image might disappear between rbd.list() and rbd.open(); ignore
+ // that, warn about other possible errors (EPERM, say, for opening
+ // an old-format image, because you need execute permission for the
+ // class method)
+ if (r < 0) {
+ if (r != -ENOENT) {
+ cerr << "rbd: error opening " << *i << ": " << cpp_strerror(r)
+ << std::endl;
+ }
+ // in any event, continue to next image
+ continue;
+ }
// handle second-nth trips through loop
parent.clear();
diff --git a/src/rgw/rgw_cache.h b/src/rgw/rgw_cache.h
index fb6ca10692d..e4002f6af25 100644
--- a/src/rgw/rgw_cache.h
+++ b/src/rgw/rgw_cache.h
@@ -268,6 +268,11 @@ int RGWCache<T>::get_obj(void *ctx, void **handle, rgw_obj& obj, bufferlist& obl
return r;
}
+ if (obl.length() == end + 1) {
+ /* in this case, most likely object contains more data, we can't cache it */
+ return r;
+ }
+
bufferptr p(r);
bufferlist& bl = info.data;
bl.clear();
diff --git a/src/rgw/rgw_main.cc b/src/rgw/rgw_main.cc
index 5f52dde228d..29c10dd0eeb 100644
--- a/src/rgw/rgw_main.cc
+++ b/src/rgw/rgw_main.cc
@@ -433,7 +433,7 @@ int main(int argc, const char **argv)
rgw_tools_init(g_ceph_context);
rgw_init_resolver();
- rgw_rest_init();
+ rgw_rest_init(g_ceph_context);
curl_global_init(CURL_GLOBAL_ALL);
diff --git a/src/rgw/rgw_rest.cc b/src/rgw/rgw_rest.cc
index 9d978d1a4f1..c8c0bacf0a0 100644
--- a/src/rgw/rgw_rest.cc
+++ b/src/rgw/rgw_rest.cc
@@ -3,6 +3,7 @@
#include "common/Formatter.h"
#include "common/utf8.h"
+#include "include/str_list.h"
#include "rgw_common.h"
#include "rgw_rados.h"
#include "rgw_formats.h"
@@ -37,15 +38,6 @@ static struct rgw_http_attr rgw_to_http_attr_list[] = {
};
-map<string, string> rgw_to_http_attrs;
-
-void rgw_rest_init()
-{
- for (struct rgw_http_attr *attr = rgw_to_http_attr_list; attr->rgw_attr; attr++) {
- rgw_to_http_attrs[attr->rgw_attr] = attr->http_attr;
- }
-}
-
struct generic_attr {
const char *http_header;
const char *rgw_attr;
@@ -64,6 +56,106 @@ struct generic_attr generic_attrs[] = {
{ NULL, NULL },
};
+map<string, string> rgw_to_http_attrs;
+static map<string, string> generic_attrs_map;
+
+/*
+ * make attrs look_like_this
+ */
+string lowercase_http_attr(const string& orig)
+{
+ const char *s = orig.c_str();
+ char buf[orig.size() + 1];
+ buf[orig.size()] = '\0';
+
+ for (size_t i = 0; i < orig.size(); ++i, ++s) {
+ switch (*s) {
+ case '-':
+ buf[i] = '_';
+ break;
+ default:
+ buf[i] = tolower(*s);
+ }
+ }
+ return string(buf);
+}
+
+/*
+ * make attrs LOOK_LIKE_THIS
+ */
+string uppercase_http_attr(const string& orig)
+{
+ const char *s = orig.c_str();
+ char buf[orig.size() + 1];
+ buf[orig.size()] = '\0';
+
+ for (size_t i = 0; i < orig.size(); ++i, ++s) {
+ switch (*s) {
+ case '-':
+ buf[i] = '_';
+ break;
+ default:
+ buf[i] = toupper(*s);
+ }
+ }
+ return string(buf);
+}
+
+/*
+ * make attrs Look-Like-This
+ */
+string camelcase_dash_http_attr(const string& orig)
+{
+ const char *s = orig.c_str();
+ char buf[orig.size() + 1];
+ buf[orig.size()] = '\0';
+
+ bool last_sep = true;
+
+ for (size_t i = 0; i < orig.size(); ++i, ++s) {
+ switch (*s) {
+ case '_':
+ buf[i] = '-';
+ last_sep = true;
+ break;
+ default:
+ if (last_sep)
+ buf[i] = toupper(*s);
+ else
+ buf[i] = tolower(*s);
+ last_sep = false;
+ }
+ }
+ return string(buf);
+}
+
+void rgw_rest_init(CephContext *cct)
+{
+ for (struct rgw_http_attr *attr = rgw_to_http_attr_list; attr->rgw_attr; attr++) {
+ rgw_to_http_attrs[attr->rgw_attr] = attr->http_attr;
+ }
+
+ for (struct generic_attr *gen_attr = generic_attrs; gen_attr->http_header; gen_attr++) {
+ generic_attrs_map[gen_attr->http_header] = gen_attr->rgw_attr;
+ }
+
+ list<string> extended_http_attrs;
+ get_str_list(cct->_conf->rgw_extended_http_attrs, extended_http_attrs);
+
+ list<string>::iterator iter;
+ for (iter = extended_http_attrs.begin(); iter != extended_http_attrs.end(); ++iter) {
+ string rgw_attr = RGW_ATTR_PREFIX;
+ rgw_attr.append(lowercase_http_attr(*iter));
+
+ rgw_to_http_attrs[rgw_attr] = camelcase_dash_http_attr(*iter);
+
+ string http_header = "HTTP_";
+ http_header.append(uppercase_http_attr(*iter));
+
+ generic_attrs_map[http_header] = rgw_attr;
+ }
+}
+
static void dump_status(struct req_state *s, const char *status)
{
int r = s->cio->print("Status: %s\n", status);
@@ -1075,10 +1167,11 @@ int RGWREST::preprocess(struct req_state *s, RGWClientIO *cio)
s->content_length = atoll(s->length);
}
- for (int i = 0; generic_attrs[i].http_header; i++) {
- const char *env = s->env->get(generic_attrs[i].http_header);
+ map<string, string>::iterator giter;
+ for (giter = generic_attrs_map.begin(); giter != generic_attrs_map.end(); ++giter) {
+ const char *env = s->env->get(giter->first.c_str());
if (env) {
- s->generic_attrs[generic_attrs[i].rgw_attr] = env;
+ s->generic_attrs[giter->second] = env;
}
}
diff --git a/src/rgw/rgw_rest.h b/src/rgw/rgw_rest.h
index 35c34a6cdba..00ad584575e 100644
--- a/src/rgw/rgw_rest.h
+++ b/src/rgw/rgw_rest.h
@@ -8,7 +8,7 @@
extern std::map<std::string, std::string> rgw_to_http_attrs;
-extern void rgw_rest_init();
+extern void rgw_rest_init(CephContext *cct);
extern void rgw_flush_formatter_and_reset(struct req_state *s,
ceph::Formatter *formatter);
diff --git a/src/rgw/rgw_rest_s3.cc b/src/rgw/rgw_rest_s3.cc
index f200db847fa..b2925940f77 100644
--- a/src/rgw/rgw_rest_s3.cc
+++ b/src/rgw/rgw_rest_s3.cc
@@ -352,11 +352,26 @@ int RGWPutObj_ObjStore_S3::get_params()
return RGWPutObj_ObjStore::get_params();
}
+static int get_success_retcode(int code)
+{
+ switch (code) {
+ case 201:
+ return STATUS_CREATED;
+ case 204:
+ return STATUS_NO_CONTENT;
+ }
+ return 0;
+}
+
void RGWPutObj_ObjStore_S3::send_response()
{
if (ret) {
set_req_state_err(s, ret);
} else {
+ if (s->cct->_conf->rgw_s3_success_create_obj_status) {
+ ret = get_success_retcode(s->cct->_conf->rgw_s3_success_create_obj_status);
+ set_req_state_err(s, ret);
+ }
dump_etag(s, etag.c_str());
dump_content_length(s, 0);
}
diff --git a/src/rgw/rgw_tools.cc b/src/rgw/rgw_tools.cc
index b6d9f284771..e83e49a0652 100644
--- a/src/rgw/rgw_tools.cc
+++ b/src/rgw/rgw_tools.cc
@@ -10,7 +10,7 @@
#define dout_subsys ceph_subsys_rgw
-#define READ_CHUNK_LEN (16 * 1024)
+#define READ_CHUNK_LEN (512 * 1024)
static map<string, string> ext_mime_map;
@@ -41,25 +41,24 @@ int rgw_get_obj(RGWRados *rgwstore, void *ctx, rgw_bucket& bucket, string& key,
bufferlist::iterator iter;
int request_len = READ_CHUNK_LEN;
rgw_obj obj(bucket, key);
- ret = rgwstore->prepare_get_obj(ctx, obj, NULL, NULL, pattrs, NULL,
+ do {
+ ret = rgwstore->prepare_get_obj(ctx, obj, NULL, NULL, pattrs, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, &handle, &err);
- if (ret < 0)
- return ret;
+ if (ret < 0)
+ return ret;
- do {
ret = rgwstore->get_obj(ctx, &handle, obj, bl, 0, request_len - 1);
+ rgwstore->finish_get_obj(&handle);
if (ret < 0)
- goto done;
+ return ret;
+
if (ret < request_len)
break;
bl.clear();
request_len *= 2;
} while (true);
- ret = 0;
-done:
- rgwstore->finish_get_obj(&handle);
- return ret;
+ return 0;
}
void parse_mime_map_line(const char *start, const char *end)
diff --git a/src/sample.ceph.conf b/src/sample.ceph.conf
index dd121f2ee5c..62450bebf4e 100644
--- a/src/sample.ceph.conf
+++ b/src/sample.ceph.conf
@@ -4,7 +4,7 @@
; This file defines cluster membership, the various locations
; that Ceph stores data, and any other runtime options.
-; If a 'host' is defined for a daemon, the start/stop script will
+; If a 'host' is defined for a daemon, the init.d start/stop script will
; verify that it matches the hostname (or else ignore it). If it is
; not defined, it is assumed that the daemon is intended to start on
; the current host (e.g., in a setup with a startup.conf on each
@@ -104,20 +104,19 @@
; You need at least one. Two if you want data to be replicated.
; Define as many as you like.
[osd]
- ; This is where the btrfs volume will be mounted.
+ ; This is where the osd expects its data
osd data = /data/$name
- ; Ideally, make this a separate disk or partition. A few
- ; hundred MB should be enough; more if you have fast or many
+ ; Ideally, make the journal a separate disk or partition.
+ ; 1-10GB should be enough; more if you have fast or many
; disks. You can use a file under the osd data dir if need be
; (e.g. /data/$name/journal), but it will be slower than a
; separate disk or partition.
-
; This is an example of a file-based journal.
osd journal = /data/$name/journal
osd journal size = 1000 ; journal size, in megabytes
- ; If you want to run the journal on a tmpfs, disable DirectIO
+ ; If you want to run the journal on a tmpfs (don't), disable DirectIO
;journal dio = false
; You can change the number of recovery operations to speed up recovery
@@ -131,6 +130,9 @@
;debug filestore = 20
;debug journal = 20
+
+ ; ### The below options only apply if you're using mkcephfs
+ ; ### and the devs options
; The filesystem used on the volumes
osd mkfs type = btrfs
; If you want to specify some other mount options, you can do so.
@@ -145,9 +147,7 @@
host = delta
; if 'devs' is not specified, you're responsible for
- ; setting up the 'osd data' dir. if it is not btrfs, things
- ; will behave up until you try to recover from a crash (which
- ; usually fine for basic testing).
+ ; setting up the 'osd data' dir.
devs = /dev/sdx
[osd.1]
diff --git a/src/test/libcephfs/caps.cc b/src/test/libcephfs/caps.cc
index 5a421ea410c..96f1a90024d 100644
--- a/src/test/libcephfs/caps.cc
+++ b/src/test/libcephfs/caps.cc
@@ -26,60 +26,14 @@
#include <sys/xattr.h>
#include <signal.h>
-void do_sigusr1(int s) {}
-
-// wait_and_suspend() forks the process, waits for the
-// child to signal SIGUSR1, suspends the child with SIGSTOP
-// sleeps for s seconds, and then unsuspends the child,
-// waits for the child to exit, and then returns the exit code
-// of the child
-static int _wait_and_suspend(int s) {
-
- int fpid = fork();
- if (fpid != 0) {
- // wait for child to signal
- signal(SIGUSR1, &do_sigusr1);
- sigset_t set;
- sigaddset(&set, SIGUSR1);
- int sig;
- sigwait(&set, &sig);
-
- // fork and suspend child, sleep for 20 secs, and resume
- kill(fpid, SIGSTOP);
- sleep(s);
- kill(fpid, SIGCONT);
- int status;
- wait(&status);
- if (WIFEXITED(status))
- return WEXITSTATUS(status);
- return 1;
- }
- return -1;
-}
-
-// signal_for_suspend sends the parent the SIGUSR1 signal
-// and sleeps for 1 second so that it can be suspended at the
-// point of the call
-static void _signal_for_suspend() {
- kill(getppid(), SIGUSR1);
-}
-
TEST(Caps, ReadZero) {
- int w = _wait_and_suspend(20);
- if (w >= 0) {
- ASSERT_EQ(0, w);
- return;
- }
-
- pid_t mypid = getpid();
+ int mypid = getpid();
struct ceph_mount_info *cmount;
ASSERT_EQ(0, ceph_create(&cmount, NULL));
ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL));
ASSERT_EQ(0, ceph_mount(cmount, "/"));
- ASSERT_EQ(0, ceph_conf_set(cmount, "client_cache_size", "10"));
-
int i = 0;
for(; i < 30; ++i) {
@@ -114,7 +68,7 @@ TEST(Caps, ReadZero) {
ASSERT_EQ(expect, caps & expect);
}
- _signal_for_suspend();
+ ASSERT_EQ(0, ceph_conf_set(cmount, "client_debug_inject_tick_delay", "20"));
for(i = 0; i < 30; ++i) {
diff --git a/src/upstart/ceph-mds-all-starter.conf b/src/upstart/ceph-mds-all-starter.conf
index fe7e2bd32ad..8e7540331ba 100644
--- a/src/upstart/ceph-mds-all-starter.conf
+++ b/src/upstart/ceph-mds-all-starter.conf
@@ -1,4 +1,4 @@
-description "Ceph MDS (task to start all instances)"
+description "Ceph MDS (start all instances)"
start on starting ceph-mds-all
stop on runlevel [!2345] or stopping ceph-mds-all
@@ -10,7 +10,7 @@ script
# TODO what's the valid charset for cluster names and mds ids?
find /var/lib/ceph/mds/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[a-z0-9]+-[a-z0-9._-]+' -printf '%P\n' \
| while read f; do
- if [ -e "/var/lib/ceph/mds/$f/done" ]; then
+ if [ -e "/var/lib/ceph/mds/$f/done" ] && [ -e "/var/lib/ceph/mds/$f/upstart" ]; then
cluster="${f%%-*}"
id="${f#*-}"
initctl emit ceph-mds cluster="$cluster" id="$id"
diff --git a/src/upstart/ceph-mon-all-starter.conf b/src/upstart/ceph-mon-all-starter.conf
index 7101a8acca9..723d4127846 100644
--- a/src/upstart/ceph-mon-all-starter.conf
+++ b/src/upstart/ceph-mon-all-starter.conf
@@ -10,7 +10,7 @@ script
# TODO what's the valid charset for cluster names and mon ids?
find /var/lib/ceph/mon/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[a-z0-9]+-[a-z0-9._-]+' -printf '%P\n' \
| while read f; do
- if [ -e "/var/lib/ceph/mon/$f/done" ]; then
+ if [ -e "/var/lib/ceph/mon/$f/done" ] && [ -e "/var/lib/ceph/mon/$f/upstart" ]; then
cluster="${f%%-*}"
id="${f#*-}"
diff --git a/src/upstart/ceph-osd-all-starter.conf b/src/upstart/ceph-osd-all-starter.conf
index c4d74e58442..616f02ada6e 100644
--- a/src/upstart/ceph-osd-all-starter.conf
+++ b/src/upstart/ceph-osd-all-starter.conf
@@ -10,10 +10,9 @@ script
# TODO what's the valid charset for cluster names and osd ids?
find /var/lib/ceph/osd/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[a-z0-9]+-[a-z0-9._-]+' -printf '%P\n' \
| while read f; do
- if [ -e "/var/lib/ceph/osd/$f/ready" ]; then
+ if [ -e "/var/lib/ceph/osd/$f/ready" ] && [ -e "/var/lib/ceph/osd/$f/upstart" ]; then
cluster="${f%%-*}"
id="${f#*-}"
-
initctl emit ceph-osd cluster="$cluster" id="$id"
fi
done