78 files changed, 2773 insertions, 1264 deletions
diff --git a/doc/index.rst b/doc/index.rst index 8bf5340b2f6..4068be599e5 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -90,6 +90,7 @@ about Ceph, see our `Architecture`_ section. :maxdepth: 1 :hidden: + start/intro start/index install/index rados/index diff --git a/doc/install/index.rst b/doc/install/index.rst index 347b6ae9ac2..3be09c5d0df 100644 --- a/doc/install/index.rst +++ b/doc/install/index.rst @@ -1,50 +1,54 @@ -============== - Installation -============== - -The Ceph Object Store is the foundation of all Ceph clusters, and it consists -primarily of two types of daemons: Object Storage Daemons (OSDs) and monitors. -The Ceph Object Store is based upon the concept of -:abbr:`RADOS (Reliable Autonomic Distributed Object Store)`, which eliminates -single points of failure and delivers infinite scalability. For details on -the architecture of Ceph and RADOS, refer to `Ceph Architecture`_. All Ceph -deployments have OSDs and monitors, so you should prepare your Ceph cluster -by focusing first on the object storage cluster. +======================= + Installation (Manual) +======================= .. raw:: html - <table cellpadding="10"><colgroup><col width="33%"><col width="33%"><col width="33%"></colgroup><tbody valign="top"><tr><td><h3>Recommendations</h3> - -To begin using Ceph in production, you should review our hardware -recommendations and operating system recommendations. Many of the -frequently-asked questions in our mailing list involve hardware-related -questions and how to install Ceph on various distributions. + <table><colgroup><col width="50%"><col width="50%"></colgroup><tbody valign="top"><tr><td><h3>Advanced Package Tool (APT)</h3> + +If you are deploying a Ceph cluster on Debian or Ubuntu distributions, +use the instructions below to install packages manually. .. toctree:: :maxdepth: 2 - Hardware Recommendations <hardware-recommendations> - OS Recommendations <os-recommendations> - -.. raw:: html + Installing Debian/Ubuntu Packages <debian> + Installing on Calxeda Hardware <calxeda> + Installing QEMU <qemu-deb> + Installing libvirt <libvirt-deb> - </td><td><h3>Installation</h3> +.. raw:: html -If you are deploying a Ceph cluster (that is, not developing Ceph), -install Ceph using our stable release packages. For testing, you -may install development release and testing packages. + </td><td><h3>Redhat Package Manager (RPM) / Yellowdog Updater, Modified (YUM) </h3> + +If you are deploying a Ceph cluster on Red Hat(rhel6), CentOS (el6), Fedora +17-19 (f17-f19), OpenSUSE 12 (opensuse12), and SLES (sles11) distributions, use +the instructions below to install packages manually. .. toctree:: :maxdepth: 2 - Installing Debian/Ubuntu Packages <debian> Installing RPM Packages <rpm> - Installing on Calxeda <calxeda> + Installing YUM Priorities <yum-priorities> + Installing QEMU <qemu-rpm> + Installing libvirt <libvirt-rpm> + +.. raw:: html + + </td></tr><tr><td><h3>Upgrading Ceph</h3> + +If you are upgrading Ceph from a previous release, please read the the upgrade +documentation to ensure that you follow the proper upgrade sequence. + +.. toctree:: + :maxdepth: 2 + Upgrading Ceph <upgrading-ceph> + -.. raw:: html +.. raw:: html - </td><td><h3>Building Ceph from Source</h3> + </td><td><h3>Building Ceph</h3> You can build Ceph from source by downloading a release or cloning the ``ceph`` repository at github. If you intend to build Ceph from source, please see the @@ -63,9 +67,10 @@ will save you time. 
    Build a Package <build-packages>
    Contributing Code <contributing>
 
+See the `Development`_ section for additional development details.
 
 .. raw:: html
 
 	</td></tr></tbody></table>
-
-.. _Ceph Architecture: ../architecture/
+
+.. _Development: ../../dev
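For readers who follow the ``Building Ceph`` pointer above, a minimal build sketch for this era of the project is shown below. It assumes the autotools flow described in the developer documentation and pre-installed build dependencies; the GitHub URL is the public mirror and is not taken from this diff. ::

    # Sketch only: assumes an autotools-based build with dependencies already installed.
    git clone --recursive https://github.com/ceph/ceph.git
    cd ceph
    ./autogen.sh
    ./configure
    make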
\ No newline at end of file diff --git a/doc/install/libvirt-deb.rst b/doc/install/libvirt-deb.rst new file mode 100644 index 00000000000..9365e46c747 --- /dev/null +++ b/doc/install/libvirt-deb.rst @@ -0,0 +1,43 @@ +==================== + Installing libvirt +==================== + + +Prerequisites +============= + +- `Install`_ and `configure`_ a Ceph Storage Cluster +- `Install and configure`_ QEMU/KVM + + +Installing ``libvirt`` on Ubuntu 12.04 Precise +============================================== + +``libvirt`` packages are incorporated into the Ubuntu 12.04 precise +distribution. To install ``libvirt`` on precise, execute the following:: + + sudo apt-get update && sudo apt-get install libvirt-bin + + +Installing ``libvirt`` on Earlier Versions of Ubuntu +==================================================== + +For Ubuntu distributions 11.10 oneiric and earlier, you must build ``libvirt`` +from source. Clone the ``libvirt`` repository, and use `AutoGen`_ to generate +the build. Then, execute ``make`` and ``make install`` to complete the +installation. For example:: + + git clone git://libvirt.org/libvirt.git + cd libvirt + ./autogen.sh + make + sudo make install + +See `libvirt Installation`_ for details. + + +.. _libvirt Installation: http://www.libvirt.org/compiling.html +.. _AutoGen: http://www.gnu.org/software/autogen/ +.. _Install: ../index +.. _configure: ../../rados/configuration +.. _Install and configure: ../../rbd/qemu-rbd diff --git a/doc/install/libvirt-rpm.rst b/doc/install/libvirt-rpm.rst new file mode 100644 index 00000000000..a94c6e8ae12 --- /dev/null +++ b/doc/install/libvirt-rpm.rst @@ -0,0 +1,19 @@ +==================== + Installing libvirt +==================== + +To use ``libvirt`` with a Ceph Storage Cluster, you must +have a running Ceph Storage Cluster. You must also install QEMU. +See `Installing QEMU`_ for details. + + +``libvirt`` packages are incorporated into the recent CentOS/RHEL distributions. +To install ``libvirt``, execute the following:: + + sudo yum install libvirt + +See `libvirt Installation`_ for details. + + +.. _libvirt Installation: http://www.libvirt.org/compiling.html +.. _Installing QEMU: ../qemu-rpm
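As a hedged sanity check after ``sudo yum install libvirt``, you can confirm that the daemon is present, running, and enabled at boot before pointing it at Ceph. The ``libvirtd`` service name below assumes a CentOS/RHEL 6 layout and is an illustration, not part of the original instructions. ::

    # Assumes the CentOS/RHEL 6 service name "libvirtd"; adjust for your distribution.
    sudo service libvirtd start
    sudo chkconfig libvirtd on
    libvirtd --version
    virsh --connect qemu:///system version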
\ No newline at end of file diff --git a/doc/install/qemu-deb.rst b/doc/install/qemu-deb.rst new file mode 100644 index 00000000000..29abeafa3bc --- /dev/null +++ b/doc/install/qemu-deb.rst @@ -0,0 +1,26 @@ +================= + Installing QEMU +================= + + + +Installing QEMU (12.04 Precise and later) +========================================= + +QEMU packages are incorporated into Ubuntu 12.04 Precise Pangolin and later +versions. To install QEMU, execute the following:: + + sudo apt-get install qemu + +Installing QEMU (11.10 Oneric and earlier) +========================================== + +For Ubuntu distributions 11.10 Oneiric and earlier, you must install +the 0.15 version of QEMU or later. To build QEMU from source, use the +following procedure:: + + cd {your-development-directory} + git clone git://git.qemu.org/qemu.git + cd qemu + ./configure --enable-rbd + make; make install diff --git a/doc/install/qemu-rpm.rst b/doc/install/qemu-rpm.rst new file mode 100644 index 00000000000..67da2c3714c --- /dev/null +++ b/doc/install/qemu-rpm.rst @@ -0,0 +1,56 @@ +================= + Installing QEMU +================= + +To install QEMU with ``yum``, you must ensure that you have +``yum-plugin-priorities`` installed. See `Installing YUM Priorities`_ +for details. + +To install QEMU, execute the following: + +#. Create a ``/etc/yum.repos.d/ceph-qemu.conf`` file with the following + contents:: + + [ceph-qemu] + name=Ceph Packages for QEMU + baseurl=http://ceph.com/packages/ceph-extras/rpm/centos6.3/$basearch + enabled=1 + priority=2 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc + + [ceph-qemu-noarch] + name=Ceph QEMU noarch + baseurl=http://ceph.com/packages/ceph-extras/rpm/centos6.3/noarch + enabled=1 + priority=2 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc + + [ceph-qemu-source] + name=Ceph QEMU Sources + baseurl=http://ceph.com/packages/ceph-extras/rpm/centos6.3/SRPMS + enabled=1 + priority=2 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc + +#. Update your repositories. :: + + sudo yum update + +#. Install QEMU for Ceph. :: + + sudo yum install qemu-kvm qemu-kvm-tools qemu-img + +#. Install additional QEMU packages (optional):: + + sudo yum install qemu-guest-agent qemu-guest-agent-win32 + +See `QEMU and Block Devices`_ for usage. + +.. _QEMU and Block Devices: ../../rbd/qemu-rbd +.. _Installing YUM Priorities: ../yum-priorities
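Once the ``ceph-extras`` QEMU packages are installed, a quick check (added here as an illustration rather than as part of the original procedure) is to confirm that the installed ``qemu-img`` build lists ``rbd`` among its supported formats. ::

    # Illustrative check: a Ceph-enabled build should include "rbd" in this list.
    qemu-img --version
    qemu-img --help | grep 'Supported formats'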
\ No newline at end of file diff --git a/doc/install/rpm.rst b/doc/install/rpm.rst index ea96d394c7a..9e8cdcd003c 100644 --- a/doc/install/rpm.rst +++ b/doc/install/rpm.rst @@ -7,6 +7,7 @@ development release packages (for the latest features), or development testing packages (for development and QA only). Do not add multiple package sources at the same time. + Install Release Key =================== @@ -139,142 +140,54 @@ You can download the RPMs directly from:: -Installing Ceph Deploy -====================== - -Once you have added either release or development packages to ``yum``, you -can install ``ceph-deploy``. :: - - sudo yum install ceph-deploy python-pushy - - - -Installing Ceph Packages -======================== - -Once you have added either release or development packages to ``yum``, you -can install Ceph packages. You can also use ``ceph-deploy`` to install Ceph -packages. :: - - sudo yum install ceph - - - -Installing Ceph Object Storage -============================== - -:term:`Ceph Object Storage` runs on Apache and FastCGI in conjunction with the -:term:`Ceph Storage Cluster`. - -#. Install Apache and FastCGI. :: - - rpm -ivh fcgi-2.4.0-10.el6.x86_64.rpm - rpm -ivh mod_fastcgi-2.4.6-2.el6.rf.x86_64.rpm - - -#. Install the Ceph Object Storage daemon. :: +Adding Ceph to YUM +================== - yum install ceph-radosgw +You may also add Ceph to the ``/etc/yum.repos.d`` directory. Create a +``ceph.repo`` file. In the example below, replace ``{ceph-stable}`` with +a stable release of Ceph (e.g., ``cuttlefish``, ``dumpling``, etc.) and +``{distro}`` with your Linux distribution (e.g., ``el6``, ``rhel6``, etc.). :: + [ceph] + name=Ceph packages for $basearch + baseurl=http://ceph.com/rpm-{ceph-stable}/{distro}/$basearch + enabled=1 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc -#. Add the following lines to your Ceph configuration file. + [ceph-noarch] + name=Ceph noarch packages + baseurl=http://ceph.com/rpm-{ceph-stable}/{distro}/noarch + enabled=1 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc -.. code-block:: ini + [ceph-source] + name=Ceph source packages + baseurl=http://ceph.com/rpm-{ceph-stable}/{distro}/SRPMS + enabled=0 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc - [client.radosgw.gateway] - host = {fqdn} - keyring = /etc/ceph/keyring.radosgw.gateway - rgw socket path = /tmp/radosgw.sock - log file = /var/log/ceph/radosgw.log - rgw print continue = false - -.. note:: Replace ``{fqdn}`` with the output from ``hostname``. This is - important. Debian systems use the simple hostname, but on CentOS 6/RHEL 6 - you must use the fully qualified domain name. - -#. Create a data directory. :: - - mkdir -p /var/lib/ceph/radosgw/ceph-radosgw.gateway - - -#. Change ``httpd ServerName`` in ``/etc/httpd/conf/httpd.conf``. :: - - ServerName {FQDN} - - -#. Create an Apache httpd virtual host in ``/etc/httpd/conf.d/rgw.conf``. 
:: - - FastCgiExternalServer /var/www/s3gw.fcgi -socket /tmp/radosgw.sock - <VirtualHost *:80> - ServerName <FQDN of the host> - ServerAdmin root@localhost - DocumentRoot /var/www - RewriteEngine On - RewriteRule ^/([a-zA-Z0-9-_.]*)([/]?.*) /s3gw.fcgi?page=$1¶ms=$2&%{QUERY_STRING} [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L] - <IfModule mod_fastcgi.c> - <Directory /var/www> - Options +ExecCGI - AllowOverride All - SetHandler fastcgi-script - Order allow,deny - Allow from all - AuthBasicAuthoritative Off - </Directory> - </IfModule> - AllowEncodedSlashes On - ErrorLog /var/log/httpd/error.log - CustomLog /var/log/httpd/access.log combined - ServerSignature Off - </VirtualHost> - -#. Turn off ``fastcgiwrapper`` in ``/etc/httpd/conf.d/fastcgi.conf`` by - commenting out the following line:: - - #FastCgiWrapper On - - -#. Add a ``fastcgi`` script with the following path ``/var/www/s3gw.fcgi``. :: - - #!/bin/sh - exec /usr/bin/radosgw -c /etc/ceph/ceph.conf -n client.radosgw.gateway - - -#. Make ``s3gw.fcgi`` executable:: - - chmod +x /var/www/s3gw.fcgi - - -#. Create a user key. :: - - ceph-authtool -C -n client.radosgw.gateway --gen-key /etc/ceph/keyring.radosgw.gateway - ceph-authtool -n client.radosgw.gateway --cap mon 'allow rw' --cap osd 'allow rwx' /etc/ceph/keyring.radosgw.gateway - ceph auth add client.radosgw.gateway --in-file=/etc/ceph/keyring.radosgw.gateway - - -#. Please make sure ``/etc/ceph/keyring.radosgw.gateway`` file and - ``/var/log/ceph/radosgw.log`` are accessible by the ``apache`` user. :: - - sudo chown apache:apache /etc/ceph/keyring.radosgw.gateway - sudo chown apache:apache /var/log/ceph/radosgw.log - -.. note:: This is important. The user is ``root`` for Debian. +Installing Ceph Deploy +====================== -#. Create ``.rgw.buckets`` and add it to the Ceph Object Storage daemon. :: +Once you have added either release or development packages, or added a +``ceph.repo`` file to ``/etc/yum.repos.d``, you can install ``ceph-deploy``. :: - rados mkpool .rgw.buckets - radosgw-admin pool add --pool .rgw.buckets + sudo yum install ceph-deploy python-pushy -#. Configure Apache and the Ceph Object Storage daemon to start on boot. :: - chkconfig httpd on - chkconfig ceph-radosgw on +Installing Ceph Packages +======================== -#. Start the services. :: +Once you have added either release or development packages, or added a +``ceph.repo`` file to ``/etc/yum.repos.d``, you can install Ceph packages. :: - /etc/init.d/httpd start - /etc/init.d/ceph-radosgw start - -See `Ceph Object Storage`_ for additional details. + sudo yum install ceph -.. _Ceph Object Storage: ../../radosgw +.. note:: You can also use ``ceph-deploy`` to install Ceph packages. diff --git a/doc/install/yum-priorities.rst b/doc/install/yum-priorities.rst new file mode 100644 index 00000000000..e4adb72b7dd --- /dev/null +++ b/doc/install/yum-priorities.rst @@ -0,0 +1,20 @@ +=========================== + Installing YUM Priorities +=========================== + +Ceph builds packages for Apache and FastCGI (for 100-continue support) and +QEMU (for ``rbd`` support). You must set priorities in your ``.repo`` +files to ensure that ``yum`` installs the Ceph packages instead of the +standard packages. The ``priorities`` setting requires you to install +and enable ``yum-plugin-priorities``. + +#. Install ``yum-plugin-priorities``. :: + + sudo yum install yum-plugin-priorities + +#. Ensure ``/etc/yum/pluginconf.d/priorities.conf`` exists. :: + +#. Ensure ``priorities.conf`` enables the plugin. 
:: + + [main] + enabled = 1 diff --git a/doc/rados/operations/add-or-rm-mons.rst b/doc/rados/operations/add-or-rm-mons.rst index 17ae9d86b85..e3bac1fca09 100644 --- a/doc/rados/operations/add-or-rm-mons.rst +++ b/doc/rados/operations/add-or-rm-mons.rst @@ -32,7 +32,7 @@ version of Linux installed (typically Ubuntu 12.04 precise). Add your monitor host to a rack in your cluster, connect it to the network and ensure that it has network connectivity. -.. _Hardware Recommendations: ../../install/hardware-recommendations +.. _Hardware Recommendations: ../../../start/hardware-recommendations Install the Required Software ----------------------------- @@ -42,17 +42,9 @@ manually. See `Installing Debian/Ubuntu Packages`_ for details. You should configure SSH to a user with password-less authentication and root permissions. -.. _Installing Debian/Ubuntu Packages: ../../install/debian +.. _Installing Debian/Ubuntu Packages: ../../../install/debian -For clusters deployed with Chef, create a `chef user`_, `configure -SSH keys`_, `install Ruby`_ and `install the Chef client`_ on your host. See -`Installing Chef`_ for details. -.. _chef user: ../../install/chef#createuser -.. _configure SSH keys: ../../install/chef#genkeys -.. _install the Chef client: ../../install/chef#installchef -.. _Installing Chef: ../../install/chef -.. _install Ruby: ../../install/chef#installruby .. _Adding a Monitor (Manual): diff --git a/doc/rados/operations/authentication.rst b/doc/rados/operations/authentication.rst index 6bacf4c7dff..d9995da8fb8 100644 --- a/doc/rados/operations/authentication.rst +++ b/doc/rados/operations/authentication.rst @@ -154,6 +154,7 @@ during setup and/or troubleshooting to temporarily disable authentication. auth cluster required = none auth service required = none auth client required = none + auth supported = none #. Or, disable ``cephx`` authentication for versions ``0.50`` and below (deprecated as of version 0.51) by setting the following option in the diff --git a/doc/rados/operations/operating.rst b/doc/rados/operations/operating.rst index 9942ea3cabf..8c62ed5cdbf 100644 --- a/doc/rados/operations/operating.rst +++ b/doc/rados/operations/operating.rst @@ -7,11 +7,10 @@ Running Ceph with Upstart ========================= -When deploying Ceph Cuttlefish and beyond with ``ceph-deploy``, you may start -and stop Ceph daemons on a :term:`Ceph Node` using the event-based `Upstart`_. -Upstart does not require you to define daemon instances in the Ceph configuration -file (although, they are still required for ``sysvinit`` should you choose to -use it). +When deploying Ceph Cuttlefish and beyond with ``ceph-deploy`` on Debian/Ubuntu +distributions, you may start and stop Ceph daemons on a :term:`Ceph Node` using +the event-based `Upstart`_. Upstart does not require you to define daemon +instances in the Ceph configuration file. To list the Ceph Upstart jobs and instances on a node, execute:: @@ -19,6 +18,7 @@ To list the Ceph Upstart jobs and instances on a node, execute:: See `initctl`_ for additional details. + Starting all Daemons -------------------- @@ -93,29 +93,20 @@ For example:: sudo start ceph-mds id=ceph-server - .. index:: Ceph service; sysvinit; operating a cluster -Running Ceph as a Service -========================= +Running Ceph +============ -When you deploy Ceph Argonaut or Bobtail with ``mkcephfs``, use the -service or traditional sysvinit. 
+Each time you to **start**, **restart**, and **stop** Ceph daemons (or your +entire cluster) you must specify at least one option and one command. You may +also specify a daemon type or a daemon instance. :: -The ``ceph`` service provides functionality to **start**, **restart**, and -**stop** your Ceph cluster. Each time you execute ``ceph`` processes, you -must specify at least one option and one command. You may also specify a daemon -type or a daemon instance. For most newer Debian/Ubuntu distributions, you may -use the following syntax:: + {commandline} [options] [commands] [daemons] - sudo service ceph [options] [commands] [daemons] -For older distributions, you may wish to use the ``/etc/init.d/ceph`` path:: - - sudo /etc/init.d/ceph [options] [commands] [daemons] - -The ``ceph`` service options include: +The ``ceph`` options include: +-----------------+----------+-------------------------------------------------+ | Option | Shortcut | Description | @@ -134,7 +125,7 @@ The ``ceph`` service options include: | ``--conf`` | ``-c`` | Use an alternate configuration file. | +-----------------+----------+-------------------------------------------------+ -The ``ceph`` service commands include: +The ``ceph`` commands include: +------------------+------------------------------------------------------------+ | Command | Description | @@ -152,83 +143,213 @@ The ``ceph`` service commands include: | ``cleanalllogs`` | Cleans out **everything** in the log directory. | +------------------+------------------------------------------------------------+ -For subsystem operations, the ``ceph`` service can target specific daemon types by -adding a particular daemon type for the ``[daemons]`` option. Daemon types include: +For subsystem operations, the ``ceph`` service can target specific daemon types +by adding a particular daemon type for the ``[daemons]`` option. Daemon types +include: - ``mon`` - ``osd`` - ``mds`` -The ``ceph`` service's ``[daemons]`` setting may also target a specific instance. -To start a Ceph daemon on the local :term:`Ceph Node`, use the following syntax:: - sudo /etc/init.d/ceph start osd.0 +Running Ceph with sysvinit +-------------------------- -To start a Ceph daemon on another node, use the following syntax:: - - sudo /etc/init.d/ceph -a start osd.0 +Using traditional ``sysvinit`` is the recommended way to run Ceph with CentOS, +Red Hat, Fedora, and SLES distributions. You may also use it for older +distributions of Debian/Ubuntu. -Where ``osd.0`` is the first OSD in the cluster. - -Starting a Cluster ------------------- +Starting all Daemons +~~~~~~~~~~~~~~~~~~~~ To start your Ceph cluster, execute ``ceph`` with the ``start`` command. -The usage may differ based upon your Linux distribution. For example, for most -newer Debian/Ubuntu distributions, you may use the following syntax:: - - sudo service ceph [options] [start|restart] [daemonType|daemonID] - -For older distributions, you may wish to use the ``/etc/init.d/ceph`` path:: +Use the following syntax:: sudo /etc/init.d/ceph [options] [start|restart] [daemonType|daemonID] The following examples illustrates a typical use case:: - sudo service ceph -a start sudo /etc/init.d/ceph -a start Once you execute with ``-a`` (i.e., execute on all nodes), Ceph should begin -operating. You may also specify a particular daemon instance to constrain the -command to a single instance. To start a Ceph daemon on the local Ceph Node, -use the following syntax:: +operating. 
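As an illustrative sysvinit session, you might restart only the OSD daemons across all nodes and then confirm that they rejoined the cluster; the trailing ``ceph -s`` status check is an addition for illustration and is not part of the original text. ::

    # Example only: restart OSDs cluster-wide, then check overall cluster status.
    sudo /etc/init.d/ceph -a restart osd
    ceph -s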
+ + +Stopping all Daemons +~~~~~~~~~~~~~~~~~~~~ + +To stop your Ceph cluster, execute ``ceph`` with the ``stop`` command. +Use the following syntax:: + + sudo /etc/init.d/ceph [options] stop [daemonType|daemonID] + +The following examples illustrates a typical use case:: + + sudo /etc/init.d/ceph -a stop +Once you execute with ``-a`` (i.e., execute on all nodes), Ceph should stop +operating. + + +Starting all Daemons by Type +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To start all Ceph daemons of a particular type on the local Ceph Node, use the +following syntax:: + + sudo /etc/init.d/ceph start {daemon-type} + sudo /etc/init.d/ceph start osd + +To start all Ceph daemons of a particular type on another node, use the +following syntax:: + + sudo /etc/init.d/ceph -a start {daemon-type} + sudo /etc/init.d/ceph -a start osd + + +Stopping all Daemons by Type +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To stop all Ceph daemons of a particular type on the local Ceph Node, use the +following syntax:: + + sudo /etc/init.d/ceph stop {daemon-type} + sudo /etc/init.d/ceph stop osd + +To stop all Ceph daemons of a particular type on another node, use the +following syntax:: + + sudo /etc/init.d/ceph -a stop {daemon-type} + sudo /etc/init.d/ceph -a stop osd + + +Starting a Daemon +~~~~~~~~~~~~~~~~~ + +To start a Ceph daemon on the local Ceph Node, use the following syntax:: + + sudo /etc/init.d/ceph start {daemon-type}.{instance} sudo /etc/init.d/ceph start osd.0 To start a Ceph daemon on another node, use the following syntax:: + sudo /etc/init.d/ceph -a start {daemon-type}.{instance} sudo /etc/init.d/ceph -a start osd.0 -Stopping a Cluster ------------------- +Stopping a Daemon +~~~~~~~~~~~~~~~~~ + +To stop a Ceph daemon on the local Ceph Node, use the following syntax:: + + sudo /etc/init.d/ceph stop {daemon-type}.{instance} + sudo /etc/init.d/ceph stop osd.0 + +To stop a Ceph daemon on another node, use the following syntax:: + + sudo /etc/init.d/ceph -a stop {daemon-type}.{instance} + sudo /etc/init.d/ceph -a stop osd.0 + + +Running Ceph as a Service +------------------------- + +When you deploy Ceph Argonaut or Bobtail with ``mkcephfs``, you operate +Ceph as a service (you may also use sysvinit). + + +Starting all Daemons +~~~~~~~~~~~~~~~~~~~~ + +To start your Ceph cluster, execute ``ceph`` with the ``start`` command. +Use the following syntax:: + + sudo service ceph [options] [start|restart] [daemonType|daemonID] + +The following examples illustrates a typical use case:: + + sudo service ceph -a start + +Once you execute with ``-a`` (i.e., execute on all nodes), Ceph should begin +operating. + + +Stopping all Daemons +~~~~~~~~~~~~~~~~~~~~ To stop your Ceph cluster, execute ``ceph`` with the ``stop`` command. -The usage may differ based upon your Linux distribution. For example, for most -newer Debian/Ubuntu distributions, you may use the following syntax:: +Use the following syntax:: sudo service ceph [options] stop [daemonType|daemonID] For example:: - sudo service ceph -a stop - -For older distributions, you may wish to use the ``/etc/init.d/ceph`` path:: - - sudo /etc/init.d/ceph -a stop + sudo service ceph -a stop Once you execute with ``-a`` (i.e., execute on all nodes), Ceph should shut -down. You may also specify a particular daemon instance to constrain the -command to a single instance. To stop a Ceph daemon on the local Ceph Node, -use the following syntax:: +down. 
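A comparable hedged example with the ``service`` wrapper is to stop a single monitor for maintenance and verify that the remaining monitors still hold quorum; the instance name ``mon.a`` is an assumption about your configuration. ::

    # Example only: "mon.a" is a placeholder monitor instance name.
    sudo service ceph stop mon.a
    ceph quorum_status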
+ + +Starting all Daemons by Type +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To start all Ceph daemons of a particular type on the local Ceph Node, use the +following syntax:: + + sudo service ceph start {daemon-type} + sudo service ceph start osd + +To start all Ceph daemons of a particular type on all nodes, use the following +syntax:: + + sudo service ceph -a start {daemon-type} + sudo service ceph -a start osd + + +Stopping all Daemons by Type +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To stop all Ceph daemons of a particular type on the local Ceph Node, use the +following syntax:: + + sudo service ceph stop {daemon-type} + sudo service ceph stop osd + +To stop all Ceph daemons of a particular type on all nodes, use the following +syntax:: + + sudo service ceph -a stop {daemon-type} + sudo service ceph -a stop osd - sudo /etc/init.d/ceph stop osd.0 + +Starting a Daemon +~~~~~~~~~~~~~~~~~ + +To start a Ceph daemon on the local Ceph Node, use the following syntax:: + + sudo service ceph start {daemon-type}.{instance} + sudo service ceph start osd.0 + +To start a Ceph daemon on another node, use the following syntax:: + + sudo service ceph -a start {daemon-type}.{instance} + sudo service ceph -a start osd.0 + + +Stopping a Daemon +~~~~~~~~~~~~~~~~~ + +To stop a Ceph daemon on the local Ceph Node, use the following syntax:: + + sudo service ceph stop {daemon-type}.{instance} + sudo service ceph stop osd.0 To stop a Ceph daemon on another node, use the following syntax:: - sudo /etc/init.d/ceph -a stop osd.0 + sudo service ceph -a stop {daemon-type}.{instance} + sudo service ceph -a stop osd.0 diff --git a/doc/rbd/libvirt.rst b/doc/rbd/libvirt.rst index cc8dc9bd189..4813c3258d0 100644 --- a/doc/rbd/libvirt.rst +++ b/doc/rbd/libvirt.rst @@ -40,46 +40,11 @@ The most common ``libvirt`` use case involves providing Ceph block devices to cloud solutions like OpenStack or CloudStack. The cloud solution uses ``libvirt`` to interact with QEMU/KVM, and QEMU/KVM interacts with Ceph block devices via ``librbd``. See `Block Devices and OpenStack`_ and `Block Devices -and CloudStack`_ for details. +and CloudStack`_ for details. See `Installation`_ for installation details. You can also use Ceph block devices with ``libvirt``, ``virsh`` and the ``libvirt`` API. See `libvirt Virtualization API`_ for details. -Prerequisites -============= - -- `Install`_ and `configure`_ a Ceph cluster -- `Install and configure`_ QEMU/KVM - - -Installing ``libvirt`` on Ubuntu 12.04 Precise -============================================== - -``libvirt`` packages are incorporated into the Ubuntu 12.04 precise -distribution. To install ``libvirt`` on precise, execute the following:: - - sudo apt-get update && sudo apt-get install libvirt-bin - - -Installing ``libvirt`` on Earlier Versions of Ubuntu -==================================================== - -For Ubuntu distributions 11.10 oneiric and earlier, you must build ``libvirt`` -from source. Clone the ``libvirt`` repository, and use `AutoGen`_ to generate -the build. Then, execute ``make`` and ``make install`` to complete the -installation. For example:: - - git clone git://libvirt.org/libvirt.git - cd libvirt - ./autogen.sh - make - sudo make install - -See `libvirt Installation`_ for details. - - -Using Ceph with Virtual Machines -================================ To create VMs that use Ceph block devices, use the procedures in the following sections. In the exemplary embodiment, we've used ``libvirt-pool`` for the pool @@ -89,7 +54,7 @@ when executing commands in the subsequent procedures. 
Configuring Ceph ----------------- +================ To configure Ceph for use with ``libvirt``, perform the following steps: @@ -132,7 +97,7 @@ To configure Ceph for use with ``libvirt``, perform the following steps: Preparing the VM Manager ------------------------- +======================== You may use ``libvirt`` without a VM manager, but you may find it simpler to create your first domain with ``virt-manager``. @@ -150,7 +115,7 @@ create your first domain with ``virt-manager``. Creating a VM -------------- +============= To create a VM with ``virt-manager``, perform the following steps: @@ -182,7 +147,7 @@ To create a VM with ``virt-manager``, perform the following steps: Configuring the VM ------------------- +================== When configuring the VM for use with Ceph, it is important to use ``virsh`` where appropriate. Additionally, ``virsh`` commands often require root @@ -290,7 +255,7 @@ commands, refer to `Virsh Command Reference`_. Summary -------- +======= Once you have configured the VM for use with Ceph, you can start the VM. To verify that the VM and Ceph are communicating, you may perform the @@ -320,13 +285,8 @@ If everything looks okay, you may begin using the Ceph block device within your VM. - -.. _AutoGen: http://www.gnu.org/software/autogen/ -.. _libvirt Installation: http://www.libvirt.org/compiling.html +.. _Installation: ../../install .. _libvirt Virtualization API: http://www.libvirt.org -.. _Install: ../../install -.. _configure: ../../rados/configuration -.. _Install and configure: ../qemu-rbd .. _Block Devices and OpenStack: ../rbd-openstack .. _Block Devices and CloudStack: ../rbd-cloudstack .. _Create a pool: ../../rados/operations/pools#create-a-pool diff --git a/doc/rbd/qemu-rbd.rst b/doc/rbd/qemu-rbd.rst index 9d366f3ea8d..e0b55dee257 100644 --- a/doc/rbd/qemu-rbd.rst +++ b/doc/rbd/qemu-rbd.rst @@ -27,33 +27,12 @@ image each time it spins up a new virtual machine. Ceph Block Devices can integrate with the QEMU virtual machine. For details on QEMU, see `QEMU Open Source Processor Emulator`_. For QEMU documentation, see -`QEMU Manual`_. +`QEMU Manual`_. For installation details, see `Installation`_. .. important:: To use Ceph Block Devices with QEMU, you must have access to a running Ceph cluster. -Installing QEMU (12.04 Precise and later) -========================================= - -QEMU packages are incorporated into Ubuntu 12.04 Precise Pangolin and later -versions. To install QEMU, execute the following:: - - sudo apt-get install qemu - -Installing QEMU (11.10 Oneric and earlier) -========================================== - -For Ubuntu distributions 11.10 Oneiric and earlier, you must install -the 0.15 version of QEMU or later. To build QEMU from source, use the -following procedure:: - - cd {your-development-directory} - git clone git://git.qemu.org/qemu.git - cd qemu - ./configure --enable-rbd - make; make install - Creating Images with QEMU ========================= @@ -199,4 +178,5 @@ QEMU command line settings override the Ceph configuration file settings. .. _QEMU Open Source Processor Emulator: http://wiki.qemu.org/Main_Page .. _QEMU Manual: http://wiki.qemu.org/Manual .. _RBD Cache: ../rbd-config-ref/ -.. _Snapshots: ../rbd-snapshot/
\ No newline at end of file +.. _Snapshots: ../rbd-snapshot/ +.. _Installation: ../../install
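Since the QEMU installation sections now live under Installation, the remaining qemu-rbd material centers on creating images directly against RBD. A small hedged illustration of that pattern follows, with the ``rbd`` pool and ``test-image`` name used only as placeholders. ::

    # Placeholders: pool "rbd", image "test-image", size 1G.
    qemu-img create -f raw rbd:rbd/test-image 1G
    qemu-img info rbd:rbd/test-image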
\ No newline at end of file diff --git a/doc/rbd/rbd-openstack.rst b/doc/rbd/rbd-openstack.rst index 660757639aa..80dd43ce406 100644 --- a/doc/rbd/rbd-openstack.rst +++ b/doc/rbd/rbd-openstack.rst @@ -127,7 +127,7 @@ Hosts running ``nova-compute`` do not need the keyring. Instead, they store the secret key in libvirt. Create a temporary copy of the secret key on the hosts running ``nova-compute``:: - ssh {your-compute-host} client.volumes.key <`ceph auth get-key client.volumes` + ceph auth get-key client.volumes | ssh {your-compute-host} tee client.volumes.key Then, on the compute hosts, add the secret key to libvirt and remove the temporary copy of the key:: diff --git a/doc/install/hardware-recommendations.rst b/doc/start/hardware-recommendations.rst index 90d29e5e7e2..90d29e5e7e2 100644 --- a/doc/install/hardware-recommendations.rst +++ b/doc/start/hardware-recommendations.rst diff --git a/doc/start/index.rst b/doc/start/index.rst index 2fc03c0a284..6e9277746d9 100644 --- a/doc/start/index.rst +++ b/doc/start/index.rst @@ -1,34 +1,6 @@ -================= - Getting Started -================= - -Whether you want to provide :term:`Ceph Object Storage` and/or :term:`Ceph Block -Device` services to :term:`Cloud Platforms`, deploy a :term:`Ceph Filesystem` or -use Ceph for another purpose, all :term:`Ceph Storage Cluster` deployments begin -with setting up each :term:`Ceph Node`, your network and the Ceph Storage -Cluster. A Ceph Storage Cluster has three essential daemons: - -.. ditaa:: +---------------+ +---------------+ +---------------+ - | OSDs | | Monitor | | MDS | - +---------------+ +---------------+ +---------------+ - -- **OSDs**: A :term:`Ceph OSD Daemon` (OSD) stores data, handles data - replication, recovery, backfilling, rebalancing, and provides some monitoring - information to Ceph Monitors by checking other Ceph OSD Daemons for a - heartbeat. A Ceph Storage Cluster requires at least two Ceph OSD Daemons to - achieve an ``active + clean`` state. - -- **Monitors**: A :term:`Ceph Monitor` maintains maps of the cluster state, - including the monitor map, the OSD map, the Placement Group (PG) map, and the - CRUSH map. Ceph maintains a history (called an "epoch") of each state change - in the Ceph Monitors, Ceph OSD Daemons, and PGs. - -- **MDSs**: A :term:`Ceph Metadata Server` (MDS) stores metadata on behalf of - the :term:`Ceph Filesystem` (i.e., Ceph Block Devices and Ceph Object Storage - do not use MDS). Ceph Metadata Servers make it feasible for POSIX file system - users to execute basic commands like ``ls``, ``find``, etc. without placing - an enormous burden on the Ceph Storage Cluster. - +====================== + Installation (Quick) +====================== .. raw:: html @@ -37,18 +9,17 @@ Cluster. A Ceph Storage Cluster has three essential daemons: A :term:`Ceph Client` and a :term:`Ceph Node` may require some basic configuration work prior to deploying a Ceph Storage Cluster. You can also -avail yourself of help from the Ceph community by getting involved. +avail yourself of help by getting involved in the Ceph community. .. toctree:: - Get Involved <get-involved> Preflight <quick-start-preflight> .. raw:: html </td><td><h3>Step 2: Storage Cluster</h3> -Once you've completed your preflight checklist, you should be able to begin +Once you've completed your preflight checklist, you should be able to begin deploying a Ceph Storage Cluster. .. 
toctree:: diff --git a/doc/start/intro.rst b/doc/start/intro.rst new file mode 100644 index 00000000000..704ff1e8cd5 --- /dev/null +++ b/doc/start/intro.rst @@ -0,0 +1,70 @@ +=============== + Intro to Ceph +=============== + +Whether you want to provide :term:`Ceph Object Storage` and/or :term:`Ceph Block +Device` services to :term:`Cloud Platforms`, deploy a :term:`Ceph Filesystem` or +use Ceph for another purpose, all :term:`Ceph Storage Cluster` deployments begin +with setting up each :term:`Ceph Node`, your network and the Ceph Storage +Cluster. A Ceph Storage Cluster requires at least one Ceph Monitor and at least +two Ceph OSD Daemons. The Ceph Metadata Server is essential when running Ceph +Filesystem clients. + +.. ditaa:: +---------------+ +---------------+ +---------------+ + | OSDs | | Monitor | | MDS | + +---------------+ +---------------+ +---------------+ + +- **OSDs**: A :term:`Ceph OSD Daemon` (OSD) stores data, handles data + replication, recovery, backfilling, rebalancing, and provides some monitoring + information to Ceph Monitors by checking other Ceph OSD Daemons for a + heartbeat. A Ceph Storage Cluster requires at least two Ceph OSD Daemons to + achieve an ``active + clean`` state when the cluster makes two copies of your + data (Ceph makes 2 copies by default, but you can adjust it). + +- **Monitors**: A :term:`Ceph Monitor` maintains maps of the cluster state, + including the monitor map, the OSD map, the Placement Group (PG) map, and the + CRUSH map. Ceph maintains a history (called an "epoch") of each state change + in the Ceph Monitors, Ceph OSD Daemons, and PGs. + +- **MDSs**: A :term:`Ceph Metadata Server` (MDS) stores metadata on behalf of + the :term:`Ceph Filesystem` (i.e., Ceph Block Devices and Ceph Object Storage + do not use MDS). Ceph Metadata Servers make it feasible for POSIX file system + users to execute basic commands like ``ls``, ``find``, etc. without placing + an enormous burden on the Ceph Storage Cluster. + +Ceph stores a client's data as objects within storage pools. Using the CRUSH +algorithm, Ceph calculates which placement group should contain the object, +and further calculates which Ceph OSD Daemon should store the placement group. +The CRUSH algorithm enables the Ceph Storage Cluster to scale, rebalance, and +recover dynamically. + + +.. raw:: html + + <style type="text/css">div.body h3{margin:5px 0px 0px 0px;}</style> + <table cellpadding="10"><colgroup><col width="50%"><col width="50%"></colgroup><tbody valign="top"><tr><td><h3>Recommendations</h3> + +To begin using Ceph in production, you should review our hardware +recommendations and operating system recommendations. + +.. toctree:: + :maxdepth: 2 + + Hardware Recommendations <hardware-recommendations> + OS Recommendations <os-recommendations> + + +.. raw:: html + + </td><td><h3>Get Involved</h3> + + You can avail yourself of help or contribute documentation, source + code or bugs by getting involved in the Ceph community. + +.. toctree:: + + get-involved + +.. raw:: html + + </td></tr></tbody></table> diff --git a/doc/install/os-recommendations.rst b/doc/start/os-recommendations.rst index 71a4d3a278b..d8b418fe1b0 100644 --- a/doc/install/os-recommendations.rst +++ b/doc/start/os-recommendations.rst @@ -36,6 +36,36 @@ platforms. Generally speaking, there is very little dependence on specific distributions aside from the kernel and system initialization package (i.e., sysvinit, upstart, systemd). 
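Because the main distribution dependence is on the kernel and the init system, a quick way to see where a node falls in the release tables that follow is to check its kernel and release strings. This snippet is illustrative; the fallback file path assumes a Red Hat style system. ::

    # Illustrative: kernel version plus distribution release string.
    uname -r
    lsb_release -a 2>/dev/null || cat /etc/redhat-release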
+ +Dumpling (0.67) +--------------- + ++----------+----------+--------------------+--------------+---------+------------+ +| Distro | Release | Code Name | Kernel | Notes | Testing | ++==========+==========+====================+==============+=========+============+ +| Ubuntu | 12.04 | Precise Pangolin | linux-3.2.0 | 1, 2 | B, I, C | ++----------+----------+--------------------+--------------+---------+------------+ +| Ubuntu | 12.10 | Quantal Quetzal | linux-3.5.4 | 2 | B | ++----------+----------+--------------------+--------------+---------+------------+ +| Ubuntu | 13.04 | Raring Ringtail | linux-3.8.5 | | B | ++----------+----------+--------------------+--------------+---------+------------+ +| Debian | 6.0 | Squeeze | linux-2.6.32 | 1, 2, 3 | B | ++----------+----------+--------------------+--------------+---------+------------+ +| Debian | 7.0 | Wheezy | linux-3.2.0 | 1, 2 | B | ++----------+----------+--------------------+--------------+---------+------------+ +| CentOS | 6.3 | N/A | linux-2.6.32 | 1, 2 | B, I | ++----------+----------+--------------------+--------------+---------+------------+ +| RHEL | 6.3 | | linux-2.6.32 | 1, 2 | B, I | ++----------+----------+--------------------+--------------+---------+------------+ +| Fedora | 18.0 | Spherical Cow | linux-3.6.0 | | B | ++----------+----------+--------------------+--------------+---------+------------+ +| Fedora | 19.0 | Schrödinger's Cat | linux-3.10.0 | | B | ++----------+----------+--------------------+--------------+---------+------------+ +| OpenSuse | 12.2 | N/A | linux-3.4.0 | 2 | B | ++----------+----------+--------------------+--------------+---------+------------+ + + + Cuttlefish (0.61) ----------------- @@ -63,6 +93,7 @@ Cuttlefish (0.61) | OpenSuse | 12.2 | N/A | linux-3.4.0 | 2 | B | +----------+----------+--------------------+--------------+---------+------------+ + Bobtail (0.56) -------------- @@ -90,6 +121,7 @@ Bobtail (0.56) | OpenSuse | 12.2 | N/A | linux-3.4.0 | 2 | B | +----------+----------+--------------------+--------------+---------+------------+ + Argonaut (0.48) --------------- @@ -126,6 +158,7 @@ Notes ``ceph-osd`` daemons using ``XFS`` or ``ext4`` on the same host will not perform as well as they could. + Testing ------- diff --git a/doc/start/quick-ceph-deploy.rst b/doc/start/quick-ceph-deploy.rst index 3c0ca1b0653..1fabd1b182f 100644 --- a/doc/start/quick-ceph-deploy.rst +++ b/doc/start/quick-ceph-deploy.rst @@ -3,26 +3,31 @@ ============================= If you haven't completed your `Preflight Checklist`_, do that first. This -**Quick Start** sets up a two-node demo cluster so you can explore some of the -:term:`Ceph Storage Cluster` functionality. This **Quick Start** will help you -install a minimal Ceph Storage Cluster on a server node from your admin node -using ``ceph-deploy``. +**Quick Start** sets up a :term:`Ceph Storage Cluster` using ``ceph-deploy`` +on your admin node. Create a three Ceph Node cluster so you can +explore Ceph functionality. .. ditaa:: - /----------------\ /----------------\ - | Admin Node |<------->| Server Node | - | cCCC | | cCCC | - +----------------+ +----------------+ - | Ceph Commands | | ceph - mon | - \----------------/ +----------------+ - | ceph - osd | - +----------------+ - | ceph - mds | - \----------------/ - - -For best results, create a directory on your admin node for maintaining the -configuration of your cluster. 
:: + /------------------\ /----------------\ + | Admin Node | | ceph–node1 | + | +-------->+ cCCC | + | ceph–deploy | | mon.ceph–node1 | + \---------+--------/ \----------------/ + | + | /----------------\ + | | ceph–node2 | + +----------------->+ cCCC | + | | osd.0 | + | \----------------/ + | + | /----------------\ + | | ceph–node3 | + +----------------->| cCCC | + | osd.1 | + \----------------/ + +For best results, create a directory on your admin node node for maintaining the +configuration that ``ceph-deploy`` generates for your cluster. :: mkdir my-cluster cd my-cluster @@ -31,228 +36,283 @@ configuration of your cluster. :: current directory. Ensure you are in this directory when executing ``ceph-deploy``. +As a first exercise, create a Ceph Storage Cluster with one Ceph Monitor and two +Ceph OSD Daemons. Once the cluster reaches a ``active + clean`` state, expand it +by adding a third Ceph OSD Daemon, a Metadata Server and two more Ceph Monitors. + +.. important:: Do not call ``ceph-deploy`` with ``sudo`` or run it as ``root`` + if you are logged in as a different user, because it will not issue ``sudo`` + commands needed on the remote host. Create a Cluster ================ -To create your Ceph Storage Cluster, declare its initial monitors, generate a -filesystem ID (``fsid``) and generate monitor keys by entering the following -command on a commandline prompt:: +If at any point you run into trouble and you want to start over, execute +the following:: - ceph-deploy new {mon-server-name} - ceph-deploy new mon-ceph-node + ceph-deploy purgedata {ceph-node} [{ceph-node}] + ceph-deploy forgetkeys -Check the output of ``ceph-deploy`` with ``ls`` and ``cat`` in the current -directory. You should see a Ceph configuration file, a keyring, and a log file -for the new cluster. See `ceph-deploy new -h`_ for additional details. -.. topic:: Single Node Quick Start +On your admin node, perform the following steps using ``ceph-deploy``. - Assuming only one node for your Ceph Storage Cluster, you will need to - modify the default ``osd crush chooseleaf type`` setting (it defaults to - ``1`` for ``node``) to ``0`` for ``device`` so that it will peer with OSDs - on the local node. Add the following line to your Ceph configuration file:: - - osd crush chooseleaf type = 0 +#. Create the cluster. :: -.. tip:: If you deploy without executing foregoing step on a single node - cluster, your Ceph Storage Cluster will not achieve an ``active + clean`` - state. To remedy this situation, you must modify your `CRUSH Map`_. + ceph-deploy new {ceph-node} + ceph-deploy new ceph-node1 -Install Ceph -============ + Check the output of ``ceph-deploy`` with ``ls`` and ``cat`` in the current + directory. You should see a Ceph configuration file, a keyring, and a log + file for the new cluster. See `ceph-deploy new -h`_ for additional details. -To install Ceph on your server node, open a command line on your admin -node and type the following:: +#. Install Ceph. :: - ceph-deploy install {server-node-name}[,{server-node-name}] - ceph-deploy install mon-ceph-node + ceph-deploy install {ceph-node}[{ceph-node} ...] + ceph-deploy install ceph-node1 ceph-node2 ceph-node3 -Without additional arguments, ``ceph-deploy`` will install the most recent -stable Ceph package to the server node. See `ceph-deploy install -h`_ for -additional details. -.. tip:: When ``ceph-deploy`` completes installation successfully, - it should echo ``OK``. +#. Add a Ceph Monitor. 
:: + ceph-deploy mon create {ceph-node} + ceph-deploy mon create ceph-node1 + +#. Gather keys. :: -Add a Monitor -============= + ceph-deploy gatherkeys {ceph-node} + ceph-deploy gatherkeys ceph-node1 -To run a Ceph cluster, you need at least one Ceph Monitor. When using -``ceph-deploy``, the tool enforces a single Ceph Monitor per node. Execute the -following to create a Ceph Monitor:: + Once you have gathered keys, your local directory should have the following + keyrings: - ceph-deploy mon create {mon-server-name} - ceph-deploy mon create mon-ceph-node + - ``{cluster-name}.client.admin.keyring`` + - ``{cluster-name}.bootstrap-osd.keyring`` + - ``{cluster-name}.bootstrap-mds.keyring`` + -.. tip:: In production environments, we recommend running Ceph Monitors on - nodes that do not run OSDs. +#. Add two OSDs. For fast setup, this quick start uses a directory rather + than an entire disk per Ceph OSD Daemon. See `ceph-deploy osd`_ for + details on using separate disks/partitions for OSDs and journals. + Login to the Ceph Nodes and create a directory for + the Ceph OSD Daemon. :: + + ssh ceph-node2 + sudo mkdir /tmp/osd0 + exit + + ssh ceph-node3 + sudo mkdir /tmp/osd1 + exit -When you have added a monitor successfully, directories under ``/var/lib/ceph`` -on your server node should have subdirectories ``bootstrap-mds`` and -``bootstrap-osd`` that contain keyrings. If these directories do not contain -keyrings, execute ``ceph-deploy mon create`` again on the admin node. + Then, from your admin node, use ``ceph-deploy`` to prepare the OSDs. :: + ceph-deploy osd prepare {ceph-node}:/path/to/directory + ceph-deploy osd prepare ceph-node2:/tmp/osd0 ceph-node3:/tmp/osd1 -Gather Keys -=========== + Finally, activate the OSDs. :: -To deploy additional daemons and provision them with monitor authentication keys -from your admin node, you must first gather keys from a monitor node. Execute -the following to gather keys:: + ceph-deploy osd activate {ceph-node}:/path/to/directory + ceph-deploy osd activate ceph-node2:/tmp/osd0 ceph-node3:/tmp/osd1 - ceph-deploy gatherkeys {mon-server-name} - ceph-deploy gatherkeys mon-ceph-node +#. Use ``ceph-deploy`` to copy the configuration file and admin key to + your admin node and your Ceph Nodes so that you can use the ``ceph`` + CLI without having to specify the monitor address and + ``ceph.client.admin.keyring`` each time you execute a command. :: + + ceph-deploy admin {ceph-node} + ceph-deploy admin admin-node ceph-node1 ceph-node2 ceph-node3 -Once you have gathered keys, your local directory should have the following keyrings: + **Note:** Since you are using ``ceph-deploy`` to talk to the + local host, your host must be reachable by its hostname + (e.g., you can modify ``/etc/hosts`` if necessary). Ensure that + you have the correct permissions for the ``ceph.client.admin.keyring``. -- ``{cluster-name}.client.admin.keyring`` -- ``{cluster-name}.bootstrap-osd.keyring`` -- ``{cluster-name}.bootstrap-mds.keyring`` +#. Check your cluster's health. :: -If you don't have these keyrings, you may not have created a monitor successfully, -or you may have a problem with your network connection. Ensure that you complete -this step such that you have the foregoing keyrings before proceeding further. + ceph health -.. tip:: You may repeat this procedure. If it fails, check to see if the - ``/var/lib/ceph/boostrap-{osd}|{mds}`` directories on the server node - have keyrings. If they do not have keyrings, try adding the monitor again; - then, return to this step. 
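Beyond ``ceph health``, a couple of additional read-only commands can help confirm that the new monitors and OSDs look right; they are included here as an illustration rather than as part of the quick start steps. ::

    # Illustrative, read-only checks of cluster and OSD state.
    ceph -s
    ceph osd tree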
+ Your cluster should return an ``active + clean`` state when it + has finished peering. -Add Ceph OSD Daemons -==================== +Operating Your Cluster +====================== -For a cluster's object placement groups to reach an ``active + clean`` state, -you must have at least two instances of a :term:`Ceph OSD Daemon` running and -at least two copies of an object (``osd pool default size`` is ``2`` -by default). +Deploying a Ceph cluster with ``ceph-deploy`` automatically starts the cluster. +To operate the cluster daemons with Debian/Ubuntu distributions, see +`Running Ceph with Upstart`_. To operate the cluster daemons with CentOS, +Red Hat, Fedora, and SLES distributions, see `Running Ceph with sysvinit`_. -Adding Ceph OSD Daemons is slightly more involved than other ``ceph-deploy`` -commands, because a Ceph OSD Daemon involves both a data store and a journal. -The ``ceph-deploy`` tool has the ability to invoke ``ceph-disk-prepare`` to -prepare the disk and activate the Ceph OSD Daemon for you. +To learn more about peering and cluster health, see `Monitoring a Cluster`_. +To learn more about Ceph OSD Daemon and placement group health, see +`Monitoring OSDs and PGs`_. + +Once you deploy a Ceph cluster, you can try out some of the administration +functionality, the ``rados`` object store command line, and then proceed to +Quick Start guides for Ceph Block Device, Ceph Filesystem, and the Ceph Object +Gateway. -Multiple OSDs on the OS Disk (Demo Only) ----------------------------------------- -For demonstration purposes, you may wish to add multiple OSDs to the OS disk -(not recommended for production systems). To use Ceph OSDs daemons on the OS -disk, you must use ``prepare`` and ``activate`` as separate steps. First, -define a directory for the Ceph OSD daemon(s). :: - - mkdir /tmp/osd0 - mkdir /tmp/osd1 - -Then, use ``prepare`` to prepare the directory(ies) for use with a -Ceph OSD Daemon. :: - - ceph-deploy osd prepare {osd-node-name}:/tmp/osd0 - ceph-deploy osd prepare {osd-node-name}:/tmp/osd1 +Expanding Your Cluster +====================== -Finally, use ``activate`` to activate the Ceph OSD Daemons. :: +Once you have a basic cluster up and running, the next step is to expand +cluster. Add a Ceph OSD Daemon and a Ceph Metadata Server to ``ceph-node1``. +Then add a Ceph Monitor to ``ceph-node2`` and ``ceph-node3`` to establish a +quorum of Ceph Monitors. - ceph-deploy osd activate {osd-node-name}:/tmp/osd0 - ceph-deploy osd activate {osd-node-name}:/tmp/osd1 +.. ditaa:: + /------------------\ /----------------\ + | ceph–deploy | | ceph–node1 | + | Admin Node | | cCCC | + | +-------->+ mon.ceph–node1 | + | | | osd.2 | + | | | mds.ceph–node1 | + \---------+--------/ \----------------/ + | + | /----------------\ + | | ceph–node2 | + | | cCCC | + +----------------->+ | + | | osd.0 | + | | mon.ceph–node2 | + | \----------------/ + | + | /----------------\ + | | ceph–node3 | + | | cCCC | + +----------------->+ | + | osd.1 | + | mon.ceph–node3 | + \----------------/ -.. tip:: You need two OSDs to reach an ``active + clean`` state. You can - add one OSD at a time, but OSDs need to communicate with each other - for Ceph to run properly. Always use more than one OSD per cluster. +Adding an OSD +------------- +Since you are running a 3-node cluster for demonstration purposes, add the OSD +to the monitor node. 
:: -List Disks ----------- + ssh ceph-node1 + sudo mkdir /tmp/osd2 + exit -To list the available disk drives on a prospective :term:`Ceph Node`, execute -the following:: +Then, from your ``ceph-deploy`` node, prepare the OSD. :: - ceph-deploy disk list {osd-node-name} - ceph-deploy disk list ceph-node + ceph-deploy osd prepare {ceph-node}:/path/to/directory + ceph-deploy osd prepare ceph-node1:/tmp/osd2 +Finally, activate the OSDs. :: -Zap a Disk ----------- + ceph-deploy osd activate {ceph-node}:/path/to/directory + ceph-deploy osd activate ceph-node1:/tmp/osd2 -To zap a disk (delete its partition table) in preparation for use with Ceph, -execute the following:: - ceph-deploy disk zap {osd-node-name}:{disk} - ceph-deploy disk zap ceph-node:sdb ceph-node:sdb2 +Once you have added your new OSD, Ceph will begin rebalancing the cluster by +migrating placement groups to your new OSD. You can observe this process with +the ``ceph`` CLI. :: -.. important:: This will delete all data on the disk. + ceph -w +You should see the placement group states change from ``active+clean`` to active +with some degraded objects, and finally ``active+clean`` when migration +completes. (Control-c to exit.) -Add OSDs on Standalone Disks ----------------------------- -You can add OSDs using ``prepare`` and ``activate`` in two discrete -steps. To prepare a disk for use with a Ceph OSD Daemon, execute the -following:: +Add a Metadata Server +--------------------- - ceph-deploy osd prepare {osd-node-name}:{osd-disk-name}[:/path/to/journal] - ceph-deploy osd prepare ceph-node:sdb +To use CephFS, you need at least one metadata server. Execute the following to +create a metadata server:: -To activate the Ceph OSD Daemon, execute the following:: + ceph-deploy mds create {ceph-node} + ceph-deploy mds create ceph-node1 - ceph-deploy osd activate {osd-node-name}:{osd-partition-name} - ceph-deploy osd activate ceph-node:sdb1 -To prepare an OSD disk and activate it in one step, execute the following:: +.. note:: Currently Ceph runs in production with one metadata server only. You + may use more, but there is currently no commercial support for a cluster + with multiple metadata servers. - ceph-deploy osd create {osd-node-name}:{osd-disk-name}[:/path/to/journal] [{osd-node-name}:{osd-disk-name}[:/path/to/journal]] - ceph-deploy osd create ceph-node:sdb:/dev/ssd1 ceph-node:sdc:/dev/ssd2 +Adding Monitors +--------------- -.. note:: The journal example assumes you will use a partition on a separate - solid state drive (SSD). If you omit a journal drive or partition, - ``ceph-deploy`` will use create a separate partition for the journal - on the same drive. If you have already formatted your disks and created - partitions, you may also use partition syntax for your OSD disk. +A Ceph Storage Cluster requires at least one Ceph Monitor to run. For high +availability, Ceph Storage Clusters typically run multiple Ceph +Monitors so that the failure of a single Ceph Monitor will not bring down the +Ceph Storage Cluster. Ceph uses the Paxos algorithm, which requires a majority +of monitors (i.e., 1, 2:3, 3:4, 3:5, 4:6, etc.) to form a quorum. -You must add a minimum of two Ceph OSD Daemons for the placement groups in -a cluster to achieve an ``active + clean`` state. +Add two Ceph Monitors to your cluster. :: + ceph-deploy mon create {ceph-node} + ceph-deploy mon create ceph-node2 ceph-node3 -Add a MDS -========= +Once you have added your new Ceph Monitors, Ceph will begin synchronizing +the monitors and form a quorum. 
You can check the quorum status by executing +the following:: -To use CephFS, you need at least one metadata node. Execute the following to -create a metadata node:: + ceph quorum_status - ceph-deploy mds create {node-name} - ceph-deploy mds create ceph-node -.. note:: Currently Ceph runs in production with one metadata node only. You - may use more, but there is currently no commercial support for a cluster - with multiple metadata nodes. +Storing/Retrieving Object Data +============================== +To store object data in the Ceph Storage Cluster, a Ceph client must: -Summary -======= +#. Set an object name +#. Specify a `pool`_ -Deploying a Ceph cluster with ``ceph-deploy`` automatically starts the cluster. -To operate the cluster daemons, see `Running Ceph with Upstart`_. +The Ceph Client retrieves the latest cluster map and the CRUSH algorithm +calculates how to map the object to a `placement group`_, and then calculates +how to assign the placement group to a Ceph OSD Daemon dynamically. To find the +object location, all you need is the object name and the pool name. For +example:: -Once you deploy a Ceph cluster, you can try out some of the administration -functionality, the object store command line, and then proceed to Quick Start -guides for RBD, CephFS, and the Ceph Gateway. + ceph osd map {poolname} {object-name} -.. topic:: Other ceph-deploy Commands +.. topic:: Exercise: Locate an Object - To view other ``ceph-deploy`` commands, execute: - - ``ceph-deploy -h`` - + As an exercise, lets create an object. Specify an object name, a path to + a test file containing some object data and a pool name using the + ``rados put`` command on the command line. For example:: + + rados put {object-name} {file-path} --pool=data + rados put test-object-1 testfile.txt --pool=data + + To verify that the Ceph Storage Cluster stored the object, execute + the following:: + + rados -p data ls + + Now, identify the object location:: -See `Ceph Deploy`_ for additional details. + ceph osd map {pool-name} {object-name} + ceph osd map data test-object-1 + + Ceph should output the object's location. For example:: + + osdmap e537 pool 'data' (0) object 'test-object-1' -> pg 0.d1743484 (0.4) -> up [1,0] acting [1,0] + + To remove the test object, simply delete it using the ``rados rm`` + command. For example:: + + rados rm test-object-1 --pool=data + +As the cluster evolves, the object location may change dynamically. One benefit +of Ceph's dynamic rebalancing is that Ceph relieves you from having to perform +the migration manually. .. _Preflight Checklist: ../quick-start-preflight .. _Ceph Deploy: ../../rados/deployment .. _ceph-deploy install -h: ../../rados/deployment/ceph-deploy-install .. _ceph-deploy new -h: ../../rados/deployment/ceph-deploy-new +.. _ceph-deploy osd: ../../rados/deployment/ceph-deploy-osd .. _Running Ceph with Upstart: ../../rados/operations/operating#running-ceph-with-upstart -.. _CRUSH Map: ../../rados/operations/crush-map
\ No newline at end of file +.. _Running Ceph with sysvinit: ../../rados/operations/operating#running-ceph-with-sysvinit +.. _CRUSH Map: ../../rados/operations/crush-map +.. _pool: ../../rados/operations/pools +.. _placement group: ../../rados/operations/placement-groups +.. _Monitoring a Cluster: ../../rados/operations/monitoring +.. _Monitoring OSDs and PGs: ../../rados/operations/monitoring-osd-pg
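For readers who want to repeat the exercise above programmatically, the ``rados put``/``rados rm`` commands map directly onto the ``librados`` C++ API. The following is a minimal, illustrative sketch only (it is not part of the quick start); it assumes the ``data`` pool used in the exercise and a client host that can read ``/etc/ceph/ceph.conf`` and the admin keyring. ::

    #include <rados/librados.hpp>
    #include <string>
    #include <iostream>

    int main() {
      librados::Rados cluster;
      cluster.init("admin");                          // run as client.admin
      cluster.conf_read_file("/etc/ceph/ceph.conf");  // monitors + keyring
      if (cluster.connect() < 0) {
        std::cerr << "cannot connect to cluster" << std::endl;
        return 1;
      }

      librados::IoCtx io_ctx;
      cluster.ioctx_create("data", io_ctx);           // pool from the exercise

      librados::bufferlist bl;
      bl.append(std::string("hello ceph"));
      io_ctx.write_full("test-object-1", bl);         // equivalent of `rados put`
      io_ctx.remove("test-object-1");                 // equivalent of `rados rm`

      io_ctx.close();
      cluster.shutdown();
      return 0;
    }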
\ No newline at end of file diff --git a/doc/start/quick-cephfs.rst b/doc/start/quick-cephfs.rst index 18dadb005ec..5449e5a6fe3 100644 --- a/doc/start/quick-cephfs.rst +++ b/doc/start/quick-cephfs.rst @@ -3,7 +3,7 @@ ===================== To use the :term:`Ceph FS` Quick Start guide, you must have executed the -procedures in the `Ceph Deploy Quick Start`_ guide first. Execute this quick +procedures in the `Storage Cluster Quick Start`_ guide first. Execute this quick start on the Admin Host. Prerequisites @@ -91,7 +91,7 @@ See `Ceph FS`_ for additional information. Ceph FS is not quite as stable as the Ceph Block Device and Ceph Object Storage. See `Troubleshooting`_ if you encounter trouble. -.. _Ceph Deploy Quick Start: ../quick-ceph-deploy +.. _Storage Cluster Quick Start: ../quick-ceph-deploy .. _Ceph FS: ../../cephfs/ .. _FAQ: http://wiki.ceph.com/03FAQs/01General_FAQ#How_Can_I_Give_Ceph_a_Try.3F .. _Troubleshooting: ../../cephfs/troubleshooting
\ No newline at end of file diff --git a/doc/start/quick-rbd.rst b/doc/start/quick-rbd.rst index a466771502d..9424457f8c2 100644 --- a/doc/start/quick-rbd.rst +++ b/doc/start/quick-rbd.rst @@ -2,47 +2,73 @@ Block Device Quick Start ========================== -To use this guide, you must have executed the procedures in the `Object Store -Quick Start`_ guide first. Ensure your :term:`Ceph Storage Cluster` is in an -``active + clean`` state before working with the :term:`Ceph Block Device`. -Execute this quick start on the admin node. +To use this guide, you must have executed the procedures in the `Storage +Cluster Quick Start`_ guide first. Ensure your :term:`Ceph Storage Cluster` is +in an ``active + clean`` state before working with the :term:`Ceph Block +Device`. .. note:: The Ceph Block Device is also known as :term:`RBD` or :term:`RADOS` Block Device. -#. Install ``ceph-common``. :: - sudo apt-get install ceph-common +.. ditaa:: + /------------------\ /----------------\ + | Admin Node | | ceph–client | + | +-------->+ cCCC | + | ceph–deploy | | ceph | + \------------------/ \----------------/ -#. Create a block device image. :: - rbd create foo --size 4096 [-m {mon-IP}] [-k /path/to/ceph.client.admin.keyring] +You may use a virtual machine for your ``ceph-client`` node, but do not +execute the following procedures on the same physical node as your Ceph +Storage Cluster nodes (unless you use a VM). See `FAQ`_ for details. -#. Load the ``rbd`` client module. :: + +Install Ceph +============ + +#. On the admin node, use ``ceph-deploy`` to install Ceph on your + ``ceph-client`` node. :: + + ceph-deploy install ceph-client + +#. On the admin node, use ``ceph-deploy`` to copy the Ceph configuration file + and the ``ceph.client.admin.keyring`` to the ``ceph-client``. :: + + ceph-deploy admin ceph-client + + +Configure a Block Device +======================== + +#. On the ``ceph-client`` node, create a block device image. :: + + rbd create foo --size 4096 [-m {mon-IP}] [-k /path/to/ceph.client.admin.keyring] + +#. On the ``ceph-client`` node, load the ``rbd`` client module. :: sudo modprobe rbd -#. Map the image to a block device. :: +#. On the ``ceph-client`` node, map the image to a block device. :: sudo rbd map foo --pool rbd --name client.admin [-m {mon-IP}] [-k /path/to/ceph.client.admin.keyring] -#. Use the block device. In the following example, create a file system. :: +#. Use the block device by creating a file system on the ``ceph-client`` + node. :: sudo mkfs.ext4 -m0 /dev/rbd/rbd/foo This may take a few moments. -#. Mount the file system. :: +#. Mount the file system on the ``ceph-client`` node. :: sudo mkdir /mnt/ceph-block-device sudo mount /dev/rbd/rbd/foo /mnt/ceph-block-device cd /mnt/ceph-block-device -.. note:: Mount the block device on the client machine, - not the server machine. See `FAQ`_ for details. See `block devices`_ for additional details. -.. _Object Store Quick Start: ../quick-ceph-deploy +.. _Storage Cluster Quick Start: ../quick-ceph-deploy .. _block devices: ../../rbd/rbd .. 
_FAQ: http://wiki.ceph.com/03FAQs/01General_FAQ#How_Can_I_Give_Ceph_a_Try.3F diff --git a/doc/start/quick-rgw.rst b/doc/start/quick-rgw.rst index af48a3154c1..40cf7d4f4dc 100644 --- a/doc/start/quick-rgw.rst +++ b/doc/start/quick-rgw.rst @@ -2,7 +2,7 @@ Object Storage Quick Start ============================ -To use this guide, you must have executed the procedures in the `Ceph Deploy +To use this guide, you must have executed the procedures in the `Storage Cluster Quick Start`_ guide first. Ensure your :term:`Ceph Storage Cluster` is in an ``active + clean`` state before working with the :term:`Ceph Object Storage`. @@ -344,7 +344,7 @@ tutorials. See the `S3-compatible`_ and `Swift-compatible`_ APIs for details. .. _Create rgw.conf: ../../radosgw/config/index.html#create-rgw-conf -.. _Ceph Deploy Quick Start: ../quick-ceph-deploy +.. _Storage Cluster Quick Start: ../quick-ceph-deploy .. _Ceph Object Storage Manual Install: ../../radosgw/manual-install .. _RGW Configuration: ../../radosgw/config .. _S3-compatible: ../../radosgw/s3 diff --git a/doc/start/quick-start-preflight.rst b/doc/start/quick-start-preflight.rst index 74dc403c211..77a54795f19 100644 --- a/doc/start/quick-start-preflight.rst +++ b/doc/start/quick-start-preflight.rst @@ -4,74 +4,57 @@ .. versionadded:: 0.60 -Thank you for trying Ceph! Petabyte-scale data clusters are quite an -undertaking. Before delving deeper into Ceph, we recommend setting up a two-node -demo cluster to explore some of the functionality. This **Preflight Checklist** -will help you prepare an admin node and a server node for use with -``ceph-deploy``. - -.. ditaa:: - /----------------\ /----------------\ - | Admin Node |<------->| Server Node | - | cCCC | | cCCC | - \----------------/ \----------------/ - - -Before you can deploy Ceph using ``ceph-deploy``, you need to ensure that you -have a few things set up first on your admin node and on nodes running Ceph -daemons. - - -Install an Operating System -=========================== - -Install a recent release of Debian or Ubuntu (e.g., 12.04, 12.10, 13.04) on your -nodes. For additional details on operating systems or to use other operating -systems other than Debian or Ubuntu, see `OS Recommendations`_. - - -Install an SSH Server -===================== - -The ``ceph-deploy`` utility requires ``ssh``, so your server node(s) require an -SSH server. :: - - sudo apt-get install openssh-server - - -Create a User -============= - -Create a user on nodes running Ceph daemons. - -.. tip:: We recommend a username that brute force attackers won't - guess easily (e.g., something other than ``root``, ``ceph``, etc). - -:: +Thank you for trying Ceph! We recommend setting up a ``ceph-deploy`` admin node +and a 3-node :term:`Ceph Storage Cluster` to explore the basics of Ceph. This +**Preflight Checklist** will help you prepare a ``ceph-deploy`` admin node and +three Ceph Nodes (or virtual machines) that will host your Ceph Storage Cluster. + + +.. ditaa:: + /------------------\ /----------------\ + | Admin Node | | ceph–node1 | + | +-------->+ | + | ceph–deploy | | cCCC | + \---------+--------/ \----------------/ + | + | /----------------\ + | | ceph–node2 | + +----------------->+ | + | | cCCC | + | \----------------/ + | + | /----------------\ + | | ceph–node3 | + +----------------->| | + | cCCC | + \----------------/ + + +Ceph Node Setup +=============== + +Perform the following steps: + +#. Create a user on each Ceph Node. 
:: ssh user@ceph-server sudo useradd -d /home/ceph -m ceph sudo passwd ceph - -``ceph-deploy`` installs packages onto your nodes. This means that -the user you create requires passwordless ``sudo`` privileges. - -.. note:: We **DO NOT** recommend enabling the ``root`` password - for security reasons. - -To provide full privileges to the user, add the following to -``/etc/sudoers.d/ceph``. :: +#. Add ``root`` privileges for the user on each Ceph Node. :: echo "ceph ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/ceph sudo chmod 0440 /etc/sudoers.d/ceph -Configure SSH -============= +#. Install an SSH server (if necessary):: -Configure your admin machine with password-less SSH access to each node -running Ceph daemons (leave the passphrase empty). :: + sudo apt-get install openssh-server + sudo yum install openssh-server + + +#. Configure your ``ceph-deploy`` admin node with password-less SSH access to + each Ceph Node. Leave the passphrase empty:: ssh-keygen Generating public/private key pair. @@ -81,77 +64,95 @@ running Ceph daemons (leave the passphrase empty). :: Your identification has been saved in /ceph-client/.ssh/id_rsa. Your public key has been saved in /ceph-client/.ssh/id_rsa.pub. -Copy the key to each node running Ceph daemons:: +#. Copy the key to each Ceph Node. :: ssh-copy-id ceph@ceph-server -Modify your ~/.ssh/config file of your admin node so that it defaults -to logging in as the user you created when no username is specified. :: + +#. Modify the ``~/.ssh/config`` file of your ``ceph-deploy`` admin node so that + it logs in to Ceph Nodes as the user you created (e.g., ``ceph``). :: Host ceph-server - Hostname ceph-server.fqdn-or-ip-address.com - User ceph + Hostname ceph-server.fqdn-or-ip-address.com + User ceph + + +#. Ensure connectivity using ``ping`` with hostnames (i.e., not IP addresses). + Address hostname resolution issues and firewall issues as necessary. -.. note:: Do not call ceph-deploy with ``sudo`` or run as ``root`` if you are - login in as a different user (as in the ssh config above) because it - will not issue ``sudo`` commands needed on the remote host. -Install ceph-deploy -=================== +Ceph Deploy Setup +================= -To install ``ceph-deploy``, execute the following:: +Add Ceph repositories to the ``ceph-deploy`` admin node. Then, install +``ceph-deploy``. + +.. important:: Do not call ``ceph-deploy`` with ``sudo`` or run it as ``root`` + if you are logged in as a different user, because it will not issue ``sudo`` + commands needed on the remote host. + + +Advanced Package Tool (APT) +--------------------------- + +For Debian and Ubuntu distributions, perform the following steps: + +#. Add the release key:: wget -q -O- 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc' | sudo apt-key add - echo deb http://ceph.com/debian-dumpling/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list sudo apt-get update sudo apt-get install ceph-deploy +#. Add the Ceph packages to your repository. Replace ``{ceph-stable-release}`` + with a stable Ceph release (e.g., ``cuttlefish``, ``dumpling``, etc.). + For example:: + + echo deb http://ceph.com/debian-{ceph-stable-release}/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list -Ensure Connectivity -=================== +#. 
Update your repository and install ``ceph-deploy``:: -Ensure that your admin node has connectivity to the network and to your Server -node (e.g., ensure ``iptables``, ``ufw`` or other tools that may prevent -connections, traffic forwarding, etc. to allow what you need). + sudo apt-get update && sudo apt-get install ceph-deploy -.. tip:: The ``ceph-deploy`` tool is new and you may encounter some issues - without effective error messages. -Once you have completed this pre-flight checklist, you are ready to begin using -``ceph-deploy``. +Red Hat Package Manager (RPM) +----------------------------- +For Red Hat(rhel6), CentOS (el6), Fedora 17-19 (f17-f19), OpenSUSE 12 +(opensuse12), and SLES (sles11) perform the following steps: -Hostname Resolution -=================== +#. Add the package to your repository. Open a text editor and create a + Yellowdog Updater, Modified (YUM) entry. Use the file path + ``/etc/yum.repos.d/ceph.repo``. For example:: -Ensure that your admin node can resolve the server node's hostname. :: + sudo vim /etc/yum.repos.d/ceph.repo - ping {server-node} + Paste the following example code. Replace ``{ceph-stable-release}`` with + the recent stable release of Ceph (e.g., ``dumpling``). Replace ``{distro}`` + with your Linux distribution (e.g., ``el6`` for CentOS 6, ``rhel6`` for + Red Hat 6, ``fc18`` or ``fc19`` for Fedora 18 or Fedora 19, and ``sles11`` + for SLES 11). Finally, save the contents to the + ``/etc/yum.repos.d/ceph.repo`` file. :: -If you execute ``ceph-deploy`` against the localhost, ``ceph-deploy`` -must be able to resolve its IP address. Consider adding the IP address -to your ``/etc/hosts`` file such that it resolves to the hostname. :: + [ceph-noarch] + name=Ceph noarch packages + baseurl=http://ceph.com/rpm-{ceph-stable-release}/{distro}/noarch + enabled=1 + gpgcheck=1 + type=rpm-md + gpgkey=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc - hostname - host -4 {hostname} - sudo vim /etc/hosts - {ip-address} {hostname} +#. Update your repository and install ``ceph-deploy``:: - ceph-deploy {command} {hostname} + sudo yum update && sudo yum install ceph-deploy -.. tip:: The ``ceph-deploy`` tool will not resolve to ``localhost``. Use - the hostname. Summary ======= -Once you have passwordless ``ssh`` connectivity, passwordless ``sudo``, -installed ``ceph-deploy``, and you have ensured appropriate connectivity, -proceed to the `Storage Cluster Quick Start`_. - -.. tip:: The ``ceph-deploy`` utility can install Ceph packages on remote - machines from the admin node! +This completes the Quick Start Preflight. Proceed to the `Storage Cluster +Quick Start`_. .. _Storage Cluster Quick Start: ../quick-ceph-deploy .. _OS Recommendations: ../../install/os-recommendations diff --git a/qa/workunits/suites/fsstress.sh b/qa/workunits/suites/fsstress.sh index 7f945172687..394e5fad991 100755 --- a/qa/workunits/suites/fsstress.sh +++ b/qa/workunits/suites/fsstress.sh @@ -2,6 +2,7 @@ if [ ! 
-f /usr/lib/ltp/testcases/bin/fsstress ] then + path=`pwd` mkdir -p /tmp/fsstress cd /tmp/fsstress wget -q -O /tmp/fsstress/ltp-full.tgz http://ceph.com/qa/ltp-full-20091231.tgz @@ -13,6 +14,7 @@ then sudo cp -avf /tmp/fsstress/ltp-full-20091231/testcases/kernel/fs/fsstress/fsstress /usr/lib/ltp/testcases/bin/fsstress sudo chmod 755 /usr/lib/ltp/testcases/bin/fsstress rm -Rf /tmp/fsstress + cd $path fi command="/usr/lib/ltp/testcases/bin/fsstress -d fsstress-`hostname`$$ -l 1 -n 1000 -p 10 -v" diff --git a/src/cls/rgw/cls_rgw_client.cc b/src/cls/rgw/cls_rgw_client.cc index 165ca437987..2851f2bd702 100644 --- a/src/cls/rgw/cls_rgw_client.cc +++ b/src/cls/rgw/cls_rgw_client.cc @@ -2,6 +2,7 @@ #include "include/types.h" #include "cls/rgw/cls_rgw_ops.h" +#include "cls/rgw/cls_rgw_client.h" #include "include/rados/librados.hpp" #include "common/debug.h" @@ -157,6 +158,44 @@ int cls_rgw_get_dir_header(IoCtx& io_ctx, string& oid, rgw_bucket_dir_header *he return r; } +class GetDirHeaderCompletion : public ObjectOperationCompletion { + RGWGetDirHeader_CB *ret_ctx; +public: + GetDirHeaderCompletion(RGWGetDirHeader_CB *_ctx) : ret_ctx(_ctx) {} + ~GetDirHeaderCompletion() { + ret_ctx->put(); + } + void handle_completion(int r, bufferlist& outbl) { + struct rgw_cls_list_ret ret; + try { + bufferlist::iterator iter = outbl.begin(); + ::decode(ret, iter); + } catch (buffer::error& err) { + r = -EIO; + } + + ret_ctx->handle_response(r, ret.dir.header); + }; +}; + +int cls_rgw_get_dir_header_async(IoCtx& io_ctx, string& oid, RGWGetDirHeader_CB *ctx) +{ + bufferlist in, out; + struct rgw_cls_list_op call; + call.num_entries = 0; + ::encode(call, in); + ObjectReadOperation op; + GetDirHeaderCompletion *cb = new GetDirHeaderCompletion(ctx); + op.exec("rgw", "bucket_list", in, cb); + AioCompletion *c = librados::Rados::aio_create_completion(NULL, NULL, NULL); + int r = io_ctx.aio_operate(oid, c, &op, NULL); + c->release(); + if (r < 0) + return r; + + return 0; +} + int cls_rgw_bi_log_list(IoCtx& io_ctx, string& oid, string& marker, uint32_t max, list<rgw_bi_log_entry>& entries, bool *truncated) { diff --git a/src/cls/rgw/cls_rgw_client.h b/src/cls/rgw/cls_rgw_client.h index 2ea5d9ca771..39bb3c9fc4a 100644 --- a/src/cls/rgw/cls_rgw_client.h +++ b/src/cls/rgw/cls_rgw_client.h @@ -4,6 +4,13 @@ #include "include/types.h" #include "include/rados/librados.hpp" #include "cls_rgw_types.h" +#include "common/RefCountedObj.h" + +class RGWGetDirHeader_CB : public RefCountedObject { +public: + virtual ~RGWGetDirHeader_CB() {} + virtual void handle_response(int r, rgw_bucket_dir_header& header) = 0; +}; /* bucket index */ void cls_rgw_bucket_init(librados::ObjectWriteOperation& o); @@ -27,6 +34,7 @@ int cls_rgw_bucket_check_index_op(librados::IoCtx& io_ctx, string& oid, int cls_rgw_bucket_rebuild_index_op(librados::IoCtx& io_ctx, string& oid); int cls_rgw_get_dir_header(librados::IoCtx& io_ctx, string& oid, rgw_bucket_dir_header *header); +int cls_rgw_get_dir_header_async(librados::IoCtx& io_ctx, string& oid, RGWGetDirHeader_CB *ctx); void cls_rgw_encode_suggestion(char op, rgw_bucket_dir_entry& dirent, bufferlist& updates); diff --git a/src/common/Formatter.h b/src/common/Formatter.h index 27089ce04f2..ac68b7f461d 100644 --- a/src/common/Formatter.h +++ b/src/common/Formatter.h @@ -44,6 +44,9 @@ class Formatter { virtual void dump_int(const char *name, int64_t s) = 0; virtual void dump_float(const char *name, double d) = 0; virtual void dump_string(const char *name, std::string s) = 0; + virtual void dump_bool(const char 
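// Usage sketch for the cls_rgw_get_dir_header_async() helper introduced above
// in cls/rgw/cls_rgw_client.{h,cc} (illustrative only, not part of the patch;
// the callback class and variable names here are hypothetical).  The
// GetDirHeaderCompletion shown above calls put() on the callback when it is
// destroyed, so the caller hands its reference over with the call.
#include "cls/rgw/cls_rgw_client.h"

class PrintDirHeaderCB : public RGWGetDirHeader_CB {
public:
  void handle_response(int r, rgw_bucket_dir_header& header) {
    // runs from the librados completion context; r < 0 on I/O or decode error
  }
};

static int fetch_dir_header(librados::IoCtx& io_ctx, string& bucket_index_oid)
{
  PrintDirHeaderCB *cb = new PrintDirHeaderCB;
  int r = cls_rgw_get_dir_header_async(io_ctx, bucket_index_oid, cb);
  if (r < 0)
    return r;   // the request could not be submitted
  return 0;
}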
*name, bool b) { + dump_format_unquoted(name, "%s", (b ? "true" : "false")); + } virtual std::ostream& dump_stream(const char *name) = 0; virtual void dump_format(const char *name, const char *fmt, ...) = 0; virtual void dump_format_unquoted(const char *name, const char *fmt, ...) = 0; diff --git a/src/common/TrackedOp.cc b/src/common/TrackedOp.cc new file mode 100644 index 00000000000..d1dbc1e7135 --- /dev/null +++ b/src/common/TrackedOp.cc @@ -0,0 +1,265 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. + * Copyright 2013 Inktank + */ + +#include "TrackedOp.h" +#include "common/Formatter.h" +#include <iostream> +#include <vector> +#include "common/debug.h" +#include "common/config.h" +#include "msg/Message.h" +#include "include/assert.h" + +#define dout_subsys ceph_subsys_optracker +#undef dout_prefix +#define dout_prefix _prefix(_dout) + +static ostream& _prefix(std::ostream* _dout) +{ + return *_dout << "-- op tracker -- "; +} + +void OpHistory::on_shutdown() +{ + arrived.clear(); + duration.clear(); + shutdown = true; +} + +void OpHistory::insert(utime_t now, TrackedOpRef op) +{ + if (shutdown) + return; + duration.insert(make_pair(op->get_duration(), op)); + arrived.insert(make_pair(op->get_arrived(), op)); + cleanup(now); +} + +void OpHistory::cleanup(utime_t now) +{ + while (arrived.size() && + (now - arrived.begin()->first > + (double)(history_duration))) { + duration.erase(make_pair( + arrived.begin()->second->get_duration(), + arrived.begin()->second)); + arrived.erase(arrived.begin()); + } + + while (duration.size() > history_size) { + arrived.erase(make_pair( + duration.begin()->second->get_arrived(), + duration.begin()->second)); + duration.erase(duration.begin()); + } +} + +void OpHistory::dump_ops(utime_t now, Formatter *f) +{ + cleanup(now); + f->open_object_section("OpHistory"); + f->dump_int("num to keep", history_size); + f->dump_int("duration to keep", history_duration); + { + f->open_array_section("Ops"); + for (set<pair<utime_t, TrackedOpRef> >::const_iterator i = + arrived.begin(); + i != arrived.end(); + ++i) { + f->open_object_section("Op"); + i->second->dump(now, f); + f->close_section(); + } + f->close_section(); + } + f->close_section(); +} + +void OpTracker::dump_historic_ops(Formatter *f) +{ + Mutex::Locker locker(ops_in_flight_lock); + utime_t now = ceph_clock_now(cct); + history.dump_ops(now, f); +} + +void OpTracker::dump_ops_in_flight(Formatter *f) +{ + Mutex::Locker locker(ops_in_flight_lock); + f->open_object_section("ops_in_flight"); // overall dump + f->dump_int("num_ops", ops_in_flight.size()); + f->open_array_section("ops"); // list of TrackedOps + utime_t now = ceph_clock_now(cct); + for (xlist<TrackedOp*>::iterator p = ops_in_flight.begin(); !p.end(); ++p) { + f->open_object_section("op"); + (*p)->dump(now, f); + f->close_section(); // this TrackedOp + } + f->close_section(); // list of TrackedOps + f->close_section(); // overall dump +} + +void OpTracker::register_inflight_op(xlist<TrackedOp*>::item *i) +{ + Mutex::Locker locker(ops_in_flight_lock); + ops_in_flight.push_back(i); + ops_in_flight.back()->seq = seq++; +} + +void OpTracker::unregister_inflight_op(TrackedOp *i) +{ + Mutex::Locker locker(ops_in_flight_lock); + 
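// Note on the trimming policy implemented by OpHistory::cleanup() above
// (illustrative values, not from this patch): if the daemon wires in, say,
// osd_op_history_size = 20 and osd_op_history_duration = 600, the history
// keeps at most 20 completed ops, evicting the shortest-duration ones first,
// and additionally drops any op that arrived more than 600 seconds ago.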
assert(i->xitem.get_list() == &ops_in_flight); + utime_t now = ceph_clock_now(cct); + i->xitem.remove_myself(); + i->request->clear_data(); + history.insert(now, TrackedOpRef(i)); +} + +bool OpTracker::check_ops_in_flight(std::vector<string> &warning_vector) +{ + Mutex::Locker locker(ops_in_flight_lock); + if (!ops_in_flight.size()) + return false; + + utime_t now = ceph_clock_now(cct); + utime_t too_old = now; + too_old -= complaint_time; + + utime_t oldest_secs = now - ops_in_flight.front()->get_arrived(); + + dout(10) << "ops_in_flight.size: " << ops_in_flight.size() + << "; oldest is " << oldest_secs + << " seconds old" << dendl; + + if (oldest_secs < complaint_time) + return false; + + xlist<TrackedOp*>::iterator i = ops_in_flight.begin(); + warning_vector.reserve(log_threshold + 1); + + int slow = 0; // total slow + int warned = 0; // total logged + while (!i.end() && (*i)->get_arrived() < too_old) { + slow++; + + // exponential backoff of warning intervals + if (((*i)->get_arrived() + + (complaint_time * (*i)->warn_interval_multiplier)) < now) { + // will warn + if (warning_vector.empty()) + warning_vector.push_back(""); + warned++; + if (warned > log_threshold) + break; + + utime_t age = now - (*i)->get_arrived(); + stringstream ss; + ss << "slow request " << age << " seconds old, received at " << (*i)->get_arrived() + << ": " << *((*i)->request) << " currently " + << ((*i)->current.size() ? (*i)->current : (*i)->state_string()); + warning_vector.push_back(ss.str()); + + // only those that have been shown will backoff + (*i)->warn_interval_multiplier *= 2; + } + ++i; + } + + // only summarize if we warn about any. if everything has backed + // off, we will stay silent. + if (warned > 0) { + stringstream ss; + ss << slow << " slow requests, " << warned << " included below; oldest blocked for > " + << oldest_secs << " secs"; + warning_vector[0] = ss.str(); + } + + return warning_vector.size(); +} + +void OpTracker::get_age_ms_histogram(pow2_hist_t *h) +{ + Mutex::Locker locker(ops_in_flight_lock); + + h->clear(); + + utime_t now = ceph_clock_now(NULL); + unsigned bin = 30; + uint32_t lb = 1 << (bin-1); // lower bound for this bin + int count = 0; + for (xlist<TrackedOp*>::iterator i = ops_in_flight.begin(); !i.end(); ++i) { + utime_t age = now - (*i)->get_arrived(); + uint32_t ms = (long)(age * 1000.0); + if (ms >= lb) { + count++; + continue; + } + if (count) + h->set(bin, count); + while (lb > ms) { + bin--; + lb >>= 1; + } + count = 1; + } + if (count) + h->set(bin, count); +} + +void OpTracker::mark_event(TrackedOp *op, const string &dest) +{ + utime_t now = ceph_clock_now(cct); + return _mark_event(op, dest, now); +} + +void OpTracker::_mark_event(TrackedOp *op, const string &evt, + utime_t time) +{ + Mutex::Locker locker(ops_in_flight_lock); + dout(5) << //"reqid: " << op->get_reqid() << + ", seq: " << op->seq + << ", time: " << time << ", event: " << evt + << ", request: " << *op->request << dendl; +} + +void OpTracker::RemoveOnDelete::operator()(TrackedOp *op) { + op->mark_event("done"); + tracker->unregister_inflight_op(op); + // Do not delete op, unregister_inflight_op took control +} + +void TrackedOp::mark_event(const string &event) +{ + utime_t now = ceph_clock_now(g_ceph_context); + { + Mutex::Locker l(lock); + events.push_back(make_pair(now, event)); + } + tracker->mark_event(this, event); + _event_marked(); +} + +void TrackedOp::dump(utime_t now, Formatter *f) const +{ + Message *m = request; + stringstream name; + m->print(name); + f->dump_string("description", 
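// Worked example of the warning backoff in check_ops_in_flight() above
// (illustrative numbers): with osd_op_complaint_time = 30, a request is first
// reported once it has been in flight for more than 30 seconds.  Each report
// doubles that op's warn_interval_multiplier, so the same request is reported
// again only after 60, then 120, then 240 seconds, which keeps repeated passes
// from logging the identical slow request every time.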
name.str().c_str()); // this TrackedOp + f->dump_stream("received_at") << get_arrived(); + f->dump_float("age", now - get_arrived()); + f->dump_float("duration", get_duration()); + { + f->open_array_section("type_data"); + _dump(now, f); + f->close_section(); + } +} diff --git a/src/common/TrackedOp.h b/src/common/TrackedOp.h index 753331df7f3..44e03905759 100644 --- a/src/common/TrackedOp.h +++ b/src/common/TrackedOp.h @@ -17,15 +17,163 @@ #include <stdint.h> #include <include/utime.h> #include "common/Mutex.h" +#include "include/histogram.h" #include "include/xlist.h" #include "msg/Message.h" #include <tr1/memory> +class TrackedOp; +typedef std::tr1::shared_ptr<TrackedOp> TrackedOpRef; + +class OpTracker; +class OpHistory { + set<pair<utime_t, TrackedOpRef> > arrived; + set<pair<double, TrackedOpRef> > duration; + void cleanup(utime_t now); + bool shutdown; + OpTracker *tracker; + uint32_t history_size; + uint32_t history_duration; + +public: + OpHistory(OpTracker *tracker_) : shutdown(false), tracker(tracker_), + history_size(0), history_duration(0) {} + ~OpHistory() { + assert(arrived.empty()); + assert(duration.empty()); + } + void insert(utime_t now, TrackedOpRef op); + void dump_ops(utime_t now, Formatter *f); + void on_shutdown(); + void set_size_and_duration(uint32_t new_size, uint32_t new_duration) { + history_size = new_size; + history_duration = new_duration; + } +}; + +class OpTracker { + class RemoveOnDelete { + OpTracker *tracker; + public: + RemoveOnDelete(OpTracker *tracker) : tracker(tracker) {} + void operator()(TrackedOp *op); + }; + friend class RemoveOnDelete; + friend class OpHistory; + uint64_t seq; + Mutex ops_in_flight_lock; + xlist<TrackedOp *> ops_in_flight; + OpHistory history; + float complaint_time; + int log_threshold; + +public: + CephContext *cct; + OpTracker(CephContext *cct_) : seq(0), ops_in_flight_lock("OpTracker mutex"), + history(this), complaint_time(0), log_threshold(0), cct(cct_) {} + void set_complaint_and_threshold(float time, int threshold) { + complaint_time = time; + log_threshold = threshold; + } + void set_history_size_and_duration(uint32_t new_size, uint32_t new_duration) { + history.set_size_and_duration(new_size, new_duration); + } + void dump_ops_in_flight(Formatter *f); + void dump_historic_ops(Formatter *f); + void register_inflight_op(xlist<TrackedOp*>::item *i); + void unregister_inflight_op(TrackedOp *i); + + void get_age_ms_histogram(pow2_hist_t *h); + + /** + * Look for Ops which are too old, and insert warning + * strings for each Op that is too old. + * + * @param warning_strings A vector<string> reference which is filled + * with a warning string for each old Op. + * @return True if there are any Ops to warn on, false otherwise. 
+ */ + bool check_ops_in_flight(std::vector<string> &warning_strings); + void mark_event(TrackedOp *op, const string &evt); + void _mark_event(TrackedOp *op, const string &evt, utime_t now); + + void on_shutdown() { + Mutex::Locker l(ops_in_flight_lock); + history.on_shutdown(); + } + ~OpTracker() { + assert(ops_in_flight.empty()); + } + + template <typename T> + typename T::Ref create_request(Message *ref) + { + typename T::Ref retval(new T(ref, this), + RemoveOnDelete(this)); + + _mark_event(retval.get(), "header_read", ref->get_recv_stamp()); + _mark_event(retval.get(), "throttled", ref->get_throttle_stamp()); + _mark_event(retval.get(), "all_read", ref->get_recv_complete_stamp()); + _mark_event(retval.get(), "dispatched", ref->get_dispatch_stamp()); + + retval->init_from_message(); + + return retval; + } +}; + class TrackedOp { +private: + friend class OpHistory; + friend class OpTracker; + xlist<TrackedOp*>::item xitem; +protected: + Message *request; /// the logical request we are tracking + OpTracker *tracker; /// the tracker we are associated with + + list<pair<utime_t, string> > events; /// list of events and their times + Mutex lock; /// to protect the events list + string current; /// the current state the event is in + uint64_t seq; /// a unique value set by the OpTracker + + uint32_t warn_interval_multiplier; // limits output of a given op warning + + TrackedOp(Message *req, OpTracker *_tracker) : + xitem(this), + request(req), + tracker(_tracker), + lock("TrackedOp::lock"), + seq(0), + warn_interval_multiplier(1) + { + tracker->register_inflight_op(&xitem); + } + + virtual void init_from_message() {} + /// output any type-specific data you want to get when dump() is called + virtual void _dump(utime_t now, Formatter *f) const {} + /// if you want something else to happen when events are marked, implement + virtual void _event_marked() {} + public: - virtual void mark_event(const string &event) = 0; - virtual ~TrackedOp() {} + virtual ~TrackedOp() { assert(request); request->put(); } + + utime_t get_arrived() const { + return request->get_recv_stamp(); + } + // This function maybe needs some work; assumes last event is completion time + double get_duration() const { + return events.size() ? 
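// Usage sketch for create_request<T>() above, mirroring how the OSD is changed
// later in this patch (the surrounding code is illustrative):
//
//   OpRequestRef op = op_tracker.create_request<OpRequest>(m);
//   op->mark_event("waiting_for_osdmap");
//
// create_request() stamps the header_read/throttled/all_read/dispatched events
// from the message's receive timestamps, and the RemoveOnDelete deleter moves
// the op into OpHistory once the last reference to it is dropped.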
+ (events.rbegin()->first - get_arrived()) : + 0.0; + } + Message *get_req() const { return request; } + + void mark_event(const string &event); + virtual const char *state_string() const { + return events.rbegin()->second.c_str(); + } + void dump(utime_t now, Formatter *f) const; }; -typedef std::tr1::shared_ptr<TrackedOp> TrackedOpRef; #endif diff --git a/src/common/bloom_filter.cc b/src/common/bloom_filter.cc index f602b80149e..68875e925bf 100644 --- a/src/common/bloom_filter.cc +++ b/src/common/bloom_filter.cc @@ -6,26 +6,26 @@ void bloom_filter::encode(bufferlist& bl) const { - ENCODE_START(1, 1, bl); + ENCODE_START(2, 2, bl); ::encode((uint64_t)salt_count_, bl); - ::encode((uint64_t)table_size_, bl); - ::encode((uint64_t)inserted_element_count_, bl); + ::encode((uint64_t)insert_count_, bl); + ::encode((uint64_t)target_element_count_, bl); ::encode((uint64_t)random_seed_, bl); - bufferptr bp((const char*)bit_table_, raw_table_size_); + bufferptr bp((const char*)bit_table_, table_size_); ::encode(bp, bl); ENCODE_FINISH(bl); } void bloom_filter::decode(bufferlist::iterator& p) { - DECODE_START(1, p); + DECODE_START(2, p); uint64_t v; ::decode(v, p); salt_count_ = v; ::decode(v, p); - table_size_ = v; + insert_count_ = v; ::decode(v, p); - inserted_element_count_ = v; + target_element_count_ = v; ::decode(v, p); random_seed_ = v; bufferlist t; @@ -33,11 +33,14 @@ void bloom_filter::decode(bufferlist::iterator& p) salt_.clear(); generate_unique_salt(); - raw_table_size_ = t.length(); - assert(raw_table_size_ == table_size_ / bits_per_char); + table_size_ = t.length(); delete bit_table_; - bit_table_ = new cell_type[raw_table_size_]; - t.copy(0, raw_table_size_, (char *)bit_table_); + if (table_size_) { + bit_table_ = new cell_type[table_size_]; + t.copy(0, table_size_, (char *)bit_table_); + } else { + bit_table_ = NULL; + } DECODE_FINISH(p); } @@ -46,8 +49,8 @@ void bloom_filter::dump(Formatter *f) const { f->dump_unsigned("salt_count", salt_count_); f->dump_unsigned("table_size", table_size_); - f->dump_unsigned("raw_table_size", raw_table_size_); - f->dump_unsigned("insert_count", inserted_element_count_); + f->dump_unsigned("insert_count", insert_count_); + f->dump_unsigned("target_element_count", target_element_count_); f->dump_unsigned("random_seed", random_seed_); f->open_array_section("salt_table"); @@ -56,7 +59,7 @@ void bloom_filter::dump(Formatter *f) const f->close_section(); f->open_array_section("bit_table"); - for (unsigned i = 0; i < raw_table_size_; ++i) + for (unsigned i = 0; i < table_size_; ++i) f->dump_unsigned("byte", (unsigned)bit_table_[i]); f->close_section(); } @@ -74,3 +77,61 @@ void bloom_filter::generate_test_instances(list<bloom_filter*>& ls) ls.back()->insert("boof"); ls.back()->insert("boogggg"); } + + +void compressible_bloom_filter::encode(bufferlist& bl) const +{ + ENCODE_START(2, 2, bl); + bloom_filter::encode(bl); + + uint32_t s = size_list.size(); + ::encode(s, bl); + for (vector<size_t>::const_iterator p = size_list.begin(); + p != size_list.end(); ++p) + ::encode((uint64_t)*p, bl); + + ENCODE_FINISH(bl); +} + +void compressible_bloom_filter::decode(bufferlist::iterator& p) +{ + DECODE_START(2, p); + bloom_filter::decode(p); + + uint32_t s; + ::decode(s, p); + size_list.resize(s); + for (unsigned i = 0; i < s; i++) { + uint64_t v; + ::decode(v, p); + size_list[i] = v; + } + + DECODE_FINISH(p); +} + +void compressible_bloom_filter::dump(Formatter *f) const +{ + bloom_filter::dump(f); + + f->open_array_section("table_sizes"); + for 
(vector<size_t>::const_iterator p = size_list.begin(); + p != size_list.end(); ++p) + f->dump_unsigned("size", (uint64_t)*p); + f->close_section(); +} + +void compressible_bloom_filter::generate_test_instances(list<compressible_bloom_filter*>& ls) +{ + ls.push_back(new compressible_bloom_filter(10, .5, 1)); + ls.push_back(new compressible_bloom_filter(10, .5, 1)); + ls.back()->insert("foo"); + ls.back()->insert("bar"); + ls.push_back(new compressible_bloom_filter(50, .5, 1)); + ls.back()->insert("foo"); + ls.back()->insert("bar"); + ls.back()->insert("baz"); + ls.back()->insert("boof"); + ls.back()->compress(20); + ls.back()->insert("boogggg"); +} diff --git a/src/common/bloom_filter.hpp b/src/common/bloom_filter.hpp index 6216c7fb34d..93787a89a60 100644 --- a/src/common/bloom_filter.hpp +++ b/src/common/bloom_filter.hpp @@ -53,14 +53,22 @@ protected: typedef unsigned int bloom_type; typedef unsigned char cell_type; + unsigned char* bit_table_; ///< pointer to bit map + std::vector<bloom_type> salt_; ///< vector of salts + std::size_t salt_count_; ///< number of salts + std::size_t table_size_; ///< bit table size in bytes + std::size_t insert_count_; ///< insertion count + std::size_t target_element_count_; ///< target number of unique insertions + std::size_t random_seed_; ///< random seed + public: bloom_filter() : bit_table_(0), salt_count_(0), table_size_(0), - raw_table_size_(0), - inserted_element_count_(0), + insert_count_(0), + target_element_count_(0), random_seed_(0) {} @@ -68,7 +76,8 @@ public: const double& false_positive_probability, const std::size_t& random_seed) : bit_table_(0), - inserted_element_count_(0), + insert_count_(0), + target_element_count_(predicted_inserted_element_count), random_seed_((random_seed) ? random_seed : 0xA5A5A5A5) { find_optimal_parameters(predicted_inserted_element_count, false_positive_probability, @@ -76,12 +85,15 @@ public: init(); } - bloom_filter(const std::size_t& salt_count, std::size_t table_size, - const std::size_t& random_seed) + bloom_filter(const std::size_t& salt_count, + std::size_t table_size, + const std::size_t& random_seed, + std::size_t target_element_count) : bit_table_(0), salt_count_(salt_count), table_size_(table_size), - inserted_element_count_(0), + insert_count_(0), + target_element_count_(target_element_count), random_seed_((random_seed) ? 
random_seed : 0xA5A5A5A5) { init(); @@ -89,9 +101,12 @@ public: void init() { generate_unique_salt(); - raw_table_size_ = table_size_ / bits_per_char; - bit_table_ = new cell_type[raw_table_size_]; - std::fill_n(bit_table_,raw_table_size_,0x00); + if (table_size_) { + bit_table_ = new cell_type[table_size_]; + std::fill_n(bit_table_, table_size_, 0x00); + } else { + bit_table_ = NULL; + } } bloom_filter(const bloom_filter& filter) @@ -104,12 +119,11 @@ public: if (this != &filter) { salt_count_ = filter.salt_count_; table_size_ = filter.table_size_; - raw_table_size_ = filter.raw_table_size_; - inserted_element_count_ = filter.inserted_element_count_; + insert_count_ = filter.insert_count_; random_seed_ = filter.random_seed_; delete[] bit_table_; - bit_table_ = new cell_type[raw_table_size_]; - std::copy(filter.bit_table_,filter.bit_table_ + raw_table_size_,bit_table_); + bit_table_ = new cell_type[table_size_]; + std::copy(filter.bit_table_, filter.bit_table_ + table_size_, bit_table_); salt_ = filter.salt_; } return *this; @@ -127,8 +141,9 @@ public: inline void clear() { - std::fill_n(bit_table_,raw_table_size_,0x00); - inserted_element_count_ = 0; + if (bit_table_) + std::fill_n(bit_table_, table_size_, 0x00); + insert_count_ = 0; } /** @@ -141,26 +156,28 @@ public: * @param val integer value to insert */ inline void insert(uint32_t val) { + assert(bit_table_); std::size_t bit_index = 0; std::size_t bit = 0; for (std::size_t i = 0; i < salt_.size(); ++i) { compute_indices(hash_ap(val,salt_[i]),bit_index,bit); - bit_table_[bit_index / bits_per_char] |= bit_mask[bit]; + bit_table_[bit_index >> 3] |= bit_mask[bit]; } - ++inserted_element_count_; + ++insert_count_; } inline void insert(const unsigned char* key_begin, const std::size_t& length) { + assert(bit_table_); std::size_t bit_index = 0; std::size_t bit = 0; for (std::size_t i = 0; i < salt_.size(); ++i) { compute_indices(hash_ap(key_begin,length,salt_[i]),bit_index,bit); - bit_table_[bit_index / bits_per_char] |= bit_mask[bit]; + bit_table_[bit_index >> 3] |= bit_mask[bit]; } - ++inserted_element_count_; + ++insert_count_; } template<typename T> @@ -202,12 +219,14 @@ public: */ inline virtual bool contains(uint32_t val) const { + if (!bit_table_) + return false; std::size_t bit_index = 0; std::size_t bit = 0; for (std::size_t i = 0; i < salt_.size(); ++i) { compute_indices(hash_ap(val,salt_[i]),bit_index,bit); - if ((bit_table_[bit_index / bits_per_char] & bit_mask[bit]) != bit_mask[bit]) + if ((bit_table_[bit_index >> 3] & bit_mask[bit]) != bit_mask[bit]) { return false; } @@ -217,12 +236,14 @@ public: inline virtual bool contains(const unsigned char* key_begin, const std::size_t length) const { + if (!bit_table_) + return false; std::size_t bit_index = 0; std::size_t bit = 0; for (std::size_t i = 0; i < salt_.size(); ++i) { compute_indices(hash_ap(key_begin,length,salt_[i]),bit_index,bit); - if ((bit_table_[bit_index / bits_per_char] & bit_mask[bit]) != bit_mask[bit]) + if ((bit_table_[bit_index >> 3] & bit_mask[bit]) != bit_mask[bit]) { return false; } @@ -278,12 +299,41 @@ public: inline virtual std::size_t size() const { - return table_size_; + return table_size_ * bits_per_char; } inline std::size_t element_count() const { - return inserted_element_count_; + return insert_count_; + } + + /* + * density of bits set. inconvenient units, but: + * .3 = ~50% target insertions + * .5 = 100% target insertions, "perfectly full" + * .75 = 200% target insertions + * 1.0 = all bits set... 
infinite insertions + */ + inline double density() const + { + if (!bit_table_) + return 0.0; + size_t set = 0; + uint8_t *p = bit_table_; + size_t left = table_size_; + while (left-- > 0) { + uint8_t c = *p; + for (; c; ++set) + c &= c - 1; + ++p; + } + return (double)set / (double)(table_size_ << 3); + } + + virtual inline double approx_unique_element_count() const { + // this is not a very good estimate; a better solution should have + // some asymptotic behavior as density() approaches 1.0. + return (double)target_element_count_ * 2.0 * density(); } inline double effective_fpp() const @@ -295,7 +345,7 @@ public: the current number of inserted elements - not the user defined predicated/expected number of inserted elements. */ - return std::pow(1.0 - std::exp(-1.0 * salt_.size() * inserted_element_count_ / size()), 1.0 * salt_.size()); + return std::pow(1.0 - std::exp(-1.0 * salt_.size() * insert_count_ / size()), 1.0 * salt_.size()); } inline bloom_filter& operator &= (const bloom_filter& filter) @@ -306,7 +356,7 @@ public: (table_size_ == filter.table_size_) && (random_seed_ == filter.random_seed_) ) { - for (std::size_t i = 0; i < raw_table_size_; ++i) { + for (std::size_t i = 0; i < table_size_; ++i) { bit_table_[i] &= filter.bit_table_[i]; } } @@ -321,7 +371,7 @@ public: (table_size_ == filter.table_size_) && (random_seed_ == filter.random_seed_) ) { - for (std::size_t i = 0; i < raw_table_size_; ++i) { + for (std::size_t i = 0; i < table_size_; ++i) { bit_table_[i] |= filter.bit_table_[i]; } } @@ -336,7 +386,7 @@ public: (table_size_ == filter.table_size_) && (random_seed_ == filter.random_seed_) ) { - for (std::size_t i = 0; i < raw_table_size_; ++i) { + for (std::size_t i = 0; i < table_size_; ++i) { bit_table_[i] ^= filter.bit_table_[i]; } } @@ -352,8 +402,8 @@ protected: inline virtual void compute_indices(const bloom_type& hash, std::size_t& bit_index, std::size_t& bit) const { - bit_index = hash % table_size_; - bit = bit_index % bits_per_char; + bit_index = hash % (table_size_ << 3); + bit = bit_index & 7; } void generate_unique_salt() @@ -418,7 +468,8 @@ protected: } else { - std::copy(predef_salt,predef_salt + predef_salt_count,std::back_inserter(salt_)); + std::copy(predef_salt,predef_salt + predef_salt_count, + std::back_inserter(salt_)); srand(static_cast<unsigned int>(random_seed_)); while (salt_.size() < salt_count_) { @@ -466,8 +517,8 @@ protected: *salt_count = static_cast<std::size_t>(min_k); size_t t = static_cast<std::size_t>(min_m); - t += (((t % bits_per_char) != 0) ? (bits_per_char - (t % bits_per_char)) : 0); - *table_size = t; + t += (((t & 7) != 0) ? 
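// Illustrative use of the new accounting above (not part of this patch; the
// numbers and the include path are assumptions).  density() reports the
// fraction of bits set, and approx_unique_element_count() turns that into the
// deliberately coarse estimate described in its comment.
#include "common/bloom_filter.hpp"

static void bloom_density_example()
{
  // expect ~100 unique insertions at a 1% false-positive probability
  bloom_filter bf(100, 0.01, 12345);

  for (uint32_t i = 0; i < 50; ++i)
    bf.insert(i);

  double d = bf.density();                       // roughly 0.3 at half the target
  double est = bf.approx_unique_element_count(); // roughly 100 * 2 * d
  (void)d;
  (void)est;
}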
(bits_per_char - (t & 7)) : 0); + *table_size = t >> 3; } inline bloom_type hash_ap(uint32_t val, bloom_type hash) const @@ -507,14 +558,6 @@ protected: return hash; } - std::vector<bloom_type> salt_; - unsigned char* bit_table_; - std::size_t salt_count_; - std::size_t table_size_; - std::size_t raw_table_size_; - std::size_t inserted_element_count_; - std::size_t random_seed_; - public: void encode(bufferlist& bl) const; void decode(bufferlist::iterator& bl); @@ -549,53 +592,77 @@ class compressible_bloom_filter : public bloom_filter { public: + compressible_bloom_filter() : bloom_filter() {} + compressible_bloom_filter(const std::size_t& predicted_element_count, const double& false_positive_probability, const std::size_t& random_seed) - : bloom_filter(predicted_element_count,false_positive_probability,random_seed) + : bloom_filter(predicted_element_count, false_positive_probability, random_seed) + { + size_list.push_back(table_size_); + } + + compressible_bloom_filter(const std::size_t& salt_count, + std::size_t table_size, + const std::size_t& random_seed, + std::size_t target_count) + : bloom_filter(salt_count, table_size, random_seed, target_count) { size_list.push_back(table_size_); } inline virtual std::size_t size() const { - return size_list.back(); + return size_list.back() * bits_per_char; } - inline bool compress(const double& percentage) + inline bool compress(const double& target_ratio) { - if ((0.0 >= percentage) || (percentage >= 100.0)) + if (!bit_table_) + return false; + + if ((0.0 >= target_ratio) || (target_ratio >= 1.0)) { return false; } std::size_t original_table_size = size_list.back(); - std::size_t new_table_size = static_cast<std::size_t>((size_list.back() * (1.0 - (percentage / 100.0)))); - new_table_size -= (((new_table_size % bits_per_char) != 0) ? (new_table_size % bits_per_char) : 0); + std::size_t new_table_size = static_cast<std::size_t>(size_list.back() * target_ratio); - if ((bits_per_char > new_table_size) || (new_table_size >= original_table_size)) + if ((!new_table_size) || (new_table_size >= original_table_size)) { return false; } - cell_type* tmp = new cell_type[new_table_size / bits_per_char]; - std::copy(bit_table_, bit_table_ + (new_table_size / bits_per_char), tmp); - cell_type* itr = bit_table_ + (new_table_size / bits_per_char); - cell_type* end = bit_table_ + (original_table_size / bits_per_char); + cell_type* tmp = new cell_type[new_table_size]; + std::copy(bit_table_, bit_table_ + (new_table_size), tmp); + cell_type* itr = bit_table_ + (new_table_size); + cell_type* end = bit_table_ + (original_table_size); cell_type* itr_tmp = tmp; - + cell_type* itr_end = tmp + (new_table_size); while (end != itr) { *(itr_tmp++) |= (*itr++); + if (itr_tmp == itr_end) + itr_tmp = tmp; } delete[] bit_table_; bit_table_ = tmp; size_list.push_back(new_table_size); + table_size_ = new_table_size; return true; } + virtual inline double approx_unique_element_count() const { + // this is not a very good estimate; a better solution should have + // some asymptotic behavior as density() approaches 1.0. + // + // the compress() correction is also bad; it tends to under-estimate. 
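// Illustrative use of the reworked compress() above (not part of this patch;
// names and values are assumptions).  compress(ratio) shrinks the bit table to
// roughly `ratio` of its current size and OR-folds the tail back onto the
// head, so earlier insertions still test positive, at the price of a higher
// false-positive rate.
#include "common/bloom_filter.hpp"
#include <string>

static void bloom_compress_example()
{
  compressible_bloom_filter cbf(100, 0.01, 12345);
  cbf.insert(std::string("foo"));

  cbf.compress(0.5);   // keep about half of the table; returns false if the
                       // ratio is not in (0, 1) or the table would not shrink

  bool maybe = cbf.contains(std::string("foo"));   // still true: folding never
  (void)maybe;                                     // clears a set bit
}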
+ return (double)target_element_count_ * 2.0 * density() * (double)size_list.back() / (double)size_list.front(); + } + private: inline virtual void compute_indices(const bloom_type& hash, std::size_t& bit_index, std::size_t& bit) const @@ -603,13 +670,19 @@ private: bit_index = hash; for (std::size_t i = 0; i < size_list.size(); ++i) { - bit_index %= size_list[i]; + bit_index %= size_list[i] << 3; } - bit = bit_index % bits_per_char; + bit = bit_index & 7; } std::vector<std::size_t> size_list; +public: + void encode(bufferlist& bl) const; + void decode(bufferlist::iterator& bl); + void dump(Formatter *f) const; + static void generate_test_instances(std::list<compressible_bloom_filter*>& ls); }; +WRITE_CLASS_ENCODER(compressible_bloom_filter) #endif diff --git a/src/common/config_opts.h b/src/common/config_opts.h index 2d3f981379b..700a210b412 100644 --- a/src/common/config_opts.h +++ b/src/common/config_opts.h @@ -721,6 +721,10 @@ OPTION(rgw_data_log_num_shards, OPT_INT, 128) // number of objects to keep data OPTION(rgw_data_log_obj_prefix, OPT_STR, "data_log") // OPTION(rgw_replica_log_obj_prefix, OPT_STR, "replica_log") // +OPTION(rgw_bucket_quota_ttl, OPT_INT, 600) // time for cached bucket stats to be cached within rgw instance +OPTION(rgw_bucket_quota_soft_threshold, OPT_DOUBLE, 0.95) // threshold from which we don't rely on cached info for quota decisions +OPTION(rgw_bucket_quota_cache_size, OPT_INT, 10000) // number of entries in bucket quota cache + OPTION(mutex_perf_counter, OPT_BOOL, false) // enable/disable mutex perf counter // This will be set to true when it is safe to start threads. diff --git a/src/common/lru_map.h b/src/common/lru_map.h index 6e7f7b3786f..1e1acc95f76 100644 --- a/src/common/lru_map.h +++ b/src/common/lru_map.h @@ -21,41 +21,76 @@ class lru_map { size_t max; public: + class UpdateContext { + public: + virtual ~UpdateContext() {} + + /* update should return true if object is updated */ + virtual bool update(V *v) = 0; + }; + + bool _find(const K& key, V *value, UpdateContext *ctx); + void _add(const K& key, V& value); + +public: lru_map(int _max) : lock("lru_map"), max(_max) {} virtual ~lru_map() {} bool find(const K& key, V& value); + + /* + * find_and_update() + * + * - will return true if object is found + * - if ctx is set will return true if object is found and updated + */ + bool find_and_update(const K& key, V *value, UpdateContext *ctx); void add(const K& key, V& value); void erase(const K& key); }; template <class K, class V> -bool lru_map<K, V>::find(const K& key, V& value) +bool lru_map<K, V>::_find(const K& key, V *value, UpdateContext *ctx) { - lock.Lock(); typename std::map<K, entry>::iterator iter = entries.find(key); if (iter == entries.end()) { - lock.Unlock(); return false; } entry& e = iter->second; entries_lru.erase(e.lru_iter); - value = e.value; + bool r = true; + + if (ctx) + r = ctx->update(&e.value); + + if (value) + *value = e.value; entries_lru.push_front(key); e.lru_iter = entries_lru.begin(); - lock.Unlock(); + return r; +} - return true; +template <class K, class V> +bool lru_map<K, V>::find(const K& key, V& value) +{ + Mutex::Locker l(lock); + return _find(key, &value, NULL); } template <class K, class V> -void lru_map<K, V>::add(const K& key, V& value) +bool lru_map<K, V>::find_and_update(const K& key, V *value, UpdateContext *ctx) +{ + Mutex::Locker l(lock); + return _find(key, value, ctx); +} + +template <class K, class V> +void lru_map<K, V>::_add(const K& key, V& value) { - lock.Lock(); typename std::map<K, 
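// Usage sketch for the new lru_map::find_and_update() / UpdateContext hook
// above (illustrative; the key/value types and names are assumptions).  The
// update runs while the map's lock is held, so the entry is refreshed and read
// back atomically with respect to other callers.
#include "common/lru_map.h"
#include <stdint.h>
#include <string>

struct CachedStats {
  uint64_t num_objects;
};

struct AddObjects : public lru_map<std::string, CachedStats>::UpdateContext {
  uint64_t delta;
  AddObjects(uint64_t d) : delta(d) {}
  bool update(CachedStats *v) {
    v->num_objects += delta;
    return true;                 // report the entry as updated
  }
};

static void lru_map_example()
{
  lru_map<std::string, CachedStats> cache(128);    // keep at most 128 entries

  CachedStats s = { 0 };
  cache.add("bucket1", s);

  AddObjects bump(1);
  CachedStats out;
  // true if "bucket1" is present (and, since a context is given, was updated)
  bool found = cache.find_and_update("bucket1", &out, &bump);
  (void)found;
}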
entry>::iterator iter = entries.find(key); if (iter != entries.end()) { entry& e = iter->second; @@ -74,8 +109,14 @@ void lru_map<K, V>::add(const K& key, V& value) entries.erase(iter); entries_lru.pop_back(); } - - lock.Unlock(); +} + + +template <class K, class V> +void lru_map<K, V>::add(const K& key, V& value) +{ + Mutex::Locker l(lock); + _add(key, value); } template <class K, class V> diff --git a/src/global/signal_handler.cc b/src/global/signal_handler.cc index ce604fe1e5d..ffdc5402caf 100644 --- a/src/global/signal_handler.cc +++ b/src/global/signal_handler.cc @@ -196,13 +196,13 @@ struct SignalHandler : public Thread { lock.Lock(); int num_fds = 0; fds[num_fds].fd = pipefd[0]; - fds[num_fds].events = POLLIN | POLLOUT | POLLERR; + fds[num_fds].events = POLLIN | POLLERR; fds[num_fds].revents = 0; ++num_fds; for (unsigned i=0; i<32; i++) { if (handlers[i]) { fds[num_fds].fd = handlers[i]->pipefd[0]; - fds[num_fds].events = POLLIN | POLLOUT | POLLERR; + fds[num_fds].events = POLLIN | POLLERR; fds[num_fds].revents = 0; ++num_fds; } diff --git a/src/include/Makefile.am b/src/include/Makefile.am index 2d98e777f00..c8823ce523d 100644 --- a/src/include/Makefile.am +++ b/src/include/Makefile.am @@ -43,6 +43,7 @@ noinst_HEADERS += \ include/filepath.h \ include/frag.h \ include/hash.h \ + include/histogram.h \ include/intarith.h \ include/interval_set.h \ include/int_types.h \ diff --git a/src/include/histogram.h b/src/include/histogram.h new file mode 100644 index 00000000000..c817b1ec175 --- /dev/null +++ b/src/include/histogram.h @@ -0,0 +1,76 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. + * Copyright 2013 Inktank + */ + +#ifndef HISTOGRAM_H_ +#define HISTOGRAM_H_ + +/** + * power of 2 histogram + */ +struct pow2_hist_t { // + /** + * histogram + * + * bin size is 2^index + * value is count of elements that are <= the current bin but > the previous bin. 
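// Worked example for the pow2_hist_t bins being described here (illustrative,
// not part of the patch): a sample of 37 is counted in the bin whose upper
// bound is 64, i.e. index 6, because 32 < 37 <= 64.  After
//
//   pow2_hist_t hist;
//   hist.set(6, 1);                    // one element in the (32, 64] bin
//
// the vector holds bins 0..6, so upper_bound() returns 1 << 7 = 128.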
+ */ + vector<int32_t> h; + +private: + /// expand to at least another's size + void _expand_to(unsigned s) { + if (s > h.size()) + h.resize(s, 0); + } + /// drop useless trailing 0's + void _contract() { + unsigned p = h.size(); + while (p > 0 && h[p-1] == 0) + --p; + h.resize(p); + } + +public: + void clear() { + h.clear(); + } + void set(int bin, int32_t v) { + _expand_to(bin + 1); + h[bin] = v; + _contract(); + } + + void add(const pow2_hist_t& o) { + _expand_to(o.h.size()); + for (unsigned p = 0; p < o.h.size(); ++p) + h[p] += o.h[p]; + _contract(); + } + void sub(const pow2_hist_t& o) { + _expand_to(o.h.size()); + for (unsigned p = 0; p < o.h.size(); ++p) + h[p] -= o.h[p]; + _contract(); + } + + int32_t upper_bound() const { + return 1 << h.size(); + } + + void dump(Formatter *f) const; + void encode(bufferlist &bl) const; + void decode(bufferlist::iterator &bl); + static void generate_test_instances(std::list<pow2_hist_t*>& o); +}; +WRITE_CLASS_ENCODER(pow2_hist_t) + +#endif /* HISTOGRAM_H_ */ diff --git a/src/mds/MDCache.cc b/src/mds/MDCache.cc index 9dc1229fbb9..0188d418e0d 100644 --- a/src/mds/MDCache.cc +++ b/src/mds/MDCache.cc @@ -632,7 +632,7 @@ void MDCache::populate_mydir() CDir *dir = strays[i]->get_dirfrag(fg); if (!dir) dir = strays[i]->get_or_open_dirfrag(this, fg); - if (!dir->is_complete()) { + if (dir->get_version() == 0) { dir->fetch(new C_MDS_RetryOpenRoot(this)); return; } @@ -653,6 +653,8 @@ void MDCache::populate_mydir() assert(!open); open = true; mds->queue_waiters(waiting_for_open); + + scan_stray_dir(); } void MDCache::open_foreign_mdsdir(inodeno_t ino, Context *fin) @@ -9135,19 +9137,34 @@ void MDCache::_snaprealm_create_finish(MDRequest *mdr, Mutation *mut, CInode *in // ------------------------------------------------------------------------------- // STRAYS -void MDCache::scan_stray_dir() +struct C_MDC_RetryScanStray : public Context { + MDCache *cache; + dirfrag_t next; + C_MDC_RetryScanStray(MDCache *c, dirfrag_t n) : cache(c), next(n) { } + void finish(int r) { + cache->scan_stray_dir(next); + } +}; + +void MDCache::scan_stray_dir(dirfrag_t next) { - dout(10) << "scan_stray_dir" << dendl; - + dout(10) << "scan_stray_dir " << next << dendl; + list<CDir*> ls; for (int i = 0; i < NUM_STRAY; ++i) { - if (strays[i]) { - strays[i]->get_dirfrags(ls); - } + if (strays[i]->ino() < next.ino) + continue; + strays[i]->get_dirfrags(ls); } for (list<CDir*>::iterator p = ls.begin(); p != ls.end(); ++p) { CDir *dir = *p; + if (dir->dirfrag() < next) + continue; + if (!dir->is_complete()) { + dir->fetch(new C_MDC_RetryScanStray(this, dir->dirfrag())); + return; + } for (CDir::map_t::iterator q = dir->items.begin(); q != dir->items.end(); ++q) { CDentry *dn = q->second; CDentry::linkage_t *dnl = dn->get_projected_linkage(); @@ -9354,8 +9371,12 @@ void MDCache::purge_stray(CDentry *dn) if (in->is_file()) { uint64_t period = (uint64_t)in->inode.layout.fl_object_size * (uint64_t)in->inode.layout.fl_stripe_count; - uint64_t cur_max_size = in->inode.get_max_size(); - uint64_t to = MAX(in->inode.size, cur_max_size); + uint64_t to = in->inode.get_max_size(); + to = MAX(in->inode.size, to); + // when truncating a file, the filer does not delete stripe objects that are + // truncated to zero. so we need to purge stripe objects up to the max size + // the file has ever been. 
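// (Illustrative numbers, not from this patch:) with a 4 MB object size and a
// stripe count of 1, a file that once grew to 40 MB and was later truncated
// down to 1 MB may still have stripe objects 0..9 on disk; folding
// max_size_ever into `to` below makes purge_stray remove all
// (40 MB + 4 MB - 1) / 4 MB = 10 objects rather than only those covering the
// current 1 MB size.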
+ to = MAX(in->inode.max_size_ever, to); if (to && period) { uint64_t num = (to + period - 1) / period; dout(10) << "purge_stray 0~" << to << " objects 0~" << num diff --git a/src/mds/MDCache.h b/src/mds/MDCache.h index d8f2a9486fb..416c6454292 100644 --- a/src/mds/MDCache.h +++ b/src/mds/MDCache.h @@ -870,7 +870,6 @@ public: public: elist<CDentry*> delayed_eval_stray; - void scan_stray_dir(); void eval_stray(CDentry *dn, bool delay=false); void eval_remote(CDentry *dn); @@ -884,11 +883,13 @@ public: eval_stray(dn, delay); } protected: + void scan_stray_dir(dirfrag_t next=dirfrag_t()); void fetch_backtrace(inodeno_t ino, int64_t pool, bufferlist& bl, Context *fin); void purge_stray(CDentry *dn); void _purge_stray_purged(CDentry *dn, int r=0); void _purge_stray_logged(CDentry *dn, version_t pdv, LogSegment *ls); void _purge_stray_logged_truncate(CDentry *dn, LogSegment *ls); + friend class C_MDC_RetryScanStray; friend class C_MDC_FetchedBacktrace; friend class C_MDC_PurgeStrayLogged; friend class C_MDC_PurgeStrayLoggedTruncate; diff --git a/src/mds/MDS.cc b/src/mds/MDS.cc index c2e0bbbe369..83722274981 100644 --- a/src/mds/MDS.cc +++ b/src/mds/MDS.cc @@ -1525,7 +1525,6 @@ void MDS::active_start() mdcache->open_root(); mdcache->clean_open_file_lists(); - mdcache->scan_stray_dir(); mdcache->export_remaining_imported_caps(); finish_contexts(g_ceph_context, waiting_for_replay); // kick waiters finish_contexts(g_ceph_context, waiting_for_active); // kick waiters diff --git a/src/mds/Server.cc b/src/mds/Server.cc index 869f3773441..41862847e27 100644 --- a/src/mds/Server.cc +++ b/src/mds/Server.cc @@ -3086,6 +3086,7 @@ void Server::handle_client_file_readlock(MDRequest *mdr) checking_lock.length = req->head.args.filelock_change.length; checking_lock.client = req->get_orig_source().num(); checking_lock.pid = req->head.args.filelock_change.pid; + checking_lock.pid_namespace = req->head.args.filelock_change.pid_namespace; checking_lock.type = req->head.args.filelock_change.type; // get the appropriate lock state diff --git a/src/mds/flock.h b/src/mds/flock.h index ae93d1660f0..b767fe58507 100644 --- a/src/mds/flock.h +++ b/src/mds/flock.h @@ -12,7 +12,7 @@ inline ostream& operator<<(ostream& out, ceph_filelock& l) { out << "start: " << l.start << ", length: " << l.length << ", client: " << l.client << ", pid: " << l.pid - << ", type: " << (int)l.type + << ", pid_ns: " << l.pid_namespace << ", type: " << (int)l.type << std::endl; return out; } diff --git a/src/mds/mdstypes.cc b/src/mds/mdstypes.cc index 6886786f27e..362f74774c4 100644 --- a/src/mds/mdstypes.cc +++ b/src/mds/mdstypes.cc @@ -204,7 +204,7 @@ ostream& operator<<(ostream& out, const client_writeable_range_t& r) */ void inode_t::encode(bufferlist &bl) const { - ENCODE_START(7, 6, bl); + ENCODE_START(8, 6, bl); ::encode(ino, bl); ::encode(rdev, bl); @@ -238,6 +238,7 @@ void inode_t::encode(bufferlist &bl) const ::encode(xattr_version, bl); ::encode(backtrace_version, bl); ::encode(old_pools, bl); + ::encode(max_size_ever, bl); ENCODE_FINISH(bl); } @@ -294,6 +295,8 @@ void inode_t::decode(bufferlist::iterator &p) ::decode(backtrace_version, p); if (struct_v >= 7) ::decode(old_pools, p); + if (struct_v >= 8) + ::decode(max_size_ever, p); DECODE_FINISH(p); } diff --git a/src/mds/mdstypes.h b/src/mds/mdstypes.h index 2a3874818b7..bd53c85b48d 100644 --- a/src/mds/mdstypes.h +++ b/src/mds/mdstypes.h @@ -329,6 +329,7 @@ struct inode_t { ceph_file_layout layout; vector <int64_t> old_pools; uint64_t size; // on directory, # dentries + uint64_t 
max_size_ever; // max size the file has ever been uint32_t truncate_seq; uint64_t truncate_size, truncate_from; uint32_t truncate_pending; @@ -353,7 +354,8 @@ struct inode_t { inode_t() : ino(0), rdev(0), mode(0), uid(0), gid(0), nlink(0), anchored(false), - size(0), truncate_seq(0), truncate_size(0), truncate_from(0), + size(0), max_size_ever(0), + truncate_seq(0), truncate_size(0), truncate_from(0), truncate_pending(0), time_warp_seq(0), version(0), file_data_version(0), xattr_version(0), backtrace_version(0) { @@ -369,6 +371,8 @@ struct inode_t { bool is_truncating() const { return (truncate_pending > 0); } void truncate(uint64_t old_size, uint64_t new_size) { assert(new_size < old_size); + if (old_size > max_size_ever) + max_size_ever = old_size; truncate_from = old_size; size = new_size; rstat.rbytes = new_size; diff --git a/src/mon/Monitor.cc b/src/mon/Monitor.cc index 3fe658d9623..d8c90bc3d76 100644 --- a/src/mon/Monitor.cc +++ b/src/mon/Monitor.cc @@ -2583,10 +2583,10 @@ bool Monitor::_ms_dispatch(Message *m) // and considering that we are creating a new session it is safe to // assume that the sender hasn't authenticated yet, so we have no way // of assessing whether we should handle it or not. - if (!src_is_mon && m->get_type() != CEPH_MSG_AUTH) { + if (!src_is_mon && (m->get_type() != CEPH_MSG_AUTH && + m->get_type() != CEPH_MSG_MON_GET_MAP)) { dout(1) << __func__ << " dropping stray message " << *m << " from " << m->get_source_inst() << dendl; - m->put(); return false; } diff --git a/src/mon/PGMonitor.h b/src/mon/PGMonitor.h index 44015395e94..d29f47c1c43 100644 --- a/src/mon/PGMonitor.h +++ b/src/mon/PGMonitor.h @@ -28,6 +28,7 @@ using namespace std; #include "PaxosService.h" #include "include/types.h" #include "include/utime.h" +#include "include/histogram.h" #include "msg/Messenger.h" #include "common/config.h" #include "mon/MonitorDBStore.h" diff --git a/src/objclass/class_api.cc b/src/objclass/class_api.cc index 1ac224cdfe7..bb26c752f9b 100644 --- a/src/objclass/class_api.cc +++ b/src/objclass/class_api.cc @@ -177,7 +177,7 @@ int cls_read(cls_method_context_t hctx, int ofs, int len, int cls_get_request_origin(cls_method_context_t hctx, entity_inst_t *origin) { ReplicatedPG::OpContext **pctx = static_cast<ReplicatedPG::OpContext **>(hctx); - *origin = (*pctx)->op->request->get_orig_source_inst(); + *origin = (*pctx)->op->get_req()->get_orig_source_inst(); return 0; } diff --git a/src/os/FileStore.cc b/src/os/FileStore.cc index 514ff022bee..20afde9a0dc 100644 --- a/src/os/FileStore.cc +++ b/src/os/FileStore.cc @@ -201,7 +201,9 @@ int FileStore::lfn_open(coll_t cid, IndexedPath *path, Index *index) { - assert(get_allow_sharded_objects() || oid.shard_id == ghobject_t::NO_SHARD); + assert(get_allow_sharded_objects() || + ( oid.shard_id == ghobject_t::NO_SHARD && + oid.generation == ghobject_t::NO_GEN )); assert(outfd); int flags = O_RDWR; if (create) @@ -3464,6 +3466,8 @@ int FileStore::getattrs(coll_t cid, const ghobject_t& oid, map<string,bufferptr> dout(10) << __func__ << " could not get omap_attrs r = " << r << dendl; goto out; } + if (r == -ENOENT) + r = 0; assert(omap_attrs.size() == omap_aset.size()); for (map<string, bufferlist>::iterator i = omap_aset.begin(); i != omap_aset.end(); @@ -3651,6 +3655,8 @@ int FileStore::_rmattrs(coll_t cid, const ghobject_t& oid, dout(10) << __func__ << " could not remove omap_attrs r = " << r << dendl; return r; } + if (r == -ENOENT) + r = 0; out: dout(10) << "rmattrs " << cid << "/" << oid << " = " << r << dendl; return r; diff --git 
a/src/os/Makefile.am b/src/os/Makefile.am index b7fef8dd209..4f12a6a3278 100644 --- a/src/os/Makefile.am +++ b/src/os/Makefile.am @@ -13,7 +13,8 @@ libos_la_SOURCES = \ os/WBThrottle.cc \ os/BtrfsFileStoreBackend.cc \ os/GenericFileStoreBackend.cc \ - os/ZFSFileStoreBackend.cc + os/ZFSFileStoreBackend.cc \ + common/TrackedOp.cc noinst_LTLIBRARIES += libos.la noinst_HEADERS += \ diff --git a/src/osd/Makefile.am b/src/osd/Makefile.am index 9d3bc1d5e47..cae02015fce 100644 --- a/src/osd/Makefile.am +++ b/src/osd/Makefile.am @@ -16,6 +16,7 @@ libosd_la_SOURCES = \ osd/Watch.cc \ osd/ClassHandler.cc \ osd/OpRequest.cc \ + common/TrackedOp.cc \ osd/SnapMapper.cc \ osd/osd_types.cc \ objclass/class_api.cc diff --git a/src/osd/OSD.cc b/src/osd/OSD.cc index 8ce11bb558c..b2aa2ebbcd2 100644 --- a/src/osd/OSD.cc +++ b/src/osd/OSD.cc @@ -907,6 +907,10 @@ OSD::OSD(CephContext *cct_, int id, Messenger *internal_messenger, Messenger *ex service(this) { monc->set_messenger(client_messenger); + op_tracker.set_complaint_and_threshold(cct->_conf->osd_op_complaint_time, + cct->_conf->osd_op_log_threshold); + op_tracker.set_history_size_and_duration(cct->_conf->osd_op_history_size, + cct->_conf->osd_op_history_duration); } OSD::~OSD() @@ -4539,7 +4543,7 @@ void OSD::do_waiters() void OSD::dispatch_op(OpRequestRef op) { - switch (op->request->get_type()) { + switch (op->get_req()->get_type()) { case MSG_OSD_PG_CREATE: handle_pg_create(op); @@ -4665,7 +4669,7 @@ void OSD::_dispatch(Message *m) default: { - OpRequestRef op = op_tracker.create_request(m); + OpRequestRef op = op_tracker.create_request<OpRequest>(m); op->mark_event("waiting_for_osdmap"); // no map? starting up? if (!osdmap) { @@ -5711,9 +5715,9 @@ bool OSD::require_mon_peer(Message *m) bool OSD::require_osd_peer(OpRequestRef op) { - if (!op->request->get_connection()->peer_is_osd()) { - dout(0) << "require_osd_peer received from non-osd " << op->request->get_connection()->get_peer_addr() - << " " << *op->request << dendl; + if (!op->get_req()->get_connection()->peer_is_osd()) { + dout(0) << "require_osd_peer received from non-osd " << op->get_req()->get_connection()->get_peer_addr() + << " " << *op->get_req() << dendl; return false; } return true; @@ -5725,7 +5729,7 @@ bool OSD::require_osd_peer(OpRequestRef op) */ bool OSD::require_same_or_newer_map(OpRequestRef op, epoch_t epoch) { - Message *m = op->request; + Message *m = op->get_req(); dout(15) << "require_same_or_newer_map " << epoch << " (i am " << osdmap->get_epoch() << ") " << m << dendl; assert(osd_lock.is_locked()); @@ -5837,7 +5841,7 @@ void OSD::split_pgs( */ void OSD::handle_pg_create(OpRequestRef op) { - MOSDPGCreate *m = (MOSDPGCreate*)op->request; + MOSDPGCreate *m = (MOSDPGCreate*)op->get_req(); assert(m->get_header().type == MSG_OSD_PG_CREATE); dout(10) << "handle_pg_create " << *m << dendl; @@ -5857,11 +5861,16 @@ void OSD::handle_pg_create(OpRequestRef op) } } - if (!require_mon_peer(op->request)) { - // we have to hack around require_mon_peer's interface limits - op->request = NULL; + /* we have to hack around require_mon_peer's interface limits, so + * grab an extra reference before going in. If the peer isn't + * a Monitor, the reference is put for us (and then cleared + * up automatically by our OpTracker infrastructure). Otherwise, + * we put the extra ref ourself. 
+ */ + if (!require_mon_peer(op->get_req()->get())) { return; } + op->get_req()->put(); if (!require_same_or_newer_map(op, m->epoch)) return; @@ -6166,7 +6175,7 @@ void OSD::do_infos(map<int,vector<pair<pg_notify_t, pg_interval_map_t> > >& info */ void OSD::handle_pg_notify(OpRequestRef op) { - MOSDPGNotify *m = (MOSDPGNotify*)op->request; + MOSDPGNotify *m = (MOSDPGNotify*)op->get_req(); assert(m->get_header().type == MSG_OSD_PG_NOTIFY); dout(7) << "handle_pg_notify from " << m->get_source() << dendl; @@ -6201,7 +6210,7 @@ void OSD::handle_pg_notify(OpRequestRef op) void OSD::handle_pg_log(OpRequestRef op) { - MOSDPGLog *m = (MOSDPGLog*) op->request; + MOSDPGLog *m = (MOSDPGLog*) op->get_req(); assert(m->get_header().type == MSG_OSD_PG_LOG); dout(7) << "handle_pg_log " << *m << " from " << m->get_source() << dendl; @@ -6229,7 +6238,7 @@ void OSD::handle_pg_log(OpRequestRef op) void OSD::handle_pg_info(OpRequestRef op) { - MOSDPGInfo *m = static_cast<MOSDPGInfo *>(op->request); + MOSDPGInfo *m = static_cast<MOSDPGInfo *>(op->get_req()); assert(m->get_header().type == MSG_OSD_PG_INFO); dout(7) << "handle_pg_info " << *m << " from " << m->get_source() << dendl; @@ -6262,7 +6271,7 @@ void OSD::handle_pg_info(OpRequestRef op) void OSD::handle_pg_trim(OpRequestRef op) { - MOSDPGTrim *m = (MOSDPGTrim *)op->request; + MOSDPGTrim *m = (MOSDPGTrim *)op->get_req(); assert(m->get_header().type == MSG_OSD_PG_TRIM); dout(7) << "handle_pg_trim " << *m << " from " << m->get_source() << dendl; @@ -6315,7 +6324,7 @@ void OSD::handle_pg_trim(OpRequestRef op) void OSD::handle_pg_scan(OpRequestRef op) { - MOSDPGScan *m = static_cast<MOSDPGScan*>(op->request); + MOSDPGScan *m = static_cast<MOSDPGScan*>(op->get_req()); assert(m->get_header().type == MSG_OSD_PG_SCAN); dout(10) << "handle_pg_scan " << *m << " from " << m->get_source() << dendl; @@ -6343,7 +6352,7 @@ void OSD::handle_pg_scan(OpRequestRef op) void OSD::handle_pg_backfill(OpRequestRef op) { - MOSDPGBackfill *m = static_cast<MOSDPGBackfill*>(op->request); + MOSDPGBackfill *m = static_cast<MOSDPGBackfill*>(op->get_req()); assert(m->get_header().type == MSG_OSD_PG_BACKFILL); dout(10) << "handle_pg_backfill " << *m << " from " << m->get_source() << dendl; @@ -6371,7 +6380,7 @@ void OSD::handle_pg_backfill(OpRequestRef op) void OSD::handle_pg_backfill_reserve(OpRequestRef op) { - MBackfillReserve *m = static_cast<MBackfillReserve*>(op->request); + MBackfillReserve *m = static_cast<MBackfillReserve*>(op->get_req()); assert(m->get_header().type == MSG_OSD_BACKFILL_RESERVE); if (!require_osd_peer(op)) @@ -6415,7 +6424,7 @@ void OSD::handle_pg_backfill_reserve(OpRequestRef op) void OSD::handle_pg_recovery_reserve(OpRequestRef op) { - MRecoveryReserve *m = static_cast<MRecoveryReserve*>(op->request); + MRecoveryReserve *m = static_cast<MRecoveryReserve*>(op->get_req()); assert(m->get_header().type == MSG_OSD_RECOVERY_RESERVE); if (!require_osd_peer(op)) @@ -6467,7 +6476,7 @@ void OSD::handle_pg_query(OpRequestRef op) { assert(osd_lock.is_locked()); - MOSDPGQuery *m = (MOSDPGQuery*)op->request; + MOSDPGQuery *m = (MOSDPGQuery*)op->get_req(); assert(m->get_header().type == MSG_OSD_PG_QUERY); if (!require_osd_peer(op)) @@ -6554,7 +6563,7 @@ void OSD::handle_pg_query(OpRequestRef op) void OSD::handle_pg_remove(OpRequestRef op) { - MOSDPGRemove *m = (MOSDPGRemove *)op->request; + MOSDPGRemove *m = (MOSDPGRemove *)op->get_req(); assert(m->get_header().type == MSG_OSD_PG_REMOVE); assert(osd_lock.is_locked()); @@ -6827,7 +6836,7 @@ void 
OSDService::reply_op_error(OpRequestRef op, int err) void OSDService::reply_op_error(OpRequestRef op, int err, eversion_t v, version_t uv) { - MOSDOp *m = static_cast<MOSDOp*>(op->request); + MOSDOp *m = static_cast<MOSDOp*>(op->get_req()); assert(m->get_header().type == CEPH_MSG_OSD_OP); int flags; flags = m->get_flags() & (CEPH_OSD_FLAG_ACK|CEPH_OSD_FLAG_ONDISK); @@ -6839,7 +6848,7 @@ void OSDService::reply_op_error(OpRequestRef op, int err, eversion_t v, void OSDService::handle_misdirected_op(PG *pg, OpRequestRef op) { - MOSDOp *m = static_cast<MOSDOp*>(op->request); + MOSDOp *m = static_cast<MOSDOp*>(op->get_req()); assert(m->get_header().type == CEPH_MSG_OSD_OP); if (m->get_map_epoch() < pg->info.history.same_primary_since) { @@ -6858,7 +6867,7 @@ void OSDService::handle_misdirected_op(PG *pg, OpRequestRef op) void OSD::handle_op(OpRequestRef op) { - MOSDOp *m = static_cast<MOSDOp*>(op->request); + MOSDOp *m = static_cast<MOSDOp*>(op->get_req()); assert(m->get_header().type == CEPH_MSG_OSD_OP); if (op_is_discardable(m)) { dout(10) << " discardable " << *m << dendl; @@ -6993,7 +7002,7 @@ void OSD::handle_op(OpRequestRef op) template<typename T, int MSGTYPE> void OSD::handle_replica_op(OpRequestRef op) { - T *m = static_cast<T *>(op->request); + T *m = static_cast<T *>(op->get_req()); assert(m->get_header().type == MSGTYPE); dout(10) << __func__ << *m << " epoch " << m->map_epoch << dendl; @@ -7047,24 +7056,24 @@ bool OSD::op_is_discardable(MOSDOp *op) */ void OSD::enqueue_op(PG *pg, OpRequestRef op) { - utime_t latency = ceph_clock_now(cct) - op->request->get_recv_stamp(); - dout(15) << "enqueue_op " << op << " prio " << op->request->get_priority() - << " cost " << op->request->get_cost() + utime_t latency = ceph_clock_now(cct) - op->get_req()->get_recv_stamp(); + dout(15) << "enqueue_op " << op << " prio " << op->get_req()->get_priority() + << " cost " << op->get_req()->get_cost() << " latency " << latency - << " " << *(op->request) << dendl; + << " " << *(op->get_req()) << dendl; pg->queue_op(op); } void OSD::OpWQ::_enqueue(pair<PGRef, OpRequestRef> item) { - unsigned priority = item.second->request->get_priority(); - unsigned cost = item.second->request->get_cost(); + unsigned priority = item.second->get_req()->get_priority(); + unsigned cost = item.second->get_req()->get_cost(); if (priority >= CEPH_MSG_PRIO_LOW) pqueue.enqueue_strict( - item.second->request->get_source_inst(), + item.second->get_req()->get_source_inst(), priority, item); else - pqueue.enqueue(item.second->request->get_source_inst(), + pqueue.enqueue(item.second->get_req()->get_source_inst(), priority, cost, item); osd->logger->set(l_osd_opq, pqueue.length()); } @@ -7079,14 +7088,14 @@ void OSD::OpWQ::_enqueue_front(pair<PGRef, OpRequestRef> item) pg_for_processing[&*(item.first)].pop_back(); } } - unsigned priority = item.second->request->get_priority(); - unsigned cost = item.second->request->get_cost(); + unsigned priority = item.second->get_req()->get_priority(); + unsigned cost = item.second->get_req()->get_cost(); if (priority >= CEPH_MSG_PRIO_LOW) pqueue.enqueue_strict_front( - item.second->request->get_source_inst(), + item.second->get_req()->get_source_inst(), priority, item); else - pqueue.enqueue_front(item.second->request->get_source_inst(), + pqueue.enqueue_front(item.second->get_req()->get_source_inst(), priority, cost, item); osd->logger->set(l_osd_opq, pqueue.length()); } @@ -7138,11 +7147,11 @@ void OSD::dequeue_op( PGRef pg, OpRequestRef op, ThreadPool::TPHandle &handle) { - utime_t latency = 
ceph_clock_now(cct) - op->request->get_recv_stamp(); - dout(10) << "dequeue_op " << op << " prio " << op->request->get_priority() - << " cost " << op->request->get_cost() + utime_t latency = ceph_clock_now(cct) - op->get_req()->get_recv_stamp(); + dout(10) << "dequeue_op " << op << " prio " << op->get_req()->get_priority() + << " cost " << op->get_req()->get_cost() << " latency " << latency - << " " << *(op->request) + << " " << *(op->get_req()) << " pg " << *pg << dendl; if (pg->deleting) return; @@ -7243,6 +7252,8 @@ const char** OSD::get_tracked_conf_keys() const { static const char* KEYS[] = { "osd_max_backfills", + "osd_op_complaint_time", "osd_op_log_threshold", + "osd_op_history_size", "osd_op_history_duration", NULL }; return KEYS; @@ -7255,13 +7266,23 @@ void OSD::handle_conf_change(const struct md_config_t *conf, service.local_reserver.set_max(cct->_conf->osd_max_backfills); service.remote_reserver.set_max(cct->_conf->osd_max_backfills); } + if (changed.count("osd_op_complaint_time") || + changed.count("osd_op_log_threshold")) { + op_tracker.set_complaint_and_threshold(cct->_conf->osd_op_complaint_time, + cct->_conf->osd_op_log_threshold); + } + if (changed.count("osd_op_history_size") || + changed.count("osd_op_history_duration")) { + op_tracker.set_history_size_and_duration(cct->_conf->osd_op_history_size, + cct->_conf->osd_op_history_duration); + } } // -------------------------------- int OSD::init_op_flags(OpRequestRef op) { - MOSDOp *m = static_cast<MOSDOp*>(op->request); + MOSDOp *m = static_cast<MOSDOp*>(op->get_req()); vector<OSDOp>::iterator iter; // client flags have no bearing on whether an op is a read, write, etc. diff --git a/src/osd/OpRequest.cc b/src/osd/OpRequest.cc index 1ffe3073051..2ed7a23086f 100644 --- a/src/osd/OpRequest.cc +++ b/src/osd/OpRequest.cc @@ -11,229 +11,21 @@ #include "messages/MOSDSubOp.h" #include "include/assert.h" -#define dout_subsys ceph_subsys_optracker -#undef dout_prefix -#define dout_prefix _prefix(_dout) -static ostream& _prefix(std::ostream* _dout) -{ - return *_dout << "--OSD::tracker-- "; -} OpRequest::OpRequest(Message *req, OpTracker *tracker) : - request(req), xitem(this), + TrackedOp(req, tracker), rmw_flags(0), - warn_interval_multiplier(1), - lock("OpRequest::lock"), - tracker(tracker), - hit_flag_points(0), latest_flag_point(0), - seq(0) { - received_time = request->get_recv_stamp(); - tracker->register_inflight_op(&xitem); + hit_flag_points(0), latest_flag_point(0) { if (req->get_priority() < tracker->cct->_conf->osd_client_op_priority) { // don't warn as quickly for low priority ops warn_interval_multiplier = tracker->cct->_conf->osd_recovery_op_warn_multiple; } } -void OpHistory::on_shutdown() -{ - arrived.clear(); - duration.clear(); - shutdown = true; -} - -void OpHistory::insert(utime_t now, OpRequestRef op) -{ - if (shutdown) - return; - duration.insert(make_pair(op->get_duration(), op)); - arrived.insert(make_pair(op->get_arrived(), op)); - cleanup(now); -} - -void OpHistory::cleanup(utime_t now) -{ - while (arrived.size() && - (now - arrived.begin()->first > - (double)(tracker->cct->_conf->osd_op_history_duration))) { - duration.erase(make_pair( - arrived.begin()->second->get_duration(), - arrived.begin()->second)); - arrived.erase(arrived.begin()); - } - - while (duration.size() > tracker->cct->_conf->osd_op_history_size) { - arrived.erase(make_pair( - duration.begin()->second->get_arrived(), - duration.begin()->second)); - duration.erase(duration.begin()); - } -} - -void OpHistory::dump_ops(utime_t now, Formatter 
*f) -{ - cleanup(now); - f->open_object_section("OpHistory"); - f->dump_int("num to keep", tracker->cct->_conf->osd_op_history_size); - f->dump_int("duration to keep", tracker->cct->_conf->osd_op_history_duration); - { - f->open_array_section("Ops"); - for (set<pair<utime_t, OpRequestRef> >::const_iterator i = - arrived.begin(); - i != arrived.end(); - ++i) { - f->open_object_section("Op"); - i->second->dump(now, f); - f->close_section(); - } - f->close_section(); - } - f->close_section(); -} - -void OpTracker::dump_historic_ops(Formatter *f) -{ - Mutex::Locker locker(ops_in_flight_lock); - utime_t now = ceph_clock_now(cct); - history.dump_ops(now, f); -} - -void OpTracker::dump_ops_in_flight(Formatter *f) -{ - Mutex::Locker locker(ops_in_flight_lock); - f->open_object_section("ops_in_flight"); // overall dump - f->dump_int("num_ops", ops_in_flight.size()); - f->open_array_section("ops"); // list of OpRequests - utime_t now = ceph_clock_now(cct); - for (xlist<OpRequest*>::iterator p = ops_in_flight.begin(); !p.end(); ++p) { - f->open_object_section("op"); - (*p)->dump(now, f); - f->close_section(); // this OpRequest - } - f->close_section(); // list of OpRequests - f->close_section(); // overall dump -} - -void OpTracker::register_inflight_op(xlist<OpRequest*>::item *i) -{ - Mutex::Locker locker(ops_in_flight_lock); - ops_in_flight.push_back(i); - ops_in_flight.back()->seq = seq++; -} - -void OpTracker::unregister_inflight_op(OpRequest *i) -{ - Mutex::Locker locker(ops_in_flight_lock); - assert(i->xitem.get_list() == &ops_in_flight); - utime_t now = ceph_clock_now(cct); - i->xitem.remove_myself(); - i->request->clear_data(); - history.insert(now, OpRequestRef(i)); -} - -bool OpTracker::check_ops_in_flight(std::vector<string> &warning_vector) -{ - Mutex::Locker locker(ops_in_flight_lock); - if (!ops_in_flight.size()) - return false; - - utime_t now = ceph_clock_now(cct); - utime_t too_old = now; - too_old -= cct->_conf->osd_op_complaint_time; - - utime_t oldest_secs = now - ops_in_flight.front()->received_time; - - dout(10) << "ops_in_flight.size: " << ops_in_flight.size() - << "; oldest is " << oldest_secs - << " seconds old" << dendl; - - if (oldest_secs < cct->_conf->osd_op_complaint_time) - return false; - - xlist<OpRequest*>::iterator i = ops_in_flight.begin(); - warning_vector.reserve(cct->_conf->osd_op_log_threshold + 1); - - int slow = 0; // total slow - int warned = 0; // total logged - while (!i.end() && (*i)->received_time < too_old) { - slow++; - - // exponential backoff of warning intervals - if (((*i)->received_time + - (cct->_conf->osd_op_complaint_time * - (*i)->warn_interval_multiplier)) < now) { - // will warn - if (warning_vector.empty()) - warning_vector.push_back(""); - warned++; - if (warned > cct->_conf->osd_op_log_threshold) - break; - - utime_t age = now - (*i)->received_time; - stringstream ss; - ss << "slow request " << age << " seconds old, received at " << (*i)->received_time - << ": " << *((*i)->request) << " currently " - << ((*i)->current.size() ? (*i)->current : (*i)->state_string()); - warning_vector.push_back(ss.str()); - - // only those that have been shown will backoff - (*i)->warn_interval_multiplier *= 2; - } - ++i; - } - - // only summarize if we warn about any. if everything has backed - // off, we will stay silent. 
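The check_ops_in_flight() logic being removed from OpRequest.cc here (the op tracker moves into common/TrackedOp.cc per the Makefile hunks above) warns about ops older than osd_op_complaint_time and then doubles each op's warn_interval_multiplier, so a stuck request is re-reported on an exponential schedule rather than on every pass. Roughly, under those assumptions:

    #include <cstdint>

    // Sketch of the per-op backoff: an op warns once its age exceeds
    // complaint_time * multiplier, and each warning doubles the multiplier,
    // so the same stuck op is reported at e.g. 30 s, 60 s, 120 s with a
    // 30-second complaint time, not on every health check.
    bool should_warn(double age, double complaint_time, uint32_t& multiplier)
    {
      if (age < complaint_time * multiplier)
        return false;
      multiplier *= 2;   // back off before the next report
      return true;
    }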
- if (warned > 0) { - stringstream ss; - ss << slow << " slow requests, " << warned << " included below; oldest blocked for > " - << oldest_secs << " secs"; - warning_vector[0] = ss.str(); - } - - return warning_vector.size(); -} - -void OpTracker::get_age_ms_histogram(pow2_hist_t *h) -{ - Mutex::Locker locker(ops_in_flight_lock); - - h->clear(); - - utime_t now = ceph_clock_now(NULL); - unsigned bin = 30; - uint32_t lb = 1 << (bin-1); // lower bound for this bin - int count = 0; - for (xlist<OpRequest*>::iterator i = ops_in_flight.begin(); !i.end(); ++i) { - utime_t age = now - (*i)->received_time; - uint32_t ms = (long)(age * 1000.0); - if (ms >= lb) { - count++; - continue; - } - if (count) - h->set(bin, count); - while (lb > ms) { - bin--; - lb >>= 1; - } - count = 1; - } - if (count) - h->set(bin, count); -} - -void OpRequest::dump(utime_t now, Formatter *f) const +void OpRequest::_dump(utime_t now, Formatter *f) const { Message *m = request; - stringstream name; - m->print(name); - f->dump_string("description", name.str().c_str()); // this OpRequest - f->dump_unsigned("rmw_flags", rmw_flags); - f->dump_stream("received_at") << received_time; - f->dump_float("age", now - received_time); - f->dump_float("duration", get_duration()); f->dump_string("flag_point", state_string()); if (m->get_orig_source().is_client()) { f->open_object_section("client_info"); @@ -257,50 +49,11 @@ void OpRequest::dump(utime_t now, Formatter *f) const } } -void OpTracker::mark_event(OpRequest *op, const string &dest) -{ - utime_t now = ceph_clock_now(cct); - return _mark_event(op, dest, now); -} - -void OpTracker::_mark_event(OpRequest *op, const string &evt, - utime_t time) -{ - Mutex::Locker locker(ops_in_flight_lock); - dout(5) << "reqid: " << op->get_reqid() << ", seq: " << op->seq - << ", time: " << time << ", event: " << evt - << ", request: " << *op->request << dendl; -} - -void OpTracker::RemoveOnDelete::operator()(OpRequest *op) { - op->mark_event("done"); - tracker->unregister_inflight_op(op); - // Do not delete op, unregister_inflight_op took control -} - -OpRequestRef OpTracker::create_request(Message *ref) -{ - OpRequestRef retval(new OpRequest(ref, this), - RemoveOnDelete(this)); - - if (ref->get_type() == CEPH_MSG_OSD_OP) { - retval->reqid = static_cast<MOSDOp*>(ref)->get_reqid(); - } else if (ref->get_type() == MSG_OSD_SUBOP) { - retval->reqid = static_cast<MOSDSubOp*>(ref)->reqid; - } - _mark_event(retval.get(), "header_read", ref->get_recv_stamp()); - _mark_event(retval.get(), "throttled", ref->get_throttle_stamp()); - _mark_event(retval.get(), "all_read", ref->get_recv_complete_stamp()); - _mark_event(retval.get(), "dispatched", ref->get_dispatch_stamp()); - return retval; -} - -void OpRequest::mark_event(const string &event) +void OpRequest::init_from_message() { - utime_t now = ceph_clock_now(tracker->cct); - { - Mutex::Locker l(lock); - events.push_back(make_pair(now, event)); + if (request->get_type() == CEPH_MSG_OSD_OP) { + reqid = static_cast<MOSDOp*>(request)->get_reqid(); + } else if (request->get_type() == MSG_OSD_SUBOP) { + reqid = static_cast<MOSDSubOp*>(request)->reqid; } - tracker->mark_event(this, event); } diff --git a/src/osd/OpRequest.h b/src/osd/OpRequest.h index 9634be87846..87571f58787 100644 --- a/src/osd/OpRequest.h +++ b/src/osd/OpRequest.h @@ -25,87 +25,12 @@ #include "common/TrackedOp.h" #include "osd/osd_types.h" -struct OpRequest; -class OpTracker; -typedef std::tr1::shared_ptr<OpRequest> OpRequestRef; -class OpHistory { - set<pair<utime_t, OpRequestRef> > 
arrived; - set<pair<double, OpRequestRef> > duration; - void cleanup(utime_t now); - bool shutdown; - OpTracker *tracker; - -public: - OpHistory(OpTracker *tracker_) : shutdown(false), tracker(tracker_) {} - ~OpHistory() { - assert(arrived.empty()); - assert(duration.empty()); - } - void insert(utime_t now, OpRequestRef op); - void dump_ops(utime_t now, Formatter *f); - void on_shutdown(); -}; - -class OpTracker { - class RemoveOnDelete { - OpTracker *tracker; - public: - RemoveOnDelete(OpTracker *tracker) : tracker(tracker) {} - void operator()(OpRequest *op); - }; - friend class RemoveOnDelete; - friend class OpRequest; - friend class OpHistory; - uint64_t seq; - Mutex ops_in_flight_lock; - xlist<OpRequest *> ops_in_flight; - OpHistory history; - -protected: - CephContext *cct; - -public: - OpTracker(CephContext *cct_) : seq(0), ops_in_flight_lock("OpTracker mutex"), history(this), cct(cct_) {} - void dump_ops_in_flight(Formatter *f); - void dump_historic_ops(Formatter *f); - void register_inflight_op(xlist<OpRequest*>::item *i); - void unregister_inflight_op(OpRequest *i); - - void get_age_ms_histogram(pow2_hist_t *h); - - /** - * Look for Ops which are too old, and insert warning - * strings for each Op that is too old. - * - * @param warning_strings A vector<string> reference which is filled - * with a warning string for each old Op. - * @return True if there are any Ops to warn on, false otherwise. - */ - bool check_ops_in_flight(std::vector<string> &warning_strings); - void mark_event(OpRequest *op, const string &evt); - void _mark_event(OpRequest *op, const string &evt, utime_t now); - OpRequestRef create_request(Message *req); - void on_shutdown() { - Mutex::Locker l(ops_in_flight_lock); - history.on_shutdown(); - } - ~OpTracker() { - assert(ops_in_flight.empty()); - } -}; - /** * The OpRequest takes in a Message* and takes over a single reference * to it, which it puts() when destroyed. - * OpRequest is itself ref-counted. The expectation is that you get a Message - * you want to track, create an OpRequest with it, and then pass around that OpRequest - * the way you used to pass around the Message. */ struct OpRequest : public TrackedOp { friend class OpTracker; - friend class OpHistory; - Message *request; - xlist<OpRequest*>::item xitem; // rmw flags int rmw_flags; @@ -134,28 +59,12 @@ struct OpRequest : public TrackedOp { void set_class_write() { rmw_flags |= CEPH_OSD_RMW_FLAG_CLASS_WRITE; } void set_pg_op() { rmw_flags |= CEPH_OSD_RMW_FLAG_PGOP; } - utime_t received_time; - uint32_t warn_interval_multiplier; - utime_t get_arrived() const { - return received_time; - } - double get_duration() const { - return events.size() ? 
- (events.rbegin()->first - received_time) : - 0.0; - } - - void dump(utime_t now, Formatter *f) const; + void _dump(utime_t now, Formatter *f) const; private: - list<pair<utime_t, string> > events; - string current; - Mutex lock; - OpTracker *tracker; osd_reqid_t reqid; uint8_t hit_flag_points; uint8_t latest_flag_point; - uint64_t seq; static const uint8_t flag_queued_for_pg=1 << 0; static const uint8_t flag_reached_pg = 1 << 1; static const uint8_t flag_delayed = 1 << 2; @@ -164,12 +73,8 @@ private: static const uint8_t flag_commit_sent = 1 << 5; OpRequest(Message *req, OpTracker *tracker); -public: - ~OpRequest() { - assert(request); - request->put(); - } +public: bool been_queued_for_pg() { return hit_flag_points & flag_queued_for_pg; } bool been_reached_pg() { return hit_flag_points & flag_reached_pg; } bool been_delayed() { return hit_flag_points & flag_delayed; } @@ -233,10 +138,15 @@ public: latest_flag_point = flag_commit_sent; } - void mark_event(const string &event); osd_reqid_t get_reqid() const { return reqid; } + + void init_from_message(); + + typedef std::tr1::shared_ptr<OpRequest> Ref; }; +typedef OpRequest::Ref OpRequestRef; + #endif /* OPREQUEST_H_ */ diff --git a/src/osd/PG.cc b/src/osd/PG.cc index 1d9ed5f6a31..8f7d3ccb684 100644 --- a/src/osd/PG.cc +++ b/src/osd/PG.cc @@ -1332,10 +1332,10 @@ void PG::do_pending_flush() bool PG::op_has_sufficient_caps(OpRequestRef op) { // only check MOSDOp - if (op->request->get_type() != CEPH_MSG_OSD_OP) + if (op->get_req()->get_type() != CEPH_MSG_OSD_OP) return true; - MOSDOp *req = static_cast<MOSDOp*>(op->request); + MOSDOp *req = static_cast<MOSDOp*>(op->get_req()); OSD::Session *session = (OSD::Session *)req->get_connection()->get_priv(); if (!session) { @@ -1417,7 +1417,7 @@ void PG::replay_queued_ops() c = p->first; } dout(10) << "activate replay " << p->first << " " - << *p->second->request << dendl; + << *p->second->get_req() << dendl; replay.push_back(p->second); } replay_queue.clear(); @@ -2618,7 +2618,7 @@ void PG::unreg_next_scrub() void PG::sub_op_scrub_map(OpRequestRef op) { - MOSDSubOp *m = static_cast<MOSDSubOp *>(op->request); + MOSDSubOp *m = static_cast<MOSDSubOp *>(op->get_req()); assert(m->get_header().type == MSG_OSD_SUBOP); dout(7) << "sub_op_scrub_map" << dendl; @@ -2804,7 +2804,7 @@ void PG::_request_scrub_map(int replica, eversion_t version, void PG::sub_op_scrub_reserve(OpRequestRef op) { - MOSDSubOp *m = static_cast<MOSDSubOp*>(op->request); + MOSDSubOp *m = static_cast<MOSDSubOp*>(op->get_req()); assert(m->get_header().type == MSG_OSD_SUBOP); dout(7) << "sub_op_scrub_reserve" << dendl; @@ -2824,7 +2824,7 @@ void PG::sub_op_scrub_reserve(OpRequestRef op) void PG::sub_op_scrub_reserve_reply(OpRequestRef op) { - MOSDSubOpReply *reply = static_cast<MOSDSubOpReply*>(op->request); + MOSDSubOpReply *reply = static_cast<MOSDSubOpReply*>(op->get_req()); assert(reply->get_header().type == MSG_OSD_SUBOPREPLY); dout(7) << "sub_op_scrub_reserve_reply" << dendl; @@ -2857,7 +2857,7 @@ void PG::sub_op_scrub_reserve_reply(OpRequestRef op) void PG::sub_op_scrub_unreserve(OpRequestRef op) { - assert(op->request->get_header().type == MSG_OSD_SUBOP); + assert(op->get_req()->get_header().type == MSG_OSD_SUBOP); dout(7) << "sub_op_scrub_unreserve" << dendl; op->mark_started(); @@ -2869,7 +2869,7 @@ void PG::sub_op_scrub_stop(OpRequestRef op) { op->mark_started(); - MOSDSubOp *m = static_cast<MOSDSubOp*>(op->request); + MOSDSubOp *m = static_cast<MOSDSubOp*>(op->get_req()); assert(m->get_header().type == MSG_OSD_SUBOP); dout(7) 
<< "sub_op_scrub_stop" << dendl; @@ -4732,7 +4732,7 @@ ostream& operator<<(ostream& out, const PG& pg) bool PG::can_discard_op(OpRequestRef op) { - MOSDOp *m = static_cast<MOSDOp*>(op->request); + MOSDOp *m = static_cast<MOSDOp*>(op->get_req()); if (OSD::op_is_discardable(m)) { dout(20) << " discard " << *m << dendl; return true; @@ -4760,7 +4760,7 @@ bool PG::can_discard_op(OpRequestRef op) template<typename T, int MSGTYPE> bool PG::can_discard_replica_op(OpRequestRef op) { - T *m = static_cast<T *>(op->request); + T *m = static_cast<T *>(op->get_req()); assert(m->get_header().type == MSGTYPE); // same pg? @@ -4776,7 +4776,7 @@ bool PG::can_discard_replica_op(OpRequestRef op) bool PG::can_discard_scan(OpRequestRef op) { - MOSDPGScan *m = static_cast<MOSDPGScan *>(op->request); + MOSDPGScan *m = static_cast<MOSDPGScan *>(op->get_req()); assert(m->get_header().type == MSG_OSD_PG_SCAN); if (old_peering_msg(m->map_epoch, m->query_epoch)) { @@ -4788,7 +4788,7 @@ bool PG::can_discard_scan(OpRequestRef op) bool PG::can_discard_backfill(OpRequestRef op) { - MOSDPGBackfill *m = static_cast<MOSDPGBackfill *>(op->request); + MOSDPGBackfill *m = static_cast<MOSDPGBackfill *>(op->get_req()); assert(m->get_header().type == MSG_OSD_PG_BACKFILL); if (old_peering_msg(m->map_epoch, m->query_epoch)) { @@ -4802,7 +4802,7 @@ bool PG::can_discard_backfill(OpRequestRef op) bool PG::can_discard_request(OpRequestRef op) { - switch (op->request->get_type()) { + switch (op->get_req()->get_type()) { case CEPH_MSG_OSD_OP: return can_discard_op(op); case MSG_OSD_SUBOP: @@ -4827,55 +4827,55 @@ bool PG::can_discard_request(OpRequestRef op) bool PG::split_request(OpRequestRef op, unsigned match, unsigned bits) { unsigned mask = ~((~0)<<bits); - switch (op->request->get_type()) { + switch (op->get_req()->get_type()) { case CEPH_MSG_OSD_OP: - return (static_cast<MOSDOp*>(op->request)->get_pg().m_seed & mask) == match; + return (static_cast<MOSDOp*>(op->get_req())->get_pg().m_seed & mask) == match; } return false; } bool PG::op_must_wait_for_map(OSDMapRef curmap, OpRequestRef op) { - switch (op->request->get_type()) { + switch (op->get_req()->get_type()) { case CEPH_MSG_OSD_OP: return !have_same_or_newer_map( curmap, - static_cast<MOSDOp*>(op->request)->get_map_epoch()); + static_cast<MOSDOp*>(op->get_req())->get_map_epoch()); case MSG_OSD_SUBOP: return !have_same_or_newer_map( curmap, - static_cast<MOSDSubOp*>(op->request)->map_epoch); + static_cast<MOSDSubOp*>(op->get_req())->map_epoch); case MSG_OSD_SUBOPREPLY: return !have_same_or_newer_map( curmap, - static_cast<MOSDSubOpReply*>(op->request)->map_epoch); + static_cast<MOSDSubOpReply*>(op->get_req())->map_epoch); case MSG_OSD_PG_SCAN: return !have_same_or_newer_map( curmap, - static_cast<MOSDPGScan*>(op->request)->map_epoch); + static_cast<MOSDPGScan*>(op->get_req())->map_epoch); case MSG_OSD_PG_BACKFILL: return !have_same_or_newer_map( curmap, - static_cast<MOSDPGBackfill*>(op->request)->map_epoch); + static_cast<MOSDPGBackfill*>(op->get_req())->map_epoch); case MSG_OSD_PG_PUSH: return !have_same_or_newer_map( curmap, - static_cast<MOSDPGPush*>(op->request)->map_epoch); + static_cast<MOSDPGPush*>(op->get_req())->map_epoch); case MSG_OSD_PG_PULL: return !have_same_or_newer_map( curmap, - static_cast<MOSDPGPull*>(op->request)->map_epoch); + static_cast<MOSDPGPull*>(op->get_req())->map_epoch); case MSG_OSD_PG_PUSH_REPLY: return !have_same_or_newer_map( curmap, - static_cast<MOSDPGPushReply*>(op->request)->map_epoch); + 
static_cast<MOSDPGPushReply*>(op->get_req())->map_epoch); } assert(0); return false; diff --git a/src/osd/ReplicatedBackend.cc b/src/osd/ReplicatedBackend.cc index ddc39d70372..9529e15ae77 100644 --- a/src/osd/ReplicatedBackend.cc +++ b/src/osd/ReplicatedBackend.cc @@ -96,7 +96,7 @@ bool ReplicatedBackend::handle_message( ) { dout(10) << __func__ << ": " << op << dendl; - switch (op->request->get_type()) { + switch (op->get_req()->get_type()) { case MSG_OSD_PG_PUSH: // TODOXXX: needs to be active possibly do_push(op); @@ -111,7 +111,7 @@ bool ReplicatedBackend::handle_message( return true; case MSG_OSD_SUBOP: { - MOSDSubOp *m = static_cast<MOSDSubOp*>(op->request); + MOSDSubOp *m = static_cast<MOSDSubOp*>(op->get_req()); if (m->ops.size() >= 1) { OSDOp *first = &m->ops[0]; switch (first->op.op) { @@ -130,7 +130,7 @@ bool ReplicatedBackend::handle_message( } case MSG_OSD_SUBOPREPLY: { - MOSDSubOpReply *r = static_cast<MOSDSubOpReply*>(op->request); + MOSDSubOpReply *r = static_cast<MOSDSubOpReply*>(op->get_req()); if (r->ops.size() >= 1) { OSDOp &first = r->ops[0]; switch (first.op.op) { diff --git a/src/osd/ReplicatedPG.cc b/src/osd/ReplicatedPG.cc index d02a9c9cc48..f466eb8ccdc 100644 --- a/src/osd/ReplicatedPG.cc +++ b/src/osd/ReplicatedPG.cc @@ -86,9 +86,9 @@ static void log_subop_stats( { utime_t now = ceph_clock_now(g_ceph_context); utime_t latency = now; - latency -= op->request->get_recv_stamp(); + latency -= op->get_req()->get_recv_stamp(); - uint64_t inb = op->request->get_data().length(); + uint64_t inb = op->get_req()->get_data().length(); osd->logger->inc(l_osd_sop); @@ -583,7 +583,7 @@ bool ReplicatedPG::pg_op_must_wait(MOSDOp *op) void ReplicatedPG::do_pg_op(OpRequestRef op) { - MOSDOp *m = static_cast<MOSDOp *>(op->request); + MOSDOp *m = static_cast<MOSDOp *>(op->get_req()); assert(m->get_header().type == CEPH_MSG_OSD_OP); dout(10) << "do_pg_op " << *m << dendl; @@ -828,7 +828,7 @@ void ReplicatedPG::do_request( if (pgbackend->handle_message(op)) return; - switch (op->request->get_type()) { + switch (op->get_req()->get_type()) { case CEPH_MSG_OSD_OP: if (is_replay() || !is_active()) { dout(20) << " replay, waiting for active on " << op << dendl; @@ -866,7 +866,7 @@ void ReplicatedPG::do_request( */ void ReplicatedPG::do_op(OpRequestRef op) { - MOSDOp *m = static_cast<MOSDOp*>(op->request); + MOSDOp *m = static_cast<MOSDOp*>(op->get_req()); assert(m->get_header().type == CEPH_MSG_OSD_OP); if (op->includes_pg_op()) { if (pg_op_must_wait(m)) { @@ -1172,7 +1172,7 @@ bool ReplicatedPG::maybe_handle_cache(OpRequestRef op, ObjectContextRef obc, void ReplicatedPG::do_cache_redirect(OpRequestRef op, ObjectContextRef obc) { - MOSDOp *m = static_cast<MOSDOp*>(op->request); + MOSDOp *m = static_cast<MOSDOp*>(op->get_req()); int flags = m->get_flags() & (CEPH_OSD_FLAG_ACK|CEPH_OSD_FLAG_ONDISK); MOSDOpReply *reply = new MOSDOpReply(m, -ENOENT, get_osdmap()->get_epoch(), flags); @@ -1188,7 +1188,7 @@ void ReplicatedPG::execute_ctx(OpContext *ctx) { dout(10) << __func__ << " " << ctx << dendl; OpRequestRef op = ctx->op; - MOSDOp *m = static_cast<MOSDOp*>(op->request); + MOSDOp *m = static_cast<MOSDOp*>(op->get_req()); ObjectContextRef obc = ctx->obc; const hobject_t& soid = obc->obs.oi.soid; map<hobject_t,ObjectContextRef>& src_obc = ctx->src_obc; @@ -1412,16 +1412,16 @@ void ReplicatedPG::reply_ctx(OpContext *ctx, int r, eversion_t v, version_t uv) void ReplicatedPG::log_op_stats(OpContext *ctx) { OpRequestRef op = ctx->op; - MOSDOp *m = static_cast<MOSDOp*>(op->request); + MOSDOp *m = 
static_cast<MOSDOp*>(op->get_req()); utime_t now = ceph_clock_now(cct); utime_t latency = now; - latency -= ctx->op->request->get_recv_stamp(); + latency -= ctx->op->get_req()->get_recv_stamp(); utime_t rlatency; if (ctx->readable_stamp != utime_t()) { rlatency = ctx->readable_stamp; - rlatency -= ctx->op->request->get_recv_stamp(); + rlatency -= ctx->op->get_req()->get_recv_stamp(); } uint64_t inb = ctx->bytes_written; @@ -1460,10 +1460,10 @@ void ReplicatedPG::log_op_stats(OpContext *ctx) void ReplicatedPG::do_sub_op(OpRequestRef op) { - MOSDSubOp *m = static_cast<MOSDSubOp*>(op->request); + MOSDSubOp *m = static_cast<MOSDSubOp*>(op->get_req()); assert(have_same_or_newer_map(m->map_epoch)); assert(m->get_header().type == MSG_OSD_SUBOP); - dout(15) << "do_sub_op " << *op->request << dendl; + dout(15) << "do_sub_op " << *op->get_req() << dendl; OSDOp *first = NULL; if (m->ops.size() >= 1) { @@ -1501,7 +1501,7 @@ void ReplicatedPG::do_sub_op(OpRequestRef op) void ReplicatedPG::do_sub_op_reply(OpRequestRef op) { - MOSDSubOpReply *r = static_cast<MOSDSubOpReply *>(op->request); + MOSDSubOpReply *r = static_cast<MOSDSubOpReply *>(op->get_req()); assert(r->get_header().type == MSG_OSD_SUBOPREPLY); if (r->ops.size() >= 1) { OSDOp& first = r->ops[0]; @@ -1519,7 +1519,7 @@ void ReplicatedPG::do_scan( OpRequestRef op, ThreadPool::TPHandle &handle) { - MOSDPGScan *m = static_cast<MOSDPGScan*>(op->request); + MOSDPGScan *m = static_cast<MOSDPGScan*>(op->get_req()); assert(m->get_header().type == MSG_OSD_PG_SCAN); dout(10) << "do_scan " << *m << dendl; @@ -1594,7 +1594,7 @@ void ReplicatedPG::do_scan( void ReplicatedBackend::_do_push(OpRequestRef op) { - MOSDPGPush *m = static_cast<MOSDPGPush *>(op->request); + MOSDPGPush *m = static_cast<MOSDPGPush *>(op->get_req()); assert(m->get_header().type == MSG_OSD_PG_PUSH); int from = m->get_source().num(); @@ -1646,7 +1646,7 @@ struct C_ReplicatedBackend_OnPullComplete : GenContext<ThreadPool::TPHandle&> { void ReplicatedBackend::_do_pull_response(OpRequestRef op) { - MOSDPGPush *m = static_cast<MOSDPGPush *>(op->request); + MOSDPGPush *m = static_cast<MOSDPGPush *>(op->get_req()); assert(m->get_header().type == MSG_OSD_PG_PUSH); int from = m->get_source().num(); @@ -1691,7 +1691,7 @@ void ReplicatedBackend::_do_pull_response(OpRequestRef op) void ReplicatedBackend::do_pull(OpRequestRef op) { - MOSDPGPull *m = static_cast<MOSDPGPull *>(op->request); + MOSDPGPull *m = static_cast<MOSDPGPull *>(op->get_req()); assert(m->get_header().type == MSG_OSD_PG_PULL); int from = m->get_source().num(); @@ -1707,7 +1707,7 @@ void ReplicatedBackend::do_pull(OpRequestRef op) void ReplicatedBackend::do_push_reply(OpRequestRef op) { - MOSDPGPushReply *m = static_cast<MOSDPGPushReply *>(op->request); + MOSDPGPushReply *m = static_cast<MOSDPGPushReply *>(op->get_req()); assert(m->get_header().type == MSG_OSD_PG_PUSH_REPLY); int from = m->get_source().num(); @@ -1728,7 +1728,7 @@ void ReplicatedBackend::do_push_reply(OpRequestRef op) void ReplicatedPG::do_backfill(OpRequestRef op) { - MOSDPGBackfill *m = static_cast<MOSDPGBackfill*>(op->request); + MOSDPGBackfill *m = static_cast<MOSDPGBackfill*>(op->get_req()); assert(m->get_header().type == MSG_OSD_PG_BACKFILL); dout(10) << "do_backfill " << *m << dendl; @@ -2392,7 +2392,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops) ObjectContextRef src_obc; if (ceph_osd_op_type_multi(op.op)) { - MOSDOp *m = static_cast<MOSDOp *>(ctx->op->request); + MOSDOp *m = static_cast<MOSDOp *>(ctx->op->get_req()); 
object_locator_t src_oloc; get_src_oloc(soid.oid, m->get_object_locator(), src_oloc); hobject_t src_oid(osd_op.soid, src_oloc.key, soid.hash, @@ -3190,10 +3190,10 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops) << " oi.version=" << oi.version.version << " ctx->at_version=" << ctx->at_version << dendl; dout(10) << "watch: oi.user_version=" << oi.user_version<< dendl; dout(10) << "watch: peer_addr=" - << ctx->op->request->get_connection()->get_peer_addr() << dendl; + << ctx->op->get_req()->get_connection()->get_peer_addr() << dendl; watch_info_t w(cookie, cct->_conf->osd_client_watch_timeout, - ctx->op->request->get_connection()->get_peer_addr()); + ctx->op->get_req()->get_connection()->get_peer_addr()); if (do_watch) { if (oi.watchers.count(make_pair(cookie, entity))) { dout(10) << " found existing watch " << w << " by " << entity << dendl; @@ -4038,7 +4038,7 @@ void ReplicatedPG::add_interval_usage(interval_set<uint64_t>& s, object_stat_sum void ReplicatedPG::do_osd_op_effects(OpContext *ctx) { - ConnectionRef conn(ctx->op->request->get_connection()); + ConnectionRef conn(ctx->op->get_req()->get_connection()); boost::intrusive_ptr<OSD::Session> session( (OSD::Session *)conn->get_priv()); session->put(); // get_priv() takes a ref, and so does the intrusive_ptr @@ -4412,14 +4412,13 @@ void ReplicatedPG::process_copy_chunk(hobject_t oid, tid_t tid, int r) dout(10) << __func__ << " fetching more" << dendl; _copy_some(obc, cop); return; - } else { - _build_finish_copy_transaction(cop, results.get<3>()); - results.get<1>() = cop->temp_cursor.data_offset; } + _build_finish_copy_transaction(cop, results.get<3>()); + results.get<1>() = cop->temp_cursor.data_offset; } dout(20) << __func__ << " complete; committing" << dendl; - results.get<0>() = cop->rval; + results.get<0>() = r; cop->cb->complete(results); copy_ops.erase(obc->obs.oi.soid); @@ -4698,7 +4697,7 @@ void ReplicatedPG::eval_repop(RepGather *repop) { MOSDOp *m = NULL; if (repop->ctx->op) - m = static_cast<MOSDOp *>(repop->ctx->op->request); + m = static_cast<MOSDOp *>(repop->ctx->op->get_req()); if (m) dout(10) << "eval_repop " << *repop @@ -4774,7 +4773,7 @@ void ReplicatedPG::eval_repop(RepGather *repop) for (list<OpRequestRef>::iterator i = waiting_for_ack[repop->v].begin(); i != waiting_for_ack[repop->v].end(); ++i) { - MOSDOp *m = (MOSDOp*)(*i)->request; + MOSDOp *m = (MOSDOp*)(*i)->get_req(); MOSDOpReply *reply = new MOSDOpReply(m, 0, get_osdmap()->get_epoch(), 0); reply->set_reply_versions(repop->ctx->at_version, repop->ctx->user_at_version); @@ -4870,7 +4869,7 @@ void ReplicatedPG::issue_repop(RepGather *repop, utime_t now) get_osdmap()->get_epoch(), repop->rep_tid, repop->ctx->at_version); if (ctx->op && - ((static_cast<MOSDOp *>(ctx->op->request))->get_flags() & CEPH_OSD_FLAG_PARALLELEXEC)) { + ((static_cast<MOSDOp *>(ctx->op->get_req()))->get_flags() & CEPH_OSD_FLAG_PARALLELEXEC)) { // replicate original op for parallel execution on replica assert(0 == "broken implementation, do not use"); } @@ -4911,7 +4910,7 @@ ReplicatedPG::RepGather *ReplicatedPG::new_repop(OpContext *ctx, ObjectContextRe tid_t rep_tid) { if (ctx->op) - dout(10) << "new_repop rep_tid " << rep_tid << " on " << *ctx->op->request << dendl; + dout(10) << "new_repop rep_tid " << rep_tid << " on " << *ctx->op->get_req() << dendl; else dout(10) << "new_repop rep_tid " << rep_tid << " (no op)" << dendl; @@ -4942,7 +4941,7 @@ void ReplicatedPG::repop_ack(RepGather *repop, int result, int ack_type, MOSDOp *m = NULL; if (repop->ctx->op) - m = 
static_cast<MOSDOp *>(repop->ctx->op->request); + m = static_cast<MOSDOp *>(repop->ctx->op->get_req()); if (m) dout(7) << "repop_ack rep_tid " << repop->rep_tid << " op " << *m @@ -5488,7 +5487,7 @@ void ReplicatedPG::put_snapset_context(SnapSetContext *ssc) void ReplicatedPG::sub_op_modify(OpRequestRef op) { - MOSDSubOp *m = static_cast<MOSDSubOp*>(op->request); + MOSDSubOp *m = static_cast<MOSDSubOp*>(op->get_req()); assert(m->get_header().type == MSG_OSD_SUBOP); const hobject_t& soid = m->poid; @@ -5607,8 +5606,8 @@ void ReplicatedPG::sub_op_modify_applied(RepModify *rm) rm->applied = true; if (!pg_has_reset_since(rm->epoch_started)) { - dout(10) << "sub_op_modify_applied on " << rm << " op " << *rm->op->request << dendl; - MOSDSubOp *m = static_cast<MOSDSubOp*>(rm->op->request); + dout(10) << "sub_op_modify_applied on " << rm << " op " << *rm->op->get_req() << dendl; + MOSDSubOp *m = static_cast<MOSDSubOp*>(rm->op->get_req()); assert(m->get_header().type == MSG_OSD_SUBOP); if (!rm->committed) { @@ -5630,7 +5629,7 @@ void ReplicatedPG::sub_op_modify_applied(RepModify *rm) } } } else { - dout(10) << "sub_op_modify_applied on " << rm << " op " << *rm->op->request + dout(10) << "sub_op_modify_applied on " << rm << " op " << *rm->op->get_req() << " from epoch " << rm->epoch_started << " < last_peering_reset " << last_peering_reset << dendl; } @@ -5652,19 +5651,19 @@ void ReplicatedPG::sub_op_modify_commit(RepModify *rm) if (!pg_has_reset_since(rm->epoch_started)) { // send commit. - dout(10) << "sub_op_modify_commit on op " << *rm->op->request + dout(10) << "sub_op_modify_commit on op " << *rm->op->get_req() << ", sending commit to osd." << rm->ackerosd << dendl; if (get_osdmap()->is_up(rm->ackerosd)) { last_complete_ondisk = rm->last_complete; - MOSDSubOpReply *commit = new MOSDSubOpReply(static_cast<MOSDSubOp*>(rm->op->request), 0, get_osdmap()->get_epoch(), CEPH_OSD_FLAG_ONDISK); + MOSDSubOpReply *commit = new MOSDSubOpReply(static_cast<MOSDSubOp*>(rm->op->get_req()), 0, get_osdmap()->get_epoch(), CEPH_OSD_FLAG_ONDISK); commit->set_last_complete_ondisk(rm->last_complete); commit->set_priority(CEPH_MSG_PRIO_HIGH); // this better match ack priority! 
osd->send_message_osd_cluster(rm->ackerosd, commit, get_osdmap()->get_epoch()); } } else { - dout(10) << "sub_op_modify_commit " << rm << " op " << *rm->op->request + dout(10) << "sub_op_modify_commit " << rm << " op " << *rm->op->get_req() << " from epoch " << rm->epoch_started << " < last_peering_reset " << last_peering_reset << dendl; } @@ -5681,7 +5680,7 @@ void ReplicatedPG::sub_op_modify_commit(RepModify *rm) void ReplicatedPG::sub_op_modify_reply(OpRequestRef op) { - MOSDSubOpReply *r = static_cast<MOSDSubOpReply*>(op->request); + MOSDSubOpReply *r = static_cast<MOSDSubOpReply*>(op->get_req()); assert(r->get_header().type == MSG_OSD_SUBOPREPLY); op->mark_started(); @@ -6631,7 +6630,7 @@ void ReplicatedBackend::prep_push_op_blank(const hobject_t& soid, PushOp *op) void ReplicatedBackend::sub_op_push_reply(OpRequestRef op) { - MOSDSubOpReply *reply = static_cast<MOSDSubOpReply*>(op->request); + MOSDSubOpReply *reply = static_cast<MOSDSubOpReply*>(op->get_req()); const hobject_t& soid = reply->get_poid(); assert(reply->get_header().type == MSG_OSD_SUBOPREPLY); dout(10) << "sub_op_push_reply from " << reply->get_source() << " " << *reply << dendl; @@ -6644,7 +6643,7 @@ void ReplicatedBackend::sub_op_push_reply(OpRequestRef op) PushOp pop; bool more = handle_push_reply(peer, rop, &pop); if (more) - send_push_op_legacy(op->request->get_priority(), peer, pop); + send_push_op_legacy(op->get_req()->get_priority(), peer, pop); } bool ReplicatedBackend::handle_push_reply(int peer, PushReplyOp &op, PushOp *reply) @@ -6725,7 +6724,7 @@ void ReplicatedPG::finish_degraded_object(const hobject_t& oid) */ void ReplicatedBackend::sub_op_pull(OpRequestRef op) { - MOSDSubOp *m = static_cast<MOSDSubOp*>(op->request); + MOSDSubOp *m = static_cast<MOSDSubOp*>(op->get_req()); assert(m->get_header().type == MSG_OSD_SUBOP); op->mark_started(); @@ -6918,7 +6917,7 @@ void ReplicatedBackend::trim_pushed_data( void ReplicatedBackend::sub_op_push(OpRequestRef op) { op->mark_started(); - MOSDSubOp *m = static_cast<MOSDSubOp *>(op->request); + MOSDSubOp *m = static_cast<MOSDSubOp *>(op->get_req()); PushOp pop; pop.soid = m->recovery_info.soid; @@ -6950,14 +6949,14 @@ void ReplicatedBackend::sub_op_push(OpRequestRef op) C_ReplicatedBackend_OnPullComplete *c = new C_ReplicatedBackend_OnPullComplete( this, - op->request->get_priority()); + op->get_req()->get_priority()); c->to_continue.swap(to_continue); t->register_on_complete( new C_QueueInWQ( &osd->push_wq, get_parent()->bless_gencontext(c))); } - run_recovery_op(h, op->request->get_priority()); + run_recovery_op(h, op->get_req()->get_priority()); } else { PushReplyOp resp; MOSDSubOpReply *reply = new MOSDSubOpReply( @@ -7002,7 +7001,7 @@ void ReplicatedBackend::_failed_push(int from, const hobject_t &soid) void ReplicatedPG::sub_op_remove(OpRequestRef op) { - MOSDSubOp *m = static_cast<MOSDSubOp*>(op->request); + MOSDSubOp *m = static_cast<MOSDSubOp*>(op->get_req()); assert(m->get_header().type == MSG_OSD_SUBOP); dout(7) << "sub_op_remove " << m->poid << dendl; @@ -7225,7 +7224,7 @@ void ReplicatedPG::apply_and_flush_repops(bool requeue) if (requeue) { if (repop->ctx->op) { - dout(10) << " requeuing " << *repop->ctx->op->request << dendl; + dout(10) << " requeuing " << *repop->ctx->op->get_req() << dendl; rq.push_back(repop->ctx->op); repop->ctx->op = OpRequestRef(); } diff --git a/src/osd/ReplicatedPG.h b/src/osd/ReplicatedPG.h index c277c0d3f86..27c9d1bb605 100644 --- a/src/osd/ReplicatedPG.h +++ b/src/osd/ReplicatedPG.h @@ -993,7 +993,7 @@ inline ostream& 
operator<<(ostream& out, ReplicatedPG::RepGather& repop) //<< " wfnvram=" << repop.waitfor_nvram << " wfdisk=" << repop.waitfor_disk; if (repop.ctx->op) - out << " op=" << *(repop.ctx->op->request); + out << " op=" << *(repop.ctx->op->get_req()); out << ")"; return out; } diff --git a/src/osd/osd_types.h b/src/osd/osd_types.h index 59b71cc6f67..a54fc65f375 100644 --- a/src/osd/osd_types.h +++ b/src/osd/osd_types.h @@ -23,6 +23,7 @@ #include "include/types.h" #include "include/utime.h" #include "include/CompatSet.h" +#include "include/histogram.h" #include "include/interval_set.h" #include "common/snap_types.h" #include "common/Formatter.h" @@ -555,67 +556,6 @@ inline ostream& operator<<(ostream& out, const eversion_t e) { return out << e.epoch << "'" << e.version; } - -/** - * power of 2 histogram - */ -struct pow2_hist_t { - /** - * histogram - * - * bin size is 2^index - * value is count of elements that are <= the current bin but > the previous bin. - */ - vector<int32_t> h; - -private: - /// expand to at least another's size - void _expand_to(unsigned s) { - if (s > h.size()) - h.resize(s, 0); - } - /// drop useless trailing 0's - void _contract() { - unsigned p = h.size(); - while (p > 0 && h[p-1] == 0) - --p; - h.resize(p); - } - -public: - void clear() { - h.clear(); - } - void set(int bin, int32_t v) { - _expand_to(bin + 1); - h[bin] = v; - _contract(); - } - - void add(const pow2_hist_t& o) { - _expand_to(o.h.size()); - for (unsigned p = 0; p < o.h.size(); ++p) - h[p] += o.h[p]; - _contract(); - } - void sub(const pow2_hist_t& o) { - _expand_to(o.h.size()); - for (unsigned p = 0; p < o.h.size(); ++p) - h[p] -= o.h[p]; - _contract(); - } - - int32_t upper_bound() const { - return 1 << h.size(); - } - - void dump(Formatter *f) const; - void encode(bufferlist &bl) const; - void decode(bufferlist::iterator &bl); - static void generate_test_instances(std::list<pow2_hist_t*>& o); -}; -WRITE_CLASS_ENCODER(pow2_hist_t) - /** * filestore_perf_stat_t * diff --git a/src/osdc/Objecter.h b/src/osdc/Objecter.h index 1196633276d..938c97a4f31 100644 --- a/src/osdc/Objecter.h +++ b/src/osdc/Objecter.h @@ -386,7 +386,6 @@ struct ObjectOperation { pwatchers->push_back(ow); } } - *prval = 0; } catch (buffer::error& e) { if (prval) @@ -424,8 +423,6 @@ struct ObjectOperation { } psnaps->seq = resp.seq; } - if (prval) - *prval = 0; } catch (buffer::error& e) { if (prval) @@ -617,10 +614,9 @@ struct ObjectOperation { } ::decode(*cursor, p); } catch (buffer::error& e) { - r = -EIO; + if (prval) + *prval = -EIO; } - if (prval) - *prval = r; } }; @@ -664,10 +660,9 @@ struct ObjectOperation { if (pisdirty) *pisdirty = isdirty; } catch (buffer::error& e) { - r = -EIO; + if (prval) + *prval = -EIO; } - if (prval) - *prval = r; } }; diff --git a/src/rgw/Makefile.am b/src/rgw/Makefile.am index 24060b52e25..b92c35e08d6 100644 --- a/src/rgw/Makefile.am +++ b/src/rgw/Makefile.am @@ -31,7 +31,8 @@ librgw_la_SOURCES = \ rgw/rgw_auth_s3.cc \ rgw/rgw_metadata.cc \ rgw/rgw_replica_log.cc \ - rgw/rgw_keystone.cc + rgw/rgw_keystone.cc \ + rgw/rgw_quota.cc librgw_la_CXXFLAGS = -Woverloaded-virtual ${AM_CXXFLAGS} noinst_LTLIBRARIES += librgw.la @@ -124,6 +125,7 @@ noinst_HEADERS += \ rgw/rgw_http_client.h \ rgw/rgw_swift.h \ rgw/rgw_swift_auth.h \ + rgw/rgw_quota.h \ rgw/rgw_rados.h \ rgw/rgw_replica_log.h \ rgw/rgw_resolve.h \ diff --git a/src/rgw/rgw_admin.cc b/src/rgw/rgw_admin.cc index 81abb231b6f..b23bf3ba5d4 100644 --- a/src/rgw/rgw_admin.cc +++ b/src/rgw/rgw_admin.cc @@ -62,6 +62,9 @@ void _usage() cerr << " bucket 
check check bucket index\n"; cerr << " object rm remove object\n"; cerr << " object unlink unlink object from bucket index\n"; + cerr << " quota set set quota params\n"; + cerr << " quota enable enable quota\n"; + cerr << " quota disable disable quota\n"; cerr << " region get show region info\n"; cerr << " regions list list all regions set on this cluster\n"; cerr << " region set set region info (requires infile)\n"; @@ -154,6 +157,11 @@ void _usage() cerr << " --yes-i-really-mean-it required for certain operations\n"; cerr << "\n"; cerr << "<date> := \"YYYY-MM-DD[ hh:mm:ss]\"\n"; + cerr << "\nQuota options:\n"; + cerr << " --bucket specified bucket for quota command\n"; + cerr << " --max-objects specify max objects\n"; + cerr << " --max-size specify max size (in bytes)\n"; + cerr << " --quota-scope scope of quota (bucket, user)\n"; cerr << "\n"; generic_client_usage(); } @@ -203,6 +211,9 @@ enum { OPT_OBJECT_RM, OPT_OBJECT_UNLINK, OPT_OBJECT_STAT, + OPT_QUOTA_SET, + OPT_QUOTA_ENABLE, + OPT_QUOTA_DISABLE, OPT_GC_LIST, OPT_GC_PROCESS, OPT_REGION_GET, @@ -253,6 +264,7 @@ static int get_cmd(const char *cmd, const char *prev_cmd, bool *need_more) strcmp(cmd, "opstate") == 0 || strcmp(cmd, "pool") == 0 || strcmp(cmd, "pools") == 0 || + strcmp(cmd, "quota") == 0 || strcmp(cmd, "region") == 0 || strcmp(cmd, "regions") == 0 || strcmp(cmd, "region-map") == 0 || @@ -362,6 +374,13 @@ static int get_cmd(const char *cmd, const char *prev_cmd, bool *need_more) return OPT_REGION_SET; if (strcmp(cmd, "default") == 0) return OPT_REGION_DEFAULT; + } else if (strcmp(prev_cmd, "quota") == 0) { + if (strcmp(cmd, "set") == 0) + return OPT_QUOTA_SET; + if (strcmp(cmd, "enable") == 0) + return OPT_QUOTA_ENABLE; + if (strcmp(cmd, "disable") == 0) + return OPT_QUOTA_DISABLE; } else if (strcmp(prev_cmd, "regions") == 0) { if (strcmp(cmd, "list") == 0) return OPT_REGION_LIST; @@ -660,6 +679,64 @@ static bool dump_string(const char *field_name, bufferlist& bl, Formatter *f) return true; } +void set_quota_info(RGWQuotaInfo& quota, int opt_cmd, int64_t max_size, int64_t max_objects) +{ + switch (opt_cmd) { + case OPT_QUOTA_ENABLE: + quota.enabled = true; + + // falling through on purpose + + case OPT_QUOTA_SET: + if (max_objects >= 0) { + quota.max_objects = max_objects; + } + if (max_size >= 0) { + quota.max_size_kb = rgw_rounded_kb(max_size); + } + break; + case OPT_QUOTA_DISABLE: + quota.enabled = false; + break; + } +} + +int set_bucket_quota(RGWRados *store, int opt_cmd, string& bucket_name, int64_t max_size, int64_t max_objects) +{ + RGWBucketInfo bucket_info; + map<string, bufferlist> attrs; + int r = store->get_bucket_info(NULL, bucket_name, bucket_info, NULL, &attrs); + if (r < 0) { + cerr << "could not get bucket info for bucket=" << bucket_name << ": " << cpp_strerror(-r) << std::endl; + return -r; + } + + set_quota_info(bucket_info.quota, opt_cmd, max_size, max_objects); + + r = store->put_bucket_instance_info(bucket_info, false, 0, &attrs); + if (r < 0) { + cerr << "ERROR: failed writing bucket instance info: " << cpp_strerror(-r) << std::endl; + return -r; + } + return 0; +} + +int set_user_bucket_quota(int opt_cmd, RGWUser& user, RGWUserAdminOpState& op_state, int64_t max_size, int64_t max_objects) +{ + RGWUserInfo& user_info = op_state.get_user_info(); + + set_quota_info(user_info.bucket_quota, opt_cmd, max_size, max_objects); + + op_state.set_bucket_quota(user_info.bucket_quota); + + string err; + int r = user.modify(op_state, &err); + if (r < 0) { + cerr << "ERROR: failed updating user info: " << 
cpp_strerror(-r) << ": " << err << std::endl; + return -r; + } + return 0; +} int main(int argc, char **argv) { @@ -721,6 +798,10 @@ int main(int argc, char **argv) string replica_log_type_str; ReplicaLogType replica_log_type = ReplicaLog_Invalid; string op_mask_str; + string quota_scope; + + int64_t max_objects = -1; + int64_t max_size = -1; std::string val; std::ostringstream errs; @@ -788,6 +869,10 @@ int main(int argc, char **argv) max_buckets = atoi(val.c_str()); } else if (ceph_argparse_witharg(args, i, &val, "--max-entries", (char*)NULL)) { max_entries = atoi(val.c_str()); + } else if (ceph_argparse_witharg(args, i, &val, "--max-size", (char*)NULL)) { + max_size = (int64_t)atoll(val.c_str()); + } else if (ceph_argparse_witharg(args, i, &val, "--max-objects", (char*)NULL)) { + max_objects = (int64_t)atoll(val.c_str()); } else if (ceph_argparse_witharg(args, i, &val, "--date", "--time", (char*)NULL)) { date = val; if (end_date.empty()) @@ -848,6 +933,8 @@ int main(int argc, char **argv) start_marker = val; } else if (ceph_argparse_witharg(args, i, &val, "--end-marker", (char*)NULL)) { end_marker = val; + } else if (ceph_argparse_witharg(args, i, &val, "--quota-scope", (char*)NULL)) { + quota_scope = val; } else if (ceph_argparse_witharg(args, i, &val, "--replica-log-type", (char*)NULL)) { replica_log_type_str = val; replica_log_type = get_replicalog_type(replica_log_type_str); @@ -2228,5 +2315,28 @@ next: return -ret; } } + + bool quota_op = (opt_cmd == OPT_QUOTA_SET || opt_cmd == OPT_QUOTA_ENABLE || opt_cmd == OPT_QUOTA_DISABLE); + + if (quota_op) { + if (bucket_name.empty() && user_id.empty()) { + cerr << "ERROR: bucket name or uid is required for quota operation" << std::endl; + return EINVAL; + } + + if (!bucket_name.empty()) { + if (!quota_scope.empty() && quota_scope != "bucket") { + cerr << "ERROR: invalid quota scope specification." << std::endl; + return EINVAL; + } + set_bucket_quota(store, opt_cmd, bucket_name, max_size, max_objects); + } else if (!user_id.empty()) { + if (quota_scope != "bucket") { + cerr << "ERROR: only bucket-level user quota can be handled. 
Please specify --quota-scope=bucket" << std::endl; + return EINVAL; + } + set_user_bucket_quota(opt_cmd, user, user_op, max_size, max_objects); + } + } return 0; } diff --git a/src/rgw/rgw_bucket.cc b/src/rgw/rgw_bucket.cc index 5356417f09a..3267bc51948 100644 --- a/src/rgw/rgw_bucket.cc +++ b/src/rgw/rgw_bucket.cc @@ -901,6 +901,7 @@ static int bucket_stats(RGWRados *store, std::string& bucket_name, Formatter *f formatter->dump_int("mtime", mtime); formatter->dump_string("max_marker", max_marker); dump_bucket_usage(stats, formatter); + encode_json("bucket_quota", bucket_info.quota, formatter); formatter->close_section(); return 0; diff --git a/src/rgw/rgw_common.h b/src/rgw/rgw_common.h index 2c7c0c716be..baf60001a8b 100644 --- a/src/rgw/rgw_common.h +++ b/src/rgw/rgw_common.h @@ -29,6 +29,7 @@ #include "include/utime.h" #include "rgw_acl.h" #include "rgw_cors.h" +#include "rgw_quota.h" #include "cls/version/cls_version_types.h" #include "include/rados/librados.hpp" @@ -90,6 +91,7 @@ using ceph::crypto::MD5; #define RGW_OP_TYPE_WRITE 0x02 #define RGW_OP_TYPE_DELETE 0x04 +#define RGW_OP_TYPE_MODIFY (RGW_OP_TYPE_WRITE | RGW_OP_TYPE_DELETE) #define RGW_OP_TYPE_ALL (RGW_OP_TYPE_READ | RGW_OP_TYPE_WRITE | RGW_OP_TYPE_DELETE) #define RGW_DEFAULT_MAX_BUCKETS 1000 @@ -128,6 +130,7 @@ using ceph::crypto::MD5; #define ERR_NOT_FOUND 2023 #define ERR_PERMANENT_REDIRECT 2024 #define ERR_LOCKED 2025 +#define ERR_QUOTA_EXCEEDED 2026 #define ERR_USER_SUSPENDED 2100 #define ERR_INTERNAL_ERROR 2200 @@ -423,11 +426,12 @@ struct RGWUserInfo __u8 system; string default_placement; list<string> placement_tags; + RGWQuotaInfo bucket_quota; RGWUserInfo() : auid(0), suspended(0), max_buckets(RGW_DEFAULT_MAX_BUCKETS), op_mask(RGW_OP_TYPE_ALL), system(0) {} void encode(bufferlist& bl) const { - ENCODE_START(13, 9, bl); + ENCODE_START(14, 9, bl); ::encode(auid, bl); string access_key; string secret_key; @@ -462,6 +466,7 @@ struct RGWUserInfo ::encode(system, bl); ::encode(default_placement, bl); ::encode(placement_tags, bl); + ::encode(bucket_quota, bl); ENCODE_FINISH(bl); } void decode(bufferlist::iterator& bl) { @@ -518,6 +523,9 @@ struct RGWUserInfo ::decode(default_placement, bl); ::decode(placement_tags, bl); /* tags of allowed placement rules */ } + if (struct_v >= 14) { + ::decode(bucket_quota, bl); + } DECODE_FINISH(bl); } void dump(Formatter *f) const; @@ -599,6 +607,10 @@ struct rgw_bucket { void dump(Formatter *f) const; void decode_json(JSONObj *obj); static void generate_test_instances(list<rgw_bucket*>& o); + + bool operator<(const rgw_bucket& b) const { + return name.compare(b.name) < 0; + } }; WRITE_CLASS_ENCODER(rgw_bucket) @@ -661,9 +673,10 @@ struct RGWBucketInfo bool has_instance_obj; RGWObjVersionTracker objv_tracker; /* we don't need to serialize this, for runtime tracking */ obj_version ep_objv; /* entry point object version, for runtime tracking only */ + RGWQuotaInfo quota; void encode(bufferlist& bl) const { - ENCODE_START(8, 4, bl); + ENCODE_START(9, 4, bl); ::encode(bucket, bl); ::encode(owner, bl); ::encode(flags, bl); @@ -672,6 +685,7 @@ struct RGWBucketInfo ::encode(ct, bl); ::encode(placement_rule, bl); ::encode(has_instance_obj, bl); + ::encode(quota, bl); ENCODE_FINISH(bl); } void decode(bufferlist::iterator& bl) { @@ -692,6 +706,8 @@ struct RGWBucketInfo ::decode(placement_rule, bl); if (struct_v >= 8) ::decode(has_instance_obj, bl); + if (struct_v >= 9) + ::decode(quota, bl); DECODE_FINISH(bl); } void dump(Formatter *f) const; @@ -754,6 +770,8 @@ struct RGWBucketStats uint64_t 
num_kb; uint64_t num_kb_rounded; uint64_t num_objects; + + RGWBucketStats() : num_kb(0), num_kb_rounded(0), num_objects(0) {} }; struct req_state; @@ -1213,6 +1231,11 @@ static inline const char *rgw_obj_category_name(RGWObjCategory category) return "unknown"; } +static inline uint64_t rgw_rounded_kb(uint64_t bytes) +{ + return (bytes + 1023) / 1024; +} + extern string rgw_string_unquote(const string& s); extern void parse_csv_string(const string& ival, vector<string>& ovals); extern int parse_key_value(string& in_str, string& key, string& val); diff --git a/src/rgw/rgw_http_errors.h b/src/rgw/rgw_http_errors.h index 6cb9fabf6c0..ba3e522651f 100644 --- a/src/rgw/rgw_http_errors.h +++ b/src/rgw/rgw_http_errors.h @@ -36,6 +36,7 @@ const static struct rgw_http_errors RGW_HTTP_ERRORS[] = { { EPERM, 403, "AccessDenied" }, { ERR_USER_SUSPENDED, 403, "UserSuspended" }, { ERR_REQUEST_TIME_SKEWED, 403, "RequestTimeTooSkewed" }, + { ERR_QUOTA_EXCEEDED, 403, "QuotaExceeded" }, { ENOENT, 404, "NoSuchKey" }, { ERR_NO_SUCH_BUCKET, 404, "NoSuchBucket" }, { ERR_NO_SUCH_UPLOAD, 404, "NoSuchUpload" }, diff --git a/src/rgw/rgw_json_enc.cc b/src/rgw/rgw_json_enc.cc index 189e9ae961e..4d6b25374b9 100644 --- a/src/rgw/rgw_json_enc.cc +++ b/src/rgw/rgw_json_enc.cc @@ -396,6 +396,7 @@ void RGWUserInfo::dump(Formatter *f) const } encode_json("default_placement", default_placement, f); encode_json("placement_tags", placement_tags, f); + encode_json("bucket_quota", bucket_quota, f); } @@ -446,6 +447,21 @@ void RGWUserInfo::decode_json(JSONObj *obj) system = (__u8)sys; JSONDecoder::decode_json("default_placement", default_placement, obj); JSONDecoder::decode_json("placement_tags", placement_tags, obj); + JSONDecoder::decode_json("bucket_quota", bucket_quota, obj); +} + +void RGWQuotaInfo::dump(Formatter *f) const +{ + f->dump_bool("enabled", enabled); + f->dump_int("max_size_kb", max_size_kb); + f->dump_int("max_objects", max_objects); +} + +void RGWQuotaInfo::decode_json(JSONObj *obj) +{ + JSONDecoder::decode_json("max_size_kb", max_size_kb, obj); + JSONDecoder::decode_json("max_objects", max_objects, obj); + JSONDecoder::decode_json("enabled", enabled, obj); } void rgw_bucket::dump(Formatter *f) const @@ -497,6 +513,7 @@ void RGWBucketInfo::dump(Formatter *f) const encode_json("region", region, f); encode_json("placement_rule", placement_rule, f); encode_json("has_instance_obj", has_instance_obj, f); + encode_json("quota", quota, f); } void RGWBucketInfo::decode_json(JSONObj *obj) { @@ -507,6 +524,7 @@ void RGWBucketInfo::decode_json(JSONObj *obj) { JSONDecoder::decode_json("region", region, obj); JSONDecoder::decode_json("placement_rule", placement_rule, obj); JSONDecoder::decode_json("has_instance_obj", has_instance_obj, obj); + JSONDecoder::decode_json("quota", quota, obj); } void RGWObjEnt::dump(Formatter *f) const @@ -673,12 +691,14 @@ void RGWRegionMap::dump(Formatter *f) const { encode_json("regions", regions, f); encode_json("master_region", master_region, f); + encode_json("bucket_quota", bucket_quota, f); } void RGWRegionMap::decode_json(JSONObj *obj) { JSONDecoder::decode_json("regions", regions, obj); JSONDecoder::decode_json("master_region", master_region, obj); + JSONDecoder::decode_json("bucket_quota", bucket_quota, obj); } void RGWMetadataLogInfo::dump(Formatter *f) const diff --git a/src/rgw/rgw_main.cc b/src/rgw/rgw_main.cc index 54db609521c..acaa5deffee 100644 --- a/src/rgw/rgw_main.cc +++ b/src/rgw/rgw_main.cc @@ -357,6 +357,13 @@ void RGWProcess::handle_request(RGWRequest *req) goto done; } + 
req->log(s, "init op"); + ret = op->init_processing(); + if (ret < 0) { + abort_early(s, op, ret); + goto done; + } + req->log(s, "verifying op mask"); ret = op->verify_op_mask(); if (ret < 0) { diff --git a/src/rgw/rgw_metadata.cc b/src/rgw/rgw_metadata.cc index ca5ad3f2e7a..23f73e26531 100644 --- a/src/rgw/rgw_metadata.cc +++ b/src/rgw/rgw_metadata.cc @@ -1,7 +1,7 @@ -#include "rgw_metadata.h" #include "common/ceph_json.h" +#include "rgw_metadata.h" #include "cls/version/cls_version_types.h" #include "rgw_rados.h" diff --git a/src/rgw/rgw_op.cc b/src/rgw/rgw_op.cc index 114b8709a22..b9b4c53d696 100644 --- a/src/rgw/rgw_op.cc +++ b/src/rgw/rgw_op.cc @@ -421,6 +421,47 @@ int RGWOp::verify_op_mask() return 0; } +int RGWOp::init_quota() +{ + /* no quota enforcement for system requests */ + if (s->system_request) + return 0; + + /* init quota related stuff */ + if (!(s->user.op_mask & RGW_OP_TYPE_MODIFY)) { + return 0; + } + + /* only interested in object related ops */ + if (s->object_str.empty()) { + return 0; + } + + if (s->bucket_info.quota.enabled) { + bucket_quota = s->bucket_info.quota; + return 0; + } + if (s->user.user_id == s->bucket_owner.get_id()) { + if (s->user.bucket_quota.enabled) { + bucket_quota = s->user.bucket_quota; + return 0; + } + } else { + RGWUserInfo owner_info; + int r = rgw_get_user_info_by_uid(store, s->bucket_info.owner, owner_info); + if (r < 0) + return r; + + if (owner_info.bucket_quota.enabled) { + bucket_quota = owner_info.bucket_quota; + return 0; + } + } + + bucket_quota = store->region_map.bucket_quota; + return 0; +} + static bool validate_cors_rule_method(RGWCORSRule *rule, const char *req_meth) { uint8_t flags = 0; if (strcmp(req_meth, "GET") == 0) flags = RGW_CORS_GET; @@ -1363,6 +1404,14 @@ void RGWPutObj::execute() ldout(s->cct, 15) << "supplied_md5=" << supplied_md5 << dendl; } + if (!chunked_upload) { /* with chunked upload we don't know how big the upload is. 
+ we also check sizes at the end anyway */ + ret = store->check_quota(s->bucket, bucket_quota, s->content_length); + if (ret < 0) { + goto done; + } + } + if (supplied_etag) { strncpy(supplied_md5, supplied_etag, sizeof(supplied_md5) - 1); supplied_md5[sizeof(supplied_md5) - 1] = '\0'; @@ -1407,6 +1456,11 @@ void RGWPutObj::execute() s->obj_size = ofs; perfcounter->inc(l_rgw_put_b, s->obj_size); + ret = store->check_quota(s->bucket, bucket_quota, s->obj_size); + if (ret < 0) { + goto done; + } + hash.Final(m); buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5); diff --git a/src/rgw/rgw_op.h b/src/rgw/rgw_op.h index 948a11830c2..eee5ea99065 100644 --- a/src/rgw/rgw_op.h +++ b/src/rgw/rgw_op.h @@ -20,6 +20,7 @@ #include "rgw_bucket.h" #include "rgw_acl.h" #include "rgw_cors.h" +#include "rgw_quota.h" using namespace std; @@ -36,10 +37,21 @@ protected: RGWRados *store; RGWCORSConfiguration bucket_cors; bool cors_exist; + RGWQuotaInfo bucket_quota; + + virtual int init_quota(); public: RGWOp() : s(NULL), dialect_handler(NULL), store(NULL), cors_exist(false) {} virtual ~RGWOp() {} + virtual int init_processing() { + int ret = init_quota(); + if (ret < 0) + return ret; + + return 0; + } + virtual void init(RGWRados *store, struct req_state *s, RGWHandler *dialect_handler) { this->store = store; this->s = s; diff --git a/src/rgw/rgw_quota.cc b/src/rgw/rgw_quota.cc new file mode 100644 index 00000000000..66609ca723c --- /dev/null +++ b/src/rgw/rgw_quota.cc @@ -0,0 +1,332 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2013 Inktank, Inc + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. 
+ * + */ + + +#include "include/utime.h" +#include "common/lru_map.h" +#include "common/RefCountedObj.h" + +#include "rgw_common.h" +#include "rgw_rados.h" +#include "rgw_quota.h" + +#define dout_subsys ceph_subsys_rgw + + +struct RGWQuotaBucketStats { + RGWBucketStats stats; + utime_t expiration; + utime_t async_refresh_time; +}; + +class RGWBucketStatsCache { + RGWRados *store; + lru_map<rgw_bucket, RGWQuotaBucketStats> stats_map; + RefCountedWaitObject *async_refcount; + + int fetch_bucket_totals(rgw_bucket& bucket, RGWBucketStats& stats); + +public: + RGWBucketStatsCache(RGWRados *_store) : store(_store), stats_map(store->ctx()->_conf->rgw_bucket_quota_cache_size) { + async_refcount = new RefCountedWaitObject; + } + ~RGWBucketStatsCache() { + async_refcount->put_wait(); /* wait for all pending async requests to complete */ + } + + int get_bucket_stats(rgw_bucket& bucket, RGWBucketStats& stats, RGWQuotaInfo& quota); + void adjust_bucket_stats(rgw_bucket& bucket, int objs_delta, uint64_t added_bytes, uint64_t removed_bytes); + + bool can_use_cached_stats(RGWQuotaInfo& quota, RGWBucketStats& stats); + + void set_stats(rgw_bucket& bucket, RGWQuotaBucketStats& qs, RGWBucketStats& stats); + int async_refresh(rgw_bucket& bucket, RGWQuotaBucketStats& qs); + void async_refresh_response(rgw_bucket& bucket, RGWBucketStats& stats); +}; + +bool RGWBucketStatsCache::can_use_cached_stats(RGWQuotaInfo& quota, RGWBucketStats& cached_stats) +{ + if (quota.max_size_kb >= 0) { + if (quota.max_size_soft_threshold < 0) { + quota.max_size_soft_threshold = quota.max_size_kb * store->ctx()->_conf->rgw_bucket_quota_soft_threshold; + } + + if (cached_stats.num_kb_rounded >= (uint64_t)quota.max_size_soft_threshold) { + ldout(store->ctx(), 20) << "quota: can't use cached stats, exceeded soft threshold (size): " + << cached_stats.num_kb_rounded << " >= " << quota.max_size_soft_threshold << dendl; + return false; + } + } + + if (quota.max_objects >= 0) { + if (quota.max_objs_soft_threshold < 0) { + quota.max_objs_soft_threshold = quota.max_objects * store->ctx()->_conf->rgw_bucket_quota_soft_threshold; + } + + if (cached_stats.num_objects >= (uint64_t)quota.max_objs_soft_threshold) { + ldout(store->ctx(), 20) << "quota: can't use cached stats, exceeded soft threshold (num objs): " + << cached_stats.num_objects << " >= " << quota.max_objs_soft_threshold << dendl; + return false; + } + } + + return true; +} + +int RGWBucketStatsCache::fetch_bucket_totals(rgw_bucket& bucket, RGWBucketStats& stats) +{ + RGWBucketInfo bucket_info; + + uint64_t bucket_ver; + uint64_t master_ver; + + map<RGWObjCategory, RGWBucketStats> bucket_stats; + int r = store->get_bucket_stats(bucket, &bucket_ver, &master_ver, bucket_stats, NULL); + if (r < 0) { + ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket.name << dendl; + return r; + } + + stats = RGWBucketStats(); + + map<RGWObjCategory, RGWBucketStats>::iterator iter; + for (iter = bucket_stats.begin(); iter != bucket_stats.end(); ++iter) { + RGWBucketStats& s = iter->second; + stats.num_kb += s.num_kb; + stats.num_kb_rounded += s.num_kb_rounded; + stats.num_objects += s.num_objects; + } + + return 0; +} + +class AsyncRefreshHandler : public RGWGetBucketStats_CB { + RGWRados *store; + RGWBucketStatsCache *cache; +public: + AsyncRefreshHandler(RGWRados *_store, RGWBucketStatsCache *_cache, rgw_bucket& _bucket) : RGWGetBucketStats_CB(_bucket), store(_store), cache(_cache) {} + + int init_fetch(); + + void handle_response(int r); +}; + + +int 
AsyncRefreshHandler::init_fetch() +{ + ldout(store->ctx(), 20) << "initiating async quota refresh for bucket=" << bucket << dendl; + map<RGWObjCategory, RGWBucketStats> bucket_stats; + int r = store->get_bucket_stats_async(bucket, this); + if (r < 0) { + ldout(store->ctx(), 0) << "could not get bucket info for bucket=" << bucket.name << dendl; + + /* get_bucket_stats_async() dropped our reference already */ + return r; + } + + return 0; +} + +void AsyncRefreshHandler::handle_response(int r) +{ + if (r < 0) { + ldout(store->ctx(), 20) << "AsyncRefreshHandler::handle_response() r=" << r << dendl; + return; /* nothing to do here */ + } + + RGWBucketStats bs; + + map<RGWObjCategory, RGWBucketStats>::iterator iter; + for (iter = stats->begin(); iter != stats->end(); ++iter) { + RGWBucketStats& s = iter->second; + bs.num_kb += s.num_kb; + bs.num_kb_rounded += s.num_kb_rounded; + bs.num_objects += s.num_objects; + } + + cache->async_refresh_response(bucket, bs); +} + +class RGWBucketStatsAsyncTestSet : public lru_map<rgw_bucket, RGWQuotaBucketStats>::UpdateContext { + int objs_delta; + uint64_t added_bytes; + uint64_t removed_bytes; +public: + RGWBucketStatsAsyncTestSet() {} + bool update(RGWQuotaBucketStats *entry) { + if (entry->async_refresh_time.sec() == 0) + return false; + + entry->async_refresh_time = utime_t(0, 0); + + return true; + } +}; + +int RGWBucketStatsCache::async_refresh(rgw_bucket& bucket, RGWQuotaBucketStats& qs) +{ + /* protect against multiple updates */ + RGWBucketStatsAsyncTestSet test_update; + if (!stats_map.find_and_update(bucket, NULL, &test_update)) { + /* most likely we just raced with another update */ + return 0; + } + + async_refcount->get(); + + AsyncRefreshHandler *handler = new AsyncRefreshHandler(store, this, bucket); + + int ret = handler->init_fetch(); + if (ret < 0) { + async_refcount->put(); + handler->put(); + return ret; + } + + return 0; +} + +void RGWBucketStatsCache::async_refresh_response(rgw_bucket& bucket, RGWBucketStats& stats) +{ + ldout(store->ctx(), 20) << "async stats refresh response for bucket=" << bucket << dendl; + + RGWQuotaBucketStats qs; + + stats_map.find(bucket, qs); + + set_stats(bucket, qs, stats); + + async_refcount->put(); +} + +void RGWBucketStatsCache::set_stats(rgw_bucket& bucket, RGWQuotaBucketStats& qs, RGWBucketStats& stats) +{ + qs.stats = stats; + qs.expiration = ceph_clock_now(store->ctx()); + qs.async_refresh_time = qs.expiration; + qs.expiration += store->ctx()->_conf->rgw_bucket_quota_ttl; + qs.async_refresh_time += store->ctx()->_conf->rgw_bucket_quota_ttl / 2; + + stats_map.add(bucket, qs); +} + +int RGWBucketStatsCache::get_bucket_stats(rgw_bucket& bucket, RGWBucketStats& stats, RGWQuotaInfo& quota) { + RGWQuotaBucketStats qs; + utime_t now = ceph_clock_now(store->ctx()); + if (stats_map.find(bucket, qs)) { + if (qs.async_refresh_time.sec() > 0 && now >= qs.async_refresh_time) { + int r = async_refresh(bucket, qs); + if (r < 0) { + ldout(store->ctx(), 0) << "ERROR: quota async refresh returned ret=" << r << dendl; + + /* continue processing, might be a transient error, async refresh is just optimization */ + } + } + + if (can_use_cached_stats(quota, qs.stats) && qs.expiration > ceph_clock_now(store->ctx())) { + stats = qs.stats; + return 0; + } + } + + int ret = fetch_bucket_totals(bucket, stats); + if (ret < 0 && ret != -ENOENT) + return ret; + + set_stats(bucket, qs, stats); + + return 0; +} + + +class RGWBucketStatsUpdate : public lru_map<rgw_bucket, RGWQuotaBucketStats>::UpdateContext { + int objs_delta; + 
uint64_t added_bytes; + uint64_t removed_bytes; +public: + RGWBucketStatsUpdate(int _objs_delta, uint64_t _added_bytes, uint64_t _removed_bytes) : + objs_delta(_objs_delta), added_bytes(_added_bytes), removed_bytes(_removed_bytes) {} + bool update(RGWQuotaBucketStats *entry) { + uint64_t rounded_kb_added = rgw_rounded_kb(added_bytes); + uint64_t rounded_kb_removed = rgw_rounded_kb(removed_bytes); + + entry->stats.num_kb_rounded += (rounded_kb_added - rounded_kb_removed); + entry->stats.num_kb += (added_bytes - removed_bytes) / 1024; + entry->stats.num_objects += objs_delta; + + return true; + } +}; + + +void RGWBucketStatsCache::adjust_bucket_stats(rgw_bucket& bucket, int objs_delta, uint64_t added_bytes, uint64_t removed_bytes) +{ + RGWBucketStatsUpdate update(objs_delta, added_bytes, removed_bytes); + stats_map.find_and_update(bucket, NULL, &update); +} + + +class RGWQuotaHandlerImpl : public RGWQuotaHandler { + RGWRados *store; + RGWBucketStatsCache stats_cache; +public: + RGWQuotaHandlerImpl(RGWRados *_store) : store(_store), stats_cache(_store) {} + virtual int check_quota(rgw_bucket& bucket, RGWQuotaInfo& bucket_quota, + uint64_t num_objs, uint64_t size) { + uint64_t size_kb = rgw_rounded_kb(size); + if (!bucket_quota.enabled) { + return 0; + } + + RGWBucketStats stats; + + int ret = stats_cache.get_bucket_stats(bucket, stats, bucket_quota); + if (ret < 0) + return ret; + + ldout(store->ctx(), 20) << "bucket quota: max_objects=" << bucket_quota.max_objects + << " max_size_kb=" << bucket_quota.max_size_kb << dendl; + + if (bucket_quota.max_objects >= 0 && + stats.num_objects + num_objs > (uint64_t)bucket_quota.max_objects) { + ldout(store->ctx(), 10) << "quota exceeded: stats.num_objects=" << stats.num_objects + << " bucket_quota.max_objects=" << bucket_quota.max_objects << dendl; + + return -ERR_QUOTA_EXCEEDED; + } + if (bucket_quota.max_size_kb >= 0 && + stats.num_kb_rounded + size_kb > (uint64_t)bucket_quota.max_size_kb) { + ldout(store->ctx(), 10) << "quota exceeded: stats.num_kb_rounded=" << stats.num_kb_rounded << " size_kb=" << size_kb + << " bucket_quota.max_size_kb=" << bucket_quota.max_size_kb << dendl; + return -ERR_QUOTA_EXCEEDED; + } + + return 0; + } + + virtual void update_stats(rgw_bucket& bucket, int obj_delta, uint64_t added_bytes, uint64_t removed_bytes) { + stats_cache.adjust_bucket_stats(bucket, obj_delta, added_bytes, removed_bytes); + }; +}; + + +RGWQuotaHandler *RGWQuotaHandler::generate_handler(RGWRados *store) +{ + return new RGWQuotaHandlerImpl(store); +}; + +void RGWQuotaHandler::free_handler(RGWQuotaHandler *handler) +{ + delete handler; +} diff --git a/src/rgw/rgw_quota.h b/src/rgw/rgw_quota.h new file mode 100644 index 00000000000..2f8f28e85a2 --- /dev/null +++ b/src/rgw/rgw_quota.h @@ -0,0 +1,74 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2013 Inktank, Inc + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. 
+ * + */ + +#ifndef CEPH_RGW_QUOTA_H +#define CEPH_RGW_QUOTA_H + + +#include "include/utime.h" +#include "include/atomic.h" +#include "common/lru_map.h" + +class RGWRados; +class JSONObj; + +struct RGWQuotaInfo { + int64_t max_size_kb; + int64_t max_objects; + bool enabled; + int64_t max_size_soft_threshold; + int64_t max_objs_soft_threshold; + + RGWQuotaInfo() : max_size_kb(-1), max_objects(-1), enabled(false), + max_size_soft_threshold(-1), max_objs_soft_threshold(-1) {} + + void encode(bufferlist& bl) const { + ENCODE_START(1, 1, bl); + ::encode(max_size_kb, bl); + ::encode(max_objects, bl); + ::encode(enabled, bl); + ENCODE_FINISH(bl); + } + void decode(bufferlist::iterator& bl) { + DECODE_START(1, bl); + ::decode(max_size_kb, bl); + ::decode(max_objects, bl); + ::decode(enabled, bl); + DECODE_FINISH(bl); + } + + void dump(Formatter *f) const; + + void decode_json(JSONObj *obj); + +}; +WRITE_CLASS_ENCODER(RGWQuotaInfo) + +class rgw_bucket; + +class RGWQuotaHandler { +public: + RGWQuotaHandler() {} + virtual ~RGWQuotaHandler() { + } + virtual int check_quota(rgw_bucket& bucket, RGWQuotaInfo& bucket_quota, + uint64_t num_objs, uint64_t size) = 0; + + virtual void update_stats(rgw_bucket& bucket, int obj_delta, uint64_t added_bytes, uint64_t removed_bytes) = 0; + + static RGWQuotaHandler *generate_handler(RGWRados *store); + static void free_handler(RGWQuotaHandler *handler); +}; + +#endif diff --git a/src/rgw/rgw_rados.cc b/src/rgw/rgw_rados.cc index 8b4d18f4e68..9f0a900f3d3 100644 --- a/src/rgw/rgw_rados.cc +++ b/src/rgw/rgw_rados.cc @@ -357,16 +357,20 @@ int RGWZoneParams::store_info(CephContext *cct, RGWRados *store, RGWRegion& regi } void RGWRegionMap::encode(bufferlist& bl) const { - ENCODE_START(1, 1, bl); + ENCODE_START(2, 1, bl); ::encode(regions, bl); ::encode(master_region, bl); + ::encode(bucket_quota, bl); ENCODE_FINISH(bl); } void RGWRegionMap::decode(bufferlist::iterator& bl) { - DECODE_START(1, bl); + DECODE_START(2, bl); ::decode(regions, bl); ::decode(master_region, bl); + + if (struct_v >= 2) + ::decode(bucket_quota, bl); DECODE_FINISH(bl); regions_by_api.clear(); @@ -851,6 +855,7 @@ void RGWRados::finalize() RGWRESTConn *conn = iter->second; delete conn; } + RGWQuotaHandler::free_handler(quota_handler); } /** @@ -962,6 +967,8 @@ int RGWRados::init_complete() if (use_gc_thread) gc->start_processor(); + quota_handler = RGWQuotaHandler::generate_handler(this); + return ret; } @@ -2342,6 +2349,11 @@ int RGWRados::put_obj_meta_impl(void *ctx, rgw_obj& obj, uint64_t size, *mtime = set_mtime; } + if (state) { + /* update quota cache */ + quota_handler->update_stats(bucket, (state->exists ? 
0 : 1), size, state->size); + } + return 0; done_cancel: @@ -3211,6 +3223,11 @@ int RGWRados::delete_obj_impl(void *ctx, rgw_obj& obj, RGWObjVersionTracker *obj if (ret_not_existed) return -ENOENT; + if (state) { + /* update quota cache */ + quota_handler->update_stats(bucket, -1, 0, state->size); + } + return 0; } @@ -4598,6 +4615,38 @@ int RGWRados::get_bucket_stats(rgw_bucket& bucket, uint64_t *bucket_ver, uint64_ return 0; } +class RGWGetBucketStatsContext : public RGWGetDirHeader_CB { + RGWGetBucketStats_CB *cb; + +public: + RGWGetBucketStatsContext(RGWGetBucketStats_CB *_cb) : cb(_cb) {} + void handle_response(int r, rgw_bucket_dir_header& header) { + map<RGWObjCategory, RGWBucketStats> stats; + + if (r >= 0) { + translate_raw_stats(header, stats); + cb->set_response(header.ver, header.master_ver, &stats, header.max_marker); + } + + cb->handle_response(r); + + cb->put(); + } +}; + +int RGWRados::get_bucket_stats_async(rgw_bucket& bucket, RGWGetBucketStats_CB *ctx) +{ + RGWGetBucketStatsContext *get_ctx = new RGWGetBucketStatsContext(ctx); + int r = cls_bucket_head_async(bucket, get_ctx); + if (r < 0) { + ctx->put(); + delete get_ctx; + return r; + } + + return 0; +} + void RGWRados::get_bucket_instance_entry(rgw_bucket& bucket, string& entry) { entry = bucket.name + ":" + bucket.bucket_id; @@ -5480,6 +5529,25 @@ int RGWRados::cls_bucket_head(rgw_bucket& bucket, struct rgw_bucket_dir_header& return 0; } +int RGWRados::cls_bucket_head_async(rgw_bucket& bucket, RGWGetDirHeader_CB *ctx) +{ + librados::IoCtx index_ctx; + string oid; + int r = open_bucket_index(bucket, index_ctx, oid); + if (r < 0) + return r; + + r = cls_rgw_get_dir_header_async(index_ctx, oid, ctx); + if (r < 0) + return r; + + return 0; +} + +int RGWRados::check_quota(rgw_bucket& bucket, RGWQuotaInfo& quota_info, uint64_t obj_size) +{ + return quota_handler->check_quota(bucket, quota_info, 1, obj_size); +} class IntentLogNameFilter : public RGWAccessListFilter { diff --git a/src/rgw/rgw_rados.h b/src/rgw/rgw_rados.h index 65765c414aa..52b898123d4 100644 --- a/src/rgw/rgw_rados.h +++ b/src/rgw/rgw_rados.h @@ -636,6 +636,8 @@ struct RGWRegionMap { string master_region; + RGWQuotaInfo bucket_quota; + RGWRegionMap() : lock("RGWRegionMap") {} void encode(bufferlist& bl) const; @@ -759,6 +761,29 @@ public: int renew_state(); }; +class RGWGetBucketStats_CB : public RefCountedObject { +protected: + rgw_bucket bucket; + uint64_t bucket_ver; + uint64_t master_ver; + map<RGWObjCategory, RGWBucketStats> *stats; + string max_marker; +public: + RGWGetBucketStats_CB(rgw_bucket& _bucket) : bucket(_bucket), stats(NULL) {} + virtual ~RGWGetBucketStats_CB() {} + virtual void handle_response(int r) = 0; + virtual void set_response(uint64_t _bucket_ver, uint64_t _master_ver, + map<RGWObjCategory, RGWBucketStats> *_stats, + const string &_max_marker) { + bucket_ver = _bucket_ver; + master_ver = _master_ver; + stats = _stats; + max_marker = _max_marker; + } +}; + +class RGWGetDirHeader_CB; + class RGWRados { @@ -862,6 +887,8 @@ protected: string region_name; string zone_name; + RGWQuotaHandler *quota_handler; + public: RGWRados() : lock("rados_timer_lock"), timer(NULL), gc(NULL), use_gc_thread(false), @@ -870,6 +897,7 @@ public: bucket_id_lock("rados_bucket_id"), max_bucket_id(0), cct(NULL), rados(NULL), pools_initialized(false), + quota_handler(NULL), rest_master_conn(NULL), meta_mgr(NULL), data_log(NULL) {} @@ -1290,6 +1318,7 @@ public: int decode_policy(bufferlist& bl, ACLOwner *owner); int get_bucket_stats(rgw_bucket& bucket, uint64_t 
*bucket_ver, uint64_t *master_ver, map<RGWObjCategory, RGWBucketStats>& stats, string *max_marker); + int get_bucket_stats_async(rgw_bucket& bucket, RGWGetBucketStats_CB *cb); void get_bucket_instance_obj(rgw_bucket& bucket, rgw_obj& obj); void get_bucket_instance_entry(rgw_bucket& bucket, string& entry); void get_bucket_meta_oid(rgw_bucket& bucket, string& oid); @@ -1321,6 +1350,7 @@ public: map<string, RGWObjEnt>& m, bool *is_truncated, string *last_entry, bool (*force_check_filter)(const string& name) = NULL); int cls_bucket_head(rgw_bucket& bucket, struct rgw_bucket_dir_header& header); + int cls_bucket_head_async(rgw_bucket& bucket, RGWGetDirHeader_CB *ctx); int prepare_update_index(RGWObjState *state, rgw_bucket& bucket, RGWModifyOp op, rgw_obj& oid, string& tag); int complete_update_index(rgw_bucket& bucket, string& oid, string& tag, int64_t poolid, uint64_t epoch, uint64_t size, @@ -1376,6 +1406,8 @@ public: int bucket_rebuild_index(rgw_bucket& bucket); int remove_objs_from_index(rgw_bucket& bucket, list<string>& oid_list); + int check_quota(rgw_bucket& bucket, RGWQuotaInfo& quota_info, uint64_t obj_size); + string unique_id(uint64_t unique_num) { char buf[32]; snprintf(buf, sizeof(buf), ".%llu.%llu", (unsigned long long)instance_id(), (unsigned long long)unique_num); diff --git a/src/rgw/rgw_user.cc b/src/rgw/rgw_user.cc index 5e5b5c564bb..dc529e3d48d 100644 --- a/src/rgw/rgw_user.cc +++ b/src/rgw/rgw_user.cc @@ -1682,6 +1682,9 @@ int RGWUser::execute_add(RGWUserAdminOpState& op_state, std::string *err_msg) if (op_state.op_mask_specified) user_info.op_mask = op_state.get_op_mask(); + if (op_state.has_bucket_quota()) + user_info.bucket_quota = op_state.get_bucket_quota(); + // update the request op_state.set_user_info(user_info); op_state.set_populated(); @@ -1884,6 +1887,9 @@ int RGWUser::execute_modify(RGWUserAdminOpState& op_state, std::string *err_msg) if (op_state.op_mask_specified) user_info.op_mask = op_state.get_op_mask(); + if (op_state.has_bucket_quota()) + user_info.bucket_quota = op_state.get_bucket_quota(); + if (op_state.has_suspension_op()) { __u8 suspended = op_state.get_suspension_status(); user_info.suspended = suspended; diff --git a/src/rgw/rgw_user.h b/src/rgw/rgw_user.h index 32bcf199001..e71b8f81778 100644 --- a/src/rgw/rgw_user.h +++ b/src/rgw/rgw_user.h @@ -172,6 +172,10 @@ struct RGWUserAdminOpState { bool subuser_params_checked; bool user_params_checked; + bool bucket_quota_specified; + + RGWQuotaInfo bucket_quota; + void set_access_key(std::string& access_key) { if (access_key.empty()) return; @@ -285,6 +289,12 @@ struct RGWUserAdminOpState { key_op = true; } + void set_bucket_quota(RGWQuotaInfo& quota) + { + bucket_quota = quota; + bucket_quota_specified = true; + } + bool is_populated() { return populated; }; bool is_initialized() { return initialized; }; bool has_existing_user() { return existing_user; }; @@ -303,6 +313,7 @@ struct RGWUserAdminOpState { bool will_purge_keys() { return purge_keys; }; bool will_purge_data() { return purge_data; }; bool will_generate_subuser() { return gen_subuser; }; + bool has_bucket_quota() { return bucket_quota_specified; } void set_populated() { populated = true; }; void clear_populated() { populated = false; }; void set_initialized() { initialized = true; }; @@ -317,6 +328,7 @@ struct RGWUserAdminOpState { uint32_t get_subuser_perm() { return perm_mask; }; uint32_t get_max_buckets() { return max_buckets; }; uint32_t get_op_mask() { return op_mask; }; + RGWQuotaInfo& get_bucket_quota() { return bucket_quota; } 
std::string get_user_id() { return user_id; }; std::string get_subuser() { return subuser; }; @@ -403,6 +415,7 @@ struct RGWUserAdminOpState { key_params_checked = false; subuser_params_checked = false; user_params_checked = false; + bucket_quota_specified = false; } }; diff --git a/src/test/cli/radosgw-admin/help.t b/src/test/cli/radosgw-admin/help.t index 2def60107dc..4fe30b1cda7 100644 --- a/src/test/cli/radosgw-admin/help.t +++ b/src/test/cli/radosgw-admin/help.t @@ -23,6 +23,9 @@ bucket check check bucket index object rm remove object object unlink unlink object from bucket index + quota set set quota params + quota enable enable quota + quota disable disable quota region get show region info regions list list all regions set on this cluster region set set region info (requires infile) @@ -116,6 +119,12 @@ <date> := "YYYY-MM-DD[ hh:mm:ss]" + Quota options: + --bucket specified bucket for quota command + --max-objects specify max objects + --max-size specify max size (in bytes) + --quota-scope scope of quota (bucket, user) + --conf/-c FILE read configuration from the given configuration file --id/-i ID set ID portion of my name --name/-n TYPE.ID set name diff --git a/src/test/common/test_bloom_filter.cc b/src/test/common/test_bloom_filter.cc index 8e3661b2cc1..cfd41305caa 100644 --- a/src/test/common/test_bloom_filter.cc +++ b/src/test/common/test_bloom_filter.cc @@ -23,7 +23,17 @@ TEST(BloomFilter, Basic) { ASSERT_TRUE(bf.contains("bar")); } +TEST(BloomFilter, Empty) { + bloom_filter bf; + for (int i=0; i<100; ++i) { + ASSERT_FALSE(bf.contains(i)); + ASSERT_FALSE(bf.contains(stringify(i))); + } +} + TEST(BloomFilter, Sweep) { + std::cout.setf(std::ios_base::fixed, std::ios_base::floatfield); + std::cout.precision(5); std::cout << "# max\tfpp\tactual\tsize\tB/insert" << std::endl; for (int ex = 3; ex < 12; ex += 2) { for (float fpp = .001; fpp < .5; fpp *= 4.0) { @@ -62,7 +72,9 @@ TEST(BloomFilter, Sweep) { } TEST(BloomFilter, SweepInt) { - std::cout << "# max\tfpp\tactual\tsize\tB/insert" << std::endl; + std::cout.setf(std::ios_base::fixed, std::ios_base::floatfield); + std::cout.precision(5); + std::cout << "# max\tfpp\tactual\tsize\tB/insert\tdensity\tapprox_element_count" << std::endl; for (int ex = 3; ex < 12; ex += 2) { for (float fpp = .001; fpp < .5; fpp *= 4.0) { int max = 2 << ex; @@ -92,15 +104,70 @@ TEST(BloomFilter, SweepInt) { double byte_per_insert = (double)bl.length() / (double)max; - std::cout << max << "\t" << fpp << "\t" << actual << "\t" << bl.length() << "\t" << byte_per_insert << std::endl; + std::cout << max << "\t" << fpp << "\t" << actual << "\t" << bl.length() << "\t" << byte_per_insert + << "\t" << bf.density() << "\t" << bf.approx_unique_element_count() << std::endl; ASSERT_TRUE(actual < fpp * 10); ASSERT_TRUE(actual > fpp / 10); + ASSERT_TRUE(bf.density() > 0.40); + ASSERT_TRUE(bf.density() < 0.60); } } } +TEST(BloomFilter, CompressibleSweep) { + std::cout.setf(std::ios_base::fixed, std::ios_base::floatfield); + std::cout.precision(5); + std::cout << "# max\tins\test ins\tafter\ttgtfpp\tactual\tsize\tb/elem\n"; + float fpp = .01; + int max = 1024; + for (int div = 1; div < 10; div++) { + compressible_bloom_filter bf(max, fpp, 1); + int t = max/div; + for (int n = 0; n < t; n++) + bf.insert(n); + + unsigned est = bf.approx_unique_element_count(); + if (div > 1) + bf.compress(1.0 / div); + + for (int n = 0; n < t; n++) + ASSERT_TRUE(bf.contains(n)); + + int test = max * 100; + int hit = 0; + for (int n = 0; n < test; n++) + if (bf.contains(100000 + n)) + 
hit++; + + double actual = (double)hit / (double)test; + + bufferlist bl; + ::encode(bf, bl); + + double byte_per_insert = (double)bl.length() / (double)max; + unsigned est_after = bf.approx_unique_element_count(); + std::cout << max + << "\t" << t + << "\t" << est + << "\t" << est_after + << "\t" << fpp + << "\t" << actual + << "\t" << bl.length() << "\t" << byte_per_insert + << std::endl; + + ASSERT_TRUE(actual < fpp * 2.0); + ASSERT_TRUE(actual > fpp / 2.0); + ASSERT_TRUE(est_after < est * 2); + ASSERT_TRUE(est_after > est / 2); + } +} + + + TEST(BloomFilter, BinSweep) { + std::cout.setf(std::ios_base::fixed, std::ios_base::floatfield); + std::cout.precision(5); int total_max = 16384; float total_fpp = .01; std::cout << "total_inserts " << total_max << " target-fpp " << total_fpp << std::endl; diff --git a/src/test/encoding/types.h b/src/test/encoding/types.h index 59e55a11b23..18ed795c3ef 100644 --- a/src/test/encoding/types.h +++ b/src/test/encoding/types.h @@ -6,6 +6,7 @@ TYPE(filepath) #include "common/bloom_filter.hpp" TYPE(bloom_filter) +TYPE(compressible_bloom_filter) #include "common/snap_types.h" TYPE(SnapContext) @@ -35,13 +36,15 @@ TYPEWITHSTRAYDATA(OSDMap::Incremental) #include "crush/CrushWrapper.h" TYPE(CrushWrapper) +#include "include/histogram.h" +TYPE(pow2_hist_t) + #include "osd/osd_types.h" TYPE(osd_reqid_t) TYPE(object_locator_t) TYPE(request_redirect_t) TYPE(pg_t) TYPE(coll_t) -TYPE(pow2_hist_t) TYPE(filestore_perf_stat_t) TYPE(osd_stat_t) TYPE(OSDSuperblock) diff --git a/src/test/filestore/run_seed_to_range.sh b/src/test/filestore/run_seed_to_range.sh index c5b399d7aae..365b34918d2 100755 --- a/src/test/filestore/run_seed_to_range.sh +++ b/src/test/filestore/run_seed_to_range.sh @@ -12,7 +12,7 @@ mydir=`dirname $0` for f in `seq $from $to` do if ! $mydir/run_seed_to.sh $seed $f; then - if -d $dir; then + if [ -d $dir ]; then echo copying evidence to $dir cp -a . $dir else |
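
Editorial note: the sketches below are not part of the patch; they are small standalone illustrations of behaviors the hunks above introduce, written against simplified stand-in types.

The Objecter hunks change how decode completions report errors: on success they no longer reset the caller-supplied return-value pointer to 0, and on a decode failure they write -EIO through that pointer only when it is non-null. The following minimal sketch shows that convention with simplified stand-ins (decode_u32_sketch, handle_decode_sketch, EIO_SKETCH are illustrative names, not Ceph APIs).

#include <cstdint>
#include <cstring>
#include <iostream>
#include <stdexcept>
#include <vector>

const int EIO_SKETCH = 5;  // stand-in for the magnitude of -EIO

// Pretend "decode": needs at least 4 bytes, throws otherwise,
// loosely playing the role of ::decode() raising buffer::error.
static uint32_t decode_u32_sketch(const std::vector<unsigned char>& buf) {
  if (buf.size() < 4)
    throw std::runtime_error("short buffer");
  uint32_t v;
  std::memcpy(&v, buf.data(), 4);
  return v;
}

// Completion handler: fills *out on success; on a decode failure it sets
// *prval (if provided) to -EIO. It never writes 0 on success, so a result
// recorded earlier by the dispatcher is not clobbered.
static void handle_decode_sketch(const std::vector<unsigned char>& buf,
                                 uint32_t* out, int* prval) {
  try {
    uint32_t v = decode_u32_sketch(buf);
    if (out)
      *out = v;
  } catch (const std::exception&) {
    if (prval)
      *prval = -EIO_SKETCH;
  }
}

int main() {
  int rval = 0;
  uint32_t value = 0;
  handle_decode_sketch({1, 0, 0, 0}, &value, &rval);
  std::cout << value << " " << rval << "\n";  // decoded value, rval untouched
  handle_decode_sketch({1, 2}, &value, &rval);
  std::cout << rval << "\n";                  // -5: decode failure reported
  return 0;
}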
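RGWOp::init_quota() in the rgw_op.cc hunk picks which quota applies to an object write in a fixed order: an enabled per-bucket quota wins, then the bucket owner's per-user bucket quota, then the region-map default. This is a minimal sketch of that precedence only; QuotaSketch, RequestSketch and resolve_quota_sketch are illustrative stand-ins for the real RGW types, not code from the patch.

#include <iostream>

struct QuotaSketch {
  bool enabled;
  long max_size_kb;   // -1 = unlimited
  long max_objects;   // -1 = unlimited
};

struct RequestSketch {
  QuotaSketch bucket_quota;        // corresponds to s->bucket_info.quota
  QuotaSketch owner_bucket_quota;  // bucket owner's user_info.bucket_quota
  QuotaSketch region_bucket_quota; // store->region_map.bucket_quota
};

static QuotaSketch resolve_quota_sketch(const RequestSketch& req) {
  if (req.bucket_quota.enabled)
    return req.bucket_quota;        // explicit per-bucket quota wins
  if (req.owner_bucket_quota.enabled)
    return req.owner_bucket_quota;  // owner's default for all of their buckets
  return req.region_bucket_quota;   // region-wide default (possibly disabled)
}

int main() {
  // No bucket quota, but the owner has a 10 MB default for their buckets.
  RequestSketch req = { {false, -1, -1}, {true, 10240, -1}, {false, -1, -1} };
  QuotaSketch q = resolve_quota_sketch(req);
  std::cout << q.enabled << " " << q.max_size_kb << "\n";  // 1 10240
  return 0;
}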
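The enforcement itself lives in RGWQuotaHandlerImpl::check_quota in the new rgw_quota.cc: limits of -1 are skipped, byte counts are rounded up to whole kilobytes with rgw_rounded_kb(), and the pending write is added to the cached stats before comparing against the caps that radosgw-admin quota set --bucket=<name> --max-size=<bytes> --max-objects=<count> stores. Below is a simplified, self-contained version of that arithmetic; names ending in _sketch are illustrative and ERR_QUOTA_EXCEEDED_SKETCH merely stands in for the real error code.

#include <cstdint>
#include <iostream>

struct QuotaInfoSketch {
  int64_t max_size_kb;   // -1 means no size limit
  int64_t max_objects;   // -1 means no object-count limit
  bool enabled;
};

struct BucketStatsSketch {
  uint64_t num_kb_rounded;  // usage rounded up to 1 KB granularity
  uint64_t num_objects;
};

// mirrors rgw_rounded_kb(): round a byte count up to whole kilobytes
static uint64_t rounded_kb(uint64_t bytes) {
  return (bytes + 1023) / 1024;
}

// Returns 0 if the write fits, or a negative "quota exceeded" value otherwise.
static int check_quota_sketch(const QuotaInfoSketch& quota,
                              const BucketStatsSketch& stats,
                              uint64_t new_objs, uint64_t new_bytes) {
  const int ERR_QUOTA_EXCEEDED_SKETCH = 2026;  // stand-in for ERR_QUOTA_EXCEEDED
  if (!quota.enabled)
    return 0;
  if (quota.max_objects >= 0 &&
      stats.num_objects + new_objs > (uint64_t)quota.max_objects)
    return -ERR_QUOTA_EXCEEDED_SKETCH;
  if (quota.max_size_kb >= 0 &&
      stats.num_kb_rounded + rounded_kb(new_bytes) > (uint64_t)quota.max_size_kb)
    return -ERR_QUOTA_EXCEEDED_SKETCH;
  return 0;
}

int main() {
  QuotaInfoSketch quota{1024 /* KB cap */, 10 /* objects */, true};
  BucketStatsSketch stats{1000 /* KB used */, 3};
  // A 100 KB object pushes rounded usage to 1100 KB > 1024 KB: rejected.
  std::cout << check_quota_sketch(quota, stats, 1, 100 * 1024) << "\n";
  return 0;
}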
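Finally, RGWBucketStatsCache avoids listing the bucket index on every request by caching stats with two timestamps: the entry expires after rgw_bucket_quota_ttl seconds, and an asynchronous refresh is triggered once half of that TTL has elapsed, so the synchronous path usually keeps serving the cached value while it is refreshed in the background (soft thresholds additionally force a fresh read once usage approaches the limit). The sketch below captures only that timing policy under those assumptions; the names and the plain double clock are illustrative, not the patch's code.

#include <iostream>

struct CachedStatsSketch {
  unsigned long long stats_kb = 0;
  double expiration = 0;          // now + ttl at store time
  double async_refresh_time = 0;  // now + ttl/2 at store time
};

// mirrors set_stats(): stamp the entry with full-TTL expiry and half-TTL refresh
static void set_stats_sketch(CachedStatsSketch& e, unsigned long long kb,
                             double now, double ttl) {
  e.stats_kb = kb;
  e.expiration = now + ttl;
  e.async_refresh_time = now + ttl / 2;
}

// Returns true if the cached value may be served; sets *start_async_refresh
// once the half-TTL mark has passed, as get_bucket_stats() does.
static bool use_cached_sketch(const CachedStatsSketch& e, double now,
                              bool* start_async_refresh) {
  *start_async_refresh =
      (e.async_refresh_time > 0 && now >= e.async_refresh_time);
  return now < e.expiration;
}

int main() {
  CachedStatsSketch entry;
  set_stats_sketch(entry, 500, /*now=*/0.0, /*ttl=*/600.0);

  bool refresh = false;
  std::cout << use_cached_sketch(entry, 100.0, &refresh) << " " << refresh << "\n"; // serve, no refresh
  std::cout << use_cached_sketch(entry, 400.0, &refresh) << " " << refresh << "\n"; // serve, kick refresh
  std::cout << use_cached_sketch(entry, 700.0, &refresh) << " " << refresh << "\n"; // expired, re-fetch
  return 0;
}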