summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGary Lowell <gary.lowell@inktank.com>2013-02-19 14:55:14 -0800
committerGary Lowell <gary.lowell@inktank.com>2013-02-19 14:55:14 -0800
commitd0424ebced7898e942b7465e99098091c474cbfb (patch)
tree68c61caee13ec56fb708d56bed9dfacf217d4ae1
parentbcb210c677d31f3209fddbb4ada17fee3f9b18da (diff)
parent3ff0fe0fc7eb2413ce7533f1b11400ad74aecbb1 (diff)
downloadceph-d0424ebced7898e942b7465e99098091c474cbfb.tar.gz
Merge branch 'master' of https://github.com/ceph/ceph
-rw-r--r--doc/release-notes.rst40
-rwxr-xr-xqa/workunits/hadoop-internal-tests/test.sh70
-rw-r--r--src/os/FileStore.cc4
-rw-r--r--src/osd/OSD.cc21
-rw-r--r--src/osd/OSD.h9
-rw-r--r--src/osd/PG.cc6
-rw-r--r--src/osd/osd_types.cc2
7 files changed, 115 insertions, 37 deletions
diff --git a/doc/release-notes.rst b/doc/release-notes.rst
index d7840fd645d..a7d0d282350 100644
--- a/doc/release-notes.rst
+++ b/doc/release-notes.rst
@@ -2,6 +2,46 @@
Release Notes
===============
+v0.57
+-----
+
+This development release has a lot of additional functionality
+accumulated over the last couple of months. Most of the bug fixes (with
+the notable exception of the MDS-related work) have already been
+backported to v0.56.x, and are not mentioned here.
+
+Upgrading
+~~~~~~~~~
+
+* The 'ceph osd pool delete <poolname>' and 'rados rmpool <poolname>'
+ now have safety interlocks with loud warnings that make you confirm
+ pool removal. Any scripts that currently rely on these functions
+ zapping data without confirmation need to be adjusted accordingly.
+
+Notable Changes
+~~~~~~~~~~~~~~~
+
+* osd: default to libaio for the journal (some performance boost)
+* osd: validate snap collections on startup
+* osd: ceph-filestore-dump tool for debugging
+* osd: deep-scrub omap keys/values
+* ceph tool: some CLI interface cleanups
+* mon: easy adjustment of crush tunables via 'ceph osd crush tunables ...'
+* mon: easy creation of crush rules via 'ceph osd rule ...'
+* mon: approximate recovery, IO workload stats
+* mon: avoid marking entire CRUSH subtrees out (e.g., if an entire rack goes offline)
+* mon: safety check for pool deletion
+* mon: new checks for identifying and reporting clock drift
+* radosgw: misc fixes
+* rbd: wait for udev to settle in strategic places (avoid spurious errors, failures)
+* rbd-fuse: new tool, package
+* mds, ceph-fuse: manage layouts via xattrs
+* mds: misc bug fixes with clustered MDSs and failure recovery
+* mds: misc bug fixes with readdir
+* libcephfs: many fixes, cleanups with the Java bindings
+* auth: ability to require new cephx signatures on messages (still off by default)
+
+
v0.56.3 "bobtail"
-----------------
diff --git a/qa/workunits/hadoop-internal-tests/test.sh b/qa/workunits/hadoop-internal-tests/test.sh
index 5b84761dee4..f37783ef3f8 100755
--- a/qa/workunits/hadoop-internal-tests/test.sh
+++ b/qa/workunits/hadoop-internal-tests/test.sh
@@ -1,23 +1,63 @@
-#!/bin/sh -e
-
-echo "starting hadoop-internal-tests tests"
+#!/bin/bash -e
# bail if $TESTDIR is not set as this test will fail in that scenario
-[ -z $TESTDIR] && { echo "\$TESTDIR needs to be set, but is not. Exiting."; exit 1; }
+[ -z $TESTDIR ] && { echo "\$TESTDIR needs to be set, but is not. Exiting."; exit 1; }
+
+POOL_SIZES=`seq 1 8`
+POOL_BASE=hadoop
+POOL_NAMES=`echo -n $POOL_SIZES | sed "s/\([0-9]*\)/$POOL_BASE\1/g" | sed "s/ /,/g"`
+
+function gen_hadoop_conf() {
+local outfile=$1
+local poolnames=$2
+cat << EOF > $outfile
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+<property>
+ <name>ceph.conf.file</name>
+ <value>$CEPH_CONF</value>
+</property>
+<property>
+ <name>ceph.data.pools</name>
+ <value>$poolnames</value>
+</property>
+</configuration>
+EOF
+}
+
+echo creating hadoop test pools
+for size in $POOL_SIZES; do
+ name=${POOL_BASE}$size
+ echo creating pool $name
+ #./ceph osd pool create $name 100 100
+ #./ceph osd pool set $name size $size
+ ceph osd pool create $name 100 100
+ ceph osd pool set $name size $size
+
+ echo making pool $name a data pool
+ poolid=`ceph osd dump | sed -n "s/^pool \([0-9]*\) '$name'.*/\1/p"`
+ ceph mds add_data_pool $poolid
+ #./ceph mds add_data_pool $poolid
+done
+
+def_repl_conf=`mktemp`
+echo generating default replication hadoop config $def_repl_conf
+gen_hadoop_conf $def_repl_conf ""
+
+cust_repl_conf=`mktemp`
+echo generating custom replication hadoop config $cust_repl_conf
+gen_hadoop_conf $cust_repl_conf $POOL_NAMES
+
+pushd $TESTDIR/hadoop
-command1="cd $TESTDIR/hadoop"
-command2="ant -Dextra.library.path=$LD_LIBRARY_PATH -Dceph.conf.file=$CEPH_CONF -Dtestcase=TestCephFileSystem"
+echo running default replication hadoop tests
+ant -Dextra.library.path=$LD_LIBRARY_PATH -Dhadoop.conf.file=$def_repl_conf -Dtestcase=TestCephDefaultReplication test
-#print out the command
-echo "----------------------"
-echo $command1
-echo "----------------------"
-echo $command2
-echo "----------------------"
+echo running custom replication hadoop tests
+ant -Dextra.library.path=$LD_LIBRARY_PATH -Dhadoop.conf.file=$cust_repl_conf -Dtestcase=TestCephCustomReplication test
-#now execute the command
-$command1
-$command2
+popd
echo "completed hadoop-internal-tests tests"
exit 0
diff --git a/src/os/FileStore.cc b/src/os/FileStore.cc
index c91d47c6d0d..c53bd79b2ed 100644
--- a/src/os/FileStore.cc
+++ b/src/os/FileStore.cc
@@ -4169,6 +4169,10 @@ int FileStore::_collection_rename(const coll_t &cid, const coll_t &ncid,
get_cdir(cid, old_coll, sizeof(old_coll));
get_cdir(ncid, new_coll, sizeof(new_coll));
+ if (_check_replay_guard(cid, spos) < 0) {
+ return 0;
+ }
+
if (_check_replay_guard(ncid, spos) < 0) {
return _collection_remove_recursive(cid, spos);
}
diff --git a/src/osd/OSD.cc b/src/osd/OSD.cc
index a1546bc606d..d5f2b2299a4 100644
--- a/src/osd/OSD.cc
+++ b/src/osd/OSD.cc
@@ -3545,22 +3545,17 @@ void OSD::do_waiters()
{
assert(osd_lock.is_locked());
+ dout(10) << "do_waiters -- start" << dendl;
finished_lock.Lock();
- if (finished.empty()) {
+ while (!finished.empty()) {
+ OpRequestRef next = finished.front();
+ finished.pop_front();
finished_lock.Unlock();
- } else {
- list<OpRequestRef> waiting;
- waiting.splice(waiting.begin(), finished);
-
- finished_lock.Unlock();
-
- dout(10) << "do_waiters -- start" << dendl;
- for (list<OpRequestRef>::iterator it = waiting.begin();
- it != waiting.end();
- it++)
- dispatch_op(*it);
- dout(10) << "do_waiters -- finish" << dendl;
+ dispatch_op(next);
+ finished_lock.Lock();
}
+ finished_lock.Unlock();
+ dout(10) << "do_waiters -- finish" << dendl;
}
void OSD::dispatch_op(OpRequestRef op)
diff --git a/src/osd/OSD.h b/src/osd/OSD.h
index 5680acca178..1837195d339 100644
--- a/src/osd/OSD.h
+++ b/src/osd/OSD.h
@@ -618,6 +618,11 @@ private:
finished.splice(finished.end(), ls);
finished_lock.Unlock();
}
+ void take_waiters_front(list<OpRequestRef>& ls) {
+ finished_lock.Lock();
+ finished.splice(finished.begin(), ls);
+ finished_lock.Unlock();
+ }
void take_waiter(OpRequestRef op) {
finished_lock.Lock();
finished.push_back(op);
@@ -884,7 +889,7 @@ protected:
void wake_pg_waiters(pg_t pgid) {
if (waiting_for_pg.count(pgid)) {
- take_waiters(waiting_for_pg[pgid]);
+ take_waiters_front(waiting_for_pg[pgid]);
waiting_for_pg.erase(pgid);
}
}
@@ -892,7 +897,7 @@ protected:
for (map<pg_t, list<OpRequestRef> >::iterator p = waiting_for_pg.begin();
p != waiting_for_pg.end();
p++)
- take_waiters(p->second);
+ take_waiters_front(p->second);
waiting_for_pg.clear();
}
diff --git a/src/osd/PG.cc b/src/osd/PG.cc
index bc6e39bdb96..3c10230c41a 100644
--- a/src/osd/PG.cc
+++ b/src/osd/PG.cc
@@ -2702,12 +2702,6 @@ void PG::log_weirdness()
<< "\n";
}
- if (info.last_complete < log.tail)
- osd->clog.error() << info.pgid
- << " last_complete " << info.last_complete
- << " < log.tail " << log.tail
- << "\n";
-
if (log.caller_ops.size() > log.log.size()) {
osd->clog.error() << info.pgid
<< " caller_ops.size " << log.caller_ops.size()
diff --git a/src/osd/osd_types.cc b/src/osd/osd_types.cc
index 8ef0c9b58a1..c3827a4680b 100644
--- a/src/osd/osd_types.cc
+++ b/src/osd/osd_types.cc
@@ -1713,7 +1713,7 @@ void pg_query_t::generate_test_instances(list<pg_query_t*>& o)
string pg_log_entry_t::get_key_name() const
{
char key[40];
- snprintf(key, sizeof(key), "%010u.%020lu", version.epoch, version.version);
+ snprintf(key, sizeof(key), "%010u.%020llu", version.epoch, (long long unsigned)version.version);
return string(key);
}