author     Sage Weil <sage@newdream.net>    2013-03-18 13:51:57 -0700
committer  Sage Weil <sage@newdream.net>    2013-03-18 13:51:57 -0700
commit     717d221cc4d0d05cc73d7a434539f610ee667f89 (patch)
tree       573f15e34360e78f424c9bf2a604dacba0b5d620
parent     a13ae372c0729a25c35a7a17bc98ac732f511aaf (diff)
parent     3967ce4e39f60b32b5d8289eb80d273835c917a8 (diff)
Merge pull request #112 from dalgaaf/wip-da-performance-2-v2
prefer prefix ++/-- operators for e.g. iterators for performance reasons

Reviewed-by: Sage Weil <sage@inktank.com>
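For context: the postfix form (it++) has to construct and return a copy of the iterator's previous value before advancing, while the prefix form (++it) simply advances in place, so for non-trivial iterators (map, set, list, etc.) the prefix form saves a temporary per loop step. A minimal sketch of the preferred loop shape, assuming a plain std::map (illustrative only, not code from this patch):

#include <map>
#include <string>

// Illustrative only -- not part of this patch.  The prefix increment
// advances the iterator in place; the postfix form would first copy it
// and then destroy that copy at the end of every iteration.
int count_entries(const std::map<std::string, int>& m)
{
  int n = 0;
  for (std::map<std::string, int>::const_iterator p = m.begin();
       p != m.end();
       ++p)        // prefer ++p over p++
    ++n;
  return n;
}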
-rw-r--r--  src/common/Finisher.cc                       |  2
-rw-r--r--  src/key_value_store/kv_flat_btree_async.cc   |  2
-rw-r--r--  src/librados/librados.cc                     |  2
-rw-r--r--  src/mds/CDir.cc                              |  2
-rw-r--r--  src/mds/MDCache.cc                           |  2
-rw-r--r--  src/mds/Migrator.cc                          | 52
-rw-r--r--  src/mds/SnapRealm.cc                         | 30
-rw-r--r--  src/mds/events/EMetaBlob.h                   |  2
-rw-r--r--  src/mds/journal.cc                           | 26
-rw-r--r--  src/mon/OSDMonitor.cc                        | 42
-rw-r--r--  src/mon/PGMap.cc                             |  8
-rw-r--r--  src/msg/Pipe.cc                              | 10
-rw-r--r--  src/os/FileStore.cc                          |  8
-rw-r--r--  src/os/FlatIndex.cc                          |  2
-rw-r--r--  src/os/JournalingObjectStore.cc              |  2
-rw-r--r--  src/os/LFNIndex.cc                           |  2
-rw-r--r--  src/osd/OSD.cc                               | 58
-rw-r--r--  src/osd/OSDMap.cc                            | 52
-rw-r--r--  src/osd/PG.cc                                | 72
-rw-r--r--  src/osd/ReplicatedPG.cc                      | 38
-rw-r--r--  src/osd/SnapMapper.cc                        |  2
-rw-r--r--  src/osd/osd_types.cc                         | 10
-rw-r--r--  src/osdc/Filer.cc                            |  8
-rw-r--r--  src/osdc/Filer.h                             |  4
-rw-r--r--  src/osdc/ObjectCacher.cc                     | 74
-rw-r--r--  src/osdc/ObjectCacher.h                      |  4
-rw-r--r--  src/osdc/Objecter.cc                         | 18
-rw-r--r--  src/osdc/Objecter.h                          | 11
-rw-r--r--  src/osdc/Striper.cc                          |  8
-rw-r--r--  src/osdmaptool.cc                            |  2
-rw-r--r--  src/rados.cc                                 | 18
-rw-r--r--  src/rgw/rgw_acl_s3.cc                        |  6
-rw-r--r--  src/rgw/rgw_admin.cc                         |  4
-rw-r--r--  src/rgw/rgw_rados.cc                         |  4
34 files changed, 292 insertions(+), 295 deletions(-)
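Note that the hunks below keep the postfix form wherever its return value is actually consumed, most visibly the erase-while-iterating idiom in SnapRealm::split_at and PG::remove_down_peer_info (container.erase(p++)). A minimal sketch of that pattern, for reference (illustrative only, not code from this patch):

#include <map>
#include <string>

// Illustrative only -- not part of this patch.  erase(p++) hands the old
// position to erase() while p has already moved on, so the iterator stays
// valid; the plain advance in the else branch can use the prefix form.
void drop_zero_entries(std::map<std::string, int>& m)
{
  for (std::map<std::string, int>::iterator p = m.begin(); p != m.end(); ) {
    if (p->second == 0)
      m.erase(p++);   // postfix on purpose: erase needs the old iterator
    else
      ++p;            // nothing to erase: prefix advance is enough
  }
}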
diff --git a/src/common/Finisher.cc b/src/common/Finisher.cc
index c2029f2d668..72bfb6f9aa7 100644
--- a/src/common/Finisher.cc
+++ b/src/common/Finisher.cc
@@ -51,7 +51,7 @@ void *Finisher::finisher_thread_entry()
for (vector<Context*>::iterator p = ls.begin();
p != ls.end();
- p++) {
+ ++p) {
if (*p) {
(*p)->finish(0);
delete *p;
diff --git a/src/key_value_store/kv_flat_btree_async.cc b/src/key_value_store/kv_flat_btree_async.cc
index bbfd1429cc7..7b0ed2bc4c4 100644
--- a/src/key_value_store/kv_flat_btree_async.cc
+++ b/src/key_value_store/kv_flat_btree_async.cc
@@ -143,7 +143,7 @@ int IndexCache::get(const string &key, index_data *idata,
return -ENODATA;
} else {
*idata = it->second.first;
- it++;
+ ++it;
if (it != k2itmap.end()) {
*next_idata = it->second.first;
}
diff --git a/src/librados/librados.cc b/src/librados/librados.cc
index 59ff0d1c3e8..0a00cfc6df2 100644
--- a/src/librados/librados.cc
+++ b/src/librados/librados.cc
@@ -1280,7 +1280,7 @@ int librados::Rados::get_pool_stats(std::list<string>& v, string& category,
int r = client->get_pool_stats(v, rawresult);
for (map<string,::pool_stat_t>::iterator p = rawresult.begin();
p != rawresult.end();
- p++) {
+ ++p) {
stats_map& c = result[p->first];
string cat;
diff --git a/src/mds/CDir.cc b/src/mds/CDir.cc
index 47c5fbec215..2403b904aed 100644
--- a/src/mds/CDir.cc
+++ b/src/mds/CDir.cc
@@ -2082,7 +2082,7 @@ void CDir::_committed(version_t v, version_t lrv)
map<version_t, list<Context*> >::iterator p = waiting_for_commit.begin();
while (p != waiting_for_commit.end()) {
map<version_t, list<Context*> >::iterator n = p;
- n++;
+ ++n;
if (p->first > committed_version) {
dout(10) << " there are waiters for " << p->first << ", committing again" << dendl;
_commit(p->first);
diff --git a/src/mds/MDCache.cc b/src/mds/MDCache.cc
index e085ab6c998..554ade6465c 100644
--- a/src/mds/MDCache.cc
+++ b/src/mds/MDCache.cc
@@ -7560,7 +7560,7 @@ void MDCache::_do_find_ino_peer(find_ino_peer_info_t& fip)
m = fip.hint;
fip.hint = -1;
} else {
- for (set<int>::iterator p = active.begin(); p != active.end(); p++)
+ for (set<int>::iterator p = active.begin(); p != active.end(); ++p)
if (*p != mds->whoami &&
fip.checked.count(*p) == 0) {
m = *p;
diff --git a/src/mds/Migrator.cc b/src/mds/Migrator.cc
index 5e538037531..b485ab11a9d 100644
--- a/src/mds/Migrator.cc
+++ b/src/mds/Migrator.cc
@@ -214,7 +214,7 @@ void Migrator::handle_mds_failure_or_stop(int who)
map<CDir*,int>::iterator p = export_state.begin();
while (p != export_state.end()) {
map<CDir*,int>::iterator next = p;
- next++;
+ ++next;
CDir *dir = p->first;
// abort exports:
@@ -355,7 +355,7 @@ void Migrator::handle_mds_failure_or_stop(int who)
map<dirfrag_t,int>::iterator q = import_state.begin();
while (q != import_state.end()) {
map<dirfrag_t,int>::iterator next = q;
- next++;
+ ++next;
dirfrag_t df = q->first;
CInode *diri = mds->mdcache->get_inode(df.ino);
CDir *dir = mds->mdcache->get_dirfrag(df);
@@ -462,7 +462,7 @@ void Migrator::show_importing()
dout(10) << "show_importing" << dendl;
for (map<dirfrag_t,int>::iterator p = import_state.begin();
p != import_state.end();
- p++) {
+ ++p) {
CDir *dir = mds->mdcache->get_dirfrag(p->first);
if (dir) {
dout(10) << " importing from " << import_peer[p->first]
@@ -484,7 +484,7 @@ void Migrator::show_exporting()
dout(10) << "show_exporting" << dendl;
for (map<CDir*,int>::iterator p = export_state.begin();
p != export_state.end();
- p++)
+ ++p)
dout(10) << " exporting to " << export_peer[p->first]
<< ": (" << p->second << ") " << get_export_statename(p->second)
<< " " << p->first->dirfrag()
@@ -503,7 +503,7 @@ void Migrator::audit()
show_importing();
for (map<dirfrag_t,int>::iterator p = import_state.begin();
p != import_state.end();
- p++) {
+ ++p) {
if (p->second == IMPORT_DISCOVERING)
continue;
if (p->second == IMPORT_DISCOVERED) {
@@ -529,7 +529,7 @@ void Migrator::audit()
show_exporting();
for (map<CDir*,int>::iterator p = export_state.begin();
p != export_state.end();
- p++) {
+ ++p) {
CDir *dir = p->first;
if (p->second == EXPORT_DISCOVERING ||
p->second == EXPORT_FREEZING) continue;
@@ -601,7 +601,7 @@ void Migrator::get_export_lock_set(CDir *dir, set<SimpleLock*>& locks)
cache->make_trace(trace, dir->inode);
for (vector<CDentry*>::iterator it = trace.begin();
it != trace.end();
- it++)
+ ++it)
locks.insert(&(*it)->lock);
// bound dftlocks:
@@ -764,7 +764,7 @@ void Migrator::export_frozen(CDir *dir)
// include list of bystanders
for (map<int,int>::iterator p = dir->replicas_begin();
p != dir->replicas_end();
- p++) {
+ ++p) {
if (p->first != dest) {
dout(10) << "bystander mds." << p->first << dendl;
prep->add_bystander(p->first);
@@ -789,7 +789,7 @@ void Migrator::export_frozen(CDir *dir)
// check bounds
for (set<CDir*>::iterator it = bounds.begin();
it != bounds.end();
- it++) {
+ ++it) {
CDir *bound = *it;
// pin it.
@@ -890,7 +890,7 @@ void Migrator::handle_export_prep_ack(MExportDirPrepAck *m)
MExportDirNotify *notify = new MExportDirNotify(dir->dirfrag(), true,
pair<int,int>(mds->get_nodeid(),CDIR_AUTH_UNKNOWN),
pair<int,int>(mds->get_nodeid(),export_peer[dir]));
- for (set<CDir*>::iterator i = bounds.begin(); i != bounds.end(); i++)
+ for (set<CDir*>::iterator i = bounds.begin(); i != bounds.end(); ++i)
notify->get_bounds().push_back((*i)->dirfrag());
mds->send_message_mds(notify, p->first);
@@ -1029,7 +1029,7 @@ void Migrator::encode_export_inode_caps(CInode *in, bufferlist& bl,
// make note of clients named by exported capabilities
for (map<client_t, Capability*>::iterator it = in->client_caps.begin();
it != in->client_caps.end();
- it++)
+ ++it)
exported_client_map[it->first] = mds->sessionmap.get_inst(entity_name_t::CLIENT(it->first.v));
}
@@ -1043,7 +1043,7 @@ void Migrator::finish_export_inode_caps(CInode *in)
// tell (all) clients about migrating caps..
for (map<client_t, Capability*>::iterator it = in->client_caps.begin();
it != in->client_caps.end();
- it++) {
+ ++it) {
Capability *cap = it->second;
dout(7) << "finish_export_inode telling client." << it->first
<< " exported caps on " << *in << dendl;
@@ -1132,7 +1132,7 @@ int Migrator::encode_export_dir(bufferlist& exportbl,
// dentries
list<CDir*> subdirs;
CDir::map_t::iterator it;
- for (it = dir->begin(); it != dir->end(); it++) {
+ for (it = dir->begin(); it != dir->end(); ++it) {
CDentry *dn = it->second;
CInode *in = dn->get_linkage()->get_inode();
@@ -1190,7 +1190,7 @@ int Migrator::encode_export_dir(bufferlist& exportbl,
}
// subdirs
- for (list<CDir*>::iterator it = subdirs.begin(); it != subdirs.end(); it++)
+ for (list<CDir*>::iterator it = subdirs.begin(); it != subdirs.end(); ++it)
num_exported += encode_export_dir(exportbl, *it, exported_client_map, now);
return num_exported;
@@ -1224,7 +1224,7 @@ void Migrator::finish_export_dir(CDir *dir, list<Context*>& finished, utime_t no
// dentries
list<CDir*> subdirs;
CDir::map_t::iterator it;
- for (it = dir->begin(); it != dir->end(); it++) {
+ for (it = dir->begin(); it != dir->end(); ++it) {
CDentry *dn = it->second;
CInode *in = dn->get_linkage()->get_inode();
@@ -1241,7 +1241,7 @@ void Migrator::finish_export_dir(CDir *dir, list<Context*>& finished, utime_t no
}
// subdirs
- for (list<CDir*>::iterator it = subdirs.begin(); it != subdirs.end(); it++)
+ for (list<CDir*>::iterator it = subdirs.begin(); it != subdirs.end(); ++it)
finish_export_dir(*it, finished, now);
}
@@ -1402,7 +1402,7 @@ void Migrator::export_logged_finish(CDir *dir)
pair<int,int>(mds->get_nodeid(), CDIR_AUTH_UNKNOWN),
pair<int,int>(dest, CDIR_AUTH_UNKNOWN));
- for (set<CDir*>::iterator i = bounds.begin(); i != bounds.end(); i++)
+ for (set<CDir*>::iterator i = bounds.begin(); i != bounds.end(); ++i)
notify->get_bounds().push_back((*i)->dirfrag());
mds->send_message_mds(notify, *p);
@@ -1738,7 +1738,7 @@ void Migrator::handle_export_prep(MExportDirPrep *m)
// each trace is: df ('-' | ('f' dir | 'd') dentry inode (dir dentry inode)*)
for (list<bufferlist>::iterator p = m->traces.begin();
p != m->traces.end();
- p++) {
+ ++p) {
bufferlist::iterator q = p->begin();
dirfrag_t df;
::decode(df, q);
@@ -1923,7 +1923,7 @@ void Migrator::handle_export_dir(MExportDir *m)
cache->get_subtree_bounds(dir, import_bounds);
for (set<CDir*>::iterator it = import_bounds.begin();
it != import_bounds.end();
- it++)
+ ++it)
le->metablob.add_dir(*it, false); // note that parent metadata is already in the event
// adjust popularity
@@ -1964,7 +1964,7 @@ void Migrator::import_remove_pins(CDir *dir, set<CDir*>& bounds)
set<inodeno_t> did;
for (list<dirfrag_t>::iterator p = import_bound_ls[dir].begin();
p != import_bound_ls[dir].end();
- p++) {
+ ++p) {
if (did.count(p->ino))
continue;
did.insert(p->ino);
@@ -1976,7 +1976,7 @@ void Migrator::import_remove_pins(CDir *dir, set<CDir*>& bounds)
// bounding dirfrags
for (set<CDir*>::iterator it = bounds.begin();
it != bounds.end();
- it++) {
+ ++it) {
CDir *bd = *it;
bd->put(CDir::PIN_IMPORTBOUND);
bd->state_clear(CDir::STATE_IMPORTBOUND);
@@ -2023,7 +2023,7 @@ void Migrator::import_reverse(CDir *dir)
cur->mark_clean();
CDir::map_t::iterator it;
- for (it = cur->begin(); it != cur->end(); it++) {
+ for (it = cur->begin(); it != cur->end(); ++it) {
CDentry *dn = it->second;
// dentry
@@ -2101,7 +2101,7 @@ void Migrator::import_notify_abort(CDir *dir, set<CDir*>& bounds)
new MExportDirNotify(dir->dirfrag(), true,
pair<int,int>(mds->get_nodeid(), CDIR_AUTH_UNKNOWN),
pair<int,int>(import_peer[dir->dirfrag()], CDIR_AUTH_UNKNOWN));
- for (set<CDir*>::iterator i = bounds.begin(); i != bounds.end(); i++)
+ for (set<CDir*>::iterator i = bounds.begin(); i != bounds.end(); ++i)
notify->get_bounds().push_back((*i)->dirfrag());
mds->send_message_mds(notify, *p);
}
@@ -2238,7 +2238,7 @@ void Migrator::import_finish(CDir *dir)
// re-eval imported caps
for (map<CInode*, map<client_t,Capability::Export> >::iterator p = cap_imports.begin();
p != cap_imports.end();
- p++)
+ ++p)
if (p->first->is_auth())
mds->locker->eval(p->first, CEPH_CAP_LOCKS, true);
@@ -2339,7 +2339,7 @@ void Migrator::finish_import_inode_caps(CInode *in, int from,
for (map<client_t,Capability::Export>::iterator it = cap_map.begin();
it != cap_map.end();
- it++) {
+ ++it) {
dout(10) << "finish_import_inode_caps for client." << it->first << " on " << *in << dendl;
Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(it->first.v));
assert(session);
@@ -2402,7 +2402,7 @@ int Migrator::decode_import_dir(bufferlist::iterator& blp,
dir->take_waiting(CDir::WAIT_ANY_MASK, waiters);
for (list<Context*>::iterator it = waiters.begin();
it != waiters.end();
- it++)
+ ++it)
import_root->add_waiter(CDir::WAIT_UNFREEZE, *it); // UNFREEZE will get kicked both on success or failure
dout(15) << "doing contents" << dendl;
diff --git a/src/mds/SnapRealm.cc b/src/mds/SnapRealm.cc
index cc9fda76138..a87b748734f 100644
--- a/src/mds/SnapRealm.cc
+++ b/src/mds/SnapRealm.cc
@@ -46,7 +46,7 @@ ostream& operator<<(ostream& out, const SnapRealm& realm)
out << " past_parents=(";
for (map<snapid_t, snaplink_t>::const_iterator p = realm.srnode.past_parents.begin();
p != realm.srnode.past_parents.end();
- p++) {
+ ++p) {
if (p != realm.srnode.past_parents.begin()) out << ",";
out << p->second.first << "-" << p->first
<< "=" << p->second.ino;
@@ -86,7 +86,7 @@ bool SnapRealm::_open_parents(Context *finish, snapid_t first, snapid_t last)
if (srnode.past_parents.size() > open_past_parents.size()) {
for (map<snapid_t, snaplink_t>::iterator p = srnode.past_parents.begin();
p != srnode.past_parents.end();
- p++) {
+ ++p) {
dout(10) << " past_parent [" << p->second.first << "," << p->first << "] is "
<< p->second.ino << dendl;
CInode *parent = mdcache->get_inode(p->second.ino);
@@ -115,7 +115,7 @@ bool SnapRealm::have_past_parents_open(snapid_t first, snapid_t last)
for (map<snapid_t, snaplink_t>::iterator p = srnode.past_parents.lower_bound(first);
p != srnode.past_parents.end();
- p++) {
+ ++p) {
if (p->second.first > last)
break;
dout(10) << " past parent [" << p->second.first << "," << p->first << "] was "
@@ -137,7 +137,7 @@ void SnapRealm::close_parents()
{
for (map<inodeno_t,SnapRealm*>::iterator p = open_past_parents.begin();
p != open_past_parents.end();
- p++)
+ ++p)
p->second->inode->put(CInode::PIN_PASTSNAPPARENT);
open_past_parents.clear();
}
@@ -163,13 +163,13 @@ void SnapRealm::build_snap_set(set<snapid_t> &s,
// include my snaps within interval [first,last]
for (map<snapid_t, SnapInfo>::iterator p = srnode.snaps.lower_bound(first); // first element >= first
p != srnode.snaps.end() && p->first <= last;
- p++)
+ ++p)
s.insert(p->first);
// include snaps for parents during intervals that intersect [first,last]
for (map<snapid_t, snaplink_t>::iterator p = srnode.past_parents.lower_bound(first);
p != srnode.past_parents.end() && p->first >= first && p->second.first <= last;
- p++) {
+ ++p) {
CInode *oldparent = mdcache->get_inode(p->second.ino);
assert(oldparent); // call open_parents first!
assert(oldparent->snaprealm);
@@ -230,7 +230,7 @@ const SnapContext& SnapRealm::get_snap_context()
unsigned i = 0;
for (set<snapid_t>::reverse_iterator p = cached_snaps.rbegin();
p != cached_snaps.rend();
- p++)
+ ++p)
cached_snap_context.snaps[i++] = *p;
}
@@ -245,13 +245,13 @@ void SnapRealm::get_snap_info(map<snapid_t,SnapInfo*>& infomap, snapid_t first,
// include my snaps within interval [first,last]
for (map<snapid_t, SnapInfo>::iterator p = srnode.snaps.lower_bound(first); // first element >= first
p != srnode.snaps.end() && p->first <= last;
- p++)
+ ++p)
infomap[p->first] = &p->second;
// include snaps for parents during intervals that intersect [first,last]
for (map<snapid_t, snaplink_t>::iterator p = srnode.past_parents.lower_bound(first);
p != srnode.past_parents.end() && p->first >= first && p->second.first <= last;
- p++) {
+ ++p) {
CInode *oldparent = mdcache->get_inode(p->second.ino);
assert(oldparent); // call open_parents first!
assert(oldparent->snaprealm);
@@ -308,7 +308,7 @@ snapid_t SnapRealm::resolve_snapname(const string& n, inodeno_t atino, snapid_t
for (map<snapid_t, SnapInfo>::iterator p = srnode.snaps.lower_bound(first); // first element >= first
p != srnode.snaps.end() && p->first <= last;
- p++) {
+ ++p) {
dout(15) << " ? " << p->second << dendl;
//if (num && p->second.snapid == num)
//return p->first;
@@ -321,7 +321,7 @@ snapid_t SnapRealm::resolve_snapname(const string& n, inodeno_t atino, snapid_t
// include snaps for parents during intervals that intersect [first,last]
for (map<snapid_t, snaplink_t>::iterator p = srnode.past_parents.lower_bound(first);
p != srnode.past_parents.end() && p->first >= first && p->second.first <= last;
- p++) {
+ ++p) {
CInode *oldparent = mdcache->get_inode(p->second.ino);
assert(oldparent); // call open_parents first!
assert(oldparent->snaprealm);
@@ -386,7 +386,7 @@ void SnapRealm::split_at(SnapRealm *child)
open_children.erase(p++);
} else {
dout(20) << " keeping child realm " << *realm << " on " << *realm->inode << dendl;
- p++;
+ ++p;
}
}
@@ -441,7 +441,7 @@ void SnapRealm::build_snap_trace(bufferlist& snapbl)
snapid_t max_seq, max_last_created, max_last_destroyed;
build_snap_set(past, max_seq, max_last_created, max_last_destroyed, 0, last);
info.prior_parent_snaps.reserve(past.size());
- for (set<snapid_t>::reverse_iterator p = past.rbegin(); p != past.rend(); p++)
+ for (set<snapid_t>::reverse_iterator p = past.rbegin(); p != past.rend(); ++p)
info.prior_parent_snaps.push_back(*p);
dout(10) << "build_snap_trace prior_parent_snaps from [1," << last << "] "
<< info.prior_parent_snaps << dendl;
@@ -452,7 +452,7 @@ void SnapRealm::build_snap_trace(bufferlist& snapbl)
info.my_snaps.reserve(srnode.snaps.size());
for (map<snapid_t,SnapInfo>::reverse_iterator p = srnode.snaps.rbegin();
p != srnode.snaps.rend();
- p++)
+ ++p)
info.my_snaps.push_back(p->first);
dout(10) << "build_snap_trace my_snaps " << info.my_snaps << dendl;
@@ -481,7 +481,7 @@ void SnapRealm::prune_past_parents()
} else {
dout(10) << "prune_past_parents keeping [" << p->second.first << "," << p->first
<< "] " << p->second.ino << dendl;
- p++;
+ ++p;
}
}
}
diff --git a/src/mds/events/EMetaBlob.h b/src/mds/events/EMetaBlob.h
index 4ff320edd2c..7065460945f 100644
--- a/src/mds/events/EMetaBlob.h
+++ b/src/mds/events/EMetaBlob.h
@@ -476,7 +476,7 @@ private:
else
in->encode_snap_blob(snapbl);
- for (list<std::tr1::shared_ptr<fullbit> >::iterator p = roots.begin(); p != roots.end(); p++) {
+ for (list<std::tr1::shared_ptr<fullbit> >::iterator p = roots.begin(); p != roots.end(); ++p) {
if ((*p)->inode.ino == in->ino()) {
roots.erase(p);
break;
diff --git a/src/mds/journal.cc b/src/mds/journal.cc
index 90ea0f28917..f388aab94ca 100644
--- a/src/mds/journal.cc
+++ b/src/mds/journal.cc
@@ -113,7 +113,7 @@ void LogSegment::try_to_expire(MDS *mds, C_GatherBuilder &gather_bld)
// master ops with possibly uncommitted slaves
for (set<metareqid_t>::iterator p = uncommitted_masters.begin();
p != uncommitted_masters.end();
- p++) {
+ ++p) {
dout(10) << "try_to_expire waiting for slaves to ack commit on " << *p << dendl;
mds->mdcache->wait_for_uncommitted_master(*p, gather_bld.new_sub());
}
@@ -238,7 +238,7 @@ void LogSegment::try_to_expire(MDS *mds, C_GatherBuilder &gather_bld)
// table servers
for (map<int, version_t>::iterator p = tablev.begin();
p != tablev.end();
- p++) {
+ ++p) {
MDSTableServer *server = mds->get_table_server(p->first);
if (p->second > server->get_committed_version()) {
dout(10) << "try_to_expire waiting for " << get_mdstable_name(p->first)
@@ -250,7 +250,7 @@ void LogSegment::try_to_expire(MDS *mds, C_GatherBuilder &gather_bld)
// truncating
for (set<CInode*>::iterator p = truncating_inodes.begin();
p != truncating_inodes.end();
- p++) {
+ ++p) {
dout(10) << "try_to_expire waiting for truncate of " << **p << dendl;
(*p)->add_waiter(CInode::WAIT_TRUNC, gather_bld.new_sub());
}
@@ -451,7 +451,7 @@ void EMetaBlob::add_dir_context(CDir *dir, int mode)
parents.splice(parents.begin(), maybe);
dout(20) << "EMetaBlob::add_dir_context final: " << parents << dendl;
- for (list<CDentry*>::iterator p = parents.begin(); p != parents.end(); p++) {
+ for (list<CDentry*>::iterator p = parents.begin(); p != parents.end(); ++p) {
assert((*p)->get_projected_linkage()->is_primary());
add_dentry(*p, false);
}
@@ -995,7 +995,7 @@ void EMetaBlob::replay(MDS *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
assert(g_conf->mds_kill_journal_replay_at != 1);
- for (list<std::tr1::shared_ptr<fullbit> >::iterator p = roots.begin(); p != roots.end(); p++) {
+ for (list<std::tr1::shared_ptr<fullbit> >::iterator p = roots.begin(); p != roots.end(); ++p) {
CInode *in = mds->mdcache->get_inode((*p)->inode.ino);
bool isnew = in ? false:true;
if (!in)
@@ -1100,7 +1100,7 @@ void EMetaBlob::replay(MDS *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
// full dentry+inode pairs
for (list<std::tr1::shared_ptr<fullbit> >::iterator pp = lump.get_dfull().begin();
pp != lump.get_dfull().end();
- pp++) {
+ ++pp) {
std::tr1::shared_ptr<fullbit> p = *pp;
CDentry *dn = dir->lookup_exact_snap(p->dn, p->dnlast);
if (!dn) {
@@ -1205,7 +1205,7 @@ void EMetaBlob::replay(MDS *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
// remote dentries
for (list<remotebit>::iterator p = lump.get_dremote().begin();
p != lump.get_dremote().end();
- p++) {
+ ++p) {
CDentry *dn = dir->lookup_exact_snap(p->dn, p->dnlast);
if (!dn) {
dn = dir->add_remote_dentry(p->dn, p->ino, p->d_type, p->dnfirst, p->dnlast);
@@ -1236,7 +1236,7 @@ void EMetaBlob::replay(MDS *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
// null dentries
for (list<nullbit>::iterator p = lump.get_dnull().begin();
p != lump.get_dnull().end();
- p++) {
+ ++p) {
CDentry *dn = dir->lookup_exact_snap(p->dn, p->dnlast);
if (!dn) {
dn = dir->add_null_dentry(p->dn, p->dnfirst, p->dnlast);
@@ -1321,7 +1321,7 @@ void EMetaBlob::replay(MDS *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
}
if (!unlinked.empty()) {
- for (set<CInode*>::iterator p = linked.begin(); p != linked.end(); p++)
+ for (set<CInode*>::iterator p = linked.begin(); p != linked.end(); ++p)
unlinked.erase(*p);
dout(10) << " unlinked set contains " << unlinked << dendl;
for (map<CInode*, CDir*>::iterator p = unlinked.begin(); p != unlinked.end(); ++p) {
@@ -1416,14 +1416,14 @@ void EMetaBlob::replay(MDS *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
// truncating inodes
for (list<inodeno_t>::iterator p = truncate_start.begin();
p != truncate_start.end();
- p++) {
+ ++p) {
CInode *in = mds->mdcache->get_inode(*p);
assert(in);
mds->mdcache->add_recovered_truncate(in, logseg);
}
for (map<inodeno_t,uint64_t>::iterator p = truncate_finish.begin();
p != truncate_finish.end();
- p++) {
+ ++p) {
LogSegment *ls = mds->mdlog->get_segment(p->second);
if (ls) {
CInode *in = mds->mdcache->get_inode(p->first);
@@ -1435,7 +1435,7 @@ void EMetaBlob::replay(MDS *mds, LogSegment *logseg, MDSlaveUpdate *slaveup)
// destroyed inodes
for (vector<inodeno_t>::iterator p = destroyed_inodes.begin();
p != destroyed_inodes.end();
- p++) {
+ ++p) {
CInode *in = mds->mdcache->get_inode(*p);
if (in) {
dout(10) << "EMetaBlob.replay destroyed " << *p << ", dropping " << *in << dendl;
@@ -1958,7 +1958,7 @@ void EOpen::replay(MDS *mds)
// note which segments inodes belong to, so we don't have to start rejournaling them
for (vector<inodeno_t>::iterator p = inos.begin();
p != inos.end();
- p++) {
+ ++p) {
CInode *in = mds->mdcache->get_inode(*p);
if (!in) {
dout(0) << "EOpen.replay ino " << *p << " not in metablob" << dendl;
diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc
index e4db273f985..ab00ca0120d 100644
--- a/src/mon/OSDMonitor.cc
+++ b/src/mon/OSDMonitor.cc
@@ -240,7 +240,7 @@ bool OSDMonitor::thrash()
hash_map<pg_t,pg_stat_t>::iterator p = mon->pgmon()->pg_map.pg_stat.begin();
hash_map<pg_t,pg_stat_t>::iterator e = mon->pgmon()->pg_map.pg_stat.end();
while (n--)
- p++;
+ ++p;
for (int i=0; i<50; i++) {
vector<int> v;
for (int j=0; j<3; j++) {
@@ -249,7 +249,7 @@ bool OSDMonitor::thrash()
v.push_back(o);
}
if (v.size() < 3) {
- for (vector<int>::iterator q = p->second.acting.begin(); q != p->second.acting.end(); q++)
+ for (vector<int>::iterator q = p->second.acting.begin(); q != p->second.acting.end(); ++q)
if (std::find(v.begin(), v.end(), *q) == v.end())
v.push_back(*q);
}
@@ -257,7 +257,7 @@ bool OSDMonitor::thrash()
pending_inc.new_pg_temp[p->first] = v;
dout(5) << "thrash_map pg " << p->first << " pg_temp remapped to " << v << dendl;
- p++;
+ ++p;
if (p == e)
p = mon->pgmon()->pg_map.pg_stat.begin();
}
@@ -295,7 +295,7 @@ void OSDMonitor::remove_redundant_pg_temp()
for (map<pg_t,vector<int> >::iterator p = osdmap.pg_temp->begin();
p != osdmap.pg_temp->end();
- p++) {
+ ++p) {
if (pending_inc.new_pg_temp.count(p->first) == 0) {
vector<int> raw_up;
osdmap.pg_to_raw_up(p->first, raw_up);
@@ -315,7 +315,7 @@ void OSDMonitor::remove_down_pg_temp()
for (map<pg_t,vector<int> >::iterator p = tmpmap.pg_temp->begin();
p != tmpmap.pg_temp->end();
- p++) {
+ ++p) {
unsigned num_up = 0;
for (vector<int>::iterator i = p->second.begin();
i != p->second.end();
@@ -439,7 +439,7 @@ void OSDMonitor::encode_pending(MonitorDBStore::Transaction *t)
// tell me about it
for (map<int32_t,uint8_t>::iterator i = pending_inc.new_state.begin();
i != pending_inc.new_state.end();
- i++) {
+ ++i) {
int s = i->second ? i->second : CEPH_OSD_UP;
if (s & CEPH_OSD_UP)
dout(2) << " osd." << i->first << " DOWN" << dendl;
@@ -448,13 +448,13 @@ void OSDMonitor::encode_pending(MonitorDBStore::Transaction *t)
}
for (map<int32_t,entity_addr_t>::iterator i = pending_inc.new_up_client.begin();
i != pending_inc.new_up_client.end();
- i++) {
+ ++i) {
//FIXME: insert cluster addresses too
dout(2) << " osd." << i->first << " UP " << i->second << dendl;
}
for (map<int32_t,uint32_t>::iterator i = pending_inc.new_weight.begin();
i != pending_inc.new_weight.end();
- i++) {
+ ++i) {
if (i->second == CEPH_OSD_OUT) {
dout(2) << " osd." << i->first << " OUT" << dendl;
} else if (i->second == CEPH_OSD_IN) {
@@ -794,7 +794,7 @@ bool OSDMonitor::check_failure(utime_t now, int target_osd, failure_info_t& fi)
assert(fi.reporters.size());
for (map<int,failure_reporter_t>::iterator p = fi.reporters.begin();
p != fi.reporters.end();
- p++) {
+ ++p) {
const osd_xinfo_t& xi = osdmap.get_xinfo(p->first);
utime_t elapsed = now - xi.down_stamp;
double decay = exp((double)elapsed * decay_k);
@@ -1185,7 +1185,7 @@ bool OSDMonitor::preprocess_pgtemp(MOSDPGTemp *m)
goto ignore;
}
- for (map<pg_t,vector<int> >::iterator p = m->pg_temp.begin(); p != m->pg_temp.end(); p++) {
+ for (map<pg_t,vector<int> >::iterator p = m->pg_temp.begin(); p != m->pg_temp.end(); ++p) {
dout(20) << " " << p->first
<< (osdmap.pg_temp->count(p->first) ? (*osdmap.pg_temp)[p->first] : empty)
<< " -> " << p->second << dendl;
@@ -1211,7 +1211,7 @@ bool OSDMonitor::prepare_pgtemp(MOSDPGTemp *m)
{
int from = m->get_orig_source().num();
dout(7) << "prepare_pgtemp e" << m->map_epoch << " from " << m->get_orig_source_inst() << dendl;
- for (map<pg_t,vector<int> >::iterator p = m->pg_temp.begin(); p != m->pg_temp.end(); p++)
+ for (map<pg_t,vector<int> >::iterator p = m->pg_temp.begin(); p != m->pg_temp.end(); ++p)
pending_inc.new_pg_temp[p->first] = p->second;
pending_inc.new_up_thru[from] = m->map_epoch; // set up_thru too, so the osd doesn't have to ask again
wait_for_finished_proposal(new C_ReplyMap(this, m, m->map_epoch));
@@ -1237,7 +1237,7 @@ bool OSDMonitor::preprocess_remove_snaps(MRemoveSnaps *m)
for (map<int, vector<snapid_t> >::iterator q = m->snaps.begin();
q != m->snaps.end();
- q++) {
+ ++q) {
if (!osdmap.have_pg_pool(q->first)) {
dout(10) << " ignoring removed_snaps " << q->second << " on non-existent pool " << q->first << dendl;
continue;
@@ -1245,7 +1245,7 @@ bool OSDMonitor::preprocess_remove_snaps(MRemoveSnaps *m)
const pg_pool_t *pi = osdmap.get_pg_pool(q->first);
for (vector<snapid_t>::iterator p = q->second.begin();
p != q->second.end();
- p++) {
+ ++p) {
if (*p > pi->get_snap_seq() ||
!pi->removed_snaps.contains(*p))
return false;
@@ -1263,11 +1263,11 @@ bool OSDMonitor::prepare_remove_snaps(MRemoveSnaps *m)
for (map<int, vector<snapid_t> >::iterator p = m->snaps.begin();
p != m->snaps.end();
- p++) {
+ ++p) {
pg_pool_t& pi = osdmap.pools[p->first];
for (vector<snapid_t>::iterator q = p->second.begin();
q != p->second.end();
- q++) {
+ ++q) {
if (!pi.removed_snaps.contains(*q) &&
(!pending_inc.new_pools.count(p->first) ||
!pending_inc.new_pools[p->first].removed_snaps.contains(*q))) {
@@ -1311,7 +1311,7 @@ void OSDMonitor::send_to_waiting()
}
} else {
dout(10) << "send_to_waiting from " << from << dendl;
- p++;
+ ++p;
continue;
}
} else {
@@ -1534,7 +1534,7 @@ void OSDMonitor::tick()
int o = i->first;
utime_t down = now;
down -= i->second;
- i++;
+ ++i;
if (osdmap.is_down(o) &&
osdmap.is_in(o) &&
@@ -1596,7 +1596,7 @@ void OSDMonitor::tick()
// expire blacklisted items?
for (hash_map<entity_addr_t,utime_t>::iterator p = osdmap.blacklist.begin();
p != osdmap.blacklist.end();
- p++) {
+ ++p) {
if (p->second < now) {
dout(10) << "expiring blacklist item " << p->first << " expired " << p->second << " < now " << now << dendl;
pending_inc.old_blacklist.push_back(p->first);
@@ -1692,7 +1692,7 @@ void OSDMonitor::mark_all_down()
osdmap.get_all_osds(ls);
for (set<int32_t>::iterator it = ls.begin();
it != ls.end();
- it++) {
+ ++it) {
if (osdmap.is_down(*it)) continue;
pending_inc.new_state[*it] = CEPH_OSD_UP;
}
@@ -1805,7 +1805,7 @@ bool OSDMonitor::preprocess_command(MMonCommand *m)
}
epoch = l;
} else
- i++;
+ ++i;
}
OSDMap *p = &osdmap;
@@ -2045,7 +2045,7 @@ bool OSDMonitor::preprocess_command(MMonCommand *m)
else if (m->cmd.size() == 3 && m->cmd[1] == "blacklist" && m->cmd[2] == "ls") {
for (hash_map<entity_addr_t,utime_t>::iterator p = osdmap.blacklist.begin();
p != osdmap.blacklist.end();
- p++) {
+ ++p) {
stringstream ss;
string s;
ss << p->first << " " << p->second;
diff --git a/src/mon/PGMap.cc b/src/mon/PGMap.cc
index 1da32ea4448..36a35424a20 100644
--- a/src/mon/PGMap.cc
+++ b/src/mon/PGMap.cc
@@ -214,7 +214,7 @@ void PGMap::apply_incremental(CephContext *cct, const Incremental& inc)
}
for (set<pg_t>::const_iterator p = inc.pg_remove.begin();
p != inc.pg_remove.end();
- p++) {
+ ++p) {
const pg_t &removed_pg(*p);
hash_map<pg_t,pg_stat_t>::iterator s = pg_stat.find(removed_pg);
if (s != pg_stat.end()) {
@@ -225,7 +225,7 @@ void PGMap::apply_incremental(CephContext *cct, const Incremental& inc)
for (set<int>::iterator p = inc.osd_stat_rm.begin();
p != inc.osd_stat_rm.end();
- p++) {
+ ++p) {
hash_map<int,osd_stat_t>::iterator t = osd_stat.find(*p);
if (t != osd_stat.end()) {
stat_osd_sub(t->second);
@@ -537,7 +537,7 @@ void PGMap::dump(ostream& ss) const
dump_pg_stats_plain(ss, pg_stat);
for (hash_map<int,pool_stat_t>::const_iterator p = pg_pool_sum.begin();
p != pg_pool_sum.end();
- p++)
+ ++p)
ss << "pool " << p->first
<< "\t" << p->second.stats.sum.num_objects
//<< "\t" << p->second.num_object_copies
@@ -560,7 +560,7 @@ void PGMap::dump(ostream& ss) const
ss << "osdstat\tkbused\tkbavail\tkb\thb in\thb out" << std::endl;
for (hash_map<int,osd_stat_t>::const_iterator p = osd_stat.begin();
p != osd_stat.end();
- p++)
+ ++p)
ss << p->first
<< "\t" << p->second.kb_used
<< "\t" << p->second.kb_avail
diff --git a/src/msg/Pipe.cc b/src/msg/Pipe.cc
index ad1fec007c7..1420b63beb8 100644
--- a/src/msg/Pipe.cc
+++ b/src/msg/Pipe.cc
@@ -588,7 +588,7 @@ int Pipe::accept()
ldout(msgr->cct,10) << "accept re-queuing on out_seq " << out_seq << " in_seq " << in_seq << dendl;
for (map<int, list<Message*> >::iterator p = existing->out_q.begin();
p != existing->out_q.end();
- p++)
+ ++p)
out_q[p->first].splice(out_q[p->first].begin(), p->second);
}
existing->pipe_lock.Unlock();
@@ -1127,13 +1127,13 @@ void Pipe::discard_out_queue()
{
ldout(msgr->cct,10) << "discard_queue" << dendl;
- for (list<Message*>::iterator p = sent.begin(); p != sent.end(); p++) {
+ for (list<Message*>::iterator p = sent.begin(); p != sent.end(); ++p) {
ldout(msgr->cct,20) << " discard " << *p << dendl;
(*p)->put();
}
sent.clear();
- for (map<int,list<Message*> >::iterator p = out_q.begin(); p != out_q.end(); p++)
- for (list<Message*>::iterator r = p->second.begin(); r != p->second.end(); r++) {
+ for (map<int,list<Message*> >::iterator p = out_q.begin(); p != out_q.end(); ++p)
+ for (list<Message*>::iterator r = p->second.begin(); r != p->second.end(); ++r) {
ldout(msgr->cct,20) << " discard " << *r << dendl;
(*r)->put();
}
@@ -1998,7 +1998,7 @@ int Pipe::write_message(ceph_msg_header& header, ceph_msg_footer& footer, buffer
if (left == 0)
break;
while (b_off == (int)pb->length()) {
- pb++;
+ ++pb;
b_off = 0;
}
}
diff --git a/src/os/FileStore.cc b/src/os/FileStore.cc
index c0942a5e637..7888c9fa395 100644
--- a/src/os/FileStore.cc
+++ b/src/os/FileStore.cc
@@ -1869,7 +1869,7 @@ FileStore::Op *FileStore::build_op(list<Transaction*>& tls,
uint64_t bytes = 0, ops = 0;
for (list<Transaction*>::iterator p = tls.begin();
p != tls.end();
- p++) {
+ ++p) {
bytes += (*p)->get_num_bytes();
ops += (*p)->get_num_ops();
}
@@ -2124,7 +2124,7 @@ int FileStore::_do_transactions(
uint64_t bytes = 0, ops = 0;
for (list<Transaction*>::iterator p = tls.begin();
p != tls.end();
- p++) {
+ ++p) {
bytes += (*p)->get_num_bytes();
ops += (*p)->get_num_ops();
}
@@ -2132,7 +2132,7 @@ int FileStore::_do_transactions(
int trans_num = 0;
for (list<Transaction*>::iterator p = tls.begin();
p != tls.end();
- p++, trans_num++) {
+ ++p, trans_num++) {
r = _do_transaction(**p, op_seq, trans_num);
if (r < 0)
break;
@@ -4003,7 +4003,7 @@ int FileStore::_rmattrs(coll_t cid, const hobject_t& oid,
}
r = _fgetattrs(fd, aset, false);
if (r >= 0) {
- for (map<string,bufferptr>::iterator p = aset.begin(); p != aset.end(); p++) {
+ for (map<string,bufferptr>::iterator p = aset.begin(); p != aset.end(); ++p) {
char n[CHAIN_XATTR_MAX_NAME_LEN];
get_attrname(p->first.c_str(), n, CHAIN_XATTR_MAX_NAME_LEN);
r = chain_fremovexattr(fd, n);
diff --git a/src/os/FlatIndex.cc b/src/os/FlatIndex.cc
index e6bf79d8145..9f40c367dcc 100644
--- a/src/os/FlatIndex.cc
+++ b/src/os/FlatIndex.cc
@@ -421,7 +421,7 @@ int FlatIndex::collection_list(vector<hobject_t> *ls) {
// build final list
ls->resize(inolist.size());
int i = 0;
- for (vector< pair<ino_t,hobject_t> >::iterator p = inolist.begin(); p != inolist.end(); p++)
+ for (vector< pair<ino_t,hobject_t> >::iterator p = inolist.begin(); p != inolist.end(); ++p)
(*ls)[i++].swap(p->second);
::closedir(dir);
diff --git a/src/os/JournalingObjectStore.cc b/src/os/JournalingObjectStore.cc
index 971fd15b824..e65f010443f 100644
--- a/src/os/JournalingObjectStore.cc
+++ b/src/os/JournalingObjectStore.cc
@@ -250,7 +250,7 @@ void JournalingObjectStore::_op_journal_transactions(
unsigned data_len = 0;
int data_align = -1; // -1 indicates that we don't care about the alignment
for (list<ObjectStore::Transaction*>::iterator p = tls.begin();
- p != tls.end(); p++) {
+ p != tls.end(); ++p) {
ObjectStore::Transaction *t = *p;
if (t->get_data_length() > data_len &&
(int)t->get_data_length() >= g_conf->journal_align_min_size) {
diff --git a/src/os/LFNIndex.cc b/src/os/LFNIndex.cc
index 412100fe604..f1ba8c158f0 100644
--- a/src/os/LFNIndex.cc
+++ b/src/os/LFNIndex.cc
@@ -233,7 +233,7 @@ int LFNIndex::remove_objects(const vector<string> &dir,
remaining->insert(pair<string, hobject_t>(
lfn_get_short_name(candidate->second.second, *i),
candidate->second.second));
- candidate++;
+ ++candidate;
}
if (!holes.empty())
clean_chains.insert(lfn_get_short_name(to_clean->second, 0));
diff --git a/src/osd/OSD.cc b/src/osd/OSD.cc
index 0f2707c22f1..576aeed08e2 100644
--- a/src/osd/OSD.cc
+++ b/src/osd/OSD.cc
@@ -1223,7 +1223,7 @@ int OSD::shutdown()
// then kick all pgs,
for (hash_map<pg_t, PG*>::iterator p = pg_map.begin();
p != pg_map.end();
- p++) {
+ ++p) {
dout(20) << " kicking pg " << p->first << dendl;
p->second->lock();
p->second->kick();
@@ -1238,7 +1238,7 @@ int OSD::shutdown()
// tell pgs we're shutting down
for (hash_map<pg_t, PG*>::iterator p = pg_map.begin();
p != pg_map.end();
- p++) {
+ ++p) {
p->second->lock();
p->second->on_shutdown();
p->second->unlock();
@@ -1281,7 +1281,7 @@ int OSD::shutdown()
// close pgs
for (hash_map<pg_t, PG*>::iterator p = pg_map.begin();
p != pg_map.end();
- p++) {
+ ++p) {
PG *pg = p->second;
pg->put();
}
@@ -1359,7 +1359,7 @@ void OSD::clear_temp(ObjectStore *store, coll_t tmp)
unsigned removed = 0;
for (vector<hobject_t>::iterator p = objects.begin();
p != objects.end();
- p++, removed++) {
+ ++p, removed++) {
t.collection_remove(tmp, *p);
if (removed > 300) {
int r = store->apply_transaction(t);
@@ -1552,7 +1552,7 @@ void OSD::load_pgs()
map<pg_t, interval_set<snapid_t> > pgs;
for (vector<coll_t>::iterator it = ls.begin();
it != ls.end();
- it++) {
+ ++it) {
pg_t pgid;
snapid_t snap;
@@ -1683,7 +1683,7 @@ void OSD::build_past_intervals_parallel()
epoch_t cur_epoch = superblock.newest_map;
for (hash_map<pg_t, PG*>::iterator i = pg_map.begin();
i != pg_map.end();
- i++) {
+ ++i) {
PG *pg = i->second;
epoch_t start, end;
@@ -2040,7 +2040,7 @@ void OSD::update_osd_stat()
osd_stat.kb_avail = stbuf.f_bavail * stbuf.f_bsize / 1024;
osd_stat.hb_in.clear();
- for (map<int,HeartbeatInfo>::iterator p = heartbeat_peers.begin(); p != heartbeat_peers.end(); p++)
+ for (map<int,HeartbeatInfo>::iterator p = heartbeat_peers.begin(); p != heartbeat_peers.end(); ++p)
osd_stat.hb_in.push_back(p->first);
osd_stat.hb_out.clear();
@@ -2095,7 +2095,7 @@ void OSD::maybe_update_heartbeat_peers()
// build heartbeat from set
for (hash_map<pg_t, PG*>::iterator i = pg_map.begin();
i != pg_map.end();
- i++) {
+ ++i) {
PG *pg = i->second;
pg->heartbeat_peer_lock.Lock();
dout(20) << i->first << " heartbeat_peers " << pg->heartbeat_peers << dendl;
@@ -2275,7 +2275,7 @@ void OSD::heartbeat_check()
cutoff -= g_conf->osd_heartbeat_grace;
for (map<int,HeartbeatInfo>::iterator p = heartbeat_peers.begin();
p != heartbeat_peers.end();
- p++) {
+ ++p) {
dout(25) << "heartbeat_check osd." << p->first
<< " first_tx " << p->second.first_tx
<< " last_tx " << p->second.last_tx
@@ -2328,7 +2328,7 @@ void OSD::heartbeat()
// send heartbeats
for (map<int,HeartbeatInfo>::iterator i = heartbeat_peers.begin();
i != heartbeat_peers.end();
- i++) {
+ ++i) {
int peer = i->first;
dout(30) << "heartbeat allocating ping for osd." << peer << dendl;
Message *m = new MOSDPing(monc->get_fsid(),
@@ -2575,7 +2575,7 @@ void TestOpsSocketHook::test_ops(OSDService *service, ObjectStore *store,
if (r >= 0) {
ss << "header=" << string(hdrbl.c_str(), hdrbl.length());
for (map<string, bufferlist>::iterator it = keyvals.begin();
- it != keyvals.end(); it++)
+ it != keyvals.end(); ++it)
ss << " key=" << (*it).first << " val="
<< string((*it).second.c_str(), (*it).second.length());
} else {
@@ -3670,7 +3670,7 @@ void OSD::handle_scrub(MOSDScrub *m)
if (m->scrub_pgs.empty()) {
for (hash_map<pg_t, PG*>::iterator p = pg_map.begin();
p != pg_map.end();
- p++) {
+ ++p) {
PG *pg = p->second;
pg->lock();
if (pg->is_primary()) {
@@ -3686,7 +3686,7 @@ void OSD::handle_scrub(MOSDScrub *m)
} else {
for (vector<pg_t>::iterator p = m->scrub_pgs.begin();
p != m->scrub_pgs.end();
- p++)
+ ++p)
if (pg_map.count(*p)) {
PG *pg = pg_map[*p];
pg->lock();
@@ -4054,7 +4054,7 @@ void OSD::handle_osd_map(MOSDMap *m)
// kill connections to newly down osds
set<int> old;
osdmap->get_all_osds(old);
- for (set<int>::iterator p = old.begin(); p != old.end(); p++) {
+ for (set<int>::iterator p = old.begin(); p != old.end(); ++p) {
if (*p != whoami &&
osdmap->have_inst(*p) && // in old map
(!newmap->exists(*p) || !newmap->is_up(*p))) { // but not the new one
@@ -4342,7 +4342,7 @@ void OSD::consume_map()
// scan pg's
for (hash_map<pg_t,PG*>::iterator it = pg_map.begin();
it != pg_map.end();
- it++) {
+ ++it) {
PG *pg = it->second;
pg->lock();
if (pg->is_primary())
@@ -4385,7 +4385,7 @@ void OSD::consume_map()
// scan pg's
for (hash_map<pg_t,PG*>::iterator it = pg_map.begin();
it != pg_map.end();
- it++) {
+ ++it) {
PG *pg = it->second;
pg->lock();
pg->queue_null(osdmap->get_epoch(), osdmap->get_epoch());
@@ -4749,7 +4749,7 @@ void OSD::do_split(PG *parent, set<pg_t>& childpgids, ObjectStore::Transaction&
map<pg_t,PG*> children;
for (set<pg_t>::iterator q = childpgids.begin();
q != childpgids.end();
- q++) {
+ ++q) {
pg_history_t history;
history.epoch_created = history.same_up_since =
history.same_interval_since = history.same_primary_since =
@@ -4776,7 +4776,7 @@ void OSD::do_split(PG *parent, set<pg_t>& childpgids, ObjectStore::Transaction&
// unlock parent, children
parent->unlock();
- for (map<pg_t,PG*>::iterator q = children.begin(); q != children.end(); q++) {
+ for (map<pg_t,PG*>::iterator q = children.begin(); q != children.end(); ++q) {
PG *pg = q->second;
pg->handle_create(&rctx);
pg->write_if_dirty(t);
@@ -4799,7 +4799,7 @@ void OSD::split_pg(PG *parent, map<pg_t,PG*>& children, ObjectStore::Transaction
vector<hobject_t> olist;
store->collection_list(coll_t(parent->info.pgid), olist);
- for (vector<hobject_t>::iterator p = olist.begin(); p != olist.end(); p++) {
+ for (vector<hobject_t>::iterator p = olist.begin(); p != olist.end(); ++p) {
hobject_t poid = *p;
object_locator_t oloc(parentid.pool());
if (poid.get_key().size())
@@ -4847,7 +4847,7 @@ void OSD::split_pg(PG *parent, map<pg_t,PG*>& children, ObjectStore::Transaction
list<pg_log_entry_t>::iterator p = parent->log.log.begin();
while (p != parent->log.log.end()) {
list<pg_log_entry_t>::iterator cur = p;
- p++;
+ ++p;
hobject_t& poid = cur->soid;
object_locator_t oloc(parentid.pool());
if (poid.get_key().size())
@@ -4869,7 +4869,7 @@ void OSD::split_pg(PG *parent, map<pg_t,PG*>& children, ObjectStore::Transaction
for (map<pg_t,PG*>::iterator p = children.begin();
p != children.end();
- p++) {
+ ++p) {
PG *child = p->second;
// fix log bounds
@@ -4934,7 +4934,7 @@ void OSD::handle_pg_create(OpRequestRef op)
for (map<pg_t,pg_create_t>::iterator p = m->mkpg.begin();
p != m->mkpg.end();
- p++) {
+ ++p) {
pg_t pgid = p->first;
epoch_t created = p->second.created;
pg_t parent = p->second.parent;
@@ -4990,7 +4990,7 @@ void OSD::handle_pg_create(OpRequestRef op)
dout(10) << "mkpg " << pgid << " e" << created
<< " h " << history
<< " : querying priors " << pset << dendl;
- for (set<int>::iterator p = pset.begin(); p != pset.end(); p++)
+ for (set<int>::iterator p = pset.begin(); p != pset.end(); ++p)
if (osdmap->is_up(*p))
(*rctx.query_map)[*p][pgid] = pg_query_t(pg_query_t::INFO, history,
osdmap->get_epoch());
@@ -5105,7 +5105,7 @@ void OSD::do_notifies(
{
for (map< int, vector<pair<pg_notify_t,pg_interval_map_t> > >::iterator it = notify_list.begin();
it != notify_list.end();
- it++) {
+ ++it) {
if (it->first == whoami) {
dout(7) << "do_notify osd." << it->first << " is self, skipping" << dendl;
continue;
@@ -5148,7 +5148,7 @@ void OSD::do_queries(map< int, map<pg_t,pg_query_t> >& query_map,
{
for (map< int, map<pg_t,pg_query_t> >::iterator pit = query_map.begin();
pit != query_map.end();
- pit++) {
+ ++pit) {
if (!curmap->is_up(pit->first))
continue;
int who = pit->first;
@@ -5238,7 +5238,7 @@ void OSD::handle_pg_notify(OpRequestRef op)
for (vector<pair<pg_notify_t, pg_interval_map_t> >::iterator it = m->get_pg_list().begin();
it != m->get_pg_list().end();
- it++) {
+ ++it) {
PG *pg = 0;
if (it->first.info.pgid.preferred() >= 0) {
@@ -5567,7 +5567,7 @@ void OSD::handle_pg_query(OpRequestRef op)
for (map<pg_t,pg_query_t>::iterator it = m->pg_list.begin();
it != m->pg_list.end();
- it++) {
+ ++it) {
pg_t pgid = it->first;
if (pgid.preferred() >= 0) {
@@ -5653,7 +5653,7 @@ void OSD::handle_pg_remove(OpRequestRef op)
for (vector<pg_t>::iterator it = m->pg_list.begin();
it != m->pg_list.end();
- it++) {
+ ++it) {
pg_t pgid = *it;
if (pgid.preferred() >= 0) {
dout(10) << "ignoring localized pg " << pgid << dendl;
@@ -5771,7 +5771,7 @@ void OSD::check_replay_queue()
}
replay_queue_lock.Unlock();
- for (list< pair<pg_t,utime_t> >::iterator p = pgids.begin(); p != pgids.end(); p++) {
+ for (list< pair<pg_t,utime_t> >::iterator p = pgids.begin(); p != pgids.end(); ++p) {
pg_t pgid = p->first;
if (pg_map.count(pgid)) {
PG *pg = _lookup_lock_pg_with_map_lock_held(pgid);
diff --git a/src/osd/OSDMap.cc b/src/osd/OSDMap.cc
index cf818475aa3..2d3dc227215 100644
--- a/src/osd/OSDMap.cc
+++ b/src/osd/OSDMap.cc
@@ -500,7 +500,7 @@ void OSDMap::Incremental::dump(Formatter *f) const
f->open_array_section("new_pg_temp");
for (map<pg_t,vector<int> >::const_iterator p = new_pg_temp.begin();
p != new_pg_temp.end();
- p++) {
+ ++p) {
f->open_object_section("pg");
f->dump_stream("pgid") << p->first;
f->open_array_section("osds");
@@ -544,7 +544,7 @@ void OSDMap::Incremental::dump(Formatter *f) const
f->open_array_section("new_blacklist");
for (map<entity_addr_t,utime_t>::const_iterator p = new_blacklist.begin();
p != new_blacklist.end();
- p++) {
+ ++p) {
stringstream ss;
ss << p->first;
f->dump_stream(ss.str().c_str()) << p->second;
@@ -590,7 +590,7 @@ void OSDMap::set_epoch(epoch_t e)
epoch = e;
for (map<int64_t,pg_pool_t>::iterator p = pools.begin();
p != pools.end();
- p++)
+ ++p)
p->second.last_change = e;
}
@@ -820,20 +820,20 @@ int OSDMap::apply_incremental(const Incremental &inc)
for (set<int64_t>::const_iterator p = inc.old_pools.begin();
p != inc.old_pools.end();
- p++) {
+ ++p) {
pools.erase(*p);
name_pool.erase(pool_name[*p]);
pool_name.erase(*p);
}
for (map<int64_t,pg_pool_t>::const_iterator p = inc.new_pools.begin();
p != inc.new_pools.end();
- p++) {
+ ++p) {
pools[p->first] = p->second;
pools[p->first].last_change = epoch;
}
for (map<int64_t,string>::const_iterator p = inc.new_pool_names.begin();
p != inc.new_pool_names.end();
- p++) {
+ ++p) {
if (pool_name.count(p->first))
name_pool.erase(pool_name[p->first]);
pool_name[p->first] = p->second;
@@ -842,7 +842,7 @@ int OSDMap::apply_incremental(const Incremental &inc)
for (map<int32_t,uint32_t>::const_iterator i = inc.new_weight.begin();
i != inc.new_weight.end();
- i++) {
+ ++i) {
set_weight(i->first, i->second);
// if we are marking in, clear the AUTOOUT and NEW bits.
@@ -853,7 +853,7 @@ int OSDMap::apply_incremental(const Incremental &inc)
// up/down
for (map<int32_t,uint8_t>::const_iterator i = inc.new_state.begin();
i != inc.new_state.end();
- i++) {
+ ++i) {
int s = i->second ? i->second : CEPH_OSD_UP;
if ((osd_state[i->first] & CEPH_OSD_UP) &&
(s & CEPH_OSD_UP)) {
@@ -867,7 +867,7 @@ int OSDMap::apply_incremental(const Incremental &inc)
}
for (map<int32_t,entity_addr_t>::const_iterator i = inc.new_up_client.begin();
i != inc.new_up_client.end();
- i++) {
+ ++i) {
osd_state[i->first] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
osd_addrs->client_addr[i->first].reset(new entity_addr_t(i->second));
if (inc.new_hb_up.empty())
@@ -879,13 +879,13 @@ int OSDMap::apply_incremental(const Incremental &inc)
}
for (map<int32_t,entity_addr_t>::const_iterator i = inc.new_up_internal.begin();
i != inc.new_up_internal.end();
- i++)
+ ++i)
osd_addrs->cluster_addr[i->first].reset(new entity_addr_t(i->second));
// info
for (map<int32_t,epoch_t>::const_iterator i = inc.new_up_thru.begin();
i != inc.new_up_thru.end();
- i++)
+ ++i)
osd_info[i->first].up_thru = i->second;
for (map<int32_t,pair<epoch_t,epoch_t> >::const_iterator i = inc.new_last_clean_interval.begin();
i != inc.new_last_clean_interval.end();
@@ -893,7 +893,7 @@ int OSDMap::apply_incremental(const Incremental &inc)
osd_info[i->first].last_clean_begin = i->second.first;
osd_info[i->first].last_clean_end = i->second.second;
}
- for (map<int32_t,epoch_t>::const_iterator p = inc.new_lost.begin(); p != inc.new_lost.end(); p++)
+ for (map<int32_t,epoch_t>::const_iterator p = inc.new_lost.begin(); p != inc.new_lost.end(); ++p)
osd_info[p->first].lost_at = p->second;
// xinfo
@@ -905,7 +905,7 @@ int OSDMap::apply_incremental(const Incremental &inc)
(*osd_uuid)[p->first] = p->second;
// pg rebuild
- for (map<pg_t, vector<int> >::const_iterator p = inc.new_pg_temp.begin(); p != inc.new_pg_temp.end(); p++) {
+ for (map<pg_t, vector<int> >::const_iterator p = inc.new_pg_temp.begin(); p != inc.new_pg_temp.end(); ++p) {
if (p->second.empty())
pg_temp->erase(p->first);
else
@@ -915,11 +915,11 @@ int OSDMap::apply_incremental(const Incremental &inc)
// blacklist
for (map<entity_addr_t,utime_t>::const_iterator p = inc.new_blacklist.begin();
p != inc.new_blacklist.end();
- p++)
+ ++p)
blacklist[p->first] = p->second;
for (vector<entity_addr_t>::const_iterator p = inc.old_blacklist.begin();
p != inc.old_blacklist.end();
- p++)
+ ++p)
blacklist.erase(*p);
// cluster snapshot?
@@ -1306,7 +1306,7 @@ void OSDMap::decode(bufferlist::iterator& p)
// index pool names
name_pool.clear();
- for (map<int64_t,string>::iterator i = pool_name.begin(); i != pool_name.end(); i++)
+ for (map<int64_t,string>::iterator i = pool_name.begin(); i != pool_name.end(); ++i)
name_pool[i->second] = i->first;
calc_num_osds();
@@ -1386,7 +1386,7 @@ void OSDMap::dump(Formatter *f) const
f->open_array_section("pg_temp");
for (map<pg_t,vector<int> >::const_iterator p = pg_temp->begin();
p != pg_temp->end();
- p++) {
+ ++p) {
f->open_object_section("osds");
f->dump_stream("pgid") << p->first;
f->open_array_section("osds");
@@ -1400,7 +1400,7 @@ void OSDMap::dump(Formatter *f) const
f->open_array_section("blacklist");
for (hash_map<entity_addr_t,utime_t>::const_iterator p = blacklist.begin();
p != blacklist.end();
- p++) {
+ ++p) {
stringstream ss;
ss << p->first;
f->dump_stream(ss.str().c_str()) << p->second;
@@ -1489,7 +1489,7 @@ void OSDMap::print(ostream& out) const
<< "' " << p->second << "\n";
for (map<snapid_t,pool_snap_info_t>::const_iterator q = p->second.snaps.begin();
q != p->second.snaps.end();
- q++)
+ ++q)
out << "\tsnap " << q->second.snapid << " '" << q->second.name << "' " << q->second.stamp << "\n";
if (!p->second.removed_snaps.empty())
out << "\tremoved_snaps " << p->second.removed_snaps << "\n";
@@ -1518,12 +1518,12 @@ void OSDMap::print(ostream& out) const
for (map<pg_t,vector<int> >::const_iterator p = pg_temp->begin();
p != pg_temp->end();
- p++)
+ ++p)
out << "pg_temp " << p->first << " " << p->second << "\n";
for (hash_map<entity_addr_t,utime_t>::const_iterator p = blacklist.begin();
p != blacklist.end();
- p++)
+ ++p)
out << "blacklist " << p->first << " expires " << p->second << "\n";
// ignore pg_swap_primary
@@ -1577,7 +1577,7 @@ void OSDMap::print_tree(ostream *out, Formatter *f) const
set<int> touched;
set<int> roots;
crush->find_roots(roots);
- for (set<int>::iterator p = roots.begin(); p != roots.end(); p++) {
+ for (set<int>::iterator p = roots.begin(); p != roots.end(); ++p) {
list<qi> q;
q.push_back(qi(*p, 0, crush->get_bucket_weight(*p) / (float)0x10000));
while (!q.empty()) {
@@ -1713,7 +1713,7 @@ void OSDMap::build_simple(CephContext *cct, epoch_t e, uuid_d &fsid,
int poolbase = nosd ? nosd : 1;
- for (map<int,const char*>::iterator p = rulesets.begin(); p != rulesets.end(); p++) {
+ for (map<int,const char*>::iterator p = rulesets.begin(); p != rulesets.end(); ++p) {
int64_t pool = ++pool_max;
pools[pool].type = pg_pool_t::TYPE_REP;
pools[pool].flags = cct->_conf->osd_pool_default_flags;
@@ -1773,7 +1773,7 @@ void OSDMap::build_simple_crush_map(CephContext *cct, CrushWrapper& crush,
int minrep = conf->osd_min_rep;
int maxrep = conf->osd_max_rep;
assert(maxrep >= minrep);
- for (map<int,const char*>::iterator p = rulesets.begin(); p != rulesets.end(); p++) {
+ for (map<int,const char*>::iterator p = rulesets.begin(); p != rulesets.end(); ++p) {
int ruleset = p->first;
crush_rule *rule = crush_make_rule(3, ruleset, pg_pool_t::TYPE_REP, minrep, maxrep);
assert(rule);
@@ -1838,7 +1838,7 @@ int OSDMap::build_simple_from_conf(CephContext *cct, epoch_t e, uuid_d &fsid,
rulesets[CEPH_METADATA_RULE] = "metadata";
rulesets[CEPH_RBD_RULE] = "rbd";
- for (map<int,const char*>::iterator p = rulesets.begin(); p != rulesets.end(); p++) {
+ for (map<int,const char*>::iterator p = rulesets.begin(); p != rulesets.end(); ++p) {
int64_t pool = ++pool_max;
pools[pool].type = pg_pool_t::TYPE_REP;
pools[pool].flags = cct->_conf->osd_pool_default_flags;
@@ -1936,7 +1936,7 @@ void OSDMap::build_simple_crush_map_from_conf(CephContext *cct, CrushWrapper& cr
// rules
int minrep = conf->osd_min_rep;
int maxrep = conf->osd_max_rep;
- for (map<int,const char*>::iterator p = rulesets.begin(); p != rulesets.end(); p++) {
+ for (map<int,const char*>::iterator p = rulesets.begin(); p != rulesets.end(); ++p) {
int ruleset = p->first;
crush_rule *rule = crush_make_rule(3, ruleset, pg_pool_t::TYPE_REP, minrep, maxrep);
assert(rule);
diff --git a/src/osd/PG.cc b/src/osd/PG.cc
index 7ede6023183..c0d6cb4cf86 100644
--- a/src/osd/PG.cc
+++ b/src/osd/PG.cc
@@ -487,7 +487,7 @@ void PG::rewind_divergent_log(ObjectStore::Transaction& t, eversion_t newhead)
if (info.last_complete > newhead)
info.last_complete = newhead;
- for (list<pg_log_entry_t>::iterator d = divergent.begin(); d != divergent.end(); d++)
+ for (list<pg_log_entry_t>::iterator d = divergent.begin(); d != divergent.end(); ++d)
merge_old_entry(t, *d);
dirty_info = true;
@@ -526,7 +526,7 @@ void PG::merge_log(ObjectStore::Transaction& t,
list<pg_log_entry_t>::iterator to;
for (to = from;
to != olog.log.end();
- to++) {
+ ++to) {
if (to->version > log.tail)
break;
log.index(*to);
@@ -565,18 +565,18 @@ void PG::merge_log(ObjectStore::Transaction& t,
while (1) {
if (from == olog.log.begin())
break;
- from--;
+ --from;
dout(20) << " ? " << *from << dendl;
if (from->version <= log.head) {
dout(20) << "merge_log cut point (usually last shared) is " << *from << dendl;
lower_bound = from->version;
- from++;
+ ++from;
break;
}
}
// index, update missing, delete deleted
- for (list<pg_log_entry_t>::iterator p = from; p != to; p++) {
+ for (list<pg_log_entry_t>::iterator p = from; p != to; ++p) {
pg_log_entry_t &ne = *p;
dout(20) << "merge_log " << ne << dendl;
log.index(ne);
@@ -617,7 +617,7 @@ void PG::merge_log(ObjectStore::Transaction& t,
// process divergent items
if (!divergent.empty()) {
- for (list<pg_log_entry_t>::iterator d = divergent.begin(); d != divergent.end(); d++)
+ for (list<pg_log_entry_t>::iterator d = divergent.begin(); d != divergent.end(); ++d)
merge_old_entry(t, *d);
}
@@ -769,7 +769,7 @@ ostream& PG::IndexedLog::print(ostream& out) const
out << *this << std::endl;
for (list<pg_log_entry_t>::const_iterator p = log.begin();
p != log.end();
- p++) {
+ ++p) {
out << *p << " " << (logged_object(p->soid) ? "indexed":"NOT INDEXED") << std::endl;
assert(!p->reqid_is_indexed() || logged_req(p->reqid));
}
@@ -961,7 +961,7 @@ void PG::remove_down_peer_info(const OSDMapRef osdmap)
peer_info.erase(p++);
removed = true;
} else
- p++;
+ ++p;
}
// if we removed anyone, update peers (which include peer_info)
@@ -999,7 +999,7 @@ void PG::build_prior(std::auto_ptr<PriorSet> &prior_set)
// sanity check
for (map<int,pg_info_t>::iterator it = peer_info.begin();
it != peer_info.end();
- it++) {
+ ++it) {
assert(info.history.last_epoch_started >= it->second.history.last_epoch_started);
}
}
@@ -1372,7 +1372,7 @@ void PG::build_might_have_unfound()
// include any (stray) peers
for (map<int,pg_info_t>::iterator p = peer_info.begin();
p != peer_info.end();
- p++)
+ ++p)
might_have_unfound.insert(p->first);
dout(15) << __func__ << ": built " << might_have_unfound << dendl;
@@ -1570,7 +1570,7 @@ void PG::activate(ObjectStore::Transaction& t,
if (m && pi.last_backfill != hobject_t()) {
for (list<pg_log_entry_t>::iterator p = m->log.log.begin();
p != m->log.log.end();
- p++)
+ ++p)
if (p->soid <= pi.last_backfill)
pm.add_next_event(*p);
}
@@ -1751,7 +1751,7 @@ void PG::replay_queued_ops()
for (map<eversion_t,OpRequestRef>::iterator p = replay_queue.begin();
p != replay_queue.end();
- p++) {
+ ++p) {
if (p->first.version != c.version+1) {
dout(10) << "activate replay " << p->first
<< " skipping " << c.version+1 - p->first.version
@@ -2147,7 +2147,7 @@ void PG::purge_strays()
bool removed = false;
for (set<int>::iterator p = stray_set.begin();
p != stray_set.end();
- p++) {
+ ++p) {
if (get_osdmap()->is_up(*p)) {
dout(10) << "sending PGRemove to osd." << *p << dendl;
vector<pg_t> to_remove;
@@ -2569,7 +2569,7 @@ void PG::write_log(ObjectStore::Transaction& t)
map<string,bufferlist> keys;
for (list<pg_log_entry_t>::iterator p = log.log.begin();
p != log.log.end();
- p++) {
+ ++p) {
bufferlist bl(sizeof(*p) * 2);
p->encode_with_checksum(bl);
keys[p->get_key_name()].claim(bl);
@@ -2647,7 +2647,7 @@ void PG::append_log(
map<string,bufferlist> keys;
for (vector<pg_log_entry_t>::iterator p = logv.begin();
p != logv.end();
- p++) {
+ ++p) {
p->offset = 0;
add_log_entry(*p, keys[p->get_key_name()]);
}
@@ -2950,7 +2950,7 @@ void PG::requeue_object_waiters(map<hobject_t, list<OpRequestRef> >& m)
{
for (map<hobject_t, list<OpRequestRef> >::iterator it = m.begin();
it != m.end();
- it++)
+ ++it)
requeue_ops(it->second);
m.clear();
}
@@ -3131,7 +3131,7 @@ void PG::_scan_list(ScrubMap &map, vector<hobject_t> &ls, bool deep)
int i = 0;
for (vector<hobject_t>::iterator p = ls.begin();
p != ls.end();
- p++, i++) {
+ ++p, i++) {
hobject_t poid = *p;
struct stat st;
@@ -3498,12 +3498,12 @@ void PG::build_inc_scrub_map(ScrubMap &map, eversion_t v)
p = log.log.begin();
} else if (v > log.tail) {
p = log.find_entry(v);
- p++;
+ ++p;
} else {
assert(0);
}
- for (; p != log.log.end(); p++) {
+ for (; p != log.log.end(); ++p) {
if (p->is_update()) {
ls.push_back(p->soid);
map.objects[p->soid].negative = false;
@@ -4118,7 +4118,7 @@ bool PG::scrub_gather_replica_maps() {
for (map<int,ScrubMap>::iterator p = scrubber.received_maps.begin();
p != scrubber.received_maps.end();
- p++) {
+ ++p) {
if (scrubber.received_maps[p->first].valid_through != log.head) {
scrubber.waiting_on++;
@@ -4167,7 +4167,7 @@ bool PG::_compare_scrub_objects(ScrubMap::object &auth,
}
for (map<string,bufferptr>::const_iterator i = auth.attrs.begin();
i != auth.attrs.end();
- i++) {
+ ++i) {
if (!candidate.attrs.count(i->first)) {
if (!ok)
errorstream << ", ";
@@ -4182,7 +4182,7 @@ bool PG::_compare_scrub_objects(ScrubMap::object &auth,
}
for (map<string,bufferptr>::const_iterator i = candidate.attrs.begin();
i != candidate.attrs.end();
- i++) {
+ ++i) {
if (!auth.attrs.count(i->first)) {
if (!ok)
errorstream << ", ";
@@ -4227,8 +4227,8 @@ void PG::_compare_scrubmaps(const map<int,ScrubMap*> &maps,
set<hobject_t> master_set;
// Construct master set
- for (j = maps.begin(); j != maps.end(); j++) {
- for (i = j->second->objects.begin(); i != j->second->objects.end(); i++) {
+ for (j = maps.begin(); j != maps.end(); ++j) {
+ for (i = j->second->objects.begin(); i != j->second->objects.end(); ++i) {
master_set.insert(i->first);
}
}
@@ -4236,12 +4236,12 @@ void PG::_compare_scrubmaps(const map<int,ScrubMap*> &maps,
// Check maps against master set and each other
for (set<hobject_t>::const_iterator k = master_set.begin();
k != master_set.end();
- k++) {
+ ++k) {
map<int, ScrubMap *>::const_iterator auth = _select_auth_object(*k, maps);
assert(auth != maps.end());
set<int> cur_missing;
set<int> cur_inconsistent;
- for (j = maps.begin(); j != maps.end(); j++) {
+ for (j = maps.begin(); j != maps.end(); ++j) {
if (j->second->objects.count(*k)) {
// Compare
stringstream ss;
@@ -4352,13 +4352,13 @@ void PG::scrub_process_inconsistent() {
for (map<hobject_t, pair<ScrubMap::object, int> >::iterator i =
scrubber.authoritative.begin();
i != scrubber.authoritative.end();
- i++) {
+ ++i) {
set<int>::iterator j;
if (scrubber.missing.count(i->first)) {
for (j = scrubber.missing[i->first].begin();
j != scrubber.missing[i->first].end();
- j++) {
+ ++j) {
repair_object(i->first,
&(i->second.first),
acting[*j],
@@ -4369,7 +4369,7 @@ void PG::scrub_process_inconsistent() {
if (scrubber.inconsistent.count(i->first)) {
for (j = scrubber.inconsistent[i->first].begin();
j != scrubber.inconsistent[i->first].end();
- j++) {
+ ++j) {
repair_object(i->first,
&(i->second.first),
acting[*j],
@@ -4601,7 +4601,7 @@ bool PG::may_need_replay(const OSDMapRef osdmap) const
for (map<epoch_t,pg_interval_t>::const_reverse_iterator p = past_intervals.rbegin();
p != past_intervals.rend();
- p++) {
+ ++p) {
const pg_interval_t &interval = p->second;
dout(10) << "may_need_replay " << interval << dendl;
@@ -4837,7 +4837,7 @@ void PG::start_peering_interval(const OSDMapRef lastmap,
list<OpRequestRef> ls;
for (map<eversion_t,OpRequestRef>::iterator it = replay_queue.begin();
it != replay_queue.end();
- it++)
+ ++it)
ls.push_back(it->second);
replay_queue.clear();
requeue_ops(ls);
@@ -5339,7 +5339,7 @@ bool PG::read_log(ObjectStore *store, coll_t coll, hobject_t log_oid,
set<hobject_t> did;
for (list<pg_log_entry_t>::reverse_iterator i = log.log.rbegin();
i != log.log.rend();
- i++) {
+ ++i) {
if (i->version <= info.last_complete) break;
if (did.count(i->soid)) continue;
did.insert(i->soid);
@@ -5536,10 +5536,10 @@ void PG::read_log_old(ObjectStore *store, coll_t coll, hobject_t log_oid,
if (reorder) {
dout(0) << "read_log reordering log" << dendl;
map<eversion_t, pg_log_entry_t> m;
- for (list<pg_log_entry_t>::iterator p = log.log.begin(); p != log.log.end(); p++)
+ for (list<pg_log_entry_t>::iterator p = log.log.begin(); p != log.log.end(); ++p)
m[p->version] = *p;
log.log.clear();
- for (map<eversion_t, pg_log_entry_t>::iterator p = m.begin(); p != m.end(); p++)
+ for (map<eversion_t, pg_log_entry_t>::iterator p = m.begin(); p != m.end(); ++p)
log.log.push_back(p->second);
}
}
@@ -5858,7 +5858,7 @@ boost::statechart::result PG::RecoveryState::Peering::react(const QueryState& q)
q.f->open_array_section("peering_blocked_by");
for (map<int,epoch_t>::iterator p = prior_set->blocked_by.begin();
p != prior_set->blocked_by.end();
- p++) {
+ ++p) {
q.f->open_object_section("osd");
q.f->dump_int("osd", p->first);
q.f->dump_int("current_lost_at", p->second);
@@ -7398,7 +7398,7 @@ PG::PriorSet::PriorSet(const OSDMap &osdmap,
for (map<epoch_t,pg_interval_t>::const_reverse_iterator p = past_intervals.rbegin();
p != past_intervals.rend();
- p++) {
+ ++p) {
const pg_interval_t &interval = p->second;
dout(10) << "build_prior " << interval << dendl;
diff --git a/src/osd/ReplicatedPG.cc b/src/osd/ReplicatedPG.cc
index 23d71f11e5e..f87e334f48f 100644
--- a/src/osd/ReplicatedPG.cc
+++ b/src/osd/ReplicatedPG.cc
@@ -419,7 +419,7 @@ void ReplicatedPG::do_pg_op(OpRequestRef op)
snapid_t snapid = m->get_snapid();
- for (vector<OSDOp>::iterator p = m->ops.begin(); p != m->ops.end(); p++) {
+ for (vector<OSDOp>::iterator p = m->ops.begin(); p != m->ops.end(); ++p) {
bufferlist::iterator bp = p->indata.begin();
switch (p->op.op) {
case CEPH_OSD_OP_PGLS_FILTER:
@@ -776,7 +776,7 @@ void ReplicatedPG::do_op(OpRequestRef op)
// src_oids
map<hobject_t,ObjectContext*> src_obc;
- for (vector<OSDOp>::iterator p = m->ops.begin(); p != m->ops.end(); p++) {
+ for (vector<OSDOp>::iterator p = m->ops.begin(); p != m->ops.end(); ++p) {
OSDOp& osd_op = *p;
if (!ceph_osd_op_type_multi(osd_op.op.op))
continue;
@@ -1370,7 +1370,7 @@ ReplicatedPG::RepGather *ReplicatedPG::trim_object(const hobject_t &coid)
// ...from snapset
snapid_t last = coid.snap;
vector<snapid_t>::iterator p;
- for (p = snapset.clones.begin(); p != snapset.clones.end(); p++)
+ for (p = snapset.clones.begin(); p != snapset.clones.end(); ++p)
if (*p == last)
break;
assert(p != snapset.clones.end());
@@ -1866,7 +1866,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
dout(10) << "do_osd_op " << soid << " " << ops << dendl;
- for (vector<OSDOp>::iterator p = ops.begin(); p != ops.end(); p++) {
+ for (vector<OSDOp>::iterator p = ops.begin(); p != ops.end(); ++p) {
OSDOp& osd_op = *p;
ceph_osd_op& op = osd_op.op;
@@ -2225,7 +2225,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
map<pair<uint64_t, entity_name_t>, watch_info_t>::const_iterator oi_iter;
for (oi_iter = oi.watchers.begin(); oi_iter != oi.watchers.end();
- oi_iter++) {
+ ++oi_iter) {
dout(20) << "key cookie=" << oi_iter->first.first
<< " entity=" << oi_iter->first.second << " "
<< oi_iter->second << dendl;
@@ -2271,7 +2271,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
ci.cloneid = *clone_iter;
for (;snap_iter != ssc->snapset.snaps.rend()
- && (*snap_iter <= ci.cloneid); snap_iter++) {
+ && (*snap_iter <= ci.cloneid); ++snap_iter) {
dout(20) << "List snaps id=" << *snap_iter << dendl;
@@ -2316,7 +2316,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
ci.cloneid = clone_info::HEAD;
//Put remaining snapshots into head clone
- for (;snap_iter != ssc->snapset.snaps.rend(); snap_iter++)
+ for (;snap_iter != ssc->snapset.snaps.rend(); ++snap_iter)
ci.snaps.push_back(*snap_iter);
//Size for HEAD is oi.size
@@ -2896,7 +2896,7 @@ int ReplicatedPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
if (r == 0) {
for (set<string>::iterator iter = keys_to_get.begin();
iter != keys_to_get.end();
- iter++) {
+ ++iter) {
if (vals.count(*iter)) {
out.insert(*(vals.find(*iter)));
}
@@ -4094,7 +4094,7 @@ void ReplicatedPG::populate_obc_watchers(ObjectContext *obc)
for (map<pair<uint64_t, entity_name_t>, watch_info_t>::iterator p =
obc->obs.oi.watchers.begin();
p != obc->obs.oi.watchers.end();
- p++) {
+ ++p) {
utime_t expire = now;
expire += p->second.timeout_seconds;
dout(10) << " unconnected watcher " << p->first << " will expire " << expire << dendl;
@@ -4379,7 +4379,7 @@ void ReplicatedPG::put_object_context(ObjectContext *obc)
if (mode.wake) {
requeue_ops(mode.waiting);
- for (list<Cond*>::iterator p = mode.waiting_cond.begin(); p != mode.waiting_cond.end(); p++)
+ for (list<Cond*>::iterator p = mode.waiting_cond.begin(); p != mode.waiting_cond.end(); ++p)
(*p)->Signal();
mode.wake = false;
}
@@ -4888,7 +4888,7 @@ int ReplicatedPG::pull(
random_shuffle(shuffle.begin(), shuffle.end());
for (vector<int>::iterator p = shuffle.begin();
p != shuffle.end();
- p++) {
+ ++p) {
if (get_osdmap()->is_up(*p)) {
fromosd = *p;
break;
@@ -6041,7 +6041,7 @@ void ReplicatedPG::mark_all_unfound_lost(int what)
dout(10) << e << dendl;
// we are now missing the new version; recovery code will sort it out.
- m++;
+ ++m;
missing.revise_need(oid, info.last_update);
break;
}
@@ -6271,7 +6271,7 @@ void ReplicatedPG::check_recovery_sources(const OSDMapRef osdmap)
p != missing_loc_sources.end();
) {
if (osdmap->is_up(*p)) {
- p++;
+ ++p;
continue;
}
dout(10) << "check_recovery_sources source osd." << *p << " now down" << dendl;
@@ -6311,12 +6311,12 @@ void ReplicatedPG::check_recovery_sources(const OSDMapRef osdmap)
p->second.erase(q++);
} else {
assert(missing_loc_sources.count(*q));
- q++;
+ ++q;
}
if (p->second.empty())
missing_loc.erase(p++);
else
- p++;
+ ++p;
}
}
@@ -6487,7 +6487,7 @@ int ReplicatedPG::recover_primary(int max)
soid = p->second;
}
pg_missing_t::item& item = missing.missing[p->second];
- p++;
+ ++p;
hobject_t head = soid;
head.snap = CEPH_NOSNAP;
@@ -6975,7 +6975,7 @@ void ReplicatedPG::clean_up_local(ObjectStore::Transaction& t)
set<hobject_t> did;
for (list<pg_log_entry_t>::reverse_iterator p = log.log.rbegin();
p != log.log.rend();
- p++) {
+ ++p) {
if (did.count(p->soid))
continue;
did.insert(p->soid);
@@ -7015,7 +7015,7 @@ void ReplicatedPG::_scrub(ScrubMap& scrubmap)
for (map<hobject_t,ScrubMap::object>::reverse_iterator p = scrubmap.objects.rbegin();
p != scrubmap.objects.rend();
- p++) {
+ ++p) {
const hobject_t& soid = p->first;
object_stat_sum_t stat;
if (soid.snap != CEPH_SNAPDIR)
@@ -7127,7 +7127,7 @@ void ReplicatedPG::_scrub(ScrubMap& scrubmap)
// what's next?
if (curclone != snapset.clones.rend())
- curclone++;
+ ++curclone;
if (curclone == snapset.clones.rend())
head = hobject_t();
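Note that not every postfix increment is rewritten: PG::remove_down_peer_info and ReplicatedPG::check_recovery_sources above keep forms like peer_info.erase(p++), where the copy returned by p++ is exactly what keeps a valid iterator after the erase. An illustrative sketch with a hypothetical map and keys, not Ceph code:

    // Illustrative only: erase-while-iterating must keep the postfix form.
    #include <iostream>
    #include <map>
    #include <string>

    int main() {
      std::map<int, std::string> peer_info;
      peer_info[0] = "a";
      peer_info[1] = "b";
      peer_info[2] = "c";

      for (std::map<int, std::string>::iterator p = peer_info.begin();
           p != peer_info.end(); ) {
        if (p->first == 1)
          peer_info.erase(p++);   // intentionally not ++p: the copy from p++
                                  // survives removal of the current element
        else
          ++p;                    // plain advance: prefix form is fine
      }
      std::cout << "size after erase: " << peer_info.size() << "\n";  // prints 2
      return 0;
    }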
diff --git a/src/osd/SnapMapper.cc b/src/osd/SnapMapper.cc
index d6d2749aeee..315e2e2fe86 100644
--- a/src/osd/SnapMapper.cc
+++ b/src/osd/SnapMapper.cc
@@ -140,7 +140,7 @@ int SnapMapper::get_snaps(
int r = backend.get_keys(keys, &got);
if (r < 0)
return r;
- if (got.size() == 0)
+ if (got.empty())
return -ENOENT;
if (out) {
bufferlist::iterator bp = got.begin()->second.begin();
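The SnapMapper.cc hunk is the one change in this series that is not about increment operators: got.size() == 0 becomes got.empty(), which states the intent directly and is guaranteed constant time for every standard container (size() was still allowed to be linear for std::list before C++11). A trivial hedged example, with a plain std::map standing in for the real key/bufferlist map:

    // Illustrative only: empty() vs size() == 0.
    #include <cassert>
    #include <map>
    #include <string>

    int main() {
      std::map<std::string, std::string> got;   // stand-in for the fetched keys
      assert(got.empty());                      // preferred spelling after the patch
      assert(got.size() == 0);                  // same result, less direct
      got["snaps"] = "payload";                 // hypothetical key, illustration only
      assert(!got.empty());
      return 0;
    }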
diff --git a/src/osd/osd_types.cc b/src/osd/osd_types.cc
index 04b1e54ed29..35847d7a542 100644
--- a/src/osd/osd_types.cc
+++ b/src/osd/osd_types.cc
@@ -617,7 +617,7 @@ snapid_t pg_pool_t::snap_exists(const char *s) const
{
for (map<snapid_t,pool_snap_info_t>::const_iterator p = snaps.begin();
p != snaps.end();
- p++)
+ ++p)
if (p->second.name == s)
return p->second.snapid;
return 0;
@@ -664,7 +664,7 @@ SnapContext pg_pool_t::get_snap_context() const
unsigned i = 0;
for (map<snapid_t, pool_snap_info_t>::const_reverse_iterator p = snaps.rbegin();
p != snaps.rend();
- p++)
+ ++p)
s[i++] = p->first;
return SnapContext(get_snap_seq(), s);
}
@@ -1946,7 +1946,7 @@ void pg_log_t::copy_after(const pg_log_t &other, eversion_t v)
tail = other.tail;
for (list<pg_log_entry_t>::const_reverse_iterator i = other.log.rbegin();
i != other.log.rend();
- i++) {
+ ++i) {
assert(i->version > other.tail);
if (i->version <= v) {
// make tail accurate.
@@ -1997,7 +1997,7 @@ ostream& pg_log_t::print(ostream& out) const
out << *this << std::endl;
for (list<pg_log_entry_t>::const_iterator p = log.begin();
p != log.end();
- p++)
+ ++p)
out << *p << std::endl;
return out;
}
@@ -2802,7 +2802,7 @@ void ScrubMap::merge_incr(const ScrubMap &l)
for (map<hobject_t,object>::const_iterator p = l.objects.begin();
p != l.objects.end();
- p++){
+ ++p){
if (p->second.negative) {
map<hobject_t,object>::iterator q = objects.find(p->first);
if (q != objects.end()) {
diff --git a/src/osdc/Filer.cc b/src/osdc/Filer.cc
index 75bd08673fe..aec291006b3 100644
--- a/src/osdc/Filer.cc
+++ b/src/osdc/Filer.cc
@@ -106,7 +106,7 @@ void Filer::_probe(Probe *probe)
for (vector<ObjectExtent>::iterator p = probe->probing.begin();
p != probe->probing.end();
- p++) {
+ ++p) {
ldout(cct, 10) << "_probe probing " << p->oid << dendl;
C_Probe *c = new C_Probe(this, probe, p->oid);
objecter->stat(p->oid, p->oloc, probe->snapid, &c->size, &c->mtime,
@@ -145,14 +145,14 @@ void Filer::_probed(Probe *probe, const object_t& oid, uint64_t size, utime_t mt
vector<ObjectExtent> r;
for (vector<ObjectExtent>::reverse_iterator p = probe->probing.rbegin();
p != probe->probing.rend();
- p++)
+ ++p)
r.push_back(*p);
probe->probing.swap(r);
}
for (vector<ObjectExtent>::iterator p = probe->probing.begin();
p != probe->probing.end();
- p++) {
+ ++p) {
uint64_t shouldbe = p->length + p->offset;
ldout(cct, 10) << "_probed " << probe->ino << " object " << hex << p->oid << dec
<< " should be " << shouldbe
@@ -171,7 +171,7 @@ void Filer::_probed(Probe *probe, const object_t& oid, uint64_t size, utime_t mt
uint64_t oleft = probe->known_size[p->oid] - p->offset;
for (vector<pair<uint64_t, uint64_t> >::iterator i = p->buffer_extents.begin();
i != p->buffer_extents.end();
- i++) {
+ ++i) {
if (oleft <= (uint64_t)i->second) {
end = probe->probing_off + i->first + oleft;
ldout(cct, 10) << "_probed end is in buffer_extent " << i->first << "~" << i->second << " off " << oleft
diff --git a/src/osdc/Filer.h b/src/osdc/Filer.h
index 54a2196d9aa..cde4502d2b4 100644
--- a/src/osdc/Filer.h
+++ b/src/osdc/Filer.h
@@ -186,7 +186,7 @@ class Filer {
} else {
C_GatherBuilder gack(cct, onack);
C_GatherBuilder gcom(cct, oncommit);
- for (vector<ObjectExtent>::iterator p = extents.begin(); p != extents.end(); p++) {
+ for (vector<ObjectExtent>::iterator p = extents.begin(); p != extents.end(); ++p) {
vector<OSDOp> ops(1);
ops[0].op.op = CEPH_OSD_OP_TRIMTRUNC;
ops[0].op.extent.truncate_size = p->offset;
@@ -222,7 +222,7 @@ class Filer {
} else {
C_GatherBuilder gack(cct, onack);
C_GatherBuilder gcom(cct, oncommit);
- for (vector<ObjectExtent>::iterator p = extents.begin(); p != extents.end(); p++) {
+ for (vector<ObjectExtent>::iterator p = extents.begin(); p != extents.end(); ++p) {
if (p->offset == 0 && p->length == layout->fl_object_size)
objecter->remove(p->oid, p->oloc,
snapc, mtime, flags,
diff --git a/src/osdc/ObjectCacher.cc b/src/osdc/ObjectCacher.cc
index f055cd92a02..84079438f0f 100644
--- a/src/osdc/ObjectCacher.cc
+++ b/src/osdc/ObjectCacher.cc
@@ -98,7 +98,7 @@ void ObjectCacher::Object::merge_left(BufferHead *left, BufferHead *right)
// waiters
for (map<loff_t, list<Context*> >::iterator p = right->waitfor_read.begin();
p != right->waitfor_read.end();
- p++)
+ ++p)
left->waitfor_read[p->first].splice( left->waitfor_read[p->first].begin(),
p->second );
@@ -117,18 +117,18 @@ void ObjectCacher::Object::try_merge_bh(BufferHead *bh)
map<loff_t,BufferHead*>::iterator p = data.find(bh->start());
assert(p->second == bh);
if (p != data.begin()) {
- p--;
+ --p;
if (p->second->end() == bh->start() &&
p->second->get_state() == bh->get_state()) {
merge_left(p->second, bh);
bh = p->second;
} else {
- p++;
+ ++p;
}
}
// to the right?
assert(p->second == bh);
- p++;
+ ++p;
if (p != data.end() &&
p->second->start() == bh->end() &&
p->second->get_state() == bh->get_state())
@@ -151,7 +151,7 @@ bool ObjectCacher::Object::is_cached(loff_t cur, loff_t left)
loff_t lenfromcur = MIN(p->second->end() - cur, left);
cur += lenfromcur;
left -= lenfromcur;
- p++;
+ ++p;
continue;
} else if (p->first > cur) {
// gap
@@ -176,7 +176,7 @@ int ObjectCacher::Object::map_read(OSDRead *rd,
assert(oc->lock.is_locked());
for (vector<ObjectExtent>::iterator ex_it = rd->extents.begin();
ex_it != rd->extents.end();
- ex_it++) {
+ ++ex_it) {
if (ex_it->oid != oid.oid)
continue;
@@ -234,7 +234,7 @@ int ObjectCacher::Object::map_read(OSDRead *rd,
loff_t lenfromcur = MIN(e->end() - cur, left);
cur += lenfromcur;
left -= lenfromcur;
- p++;
+ ++p;
continue; // more?
} else if (p->first > cur) {
@@ -310,7 +310,7 @@ ObjectCacher::BufferHead *ObjectCacher::Object::map_write(OSDWrite *wr)
for (vector<ObjectExtent>::iterator ex_it = wr->extents.begin();
ex_it != wr->extents.end();
- ex_it++) {
+ ++ex_it) {
if (ex_it->oid != oid.oid) continue;
@@ -354,12 +354,12 @@ ObjectCacher::BufferHead *ObjectCacher::Object::map_write(OSDWrite *wr)
if (cur + max >= p->first + p->second->length()) {
// we want right bit (one splice)
final = split(bh, cur); // just split it, take right half.
- p++;
+ ++p;
assert(p->second == final);
} else {
// we want middle bit (two splices)
final = split(bh, cur);
- p++;
+ ++p;
assert(p->second == final);
split(final, cur+max);
}
@@ -374,7 +374,7 @@ ObjectCacher::BufferHead *ObjectCacher::Object::map_write(OSDWrite *wr)
if (final) {
oc->mark_dirty(bh);
oc->mark_dirty(final);
- p--; // move iterator back to final
+ --p; // move iterator back to final
assert(p->second == final);
merge_left(final, bh);
} else {
@@ -386,7 +386,7 @@ ObjectCacher::BufferHead *ObjectCacher::Object::map_write(OSDWrite *wr)
loff_t lenfromcur = final->end() - cur;
cur += lenfromcur;
left -= lenfromcur;
- p++;
+ ++p;
continue;
} else {
// gap!
@@ -464,7 +464,7 @@ void ObjectCacher::Object::discard(loff_t off, loff_t len)
// split bh at truncation point?
if (bh->start() < off) {
split(bh, off);
- p++;
+ ++p;
continue;
}
@@ -473,7 +473,7 @@ void ObjectCacher::Object::discard(loff_t off, loff_t len)
split(bh, off + len);
}
- p++;
+ ++p;
ldout(oc->cct, 10) << "discard " << *this << " bh " << *bh << dendl;
oc->bh_remove(this, bh);
delete bh;
@@ -651,7 +651,7 @@ void ObjectCacher::bh_read_finish(int64_t poolid, sobject_t oid, loff_t start,
BufferHead *bh = p->second;
for (map<loff_t, list<Context*> >::iterator p = bh->waitfor_read.begin();
p != bh->waitfor_read.end();
- p++)
+ ++p)
ls.splice(ls.end(), p->second);
bh->waitfor_read.clear();
}
@@ -676,7 +676,7 @@ void ObjectCacher::bh_read_finish(int64_t poolid, sobject_t oid, loff_t start,
// finishers?
for (map<loff_t, list<Context*> >::iterator it = bh->waitfor_read.begin();
it != bh->waitfor_read.end();
- it++)
+ ++it)
ls.splice(ls.end(), it->second);
bh->waitfor_read.clear();
@@ -802,7 +802,7 @@ void ObjectCacher::bh_write_commit(int64_t poolid, sobject_t oid, loff_t start,
// apply to bh's!
for (map<loff_t, BufferHead*>::iterator p = ob->data_lower_bound(start);
p != ob->data.end();
- p++) {
+ ++p) {
BufferHead *bh = p->second;
if (bh->start() > start+(loff_t)length)
@@ -940,7 +940,7 @@ bool ObjectCacher::is_cached(ObjectSet *oset, vector<ObjectExtent>& extents, sna
assert(lock.is_locked());
for (vector<ObjectExtent>::iterator ex_it = extents.begin();
ex_it != extents.end();
- ex_it++) {
+ ++ex_it) {
ldout(cct, 10) << "is_cached " << *ex_it << dendl;
// get Object cache
@@ -978,7 +978,7 @@ int ObjectCacher::_readx(OSDRead *rd, ObjectSet *oset, Context *onfinish,
for (vector<ObjectExtent>::iterator ex_it = rd->extents.begin();
ex_it != rd->extents.end();
- ex_it++) {
+ ++ex_it) {
ldout(cct, 10) << "readx " << *ex_it << dendl;
total_bytes_read += ex_it->length;
@@ -1002,7 +1002,7 @@ int ObjectCacher::_readx(OSDRead *rd, ObjectSet *oset, Context *onfinish,
bool wait = false;
for (map<loff_t, BufferHead*>::iterator bh_it = o->data.begin();
bh_it != o->data.end();
- bh_it++) {
+ ++bh_it) {
BufferHead *bh = bh_it->second;
if (bh->is_dirty() || bh->is_tx()) {
ldout(cct, 10) << "readx flushing " << *bh << dendl;
@@ -1023,7 +1023,7 @@ int ObjectCacher::_readx(OSDRead *rd, ObjectSet *oset, Context *onfinish,
bool allzero = true;
for (map<loff_t, BufferHead*>::iterator bh_it = o->data.begin();
bh_it != o->data.end();
- bh_it++) {
+ ++bh_it) {
ldout(cct, 20) << "readx ob has bh " << *bh_it->second << dendl;
if (!bh_it->second->is_zero() && !bh_it->second->is_rx()) {
allzero = false;
@@ -1054,7 +1054,7 @@ int ObjectCacher::_readx(OSDRead *rd, ObjectSet *oset, Context *onfinish,
// read missing
for (map<loff_t, BufferHead*>::iterator bh_it = missing.begin();
bh_it != missing.end();
- bh_it++) {
+ ++bh_it) {
bh_read(bh_it->second);
if (success && onfinish) {
ldout(cct, 10) << "readx missed, waiting on " << *bh_it->second
@@ -1068,7 +1068,7 @@ int ObjectCacher::_readx(OSDRead *rd, ObjectSet *oset, Context *onfinish,
// bump rx
for (map<loff_t, BufferHead*>::iterator bh_it = rx.begin();
bh_it != rx.end();
- bh_it++) {
+ ++bh_it) {
touch_bh(bh_it->second); // bump in lru, so we don't lose it.
if (success && onfinish) {
ldout(cct, 10) << "readx missed, waiting on " << *bh_it->second
@@ -1084,7 +1084,7 @@ int ObjectCacher::_readx(OSDRead *rd, ObjectSet *oset, Context *onfinish,
// make a plain list
for (map<loff_t, BufferHead*>::iterator bh_it = hits.begin();
bh_it != hits.end();
- bh_it++) {
+ ++bh_it) {
ldout(cct, 10) << "readx hit bh " << *bh_it->second << dendl;
if (bh_it->second->is_error() && bh_it->second->error)
error = bh_it->second->error;
@@ -1129,11 +1129,11 @@ int ObjectCacher::_readx(OSDRead *rd, ObjectSet *oset, Context *onfinish,
bhoff += len;
foff += len;
if (opos == bh->end()) {
- bh_it++;
+ ++bh_it;
bhoff = 0;
}
if (foff == f_it->second) {
- f_it++;
+ ++f_it;
foff = 0;
}
if (bh_it == hits.end()) break;
@@ -1148,7 +1148,7 @@ int ObjectCacher::_readx(OSDRead *rd, ObjectSet *oset, Context *onfinish,
// bump hits in lru
for (list<BufferHead*>::iterator bhit = hit_ls.begin();
bhit != hit_ls.end();
- bhit++)
+ ++bhit)
touch_bh(*bhit);
if (!success) {
@@ -1181,7 +1181,7 @@ int ObjectCacher::_readx(OSDRead *rd, ObjectSet *oset, Context *onfinish,
rd->bl->clear();
for (map<uint64_t,bufferlist>::iterator i = stripe_map.begin();
i != stripe_map.end();
- i++) {
+ ++i) {
assert(pos == i->first);
ldout(cct, 10) << "readx adding buffer len " << i->second.length() << " at " << pos << dendl;
pos += i->second.length();
@@ -1215,7 +1215,7 @@ int ObjectCacher::writex(OSDWrite *wr, ObjectSet *oset, Mutex& wait_on_lock)
for (vector<ObjectExtent>::iterator ex_it = wr->extents.begin();
ex_it != wr->extents.end();
- ex_it++) {
+ ++ex_it) {
// get object cache
sobject_t soid(ex_it->oid, CEPH_NOSNAP);
Object *o = get_object(soid, oset, ex_it->oloc);
@@ -1237,7 +1237,7 @@ int ObjectCacher::writex(OSDWrite *wr, ObjectSet *oset, Mutex& wait_on_lock)
loff_t opos = ex_it->offset;
for (vector<pair<uint64_t, uint64_t> >::iterator f_it = ex_it->buffer_extents.begin();
f_it != ex_it->buffer_extents.end();
- f_it++) {
+ ++f_it) {
ldout(cct, 10) << "writex writing " << f_it->first << "~" << f_it->second << " into " << *bh << " at " << opos << dendl;
uint64_t bhoff = bh->start() - opos;
assert(f_it->second <= bh->length() - bhoff);
@@ -1395,7 +1395,7 @@ bool ObjectCacher::set_is_cached(ObjectSet *oset)
Object *ob = *p;
for (map<loff_t,BufferHead*>::iterator q = ob->data.begin();
q != ob->data.end();
- q++) {
+ ++q) {
BufferHead *bh = q->second;
if (!bh->is_dirty() && !bh->is_tx())
return true;
@@ -1417,7 +1417,7 @@ bool ObjectCacher::set_is_dirty_or_committing(ObjectSet *oset)
for (map<loff_t,BufferHead*>::iterator p = ob->data.begin();
p != ob->data.end();
- p++) {
+ ++p) {
BufferHead *bh = p->second;
if (bh->is_dirty() || bh->is_tx())
return true;
@@ -1447,7 +1447,7 @@ bool ObjectCacher::flush(Object *ob, loff_t offset, loff_t length)
assert(lock.is_locked());
bool clean = true;
ldout(cct, 10) << "flush " << *ob << " " << offset << "~" << length << dendl;
- for (map<loff_t,BufferHead*>::iterator p = ob->data_lower_bound(offset); p != ob->data.end(); p++) {
+ for (map<loff_t,BufferHead*>::iterator p = ob->data_lower_bound(offset); p != ob->data.end(); ++p) {
BufferHead *bh = p->second;
ldout(cct, 20) << "flush " << *bh << dendl;
if (length && bh->start() > offset+length) {
@@ -1583,7 +1583,7 @@ loff_t ObjectCacher::release(Object *ob)
for (map<loff_t,BufferHead*>::iterator p = ob->data.begin();
p != ob->data.end();
- p++) {
+ ++p) {
BufferHead *bh = p->second;
if (bh->is_clean() || bh->is_zero())
clean.push_back(bh);
@@ -1593,7 +1593,7 @@ loff_t ObjectCacher::release(Object *ob)
for (list<BufferHead*>::iterator p = clean.begin();
p != clean.end();
- p++) {
+ ++p) {
bh_remove(ob, *p);
delete *p;
}
@@ -1667,7 +1667,7 @@ uint64_t ObjectCacher::release_all()
hash_map<sobject_t, Object*>::iterator p = i->begin();
while (p != i->end()) {
hash_map<sobject_t, Object*>::iterator n = p;
- n++;
+ ++n;
Object *ob = p->second;
@@ -1760,7 +1760,7 @@ void ObjectCacher::verify_stats() const
Object *ob = p->second;
for (map<loff_t, BufferHead*>::const_iterator q = ob->data.begin();
q != ob->data.end();
- q++) {
+ ++q) {
BufferHead *bh = q->second;
switch (bh->get_state()) {
case BufferHead::STATE_MISSING:
diff --git a/src/osdc/ObjectCacher.h b/src/osdc/ObjectCacher.h
index 80b92d9a362..401045238b9 100644
--- a/src/osdc/ObjectCacher.h
+++ b/src/osdc/ObjectCacher.h
@@ -242,9 +242,9 @@ class ObjectCacher {
map<loff_t,BufferHead*>::iterator p = data.lower_bound(offset);
if (p != data.begin() &&
(p == data.end() || p->first > offset)) {
- p--; // might overlap!
+ --p; // might overlap!
if (p->first + p->second->length() <= offset)
- p++; // doesn't overlap.
+ ++p; // doesn't overlap.
}
return p;
}
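The ObjectCacher.h hunk shows prefix --/++ used to step a lower_bound result back and forth while probing for a buffer that might overlap the requested offset. A simplified sketch of that lookup; the Buf struct and lower_overlap name are stand-ins, not Ceph's types:

    // Illustrative only: find the first entry that could overlap `offset`
    // in a map keyed by start offset.
    #include <iostream>
    #include <map>

    struct Buf {
      long len;
      Buf(long l = 0) : len(l) {}
    };
    typedef std::map<long, Buf> BufMap;   // key = start offset

    BufMap::iterator lower_overlap(BufMap& data, long offset) {
      BufMap::iterator p = data.lower_bound(offset);
      if (p != data.begin() && (p == data.end() || p->first > offset)) {
        --p;                                    // previous entry might overlap
        if (p->first + p->second.len <= offset)
          ++p;                                  // it doesn't; step forward again
      }
      return p;
    }

    int main() {
      BufMap data;
      data[0] = Buf(10);    // covers [0,10)
      data[20] = Buf(5);    // covers [20,25)
      BufMap::iterator p = lower_overlap(data, 5);
      std::cout << (p != data.end() ? p->first : -1) << "\n";   // prints 0
      p = lower_overlap(data, 15);
      std::cout << (p != data.end() ? p->first : -1) << "\n";   // prints 20
      return 0;
    }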
diff --git a/src/osdc/Objecter.cc b/src/osdc/Objecter.cc
index 67df150c265..178ea67dade 100644
--- a/src/osdc/Objecter.cc
+++ b/src/osdc/Objecter.cc
@@ -550,7 +550,7 @@ void Objecter::handle_osd_map(MOSDMap *m)
for (map<int,OSDSession*>::iterator p = osd_sessions.begin();
p != osd_sessions.end(); ) {
OSDSession *s = p->second;
- p++;
+ ++p;
if (osdmap->is_up(s->osd)) {
if (s->con && s->con->get_peer_addr() != osdmap->get_inst(s->osd).addr)
close_session(s);
@@ -589,7 +589,7 @@ void Objecter::handle_osd_map(MOSDMap *m)
(was_pausewr && !pausewr))
for (map<tid_t,Op*>::iterator p = ops.begin();
p != ops.end();
- p++) {
+ ++p) {
Op *op = p->second;
if (op->paused &&
!((op->flags & CEPH_OSD_FLAG_READ) && pauserd) && // not still paused as a read
@@ -598,7 +598,7 @@ void Objecter::handle_osd_map(MOSDMap *m)
}
// resend requests
- for (map<tid_t, Op*>::iterator p = need_resend.begin(); p != need_resend.end(); p++) {
+ for (map<tid_t, Op*>::iterator p = need_resend.begin(); p != need_resend.end(); ++p) {
Op *op = p->second;
if (op->should_resend) {
if (op->session) {
@@ -609,7 +609,7 @@ void Objecter::handle_osd_map(MOSDMap *m)
cancel_op(op);
}
}
- for (list<LingerOp*>::iterator p = need_resend_linger.begin(); p != need_resend_linger.end(); p++) {
+ for (list<LingerOp*>::iterator p = need_resend_linger.begin(); p != need_resend_linger.end(); ++p) {
LingerOp *op = *p;
if (op->session) {
logger->inc(l_osdc_linger_resend);
@@ -921,7 +921,7 @@ void Objecter::tick()
unsigned laggy_ops = 0;
for (map<tid_t,Op*>::iterator p = ops.begin();
p != ops.end();
- p++) {
+ ++p) {
Op *op = p->second;
if (op->session && op->stamp < cutoff) {
ldout(cct, 2) << " tid " << p->first << " on osd." << op->session->osd << " is laggy" << dendl;
@@ -931,7 +931,7 @@ void Objecter::tick()
}
for (map<uint64_t,LingerOp*>::iterator p = linger_ops.begin();
p != linger_ops.end();
- p++) {
+ ++p) {
LingerOp *op = p->second;
if (op->session) {
ldout(cct, 10) << " pinging osd that serves lingering tid " << p->first << " (osd." << op->session->osd << ")" << dendl;
@@ -951,7 +951,7 @@ void Objecter::tick()
// (osd reply message policy is lossy)
for (set<OSDSession*>::iterator i = toping.begin();
i != toping.end();
- i++) {
+ ++i) {
messenger->send_message(new MPing, (*i)->con);
}
}
@@ -1968,7 +1968,7 @@ void Objecter::_sg_read_finish(vector<ObjectExtent>& extents, vector<bufferlist>
vector<bufferlist>::iterator bit = resultbl.begin();
for (vector<ObjectExtent>::iterator eit = extents.begin();
eit != extents.end();
- eit++, bit++) {
+ ++eit, ++bit) {
r.add_partial_result(cct, *bit, eit->buffer_extents);
}
bl->clear();
@@ -2028,7 +2028,7 @@ void Objecter::ms_handle_remote_reset(Connection *con)
void Objecter::dump_active()
{
ldout(cct, 20) << "dump_active .. " << num_homeless_ops << " homeless" << dendl;
- for (map<tid_t,Op*>::iterator p = ops.begin(); p != ops.end(); p++) {
+ for (map<tid_t,Op*>::iterator p = ops.begin(); p != ops.end(); ++p) {
Op *op = p->second;
ldout(cct, 20) << op->tid << "\t" << op->pgid << "\tosd." << (op->session ? op->session->osd : -1)
<< "\t" << op->oid << "\t" << op->ops << dendl;
diff --git a/src/osdc/Objecter.h b/src/osdc/Objecter.h
index f9583400c6e..912f00d91e5 100644
--- a/src/osdc/Objecter.h
+++ b/src/osdc/Objecter.h
@@ -360,8 +360,9 @@ struct ObjectOperation {
if (psnaps) {
psnaps->clones.clear();
- vector<clone_info>::iterator ci;
- for (ci = resp.clones.begin(); ci != resp.clones.end(); ci++) {
+ for (vector<clone_info>::iterator ci = resp.clones.begin();
+ ci != resp.clones.end();
+ ++ci) {
librados::clone_info_t clone;
clone.cloneid = ci->cloneid;
@@ -1594,7 +1595,7 @@ public:
C_GatherBuilder gather(cct);
vector<bufferlist> resultbl(extents.size());
int i=0;
- for (vector<ObjectExtent>::iterator p = extents.begin(); p != extents.end(); p++) {
+ for (vector<ObjectExtent>::iterator p = extents.begin(); p != extents.end(); ++p) {
read_trunc(p->oid, p->oloc, p->offset, p->length,
snap, &resultbl[i++], flags, trunc_size, trunc_seq, gather.new_sub());
}
@@ -1616,11 +1617,11 @@ public:
} else {
C_GatherBuilder gack(cct, onack);
C_GatherBuilder gcom(cct, oncommit);
- for (vector<ObjectExtent>::iterator p = extents.begin(); p != extents.end(); p++) {
+ for (vector<ObjectExtent>::iterator p = extents.begin(); p != extents.end(); ++p) {
bufferlist cur;
for (vector<pair<uint64_t,uint64_t> >::iterator bit = p->buffer_extents.begin();
bit != p->buffer_extents.end();
- bit++)
+ ++bit)
bl.copy(bit->first, bit->second, cur);
assert(cur.length() == p->length);
write_trunc(p->oid, p->oloc, p->offset, p->length,
diff --git a/src/osdc/Striper.cc b/src/osdc/Striper.cc
index 7b5a402893f..b062845d4b5 100644
--- a/src/osdc/Striper.cc
+++ b/src/osdc/Striper.cc
@@ -131,7 +131,7 @@ void Striper::assimilate_extents(map<object_t,vector<ObjectExtent> >& object_ext
// make final list
for (map<object_t, vector<ObjectExtent> >::iterator it = object_extents.begin();
it != object_extents.end();
- it++) {
+ ++it) {
for (vector<ObjectExtent>::iterator p = it->second.begin(); p != it->second.end(); ++p) {
extents.push_back(*p);
}
@@ -224,7 +224,7 @@ void Striper::StripedReadResult::add_partial_sparse_result(CephContext *cct,
// skip zero-length extent
if (s->second == 0) {
ldout(cct, 30) << " s len 0, skipping" << dendl;
- s++;
+ ++s;
continue;
}
@@ -257,7 +257,7 @@ void Striper::StripedReadResult::add_partial_sparse_result(CephContext *cct,
}
if (actual == left) {
ldout(cct, 30) << " s advancing" << dendl;
- s++;
+ ++s;
}
}
}
@@ -294,7 +294,7 @@ void Striper::StripedReadResult::assemble_result(CephContext *cct, bufferlist& b
} else {
bl.claim_prepend(p->second.first);
}
- p++;
+ ++p;
}
partial.clear();
}
diff --git a/src/osdmaptool.cc b/src/osdmaptool.cc
index c1091b69e64..2e55026076c 100644
--- a/src/osdmaptool.cc
+++ b/src/osdmaptool.cc
@@ -282,7 +282,7 @@ int main(int argc, const char **argv)
hash_map<pg_t,vector<int> > m;
for (map<int64_t,pg_pool_t>::const_iterator p = osdmap.get_pools().begin();
p != osdmap.get_pools().end();
- p++) {
+ ++p) {
const pg_pool_t *pool = osdmap.get_pg_pool(p->first);
for (ps_t ps = 0; ps < pool->get_pg_num(); ps++) {
pg_t pgid(ps, p->first, -1);
diff --git a/src/rados.cc b/src/rados.cc
index 51f998c1894..6856491014b 100644
--- a/src/rados.cc
+++ b/src/rados.cc
@@ -742,8 +742,7 @@ int LoadGen::run()
cout << "waiting for all operations to complete" << std::endl;
// now wait on all the pending requests
- vector<librados::AioCompletion *>::iterator citer;
- for (citer = completions.begin(); citer != completions.end(); citer++) {
+ for (vector<librados::AioCompletion *>::iterator citer = completions.begin(); citer != completions.end(); ++citer) {
librados::AioCompletion *c = *citer;
c->wait_for_complete();
c->release();
@@ -1733,7 +1732,7 @@ static int rados_tool_common(const std::map < std::string, std::string > &opts,
header.hexdump(cout);
cout << "\n";
cout << kv.size() << " keys\n";
- for (map<string,bufferlist>::iterator q = kv.begin(); q != kv.end(); q++) {
+ for (map<string,bufferlist>::iterator q = kv.begin(); q != kv.end(); ++q) {
cout << "key '" << q->first << "' (" << q->second.length() << " bytes):\n";
q->second.hexdump(cout);
cout << "\n";
@@ -1823,7 +1822,7 @@ static int rados_tool_common(const std::map < std::string, std::string > &opts,
io_ctx.snap_list(&snaps);
for (vector<snap_t>::iterator i = snaps.begin();
i != snaps.end();
- i++) {
+ ++i) {
string s;
time_t t;
if (io_ctx.snap_get_name(*i, &s) < 0)
@@ -2018,8 +2017,7 @@ static int rados_tool_common(const std::map < std::string, std::string > &opts,
else
ret = 0;
- std::list<obj_watch_t>::iterator i;
- for (i = lw.begin(); i != lw.end(); i++) {
+ for (std::list<obj_watch_t>::iterator i = lw.begin(); i != lw.end(); ++i) {
cout << "watcher=client." << i->watcher_id << " cookie=" << i->cookie << std::endl;
}
} else if (strcmp(nargs[0], "listsnaps") == 0) {
@@ -2042,7 +2040,7 @@ static int rados_tool_common(const std::map < std::string, std::string > &opts,
vector<snap_t> snaps;
io_ctx.snap_list(&snaps);
for (vector<snap_t>::iterator i = snaps.begin();
- i != snaps.end(); i++) {
+ i != snaps.end(); ++i) {
string s;
if (io_ctx.snap_get_name(*i, &s) < 0)
continue;
@@ -2060,7 +2058,7 @@ static int rados_tool_common(const std::map < std::string, std::string > &opts,
}
for (std::vector<clone_info_t>::iterator ci = ls.clones.begin();
- ci != ls.clones.end(); ci++) {
+ ci != ls.clones.end(); ++ci) {
if (formatter) formatter->open_object_section("clone");
@@ -2120,7 +2118,7 @@ static int rados_tool_common(const std::map < std::string, std::string > &opts,
cout << "\t[";
for (std::vector< std::pair<uint64_t,uint64_t> >::iterator ovi = ci->overlap.begin();
- ovi != ci->overlap.end(); ovi++) {
+ ovi != ci->overlap.end(); ++ovi) {
if (formatter) {
formatter->open_object_section("section");
formatter->dump_unsigned("start", ovi->first);
@@ -2244,7 +2242,7 @@ int main(int argc, const char **argv)
} else {
if (val[0] == '-')
usage_exit();
- i++;
+ ++i;
}
}
diff --git a/src/rgw/rgw_acl_s3.cc b/src/rgw/rgw_acl_s3.cc
index 79cf2ced233..4f26dda7d20 100644
--- a/src/rgw/rgw_acl_s3.cc
+++ b/src/rgw/rgw_acl_s3.cc
@@ -328,8 +328,7 @@ static int parse_acl_header(RGWRados *store, RGWEnv *env,
hacl_str = hacl;
get_str_list(hacl_str, ",", grantees);
- list<string>::iterator it = grantees.begin();
- for (; it != grantees.end(); it++) {
+ for (list<string>::iterator it = grantees.begin(); it != grantees.end(); ++it) {
ACLGrant grant;
int ret = parse_grantee_str(store, *it, perm, grant);
if (ret < 0)
@@ -395,8 +394,7 @@ int RGWAccessControlList_S3::create_from_grants(std::list<ACLGrant>& grants)
acl_user_map.clear();
grant_map.clear();
- std::list<ACLGrant>::iterator it = grants.begin();
- for (; it != grants.end(); it++) {
+ for (std::list<ACLGrant>::iterator it = grants.begin(); it != grants.end(); ++it) {
ACLGrant g = *it;
add_grant(&g);
}
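Several of the later hunks (Objecter.h, rados.cc, rgw_acl_s3.cc above) also pull the iterator declaration into the for statement while switching to the prefix form, so the iterator no longer outlives the loop. An illustrative before/after, with made-up grantee strings rather than real ACL data:

    // Illustrative only: scoping the iterator to the loop it drives.
    #include <iostream>
    #include <list>
    #include <string>

    int main() {
      std::list<std::string> grantees;        // stand-in for the parsed header
      grantees.push_back("id=alice");         // hypothetical grantee strings
      grantees.push_back("id=bob");

      // Before: iterator declared outside the loop, postfix increment.
      //   std::list<std::string>::iterator it = grantees.begin();
      //   for (; it != grantees.end(); it++) { ... }

      // After: iterator scoped to the loop, prefix increment.
      for (std::list<std::string>::iterator it = grantees.begin();
           it != grantees.end(); ++it)
        std::cout << *it << std::endl;
      return 0;
    }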
diff --git a/src/rgw/rgw_admin.cc b/src/rgw/rgw_admin.cc
index d7cda7b6b20..d7e7130b5d4 100644
--- a/src/rgw/rgw_admin.cc
+++ b/src/rgw/rgw_admin.cc
@@ -749,7 +749,7 @@ static int remove_bucket(RGWRados *store, rgw_bucket& bucket, bool delete_childr
while (!objs.empty()) {
std::vector<RGWObjEnt>::iterator it = objs.begin();
- for (it = objs.begin(); it != objs.end(); it++) {
+ for (it = objs.begin(); it != objs.end(); ++it) {
ret = remove_object(store, bucket, (*it).name);
if (ret < 0)
return ret;
@@ -1556,7 +1556,7 @@ next:
if (!m.empty() && purge_data) {
int ret;
- for (std::map<string, RGWBucketEnt>::iterator it = m.begin(); it != m.end(); it++) {
+ for (std::map<string, RGWBucketEnt>::iterator it = m.begin(); it != m.end(); ++it) {
ret = remove_bucket(store, ((*it).second).bucket, true);
if (ret < 0)
diff --git a/src/rgw/rgw_rados.cc b/src/rgw/rgw_rados.cc
index 3c707d4c3e8..85a631902fa 100644
--- a/src/rgw/rgw_rados.cc
+++ b/src/rgw/rgw_rados.cc
@@ -2490,7 +2490,7 @@ int RGWRados::clone_objs_impl(void *ctx, rgw_obj& dst_obj,
if (range.src_ofs + (int64_t)range.len != next.src_ofs ||
range.dst_ofs + (int64_t)range.len != next.dst_ofs)
break;
- range_iter++;
+ range_iter = next_iter;
range.len += next.len;
}
if (range.len) {
@@ -2843,7 +2843,7 @@ struct get_obj_data : public RefCountedObject {
if (r < 0)
return r;
- for (; aiter != completion_map.end(); aiter++) {
+ for (; aiter != completion_map.end(); ++aiter) {
completion = aiter->second;
if (!completion->is_complete()) {
/* reached a request that is not yet complete, stop */