author     Samuel Just <sam.just@inktank.com>    2013-05-23 15:23:05 -0700
committer  Samuel Just <sam.just@inktank.com>    2013-05-23 19:42:32 -0700
commit     b8a25e08a638c31b9cfc2c1bf6d9bad40e921a9f (patch)
tree       cc1e29b46fc7f149bcdea344f6f2ee715dd751b6 /src
parent     eb69c7df1902706b74876b6803ffcae68bd5ff76 (diff)
download   ceph-b8a25e08a638c31b9cfc2c1bf6d9bad40e921a9f.tar.gz
OSD,PG: pass tphandle down to _scan_list
Signed-off-by: Samuel Just <sam.just@inktank.com>
Diffstat (limited to 'src')
-rw-r--r--  src/osd/OSD.h  12
-rw-r--r--  src/osd/PG.cc  51
-rw-r--r--  src/osd/PG.h   26
3 files changed, 56 insertions, 33 deletions
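
The change is pure plumbing: every scrub entry point now takes a ThreadPool::TPHandle reference so that the handle handed out by the OSD's work queue reaches the innermost per-object loop in _scan_list, presumably so the worker can report liveness to the thread pool during a long scan rather than only when the whole chunk finishes. The sketch below is not Ceph code; TPHandle, scan_list, and process are simplified stand-ins (assumed names) that only illustrate the pattern of threading a pool handle from the work-queue _process method down to the long-running loop.

// Minimal, self-contained sketch (not Ceph code). TPHandle here is a
// stand-in for ThreadPool::TPHandle; scan_list and process are hypothetical
// analogues of PG::_scan_list and ScrubWQ::_process.
#include <chrono>
#include <iostream>
#include <string>
#include <vector>

// Stand-in handle: lets a worker tell the pool "I'm still alive" in the
// middle of a long-running task. Here it just counts the pings.
class TPHandle {
public:
  void reset_tp_timeout() {
    last_ping_ = std::chrono::steady_clock::now();
    ++pings_;
  }
  int pings() const { return pings_; }
private:
  std::chrono::steady_clock::time_point last_ping_;
  int pings_ = 0;
};

// Innermost long-running loop, analogous to PG::_scan_list: it now receives
// the handle so it can ping the pool once per object instead of staying
// silent for the entire scan.
void scan_list(const std::vector<std::string> &objects, TPHandle &handle) {
  for (const auto &obj : objects) {
    // ... stat/read the object, hash its contents, etc. ...
    (void)obj;
    handle.reset_tp_timeout();  // keep the thread-pool watchdog happy
  }
}

// Work-queue layer, analogous to ScrubWQ::_process(PG*, TPHandle&): the pool
// supplies the handle and the worker passes it down unchanged.
void process(const std::vector<std::string> &pg_objects, TPHandle &handle) {
  scan_list(pg_objects, handle);
}

int main() {
  TPHandle handle;
  std::vector<std::string> objects(1000, "obj");
  process(objects, handle);
  std::cout << "heartbeat pinged " << handle.pings() << " times\n";
  return 0;
}

In the real tree the handle type comes from common/WorkQueue.h (hence the new include in PG.h below); how _scan_list uses the handle inside its loop is not part of this diff.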
diff --git a/src/osd/OSD.h b/src/osd/OSD.h
index f52973456f6..ac2c634c1f2 100644
--- a/src/osd/OSD.h
+++ b/src/osd/OSD.h
@@ -1339,8 +1339,10 @@ protected:
       osd->scrub_queue.pop_front();
       return pg;
     }
-    void _process(PG *pg) {
-      pg->scrub();
+    void _process(
+      PG *pg,
+      ThreadPool::TPHandle &handle) {
+      pg->scrub(handle);
       pg->put("ScrubWQ");
     }
     void _clear() {
@@ -1424,7 +1426,9 @@ protected:
       rep_scrub_queue.pop_front();
       return msg;
     }
-    void _process(MOSDRepScrub *msg) {
+    void _process(
+      MOSDRepScrub *msg,
+      ThreadPool::TPHandle &handle) {
       osd->osd_lock.Lock();
       if (osd->is_stopping()) {
         osd->osd_lock.Unlock();
@@ -1433,7 +1437,7 @@ protected:
       if (osd->_have_pg(msg->pgid)) {
         PG *pg = osd->_lookup_lock_pg(msg->pgid);
         osd->osd_lock.Unlock();
-        pg->replica_scrub(msg);
+        pg->replica_scrub(msg, handle);
         msg->put();
         pg->unlock();
       } else {
diff --git a/src/osd/PG.cc b/src/osd/PG.cc
index ae88be652da..a419c68de17 100644
--- a/src/osd/PG.cc
+++ b/src/osd/PG.cc
@@ -3248,7 +3248,9 @@ void PG::sub_op_scrub_map(OpRequestRef op)
 /*
  * pg lock may or may not be held
  */
-void PG::_scan_list(ScrubMap &map, vector<hobject_t> &ls, bool deep)
+void PG::_scan_list(
+  ScrubMap &map, vector<hobject_t> &ls, bool deep,
+  ThreadPool::TPHandle &handle)
 {
   dout(10) << "_scan_list scanning " << ls.size() << " objects"
            << (deep ? " deeply" : "") << dendl;
@@ -3581,8 +3583,10 @@ void PG::_scan_snaps(ScrubMap &smap)
  * build a scrub map over a chunk without releasing the lock
  * only used by chunky scrub
  */
-int PG::build_scrub_map_chunk(ScrubMap &map,
-                              hobject_t start, hobject_t end, bool deep)
+int PG::build_scrub_map_chunk(
+  ScrubMap &map,
+  hobject_t start, hobject_t end, bool deep,
+  ThreadPool::TPHandle &handle)
 {
   dout(10) << "build_scrub_map" << dendl;
   dout(20) << "scrub_map_chunk [" << start << "," << end << ")" << dendl;
@@ -3597,7 +3601,7 @@ int PG::build_scrub_map_chunk(ScrubMap &map,
     return ret;
   }
 
-  _scan_list(map, ls, deep);
+  _scan_list(map, ls, deep, handle);
   _scan_snaps(map);
 
   // pg attrs
@@ -3614,7 +3618,7 @@ int PG::build_scrub_map_chunk(ScrubMap &map,
  * build a (sorted) summary of pg content for purposes of scrubbing
  * called while holding pg lock
  */
-void PG::build_scrub_map(ScrubMap &map)
+void PG::build_scrub_map(ScrubMap &map, ThreadPool::TPHandle &handle)
 {
   dout(10) << "build_scrub_map" << dendl;
 
@@ -3631,7 +3635,7 @@ void PG::build_scrub_map(ScrubMap &map)
   vector<hobject_t> ls;
   osd->store->collection_list(coll, ls);
 
-  _scan_list(map, ls, false);
+  _scan_list(map, ls, false, handle);
   lock();
   _scan_snaps(map);
 
@@ -3656,7 +3660,9 @@ void PG::build_scrub_map(ScrubMap &map)
  * build a summary of pg content changed starting after v
  * called while holding pg lock
  */
-void PG::build_inc_scrub_map(ScrubMap &map, eversion_t v)
+void PG::build_inc_scrub_map(
+  ScrubMap &map, eversion_t v,
+  ThreadPool::TPHandle &handle)
 {
   map.valid_through = last_update_applied;
   map.incr_since = v;
@@ -3680,7 +3686,7 @@ void PG::build_inc_scrub_map(ScrubMap &map, eversion_t v)
     }
   }
 
-  _scan_list(map, ls, false);
+  _scan_list(map, ls, false, handle);
 
   // pg attrs
   osd->store->collection_getattrs(coll, map.attrs);
@@ -3728,7 +3734,9 @@ void PG::repair_object(const hobject_t& soid, ScrubMap::object *po, int bad_peer
  * for pushes to complete in case of recent recovery. Build a single
  * scrubmap of objects that are in the range [msg->start, msg->end).
  */
-void PG::replica_scrub(MOSDRepScrub *msg)
+void PG::replica_scrub(
+  MOSDRepScrub *msg,
+  ThreadPool::TPHandle &handle)
 {
   assert(!scrubber.active_rep_scrub);
   dout(7) << "replica_scrub" << dendl;
@@ -3762,7 +3770,9 @@ void PG::replica_scrub(MOSDRepScrub *msg)
       return;
     }
 
-    build_scrub_map_chunk(map, msg->start, msg->end, msg->deep);
+    build_scrub_map_chunk(
+      map, msg->start, msg->end, msg->deep,
+      handle);
 
   } else {
     if (msg->scrub_from > eversion_t()) {
@@ -3777,10 +3787,10 @@ void PG::replica_scrub(MOSDRepScrub *msg)
         return;
       }
     }
-    build_inc_scrub_map(map, msg->scrub_from);
+    build_inc_scrub_map(map, msg->scrub_from, handle);
     scrubber.finalizing = 0;
   } else {
-    build_scrub_map(map);
+    build_scrub_map(map, handle);
   }
 
   if (msg->map_epoch < info.history.same_interval_since) {
@@ -3808,7 +3818,7 @@ void PG::replica_scrub(MOSDRepScrub *msg)
  * scrub will be chunky if all OSDs in PG support chunky scrub
  * scrub will fall back to classic in any other case
  */
-void PG::scrub()
+void PG::scrub(ThreadPool::TPHandle &handle)
 {
   lock();
   if (deleting) {
@@ -3853,9 +3863,9 @@ void PG::scrub()
   }
 
   if (scrubber.is_chunky) {
-    chunky_scrub();
+    chunky_scrub(handle);
   } else {
-    classic_scrub();
+    classic_scrub(handle);
   }
 
   unlock();
@@ -3900,7 +3910,7 @@ void PG::scrub()
  *    Flag set when we're in the finalize stage.
  *
  */
-void PG::classic_scrub()
+void PG::classic_scrub(ThreadPool::TPHandle &handle)
 {
   if (!scrubber.active) {
     dout(10) << "scrub start" << dendl;
@@ -3931,7 +3941,7 @@ void PG::classic_scrub()
 
     // Unlocks and relocks...
     scrubber.primary_scrubmap = ScrubMap();
-    build_scrub_map(scrubber.primary_scrubmap);
+    build_scrub_map(scrubber.primary_scrubmap, handle);
 
     if (scrubber.epoch_start != info.history.same_interval_since) {
       dout(10) << "scrub pg changed, aborting" << dendl;
@@ -3978,7 +3988,7 @@ void PG::classic_scrub()
 
     if (scrubber.primary_scrubmap.valid_through != log.head) {
       ScrubMap incr;
-      build_inc_scrub_map(incr, scrubber.primary_scrubmap.valid_through);
+      build_inc_scrub_map(incr, scrubber.primary_scrubmap.valid_through, handle);
       scrubber.primary_scrubmap.merge_incr(incr);
     }
 
@@ -4061,7 +4071,7 @@ void PG::classic_scrub()
  * scrubber.state encodes the current state of the scrub (refer to state diagram
  * for details).
  */
-void PG::chunky_scrub() {
+void PG::chunky_scrub(ThreadPool::TPHandle &handle) {
   // check for map changes
   if (scrubber.is_chunky_scrub_active()) {
     if (scrubber.epoch_start != info.history.same_interval_since) {
@@ -4193,7 +4203,8 @@ void PG::chunky_scrub() {
         // build my own scrub map
         ret = build_scrub_map_chunk(scrubber.primary_scrubmap,
                                     scrubber.start, scrubber.end,
-                                    scrubber.deep);
+                                    scrubber.deep,
+                                    handle);
         if (ret < 0) {
           dout(5) << "error building scrub map: " << ret << ", aborting" << dendl;
           scrub_clear_state();
diff --git a/src/osd/PG.h b/src/osd/PG.h
index 9f10d8dfbd0..fe898791c0c 100644
--- a/src/osd/PG.h
+++ b/src/osd/PG.h
@@ -43,6 +43,7 @@
 #include "messages/MOSDRepScrub.h"
 #include "messages/MOSDPGLog.h"
 #include "common/tracked_int_ptr.hpp"
+#include "common/WorkQueue.h"
 
 #include <list>
 #include <memory>
@@ -1035,24 +1036,29 @@ public:
                         map<hobject_t, int> &authoritative,
                         map<hobject_t, set<int> > &inconsistent_snapcolls,
                         ostream &errorstream);
-  void scrub();
-  void classic_scrub();
-  void chunky_scrub();
+  void scrub(ThreadPool::TPHandle &handle);
+  void classic_scrub(ThreadPool::TPHandle &handle);
+  void chunky_scrub(ThreadPool::TPHandle &handle);
   void scrub_compare_maps();
   void scrub_process_inconsistent();
   void scrub_finalize();
   void scrub_finish();
   void scrub_clear_state();
   bool scrub_gather_replica_maps();
-  void _scan_list(ScrubMap &map, vector<hobject_t> &ls, bool deep);
+  void _scan_list(
+    ScrubMap &map, vector<hobject_t> &ls, bool deep,
+    ThreadPool::TPHandle &handle);
   void _scan_snaps(ScrubMap &map);
   void _request_scrub_map_classic(int replica, eversion_t version);
   void _request_scrub_map(int replica, eversion_t version, hobject_t start,
                           hobject_t end, bool deep);
-  int build_scrub_map_chunk(ScrubMap &map,
-                            hobject_t start, hobject_t end, bool deep);
-  void build_scrub_map(ScrubMap &map);
-  void build_inc_scrub_map(ScrubMap &map, eversion_t v);
+  int build_scrub_map_chunk(
+    ScrubMap &map,
+    hobject_t start, hobject_t end, bool deep,
+    ThreadPool::TPHandle &handle);
+  void build_scrub_map(ScrubMap &map, ThreadPool::TPHandle &handle);
+  void build_inc_scrub_map(
+    ScrubMap &map, eversion_t v, ThreadPool::TPHandle &handle);
   virtual void _scrub(ScrubMap &map) { }
   virtual void _scrub_clear_state() { }
   virtual void _scrub_finish() { }
@@ -1071,7 +1077,9 @@ public:
 
   void reg_next_scrub();
   void unreg_next_scrub();
-  void replica_scrub(class MOSDRepScrub *op);
+  void replica_scrub(
+    class MOSDRepScrub *op,
+    ThreadPool::TPHandle &handle);
   void sub_op_scrub_map(OpRequestRef op);
   void sub_op_scrub_reserve(OpRequestRef op);
   void sub_op_scrub_reserve_reply(OpRequestRef op);