-rw-r--r-- | src/crimson/os/alienstore/alien_store.h       |   1
-rw-r--r-- | src/rgw/rgw_admin.cc                          |   8
-rw-r--r-- | src/rgw/rgw_rados.cc                          |  12
-rw-r--r-- | src/rgw/rgw_trim_mdlog.cc                     |  45
-rw-r--r-- | src/rgw/rgw_zone.h                            |   1
-rwxr-xr-x | src/script/backport-create-issue              |   2
-rwxr-xr-x | src/script/backport-resolve-issue             |   4
-rwxr-xr-x | src/script/build-integration-branch           |   2
-rwxr-xr-x | src/script/ceph-backport.sh                   |   1
-rw-r--r-- | src/test/objectstore/allocator_replay_test.cc | 104
10 files changed, 169 insertions, 11 deletions
diff --git a/src/crimson/os/alienstore/alien_store.h b/src/crimson/os/alienstore/alien_store.h
index 3c4dfa154ae..7a3db432f87 100644
--- a/src/crimson/os/alienstore/alien_store.h
+++ b/src/crimson/os/alienstore/alien_store.h
@@ -134,7 +134,6 @@ private:
   // number of cores that are PREVENTED from being scheduled
   // to run alien store threads.
   static constexpr int N_CORES_FOR_SEASTAR = 3;
-  constexpr static unsigned MAX_KEYS_PER_OMAP_GET_CALL = 32;
   mutable std::unique_ptr<crimson::os::ThreadPool> tp;
   const std::string type;
   const std::string path;
diff --git a/src/rgw/rgw_admin.cc b/src/rgw/rgw_admin.cc
index 80262c20cd9..6d10387fb35 100644
--- a/src/rgw/rgw_admin.cc
+++ b/src/rgw/rgw_admin.cc
@@ -8307,7 +8307,13 @@ next:
     }
 
     auto num_shards = g_conf()->rgw_md_log_max_shards;
-    ret = crs.run(dpp(), create_admin_meta_log_trim_cr(dpp(), static_cast<rgw::sal::RadosStore*>(store), &http, num_shards));
+    auto mltcr = create_admin_meta_log_trim_cr(
+      dpp(), static_cast<rgw::sal::RadosStore*>(store), &http, num_shards);
+    if (!mltcr) {
+      cerr << "Cluster misconfigured! Unable to trim." << std::endl;
+      return -EIO;
+    }
+    ret = crs.run(dpp(), mltcr);
     if (ret < 0) {
       cerr << "automated mdlog trim failed with " << cpp_strerror(ret) << std::endl;
       return -ret;
diff --git a/src/rgw/rgw_rados.cc b/src/rgw/rgw_rados.cc
index 680da9f4fc4..5fb9e9f3c48 100644
--- a/src/rgw/rgw_rados.cc
+++ b/src/rgw/rgw_rados.cc
@@ -611,10 +611,16 @@ public:
   }
   int process(const DoutPrefixProvider *dpp) override {
     list<RGWCoroutinesStack*> stacks;
+    auto metatrimcr = create_meta_log_trim_cr(this, static_cast<rgw::sal::RadosStore*>(store), &http,
+                                              cct->_conf->rgw_md_log_max_shards,
+                                              trim_interval);
+    if (!metatrimcr) {
+      ldpp_dout(dpp, -1) << "Bailing out of trim thread!" << dendl;
+      return -EINVAL;
+    }
     auto meta = new RGWCoroutinesStack(store->ctx(), &crs);
-    meta->call(create_meta_log_trim_cr(this, static_cast<rgw::sal::RadosStore*>(store), &http,
-                                       cct->_conf->rgw_md_log_max_shards,
-                                       trim_interval));
+    meta->call(metatrimcr);
+
     stacks.push_back(meta);
 
     if (store->svc()->zone->sync_module_exports_data()) {
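The rgw_admin.cc and rgw_rados.cc hunks above make both callers check the trim-coroutine factory's return value before using it, because the rgw_trim_mdlog.cc hunks below allow create_meta_log_trim_cr() and create_admin_meta_log_trim_cr() to return nullptr on a misconfigured cluster. A minimal standalone sketch of that calling pattern follows; FakeTrimCR and make_trim_cr() are hypothetical stand-ins for illustration, not code from this change:

// Sketch only: FakeTrimCR and make_trim_cr() stand in for RGWCoroutine and
// the real factory functions touched by this commit.
#include <iostream>
#include <memory>

struct FakeTrimCR {
  int run() { return 0; }
};

// May return nullptr, e.g. when an endpoint sanity check fails.
std::unique_ptr<FakeTrimCR> make_trim_cr(bool endpoints_ok)
{
  if (!endpoints_ok) {
    return nullptr;
  }
  return std::make_unique<FakeTrimCR>();
}

int main()
{
  auto cr = make_trim_cr(false);
  if (!cr) {
    // Bail out instead of dereferencing a null coroutine.
    std::cerr << "Cluster misconfigured! Unable to trim." << std::endl;
    return 1;
  }
  return cr->run();
}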
diff --git a/src/rgw/rgw_trim_mdlog.cc b/src/rgw/rgw_trim_mdlog.cc
index cb4b28bb475..4ddde03363d 100644
--- a/src/rgw/rgw_trim_mdlog.cc
+++ b/src/rgw/rgw_trim_mdlog.cc
@@ -677,9 +677,48 @@ class MetaPeerTrimPollCR : public MetaTrimPollCR {
   {}
 };
+namespace {
+bool sanity_check_endpoints(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store) {
+  bool retval = true;
+  auto current = store->svc()->mdlog->get_period_history()->get_current();
+  const auto& period = current.get_period();
+  for (const auto& [_, zonegroup] : period.get_map().zonegroups) {
+    if (zonegroup.endpoints.empty()) {
+      ldpp_dout(dpp, -1)
+        << __PRETTY_FUNCTION__ << ":" << __LINE__
+        << " WARNING: Cluster is misconfigured! "
+        << " Zonegroup " << zonegroup.get_name()
+        << " (" << zonegroup.get_id() << ") in Realm "
+        << period.get_realm_name() << " ( " << period.get_realm() << ") "
+        << " has no endpoints!" << dendl;
+    }
+    for (const auto& [_, zone] : zonegroup.zones) {
+      if (zone.endpoints.empty()) {
+        ldpp_dout(dpp, -1)
+          << __PRETTY_FUNCTION__ << ":" << __LINE__
+          << " ERROR: Cluster is misconfigured! "
+          << " Zone " << zone.name << " (" << zone.id << ") in Zonegroup "
+          << zonegroup.get_name() << " ( " << zonegroup.get_id()
+          << ") in Realm " << period.get_realm_name()
+          << " ( " << period.get_realm() << ") "
+          << " has no endpoints! Trimming is impossible." << dendl;
+        retval = false;
+      }
+    }
+  }
+  return retval;
+}
+}
+
 RGWCoroutine* create_meta_log_trim_cr(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store,
                                       RGWHTTPManager *http,
                                       int num_shards, utime_t interval)
 {
+  if (!sanity_check_endpoints(dpp, store)) {
+    ldpp_dout(dpp, -1)
+      << __PRETTY_FUNCTION__ << ":" << __LINE__
+      << " ERROR: Cluster is misconfigured! Refusing to trim." << dendl;
+    return nullptr;
+  }
   if (store->svc()->zone->is_meta_master()) {
     return new MetaMasterTrimPollCR(dpp, store, http, num_shards, interval);
   }
@@ -705,6 +744,12 @@ RGWCoroutine* create_admin_meta_log_trim_cr(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store,
                                             RGWHTTPManager *http,
                                             int num_shards)
 {
+  if (!sanity_check_endpoints(dpp, store)) {
+    ldpp_dout(dpp, -1)
+      << __PRETTY_FUNCTION__ << ":" << __LINE__
+      << " ERROR: Cluster is misconfigured! Refusing to trim." << dendl;
+    return nullptr;
+  }
   if (store->svc()->zone->is_meta_master()) {
     return new MetaMasterAdminTrimCR(dpp, store, http, num_shards);
   }
diff --git a/src/rgw/rgw_zone.h b/src/rgw/rgw_zone.h
index a84d492e1f5..9e89cbf150a 100644
--- a/src/rgw/rgw_zone.h
+++ b/src/rgw/rgw_zone.h
@@ -1271,6 +1271,7 @@ public:
   const rgw_zone_id& get_master_zone() const { return master_zone; }
   const std::string& get_master_zonegroup() const { return master_zonegroup; }
   const std::string& get_realm() const { return realm_id; }
+  const std::string& get_realm_name() const { return realm_name; }
   const RGWPeriodMap& get_map() const { return period_map; }
   RGWPeriodConfig& get_config() { return period_config; }
   const RGWPeriodConfig& get_config() const { return period_config; }
diff --git a/src/script/backport-create-issue b/src/script/backport-create-issue
index fdd0d453448..b3e1a6da660 100755
--- a/src/script/backport-create-issue
+++ b/src/script/backport-create-issue
@@ -122,7 +122,7 @@ def connect_to_redmine(a):
 def releases():
     return ('argonaut', 'bobtail', 'cuttlefish', 'dumpling', 'emperor',
             'firefly', 'giant', 'hammer', 'infernalis', 'jewel', 'kraken',
-            'luminous', 'mimic', 'nautilus', 'octopus', 'pacific')
+            'luminous', 'mimic', 'nautilus', 'octopus', 'pacific', 'quincy')
 
 def populate_status_dict(r):
     for status in r.issue_status.all():
diff --git a/src/script/backport-resolve-issue b/src/script/backport-resolve-issue
index 2501f103ec2..7d27ac2ebb5 100755
--- a/src/script/backport-resolve-issue
+++ b/src/script/backport-resolve-issue
@@ -324,7 +324,7 @@ def read_from_file(fs):
 def releases():
     return ('argonaut', 'bobtail', 'cuttlefish', 'dumpling', 'emperor',
             'firefly', 'giant', 'hammer', 'infernalis', 'jewel', 'kraken',
-            'luminous', 'mimic', 'nautilus', 'octopus', 'pacific')
+            'luminous', 'mimic', 'nautilus', 'octopus', 'pacific', 'quincy')
 
 def report_params(a):
     global dry_run
@@ -352,7 +352,7 @@ def ver_to_release():
     return {'v9.2': 'infernalis', 'v10.2': 'jewel', 'v11.2': 'kraken',
             'v12.2': 'luminous', 'v13.2': 'mimic', 'v14.2': 'nautilus',
             'v15.2': 'octopus', 'v16.0': 'pacific', 'v16.1': 'pacific',
-            'v16.2': 'pacific'}
+            'v16.2': 'pacific', 'v17.0': 'quincy'}
 
 def usage():
     logging.error("Redmine credentials are required to perform this operation. "
" diff --git a/src/script/build-integration-branch b/src/script/build-integration-branch index d92821b2f1f..b4f2a6121d7 100755 --- a/src/script/build-integration-branch +++ b/src/script/build-integration-branch @@ -33,7 +33,7 @@ postfix = "-" + time.strftime(TIME_FORMAT, time.localtime()) current_branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).strip().decode() -if current_branch in 'mimic nautilus octopus pacific'.split(): +if current_branch in 'mimic nautilus octopus pacific quincy'.split(): postfix += '-' + current_branch print(f"Adding current branch name '-{current_branch}' as a postfix") diff --git a/src/script/ceph-backport.sh b/src/script/ceph-backport.sh index 2721cfda853..7c4410051f7 100755 --- a/src/script/ceph-backport.sh +++ b/src/script/ceph-backport.sh @@ -1084,6 +1084,7 @@ function try_known_milestones { nautilus) mn="12" ;; octopus) mn="13" ;; pacific) mn="14" ;; + quincy) mn="15" ;; esac echo "$mn" } diff --git a/src/test/objectstore/allocator_replay_test.cc b/src/test/objectstore/allocator_replay_test.cc index 811cc92cdea..401eaa42703 100644 --- a/src/test/objectstore/allocator_replay_test.cc +++ b/src/test/objectstore/allocator_replay_test.cc @@ -18,8 +18,14 @@ using namespace std; void usage(const string &name) { - cerr << "Usage: " << name << " <log_to_replay> <raw_duplicate|free_dump|try_alloc count want alloc_unit>" - << std::endl; + cerr << "Usage: " << name << " <log_to_replay> <raw_duplicate|free_dump|try_alloc count want alloc_unit|replay_alloc alloc_list_file>" << std::endl; +} + +void usage_replay_alloc(const string &name) { + cerr << "Detailed replay_alloc usage: " << name << " <allocator_dump_JSON> replay_alloc <alloc_list_file> [number of replays]" << std::endl; + cerr << "The number of replays defaults to 1." << std::endl; + cerr << "The \"alloc_list_file\" parameter should be a file with allocation requests, one per line." << std::endl; + cerr << "Allocation request format (space separated, optional parameters are 0 if not given): want unit [max] [hint]" << std::endl; } int replay_and_check_for_duplicate(char* fname) @@ -389,5 +395,99 @@ int main(int argc, char **argv) << ", unit:" << alloc_unit << std::endl; return 0; }); + } else if (strcmp(argv[2], "replay_alloc") == 0) { + if (argc < 4) { + std::cerr << "Error: insufficient arguments for \"replay_alloc\" option." + << std::endl; + usage_replay_alloc(argv[0]); + return 1; + } + return replay_free_dump_and_apply(argv[1], + [&](Allocator *a, const string &aname) { + ceph_assert(a); + std::cout << "Fragmentation:" << a->get_fragmentation() + << std::endl; + std::cout << "Fragmentation score:" << a->get_fragmentation_score() + << std::endl; + std::cout << "Free:" << std::hex << a->get_free() << std::dec + << std::endl; + { + /* replay a set of allocation requests */ + char s[4096]; + + FILE *f_alloc_list = fopen(argv[3], "r"); + if (!f_alloc_list) { + std::cerr << "error: unable to open " << argv[3] << std::endl; + return -1; + } + + /* Replay user specified number of times to simulate extended activity + * Defaults to 1 replay. 
+ */ + auto replay_count = 1; + if (argc == 5) { + replay_count = atoi(argv[4]); + } + + for (auto i = 0; i < replay_count; ++i) { + while (fgets(s, sizeof(s), f_alloc_list) != nullptr) { + /* parse allocation request */ + uint64_t want = 0, unit = 0, max = 0, hint = 0; + + if (std::sscanf(s, "%ji %ji %ji %ji", &want, &unit, &max, &hint) < 2) + { + cerr << "Error: malformed allocation request:" << std::endl; + cerr << s << std::endl; + /* do not attempt to allocate a malformed request */ + continue; + } + + /* timestamp for allocation start */ + auto t0 = ceph::mono_clock::now(); + + /* allocate */ + PExtentVector extents; + auto r = a->allocate(want, unit, max, hint, &extents); + if (r < 0) { + /* blind replays of allocations may run out of space, provide info for easy confirmation */ + std::cerr << "Error: allocation failure code: " << r + << " requested want/unit/max/hint (hex): " << std::hex + << want << "/" << unit << "/" << max << "/" << hint + << std::dec << std::endl; + std::cerr << "Fragmentation:" << a->get_fragmentation() + << std::endl; + std::cerr << "Fragmentation score:" << a->get_fragmentation_score() + << std::endl; + std::cerr << "Free:" << std::hex << a->get_free() << std::dec + << std::endl; + /* return 0 if the allocator ran out of space */ + if (r == -ENOSPC) { + return 0; + } + return -1; + } + + /* Outputs the allocation's duration in nanoseconds and the allocation request parameters */ + std::cout << "Duration (ns): " << (ceph::mono_clock::now() - t0).count() + << " want/unit/max/hint (hex): " << std::hex + << want << "/" << unit << "/" << max << "/" << hint + << std::dec << std::endl; + + /* Do not release. */ + //alloc->release(extents); + extents.clear(); + } + fseek(f_alloc_list, 0, SEEK_SET); + } + fclose(f_alloc_list); + std::cout << "Fragmentation:" << a->get_fragmentation() + << std::endl; + std::cout << "Fragmentation score:" << a->get_fragmentation_score() + << std::endl; + std::cout << "Free:" << std::hex << a->get_free() << std::dec + << std::endl; + } + return 0; + }); } } |
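For reference, a small standalone sketch of the allocation-request format that the new replay_alloc branch consumes. It assumes the "want unit [max] [hint]" layout documented in usage_replay_alloc() and loosely mirrors the sscanf-based parsing above (using int64_t and SCNi64 here to keep the sscanf conversion well-defined); the file contents and values shown are made up for illustration.

// Each line of a (hypothetical) alloc_list_file is one request, e.g.:
//   0x100000 0x10000
//   0x80000  0x1000  0x80000
//   0x40000  0x1000  0       0x2000000
// Missing max/hint fields default to 0; values may be decimal or 0x-prefixed hex.
#include <cinttypes>
#include <cstdio>

int main()
{
  const char* line = "0x100000 0x10000";  // sample request: want unit
  int64_t want = 0, unit = 0, max = 0, hint = 0;
  // Same idea as the replay_alloc parser: at least "want unit" must be present.
  if (std::sscanf(line, "%" SCNi64 " %" SCNi64 " %" SCNi64 " %" SCNi64,
                  &want, &unit, &max, &hint) < 2) {
    std::fprintf(stderr, "malformed allocation request: %s\n", line);
    return 1;
  }
  std::printf("want=0x%" PRIx64 " unit=0x%" PRIx64 " max=0x%" PRIx64 " hint=0x%" PRIx64 "\n",
              (uint64_t)want, (uint64_t)unit, (uint64_t)max, (uint64_t)hint);
  return 0;
}

The tool itself rewinds the file and replays the whole list [number of replays] times (fseek back to the start after each pass), so a short request list can approximate longer-running allocator activity.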