Skip to content

Commit 6822380

Browse files
add blk metrics for gc (#385)
1 parent b92e6d8 commit 6822380

File tree

6 files changed

+14
-13
lines changed

6 files changed

+14
-13
lines changed

conanfile.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010

1111
class HomeObjectConan(ConanFile):
1212
name = "homeobject"
13-
version = "3.0.16"
13+
version = "3.0.17"
1414

1515
homepage = "https://github.com/eBay/HomeObject"
1616
description = "Blob Store built on HomeStore"

src/lib/homestore_backend/gc_manager.cpp

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -774,9 +774,6 @@ bool GCManager::pdev_gc_actor::copy_valid_data(
774774
*/
775775
}
776776
#endif
777-
778-
// for emergent gc, we directly use the current shard header as the new header
779-
780777
// TODO::involve ratelimiter in the following code, where read/write are scheduled. or do we need a central
781778
// ratelimter shared by all components except client io?
782779
auto succeed_copying_shard =
@@ -786,6 +783,8 @@ bool GCManager::pdev_gc_actor::copy_valid_data(
786783
&valid_blob_indexes, &data_service, task_id, &last_shard_state, &copied_blobs, pg_id,
787784
header_sgs = std::move(header_sgs)](auto&& err) {
788785
RELEASE_ASSERT(header_sgs.iovs.size() == 1, "header_sgs.iovs.size() should be 1, but not!");
786+
// shard header occupies one blk
787+
COUNTER_INCREMENT(metrics_, gc_write_blk_count, 1);
789788
iomanager.iobuf_free(reinterpret_cast< uint8_t* >(header_sgs.iovs[0].iov_base));
790789
if (err) {
791790
GCLOGE(task_id, pg_id, shard_id,
@@ -822,6 +821,7 @@ bool GCManager::pdev_gc_actor::copy_valid_data(
822821
data_service.async_read(pba, data_sgs, total_size)
823822
.thenValue([this, k, &hints, &move_from_chunk, &move_to_chunk, &data_service, task_id,
824823
pg_id, data_sgs = std::move(data_sgs), pba, &copied_blobs](auto&& err) {
824+
COUNTER_INCREMENT(metrics_, gc_read_blk_count, pba.blk_count());
825825
RELEASE_ASSERT(data_sgs.iovs.size() == 1,
826826
"data_sgs.iovs.size() should be 1, but not!");
827827

@@ -865,6 +865,7 @@ bool GCManager::pdev_gc_actor::copy_valid_data(
865865
return data_service.async_alloc_write(data_sgs, hints, new_pba)
866866
.thenValue([this, shard_id, blob_id, new_pba, &move_to_chunk, task_id, pg_id,
867867
&copied_blobs, data_sgs = std::move(data_sgs)](auto&& err) {
868+
COUNTER_INCREMENT(metrics_, gc_write_blk_count, new_pba.blk_count());
868869
RELEASE_ASSERT(data_sgs.iovs.size() == 1,
869870
"data_sgs.iovs.size() should be 1, but not!");
870871
iomanager.iobuf_free(
@@ -937,8 +938,9 @@ bool GCManager::pdev_gc_actor::copy_valid_data(
937938
return folly::makeFuture< std::error_code >(std::error_code{});
938939
}
939940

940-
// write shard footer
941+
// write shard footer, which occupies one blk
941942
homestore::MultiBlkId out_blkids;
943+
COUNTER_INCREMENT(metrics_, gc_write_blk_count, 1);
942944
return data_service.async_alloc_write(footer_sgs, hints, out_blkids);
943945
})
944946
.thenValue([this, &move_to_chunk, &shard_id, footer_sgs, task_id, pg_id](auto&& err) {

src/lib/homestore_backend/gc_manager.hpp

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -132,6 +132,8 @@ class GCManager {
132132
REGISTER_GAUGE(failed_egc_task_count, "Number of failed emergent gc tasks");
133133
REGISTER_GAUGE(total_reclaimed_space_by_gc, "Total reclaimed space by gc task");
134134
REGISTER_GAUGE(total_reclaimed_space_by_egc, "Total reclaimed space by emergent gc task");
135+
REGISTER_COUNTER(gc_read_blk_count, "Total read blk count by gc in this pdev");
136+
REGISTER_COUNTER(gc_write_blk_count, "Total written blk count by gc in this pdev");
135137

136138
// gc task level histogram metrics
137139
REGISTER_HISTOGRAM(reclaim_ratio_gc, "the ratio of reclaimed blks to total blks in a gc task",
@@ -149,6 +151,7 @@ class GCManager {
149151
register_me_to_farm();
150152
attach_gather_cb(std::bind(&pdev_gc_metrics::on_gather, this));
151153
}
154+
152155
~pdev_gc_metrics() { deregister_me_from_farm(); }
153156
pdev_gc_metrics(const pdev_gc_metrics&) = delete;
154157
pdev_gc_metrics(pdev_gc_metrics&&) noexcept = delete;
@@ -313,7 +316,7 @@ class GCManager {
313316
void drain_pg_pending_gc_task(const pg_id_t pg_id);
314317
void decr_pg_pending_gc_task(const pg_id_t pg_id);
315318
void incr_pg_pending_gc_task(const pg_id_t pg_id);
316-
auto& get_gc_actore_superblks() { return m_gc_actor_sbs; }
319+
auto& get_gc_actor_superblks() { return m_gc_actor_sbs; }
317320
std::shared_ptr< pdev_gc_actor > get_pdev_gc_actor(uint32_t pdev_id);
318321

319322
private:

src/lib/homestore_backend/hs_cp_callbacks.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ folly::Future< bool > HSHomeObject::MyCPCallbacks::cp_flush(CP* cp) {
5656

5757
// flush gc durable_entities
5858
auto gc_manager = home_obj_.gc_manager();
59-
auto& gc_actor_superblks = gc_manager->get_gc_actore_superblks();
59+
auto& gc_actor_superblks = gc_manager->get_gc_actor_superblks();
6060
for (auto& gc_actor_sb : gc_actor_superblks) {
6161
const auto pdev_id = gc_actor_sb->pdev_id;
6262
const auto gc_actor = gc_manager->get_pdev_gc_actor(pdev_id);

src/lib/homestore_backend/hs_homeobject.hpp

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -479,10 +479,6 @@ class HSHomeObject : public HomeObjectImpl {
479479
homestore::MultiBlkId pbas;
480480
};
481481

482-
struct BlobInfoData : public BlobInfo {
483-
Blob blob;
484-
};
485-
486482
enum class BlobState : uint8_t {
487483
ALIVE = 0,
488484
TOMBSTONE = 1,

src/lib/homestore_backend/replication_state_machine.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -724,14 +724,14 @@ folly::Future< std::error_code > ReplicationStateMachine::on_fetch_data(const in
724724
auto rc = index_table->get(get_req);
725725
if (sisl_unlikely(homestore::btree_status_t::success != rc)) {
726726
// blob never exists or has been gc
727-
LOGD("on_fetch_data failed to get from index table, blob never exists or has been gc, blob_id={}, "
727+
LOGD("on_fetch_data: failed to get from index table, blob never exists or has been gc, blob_id={}, "
728728
"shardID=0x{:x}, pg={}",
729729
blob_id, shard_id, pg_id);
730730
should_return_delete_marker = true;
731731
} else {
732732
pbas = index_value.pbas();
733733
if (sisl_unlikely(pbas == HSHomeObject::tombstone_pbas)) {
734-
LOGD("on_fetch_data: blob has been deleted, blob_id={}, shardID=0x{:x}, pg={}", blob_id,
734+
LOGD("on_fetch_data: got tombstone pba for blob_id={}, shardID=0x{:x}, pg={}", blob_id,
735735
shard_id, pg_id);
736736
should_return_delete_marker = true;
737737
}

0 commit comments

Comments (0)