#include <string>

- #include "core/bloom.h"
-
extern "C" {
#include <unistd.h>
@@ -30,23 +28,17 @@ using absl::StripTrailingAsciiWhitespace;
namespace {
constexpr auto kUsageHistPoints = std::array{50, 90, 99};
- constexpr auto kInitialSBFCap = 1000;
- constexpr auto kFProb = 0.001;
- constexpr auto kGrowthFactor = 2;
constexpr auto kHistSignificantFigures = 3;

- }  // namespace
-
- FilterWithSize::FilterWithSize()
-     : sbf{SBF{kInitialSBFCap, kFProb, kGrowthFactor, PMR_NS::get_default_resource()}}, size{0} {
+ HllBufferPtr InitHllPtr() {
+   HllBufferPtr p;
+   p.size = getDenseHllSize();
+   p.hll = new uint8_t[p.size];
+   CHECK_EQ(0, createDenseHll(p));
+   return p;
}

- void FilterWithSize::Add(uintptr_t address) {
-   const auto s = std::to_string(address);
-   if (sbf.Add(s)) {
-     size += 1;
-   }
- }
+ }  // namespace

void CollectedPageStats::Merge(CollectedPageStats&& other, uint16_t shard_id) {
  this->pages_scanned += other.pages_scanned;
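Note, not part of the change: a minimal sketch of the ownership pattern the new InitHllPtr() helper sets up, assuming the HllBufferPtr, getDenseHllSize() and createDenseHll() calls exactly as they appear in the hunk above; MakeDenseHll and ReleaseDenseHll are hypothetical names used only for illustration. Each set owns a plain new[]-allocated buffer that must be released with delete[], as the destructor hunk below does.

HllBufferPtr MakeDenseHll() {
  HllBufferPtr p;
  p.size = getDenseHllSize();      // bytes required for a dense HLL representation
  p.hll = new uint8_t[p.size];     // plain heap buffer, owned by the caller
  CHECK_EQ(0, createDenseHll(p));  // 0 is treated as success, as the diff checks
  return p;
}

void ReleaseDenseHll(HllBufferPtr& p) {
  delete[] p.hll;  // mirrors the delete[] calls added to ~UniquePages() below
  p.hll = nullptr;
  p.size = 0;
}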
@@ -98,34 +90,47 @@ std::string CollectedPageStats::ToString() const {
  return response;
}

- PageUsage::UniquePages::UniquePages() {
+ PageUsage::UniquePages::UniquePages()
+     : pages_scanned{InitHllPtr()},
+       pages_marked_for_realloc{InitHllPtr()},
+       pages_full{InitHllPtr()},
+       pages_reserved_for_malloc{InitHllPtr()},
+       pages_with_heap_mismatch{InitHllPtr()},
+       pages_above_threshold{InitHllPtr()} {
  hdr_histogram* h = nullptr;
  const auto init_result = hdr_init(1, 100, kHistSignificantFigures, &h);
  CHECK_EQ(0, init_result) << "failed to initialize histogram";
  page_usage_hist = h;
}

PageUsage::UniquePages::~UniquePages() {
+   delete[] pages_scanned.hll;
+   delete[] pages_marked_for_realloc.hll;
+   delete[] pages_full.hll;
+   delete[] pages_reserved_for_malloc.hll;
+   delete[] pages_with_heap_mismatch.hll;
+   delete[] pages_above_threshold.hll;
  hdr_close(page_usage_hist);
}

void PageUsage::UniquePages::AddStat(mi_page_usage_stats_t stat) {
-   const auto address = stat.page_address;
-   pages_scanned.Add(address);
+   const auto data = reinterpret_cast<const unsigned char*>(&stat.page_address);
+   constexpr size_t size = sizeof(stat.page_address);
+   pfadd_dense(pages_scanned, data, size);
  if (stat.flags == MI_DFLY_PAGE_BELOW_THRESHOLD) {
-     pages_marked_for_realloc.Add(address);
+     pfadd_dense(pages_marked_for_realloc, data, size);
  } else {
    if (stat.flags & MI_DFLY_PAGE_FULL) {
-       pages_full.Add(address);
+       pfadd_dense(pages_full, data, size);
    } else if (stat.flags & MI_DFLY_HEAP_MISMATCH) {
-       pages_with_heap_mismatch.Add(address);
+       pfadd_dense(pages_with_heap_mismatch, data, size);
    } else if (stat.flags & MI_DFLY_PAGE_USED_FOR_MALLOC) {
-       pages_reserved_for_malloc.Add(address);
+       pfadd_dense(pages_reserved_for_malloc, data, size);
    } else {
      // We record usage only for pages which have usage above the given threshold but which are not
      // full. This allows tuning the threshold for future commands. This also excludes full pages,
      // so the only pages here have: threshold < usage% < 100
-       pages_above_threshold.Add(address);
+       pfadd_dense(pages_above_threshold, data, size);
      const double perc = static_cast<double>(stat.used) / static_cast<double>(stat.capacity);
      hdr_record_value(page_usage_hist, perc * 100);
    }
@@ -137,16 +142,18 @@ CollectedPageStats PageUsage::UniquePages::CollectedStats() const {
  for (const auto p : kUsageHistPoints) {
    usage[p] = hdr_value_at_percentile(page_usage_hist, p);
  }
-   return CollectedPageStats{.pages_scanned = pages_scanned.size,
-                             .pages_marked_for_realloc = pages_marked_for_realloc.size,
-                             .pages_full = pages_full.size,
-                             .pages_reserved_for_malloc = pages_reserved_for_malloc.size,
-                             .pages_with_heap_mismatch = pages_with_heap_mismatch.size,
-                             .pages_above_threshold = pages_above_threshold.size,
-                             .objects_skipped_not_required = objects_skipped_not_required,
-                             .objects_skipped_not_supported = objects_skipped_not_supported,
-                             .page_usage_hist = std::move(usage),
-                             .shard_wide_summary = {}};
+
+   return CollectedPageStats{
+       .pages_scanned = static_cast<uint64_t>(pfcountSingle(pages_scanned)),
+       .pages_marked_for_realloc = static_cast<uint64_t>(pfcountSingle(pages_marked_for_realloc)),
+       .pages_full = static_cast<uint64_t>(pfcountSingle(pages_full)),
+       .pages_reserved_for_malloc = static_cast<uint64_t>(pfcountSingle(pages_reserved_for_malloc)),
+       .pages_with_heap_mismatch = static_cast<uint64_t>(pfcountSingle(pages_with_heap_mismatch)),
+       .pages_above_threshold = static_cast<uint64_t>(pfcountSingle(pages_above_threshold)),
+       .objects_skipped_not_required = objects_skipped_not_required,
+       .objects_skipped_not_supported = objects_skipped_not_supported,
+       .page_usage_hist = std::move(usage),
+       .shard_wide_summary = {}};
}

PageUsage::PageUsage(CollectPageStats collect_stats, float threshold)
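Note, not part of the change: a short end-to-end usage sketch of how the updated AddStat()/CollectedStats() path counts distinct pages, assuming the pfadd_dense() and pfcountSingle() signatures used in the hunks above; CountDistinctPages and the surrounding loop are illustrative only, and the project header declaring HllBufferPtr is assumed to be included.

#include <cstdint>
#include <vector>

uint64_t CountDistinctPages(const std::vector<uintptr_t>& pages) {
  HllBufferPtr hll;
  hll.size = getDenseHllSize();
  hll.hll = new uint8_t[hll.size];
  CHECK_EQ(0, createDenseHll(hll));
  for (const uintptr_t address : pages) {
    // Feed the raw bytes of the address, as AddStat() does with stat.page_address.
    const auto* data = reinterpret_cast<const unsigned char*>(&address);
    pfadd_dense(hll, data, sizeof(address));
  }
  // pfcountSingle() yields the cardinality estimate; the diff narrows it to uint64_t.
  const auto estimate = static_cast<uint64_t>(pfcountSingle(hll));
  delete[] hll.hll;
  return estimate;
}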