Skip to content

Commit 150f1d3

Browse files
yamilmorales authored and copybara-github committed
No public description
PiperOrigin-RevId: 781188275
1 parent 1d26596 commit 150f1d3

File tree

9 files changed

+819
-603
lines changed

9 files changed

+819
-603
lines changed

centipede/BUILD

Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -115,6 +115,15 @@ cc_library(
115115
hdrs = ["int_utils.h"],
116116
)
117117

118+
# flag_util is also compiled directly into the runner via
# RUNNER_SOURCES_NO_MAIN (see below), so keep its dependency list minimal.
cc_library(
    name = "flag_util",
    srcs = ["flag_util.cc"],
    hdrs = ["flag_util.h"],
    deps = [
        "@abseil-cpp//absl/base:nullability",
    ],
)
126+
118127
cc_library(
119128
name = "rolling_hash",
120129
hdrs = ["rolling_hash.h"],
@@ -1024,6 +1033,10 @@ cc_library(
10241033
# e.g. feature.cc. These files are compiled by the engine and the runner
10251034
# separately, with different compiler flags.
10261035
RUNNER_SOURCES_NO_MAIN = [
1036+
"flag_util.cc",
1037+
"flag_util.h",
1038+
"coverage_state.cc",
1039+
"coverage_state.h",
10271040
"byte_array_mutator.cc",
10281041
"byte_array_mutator.h",
10291042
"callstack.h",
@@ -1210,6 +1223,34 @@ cc_library(
12101223
deps = ["@abseil-cpp//absl/flags:flag"],
12111224
)
12121225

1226+
# Global coverage state shared by the runner. Note that coverage_state.cc/.h
# are also listed in RUNNER_SOURCES_NO_MAIN, which is compiled separately
# with runner-specific compiler flags (see the comment on that list).
cc_library(
    name = "coverage_state",
    srcs = [
        "coverage_state.cc",
        "runner_dl_info.cc",
        "runner_dl_info.h",
        "runner_sancov_object.cc",
        "runner_sancov_object.h",
        "runner_utils.cc",
        "runner_utils.h",
    ],
    hdrs = ["coverage_state.h"],
    deps = [
        ":callstack",
        ":feature",
        ":flag_util",
        ":foreach_nonzero",
        ":int_utils",
        ":pc_info",
        ":reverse_pc_table",
        ":runner_cmp_trace",
        ":runner_result",
        "@abseil-cpp//absl/base:core_headers",
        "@abseil-cpp//absl/base:nullability",
        "@abseil-cpp//absl/numeric:bits",
    ],
)
1253+
12131254
################################################################################
12141255
# General-purpose testing utilities
12151256
################################################################################

centipede/coverage_state.cc

Lines changed: 297 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,297 @@
1+
// Copyright 2022 The Centipede Authors.
2+
//
3+
// Licensed under the Apache License, Version 2.0 (the "License");
4+
// you may not use this file except in compliance with the License.
5+
// You may obtain a copy of the License at
6+
//
7+
// https://www.apache.org/licenses/LICENSE-2.0
8+
//
9+
// Unless required by applicable law or agreed to in writing, software
10+
// distributed under the License is distributed on an "AS IS" BASIS,
11+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
// See the License for the specific language governing permissions and
13+
// limitations under the License.
14+
15+
#include "./centipede/coverage_state.h"
16+
17+
#include <cstddef>
18+
#include <cstdint>
19+
20+
#include "./centipede/feature.h"
21+
#include "./centipede/int_utils.h"
22+
#include "./centipede/runner_utils.h"
23+
24+
namespace fuzztest::internal {
25+
namespace {
26+
27+
// Returns the length of the common prefix of `s1` and `s2`, but not more
// than 63. I.e. the returned value is in [0, 64).
size_t LengthOfCommonPrefix(const void *s1, const void *s2, size_t n) {
  const auto *a = static_cast<const uint8_t *>(s1);
  const auto *b = static_cast<const uint8_t *>(s2);
  // Cap the scan so the result fits in 6 bits (see the caller's use).
  static constexpr size_t kMaxLen = 63;
  const size_t limit = n < kMaxLen ? n : kMaxLen;
  size_t i = 0;
  while (i < limit && a[i] == b[i]) ++i;
  return i;
}
39+
40+
// Hooks thread termination: the thread_local instance below is destroyed
// when its owning thread exits, and the destructor unregisters the thread's
// TLS state via ThreadLocalRunnerState::OnThreadStop().
class ThreadTerminationDetector {
 public:
  // A dummy method to trigger the construction and make sure that the
  // destructor will be called on the thread termination.
  // `optnone` keeps the compiler from optimizing the call (and thus the
  // lazy thread_local construction) away.
  __attribute__((optnone)) void EnsureAlive() {}

  ~ThreadTerminationDetector() { tls.OnThreadStop(); }
};

// One detector per thread; constructed on first touch by
// ThreadLocalRunnerState::OnThreadStart() calling EnsureAlive().
thread_local ThreadTerminationDetector termination_detector;
50+
51+
} // namespace
52+
53+
// The single global coverage state object.
// NOTE(review): init_priority(200) forces early construction during static
// initialization — presumably so instrumentation callbacks that run before
// main() see a constructed object; confirm against the runner's sancov hooks.
CoverageState coverage_state __attribute__((init_priority(200)));

// We use __thread instead of thread_local so that the compiler warns if
// the initializer for `tls` is not a constant expression.
// `tls` thus must not have a CTOR.
// This avoids calls to __tls_init() in hot functions that use `tls`.
__thread ThreadLocalRunnerState tls;
60+
61+
void CoverageState::CleanUpDetachedTls() {
62+
LockGuard lock(tls_list_mu);
63+
while (detached_tls_list) {
64+
ThreadLocalRunnerState *tls = detached_tls_list;
65+
detached_tls_list = detached_tls_list->next;
66+
delete tls;
67+
}
68+
}
69+
70+
// Prepares coverage data structures for the next execution: reclaims TLS of
// terminated threads, resets per-thread path/callstack tracking, clears any
// pending execution-result override, and — when `full_clear` — also drains
// all accumulated counter/bit sets and user-defined features.
__attribute__((noinline)) // so that we see it in profile.
void PrepareCoverage(bool full_clear) {
  coverage_state.CleanUpDetachedTls();
  if (coverage_state.run_time_flags.path_level != 0) {
    coverage_state.ForEachTls([](ThreadLocalRunnerState &tls) {
      tls.path_ring_buffer.Reset(coverage_state.run_time_flags.path_level);
      tls.call_stack.Reset(coverage_state.run_time_flags.callstack_level);
      // Restart stack-depth tracking from the top frame.
      tls.lowest_sp = tls.top_frame_sp;
    });
  }
  {
    fuzztest::internal::LockGuard lock(
        coverage_state.execution_result_override_mu);
    if (coverage_state.execution_result_override != nullptr) {
      coverage_state.execution_result_override->ClearAndResize(0);
    }
  }
  if (!full_clear) return;
  // Full clear: also drop per-thread auto-dictionary cmp traces...
  coverage_state.ForEachTls([](ThreadLocalRunnerState &tls) {
    if (coverage_state.run_time_flags.use_auto_dictionary) {
      tls.cmp_trace2.Clear();
      tls.cmp_trace4.Clear();
      tls.cmp_trace8.Clear();
      tls.cmp_traceN.Clear();
    }
  });
  // ...and drain every global counter/bit set.
  // NOTE(review): the visitors below are deliberately no-ops; this appears to
  // rely on ForEachNonZeroByte/ForEachNonZeroBit zeroing entries as they
  // iterate — confirm in the container implementations (foreach_nonzero).
  coverage_state.pc_counter_set.ForEachNonZeroByte(
      [](size_t idx, uint8_t value) {}, 0,
      coverage_state.actual_pc_counter_set_size_aligned);
  if (coverage_state.run_time_flags.use_dataflow_features)
    coverage_state.data_flow_feature_set.ForEachNonZeroBit([](size_t idx) {});
  if (coverage_state.run_time_flags.use_cmp_features) {
    coverage_state.cmp_feature_set.ForEachNonZeroBit([](size_t idx) {});
    coverage_state.cmp_eq_set.ForEachNonZeroBit([](size_t idx) {});
    coverage_state.cmp_moddiff_set.ForEachNonZeroBit([](size_t idx) {});
    coverage_state.cmp_hamming_set.ForEachNonZeroBit([](size_t idx) {});
    coverage_state.cmp_difflog_set.ForEachNonZeroBit([](size_t idx) {});
  }
  if (coverage_state.run_time_flags.path_level != 0)
    coverage_state.path_feature_set.ForEachNonZeroBit([](size_t idx) {});
  if (coverage_state.run_time_flags.callstack_level != 0)
    coverage_state.callstack_set.ForEachNonZeroBit([](size_t idx) {});
  // Zero the user-defined (__centipede_extra_features) range.
  for (auto *p = coverage_state.user_defined_begin;
       p != coverage_state.user_defined_end; ++p) {
    *p = 0;
  }
  coverage_state.sancov_objects.ClearInlineCounters();
}
118+
119+
// Appends `feature` to g_features. With --skip_seen_features, a feature that
// is already recorded in `seen_features` is dropped; otherwise it is recorded
// as seen after being appended.
static void MaybeAddFeature(feature_t feature) {
  auto &state = coverage_state;
  if (!state.run_time_flags.skip_seen_features) {
    state.g_features.push_back(feature);
    return;
  }
  if (state.seen_features.get(feature)) return;
  state.g_features.push_back(feature);
  state.seen_features.set(feature);
}
127+
128+
// Adds a kPCs and/or k8bitCounters feature to `g_features` based on arguments.
// `idx` is a pc_index.
// `counter_value` (non-zero) is a counter value associated with that PC.
// NOTE(review): "Indxed" is a typo for "Indexed"; renaming would require
// updating all callers (e.g. PostProcessCoverage), so it is left as-is here.
static void AddPcIndxedAndCounterToFeatures(size_t idx, uint8_t counter_value) {
  if (coverage_state.run_time_flags.use_pc_features) {
    MaybeAddFeature(feature_domains::kPCs.ConvertToMe(idx));
  }
  if (coverage_state.run_time_flags.use_counter_features) {
    // Fold (pc_index, counter) into a single number for the counter domain.
    MaybeAddFeature(feature_domains::k8bitCounters.ConvertToMe(
        Convert8bitCounterToNumber(idx, counter_value)));
  }
}
140+
// Rebuilds `g_features` from all coverage accumulated during the last
// execution: PC counters, data-flow/cmp/path/callstack bit sets, per-thread
// stack-depth data, user-defined features, and inline 8-bit counters.
// A `target_return_value` of -1 means "ignore this execution": g_features is
// left empty.
__attribute__((noinline)) // so that we see it in profile.
void PostProcessCoverage(int target_return_value) {
  coverage_state.g_features.clear();

  if (target_return_value == -1) return;

  // Convert counters to features.
  coverage_state.pc_counter_set.ForEachNonZeroByte(
      [](size_t idx, uint8_t value) {
        AddPcIndxedAndCounterToFeatures(idx, value);
      },
      0, coverage_state.actual_pc_counter_set_size_aligned);

  // Convert data flow bit set to features.
  if (coverage_state.run_time_flags.use_dataflow_features) {
    coverage_state.data_flow_feature_set.ForEachNonZeroBit([](size_t idx) {
      MaybeAddFeature(feature_domains::kDataFlow.ConvertToMe(idx));
    });
  }

  // Convert cmp bit set to features, one domain per cmp signal kind.
  if (coverage_state.run_time_flags.use_cmp_features) {
    // TODO(kcc): remove cmp_feature_set.
    coverage_state.cmp_feature_set.ForEachNonZeroBit([](size_t idx) {
      MaybeAddFeature(feature_domains::kCMP.ConvertToMe(idx));
    });
    coverage_state.cmp_eq_set.ForEachNonZeroBit([](size_t idx) {
      MaybeAddFeature(feature_domains::kCMPEq.ConvertToMe(idx));
    });
    coverage_state.cmp_moddiff_set.ForEachNonZeroBit([](size_t idx) {
      MaybeAddFeature(feature_domains::kCMPModDiff.ConvertToMe(idx));
    });
    coverage_state.cmp_hamming_set.ForEachNonZeroBit([](size_t idx) {
      MaybeAddFeature(feature_domains::kCMPHamming.ConvertToMe(idx));
    });
    coverage_state.cmp_difflog_set.ForEachNonZeroBit([](size_t idx) {
      MaybeAddFeature(feature_domains::kCMPDiffLog.ConvertToMe(idx));
    });
  }

  // Convert path bit set to features.
  if (coverage_state.run_time_flags.path_level != 0) {
    coverage_state.path_feature_set.ForEachNonZeroBit([](size_t idx) {
      MaybeAddFeature(feature_domains::kBoundedPath.ConvertToMe(idx));
    });
  }

  // Iterate all threads and get features from TLS data.
  coverage_state.ForEachTls([](ThreadLocalRunnerState &tls) {
    if (coverage_state.run_time_flags.callstack_level != 0) {
      // Stack grows down, so top_frame_sp must be >= the lowest SP observed.
      RunnerCheck(tls.top_frame_sp >= tls.lowest_sp,
                  "bad values of tls.top_frame_sp and tls.lowest_sp");
      size_t sp_diff = tls.top_frame_sp - tls.lowest_sp;
      MaybeAddFeature(feature_domains::kCallStack.ConvertToMe(sp_diff));
    }
  });

  if (coverage_state.run_time_flags.callstack_level != 0) {
    coverage_state.callstack_set.ForEachNonZeroBit([](size_t idx) {
      MaybeAddFeature(feature_domains::kCallStack.ConvertToMe(idx));
    });
  }

  // Copy the features from __centipede_extra_features to g_features.
  // Zero features are ignored - we treat them as default (unset) values.
  for (auto *p = coverage_state.user_defined_begin;
       p != coverage_state.user_defined_end; ++p) {
    if (auto user_feature = *p) {
      // User domain ID is upper 32 bits
      feature_t user_domain_id = user_feature >> 32;
      // User feature ID is lower 32 bits.
      feature_t user_feature_id = user_feature & ((1ULL << 32) - 1);
      // There is no hard guarantee how many user domains are actually
      // available. If a user domain ID is out of range, alias it to an existing
      // domain. This is kinder than silently dropping the feature.
      user_domain_id %= std::size(feature_domains::kUserDomains);
      MaybeAddFeature(feature_domains::kUserDomains[user_domain_id].ConvertToMe(
          user_feature_id));
      *p = 0;  // cleanup for the next iteration.
    }
  }

  // Iterates all non-zero inline 8-bit counters, if they are present.
  // Calls AddPcIndxedAndCounterToFeatures on non-zero counters and zeroes them.
  if (coverage_state.run_time_flags.use_pc_features ||
      coverage_state.run_time_flags.use_counter_features) {
    coverage_state.sancov_objects.ForEachNonZeroInlineCounter(
        [](size_t idx, uint8_t counter_value) {
          AddPcIndxedAndCounterToFeatures(idx, counter_value);
        });
  }
}
232+
233+
// Records coverage for a memcmp-like comparison observed at `caller_pc`
// between buffers `s1` and `s2` of `n` bytes. `is_equal` tells whether the
// buffers compared equal; unequal buffers also feed the auto-dictionary.
// NOTE(review): the path hash is read from the global `tls`, not from `this`,
// while `cmp_traceN` is a member — presumably this method only runs on the
// owning thread so they coincide; confirm with callers.
void ThreadLocalRunnerState::TraceMemCmp(uintptr_t caller_pc, const uint8_t *s1,
                                         const uint8_t *s2, size_t n,
                                         bool is_equal) {
  if (coverage_state.run_time_flags.use_cmp_features) {
    // Use the PC's offset within the main object so features are stable
    // across ASLR runs.
    const uintptr_t pc_offset =
        caller_pc - coverage_state.main_object.start_address;
    const uintptr_t hash =
        fuzztest::internal::Hash64Bits(pc_offset) ^ tls.path_ring_buffer.hash();
    const size_t lcp = LengthOfCommonPrefix(s1, s2, n);
    // lcp is a 6-bit number.
    coverage_state.cmp_feature_set.set((hash << 6) | lcp);
  }
  if (!is_equal && coverage_state.run_time_flags.use_auto_dictionary) {
    cmp_traceN.Capture(n, s1, s2);
  }
}
249+
250+
// Initializes this thread's runner state and links it into the global
// coverage_state.tls_list (as the new head of the doubly-linked list).
void ThreadLocalRunnerState::OnThreadStart() {
  // Force construction of the thread_local detector so that its destructor
  // calls OnThreadStop() when this thread terminates.
  termination_detector.EnsureAlive();
  tls.started = true;
  // Both SP markers start at the current frame; lowest_sp is lowered later
  // as deeper frames are observed.
  tls.lowest_sp = tls.top_frame_sp =
      reinterpret_cast<uintptr_t>(__builtin_frame_address(0));
  tls.stack_region_low = GetCurrentThreadStackRegionLow();
  if (tls.stack_region_low == 0) {
    fprintf(stderr,
            "Disabling stack limit check due to missing stack region info.\n");
  }
  tls.call_stack.Reset(coverage_state.run_time_flags.callstack_level);
  tls.path_ring_buffer.Reset(coverage_state.run_time_flags.path_level);
  LockGuard lock(coverage_state.tls_list_mu);
  // Add myself to state.tls_list.
  auto *old_list = coverage_state.tls_list;
  tls.next = old_list;
  coverage_state.tls_list = &tls;
  if (old_list != nullptr) old_list->prev = &tls;
}
269+
270+
// Unlinks this thread's state from coverage_state.tls_list on thread exit
// and, unless `ignore` is set, preserves a heap copy on detached_tls_list so
// the thread's coverage can still be collected (and later freed by
// CleanUpDetachedTls).
void ThreadLocalRunnerState::OnThreadStop() {
  LockGuard lock(coverage_state.tls_list_mu);
  // Remove myself from state.tls_list. The list never
  // becomes empty because the main thread does not call OnThreadStop().
  if (&tls == coverage_state.tls_list) {
    coverage_state.tls_list = tls.next;
    tls.prev = nullptr;
  } else {
    // Not the head, so prev is non-null here.
    auto *prev_tls = tls.prev;
    auto *next_tls = tls.next;
    prev_tls->next = next_tls;
    if (next_tls != nullptr) next_tls->prev = prev_tls;
  }
  tls.next = tls.prev = nullptr;
  if (tls.ignore) return;
  // Create a detached copy on heap and add it to detached_tls_list to
  // collect its coverage later.
  //
  // TODO(xinhaoyuan): Consider refactoring the list operations into class
  // methods instead of duplicating them.
  ThreadLocalRunnerState *detached_tls = new ThreadLocalRunnerState(tls);
  auto *old_list = coverage_state.detached_tls_list;
  detached_tls->next = old_list;
  coverage_state.detached_tls_list = detached_tls;
  if (old_list != nullptr) old_list->prev = detached_tls;
}
296+
297+
} // namespace fuzztest::internal

0 commit comments

Comments
 (0)