// Copyright 2022 The Centipede Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "./centipede/coverage_state.h"

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <iterator>

#include "./centipede/feature.h"
#include "./centipede/int_utils.h"
#include "./centipede/runner_utils.h"
namespace fuzztest::internal {
namespace {

// Returns the length of the common prefix of `s1` and `s2`, but not more
// than 63. I.e. the returned value is in [0, 64).
size_t LengthOfCommonPrefix(const void *s1, const void *s2, size_t n) {
  const auto *p1 = static_cast<const uint8_t *>(s1);
  const auto *p2 = static_cast<const uint8_t *>(s2);
  static constexpr size_t kMaxLen = 63;
  if (n > kMaxLen) n = kMaxLen;
  for (size_t i = 0; i < n; ++i) {
    if (p1[i] != p2[i]) return i;
  }
  return n;
}

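// Helper whose sole thread_local instance detects thread termination: the
// destructor runs when the owning thread exits and reports it via
// tls.OnThreadStop().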
class ThreadTerminationDetector {
 public:
  // A dummy method to trigger the construction and make sure that the
  // destructor will be called on thread termination.
  __attribute__((optnone)) void EnsureAlive() {}

  ~ThreadTerminationDetector() { tls.OnThreadStop(); }
};

thread_local ThreadTerminationDetector termination_detector;

}  // namespace

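// init_priority(200) makes this object's constructor run before globals with
// the default initialization priority, presumably so that instrumentation
// callbacks firing during their construction already see a valid state.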
CoverageState coverage_state __attribute__((init_priority(200)));

// We use __thread instead of thread_local so that the compiler warns if
// the initializer for `tls` is not a constant expression.
// `tls` thus must not have a CTOR.
// This avoids calls to __tls_init() in hot functions that use `tls`.
__thread ThreadLocalRunnerState tls;

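// Reclaims the heap-allocated TLS copies left behind by terminated threads
// (see OnThreadStop()), emptying detached_tls_list.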
void CoverageState::CleanUpDetachedTls() {
  LockGuard lock(tls_list_mu);
  while (detached_tls_list) {
    ThreadLocalRunnerState *tls = detached_tls_list;
    detached_tls_list = detached_tls_list->next;
    delete tls;
  }
}

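// Prepares the coverage state for executing the next input: reclaims detached
// TLS objects, resets per-thread path and call-stack state, clears any pending
// execution result override, and, when `full_clear` is true, also zeroes all
// accumulated counters, bit sets, and user-defined feature slots.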
__attribute__((noinline))  // so that we see it in profile.
void PrepareCoverage(bool full_clear) {
  coverage_state.CleanUpDetachedTls();
  if (coverage_state.run_time_flags.path_level != 0) {
    coverage_state.ForEachTls([](ThreadLocalRunnerState &tls) {
      tls.path_ring_buffer.Reset(coverage_state.run_time_flags.path_level);
      tls.call_stack.Reset(coverage_state.run_time_flags.callstack_level);
      tls.lowest_sp = tls.top_frame_sp;
    });
  }
  {
    fuzztest::internal::LockGuard lock(
        coverage_state.execution_result_override_mu);
    if (coverage_state.execution_result_override != nullptr) {
      coverage_state.execution_result_override->ClearAndResize(0);
    }
  }
  if (!full_clear) return;
  coverage_state.ForEachTls([](ThreadLocalRunnerState &tls) {
    if (coverage_state.run_time_flags.use_auto_dictionary) {
      tls.cmp_trace2.Clear();
      tls.cmp_trace4.Clear();
      tls.cmp_trace8.Clear();
      tls.cmp_traceN.Clear();
    }
  });
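  // The no-op callbacks below are intentional: the ForEachNonZero{Byte,Bit}
  // helpers zero out the entries they visit, so iterating is what clears
  // these sets.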
  coverage_state.pc_counter_set.ForEachNonZeroByte(
      [](size_t idx, uint8_t value) {}, 0,
      coverage_state.actual_pc_counter_set_size_aligned);
  if (coverage_state.run_time_flags.use_dataflow_features)
    coverage_state.data_flow_feature_set.ForEachNonZeroBit([](size_t idx) {});
  if (coverage_state.run_time_flags.use_cmp_features) {
    coverage_state.cmp_feature_set.ForEachNonZeroBit([](size_t idx) {});
    coverage_state.cmp_eq_set.ForEachNonZeroBit([](size_t idx) {});
    coverage_state.cmp_moddiff_set.ForEachNonZeroBit([](size_t idx) {});
    coverage_state.cmp_hamming_set.ForEachNonZeroBit([](size_t idx) {});
    coverage_state.cmp_difflog_set.ForEachNonZeroBit([](size_t idx) {});
  }
  if (coverage_state.run_time_flags.path_level != 0)
    coverage_state.path_feature_set.ForEachNonZeroBit([](size_t idx) {});
  if (coverage_state.run_time_flags.callstack_level != 0)
    coverage_state.callstack_set.ForEachNonZeroBit([](size_t idx) {});
  for (auto *p = coverage_state.user_defined_begin;
       p != coverage_state.user_defined_end; ++p) {
    *p = 0;
  }
  coverage_state.sancov_objects.ClearInlineCounters();
}

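// Appends `feature` to g_features. When run_time_flags.skip_seen_features is
// set, features already recorded in `seen_features` are dropped instead.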
static void MaybeAddFeature(feature_t feature) {
  if (!coverage_state.run_time_flags.skip_seen_features) {
    coverage_state.g_features.push_back(feature);
  } else if (!coverage_state.seen_features.get(feature)) {
    coverage_state.g_features.push_back(feature);
    coverage_state.seen_features.set(feature);
  }
}

// Adds a kPCs and/or k8bitCounters feature to `g_features` based on arguments.
// `idx` is a pc_index.
// `counter_value` (non-zero) is a counter value associated with that PC.
static void AddPcIndxedAndCounterToFeatures(size_t idx, uint8_t counter_value) {
  if (coverage_state.run_time_flags.use_pc_features) {
    MaybeAddFeature(feature_domains::kPCs.ConvertToMe(idx));
  }
  if (coverage_state.run_time_flags.use_counter_features) {
    MaybeAddFeature(feature_domains::k8bitCounters.ConvertToMe(
        Convert8bitCounterToNumber(idx, counter_value)));
  }
}
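
// Converts the coverage accumulated during the execution (PC counters, bit
// sets, per-thread state, and user-defined features) into features appended
// to g_features. A `target_return_value` of -1 means the execution is ignored
// and no features are produced.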
__attribute__((noinline))  // so that we see it in profile.
void PostProcessCoverage(int target_return_value) {
  coverage_state.g_features.clear();

  if (target_return_value == -1) return;

  // Convert counters to features.
  coverage_state.pc_counter_set.ForEachNonZeroByte(
      [](size_t idx, uint8_t value) {
        AddPcIndxedAndCounterToFeatures(idx, value);
      },
      0, coverage_state.actual_pc_counter_set_size_aligned);

  // Convert data flow bit set to features.
  if (coverage_state.run_time_flags.use_dataflow_features) {
    coverage_state.data_flow_feature_set.ForEachNonZeroBit([](size_t idx) {
      MaybeAddFeature(feature_domains::kDataFlow.ConvertToMe(idx));
    });
  }

  // Convert cmp bit set to features.
  if (coverage_state.run_time_flags.use_cmp_features) {
    // TODO(kcc): remove cmp_feature_set.
    coverage_state.cmp_feature_set.ForEachNonZeroBit([](size_t idx) {
      MaybeAddFeature(feature_domains::kCMP.ConvertToMe(idx));
    });
    coverage_state.cmp_eq_set.ForEachNonZeroBit([](size_t idx) {
      MaybeAddFeature(feature_domains::kCMPEq.ConvertToMe(idx));
    });
    coverage_state.cmp_moddiff_set.ForEachNonZeroBit([](size_t idx) {
      MaybeAddFeature(feature_domains::kCMPModDiff.ConvertToMe(idx));
    });
    coverage_state.cmp_hamming_set.ForEachNonZeroBit([](size_t idx) {
      MaybeAddFeature(feature_domains::kCMPHamming.ConvertToMe(idx));
    });
    coverage_state.cmp_difflog_set.ForEachNonZeroBit([](size_t idx) {
      MaybeAddFeature(feature_domains::kCMPDiffLog.ConvertToMe(idx));
    });
  }

  // Convert path bit set to features.
  if (coverage_state.run_time_flags.path_level != 0) {
    coverage_state.path_feature_set.ForEachNonZeroBit([](size_t idx) {
      MaybeAddFeature(feature_domains::kBoundedPath.ConvertToMe(idx));
    });
  }

  // Iterate all threads and get features from TLS data.
  coverage_state.ForEachTls([](ThreadLocalRunnerState &tls) {
    if (coverage_state.run_time_flags.callstack_level != 0) {
      RunnerCheck(tls.top_frame_sp >= tls.lowest_sp,
                  "bad values of tls.top_frame_sp and tls.lowest_sp");
      size_t sp_diff = tls.top_frame_sp - tls.lowest_sp;
      MaybeAddFeature(feature_domains::kCallStack.ConvertToMe(sp_diff));
    }
  });

  if (coverage_state.run_time_flags.callstack_level != 0) {
    coverage_state.callstack_set.ForEachNonZeroBit([](size_t idx) {
      MaybeAddFeature(feature_domains::kCallStack.ConvertToMe(idx));
    });
  }

  // Copy the features from __centipede_extra_features to g_features.
  // Zero features are ignored - we treat them as default (unset) values.
  for (auto *p = coverage_state.user_defined_begin;
       p != coverage_state.user_defined_end; ++p) {
    if (auto user_feature = *p) {
      // The user domain ID is the upper 32 bits.
      feature_t user_domain_id = user_feature >> 32;
      // The user feature ID is the lower 32 bits.
      feature_t user_feature_id = user_feature & ((1ULL << 32) - 1);
      // There is no hard guarantee of how many user domains are actually
      // available. If a user domain ID is out of range, alias it to an
      // existing domain. This is kinder than silently dropping the feature.
      user_domain_id %= std::size(feature_domains::kUserDomains);
      MaybeAddFeature(feature_domains::kUserDomains[user_domain_id].ConvertToMe(
          user_feature_id));
      *p = 0;  // Cleanup for the next iteration.
    }
  }

  // Iterates all non-zero inline 8-bit counters, if they are present.
  // Calls AddPcIndxedAndCounterToFeatures on non-zero counters and zeroes them.
  if (coverage_state.run_time_flags.use_pc_features ||
      coverage_state.run_time_flags.use_counter_features) {
    coverage_state.sancov_objects.ForEachNonZeroInlineCounter(
        [](size_t idx, uint8_t counter_value) {
          AddPcIndxedAndCounterToFeatures(idx, counter_value);
        });
  }
}

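// Records a cmp feature for this memcmp-like comparison, keyed by the caller
// PC, the current path hash, and the length of the common prefix of `s1` and
// `s2`. For unequal buffers, also captures the arguments into cmp_traceN for
// the auto-dictionary.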
void ThreadLocalRunnerState::TraceMemCmp(uintptr_t caller_pc, const uint8_t *s1,
                                         const uint8_t *s2, size_t n,
                                         bool is_equal) {
  if (coverage_state.run_time_flags.use_cmp_features) {
    const uintptr_t pc_offset =
        caller_pc - coverage_state.main_object.start_address;
    const uintptr_t hash =
        fuzztest::internal::Hash64Bits(pc_offset) ^ tls.path_ring_buffer.hash();
    const size_t lcp = LengthOfCommonPrefix(s1, s2, n);
    // lcp is a 6-bit number.
    coverage_state.cmp_feature_set.set((hash << 6) | lcp);
  }
  if (!is_equal && coverage_state.run_time_flags.use_auto_dictionary) {
    cmp_traceN.Capture(n, s1, s2);
  }
}

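// Called when a thread starts: records the stack boundaries, resets the
// per-thread path and call-stack state, and links this thread's `tls` into
// coverage_state.tls_list.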
void ThreadLocalRunnerState::OnThreadStart() {
  termination_detector.EnsureAlive();
  tls.started = true;
  tls.lowest_sp = tls.top_frame_sp =
      reinterpret_cast<uintptr_t>(__builtin_frame_address(0));
  tls.stack_region_low = GetCurrentThreadStackRegionLow();
  if (tls.stack_region_low == 0) {
    fprintf(stderr,
            "Disabling stack limit check due to missing stack region info.\n");
  }
  tls.call_stack.Reset(coverage_state.run_time_flags.callstack_level);
  tls.path_ring_buffer.Reset(coverage_state.run_time_flags.path_level);
  LockGuard lock(coverage_state.tls_list_mu);
  // Add myself to coverage_state.tls_list.
  auto *old_list = coverage_state.tls_list;
  tls.next = old_list;
  coverage_state.tls_list = &tls;
  if (old_list != nullptr) old_list->prev = &tls;
}

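// Called on thread termination (via ThreadTerminationDetector): unlinks `tls`
// from coverage_state.tls_list and, unless tls.ignore is set, saves a heap
// copy on detached_tls_list so that the thread's coverage can still be
// collected.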
void ThreadLocalRunnerState::OnThreadStop() {
  LockGuard lock(coverage_state.tls_list_mu);
  // Remove myself from coverage_state.tls_list. The list never
  // becomes empty because the main thread does not call OnThreadStop().
  if (&tls == coverage_state.tls_list) {
    coverage_state.tls_list = tls.next;
    tls.prev = nullptr;
  } else {
    auto *prev_tls = tls.prev;
    auto *next_tls = tls.next;
    prev_tls->next = next_tls;
    if (next_tls != nullptr) next_tls->prev = prev_tls;
  }
  tls.next = tls.prev = nullptr;
  if (tls.ignore) return;
  // Create a detached copy on the heap and add it to detached_tls_list to
  // collect its coverage later.
  //
  // TODO(xinhaoyuan): Consider refactoring the list operations into class
  // methods instead of duplicating them.
  ThreadLocalRunnerState *detached_tls = new ThreadLocalRunnerState(tls);
  auto *old_list = coverage_state.detached_tls_list;
  detached_tls->next = old_list;
  coverage_state.detached_tls_list = detached_tls;
  if (old_list != nullptr) old_list->prev = detached_tls;
}

}  // namespace fuzztest::internal