Skip to content

Commit bfb78a2

Browse files
committed
update patches
1 parent 768e4d6 commit bfb78a2

File tree

5 files changed

+503
-0
lines changed

5 files changed

+503
-0
lines changed

.gitignore

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,8 @@ generated_templates/
1616
ProgramTemplates.swift
1717
ProgramTemplateWeights.swift
1818
fog_logs/
19+
crashes/
20+
ebg_logs/
1921

2022
# custom GCE configuration
2123
Cloud/GCE/config.sh

v8_patch/v8_patch/cov-cc.diff

Lines changed: 177 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,177 @@
1+
diff --git a/src/fuzzilli/cov.cc b/src/fuzzilli/cov.cc
2+
index bf8b6925993..c5e049a516f 100644
3+
--- a/src/fuzzilli/cov.cc
4+
+++ b/src/fuzzilli/cov.cc
5+
@@ -1,9 +1,16 @@
6+
// Copyright 2020 the V8 project authors. All rights reserved.
7+
-// Use of this source code is governed by a BSD-style license that can be
8+
-// found in the LICENSE file.
9+
+// Use of this source code is governed by a BSD-style license that can
10+
+// be found in the LICENSE file.
11+
12+
#include "src/fuzzilli/cov.h"
13+
14+
+// Include V8 headers first to avoid macro conflicts
15+
+#include "src/base/platform/memory.h"
16+
+#include "src/objects/feedback-vector.h"
17+
+#include "src/sandbox/hardware-support.h"
18+
+
19+
+// Include system headers after V8 headers
20+
+#include <cstddef>
21+
#include <fcntl.h>
22+
#include <inttypes.h>
23+
#include <stdio.h>
24+
@@ -14,14 +21,31 @@
25+
#include <sys/wait.h>
26+
#include <unistd.h>
27+
28+
-#include "src/base/platform/memory.h"
29+
-#include "src/sandbox/hardware-support.h"
30+
-
31+
-#define SHM_SIZE 0x100000
32+
+#define SHM_SIZE 0x202000
33+
#define MAX_EDGES ((SHM_SIZE - 4) * 8)
34+
+#define MAX_FEEDBACK_NEXUS 100000
35+
+
36+
+
37+
+struct FeedbackNexusData {
38+
+ uint32_t vector_address; // Low 32 bits of the FeedbackVector address in the V8 heap (pointer is truncated on 64-bit builds)
39+
+ uint32_t ic_state; // InlineCacheState
40+
+};
41+
+
42+
+struct optimization_turbofan_data {
43+
+ uint32_t flags; // Flags used for optimization passes in PipelineImpl::OptimizeTurbofanGraph
44+
+ //uint32_t address_code;
45+
+ //uint32_t address_shared_info;
46+
+ //uint8_t bailout_reason;
47+
+ //bool is_osr;
48+
+};
49+
50+
struct shmem_data {
51+
uint32_t num_edges;
52+
+ uint32_t feedback_nexus_count;
53+
+ uint32_t max_feedback_nexus;
54+
+ uint32_t turbofan_flags;
55+
+ uint64_t turbofan_optimization_bits;
56+
+ FeedbackNexusData feedback_nexus_data[MAX_FEEDBACK_NEXUS];
57+
unsigned char edges[];
58+
};
59+
60+
@@ -83,6 +107,12 @@ extern "C" void __sanitizer_cov_trace_pc_guard_init(uint32_t* start,
61+
62+
shmem->num_edges = static_cast<uint32_t>(stop - start);
63+
builtins_start = 1 + shmem->num_edges;
64+
+
65+
+ // Initialize feedback nexus fields
66+
+ shmem->feedback_nexus_count = 0;
67+
+ shmem->max_feedback_nexus = MAX_FEEDBACK_NEXUS;
68+
+ memset(shmem->feedback_nexus_data, 0, sizeof(FeedbackNexusData) * MAX_FEEDBACK_NEXUS);
69+
+
70+
fprintf(stderr,
71+
"[COV] edge counters initialized. Shared memory: %s with %u edges\n",
72+
shm_key, shmem->num_edges);
73+
@@ -115,12 +145,15 @@ void sanitizer_cov_prepare_for_hardware_sandbox() {
74+
#endif
75+
76+
uint32_t sanitizer_cov_count_discovered_edges() {
77+
+ // Calculate offset to edges array (after feedback nexus data)
78+
+ unsigned char* edges_ptr = (unsigned char*)shmem + offsetof(struct shmem_data, edges);
79+
+
80+
uint32_t on_edges_counter = 0;
81+
for (uint32_t i = 1; i < builtins_start; ++i) {
82+
const uint32_t byteIndex = i >> 3; // Divide by 8 using a shift operation
83+
const uint32_t bitIndex = i & 7; // Modulo 8 using a bitwise AND operation
84+
85+
- if (shmem->edges[byteIndex] & (1 << bitIndex)) {
86+
+ if (edges_ptr[byteIndex] & (1 << bitIndex)) {
87+
++on_edges_counter;
88+
}
89+
}
90+
@@ -128,14 +161,26 @@ uint32_t sanitizer_cov_count_discovered_edges() {
91+
}
92+
93+
extern "C" void __sanitizer_cov_trace_pc_guard(uint32_t* guard) {
94+
- // There's a small race condition here: if this function executes in two
95+
- // threads for the same edge at the same time, the first thread might disable
96+
- // the edge (by setting the guard to zero) before the second thread fetches
97+
- // the guard value (and thus the index). However, our instrumentation ignores
98+
- // the first edge (see libcoverage.c) and so the race is unproblematic.
99+
+ /*
100+
+ // There's a small race condition here: if this function executes in two
101+
+ // threads for the same edge at the same time, the first thread might disable
102+
+ // the edge (by setting the guard to zero) before the second thread fetches
103+
+ // the guard value (and thus the index). However, our instrumentation ignores
104+
+ // the first edge (see libcoverage.c) and so the race is unproblematic.
105+
+ uint32_t index = *guard;
106+
+ shmem->edges[index / 8] |= 1 << (index % 8);
107+
+ *guard = 0;
108+
+ */
109+
+ if (!guard || *guard == 0) return; // guard already cleared — possible race
110+
uint32_t index = *guard;
111+
- shmem->edges[index / 8] |= 1 << (index % 8);
112+
*guard = 0;
113+
+
114+
+ // Defensive check only: index was copied from a non-zero *guard above, so 0 is not expected here
115+
+ if (index == 0) return;
116+
+
117+
+ // Calculate offset to edges array (after feedback nexus data)
118+
+ unsigned char* edges_ptr = (unsigned char*)shmem + offsetof(struct shmem_data, edges);
119+
+ edges_ptr[index / 8] |= 1 << (index % 8);
120+
}
121+
122+
void cov_init_builtins_edges(uint32_t num_edges) {
123+
@@ -161,12 +206,53 @@ void cov_update_builtins_basic_block_coverage(
124+
fprintf(stderr, "[COV] Error: Size of builtins cov map changed.\n");
125+
exit(-1);
126+
}
127+
+
128+
+ // Calculate offset to edges array (after feedback nexus data)
129+
+ unsigned char* edges_ptr = (unsigned char*)shmem + offsetof(struct shmem_data, edges);
130+
+
131+
for (uint32_t i = 0; i < cov_map.size(); ++i) {
132+
if (cov_map[i]) {
133+
const uint32_t byteIndex = (i + builtins_start) >> 3;
134+
const uint32_t bitIndex = (i + builtins_start) & 7;
135+
136+
- shmem->edges[byteIndex] |= (1 << bitIndex);
137+
+ edges_ptr[byteIndex] |= (1 << bitIndex);
138+
}
139+
}
140+
}
141+
+
142+
+
143+
+void cov_serialize_feedback_nexus(v8::internal::FeedbackNexus* nexus, FeedbackNexusData* data) {
144+
+ if (!nexus || !data) return;
145+
+ data->vector_address = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(nexus->vector().ptr()));
146+
+ data->ic_state = static_cast<uint32_t>(nexus->ic_state());
147+
+}
148+
+
149+
+void cov_add_feedback_nexus(v8::internal::FeedbackNexus* nexus) {
150+
+ if (!shmem || !nexus) return;
151+
+
152+
+ // Check if we have space
153+
+ if (shmem->feedback_nexus_count >= MAX_FEEDBACK_NEXUS) {
154+
+ fprintf(stderr, "[COV] Warning: Feedback nexus buffer full, dropping entry\n");
155+
+ return;
156+
+ }
157+
+ cov_serialize_feedback_nexus(nexus,
158+
+ &shmem->feedback_nexus_data[shmem->feedback_nexus_count]);
159+
+ shmem->feedback_nexus_count++;
160+
+
161+
+ // printf("[COV] Added feedback nexus: %p\n", nexus);
162+
+ // printf("[COV] Feedback nexus count: %d\n", shmem->feedback_nexus_count);
163+
+ // printf("[COV] Feedback nexus data: %p\n", shmem->feedback_nexus_data);
164+
+ // printf("[COV] Feedback nexus data: %p\n", shmem->feedback_nexus_data[shmem->feedback_nexus_count]);
165+
+ // printf("[COV] Feedback nexus data: %p\n", shmem->feedback_nexus_data[shmem->feedback_nexus_count].vector_address);
166+
+ // printf("[COV] Feedback nexus data: %p\n", shmem->feedback_nexus_data[shmem->feedback_nexus_count].ic_state);
167+
+}
168+
+
169+
+void cov_set_turbofan_optimization_bits(uint64_t bit) {
170+
+ if (!shmem) return;
171+
+ shmem->turbofan_optimization_bits |= bit;
172+
+}
173+
+
174+
+void cov_set_maglev_optimization_bits(uint64_t /*bit*/) {
175+
+ // No-op: maglev bitmap is not exported in shmem layout.
176+
+}
177+
+// } // namespace v8

v8_patch/v8_patch/cov-h.diff

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
diff --git a/src/fuzzilli/cov.h b/src/fuzzilli/cov.h
2+
index b48645576fc..090ac49186c 100644
3+
--- a/src/fuzzilli/cov.h
4+
+++ b/src/fuzzilli/cov.h
5+
@@ -19,4 +19,18 @@ void sanitizer_cov_prepare_for_hardware_sandbox();
6+
void cov_init_builtins_edges(uint32_t num_edges);
7+
void cov_update_builtins_basic_block_coverage(const std::vector<bool>& cov_map);
8+
9+
+// Forward declaration for FeedbackNexus
10+
+namespace v8 {
11+
+namespace internal {
12+
+class FeedbackNexus;
13+
+}
14+
+}
15+
+
16+
+// Global function declaration
17+
+void cov_add_feedback_nexus(v8::internal::FeedbackNexus* nexus);
18+
+
19+
+// Track optimization passes for maglev and turbofan
20+
+void cov_set_turbofan_optimization_bits(uint64_t bit);
21+
+void cov_set_maglev_optimization_bits(uint64_t bit);
22+
+
23+
#endif // V8_FUZZILLI_COV_H_
Lines changed: 109 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,109 @@
1+
diff --git a/src/codegen/optimized-compilation-info.h b/src/codegen/optimized-compilation-info.h
2+
index cf7dd4d6365..96b7d312cfb 100644
3+
--- a/src/codegen/optimized-compilation-info.h
4+
+++ b/src/codegen/optimized-compilation-info.h
5+
@@ -6,6 +6,7 @@
6+
#define V8_CODEGEN_OPTIMIZED_COMPILATION_INFO_H_
7+
8+
#include <memory>
9+
+#include <array>
10+
11+
#include "src/base/vector.h"
12+
#include "src/codegen/bailout-reason.h"
13+
@@ -99,6 +100,87 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
14+
FLAGS(DEF_SETTER)
15+
#undef DEF_SETTER
16+
17+
+ // Optimization tracing bitmap
18+
+ //
19+
+ // This bitmap tracks which compiler optimizations/passes are run for a given
20+
+ // optimized compilation. Bits correspond to major TurboFan/TurboShaft
21+
+ // pipeline phases.
22+
+ //
23+
+ // phases (from pipeline passes in src/compiler/pipeline.cc):
24+
+ // - BrokerInitAndSerialization: HeapBrokerInitializationPhase
25+
+ // - GraphCreation:
26+
+ // - GraphBuilderPhase
27+
+ // - InliningPhase
28+
+ // - Lowering and typed optimizations:
29+
+ // - EarlyGraphTrimmingPhase
30+
+ // - TyperPhase
31+
+ // - TypedLoweringPhase
32+
+ // - LoopPeelingPhase or LoopExitEliminationPhase
33+
+ // - LoadEliminationPhase
34+
+ // - EscapeAnalysisPhase
35+
+ // - TypeAssertionsPhase
36+
+ // - SimplifiedLoweringPhase
37+
+ // - JS <-> Wasm related (conditional):
38+
+ // - JSWasmInliningPhase
39+
+ // - WasmTypingPhase
40+
+ // - WasmGCOptimizationPhase
41+
+ // - JSWasmLoweringPhase
42+
+ // - WasmOptimizationPhase
43+
+ // - Post-typing cleanup:
44+
+ // - UntyperPhase (debug-only)
45+
+ // - Generic lowering and early block optimizations:
46+
+ // - GenericLoweringPhase
47+
+ // - EarlyOptimizationPhase
48+
+ // - Backend (scheduling/ISel/RA/codegen):
49+
+ // - ComputeScheduledGraph/Scheduling
50+
+ // - InstructionSelection
51+
+ // - RegisterAllocation
52+
+ // - CodeGeneration
53+
+ //
54+
+ enum class OptimizationBit : int {
55+
+ kBrokerInitAndSerialization = 0,
56+
+ kGraphBuilder,
57+
+ kInlining,
58+
+ kEarlyGraphTrimming,
59+
+ kTyper,
60+
+ kTypedLowering,
61+
+ kLoopPeeling,
62+
+ kLoopExitElimination,
63+
+ kLoadElimination,
64+
+ kEscapeAnalysis,
65+
+ kTypeAssertions,
66+
+ kSimplifiedLowering,
67+
+ kJSWasmInlining,
68+
+ kWasmTyping,
69+
+ kWasmGCOptimization,
70+
+ kJSWasmLowering,
71+
+ kWasmOptimization,
72+
+ kUntyper,
73+
+ kGenericLowering,
74+
+ kEarlyOptimization,
75+
+ kScheduledGraph,
76+
+ kInstructionSelection,
77+
+ kRegisterAllocation,
78+
+ kCodeGeneration
79+
+ };
80+
+
81+
+ // set a bit indicating an optimization phase ran during this compilation
82+
+ void SetOptimizationBit(OptimizationBit bit) {
83+
+ const int index = static_cast<int>(bit);
84+
+ const int word = index / 64;
85+
+ const int offset = index % 64;
86+
+ optimization_bits_[word] |= (uint64_t{1} << offset);
87+
+ }
88+
+
89+
+ // query whether a given optimization phase bit is set
90+
+ bool HasOptimizationBit(OptimizationBit bit) const {
91+
+ const int index = static_cast<int>(bit);
92+
+ const int word = index / 64;
93+
+ const int offset = index % 64;
94+
+ return (optimization_bits_[word] & (uint64_t{1} << offset)) != 0;
95+
+ }
96+
+
97+
+
98+
// Construct a compilation info for optimized compilation.
99+
OptimizedCompilationInfo(Zone* zone, Isolate* isolate,
100+
IndirectHandle<SharedFunctionInfo> shared,
101+
@@ -354,6 +436,8 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
102+
// handles above. The only difference is that is created in the
103+
// CanonicalHandleScope(i.e step 1) is different).
104+
std::unique_ptr<CanonicalHandlesMap> canonical_handles_;
105+
+ // Two 64-bit words give space for up to 128 optimization bits.
106+
+ std::array<uint64_t, 2> optimization_bits_ = {0, 0};
107+
};
108+
109+
} // namespace internal

0 commit comments

Comments
 (0)