// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

- pub mod sync;
-
use std::collections::HashSet;
use std::iter;
use std::sync::{mpsc, Arc, Weak};

use attribution_server::{AttributionServer, AttributionServerHandle};
use fidl::AsHandleRef;
use fidl_fuchsia_memory_attribution as fattribution;
+ use starnix_logging::log_error;
use starnix_sync::Mutex;
use starnix_uapi::pid_t;
use zx::HandleBased;
@@ -22,10 +21,39 @@ use crate::task::{Kernel, ThreadGroup};
/// it once, to reduce overhead.
const MINIMUM_RESCAN_INTERVAL: zx::MonotonicDuration = zx::MonotonicDuration::from_millis(100);

- /// If a new code path is added which mutates the PID table without notifying
- /// the scanner thread, this timeout ensures we will at least eventually unpark
- /// and process the changes, albeit with a larger latency.
- const MAXIMUM_RESCAN_INTERVAL: std::time::Duration = std::time::Duration::from_secs(5);
+ enum MemoryAttributionLifecycleEventType {
+     Creation,
+     NameChange,
+     Destruction,
+ }
+
+ pub struct MemoryAttributionLifecycleEvent {
+     pid: pid_t,
+     event_type: MemoryAttributionLifecycleEventType,
+ }
+
+ impl MemoryAttributionLifecycleEvent {
+     pub fn creation(pid: pid_t) -> Self {
+         MemoryAttributionLifecycleEvent {
+             pid,
+             event_type: MemoryAttributionLifecycleEventType::Creation,
+         }
+     }
+
+     pub fn name_change(pid: pid_t) -> Self {
+         MemoryAttributionLifecycleEvent {
+             pid,
+             event_type: MemoryAttributionLifecycleEventType::NameChange,
+         }
+     }
+
+     pub fn destruction(pid: pid_t) -> Self {
+         MemoryAttributionLifecycleEvent {
+             pid,
+             event_type: MemoryAttributionLifecycleEventType::Destruction,
+         }
+     }
+ }

pub struct MemoryAttributionManager {
    /// Holds state for the hanging-get attribution protocol.
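For orientation, the sketch below shows how a producer (such as the PID table that later receives the sender) might construct and emit these lifecycle events over a standard `mpsc` channel. It is a self-contained approximation, not code from this change: `pid_t` is modeled as `i32`, and the `main` driver is purely illustrative; only the event type and its constructors mirror the hunk above.

```rust
use std::sync::mpsc;

// Stand-ins modeled on the types added above; `pid_t` is approximated as i32.
type PidT = i32;

enum MemoryAttributionLifecycleEventType {
    Creation,
    NameChange,
    Destruction,
}

struct MemoryAttributionLifecycleEvent {
    pid: PidT,
    event_type: MemoryAttributionLifecycleEventType,
}

impl MemoryAttributionLifecycleEvent {
    fn creation(pid: PidT) -> Self {
        Self { pid, event_type: MemoryAttributionLifecycleEventType::Creation }
    }
    fn destruction(pid: PidT) -> Self {
        Self { pid, event_type: MemoryAttributionLifecycleEventType::Destruction }
    }
}

fn main() {
    // The sender half would live with the producer (the "thread group notifier");
    // the receiver half is owned by the attribution worker.
    let (tx, rx) = mpsc::channel();
    tx.send(MemoryAttributionLifecycleEvent::creation(42)).unwrap();
    tx.send(MemoryAttributionLifecycleEvent::destruction(42)).unwrap();

    // Drain whatever is queued and react per event type.
    for event in rx.try_iter() {
        match event.event_type {
            MemoryAttributionLifecycleEventType::Creation => println!("created pid {}", event.pid),
            MemoryAttributionLifecycleEventType::NameChange => println!("renamed pid {}", event.pid),
            MemoryAttributionLifecycleEventType::Destruction => println!("removed pid {}", event.pid),
        }
    }
}
```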
@@ -60,10 +88,13 @@ impl MemoryAttributionManager {
                let (initial_state_tx, initial_state_rx) = mpsc::sync_channel(1);
                initial_state_tx.send(InitialState { processes }).unwrap();
                let weak_kernel = weak_kernel.clone();
-                 let notifier = sync::spawn_thread(&kernel, move |waiter| {
-                     Self::run(weak_kernel, publisher_rx, initial_state_rx, waiter);
+
+                 let (pid_sender, pid_receiver) = std::sync::mpsc::channel();
+
+                 kernel.kthreads.spawn(move |_, _| {
+                     Self::run(weak_kernel, publisher_rx, initial_state_rx, pid_receiver);
                });
-                 kernel.pids.write().set_thread_group_notifier(notifier);
+                 kernel.pids.write().set_thread_group_notifier(pid_sender);
            }
            events
        }));
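This hunk hands the `Sender` half to the PID table via `set_thread_group_notifier` and moves the `Receiver` into a kernel thread running `Self::run`. The sketch below reproduces that shape using plain `std::thread` instead of `kernel.kthreads.spawn`; the `PidTable` struct and its `notifier` field are hypothetical stand-ins, not the real Starnix types.

```rust
use std::sync::mpsc;
use std::thread;

// Hypothetical stand-in for the slot in the PID table that stores the notifier;
// the real change calls kernel.pids.write().set_thread_group_notifier(pid_sender).
struct PidTable {
    notifier: Option<mpsc::Sender<i32>>,
}

fn main() {
    let (pid_sender, pid_receiver) = mpsc::channel::<i32>();

    // The worker owns the receiving end, mirroring Self::run in the hunk above
    // (spawned here with std::thread rather than a Starnix kernel thread).
    let worker = thread::spawn(move || {
        while let Ok(pid) = pid_receiver.recv() {
            println!("attribution update for pid {pid}");
        }
        // recv() errors once every sender is dropped, so the worker exits cleanly.
    });

    // The PID table keeps the sender and notifies it on thread-group changes.
    let pids = PidTable { notifier: Some(pid_sender) };
    if let Some(tx) = &pids.notifier {
        tx.send(1).expect("worker is still running");
    }

    drop(pids); // Dropping the table drops the sender and lets the worker stop.
    worker.join().unwrap();
}
```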
@@ -95,53 +126,122 @@ impl MemoryAttributionManager {
        kernel: Weak<Kernel>,
        publisher: mpsc::Receiver<attribution_server::Publisher>,
        initial_state: mpsc::Receiver<InitialState>,
-         waiter: sync::Waiter,
+         pid_receiver: mpsc::Receiver<MemoryAttributionLifecycleEvent>,
    ) {
        let publisher = publisher.recv().unwrap();
        let initial_state = initial_state.recv().unwrap();
-         let InitialState { mut processes } = initial_state;
+         let InitialState { processes } = initial_state;

-         loop {
-             waiter.wait(MAXIMUM_RESCAN_INTERVAL);
+         let Some(kernel) = kernel.upgrade() else {
+             return;
+         };

-             let Some(kernel) = kernel.upgrade() else { break };
+         let (mut processes, updates) = scan_processes(&kernel, processes);
+         // If there are updates to send, send them now.
+         if !updates.is_empty() {
+             _ = publisher.on_update(updates);
+         }
+
+         loop {
+             // There may be multiple pending notifications in the receiving channel. We would like
+             // to process them all at once. For that, we first wait up to the timeout for any event,
+             // then we try to pull as many events as available from the channel, until it is empty.
+             // We keep track whether we hit a timeout to do a full scan; if the full scan finds some
+             // updates to send, it means we have uninstrumented sources of changes that need to be
+             // fixed.
+             let events = match pid_receiver.recv() {
+                 Ok(v) => itertools::chain(std::iter::once(v), pid_receiver.try_iter()),
+                 Err(_) => {
+                     return;
+                 }
+             };

            let mut updates = vec![];
-             // Find removed processes.
-             let mut new_processes = HashSet::new();
-             {
-                 let pids = kernel.pids.read();
-
-                 // Find added processes.
-                 for thread_group in pids.get_thread_groups() {
-                     let pid = thread_group.leader;
-                     new_processes.insert(pid);
-                     // TODO(https://fxbug.dev/379733655): Remove this
-                     #[allow(clippy::set_contains_or_insert)]
-                     if !processes.contains(&pid) {
+             let pids = kernel.pids.read();
+             for event in events {
+                 let pid = event.pid;
+                 match event.event_type {
+                     MemoryAttributionLifecycleEventType::Creation => {
+                         if !processes.insert(pid) {
+                             log_error!(
+                                 "{} is already known, memory attribution is likely incorrect",
+                                 pid
+                             );
+                         }
+                         let thread_group = match pids.get_thread_group(pid) {
+                             Some(tg) => tg,
+                             None => {
+                                 // The thread group is missing. This can happen if it has already
+                                 // exited.
+                                 continue;
+                             }
+                         };
+                         let name = get_thread_group_identifier(&thread_group);
+                         let mut update = attribution_info_for_thread_group(name, &thread_group);
+                         updates.append(&mut update);
+                     }
+                     MemoryAttributionLifecycleEventType::NameChange => {
+                         if !processes.contains(&pid) {
+                             log_error!(
+                                 "{} is unknown, memory attribution is likely incorrect",
+                                 pid
+                             );
+                         }
+                         let thread_group = match pids.get_thread_group(pid) {
+                             Some(tg) => tg,
+                             None => continue,
+                         };
                        let name = get_thread_group_identifier(&thread_group);
                        let mut update = attribution_info_for_thread_group(name, &thread_group);
-                         processes.insert(pid);
                        updates.append(&mut update);
                    }
+                     MemoryAttributionLifecycleEventType::Destruction => {
+                         if !processes.remove(&pid) {
+                             log_error!(
+                                 "{} is unknown, memory attribution is likely incorrect",
+                                 pid
+                             );
+                         }
+                         updates.push(fattribution::AttributionUpdate::Remove(pid as u64));
+                     }
                }
            }
-
-             for pid in processes.difference(&new_processes) {
-                 updates.push(fattribution::AttributionUpdate::Remove(*pid as u64));
-             }
-             processes = new_processes;
-
            // If there are updates to send, send them now.
            if !updates.is_empty() {
                _ = publisher.on_update(updates);
            }
-
            zx::MonotonicInstant::after(MINIMUM_RESCAN_INTERVAL).sleep();
        }
    }
}

+ /// Do a full scan of the current Starnix processes. This is useful to establish an initial state.
+ fn scan_processes(
+     kernel: &Kernel,
+     mut processes: HashSet<pid_t>,
+ ) -> (HashSet<pid_t>, Vec<fattribution::AttributionUpdate>) {
+     let mut updates = vec![];
+     let pids = kernel.pids.read();
+     let mut new_processes = HashSet::new();
+     for thread_group in pids.get_thread_groups() {
+         let pid = thread_group.leader;
+         new_processes.insert(pid);
+         // TODO(https://fxbug.dev/379733655): Remove this
+         #[allow(clippy::set_contains_or_insert)]
+         if !processes.contains(&pid) {
+             let name = get_thread_group_identifier(&thread_group);
+             let mut update = attribution_info_for_thread_group(name, &thread_group);
+             processes.insert(pid);
+             updates.append(&mut update);
+         }
+     }
+
+     for pid in processes.difference(&new_processes) {
+         updates.push(fattribution::AttributionUpdate::Remove(*pid as u64));
+     }
+     (new_processes, updates)
+ }
+
fn get_thread_group_identifier(thread_group: &ThreadGroup) -> String {
    let name = match thread_group.process.is_invalid_handle() {
        // The system task has an invalid Zircon process handle.
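The new receive loop above blocks on `recv()` for the first event and then drains everything already queued with `try_iter()`, so a burst of notifications is coalesced into a single batch of updates. Below is a minimal sketch of that batching pattern using only the standard library (`iter::once(..).chain(..)` standing in for `itertools::chain`); the pids and printouts are illustrative only.

```rust
use std::iter;
use std::sync::mpsc;

fn main() {
    let (tx, rx) = mpsc::channel();
    for pid in [10, 11, 12] {
        tx.send(pid).unwrap();
    }
    drop(tx);

    // Block for the first event, then drain whatever is already queued so a
    // burst of notifications becomes one batch of updates.
    while let Ok(first) = rx.recv() {
        let batch: Vec<i32> = iter::once(first).chain(rx.try_iter()).collect();
        println!("processing batch: {batch:?}");
    }
    // recv() returns Err once the sender is dropped and the queue is empty,
    // which ends the loop — the same shutdown condition the worker relies on.
}
```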
@@ -176,9 +276,19 @@ fn new_principal(pid: i32, name: String) -> fattribution::AttributionUpdate {

/// Builds an `UpdatedPrincipal` event. If the task has an invalid root VMAR, returns `None`.
fn updated_principal(thread_group: &ThreadGroup) -> Option<fattribution::AttributionUpdate> {
-     let Some(process_koid) = thread_group.process.get_koid().ok() else { return None };
-     let Some(mm) = get_mm(thread_group) else { return None };
-     let Some(vmar_info) = mm.get_restricted_vmar_info() else { return None };
+     let Some(process_koid) = thread_group.process.get_koid().ok() else {
+         return None;
+     };
+     let Some(mm) = get_mm(thread_group) else {
+         log_error!(
+             "No memory manager for ThreadGroup {}, this should not happen.",
+             thread_group.leader
+         );
+         return None;
+     };
+     let Some(vmar_info) = mm.get_restricted_vmar_info() else {
+         return None;
+     };
    let update = fattribution::AttributionUpdate::Update(fattribution::UpdatedPrincipal {
        identifier: Some(thread_group.leader as u64),
        resources: Some(fattribution::Resources::Data(fattribution::Data {