1
1
use crate :: event:: Event ;
2
- use crate :: file_header:: FILE_HEADER_SIZE ;
3
- use crate :: { ProfilerFiles , RawEvent , StringTable , TimestampKind } ;
2
+ use crate :: file_header:: {
3
+ read_file_header, write_file_header, CURRENT_FILE_FORMAT_VERSION , FILE_HEADER_SIZE ,
4
+ FILE_MAGIC_EVENT_STREAM ,
5
+ } ;
6
+ use crate :: serialization:: ByteVecSink ;
7
+ use crate :: {
8
+ ProfilerFiles , RawEvent , SerializationSink , StringTable , StringTableBuilder , Timestamp ,
9
+ TimestampKind ,
10
+ } ;
4
11
use std:: error:: Error ;
5
12
use std:: fs;
6
13
use std:: mem;
7
14
use std:: path:: Path ;
15
+ use std:: sync:: Arc ;
8
16
use std:: time:: { Duration , SystemTime } ;
9
17
10
18
pub struct ProfilingData {
@@ -21,6 +29,15 @@ impl ProfilingData {
21
29
fs:: read ( paths. string_index_file ) . expect ( "couldn't read string_index file" ) ;
22
30
let event_data = fs:: read ( paths. events_file ) . expect ( "couldn't read events file" ) ;
23
31
32
+ let event_data_format = read_file_header ( & event_data, FILE_MAGIC_EVENT_STREAM ) ?;
33
+ if event_data_format != CURRENT_FILE_FORMAT_VERSION {
34
+ Err ( format ! (
35
+ "Event stream file format version '{}' is not supported
36
+ by this version of `measureme`." ,
37
+ event_data_format
38
+ ) ) ?;
39
+ }
40
+
24
41
let string_table = StringTable :: new ( string_data, index_data) ?;
25
42
26
43
Ok ( ProfilingData {
@@ -152,3 +169,237 @@ impl<'a> Iterator for MatchingEventsIterator<'a> {
152
169
None
153
170
}
154
171
}
172
+
173
/// A `ProfilingDataBuilder` allows for programmatically building
/// `ProfilingData` objects. This is useful for writing tests that expect
/// `ProfilingData` with predictable events (and especially timestamps) in it.
///
/// `ProfilingDataBuilder` provides a convenient interface but its
/// implementation might not be efficient, which is why it should only be used
/// for writing tests and other things that are not performance sensitive.
pub struct ProfilingDataBuilder {
    // Accumulates the raw event stream; starts with the event-stream file
    // header written by `new()`.
    event_sink: ByteVecSink,
    // Shared with `string_table`; holds the string table's data bytes.
    // Kept here so `into_profiling_data()` can unwrap the `Arc` and
    // recover the bytes after dropping the builder.
    string_table_data_sink: Arc<ByteVecSink>,
    // Shared with `string_table`; holds the string table's index bytes
    // (same recovery scheme as `string_table_data_sink`).
    string_table_index_sink: Arc<ByteVecSink>,
    // Interns event-kind and event-id strings into the two sinks above.
    string_table: StringTableBuilder<ByteVecSink>,
}
186
+
187
impl ProfilingDataBuilder {
    /// Creates an empty builder: a fresh event stream containing only the
    /// file header, and an empty string table.
    pub fn new() -> ProfilingDataBuilder {
        let event_sink = ByteVecSink::new();
        let string_table_data_sink = Arc::new(ByteVecSink::new());
        let string_table_index_sink = Arc::new(ByteVecSink::new());

        // The first thing in every file we generate must be the file header.
        write_file_header(&event_sink, FILE_MAGIC_EVENT_STREAM);

        // The builder keeps its own clones of the two `Arc`s so that
        // `into_profiling_data()` can unwrap them once `string_table` is
        // dropped.
        let string_table = StringTableBuilder::new(
            string_table_data_sink.clone(),
            string_table_index_sink.clone(),
        );

        ProfilingDataBuilder {
            event_sink,
            string_table_data_sink,
            string_table_index_sink,
            string_table,
        }
    }

    /// Record an interval event. Provide an `inner` function for recording
    /// nested events.
    pub fn interval<F>(
        &mut self,
        event_kind: &str,
        event_id: &str,
        thread_id: u64,
        start_nanos: u64,
        end_nanos: u64,
        inner: F,
    ) -> &mut Self
    where
        F: FnOnce(&mut Self),
    {
        let event_kind = self.string_table.alloc(event_kind);
        let event_id = self.string_table.alloc(event_id);

        // Emit the `Start` half of the interval ...
        self.write_raw_event(&RawEvent {
            event_kind,
            id: event_id,
            thread_id,
            timestamp: Timestamp::new(start_nanos, TimestampKind::Start),
        });

        // ... then any nested events recorded by the caller ...
        inner(self);

        // ... and finally the matching `End` event.
        self.write_raw_event(&RawEvent {
            event_kind,
            id: event_id,
            thread_id,
            timestamp: Timestamp::new(end_nanos, TimestampKind::End),
        });

        self
    }

    /// Record an instant event with the given data.
    pub fn instant(
        &mut self,
        event_kind: &str,
        event_id: &str,
        thread_id: u64,
        timestamp_nanos: u64,
    ) -> &mut Self {
        let event_kind = self.string_table.alloc(event_kind);
        let event_id = self.string_table.alloc(event_id);

        self.write_raw_event(&RawEvent {
            event_kind,
            id: event_id,
            thread_id,
            timestamp: Timestamp::new(timestamp_nanos, TimestampKind::Instant),
        });

        self
    }

    /// Convert this builder into a `ProfilingData` object that can be iterated.
    pub fn into_profiling_data(self) -> ProfilingData {
        // Drop the string table, so that the `string_table_data_sink` and
        // `string_table_index_sink` fields are the only event-sink references
        // left. This enables us to unwrap the `Arc`s and get the byte data out.
        drop(self.string_table);

        let event_data = self.event_sink.into_bytes();
        let data_bytes = Arc::try_unwrap(self.string_table_data_sink)
            .unwrap()
            .into_bytes();
        let index_bytes = Arc::try_unwrap(self.string_table_index_sink)
            .unwrap()
            .into_bytes();

        // Sanity check: the event stream must start with a valid header of the
        // current format version (written by `new()`).
        assert_eq!(
            read_file_header(&event_data, FILE_MAGIC_EVENT_STREAM).unwrap(),
            CURRENT_FILE_FORMAT_VERSION
        );
        let string_table = StringTable::new(data_bytes, index_bytes).unwrap();

        ProfilingData {
            event_data,
            string_table,
        }
    }

    // Appends the in-memory byte representation of `raw_event` to the event
    // sink as one atomic write.
    fn write_raw_event(&mut self, raw_event: &RawEvent) {
        // SAFETY: views one `RawEvent` as its raw bytes; the slice covers
        // exactly `size_of::<RawEvent>()` bytes of a live borrow.
        // NOTE(review): this assumes `RawEvent`'s in-memory layout IS the
        // on-disk event encoding (no uninitialized padding) — confirm it is
        // `#[repr(C)]`/POD at its definition site.
        let raw_event_bytes: &[u8] = unsafe {
            std::slice::from_raw_parts(
                raw_event as *const _ as *const u8,
                std::mem::size_of::<RawEvent>(),
            )
        };

        self.event_sink
            .write_atomic(std::mem::size_of::<RawEvent>(), |bytes| {
                debug_assert_eq!(bytes.len(), std::mem::size_of::<RawEvent>());
                bytes.copy_from_slice(raw_event_bytes);
            });
    }
}
308
+
309
#[cfg(test)]
mod tests {
    use super::*;
    use std::borrow::Cow;

    /// Constructs the `Event` we expect the profiling-data iterator to yield
    /// for the given raw values.
    fn event(
        event_kind: &'static str,
        label: &'static str,
        thread_id: u64,
        nanos: u64,
        timestamp_kind: TimestampKind,
    ) -> Event<'static> {
        Event {
            event_kind: Cow::from(event_kind),
            label: Cow::from(label),
            additional_data: &[],
            timestamp: SystemTime::UNIX_EPOCH + Duration::from_nanos(nanos),
            timestamp_kind,
            thread_id,
        }
    }

    #[test]
    fn build_interval_sequence() {
        // Three non-nested intervals, the middle one on a different thread.
        let mut builder = ProfilingDataBuilder::new();
        builder
            .interval("k1", "id1", 0, 10, 100, |_| {})
            .interval("k2", "id2", 1, 100, 110, |_| {})
            .interval("k3", "id3", 0, 120, 140, |_| {});

        let profiling_data = builder.into_profiling_data();
        let actual: Vec<Event<'_>> = profiling_data.iter().collect();

        let expected = vec![
            event("k1", "id1", 0, 10, TimestampKind::Start),
            event("k1", "id1", 0, 100, TimestampKind::End),
            event("k2", "id2", 1, 100, TimestampKind::Start),
            event("k2", "id2", 1, 110, TimestampKind::End),
            event("k3", "id3", 0, 120, TimestampKind::Start),
            event("k3", "id3", 0, 140, TimestampKind::End),
        ];
        assert_eq!(actual, expected);
    }

    #[test]
    fn build_nested_intervals() {
        // Three intervals nested inside each other; the stream interleaves
        // their Start/End markers in stack order.
        let mut builder = ProfilingDataBuilder::new();
        builder.interval("k1", "id1", 0, 10, 100, |inner| {
            inner.interval("k2", "id2", 0, 20, 100, |inner| {
                inner.interval("k3", "id3", 0, 30, 90, |_| {});
            });
        });

        let profiling_data = builder.into_profiling_data();
        let actual: Vec<Event<'_>> = profiling_data.iter().collect();

        let expected = vec![
            event("k1", "id1", 0, 10, TimestampKind::Start),
            event("k2", "id2", 0, 20, TimestampKind::Start),
            event("k3", "id3", 0, 30, TimestampKind::Start),
            event("k3", "id3", 0, 90, TimestampKind::End),
            event("k2", "id2", 0, 100, TimestampKind::End),
            event("k1", "id1", 0, 100, TimestampKind::End),
        ];
        assert_eq!(actual, expected);
    }

    #[test]
    fn build_intervals_and_instants() {
        // Mix of nested intervals and instant events; instants appear in the
        // stream at the point they were recorded.
        let mut builder = ProfilingDataBuilder::new();
        builder.interval("k1", "id1", 0, 10, 100, |inner| {
            inner
                .interval("k2", "id2", 0, 20, 100, |inner| {
                    inner.interval("k3", "id3", 0, 30, 90, |inner| {
                        inner.instant("k4", "id4", 0, 70);
                        inner.instant("k5", "id5", 0, 75);
                    });
                })
                .instant("k6", "id6", 0, 95);
        });

        let profiling_data = builder.into_profiling_data();
        let actual: Vec<Event<'_>> = profiling_data.iter().collect();

        let expected = vec![
            event("k1", "id1", 0, 10, TimestampKind::Start),
            event("k2", "id2", 0, 20, TimestampKind::Start),
            event("k3", "id3", 0, 30, TimestampKind::Start),
            event("k4", "id4", 0, 70, TimestampKind::Instant),
            event("k5", "id5", 0, 75, TimestampKind::Instant),
            event("k3", "id3", 0, 90, TimestampKind::End),
            event("k2", "id2", 0, 100, TimestampKind::End),
            event("k6", "id6", 0, 95, TimestampKind::Instant),
            event("k1", "id1", 0, 100, TimestampKind::End),
        ];
        assert_eq!(actual, expected);
    }
}
0 commit comments