@@ -13,6 +13,7 @@ const native_arch = builtin.cpu.arch;
 const native_os = builtin.os.tag;
 const native_endian = native_arch.endian();
 
+pub const MemoryAccessor = @import("debug/MemoryAccessor.zig");
 pub const Dwarf = @import("debug/Dwarf.zig");
 pub const Pdb = @import("debug/Pdb.zig");
 pub const SelfInfo = @import("debug/SelfInfo.zig");
@@ -243,7 +244,7 @@ pub inline fn getContext(context: *ThreadContext) bool {
 /// Tries to print the stack trace starting from the supplied base pointer to stderr,
 /// unbuffered, and ignores any error returned.
 /// TODO multithreaded awareness
-pub fn dumpStackTraceFromBase(context: *const ThreadContext) void {
+pub fn dumpStackTraceFromBase(context: *ThreadContext) void {
     nosuspend {
         if (comptime builtin.target.isWasm()) {
             if (native_os == .wasi) {
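
The hunk above drops `const` from the context parameter because the unwinder may now mutate the thread context it is handed. A minimal caller-side sketch (not part of the diff), assuming only the public std.debug API visible here (getContext, ThreadContext, dumpStackTraceFromBase); the helper name dumpHere is hypothetical:

    const std = @import("std");

    fn dumpHere() void {
        // getContext captures the current thread's register state and returns
        // false on targets where no context can be captured.
        var context: std.debug.ThreadContext = undefined;
        if (std.debug.getContext(&context)) {
            // With this change the context is passed mutably.
            std.debug.dumpStackTraceFromBase(&context);
        }
    }
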
@@ -545,7 +546,7 @@ pub const StackIterator = struct {
     // using DWARF and MachO unwind info.
     unwind_state: if (have_ucontext) ?struct {
         debug_info: *SelfInfo,
-        dwarf_context: Dwarf.UnwindContext,
+        dwarf_context: SelfInfo.UnwindContext,
         last_error: ?UnwindError = null,
         failed: bool = false,
     } else void = if (have_ucontext) null else {},
@@ -569,16 +570,16 @@ pub const StackIterator = struct {
         };
     }
 
-    pub fn initWithContext(first_address: ?usize, debug_info: *SelfInfo, context: *const posix.ucontext_t) !StackIterator {
+    pub fn initWithContext(first_address: ?usize, debug_info: *SelfInfo, context: *posix.ucontext_t) !StackIterator {
         // The implementation of DWARF unwinding on aarch64-macos is not complete. However, Apple mandates that
         // the frame pointer register is always used, so on this platform we can safely use the FP-based unwinder.
-        if (comptime builtin.target.isDarwin() and native_arch == .aarch64) {
+        if (builtin.target.isDarwin() and native_arch == .aarch64) {
             return init(first_address, context.mcontext.ss.fp);
         } else {
             var iterator = init(first_address, null);
             iterator.unwind_state = .{
                 .debug_info = debug_info,
-                .dwarf_context = try Dwarf.UnwindContext.init(debug_info.allocator, context),
+                .dwarf_context = try SelfInfo.UnwindContext.init(debug_info.allocator, context),
             };
 
             return iterator;
@@ -644,116 +645,6 @@ pub const StackIterator = struct {
         return address;
     }
 
-    fn isValidMemory(address: usize) bool {
-        // We are unable to determine validity of memory for freestanding targets
-        if (native_os == .freestanding or native_os == .uefi) return true;
-
-        const aligned_address = address & ~@as(usize, @intCast((mem.page_size - 1)));
-        if (aligned_address == 0) return false;
-        const aligned_memory = @as([*]align(mem.page_size) u8, @ptrFromInt(aligned_address))[0..mem.page_size];
-
-        if (native_os == .windows) {
-            var memory_info: windows.MEMORY_BASIC_INFORMATION = undefined;
-
-            // The only error this function can throw is ERROR_INVALID_PARAMETER.
-            // supply an address that invalid i'll be thrown.
-            const rc = windows.VirtualQuery(aligned_memory, &memory_info, aligned_memory.len) catch {
-                return false;
-            };
-
-            // Result code has to be bigger than zero (number of bytes written)
-            if (rc == 0) {
-                return false;
-            }
-
-            // Free pages cannot be read, they are unmapped
-            if (memory_info.State == windows.MEM_FREE) {
-                return false;
-            }
-
-            return true;
-        } else if (have_msync) {
-            posix.msync(aligned_memory, posix.MSF.ASYNC) catch |err| {
-                switch (err) {
-                    error.UnmappedMemory => return false,
-                    else => unreachable,
-                }
-            };
-
-            return true;
-        } else {
-            // We are unable to determine validity of memory on this target.
-            return true;
-        }
-    }
-
-    pub const MemoryAccessor = struct {
-        var cached_pid: posix.pid_t = -1;
-
-        mem: switch (native_os) {
-            .linux => File,
-            else => void,
-        },
-
-        pub const init: MemoryAccessor = .{
-            .mem = switch (native_os) {
-                .linux => .{ .handle = -1 },
-                else => {},
-            },
-        };
-
-        fn read(ma: *MemoryAccessor, address: usize, buf: []u8) bool {
-            switch (native_os) {
-                .linux => while (true) switch (ma.mem.handle) {
-                    -2 => break,
-                    -1 => {
-                        const linux = std.os.linux;
-                        const pid = switch (@atomicLoad(posix.pid_t, &cached_pid, .monotonic)) {
-                            -1 => pid: {
-                                const pid = linux.getpid();
-                                @atomicStore(posix.pid_t, &cached_pid, pid, .monotonic);
-                                break :pid pid;
-                            },
-                            else => |pid| pid,
-                        };
-                        const bytes_read = linux.process_vm_readv(
-                            pid,
-                            &.{.{ .base = buf.ptr, .len = buf.len }},
-                            &.{.{ .base = @ptrFromInt(address), .len = buf.len }},
-                            0,
-                        );
-                        switch (linux.E.init(bytes_read)) {
-                            .SUCCESS => return bytes_read == buf.len,
-                            .FAULT => return false,
-                            .INVAL, .PERM, .SRCH => unreachable, // own pid is always valid
-                            .NOMEM => {},
-                            .NOSYS => {}, // QEMU is known not to implement this syscall.
-                            else => unreachable, // unexpected
-                        }
-                        var path_buf: [
-                            std.fmt.count("/proc/{d}/mem", .{math.minInt(posix.pid_t)})
-                        ]u8 = undefined;
-                        const path = std.fmt.bufPrint(&path_buf, "/proc/{d}/mem", .{pid}) catch
-                            unreachable;
-                        ma.mem = std.fs.openFileAbsolute(path, .{}) catch {
-                            ma.mem.handle = -2;
-                            break;
-                        };
-                    },
-                    else => return (ma.mem.pread(buf, address) catch return false) == buf.len,
-                },
-                else => {},
-            }
-            if (!isValidMemory(address)) return false;
-            @memcpy(buf, @as([*]const u8, @ptrFromInt(address)));
-            return true;
-        }
-        pub fn load(ma: *MemoryAccessor, comptime Type: type, address: usize) ?Type {
-            var result: Type = undefined;
-            return if (ma.read(address, std.mem.asBytes(&result))) result else null;
-        }
-    };
-
     fn next_unwind(it: *StackIterator) !usize {
         const unwind_state = &it.unwind_state.?;
         const module = try unwind_state.debug_info.getModuleForAddress(unwind_state.dwarf_context.pc);
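
The struct deleted above is re-exported as std.debug.MemoryAccessor via the new debug/MemoryAccessor.zig import in the first hunk. A minimal sketch of how the moved type would be used, assuming it keeps the init/load API shown in the deleted code; the helper name probeWord is hypothetical:

    const std = @import("std");

    // Probe whether `address` can be read without faulting; returns the word
    // stored there, or null if the memory is not accessible.
    fn probeWord(address: usize) ?usize {
        var ma: std.debug.MemoryAccessor = std.debug.MemoryAccessor.init;
        return ma.load(usize, address);
    }
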
@@ -762,7 +653,13 @@ pub const StackIterator = struct {
                 // __unwind_info is a requirement for unwinding on Darwin. It may fall back to DWARF, but unwinding
                 // via DWARF before attempting to use the compact unwind info will produce incorrect results.
                 if (module.unwind_info) |unwind_info| {
-                    if (Dwarf.unwindFrameMachO(&unwind_state.dwarf_context, &it.ma, unwind_info, module.eh_frame, module.base_address)) |return_address| {
+                    if (SelfInfo.unwindFrameMachO(
+                        &unwind_state.dwarf_context,
+                        &it.ma,
+                        unwind_info,
+                        module.eh_frame,
+                        module.base_address,
+                    )) |return_address| {
                         return return_address;
                     } else |err| {
                         if (err != error.RequiresDWARFUnwind) return err;
@@ -773,7 +670,7 @@ pub const StackIterator = struct {
         }
 
         if (try module.getDwarfInfoForAddress(unwind_state.debug_info.allocator, unwind_state.dwarf_context.pc)) |di| {
-            return di.unwindFrame(&unwind_state.dwarf_context, &it.ma, null);
+            return SelfInfo.unwindFrameDwarf(di, &unwind_state.dwarf_context, &it.ma, null);
         } else return error.MissingDebugInfo;
     }
 
@@ -822,11 +719,6 @@ pub const StackIterator = struct {
     }
 };
 
-const have_msync = switch (native_os) {
-    .wasi, .emscripten, .windows => false,
-    else => true,
-};
-
 pub fn writeCurrentStackTrace(
     out_stream: anytype,
     debug_info: *SelfInfo,
@@ -1333,7 +1225,7 @@ fn handleSegfaultPosix(sig: i32, info: *const posix.siginfo_t, ctx_ptr: ?*anyopa
     posix.abort();
 }
 
-fn dumpSegfaultInfoPosix(sig: i32, code: i32, addr: usize, ctx_ptr: ?*const anyopaque) void {
+fn dumpSegfaultInfoPosix(sig: i32, code: i32, addr: usize, ctx_ptr: ?*anyopaque) void {
     const stderr = io.getStdErr().writer();
     _ = switch (sig) {
         posix.SIG.SEGV => if (native_arch == .x86_64 and native_os == .linux and code == 128) // SI_KERNEL
@@ -1359,7 +1251,7 @@ fn dumpSegfaultInfoPosix(sig: i32, code: i32, addr: usize, ctx_ptr: ?*const anyo
         .arm,
         .aarch64,
         => {
-            const ctx: *const posix.ucontext_t = @ptrCast(@alignCast(ctx_ptr));
+            const ctx: *posix.ucontext_t = @ptrCast(@alignCast(ctx_ptr));
             dumpStackTraceFromBase(ctx);
         },
         else => {},
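
For context, dumpSegfaultInfoPosix is only reached through the crash handlers that std.debug installs. A minimal sketch of enabling that path in a program, assuming the existing std.debug.attachSegfaultHandler entry point (it is not shown in this diff):

    const std = @import("std");

    pub fn main() void {
        // Install the handlers that eventually reach dumpSegfaultInfoPosix
        // (or the Windows equivalent) when the process faults.
        std.debug.attachSegfaultHandler();

        // ... rest of the program ...
    }
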
@@ -1585,6 +1477,99 @@ pub const SafetyLock = struct {
     }
 };
 
+/// Deprecated. Don't use this, just read from your memory directly.
+///
+/// This only exists because someone was too lazy to rework logic that used to
+/// operate on an open file to operate on a memory buffer instead.
+pub const DeprecatedFixedBufferReader = struct {
+    buf: []const u8,
+    pos: usize = 0,
+    endian: std.builtin.Endian,
+
+    pub const Error = error{ EndOfBuffer, Overflow, InvalidBuffer };
+
+    pub fn seekTo(fbr: *DeprecatedFixedBufferReader, pos: u64) Error!void {
+        if (pos > fbr.buf.len) return error.EndOfBuffer;
+        fbr.pos = @intCast(pos);
+    }
+
+    pub fn seekForward(fbr: *DeprecatedFixedBufferReader, amount: u64) Error!void {
+        if (fbr.buf.len - fbr.pos < amount) return error.EndOfBuffer;
+        fbr.pos += @intCast(amount);
+    }
+
+    pub inline fn readByte(fbr: *DeprecatedFixedBufferReader) Error!u8 {
+        if (fbr.pos >= fbr.buf.len) return error.EndOfBuffer;
+        defer fbr.pos += 1;
+        return fbr.buf[fbr.pos];
+    }
+
+    pub fn readByteSigned(fbr: *DeprecatedFixedBufferReader) Error!i8 {
+        return @bitCast(try fbr.readByte());
+    }
+
+    pub fn readInt(fbr: *DeprecatedFixedBufferReader, comptime T: type) Error!T {
+        const size = @divExact(@typeInfo(T).Int.bits, 8);
+        if (fbr.buf.len - fbr.pos < size) return error.EndOfBuffer;
+        defer fbr.pos += size;
+        return std.mem.readInt(T, fbr.buf[fbr.pos..][0..size], fbr.endian);
+    }
+
+    pub fn readIntChecked(
+        fbr: *DeprecatedFixedBufferReader,
+        comptime T: type,
+        ma: *MemoryAccessor,
+    ) Error!T {
+        if (ma.load(T, @intFromPtr(fbr.buf[fbr.pos..].ptr)) == null)
+            return error.InvalidBuffer;
+
+        return fbr.readInt(T);
+    }
+
+    pub fn readUleb128(fbr: *DeprecatedFixedBufferReader, comptime T: type) Error!T {
+        return std.leb.readUleb128(T, fbr);
+    }
+
+    pub fn readIleb128(fbr: *DeprecatedFixedBufferReader, comptime T: type) Error!T {
+        return std.leb.readIleb128(T, fbr);
+    }
+
+    pub fn readAddress(fbr: *DeprecatedFixedBufferReader, format: std.dwarf.Format) Error!u64 {
+        return switch (format) {
+            .@"32" => try fbr.readInt(u32),
+            .@"64" => try fbr.readInt(u64),
+        };
+    }
+
+    pub fn readAddressChecked(
+        fbr: *DeprecatedFixedBufferReader,
+        format: std.dwarf.Format,
+        ma: *MemoryAccessor,
+    ) Error!u64 {
+        return switch (format) {
+            .@"32" => try fbr.readIntChecked(u32, ma),
+            .@"64" => try fbr.readIntChecked(u64, ma),
+        };
+    }
+
+    pub fn readBytes(fbr: *DeprecatedFixedBufferReader, len: usize) Error![]const u8 {
+        if (fbr.buf.len - fbr.pos < len) return error.EndOfBuffer;
+        defer fbr.pos += len;
+        return fbr.buf[fbr.pos..][0..len];
+    }
+
+    pub fn readBytesTo(fbr: *DeprecatedFixedBufferReader, comptime sentinel: u8) Error![:sentinel]const u8 {
+        const end = @call(.always_inline, std.mem.indexOfScalarPos, .{
+            u8,
+            fbr.buf,
+            fbr.pos,
+            sentinel,
+        }) orelse return error.EndOfBuffer;
+        defer fbr.pos = end + 1;
+        return fbr.buf[fbr.pos..end :sentinel];
+    }
+};
+
 /// Detect whether the program is being executed in the Valgrind virtual machine.
 ///
 /// When Valgrind integrations are disabled, this returns comptime-known false.
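
A minimal sketch of using the DeprecatedFixedBufferReader added above; the byte buffer and the expected values are hypothetical, the API calls are the ones introduced in this hunk:

    const std = @import("std");

    test "read a little-endian u32 and a NUL-terminated name" {
        // Hypothetical buffer: a little-endian u32 (42) followed by "hi\x00".
        const bytes = [_]u8{ 0x2a, 0x00, 0x00, 0x00, 'h', 'i', 0 };
        var fbr: std.debug.DeprecatedFixedBufferReader = .{ .buf = &bytes, .endian = .little };
        try std.testing.expectEqual(@as(u32, 42), try fbr.readInt(u32));
        try std.testing.expectEqualStrings("hi", try fbr.readBytesTo(0));
    }
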