diff --git a/lib/compiler/aro/aro/Driver/Filesystem.zig b/lib/compiler/aro/aro/Driver/Filesystem.zig
index 87092cb23513..ed1c0df72d0d 100644
--- a/lib/compiler/aro/aro/Driver/Filesystem.zig
+++ b/lib/compiler/aro/aro/Driver/Filesystem.zig
@@ -23,10 +23,11 @@ fn findProgramByNameFake(entries: []const Filesystem.Entry, name: []const u8, pa
     }
     const path_env = path orelse return null;
     var fib = std.heap.FixedBufferAllocator.init(buf);
+    const fib_initial_state = fib.savestate();
 
     var it = mem.tokenizeScalar(u8, path_env, std.fs.path.delimiter);
     while (it.next()) |path_dir| {
-        defer fib.reset();
+        defer fib.restore(fib_initial_state);
         const full_path = std.fs.path.join(fib.allocator(), &.{ path_dir, name }) catch continue;
         if (canExecuteFake(entries, full_path)) return full_path;
     }
@@ -84,10 +85,11 @@ fn findProgramByNamePosix(name: []const u8, path: ?[]const u8, buf: []u8) ?[]con
     }
     const path_env = path orelse return null;
     var fib = std.heap.FixedBufferAllocator.init(buf);
+    const fib_initial_state = fib.savestate();
 
     var it = mem.tokenizeScalar(u8, path_env, std.fs.path.delimiter);
     while (it.next()) |path_dir| {
-        defer fib.reset();
+        defer fib.restore(fib_initial_state);
         const full_path = std.fs.path.join(fib.allocator(), &.{ path_dir, name }) catch continue;
         if (canExecutePosix(full_path)) return full_path;
     }
diff --git a/lib/compiler/aro/aro/Toolchain.zig b/lib/compiler/aro/aro/Toolchain.zig
index 75c04d72158c..d25cda0e5a00 100644
--- a/lib/compiler/aro/aro/Toolchain.zig
+++ b/lib/compiler/aro/aro/Toolchain.zig
@@ -216,6 +216,7 @@ pub fn addFilePathLibArgs(tc: *const Toolchain, argv: *std.ArrayList([]const u8)
 fn getProgramPath(tc: *const Toolchain, name: []const u8, buf: []u8) []const u8 {
     var path_buf: [std.fs.max_path_bytes]u8 = undefined;
     var fib = std.heap.FixedBufferAllocator.init(&path_buf);
+    const fib_initial_state = fib.savestate();
 
     var tool_specific_buf: [64]u8 = undefined;
     var possible_name_buf: [2][]const u8 = undefined;
@@ -223,7 +224,7 @@ fn getProgramPath(tc: *const Toolchain, name: []const u8, buf: []u8) []const u8
 
     for (possible_names) |tool_name| {
         for (tc.program_paths.items) |program_path| {
-            defer fib.reset();
+            defer fib.restore(fib_initial_state);
 
             const candidate = std.fs.path.join(fib.allocator(), &.{ program_path, tool_name }) catch continue;
 
diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig
index 42c708cbfc7f..439120e7df44 100644
--- a/lib/compiler/test_runner.zig
+++ b/lib/compiler/test_runner.zig
@@ -37,6 +37,7 @@ pub fn main() void {
         return mainSimple() catch @panic("test failure\n");
     }
 
+    const fba_initial_state = fba.savestate();
     const args = std.process.argsAlloc(fba.allocator()) catch
         @panic("unable to parse command line args");
 
@@ -61,7 +62,7 @@ pub fn main() void {
 
         fuzz_abi.fuzzer_init(.fromSlice(cache_dir));
     }
 
-    fba.reset();
+    fba.restore(fba_initial_state);
     if (listen) {
         return mainServer() catch @panic("internal test runner failure");
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index b5fd8229597b..3eadc56f8e10 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -10,8 +10,11 @@ const windows = std.os.windows;
 const Alignment = std.mem.Alignment;
 
 pub const ArenaAllocator = @import("heap/arena_allocator.zig").ArenaAllocator;
+pub const BumpAllocator = @import("heap/BumpAllocator.zig");
 pub const SmpAllocator = @import("heap/SmpAllocator.zig");
-pub const FixedBufferAllocator = @import("heap/FixedBufferAllocator.zig");
+/// Deprecated; to be removed after 0.16.0 is tagged.
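+/// Use `BumpAllocator` directly in new code; this alias is kept for source compatibility.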
+pub const FixedBufferAllocator = BumpAllocator;
 pub const PageAllocator = @import("heap/PageAllocator.zig");
 pub const SbrkAllocator = @import("heap/sbrk_allocator.zig").SbrkAllocator;
 pub const ThreadSafeAllocator = @import("heap/ThreadSafeAllocator.zig");
@@ -451,7 +453,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
             ra: usize,
         ) bool {
             const self: *Self = @ptrCast(@alignCast(ctx));
-            if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
+            if (mem.sliceOwnsPtr(u8, &self.buffer, @ptrCast(buf.ptr))) {
                 return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, alignment, new_len, ra);
             } else {
                 return self.fallback_allocator.rawResize(buf, alignment, new_len, ra);
@@ -466,7 +468,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
             return_address: usize,
         ) ?[*]u8 {
             const self: *Self = @ptrCast(@alignCast(context));
-            if (self.fixed_buffer_allocator.ownsPtr(memory.ptr)) {
+            if (mem.sliceOwnsPtr(u8, &self.buffer, @ptrCast(memory.ptr))) {
                 return FixedBufferAllocator.remap(&self.fixed_buffer_allocator, memory, alignment, new_len, return_address);
             } else {
                 return self.fallback_allocator.rawRemap(memory, alignment, new_len, return_address);
@@ -480,7 +482,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
             ra: usize,
         ) void {
             const self: *Self = @ptrCast(@alignCast(ctx));
-            if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
+            if (mem.sliceOwnsPtr(u8, &self.buffer, @ptrCast(buf.ptr))) {
                 return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, alignment, ra);
             } else {
                 return self.fallback_allocator.rawFree(buf, alignment, ra);
@@ -998,8 +1000,8 @@ const page_size_max_default: ?usize = switch (builtin.os.tag) {
 test {
     _ = @import("heap/memory_pool.zig");
     _ = ArenaAllocator;
+    _ = BumpAllocator;
     _ = GeneralPurposeAllocator;
-    _ = FixedBufferAllocator;
     _ = ThreadSafeAllocator;
     _ = SbrkAllocator;
     if (builtin.target.cpu.arch.isWasm()) {
diff --git a/lib/std/heap/BumpAllocator.zig b/lib/std/heap/BumpAllocator.zig
new file mode 100644
index 000000000000..31da621e044e
--- /dev/null
+++ b/lib/std/heap/BumpAllocator.zig
@@ -0,0 +1,263 @@
+const std = @import("../std.zig");
+const assert = std.debug.assert;
+const Alignment = std.mem.Alignment;
+const Allocator = std.mem.Allocator;
+const safety = std.debug.runtime_safety;
+
+start: if (safety) [*]u8 else void,
+bump: [*]u8,
+end: [*]u8,
+
+pub fn init(buffer: []u8) @This() {
+    return .{
+        .start = if (safety) buffer.ptr else {},
+        .bump = buffer.ptr,
+        .end = buffer.ptr + buffer.len,
+    };
+}
+
+pub fn allocator(self: *@This()) Allocator {
+    return .{
+        .ptr = self,
+        .vtable = &.{
+            .alloc = alloc,
+            .resize = resize,
+            .remap = remap,
+            .free = free,
+        },
+    };
+}
+
+/// Provides a lock-free, thread-safe `Allocator` interface to the underlying `BumpAllocator`.
+/// Using this at the same time as the interface returned by `allocator` is not thread safe.
+/// Deprecated; to be removed after 0.16.0 is tagged.
+pub fn threadSafeAllocator(self: *@This()) Allocator {
+    return .{
+        .ptr = self,
+        .vtable = &.{
+            .alloc = threadSafeAlloc,
+            .resize = Allocator.noResize,
+            .remap = Allocator.noRemap,
+            .free = Allocator.noFree,
+        },
+    };
+}
+
+/// Save the current state of the bump allocator.
+pub fn savestate(self: *@This()) usize {
+    return @intFromPtr(self.bump);
+}
+
+/// Restore a previously saved allocator state (see `savestate`).
+/// Use @intFromPtr(buffer.ptr) to reset the bump allocator.
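+///
+/// Hypothetical usage sketch (names illustrative):
+///
+///     var bump: BumpAllocator = .init(&buffer);
+///     const state = bump.savestate();
+///     const scratch = try bump.allocator().alloc(u8, 16);
+///     defer bump.restore(state); // invalidates `scratch`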
+pub fn restore(self: *@This(), state: usize) void {
+    if (safety) assert(state >= @intFromPtr(self.start));
+    assert(state <= @intFromPtr(self.end));
+    self.bump = @ptrFromInt(state);
+}
+
+pub fn alloc(
+    ctx: *anyopaque,
+    length: usize,
+    alignment: Alignment,
+    _: usize,
+) ?[*]u8 {
+    const self: *@This() = @ptrCast(@alignCast(ctx));
+
+    if (@inComptime()) {
+        // Alignment greater than 1 requires us to know the self.bump
+        // pointer value, which is not currently possible at comptime.
+        assert(alignment == .@"1" or alignment == .@"0");
+        const address = self.bump;
+        self.bump += length;
+        return address;
+    }
+
+    // Forward alignment is slightly more expensive than backwards alignment,
+    // but in exchange we can grow our last allocation without wasting memory.
+    const aligned = alignment.forward(@intFromPtr(self.bump));
+    const end_addr, const overflow = @addWithOverflow(aligned, length);
+
+    // Guard against overflowing a usize, not just exceeding the end pointer.
+    // Bitwise OR is used here as short-circuiting emits another branch.
+    const exceed = end_addr > @intFromPtr(self.end);
+    if ((overflow == 1) | exceed) return null;
+
+    self.bump = @ptrFromInt(end_addr);
+    return @ptrFromInt(aligned);
+}
+
+pub fn resize(
+    ctx: *anyopaque,
+    memory: []u8,
+    _: Alignment,
+    new_length: usize,
+    _: usize,
+) bool {
+    const self: *@This() = @ptrCast(@alignCast(ctx));
+
+    // We cannot compare pointer values at comptime, which is
+    // required to see if growing an allocation would OOM.
+    const shrinking = new_length <= memory.len;
+    if (@inComptime()) return shrinking;
+
+    // Allocating memory sets the bump pointer to the next free address.
+    // If memory is not the most recent allocation, it cannot be grown.
+    if (memory.ptr + memory.len != self.bump) return shrinking;
+
+    const alloc_base = @intFromPtr(memory.ptr);
+    if (safety) assert(alloc_base >= @intFromPtr(self.start));
+    assert(alloc_base <= @intFromPtr(self.bump));
+
+    // For the most recent allocation, we can OOM iff we are not shrinking the
+    // allocation, and alloc_base + new_length exceeds or overflows self.end.
+    const end_addr, const overflow = @addWithOverflow(alloc_base, new_length);
+    const exceed = end_addr > @intFromPtr(self.end);
+    if (!shrinking and ((overflow == 1) | exceed)) return false;
+
+    self.bump = @ptrFromInt(end_addr);
+    return true;
+}
+
+pub fn remap(
+    ctx: *anyopaque,
+    memory: []u8,
+    _: Alignment,
+    new_length: usize,
+    _: usize,
+) ?[*]u8 {
+    if (resize(ctx, memory, undefined, new_length, undefined)) {
+        return memory.ptr;
+    } else {
+        return null;
+    }
+}
+
+pub fn free(
+    ctx: *anyopaque,
+    memory: []u8,
+    _: Alignment,
+    _: usize,
+) void {
+    const self: *@This() = @ptrCast(@alignCast(ctx));
+
+    // Only the last allocation can be freed, and only fully
+    // if the alignment cost for its allocation was a no-op.
+    if (memory.ptr + memory.len != self.bump) return;
+    self.bump = self.bump - memory.len;
+
+    // The safety checks below verify that we own the memory just freed.
+    // We cannot run these safety checks at comptime due to @intFromPtr.
+    if (@inComptime()) return;
+
+    const alloc_base = @intFromPtr(memory.ptr);
+    if (safety) assert(alloc_base >= @intFromPtr(self.start));
+    assert(alloc_base <= @intFromPtr(self.bump));
+}
+
+/// Deprecated; to be removed after 0.16.0 is tagged.
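+/// Lock-free: contention on `bump` is resolved with a compare-and-swap loop,
+/// so concurrent callers each receive distinct, non-overlapping regions.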
+fn threadSafeAlloc(
+    ctx: *anyopaque,
+    length: usize,
+    alignment: Alignment,
+    _: usize,
+) ?[*]u8 {
+    const self: *@This() = @ptrCast(@alignCast(ctx));
+
+    var old_bump: [*]u8 = @atomicLoad([*]u8, &self.bump, .seq_cst);
+    while (true) {
+        const aligned = alignment.forward(@intFromPtr(old_bump));
+        const end_addr, const overflow = @addWithOverflow(aligned, length);
+
+        const exceed = end_addr > @intFromPtr(self.end);
+        if ((overflow == 1) | exceed) return null;
+
+        const new_bump: [*]u8 = @ptrFromInt(end_addr);
+        if (@cmpxchgWeak([*]u8, &self.bump, old_bump, new_bump, .seq_cst, .seq_cst)) |prev| {
+            old_bump = prev;
+            continue;
+        }
+
+        return @ptrFromInt(aligned);
+    }
+}
+
+test "BumpAllocator" {
+    var buffer: [1 << 20]u8 = undefined;
+    var bump_allocator: @This() = .init(&buffer);
+    const gpa = bump_allocator.allocator();
+
+    try std.heap.testAllocator(gpa);
+    try std.heap.testAllocatorAligned(gpa);
+    try std.heap.testAllocatorAlignedShrink(gpa);
+    try std.heap.testAllocatorLargeAlignment(gpa);
+}
+
+test "savestate and restore" {
+    var buffer: [256]u8 = undefined;
+    var bump_allocator: @This() = .init(&buffer);
+    const gpa = bump_allocator.allocator();
+
+    const state_before = bump_allocator.savestate();
+    _ = try gpa.alloc(u8, buffer.len);
+
+    bump_allocator.restore(state_before);
+    _ = try gpa.alloc(u8, buffer.len);
+}
+
+test "reuse memory on realloc" {
+    var buffer: [10]u8 = undefined;
+    var bump_allocator: @This() = .init(&buffer);
+    const gpa = bump_allocator.allocator();
+
+    const slice_0 = try gpa.alloc(u8, 5);
+    const slice_1 = try gpa.realloc(slice_0, 10);
+    try std.testing.expect(slice_1.ptr == slice_0.ptr);
+}
+
+test "don't grow one allocation into another" {
+    var buffer: [10]u8 = undefined;
+    var bump_allocator: @This() = .init(&buffer);
+    const gpa = bump_allocator.allocator();
+
+    const slice_0 = try gpa.alloc(u8, 3);
+    const slice_1 = try gpa.alloc(u8, 3);
+    const slice_2 = try gpa.realloc(slice_0, 4);
+    try std.testing.expect(slice_2.ptr == slice_1.ptr + 3);
+}
+
+test "avoid integer overflow for obscene allocations" {
+    var buffer: [10]u8 = undefined;
+    var bump_allocator: @This() = .init(&buffer);
+    const gpa = bump_allocator.allocator();
+
+    _ = try gpa.alloc(u8, 5);
+    const problem = gpa.alloc(u8, std.math.maxInt(usize));
+    try std.testing.expectError(error.OutOfMemory, problem);
+}
+
+test "works at comptime for alignments <= 1" {
+    comptime {
+        var buffer: [256]u8 = undefined;
+        var bump_allocator: @This() = .init(&buffer);
+        const gpa = bump_allocator.allocator();
+
+        var list: std.ArrayList(u8) = .empty;
+        defer list.deinit(gpa);
+        for ("Hello, World!\n") |byte| {
+            try list.append(gpa, byte);
+        }
+    }
+}
+
+// Deprecated; to be removed after 0.16.0 is tagged.
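+// Exercises only the lock-free alloc path; resize, remap, and free are
+// no-ops through the thread-safe interface.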
+test "thread safe version" { + var buffer: [1 << 20]u8 = undefined; + var bump_allocator: @This() = .init(&buffer); + const gpa = bump_allocator.threadSafeAllocator(); + + try std.heap.testAllocator(gpa); + try std.heap.testAllocatorAligned(gpa); + try std.heap.testAllocatorAlignedShrink(gpa); + try std.heap.testAllocatorLargeAlignment(gpa); +} diff --git a/lib/std/heap/FixedBufferAllocator.zig b/lib/std/heap/FixedBufferAllocator.zig deleted file mode 100644 index 0951dd3bcc88..000000000000 --- a/lib/std/heap/FixedBufferAllocator.zig +++ /dev/null @@ -1,230 +0,0 @@ -const std = @import("../std.zig"); -const Allocator = std.mem.Allocator; -const assert = std.debug.assert; -const mem = std.mem; - -const FixedBufferAllocator = @This(); - -end_index: usize, -buffer: []u8, - -pub fn init(buffer: []u8) FixedBufferAllocator { - return .{ - .buffer = buffer, - .end_index = 0, - }; -} - -/// Using this at the same time as the interface returned by `threadSafeAllocator` is not thread safe. -pub fn allocator(self: *FixedBufferAllocator) Allocator { - return .{ - .ptr = self, - .vtable = &.{ - .alloc = alloc, - .resize = resize, - .remap = remap, - .free = free, - }, - }; -} - -/// Provides a lock free thread safe `Allocator` interface to the underlying `FixedBufferAllocator` -/// -/// Using this at the same time as the interface returned by `allocator` is not thread safe. -pub fn threadSafeAllocator(self: *FixedBufferAllocator) Allocator { - return .{ - .ptr = self, - .vtable = &.{ - .alloc = threadSafeAlloc, - .resize = Allocator.noResize, - .remap = Allocator.noRemap, - .free = Allocator.noFree, - }, - }; -} - -pub fn ownsPtr(self: *FixedBufferAllocator, ptr: [*]u8) bool { - return sliceContainsPtr(self.buffer, ptr); -} - -pub fn ownsSlice(self: *FixedBufferAllocator, slice: []u8) bool { - return sliceContainsSlice(self.buffer, slice); -} - -/// This has false negatives when the last allocation had an -/// adjusted_index. In such case we won't be able to determine what the -/// last allocation was because the alignForward operation done in alloc is -/// not reversible. 
-pub fn isLastAllocation(self: *FixedBufferAllocator, buf: []u8) bool {
-    return buf.ptr + buf.len == self.buffer.ptr + self.end_index;
-}
-
-pub fn alloc(ctx: *anyopaque, n: usize, alignment: mem.Alignment, ra: usize) ?[*]u8 {
-    const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
-    _ = ra;
-    const ptr_align = alignment.toByteUnits();
-    const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse return null;
-    const adjusted_index = self.end_index + adjust_off;
-    const new_end_index = adjusted_index + n;
-    if (new_end_index > self.buffer.len) return null;
-    self.end_index = new_end_index;
-    return self.buffer.ptr + adjusted_index;
-}
-
-pub fn resize(
-    ctx: *anyopaque,
-    buf: []u8,
-    alignment: mem.Alignment,
-    new_size: usize,
-    return_address: usize,
-) bool {
-    const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
-    _ = alignment;
-    _ = return_address;
-    assert(@inComptime() or self.ownsSlice(buf));
-
-    if (!self.isLastAllocation(buf)) {
-        if (new_size > buf.len) return false;
-        return true;
-    }
-
-    if (new_size <= buf.len) {
-        const sub = buf.len - new_size;
-        self.end_index -= sub;
-        return true;
-    }
-
-    const add = new_size - buf.len;
-    if (add + self.end_index > self.buffer.len) return false;
-
-    self.end_index += add;
-    return true;
-}
-
-pub fn remap(
-    context: *anyopaque,
-    memory: []u8,
-    alignment: mem.Alignment,
-    new_len: usize,
-    return_address: usize,
-) ?[*]u8 {
-    return if (resize(context, memory, alignment, new_len, return_address)) memory.ptr else null;
-}
-
-pub fn free(
-    ctx: *anyopaque,
-    buf: []u8,
-    alignment: mem.Alignment,
-    return_address: usize,
-) void {
-    const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
-    _ = alignment;
-    _ = return_address;
-    assert(@inComptime() or self.ownsSlice(buf));
-
-    if (self.isLastAllocation(buf)) {
-        self.end_index -= buf.len;
-    }
-}
-
-fn threadSafeAlloc(ctx: *anyopaque, n: usize, alignment: mem.Alignment, ra: usize) ?[*]u8 {
-    const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
-    _ = ra;
-    const ptr_align = alignment.toByteUnits();
-    var end_index = @atomicLoad(usize, &self.end_index, .seq_cst);
-    while (true) {
-        const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse return null;
-        const adjusted_index = end_index + adjust_off;
-        const new_end_index = adjusted_index + n;
-        if (new_end_index > self.buffer.len) return null;
-        end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .seq_cst, .seq_cst) orelse
-            return self.buffer[adjusted_index..new_end_index].ptr;
-    }
-}
-
-pub fn reset(self: *FixedBufferAllocator) void {
-    self.end_index = 0;
-}
-
-fn sliceContainsPtr(container: []u8, ptr: [*]u8) bool {
-    return @intFromPtr(ptr) >= @intFromPtr(container.ptr) and
-        @intFromPtr(ptr) < (@intFromPtr(container.ptr) + container.len);
-}
-
-fn sliceContainsSlice(container: []u8, slice: []u8) bool {
-    return @intFromPtr(slice.ptr) >= @intFromPtr(container.ptr) and
-        (@intFromPtr(slice.ptr) + slice.len) <= (@intFromPtr(container.ptr) + container.len);
-}
-
-var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined;
-
-test FixedBufferAllocator {
-    var fixed_buffer_allocator = mem.validationWrap(FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]));
-    const a = fixed_buffer_allocator.allocator();
-
-    try std.heap.testAllocator(a);
-    try std.heap.testAllocatorAligned(a);
-    try std.heap.testAllocatorLargeAlignment(a);
-    try std.heap.testAllocatorAlignedShrink(a);
-}
-
-test reset {
-    var buf: [8]u8 align(@alignOf(u64)) = undefined;
-    var fba = FixedBufferAllocator.init(buf[0..]);
-    const a = fba.allocator();
-
-    const X = 0xeeeeeeeeeeeeeeee;
-    const Y = 0xffffffffffffffff;
-
-    const x = try a.create(u64);
-    x.* = X;
-    try std.testing.expectError(error.OutOfMemory, a.create(u64));
-
-    fba.reset();
-    const y = try a.create(u64);
-    y.* = Y;
-
-    // we expect Y to have overwritten X.
-    try std.testing.expect(x.* == y.*);
-    try std.testing.expect(y.* == Y);
-}
-
-test "reuse memory on realloc" {
-    var small_fixed_buffer: [10]u8 = undefined;
-    // check if we re-use the memory
-    {
-        var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
-        const a = fixed_buffer_allocator.allocator();
-
-        const slice0 = try a.alloc(u8, 5);
-        try std.testing.expect(slice0.len == 5);
-        const slice1 = try a.realloc(slice0, 10);
-        try std.testing.expect(slice1.ptr == slice0.ptr);
-        try std.testing.expect(slice1.len == 10);
-        try std.testing.expectError(error.OutOfMemory, a.realloc(slice1, 11));
-    }
-    // check that we don't re-use the memory if it's not the most recent block
-    {
-        var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
-        const a = fixed_buffer_allocator.allocator();
-
-        var slice0 = try a.alloc(u8, 2);
-        slice0[0] = 1;
-        slice0[1] = 2;
-        const slice1 = try a.alloc(u8, 2);
-        const slice2 = try a.realloc(slice0, 4);
-        try std.testing.expect(slice0.ptr != slice2.ptr);
-        try std.testing.expect(slice1.ptr != slice2.ptr);
-        try std.testing.expect(slice2[0] == 1);
-        try std.testing.expect(slice2[1] == 2);
-    }
-}
-
-test "thread safe version" {
-    var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
-
-    try std.heap.testAllocator(fixed_buffer_allocator.threadSafeAllocator());
-    try std.heap.testAllocatorAligned(fixed_buffer_allocator.threadSafeAllocator());
-    try std.heap.testAllocatorLargeAlignment(fixed_buffer_allocator.threadSafeAllocator());
-    try std.heap.testAllocatorAlignedShrink(fixed_buffer_allocator.threadSafeAllocator());
-}
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 3cd0507bee95..3fa047faf96a 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -681,6 +681,13 @@ test orderZ {
     try testing.expect(orderZ(u8, "", "a") == .lt);
 }
 
+/// Returns true if a slice owns the memory for an element pointer
+pub fn sliceOwnsPtr(comptime T: type, slice: []const T, elem: *T) bool {
+    const above_base = @intFromPtr(elem) >= @intFromPtr(slice.ptr);
+    const below_limit = @intFromPtr(elem) < @intFromPtr(slice.ptr) + slice.len * @sizeOf(T);
+    return above_base and below_limit;
+}
+
 /// Returns true if lhs < rhs, false otherwise
 pub fn lessThan(comptime T: type, lhs: []const T, rhs: []const T) bool {
     return order(T, lhs, rhs) == .lt;
diff --git a/lib/std/os/plan9.zig b/lib/std/os/plan9.zig
index b910e26c7134..7906e6b9642a 100644
--- a/lib/std/os/plan9.zig
+++ b/lib/std/os/plan9.zig
@@ -293,10 +293,11 @@ pub fn openat(dirfd: i32, path: [*:0]const u8, flags: u32, _: mode_t) usize {
     const rc = fd2path(dirfd, &dir_path_buf, std.fs.max_path_bytes);
     if (rc != 0) return rc;
     var fba = std.heap.FixedBufferAllocator.init(&total_path_buf);
+    const fba_initial_state = fba.savestate();
     var alloc = fba.allocator();
     const dir_path = std.mem.span(@as([*:0]u8, @ptrCast(&dir_path_buf)));
     const total_path = std.fs.path.join(alloc, &.{ dir_path, std.mem.span(path) }) catch unreachable; // the allocation shouldn't fail because it should not exceed max_path_bytes
-    fba.reset();
+    fba.restore(fba_initial_state);
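+    // `total_path` was the first allocation in `fba`, so after the restore the
+    // `dupeZ` below reuses the same region of `total_path_buf`.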
     const total_path_z = alloc.dupeZ(u8, total_path) catch unreachable; // should not exceed max_path_bytes + 1
     return open(total_path_z.ptr, flags);
 }
diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig
index d08f5b60ba9d..2db297a890a4 100644
--- a/lib/std/zig/parser_test.zig
+++ b/lib/std/zig/parser_test.zig
@@ -6414,10 +6414,10 @@ fn testParse(source: [:0]const u8, allocator: mem.Allocator, anything_changed: *
     anything_changed.* = !mem.eql(u8, formatted, source);
     return formatted;
 }
-fn testTransformImpl(allocator: mem.Allocator, fba: *std.heap.FixedBufferAllocator, source: [:0]const u8, expected_source: []const u8) !void {
+fn testTransformImpl(allocator: mem.Allocator, fba: *std.heap.FixedBufferAllocator, fba_initial_state: usize, source: [:0]const u8, expected_source: []const u8) !void {
     // reset the fixed buffer allocator each run so that it can be re-used for each
     // iteration of the failing index
-    fba.reset();
+    fba.restore(fba_initial_state);
     var anything_changed: bool = undefined;
     const result_source = try testParse(source, allocator, &anything_changed);
     try std.testing.expectEqualStrings(expected_source, result_source);
@@ -6431,7 +6431,8 @@ fn testTransformImpl(allocator: mem.Allocator, fba: *std.heap.FixedBufferAllocat
 }
 fn testTransform(source: [:0]const u8, expected_source: []const u8) !void {
     var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
-    return std.testing.checkAllAllocationFailures(fixed_allocator.allocator(), testTransformImpl, .{ &fixed_allocator, source, expected_source });
+    const fba_initial_state = fixed_allocator.savestate();
+    return std.testing.checkAllAllocationFailures(fixed_allocator.allocator(), testTransformImpl, .{ &fixed_allocator, fba_initial_state, source, expected_source });
 }
 fn testCanonical(source: [:0]const u8) !void {
     return testTransform(source, source);
 }
diff --git a/src/crash_report.zig b/src/crash_report.zig
index b051752c7a02..f6bf80bcde36 100644
--- a/src/crash_report.zig
+++ b/src/crash_report.zig
@@ -116,6 +116,7 @@ fn dumpCrashContextSema(anal: *AnalyzeBody, stderr: *Io.Writer, crash_heap: []u8
     const comp = zcu.comp;
 
     var fba: std.heap.FixedBufferAllocator = .init(crash_heap);
+    const fba_initial_state = fba.savestate();
 
     const file, const src_base_node = Zcu.LazySrcLoc.resolveBaseNode(block.src_base_inst, zcu) orelse {
         const file = zcu.fileByIndex(block.src_base_inst.resolveFile(&zcu.intern_pool));
@@ -146,7 +147,7 @@ fn dumpCrashContextSema(anal: *AnalyzeBody, stderr: *Io.Writer, crash_heap: []u8
 
     var parent = anal.parent;
     while (parent) |curr| {
-        fba.reset();
+        fba.restore(fba_initial_state);
         const cur_block_file = zcu.fileByIndex(curr.block.src_base_inst.resolveFile(&zcu.intern_pool));
         try stderr.print(" in {f}\n", .{cur_block_file.path.fmt(comp)});
         _, const cur_block_src_base_node = Zcu.LazySrcLoc.resolveBaseNode(curr.block.src_base_inst, zcu) orelse {