src/browser/browser.zig (2 changes: 1 addition & 1 deletion)

@@ -267,7 +267,7 @@ pub const Page = struct {
         // load polyfills
         try polyfill.load(self.arena, self.scope);
 
-        // _ = try session.browser.app.loop.timeout(1 * std.time.ns_per_ms, &self.microtask_node);
+        _ = try session.browser.app.loop.timeout(1 * std.time.ns_per_ms, &self.microtask_node);
     }
 
     fn microtaskCallback(node: *Loop.CallbackNode, repeat_delay: *?u63) void {
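The re-enabled call registers the page's microtask_node with a 1 ms timeout. As the microtaskCallback signature above suggests, a CallbackNode's func re-arms itself by writing a new delay into repeat_delay. A minimal illustrative sketch of that pattern follows; only the Loop.CallbackNode type and the (node, repeat_delay) signature come from the diff, everything else is made up:

```zig
const std = @import("std");

// Illustrative repeating callback; "tick" and its body are assumptions, not project code.
fn tick(node: *Loop.CallbackNode, repeat_delay: *?u63) void {
    _ = node; // a real callback would recover its owner, e.g. via @fieldParentPtr
    // ... do the periodic work here ...
    // Writing a delay asks the loop to schedule this callback again;
    // leaving repeat_delay null lets the timeout expire for good.
    repeat_delay.* = 1 * std.time.ns_per_ms;
}
```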
src/runtime/loop.zig (29 changes: 22 additions & 7 deletions)

@@ -41,6 +41,9 @@ pub const Loop = struct {
     // event are finished.
     events_nb: usize,
 
+    // Used to stop repeating timeouts when loop.run is called.
+    stopping: bool,
Collaborator (author) commented on the new stopping field:
@krichprollsch do operations on events_nb really need to be synchronized/atomic? Isn't this all single threaded?

If I'm wrong, I should add atomic operations around stopping too.

Member replied:
Well, I guess it was in case of future multi-threaded use...
I'm not sure we should maintain it, since many other things would break in that case anyway...


     // ctx_id is incremented each time the loop is reset.
     // All callbacks store an initial ctx_id and compare before execution.
     // If a ctx is outdated, the callback is ignored.
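As a side note on the thread above: if the loop ever did become multi-threaded, "atomic operations around stopping" (and events_nb) could look roughly like the sketch below, using std.atomic.Value from Zig 0.12+. This is purely illustrative and not part of the change:

```zig
const std = @import("std");

// Illustrative only: atomic counterparts of events_nb / stopping.
var events_nb = std.atomic.Value(usize).init(0);
var stopping = std.atomic.Value(bool).init(false);

fn addEvent() void {
    _ = events_nb.fetchAdd(1, .seq_cst);
}

fn removeEvent() void {
    _ = events_nb.fetchSub(1, .seq_cst);
}

fn requestStop() void {
    stopping.store(true, .seq_cst);
}

fn isStopping() bool {
    return stopping.load(.seq_cst);
}
```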
@@ -62,11 +65,12 @@
     pub const ConnectError = IO.ConnectError;
 
     pub fn init(alloc: std.mem.Allocator) !Self {
-        return Self{
+        return .{
             .alloc = alloc,
             .cancelled = .{},
             .io = try IO.init(32, 0),
             .events_nb = 0,
+            .stopping = false,
             .timeout_pool = MemoryPool(ContextTimeout).init(alloc),
             .event_callback_pool = MemoryPool(EventCallbackContext).init(alloc),
         };
@@ -98,6 +102,10 @@ pub const Loop = struct {
     // Note that I/O events callbacks might register more I/O events
     // on the go when they are executed (ie. nested I/O events).
     pub fn run(self: *Self) !void {
+        // stop repeating / interval timeouts from re-registering
+        self.stopping = true;
+        defer self.stopping = false;
+
         while (self.eventsNb() > 0) {
             try self.io.run_for_ns(10 * std.time.ns_per_ms);
             // at each iteration we might have new events registred by previous callbacks
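The guard matters because run() drains until eventsNb() reaches zero, which a repeating timeout that keeps re-arming itself would never allow. A rough usage sketch of the intent, built only from calls visible in this diff (the allocator and tick_node are assumed to exist):

```zig
// Illustrative usage, not project code.
var loop = try Loop.init(allocator);

// Register a repeating ~1ms tick; its callback re-arms itself via repeat_delay.
_ = try loop.timeout(1 * std.time.ns_per_ms, &tick_node);

// run() sets `stopping` for its whole duration, so the tick may fire while
// draining but will not re-register, and pending events can reach zero.
try loop.run();
```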
@@ -134,6 +142,7 @@
     const ContextTimeout = struct {
         loop: *Self,
         ctx_id: u32,
+        initial: bool = true,
         callback_node: ?*CallbackNode,
     };

@@ -145,8 +154,11 @@
         var repeating = false;
         const loop = ctx.loop;
 
-        defer {
+        if (ctx.initial) {
             loop.removeEvent();
+        }
+
+        defer {
             if (repeating == false) {
                 loop.timeout_pool.destroy(ctx);
                 loop.alloc.destroy(completion);
@@ -174,10 +186,13 @@
         if (ctx.callback_node) |cn| {
             var repeat_in: ?u63 = null;
             cn.func(cn, &repeat_in);
-            if (repeat_in) |r| {
-                // prevents our context and completion from being cleaned up
-                repeating = true;
-                loop.scheduleTimeout(r, ctx, completion);
+            if (loop.stopping == false) {
+                if (repeat_in) |r| {
+                    // prevents our context and completion from being cleaned up
+                    repeating = true;
+                    ctx.initial = false;
+                    loop.scheduleTimeout(r, ctx, completion);
+                }
             }
         }
     }
@@ -195,12 +210,12 @@
             .callback_node = callback_node,
         };
 
+        self.addEvent();
         self.scheduleTimeout(nanoseconds, ctx, completion);
         return @intFromPtr(completion);
     }
 
     fn scheduleTimeout(self: *Self, nanoseconds: u63, ctx: *ContextTimeout, completion: *Completion) void {
-        self.addEvent();
         self.io.timeout(*ContextTimeout, ctx, timeoutCallback, completion, nanoseconds);
     }
