Commit 3a5aa87

Optimize the lifecycle of async requests
Async HTTP requests work by emitting a "Progress" object to a callback. This object has a "done" flag which, when `true`, indicates that all data has been emitted and no further "Progress" objects will be sent. Callers like XHR buffer the response and wait for "done == true" before processing it. The HTTP client relies on two important object pools: the connection and the state (with all the buffers for reading/writing). In its current implementation, the async flow does not release these pooled objects until the final callback has returned. At best, this is inefficient: we keep the connection and state objects checked out for longer than they have to be. At worst, it can lead to a deadlock: if the calling code issues a new request when done == true, we'll eventually run out of state objects in the pool.

This commit now releases the pooled objects before emitting the final "done" Progress message. For this to work, that final message will always have null data and an empty header object.
1 parent f436744 commit 3a5aa87
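To make the contract concrete, here is a minimal sketch of a consumer written against it. The `onHttpResponse` callback name and the Progress fields (`first`, `data`, `done`, `header.status`) match the diffs below; `ExampleHandler` and its fields are invented for illustration and are not part of this commit.

const std = @import("std");

// Hypothetical consumer of the Progress callback: the header is only available
// on earlier chunks (notably the first), data arrives incrementally, and the
// final chunk has `done == true`, null data, and an empty header.
const ExampleHandler = struct {
    status: u16 = 0,
    received: usize = 0,

    pub fn onHttpResponse(self: *ExampleHandler, progress: anytype) !void {
        if (progress.first) {
            // Capture anything needed later; the final chunk won't carry it.
            self.status = progress.header.status;
        }

        if (progress.data) |chunk| {
            self.received += chunk.len;
        }

        if (progress.done) {
            // Terminal signal only: no data, empty header.
            std.debug.print("complete: status={d} bytes={d}\n", .{ self.status, self.received });
        }
    }
};

Because the pooled connection and state are released before this final callback runs, the `done` branch is a safe place to start a follow-up request.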

2 files changed (+35, -8 lines)

src/browser/xhr/xhr.zig

Lines changed: 1 addition & 2 deletions
@@ -468,7 +468,6 @@ pub const XMLHttpRequest = struct {

        if (progress.first) {
            const header = progress.header;
-
            log.debug(.http, "request header", .{
                .source = "xhr",
                .url = self.url,
@@ -522,7 +521,7 @@ pub const XMLHttpRequest = struct {
        log.info(.http, "request complete", .{
            .source = "xhr",
            .url = self.url,
-           .status = progress.header.status,
+           .status = self.response_status,
        });

        // Not that the request is done, the http/client will free the request

src/http/client.zig

Lines changed: 34 additions & 6 deletions
@@ -828,6 +828,7 @@ fn AsyncHandler(comptime H: type, comptime L: type) type {
            wait,
            done,
            need_more,
+           handler_error,
        };

        fn deinit(self: *Self) void {
@@ -966,12 +967,35 @@ fn AsyncHandler(comptime H: type, comptime L: type) type {
            switch (status) {
                .wait => {},
                .need_more => self.receive(),
+               .handler_error => {
+                   // handler should never have been called if we're redirecting
+                   std.debug.assert(self.redirect == null);
+                   self.request.requestCompleted(self.reader.response);
+                   self.deinit();
+                   return;
+               },
                .done => {
                    const redirect = self.redirect orelse {
+                       var handler = self.handler;
                        self.request.requestCompleted(self.reader.response);
                        self.deinit();
+
+                       // Emit the done chunk. We expect the caller to do its
+                       // processing once the full request is completed. By
+                       // emitting this AFTER we've released the connection,
+                       // we free the connection and its state for re-use.
+                       // If we don't do it this way, we can end up with
+                       // _a lot_ of pending requests/states.
+                       // DO NOT USE `self` here, it's no longer valid.
+                       handler.onHttpResponse(.{
+                           .data = null,
+                           .done = true,
+                           .first = false,
+                           .header = .{},
+                       }) catch {};
                        return;
                    };
+
                    self.request.redirectAsync(redirect, self.loop, self.handler) catch |err| {
                        self.handleError("Setup async redirect", err);
                        return;
@@ -1116,22 +1140,22 @@ fn AsyncHandler(comptime H: type, comptime L: type) type {
                    self.handler.onHttpResponse(.{
                        .data = chunk,
                        .first = first,
-                       .done = next == null,
+                       .done = false,
                        .header = reader.response,
-                   }) catch return .done;
+                   }) catch return .handler_error;

                    first = false;
                }
            }
-       } else if (result.data != null or done or would_be_first) {
+       } else if (result.data != null or would_be_first) {
            // If we have data. Or if the request is done. Or if this is the
            // first time we have a complete header. Emit the chunk.
            self.handler.onHttpResponse(.{
-               .done = done,
+               .done = false,
                .data = result.data,
                .first = would_be_first,
                .header = reader.response,
-           }) catch return .done;
+           }) catch return .handler_error;
        }

        if (done == true) {
@@ -3135,7 +3159,8 @@ const CaptureHandler = struct {
        const progress = try progress_;
        const allocator = self.response.arena.allocator();
        try self.response.body.appendSlice(allocator, progress.data orelse "");
-       if (progress.done) {
+       if (progress.first) {
+           std.debug.assert(!progress.done);
            self.response.status = progress.header.status;
            try self.response.headers.ensureTotalCapacity(allocator, progress.header.headers.items.len);
            for (progress.header.headers.items) |header| {
@@ -3144,6 +3169,9 @@ const CaptureHandler = struct {
                    .value = try allocator.dupe(u8, header.value),
                });
            }
+       }
+
+       if (progress.done) {
            self.reset.set();
        }
    }
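As a rough illustration of the pool-exhaustion argument from the commit message, here is a self-contained toy model; none of these names exist in the repository, and the real client's pools are more involved than a counter.

const std = @import("std");

// Toy model (all names invented) of the ordering this commit enforces. With a
// pool of one state object and a terminal callback that immediately starts a
// new request, the second acquire only succeeds if the first state was
// released before the callback ran.
const StatePool = struct {
    available: usize,

    fn acquire(self: *StatePool) bool {
        if (self.available == 0) return false;
        self.available -= 1;
        return true;
    }

    fn release(self: *StatePool) void {
        self.available += 1;
    }
};

fn finishRequest(pool: *StatePool, on_done: *const fn (*StatePool) void) void {
    // Order matters: return the state to the pool first...
    pool.release();
    // ...then notify the caller, which may immediately issue a new request.
    on_done(pool);
}

fn onDone(pool: *StatePool) void {
    if (pool.acquire()) {
        std.debug.print("new request acquired a state object\n", .{});
    } else {
        std.debug.print("pool exhausted: new request would stall\n", .{});
    }
}

pub fn main() void {
    var pool = StatePool{ .available = 1 };
    _ = pool.acquire(); // the in-flight request holds the only state object
    finishRequest(&pool, &onDone); // prints the "acquired" branch because release comes first
}

With the old ordering (terminal callback first, release afterwards), the acquire inside the callback would fail; that is the stall the commit message describes when the caller issues a new request on done == true.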
