
Commit 8a66c9d

chore: Add issue references (#226)
1 parent 36e1b16 commit 8a66c9d

3 files changed: +12 -0 lines changed

c-dependencies/js-compute-runtime/builtins/fastly.cpp

Lines changed: 1 addition & 0 deletions
@@ -63,6 +63,7 @@ bool Fastly::getGeolocationForIpAddress(JSContext *cx, unsigned argc, JS::Value
 
 // TODO(performance): consider allowing logger creation during initialization, but then throw
 // when trying to log.
+// https://github.com/fastly/js-compute-runtime/issues/225
 bool Fastly::getLogger(JSContext *cx, unsigned argc, JS::Value *vp) {
   JS::CallArgs args = CallArgsFromVp(argc, vp);
   REQUEST_HANDLER_ONLY("fastly.getLogger");
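The TODO in this hunk (tracked in issue #225) is about letting a logger object be created during initialization and only failing when it is actually used to log, instead of rejecting creation outright via REQUEST_HANDLER_ONLY. A minimal standalone C++ sketch of that phase-gating idea; the Phase, current_phase, and Logger names are illustrative and not part of the runtime:

#include <stdexcept>
#include <string>
#include <utility>

// Hypothetical sketch only: models "create anytime, throw on use during init".
enum class Phase { Initialization, RequestHandler };
static Phase current_phase = Phase::Initialization;

class Logger {
public:
  explicit Logger(std::string endpoint) : endpoint_(std::move(endpoint)) {}

  // Construction involves no host interaction, so it is safe in any phase;
  // only the actual log call is gated on the request-handling phase.
  void log(const std::string &msg) {
    if (current_phase != Phase::RequestHandler)
      throw std::runtime_error("logging is only available while handling a request");
    // A real implementation would perform the host log call here.
    (void)msg;
  }

private:
  std::string endpoint_;
};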

c-dependencies/js-compute-runtime/js-compute-builtins.cpp

Lines changed: 8 additions & 0 deletions
@@ -123,6 +123,7 @@ static JS::PersistentRootedObjectVector *pending_body_reads;
 
 // TODO(performance): introduce a version that writes into an existing buffer, and use that
 // with the hostcall buffer where possible.
+// https://github.com/fastly/js-compute-runtime/issues/215
 UniqueChars encode(JSContext *cx, HandleString str, size_t *encoded_len) {
   UniqueChars text = JS_EncodeStringToUTF8(cx, str);
   if (!text)
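Issue #215, referenced in the hunk above, asks for an encode variant that writes into an existing buffer (such as a reusable hostcall buffer) instead of allocating a fresh one each time. A rough sketch of that API shape; encode_into and its parameters are illustrative, not the runtime's:

#include <cstddef>
#include <cstring>
#include <string_view>

// Hypothetical sketch: write already-encoded UTF-8 bytes into a caller-provided
// buffer, reporting the number of bytes written. Returning false on overflow
// lets the caller fall back to an allocating path like the current encode().
bool encode_into(std::string_view utf8, char *dst, size_t dst_cap, size_t *encoded_len) {
  if (utf8.size() > dst_cap)
    return false;
  std::memcpy(dst, utf8.data(), utf8.size());
  *encoded_len = utf8.size();
  return true;
}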
@@ -197,8 +198,10 @@ static char *read_from_handle_all(JSContext *cx, HandleType handle, size_t *nwri
                                   bool read_until_zero) {
   // TODO(performance): investigate passing a size hint in situations where we might know
   // the final size, e.g. via the `content-length` header.
+  // https://github.com/fastly/js-compute-runtime/issues/216
   size_t buf_size = HANDLE_READ_CHUNK_SIZE;
   // TODO(performance): make use of malloc slack.
+  // https://github.com/fastly/js-compute-runtime/issues/217
   char *buf = static_cast<char *>(JS_malloc(cx, buf_size));
   if (!buf) {
     JS_ReportOutOfMemory(cx);
@@ -223,6 +226,7 @@ static char *read_from_handle_all(JSContext *cx, HandleType handle, size_t *nwri
     }
 
     // TODO(performance): make use of malloc slack, and use a smarter buffer growth strategy.
+    // https://github.com/fastly/js-compute-runtime/issues/217
     size_t new_size = buf_size + HANDLE_READ_CHUNK_SIZE;
     new_buf = static_cast<char *>(JS_realloc(cx, buf, buf_size, new_size));
     if (!new_buf) {
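Issues #216 and #217, referenced in the two hunks above, both target the read loop's allocation behavior: use a size hint (for example from a `content-length` header) when the final size is known, and otherwise grow the buffer geometrically instead of by a fixed HANDLE_READ_CHUNK_SIZE per iteration. A standalone sketch of such a growth strategy, using plain realloc rather than JS_malloc/JS_realloc:

#include <algorithm>
#include <cstddef>
#include <cstdlib>

constexpr size_t CHUNK = 8192; // stand-in for HANDLE_READ_CHUNK_SIZE

// Grow `buf` so it can hold at least `needed` bytes. The first growth starts
// from the size hint when one is available; afterwards the capacity doubles,
// so reading n bytes costs O(log n) reallocations instead of n / CHUNK.
char *grow_buffer(char *buf, size_t *cap, size_t needed, size_t size_hint) {
  size_t new_cap = *cap ? *cap : std::max(size_hint, CHUNK);
  while (new_cap < needed)
    new_cap *= 2;
  if (new_cap == *cap)
    return buf; // already large enough
  char *new_buf = static_cast<char *>(std::realloc(buf, new_cap));
  if (!new_buf)
    return nullptr; // caller frees the old buffer and reports OOM, as the runtime does
  *cap = new_cap;
  return new_buf;
}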
@@ -740,6 +744,7 @@ bool bodyAll(JSContext *cx, CallArgs args, HandleObject self) {
   // we need to manually read all chunks from the stream.
   // TODO(performance): ensure that we're properly shortcutting reads from TransformStream
   // readables.
+  // https://github.com/fastly/js-compute-runtime/issues/218
   RootedObject stream(cx, body_stream(self));
   if (stream && !builtins::NativeStreamSource::stream_is_body(cx, stream)) {
     if (!consume_content_stream_for_bodyAll(cx, self, stream, body_parser)) {
@@ -2148,8 +2153,10 @@ bool constructor(JSContext *cx, unsigned argc, Value *vp) {
   // e.g. to represent cache entries. While that's perhaps not ideal to begin
   // with, it exists, so we should handle it in a good way, and not be
   // superfluously slow.
+  // https://github.com/fastly/js-compute-runtime/issues/219
   // TODO(performance): enable creating Response objects during the init phase, and only
   // creating the host-side representation when processing requests.
+  // https://github.com/fastly/js-compute-runtime/issues/220
   ResponseHandle response_handle = {.handle = INVALID_HANDLE};
   if (!HANDLE_RESULT(cx, xqd_resp_new(&response_handle))) {
     return false;
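Issue #220 in this hunk proposes constructing the JS-side Response object during the init phase and creating the host-side representation only when a request is actually being processed. A hedged standalone sketch of that lazy-handle idea; HostHandle, acquire_host_handle, and LazyResponse are illustrative stand-ins (the real code calls xqd_resp_new and uses INVALID_HANDLE):

#include <cstdint>

using HostHandle = uint32_t;
constexpr HostHandle INVALID = UINT32_MAX; // plays the role of INVALID_HANDLE

// Hypothetical stand-in for a host call such as xqd_resp_new.
HostHandle acquire_host_handle() {
  static HostHandle next = 1;
  return next++;
}

class LazyResponse {
public:
  // Constructible during initialization: no host interaction happens here.
  LazyResponse() = default;

  // The host-side representation is created on first use, i.e. while a
  // request is being processed, rather than eagerly in the constructor.
  HostHandle handle() {
    if (handle_ == INVALID)
      handle_ = acquire_host_handle();
    return handle_;
  }

private:
  HostHandle handle_ = INVALID;
};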
@@ -2926,6 +2933,7 @@ bool append(JSContext *cx, unsigned argc, Value *vp) {
  *
  * Assumes that both the name and value are valid and normalized.
  * TODO(performance): fully skip normalization.
+ * https://github.com/fastly/js-compute-runtime/issues/221
  */
 bool maybe_add(JSContext *cx, HandleObject self, const char *name, const char *value) {
   MOZ_ASSERT(is_instance(self));

c-dependencies/js-compute-runtime/js-compute-runtime.cpp

Lines changed: 3 additions & 0 deletions
@@ -273,6 +273,7 @@ bool eval_stdin(JSContext *cx, MutableHandleValue result) {
   // by parsing the script (but not evaluating it) tend to be read-only, so
   // optimizing them for compactness makes sense and doesn't fragment writes
   // later on.
+  // https://github.com/fastly/js-compute-runtime/issues/222
   JS::PrepareForFullGC(cx);
   JS::NonIncrementalGC(cx, JS::GCOptions::Shrink, JS::GCReason::API);
 
@@ -300,6 +301,7 @@ bool eval_stdin(JSContext *cx, MutableHandleValue result) {
   // running GC like this. The working theory is that otherwise the engine might
   // mark chunk pages as free that then later the allocator doesn't turn into
   // chunks without further fragmentation. But that might be wrong.
+  // https://github.com/fastly/js-compute-runtime/issues/223
   // JS_SetGCParameter(cx, JSGC_MAX_EMPTY_CHUNK_COUNT, 10);
 
   // TODO(performance): verify that it's better to *not* perform a shrinking GC here, as
@@ -311,6 +313,7 @@ bool eval_stdin(JSContext *cx, MutableHandleValue result) {
   // object kinds that are initially allocated in the same vicinity, but that
   // the shrinking GC causes them to be intermingled with other objects. I.e.,
   // writes become more fragmented due to the shrinking GC.
+  // https://github.com/fastly/js-compute-runtime/issues/224
   JS::PrepareForFullGC(cx);
   JS::NonIncrementalGC(cx, JS::GCOptions::Normal, JS::GCReason::API);
 