
Commit ebdbfcc

dsnet authored and gopherbot committed
encoding/json/jsontext: preserve buffer capacity in Encoder.Reset
This does the equivalent of CL 681177 for the Encoder.
It preserves the internal buffer between resets.

Change-Id: I5e9353b6d7755e067d4f9a4d1ea3d8f056253027
Reviewed-on: https://go-review.googlesource.com/c/go/+/690375
Reviewed-by: Johan Brandhorst-Satzkorn <[email protected]>
Auto-Submit: Joseph Tsai <[email protected]>
LUCI-TryBot-Result: Go LUCI <[email protected]>
Reviewed-by: Damien Neil <[email protected]>
Reviewed-by: Michael Knyszek <[email protected]>
1 parent 91c4f0c commit ebdbfcc
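
What the change buys callers: below is a hedged usage sketch (not part of this commit) that reuses a single Encoder across many values via Reset, which after this change keeps the previously grown internal buffer instead of discarding it. It assumes a toolchain where encoding/json/jsontext is available (for example Go 1.25 with GOEXPERIMENT=jsonv2); writeAll is an illustrative helper, not part of the package.

```go
package main

import (
	"encoding/json/jsontext"
	"io"
	"os"
)

// writeAll encodes each value as a top-level JSON value on w, reusing one
// Encoder. With this commit, the encoder's internal buffer survives each
// Reset, so the loop no longer reallocates it per iteration (except when
// w is a *bytes.Buffer, whose storage the encoder aliases instead).
func writeAll(w io.Writer, values []jsontext.Value) error {
	enc := jsontext.NewEncoder(w)
	for _, v := range values {
		enc.Reset(w) // buffer capacity is preserved across resets after this change
		if err := enc.WriteValue(v); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	vals := []jsontext.Value{
		jsontext.Value(`{"key":"value"}`),
		jsontext.Value(`[1,2,3]`),
	}
	if err := writeAll(os.Stdout, vals); err != nil {
		panic(err)
	}
}
```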

File tree

src/encoding/json/jsontext/decode.go
src/encoding/json/jsontext/decode_test.go
src/encoding/json/jsontext/encode.go
src/encoding/json/jsontext/encode_test.go

4 files changed: +109, -14 lines


src/encoding/json/jsontext/decode.go
Lines changed: 2 additions & 4 deletions

```diff
@@ -138,12 +138,10 @@ func (d *Decoder) Reset(r io.Reader, opts ...Options) {
     case d.s.Flags.Get(jsonflags.WithinArshalCall):
         panic("jsontext: cannot reset Decoder passed to json.UnmarshalerFrom")
     }
-    // If the decoder was previously aliasing a bytes.Buffer,
-    // invalidate the alias to avoid writing into the bytes.Buffer's
-    // internal buffer.
+    // Reuse the buffer if it does not alias a previous [bytes.Buffer].
     b := d.s.buf[:0]
     if _, ok := d.s.rd.(*bytes.Buffer); ok {
-        b = nil // avoid reusing b since it aliases the previous bytes.Buffer.
+        b = nil
     }
     d.s.reset(b, r, opts...)
 }
```

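The decode.go change above is an instance of a common slice-reuse idiom: keep the old buffer's capacity by re-slicing it to length zero, but drop it entirely when it may alias storage the decoder no longer owns (here, the previous bytes.Buffer's internals). A minimal sketch of that idiom in isolation; reusableBuffer and aliasesForeignStorage are illustrative names, not decoder internals.

```go
// reusableBuffer shows the reuse-or-drop decision in isolation.
// Re-slicing to [:0] keeps the backing array (and thus its capacity)
// while exposing zero elements; returning nil forces a fresh allocation
// the next time the buffer grows, which is the safe choice when the old
// backing array belongs to someone else (e.g. a bytes.Buffer we read from).
func reusableBuffer(old []byte, aliasesForeignStorage bool) []byte {
	if aliasesForeignStorage {
		return nil // appending into old would scribble over foreign bytes
	}
	return old[:0] // same backing array, length 0, capacity preserved
}
```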
src/encoding/json/jsontext/decode_test.go
Lines changed: 8 additions & 8 deletions

```diff
@@ -1269,14 +1269,14 @@ func TestPeekableDecoder(t *testing.T) {
 // TestDecoderReset tests that the decoder preserves its internal
 // buffer between Reset calls to avoid frequent allocations when reusing the decoder.
 // It ensures that the buffer capacity is maintained while avoiding aliasing
-// issues with bytes.Buffer.
+// issues with [bytes.Buffer].
 func TestDecoderReset(t *testing.T) {
-    // Create a decoder with a reasonably large JSON input to ensure buffer growth
+    // Create a decoder with a reasonably large JSON input to ensure buffer growth.
     largeJSON := `{"key1":"value1","key2":"value2","key3":"value3","key4":"value4","key5":"value5"}`
     dec := NewDecoder(strings.NewReader(largeJSON))

     t.Run("Test capacity preservation", func(t *testing.T) {
-        // Read the first JSON value to grow the internal buffer
+        // Read the first JSON value to grow the internal buffer.
         val1, err := dec.ReadValue()
         if err != nil {
             t.Fatalf("first ReadValue failed: %v", err)
@@ -1285,22 +1285,22 @@ func TestDecoderReset(t *testing.T) {
             t.Fatalf("first ReadValue = %q, want %q", val1, largeJSON)
         }

-        // Get the buffer capacity after first use
+        // Get the buffer capacity after first use.
         initialCapacity := cap(dec.s.buf)
         if initialCapacity == 0 {
             t.Fatalf("expected non-zero buffer capacity after first use")
         }

-        // Reset with a new reader - this should preserve the buffer capacity
+        // Reset with a new reader - this should preserve the buffer capacity.
         dec.Reset(strings.NewReader(largeJSON))

-        // Verify the buffer capacity is preserved (or at least not smaller)
+        // Verify the buffer capacity is preserved (or at least not smaller).
         preservedCapacity := cap(dec.s.buf)
         if preservedCapacity < initialCapacity {
             t.Fatalf("buffer capacity reduced after Reset: got %d, want at least %d", preservedCapacity, initialCapacity)
         }

-        // Read the second JSON value to ensure the decoder still works correctly
+        // Read the second JSON value to ensure the decoder still works correctly.
         val2, err := dec.ReadValue()
         if err != nil {
             t.Fatalf("second ReadValue failed: %v", err)
@@ -1317,7 +1317,7 @@ func TestDecoderReset(t *testing.T) {
         dec.Reset(bb)
         bbBuf = bb.Bytes()

-        // Read the third JSON value to ensure functionality with bytes.Buffer
+        // Read the third JSON value to ensure functionality with bytes.Buffer.
         val3, err := dec.ReadValue()
         if err != nil {
             t.Fatalf("fourth ReadValue failed: %v", err)
```

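The test above reaches into dec.s.buf to check capacity directly; from outside the package, the same benefit could be observed with an allocation-reporting benchmark. This is a hedged sketch, not part of the commit, and the strings.Reader created each iteration still allocates, so the expectation is only that allocs/op stays small and flat rather than growing with a re-allocated decode buffer.

```go
package jsontext_test

import (
	"encoding/json/jsontext"
	"strings"
	"testing"
)

// BenchmarkDecoderReuse resets one Decoder per iteration and reads a value.
// With the decoder's buffer retained across Reset calls, the reported
// allocs/op should be dominated by the per-iteration strings.Reader rather
// than by repeated growth of the internal decode buffer.
func BenchmarkDecoderReuse(b *testing.B) {
	const largeJSON = `{"key1":"value1","key2":"value2","key3":"value3"}`
	dec := jsontext.NewDecoder(strings.NewReader(largeJSON))
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		dec.Reset(strings.NewReader(largeJSON))
		if _, err := dec.ReadValue(); err != nil {
			b.Fatal(err)
		}
	}
}
```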
src/encoding/json/jsontext/encode.go
Lines changed: 7 additions & 2 deletions

```diff
@@ -107,12 +107,17 @@ func (e *Encoder) Reset(w io.Writer, opts ...Options) {
     case e.s.Flags.Get(jsonflags.WithinArshalCall):
         panic("jsontext: cannot reset Encoder passed to json.MarshalerTo")
     }
-    e.s.reset(nil, w, opts...)
+    // Reuse the buffer if it does not alias a previous [bytes.Buffer].
+    b := e.s.Buf[:0]
+    if _, ok := e.s.wr.(*bytes.Buffer); ok {
+        b = nil
+    }
+    e.s.reset(b, w, opts...)
 }

 func (e *encoderState) reset(b []byte, w io.Writer, opts ...Options) {
     e.state.reset()
-    e.encodeBuffer = encodeBuffer{Buf: b, wr: w, bufStats: e.bufStats}
+    e.encodeBuffer = encodeBuffer{Buf: b, wr: w, availBuffer: e.availBuffer, bufStats: e.bufStats}
     if bb, ok := w.(*bytes.Buffer); ok && bb != nil {
         e.Buf = bb.AvailableBuffer() // alias the unused buffer of bb
     }
```

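For background on the bytes.Buffer special case in encode.go: when the writer is a *bytes.Buffer, the encoder aliases the writer's spare capacity via AvailableBuffer rather than keeping its own buffer, which is why Reset must not carry Buf over once the previous writer was such a buffer. The standard-library pattern behind that aliasing looks roughly like this (an illustration, not the encoder's code).

```go
package main

import (
	"bytes"
	"fmt"
	"strconv"
)

func main() {
	var bb bytes.Buffer
	for i := 0; i < 4; i++ {
		b := bb.AvailableBuffer()              // empty slice over bb's unused capacity
		b = strconv.AppendInt(b, int64(i), 10) // append, usually into that spare space
		bb.Write(b)                            // no growth needed when b stayed within bb's capacity
	}
	fmt.Println(bb.String()) // prints 0123
}
```

Appending into the spare capacity and immediately writing it back lets the buffer absorb the bytes without reallocating, which is the copy-free path the encoder wants to keep when the writer really is a bytes.Buffer.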
src/encoding/json/jsontext/encode_test.go
Lines changed: 92 additions & 0 deletions

```diff
@@ -735,3 +735,95 @@ func testEncoderErrors(t *testing.T, where jsontest.CasePos, opts []Options, cal
         t.Fatalf("%s: Encoder.OutputOffset = %v, want %v", where, gotOffset, wantOffset)
     }
 }
+
+// TestEncoderReset tests that the encoder preserves its internal
+// buffer between Reset calls to avoid frequent allocations when reusing the encoder.
+// It ensures that the buffer capacity is maintained while avoiding aliasing
+// issues with [bytes.Buffer].
+func TestEncoderReset(t *testing.T) {
+    // Create an encoder with a reasonably large JSON input to ensure buffer growth.
+    largeJSON := `{"key1":"value1","key2":"value2","key3":"value3","key4":"value4","key5":"value5"}` + "\n"
+    bb := new(bytes.Buffer)
+    enc := NewEncoder(struct{ io.Writer }{bb}) // mask out underlying [bytes.Buffer]
+
+    t.Run("Test capacity preservation", func(t *testing.T) {
+        // Write the first JSON value to grow the internal buffer.
+        err := enc.WriteValue(append(enc.AvailableBuffer(), largeJSON...))
+        if err != nil {
+            t.Fatalf("first WriteValue failed: %v", err)
+        }
+        if bb.String() != largeJSON {
+            t.Fatalf("first WriteValue = %q, want %q", bb.String(), largeJSON)
+        }
+
+        // Get the buffer capacity after first use.
+        initialCapacity := cap(enc.s.Buf)
+        initialCacheCapacity := cap(enc.s.availBuffer)
+        if initialCapacity == 0 {
+            t.Fatalf("expected non-zero buffer capacity after first use")
+        }
+        if initialCacheCapacity == 0 {
+            t.Fatalf("expected non-zero cache capacity after first use")
+        }
+
+        // Reset with a new writer - this should preserve the buffer capacity.
+        bb.Reset()
+        enc.Reset(struct{ io.Writer }{bb})
+
+        // Verify the buffer capacity is preserved (or at least not smaller).
+        preservedCapacity := cap(enc.s.Buf)
+        if preservedCapacity < initialCapacity {
+            t.Fatalf("buffer capacity reduced after Reset: got %d, want at least %d", preservedCapacity, initialCapacity)
+        }
+        preservedCacheCapacity := cap(enc.s.availBuffer)
+        if preservedCacheCapacity < initialCacheCapacity {
+            t.Fatalf("cache capacity reduced after Reset: got %d, want at least %d", preservedCapacity, initialCapacity)
+        }
+
+        // Write the second JSON value to ensure the encoder still works correctly.
+        err = enc.WriteValue(append(enc.AvailableBuffer(), largeJSON...))
+        if err != nil {
+            t.Fatalf("second WriteValue failed: %v", err)
+        }
+        if bb.String() != largeJSON {
+            t.Fatalf("second WriteValue = %q, want %q", bb.String(), largeJSON)
+        }
+    })
+
+    t.Run("Test aliasing with bytes.Buffer", func(t *testing.T) {
+        // Test with bytes.Buffer to verify proper aliasing behavior.
+        bb.Reset()
+        enc.Reset(bb)
+
+        // Write the third JSON value to ensure functionality with bytes.Buffer.
+        err := enc.WriteValue([]byte(largeJSON))
+        if err != nil {
+            t.Fatalf("fourth WriteValue failed: %v", err)
+        }
+        if bb.String() != largeJSON {
+            t.Fatalf("fourth WriteValue = %q, want %q", bb.String(), largeJSON)
+        }
+        // The encoder buffer should alias bytes.Buffer's internal buffer.
+        if cap(enc.s.Buf) == 0 || cap(bb.AvailableBuffer()) == 0 || &enc.s.Buf[:1][0] != &bb.AvailableBuffer()[:1][0] {
+            t.Fatalf("encoder buffer does not alias bytes.Buffer")
+        }
+    })
+
+    t.Run("Test aliasing removed after Reset", func(t *testing.T) {
+        // Reset with a new writer and verify the buffer is not aliased.
+        bb.Reset()
+        enc.Reset(struct{ io.Writer }{bb})
+        err := enc.WriteValue([]byte(largeJSON))
+        if err != nil {
+            t.Fatalf("fifth WriteValue failed: %v", err)
+        }
+        if bb.String() != largeJSON {
+            t.Fatalf("fourth WriteValue = %q, want %q", bb.String(), largeJSON)
+        }
+
+        // The encoder buffer should not alias the bytes.Buffer's internal buffer.
+        if cap(enc.s.Buf) == 0 || cap(bb.AvailableBuffer()) == 0 || &enc.s.Buf[:1][0] == &bb.AvailableBuffer()[:1][0] {
+            t.Fatalf("encoder buffer aliases bytes.Buffer")
+        }
+    })
+}
```

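The aliasing assertions in TestEncoderReset rely on a pointer-comparison idiom: re-slicing a zero-length slice with nonzero capacity to [:1] makes its first element addressable, and two slices begin at the same backing storage exactly when those element addresses are equal. A small hedged helper spelling that out (sharesStart is an illustrative name, not part of the package).

```go
// sharesStart reports whether a and b begin at the same position of the
// same backing array. Re-slicing to [:1] is legal whenever cap > 0, even
// if len is 0, so the check also works for empty-but-capacious slices.
// Note it only detects a shared starting element, not arbitrary overlap.
func sharesStart(a, b []byte) bool {
	if cap(a) == 0 || cap(b) == 0 {
		return false // no addressable element to compare
	}
	return &a[:1][0] == &b[:1][0]
}
```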