diff --git a/CHANGELOG.md b/CHANGELOG.md index c6fe673a8fc..9e8d56765ea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#7372) - Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#7372) - Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7372) +- Fix `AddAttributes`, `SetAttributes`, `SetBody` on `Record` in `go.opentelemetry.io/otel/sdk/log` to not mutate input. (#7403) ### Removed diff --git a/sdk/log/race_off_test.go b/sdk/log/race_off_test.go deleted file mode 100644 index 6479bf9c54c..00000000000 --- a/sdk/log/race_off_test.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -//go:build !race -// +build !race - -package log - -func race() bool { - return false -} diff --git a/sdk/log/race_on_test.go b/sdk/log/race_on_test.go deleted file mode 100644 index c726aa8a5cd..00000000000 --- a/sdk/log/race_on_test.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -//go:build race -// +build race - -package log - -func race() bool { - return true -} diff --git a/sdk/log/record.go b/sdk/log/record.go index 9dfd69b645b..5b830b7ea8f 100644 --- a/sdk/log/record.go +++ b/sdk/log/record.go @@ -27,7 +27,25 @@ var logAttrDropped = sync.OnceFunc(func() { global.Warn("limit reached: dropping log Record attributes") }) -// indexPool is a pool of index maps used for de-duplication. +// uniquePool is a pool of unique attributes used for attributes de-duplication. +var uniquePool = sync.Pool{ + New: func() any { return new([]log.KeyValue) }, +} + +func getUnique() *[]log.KeyValue { + return uniquePool.Get().(*[]log.KeyValue) +} + +func putUnique(v *[]log.KeyValue) { + // To reduce peak allocation. + const maxUniqueSize = 1028 + if cap(*v) <= maxUniqueSize { + *v = (*v)[:0] + uniquePool.Put(v) + } +} + +// indexPool is a pool of index maps used for attributes de-duplication. var indexPool = sync.Pool{ New: func() any { return make(map[string]int) }, } @@ -41,6 +59,20 @@ func putIndex(index map[string]int) { indexPool.Put(index) } +// seenPool is a pool of seen keys used for maps de-duplication. +var seenPool = sync.Pool{ + New: func() any { return make(map[string]struct{}) }, +} + +func getSeen() map[string]struct{} { + return seenPool.Get().(map[string]struct{}) +} + +func putSeen(seen map[string]struct{}) { + clear(seen) + seenPool.Put(seen) +} + // Record is a log record emitted by the Logger. // A log record with non-empty event name is interpreted as an event record. // @@ -212,34 +244,36 @@ func (r *Record) AddAttributes(attrs ...log.KeyValue) { } if !r.allowDupKeys { + // Use a slice from the pool to avoid modifying the original. + // Note, do not iterate attrs twice by just calling dedup(attrs) here. + unique := getUnique() + defer putUnique(unique) + // Used to find duplicates between attrs and existing attributes in r. rIndex := r.attrIndex() defer putIndex(rIndex) - // Unique attrs that need to be added to r. This uses the same underlying - // array as attrs. - // - // Note, do not iterate attrs twice by just calling dedup(attrs) here. - unique := attrs[:0] - // Used to find duplicates within attrs itself. 
The index value is the - // index of the element in unique. + // Used to find duplicates within attrs itself. + // The index value is the index of the element in unique. uIndex := getIndex() defer putIndex(uIndex) + dropped := 0 + // Deduplicate attrs within the scope of all existing attributes. for _, a := range attrs { // Last-value-wins for any duplicates in attrs. idx, found := uIndex[a.Key] if found { - r.addDropped(1) - unique[idx] = a + dropped++ + (*unique)[idx] = a continue } idx, found = rIndex[a.Key] if found { // New attrs overwrite any existing with the same key. - r.addDropped(1) + dropped++ if idx < 0 { r.front[-(idx + 1)] = a } else { @@ -247,11 +281,16 @@ func (r *Record) AddAttributes(attrs ...log.KeyValue) { } } else { // Unique attribute. - unique = append(unique, a) - uIndex[a.Key] = len(unique) - 1 + (*unique) = append(*unique, a) + uIndex[a.Key] = len(*unique) - 1 } } - attrs = unique + + if dropped > 0 { + attrs = make([]log.KeyValue, len(*unique)) + copy(attrs, *unique) + r.addDropped(dropped) + } } if r.attributeCountLimit > 0 && n+len(attrs) > r.attributeCountLimit { @@ -294,15 +333,17 @@ func (r *Record) addAttrs(attrs []log.KeyValue) { var i int for i = 0; i < len(attrs) && r.nFront < len(r.front); i++ { a := attrs[i] - r.front[r.nFront] = r.applyAttrLimits(a) + r.front[r.nFront] = r.applyAttrLimitsAndDedup(a) r.nFront++ } - for j, a := range attrs[i:] { - attrs[i+j] = r.applyAttrLimits(a) - } + // Make a copy to avoid modifying the original. + j := len(r.back) r.back = slices.Grow(r.back, len(attrs[i:])) r.back = append(r.back, attrs[i:]...) + for i, a := range r.back[j:] { + r.back[i+j] = r.applyAttrLimitsAndDedup(a) + } } // SetAttributes sets (and overrides) attributes to the log record. @@ -321,13 +362,13 @@ func (r *Record) SetAttributes(attrs ...log.KeyValue) { var i int for i = 0; i < len(attrs) && r.nFront < len(r.front); i++ { a := attrs[i] - r.front[r.nFront] = r.applyAttrLimits(a) + r.front[r.nFront] = r.applyAttrLimitsAndDedup(a) r.nFront++ } r.back = slices.Clone(attrs[i:]) for i, a := range r.back { - r.back[i] = r.applyAttrLimits(a) + r.back[i] = r.applyAttrLimitsAndDedup(a) } } @@ -342,20 +383,31 @@ func head(kvs []log.KeyValue, n int) (out []log.KeyValue, dropped int) { // dedup deduplicates kvs front-to-back with the last value saved. func dedup(kvs []log.KeyValue) (unique []log.KeyValue, dropped int) { + if len(kvs) <= 1 { + return kvs, 0 // No deduplication needed. + } + index := getIndex() defer putIndex(index) - - unique = kvs[:0] // Use the same underlying array as kvs. 
+ u := getUnique() + defer putUnique(u) for _, a := range kvs { idx, found := index[a.Key] if found { dropped++ - unique[idx] = a + (*u)[idx] = a } else { - unique = append(unique, a) - index[a.Key] = len(unique) - 1 + *u = append(*u, a) + index[a.Key] = len(*u) - 1 } } + + if dropped == 0 { + return kvs, 0 + } + + unique = make([]log.KeyValue, len(*u)) + copy(unique, *u) return unique, dropped } @@ -421,59 +473,212 @@ func (r *Record) Clone() Record { return res } -func (r *Record) applyAttrLimits(attr log.KeyValue) log.KeyValue { - attr.Value = r.applyValueLimits(attr.Value) +func (r *Record) applyAttrLimitsAndDedup(attr log.KeyValue) log.KeyValue { + attr.Value = r.applyValueLimitsAndDedup(attr.Value) return attr } -func (r *Record) applyValueLimits(val log.Value) log.Value { +func (r *Record) applyValueLimitsAndDedup(val log.Value) log.Value { switch val.Kind() { case log.KindString: s := val.AsString() - if len(s) > r.attributeValueLengthLimit { + if r.attributeValueLengthLimit >= 0 && len(s) > r.attributeValueLengthLimit { val = log.StringValue(truncate(r.attributeValueLengthLimit, s)) } case log.KindSlice: sl := val.AsSlice() - for i := range sl { - sl[i] = r.applyValueLimits(sl[i]) + + // First check if any limits need to be applied. + needsChange := false + for _, v := range sl { + if r.needsValueLimitsOrDedup(v) { + needsChange = true + break + } + } + + if needsChange { + // Create a new slice to avoid modifying the original. + newSl := make([]log.Value, len(sl)) + for i, item := range sl { + newSl[i] = r.applyValueLimitsAndDedup(item) + } + val = log.SliceValue(newSl...) } - val = log.SliceValue(sl...) + case log.KindMap: kvs := val.AsMap() + var newKvs []log.KeyValue + var dropped int + if !r.allowDupKeys { - // Deduplicate then truncate. Do not do at the same time to avoid - // wasted truncation operations. - var dropped int - kvs, dropped = dedup(kvs) + // Deduplicate then truncate. + // Do not do at the same time to avoid wasted truncation operations. + newKvs, dropped = dedup(kvs) r.addDropped(dropped) + } else { + newKvs = kvs } - for i := range kvs { - kvs[i] = r.applyAttrLimits(kvs[i]) + + // Check if any attribute limits need to be applied. + needsChange := false + if dropped > 0 { + needsChange = true // Already changed by dedup. + } else { + for _, kv := range newKvs { + if r.needsValueLimitsOrDedup(kv.Value) { + needsChange = true + break + } + } + } + + if needsChange { + // Only create new slice if changes are needed. + if dropped == 0 { + // Make a copy to avoid modifying the original. + newKvs = make([]log.KeyValue, len(kvs)) + copy(newKvs, kvs) + } + + for i := range newKvs { + newKvs[i] = r.applyAttrLimitsAndDedup(newKvs[i]) + } + val = log.MapValue(newKvs...) } - val = log.MapValue(kvs...) } return val } +// needsValueLimitsOrDedup checks if a value would be modified by applyValueLimitsAndDedup. +func (r *Record) needsValueLimitsOrDedup(val log.Value) bool { + switch val.Kind() { + case log.KindString: + return r.attributeValueLengthLimit >= 0 && len(val.AsString()) > r.attributeValueLengthLimit + case log.KindSlice: + for _, v := range val.AsSlice() { + if r.needsValueLimitsOrDedup(v) { + return true + } + } + case log.KindMap: + kvs := val.AsMap() + if !r.allowDupKeys && len(kvs) > 1 { + // Check for duplicates. 
+ hasDuplicates := func() bool { + seen := getSeen() + defer putSeen(seen) + for _, kv := range kvs { + if _, ok := seen[kv.Key]; ok { + return true + } + seen[kv.Key] = struct{}{} + } + return false + }() + if hasDuplicates { + return true + } + } + for _, kv := range kvs { + if r.needsValueLimitsOrDedup(kv.Value) { + return true + } + } + } + return false +} + func (r *Record) dedupeBodyCollections(val log.Value) log.Value { switch val.Kind() { case log.KindSlice: sl := val.AsSlice() - for i := range sl { - sl[i] = r.dedupeBodyCollections(sl[i]) + + // Check if any nested values need deduplication. + needsChange := false + for _, item := range sl { + if r.needsBodyDedup(item) { + needsChange = true + break + } + } + + if needsChange { + // Create a new slice to avoid modifying the original. + newSl := make([]log.Value, len(sl)) + for i, item := range sl { + newSl[i] = r.dedupeBodyCollections(item) + } + val = log.SliceValue(newSl...) } - val = log.SliceValue(sl...) + case log.KindMap: - kvs, _ := dedup(val.AsMap()) - for i := range kvs { - kvs[i].Value = r.dedupeBodyCollections(kvs[i].Value) + kvs := val.AsMap() + newKvs, dropped := dedup(kvs) + + // Check if any nested values need deduplication. + needsValueChange := false + for _, kv := range newKvs { + if r.needsBodyDedup(kv.Value) { + needsValueChange = true + break + } + } + + if dropped > 0 || needsValueChange { + // Only create new value if changes are needed. + if dropped == 0 { + // Make a copy to avoid modifying the original. + newKvs = make([]log.KeyValue, len(kvs)) + copy(newKvs, kvs) + } + + for i := range newKvs { + newKvs[i].Value = r.dedupeBodyCollections(newKvs[i].Value) + } + val = log.MapValue(newKvs...) } - val = log.MapValue(kvs...) } return val } +// needsBodyDedup checks if a value would be modified by dedupeBodyCollections. +func (r *Record) needsBodyDedup(val log.Value) bool { + switch val.Kind() { + case log.KindSlice: + for _, item := range val.AsSlice() { + if r.needsBodyDedup(item) { + return true + } + } + case log.KindMap: + kvs := val.AsMap() + if len(kvs) > 1 { + // Check for duplicates. + hasDuplicates := func() bool { + seen := getSeen() + defer putSeen(seen) + for _, kv := range kvs { + if _, ok := seen[kv.Key]; ok { + return true + } + seen[kv.Key] = struct{}{} + } + return false + }() + if hasDuplicates { + return true + } + } + for _, kv := range kvs { + if r.needsBodyDedup(kv.Value) { + return true + } + } + } + return false +} + // truncate returns a truncated version of s such that it contains less than // the limit number of characters. Truncation is applied by returning the limit // number of valid characters contained in s. 
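The core of the record.go change above is copy-on-write, last-value-wins deduplication: when the incoming key-value slice contains no duplicate keys it is handed back untouched, and a fresh slice is allocated only when entries are actually dropped, so the caller's slice is never modified. Below is a minimal standalone sketch of that strategy using only the public go.opentelemetry.io/otel/log types; the dedupKeyValues helper is illustrative and omits the pooled scratch buffers and attribute limits the SDK applies.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/log"
)

// dedupKeyValues deduplicates kvs with last-value-wins semantics without
// mutating its input. If no duplicate keys are found, the original slice is
// returned as-is; otherwise the result is a newly allocated slice.
func dedupKeyValues(kvs []log.KeyValue) (unique []log.KeyValue, dropped int) {
	if len(kvs) <= 1 {
		return kvs, 0 // Nothing to deduplicate.
	}

	index := make(map[string]int, len(kvs)) // key -> position in scratch.
	scratch := make([]log.KeyValue, 0, len(kvs))
	for _, kv := range kvs {
		if i, ok := index[kv.Key]; ok {
			scratch[i] = kv // Last value wins.
			dropped++
			continue
		}
		index[kv.Key] = len(scratch)
		scratch = append(scratch, kv)
	}

	if dropped == 0 {
		return kvs, 0 // No duplicates: hand back the caller's slice unchanged.
	}
	return scratch, dropped
}

func main() {
	in := []log.KeyValue{
		log.String("key", "first"),
		log.String("key", "second"),
	}
	out, dropped := dedupKeyValues(in)
	// The input keeps both entries and its original values; only out is deduplicated.
	fmt.Println(len(in), len(out), dropped, in[0].Value.AsString()) // 2 1 1 first
}

Returning the original slice in the common no-duplicate case keeps that path allocation-free, which is why the SDK code above only copies into a fresh slice when dropped > 0.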
diff --git a/sdk/log/record_test.go b/sdk/log/record_test.go index c6ffbd16b6a..6f363527df0 100644 --- a/sdk/log/record_test.go +++ b/sdk/log/record_test.go @@ -66,7 +66,7 @@ func TestRecordBody(t *testing.T) { want log.Value }{ { - name: "Bool", + name: "boolean value", body: log.BoolValue(true), want: log.BoolValue(true), }, @@ -98,7 +98,7 @@ func TestRecordBody(t *testing.T) { ), }, { - name: "nestedMap", + name: "nested map", body: log.MapValue( log.Map("key", log.Int64("key", 1), @@ -123,6 +123,268 @@ func TestRecordBody(t *testing.T) { log.Int64("1", 3), ), }, + { + name: "slice with nested deduplication", + body: log.SliceValue( + log.MapValue(log.String("key", "value1"), log.String("key", "value2")), + log.StringValue("normal"), + log.SliceValue( + log.MapValue(log.String("nested", "val1"), log.String("nested", "val2")), + ), + ), + want: log.SliceValue( + log.MapValue(log.String("key", "value2")), + log.StringValue("normal"), + log.SliceValue( + log.MapValue(log.String("nested", "val2")), + ), + ), + }, + { + name: "empty slice", + body: log.SliceValue(), + want: log.SliceValue(), + }, + { + name: "empty map", + body: log.MapValue(), + want: log.MapValue(), + }, + { + name: "single key map", + body: log.MapValue(log.String("single", "value")), + want: log.MapValue(log.String("single", "value")), + }, + { + name: "slice with no deduplication needed", + body: log.SliceValue( + log.StringValue("value1"), + log.StringValue("value2"), + log.MapValue(log.String("unique1", "val1")), + log.MapValue(log.String("unique2", "val2")), + ), + want: log.SliceValue( + log.StringValue("value1"), + log.StringValue("value2"), + log.MapValue(log.String("unique1", "val1")), + log.MapValue(log.String("unique2", "val2")), + ), + }, + { + name: "deeply nested slice and map structure", + body: log.SliceValue( + log.MapValue( + log.String("outer", "value"), + log.Slice("inner_slice", + log.MapValue(log.String("deep", "value1"), log.String("deep", "value2")), + ), + ), + ), + want: log.SliceValue( + log.MapValue( + log.String("outer", "value"), + log.Slice("inner_slice", + log.MapValue(log.String("deep", "value2")), + ), + ), + ), + }, + { + name: "slice with duplicates allowed", + allowDuplicates: true, + body: log.SliceValue( + log.MapValue(log.String("key", "value1"), log.String("key", "value2")), + ), + want: log.SliceValue( + log.MapValue(log.String("key", "value1"), log.String("key", "value2")), + ), + }, + { + name: "string value", + body: log.StringValue("test"), + want: log.StringValue("test"), + }, + { + name: "boolean value without deduplication", + body: log.BoolValue(true), + want: log.BoolValue(true), + }, + { + name: "integer value", + body: log.Int64Value(42), + want: log.Int64Value(42), + }, + { + name: "float value", + body: log.Float64Value(3.14), + want: log.Float64Value(3.14), + }, + { + name: "bytes value", + body: log.BytesValue([]byte("test")), + want: log.BytesValue([]byte("test")), + }, + { + name: "empty slice", + body: log.SliceValue(), + want: log.SliceValue(), + }, + { + name: "slice without nested deduplication", + body: log.SliceValue(log.StringValue("test"), log.BoolValue(true)), + want: log.SliceValue(log.StringValue("test"), log.BoolValue(true)), + }, + { + name: "slice with nested deduplication needed", + body: log.SliceValue(log.MapValue(log.String("key", "value1"), log.String("key", "value2"))), + want: log.SliceValue(log.MapValue(log.String("key", "value2"))), + }, + { + name: "empty map", + body: log.MapValue(), + want: log.MapValue(), + }, + { + name: "single key 
map", + body: log.MapValue(log.String("key", "value")), + want: log.MapValue(log.String("key", "value")), + }, + { + name: "map with duplicate keys", + body: log.MapValue(log.String("key", "value1"), log.String("key", "value2")), + want: log.MapValue(log.String("key", "value2")), + }, + { + name: "map without duplicates", + body: log.MapValue(log.String("key1", "value1"), log.String("key2", "value2")), + want: log.MapValue(log.String("key1", "value1"), log.String("key2", "value2")), + }, + { + name: "map with nested slice deduplication", + body: log.MapValue( + log.Slice("slice", log.MapValue(log.String("nested", "val1"), log.String("nested", "val2"))), + ), + want: log.MapValue( + log.Slice("slice", log.MapValue(log.String("nested", "val2"))), + ), + }, + { + name: "deeply nested structure with deduplication", + body: log.SliceValue( + log.MapValue( + log.Map("nested", + log.String("key", "value1"), + log.String("key", "value2"), + ), + ), + ), + want: log.SliceValue( + log.MapValue( + log.Map("nested", + log.String("key", "value2"), + ), + ), + ), + }, + { + name: "deeply nested structure without deduplication", + body: log.SliceValue( + log.MapValue( + log.Map("nested", + log.String("key1", "value1"), + log.String("key2", "value2"), + ), + ), + ), + want: log.SliceValue( + log.MapValue( + log.Map("nested", + log.String("key1", "value1"), + log.String("key2", "value2"), + ), + ), + ), + }, + { + name: "string value for collection deduplication", + body: log.StringValue("test"), + want: log.StringValue("test"), + }, + { + name: "boolean value for collection deduplication", + body: log.BoolValue(true), + want: log.BoolValue(true), + }, + { + name: "empty slice for collection deduplication", + body: log.SliceValue(), + want: log.SliceValue(), + }, + { + name: "slice without nested deduplication for collection testing", + body: log.SliceValue(log.StringValue("test"), log.BoolValue(true)), + want: log.SliceValue(log.StringValue("test"), log.BoolValue(true)), + }, + { + name: "slice with nested map requiring deduplication", + body: log.SliceValue( + log.MapValue(log.String("key", "value1"), log.String("key", "value2")), + log.StringValue("normal"), + ), + want: log.SliceValue( + log.MapValue(log.String("key", "value2")), + log.StringValue("normal"), + ), + }, + { + name: "deeply nested slice with map deduplication", + body: log.SliceValue( + log.SliceValue( + log.MapValue(log.String("deep", "value1"), log.String("deep", "value2")), + ), + ), + want: log.SliceValue( + log.SliceValue( + log.MapValue(log.String("deep", "value2")), + ), + ), + }, + { + name: "empty map for collection deduplication", + body: log.MapValue(), + want: log.MapValue(), + }, + { + name: "map with nested slice containing duplicates", + body: log.MapValue( + log.String("outer", "value"), + log.Slice("nested_slice", + log.MapValue(log.String("inner", "val1"), log.String("inner", "val2")), + ), + ), + want: log.MapValue( + log.String("outer", "value"), + log.Slice("nested_slice", + log.MapValue(log.String("inner", "val2")), + ), + ), + }, + { + name: "map with key duplication and nested value deduplication", + body: log.MapValue( + log.String("key1", "value1"), + log.String("key1", "value2"), // key dedup + log.Slice("slice", + log.MapValue(log.String("nested", "val1"), log.String("nested", "val2")), // nested value dedup + ), + ), + want: log.MapValue( + log.String("key1", "value2"), + log.Slice("slice", + log.MapValue(log.String("nested", "val2")), + ), + ), + }, } for _, tc := range testcases { t.Run(tc.name, func(t 
*testing.T) { @@ -536,6 +798,46 @@ func TestRecordAttrDeduplication(t *testing.T) { return out }(), }, + { + name: "AttributeWithDuplicateKeys", + attrs: []log.KeyValue{ + log.String("duplicate", "first"), + log.String("unique", "value"), + log.String("duplicate", "second"), + }, + want: []log.KeyValue{ + log.String("duplicate", "second"), + log.String("unique", "value"), + }, + }, + { + name: "ManyDuplicateKeys", + attrs: []log.KeyValue{ + log.String("key", "value1"), + log.String("key", "value2"), + log.String("key", "value3"), + log.String("key", "value4"), + log.String("key", "value5"), + }, + want: []log.KeyValue{ + log.String("key", "value5"), + }, + }, + { + name: "InterleavedDuplicates", + attrs: []log.KeyValue{ + log.String("a", "a1"), + log.String("b", "b1"), + log.String("a", "a2"), + log.String("c", "c1"), + log.String("b", "b2"), + }, + want: []log.KeyValue{ + log.String("a", "a2"), + log.String("b", "b2"), + log.String("c", "c1"), + }, + }, } for _, tc := range testcases { @@ -657,6 +959,84 @@ func TestApplyAttrLimitsDeduplication(t *testing.T) { ), wantDroppedAttrs: 10, }, + { + name: "EmptyMap", + input: log.MapValue(), + want: log.MapValue(), + wantDroppedAttrs: 0, + }, + { + name: "SingleKeyMap", + input: log.MapValue(log.String("key1", "value1")), + want: log.MapValue(log.String("key1", "value1")), + wantDroppedAttrs: 0, + }, + { + name: "EmptySlice", + input: log.SliceValue(), + want: log.SliceValue(), + wantDroppedAttrs: 0, + }, + { + name: "SliceWithNestedDedup", + input: log.SliceValue( + log.MapValue(log.String("key", "value1"), log.String("key", "value2")), + log.StringValue("normal"), + ), + want: log.SliceValue( + log.MapValue(log.String("key", "value2")), + log.StringValue("normal"), + ), + wantDroppedAttrs: 1, + }, + { + name: "NestedSliceInMap", + input: log.MapValue( + log.Slice("slice_key", + log.MapValue(log.String("nested", "value1"), log.String("nested", "value2")), + ), + ), + want: log.MapValue( + log.Slice("slice_key", + log.MapValue(log.String("nested", "value2")), + ), + ), + wantDroppedAttrs: 1, + }, + { + name: "DeeplyNestedStructure", + input: log.MapValue( + log.Map("level1", + log.Map("level2", + log.Slice("level3", + log.MapValue(log.String("deep", "value1"), log.String("deep", "value2")), + ), + ), + ), + ), + want: log.MapValue( + log.Map("level1", + log.Map("level2", + log.Slice("level3", + log.MapValue(log.String("deep", "value2")), + ), + ), + ), + ), + wantDroppedAttrs: 1, + }, + { + name: "NestedMapWithoutDuplicateKeys", + input: log.SliceValue((log.MapValue( + log.String("key1", "value1"), + log.String("key2", "value2"), + ))), + want: log.SliceValue(log.MapValue( + log.String("key1", "value1"), + log.String("key2", "value2"), + )), + wantDroppedAttrs: 0, + }, } for _, tc := range testcases { @@ -770,6 +1150,42 @@ func TestApplyAttrLimitsTruncation(t *testing.T) { log.Map("7", log.String("a", "")), ), }, + { + name: "LongStringTruncated", + limit: 5, + input: log.StringValue("This is a very long string that should be truncated"), + want: log.StringValue("This "), + }, + { + name: "LongBytesNotTruncated", + limit: 5, + input: log.BytesValue([]byte("This is a very long byte array")), + want: log.BytesValue([]byte("This is a very long byte array")), + }, + { + name: "TruncationInNestedMap", + limit: 3, + input: log.MapValue( + log.String("short", "ok"), + log.String("long", "toolong"), + ), + want: log.MapValue( + log.String("short", "ok"), + log.String("long", "too"), + ), + }, + { + name: "TruncationInNestedSlice", + limit: 4, + input: 
log.SliceValue( + log.StringValue("good"), + log.StringValue("toolong"), + ), + want: log.SliceValue( + log.StringValue("good"), + log.StringValue("tool"), + ), + }, } for _, tc := range testcases { @@ -926,11 +1342,59 @@ func TestTruncate(t *testing.T) { } } -func TestRecordMethodsInputConcurrentSafe(t *testing.T) { - if race() { - t.Skip("TODO: Fix bug https://github.com/open-telemetry/opentelemetry-go/issues/7364.") +func TestRecordAddAttributesDoesNotMutateInput(t *testing.T) { + attrs := []log.KeyValue{ + log.String("attr1", "very long value that will be truncated"), + log.String("attr2", "another very long value that will be truncated"), + log.String("attr3", "yet another very long value that will be truncated"), + log.String("attr4", "more very long value that will be truncated"), + log.String("attr5", "extra very long value that will be truncated"), + log.String("attr6", "additional very long value that will be truncated"), + log.String("attr7", "more additional very long value that will be truncated"), + } + + originalValues := make([]string, len(attrs)) + for i, kv := range attrs { + originalValues[i] = kv.Value.AsString() + } + + r := &Record{ + attributeValueLengthLimit: 20, // Short limit to trigger truncation. + attributeCountLimit: -1, // No count limit. + allowDupKeys: false, } + r.AddAttributes(attrs...) + + // Verify that the original shared slice was not mutated + for i, kv := range attrs { + if kv.Value.AsString() != originalValues[i] { + t.Errorf("Input slice was mutated! Attribute %d: original=%q, current=%q", + i, originalValues[i], kv.Value.AsString()) + } + } + + // Verify that the record has the truncated values + var gotAttrs []log.KeyValue + r.WalkAttributes(func(kv log.KeyValue) bool { + gotAttrs = append(gotAttrs, kv) + return true + }) + wantAttr := []log.KeyValue{ + log.String("attr1", "very long value that"), + log.String("attr2", "another very long va"), + log.String("attr3", "yet another very lon"), + log.String("attr4", "more very long value"), + log.String("attr5", "extra very long valu"), + log.String("attr6", "additional very long"), + log.String("attr7", "more additional very"), + } + if !slices.EqualFunc(gotAttrs, wantAttr, func(a, b log.KeyValue) bool { return a.Equal(b) }) { + t.Errorf("Attributes do not match.\ngot:\n%v\nwant:\n%v", printKVs(gotAttrs), printKVs(wantAttr)) + } +} + +func TestRecordMethodsInputConcurrentSafe(t *testing.T) { nestedSlice := log.Slice("nested_slice", log.SliceValue(log.StringValue("nested_inner1"), log.StringValue("nested_inner2")), log.StringValue("nested_outer"), @@ -990,7 +1454,7 @@ func TestRecordMethodsInputConcurrentSafe(t *testing.T) { gotBody := r.Body() wantBody := log.MapValue( log.String("nested_key1", "duplicate"), - log.Map("nested_map", log.String("nested_inner_key", "nested_inn")), + log.Map("nested_map", log.String("nested_inner_key", "nested_inner_value")), ) if !gotBody.Equal(wantBody) { t.Errorf("Body does not match.\ngot:\n%v\nwant:\n%v", gotBody, wantBody)
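From a caller's point of view, the practical effect of the change is that an attribute slice reused across records is no longer truncated or deduplicated in place by the SDK's limit handling. The following is a small usage sketch, assuming the current go.opentelemetry.io/otel/sdk/log Processor interface and LoggerProvider options; the enrichProcessor type and the specific limit value are illustrative only, not part of this change.

package main

import (
	"context"

	"go.opentelemetry.io/otel/log"
	sdklog "go.opentelemetry.io/otel/sdk/log"
)

// enrichProcessor stamps every record with a shared attribute slice. It is an
// enrichment-only processor: registered before an exporting processor, its
// changes to the Record are visible to the processors that follow it.
type enrichProcessor struct {
	commonAttrs []log.KeyValue
}

var _ sdklog.Processor = (*enrichProcessor)(nil)

func (p *enrichProcessor) OnEmit(_ context.Context, r *sdklog.Record) error {
	// With the non-mutating AddAttributes behavior above, commonAttrs itself
	// is never truncated or deduplicated, so the same slice can be reused for
	// every record, including records emitted concurrently.
	r.AddAttributes(p.commonAttrs...)
	return nil
}

func (p *enrichProcessor) Shutdown(context.Context) error   { return nil }
func (p *enrichProcessor) ForceFlush(context.Context) error { return nil }

func main() {
	common := []log.KeyValue{
		log.String("deployment.environment", "production"),
		log.String("service.notes", "a deliberately long value that the limit below forces the record to truncate"),
	}

	provider := sdklog.NewLoggerProvider(
		sdklog.WithProcessor(&enrichProcessor{commonAttrs: common}),
		// A tight limit: values stored on the record are truncated, while the
		// values held in common keep their full length.
		sdklog.WithAttributeValueLengthLimit(16),
	)
	defer func() { _ = provider.Shutdown(context.Background()) }()

	logger := provider.Logger("example")
	var rec log.Record
	rec.SetBody(log.StringValue("hello"))
	logger.Emit(context.Background(), rec)
	// common still holds its original, untruncated values here.
}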