82 changes: 82 additions & 0 deletions pkg/logger/chunking.go
@@ -0,0 +1,82 @@
package logger

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

const (
	// MaxStringFieldSize is the maximum size in bytes for a single string field
	// before it gets split across multiple log entries. This prevents oversized
	// log entries from being rejected by log aggregation backends
	// (e.g. Grafana Loki's 256KB per-entry limit).
	MaxStringFieldSize = 204800 // 200KB per chunk
)

// chunkOversizedFields checks if any string field exceeds MaxStringFieldSize.
// If so, it returns multiple sets of fields — one per chunk of the oversized
// field — each annotated with chunk/total metadata. Other oversized string
// fields beyond the first are truncated as a safety net.
// Returns nil if no chunking is needed.
func chunkOversizedFields(fields []zapcore.Field) [][]zapcore.Field {
	// Find the first oversized string field.
	oversizedIdx := -1
	for i, f := range fields {
		if f.Type == zapcore.StringType && len(f.String) > MaxStringFieldSize {
			oversizedIdx = i
			break
		}
	}

	if oversizedIdx == -1 {
		return nil
	}

	oversized := fields[oversizedIdx]
	chunks := chunkString(oversized.String, MaxStringFieldSize)
	totalChunks := len(chunks)

	// Build base fields: everything except the oversized field.
	// Truncate any other oversized string fields as a safety measure.
	baseFields := make([]zapcore.Field, 0, len(fields)-1)
	for i, f := range fields {
		if i == oversizedIdx {
			continue
		}
		if f.Type == zapcore.StringType && len(f.String) > MaxStringFieldSize {
			f.String = f.String[:MaxStringFieldSize] + "...[truncated]"
		}
		baseFields = append(baseFields, f)
	}

	result := make([][]zapcore.Field, totalChunks)
	for i, chunk := range chunks {
		entryFields := make([]zapcore.Field, 0, len(baseFields)+3)
		entryFields = append(entryFields, baseFields...)
		entryFields = append(entryFields,
			zap.String(oversized.Key, chunk),
			zap.Int(oversized.Key+"_chunk", i+1),
			zap.Int(oversized.Key+"_total_chunks", totalChunks),
		)
		result[i] = entryFields
	}

	return result
}

// chunkString splits s into pieces of at most chunkSize bytes. Splitting is
// byte-based, so a multi-byte UTF-8 rune that straddles a boundary will be
// divided across two chunks.
func chunkString(s string, chunkSize int) []string {
	if len(s) <= chunkSize {
		return []string{s}
	}

	var chunks []string
	for len(s) > chunkSize {
		chunks = append(chunks, s[:chunkSize])
		s = s[chunkSize:]
	}
	if len(s) > 0 {
		chunks = append(chunks, s)
	}
	return chunks
}
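
Reviewer note: as a quick end-to-end illustration of the output shape, here is a hypothetical example; it is not part of this PR and would need to live in a _test.go file inside package logger to reach the unexported helper.

package logger

import (
	"fmt"
	"strings"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// ExampleChunking is hypothetical, not part of this PR: a payload just over
// two chunks' worth of bytes fans out into three entries, each carrying the
// chunk counter and total alongside its slice of the payload.
func ExampleChunking() {
	body := strings.Repeat("x", MaxStringFieldSize*2+100) // spans 3 chunks
	fields := []zapcore.Field{
		zap.String("status", "error"),
		zap.String("body", body),
	}
	for _, entry := range chunkOversizedFields(fields) {
		m := zapcore.NewMapObjectEncoder()
		for _, f := range entry {
			f.AddTo(m)
		}
		fmt.Println(m.Fields["body_chunk"], m.Fields["body_total_chunks"], len(m.Fields["body"].(string)))
	}
	// Output:
	// 1 3 204800
	// 2 3 204800
	// 3 3 100
}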
144 changes: 144 additions & 0 deletions pkg/logger/chunking_test.go
@@ -0,0 +1,144 @@
package logger

import (
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func TestChunkString(t *testing.T) {
	tests := []struct {
		name      string
		input     string
		chunkSize int
		expected  []string
	}{
		{
			name:      "under limit returns single chunk",
			input:     "hello",
			chunkSize: 100,
			expected:  []string{"hello"},
		},
		{
			name:      "exact limit returns single chunk",
			input:     "hello",
			chunkSize: 5,
			expected:  []string{"hello"},
		},
		{
			name:      "over limit splits into chunks",
			input:     "abcdefghij",
			chunkSize: 3,
			expected:  []string{"abc", "def", "ghi", "j"},
		},
		{
			name:      "empty string returns single chunk",
			input:     "",
			chunkSize: 100,
			expected:  []string{""},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := chunkString(tt.input, tt.chunkSize)
			assert.Equal(t, tt.expected, result)
		})
	}
}

func TestChunkOversizedFields_NoChunking(t *testing.T) {
	fields := []zapcore.Field{
		zap.String("msg", "short message"),
		zap.Int("status", 200),
	}

	result := chunkOversizedFields(fields)
	assert.Nil(t, result, "should return nil when no fields exceed limit")
}

func TestChunkOversizedFields_SingleOversizedField(t *testing.T) {
	largeBody := strings.Repeat("x", MaxStringFieldSize*2+100)

	fields := []zapcore.Field{
		zap.String("status", "error"),
		zap.String("body", largeBody),
		zap.Int("code", 500),
	}

	chunks := chunkOversizedFields(fields)
	require.NotNil(t, chunks)
	assert.Len(t, chunks, 3, "should produce 3 chunks")

	// Each chunk should contain the base fields + chunk of body + metadata.
	for i, chunkFields := range chunks {
		fieldMap := fieldsToMap(chunkFields)

		// Base fields present in every chunk.
		assert.Equal(t, "error", fieldMap["status"])
		assert.Equal(t, int64(500), fieldMap["code"])

		// Chunk metadata.
		assert.Equal(t, int64(i+1), fieldMap["body_chunk"])
		assert.Equal(t, int64(3), fieldMap["body_total_chunks"])

		// Body chunk is present and within limit.
		bodyChunk := fieldMap["body"].(string)
		assert.LessOrEqual(t, len(bodyChunk), MaxStringFieldSize)
	}

	// Reassemble and verify full content.
	var reassembled strings.Builder
	for _, chunkFields := range chunks {
		fieldMap := fieldsToMap(chunkFields)
		reassembled.WriteString(fieldMap["body"].(string))
	}
	assert.Equal(t, largeBody, reassembled.String())
}

func TestChunkOversizedFields_MultipleOversized(t *testing.T) {
	// First oversized field gets chunked, second gets truncated.
	largeBody := strings.Repeat("a", MaxStringFieldSize+500)
	largeResponse := strings.Repeat("b", MaxStringFieldSize+300)

	fields := []zapcore.Field{
		zap.String("body", largeBody),
		zap.String("response", largeResponse),
	}

	chunks := chunkOversizedFields(fields)
	require.NotNil(t, chunks)
	assert.Len(t, chunks, 2, "first field splits into 2 chunks")

	// The second oversized field (response) should be truncated in each chunk.
	for _, chunkFields := range chunks {
		fieldMap := fieldsToMap(chunkFields)
		response := fieldMap["response"].(string)
		assert.Contains(t, response, "...[truncated]")
		assert.LessOrEqual(t, len(response), MaxStringFieldSize+len("...[truncated]"))
	}
}

func TestChunkOversizedFields_ExactLimit(t *testing.T) {
	exactBody := strings.Repeat("x", MaxStringFieldSize)

	fields := []zapcore.Field{
		zap.String("body", exactBody),
	}

	result := chunkOversizedFields(fields)
	assert.Nil(t, result, "exactly at limit should not trigger chunking")
}

// fieldsToMap converts a slice of zap fields to a map for easier test assertions.
func fieldsToMap(fields []zapcore.Field) map[string]interface{} {
	enc := zapcore.NewMapObjectEncoder()
	for _, f := range fields {
		f.AddTo(enc)
	}
	return enc.Fields
}
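
A note for consumers of these logs: once the entries for one logical message have been fetched from the aggregation backend and decoded, the original value can be rebuilt by ordering on the chunk counter. A minimal sketch under those assumptions; reassembleChunks is a hypothetical helper, not part of this PR.

package logger

import (
	"sort"
	"strings"
)

// reassembleChunks is hypothetical consumer-side code, not part of this PR.
// Given the decoded entries for one logical message, it orders them by the
// "<key>_chunk" counter and concatenates the "<key>" values back together.
// (Entries decoded from JSON would carry float64 counters instead of int64;
// adjust the type assertions accordingly.)
func reassembleChunks(entries []map[string]interface{}, key string) string {
	sort.Slice(entries, func(i, j int) bool {
		return entries[i][key+"_chunk"].(int64) < entries[j][key+"_chunk"].(int64)
	})
	var b strings.Builder
	for _, e := range entries {
		b.WriteString(e[key].(string))
	}
	return b.String()
}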
24 changes: 24 additions & 0 deletions pkg/logger/logger.go
@@ -89,18 +89,36 @@ func (l *logger) Sync() {
// Debug logs a message with the debug level
func (l *logger) Debug(msg string, fields ...Field) {
	updateFields := l.getContextMetadataAsFields(fields)
	if chunks := chunkOversizedFields(updateFields); chunks != nil {
		for _, chunkFields := range chunks {
			l.underlyingLogger.Debug(msg, chunkFields...)
		}
		return
	}
	l.underlyingLogger.Debug(msg, updateFields...)
}

// Info logs a message with the info level
func (l *logger) Info(msg string, fields ...Field) {
	updateFields := l.getContextMetadataAsFields(fields)
	if chunks := chunkOversizedFields(updateFields); chunks != nil {
		for _, chunkFields := range chunks {
			l.underlyingLogger.Info(msg, chunkFields...)
		}
		return
	}
	l.underlyingLogger.Info(msg, updateFields...)
}

// Warn logs a message with the warn level
func (l *logger) Warn(msg string, fields ...Field) {
	updateFields := l.getContextMetadataAsFields(fields)
	if chunks := chunkOversizedFields(updateFields); chunks != nil {
		for _, chunkFields := range chunks {
			l.underlyingLogger.Warn(msg, chunkFields...)
		}
		return
	}
	l.underlyingLogger.Warn(msg, updateFields...)
}

@@ -109,6 +127,12 @@ func (l *logger) Error(msg string, fields ...Field) {
	trace.SpanFromContext(l.attachedContext).RecordError(errors.New(msg))
	trace.SpanFromContext(l.attachedContext).SetStatus(codes.Error, msg)
	updateFields := l.getContextMetadataAsFields(fields)
	if chunks := chunkOversizedFields(updateFields); chunks != nil {
		for _, chunkFields := range chunks {
			l.underlyingLogger.Error(msg, chunkFields...)
		}
		return
	}
	l.underlyingLogger.Error(msg, updateFields...)
}

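Since the same chunking guard now appears in all four leveled methods, a follow-up could centralize the fan-out. A sketch of one way to do it using Go method values; logChunked is hypothetical and not part of this PR.

// logChunked is a hypothetical refactor, not part of this PR: it takes the
// underlying leveled method as a bound method value (e.g.
// l.underlyingLogger.Info, which matches because zap.Field is an alias of
// zapcore.Field) and applies the chunking fan-out in one place.
func (l *logger) logChunked(logFn func(string, ...zapcore.Field), msg string, fields []zapcore.Field) {
	if chunks := chunkOversizedFields(fields); chunks != nil {
		for _, chunkFields := range chunks {
			logFn(msg, chunkFields...)
		}
		return
	}
	logFn(msg, fields...)
}

Each leveled method would then reduce to a one-liner such as l.logChunked(l.underlyingLogger.Info, msg, l.getContextMetadataAsFields(fields)).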