Skip to content

Commit cf65a90

Browse files
Merge pull request #233 from OlivierCazade/NETOBSERV-389
Kafka ingestor fix
2 parents f711714 + 7c96d50 commit cf65a90

File tree

2 files changed

+16
-2
lines changed

2 files changed

+16
-2
lines changed

pkg/pipeline/encode/encode_kafka.go

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -106,6 +106,9 @@ func NewEncodeKafka(params config.StageParam) (Encoder, error) {
106106
WriteTimeout: time.Duration(writeTimeoutSecs) * time.Second,
107107
BatchSize: jsonEncodeKafka.BatchSize,
108108
BatchBytes: jsonEncodeKafka.BatchBytes,
109+
// Temporary fix; maybe we should implement a batching system
110+
// https://github.com/segmentio/kafka-go/issues/326#issuecomment-519375403
111+
BatchTimeout: time.Nanosecond,
109112
}
110113

111114
return &encodeKafka{

pkg/pipeline/ingest/ingest_collector.go

Lines changed: 13 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -98,6 +98,10 @@ func (w *TransportWrapper) Send(_, data []byte) error {
9898
message := goflowpb.FlowMessage{}
9999
err := proto.Unmarshal(data, &message)
100100
if err != nil {
101+
// temporary fix
102+
// A PR was submitted to log this error from goflow2:
103+
// https://github.com/netsampler/goflow2/pull/86
104+
log.Error(err)
101105
return err
102106
}
103107
renderedMsg, err := RenderMessage(&message)
@@ -167,7 +171,7 @@ func (ingestC *ingestCollector) processLogLines(out chan<- []config.GenericMap)
167171
case record := <-ingestC.in:
168172
records = append(records, record)
169173
if len(records) >= ingestC.batchMaxLength {
170-
log.Debugf("ingestCollector sending %d entries", len(records))
174+
log.Debugf("ingestCollector sending %d entries, %d entries waiting", len(records), len(ingestC.in))
171175
linesProcessed.Add(float64(len(records)))
172176
queueLength.Set(float64(len(out)))
173177
out <- records
@@ -176,7 +180,14 @@ func (ingestC *ingestCollector) processLogLines(out chan<- []config.GenericMap)
176180
case <-flushRecords.C:
177181
// Process batch of records (if not empty)
178182
if len(records) > 0 {
179-
log.Debugf("ingestCollector sending %d entries", len(records))
183+
if len(ingestC.in) > 0 {
184+
for len(records) < ingestC.batchMaxLength && len(ingestC.in) > 0 {
185+
record := <-ingestC.in
186+
recordAsBytes, _ := json.Marshal(record)
187+
records = append(records, string(recordAsBytes))
188+
}
189+
}
190+
log.Debugf("ingestCollector sending %d entries, %d entries waiting", len(records), len(ingestC.in))
180191
linesProcessed.Add(float64(len(records)))
181192
queueLength.Set(float64(len(out)))
182193
out <- records

0 commit comments

Comments
 (0)