Commit 0e6ee1d

Merge branch 'bugfix/produce-ack-0'
2 parents: 78ffb1b + 6eaafe8

8 files changed: 304 additions, 13 deletions


README.md

Lines changed: 1 addition & 0 deletions
@@ -109,6 +109,7 @@ See:
       --log-level-fieldname string                  Log level fieldname for json format (default "@level")
       --log-msg-fieldname string                    Message fieldname for json format (default "@message")
       --log-time-fieldname string                   Time fieldname for json format (default "@timestamp")
+      --producer-acks-0-disabled                    Assume fire-and-forget is never sent by the producer. Enabling this parameter will increase performance
       --proxy-listener-ca-chain-cert-file string    PEM encoded CA's certificate file. If provided, client certificate is required and verified
       --proxy-listener-cert-file string             PEM encoded file with server certificate
       --proxy-listener-cipher-suites stringSlice    List of supported cipher suites
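
For context, the new option is a plain boolean switch on the existing server command. An illustrative invocation (the bootstrap mapping below is only a placeholder, not part of this change):

    kafka-proxy server --bootstrap-server-mapping "kafka-0:9092,127.0.0.1:32400" --producer-acks-0-disabled

When the flag is set, the proxy assumes no client will ever send a produce request with acks=0 (fire-and-forget), so it can skip inspecting the produce header of every request (see proxy/processor_default.go below).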

cmd/kafka-proxy/server.go

Lines changed: 2 additions & 0 deletions
@@ -142,6 +142,8 @@ func initFlags() {
 	// http://kafka.apache.org/protocol.html#protocol_api_keys
 	Server.Flags().IntSliceVar(&c.Kafka.ForbiddenApiKeys, "forbidden-api-keys", []int{}, "Forbidden Kafka request types. The restriction should prevent some Kafka operations e.g. 20 - DeleteTopics")
 
+	Server.Flags().BoolVar(&c.Kafka.Producer.Acks0Disabled, "producer-acks-0-disabled", false, "Assume fire-and-forget is never sent by the producer. Enabling this parameter will increase performance")
+
 	// TLS
 	Server.Flags().BoolVar(&c.Kafka.TLS.Enable, "tls-enable", false, "Whether or not to use TLS when connecting to the broker")
 	Server.Flags().BoolVar(&c.Kafka.TLS.InsecureSkipVerify, "tls-insecure-skip-verify", false, "It controls whether a client verifies the server's certificate chain and host name")

config/config.go

Lines changed: 3 additions & 0 deletions
@@ -141,6 +141,9 @@ type Config struct {
 				Timeout time.Duration
 			}
 		}
+		Producer struct {
+			Acks0Disabled bool
+		}
 	}
 	ForwardProxy struct {
 		Url string

proxy/client.go

Lines changed: 2 additions & 1 deletion
@@ -155,7 +155,8 @@ func NewClient(conns *ConnSet, c *config.Config, netAddressMappingFunc config.Ne
 				timeout:   c.Auth.Gateway.Server.Timeout,
 				tokenInfo: gatewayTokenInfo,
 			},
-			ForbiddenApiKeys: forbiddenApiKeys,
+			ForbiddenApiKeys:      forbiddenApiKeys,
+			ProducerAcks0Disabled: c.Kafka.Producer.Acks0Disabled,
 		},
 		dialAddressMapping: dialAddressMapping,
 		kafkaClientCert:    kafkaClientCert,

proxy/processor.go

Lines changed: 8 additions & 0 deletions
@@ -16,6 +16,7 @@ const (
 	defaultReadTimeout = 30 * time.Second
 	minOpenRequests    = 16
 
+	apiKeyProduce        = int16(0)
 	apiKeySaslHandshake  = int16(17)
 	apiKeyApiApiVersions = int16(18)
 
@@ -40,6 +41,7 @@ type ProcessorConfig struct {
 	LocalSasl        *LocalSasl
 	AuthServer       *AuthServer
 	ForbiddenApiKeys map[int16]struct{}
+	ProducerAcks0Disabled bool
 }
 
 type processor struct {
@@ -59,6 +61,8 @@ type processor struct {
 	forbiddenApiKeys map[int16]struct{}
 	// metrics
 	brokerAddress string
+	// producer will never send request with acks=0
+	producerAcks0Disabled bool
 }
 
 func newProcessor(cfg ProcessorConfig, brokerAddress string) *processor {
@@ -102,6 +106,7 @@ func newProcessor(cfg ProcessorConfig, brokerAddress string) *processor {
 		localSasl:        cfg.LocalSasl,
 		authServer:       cfg.AuthServer,
 		forbiddenApiKeys: cfg.ForbiddenApiKeys,
+		producerAcks0Disabled: cfg.ProducerAcks0Disabled,
 	}
 }
 
@@ -124,6 +129,7 @@ func (p *processor) RequestsLoop(dst DeadlineWriter, src DeadlineReaderWriter) (
 		buf:           make([]byte, p.requestBufferSize),
 		localSasl:     p.localSasl,
 		localSaslDone: false, // sequential processing - mutex is required
+		producerAcks0Disabled: p.producerAcks0Disabled,
 	}
 
 	return ctx.requestsLoop(dst, src)
@@ -141,6 +147,8 @@ type RequestsLoopContext struct {
 
 	localSasl     *LocalSasl
 	localSaslDone bool
+
+	producerAcks0Disabled bool
 }
 
 // used by local authentication

proxy/processor_default.go

Lines changed: 81 additions & 10 deletions
@@ -1,6 +1,7 @@
 package proxy
 
 import (
+	"bytes"
 	"errors"
 	"fmt"
 	"github.com/grepplabs/kafka-proxy/proxy/protocol"
@@ -20,8 +21,12 @@ func (handler *DefaultRequestHandler) handleRequest(dst DeadlineWriter, src Dead
 	// logrus.Println("Await Kafka request")
 
 	// waiting for first bytes or EOF - reset deadlines
-	src.SetReadDeadline(time.Time{})
-	dst.SetWriteDeadline(time.Time{})
+	if err = src.SetReadDeadline(time.Time{}); err != nil {
+		return true, err
+	}
+	if err = dst.SetWriteDeadline(time.Time{}); err != nil {
+		return true, err
+	}
 
 	keyVersionBuf := make([]byte, 8) // Size => int32 + ApiKey => int16 + ApiVersion => int16
 
@@ -67,8 +72,9 @@
 				return true, fmt.Errorf("only saslHandshake version 0 and 1 are supported, got version %d", requestKeyVersion.ApiVersion)
 			}
 			ctx.localSaslDone = true
-			src.SetDeadline(time.Time{})
-
+			if err = src.SetDeadline(time.Time{}); err != nil {
+				return false, err
+			}
 			// defaultRequestHandler was consumed but due to local handling enqueued defaultResponseHandler will not be.
 			return false, ctx.putNextRequestHandler(defaultRequestHandler)
 		case apiKeyApiApiVersions:
@@ -79,11 +85,18 @@ func (handler *DefaultRequestHandler) handleRequest(dst DeadlineWriter, src Dead
 		}
 	}
 
-	// send inFlightRequest to channel before myCopyN to prevent race condition in proxyResponses
-	if err = sendRequestKeyVersion(ctx.openRequestsChannel, openRequestSendTimeout, requestKeyVersion); err != nil {
+	mustReply, readBytes, err := handler.mustReply(requestKeyVersion, src, ctx)
+	if err != nil {
 		return true, err
 	}
 
+	// send inFlightRequest to channel before myCopyN to prevent race condition in proxyResponses
+	if mustReply {
+		if err = sendRequestKeyVersion(ctx.openRequestsChannel, openRequestSendTimeout, requestKeyVersion); err != nil {
+			return true, err
+		}
+	}
+
 	requestDeadline := time.Now().Add(ctx.timeout)
 	err = dst.SetWriteDeadline(requestDeadline)
 	if err != nil {
@@ -98,24 +111,82 @@ func (handler *DefaultRequestHandler) handleRequest(dst DeadlineWriter, src Dead
 	if _, err = dst.Write(keyVersionBuf); err != nil {
 		return false, err
 	}
+	// write - send to broker
+	if len(readBytes) > 0 {
+		if _, err = dst.Write(readBytes); err != nil {
+			return false, err
+		}
+	}
 	// 4 bytes were written as keyVersionBuf (ApiKey, ApiVersion)
-	if readErr, err = myCopyN(dst, src, int64(requestKeyVersion.Length-4), ctx.buf); err != nil {
+	if readErr, err = myCopyN(dst, src, int64(requestKeyVersion.Length-int32(4+len(readBytes))), ctx.buf); err != nil {
 		return readErr, err
 	}
 	if requestKeyVersion.ApiKey == apiKeySaslHandshake {
 		if requestKeyVersion.ApiVersion == 0 {
 			return false, ctx.putNextHandlers(saslAuthV0RequestHandler, saslAuthV0ResponseHandler)
 		}
 	}
-	return false, ctx.putNextHandlers(defaultRequestHandler, defaultResponseHandler)
+	if mustReply {
+		return false, ctx.putNextHandlers(defaultRequestHandler, defaultResponseHandler)
+	} else {
+		return false, ctx.putNextRequestHandler(defaultRequestHandler)
+	}
+}
+
+func (handler *DefaultRequestHandler) mustReply(requestKeyVersion *protocol.RequestKeyVersion, src io.Reader, ctx *RequestsLoopContext) (bool, []byte, error) {
+	if requestKeyVersion.ApiKey == apiKeyProduce {
+		if ctx.producerAcks0Disabled {
+			return true, nil, nil
+		}
+		// header version for produce [0..8] is 1 (request_api_key, request_api_version, correlation_id (INT32), client_id NULLABLE_STRING)
+		acksReader := protocol.RequestAcksReader{}
+
+		var (
+			acks int16
+			err  error
+		)
+		var bufferRead bytes.Buffer
+		reader := io.TeeReader(src, &bufferRead)
+		switch requestKeyVersion.ApiVersion {
+		case 0, 1, 2:
+			// CorrelationID + ClientID
+			if err = acksReader.ReadAndDiscardHeaderV1Part(reader); err != nil {
+				return false, nil, err
+			}
+			// acks (INT16)
+			acks, err = acksReader.ReadAndDiscardProduceAcks(reader)
+			if err != nil {
+				return false, nil, err
+			}
+
+		case 3, 4, 5, 6, 7, 8:
+			// CorrelationID + ClientID
+			if err = acksReader.ReadAndDiscardHeaderV1Part(reader); err != nil {
+				return false, nil, err
+			}
+			// transactional_id (NULLABLE_STRING), acks (INT16)
+			acks, err = acksReader.ReadAndDiscardProduceTxnAcks(reader)
+			if err != nil {
+				return false, nil, err
+			}
+		default:
+			return false, nil, fmt.Errorf("produce version %d is not supported", requestKeyVersion.ApiVersion)
+		}
+		return acks != 0, bufferRead.Bytes(), nil
+	}
+	return true, nil, nil
 }
 
 func (handler *DefaultResponseHandler) handleResponse(dst DeadlineWriter, src DeadlineReader, ctx *ResponsesLoopContext) (readErr bool, err error) {
 	//logrus.Println("Await Kafka response")
 
 	// waiting for first bytes or EOF - reset deadlines
-	src.SetReadDeadline(time.Time{})
-	dst.SetWriteDeadline(time.Time{})
+	if err = src.SetReadDeadline(time.Time{}); err != nil {
+		return true, err
+	}
+	if err = dst.SetWriteDeadline(time.Time{}); err != nil {
+		return true, err
+	}
 
 	responseHeaderBuf := make([]byte, 8) // Size => int32, CorrelationId => int32
 	if _, err = io.ReadFull(src, responseHeaderBuf); err != nil {
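
The protocol.RequestAcksReader helpers used by mustReply (ReadAndDiscardHeaderV1Part, ReadAndDiscardProduceAcks, ReadAndDiscardProduceTxnAcks) live in a file that is not shown in this excerpt. As orientation only, the sketch below shows what reading up to the acks field involves for the standard Kafka produce wire format, after the proxy has already consumed the size/api key/api version prefix: a big-endian INT32 correlation id, an INT16-length-prefixed nullable client id, for versions 3-8 a nullable transactional id, then the INT16 acks value. Unlike the real code, which tees the consumed bytes into a buffer so they can be re-sent to the broker, this sketch simply discards them:

// Sketch only: reading the produce request fields up to acks, not the project's code.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// discardNullableString skips an INT16-length-prefixed NULLABLE_STRING
// (a length of -1 means null, so no payload bytes follow).
func discardNullableString(r io.Reader) error {
	var length int16
	if err := binary.Read(r, binary.BigEndian, &length); err != nil {
		return err
	}
	if length < 0 {
		return nil
	}
	_, err := io.CopyN(io.Discard, r, int64(length))
	return err
}

// readProduceAcks consumes correlation_id and client_id (plus transactional_id
// for produce v3+) and returns the acks value that decides whether the broker
// will send a response at all.
func readProduceAcks(r io.Reader, apiVersion int16) (int16, error) {
	// correlation_id (INT32)
	if _, err := io.CopyN(io.Discard, r, 4); err != nil {
		return 0, err
	}
	// client_id (NULLABLE_STRING)
	if err := discardNullableString(r); err != nil {
		return 0, err
	}
	if apiVersion >= 3 {
		// transactional_id (NULLABLE_STRING)
		if err := discardNullableString(r); err != nil {
			return 0, err
		}
	}
	var acks int16
	if err := binary.Read(r, binary.BigEndian, &acks); err != nil {
		return 0, err
	}
	return acks, nil
}

func main() {
	// A tiny produce v3 header fragment: correlation_id=1, client_id="demo",
	// transactional_id=null, acks=0 (fire-and-forget).
	frame := []byte{
		0x00, 0x00, 0x00, 0x01, // correlation_id
		0x00, 0x04, 'd', 'e', 'm', 'o', // client_id
		0xff, 0xff, // transactional_id = null
		0x00, 0x00, // acks = 0, so no broker response will follow
	}
	acks, err := readProduceAcks(bytes.NewReader(frame), 3)
	fmt.Println(acks, err) // 0 <nil>
}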
