|
24 | 24 | import com.marklogic.client.io.Format; |
25 | 25 | import com.marklogic.client.io.StringHandle; |
26 | 26 | import org.apache.kafka.common.record.TimestampType; |
| 27 | +import org.apache.kafka.connect.data.Schema; |
| 28 | +import org.apache.kafka.connect.header.Header; |
27 | 29 | import org.apache.kafka.connect.sink.SinkRecord; |
28 | 30 | import org.junit.jupiter.api.Test; |
29 | 31 |
|
30 | 32 | import java.io.IOException; |
31 | | -import java.util.HashMap; |
32 | | -import java.util.Iterator; |
33 | | -import java.util.Map; |
| 33 | +import java.util.*; |
34 | 34 |
|
35 | 35 | import static org.junit.jupiter.api.Assertions.assertEquals; |
36 | 36 | import static org.junit.jupiter.api.Assertions.assertNotNull; |
@@ -78,7 +78,7 @@ void allPropertiesSet() { |
78 | 78 |
|
79 | 79 | @Test |
80 | 80 | void noPropertiesSet() { |
81 | | - Map<String, Object> kafkaConfig = new HashMap<String, Object>(); |
| 81 | + Map<String, Object> kafkaConfig = new HashMap<>(); |
82 | 82 | converter = new DefaultSinkRecordConverter(kafkaConfig); |
83 | 83 |
|
84 | 84 | DocumentWriteOperation op = converter.convert(newSinkRecord("doesn't matter")); |
@@ -221,7 +221,7 @@ void binaryContent() { |
221 | 221 |
|
222 | 222 | @Test |
223 | 223 | void includeKafkaMetadata() { |
224 | | - Map<String, Object> kafkaConfig = new HashMap<String, Object>(); |
| 224 | + Map<String, Object> kafkaConfig = new HashMap<>(); |
225 | 225 | kafkaConfig.put(MarkLogicSinkConfig.DMSDK_INCLUDE_KAFKA_METADATA, true); |
226 | 226 | converter = new DefaultSinkRecordConverter(kafkaConfig); |
227 | 227 |
|
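The surrounding metadata tests (`includeKafkaMetadata` above, `dontIncludeKafkaMetadata` below) assert against metadata keys such as `kafka-topic` and `kafka-offset`. For orientation, here is a minimal sketch of the behavior they assume — a hypothetical illustration, not the converter's actual code; only the two asserted key names are confirmed by the tests. `DocumentMetadataHandle.DocumentMetadataValues` extends `Map<String, String>`, so plain `put` calls suffice:

```java
import com.marklogic.client.io.DocumentMetadataHandle;
import org.apache.kafka.connect.sink.SinkRecord;

class MetadataSketch {
    // Hypothetical: when DMSDK_INCLUDE_KAFKA_METADATA is true, the record's
    // source coordinates are copied into the document's metadata values.
    static void addKafkaMetadata(SinkRecord record, DocumentMetadataHandle metadata) {
        DocumentMetadataHandle.DocumentMetadataValues values = metadata.getMetadataValues();
        values.put("kafka-topic", record.topic());
        values.put("kafka-offset", String.valueOf(record.kafkaOffset()));
    }
}
```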
@@ -260,6 +260,89 @@ void dontIncludeKafkaMetadata() { |
260 | 260 | assertNull(values.get("kafka-topic")); |
261 | 261 | } |
262 | 262 |
|
| 263 | + @Test |
| 264 | + void includeKafkaHeadersWithPrefix() { |
| 265 | + Map<String, Object> kafkaConfig = new HashMap<>(); |
| 266 | + kafkaConfig.put(MarkLogicSinkConfig.DMSDK_INCLUDE_KAFKA_HEADERS, true); |
| 267 | + kafkaConfig.put(MarkLogicSinkConfig.DMSDK_INCLUDE_KAFKA_HEADERS_PREFIX, "kafkaHeader_"); |
| 268 | + converter = new DefaultSinkRecordConverter(kafkaConfig); |
| 269 | + |
| 270 | + final int partition = 5; |
| 271 | + final long offset = 2; |
| 272 | + final String key = "some-key"; |
| 273 | + final Long timestamp = System.currentTimeMillis(); |
| 274 | + List<Header> headers = new ArrayList<>(); |
| 275 | + headers.add(new TestHeader("A", "1")); |
| 276 | + headers.add(new TestHeader("B", "2")); |
| 277 | + |
| 278 | + DocumentWriteOperation op = converter.convert(new SinkRecord("topic1", partition, null, key, |
| 279 | + null, "some-value", offset, timestamp, TimestampType.CREATE_TIME, headers)); |
| 280 | + |
| 281 | + DocumentMetadataHandle metadata = (DocumentMetadataHandle) op.getMetadata(); |
| 282 | + DocumentMetadataHandle.DocumentMetadataValues values = metadata.getMetadataValues(); |
| 283 | + assertEquals("1", values.get("kafkaHeader_A")); |
| 284 | + assertEquals("2", values.get("kafkaHeader_B")); |
| 285 | + assertNull(values.get("kafka-offset")); |
| 286 | + } |
| 287 | + |
| 288 | + @Test |
| 289 | + void includeKafkaHeadersWithoutPrefix() { |
| 290 | + Map<String, Object> kafkaConfig = new HashMap<>(); |
| 291 | + kafkaConfig.put(MarkLogicSinkConfig.DMSDK_INCLUDE_KAFKA_HEADERS, true); |
| 292 | + converter = new DefaultSinkRecordConverter(kafkaConfig); |
| 293 | + |
| 294 | + final int partition = 5; |
| 295 | + final long offset = 2; |
| 296 | + final String key = "some-key"; |
| 297 | + final Long timestamp = System.currentTimeMillis(); |
| 298 | + List<Header> headers = new ArrayList<>(); |
| 299 | + headers.add(new TestHeader("A", "1")); |
| 300 | + headers.add(new TestHeader("B", "2")); |
| 301 | + |
| 302 | + DocumentWriteOperation op = converter.convert(new SinkRecord("topic1", partition, null, key, |
| 303 | + null, "some-value", offset, timestamp, TimestampType.CREATE_TIME, headers)); |
| 304 | + |
| 305 | + DocumentMetadataHandle metadata = (DocumentMetadataHandle) op.getMetadata(); |
| 306 | + DocumentMetadataHandle.DocumentMetadataValues values = metadata.getMetadataValues(); |
| 307 | + assertEquals("1", values.get("A")); |
| 308 | + assertEquals("2", values.get("B")); |
| 309 | + assertNull(values.get("kafka-offset")); |
| 310 | + } |
| 311 | + |
| 312 | + static class TestHeader implements Header { // test stub; only key() and value() are exercised |
| 313 | + private final String key; |
| 314 | + private final String value; |
| 315 | + TestHeader(String key, String value) { |
| 316 | + this.key = key; |
| 317 | + this.value = value; |
| 318 | + } |
| 319 | + |
| 320 | + @Override |
| 321 | + public String key() { |
| 322 | + return key; |
| 323 | + } |
| 324 | + |
| 325 | + @Override |
| 326 | + public Schema schema() { |
| 327 | + return null; |
| 328 | + } |
| 329 | + |
| 330 | + @Override |
| 331 | + public Object value() { |
| 332 | + return value; |
| 333 | + } |
| 334 | + |
| 335 | + @Override |
| 336 | + public Header with(Schema schema, Object o) { |
| 337 | + return null; |
| 338 | + } |
| 339 | + |
| 340 | + @Override |
| 341 | + public Header rename(String s) { |
| 342 | + return null; |
| 343 | + } |
| 344 | + } |
| 345 | + |
263 | 346 | private SinkRecord newSinkRecord(Object value) { |
264 | 347 | return new SinkRecord("test-topic", 1, null, null, null, value, 0); |
265 | 348 | } |
|
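Two review notes on the new tests. First, `includeKafkaHeadersWithPrefix` and `includeKafkaHeadersWithoutPrefix` exercise the same copy logic and differ only in the prefix; each also asserts that `kafka-offset` stays null, i.e. enabling header inclusion alone must not pull in the record metadata governed by `DMSDK_INCLUDE_KAFKA_METADATA`. A sketch of the handling they assume (again hypothetical, not the converter's verbatim code; the empty-string prefix default is inferred from the second test):

```java
import com.marklogic.client.io.DocumentMetadataHandle;
import org.apache.kafka.connect.sink.SinkRecord;

class HeaderSketch {
    // Hypothetical: each Kafka header value becomes a metadata value keyed
    // by (prefix + header key); SinkRecord.headers() is an Iterable<Header>.
    static void addHeaders(SinkRecord record, DocumentMetadataHandle metadata, String prefix) {
        record.headers().forEach(header ->
            metadata.getMetadataValues().put(prefix + header.key(), String.valueOf(header.value())));
    }
}
```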
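Second, Kafka Connect ships a concrete `Headers` implementation, so the hand-rolled `TestHeader` stub (whose `schema()`, `with()`, and `rename()` are never called here) could be replaced with `ConnectHeaders` if preferred:

```java
import org.apache.kafka.connect.header.ConnectHeaders;
import org.apache.kafka.connect.header.Headers;

class HeadersExample {
    // ConnectHeaders implements Headers (an Iterable<Header>), so the result
    // can be passed straight to the ten-argument SinkRecord constructor above.
    static Headers sampleHeaders() {
        return new ConnectHeaders()
            .addString("A", "1")
            .addString("B", "2");
    }
}
```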