# Should only need one task since it's using a WriteBatcher, which is multi-threaded
tasks.max=1

# Topics to consume from [comma separated list for multiple topics]
topics=marklogic

# MarkLogic connector-specific properties

# A MarkLogic host to connect to. The connector uses the Data Movement SDK, and thus it will connect to each of the
# hosts in a cluster.
ml.connection.host=localhost

# The port of a REST API server to connect to.
ml.connection.port=8000

# Optional - the name of a database to connect to. If your REST API server has a content database matching that of the
# one that you want to write documents to, you do not need to set this.
ml.connection.database=Documents

# Optional - set to "gateway" when using a load balancer, else leave blank. See https://docs.marklogic.com/guide/java/data-movement#id_26583 for more information.
ml.connection.type=

# Either DIGEST, BASIC, CERTIFICATE, KERBEROS, or NONE
ml.connection.securityContextType=DIGEST

# Set these based on the security context type defined above
ml.connection.username=admin
ml.connection.password=admin
ml.connection.certFile=
ml.connection.certPassword=
ml.connection.externalName=

# Set to "true" for a "simple" SSL strategy that uses the JVM's default SslContext and X509TrustManager and a
# "trust everything" HostnameVerifier. Further customization of an SSL connection via properties is not supported. If
# you need to do so, consider using the source code for this connector as a starting point.
ml.connection.simpleSsl=false

# Sets the number of documents to be written in a batch to MarkLogic. This may not have any impact depending on how the
# connector receives data from Kafka, as the connector calls flushAsync on the DMSDK WriteBatcher after processing every
# collection of records. Thus, if the connector never receives at one time more than the value of this property, then
# the value of this property will have no impact.
ml.dmsdk.batchSize=100

# Sets the number of threads used by the Data Movement SDK for parallelizing writes to MarkLogic. Similar to the batch
# size property above, this may never come into play depending on how many records the connector receives at once.
ml.dmsdk.threadCount=8

# Optional - a comma-separated list of collections that each document should be written to
ml.document.collections=kafka-data

# Optional - specify the format of each document; either JSON, XML, BINARY, TEXT, or UNKNOWN
ml.document.format=JSON

# Optional - specify a mime type for each document; typically the format property above will be used instead of this
ml.document.mimeType=

# Optional - a comma-separated list of roles and capabilities that define the permissions for each document written to MarkLogic