@@ -55,7 +55,7 @@ spark.cdm.connect.target.password cassandra
 # .column
 # .ttl
 #   .automatic   : Default is true, unless .ttl.names is specified. When true, the TTL of the
-#                  target record will be determined by finding the maxiumum TTL of
+#                  target record will be determined by finding the maximum TTL of
 #                  all origin columns that can have TTL set (which excludes partition key,
 #                  clustering key, collections/UDT/tuple, and frozen columns). When false, and
 #                  .names is not set, the target record will have the TTL determined by the target
@@ -65,7 +65,7 @@ spark.cdm.connect.target.password cassandra
 #                  the TTL of the target record.
 # .writetime
 #   .automatic   : Default is true, unless .writetime.names is specified. When true, the WRITETIME of
-#                  the target record will be determined by finding the maxiumum WRITETIME of
+#                  the target record will be determined by finding the maximum WRITETIME of
 #                  all origin columns that can have WRITETIME set (which excludes partition key,
 #                  clustering key, collections/UDT/tuple, and frozen columns). When false, and
 #                  .names is not set, the target record will have the WRITETIME determined by the target
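A usage sketch for the block above: to pin TTL and WRITETIME to specific origin columns instead of the automatic per-column maximum, the nesting suggests fully-qualified properties like the following. The spark.cdm.schema.origin prefix and the column list are assumptions, since this hunk does not show the enclosing block:

spark.cdm.schema.origin.column.ttl.names            data_col1,data_col2
spark.cdm.schema.origin.column.writetime.names      data_col1,data_col2

Per the comments above, .automatic defaults to true only when the corresponding .names is not specified, so listing columns implicitly switches off the automatic behavior.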
@@ -129,7 +129,7 @@ spark.cdm.schema.origin.keyspaceTable keyspace_name.table_name
 #                  counter gets DELETEd. Should the .missing record be re-inserted before
 #                  the DELETE gets tombstoned, the counter will zombie back to life, and the
 #                  counter will become 5323+5323 = 10646.
-# spark.tokenRange
+# spark.cdm.tokenRange
 #   .partitionFile : Default is "./<keyspace>.<tablename>_partitions.csv". Note, this file is used as
 #                  input as well as output when applicable. If the file exists, only the partition ranges
 #                  in this file will be Migrated or Validated. Similarly, if exceptions occur during
@@ -138,7 +138,7 @@ spark.cdm.schema.origin.keyspaceTable keyspace_name.table_name
 spark.cdm.autocorrect.missing                     false
 spark.cdm.autocorrect.mismatch                    false
 # spark.cdm.autocorrect.missing.counter           false
-# spark.tokenrange.partitionFile                  /tokenrange/exception/path/keyspace.tablename_partitions.csv
+# spark.cdm.tokenrange.partitionFile              /tokenrange/exception/path/keyspace.tablename_partitions.csv

 # ===========================================================================================================
 # Performance and Operations Parameters affecting throughput, error handling, and similar concerns.
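Relating to the hunk above, a sketch of how the renamed partitionFile property could be combined with autocorrect on a re-run that processes only previously recorded token ranges; the path and values are illustrative:

spark.cdm.autocorrect.missing          true
spark.cdm.autocorrect.mismatch         true
spark.cdm.tokenrange.partitionFile     ./keyspace.tablename_partitions.csv

Because the comments describe the file as both input and output, a run that records failed ranges leaves behind a file that the next run will use to re-process only those ranges.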
@@ -148,7 +148,7 @@ spark.cdm.autocorrect.mismatch false
 #   .numParts    : Default is 10000. In standard operation, the full token range (-2^63..2^63-1)
 #                  is divided into a number of parts which will be parallel-processed. You should
 #                  aim for each part to comprise a total of ≈1-10GB of data to migrate. During
-#                  intial testing, you may want this to be a small number (even 1).
+#                  initial testing, you may want this to be a small number (even 1).
 #   .batchSize   : Default is 5. When writing to Target, this comprises the number of records that
 #                  will be put into an UNLOGGED batch. CDM will tend to work on the same partition
 #                  at a time so if your partition sizes are larger, this number may be increased.
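A quick sizing sketch for the two knobs above: since numParts divides the full token range, data per part is roughly table size divided by numParts, assuming data is evenly spread across the token range. For a ~2 TB table, the default 10000 parts yields ~200 MB per part; hitting the suggested ≈1-10GB per part means lowering numParts to roughly 200-2000, for example:

spark.cdm.perfops.numParts     1000
spark.cdm.perfops.batchSize    10

The fully-qualified names follow the spark.cdm.perfops nesting shown in this section; the batchSize of 10 assumes larger partitions, per the comment above.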
@@ -165,15 +165,15 @@ spark.cdm.autocorrect.mismatch false
 # Other Parameters:
 # spark.cdm.perfops
 #   .consistency : The .consistency parameters may be one of:
-#                  ANY, ONE, TWO, THREE, QUORUM, LOCAL_ONE, EACH_QUORUM, SERIAL, LOCAL_SERIAL, ALL
+#                  ANY, ONE, TWO, THREE, QUORUM, LOCAL_ONE, EACH_QUORUM, LOCAL_QUORUM, SERIAL, LOCAL_SERIAL, ALL
 #     .read      : Default is LOCAL_QUORUM. Read consistency from Origin, and also from Target
 #                  when records are read for comparison purposes.
 #     .write     : Default is LOCAL_QUORUM. Write consistency to Target.
 #   .printStatsAfter : Default is 100000. Number of rows of processing after which a progress log
 #                  entry will be made.
 #   .fetchSizeInRows : Default is 1000. This affects the frequency of reads from Origin, and also the
 #                  frequency of flushes to Target.
-#   .errorLimit  : Default is 0. Controls how many errors a thread may encounter during MigrateData
+#   .errorLimit  : Default is 0. Controls how many errors a thread may encounter during Migrate
 #                  and DiffData operations before failing. It is recommended to set this to a non-
 #                  zero value only when not doing a mutation-type operation, e.g. when running
 #                  DiffData without .autocorrect.
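Pulling the defaults above together, a validation-oriented configuration might look like the following sketch; the non-zero errorLimit follows the recommendation to use one only for non-mutating runs such as DiffData without autocorrect, and the names assume the spark.cdm.perfops nesting shown above:

spark.cdm.perfops.consistency.read     LOCAL_QUORUM
spark.cdm.perfops.consistency.write    LOCAL_QUORUM
spark.cdm.perfops.printStatsAfter      100000
spark.cdm.perfops.fetchSizeInRows      1000
spark.cdm.perfops.errorLimit           10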
@@ -196,7 +196,7 @@ spark.cdm.perfops.ratelimit.target 40000
 #                  Partition and clustering columns cannot have null values, but if these
 #                  are added as part of a schema transformation between Origin and Target
 #                  it is possible that the Origin side is null. In this case, the
-#                  MigrateData operation would fail. This parameter allows a crude
+#                  Migrate operation would fail. This parameter allows a crude
 #                  constant value to be used in its place, separate from the Constant
 #                  Values feature.
 # .custom
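For the null-key replacement described in the last hunk, the enclosing block is not visible here; a sketch assuming the transform parameter name used in CDM's detailed properties, with an illustrative millisecond timestamp value (verify both against the full file):

spark.cdm.transform.missing.key.ts.replace.value   1685577600000

The constant stands in for a null partition/clustering column on the Origin side so the Migrate operation can write the row, separate from the Constant Values feature.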