@@ -68,10 +68,32 @@ spark.query.types 9,1,4,3
# The tool adds TTL & Writetime at row-level (not field-level).
# The largest TTL & Writetime values are used if multiple indexes are listed (comma separated)
- # Comma separated column indexes from "spark.query.origin" used to find largest TTL or Writetime
+ # Comma separated column indexes from "spark.query.origin" used to find the largest TTL or Writetime. These indexes are zero-based (the first column in "spark.query.origin" is index 0).
+ # Primary key columns (partition & clustering) do not have TTLs/writetimes. Collection columns are also not used here for pulling TTL/writetime.
spark.query.ttl.cols 2,3
spark.query.writetime.cols 2,3
+ # ############################## EXAMPLE MAPPING USING A DEMO TABLE ##########################################
+ # If the origin and target clusters have a schema such as the one below,
+ # CREATE TABLE cycling.cyclist_name (
+ #     pk1 uuid,
+ #     pk2 date,
+ #     cc1 boolean,
+ #     firstname text,
+ #     lastname text,
+ #     phones list<text>,
+ #     PRIMARY KEY((pk1,pk2),cc1)
+ # );
+ #
+ # then the mapping would look like below,
+ # spark.query.origin pk1,pk2,cc1,firstname,lastname,phones
+ # spark.query.origin.partitionKey pk1,pk2
+ # spark.query.target.id pk1,pk2,cc1
+ # spark.query.types 9,15,10,0,0,6%0
+ # spark.query.ttl.cols 3,4
+ # spark.query.writetime.cols 3,4
+ # ############################################################################################################
+
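For illustration only: with the demo table above, the per-column values that the tool compares can be inspected in cqlsh using CQL's TTL() and WRITETIME() functions (the key values below are made up for the example).

SELECT TTL(firstname), WRITETIME(firstname), TTL(lastname), WRITETIME(lastname)
FROM cycling.cyclist_name
WHERE pk1 = 5b6962dd-3f90-4c93-8f61-eabfa4a803e2 AND pk2 = '2015-05-15' AND cc1 = true;
-- The migrated row gets the largest TTL and the largest WRITETIME among the values returned above.
-- TTL(pk1) or WRITETIME(phones) would be rejected by Cassandra, which is why primary key and
-- collection columns cannot be listed in spark.query.ttl.cols / spark.query.writetime.cols.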
# ENABLE ONLY IF YOU WANT TO MIGRATE/VALIDATE ROWS BASED ON CQL FILTER
# spark.query.condition