@@ -34,6 +34,7 @@ public abstract class AbstractJobSession {
     protected CqlSession sourceSession;
     protected CqlSession astraSession;

+    protected List<MigrateDataType> selectColTypes = new ArrayList<MigrateDataType>();
     protected List<MigrateDataType> idColTypes = new ArrayList<MigrateDataType>();

     protected Integer batchSize = 1;
@@ -47,7 +48,6 @@ public abstract class AbstractJobSession {
     protected List<Integer> writeTimeStampCols = new ArrayList<Integer>();
     protected List<Integer> ttlCols = new ArrayList<Integer>();
     protected Boolean isCounterTable;
-    protected Integer counterDeltaMaxIndex = 0;

     protected String sourceKeyspaceTable;
     protected String astraKeyspaceTable;
@@ -58,22 +58,22 @@ protected AbstractJobSession(CqlSession sourceSession, CqlSession astraSession,
         this.sourceSession = sourceSession;
         this.astraSession = astraSession;

-        batchSize = new Integer(sparkConf.get("spark.migrate.batchSize", "1"));
-        printStatsAfter = new Integer(sparkConf.get("spark.migrate.printStatsAfter", "100000"));
+        batchSize = new Integer(sparkConf.get("spark.batchSize", "1"));
+        printStatsAfter = new Integer(sparkConf.get("spark.printStatsAfter", "100000"));
         if (printStatsAfter < 1) {
             printStatsAfter = 100000;
         }

-        readLimiter = RateLimiter.create(new Integer(sparkConf.get("spark.migrate.readRateLimit", "20000")));
-        writeLimiter = RateLimiter.create(new Integer(sparkConf.get("spark.migrate.writeRateLimit", "40000")));
-        maxRetries = Integer.parseInt(sparkConf.get("spark.migrate.maxRetries", "10"));
+        readLimiter = RateLimiter.create(new Integer(sparkConf.get("spark.readRateLimit", "20000")));
+        writeLimiter = RateLimiter.create(new Integer(sparkConf.get("spark.writeRateLimit", "40000")));
+        maxRetries = Integer.parseInt(sparkConf.get("spark.maxRetries", "10"));

-        sourceKeyspaceTable = sparkConf.get("spark.migrate.source.keyspaceTable");
-        astraKeyspaceTable = sparkConf.get("spark.migrate.destination.keyspaceTable");
+        sourceKeyspaceTable = sparkConf.get("spark.source.keyspaceTable");
+        astraKeyspaceTable = sparkConf.get("spark.destination.keyspaceTable");

-        isPreserveTTLWritetime = Boolean.parseBoolean(sparkConf.get("spark.migrate.preserveTTLWriteTime", "false"));
+        isPreserveTTLWritetime = Boolean.parseBoolean(sparkConf.get("spark.preserveTTLWriteTime", "false"));
         if (isPreserveTTLWritetime) {
-            String ttlColsStr = sparkConf.get("spark.migrate.source.ttl.cols");
+            String ttlColsStr = sparkConf.get("spark.source.ttl.cols");
             if (null != ttlColsStr && ttlColsStr.trim().length() > 0) {
                 for (String ttlCol : ttlColsStr.split(",")) {
                     ttlCols.add(Integer.parseInt(ttlCol));
@@ -82,11 +82,11 @@ protected AbstractJobSession(CqlSession sourceSession, CqlSession astraSession,
         }

         writeTimeStampFilter = Boolean
-                .parseBoolean(sparkConf.get("spark.migrate.source.writeTimeStampFilter", "false"));
+                .parseBoolean(sparkConf.get("spark.source.writeTimeStampFilter", "false"));
         // batchsize set to 1 if there is a writeFilter
         if (writeTimeStampFilter) {
             batchSize = 1;
-            String writeTimestampColsStr = sparkConf.get("spark.migrate.source.writeTimeStampFilter.cols");
+            String writeTimestampColsStr = sparkConf.get("spark.source.writeTimeStampFilter.cols");
             if (null != writeTimestampColsStr && writeTimestampColsStr.trim().length() > 0) {
                 for (String writeTimeStampCol : writeTimestampColsStr.split(",")) {
                     writeTimeStampCols.add(Integer.parseInt(writeTimeStampCol));
@@ -95,12 +95,12 @@ protected AbstractJobSession(CqlSession sourceSession, CqlSession astraSession,
             }

             String minWriteTimeStampFilterStr =
-                    sparkConf.get("spark.migrate.source.minWriteTimeStampFilter", "0");
+                    sparkConf.get("spark.source.minWriteTimeStampFilter", "0");
             if (null != minWriteTimeStampFilterStr && minWriteTimeStampFilterStr.trim().length() > 1) {
                 minWriteTimeStampFilter = Long.parseLong(minWriteTimeStampFilterStr);
             }
             String maxWriteTimeStampFilterStr =
-                    sparkConf.get("spark.migrate.source.maxWriteTimeStampFilter", "0");
+                    sparkConf.get("spark.source.maxWriteTimeStampFilter", "0");
             if (null != maxWriteTimeStampFilterStr && maxWriteTimeStampFilterStr.trim().length() > 1) {
                 maxWriteTimeStampFilter = Long.parseLong(maxWriteTimeStampFilterStr);
             }
@@ -115,18 +115,15 @@ protected AbstractJobSession(CqlSession sourceSession, CqlSession astraSession,
         logger.info(" DEFAULT -- isPreserveTTLWritetime: " + isPreserveTTLWritetime);
         logger.info(" DEFAULT -- TTLCols: " + ttlCols);

-        hasRandomPartitioner = Boolean.parseBoolean(sparkConf.get("spark.migrate.source.hasRandomPartitioner", "false"));
+        hasRandomPartitioner = Boolean.parseBoolean(sparkConf.get("spark.source.hasRandomPartitioner", "false"));

-        isCounterTable = Boolean.parseBoolean(sparkConf.get("spark.migrate.source.counterTable", "false"));
+        isCounterTable = Boolean.parseBoolean(sparkConf.get("spark.counterTable", "false"));
+        selectColTypes = getTypes(sparkConf.get("spark.diff.select.types"));
+        String partionKey = sparkConf.get("spark.query.cols.partitionKey");
+        String idCols = sparkConf.get("spark.query.cols.id");
+        idColTypes = getTypes(sparkConf.get("spark.query.cols.id.types"));

-        counterDeltaMaxIndex = Integer
-                .parseInt(sparkConf.get("spark.migrate.source.counterTable.update.max.counter.index", "0"));
-
-        String partionKey = sparkConf.get("spark.migrate.query.cols.partitionKey");
-        String idCols = sparkConf.get("spark.migrate.query.cols.id");
-        idColTypes = getTypes(sparkConf.get("spark.migrate.query.cols.id.types"));
-
-        String selectCols = sparkConf.get("spark.migrate.query.cols.select");
+        String selectCols = sparkConf.get("spark.query.cols.select");

         String idBinds = "";
         int count = 1;
@@ -139,7 +136,7 @@ protected AbstractJobSession(CqlSession sourceSession, CqlSession astraSession,
             count++;
         }

-        sourceSelectCondition = sparkConf.get("spark.migrate.query.cols.select.condition", "");
+        sourceSelectCondition = sparkConf.get("spark.query.cols.select.condition", "");
         sourceSelectStatement = sourceSession.prepare(
                 "select " + selectCols + " from " + sourceKeyspaceTable + " where token(" + partionKey.trim()
                         + ") >= ? and token(" + partionKey.trim() + ") <= ? " + sourceSelectCondition + " ALLOW FILTERING");
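For illustration, a minimal sketch (not part of the commit) of how a caller might supply the renamed properties, now that the "spark.migrate." prefix is dropped. Every key name below comes from the diff above; the class name JobConfigExample and all values (keyspace/table names, column names, type codes) are hypothetical placeholders.

    import org.apache.spark.SparkConf;

    // Hypothetical usage sketch: key names match this commit; values are placeholders.
    public class JobConfigExample {
        public static void main(String[] args) {
            SparkConf sparkConf = new SparkConf()
                    .set("spark.batchSize", "5")
                    .set("spark.readRateLimit", "20000")
                    .set("spark.writeRateLimit", "40000")
                    .set("spark.source.keyspaceTable", "srcks.srctable")
                    .set("spark.destination.keyspaceTable", "dstks.dsttable")
                    .set("spark.query.cols.partitionKey", "pk")
                    .set("spark.query.cols.id", "pk,ck")
                    .set("spark.query.cols.id.types", "0,1")   // MigrateDataType codes; project-specific
                    .set("spark.query.cols.select", "pk,ck,val");
            System.out.println(sparkConf.toDebugString());
        }
    }

With this configuration and an empty spark.query.cols.select.condition, the constructor would prepare a source statement of the form: select pk,ck,val from srcks.srctable where token(pk) >= ? and token(pk) <= ?  ALLOW FILTERING.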