
Commit 4b770f6

Merge pull request #16 from datastax/feature/ttl-writetime-independent
Feature/ttl writetime independent
2 parents ebf69ed + 91cae0f commit 4b770f6

File tree: 5 files changed (+34, −37 lines)

pom.xml

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@

     <groupId>datastax.astra.migrate</groupId>
     <artifactId>cassandra-data-migrator</artifactId>
-    <version>1.6</version>
+    <version>1.7</version>
     <packaging>jar</packaging>

     <properties>

src/main/java/datastax/astra/migrate/AbstractJobSession.java

Lines changed: 28 additions & 30 deletions
@@ -36,20 +36,17 @@ protected AbstractJobSession(CqlSession sourceSession, CqlSession astraSession,
         sourceKeyspaceTable = sparkConf.get("spark.source.keyspaceTable");
         astraKeyspaceTable = sparkConf.get("spark.destination.keyspaceTable");

-        isPreserveTTLWritetime = Boolean.parseBoolean(sparkConf.get("spark.preserveTTLWriteTime", "false"));
-        if (isPreserveTTLWritetime) {
-            String ttlColsStr = sparkConf.get("spark.preserveTTLWriteTime.ttl.cols");
-            if (null != ttlColsStr && ttlColsStr.trim().length() > 0) {
-                for (String ttlCol : ttlColsStr.split(",")) {
-                    ttlCols.add(Integer.parseInt(ttlCol));
-                }
+        String ttlColsStr = sparkConf.get("spark.query.ttl.cols");
+        if (null != ttlColsStr && ttlColsStr.trim().length() > 0) {
+            for (String ttlCol : ttlColsStr.split(",")) {
+                ttlCols.add(Integer.parseInt(ttlCol));
             }
+        }

-            String writeTimestampColsStr = sparkConf.get("spark.preserveTTLWriteTime.writetime.cols");
-            if (null != writeTimestampColsStr && writeTimestampColsStr.trim().length() > 0) {
-                for (String writeTimeStampCol : writeTimestampColsStr.split(",")) {
-                    writeTimeStampCols.add(Integer.parseInt(writeTimeStampCol));
-                }
+        String writeTimestampColsStr = sparkConf.get("spark.query.writetime.cols");
+        if (null != writeTimestampColsStr && writeTimestampColsStr.trim().length() > 0) {
+            for (String writeTimeStampCol : writeTimestampColsStr.split(",")) {
+                writeTimeStampCols.add(Integer.parseInt(writeTimeStampCol));
             }
         }

@@ -82,26 +79,22 @@ protected AbstractJobSession(CqlSession sourceSession, CqlSession astraSession,
         logger.info("PARAM -- Destination Keyspace Table: " + astraKeyspaceTable);
         logger.info("PARAM -- ReadRateLimit: " + readLimiter.getRate());
         logger.info("PARAM -- WriteRateLimit: " + writeLimiter.getRate());
-        logger.info("PARAM -- WriteTimestampFilter: " + writeTimeStampFilter);
-        logger.info("PARAM -- WriteTimestampFilterCols: " + writeTimeStampCols);
-        logger.info("PARAM -- isPreserveTTLWritetime: " + isPreserveTTLWritetime);
-        logger.info("PARAM -- isPreserveTTLWritetime: " + isPreserveTTLWritetime);
         logger.info("PARAM -- TTLCols: " + ttlCols);
+        logger.info("PARAM -- WriteTimestampFilterCols: " + writeTimeStampCols);
+        logger.info("PARAM -- WriteTimestampFilter: " + writeTimeStampFilter);

         String selectCols = sparkConf.get("spark.query.source");
         String partionKey = sparkConf.get("spark.query.source.partitionKey");
         String sourceSelectCondition = sparkConf.get("spark.query.condition", "");

         final StringBuilder selectTTLWriteTimeCols = new StringBuilder();
-        if (isPreserveTTLWritetime) {
-            String[] allCols = selectCols.split(",");
-            ttlCols.forEach(col -> {
-                selectTTLWriteTimeCols.append(",ttl(" + allCols[col] + ")");
-            });
-            writeTimeStampCols.forEach(col -> {
-                selectTTLWriteTimeCols.append(",writetime(" + allCols[col] + ")");
-            });
-        }
+        String[] allCols = selectCols.split(",");
+        ttlCols.forEach(col -> {
+            selectTTLWriteTimeCols.append(",ttl(" + allCols[col] + ")");
+        });
+        writeTimeStampCols.forEach(col -> {
+            selectTTLWriteTimeCols.append(",writetime(" + allCols[col] + ")");
+        });
         String fullSelectQuery = "select " + selectCols + selectTTLWriteTimeCols.toString() + " from " + sourceKeyspaceTable + " where token(" + partionKey.trim()
                 + ") >= ? and token(" + partionKey.trim() + ") <= ? " + sourceSelectCondition + " ALLOW FILTERING";
         sourceSelectStatement = sourceSession.prepare(fullSelectQuery);

@@ -110,7 +103,7 @@ protected AbstractJobSession(CqlSession sourceSession, CqlSession astraSession,
         selectColTypes = getTypes(sparkConf.get("spark.query.types"));
         String idCols = sparkConf.get("spark.query.destination.id", "");
         idColTypes = selectColTypes.subList(0, idCols.split(",").length);
-
+
         String insertCols = sparkConf.get("spark.query.destination", "");
         if (null == insertCols || insertCols.trim().isEmpty()) {
             insertCols = selectCols;

@@ -147,11 +140,16 @@ protected AbstractJobSession(CqlSession sourceSession, CqlSession astraSession,
             }
         }

-        if (isPreserveTTLWritetime) {
-            astraInsertStatement = astraSession.prepare("insert into " + astraKeyspaceTable + " (" + insertCols + ") VALUES (" + insertBinds + ") using TTL ? and TIMESTAMP ?");
-        } else {
-            astraInsertStatement = astraSession.prepare("insert into " + astraKeyspaceTable + " (" + insertCols + ") VALUES (" + insertBinds + ")");
+        String fullInsertQuery = "insert into " + astraKeyspaceTable + " (" + insertCols + ") VALUES (" + insertBinds + ")";
+        if (!ttlCols.isEmpty()) {
+            fullInsertQuery += " USING TTL ?";
+            if (!writeTimeStampCols.isEmpty()) {
+                fullInsertQuery += " AND TIMESTAMP ?";
+            }
+        } else if (!writeTimeStampCols.isEmpty()) {
+            fullInsertQuery += " USING TIMESTAMP ?";
         }
+        astraInsertStatement = astraSession.prepare(fullInsertQuery);
     }
 }
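The net effect of this change is that the USING clause of the prepared insert now has four possible shapes, driven only by which column lists are configured. A minimal standalone sketch of that assembly (the wrapper class and main method are illustrative, not part of the project; the string-building mirrors the new AbstractJobSession logic):

import java.util.List;

public class InsertQuerySketch {

    static String buildInsertQuery(String table, String insertCols, String insertBinds,
                                   List<Integer> ttlCols, List<Integer> writeTimeStampCols) {
        // Same branching as the new AbstractJobSession code: TTL and writetime
        // are appended independently, so either, both, or neither may appear.
        String fullInsertQuery = "insert into " + table + " (" + insertCols + ") VALUES (" + insertBinds + ")";
        if (!ttlCols.isEmpty()) {
            fullInsertQuery += " USING TTL ?";
            if (!writeTimeStampCols.isEmpty()) {
                fullInsertQuery += " AND TIMESTAMP ?";
            }
        } else if (!writeTimeStampCols.isEmpty()) {
            fullInsertQuery += " USING TIMESTAMP ?";
        }
        return fullInsertQuery;
    }

    public static void main(String[] args) {
        List<Integer> some = List.of(2, 3);
        List<Integer> none = List.of();
        System.out.println(buildInsertQuery("ks.tbl", "a,b", "?,?", some, some)); // ... USING TTL ? AND TIMESTAMP ?
        System.out.println(buildInsertQuery("ks.tbl", "a,b", "?,?", some, none)); // ... USING TTL ?
        System.out.println(buildInsertQuery("ks.tbl", "a,b", "?,?", none, some)); // ... USING TIMESTAMP ?
        System.out.println(buildInsertQuery("ks.tbl", "a,b", "?,?", none, none)); // no USING clause
    }
}

Note that bind order follows clause order: when both lists are set, the TTL value occupies the bind slot before the timestamp, which is what the CopyJobSession change below relies on.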

src/main/java/datastax/astra/migrate/BaseJobSession.java

Lines changed: 0 additions & 1 deletion
@@ -32,7 +32,6 @@ public abstract class BaseJobSession {
     protected Integer batchSize = 1;
     protected Integer printStatsAfter = 100000;

-    protected Boolean isPreserveTTLWritetime = Boolean.FALSE;
     protected Boolean writeTimeStampFilter = Boolean.FALSE;
     protected Long minWriteTimeStampFilter = 0l;
     protected Long maxWriteTimeStampFilter = Long.MAX_VALUE;

src/main/java/datastax/astra/migrate/CopyJobSession.java

Lines changed: 3 additions & 1 deletion
@@ -173,9 +173,11 @@ public BoundStatement bindInsert(PreparedStatement insertStatement, Row sourceRow
             }
         }

-        if (isPreserveTTLWritetime) {
+        if (!ttlCols.isEmpty()) {
             boundInsertStatement = boundInsertStatement.set(index, getLargestTTL(sourceRow), Integer.class);
             index++;
+        }
+        if (!writeTimeStampCols.isEmpty()) {
             if (customWritetime > 0) {
                 boundInsertStatement = boundInsertStatement.set(index, customWritetime, Long.class);
             } else {
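Because bindInsert now gates each bind on its own column list, the bind index advances only for parameters that actually exist in the prepared statement. A hedged sketch of the resulting slot assignment (a toy model using printlns rather than the driver's BoundStatement API; the non-custom writetime branch is elided in the diff above, so it is only summarized here):

import java.util.List;

public class BindOrderSketch {
    public static void main(String[] args) {
        List<Integer> ttlCols = List.of(2, 3);            // spark.query.ttl.cols
        List<Integer> writeTimeStampCols = List.of(2, 3); // spark.query.writetime.cols
        long customWritetime = 0;                         // >0 overrides the source writetime

        int index = 4; // hypothetical: next slot after the regular column binds
        if (!ttlCols.isEmpty()) {
            // matches "USING TTL ?": the largest TTL across the configured columns
            System.out.println("slot " + index + " <- getLargestTTL(sourceRow)");
            index++;
        }
        if (!writeTimeStampCols.isEmpty()) {
            // matches the TIMESTAMP bind slot, filled from either source
            String value = (customWritetime > 0) ? "customWritetime" : "writetime read from sourceRow";
            System.out.println("slot " + index + " <- " + value);
        }
    }
}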

src/resources/sparkConf.properties

Lines changed: 2 additions & 4 deletions
@@ -28,15 +28,13 @@ spark.query.source.partitionKey partition-key
 spark.query.destination partition-key,clustering-key,order-date,amount
 spark.query.destination.id partition-key,clustering-key
 spark.query.types 9,1,4,3
+spark.query.ttl.cols 2,3
+spark.query.writetime.cols 2,3

 spark.counterTable false
 spark.counterTable.cql
 spark.counterTable.cql.index 0

-spark.preserveTTLWriteTime true
-spark.preserveTTLWriteTime.ttl.cols 2,3
-spark.preserveTTLWriteTime.writetime.cols 2,3
-
 spark.source.writeTimeStampFilter false
 spark.source.minWriteTimeStampFilter 0
 spark.source.maxWriteTimeStampFilter 9223372036854775807
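To make the new keys concrete: the integers in spark.query.ttl.cols and spark.query.writetime.cols are zero-based indexes into the spark.query.source column list, and the job wraps the indexed columns in ttl()/writetime() when it builds the source select. A worked sketch (it assumes spark.query.source holds the same list as spark.query.destination above, which the diff does not show):

import java.util.List;

public class TtlWritetimeColsExample {
    public static void main(String[] args) {
        // assumed: mirrors spark.query.destination from the sample properties
        String selectCols = "partition-key,clustering-key,order-date,amount";
        List<Integer> ttlCols = List.of(2, 3);       // spark.query.ttl.cols
        List<Integer> writeTimeCols = List.of(2, 3); // spark.query.writetime.cols

        String[] allCols = selectCols.split(",");
        StringBuilder extra = new StringBuilder();
        // same append logic as AbstractJobSession: index 2 -> order-date, 3 -> amount
        ttlCols.forEach(col -> extra.append(",ttl(" + allCols[col] + ")"));
        writeTimeCols.forEach(col -> extra.append(",writetime(" + allCols[col] + ")"));

        System.out.println("select " + selectCols + extra + " from <keyspace.table> ...");
        // -> select partition-key,clustering-key,order-date,amount,
        //    ttl(order-date),ttl(amount),writetime(order-date),writetime(amount) from ...
    }
}

Leaving either key unset simply drops the corresponding ttl()/writetime() columns and bind slot, replacing the old all-or-nothing spark.preserveTTLWriteTime switch.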
