
Commit ca6bb39

Improve logger to use params instead of string-concat

1 parent 28cec61

6 files changed (+40, -39 lines)

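With string concatenation, the log message is built unconditionally, even when the level is disabled; SLF4J's parameterized form hands the format string and arguments to the logger, which substitutes the "{}" placeholders only after the level check passes. That deferred formatting is the usual reason for this conversion. A minimal sketch of the before/after pattern (class and variable names here are illustrative, not from this repo):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggerParamsDemo {
        private static final Logger logger = LoggerFactory.getLogger(LoggerParamsDemo.class);

        public static void main(String[] args) {
            long batchSize = 5;

            // Before: the message String is always concatenated, even if
            // INFO is disabled, so the work is wasted on suppressed levels.
            logger.info("PARAM -- Write Batch Size: " + batchSize);

            // After: "{}" is substituted (and each argument's toString()
            // invoked) only when INFO is actually enabled.
            logger.info("PARAM -- Write Batch Size: {}", batchSize);
        }
    }

SLF4J provides dedicated one- and two-argument overloads plus a varargs overload, so every call shape in this commit resolves directly.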

src/main/java/datastax/astra/migrate/AbstractJobSession.java

Lines changed: 9 additions & 9 deletions
@@ -76,14 +76,14 @@ protected AbstractJobSession(CqlSession sourceSession, CqlSession astraSession,
             customWritetime = Long.parseLong(customWriteTimeStr);
         }
 
-        logger.info("PARAM -- Write Batch Size: " + batchSize);
-        logger.info("PARAM -- Source Keyspace Table: " + sourceKeyspaceTable);
-        logger.info("PARAM -- Destination Keyspace Table: " + astraKeyspaceTable);
-        logger.info("PARAM -- ReadRateLimit: " + readLimiter.getRate());
-        logger.info("PARAM -- WriteRateLimit: " + writeLimiter.getRate());
-        logger.info("PARAM -- TTLCols: " + ttlCols);
-        logger.info("PARAM -- WriteTimestampFilterCols: " + writeTimeStampCols);
-        logger.info("PARAM -- WriteTimestampFilter: " + writeTimeStampFilter);
+        logger.info("PARAM -- Write Batch Size: {}", batchSize);
+        logger.info("PARAM -- Source Keyspace Table: {}", sourceKeyspaceTable);
+        logger.info("PARAM -- Destination Keyspace Table: {}", astraKeyspaceTable);
+        logger.info("PARAM -- ReadRateLimit: {}", readLimiter.getRate());
+        logger.info("PARAM -- WriteRateLimit: {}", writeLimiter.getRate());
+        logger.info("PARAM -- TTLCols: {}", ttlCols);
+        logger.info("PARAM -- WriteTimestampFilterCols: {}", writeTimeStampCols);
+        logger.info("PARAM -- WriteTimestampFilter: {}", writeTimeStampFilter);
 
         String selectCols = Util.getSparkProp(sc, "spark.query.origin");
         String partionKey = Util.getSparkProp(sc, "spark.query.origin.partitionKey");
@@ -122,7 +122,7 @@ protected AbstractJobSession(CqlSession sourceSession, CqlSession astraSession,
             fullSelectQuery = "select " + selectCols + selectTTLWriteTimeCols + " from " + sourceKeyspaceTable + " where " + insertBinds;
         }
         sourceSelectStatement = sourceSession.prepare(fullSelectQuery);
-        logger.info("PARAM -- Query used: " + fullSelectQuery);
+        logger.info("PARAM -- Query used: {}", fullSelectQuery);
 
         astraSelectStatement = astraSession.prepare(
                 "select " + insertCols + " from " + astraKeyspaceTable

src/main/java/datastax/astra/migrate/CopyJobSession.java

Lines changed: 6 additions & 5 deletions
@@ -36,7 +36,7 @@ public static CopyJobSession getInstance(CqlSession sourceSession, CqlSession as
     }
 
     public void getDataAndInsert(BigInteger min, BigInteger max) {
-        logger.info("ThreadID: " + Thread.currentThread().getId() + " Processing min: " + min + " max:" + max);
+        logger.info("ThreadID: {} Processing min: {} max: {}", Thread.currentThread().getId(), min, max);
         int maxAttempts = maxRetries;
         for (int retryCount = 1; retryCount <= maxAttempts; retryCount++) {
 
@@ -117,8 +117,9 @@ public void getDataAndInsert(BigInteger min, BigInteger max) {
 
                 retryCount = maxAttempts;
             } catch (Exception e) {
-                logger.error("Error occurred retry#: " + retryCount, e);
-                logger.error("Error with PartitionRange -- ThreadID: " + Thread.currentThread().getId() + " Processing min: " + min + " max:" + max + " -- Retry# " + retryCount);
+                logger.error("Error occurred retry#: {}", retryCount, e);
+                logger.error("Error with PartitionRange -- ThreadID: {} Processing min: {} max: {} -- Retry# {}",
+                        Thread.currentThread().getId(), min, max, retryCount);
             }
         }
     }
@@ -129,8 +130,8 @@ public synchronized void printCounts(boolean isFinal) {
             msg += " Final";
             logger.info("################################################################################################");
         }
-        logger.info(msg + " Read Record Count: " + readCounter.get());
-        logger.info(msg + " Write Record Count: " + writeCounter.get());
+        logger.info("{} Read Record Count: {}", msg, readCounter.get());
+        logger.info("{} Write Record Count: {}", msg, writeCounter.get());
         if (isFinal) {
             logger.info("################################################################################################");
         }
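Note the shape of logger.error("Error occurred retry#: {}", retryCount, e) above: one placeholder, two trailing arguments. Since SLF4J 1.6.0, a final argument that is a Throwable with no matching "{}" is treated as the exception to log, so the stack trace is still emitted exactly as with the old logger.error(msg, e) form. A small self-contained check (names illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ThrowableParamCheck {
        private static final Logger logger = LoggerFactory.getLogger(ThrowableParamCheck.class);

        public static void main(String[] args) {
            int retryCount = 1;
            try {
                throw new IllegalStateException("simulated failure");
            } catch (Exception e) {
                // "{}" consumes retryCount; the unmatched trailing Throwable
                // is logged as the exception, stack trace included.
                logger.error("Error occurred retry#: {}", retryCount, e);
            }
        }
    }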

src/main/java/datastax/astra/migrate/CopyPKJobSession.java

Lines changed: 4 additions & 4 deletions
@@ -51,7 +51,7 @@ public void getRowAndInsert(List<SplitPartitions.PKRows> rowsList) {
             Row pkRow = sourceSession.execute(bspk).one();
             if (null == pkRow) {
                 missingCounter.incrementAndGet();
-                logger.error("Could not find row with primary-key: " + row);
+                logger.error("Could not find row with primary-key: {}", row);
                 return;
             }
             ResultSet astraWriteResultSet = astraSession
@@ -70,9 +70,9 @@ public void printCounts(boolean isFinal) {
         if (isFinal) {
             logger.info("################################################################################################");
         }
-        logger.info("ThreadID: " + Thread.currentThread().getId() + " Read Record Count: " + readCounter.get());
-        logger.info("ThreadID: " + Thread.currentThread().getId() + " Read Missing Count: " + missingCounter.get());
-        logger.info("ThreadID: " + Thread.currentThread().getId() + " Inserted Record Count: " + writeCounter.get());
+        logger.info("ThreadID: {} Read Record Count: {}", Thread.currentThread().getId(), readCounter.get());
+        logger.info("ThreadID: {} Read Missing Count: {}", Thread.currentThread().getId(), missingCounter.get());
+        logger.info("ThreadID: {} Inserted Record Count: {}", Thread.currentThread().getId(), writeCounter.get());
         if (isFinal) {
             logger.info("################################################################################################");
         }

src/main/java/datastax/astra/migrate/DiffJobSession.java

Lines changed: 7 additions & 7 deletions
@@ -36,10 +36,10 @@ private DiffJobSession(CqlSession sourceSession, CqlSession astraSession, SparkC
         super(sourceSession, astraSession, sc);
 
         autoCorrectMissing = Boolean.parseBoolean(Util.getSparkPropOr(sc, "spark.target.autocorrect.missing", "false"));
-        logger.info("PARAM -- Autocorrect Missing: " + autoCorrectMissing);
+        logger.info("PARAM -- Autocorrect Missing: {}", autoCorrectMissing);
 
         autoCorrectMismatch = Boolean.parseBoolean(Util.getSparkPropOr(sc, "spark.target.autocorrect.mismatch", "false"));
-        logger.info("PARAM -- Autocorrect Mismatch: " + autoCorrectMismatch);
+        logger.info("PARAM -- Autocorrect Mismatch: {}", autoCorrectMismatch);
     }
 
     public static DiffJobSession getInstance(CqlSession sourceSession, CqlSession astraSession, SparkConf sparkConf) {
@@ -55,7 +55,7 @@ public static DiffJobSession getInstance(CqlSession sourceSession, CqlSession as
     }
 
     public void getDataAndDiff(BigInteger min, BigInteger max) {
-        logger.info("ThreadID: " + Thread.currentThread().getId() + " Processing min: " + min + " max:" + max);
+        logger.info("ThreadID: {} Processing min: {} max: {}", Thread.currentThread().getId(), min, max);
         int maxAttempts = maxRetries;
         for (int retryCount = 1; retryCount <= maxAttempts; retryCount++) {
 
@@ -88,9 +88,9 @@ public void getDataAndDiff(BigInteger min, BigInteger max) {
                 diffAndClear(srcToTargetRowMap);
                 retryCount = maxAttempts;
             } catch (Exception e) {
-                logger.error("Error occurred retry#: " + retryCount, e);
-                logger.error("Error with PartitionRange -- ThreadID: " + Thread.currentThread().getId()
-                        + " Processing min: " + min + " max:" + max + " -- Retry# " + retryCount);
+                logger.error("Error occurred retry#: {}", retryCount, e);
+                logger.error("Error with PartitionRange -- ThreadID: {} Processing min: {} max: {} -- Retry# {}",
+                        Thread.currentThread().getId(), min, max, retryCount);
             }
         }
 
@@ -102,7 +102,7 @@ private void diffAndClear(Map<Row, CompletionStage<AsyncResultSet>> srcToTargetR
                 Row targetRow = srcToTargetRowMap.get(srcRow).toCompletableFuture().get().one();
                 diff(srcRow, targetRow);
             } catch (Exception e) {
-                logger.error("Could not perform diff for Key: " + getKey(srcRow), e);
+                logger.error("Could not perform diff for Key: {}", getKey(srcRow), e);
             }
         }
         srcToTargetRowMap.clear();
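One thing the placeholder style does not change: argument expressions are still evaluated eagerly, so getKey(srcRow) above runs even if the level were disabled; only the formatting (and each argument's toString()) is deferred. For genuinely expensive arguments, the usual SLF4J idiom is a level guard, sketched here with a hypothetical expensiveKey helper (not from this repo):

    // Hypothetical sketch: expensiveKey() is skipped entirely when DEBUG is
    // off, whereas a plain parameterized call would still evaluate it.
    if (logger.isDebugEnabled()) {
        logger.debug("Key: {}", expensiveKey(srcRow));
    }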

src/main/java/datastax/astra/migrate/OriginCountJobSession.java

Lines changed: 8 additions & 8 deletions
@@ -79,7 +79,7 @@ public static OriginCountJobSession getInstance(CqlSession sourceSession, SparkC
     }
 
     public void getData(BigInteger min, BigInteger max) {
-        logger.info("ThreadID: " + Thread.currentThread().getId() + " Processing min: " + min + " max:" + max);
+        logger.info("ThreadID: {} Processing min: {} max: {}", Thread.currentThread().getId(), min, max);
         int maxAttempts = maxRetries;
         for (int retryCount = 1; retryCount <= maxAttempts; retryCount++) {
 
@@ -104,7 +104,7 @@ public void getData(BigInteger min, BigInteger max) {
                         String[] colName = checkTableforselectCols.split(",");
                         result = result + " - " + colName[index] + " : " + colData;
                     }
-                    logger.error("ThreadID: " + Thread.currentThread().getId() + result + " - " + filterColName + " length: " + rowColcnt);
+                    logger.error("ThreadID: {}{} - {} length: {}", Thread.currentThread().getId(), result, filterColName, rowColcnt);
                     continue;
                 }
             }
@@ -126,24 +126,24 @@ public void getData(BigInteger min, BigInteger max) {
                         String[] colName = checkTableforselectCols.split(",");
                         result = result + " - " + colName[index] + " : " + colData;
                     }
-                    logger.error("ThreadID: " + Thread.currentThread().getId() + result + " - " + filterColName + " length: " + rowColcnt);
+                    logger.error("ThreadID: {}{} - {} length: {}", Thread.currentThread().getId(), result, filterColName, rowColcnt);
                     continue;
                 }
             }
 
             if (readCounter.incrementAndGet() % 1000 == 0) {
-                logger.info("ThreadID: " + Thread.currentThread().getId() + " Read Record Count: " + readCounter.get());
+                logger.info("ThreadID: {} Read Record Count: {}", Thread.currentThread().getId(), readCounter.get());
             }
 
         }
     }
 
-
-        logger.info("ThreadID: " + Thread.currentThread().getId() + " Final Read Record Count: " + readCounter.get());
+        logger.info("ThreadID: {} Final Read Record Count: {}", Thread.currentThread().getId(), readCounter.get());
         retryCount = maxAttempts;
     } catch (Exception e) {
-        logger.error("Error occurred retry#: " + retryCount, e);
-        logger.error("Error with PartitionRange -- ThreadID: " + Thread.currentThread().getId() + " Processing min: " + min + " max:" + max + " -- Retry# " + retryCount);
+        logger.error("Error occurred retry#: {}", retryCount, e);
+        logger.error("Error with PartitionRange -- ThreadID: {} Processing min: {} max: {} -- Retry# {}",
+                Thread.currentThread().getId(), min, max, retryCount);
     }
 }

src/main/java/datastax/astra/migrate/SplitPartitions.java

Lines changed: 6 additions & 6 deletions
@@ -31,7 +31,7 @@ public static void main(String[] args) throws IOException {
     }
 
     public static Collection<Partition> getRandomSubPartitions(int splitSize, BigInteger min, BigInteger max, int coveragePercent) {
-        logger.info("ThreadID: " + Thread.currentThread().getId() + " Splitting min: " + min + " max:" + max);
+        logger.info("ThreadID: {} Splitting min: {} max: {}", Thread.currentThread().getId(), min, max);
         List<Partition> partitions = getSubPartitions(splitSize, min, max, coveragePercent);
         Collections.shuffle(partitions);
         Collections.shuffle(partitions);
@@ -41,8 +41,8 @@ public static Collection<Partition> getRandomSubPartitions(int splitSize, BigInt
     }
 
     public static List<Partition> getSubPartitionsFromFile(int splitSize) throws IOException {
-        logger.info("ThreadID: " + Thread.currentThread().getId() +
-                " Splitting partitions in file: ./partitions.csv using a split-size of " + splitSize);
+        logger.info("ThreadID: {} Splitting partitions in file: ./partitions.csv using a split-size of {}",
+                Thread.currentThread().getId(), splitSize);
         List<Partition> partitions = new ArrayList<Partition>();
         BufferedReader reader = Util.getfileReader("./partitions.csv");
         String line = null;
@@ -54,16 +54,16 @@ public static List<Partition> getSubPartitionsFromFile(int splitSize) throws IOE
             try {
                 partitions.addAll(getSubPartitions(splitSize, new BigInteger(minMax[0]), new BigInteger(minMax[1]), 100));
             } catch (Exception e) {
-                logger.error("Skipping partition: " + line, e);
+                logger.error("Skipping partition: {}", line, e);
             }
         }
 
         return partitions;
     }
 
     public static List<PKRows> getRowPartsFromFile(int splitSize) throws IOException {
-        logger.info("ThreadID: " + Thread.currentThread().getId() +
-                " Splitting rows in file: ./primary_key_rows.csv using a split-size of " + splitSize);
+        logger.info("ThreadID: {} Splitting rows in file: ./primary_key_rows.csv using a split-size of {}",
+                Thread.currentThread().getId(), splitSize);
         List<String> pkRows = new ArrayList<String>();
         BufferedReader reader = Util.getfileReader("./primary_key_rows.csv");
         String pkRow = null;