@@ -70,95 +70,86 @@ public synchronized void initCdmRun(Collection<SplitPartitions.Partition> parts,
         trackRunFeature.initCdmRun(parts, TrackRun.RUN_TYPE.MIGRATE);
     }
 
-    public void getDataAndInsert(BigInteger min, BigInteger max) {
+    private void getDataAndInsert(BigInteger min, BigInteger max) {
         ThreadContext.put(THREAD_CONTEXT_LABEL, getThreadLabel(min, max));
         logger.info("ThreadID: {} Processing min: {} max: {}", Thread.currentThread().getId(), min, max);
         if (trackRun)
             trackRunFeature.updateCdmRun(min, TrackRun.RUN_STATUS.STARTED);
 
         BatchStatement batch = BatchStatement.newInstance(BatchType.UNLOGGED);
-        boolean done = false;
-        int maxAttempts = maxRetries + 1;
         String guardrailCheck;
-        for (int attempts = 1; attempts <= maxAttempts && !done; attempts++) {
-            jobCounter.threadReset();
-
-            try {
-                OriginSelectByPartitionRangeStatement originSelectByPartitionRangeStatement = this.originSession
-                        .getOriginSelectByPartitionRangeStatement();
-                targetUpsertStatement = this.targetSession.getTargetUpsertStatement();
-                targetSelectByPKStatement = this.targetSession.getTargetSelectByPKStatement();
-                ResultSet resultSet = originSelectByPartitionRangeStatement
-                        .execute(originSelectByPartitionRangeStatement.bind(min, max));
-                Collection<CompletionStage<AsyncResultSet>> writeResults = new ArrayList<>();
-
-                for (Row originRow : resultSet) {
-                    rateLimiterOrigin.acquire(1);
-                    jobCounter.threadIncrement(JobCounter.CounterType.READ);
-
-                    Record record = new Record(pkFactory.getTargetPK(originRow), originRow, null);
-                    if (originSelectByPartitionRangeStatement.shouldFilterRecord(record)) {
-                        jobCounter.threadIncrement(JobCounter.CounterType.SKIPPED);
-                        continue;
-                    }
-
-                    for (Record r : pkFactory.toValidRecordList(record)) {
-                        if (guardrailEnabled) {
-                            guardrailCheck = guardrailFeature.guardrailChecks(r);
-                            if (guardrailCheck != null && guardrailCheck != Guardrail.CLEAN_CHECK) {
-                                logger.error("Guardrails failed for PrimaryKey {}; {}", r.getPk(), guardrailCheck);
-                                jobCounter.threadIncrement(JobCounter.CounterType.SKIPPED);
-                                continue;
-                            }
-                        }
+        jobCounter.threadReset();
+
+        try {
+            OriginSelectByPartitionRangeStatement originSelectByPartitionRangeStatement = this.originSession
+                    .getOriginSelectByPartitionRangeStatement();
+            targetUpsertStatement = this.targetSession.getTargetUpsertStatement();
+            targetSelectByPKStatement = this.targetSession.getTargetSelectByPKStatement();
+            ResultSet resultSet = originSelectByPartitionRangeStatement
+                    .execute(originSelectByPartitionRangeStatement.bind(min, max));
+            Collection<CompletionStage<AsyncResultSet>> writeResults = new ArrayList<>();
+
+            for (Row originRow : resultSet) {
+                rateLimiterOrigin.acquire(1);
+                jobCounter.threadIncrement(JobCounter.CounterType.READ);
+
+                Record record = new Record(pkFactory.getTargetPK(originRow), originRow, null);
+                if (originSelectByPartitionRangeStatement.shouldFilterRecord(record)) {
+                    jobCounter.threadIncrement(JobCounter.CounterType.SKIPPED);
+                    continue;
+                }
 
-                        BoundStatement boundUpsert = bind(r);
-                        if (null == boundUpsert) {
-                            jobCounter.threadIncrement(JobCounter.CounterType.SKIPPED); // TODO: this previously
-                                                                                        // skipped, why not errCnt?
+                for (Record r : pkFactory.toValidRecordList(record)) {
+                    if (guardrailEnabled) {
+                        guardrailCheck = guardrailFeature.guardrailChecks(r);
+                        if (guardrailCheck != null && guardrailCheck != Guardrail.CLEAN_CHECK) {
+                            logger.error("Guardrails failed for PrimaryKey {}; {}", r.getPk(), guardrailCheck);
+                            jobCounter.threadIncrement(JobCounter.CounterType.SKIPPED);
                             continue;
                         }
+                    }
 
-                        rateLimiterTarget.acquire(1);
-                        batch = writeAsync(batch, writeResults, boundUpsert);
-                        jobCounter.threadIncrement(JobCounter.CounterType.UNFLUSHED);
-
-                        if (jobCounter.getCount(JobCounter.CounterType.UNFLUSHED) > fetchSize) {
-                            flushAndClearWrites(batch, writeResults);
-                            jobCounter.threadIncrement(JobCounter.CounterType.WRITE,
-                                    jobCounter.getCount(JobCounter.CounterType.UNFLUSHED));
-                            jobCounter.threadReset(JobCounter.CounterType.UNFLUSHED);
-                        }
+                    BoundStatement boundUpsert = bind(r);
+                    if (null == boundUpsert) {
+                        jobCounter.threadIncrement(JobCounter.CounterType.SKIPPED); // TODO: this previously
+                                                                                    // skipped, why not errCnt?
+                        continue;
                     }
-                }
 
-                flushAndClearWrites(batch, writeResults);
-                jobCounter.threadIncrement(JobCounter.CounterType.WRITE,
-                        jobCounter.getCount(JobCounter.CounterType.UNFLUSHED));
-                jobCounter.threadReset(JobCounter.CounterType.UNFLUSHED);
-                done = true;
-                if (trackRun)
-                    trackRunFeature.updateCdmRun(min, TrackRun.RUN_STATUS.PASS);
-
-            } catch (Exception e) {
-                if (attempts == maxAttempts) {
-                    jobCounter.threadIncrement(JobCounter.CounterType.ERROR,
-                            jobCounter.getCount(JobCounter.CounterType.READ)
-                                    - jobCounter.getCount(JobCounter.CounterType.WRITE)
-                                    - jobCounter.getCount(JobCounter.CounterType.SKIPPED));
-                    if (trackRun)
-                        trackRunFeature.updateCdmRun(min, TrackRun.RUN_STATUS.FAIL);
-                    else
-                        logPartitionsInFile(partitionFileOutput, min, max);
+                    rateLimiterTarget.acquire(1);
+                    batch = writeAsync(batch, writeResults, boundUpsert);
+                    jobCounter.threadIncrement(JobCounter.CounterType.UNFLUSHED);
+
+                    if (jobCounter.getCount(JobCounter.CounterType.UNFLUSHED) > fetchSize) {
+                        flushAndClearWrites(batch, writeResults);
+                        jobCounter.threadIncrement(JobCounter.CounterType.WRITE,
+                                jobCounter.getCount(JobCounter.CounterType.UNFLUSHED));
+                        jobCounter.threadReset(JobCounter.CounterType.UNFLUSHED);
+                    }
                 }
-                logger.error("Error occurred during Attempt#: {}", attempts, e);
-                logger.error("Error with PartitionRange -- ThreadID: {} Processing min: {} max: {} -- Attempt# {}",
-                        Thread.currentThread().getId(), min, max, attempts);
-                logger.error("Error stats " + jobCounter.getThreadCounters(false));
-            } finally {
-                jobCounter.globalIncrement();
-                printCounts(false);
             }
+
+            flushAndClearWrites(batch, writeResults);
+            jobCounter.threadIncrement(JobCounter.CounterType.WRITE,
+                    jobCounter.getCount(JobCounter.CounterType.UNFLUSHED));
+            jobCounter.threadReset(JobCounter.CounterType.UNFLUSHED);
+            if (trackRun)
+                trackRunFeature.updateCdmRun(min, TrackRun.RUN_STATUS.PASS);
+
+        } catch (Exception e) {
+            jobCounter.threadIncrement(JobCounter.CounterType.ERROR,
+                    jobCounter.getCount(JobCounter.CounterType.READ) - jobCounter.getCount(JobCounter.CounterType.WRITE)
+                            - jobCounter.getCount(JobCounter.CounterType.SKIPPED));
+            if (trackRun)
+                trackRunFeature.updateCdmRun(min, TrackRun.RUN_STATUS.FAIL);
+            else
+                logPartitionsInFile(partitionFileOutput, min, max);
+            logger.error("Error with PartitionRange -- ThreadID: {} Processing min: {} max: {}",
+                    Thread.currentThread().getId(), min, max);
+            logger.error("Error stats " + jobCounter.getThreadCounters(false));
+        } finally {
+            jobCounter.globalIncrement();
+            printCounts(false);
         }
     }
 