3434import com .swirlds .metrics .api .Metrics ;
3535import com .swirlds .virtualmap .config .VirtualMapConfig ;
3636import com .swirlds .virtualmap .config .VirtualMapReconnectMode ;
37+ import com .swirlds .virtualmap .datasource .DataSourceHashChunkPreloader ;
3738import com .swirlds .virtualmap .datasource .VirtualDataSource ;
3839import com .swirlds .virtualmap .datasource .VirtualDataSourceBuilder ;
3940import com .swirlds .virtualmap .datasource .VirtualHashChunk ;
5152import com .swirlds .virtualmap .internal .reconnect .ConcurrentBlockingIterator ;
5253import com .swirlds .virtualmap .internal .reconnect .LearnerPullVirtualTreeView ;
5354import com .swirlds .virtualmap .internal .reconnect .LearnerPushVirtualTreeView ;
54- import com .swirlds .virtualmap .internal .reconnect .NodeTraversalOrder ;
5555import com .swirlds .virtualmap .internal .reconnect .ParallelSyncTraversalOrder ;
5656import com .swirlds .virtualmap .internal .reconnect .ReconnectHashLeafFlusher ;
5757import com .swirlds .virtualmap .internal .reconnect .ReconnectHashListener ;
@@ -321,22 +321,13 @@ public static class ClassVersion {
321321 */
322322 private VirtualMapMetadata reconnectState ;
323323
324- private VirtualNodeCache reconnectCache ;
325-
326- /**
327- * The {@link RecordAccessor} for the state, cache, and data source needed during reconnect.
328- */
329- private RecordAccessor reconnectRecords ;
330-
331324 /**
332325 * During reconnect as a learner, this is the root node in the old learner merkle tree.
333326 */
334327 private VirtualMap originalMap ;
335328
336329 private ReconnectHashLeafFlusher reconnectFlusher ;
337330
338- private ReconnectNodeRemover nodeRemover ;
339-
340331 private final long fastCopyVersion ;
341332
342333 private VirtualMapStatistics statistics ;
@@ -423,8 +414,6 @@ private VirtualMap(final VirtualMap source) {
423414 reconnectHashingFuture = null ;
424415 reconnectHashingStarted = null ;
425416 reconnectIterator = null ;
426- reconnectCache = null ;
427- reconnectRecords = null ;
428417 pipeline = source .pipeline ;
429418 flushCandidateThreshold .set (source .flushCandidateThreshold .get ());
430419 statistics = source .statistics ;
@@ -869,10 +858,6 @@ public void onShutdown(final boolean immediately) {
869858 hasher .shutdown ();
870859 }
871860 cache .shutdown ();
872- if (reconnectCache != null ) {
873- reconnectCache .shutdown ();
874- reconnectCache = null ;
875- }
876861 closeDataSource ();
877862 }
878863
@@ -1214,6 +1199,19 @@ public void onHashChunkHashed(@NonNull VirtualHashChunk chunk) {
12141199 statistics .recordHash (end - start );
12151200 }
12161201
1202+ /**
 * Detaches a copy of this map's underlying data source. The virtual pipeline is paused
 * for the duration of the copy ("detach" task) so the snapshot is taken against a
 * quiescent data source. Any changes still held in the in-memory node cache are then
 * flushed into the copy, so the returned data source reflects this map's full state.
 *
 * NOTE(review): the original Javadoc's "and running compaction" claim is not visible in
 * this snippet — the builder flags passed to build(label, path, true, false) presumably
 * control compaction; confirm against VirtualDataSourceBuilder.build.
1203+  * @return copy of underlying datasource with cache copy flushed into it, and running compaction
1204+  */
1205+ public VirtualDataSource detachAsDataSourceCopy () {
1206+ return pipeline .pausePipelineAndRun ("detach" , () -> {
 // Snapshot the live data source to a new location (null => builder picks the path).
1207+ final Path snapshotPath = dataSourceBuilder .snapshot (null , dataSource );
 // Open an independent data source over the snapshot files.
1208+ VirtualDataSource dataSourceCopy = dataSourceBuilder .build (getLabel (), snapshotPath , true , false );
1209+
 // Push cache contents into the copy so it is complete without this map's cache.
1210+ flush (cache .snapshot (), metadata , dataSourceCopy );
1211+ return dataSourceCopy ;
1212+ });
1213+ }
1214+
12171215 /**
12181216 * Prepares a read-only copy so that it may be used even when removed from the pipeline.
12191217 * Can be called only on immutable hashed copy.
@@ -1291,31 +1289,7 @@ private void setupWithOriginalNode(@NonNull final VirtualMap originalMap) {
12911289
12921290 // Start with empty state, it will be updated from the teacher during reconnect
12931291 reconnectState = new VirtualMapMetadata ();
1294- reconnectRecords = originalMap .pipeline .pausePipelineAndRun ("copy" , () -> {
1295- // shutdown background compaction on original data source as it is no longer needed to be running as all
1296- // data
1297- // in that data source is only there as a starting point for reconnect now. So compacting it further is not
1298- // helpful and will just burn resources.
1299- originalMap .dataSource .stopAndDisableBackgroundCompaction ();
1300-
1301- // Take a snapshot, and use the snapshot database as my data source
1302- final Path snapshotPath = dataSourceBuilder .snapshot (null , originalMap .dataSource );
1303- this .dataSource = dataSourceBuilder .build (originalMap .getLabel (), snapshotPath , true , false );
1304-
1305- // The old map's cache is going to become immutable, but that's OK, because the old map
1306- // will NEVER be updated again.
1307- assert originalMap .isHashed () : "The system should have made sure this was hashed by this point!" ;
1308- final VirtualNodeCache snapshotCache = originalMap .cache .snapshot ();
1309- flush (snapshotCache , originalMap .metadata , this .dataSource );
1310-
1311- final int hashChunkHeight = dataSource .getHashChunkHeight ();
1312- reconnectCache = new VirtualNodeCache (
1313- virtualMapConfig ,
1314- hashChunkHeight ,
1315- dataSource ::loadHashChunk ,
1316- originalMap .cache .getFastCopyVersion ());
1317- return new RecordAccessor (reconnectState , hashChunkHeight , reconnectCache , dataSource );
1318- });
1292+ this .dataSource = originalMap .detachAsDataSourceCopy ();
13191293
13201294 // Set up the VirtualHasher which we will use during reconnect.
13211295 // Initial timeout is intentionally very long, timeout is reduced once we receive the first leaf in the tree.
@@ -1358,7 +1332,7 @@ public LearnerTreeView buildLearnerView(
13581332 final VirtualMapMetadata originalState = originalMap .getMetadata ();
13591333 reconnectFlusher =
13601334 new ReconnectHashLeafFlusher (dataSource , virtualMapConfig .reconnectFlushInterval (), statistics );
1361- nodeRemover = new ReconnectNodeRemover (
1335+ final ReconnectNodeRemover nodeRemover = new ReconnectNodeRemover (
13621336 originalMap .getRecords (),
13631337 originalState .getFirstLeafPath (),
13641338 originalState .getLastLeafPath (),
@@ -1367,42 +1341,36 @@ public LearnerTreeView buildLearnerView(
13671341 case VirtualMapReconnectMode .PUSH ->
13681342 new LearnerPushVirtualTreeView (
13691343 this , originalMap .records , originalState , reconnectState , nodeRemover , mapStats );
1370- case VirtualMapReconnectMode .PULL_TOP_TO_BOTTOM -> {
1371- final NodeTraversalOrder topToBottom = new TopToBottomTraversalOrder ();
1372- yield new LearnerPullVirtualTreeView (
1344+ case VirtualMapReconnectMode .PULL_TOP_TO_BOTTOM ->
1345+ new LearnerPullVirtualTreeView (
13731346 reconnectConfig ,
13741347 this ,
13751348 originalMap .records ,
13761349 originalState ,
13771350 reconnectState ,
13781351 nodeRemover ,
1379- topToBottom ,
1352+ new TopToBottomTraversalOrder () ,
13801353 mapStats );
1381- }
1382- case VirtualMapReconnectMode .PULL_TWO_PHASE_PESSIMISTIC -> {
1383- final NodeTraversalOrder twoPhasePessimistic = new TwoPhasePessimisticTraversalOrder ();
1384- yield new LearnerPullVirtualTreeView (
1354+ case VirtualMapReconnectMode .PULL_TWO_PHASE_PESSIMISTIC ->
1355+ new LearnerPullVirtualTreeView (
13851356 reconnectConfig ,
13861357 this ,
13871358 originalMap .records ,
13881359 originalState ,
13891360 reconnectState ,
13901361 nodeRemover ,
1391- twoPhasePessimistic ,
1362+ new TwoPhasePessimisticTraversalOrder () ,
13921363 mapStats );
1393- }
1394- case VirtualMapReconnectMode .PULL_PARALLEL_SYNC -> {
1395- final NodeTraversalOrder parallelSync = new ParallelSyncTraversalOrder ();
1396- yield new LearnerPullVirtualTreeView (
1364+ case VirtualMapReconnectMode .PULL_PARALLEL_SYNC ->
1365+ new LearnerPullVirtualTreeView (
13971366 reconnectConfig ,
13981367 this ,
13991368 originalMap .records ,
14001369 originalState ,
14011370 reconnectState ,
14021371 nodeRemover ,
1403- parallelSync ,
1372+ new ParallelSyncTraversalOrder () ,
14041373 mapStats );
1405- }
14061374 default ->
14071375 throw new UnsupportedOperationException ("Unknown reconnect mode: " + virtualMapConfig .reconnectMode ());
14081376 };
@@ -1442,8 +1410,9 @@ public void handleReconnectLeaf(@NonNull final VirtualLeafBytes<?> leafRecord) {
14421410
14431411 public void prepareReconnectHashing (final long firstLeafPath , final long lastLeafPath ) {
14441412 assert reconnectFlusher != null : "Cannot prepare reconnect hashing, since reconnect is not started" ;
1413+ final DataSourceHashChunkPreloader hashChunkPreloader = new DataSourceHashChunkPreloader (dataSource );
14451414 // The hash listener will be responsible for flushing stuff to the reconnect data source
1446- final ReconnectHashListener hashListener = new ReconnectHashListener (reconnectFlusher );
1415+ final ReconnectHashListener hashListener = new ReconnectHashListener (reconnectFlusher , hashChunkPreloader );
14471416
14481417 // This background thread will be responsible for hashing the tree and sending the
14491418 // data to the hash listener to flush.
@@ -1452,7 +1421,7 @@ public void prepareReconnectHashing(final long firstLeafPath, final long lastLea
14521421 .setThreadName ("hasher" )
14531422 .setRunnable (() -> reconnectHashingFuture .complete (hasher .hash (
14541423 dataSource .getHashChunkHeight (),
1455- reconnectCache :: preloadHashChunk ,
1424+ hashChunkPreloader ,
14561425 reconnectIterator ,
14571426 firstLeafPath ,
14581427 lastLeafPath ,
@@ -1483,14 +1452,9 @@ public void endLearnerReconnect() {
14831452 logger .warn (RECONNECT .getMarker (), "virtual map hashing thread was never started" );
14841453 }
14851454 logger .info (RECONNECT .getMarker (), "call postInit()" );
1486- nodeRemover = null ;
14871455 originalMap = null ;
14881456 metadata = new VirtualMapMetadata (reconnectState .getSize ());
14891457 postInit ();
1490- if (reconnectCache != null ) {
1491- reconnectCache .shutdown ();
1492- reconnectCache = null ;
1493- }
14941458 } catch (ExecutionException e ) {
14951459 throw new MerkleSynchronizationException (e );
14961460 } catch (InterruptedException e ) {
0 commit comments