179 | 179 | import static org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo; |
180 | 180 | import static org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.canonicalName; |
181 | 181 | import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING; |
| 182 | +import static org.elasticsearch.repositories.ProjectRepo.projectRepoString; |
182 | 183 |
|
183 | 184 | /** |
184 | 185 | * BlobStore - based implementation of Snapshot Repository |
@@ -1522,8 +1523,8 @@ private void cleanupUnlinkedRootAndIndicesBlobs(RepositoryData newRepositoryData |
1522 | 1523 | } catch (Exception e) { |
1523 | 1524 | logger.warn( |
1524 | 1525 | () -> format( |
1525 | | - "[%s] The following blobs are no longer part of any snapshot [%s] but failed to remove them", |
1526 | | - metadata.name(), |
| 1526 | + "%s The following blobs are no longer part of any snapshot [%s] but failed to remove them", |
| 1527 | + toStringShort(), |
1527 | 1528 | staleRootBlobs |
1528 | 1529 | ), |
1529 | 1530 | e |
@@ -1551,8 +1552,8 @@ private void cleanupUnlinkedRootAndIndicesBlobs(RepositoryData newRepositoryData |
1551 | 1552 | logger.debug("[{}] Cleaned up stale index [{}]", metadata.name(), indexId); |
1552 | 1553 | } catch (IOException e) { |
1553 | 1554 | logger.warn(() -> format(""" |
1554 | | - [%s] index %s is no longer part of any snapshot in the repository, \ |
1555 | | - but failed to clean up its index folder""", metadata.name(), indexId), e); |
| 1555 | + %s index %s is no longer part of any snapshot in the repository, \ |
| 1556 | + but failed to clean up its index folder""", toStringShort(), indexId), e); |
1556 | 1557 | } |
1557 | 1558 | })); |
1558 | 1559 | } |
@@ -1625,7 +1626,7 @@ private void logStaleRootLevelBlobs( |
1625 | 1626 | .collect(Collectors.toSet()); |
1626 | 1627 | final List<String> blobsToLog = blobsToDelete.stream().filter(b -> blobNamesToIgnore.contains(b) == false).toList(); |
1627 | 1628 | if (blobsToLog.isEmpty() == false) { |
1628 | | - logger.info("[{}] Found stale root level blobs {}. Cleaning them up", metadata.name(), blobsToLog); |
| 1629 | + logger.info("{} Found stale root level blobs {}. Cleaning them up", toStringShort(), blobsToLog); |
1629 | 1630 | } |
1630 | 1631 | } |
1631 | 1632 | } |
@@ -2153,9 +2154,9 @@ private RateLimiter getRateLimiter( |
2153 | 2154 | if (warnIfOverRecovery && effectiveRecoverySpeed.getBytes() > 0) { |
2154 | 2155 | if (maxConfiguredBytesPerSec.getBytes() > effectiveRecoverySpeed.getBytes()) { |
2155 | 2156 | logger.warn( |
2156 | | - "repository [{}] has a rate limit [{}={}] per second which is above the effective recovery rate limit " |
| 2157 | + "repository {} has a rate limit [{}={}] per second which is above the effective recovery rate limit " |
2157 | 2158 | + "[{}={}] per second, thus the repository rate limit will be superseded by the recovery rate limit", |
2158 | | - metadata.name(), |
| 2159 | + toStringShort(), |
2159 | 2160 | settingKey, |
2160 | 2161 | maxConfiguredBytesPerSec, |
2161 | 2162 | INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), |
@@ -2362,7 +2363,7 @@ public void onResponse(RepositoryData repositoryData) { |
2362 | 2363 | @Override |
2363 | 2364 | public void onFailure(Exception e) { |
2364 | 2365 | logger.warn( |
2365 | | - () -> format("[%s] Exception when initializing repository generation in cluster state", metadata.name()), |
| 2366 | + () -> format("%s Exception when initializing repository generation in cluster state", toStringShort()), |
2366 | 2367 | e |
2367 | 2368 | ); |
2368 | 2369 | acquireAndClearRepoDataInitialized().onFailure(e); |
@@ -2611,56 +2612,53 @@ private static String previousWriterMessage(@Nullable Tuple<Long, String> previo |
2611 | 2612 | private void markRepoCorrupted(long corruptedGeneration, Exception originalException, ActionListener<Void> listener) { |
2612 | 2613 | assert corruptedGeneration != RepositoryData.UNKNOWN_REPO_GEN; |
2613 | 2614 | assert bestEffortConsistency == false; |
2614 | | - logger.warn(() -> "Marking repository [" + metadata.name() + "] as corrupted", originalException); |
2615 | | - submitUnbatchedTask( |
2616 | | - "mark repository corrupted [" + metadata.name() + "][" + corruptedGeneration + "]", |
2617 | | - new ClusterStateUpdateTask() { |
2618 | | - @Override |
2619 | | - public ClusterState execute(ClusterState currentState) { |
2620 | | - final var project = currentState.metadata().getProject(projectId); |
2621 | | - final RepositoriesMetadata state = RepositoriesMetadata.get(project); |
2622 | | - final RepositoryMetadata repoState = state.repository(metadata.name()); |
2623 | | - if (repoState.generation() != corruptedGeneration) { |
2624 | | - throw new IllegalStateException( |
2625 | | - "Tried to mark repo generation [" |
2626 | | - + corruptedGeneration |
2627 | | - + "] as corrupted but its state concurrently changed to [" |
2628 | | - + repoState |
2629 | | - + "]" |
2630 | | - ); |
2631 | | - } |
2632 | | - return ClusterState.builder(currentState) |
2633 | | - .putProjectMetadata( |
2634 | | - ProjectMetadata.builder(project) |
2635 | | - .putCustom( |
2636 | | - RepositoriesMetadata.TYPE, |
2637 | | - state.withUpdatedGeneration( |
2638 | | - metadata.name(), |
2639 | | - RepositoryData.CORRUPTED_REPO_GEN, |
2640 | | - repoState.pendingGeneration() |
2641 | | - ) |
2642 | | - ) |
2643 | | - ) |
2644 | | - .build(); |
2645 | | - } |
2646 | | - |
2647 | | - @Override |
2648 | | - public void onFailure(Exception e) { |
2649 | | - listener.onFailure( |
2650 | | - new RepositoryException( |
2651 | | - metadata.name(), |
2652 | | - "Failed marking repository state as corrupted", |
2653 | | - ExceptionsHelper.useOrSuppress(e, originalException) |
2654 | | - ) |
| 2615 | + logger.warn(() -> "Marking repository " + toStringShort() + " as corrupted", originalException); |
| 2616 | + submitUnbatchedTask("mark repository corrupted " + toStringShort() + "[" + corruptedGeneration + "]", new ClusterStateUpdateTask() { |
| 2617 | + @Override |
| 2618 | + public ClusterState execute(ClusterState currentState) { |
| 2619 | + final var project = currentState.metadata().getProject(projectId); |
| 2620 | + final RepositoriesMetadata state = RepositoriesMetadata.get(project); |
| 2621 | + final RepositoryMetadata repoState = state.repository(metadata.name()); |
| 2622 | + if (repoState.generation() != corruptedGeneration) { |
| 2623 | + throw new IllegalStateException( |
| 2624 | + "Tried to mark repo generation [" |
| 2625 | + + corruptedGeneration |
| 2626 | + + "] as corrupted but its state concurrently changed to [" |
| 2627 | + + repoState |
| 2628 | + + "]" |
2655 | 2629 | ); |
2656 | 2630 | } |
| 2631 | + return ClusterState.builder(currentState) |
| 2632 | + .putProjectMetadata( |
| 2633 | + ProjectMetadata.builder(project) |
| 2634 | + .putCustom( |
| 2635 | + RepositoriesMetadata.TYPE, |
| 2636 | + state.withUpdatedGeneration( |
| 2637 | + metadata.name(), |
| 2638 | + RepositoryData.CORRUPTED_REPO_GEN, |
| 2639 | + repoState.pendingGeneration() |
| 2640 | + ) |
| 2641 | + ) |
| 2642 | + ) |
| 2643 | + .build(); |
| 2644 | + } |
2657 | 2645 |
|
2658 | | - @Override |
2659 | | - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { |
2660 | | - listener.onResponse(null); |
2661 | | - } |
| 2646 | + @Override |
| 2647 | + public void onFailure(Exception e) { |
| 2648 | + listener.onFailure( |
| 2649 | + new RepositoryException( |
| 2650 | + metadata.name(), |
| 2651 | + "Failed marking repository state as corrupted", |
| 2652 | + ExceptionsHelper.useOrSuppress(e, originalException) |
| 2653 | + ) |
| 2654 | + ); |
2662 | 2655 | } |
2663 | | - ); |
| 2656 | + |
| 2657 | + @Override |
| 2658 | + public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { |
| 2659 | + listener.onResponse(null); |
| 2660 | + } |
| 2661 | + }); |
2664 | 2662 | } |
2665 | 2663 |
|
2666 | 2664 | private RepositoryData getRepositoryData(long indexGen) { |
@@ -2786,9 +2784,9 @@ public ClusterState execute(ClusterState currentState) { |
2786 | 2784 | final boolean uninitializedMeta = meta.generation() == RepositoryData.UNKNOWN_REPO_GEN || bestEffortConsistency; |
2787 | 2785 | if (uninitializedMeta == false && meta.pendingGeneration() != genInState) { |
2788 | 2786 | logger.info( |
2789 | | - "Trying to write new repository data over unfinished write, repo [{}] is at " |
| 2787 | + "Trying to write new repository data over unfinished write, repo {} is at " |
2790 | 2788 | + "safe generation [{}] and pending generation [{}]", |
2791 | | - meta.name(), |
| 2789 | + toStringShort(), |
2792 | 2790 | genInState, |
2793 | 2791 | meta.pendingGeneration() |
2794 | 2792 | ); |
@@ -2920,9 +2918,9 @@ public void onFailure(Exception e) { |
2920 | 2918 | assert newRepositoryData.getUuid().equals(RepositoryData.MISSING_UUID) == false; |
2921 | 2919 | logger.info( |
2922 | 2920 | Strings.format( |
2923 | | - "Generated new repository UUID [%s] for repository [%s] in generation [%d]", |
| 2921 | + "Generated new repository UUID [%s] for repository %s in generation [%d]", |
2924 | 2922 | newRepositoryData.getUuid(), |
2925 | | - metadata.name(), |
| 2923 | + toStringShort(), |
2926 | 2924 | newGen |
2927 | 2925 | ) |
2928 | 2926 | ); |
@@ -3211,7 +3209,7 @@ private long latestGeneration(Collection<String> rootBlobs) { |
3211 | 3209 | } catch (NumberFormatException nfe) { |
3212 | 3210 | // the index- blob wasn't of the format index-N where N is a number, |
3213 | 3211 | // no idea what this blob is but it doesn't belong in the repository! |
3214 | | - logger.warn("[{}] Unknown blob in the repository: {}", metadata.name(), blobName); |
| 3212 | + logger.warn("[{}] Unknown blob in the repository: {}", toStringShort(), blobName); |
3215 | 3213 | } |
3216 | 3214 | } |
3217 | 3215 | return latest; |
@@ -3890,7 +3888,11 @@ public void verify(String seed, DiscoveryNode localNode) { |
3890 | 3888 |
|
3891 | 3889 | @Override |
3892 | 3890 | public String toString() { |
3893 | | - return "BlobStoreRepository[" + "[" + metadata.name() + "], [" + blobStore.get() + ']' + ']'; |
| 3891 | + return "BlobStoreRepository[" + toStringShort() + ", [" + blobStore.get() + ']' + ']'; |
| 3892 | + } |
| 3893 | + |
| 3894 | + private String toStringShort() { |
| 3895 | + return projectRepoString(projectId, metadata.name()); |
3894 | 3896 | } |
3895 | 3897 |
|
3896 | 3898 | /** |
@@ -4011,10 +4013,10 @@ private Tuple<BlobStoreIndexShardSnapshots, ShardGeneration> buildBlobStoreIndex |
4011 | 4013 | // keeping hold of its data blobs. |
4012 | 4014 | try { |
4013 | 4015 | final var message = Strings.format( |
4014 | | - "index %s shard generation [%s] in [%s][%s] not found - falling back to reading all shard snapshots", |
| 4016 | + "index %s shard generation [%s] in %s[%s] not found - falling back to reading all shard snapshots", |
4015 | 4017 | indexId, |
4016 | 4018 | generation, |
4017 | | - metadata.name(), |
| 4019 | + toStringShort(), |
4018 | 4020 | shardContainer.path() |
4019 | 4021 | ); |
4020 | 4022 | logger.error(message, noSuchFileException); |
@@ -4055,17 +4057,17 @@ private Tuple<BlobStoreIndexShardSnapshots, ShardGeneration> buildBlobStoreIndex |
4055 | 4057 | } |
4056 | 4058 | } |
4057 | 4059 | logger.error( |
4058 | | - "read shard snapshots [{}] due to missing shard generation [{}] for index {} in [{}][{}]", |
| 4060 | + "read shard snapshots [{}] due to missing shard generation [{}] for index {} in {}[{}]", |
4059 | 4061 | messageBuilder, |
4060 | 4062 | generation, |
4061 | 4063 | indexId, |
4062 | | - metadata.name(), |
| 4064 | + toStringShort(), |
4063 | 4065 | shardContainer.path() |
4064 | 4066 | ); |
4065 | 4067 | return new Tuple<>(blobStoreIndexShardSnapshots, generation); |
4066 | 4068 | } catch (Exception fallbackException) { |
4067 | 4069 | logger.error( |
4068 | | - Strings.format("failed while reading all shard snapshots from [%s][%s]", metadata.name(), shardContainer.path()), |
| 4070 | + Strings.format("failed while reading all shard snapshots from %s[%s]", toStringShort(), shardContainer.path()), |
4069 | 4071 | fallbackException |
4070 | 4072 | ); |
4071 | 4073 | noSuchFileException.addSuppressed(fallbackException); |
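
For context only (not part of the diff above): a minimal, self-contained sketch of the logging-prefix pattern this change introduces. The real helper is org.elasticsearch.repositories.ProjectRepo#projectRepoString; its exact output format is an assumption here. The visible point in the diff is that toStringShort() returns a string that already carries its own brackets, so log templates drop the surrounding "[...]" that previously wrapped metadata.name().

// Illustrative sketch, assuming a "[project/repo]" rendering; the stand-in helper below
// is hypothetical and only mirrors the shape of projectRepoString(projectId, repoName).
public class ProjectRepoLoggingSketch {

    // Hypothetical stand-in for ProjectRepo.projectRepoString: note the brackets are
    // produced by the helper itself, not by the log message template.
    static String projectRepoString(String projectId, String repoName) {
        return "[" + projectId + "/" + repoName + "]"; // assumed format
    }

    public static void main(String[] args) {
        String projectId = "my-project";  // hypothetical values
        String repoName = "my-backups";

        // Before: the template supplies the brackets around the bare repository name.
        String before = String.format("repository [%s] has a rate limit ...", repoName);

        // After: toStringShort() (i.e. projectRepoString) supplies a bracketed,
        // project-qualified identifier, so the template no longer adds its own brackets.
        String after = String.format("repository %s has a rate limit ...", projectRepoString(projectId, repoName));

        System.out.println(before); // repository [my-backups] has a rate limit ...
        System.out.println(after);  // repository [my-project/my-backups] has a rate limit ...
    }
}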
|