Skip to content

Commit 33ea311

Browse files
authored
Reducing error-level stack trace logging for normal events in GeoIpDownloader (#114924)
1 parent 8b87969 commit 33ea311

File tree

3 files changed

+25
-26
lines changed

3 files changed

+25
-26
lines changed

docs/changelog/114924.yaml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
pr: 114924
2+
summary: Reducing error-level stack trace logging for normal events in `GeoIpDownloader`
3+
area: Ingest Node
4+
type: bug
5+
issues: []

modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,6 @@
1111

1212
import org.apache.logging.log4j.LogManager;
1313
import org.apache.logging.log4j.Logger;
14-
import org.elasticsearch.ElasticsearchException;
1514
import org.elasticsearch.action.ActionListener;
1615
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
1716
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
@@ -139,11 +138,18 @@ void updateDatabases() throws IOException {
139138
if (geoipIndex != null) {
140139
logger.trace("The {} index is not null", GeoIpDownloader.DATABASES_INDEX);
141140
if (clusterState.getRoutingTable().index(geoipIndex.getWriteIndex()).allPrimaryShardsActive() == false) {
142-
throw new ElasticsearchException("not all primary shards of [" + DATABASES_INDEX + "] index are active");
141+
logger.debug(
142+
"Not updating geoip database because not all primary shards of the [" + DATABASES_INDEX + "] index are active."
143+
);
144+
return;
143145
}
144146
var blockException = clusterState.blocks().indexBlockedException(ClusterBlockLevel.WRITE, geoipIndex.getWriteIndex().getName());
145147
if (blockException != null) {
146-
throw blockException;
148+
logger.debug(
149+
"Not updating geoip database because there is a write block on the " + geoipIndex.getWriteIndex().getName() + " index",
150+
blockException
151+
);
152+
return;
147153
}
148154
}
149155
if (eagerDownloadSupplier.get() || atLeastOneGeoipProcessorSupplier.get()) {

modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java

Lines changed: 11 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,6 @@
99

1010
package org.elasticsearch.ingest.geoip;
1111

12-
import org.elasticsearch.ElasticsearchException;
1312
import org.elasticsearch.action.ActionListener;
1413
import org.elasticsearch.action.ActionRequest;
1514
import org.elasticsearch.action.ActionResponse;
@@ -25,11 +24,9 @@
2524
import org.elasticsearch.action.index.TransportIndexAction;
2625
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
2726
import org.elasticsearch.cluster.ClusterState;
28-
import org.elasticsearch.cluster.block.ClusterBlockException;
2927
import org.elasticsearch.cluster.block.ClusterBlocks;
3028
import org.elasticsearch.cluster.metadata.IndexMetadata;
3129
import org.elasticsearch.cluster.service.ClusterService;
32-
import org.elasticsearch.common.ReferenceDocs;
3330
import org.elasticsearch.common.settings.ClusterSettings;
3431
import org.elasticsearch.common.settings.Settings;
3532
import org.elasticsearch.index.reindex.BulkByScrollResponse;
@@ -583,37 +580,28 @@ void processDatabase(Map<String, Object> databaseInfo) {
583580
assertFalse(it.hasNext());
584581
}
585582

586-
public void testUpdateDatabasesWriteBlock() {
583+
public void testUpdateDatabasesWriteBlock() throws IOException {
584+
/*
585+
* Here we make sure that we bail out before making an httpClient request if there is write block on the .geoip_databases index
586+
*/
587587
ClusterState state = createClusterState(new PersistentTasksCustomMetadata(1L, Map.of()));
588588
var geoIpIndex = state.getMetadata().getIndicesLookup().get(GeoIpDownloader.DATABASES_INDEX).getWriteIndex().getName();
589589
state = ClusterState.builder(state)
590590
.blocks(new ClusterBlocks.Builder().addIndexBlock(geoIpIndex, IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK))
591591
.build();
592592
when(clusterService.state()).thenReturn(state);
593-
var e = expectThrows(ClusterBlockException.class, () -> geoIpDownloader.updateDatabases());
594-
assertThat(
595-
e.getMessage(),
596-
equalTo(
597-
"index ["
598-
+ geoIpIndex
599-
+ "] blocked by: [TOO_MANY_REQUESTS/12/disk usage exceeded flood-stage watermark, "
600-
+ "index has read-only-allow-delete block; for more information, see "
601-
+ ReferenceDocs.FLOOD_STAGE_WATERMARK
602-
+ "];"
603-
)
604-
);
593+
geoIpDownloader.updateDatabases();
605594
verifyNoInteractions(httpClient);
606595
}
607596

608-
public void testUpdateDatabasesIndexNotReady() {
597+
public void testUpdateDatabasesIndexNotReady() throws IOException {
598+
/*
599+
* Here we make sure that we bail out before making an httpClient request if there are unallocated shards on the .geoip_databases
600+
* index
601+
*/
609602
ClusterState state = createClusterState(new PersistentTasksCustomMetadata(1L, Map.of()), true);
610-
var geoIpIndex = state.getMetadata().getIndicesLookup().get(GeoIpDownloader.DATABASES_INDEX).getWriteIndex().getName();
611-
state = ClusterState.builder(state)
612-
.blocks(new ClusterBlocks.Builder().addIndexBlock(geoIpIndex, IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK))
613-
.build();
614603
when(clusterService.state()).thenReturn(state);
615-
var e = expectThrows(ElasticsearchException.class, () -> geoIpDownloader.updateDatabases());
616-
assertThat(e.getMessage(), equalTo("not all primary shards of [.geoip_databases] index are active"));
604+
geoIpDownloader.updateDatabases();
617605
verifyNoInteractions(httpClient);
618606
}
619607

0 commit comments

Comments (0)