diff --git a/solr/core/src/java/org/apache/solr/api/ApiBag.java b/solr/core/src/java/org/apache/solr/api/ApiBag.java index 1d9a6d2a80d..70a1964263a 100644 --- a/solr/core/src/java/org/apache/solr/api/ApiBag.java +++ b/solr/core/src/java/org/apache/solr/api/ApiBag.java @@ -163,7 +163,7 @@ && getCommands().get(entry.getKey()).equals(entry.getValue())) { getCommands().put(entry.getKey(), entry.getValue()); } - // Reference to Api must be saved to to merge uncached values (i.e. 'spec') lazily + // Reference to Api must be saved to merge uncached values (i.e. 'spec') lazily if (newCommandsAdded) { combinedApis.add(api); } diff --git a/solr/core/src/java/org/apache/solr/blockcache/BlockDirectory.java b/solr/core/src/java/org/apache/solr/blockcache/BlockDirectory.java index 43f702ebce9..fdd48350f7a 100644 --- a/solr/core/src/java/org/apache/solr/blockcache/BlockDirectory.java +++ b/solr/core/src/java/org/apache/solr/blockcache/BlockDirectory.java @@ -349,7 +349,7 @@ boolean useReadCache(String name, IOContext context) { boolean useWriteCache(String name, IOContext context) { if (!blockCacheWriteEnabled || name.startsWith(IndexFileNames.PENDING_SEGMENTS)) { // for safety, don't bother caching pending commits. - // the cache does support renaming (renameCacheFile), but thats a scary optimization. + // the cache does support renaming (renameCacheFile), but that's a scary optimization. 
return false; } if (blockCacheFileTypes != null && !isCachableFile(name)) { diff --git a/solr/core/src/java/org/apache/solr/cloud/Overseer.java b/solr/core/src/java/org/apache/solr/cloud/Overseer.java index 10746a1fcda..f67d358b6ab 100644 --- a/solr/core/src/java/org/apache/solr/cloud/Overseer.java +++ b/solr/core/src/java/org/apache/solr/cloud/Overseer.java @@ -406,7 +406,7 @@ public void run() { } // Return true whenever the exception thrown by ZkStateWriter is correspond - // to a invalid state or 'bad' message (in this case, we should remove that message from queue) + // to an invalid state or 'bad' message (in this case, we should remove that message from queue) private boolean isBadMessage(Exception e) { if (e instanceof KeeperException ke) { return ke.code() == KeeperException.Code.NONODE diff --git a/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java b/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java index f2079cfcbea..2bebe2ba3ac 100644 --- a/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java +++ b/solr/core/src/java/org/apache/solr/handler/admin/RebalanceLeaders.java @@ -325,7 +325,7 @@ private void addInactiveToResults(Slice slice, Replica replica) { } // Provide some feedback to the user about what actually happened, or in this case where no action - // was necesary since this preferred replica was already the leader + // was necessary since this preferred replica was already the leader private void addAlreadyLeaderToResults(Slice slice, Replica replica) { SimpleOrderedMap> alreadyLeaders = results.get(ALREADY_LEADERS); if (alreadyLeaders == null) { diff --git a/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java b/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java index 0d54489696e..9c978be34b4 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java +++ 
b/solr/core/src/java/org/apache/solr/handler/component/SearchHandler.java @@ -580,7 +580,7 @@ private void processComponents( String reqPath = (String) req.getContext().get(PATH); if (!"/select".equals(reqPath)) { params.set(CommonParams.QT, reqPath); - } // else if path is /select, then the qt gets passed thru if set + } // else if path is /select, then the qt gets passed through if set } if (queryLimits.isLimitsEnabled()) { if (queryLimits.adjustShardRequestLimits(sreq, shard, params, rb)) { @@ -746,7 +746,7 @@ private static boolean prepareComponents( } protected String stageToString(int stage) { - // This should probably be a enum, but that change should be its own ticket. + // This should probably be an enum, but that change should be its own ticket. switch (stage) { case STAGE_START: return "START"; diff --git a/solr/core/src/java/org/apache/solr/handler/component/StatsField.java b/solr/core/src/java/org/apache/solr/handler/component/StatsField.java index 914f39f9c94..1a679760784 100644 --- a/solr/core/src/java/org/apache/solr/handler/component/StatsField.java +++ b/solr/core/src/java/org/apache/solr/handler/component/StatsField.java @@ -624,7 +624,7 @@ public static final class HllOptions { // NOTE: this explanation linked to from the java-hll jdocs... 
// https://github.com/aggregateknowledge/postgresql-hll/blob/master/README.markdown#explanation-of-parameters-and-tuning - // ..if i'm understanding the regwidth chart correctly, a value of 6 should be a enough + // ..if i'm understanding the regwidth chart correctly, a value of 6 should be enough // to support any max cardinality given that we're always dealing with hashes and // the cardinality of the set of all long values is 2**64 == 1.9e19 // diff --git a/solr/core/src/java/org/apache/solr/handler/tagger/OffsetCorrector.java b/solr/core/src/java/org/apache/solr/handler/tagger/OffsetCorrector.java index d5ad59fab97..dfa76fbf96e 100644 --- a/solr/core/src/java/org/apache/solr/handler/tagger/OffsetCorrector.java +++ b/solr/core/src/java/org/apache/solr/handler/tagger/OffsetCorrector.java @@ -98,7 +98,7 @@ public int[] correctPair(int leftOffset, int rightOffset) { // Find the ancestor tag enclosing offsetPair. And bump out left offset along the way. int iTag = startTag; for (; !tagEnclosesOffset(iTag, rightOffset); iTag = getParentTag(iTag)) { - // Ensure there is nothing except whitespace thru OpenEndOff + // Ensure there is nothing except whitespace through OpenEndOff int tagOpenEndOff = getOpenEndOff(iTag); if (hasNonWhitespace(tagOpenEndOff, leftOffset)) return null; leftOffset = getOpenStartOff(iTag); @@ -106,7 +106,7 @@ public int[] correctPair(int leftOffset, int rightOffset) { final int ancestorTag = iTag; // Bump out rightOffset until we get to ancestorTag. 
for (iTag = endTag; iTag != ancestorTag; iTag = getParentTag(iTag)) { - // Ensure there is nothing except whitespace thru CloseStartOff + // Ensure there is nothing except whitespace through CloseStartOff int tagCloseStartOff = getCloseStartOff(iTag); if (hasNonWhitespace(rightOffset, tagCloseStartOff)) return null; rightOffset = getCloseEndOff(iTag); diff --git a/solr/core/src/java/org/apache/solr/rest/schema/analysis/ManagedSynonymGraphFilterFactory.java b/solr/core/src/java/org/apache/solr/rest/schema/analysis/ManagedSynonymGraphFilterFactory.java index 1d91fb8686f..6b9e6d6bedd 100644 --- a/solr/core/src/java/org/apache/solr/rest/schema/analysis/ManagedSynonymGraphFilterFactory.java +++ b/solr/core/src/java/org/apache/solr/rest/schema/analysis/ManagedSynonymGraphFilterFactory.java @@ -433,7 +433,7 @@ protected SynonymMap loadSynonyms( ManagedSynonymParser parser = new ManagedSynonymParser((SynonymManager) res, dedup, analyzer); - // null is safe here because there's no actual parsing done against a input Reader + // null is safe here because there's no actual parsing done against an input Reader parser.parse(null); return parser.build(); } diff --git a/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java b/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java index d85932bb3ed..9f127f7d460 100644 --- a/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java +++ b/solr/core/src/java/org/apache/solr/search/CollapsingQParserPlugin.java @@ -2183,7 +2183,7 @@ public SortedDocValues getSorted(FieldInfo ignored) throws IOException { if (collapseFieldType instanceof StrField) { if (blockCollapse) { - // NOTE: for now we don't worry about wether this is a sortSpec of min/max + // NOTE: for now we don't worry about whether this is a sortSpec of min/max // groupHeadSelector, we use a "sort spec' based block collector unless/until there is // some (performance?) 
reason to specialize return new BlockOrdSortSpecCollector( @@ -2212,7 +2212,7 @@ public SortedDocValues getSorted(FieldInfo ignored) throws IOException { } else if (isNumericCollapsible(collapseFieldType)) { if (blockCollapse) { - // NOTE: for now we don't worry about wether this is a sortSpec of min/max + // NOTE: for now we don't worry about whether this is a sortSpec of min/max // groupHeadSelector, we use a "sort spec' based block collector unless/until there is // some (performance?) reason to specialize return new BlockIntSortSpecCollector( @@ -3407,7 +3407,7 @@ private BoostedDocsCollector(final IntIntHashMap boostDocsMap) { boostedDocsIdsIter = getMergeBoost(); } - /** True if there are any requested boosts (regardless of wether any have been collected) */ + /** True if there are any requested boosts (regardless of whether any have been collected) */ public boolean hasBoosts() { return hasBoosts; } diff --git a/solr/core/src/java/org/apache/solr/search/facet/FacetRangeProcessor.java b/solr/core/src/java/org/apache/solr/search/facet/FacetRangeProcessor.java index 08b034bc0cb..a3d7165ea46 100644 --- a/solr/core/src/java/org/apache/solr/search/facet/FacetRangeProcessor.java +++ b/solr/core/src/java/org/apache/solr/search/facet/FacetRangeProcessor.java @@ -49,7 +49,7 @@ class FacetRangeProcessor extends FacetProcessor { // TODO: the code paths for initial faceting, vs refinement, are very different... - // TODO: ...it might make sense to have seperate classes w/a common base? + // TODO: ...it might make sense to have separate classes w/a common base? // TODO: let FacetRange.createFacetProcessor decide which one to instantiate? 
final SchemaField sf; diff --git a/solr/core/src/java/org/apache/solr/search/facet/SlotAcc.java b/solr/core/src/java/org/apache/solr/search/facet/SlotAcc.java index 9ea0df74159..93ea6474965 100644 --- a/solr/core/src/java/org/apache/solr/search/facet/SlotAcc.java +++ b/solr/core/src/java/org/apache/solr/search/facet/SlotAcc.java @@ -834,7 +834,7 @@ public static List otherStructsOf(FacetProcessor process abstract static class CountSlotAcc extends SlotAcc implements ReadOnlyCountSlotAcc { public CountSlotAcc(FacetContext fcontext) { super(fcontext); - // assume we are the 'count' by default unless/untill our creator overrides this + // assume we are the 'count' by default unless/until our creator overrides this this.key = "count"; } diff --git a/solr/core/src/java/org/apache/solr/search/neural/AbstractVectorQParserBase.java b/solr/core/src/java/org/apache/solr/search/neural/AbstractVectorQParserBase.java index d7ab9c72938..065180d7e4a 100644 --- a/solr/core/src/java/org/apache/solr/search/neural/AbstractVectorQParserBase.java +++ b/solr/core/src/java/org/apache/solr/search/neural/AbstractVectorQParserBase.java @@ -79,7 +79,7 @@ protected static DenseVectorField getCheckedFieldType(SchemaField schemaField) { protected Query getFilterQuery() throws SolrException, SyntaxError { // Default behavior of FQ wrapping, and suitability of some local params - // depends on wether we are a sub-query or not + // depends on whether we are a sub-query or not final boolean isSubQuery = recurseCount != 0; // include/exclude tags for global fqs to wrap; diff --git a/solr/core/src/java/org/apache/solr/update/DocumentBuilder.java b/solr/core/src/java/org/apache/solr/update/DocumentBuilder.java index c11502672f7..61e1ae8d0b9 100644 --- a/solr/core/src/java/org/apache/solr/update/DocumentBuilder.java +++ b/solr/core/src/java/org/apache/solr/update/DocumentBuilder.java @@ -356,7 +356,7 @@ private static boolean addCopyFields( destinationField, fieldValue, 
destinationField.getName().equals(uniqueKeyFieldName) ? false : forInPlaceUpdate); - // record the field as having a originalFieldValue + // record the field as having an originalFieldValue usedFields.add(destinationField.getName()); used = true; } diff --git a/solr/core/src/java/org/apache/solr/update/TransactionLog.java b/solr/core/src/java/org/apache/solr/update/TransactionLog.java index 2169eaa695c..c81d9f26623 100644 --- a/solr/core/src/java/org/apache/solr/update/TransactionLog.java +++ b/solr/core/src/java/org/apache/solr/update/TransactionLog.java @@ -648,7 +648,7 @@ public void close() { try { Files.deleteIfExists(tlog); } catch (IOException e) { - // TODO: should this class care if a file couldnt be deleted? + // TODO: should this class care if a file couldn't be deleted? // this just emulates previous behavior, where only SecurityException would be handled. } } diff --git a/solr/core/src/java/org/apache/solr/update/processor/AddSchemaFieldsUpdateProcessorFactory.java b/solr/core/src/java/org/apache/solr/update/processor/AddSchemaFieldsUpdateProcessorFactory.java index daea8ce83a4..50c711458c9 100644 --- a/solr/core/src/java/org/apache/solr/update/processor/AddSchemaFieldsUpdateProcessorFactory.java +++ b/solr/core/src/java/org/apache/solr/update/processor/AddSchemaFieldsUpdateProcessorFactory.java @@ -571,7 +571,7 @@ private TypeMapping mapValueClassesToFieldType(List fields) { NEXT_TYPE_MAPPING: for (TypeMapping typeMapping : typeMappings) { for (SolrInputField field : fields) { - // We do a assert and a null check because even after SOLR-12710 is addressed + // We do an assert and a null check because even after SOLR-12710 is addressed // older SolrJ versions can send null values causing an NPE assert field.getValues() != null; if (field.getValues() != null) { diff --git a/solr/core/src/java/org/apache/solr/update/processor/AtomicUpdateDocumentMerger.java b/solr/core/src/java/org/apache/solr/update/processor/AtomicUpdateDocumentMerger.java index 
01fab007eb3..dfee7e1f8f7 100644 --- a/solr/core/src/java/org/apache/solr/update/processor/AtomicUpdateDocumentMerger.java +++ b/solr/core/src/java/org/apache/solr/update/processor/AtomicUpdateDocumentMerger.java @@ -268,7 +268,7 @@ public static Set computeInPlaceUpdatableFields(AddUpdateCommand cmd) th // not an in-place update if there are fields that are not maps return Collections.emptySet(); } - // else it's a atomic update map... + // else it's an atomic update map... Map fieldValueMap = (Map) fieldValue; for (Entry entry : fieldValueMap.entrySet()) { String op = entry.getKey(); diff --git a/solr/core/src/test/org/apache/solr/blockcache/BlockDirectoryTest.java b/solr/core/src/test/org/apache/solr/blockcache/BlockDirectoryTest.java index 8cd07e58629..dbd69bca4d9 100644 --- a/solr/core/src/test/org/apache/solr/blockcache/BlockDirectoryTest.java +++ b/solr/core/src/test/org/apache/solr/blockcache/BlockDirectoryTest.java @@ -257,7 +257,7 @@ public static void rm(Path file) { try { IOUtils.rm(file); } catch (Throwable ignored) { - // TODO: should this class care if a file couldnt be deleted? + // TODO: should this class care if a file couldn't be deleted? // this just emulates previous behavior, where only SecurityException would be handled. 
} } diff --git a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeWithPullReplicasTest.java b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeWithPullReplicasTest.java index 6800052434e..9082928a828 100644 --- a/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeWithPullReplicasTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeWithPullReplicasTest.java @@ -280,7 +280,7 @@ public void test() throws Exception { commit(); - // TODO: assert we didnt kill everyone + // TODO: assert we didn't kill everyone zkStateReader.updateLiveNodes(); assertTrue(zkStateReader.getClusterState().getLiveNodes().size() > 0); diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCloudPseudoReturnFields.java b/solr/core/src/test/org/apache/solr/cloud/TestCloudPseudoReturnFields.java index c9c2e03a62c..483c9b9ecf5 100644 --- a/solr/core/src/test/org/apache/solr/cloud/TestCloudPseudoReturnFields.java +++ b/solr/core/src/test/org/apache/solr/cloud/TestCloudPseudoReturnFields.java @@ -72,7 +72,7 @@ public class TestCloudPseudoReturnFields extends SolrCloudTestCase { @BeforeClass public static void createMiniSolrCloudCluster() throws Exception { - // replication factor will impact wether we expect a list of urls from the '[shard]' + // replication factor will impact whether we expect a list of urls from the '[shard]' // augmenter... repFactor = usually() ? 1 : 2; // ... and we definitely want to ensure forwarded requests to other shards work ... 
@@ -218,7 +218,7 @@ public void testMultiValued() throws Exception { // that way we can first sanity check a single value in a multivalued field is returned // correctly as a "List" of one element, *AND* then we could be testing that a (single valued) // pseudo-field correctly overrides that actual (real) value in a multivalued field (ie: not - // returning a an List) + // returning a List) // // (NOTE: not doing this yet due to how it will impact most other tests, many of which are // currently @AwaitsFix status) diff --git a/solr/core/src/test/org/apache/solr/schema/DocValuesMissingTest.java b/solr/core/src/test/org/apache/solr/schema/DocValuesMissingTest.java index 244e162802a..dfbebaa72dc 100644 --- a/solr/core/src/test/org/apache/solr/schema/DocValuesMissingTest.java +++ b/solr/core/src/test/org/apache/solr/schema/DocValuesMissingTest.java @@ -436,7 +436,7 @@ public void testDynDateMissingFacet() { @Test public void testStringSort() { - // note: cant use checkSortMissingDefault because + // note: can't use checkSortMissingDefault because // nothing sorts lower then the default of "" for (String field : new String[] {"stringdv", "dyn_stringdv"}) { assertU(adoc("id", "0")); // missing @@ -520,7 +520,7 @@ public void testStringMissingFacet() { /** bool (and dynamic bool) with default lucene sort (treats as "") */ @Test public void testBoolSort() { - // note: cant use checkSortMissingDefault because + // note: can't use checkSortMissingDefault because // nothing sorts lower then the default of "" and // bool fields are, at root, string fields. 
for (String field : new String[] {"booldv", "dyn_booldv"}) { diff --git a/solr/core/src/test/org/apache/solr/update/SoftAutoCommitTest.java b/solr/core/src/test/org/apache/solr/update/SoftAutoCommitTest.java index 592fc824b1f..c9134c6c43b 100644 --- a/solr/core/src/test/org/apache/solr/update/SoftAutoCommitTest.java +++ b/solr/core/src/test/org/apache/solr/update/SoftAutoCommitTest.java @@ -107,7 +107,7 @@ public void testSoftAndHardCommitMaxDocs() throws Exception { softTracker.setTimeUpperBound(-1); hardTracker.setDocsUpperBound(hardCommitMaxDocs); hardTracker.setTimeUpperBound(-1); - // simplify whats going on by only having soft auto commits trigger new searchers + // simplify what's going on by only having soft auto commits trigger new searchers hardTracker.setOpenSearcher(false); // Note: doc id counting starts at 0, see comment at start of test regarding "upper bound" @@ -199,7 +199,7 @@ private void doTestSoftAndHardCommitMaxTimeMixedAdds(final CommitWithinType comm hardTracker.setTimeUpperBound( commitWithinType.equals(CommitWithinType.HARD) ? -1 : hardCommitWaitMillis); hardTracker.setDocsUpperBound(-1); - // simplify whats going on by only having soft auto commits trigger new searchers + // simplify what's going on by only having soft auto commits trigger new searchers hardTracker.setOpenSearcher(false); // Add a single document diff --git a/solr/solrj-zookeeper/src/java/org/apache/solr/common/cloud/CollectionPropertiesZkStateReader.java b/solr/solrj-zookeeper/src/java/org/apache/solr/common/cloud/CollectionPropertiesZkStateReader.java index 4242e0beee9..083f7ec0def 100644 --- a/solr/solrj-zookeeper/src/java/org/apache/solr/common/cloud/CollectionPropertiesZkStateReader.java +++ b/solr/solrj-zookeeper/src/java/org/apache/solr/common/cloud/CollectionPropertiesZkStateReader.java @@ -242,7 +242,7 @@ void refreshAndWatch(boolean notifyWatchers) { collectionPropsObservers.remove(coll); // This is the one time we know it's safe to throw this out. 
We just failed to set the - // watch due to an NoNodeException, so it isn't held by ZK and can't re-set itself due + // watch due to a NoNodeException, so it isn't held by ZK and can't re-set itself due // to an update. collectionPropsWatchers.remove(coll); } diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java b/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java index 50ab432931c..0756e132585 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/SolrExampleTests.java @@ -1474,7 +1474,7 @@ public void testPivotFacetsStats() throws Exception { "{!key=pivot_key stats=s1}features,manu,cat,inStock" }) { - // for any of these pivot params, the assertions we check should be teh same + // for any of these pivot params, the assertions we check should be the same // (we stop asserting at the "manu" level) SolrQuery query = new SolrQuery("*:*"); diff --git a/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java b/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java index 2ae2398d98b..cc4ed5b8369 100644 --- a/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java +++ b/solr/test-framework/src/java/org/apache/solr/BaseDistributedSearchTestCase.java @@ -983,7 +983,7 @@ protected void compareSolrResponses(SolrResponse a, SolrResponse b) { protected void compareResponses(QueryResponse a, QueryResponse b) { if (System.getProperty("remove.version.field") != null) { - // we don't care if one has a version and the other doesnt - + // we don't care if one has a version and the other doesn't - // control vs distrib // TODO: this should prob be done by adding an ignore on _version_ rather than mutating the // responses? 
diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractChaosMonkeyNothingIsSafeTestBase.java b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractChaosMonkeyNothingIsSafeTestBase.java index a80134a4252..fba15eb5416 100644 --- a/solr/test-framework/src/java/org/apache/solr/cloud/AbstractChaosMonkeyNothingIsSafeTestBase.java +++ b/solr/test-framework/src/java/org/apache/solr/cloud/AbstractChaosMonkeyNothingIsSafeTestBase.java @@ -246,7 +246,7 @@ public void test() throws Exception { commit(); - // TODO: assert we didnt kill everyone + // TODO: assert we didn't kill everyone zkStateReader.updateLiveNodes(); assertTrue(zkStateReader.getClusterState().getLiveNodes().size() > 0); diff --git a/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractIncrementalBackupTest.java b/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractIncrementalBackupTest.java index 1d7276a2e22..9cf6c15e0aa 100644 --- a/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractIncrementalBackupTest.java +++ b/solr/test-framework/src/java/org/apache/solr/cloud/api/collections/AbstractIncrementalBackupTest.java @@ -714,7 +714,7 @@ private void assertFolderAreSame(URI uri1, URI uri2) throws IOException { } public void verify(List newFilesCopiedOver) throws IOException { - // Verify zk files are reuploaded to a appropriate each time a backup is called + // Verify zk files are reuploaded to an appropriate location each time a backup is called // TODO make a little change to zk files and make sure that backed up files match with zk data BackupId prevBackupId = new BackupId(Math.max(0, numBackup - 1)); diff --git a/solr/test-framework/src/java/org/apache/solr/util/LogListener.java b/solr/test-framework/src/java/org/apache/solr/util/LogListener.java index 9bbd08e6e09..17039025517 100644 --- a/solr/test-framework/src/java/org/apache/solr/util/LogListener.java +++ 
b/solr/test-framework/src/java/org/apache/solr/util/LogListener.java @@ -200,7 +200,7 @@ private LogListener(final String name, final String loggerName, final Level leve config.addLogger(loggerName, loggerConfig); } - // Regardless of wether loggerConfig exactly matches loggerName, or is an ancestor, if it's + // Regardless of whether loggerConfig exactly matches loggerName, or is an ancestor, if its // level is (strictly) more specific // then our configured level, it will be impossible to listen for the events we want - so track // the original level and modify as needed...