Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion solr/core/src/java/org/apache/solr/api/ApiBag.java
Original file line number Diff line number Diff line change
Expand Up @@ -163,7 +163,7 @@ && getCommands().get(entry.getKey()).equals(entry.getValue())) {
getCommands().put(entry.getKey(), entry.getValue());
}

// Reference to Api must be saved to to merge uncached values (i.e. 'spec') lazily
// Reference to Api must be saved to merge uncached values (i.e. 'spec') lazily
if (newCommandsAdded) {
combinedApis.add(api);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -349,7 +349,7 @@ boolean useReadCache(String name, IOContext context) {
boolean useWriteCache(String name, IOContext context) {
if (!blockCacheWriteEnabled || name.startsWith(IndexFileNames.PENDING_SEGMENTS)) {
// for safety, don't bother caching pending commits.
// the cache does support renaming (renameCacheFile), but thats a scary optimization.
// the cache does support renaming (renameCacheFile), but that's a scary optimization.
return false;
}
if (blockCacheFileTypes != null && !isCachableFile(name)) {
Expand Down
2 changes: 1 addition & 1 deletion solr/core/src/java/org/apache/solr/cloud/Overseer.java
Original file line number Diff line number Diff line change
Expand Up @@ -406,7 +406,7 @@ public void run() {
}

// Return true whenever the exception thrown by ZkStateWriter corresponds
// to a invalid state or 'bad' message (in this case, we should remove that message from queue)
// to an invalid state or 'bad' message (in this case, we should remove that message from queue)
private boolean isBadMessage(Exception e) {
if (e instanceof KeeperException ke) {
return ke.code() == KeeperException.Code.NONODE
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -325,7 +325,7 @@ private void addInactiveToResults(Slice slice, Replica replica) {
}

// Provide some feedback to the user about what actually happened, or in this case where no action
// was necesary since this preferred replica was already the leader
// was necessary since this preferred replica was already the leader
private void addAlreadyLeaderToResults(Slice slice, Replica replica) {
SimpleOrderedMap<SimpleOrderedMap<String>> alreadyLeaders = results.get(ALREADY_LEADERS);
if (alreadyLeaders == null) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -580,7 +580,7 @@ private void processComponents(
String reqPath = (String) req.getContext().get(PATH);
if (!"/select".equals(reqPath)) {
params.set(CommonParams.QT, reqPath);
} // else if path is /select, then the qt gets passed thru if set
} // else if path is /select, then the qt gets passed through if set
}
if (queryLimits.isLimitsEnabled()) {
if (queryLimits.adjustShardRequestLimits(sreq, shard, params, rb)) {
Expand Down Expand Up @@ -746,7 +746,7 @@ private static boolean prepareComponents(
}

protected String stageToString(int stage) {
// This should probably be a enum, but that change should be its own ticket.
// This should probably be an enum, but that change should be its own ticket.
switch (stage) {
case STAGE_START:
return "START";
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -624,7 +624,7 @@ public static final class HllOptions {

// NOTE: this explanation linked to from the java-hll jdocs...
// https://github.com/aggregateknowledge/postgresql-hll/blob/master/README.markdown#explanation-of-parameters-and-tuning
// ..if i'm understanding the regwidth chart correctly, a value of 6 should be a enough
// ..if i'm understanding the regwidth chart correctly, a value of 6 should be enough
// to support any max cardinality given that we're always dealing with hashes and
// the cardinality of the set of all long values is 2**64 == 1.9e19
//
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -98,15 +98,15 @@ public int[] correctPair(int leftOffset, int rightOffset) {
// Find the ancestor tag enclosing offsetPair. And bump out left offset along the way.
int iTag = startTag;
for (; !tagEnclosesOffset(iTag, rightOffset); iTag = getParentTag(iTag)) {
// Ensure there is nothing except whitespace thru OpenEndOff
// Ensure there is nothing except whitespace through OpenEndOff
int tagOpenEndOff = getOpenEndOff(iTag);
if (hasNonWhitespace(tagOpenEndOff, leftOffset)) return null;
leftOffset = getOpenStartOff(iTag);
}
final int ancestorTag = iTag;
// Bump out rightOffset until we get to ancestorTag.
for (iTag = endTag; iTag != ancestorTag; iTag = getParentTag(iTag)) {
// Ensure there is nothing except whitespace thru CloseStartOff
// Ensure there is nothing except whitespace through CloseStartOff
int tagCloseStartOff = getCloseStartOff(iTag);
if (hasNonWhitespace(rightOffset, tagCloseStartOff)) return null;
rightOffset = getCloseEndOff(iTag);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -433,7 +433,7 @@ protected SynonymMap loadSynonyms(

ManagedSynonymParser parser =
new ManagedSynonymParser((SynonymManager) res, dedup, analyzer);
// null is safe here because there's no actual parsing done against a input Reader
// null is safe here because there's no actual parsing done against an input Reader
parser.parse(null);
return parser.build();
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2183,7 +2183,7 @@ public SortedDocValues getSorted(FieldInfo ignored) throws IOException {

if (collapseFieldType instanceof StrField) {
if (blockCollapse) {
// NOTE: for now we don't worry about wether this is a sortSpec of min/max
// NOTE: for now we don't worry about whether this is a sortSpec of min/max
// groupHeadSelector, we use a "sort spec" based block collector unless/until there is
// some (performance?) reason to specialize
return new BlockOrdSortSpecCollector(
Expand Down Expand Up @@ -2212,7 +2212,7 @@ public SortedDocValues getSorted(FieldInfo ignored) throws IOException {
} else if (isNumericCollapsible(collapseFieldType)) {

if (blockCollapse) {
// NOTE: for now we don't worry about wether this is a sortSpec of min/max
// NOTE: for now we don't worry about whether this is a sortSpec of min/max
// groupHeadSelector, we use a "sort spec" based block collector unless/until there is
// some (performance?) reason to specialize
return new BlockIntSortSpecCollector(
Expand Down Expand Up @@ -3407,7 +3407,7 @@ private BoostedDocsCollector(final IntIntHashMap boostDocsMap) {
boostedDocsIdsIter = getMergeBoost();
}

/** True if there are any requested boosts (regardless of wether any have been collected) */
/** True if there are any requested boosts (regardless of whether any have been collected) */
public boolean hasBoosts() {
return hasBoosts;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@

class FacetRangeProcessor extends FacetProcessor<FacetRange> {
// TODO: the code paths for initial faceting, vs refinement, are very different...
// TODO: ...it might make sense to have seperate classes w/a common base?
// TODO: ...it might make sense to have separate classes w/a common base?
// TODO: let FacetRange.createFacetProcessor decide which one to instantiate?

final SchemaField sf;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -834,7 +834,7 @@ public static List<SweepCountAccStruct> otherStructsOf(FacetProcessor<?> process
abstract static class CountSlotAcc extends SlotAcc implements ReadOnlyCountSlotAcc {
public CountSlotAcc(FacetContext fcontext) {
super(fcontext);
// assume we are the 'count' by default unless/untill our creator overrides this
// assume we are the 'count' by default unless/until our creator overrides this
this.key = "count";
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ protected static DenseVectorField getCheckedFieldType(SchemaField schemaField) {
protected Query getFilterQuery() throws SolrException, SyntaxError {

// Default behavior of FQ wrapping, and suitability of some local params
// depends on wether we are a sub-query or not
// depends on whether we are a sub-query or not
final boolean isSubQuery = recurseCount != 0;

// include/exclude tags for global fqs to wrap;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -356,7 +356,7 @@ private static boolean addCopyFields(
destinationField,
fieldValue,
destinationField.getName().equals(uniqueKeyFieldName) ? false : forInPlaceUpdate);
// record the field as having a originalFieldValue
// record the field as having an originalFieldValue
usedFields.add(destinationField.getName());
used = true;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -648,7 +648,7 @@ public void close() {
try {
Files.deleteIfExists(tlog);
} catch (IOException e) {
// TODO: should this class care if a file couldnt be deleted?
// TODO: should this class care if a file couldn't be deleted?
// this just emulates previous behavior, where only SecurityException would be handled.
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -571,7 +571,7 @@ private TypeMapping mapValueClassesToFieldType(List<SolrInputField> fields) {
NEXT_TYPE_MAPPING:
for (TypeMapping typeMapping : typeMappings) {
for (SolrInputField field : fields) {
// We do a assert and a null check because even after SOLR-12710 is addressed
// We do an assert and a null check because even after SOLR-12710 is addressed
// older SolrJ versions can send null values causing an NPE
assert field.getValues() != null;
if (field.getValues() != null) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -268,7 +268,7 @@ public static Set<String> computeInPlaceUpdatableFields(AddUpdateCommand cmd) th
// not an in-place update if there are fields that are not maps
return Collections.emptySet();
}
// else it's a atomic update map...
// else it's an atomic update map...
Map<String, Object> fieldValueMap = (Map<String, Object>) fieldValue;
for (Entry<String, Object> entry : fieldValueMap.entrySet()) {
String op = entry.getKey();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -257,7 +257,7 @@ public static void rm(Path file) {
try {
IOUtils.rm(file);
} catch (Throwable ignored) {
// TODO: should this class care if a file couldnt be deleted?
// TODO: should this class care if a file couldn't be deleted?
// this just emulates previous behavior, where only SecurityException would be handled.
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -280,7 +280,7 @@ public void test() throws Exception {

commit();

// TODO: assert we didnt kill everyone
// TODO: assert we didn't kill everyone

zkStateReader.updateLiveNodes();
assertTrue(zkStateReader.getClusterState().getLiveNodes().size() > 0);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ public class TestCloudPseudoReturnFields extends SolrCloudTestCase {

@BeforeClass
public static void createMiniSolrCloudCluster() throws Exception {
// replication factor will impact wether we expect a list of urls from the '[shard]'
// replication factor will impact whether we expect a list of urls from the '[shard]'
// augmenter...
repFactor = usually() ? 1 : 2;
// ... and we definitely want to ensure forwarded requests to other shards work ...
Expand Down Expand Up @@ -218,7 +218,7 @@ public void testMultiValued() throws Exception {
// that way we can first sanity check a single value in a multivalued field is returned
// correctly as a "List" of one element, *AND* then we could be testing that a (single valued)
// pseudo-field correctly overrides that actual (real) value in a multivalued field (ie: not
// returning a an List)
// returning a List)
//
// (NOTE: not doing this yet due to how it will impact most other tests, many of which are
// currently @AwaitsFix status)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -436,7 +436,7 @@ public void testDynDateMissingFacet() {
@Test
public void testStringSort() {

// note: cant use checkSortMissingDefault because
// note: can't use checkSortMissingDefault because
// nothing sorts lower than the default of ""
for (String field : new String[] {"stringdv", "dyn_stringdv"}) {
assertU(adoc("id", "0")); // missing
Expand Down Expand Up @@ -520,7 +520,7 @@ public void testStringMissingFacet() {
/** bool (and dynamic bool) with default lucene sort (treats as "") */
@Test
public void testBoolSort() {
// note: cant use checkSortMissingDefault because
// note: can't use checkSortMissingDefault because
// nothing sorts lower than the default of "" and
// bool fields are, at root, string fields.
for (String field : new String[] {"booldv", "dyn_booldv"}) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@ public void testSoftAndHardCommitMaxDocs() throws Exception {
softTracker.setTimeUpperBound(-1);
hardTracker.setDocsUpperBound(hardCommitMaxDocs);
hardTracker.setTimeUpperBound(-1);
// simplify whats going on by only having soft auto commits trigger new searchers
// simplify what's going on by only having soft auto commits trigger new searchers
hardTracker.setOpenSearcher(false);

// Note: doc id counting starts at 0, see comment at start of test regarding "upper bound"
Expand Down Expand Up @@ -199,7 +199,7 @@ private void doTestSoftAndHardCommitMaxTimeMixedAdds(final CommitWithinType comm
hardTracker.setTimeUpperBound(
commitWithinType.equals(CommitWithinType.HARD) ? -1 : hardCommitWaitMillis);
hardTracker.setDocsUpperBound(-1);
// simplify whats going on by only having soft auto commits trigger new searchers
// simplify what's going on by only having soft auto commits trigger new searchers
hardTracker.setOpenSearcher(false);

// Add a single document
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -242,7 +242,7 @@ void refreshAndWatch(boolean notifyWatchers) {
collectionPropsObservers.remove(coll);

// This is the one time we know it's safe to throw this out. We just failed to set the
// watch due to an NoNodeException, so it isn't held by ZK and can't re-set itself due
// watch due to a NoNodeException, so it isn't held by ZK and can't re-set itself due
// to an update.
collectionPropsWatchers.remove(coll);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1474,7 +1474,7 @@ public void testPivotFacetsStats() throws Exception {
"{!key=pivot_key stats=s1}features,manu,cat,inStock"
}) {

// for any of these pivot params, the assertions we check should be teh same
// for any of these pivot params, the assertions we check should be the same
// (we stop asserting at the "manu" level)

SolrQuery query = new SolrQuery("*:*");
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -983,7 +983,7 @@ protected void compareSolrResponses(SolrResponse a, SolrResponse b) {

protected void compareResponses(QueryResponse a, QueryResponse b) {
if (System.getProperty("remove.version.field") != null) {
// we don't care if one has a version and the other doesnt -
// we don't care if one has a version and the other doesn't -
// control vs distrib
// TODO: this should prob be done by adding an ignore on _version_ rather than mutating the
// responses?
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -246,7 +246,7 @@ public void test() throws Exception {

commit();

// TODO: assert we didnt kill everyone
// TODO: assert we didn't kill everyone

zkStateReader.updateLiveNodes();
assertTrue(zkStateReader.getClusterState().getLiveNodes().size() > 0);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -714,7 +714,7 @@ private void assertFolderAreSame(URI uri1, URI uri2) throws IOException {
}

public void verify(List<URI> newFilesCopiedOver) throws IOException {
// Verify zk files are reuploaded to a appropriate each time a backup is called
// Verify zk files are reuploaded to an appropriate each time a backup is called
// TODO make a little change to zk files and make sure that backed up files match with zk data
BackupId prevBackupId = new BackupId(Math.max(0, numBackup - 1));

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -200,7 +200,7 @@ private LogListener(final String name, final String loggerName, final Level leve
config.addLogger(loggerName, loggerConfig);
}

// Regardless of wether loggerConfig exactly matches loggerName, or is an ancestor, if it's
// Regardless of whether loggerConfig exactly matches loggerName, or is an ancestor, if its
// level is (strictly) more specific
// than our configured level, it will be impossible to listen for the events we want - so track
// the original level and modify as needed...
Expand Down
Loading