Add missing spaces in concatenated strings #12967

Merged
merged 1 commit on Dec 24, 2023
@@ -94,8 +94,8 @@ public class Lucene90HnswVectorsFormat extends KnnVectorsFormat {

/**
* The number of candidate neighbors to track while searching the graph for each newly inserted
* node. Defaults to to {@link Lucene90HnswVectorsFormat#DEFAULT_BEAM_WIDTH}. See {@link
* HnswGraph} for details.
* node. Defaults to {@link Lucene90HnswVectorsFormat#DEFAULT_BEAM_WIDTH}. See {@link HnswGraph}
* for details.
*/
final int beamWidth;

@@ -70,7 +70,7 @@
* <li><b>[int8]</b> if equals to -1, dense – all documents have values for a field. If equals to
* 0, sparse – some documents missing values.
* <li><b>array[int]</b> for sparse case, the docids of documents having vectors, in order
* <li><b>[int]</b> the maximum number of connections (neigbours) that each node can have
* <li><b>[int]</b> the maximum number of connections (neighbours) that each node can have
* <li><b>[int]</b> number of levels in the graph
* <li>Graph nodes by level. For each level
* <ul>
@@ -110,8 +110,8 @@ public class Lucene91HnswVectorsFormat extends KnnVectorsFormat {

/**
* The number of candidate neighbors to track while searching the graph for each newly inserted
* node. Defaults to to {@link Lucene91HnswVectorsFormat#DEFAULT_BEAM_WIDTH}. See {@link
* HnswGraph} for details.
* node. Defaults to {@link Lucene91HnswVectorsFormat#DEFAULT_BEAM_WIDTH}. See {@link HnswGraph}
* for details.
*/
final int beamWidth;

@@ -83,7 +83,7 @@
* <li>DocIds were encoded by {@link IndexedDISI#writeBitSet(DocIdSetIterator, IndexOutput, byte)}
* <li>OrdToDoc was encoded by {@link org.apache.lucene.util.packed.DirectMonotonicWriter}, note
* that only in sparse case
* <li><b>[int]</b> the maximum number of connections (neigbours) that each node can have
* <li><b>[int]</b> the maximum number of connections (neighbours) that each node can have
* <li><b>[int]</b> number of levels in the graph
* <li>Graph nodes by level. For each level
* <ul>
@@ -123,7 +123,7 @@ public class Lucene92HnswVectorsFormat extends KnnVectorsFormat {

/**
* The number of candidate neighbors to track while searching the graph for each newly inserted
* node. Defaults to to {@link #DEFAULT_BEAM_WIDTH}. See {@link HnswGraph} for details.
* node. Defaults to {@link #DEFAULT_BEAM_WIDTH}. See {@link HnswGraph} for details.
*/
final int beamWidth;

@@ -84,7 +84,7 @@
* <li>DocIds were encoded by {@link IndexedDISI#writeBitSet(DocIdSetIterator, IndexOutput, byte)}
* <li>OrdToDoc was encoded by {@link org.apache.lucene.util.packed.DirectMonotonicWriter}, note
* that only in sparse case
* <li><b>[int]</b> the maximum number of connections (neigbours) that each node can have
* <li><b>[int]</b> the maximum number of connections (neighbours) that each node can have
* <li><b>[int]</b> number of levels in the graph
* <li>Graph nodes by level. For each level
* <ul>
@@ -124,8 +124,8 @@ public class Lucene94HnswVectorsFormat extends KnnVectorsFormat {

/**
* The number of candidate neighbors to track while searching the graph for each newly inserted
* node. Defaults to to {@link Lucene94HnswVectorsFormat#DEFAULT_BEAM_WIDTH}. See {@link
* HnswGraph} for details.
* node. Defaults to {@link Lucene94HnswVectorsFormat#DEFAULT_BEAM_WIDTH}. See {@link HnswGraph}
* for details.
*/
final int beamWidth;

@@ -62,7 +62,7 @@
* </ul>
* </ul>
* <li>After all levels are encoded memory offsets for each node's neighbor nodes encoded by
* {@link org.apache.lucene.util.packed.DirectMonotonicWriter} are appened to the end of the
* {@link org.apache.lucene.util.packed.DirectMonotonicWriter} are appended to the end of the
* file.
* </ul>
*
@@ -84,7 +84,7 @@
* <li>DocIds were encoded by {@link IndexedDISI#writeBitSet(DocIdSetIterator, IndexOutput, byte)}
* <li>OrdToDoc was encoded by {@link org.apache.lucene.util.packed.DirectMonotonicWriter}, note
* that only in sparse case
* <li><b>[vint]</b> the maximum number of connections (neigbours) that each node can have
* <li><b>[vint]</b> the maximum number of connections (neighbours) that each node can have
* <li><b>[vint]</b> number of levels in the graph
* <li>Graph nodes by level. For each level
* <ul>
@@ -141,8 +141,8 @@ public class Lucene95HnswVectorsFormat extends KnnVectorsFormat {

/**
* The number of candidate neighbors to track while searching the graph for each newly inserted
* node. Defaults to to {@link Lucene95HnswVectorsFormat#DEFAULT_BEAM_WIDTH}. See {@link
* HnswGraph} for details.
* node. Defaults to {@link Lucene95HnswVectorsFormat#DEFAULT_BEAM_WIDTH}. See {@link HnswGraph}
* for details.
*/
final int beamWidth;

@@ -161,14 +161,14 @@ public Lucene95HnswVectorsFormat(int maxConn, int beamWidth) {
super("Lucene95HnswVectorsFormat");
if (maxConn <= 0 || maxConn > MAXIMUM_MAX_CONN) {
throw new IllegalArgumentException(
"maxConn must be postive and less than or equal to"
"maxConn must be positive and less than or equal to "
+ MAXIMUM_MAX_CONN
+ "; maxConn="
+ maxConn);
}
if (beamWidth <= 0 || beamWidth > MAXIMUM_BEAM_WIDTH) {
throw new IllegalArgumentException(
"beamWidth must be postive and less than or equal to"
"beamWidth must be positive and less than or equal to "
+ MAXIMUM_BEAM_WIDTH
+ "; beamWidth="
+ beamWidth);
@@ -90,7 +90,7 @@ public void testPackedInts() throws IOException {
final long expectedBytesUsed = RamUsageTester.ramUsed(r);
final long computedBytesUsed = r.ramBytesUsed();
assertEquals(
r.getClass() + "expected " + expectedBytesUsed + ", got: " + computedBytesUsed,
r.getClass() + " expected " + expectedBytesUsed + ", got: " + computedBytesUsed,
expectedBytesUsed,
computedBytesUsed);
}
@@ -47,7 +47,7 @@ public class FixedGapTermsIndexReader extends TermsIndexReaderBase {
// number of places to multiply out the actual ord, and we
// will overflow int during those multiplies. So to avoid
// having to upgrade each multiple to long in multiple
// places (error prone), we use long here:
// places (error-prone), we use long here:
private final long indexInterval;

private final int packedIntsVersion;
@@ -118,7 +118,7 @@ public FixedGapTermsIndexReader(SegmentReadState state) throws IOException {
+ packedIndexStart
+ " indexStart: "
+ indexStart
+ "numIndexTerms: "
+ " numIndexTerms: "
+ numIndexTerms,
in);
}
@@ -272,7 +272,7 @@ public FieldIndexData(

// slurp in the images from disk:

try {
try (clone) {
final long numTermBytes = packedIndexStart - indexStart;
termBytes.copy(clone, numTermBytes);

@@ -283,8 +283,6 @@ public FieldIndexData(
// records offsets into byte[] term data
termOffsets =
MonotonicBlockPackedReader.of(clone, packedIntsVersion, blocksize, 1 + numIndexTerms);
} finally {
clone.close();
}
}
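
The change in this hunk drops the explicit finally { clone.close(); } in favor of a try-with-resources statement on the existing clone variable. A minimal sketch of that Java 9+ idiom, using a plain StringReader as a stand-in resource rather than Lucene's cloned IndexInput:

import java.io.StringReader;

public class TryWithExistingResource {
  public static void main(String[] args) throws Exception {
    StringReader clone = new StringReader("abc");
    // Since Java 9 an effectively-final variable may be listed directly as a
    // resource; clone.close() runs automatically when the block exits,
    // normally or via an exception, so no finally block is needed.
    try (clone) {
      System.out.println((char) clone.read()); // prints 'a'
    }
  }
}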

@@ -52,8 +52,8 @@ public final class Lucene99HnswScalarQuantizedVectorsFormat extends KnnVectorsFo

/**
* The number of candidate neighbors to track while searching the graph for each newly inserted
* node. Defaults to to {@link Lucene99HnswVectorsFormat#DEFAULT_BEAM_WIDTH}. See {@link
* HnswGraph} for details.
* node. Defaults to {@link Lucene99HnswVectorsFormat#DEFAULT_BEAM_WIDTH}. See {@link HnswGraph}
* for details.
*/
private final int beamWidth;

@@ -99,14 +99,14 @@ public Lucene99HnswScalarQuantizedVectorsFormat(
super("Lucene99HnswScalarQuantizedVectorsFormat");
if (maxConn <= 0 || maxConn > MAXIMUM_MAX_CONN) {
throw new IllegalArgumentException(
"maxConn must be positive and less than or equal to"
"maxConn must be positive and less than or equal to "
+ MAXIMUM_MAX_CONN
+ "; maxConn="
+ maxConn);
}
if (beamWidth <= 0 || beamWidth > MAXIMUM_BEAM_WIDTH) {
throw new IllegalArgumentException(
"beamWidth must be positive and less than or equal to"
"beamWidth must be positive and less than or equal to "
+ MAXIMUM_BEAM_WIDTH
+ "; beamWidth="
+ beamWidth);
@@ -51,7 +51,7 @@
* </ul>
* </ul>
* <li>After all levels are encoded memory offsets for each node's neighbor nodes encoded by
* {@link org.apache.lucene.util.packed.DirectMonotonicWriter} are appened to the end of the
* {@link org.apache.lucene.util.packed.DirectMonotonicWriter} are appended to the end of the
* file.
* </ul>
*
@@ -71,7 +71,7 @@
* <li>DocIds were encoded by {@link IndexedDISI#writeBitSet(DocIdSetIterator, IndexOutput, byte)}
* <li>OrdToDoc was encoded by {@link org.apache.lucene.util.packed.DirectMonotonicWriter}, note
* that only in sparse case
* <li><b>[vint]</b> the maximum number of connections (neigbours) that each node can have
* <li><b>[vint]</b> the maximum number of connections (neighbours) that each node can have
* <li><b>[vint]</b> number of levels in the graph
* <li>Graph nodes by level. For each level
* <ul>
@@ -129,8 +129,8 @@ public final class Lucene99HnswVectorsFormat extends KnnVectorsFormat {

/**
* The number of candidate neighbors to track while searching the graph for each newly inserted
* node. Defaults to to {@link Lucene99HnswVectorsFormat#DEFAULT_BEAM_WIDTH}. See {@link
* HnswGraph} for details.
* node. Defaults to {@link Lucene99HnswVectorsFormat#DEFAULT_BEAM_WIDTH}. See {@link HnswGraph}
* for details.
*/
private final int beamWidth;

@@ -170,14 +170,14 @@ public Lucene99HnswVectorsFormat(
super("Lucene99HnswVectorsFormat");
if (maxConn <= 0 || maxConn > MAXIMUM_MAX_CONN) {
throw new IllegalArgumentException(
"maxConn must be positive and less than or equal to"
"maxConn must be positive and less than or equal to "
+ MAXIMUM_MAX_CONN
+ "; maxConn="
+ maxConn);
}
if (beamWidth <= 0 || beamWidth > MAXIMUM_BEAM_WIDTH) {
throw new IllegalArgumentException(
"beamWidth must be positive and less than or equal to"
"beamWidth must be positive and less than or equal to "
+ MAXIMUM_BEAM_WIDTH
+ "; beamWidth="
+ beamWidth);
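
For context, the two messages fixed in this hunk come from the public two-argument constructor, which rejects graph parameters outside their allowed ranges. A minimal sketch of that behavior (assuming Lucene 9.9 on the classpath; 16 and 100 are the library defaults, and 3200 is the beam-width ceiling at the time of this PR):

import org.apache.lucene.codecs.lucene99.Lucene99HnswVectorsFormat;

public class HnswFormatParamsDemo {
  public static void main(String[] args) {
    // Within range: maxConn=16 and beamWidth=100 match the defaults.
    Lucene99HnswVectorsFormat format = new Lucene99HnswVectorsFormat(16, 100);
    System.out.println(format);

    try {
      // A beamWidth above the maximum (3200) is rejected; with this PR the
      // message reads "... less than or equal to 3200; beamWidth=5000".
      new Lucene99HnswVectorsFormat(16, 5000);
    } catch (IllegalArgumentException expected) {
      System.out.println(expected.getMessage());
    }
  }
}

A custom format like this typically reaches the index through a codec hook, for example a Lucene99Codec subclass whose getKnnVectorsFormatForField returns it for the vector field.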
@@ -45,7 +45,7 @@ abstract class DocValuesUpdate {
final String field;
// used in BufferedDeletes to apply this update only to a slice of docs. It's initialized to
// BufferedUpdates.MAX_INT
// since it's safe and most often used this way we safe object creations.
// since it's safe and most often used this way we save object creations.
final int docIDUpTo;
final boolean hasValue;

@@ -57,7 +57,7 @@ abstract class DocValuesUpdate {
*/
protected DocValuesUpdate(
DocValuesType type, Term term, String field, int docIDUpTo, boolean hasValue) {
assert docIDUpTo >= 0 : docIDUpTo + "must be >= 0";
assert docIDUpTo >= 0 : docIDUpTo + " must be >= 0";
this.type = type;
this.term = term;
this.field = field;
@@ -58,7 +58,7 @@
*
* <p>When flush is called by IndexWriter we check out all DWPTs that are associated with the
* current {@link DocumentsWriterDeleteQueue} out of the {@link DocumentsWriterPerThreadPool} and
* write them to disk. The flush process can piggy-back on incoming indexing threads or even block
* write them to disk. The flush process can piggyback on incoming indexing threads or even block
* them from adding documents if flushing can't keep up with new documents being added. Unless the
* stall control kicks in to block indexing threads flushes are happening concurrently to actual
* index requests.
@@ -94,7 +94,7 @@ final class DocumentsWriter implements Closeable, Accountable {
volatile DocumentsWriterDeleteQueue deleteQueue;
private final DocumentsWriterFlushQueue ticketQueue = new DocumentsWriterFlushQueue();
/*
* we preserve changes during a full flush since IW might not checkout before
* we preserve changes during a full flush since IW might not check out before
* we release all changes. NRT Readers otherwise suddenly return true from
* isCurrent while there are actually changes currently committed. See also
* #anyChanges() & #flushAllThreads
@@ -236,7 +236,7 @@ synchronized void abort() throws IOException {
}
}

final boolean flushOneDWPT() throws IOException {
boolean flushOneDWPT() throws IOException {
Member commented: Ahh because the class itself is already final, great!

if (infoStream.isEnabled("DW")) {
infoStream.message("DW", "startFlushOneDWPT");
}
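
The review note above observes that the explicit final on flushOneDWPT() was redundant: DocumentsWriter is itself declared final, so no subclass can exist to override any of its methods. A toy illustration of that rule (names invented, not Lucene classes):

public final class Box {            // final class: cannot be subclassed
  int size() { return 1; }          // marking this method final would add nothing
}

// class BiggerBox extends Box {}   // would not compile: cannot inherit from final Box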
@@ -382,7 +382,7 @@ private boolean preUpdate() throws IOException {
while (flushControl.anyStalledThreads()
|| (flushControl.numQueuedFlushes() > 0 && config.checkPendingFlushOnUpdate)) {
// Help out flushing any queued DWPTs so we can un-stall:
// Try pick up pending threads here if possible
// Try pickup pending threads here if possible
// no need to loop over the next pending flushes... doFlush will take care of this
hasEvents |= maybeFlush();
flushControl.waitIfStalled(); // block if stalled
@@ -460,7 +460,7 @@ private void doFlush(DocumentsWriterPerThread flushingDWPT) throws IOException {
|| flushingDWPT.deleteQueue == currentFullFlushDelQueue
: "expected: "
+ currentFullFlushDelQueue
+ "but was: "
+ " but was: "
+ flushingDWPT.deleteQueue
+ " "
+ flushControl.isFullFlush();
@@ -172,7 +172,7 @@ void onDocValuesUpdate(FieldInfo info, DocValuesFieldUpdates.Iterator iterator)

private boolean assertPendingDeletes() {
assert pendingDeleteCount + info.getSoftDelCount() >= 0
: " illegal pending delete count: " + pendingDeleteCount + info.getSoftDelCount();
: "illegal pending delete count: " + (pendingDeleteCount + info.getSoftDelCount());
assert info.info.maxDoc() >= getDelCount();
return true;
}
@@ -199,13 +199,13 @@ private void ensureInitialized(IOSupplier<CodecReader> readerIOSupplier) throws
if (dvGeneration == -2) {
FieldInfos fieldInfos = readFieldInfos();
FieldInfo fieldInfo = fieldInfos.fieldInfo(field);
// we try to only open a reader if it's really necessary ie. indices that are mainly append
// we try to only open a reader if it's really necessary i.e. indices that are mainly append
// only might have
// big segments that don't even have any docs in the soft deletes field. In such a case it's
// simply
// enough to look at the FieldInfo for the field and check if the field has DocValues
if (fieldInfo != null && fieldInfo.getDocValuesType() != DocValuesType.NONE) {
// in order to get accurate numbers we need to have a least one reader see here.
// in order to get accurate numbers we need to have at least one reader see here.
onNewReader(readerIOSupplier.get(), info);
} else {
// we are safe here since we don't have any doc values for the soft-delete field on disk
@@ -368,7 +368,7 @@ public String toString() {
}
if (reader.maxDoc() != docMap.size()) {
throw new IllegalArgumentException(
"reader.maxDoc() should be equal to docMap.size(), got"
"reader.maxDoc() should be equal to docMap.size(), got "
+ reader.maxDoc()
+ " != "
+ docMap.size());
@@ -721,8 +721,7 @@ private synchronized <T> T getOrCreate(String field, boolean norms, IOSupplier<T
private boolean assertCreatedOnlyOnce(String field, boolean norms) {
assert Thread.holdsLock(this);
// this is mainly there to make sure we change anything in the way we merge we realize it early
Integer timesCached =
cacheStats.compute(field + "N:" + norms, (s, i) -> i == null ? 1 : i.intValue() + 1);
int timesCached = cacheStats.compute(field + "N:" + norms, (s, i) -> i == null ? 1 : i + 1);
if (timesCached > 1) {
assert norms == false : "[" + field + "] norms must not be cached twice";
boolean isSortField = false;
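
The change in this hunk relies on Map.compute returning the freshly computed value, which can be unboxed straight into an int because the remapping function never returns null. A small stand-alone sketch of that counting idiom (the map contents here are invented for illustration):

import java.util.HashMap;
import java.util.Map;

public class ComputeCounterDemo {
  public static void main(String[] args) {
    Map<String, Integer> cacheStats = new HashMap<>();
    for (String key : new String[] {"titleN:true", "bodyN:false", "titleN:true"}) {
      // compute() stores and returns the updated count in one step.
      int timesCached = cacheStats.compute(key, (k, v) -> v == null ? 1 : v + 1);
      System.out.println(key + " cached " + timesCached + " time(s)");
    }
  }
}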
12 changes: 6 additions & 6 deletions lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
@@ -93,7 +93,7 @@ public class IndexSearcher {
}

/**
* By default we count hits accurately up to 1000. This makes sure that we don't spend most time
* By default, we count hits accurately up to 1000. This makes sure that we don't spend most time
* on computing hit counts
*/
private static final int TOTAL_HITS_THRESHOLD = 1000;
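
The constant above is what the corrected Javadoc describes: by default IndexSearcher only counts hits exactly up to 1000, after which the reported total becomes a lower bound. A small, self-contained sketch of that behavior (field names and document counts are arbitrary; assumes a Lucene 9.x setup):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.ByteBuffersDirectory;

public class HitCountThresholdDemo {
  public static void main(String[] args) throws Exception {
    ByteBuffersDirectory dir = new ByteBuffersDirectory();
    try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
      for (int i = 0; i < 5000; i++) { // well past the 1000-hit threshold
        Document doc = new Document();
        doc.add(new StringField("color", "blue", Field.Store.NO));
        writer.addDocument(doc);
      }
    }
    try (DirectoryReader reader = DirectoryReader.open(dir)) {
      IndexSearcher searcher = new IndexSearcher(reader);
      TopDocs top = searcher.search(new TermQuery(new Term("color", "blue")), 10);
      // Once more than 1000 matches have been seen the searcher may stop
      // counting exactly, so the relation is typically
      // GREATER_THAN_OR_EQUAL_TO rather than EQUAL_TO.
      System.out.println(top.totalHits);
    }
  }
}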
@@ -222,7 +222,7 @@ public IndexSearcher(IndexReader r, Executor executor) {
*/
public IndexSearcher(IndexReaderContext context, Executor executor) {
assert context.isTopLevel
: "IndexSearcher's ReaderContext must be topLevel for reader" + context.reader();
: "IndexSearcher's ReaderContext must be topLevel for reader " + context.reader();
reader = context.reader();
this.taskExecutor =
executor == null ? new TaskExecutor(Runnable::run) : new TaskExecutor(executor);
@@ -231,7 +231,7 @@ public IndexSearcher(IndexReaderContext context, Executor executor) {
Function<List<LeafReaderContext>, LeafSlice[]> slicesProvider =
executor == null
? leaves ->
leaves.size() == 0
leaves.isEmpty()
? new LeafSlice[0]
: new LeafSlice[] {new LeafSlice(new ArrayList<>(leaves))}
: this::slices;
@@ -613,7 +613,7 @@ private <C extends Collector, T> T search(
if (leafSlices.length == 0) {
// there are no segments, nothing to offload to the executor, but we do need to call reduce to
// create some kind of empty result
assert leafContexts.size() == 0;
assert leafContexts.isEmpty();
return collectorManager.reduce(Collections.singletonList(firstCollector));
} else {
final List<C> collectors = new ArrayList<>(leafSlices.length);
@@ -820,7 +820,7 @@ public Weight createWeight(Query query, ScoreMode scoreMode, float boost) throws
}

/**
* Returns this searchers the top-level {@link IndexReaderContext}.
* Returns this searcher's top-level {@link IndexReaderContext}.
*
* @see IndexReader#getContext()
*/
@@ -932,7 +932,7 @@ public int getMaxClauseCount() {

/**
* Thrown when a client attempts to execute a Query that has more than {@link
* #getMaxClauseCount()} total clauses cumulatively in all of it's children.
* #getMaxClauseCount()} total clauses cumulatively in all of its children.
*
* @see #rewrite
*/