diff --git a/dev-tools/scripts/StageArtifacts.java b/dev-tools/scripts/StageArtifacts.java index 052c251ff78c..be6f2fab5363 100644 --- a/dev-tools/scripts/StageArtifacts.java +++ b/dev-tools/scripts/StageArtifacts.java @@ -362,7 +362,7 @@ public static void main(String[] args) throws Exception { // Ignore locally generated maven metadata files. .filter(path -> !path.getFileName().toString().startsWith("maven-metadata.")) .sorted(Comparator.comparing(Path::toString)) - .collect(Collectors.toList()); + .toList(); } // Figure out nexus profile ID based on POMs. It is assumed that all artifacts diff --git a/gradle/generation/extract-jdk-apis/ExtractJdkApis.java b/gradle/generation/extract-jdk-apis/ExtractJdkApis.java index 58c7d2ec584b..82f43c1336fc 100644 --- a/gradle/generation/extract-jdk-apis/ExtractJdkApis.java +++ b/gradle/generation/extract-jdk-apis/ExtractJdkApis.java @@ -82,7 +82,7 @@ public static void main(String... args) throws IOException { // Collect all files to process: final List filesToExtract; try (var stream = Files.walk(jrtPath)) { - filesToExtract = stream.filter(p -> pattern.matches(jrtPath.relativize(p))).collect(Collectors.toList()); + filesToExtract = stream.filter(p -> pattern.matches(jrtPath.relativize(p))).toList(); } // Process all class files: diff --git a/lucene/analysis.tests/src/test/org/apache/lucene/analysis/tests/TestRandomChains.java b/lucene/analysis.tests/src/test/org/apache/lucene/analysis/tests/TestRandomChains.java index 988deaf99e59..42b9880d9615 100644 --- a/lucene/analysis.tests/src/test/org/apache/lucene/analysis/tests/TestRandomChains.java +++ b/lucene/analysis.tests/src/test/org/apache/lucene/analysis/tests/TestRandomChains.java @@ -168,7 +168,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase { private static final Map, Function> argProducers = Collections.unmodifiableMap( - new IdentityHashMap, Function>() { + new IdentityHashMap<>() { { put( int.class, @@ -176,7 +176,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase { // TODO: could cause huge ram usage to use full int range for some filters // (e.g. 
allocate enormous arrays) // return Integer.valueOf(random.nextInt()); - return Integer.valueOf(TestUtil.nextInt(random, -50, 50)); + return TestUtil.nextInt(random, -50, 50); }); put( char.class, @@ -187,7 +187,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase { while (true) { char c = (char) random.nextInt(65536); if (c < '\uD800' || c > '\uDFFF') { - return Character.valueOf(c); + return c; } } }); @@ -382,7 +382,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase { }); put( SynonymMap.class, - new Function() { + new Function<>() { @Override public Object apply(Random random) { SynonymMap.Builder b = new SynonymMap.Builder(random.nextBoolean()); @@ -448,12 +448,11 @@ private String randomNonEmptyString(Random random) { }); put( Automaton.class, - random -> { - return Operations.determinize( - new RegExp(AutomatonTestUtil.randomRegexp(random), RegExp.NONE) - .toAutomaton(), - Operations.DEFAULT_DETERMINIZE_WORK_LIMIT); - }); + random -> + Operations.determinize( + new RegExp(AutomatonTestUtil.randomRegexp(random), RegExp.NONE) + .toAutomaton(), + Operations.DEFAULT_DETERMINIZE_WORK_LIMIT)); put( PatternTypingFilter.PatternTypingRule[].class, random -> { @@ -625,9 +624,9 @@ public static void beforeClass() throws Exception { } final Comparator> ctorComp = Comparator.comparing(Constructor::toGenericString); - Collections.sort(tokenizers, ctorComp); - Collections.sort(tokenfilters, ctorComp); - Collections.sort(charfilters, ctorComp); + tokenizers.sort(ctorComp); + tokenfilters.sort(ctorComp); + charfilters.sort(ctorComp); if (VERBOSE) { System.out.println("tokenizers = " + tokenizers); System.out.println("tokenfilters = " + tokenfilters); @@ -642,7 +641,7 @@ public static void beforeClass() throws Exception { .filter(c -> c.getName().endsWith("Stemmer")) .map(stemmerCast) .sorted(Comparator.comparing(Class::getName)) - .collect(Collectors.toList()); + .toList(); if (VERBOSE) { System.out.println("snowballStemmers = " + snowballStemmers); } @@ -786,7 +785,7 @@ private T createComponent( if (cause instanceof IllegalArgumentException || (cause instanceof NullPointerException && Stream.of(args).anyMatch(Objects::isNull)) || cause instanceof UnsupportedOperationException) { - // thats ok, ignore + // that's ok, ignore if (VERBOSE) { System.err.println("Ignoring IAE/UOE/NPE from ctor:"); cause.printStackTrace(System.err); diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/GeneratingSuggester.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/GeneratingSuggester.java index 892abfdd606e..bc07bce188f4 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/GeneratingSuggester.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/GeneratingSuggester.java @@ -30,7 +30,6 @@ import java.util.TreeSet; import java.util.function.Consumer; import java.util.function.IntPredicate; -import java.util.stream.Collectors; import org.apache.lucene.util.CharsRef; import org.apache.lucene.util.IntsRef; import org.apache.lucene.util.fst.FST; @@ -108,7 +107,7 @@ private List>> findSimilarDictionaryEntries( } }); - return roots.stream().sorted().collect(Collectors.toList()); + return roots.stream().sorted().toList(); } private static boolean isWorseThan(int score, CharsRef candidate, Weighted> root) { @@ -141,7 +140,7 @@ private List> expandRoots( } } } - return expanded.stream().limit(MAX_GUESSES).collect(Collectors.toList()); + return expanded.stream().limit(MAX_GUESSES).toList(); } 
// find minimum threshold for a passable suggestion @@ -223,7 +222,7 @@ && checkAffixCondition(prefixId, wordChars, stripLength, stemLength)) { } }); - return result.stream().limit(MAX_WORDS).collect(Collectors.toList()); + return result.stream().limit(MAX_WORDS).toList(); } private void processAffixes(boolean prefixes, String word, AffixProcessor processor) { diff --git a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Hunspell.java b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Hunspell.java index 3b58e0f4f980..f288ef90070b 100644 --- a/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Hunspell.java +++ b/lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/Hunspell.java @@ -28,7 +28,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Set; -import java.util.stream.Collectors; import org.apache.lucene.util.CharsRef; import org.apache.lucene.util.IntsRef; @@ -304,10 +303,7 @@ private boolean hasForceUCaseProblem(Root root, WordCase originalCase, char[] * Dictionary#lookupEntries}. */ public List getRoots(String word) { - return stemmer.stem(word).stream() - .map(CharsRef::toString) - .distinct() - .collect(Collectors.toList()); + return stemmer.stem(word).stream().map(CharsRef::toString).distinct().toList(); } /** diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFlattenGraphFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFlattenGraphFilter.java index eafd62387a4f..28748f4f4b49 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFlattenGraphFilter.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFlattenGraphFilter.java @@ -23,7 +23,6 @@ import java.util.LinkedList; import java.util.List; import java.util.Random; -import java.util.stream.Collectors; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.AutomatonToTokenStream; import org.apache.lucene.analysis.CharArraySet; @@ -84,16 +83,14 @@ public void testAlreadyFlatten() throws Exception { new CannedTokenStream( 0, 12, - new Token[] { - token("wtf", 1, 1, 0, 3), - token("what", 0, 1, 0, 3), - token("wow", 0, 1, 0, 3), - token("the", 1, 1, 0, 3), - token("that's", 0, 1, 0, 3), - token("fudge", 1, 1, 0, 3), - token("funny", 0, 1, 0, 3), - token("happened", 1, 1, 4, 12) - }); + token("wtf", 1, 1, 0, 3), + token("what", 0, 1, 0, 3), + token("wow", 0, 1, 0, 3), + token("the", 1, 1, 0, 3), + token("that's", 0, 1, 0, 3), + token("fudge", 1, 1, 0, 3), + token("funny", 0, 1, 0, 3), + token("happened", 1, 1, 4, 12)); TokenStream out = new FlattenGraphFilter(in); @@ -116,16 +113,14 @@ public void testWTF1() throws Exception { new CannedTokenStream( 0, 12, - new Token[] { - token("wtf", 1, 5, 0, 3), - token("what", 0, 1, 0, 3), - token("wow", 0, 3, 0, 3), - token("the", 1, 1, 0, 3), - token("fudge", 1, 3, 0, 3), - token("that's", 1, 1, 0, 3), - token("funny", 1, 1, 0, 3), - token("happened", 1, 1, 4, 12) - }); + token("wtf", 1, 5, 0, 3), + token("what", 0, 1, 0, 3), + token("wow", 0, 3, 0, 3), + token("the", 1, 1, 0, 3), + token("fudge", 1, 3, 0, 3), + token("that's", 1, 1, 0, 3), + token("funny", 1, 1, 0, 3), + token("happened", 1, 1, 4, 12)); TokenStream out = new FlattenGraphFilter(in); @@ -149,16 +144,14 @@ public void testWTF2() throws Exception { new CannedTokenStream( 0, 12, - new Token[] { - token("what", 1, 1, 0, 3), - token("wow", 0, 3, 0, 3), - token("wtf", 0, 5, 0, 3), - token("the", 1, 1, 0, 3), 
- token("fudge", 1, 3, 0, 3), - token("that's", 1, 1, 0, 3), - token("funny", 1, 1, 0, 3), - token("happened", 1, 1, 4, 12) - }); + token("what", 1, 1, 0, 3), + token("wow", 0, 3, 0, 3), + token("wtf", 0, 5, 0, 3), + token("the", 1, 1, 0, 3), + token("fudge", 1, 3, 0, 3), + token("that's", 1, 1, 0, 3), + token("funny", 1, 1, 0, 3), + token("happened", 1, 1, 4, 12)); TokenStream out = new FlattenGraphFilter(in); @@ -182,14 +175,12 @@ public void testNonGreedySynonyms() throws Exception { new CannedTokenStream( 0, 20, - new Token[] { - token("wizard", 1, 1, 0, 6), - token("wizard_of_oz", 0, 3, 0, 12), - token("of", 1, 1, 7, 9), - token("oz", 1, 1, 10, 12), - token("oz_screams", 0, 2, 10, 20), - token("screams", 1, 1, 13, 20), - }); + token("wizard", 1, 1, 0, 6), + token("wizard_of_oz", 0, 3, 0, 12), + token("of", 1, 1, 7, 9), + token("oz", 1, 1, 10, 12), + token("oz_screams", 0, 2, 10, 20), + token("screams", 1, 1, 13, 20)); TokenStream out = new FlattenGraphFilter(in); @@ -209,12 +200,10 @@ public void testNonGraph() throws Exception { new CannedTokenStream( 0, 22, - new Token[] { - token("hello", 1, 1, 0, 5), - token("pseudo", 1, 1, 6, 12), - token("world", 1, 1, 13, 18), - token("fun", 1, 1, 19, 22), - }); + token("hello", 1, 1, 0, 5), + token("pseudo", 1, 1, 6, 12), + token("world", 1, 1, 13, 18), + token("fun", 1, 1, 19, 22)); TokenStream out = new FlattenGraphFilter(in); @@ -234,9 +223,9 @@ public void testSimpleHole() throws Exception { new CannedTokenStream( 0, 13, - new Token[] { - token("hello", 1, 1, 0, 5), token("hole", 2, 1, 6, 10), token("fun", 1, 1, 11, 13), - }); + token("hello", 1, 1, 0, 5), + token("hole", 2, 1, 6, 10), + token("fun", 1, 1, 11, 13)); TokenStream out = new FlattenGraphFilter(in); @@ -259,9 +248,9 @@ public void testHoleUnderSyn() throws Exception { new CannedTokenStream( 0, 12, - new Token[] { - token("wizard", 1, 1, 0, 6), token("woz", 0, 3, 0, 12), token("oz", 2, 1, 10, 12), - }); + token("wizard", 1, 1, 0, 6), + token("woz", 0, 3, 0, 12), + token("oz", 2, 1, 10, 12)); TokenStream out = new FlattenGraphFilter(in); @@ -282,9 +271,9 @@ public void testStrangelyNumberedNodes() throws Exception { new CannedTokenStream( 0, 27, - new Token[] { - token("dog", 1, 3, 0, 5), token("puppy", 0, 3, 0, 5), token("flies", 3, 1, 6, 11), - }); + token("dog", 1, 3, 0, 5), + token("puppy", 0, 3, 0, 5), + token("flies", 3, 1, 6, 11)); TokenStream out = new FlattenGraphFilter(in); @@ -305,20 +294,18 @@ public void testTwoLongParallelPaths() throws Exception { new CannedTokenStream( 0, 11, - new Token[] { - token("a", 1, 1, 0, 1), - token("b", 0, 2, 0, 1), - token("a", 1, 2, 2, 3), - token("b", 1, 2, 2, 3), - token("a", 1, 2, 4, 5), - token("b", 1, 2, 4, 5), - token("a", 1, 2, 6, 7), - token("b", 1, 2, 6, 7), - token("a", 1, 2, 8, 9), - token("b", 1, 2, 8, 9), - token("a", 1, 2, 10, 11), - token("b", 1, 2, 10, 11), - }); + token("a", 1, 1, 0, 1), + token("b", 0, 2, 0, 1), + token("a", 1, 2, 2, 3), + token("b", 1, 2, 2, 3), + token("a", 1, 2, 4, 5), + token("b", 1, 2, 4, 5), + token("a", 1, 2, 6, 7), + token("b", 1, 2, 6, 7), + token("a", 1, 2, 8, 9), + token("b", 1, 2, 8, 9), + token("a", 1, 2, 10, 11), + token("b", 1, 2, 10, 11)); TokenStream out = new FlattenGraphFilter(in); @@ -340,9 +327,7 @@ public void testTwoLongParallelPaths() throws Exception { public void testAltPathFirstStepHole() throws Exception { TokenStream in = new CannedTokenStream( - 0, - 3, - new Token[] {token("abc", 1, 3, 0, 3), token("b", 1, 1, 1, 2), token("c", 1, 1, 2, 3)}); + 0, 3, token("abc", 1, 3, 0, 
3), token("b", 1, 1, 1, 2), token("c", 1, 1, 2, 3)); TokenStream out = new FlattenGraphFilter(in); @@ -356,7 +341,7 @@ public void testAltPathFirstStepHole() throws Exception { 3); } - // Last node in an alt path fixes outputnode of long path. In this graph the follow up node fixes + // Last node in an alt path fixes output node of long path. In this graph the follow-up node fixes // that. // incorrect pos length of abc = 1 public void testAltPathLastStepHole() throws Exception { @@ -364,12 +349,10 @@ public void testAltPathLastStepHole() throws Exception { new CannedTokenStream( 0, 4, - new Token[] { - token("abc", 1, 3, 0, 3), - token("a", 0, 1, 0, 1), - token("b", 1, 1, 1, 2), - token("d", 2, 1, 3, 4) - }); + token("abc", 1, 3, 0, 3), + token("a", 0, 1, 0, 1), + token("b", 1, 1, 1, 2), + token("d", 2, 1, 3, 4)); TokenStream out = new FlattenGraphFilter(in); @@ -389,9 +372,9 @@ public void testLongHole() throws Exception { new CannedTokenStream( 0, 28, - new Token[] { - token("hello", 1, 1, 0, 5), token("hole", 5, 1, 20, 24), token("fun", 1, 1, 25, 28), - }); + token("hello", 1, 1, 0, 5), + token("hole", 5, 1, 20, 24), + token("fun", 1, 1, 25, 28)); TokenStream out = new FlattenGraphFilter(in); @@ -411,9 +394,7 @@ public void testLongHole() throws Exception { public void testAltPathLastStepLongHole() throws Exception { TokenStream in = new CannedTokenStream( - 0, - 4, - new Token[] {token("abc", 1, 3, 0, 3), token("a", 0, 1, 0, 1), token("d", 3, 1, 3, 4)}); + 0, 4, token("abc", 1, 3, 0, 3), token("a", 0, 1, 0, 1), token("d", 3, 1, 3, 4)); TokenStream out = new FlattenGraphFilter(in); @@ -434,9 +415,7 @@ public void testAltPathLastStepLongHole() throws Exception { public void testAltPathLastStepHoleWithoutEndToken() throws Exception { TokenStream in = new CannedTokenStream( - 0, - 2, - new Token[] {token("abc", 1, 3, 0, 3), token("a", 0, 1, 0, 1), token("b", 1, 1, 1, 2)}); + 0, 2, token("abc", 1, 3, 0, 3), token("a", 0, 1, 0, 1), token("b", 1, 1, 1, 2)); TokenStream out = new FlattenGraphFilter(in); @@ -457,9 +436,7 @@ public void testAltPathLastStepHoleWithoutEndToken() throws Exception { public void testAltPathLastStepHoleFollowedByHole() throws Exception { TokenStream in = new CannedTokenStream( - 0, - 5, - new Token[] {token("abc", 1, 3, 0, 3), token("b", 1, 1, 1, 2), token("e", 3, 1, 4, 5)}); + 0, 5, token("abc", 1, 3, 0, 3), token("b", 1, 1, 1, 2), token("e", 3, 1, 4, 5)); TokenStream out = new FlattenGraphFilter(in); @@ -480,14 +457,12 @@ public void testShingledGap() throws Exception { new CannedTokenStream( 0, 5, - new Token[] { - token("abc", 1, 3, 0, 3), - token("a", 0, 1, 0, 1), - token("b", 1, 1, 1, 2), - token("cde", 1, 3, 2, 5), - token("d", 1, 1, 3, 4), - token("e", 1, 1, 4, 5) - }); + token("abc", 1, 3, 0, 3), + token("a", 0, 1, 0, 1), + token("b", 1, 1, 1, 2), + token("cde", 1, 3, 2, 5), + token("d", 1, 1, 3, 4), + token("e", 1, 1, 4, 5)); TokenStream out = new FlattenGraphFilter(in); @@ -509,13 +484,11 @@ public void testShingledGapWithHoles() throws Exception { new CannedTokenStream( 0, 5, - new Token[] { - token("abc", 1, 3, 0, 3), - token("b", 1, 1, 1, 2), - token("cde", 1, 3, 2, 5), - token("d", 1, 1, 3, 4), - token("e", 1, 1, 4, 5) - }); + token("abc", 1, 3, 0, 3), + token("b", 1, 1, 1, 2), + token("cde", 1, 3, 2, 5), + token("d", 1, 1, 3, 4), + token("e", 1, 1, 4, 5)); TokenStream out = new FlattenGraphFilter(in); @@ -531,7 +504,7 @@ public void testShingledGapWithHoles() throws Exception { // When the first token is a hole there is no original token to offset 
from. public void testFirstTokenHole() throws Exception { - TokenStream in = new CannedTokenStream(0, 9, new Token[] {token("start", 2, 1, 0, 5)}); + TokenStream in = new CannedTokenStream(0, 9, token("start", 2, 1, 0, 5)); TokenStream out = new FlattenGraphFilter(in); assertTokenStreamContents( @@ -547,13 +520,11 @@ public void testShingleFromGap() throws Exception { new CannedTokenStream( 0, 9, - new Token[] { - token("a", 1, 1, 4, 8), - token("abc", 0, 3, 4, 7), - token("cd", 2, 2, 6, 8), - token("d", 1, 1, 7, 8), - token("e", 1, 1, 8, 9) - }); + token("a", 1, 1, 4, 8), + token("abc", 0, 3, 4, 7), + token("cd", 2, 2, 6, 8), + token("d", 1, 1, 7, 8), + token("e", 1, 1, 8, 9)); TokenStream out = new FlattenGraphFilter(in); assertTokenStreamContents( out, @@ -568,11 +539,7 @@ public void testShingleFromGap() throws Exception { public void testShingledGapAltPath() throws Exception { TokenStream in = new CannedTokenStream( - 0, - 4, - new Token[] { - token("abc", 1, 3, 0, 3), token("abcd", 0, 4, 0, 4), token("cd", 2, 2, 2, 4), - }); + 0, 4, token("abc", 1, 3, 0, 3), token("abcd", 0, 4, 0, 4), token("cd", 2, 2, 2, 4)); TokenStream out = new FlattenGraphFilter(in); assertTokenStreamContents( out, @@ -591,16 +558,14 @@ public void testHeavilyConnectedGraphWithGap() throws IOException { new CannedTokenStream( 0, 7, - new Token[] { - token("a", 1, 1, 0, 1), - token("ab", 0, 2, 0, 2), - token("abcdef", 0, 6, 0, 6), - token("abcd", 0, 4, 0, 4), - token("bcdef", 1, 5, 1, 7), - token("def", 2, 3, 4, 7), - token("e", 1, 1, 5, 6), - token("f", 1, 1, 6, 7) - }); + token("a", 1, 1, 0, 1), + token("ab", 0, 2, 0, 2), + token("abcdef", 0, 6, 0, 6), + token("abcd", 0, 4, 0, 4), + token("bcdef", 1, 5, 1, 7), + token("def", 2, 3, 4, 7), + token("e", 1, 1, 5, 6), + token("f", 1, 1, 6, 7)); TokenStream out = new FlattenGraphFilter(in); assertTokenStreamContents( out, @@ -618,11 +583,7 @@ public void testHeavilyConnectedGraphWithGap() throws IOException { public void testShingleWithLargeLeadingGap() throws IOException { TokenStream in = new CannedTokenStream( - 0, - 6, - new Token[] { - token("abcde", 1, 5, 0, 5), token("ef", 4, 2, 4, 6), token("f", 1, 1, 5, 6), - }); + 0, 6, token("abcde", 1, 5, 0, 5), token("ef", 4, 2, 4, 6), token("f", 1, 1, 5, 6)); TokenStream out = new FlattenGraphFilter(in); assertTokenStreamContents( out, @@ -780,7 +741,7 @@ public void testPathsNotLost() throws IOException { } acceptStrings.sort(Comparator.naturalOrder()); - acceptStrings = acceptStrings.stream().limit(wordCount).collect(Collectors.toList()); + acceptStrings = acceptStrings.stream().limit(wordCount).toList(); Automaton nonFlattenedAutomaton = Automata.makeStringUnion(acceptStrings); TokenStream ts = AutomatonToTokenStream.toTokenStream(nonFlattenedAutomaton); diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/email/TestUAX29URLEmailTokenizer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/email/TestUAX29URLEmailTokenizer.java index 7d709e62e515..024a493feafe 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/email/TestUAX29URLEmailTokenizer.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/email/TestUAX29URLEmailTokenizer.java @@ -27,7 +27,6 @@ import java.util.Arrays; import java.util.List; import java.util.Random; -import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenFilter; @@ -527,7 +526,7 @@ public void testWikiURLs() throws Exception { 
urlList.add(line); } } - urls = urlList.toArray(new String[urlList.size()]); + urls = urlList.toArray(new String[0]); } finally { if (null != bufferedReader) { bufferedReader.close(); @@ -576,7 +575,7 @@ public void testEmails() throws Exception { emailList.add(line); } } - emails = emailList.toArray(new String[emailList.size()]); + emails = emailList.toArray(new String[0]); } finally { if (null != bufferedReader) { bufferedReader.close(); @@ -667,7 +666,7 @@ public void testURLs() throws Exception { urlList.add(line); } } - urls = urlList.toArray(new String[urlList.size()]); + urls = urlList.toArray(new String[0]); } finally { if (null != bufferedReader) { bufferedReader.close(); @@ -881,7 +880,7 @@ protected TokenStreamComponents createComponents(String fieldName) { } }; - for (String tld : TLDs.collect(Collectors.toList())) { + for (String tld : TLDs.toList()) { String URL = "example." + tld; BaseTokenStreamTestCase.assertAnalyzesTo( analyzer, URL, new String[] {URL}, new String[] {""}); diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestAllDictionaries.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestAllDictionaries.java index 68f6922c758c..9b23a81b32bc 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestAllDictionaries.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestAllDictionaries.java @@ -97,7 +97,7 @@ public void testMaxPrologueNeeded() throws Exception { AtomicBoolean failTest = new AtomicBoolean(); Map> global = new LinkedHashMap<>(); - for (Path aff : findAllAffixFiles().collect(Collectors.toList())) { + for (Path aff : findAllAffixFiles().toList()) { Map> local = new LinkedHashMap<>(); ByteArrayOutputStream baos = new ByteArrayOutputStream(); try (ExposePosition is = new ExposePosition(Files.readAllBytes(aff))) { @@ -184,9 +184,7 @@ public void testDictionariesLoadSuccessfully() throws Exception { }; List> tasks = - findAllAffixFiles() - .map(aff -> (Callable) () -> process.apply(aff)) - .collect(Collectors.toList()); + findAllAffixFiles().map(aff -> (Callable) () -> process.apply(aff)).toList(); try { for (Future future : executor.invokeAll(tasks)) { future.get(); diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestHunspellRepositoryTestCases.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestHunspellRepositoryTestCases.java index 5c2ec424b937..2ce977bfb7fe 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestHunspellRepositoryTestCases.java +++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestHunspellRepositoryTestCases.java @@ -23,7 +23,6 @@ import java.util.Collection; import java.util.Set; import java.util.TreeSet; -import java.util.stream.Collectors; import org.junit.Assert; import org.junit.AssumptionViolatedException; import org.junit.Test; @@ -72,7 +71,7 @@ public static Collection data() throws IOException { } } - return names.stream().map(s -> new Object[] {s, tests.resolve(s)}).collect(Collectors.toList()); + return names.stream().map(s -> new Object[] {s, tests.resolve(s)}).toList(); } @Test diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestPerformance.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestPerformance.java index 04989954591a..9bbb09102c08 100644 --- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestPerformance.java +++ 
b/lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestPerformance.java @@ -34,7 +34,6 @@ import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.regex.Pattern; -import java.util.stream.Collectors; import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.util.NamedThreadFactory; import org.junit.Assume; @@ -184,7 +183,7 @@ private void checkSuggestionPerformance(String code, int wordCount) throws Excep loadWords(code, wordCount, dictionary).stream() .distinct() .filter(w -> hasQuickSuggestions(speller, base, optimized, w)) - .collect(Collectors.toList()); + .toList(); System.out.println("Checking " + words.size() + " misspelled words"); measure( diff --git a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java index 6de1d6078355..bac5fdfaf4d2 100644 --- a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java +++ b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/GenerateUTR30DataFiles.java @@ -40,7 +40,6 @@ import java.util.function.Predicate; import java.util.regex.Matcher; import java.util.regex.Pattern; -import java.util.stream.Collectors; /** * Downloads/generates lucene/analysis/icu/src/data/utr30/*.txt for the specified icu release tag. @@ -97,7 +96,7 @@ private static void expandRulesInUTR30DataFiles() throws IOException { && !name.equals(NFKC_CF_TXT); }; try (var stream = Files.list(Paths.get(".")).filter(predicate)) { - for (Path file : stream.collect(Collectors.toList())) { + for (Path file : stream.toList()) { expandDataFileRules(file); } } diff --git a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/RBBIRuleCompiler.java b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/RBBIRuleCompiler.java index a1ce01a57ccd..b7f322eba644 100644 --- a/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/RBBIRuleCompiler.java +++ b/lucene/analysis/icu/src/tools/java/org/apache/lucene/analysis/icu/RBBIRuleCompiler.java @@ -16,18 +16,18 @@ */ package org.apache.lucene.analysis.icu; +import static java.nio.charset.StandardCharsets.UTF_8; + import com.ibm.icu.text.RuleBasedBreakIterator; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStream; -import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.util.List; -import java.util.stream.Collectors; /** * Command-line utility to converts RuleBasedBreakIterator (.rbbi) files into binary compiled form @@ -38,8 +38,8 @@ public class RBBIRuleCompiler { static String getRules(Path ruleFile) throws IOException { StringBuilder rules = new StringBuilder(); InputStream in = Files.newInputStream(ruleFile); - BufferedReader cin = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8)); - String line = null; + BufferedReader cin = new BufferedReader(new InputStreamReader(in, UTF_8)); + String line; while ((line = cin.readLine()) != null) { if (!line.startsWith("#")) { rules.append(line); @@ -54,10 +54,7 @@ static String getRules(Path ruleFile) throws IOException { static void compile(Path srcDir, Path destDir) throws Exception { List files; try (var stream = Files.list(srcDir)) { - files = - stream - .filter(name -> name.getFileName().toString().endsWith("rbbi")) - 
.collect(Collectors.toList()); + files = stream.filter(name -> name.getFileName().toString().endsWith("rbbi")).toList(); } if (files.isEmpty()) throw new IOException("No input files matching *.rbbi at: " + srcDir); diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/completion/KatakanaRomanizer.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/completion/KatakanaRomanizer.java index 9f687245e78b..8e137a8af3f4 100644 --- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/completion/KatakanaRomanizer.java +++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/completion/KatakanaRomanizer.java @@ -16,16 +16,16 @@ */ package org.apache.lucene.analysis.ja.completion; +import static java.nio.charset.StandardCharsets.UTF_8; + import java.io.BufferedReader; import java.io.InputStreamReader; -import java.nio.charset.Charset; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.stream.Collectors; import org.apache.lucene.util.CharsRef; import org.apache.lucene.util.CharsRefBuilder; @@ -39,14 +39,13 @@ public class KatakanaRomanizer { private static final String ROMAJI_MAP_FILE = "romaji_map.txt"; - private static KatakanaRomanizer INSTANCE; + private static final KatakanaRomanizer INSTANCE; static { // Build romaji-map and keystroke arrays from the pre-defined Katakana-Romaji mapping file. try (InputStreamReader is = new InputStreamReader( - KatakanaRomanizer.class.getResourceAsStream(ROMAJI_MAP_FILE), - Charset.forName("UTF-8")); + KatakanaRomanizer.class.getResourceAsStream(ROMAJI_MAP_FILE), UTF_8); BufferedReader ir = new BufferedReader(is)) { Map> romajiMap = new HashMap<>(); String line; @@ -118,7 +117,7 @@ public List romanize(CharsRef input) { List candidates = romajiMap.get(keystrokes[matched.keystrokeLen - 1][matched.keystrokeIndex]); - if (pendingOutputs.size() == 0) { + if (pendingOutputs.isEmpty()) { // There is no pending output. // Add the matched keystrokes to pending outputs list. 
for (CharsRef cref : candidates) { @@ -166,7 +165,7 @@ public List romanize(CharsRef input) { output.append(input.chars, pos, input.length - pos); } } - return pendingOutputs.stream().map(CharsRefBuilder::get).collect(Collectors.toList()); + return pendingOutputs.stream().map(CharsRefBuilder::get).toList(); } private MatchedKeystroke longestKeystrokeMatch(CharsRef input, int inputOffset) { diff --git a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/TokenInfoDictionaryBuilder.java b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/TokenInfoDictionaryBuilder.java index 8afddb9ca961..fe8d7fd6225f 100644 --- a/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/TokenInfoDictionaryBuilder.java +++ b/lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/dict/TokenInfoDictionaryBuilder.java @@ -26,7 +26,6 @@ import java.util.Arrays; import java.util.Comparator; import java.util.List; -import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.lucene.analysis.util.CSVUtil; import org.apache.lucene.util.IntsRefBuilder; @@ -57,10 +56,7 @@ public TokenInfoDictionaryBuilder( public TokenInfoDictionaryWriter build(Path dir) throws IOException { try (Stream files = Files.list(dir)) { List csvFiles = - files - .filter(path -> path.getFileName().toString().endsWith(".csv")) - .sorted() - .collect(Collectors.toList()); + files.filter(path -> path.getFileName().toString().endsWith(".csv")).sorted().toList(); return buildDictionary(csvFiles); } } @@ -123,7 +119,7 @@ private TokenInfoDictionaryWriter buildDictionary(List csvFiles) throws IO scratch.grow(token.length()); scratch.setLength(token.length()); for (int i = 0; i < token.length(); i++) { - scratch.setIntAt(i, (int) token.charAt(i)); + scratch.setIntAt(i, token.charAt(i)); } fstCompiler.add(scratch.get(), ord); } @@ -144,7 +140,7 @@ private TokenInfoDictionaryWriter buildDictionary(List csvFiles) throws IO * 4-9 - pos * 10 - base form * 11 - reading - * 12 - pronounciation + * 12 - pronunciation * * UniDic features * @@ -178,7 +174,7 @@ private String[] formatEntry(String[] features) { // If the surface reading is non-existent, use surface form for reading and pronunciation. 
// This happens with punctuation in UniDic and there are possibly other cases as well - if (features[13].length() == 0) { + if (features[13].isEmpty()) { features2[11] = features[0]; features2[12] = features[0]; } else { diff --git a/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/dict/TokenInfoDictionaryBuilder.java b/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/dict/TokenInfoDictionaryBuilder.java index f66abba8b3ad..8639f66be7e4 100644 --- a/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/dict/TokenInfoDictionaryBuilder.java +++ b/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/dict/TokenInfoDictionaryBuilder.java @@ -26,7 +26,6 @@ import java.util.Arrays; import java.util.Comparator; import java.util.List; -import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.lucene.analysis.util.CSVUtil; import org.apache.lucene.util.IntsRefBuilder; @@ -42,8 +41,8 @@ class TokenInfoDictionaryBuilder { */ private int offset = 0; - private String encoding; - private Normalizer.Form normalForm; + private final String encoding; + private final Normalizer.Form normalForm; TokenInfoDictionaryBuilder(String encoding, boolean normalizeEntries) { this.encoding = encoding; @@ -53,10 +52,7 @@ class TokenInfoDictionaryBuilder { public TokenInfoDictionaryWriter build(Path dir) throws IOException { try (Stream files = Files.list(dir)) { List csvFiles = - files - .filter(path -> path.getFileName().toString().endsWith(".csv")) - .sorted() - .collect(Collectors.toList()); + files.filter(path -> path.getFileName().toString().endsWith(".csv")).sorted().toList(); return buildDictionary(csvFiles); } } diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/uniformsplit/TestFSTDictionary.java b/lucene/codecs/src/test/org/apache/lucene/codecs/uniformsplit/TestFSTDictionary.java index d9ae90a9d249..bbe97ec9b401 100644 --- a/lucene/codecs/src/test/org/apache/lucene/codecs/uniformsplit/TestFSTDictionary.java +++ b/lucene/codecs/src/test/org/apache/lucene/codecs/uniformsplit/TestFSTDictionary.java @@ -22,7 +22,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; -import java.util.stream.Collectors; import org.apache.lucene.store.ByteBuffersDataOutput; import org.apache.lucene.tests.codecs.uniformsplit.Rot13CypherTestUtil; import org.apache.lucene.tests.util.LuceneTestCase; @@ -107,7 +106,7 @@ public void testCommonPrefixes() throws Exception { for (int i = 0; i < blockFPs.length; i++) { blockFPs[i] = i; } - List blockKeys = vocab.stream().map(BytesRef::new).collect(Collectors.toList()); + List blockKeys = vocab.stream().map(BytesRef::new).toList(); FSTDictionary indexDictionary = createFSTDictionary(blockKeys, blockFPs); IndexDictionary.Browser browser = indexDictionary.browser(); for (int i = 0; i < vocab.size(); i++) { diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/uniformsplit/TestTermBytes.java b/lucene/codecs/src/test/org/apache/lucene/codecs/uniformsplit/TestTermBytes.java index a5f909761204..ea178bc8ac1a 100644 --- a/lucene/codecs/src/test/org/apache/lucene/codecs/uniformsplit/TestTermBytes.java +++ b/lucene/codecs/src/test/org/apache/lucene/codecs/uniformsplit/TestTermBytes.java @@ -144,15 +144,14 @@ private void validateExpectedSuffix(String[][] vocab) { } private void validateExpectedSuffix(Map vocab) { - List src = - vocab.keySet().stream().sorted().map(BytesRef::new).collect(Collectors.toList()); + List src = vocab.keySet().stream().sorted().map(BytesRef::new).toList(); List output = 
compressPrefixes(src); validateMapList( vocab, - src.stream().map(BytesRef::utf8ToString).collect(Collectors.toList()), + src.stream().map(BytesRef::utf8ToString).toList(), output.stream() .map(e -> e.getSuffixOffset() + createSuffixBytes(e).utf8ToString()) - .collect(Collectors.toList())); + .toList()); } private BytesRef createSuffixBytes(TermBytes termBytes) { @@ -167,21 +166,19 @@ private void validateExpectedMDP(String[][] vocab) { } private void validateExpectedMDP(Map vocab) { - List src = - vocab.keySet().stream().sorted().map(BytesRef::new).collect(Collectors.toList()); + List src = vocab.keySet().stream().sorted().map(BytesRef::new).toList(); List output = compressPrefixes(src); validateMapList( vocab, - src.stream().map(BytesRef::utf8ToString).collect(Collectors.toList()), + src.stream().map(BytesRef::utf8ToString).toList(), output.stream() .map(e -> new BytesRef(e.getTerm().bytes, 0, e.getMdpLength()).utf8ToString()) - .collect(Collectors.toList())); + .toList()); } private void validateIncrementalDecoding(Map vocab) { BytesRef previous = new BytesRef(80); - List src = - vocab.keySet().stream().sorted().map(BytesRef::new).collect(Collectors.toList()); + List src = vocab.keySet().stream().sorted().map(BytesRef::new).toList(); List output = compressPrefixes(src); for (int i = 0; i < src.size(); i++) { diff --git a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java index 68a2ce1cc26d..417fa442be49 100644 --- a/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java +++ b/lucene/core/src/java/org/apache/lucene/index/IndexWriter.java @@ -383,8 +383,8 @@ public synchronized void close() throws IOException { // synced to prevent doubl private final Deque pendingMerges = new ArrayDeque<>(); private final Set runningMerges = new HashSet<>(); private final List mergeExceptions = new ArrayList<>(); + private final Merges merges = new Merges(); private long mergeGen; - private Merges merges = new Merges(); private boolean didMessageState; private final AtomicInteger flushCount = new AtomicInteger(); private final AtomicInteger flushDeletesCount = new AtomicInteger(); @@ -657,7 +657,7 @@ DirectoryReader getReader(boolean applyAllDeletes, boolean writeAllDeletes) thro sr.close(); } }) - .collect(Collectors.toList())); + .toList()); } }; } @@ -6616,7 +6616,7 @@ public void setIndexWriterMaxDocs(int limit) { @Override public FieldInfosBuilder newFieldInfosBuilder(String softDeletesFieldName) { return new FieldInfosBuilder() { - private FieldInfos.Builder builder = + private final FieldInfos.Builder builder = new FieldInfos.Builder(new FieldInfos.FieldNumbers(softDeletesFieldName)); @Override diff --git a/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java b/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java index 0b7a2cb72962..f10e0fc3199c 100644 --- a/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java +++ b/lucene/core/src/java/org/apache/lucene/index/MergePolicy.java @@ -83,7 +83,7 @@ public enum PauseReason { PAUSED, /** Other reason. */ OTHER - }; + } private final ReentrantLock pauseLock = new ReentrantLock(); private final Condition pausing = pauseLock.newCondition(); @@ -103,7 +103,7 @@ public enum PauseReason { /** Creates a new merge progress info. */ public OneMergeProgress() { // Place all the pause reasons in there immediately so that we can simply update values. 
- pauseTimesNS = new EnumMap(PauseReason.class); + pauseTimesNS = new EnumMap<>(PauseReason.class); for (PauseReason p : PauseReason.values()) { pauseTimesNS.put(p, new AtomicLong()); } @@ -170,8 +170,7 @@ public void wakeup() { /** Returns pause reasons and associated times in nanoseconds. */ public Map getPauseTimes() { Set> entries = pauseTimesNS.entrySet(); - return entries.stream() - .collect(Collectors.toMap((e) -> e.getKey(), (e) -> e.getValue().get())); + return entries.stream().collect(Collectors.toMap(Entry::getKey, (e) -> e.getValue().get())); } final void setMergeThread(Thread owner) { @@ -223,7 +222,7 @@ public static class OneMerge { * @param segments List of {@link SegmentCommitInfo}s to be merged. */ public OneMerge(List segments) { - if (0 == segments.size()) { + if (segments.isEmpty()) { throw new RuntimeException("segments must include at least one segment"); } // clone the list, as the in list may be based off original SegmentInfos and may be modified @@ -275,7 +274,7 @@ public void mergeInit() throws IOException { /** * Called by {@link IndexWriter} after the merge is done and all readers have been closed. * - * @param success true iff the merge finished successfully ie. was committed + * @param success true iff the merge finished successfully i.e. was committed * @param segmentDropped true iff the merged segment was dropped since it was fully deleted */ public void mergeFinished(boolean success, boolean segmentDropped) throws IOException {} @@ -284,7 +283,7 @@ public void mergeFinished(boolean success, boolean segmentDropped) throws IOExce final void close( boolean success, boolean segmentDropped, IOConsumer readerConsumer) throws IOException { - // this method is final to ensure we never miss a super call to cleanup and finish the merge + // this method is final to ensure we never miss a super call to clean up and finish the merge if (mergeCompleted.complete(success) == false) { throw new IllegalStateException("merge has already finished"); } @@ -524,10 +523,7 @@ public String toString() { CompletableFuture getMergeCompletedFutures() { return CompletableFuture.allOf( - merges.stream() - .map(m -> m.mergeCompleted) - .collect(Collectors.toList()) - .toArray(CompletableFuture[]::new)); + merges.stream().map(m -> m.mergeCompleted).toArray(CompletableFuture[]::new)); } /** Waits, until interrupted, for all merges to complete. */ @@ -763,7 +759,7 @@ public boolean useCompoundFile( } /** - * Return the byte size of the provided {@link SegmentCommitInfo}, pro-rated by percentage of + * Return the byte size of the provided {@link SegmentCommitInfo}, prorated by percentage of * non-deleted documents is set. */ protected long size(SegmentCommitInfo info, MergeContext mergeContext) throws IOException { diff --git a/lucene/core/src/java/org/apache/lucene/index/ReaderPool.java b/lucene/core/src/java/org/apache/lucene/index/ReaderPool.java index 72e077f7e020..1bac886c7b4c 100644 --- a/lucene/core/src/java/org/apache/lucene/index/ReaderPool.java +++ b/lucene/core/src/java/org/apache/lucene/index/ReaderPool.java @@ -30,7 +30,6 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.LongSupplier; -import java.util.stream.Collectors; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.apache.lucene.util.CollectionUtil; @@ -177,7 +176,7 @@ boolean isReaderPoolingEnabled() { /** * Releases the {@link ReadersAndUpdates}. 
This should only be called if the {@link - * #get(SegmentCommitInfo, boolean)} is called with the create paramter set to true. + * #get(SegmentCommitInfo, boolean)} is called with the 'create' parameter set to true. * * @return true if any files were written by this release call. */ @@ -276,7 +275,7 @@ boolean writeDocValuesUpdatesForMerge(List infos) throws IOEx /** * Returns a list of all currently maintained ReadersAndUpdates sorted by it's ram consumption * largest to smallest. This list can also contain readers that don't consume any ram at this - * point ie. don't have any updates buffered. + * point i.e. don't have any updates buffered. */ synchronized List getReadersByRam() { class RamRecordingHolder { @@ -295,19 +294,17 @@ class RamRecordingHolder { } readersByRam = new ArrayList<>(readerMap.size()); for (ReadersAndUpdates rld : readerMap.values()) { - // we have to record the ram usage once and then sort - // since the ram usage can change concurrently and that will confuse the sort or hit an + // we have to record the RAM usage once and then sort + // since the RAM usage can change concurrently and that will confuse the sort or hit an // assertion // the we can acquire here is not enough we would need to lock all ReadersAndUpdates to make - // sure it doesn't - // change + // sure it doesn't change readersByRam.add(new RamRecordingHolder(rld)); } } // Sort this outside of the lock by largest ramBytesUsed: CollectionUtil.introSort(readersByRam, (a, b) -> Long.compare(b.ramBytesUsed, a.ramBytesUsed)); - return Collections.unmodifiableList( - readersByRam.stream().map(h -> h.updates).collect(Collectors.toList())); + return readersByRam.stream().map(h -> h.updates).toList(); } /** Remove all our references to readers, and commits any pending changes. 
*/ @@ -334,7 +331,7 @@ synchronized void dropAll() throws IOException { priorE = IOUtils.useOrSuppress(priorE, t); } } - assert readerMap.size() == 0; + assert readerMap.isEmpty(); if (priorE != null) { throw IOUtils.rethrowAlways(priorE); } diff --git a/lucene/core/src/java/org/apache/lucene/search/ExactPhraseMatcher.java b/lucene/core/src/java/org/apache/lucene/search/ExactPhraseMatcher.java index f6bcadb4b8f4..79c7d2f418f5 100644 --- a/lucene/core/src/java/org/apache/lucene/search/ExactPhraseMatcher.java +++ b/lucene/core/src/java/org/apache/lucene/search/ExactPhraseMatcher.java @@ -22,7 +22,6 @@ import java.util.Collections; import java.util.Iterator; import java.util.List; -import java.util.stream.Collectors; import org.apache.lucene.index.Impact; import org.apache.lucene.index.Impacts; import org.apache.lucene.index.ImpactsEnum; @@ -62,8 +61,7 @@ public ExactPhraseMatcher( super(matchCost); final DocIdSetIterator approximation = - ConjunctionUtils.intersectIterators( - Arrays.stream(postings).map(p -> p.postings).collect(Collectors.toList())); + ConjunctionUtils.intersectIterators(Arrays.stream(postings).map(p -> p.postings).toList()); final ImpactsSource impactsSource = mergeImpacts(Arrays.stream(postings).map(p -> p.impacts).toArray(ImpactsEnum[]::new)); @@ -82,8 +80,7 @@ public ExactPhraseMatcher( for (PhraseQuery.PostingsAndFreq posting : postings) { postingsAndPositions.add(new PostingsAndPosition(posting.postings, posting.position)); } - this.postings = - postingsAndPositions.toArray(new PostingsAndPosition[postingsAndPositions.size()]); + this.postings = postingsAndPositions.toArray(new PostingsAndPosition[0]); } @Override @@ -204,7 +201,7 @@ static ImpactsSource mergeImpacts(ImpactsEnum[] impactsEnums) { return new ImpactsSource() { - class SubIterator { + static class SubIterator { final Iterator iterator; Impact current; @@ -263,7 +260,7 @@ public List getImpacts(int level) { final int docIdUpTo = getDocIdUpTo(level); PriorityQueue pq = - new PriorityQueue(impacts.length) { + new PriorityQueue<>(impacts.length) { @Override protected boolean lessThan(SubIterator a, SubIterator b) { return a.current.freq < b.current.freq; diff --git a/lucene/core/src/java/org/apache/lucene/search/MatchesUtils.java b/lucene/core/src/java/org/apache/lucene/search/MatchesUtils.java index ceebc5454e05..99f36ea5f7f9 100644 --- a/lucene/core/src/java/org/apache/lucene/search/MatchesUtils.java +++ b/lucene/core/src/java/org/apache/lucene/search/MatchesUtils.java @@ -24,7 +24,6 @@ import java.util.Iterator; import java.util.List; import java.util.Objects; -import java.util.stream.Collectors; import java.util.stream.StreamSupport; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.util.BytesRefIterator; @@ -62,12 +61,11 @@ public Iterator iterator() { /** Amalgamate a collection of {@link Matches} into a single object */ public static Matches fromSubMatches(List subMatches) { - if (subMatches == null || subMatches.size() == 0) { + if (subMatches == null || subMatches.isEmpty()) { return null; } - List sm = - subMatches.stream().filter(m -> m != MATCH_WITH_NO_TERMS).collect(Collectors.toList()); - if (sm.size() == 0) { + List sm = subMatches.stream().filter(m -> m != MATCH_WITH_NO_TERMS).toList(); + if (sm.isEmpty()) { return MATCH_WITH_NO_TERMS; } if (sm.size() == 1) { diff --git a/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseMatcher.java b/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseMatcher.java index ca70a52fbb76..68e43642707e 100644 --- 
a/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseMatcher.java +++ b/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseMatcher.java @@ -25,7 +25,6 @@ import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; -import java.util.stream.Collectors; import org.apache.lucene.index.Impact; import org.apache.lucene.index.Impacts; import org.apache.lucene.index.ImpactsSource; @@ -100,8 +99,7 @@ public SloppyPhraseMatcher( } approximation = - ConjunctionUtils.intersectIterators( - Arrays.stream(postings).map(p -> p.postings).collect(Collectors.toList())); + ConjunctionUtils.intersectIterators(Arrays.stream(postings).map(p -> p.postings).toList()); // What would be a good upper bound of the sloppy frequency? A sum of the // sub frequencies would be correct, but it is usually so much higher than // the actual sloppy frequency that it doesn't help skip irrelevant @@ -334,8 +332,7 @@ private PhrasePositions lesser(PhrasePositions pp, PhrasePositions pp2) { private int collide(PhrasePositions pp) { int tpPos = tpPos(pp); PhrasePositions[] rg = rptGroups[pp.rptGroup]; - for (int i = 0; i < rg.length; i++) { - PhrasePositions pp2 = rg[i]; + for (PhrasePositions pp2 : rg) { if (pp2 != pp && tpPos(pp2) == tpPos) { return pp2.rptInd; } @@ -511,13 +508,7 @@ private boolean initFirstTime() throws IOException { */ private void sortRptGroups(ArrayList> rgs) { rptGroups = new PhrasePositions[rgs.size()][]; - Comparator cmprtr = - new Comparator() { - @Override - public int compare(PhrasePositions pp1, PhrasePositions pp2) { - return pp1.offset - pp2.offset; - } - }; + Comparator cmprtr = Comparator.comparingInt(pp -> pp.offset); for (int i = 0; i < rptGroups.length; i++) { PhrasePositions[] rg = rgs.get(i).toArray(new PhrasePositions[0]); Arrays.sort(rg, cmprtr); @@ -567,7 +558,7 @@ private ArrayList> gatherRptGroups( HashMap tg = termGroups(rptTerms, bb); HashSet distinctGroupIDs = new HashSet<>(tg.values()); for (int i = 0; i < distinctGroupIDs.size(); i++) { - tmp.add(new HashSet()); + tmp.add(new HashSet<>()); } for (PhrasePositions pp : rpp) { for (Term t : pp.terms) { @@ -586,8 +577,8 @@ private ArrayList> gatherRptGroups( return res; } - /** Actual position in doc of a PhrasePosition, relies on that position = tpPos - offset) */ - private final int tpPos(PhrasePositions pp) { + /** Actual position in doc of a PhrasePosition, relies on that position = tpPos - offset */ + private int tpPos(PhrasePositions pp) { return pp.position + pp.offset; } diff --git a/lucene/core/src/java/org/apache/lucene/search/SynonymQuery.java b/lucene/core/src/java/org/apache/lucene/search/SynonymQuery.java index f44ceaf3ac89..807f41359c42 100644 --- a/lucene/core/src/java/org/apache/lucene/search/SynonymQuery.java +++ b/lucene/core/src/java/org/apache/lucene/search/SynonymQuery.java @@ -24,7 +24,6 @@ import java.util.Iterator; import java.util.List; import java.util.Objects; -import java.util.stream.Collectors; import org.apache.lucene.index.Impact; import org.apache.lucene.index.Impacts; import org.apache.lucene.index.ImpactsEnum; @@ -101,7 +100,7 @@ public Builder addTerm(BytesRef term, float boost) { /** Builds the {@link SynonymQuery}. 
*/ public SynonymQuery build() { - Collections.sort(terms, Comparator.comparing(a -> a.term)); + terms.sort(Comparator.comparing(a -> a.term)); return new SynonymQuery(terms.toArray(new TermAndBoost[0]), field); } } @@ -117,8 +116,7 @@ private SynonymQuery(TermAndBoost[] terms, String field) { } public List getTerms() { - return Collections.unmodifiableList( - Arrays.stream(terms).map(t -> new Term(field, t.term)).collect(Collectors.toList())); + return Arrays.stream(terms).map(t -> new Term(field, t.term)).toList(); } @Override @@ -232,8 +230,7 @@ public Matches matches(LeafReaderContext context, int doc) throws IOException { if (indexTerms == null) { return super.matches(context, doc); } - List termList = - Arrays.stream(terms).map(t -> new Term(field, t.term)).collect(Collectors.toList()); + List termList = Arrays.stream(terms).map(t -> new Term(field, t.term)).toList(); return MatchesUtils.forField( field, () -> DisjunctionMatchesIterator.fromTerms(context, doc, getQuery(), field, termList)); @@ -357,7 +354,7 @@ static ImpactsSource mergeImpacts(ImpactsEnum[] impactsEnums, float[] boosts) { assert impactsEnums.length == boosts.length; return new ImpactsSource() { - class SubIterator { + static class SubIterator { final Iterator iterator; int previousFreq; Impact current; @@ -439,7 +436,7 @@ public List getImpacts(int level) { .map( impact -> new Impact((int) Math.ceil(impact.freq * boost), impact.norm)) - .collect(Collectors.toList()); + .toList(); } else { impactList = impacts[i].getImpacts(impactsLevel); } diff --git a/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java b/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java index f36a651b16b6..22da71384ffb 100644 --- a/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java +++ b/lucene/core/src/java/org/apache/lucene/store/FSDirectory.java @@ -98,8 +98,7 @@ public abstract class FSDirectory extends BaseDirectory { * Maps files that we are trying to delete (or we tried already but failed) before attempting to * delete that key. */ - private final Set pendingDeletes = - Collections.newSetFromMap(new ConcurrentHashMap()); + private final Set pendingDeletes = ConcurrentHashMap.newKeySet(); private final AtomicInteger opsSinceLastDelete = new AtomicInteger(); @@ -186,7 +185,7 @@ private static String[] listAll(Path dir, Set skipNames) throws IOExcept } } - String[] array = entries.toArray(new String[entries.size()]); + String[] array = entries.toArray(new String[0]); // Directory.listAll javadocs state that we sort the results here, so we don't let filesystem // specifics leak out of this abstraction: Arrays.sort(array); @@ -214,7 +213,7 @@ public IndexOutput createOutput(String name, IOContext context) throws IOExcepti maybeDeletePendingFiles(); // If this file was pending delete, we are now bringing it back to life: if (pendingDeletes.remove(name)) { - privateDeleteFile(name, true); // try again to delete it - this is best effort + privateDeleteFile(name, true); // try again to delete it - this is the best effort pendingDeletes.remove(name); // watch out - if the delete fails it put } return new FSIndexOutput(name); @@ -266,8 +265,8 @@ public void rename(String source, String dest) throws IOException { } maybeDeletePendingFiles(); if (pendingDeletes.remove(dest)) { - privateDeleteFile(dest, true); // try again to delete it - this is best effort - pendingDeletes.remove(dest); // watch out if the delete fails it's back in here. 
+ privateDeleteFile(dest, true); // try again to delete it - this is the best effort + pendingDeletes.remove(dest); // watch out if the delete fails, it's back in here } Files.move(directory.resolve(source), directory.resolve(dest), StandardCopyOption.ATOMIC_MOVE); } diff --git a/lucene/core/src/test/org/apache/lucene/analysis/tokenattributes/TestBytesRefAttImpl.java b/lucene/core/src/test/org/apache/lucene/analysis/tokenattributes/TestBytesRefAttImpl.java index 3e1f42762ded..8a590bcdfb78 100644 --- a/lucene/core/src/test/org/apache/lucene/analysis/tokenattributes/TestBytesRefAttImpl.java +++ b/lucene/core/src/test/org/apache/lucene/analysis/tokenattributes/TestBytesRefAttImpl.java @@ -16,7 +16,7 @@ */ package org.apache.lucene.analysis.tokenattributes; -import java.util.stream.Stream; +import java.util.Arrays; import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.util.AttributeImpl; import org.apache.lucene.util.BytesRef; @@ -49,7 +49,7 @@ public static T assertCopyIsEqual(T att) throws Except public void testLucene9856() { assertTrue( "BytesTermAttributeImpl must explicitly declare to implement TermToBytesRefAttribute", - Stream.of(BytesTermAttributeImpl.class.getInterfaces()) - .anyMatch(TermToBytesRefAttribute.class::equals)); + Arrays.asList(BytesTermAttributeImpl.class.getInterfaces()) + .contains(TermToBytesRefAttribute.class)); } } diff --git a/lucene/core/src/test/org/apache/lucene/document/TestLongHashSet.java b/lucene/core/src/test/org/apache/lucene/document/TestLongHashSet.java index 71d9291c0efb..510d68d7a2b1 100644 --- a/lucene/core/src/test/org/apache/lucene/document/TestLongHashSet.java +++ b/lucene/core/src/test/org/apache/lucene/document/TestLongHashSet.java @@ -128,7 +128,7 @@ public void testRandom() { if (values.length > 0 && random().nextBoolean()) { values[values.length / 2] = Long.MIN_VALUE; } - Set set1 = LongStream.of(values).mapToObj(Long::valueOf).collect(Collectors.toSet()); + Set set1 = LongStream.of(values).boxed().collect(Collectors.toSet()); Arrays.sort(values); LongHashSet set2 = new LongHashSet(values); assertEquals(set1, set2); diff --git a/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java b/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java index 95964ff814ad..06c4b5c97256 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java @@ -28,7 +28,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.StringField; @@ -412,14 +411,12 @@ public void testInvalidMaxMergeCountAndThreads() throws Exception { ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); expectThrows( IllegalArgumentException.class, - () -> { - cms.setMaxMergesAndThreads(ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS, 3); - }); + () -> + cms.setMaxMergesAndThreads(ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS, 3)); expectThrows( IllegalArgumentException.class, - () -> { - cms.setMaxMergesAndThreads(3, ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS); - }); + () -> + cms.setMaxMergesAndThreads(3, ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS)); } public void testLiveMaxMergeCount() throws Exception { @@ -653,8 
+650,7 @@ public void message(String component, String message) { @Override public boolean isEnabled(String component) { - if (component.equals("MS")) return true; - return false; + return component.equals("MS"); } }); iwc.setMaxBufferedDocs(2); @@ -681,9 +677,7 @@ public boolean isEnabled(String component) { for (Thread t : mergeThreadSet) { String name = t.getName(); List threadMsgs = - messages.stream() - .filter(line -> line.startsWith("merge thread " + name)) - .collect(Collectors.toList()); + messages.stream().filter(line -> line.startsWith("merge thread " + name)).toList(); assertTrue( "Expected: a value equal to or greater than 3, got:" + threadMsgs.size() @@ -736,15 +730,13 @@ public void testResetToAutoDefault() throws Exception { expectThrows( IllegalArgumentException.class, - () -> { - cms.setMaxMergesAndThreads(ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS, 4); - }); + () -> + cms.setMaxMergesAndThreads(ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS, 4)); expectThrows( IllegalArgumentException.class, - () -> { - cms.setMaxMergesAndThreads(4, ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS); - }); + () -> + cms.setMaxMergesAndThreads(4, ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS)); cms.setMaxMergesAndThreads( ConcurrentMergeScheduler.AUTO_DETECT_MERGES_AND_THREADS, diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java b/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java index 4f6da4a767bc..39166971e7e8 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestDemoParallelLeafReader.java @@ -23,7 +23,6 @@ import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.Comparator; import java.util.HashSet; import java.util.List; @@ -75,7 +74,7 @@ *

Each per-segment index lives in a private directory next to the main index, and they are * deleted once their segments are removed from the index. They are "volatile", meaning if e.g. the * index is replicated to another machine, it's OK to not copy parallel segments indices, since they - * will just be regnerated (at a cost though). + * will just be regenerated (at a cost though). */ // @SuppressSysoutChecks(bugUrl="we print stuff") @@ -97,8 +96,7 @@ abstract static class ReindexingReader implements Closeable { private final Path segsPath; /** Which segments have been closed, but their parallel index is not yet removed. */ - private final Set closedSegments = - Collections.newSetFromMap(new ConcurrentHashMap()); + private final Set closedSegments = ConcurrentHashMap.newKeySet(); /** Holds currently open parallel readers for each segment. */ private final Map parallelReaders = new ConcurrentHashMap<>(); @@ -154,8 +152,8 @@ public ReindexingReader(Path root) throws IOException { protected abstract IndexWriterConfig getIndexWriterConfig() throws IOException; /** - * Optional method to validate that the provided parallell reader in fact reflects the changes - * in schemaGen. + * Optional method to validate that the provided parallel reader in fact reflects the changes in + * schemaGen. */ protected void checkParallelReader(LeafReader reader, LeafReader parallelReader, long schemaGen) throws IOException {} @@ -287,7 +285,7 @@ public void close() throws IOException { // Make sure we deleted all parallel indices for segments that are no longer in the main index: private void assertNoExtraSegments() throws IOException { - Set liveIDs = new HashSet(); + Set liveIDs = new HashSet<>(); for (SegmentCommitInfo info : SegmentInfos.readLatestCommit(indexDir)) { String idString = StringHelper.idToString(info.info.getId()); liveIDs.add(idString); } @@ -585,7 +583,7 @@ private void pruneOldSegments(boolean removeOldGens) throws IOException { SegmentInfos lastCommit = SegmentInfos.readLatestCommit(indexDir); if (DEBUG) System.out.println("TEST: prune"); - Set liveIDs = new HashSet(); + Set liveIDs = new HashSet<>(); for (SegmentCommitInfo info : lastCommit) { String idString = StringHelper.idToString(info.info.getId()); liveIDs.add(idString); @@ -790,7 +788,7 @@ protected void reindex( throws IOException { IndexWriterConfig iwc = newIndexWriterConfig(); - // The order of our docIDs must precisely matching incoming reader: + // The order of our docIDs must precisely match incoming reader: iwc.setMergePolicy(new LogByteSizeMergePolicy()); IndexWriter w = new IndexWriter(parallelDir, iwc); int maxDoc = reader.maxDoc(); @@ -847,7 +845,7 @@ protected void reindex( throws IOException { IndexWriterConfig iwc = newIndexWriterConfig(); - // The order of our docIDs must precisely matching incoming reader: + // The order of our docIDs must precisely match incoming reader: iwc.setMergePolicy(new LogByteSizeMergePolicy()); IndexWriter w = new IndexWriter(parallelDir, iwc); int maxDoc = reader.maxDoc(); @@ -957,7 +955,7 @@ protected IndexWriterConfig getIndexWriterConfig() throws IOException { tmp.setFloorSegmentMB(.01); iwc.setMergePolicy(tmp); if (TEST_NIGHTLY) { - // during nightly tests, we might use too many files if we arent careful + // during nightly tests, we might use too many files if we aren't careful iwc.setUseCompoundFile(true); } return iwc; } @@ -977,7 +975,7 @@ protected void reindex( throws IOException { IndexWriterConfig iwc = newIndexWriterConfig(); - // The order of our docIDs must precisely matching
incoming reader: + // The order of our docIDs must precisely match incoming reader: iwc.setMergePolicy(new LogByteSizeMergePolicy()); IndexWriter w = new IndexWriter(parallelDir, iwc); int maxDoc = reader.maxDoc(); @@ -1596,14 +1594,7 @@ private static void testPointRangeQuery(IndexSearcher s) throws IOException { assertTrue(value <= max); } - Arrays.sort( - hits.scoreDocs, - new Comparator() { - @Override - public int compare(ScoreDoc a, ScoreDoc b) { - return a.doc - b.doc; - } - }); + Arrays.sort(hits.scoreDocs, Comparator.comparingInt(a -> a.doc)); NumericDocValues numbers = MultiDocValues.getNumericValues(s.getIndexReader(), "number"); for (ScoreDoc hit : hits.scoreDocs) { diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java index 0390edb36cf3..59c008ef2879 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java @@ -869,7 +869,7 @@ public void testDeadlock() throws Exception { dir.close(); } - private class IndexerThreadInterrupt extends Thread { + private static class IndexerThreadInterrupt extends Thread { volatile boolean failed; volatile boolean finish; @@ -4333,7 +4333,7 @@ public void testRandomOperations() throws Exception { IndexWriterConfig iwc = newIndexWriterConfig(); iwc.setMergePolicy( new FilterMergePolicy(newMergePolicy()) { - boolean keepFullyDeletedSegment = random().nextBoolean(); + final boolean keepFullyDeletedSegment = random().nextBoolean(); @Override public boolean keepFullyDeletedSegment(IOSupplier readerIOSupplier) { @@ -4759,7 +4759,7 @@ public MergeSpecification findFullFlushMerges( List fullyDeletedSegments = segmentInfos.asList().stream() .filter(s -> s.info.maxDoc() - s.getDelCount() == 0) - .collect(Collectors.toList()); + .toList(); if (fullyDeletedSegments.isEmpty()) { return null; } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java index f70f14a9f1fc..51b502a122b4 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java @@ -18,7 +18,6 @@ import static com.carrotsearch.randomizedtesting.RandomizedTest.randomBoolean; import static com.carrotsearch.randomizedtesting.RandomizedTest.randomLongBetween; -import static java.util.stream.Collectors.toList; import java.io.IOException; import java.util.ArrayList; @@ -410,7 +409,7 @@ public void testAddIndexesAndDoDeletesThreads() throws Throwable { addDirThreads.close(true); - assertTrue(addDirThreads.failures.size() == 0); + assertTrue(addDirThreads.failures.isEmpty()); TestUtil.checkIndex(mainDir); @@ -736,11 +735,7 @@ public void testAfterClose() throws Exception { IndexSearcher searcher = newSearcher(r); assertEquals(100, searcher.count(q)); - expectThrows( - AlreadyClosedException.class, - () -> { - DirectoryReader.openIfChanged(r); - }); + expectThrows(AlreadyClosedException.class, () -> DirectoryReader.openIfChanged(r)); r.close(); dir1.close(); @@ -769,7 +764,7 @@ public void testDuringAddIndexes() throws Exception { DirectoryReader r = DirectoryReader.open(writer); final int numIterations = 10; - final List excs = Collections.synchronizedList(new ArrayList()); + final List excs = Collections.synchronizedList(new ArrayList<>()); // Only one thread can addIndexes at a time, because // IndexWriter acquires a 
write lock in each directory: @@ -812,8 +807,8 @@ public void run() { } } - for (int i = 0; i < threads.length; i++) { - threads[i].join(); + for (Thread thread : threads) { + thread.join(); } // final check DirectoryReader r2 = DirectoryReader.openIfChanged(r); @@ -853,7 +848,7 @@ public void testDuringAddDelete() throws Exception { newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy(2)); if (TEST_NIGHTLY) { // if we have a ton of iterations we need to make sure we don't do unnecessary - // extra flushing otherwise we will timeout on nightly + // extra flushing, otherwise we will time out on nightly iwc.setRAMBufferSizeMB(IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB); iwc.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH); } @@ -1148,11 +1143,7 @@ public void eval(MockDirectoryWrapper dir) throws IOException { // other NRT reader, since it is already marked closed! for (int i = 0; i < 2; i++) { shouldFail.set(true); - expectThrows( - FakeIOException.class, - () -> { - DirectoryReader.open(writer).close(); - }); + expectThrows(FakeIOException.class, () -> DirectoryReader.open(writer).close()); } writer.close(); @@ -1214,7 +1205,7 @@ public void testIndexReaderWriterWithLeafSorter() throws IOException { final long MISSING_VALUE = ASC_SORT ? Long.MAX_VALUE : Long.MIN_VALUE; // missing values at the end - // create a comparator that sort leaf readers according with + // create a comparator that sorts leaf readers according to the min value (asc sort) or max value (desc sort) of its points Comparator leafSorter = Comparator.comparingLong( @@ -1347,13 +1338,9 @@ public void testIndexReaderWriterWithLeafSorter() throws IOException { // provided leafSorter private static void assertLeavesSorted( DirectoryReader reader, Comparator leafSorter) { - List lrs = - reader.leaves().stream().map(LeafReaderContext::reader).collect(toList()); + List lrs = reader.leaves().stream().map(LeafReaderContext::reader).toList(); List expectedSortedlrs = - reader.leaves().stream() - .map(LeafReaderContext::reader) - .sorted(leafSorter) - .collect(toList()); + reader.leaves().stream().map(LeafReaderContext::reader).sorted(leafSorter).toList(); assertEquals(expectedSortedlrs, lrs); } } diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermsHashPerField.java b/lucene/core/src/test/org/apache/lucene/index/TestTermsHashPerField.java index 9c9d39f70928..a6bcea369d28 100644 --- a/lucene/core/src/test/org/apache/lucene/index/TestTermsHashPerField.java +++ b/lucene/core/src/test/org/apache/lucene/index/TestTermsHashPerField.java @@ -20,7 +20,7 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; import java.io.IOException; -import java.util.Arrays; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -257,21 +257,19 @@ class Posting { RandomStrings.randomRealisticUnicodeOfCodepointLengthBetween(random(), 1, 10); postingMap.putIfAbsent(newBytesRef(randomString), new Posting()); } - List bytesRefs = Arrays.asList(postingMap.keySet().toArray(new BytesRef[0])); + List bytesRefs = new ArrayList<>(postingMap.keySet()); Collections.sort(bytesRefs); int numDocs = 1 + random().nextInt(200); int termOrd = 0; - for (int i = 0; i < numDocs; i++) { + for (int doc = 0; doc < numDocs; doc++) { int numTerms = 1 + random().nextInt(200); - int doc = i; for (int j = 0; j < numTerms; j++) { BytesRef ref = RandomPicks.randomFrom(random(), bytesRefs); Posting
posting = postingMap.get(ref); if (posting.termId == -1) { posting.termId = termOrd++; } - posting.docAndFreq.putIfAbsent(doc, 0); - posting.docAndFreq.compute(doc, (key, oldVal) -> oldVal + 1); + posting.docAndFreq.merge(doc, 1, Integer::sum); hash.add(ref, doc); } hash.finish(); diff --git a/lucene/core/src/test/org/apache/lucene/search/TestLRUQueryCache.java b/lucene/core/src/test/org/apache/lucene/search/TestLRUQueryCache.java index fddeb96f10f2..905263c74dda 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestLRUQueryCache.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestLRUQueryCache.java @@ -42,7 +42,6 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.function.IntConsumer; -import java.util.stream.Collectors; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field.Store; import org.apache.lucene.document.LongPoint; @@ -198,7 +197,7 @@ public Integer reduce(Collection collectors) .map( filterCollector -> (DummyTotalHitCountCollector) filterCollector.in) - .collect(Collectors.toList())); + .toList()); } }); assertEquals(totalHits2, totalHits1); @@ -555,7 +554,7 @@ public long accumulateObject( } /** DummyQuery with Accountable, pretending to be a memory-eating query */ - private class AccountableDummyQuery extends DummyQuery implements Accountable { + private static class AccountableDummyQuery extends DummyQuery implements Accountable { @Override public long ramBytesUsed() { @@ -841,7 +840,7 @@ public void testFineGrainedStats() throws IOException { @Override protected void onHit(Object readerCoreKey, Query query) { super.onHit(readerCoreKey, query); - switch (indexId.get(readerCoreKey).intValue()) { + switch (indexId.get(readerCoreKey)) { case 1: hitCount1.incrementAndGet(); break; @@ -856,7 +855,7 @@ protected void onHit(Object readerCoreKey, Query query) { @Override protected void onMiss(Object readerCoreKey, Query query) { super.onMiss(readerCoreKey, query); - switch (indexId.get(readerCoreKey).intValue()) { + switch (indexId.get(readerCoreKey)) { case 1: missCount1.incrementAndGet(); break; @@ -1331,11 +1330,7 @@ public int frequency(final Query query) { public void onUse(final Query query) { AtomicInteger count; synchronized (counts) { - count = counts.get(query); - if (count == null) { - count = new AtomicInteger(); - counts.put(query, count); - } + count = counts.computeIfAbsent(query, k -> new AtomicInteger()); } count.incrementAndGet(); } @@ -1388,8 +1383,8 @@ public void testPropagateBulkScorer() throws IOException { weight = new WeightWrapper(weight, scorerCalled, bulkScorerCalled); weight = cache.doCache(weight, NEVER_CACHE); weight.bulkScorer(leaf); - assertEquals(true, bulkScorerCalled.get()); - assertEquals(false, scorerCalled.get()); + assertTrue(bulkScorerCalled.get()); + assertFalse(scorerCalled.get()); assertEquals(0, cache.getCacheCount()); searcher.getIndexReader().close(); @@ -1779,7 +1774,7 @@ public void testDocValuesUpdatesDontBreakCache() throws IOException { assertEquals(2, searcher.count(query)); assertEquals(2, query.scorerCreatedCount.get()); // both segments cached - w.updateNumericDocValue(new Term("text", "text"), "field", 2l); + w.updateNumericDocValue(new Term("text", "text"), "field", 2L); reader.close(); reader = DirectoryReader.open(w); searcher = diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMultiCollectorManager.java b/lucene/core/src/test/org/apache/lucene/search/TestMultiCollectorManager.java index 
27543e1ff9b5..b25f9afaa721 100644 --- a/lucene/core/src/test/org/apache/lucene/search/TestMultiCollectorManager.java +++ b/lucene/core/src/test/org/apache/lucene/search/TestMultiCollectorManager.java @@ -25,7 +25,6 @@ import java.util.SortedSet; import java.util.TreeSet; import java.util.function.Predicate; -import java.util.stream.Collectors; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.LeafReaderContext; @@ -53,10 +52,8 @@ public void testCollection() throws IOException { for (int iter = 0; iter < 100; iter++) { int docs = RandomNumbers.randomIntBetween(random(), 1000, 10000); SortedSet expected = generateDocIds(docs, random()); - List expectedEven = - expected.stream().filter(evenPredicate).collect(Collectors.toList()); - List expectedOdd = - expected.stream().filter(oddPredicate).collect(Collectors.toList()); + List expectedEven = expected.stream().filter(evenPredicate).toList(); + List expectedOdd = expected.stream().filter(oddPredicate).toList(); // Test only wrapping one of the collector managers: MultiCollectorManager mcm = new MultiCollectorManager(cm1); @@ -291,7 +288,7 @@ public Object reduce(Collection collectors) throws IOException { private static CollectorManager collectorManager( ScoreMode scoreMode, Class expectedScorer) { - return new CollectorManager() { + return new CollectorManager<>() { @Override public Collector newCollector() throws IOException { diff --git a/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java b/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java index ec8447841cba..bd954f19d3fb 100644 --- a/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java +++ b/lucene/core/src/test/org/apache/lucene/store/TestDirectory.java @@ -46,7 +46,7 @@ public void testDirectInstantiation() throws Exception { if (hasWorkingMMapOnWindows()) { dirs0.add(new MMapDirectory(path)); } - final FSDirectory[] dirs = dirs0.stream().toArray(FSDirectory[]::new); + final FSDirectory[] dirs = dirs0.toArray(FSDirectory[]::new); for (int i = 0; i < dirs.length; i++) { FSDirectory dir = dirs[i]; @@ -58,8 +58,7 @@ public void testDirectInstantiation() throws Exception { out.writeBytes(largeBuffer, largeBuffer.length); out.close(); - for (int j = 0; j < dirs.length; j++) { - FSDirectory d2 = dirs[j]; + for (FSDirectory d2 : dirs) { d2.ensureOpen(); assertTrue(slowFileExists(d2, fname)); assertEquals(1 + largeBuffer.length, d2.fileLength(fname)); @@ -84,19 +83,14 @@ public void testDirectInstantiation() throws Exception { // delete with a different dir dirs[(i + 1) % dirs.length].deleteFile(fname); - for (int j = 0; j < dirs.length; j++) { - FSDirectory d2 = dirs[j]; + for (FSDirectory d2 : dirs) { assertFalse(slowFileExists(d2, fname)); } Lock lock = dir.obtainLock(lockname); for (Directory other : dirs) { - expectThrows( - LockObtainFailedException.class, - () -> { - other.obtainLock(lockname); - }); + expectThrows(LockObtainFailedException.class, () -> other.obtainLock(lockname)); } lock.close(); @@ -106,8 +100,7 @@ public void testDirectInstantiation() throws Exception { lock.close(); } - for (int i = 0; i < dirs.length; i++) { - FSDirectory dir = dirs[i]; + for (FSDirectory dir : dirs) { dir.ensureOpen(); dir.close(); assertFalse(dir.isOpen); @@ -117,18 +110,11 @@ public void testDirectInstantiation() throws Exception { // LUCENE-1468 public void testNotDirectory() throws Throwable { Path path = createTempDir("testnotdir"); - Directory fsDir = new NIOFSDirectory(path); - try { 
+ try (Directory fsDir = new NIOFSDirectory(path)) { IndexOutput out = fsDir.createOutput("afile", newIOContext(random())); out.close(); assertTrue(slowFileExists(fsDir, "afile")); - expectThrows( - IOException.class, - () -> { - new NIOFSDirectory(path.resolve("afile")); - }); - } finally { - fsDir.close(); + expectThrows(IOException.class, () -> new NIOFSDirectory(path.resolve("afile"))); } } diff --git a/lucene/core/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java b/lucene/core/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java index 2298ef82ea3a..aff4fe2e2732 100644 --- a/lucene/core/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java +++ b/lucene/core/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java @@ -71,23 +71,23 @@ public void testBasic() throws IOException { // we should see only fdx,fdt files here String[] files = primaryDir.listAll(); assertTrue(files.length > 0); - for (int x = 0; x < files.length; x++) { - String ext = FileSwitchDirectory.getExtension(files[x]); + for (String file : files) { + String ext = FileSwitchDirectory.getExtension(file); assertTrue(fileExtensions.contains(ext)); } files = secondaryDir.listAll(); assertTrue(files.length > 0); // we should not see fdx,fdt files here - for (int x = 0; x < files.length; x++) { - String ext = FileSwitchDirectory.getExtension(files[x]); + for (String file : files) { + String ext = FileSwitchDirectory.getExtension(file); assertFalse(fileExtensions.contains(ext)); } reader.close(); writer.close(); files = fsd.listAll(); - for (int i = 0; i < files.length; i++) { - assertNotNull(files[i]); + for (String file : files) { + assertNotNull(file); } fsd.close(); } @@ -110,12 +110,8 @@ private Directory newFSSwitchDirectory(Path aDir, Path bDir, Set primary public void testNoDir() throws Throwable { Path primDir = createTempDir("foo"); Path secondDir = createTempDir("bar"); - Directory dir = newFSSwitchDirectory(primDir, secondDir, Collections.emptySet()); - expectThrows( - IndexNotFoundException.class, - () -> { - DirectoryReader.open(dir); - }); + Directory dir = newFSSwitchDirectory(primDir, secondDir, Collections.emptySet()); + expectThrows(IndexNotFoundException.class, () -> DirectoryReader.open(dir)); dir.close(); } @@ -151,7 +147,7 @@ public void testRenameTmpFile() throws IOException { @Override protected Directory getDirectory(Path path) throws IOException { - Set extensions = new HashSet(); + Set extensions = new HashSet<>(); if (random().nextBoolean()) { extensions.add("cfs"); } @@ -187,8 +183,7 @@ public void testDeleteAndList() throws IOException { true)) { dir.createOutput("foo.tim", IOContext.DEFAULT).close(); Function stripExtra = - array -> - Arrays.asList(array).stream().filter(f -> f.startsWith("extra") == false).count(); + array -> Arrays.stream(array).filter(f -> f.startsWith("extra") == false).count(); try (IndexInput indexInput = dir.openInput("foo.tim", IOContext.DEFAULT)) { assert indexInput != null; dir.deleteFile("foo.tim"); diff --git a/lucene/distribution.tests/src/test/org/apache/lucene/distribution/TestModularLayer.java b/lucene/distribution.tests/src/test/org/apache/lucene/distribution/TestModularLayer.java index b6f426fcc004..577f0f9e36c9 100644 --- a/lucene/distribution.tests/src/test/org/apache/lucene/distribution/TestModularLayer.java +++ b/lucene/distribution.tests/src/test/org/apache/lucene/distribution/TestModularLayer.java @@ -218,7 +218,7 @@ public void testMultiReleaseJar() { } final int runtimeVersion = Runtime.version().feature(); - if 
(jarVersions.contains(Integer.valueOf(runtimeVersion))) { + if (jarVersions.contains(runtimeVersion)) { Assertions.assertThat( loader.loadClass("org.apache.lucene.store.MemorySegmentIndexInput")) .isNotNull(); @@ -231,11 +231,10 @@ public void testMultiReleaseJar() { public void testAllCoreModulesAreNamedModules() { Assertions.assertThat(allLuceneModules) .allSatisfy( - module -> { - Assertions.assertThat(module.descriptor().isAutomatic()) - .as(module.descriptor().name()) - .isFalse(); - }); + module -> + Assertions.assertThat(module.descriptor().isAutomatic()) + .as(module.descriptor().name()) + .isFalse()); } /** Ensure all modules have the same (expected) version. */ @@ -283,9 +282,7 @@ private TreeMap> getClasspathServiceProviders(ModuleRefe try (ModuleReader reader = module.open(); Stream entryStream = reader.list()) { List serviceProviderEntryList = - entryStream - .filter(entry -> serviceEntryPattern.matcher(entry).find()) - .collect(Collectors.toList()); + entryStream.filter(entry -> serviceEntryPattern.matcher(entry).find()).toList(); for (String entry : serviceProviderEntryList) { List implementations; @@ -294,7 +291,7 @@ private TreeMap> getClasspathServiceProviders(ModuleRefe Arrays.stream(new String(is.readAllBytes(), StandardCharsets.UTF_8).split("\n")) .map(String::trim) .filter(line -> !line.isBlank() && !line.startsWith("#")) - .collect(Collectors.toList()); + .toList(); } Matcher matcher = serviceEntryPattern.matcher(entry); @@ -344,10 +341,7 @@ public void testAllExportedPackagesInSync() throws IOException { if (module.descriptor().name().equals("org.apache.lucene.core")) { // Internal packages should not be exported to unqualified targets. - jarPackages.removeIf( - entry -> { - return entry.startsWith("org.apache.lucene.internal"); - }); + jarPackages.removeIf(entry -> entry.startsWith("org.apache.lucene.internal")); // Internal packages should use qualified exports. 
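// For reference, the distinction this assertion relies on: an unqualified export makes a
// package readable by every module, while a qualified export names the friend modules that
// may read it. A sketch of both forms in a module-info.java follows; the module and package
// names are hypothetical, chosen only to illustrate the syntax, not Lucene's actual declarations:
module org.example.core {
  exports org.example.api; // unqualified: visible to any module that requires org.example.core
  exports org.example.internal to org.example.friend; // qualified: visible only to org.example.friend
}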
moduleExports.removeIf( @@ -364,11 +358,10 @@ public void testAllExportedPackagesInSync() throws IOException { Assertions.assertThat(moduleExports) .as("Exported packages in module: " + module.descriptor().name()) .allSatisfy( - export -> { - Assertions.assertThat(export.targets()) - .as("We only support unqualified exports for now?") - .isEmpty(); - }) + export -> + Assertions.assertThat(export.targets()) + .as("We only support unqualified exports for now?") + .isEmpty()) .map(ModuleDescriptor.Exports::source) .containsExactlyInAnyOrderElementsOf(jarPackages); } @@ -392,11 +385,10 @@ public void testAllOpenAnalysisPackagesInSync() throws IOException { Assertions.assertThat(moduleOpens) .as("Open packages in module: " + module.descriptor().name()) .allSatisfy( - export -> { - Assertions.assertThat(export.targets()) - .as("Opens should only be targeted to Lucene Core.") - .containsExactly("org.apache.lucene.core"); - }) + export -> + Assertions.assertThat(export.targets()) + .as("Opens should only be targeted to Lucene Core.") + .containsExactly("org.apache.lucene.core")) .map(ModuleDescriptor.Opens::source) .containsExactlyInAnyOrderElementsOf(jarPackages); } diff --git a/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java b/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java index da4bb4f3fc53..0140c35df069 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/TestDrillSideways.java @@ -29,7 +29,6 @@ import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; -import java.util.stream.Collectors; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -211,7 +210,7 @@ public void testNoCaching() throws Exception { DrillDownQuery ddq = new DrillDownQuery(config); ddq.add("Color", "Blue"); - // Setup an IndexSearcher that will try to cache queries aggressively: + // Set up an IndexSearcher that will try to cache queries aggressively: IndexSearcher searcher = getNewSearcher(writer.getReader()); searcher.setQueryCachingPolicy( new QueryCachingPolicy() { @@ -224,7 +223,7 @@ public boolean shouldCache(Query query) { } }); - // Setup a DS instance for searching: + // Set up a DS instance for searching: TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoWriter); DrillSideways ds = getNewDrillSideways(searcher, config, taxoReader); @@ -250,10 +249,7 @@ public boolean shouldCache(Query query) { // test getTopChildren(0, dim) expectThrows( - IllegalArgumentException.class, - () -> { - concurrentResult.facets.getTopChildren(0, "Color"); - }); + IllegalArgumentException.class, () -> concurrentResult.facets.getTopChildren(0, "Color")); writer.close(); IOUtils.close(searcher.getIndexReader(), taxoReader, taxoWriter, dir, taxoDir); @@ -491,11 +487,7 @@ private void runDrillSidewaysTestCases(FacetsConfig config, DrillSideways ds) th // test getAllDims(0) DrillSidewaysResult finalR1 = r; - expectThrows( - IllegalArgumentException.class, - () -> { - finalR1.facets.getAllDims(0); - }); + expectThrows(IllegalArgumentException.class, () -> finalR1.facets.getAllDims(0)); // More interesting case: drill-down on two fields ddq = new DrillDownQuery(config); @@ -585,11 +577,7 @@ private void runDrillSidewaysTestCases(FacetsConfig config, DrillSideways ds) th // test getTopChildren(0, dim) DrillSidewaysResult finalR = r; - expectThrows( - 
IllegalArgumentException.class, - () -> { - finalR.facets.getTopChildren(0, "Author"); - }); + expectThrows(IllegalArgumentException.class, () -> finalR.facets.getTopChildren(0, "Author")); } public void testBasicWithCollectorManager() throws Exception { @@ -1003,7 +991,7 @@ public Doc() {} int[] dims; // 2nd value per dim for the doc (so we test - // multi-valued fields): + // multivalued fields): int[] dims2; boolean deleted; @@ -1084,7 +1072,7 @@ public void testRandom() throws Exception { values.add(s); } } - dimValues[dim] = values.toArray(new String[values.size()]); + dimValues[dim] = values.toArray(new String[0]); valueCount *= 2; } @@ -1657,7 +1645,7 @@ public List reduce(Collection collectors) { .sorted(comparator) .map(cr -> new DocAndScore(cr.docAndScore)) .limit(numDocs) - .collect(Collectors.toList()); + .toList(); } } @@ -1927,8 +1915,7 @@ void verifyEquals( if (VERBOSE) { idx = 0; System.out.println(" expected (sorted)"); - for (int i = 0; i < topNIDs.length; i++) { - int expectedOrd = topNIDs[i]; + for (int expectedOrd : topNIDs) { String value = dimValues[dim][expectedOrd]; System.out.println( " " @@ -2113,11 +2100,7 @@ protected FacetsCollectorManager createDrillDownFacetsCollectorManager() { topNDimsResult.get(0).toString()); // test getAllDims(0) - expectThrows( - IllegalArgumentException.class, - () -> { - facets.getAllDims(0); - }); + expectThrows(IllegalArgumentException.class, () -> facets.getAllDims(0)); // More interesting case: drill-down on two fields ddq = new DrillDownQuery(config); ddq.add("Author", "Lisa"); @@ -2140,7 +2123,7 @@ protected FacetsCollectorManager createDrillDownFacetsCollectorManager() { } public void testScorer() throws Exception { - // LUCENE-6001 some scorers, eg ReqExlScorer, can hit NPE if cost is called after nextDoc + // LUCENE-6001 some scorers, e.g. ReqExclScorer, can hit NPE if cost is called after nextDoc Directory dir = newDirectory(); Directory taxoDir = newDirectory(); @@ -2235,7 +2218,7 @@ public void testExtendedDrillSidewaysResult() throws Exception { facets = new MultiFacets(drillSidewaysFacets, drillDownFacets); } - // Facets computed using FacetsCollecter exposed in DrillSidewaysResult + // Facets computed using FacetsCollector exposed in DrillSidewaysResult // should match the Facets computed by {@link DrillSideways#buildFacetsResult} FacetResult facetResultActual = facets.getTopChildren(2, "dim"); FacetResult facetResultExpected = r.facets.getTopChildren(2, "dim"); @@ -2251,7 +2234,7 @@ public void testExtendedDrillSidewaysResult() throws Exception { @Test public void testDrillSidewaysSearchUseCorrectIterator() throws Exception { - // This test reproduces an issue (see github #12211) where DrillSidewaysScorer would ultimately + // This test reproduces an issue (see GitHub #12211) where DrillSidewaysScorer would ultimately // cause multiple consecutive calls to TwoPhaseIterator::matches, which results in a failed // assert in the PostingsReaderBase implementation (or a failure to match a document that should // have matched, if asserts are disabled).
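A note on the collect(Collectors.toList()) to toList() substitution that recurs throughout these hunks: the two calls are close but not interchangeable. Stream.toList(), available since JDK 16, is specified to return an unmodifiable list, while Collectors.toList() makes no promise about mutability (in practice it currently yields a mutable ArrayList). This is also what lets the SynonymQuery#getTerms hunk earlier in this patch drop its explicit Collections.unmodifiableList(...) wrapper. A minimal sketch of the behavioral difference, with illustrative class and variable names:

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class ToListSemantics {
  public static void main(String[] args) {
    // Collectors.toList() gives no mutability guarantee; today it returns a mutable ArrayList:
    List<String> collected = Stream.of("a", "b").collect(Collectors.toList());
    collected.add("c"); // allowed in practice, though unspecified by the contract

    // Stream.toList() is documented to return an unmodifiable list:
    List<String> unmodifiable = Stream.of("a", "b").toList();
    // unmodifiable.add("c"); // would throw UnsupportedOperationException

    System.out.println(collected);    // [a, b, c]
    System.out.println(unmodifiable); // [a, b]
  }
}

The swap is therefore only safe where callers never mutate the returned list, which is the implicit assumption behind each call site converted here.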
diff --git a/lucene/facet/src/test/org/apache/lucene/facet/facetset/TestExactFacetSetMatcher.java b/lucene/facet/src/test/org/apache/lucene/facet/facetset/TestExactFacetSetMatcher.java index 3fbb86503cec..9b72e0d6cfd7 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/facetset/TestExactFacetSetMatcher.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/facetset/TestExactFacetSetMatcher.java @@ -20,7 +20,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; -import java.util.stream.Collectors; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.StringField; @@ -414,11 +413,7 @@ public void testLongFacetSetMatchingWithFastMatchQuery() throws Exception { private static Query createFastMatchQuery(String field, int... values) { return new TermInSetQuery( - field, - Arrays.stream(values) - .mapToObj(String::valueOf) - .map(BytesRef::new) - .collect(Collectors.toList())); + field, Arrays.stream(values).mapToObj(String::valueOf).map(BytesRef::new).toList()); } private static void addFastMatchField( diff --git a/lucene/facet/src/test/org/apache/lucene/facet/sortedset/TestSortedSetDocValuesFacets.java b/lucene/facet/src/test/org/apache/lucene/facet/sortedset/TestSortedSetDocValuesFacets.java index f64b5518f8fb..4a8a5379b949 100644 --- a/lucene/facet/src/test/org/apache/lucene/facet/sortedset/TestSortedSetDocValuesFacets.java +++ b/lucene/facet/src/test/org/apache/lucene/facet/sortedset/TestSortedSetDocValuesFacets.java @@ -30,7 +30,6 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.StringField; @@ -112,11 +111,9 @@ public void testBasic() throws Exception { new String[0], 3, -1, - new LabelAndValue[] { - new LabelAndValue("bar", 1), - new LabelAndValue("foo", 2), - new LabelAndValue("zoo", 1) - }); + new LabelAndValue("bar", 1), + new LabelAndValue("foo", 2), + new LabelAndValue("zoo", 1)); // test getAllDims List results = facets.getAllDims(10); @@ -154,11 +151,7 @@ public void testBasic() throws Exception { topDimsResults1.get(0).toString()); // test getTopDims(0) - expectThrows( - IllegalArgumentException.class, - () -> { - facets.getAllDims(0); - }); + expectThrows(IllegalArgumentException.class, () -> facets.getAllDims(0)); // test getSpecificValue assertEquals(2, facets.getSpecificValue("a", "foo")); @@ -362,7 +355,7 @@ public void testBasicHierarchical() throws Exception { try { Facets facets = getAllFacets(searcher, state, exec); - // since a is not set to be hierarchical but _is_ multi-valued, we expect a value of 2 + // since a is not set to be hierarchical but _is_ multivalued, we expect a value of 2 // (since two unique docs contain at least one value for this dim): assertEquals( "dim=a path=[] value=2 childCount=3\n foo (2)\n bar (1)\n zoo (1)\n", @@ -383,11 +376,9 @@ public void testBasicHierarchical() throws Exception { new String[0], 3, 2, - new LabelAndValue[] { - new LabelAndValue("bar", 1), - new LabelAndValue("foo", 2), - new LabelAndValue("zoo", 1) - }); + new LabelAndValue("bar", 1), + new LabelAndValue("foo", 2), + new LabelAndValue("zoo", 1)); assertFacetResult( facets.getAllChildren("c", "buzz"), @@ -395,11 +386,9 @@ public void testBasicHierarchical() throws Exception { new String[] {"buzz"}, 3, 2, - new LabelAndValue[] { - 
new LabelAndValue("bee", 1), - new LabelAndValue("bif", 2), - new LabelAndValue("biz", 1) - }); + new LabelAndValue("bee", 1), + new LabelAndValue("bif", 2), + new LabelAndValue("biz", 1)); assertFacetResult( facets.getAllChildren("c", "buzz", "bif"), @@ -407,7 +396,7 @@ public void testBasicHierarchical() throws Exception { new String[] {"buzz", "bif"}, 1, 2, - new LabelAndValue[] {new LabelAndValue("baf", 2)}); + new LabelAndValue("baf", 2)); // test getSpecificValue (and make sure hierarchical dims are supported: LUCENE-10584): assertEquals(2, facets.getSpecificValue("c", "buzz")); @@ -473,22 +462,11 @@ public void testCountAll() throws Exception { // test getAllChildren assertFacetResult( - facets.getAllChildren("a"), - "a", - new String[0], - 1, - 1, - new LabelAndValue[] { - new LabelAndValue("bar", 1), - }); + facets.getAllChildren("a"), "a", new String[0], 1, 1, new LabelAndValue("bar", 1)); // test topNChildren = 0 Facets finalFacets = facets; - expectThrows( - IllegalArgumentException.class, - () -> { - finalFacets.getTopChildren(0, "a"); - }); + expectThrows(IllegalArgumentException.class, () -> finalFacets.getTopChildren(0, "a")); ExecutorService exec = new ThreadPoolExecutor( @@ -496,7 +474,7 @@ public void testCountAll() throws Exception { TestUtil.nextInt(random(), 2, 6), Long.MAX_VALUE, TimeUnit.MILLISECONDS, - new LinkedBlockingQueue(), + new LinkedBlockingQueue<>(), new NamedThreadFactory("TestIndexSearcher")); try { facets = new ConcurrentSortedSetDocValuesFacetCounts(state, exec); @@ -579,9 +557,9 @@ public void testHierarchicalCountAll() throws Exception { new String[0], 3, 3, - new LabelAndValue[] { - new LabelAndValue("bar", 1), new LabelAndValue("baz", 1), new LabelAndValue("buz", 1), - }); + new LabelAndValue("bar", 1), + new LabelAndValue("baz", 1), + new LabelAndValue("buz", 1)); assertFacetResult( facets.getAllChildren("b"), @@ -589,9 +567,8 @@ public void testHierarchicalCountAll() throws Exception { new String[0], 2, 3, - new LabelAndValue[] { - new LabelAndValue("bar", 2), new LabelAndValue("buzz", 1), - }); + new LabelAndValue("bar", 2), + new LabelAndValue("buzz", 1)); ExecutorService exec = new ThreadPoolExecutor( @@ -599,7 +576,7 @@ public void testHierarchicalCountAll() throws Exception { TestUtil.nextInt(random(), 2, 6), Long.MAX_VALUE, TimeUnit.MILLISECONDS, - new LinkedBlockingQueue(), + new LinkedBlockingQueue<>(), new NamedThreadFactory("TestIndexSearcher")); try { facets = new ConcurrentSortedSetDocValuesFacetCounts(state, exec); @@ -673,9 +650,8 @@ public void testBasicSingleValued() throws Exception { new String[0], 2, 3, - new LabelAndValue[] { - new LabelAndValue("baz", 1), new LabelAndValue("foo", 2), - }); + new LabelAndValue("baz", 1), + new LabelAndValue("foo", 2)); // DrillDown: DrillDownQuery q = new DrillDownQuery(config); @@ -998,10 +974,7 @@ public void testStaleState() throws Exception { searcher.search(new MatchAllDocsQuery(), new FacetsCollectorManager()); expectThrows( - IllegalStateException.class, - () -> { - new SortedSetDocValuesFacetCounts(state, c); - }); + IllegalStateException.class, () -> new SortedSetDocValuesFacetCounts(state, c)); } } } @@ -1108,11 +1081,9 @@ public void testSparseFacets() throws Exception { new String[0], 3, 3, - new LabelAndValue[] { - new LabelAndValue("foo1", 1), - new LabelAndValue("foo2", 1), - new LabelAndValue("foo3", 1), - }); + new LabelAndValue("foo1", 1), + new LabelAndValue("foo2", 1), + new LabelAndValue("foo3", 1)); assertFacetResult( facets.getAllChildren("b"), @@ -1120,17 +1091,11 @@ 
public void testSparseFacets() throws Exception { new String[0], 2, 2, - new LabelAndValue[] { - new LabelAndValue("bar1", 1), new LabelAndValue("bar2", 1), - }); + new LabelAndValue("bar1", 1), + new LabelAndValue("bar2", 1)); assertFacetResult( - facets.getAllChildren("c"), - "c", - new String[0], - 1, - 1, - new LabelAndValue[] {new LabelAndValue("baz1", 1)}); + facets.getAllChildren("c"), "c", new String[0], 1, 1, new LabelAndValue("baz1", 1)); assertFacetResult( facets.getAllChildren("d"), @@ -1138,7 +1103,8 @@ public void testSparseFacets() throws Exception { new String[0], 2, 2, - new LabelAndValue[] {new LabelAndValue("biz1", 1), new LabelAndValue("biz2", 1)}); + new LabelAndValue("biz1", 1), + new LabelAndValue("biz2", 1)); Collection resources = state.getChildResources(); assertTrue(state.toString().contains(FacetsConfig.DEFAULT_INDEX_FIELD_NAME)); @@ -1217,14 +1183,10 @@ public void testHierarchicalSparseFacets() throws Exception { new String[] {"foo"}, 2, 2, - new LabelAndValue[] {new LabelAndValue("bar", 1), new LabelAndValue("baz", 1)}); + new LabelAndValue("bar", 1), + new LabelAndValue("baz", 1)); assertFacetResult( - facets.getAllChildren("d"), - "d", - new String[0], - 1, - 2, - new LabelAndValue[] {new LabelAndValue("foo", 2)}); + facets.getAllChildren("d"), "d", new String[0], 1, 2, new LabelAndValue("foo", 2)); Collection resources = state.getChildResources(); assertTrue(state.toString().contains(FacetsConfig.DEFAULT_INDEX_FIELD_NAME)); @@ -1285,7 +1247,8 @@ public void testSomeSegmentsMissing() throws Exception { new String[0], 2, 2, - new LabelAndValue[] {new LabelAndValue("foo1", 1), new LabelAndValue("foo2", 1)}); + new LabelAndValue("foo1", 1), + new LabelAndValue("foo2", 1)); } finally { if (exec != null) exec.shutdownNow(); } @@ -1346,7 +1309,8 @@ public void testHierarchicalSomeSegmentsMissing() throws Exception { new String[0], 2, 2, - new LabelAndValue[] {new LabelAndValue("boo", 1), new LabelAndValue("foo", 2)}); + new LabelAndValue("boo", 1), + new LabelAndValue("foo", 2)); } finally { if (exec != null) exec.shutdownNow(); } @@ -1411,20 +1375,13 @@ public void testRandom() throws Exception { // Slow, yet hopefully bug-free, faceting: @SuppressWarnings({"rawtypes", "unchecked"}) Map[] expectedCounts = new HashMap[numDims]; - for (int i = 0; i < numDims; i++) { - expectedCounts[i] = new HashMap<>(); - } + Arrays.setAll(expectedCounts, i -> new HashMap<>()); for (TestDoc doc : testDocs) { if (doc.content.equals(searchToken)) { for (int j = 0; j < numDims; j++) { if (doc.dims[j] != null) { - Integer v = expectedCounts[j].get(doc.dims[j]); - if (v == null) { - expectedCounts[j].put(doc.dims[j], 1); - } else { - expectedCounts[j].put(doc.dims[j], v.intValue() + 1); - } + expectedCounts[j].merge(doc.dims[j], 1, Integer::sum); } } } @@ -1445,12 +1402,12 @@ public void testRandom() throws Exception { "dim" + i, new String[0], totCount, - labelValues.toArray(new LabelAndValue[labelValues.size()]), + labelValues.toArray(new LabelAndValue[0]), labelValues.size())); } } - // Sort by highest value, tie break by value: + // Sort by highest value, tie-break by value: sortFacetResults(expected); List actual = facets.getAllDims(10); @@ -1603,18 +1560,15 @@ public void testRandomHierarchicalFlatMix() throws Exception { } else { newLabelAndValues = labelAndValues; } - newLabelAndValues = - Arrays.stream(newLabelAndValues) - .sorted( - (o1, o2) -> { - if (o1.value.equals(o2.value)) { - return new BytesRef(o1.label).compareTo(new BytesRef(o2.label)); - } else { - return 
o2.value.intValue() - o1.value.intValue(); - } - }) - .collect(Collectors.toList()) - .toArray(LabelAndValue[]::new); + Arrays.sort( + newLabelAndValues, + (o1, o2) -> { + if (o1.value.equals(o2.value)) { + return new BytesRef(o1.label).compareTo(new BytesRef(o2.label)); + } else { + return o2.value.intValue() - o1.value.intValue(); + } + }); FacetResult newResult = new FacetResult(result.dim, result.path, 0, newLabelAndValues, childCount); expectedResults.put(parentDimPathString, newResult); @@ -1842,9 +1796,7 @@ public void testHierarchicalNonExistentDimension() throws Exception { expectThrows( IllegalArgumentException.class, - () -> { - facets.getTopChildren(5, "non-existent dimension", "with a path"); - }); + () -> facets.getTopChildren(5, "non-existent dimension", "with a path")); } finally { if (exec != null) exec.shutdownNow(); } @@ -1911,7 +1863,7 @@ private ExecutorService randomExecutorServiceOrNull() { TestUtil.nextInt(random(), 2, 6), Long.MAX_VALUE, TimeUnit.MILLISECONDS, - new LinkedBlockingQueue(), + new LinkedBlockingQueue<>(), new NamedThreadFactory("TestIndexSearcher")); } } diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/matchhighlight/TestMatchHighlighter.java b/lucene/highlighter/src/test/org/apache/lucene/search/matchhighlight/TestMatchHighlighter.java index ef1f038248eb..0f38d6574d5a 100644 --- a/lucene/highlighter/src/test/org/apache/lucene/search/matchhighlight/TestMatchHighlighter.java +++ b/lucene/highlighter/src/test/org/apache/lucene/search/matchhighlight/TestMatchHighlighter.java @@ -29,7 +29,6 @@ import java.util.Objects; import java.util.Set; import java.util.function.Function; -import java.util.stream.Collectors; import java.util.stream.Stream; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.LowerCaseFilter; @@ -336,164 +335,157 @@ protected TokenStreamComponents createComponents(String fieldName) { for (String field : List.of(FLD_TEXT1, FLD_TEXT2)) { String inputDocument = "The quick brown fox jumps over the lazy dog"; - List queryResultPairs = - Arrays.asList( - new String[][] { - {"fn:ordered(brown dog)", "0. %s: The quick >brown fox jumps over the lazy dog<"}, - { - "fn:within(fn:or(lazy quick) 1 fn:or(dog fox))", - "0. %s: The quick brown fox jumps over the >lazy< dog" - }, - { - "fn:containedBy(fox fn:ordered(brown fox dog))", - "0. %s: The quick brown >fox< jumps over the lazy dog" - }, - { - "fn:atLeast(2 quick fox \"furry dog\")", - "0. %s: The >quick brown fox< jumps over the lazy dog" - }, - { - "fn:maxgaps(0 fn:ordered(fn:or(quick lazy) fn:or(fox dog)))", - "0. %s: The quick brown fox jumps over the >lazy dog<" - }, - { - "fn:maxgaps(1 fn:ordered(fn:or(quick lazy) fn:or(fox dog)))", - "0. %s: The >quick brown fox< jumps over the >lazy dog<" - }, - { - "fn:maxwidth(2 fn:ordered(fn:or(quick lazy) fn:or(fox dog)))", - "0. %s: The quick brown fox jumps over the >lazy dog<" - }, - { - "fn:maxwidth(3 fn:ordered(fn:or(quick lazy) fn:or(fox dog)))", - "0. %s: The >quick brown fox< jumps over the >lazy dog<" - }, - {"fn:or(quick \"fox\")", "0. %s: The >quick< brown >fox< jumps over the lazy dog"}, - {"fn:or(\"quick fox\")"}, - { - "fn:phrase(quick brown fox)", - "0. %s: The >quick brown fox< jumps over the lazy dog" - }, - {"fn:wildcard(jump*)", "0. %s: The quick brown fox >jumps< over the lazy dog"}, - {"fn:wildcard(br*n)", "0. %s: The quick >brown< fox jumps over the lazy dog"}, - {"fn:fuzzyTerm(fxo)", "0. %s: The quick brown >fox< jumps over the lazy dog"}, - {"fn:or(dog fox)", "0. 
%s: The quick brown >fox< jumps over the lazy >dog<"}, - { - "fn:phrase(fn:ordered(quick fox) jumps)", - "0. %s: The >quick brown fox jumps< over the lazy dog" - }, - { - "fn:ordered(quick jumps dog)", - "0. %s: The >quick brown fox jumps over the lazy dog<" - }, - { - "fn:ordered(quick fn:or(fox dog))", - "0. %s: The >quick brown fox< jumps over the lazy dog" - }, - { - "fn:ordered(quick jumps fn:or(fox dog))", - "0. %s: The >quick brown fox jumps over the lazy dog<" - }, - { - "fn:unordered(dog jumps quick)", - "0. %s: The >quick brown fox jumps over the lazy dog<" - }, - { - "fn:unordered(fn:or(fox dog) quick)", - "0. %s: The >quick brown fox< jumps over the lazy dog" - }, - { - "fn:unordered(fn:phrase(brown fox) fn:phrase(fox jumps))", - "0. %s: The quick >brown fox jumps< over the lazy dog" - }, - {"fn:ordered(fn:phrase(brown fox) fn:phrase(fox jumps))"}, - {"fn:unorderedNoOverlaps(fn:phrase(brown fox) fn:phrase(fox jumps))"}, - { - "fn:before(fn:or(brown lazy) fox)", - "0. %s: The quick >brown< fox jumps over the lazy dog" - }, - { - "fn:before(fn:or(brown lazy) fn:or(dog fox))", - "0. %s: The quick >brown< fox jumps over the >lazy< dog" - }, - { - "fn:after(fn:or(brown lazy) fox)", - "0. %s: The quick brown fox jumps over the >lazy< dog" - }, - { - "fn:after(fn:or(brown lazy) fn:or(dog fox))", - "0. %s: The quick brown fox jumps over the >lazy< dog" - }, - { - "fn:within(fn:or(fox dog) 1 fn:or(quick lazy))", - "0. %s: The quick brown fox jumps over the lazy >dog<" - }, - { - "fn:within(fn:or(fox dog) 2 fn:or(quick lazy))", - "0. %s: The quick brown >fox< jumps over the lazy >dog<" - }, - { - "fn:notWithin(fn:or(fox dog) 1 fn:or(quick lazy))", - "0. %s: The quick brown >fox< jumps over the lazy dog" - }, - { - "fn:containedBy(fn:or(fox dog) fn:ordered(quick lazy))", - "0. %s: The quick brown >fox< jumps over the lazy dog" - }, - { - "fn:notContainedBy(fn:or(fox dog) fn:ordered(quick lazy))", - "0. %s: The quick brown fox jumps over the lazy >dog<" - }, - { - "fn:containing(fn:atLeast(2 quick fox dog) jumps)", - "0. %s: The quick brown >fox jumps over the lazy dog<" - }, - { - "fn:notContaining(fn:ordered(fn:or(the The) fn:or(fox dog)) brown)", - "0. %s: The quick brown fox jumps over >the lazy dog<" - }, - { - "fn:overlapping(fn:phrase(brown fox) fn:phrase(fox jumps))", - "0. %s: The quick >brown fox< jumps over the lazy dog" - }, - { - "fn:overlapping(fn:or(fox dog) fn:extend(lazy 2 2))", - "0. %s: The quick brown fox jumps over the lazy >dog<" - }, - { - "fn:nonOverlapping(fn:phrase(brown fox) fn:phrase(lazy dog))", - "0. %s: The quick >brown fox< jumps over the lazy dog" - }, - { - "fn:nonOverlapping(fn:or(fox dog) fn:extend(lazy 2 2))", - "0. %s: The quick brown >fox< jumps over the lazy dog" - }, - { - "fn:atLeast(2 fn:unordered(furry dog) fn:unordered(brown dog) lazy quick)", - "0. %s: The >quick >brown fox jumps over the lazy<<> dog<" - }, - {"fn:extend(fox 1 2)", "0. %s: The quick >brown fox jumps over< the lazy dog"}, - { - "fn:extend(fn:or(dog fox) 2 0)", - "0. %s: The >quick brown fox< jumps over >the lazy dog<" - }, - { - "fn:containedBy(fn:or(fox dog) fn:extend(lazy 3 3))", - "0. %s: The quick brown fox jumps over the lazy >dog<" - }, - { - "fn:notContainedBy(fn:or(fox dog) fn:extend(lazy 3 3))", - "0. %s: The quick brown >fox< jumps over the lazy dog" - }, - { - "fn:containing(fn:extend(fn:or(lazy brown) 1 1) fn:or(fox dog))", - "0. 
%s: The >quick brown fox< jumps over >the lazy dog<" - }, - { - "fn:notContaining(fn:extend(fn:or(fox dog) 1 0) fn:or(brown yellow))", - "0. %s: The quick brown fox jumps over the >lazy dog<" - } - }); + String[][] queryResultPairs = + new String[][] { + {"fn:ordered(brown dog)", "0. %s: The quick >brown fox jumps over the lazy dog<"}, + { + "fn:within(fn:or(lazy quick) 1 fn:or(dog fox))", + "0. %s: The quick brown fox jumps over the >lazy< dog" + }, + { + "fn:containedBy(fox fn:ordered(brown fox dog))", + "0. %s: The quick brown >fox< jumps over the lazy dog" + }, + { + "fn:atLeast(2 quick fox \"furry dog\")", + "0. %s: The >quick brown fox< jumps over the lazy dog" + }, + { + "fn:maxgaps(0 fn:ordered(fn:or(quick lazy) fn:or(fox dog)))", + "0. %s: The quick brown fox jumps over the >lazy dog<" + }, + { + "fn:maxgaps(1 fn:ordered(fn:or(quick lazy) fn:or(fox dog)))", + "0. %s: The >quick brown fox< jumps over the >lazy dog<" + }, + { + "fn:maxwidth(2 fn:ordered(fn:or(quick lazy) fn:or(fox dog)))", + "0. %s: The quick brown fox jumps over the >lazy dog<" + }, + { + "fn:maxwidth(3 fn:ordered(fn:or(quick lazy) fn:or(fox dog)))", + "0. %s: The >quick brown fox< jumps over the >lazy dog<" + }, + {"fn:or(quick \"fox\")", "0. %s: The >quick< brown >fox< jumps over the lazy dog"}, + {"fn:or(\"quick fox\")"}, + {"fn:phrase(quick brown fox)", "0. %s: The >quick brown fox< jumps over the lazy dog"}, + {"fn:wildcard(jump*)", "0. %s: The quick brown fox >jumps< over the lazy dog"}, + {"fn:wildcard(br*n)", "0. %s: The quick >brown< fox jumps over the lazy dog"}, + {"fn:fuzzyTerm(fxo)", "0. %s: The quick brown >fox< jumps over the lazy dog"}, + {"fn:or(dog fox)", "0. %s: The quick brown >fox< jumps over the lazy >dog<"}, + { + "fn:phrase(fn:ordered(quick fox) jumps)", + "0. %s: The >quick brown fox jumps< over the lazy dog" + }, + {"fn:ordered(quick jumps dog)", "0. %s: The >quick brown fox jumps over the lazy dog<"}, + { + "fn:ordered(quick fn:or(fox dog))", + "0. %s: The >quick brown fox< jumps over the lazy dog" + }, + { + "fn:ordered(quick jumps fn:or(fox dog))", + "0. %s: The >quick brown fox jumps over the lazy dog<" + }, + { + "fn:unordered(dog jumps quick)", + "0. %s: The >quick brown fox jumps over the lazy dog<" + }, + { + "fn:unordered(fn:or(fox dog) quick)", + "0. %s: The >quick brown fox< jumps over the lazy dog" + }, + { + "fn:unordered(fn:phrase(brown fox) fn:phrase(fox jumps))", + "0. %s: The quick >brown fox jumps< over the lazy dog" + }, + {"fn:ordered(fn:phrase(brown fox) fn:phrase(fox jumps))"}, + {"fn:unorderedNoOverlaps(fn:phrase(brown fox) fn:phrase(fox jumps))"}, + { + "fn:before(fn:or(brown lazy) fox)", + "0. %s: The quick >brown< fox jumps over the lazy dog" + }, + { + "fn:before(fn:or(brown lazy) fn:or(dog fox))", + "0. %s: The quick >brown< fox jumps over the >lazy< dog" + }, + { + "fn:after(fn:or(brown lazy) fox)", + "0. %s: The quick brown fox jumps over the >lazy< dog" + }, + { + "fn:after(fn:or(brown lazy) fn:or(dog fox))", + "0. %s: The quick brown fox jumps over the >lazy< dog" + }, + { + "fn:within(fn:or(fox dog) 1 fn:or(quick lazy))", + "0. %s: The quick brown fox jumps over the lazy >dog<" + }, + { + "fn:within(fn:or(fox dog) 2 fn:or(quick lazy))", + "0. %s: The quick brown >fox< jumps over the lazy >dog<" + }, + { + "fn:notWithin(fn:or(fox dog) 1 fn:or(quick lazy))", + "0. %s: The quick brown >fox< jumps over the lazy dog" + }, + { + "fn:containedBy(fn:or(fox dog) fn:ordered(quick lazy))", + "0. 
%s: The quick brown >fox< jumps over the lazy dog" + }, + { + "fn:notContainedBy(fn:or(fox dog) fn:ordered(quick lazy))", + "0. %s: The quick brown fox jumps over the lazy >dog<" + }, + { + "fn:containing(fn:atLeast(2 quick fox dog) jumps)", + "0. %s: The quick brown >fox jumps over the lazy dog<" + }, + { + "fn:notContaining(fn:ordered(fn:or(the The) fn:or(fox dog)) brown)", + "0. %s: The quick brown fox jumps over >the lazy dog<" + }, + { + "fn:overlapping(fn:phrase(brown fox) fn:phrase(fox jumps))", + "0. %s: The quick >brown fox< jumps over the lazy dog" + }, + { + "fn:overlapping(fn:or(fox dog) fn:extend(lazy 2 2))", + "0. %s: The quick brown fox jumps over the lazy >dog<" + }, + { + "fn:nonOverlapping(fn:phrase(brown fox) fn:phrase(lazy dog))", + "0. %s: The quick >brown fox< jumps over the lazy dog" + }, + { + "fn:nonOverlapping(fn:or(fox dog) fn:extend(lazy 2 2))", + "0. %s: The quick brown >fox< jumps over the lazy dog" + }, + { + "fn:atLeast(2 fn:unordered(furry dog) fn:unordered(brown dog) lazy quick)", + "0. %s: The >quick >brown fox jumps over the lazy<<> dog<" + }, + {"fn:extend(fox 1 2)", "0. %s: The quick >brown fox jumps over< the lazy dog"}, + { + "fn:extend(fn:or(dog fox) 2 0)", + "0. %s: The >quick brown fox< jumps over >the lazy dog<" + }, + { + "fn:containedBy(fn:or(fox dog) fn:extend(lazy 3 3))", + "0. %s: The quick brown fox jumps over the lazy >dog<" + }, + { + "fn:notContainedBy(fn:or(fox dog) fn:extend(lazy 3 3))", + "0. %s: The quick brown >fox< jumps over the lazy dog" + }, + { + "fn:containing(fn:extend(fn:or(lazy brown) 1 1) fn:or(fox dog))", + "0. %s: The >quick brown fox< jumps over >the lazy dog<" + }, + { + "fn:notContaining(fn:extend(fn:or(fox dog) 1 0) fn:or(brown yellow))", + "0. %s: The quick brown fox jumps over the >lazy dog<" + } + }; // Verify assertions. new IndexBuilder(this::toField) @@ -789,9 +781,8 @@ private void assertHighlights(List> docList, String... 
expectedForm } } - var expectedTrimmed = - Stream.of(expectedFormattedLines).map(String::trim).collect(Collectors.toList()); - var actualTrimmed = actualLines.stream().map(String::trim).collect(Collectors.toList()); + var expectedTrimmed = Stream.of(expectedFormattedLines).map(String::trim).toList(); + var actualTrimmed = actualLines.stream().map(String::trim).toList(); if (!Objects.equals(expectedTrimmed, actualTrimmed)) { throw new AssertionError( "Actual hits were:\n" @@ -807,8 +798,8 @@ private List> toDocList(Stream high docHighlights -> docHighlights.fields.entrySet().stream() .map(e -> e.getKey() + ": " + String.join(", ", e.getValue())) - .collect(Collectors.toList())) - .collect(Collectors.toList()); + .toList()) + .toList(); } private IndexableField toField(String name, String value) { diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/SearchPanelProvider.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/SearchPanelProvider.java index 23e23889a272..b0015456fecf 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/SearchPanelProvider.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/SearchPanelProvider.java @@ -35,7 +35,6 @@ import java.util.Locale; import java.util.Objects; import java.util.Set; -import java.util.stream.Collectors; import javax.swing.BorderFactory; import javax.swing.JButton; import javax.swing.JCheckBox; @@ -140,7 +139,7 @@ public final class SearchPanelProvider implements SearchTabOperator { private final JButton searchBtn = new JButton(); - private JCheckBox exactHitsCntCB = new JCheckBox(); + private final JCheckBox exactHitsCntCB = new JCheckBox(); private final JButton mltBtn = new JButton(); @@ -811,16 +810,10 @@ public void openIndex(LukeState state) { }); operatorRegistry .get(FieldValuesTabOperator.class) - .ifPresent( - operator -> { - operator.setFields(searchModel.getFieldNames()); - }); + .ifPresent(operator -> operator.setFields(searchModel.getFieldNames())); operatorRegistry .get(MLTTabOperator.class) - .ifPresent( - operator -> { - operator.setFields(searchModel.getFieldNames()); - }); + .ifPresent(operator -> operator.setFields(searchModel.getFieldNames())); queryStringTA.setText("*:*"); parsedQueryTA.setText(""); @@ -864,7 +857,7 @@ public enum Tab { VALUES(4), MLT(5); - private int tabIdx; + private final int tabIdx; Tab(int tabIdx) { this.tabIdx = tabIdx; @@ -937,7 +930,7 @@ public int getColumnWidth() { String v = String.join(",", Arrays.asList(e.getValue())); return e.getKey() + "=" + v + ";"; }) - .collect(Collectors.toList()); + .toList(); data[i][Column.VALUE.getIndex()] = String.join(" ", concatValues); } } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/TokenAttributeDialogFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/TokenAttributeDialogFactory.java index 46399d00b582..0f1351453299 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/TokenAttributeDialogFactory.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/analysis/TokenAttributeDialogFactory.java @@ -24,7 +24,6 @@ import java.awt.Window; import java.io.IOException; import java.util.List; -import java.util.stream.Collectors; import javax.swing.BorderFactory; import javax.swing.JButton; import javax.swing.JDialog; @@ -103,7 +102,7 @@ private JPanel content() { att -> 
att.getAttValues().entrySet().stream() .map(e -> TokenAttValue.of(att.getAttClass(), e.getKey(), e.getValue()))) - .collect(Collectors.toList()); + .toList(); TableUtils.setupTable( attributesTable, ListSelectionModel.SINGLE_SELECTION, diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/AddDocumentDialogFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/AddDocumentDialogFactory.java index 723cd21dbb13..b49ef949cb57 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/AddDocumentDialogFactory.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/AddDocumentDialogFactory.java @@ -35,7 +35,6 @@ import java.util.List; import java.util.logging.Level; import java.util.logging.Logger; -import java.util.stream.Collectors; import java.util.stream.IntStream; import javax.swing.BorderFactory; import javax.swing.BoxLayout; @@ -152,9 +151,7 @@ private AddDocumentDialogFactory() throws IOException { this.indexOptionsDialogFactory = IndexOptionsDialogFactory.getInstance(); this.helpDialogFactory = HelpDialogFactory.getInstance(); this.newFieldList = - IntStream.range(0, ROW_COUNT) - .mapToObj(i -> NewField.newInstance()) - .collect(Collectors.toList()); + IntStream.range(0, ROW_COUNT).mapToObj(i -> NewField.newInstance()).toList(); operatorRegistry.register(AddDocumentDialogOperator.class, this); indexHandler.addObserver(new Observer()); @@ -388,7 +385,7 @@ void addDocument(ActionEvent e) { .filter(nf -> !nf.isDeleted()) .filter(nf -> !StringUtils.isNullOrEmpty(nf.getName())) .filter(nf -> !StringUtils.isNullOrEmpty(nf.getValue())) - .collect(Collectors.toList()); + .toList(); if (validFields.isEmpty()) { infoTA.setText("Please add one or more fields. 
Name and Value are both required."); return; @@ -411,7 +408,6 @@ void addDocument(ActionEvent e) { log.info("Added document: " + doc); } - @SuppressWarnings("unchecked") private IndexableField toIndexableField(NewField nf) throws Exception { final Constructor constr; if (nf.getType().equals(TextField.class) || nf.getType().equals(StringField.class)) { @@ -505,9 +501,9 @@ enum Column implements TableColumnInfo { OPTIONS("Options", 3, String.class), VALUE("Value", 4, String.class); - private String colName; - private int index; - private Class type; + private final String colName; + private final int index; + private final Class type; Column(String colName, int index, Class type) { this.colName = colName; @@ -589,7 +585,7 @@ public Component getTableCellRendererComponent( static final class OptionsCellRenderer implements TableCellRenderer { - private JDialog dialog; + private final JDialog dialog; private final IndexOptionsDialogFactory indexOptionsDialogFactory; @@ -609,7 +605,6 @@ public OptionsCellRenderer( } @Override - @SuppressWarnings("unchecked") public Component getTableCellRendererComponent( JTable table, Object value, boolean isSelected, boolean hasFocus, int row, int column) { if (table != null && this.table != table) { @@ -635,9 +630,7 @@ public void mouseClicked(MouseEvent e) { title, 500, 500, - (factory) -> { - factory.setNewField(newFieldList.get(row)); - }); + (factory) -> factory.setNewField(newFieldList.get(row))); } } }); diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/TermVectorDialogFactory.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/TermVectorDialogFactory.java index 8095c2c62b15..35301622cb29 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/TermVectorDialogFactory.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/dialog/documents/TermVectorDialogFactory.java @@ -133,9 +133,9 @@ enum Column implements TableColumnInfo { POSITIONS("Positions", 2, String.class), OFFSETS("Offsets", 3, String.class); - private String colName; - private int index; - private Class type; + private final String colName; + private final int index; + private final Class type; Column(String colName, int index, Class type) { this.colName = colName; @@ -172,23 +172,14 @@ public Class getType() { String termText = entry.getTermText(); long freq = tvEntries.get(i).getFreq(); String positions = - String.join( - ",", - entry.getPositions().stream() - .map(pos -> Integer.toString(pos.getPosition())) - .collect(Collectors.toList())); + entry.getPositions().stream() + .map(pos -> Integer.toString(pos.getPosition())) + .collect(Collectors.joining(",")); String offsets = - String.join( - ",", - entry.getPositions().stream() - .filter( - pos -> pos.getStartOffset().isPresent() && pos.getEndOffset().isPresent()) - .map( - pos -> - Integer.toString(pos.getStartOffset().orElse(-1)) - + "-" - + Integer.toString(pos.getEndOffset().orElse(-1))) - .collect(Collectors.toList())); + entry.getPositions().stream() + .filter(pos -> pos.getStartOffset().isPresent() && pos.getEndOffset().isPresent()) + .map(pos -> pos.getStartOffset().orElse(-1) + "-" + pos.getEndOffset().orElse(-1)) + .collect(Collectors.joining(",")); data[i] = new Object[] {termText, freq, positions, offsets}; } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/CustomAnalyzerPanelProvider.java 
b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/CustomAnalyzerPanelProvider.java index 54b6b69eb482..7df0869ffe3f 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/CustomAnalyzerPanelProvider.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/CustomAnalyzerPanelProvider.java @@ -39,7 +39,6 @@ import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.stream.Collectors; import java.util.stream.IntStream; import javax.swing.BorderFactory; import javax.swing.DefaultComboBoxModel; @@ -164,7 +163,7 @@ private JPanel initCustomAnalyzerHeaderPanel() { panel.add(confDirBtn); buildBtn.setText( FontUtils.elegantIconHtml( - "", MessageUtils.getLocalizedMessage("analysis.button.build_analyzser"))); + "", MessageUtils.getLocalizedMessage("analysis.button.build_analyzer"))); buildBtn.setFont(StyleConstants.FONT_BUTTON_LARGE); buildBtn.setMargin(new Insets(3, 3, 3, 3)); buildBtn.addActionListener(listeners::buildAnalyzer); @@ -464,11 +463,10 @@ private void loadExternalJars() { int ret = fileChooser.showOpenDialog(containerPanel); if (ret == JFileChooser.APPROVE_OPTION) { File[] files = fileChooser.getSelectedFiles(); - analysisModel.addExternalJars( - Arrays.stream(files).map(File::getAbsolutePath).collect(Collectors.toList())); + analysisModel.addExternalJars(Arrays.stream(files).map(File::getAbsolutePath).toList()); operatorRegistry .get(CustomAnalyzerPanelOperator.class) - .ifPresent(operator -> operator.resetAnalysisComponents()); + .ifPresent(CustomAnalyzerPanelOperator::resetAnalysisComponents); messageBroker.showStatusMessage("External jars were added."); } } @@ -569,8 +567,7 @@ private void addTokenFilter() { selectedItem, tfParamsList.get(tfParamsList.size() - 1), () -> { - selectedTfList.setModel( - new DefaultComboBoxModel<>(updatedList.toArray(new String[updatedList.size()]))); + selectedTfList.setModel(new DefaultComboBoxModel<>(updatedList.toArray(new String[0]))); tfFactoryCombo.setSelectedItem(""); tfEditBtn.setEnabled(true); buildBtn.setEnabled(true); @@ -617,9 +614,7 @@ private void editTokenizer() { -1, selectedItem, tokParams, - () -> { - buildBtn.setEnabled(true); - }); + () -> buildBtn.setEnabled(true)); } private void editTokenFilters() { @@ -704,7 +699,7 @@ public void updateCharFilters(List deletedIndexes) { IntStream.range(0, cfParamsList.size()) .filter(i -> !deletedIndexes.contains(i)) .mapToObj(cfParamsList::get) - .collect(Collectors.toList()); + .toList(); cfParamsList.clear(); cfParamsList.addAll(updatedParamList); assert selectedCfList.getModel().getSize() == cfParamsList.size(); @@ -725,7 +720,7 @@ public void updateTokenFilters(List deletedIndexes) { IntStream.range(0, tfParamsList.size()) .filter(i -> !deletedIndexes.contains(i)) .mapToObj(tfParamsList::get) - .collect(Collectors.toList()); + .toList(); tfParamsList.clear(); tfParamsList.addAll(updatedParamList); assert selectedTfList.getModel().getSize() == tfParamsList.size(); diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/SimpleAnalyzeResultPanelProvider.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/SimpleAnalyzeResultPanelProvider.java index 63e3daa1bc18..36528a14df39 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/SimpleAnalyzeResultPanelProvider.java +++ 
b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/components/fragments/analysis/SimpleAnalyzeResultPanelProvider.java @@ -22,7 +22,6 @@ import java.awt.event.MouseAdapter; import java.awt.event.MouseEvent; import java.util.List; -import java.util.stream.Collectors; import javax.swing.JLabel; import javax.swing.JPanel; import javax.swing.JScrollPane; @@ -198,7 +197,7 @@ public int getColumnWidth() { att -> att.getAttValues().entrySet().stream() .map(e -> e.getKey() + "=" + e.getValue())) - .collect(Collectors.toList()); + .toList(); data[i][Column.ATTR.getIndex()] = String.join(",", attValues); } } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/ListUtils.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/ListUtils.java index fc69261037bf..7e4261c28dcb 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/ListUtils.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/ListUtils.java @@ -19,7 +19,6 @@ import java.util.List; import java.util.function.IntFunction; -import java.util.stream.Collectors; import java.util.stream.IntStream; import javax.swing.JList; import javax.swing.ListModel; @@ -34,7 +33,7 @@ public static List getAllItems(JList jlist) { public static List getAllItems(JList jlist, IntFunction mapFunc) { ListModel model = jlist.getModel(); - return IntStream.range(0, model.getSize()).mapToObj(mapFunc).collect(Collectors.toList()); + return IntStream.range(0, model.getSize()).mapToObj(mapFunc).toList(); } private ListUtils() {} diff --git a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/TableUtils.java b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/TableUtils.java index a14cfb5ca7d3..9ffc84a01c58 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/TableUtils.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/app/desktop/util/TableUtils.java @@ -75,8 +75,8 @@ public static void setEnabled(JTable table, boolean enabled) { } public static String[] columnNames(T[] columns) { - return columnMap(columns).entrySet().stream() - .map(e -> e.getValue().getColName()) + return columnMap(columns).values().stream() + .map(TableColumnInfo::getColName) .toArray(String[]::new); } diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/AnalysisImpl.java b/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/AnalysisImpl.java index fb3a065cbb27..3fc46305dde5 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/AnalysisImpl.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/analysis/AnalysisImpl.java @@ -33,7 +33,6 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; -import java.util.stream.Collectors; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CharFilterFactory; import org.apache.lucene.analysis.TokenFilterFactory; @@ -81,19 +80,17 @@ public void addExternalJars(List jarFiles) { @Override public Collection getAvailableCharFilters() { - return CharFilterFactory.availableCharFilters().stream().sorted().collect(Collectors.toList()); + return CharFilterFactory.availableCharFilters().stream().sorted().toList(); } @Override public Collection getAvailableTokenizers() { - return TokenizerFactory.availableTokenizers().stream().sorted().collect(Collectors.toList()); + return TokenizerFactory.availableTokenizers().stream().sorted().toList(); } @Override public Collection getAvailableTokenFilters() { - return 
TokenFilterFactory.availableTokenFilters().stream() - .sorted() - .collect(Collectors.toList()); + return TokenFilterFactory.availableTokenFilters().stream().sorted().toList(); } @Override diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/commits/CommitsImpl.java b/lucene/luke/src/java/org/apache/lucene/luke/models/commits/CommitsImpl.java index a103eee9ce17..c8f03fd811da 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/commits/CommitsImpl.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/commits/CommitsImpl.java @@ -119,7 +119,7 @@ public List getFiles(long commitGen) throws LukeException { return ic.getFileNames().stream() .map(name -> File.of(indexPath, name)) .sorted(Comparator.comparing(File::getFileName)) - .collect(Collectors.toList()); + .toList(); } catch (IOException e) { throw new LukeException( String.format(Locale.ENGLISH, "Failed to load files for commit generation %d", commitGen), @@ -138,7 +138,7 @@ public List getSegments(long commitGen) throws LukeException { return infos.asList().stream() .map(Segment::of) .sorted(Comparator.comparing(Segment::getName)) - .collect(Collectors.toList()); + .toList(); } catch (IOException e) { throw new LukeException( String.format( diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/overview/TopTerms.java b/lucene/luke/src/java/org/apache/lucene/luke/models/overview/TopTerms.java index 5e5bb692e565..b6d4c14747a7 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/overview/TopTerms.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/overview/TopTerms.java @@ -22,11 +22,10 @@ import java.util.Map; import java.util.Objects; import java.util.WeakHashMap; -import java.util.stream.Collectors; import org.apache.lucene.index.IndexReader; import org.apache.lucene.misc.HighFreqTerms; -/** An utility class that collects terms and their statistics in a specific field. */ +/** A utility class that collects terms and their statistics in a specific field. 
*/ final class TopTerms { private final IndexReader reader; @@ -52,8 +51,7 @@ List getTopTerms(String field, int numTerms) throws Exception { HighFreqTerms.getHighFreqTerms( reader, numTerms, field, new HighFreqTerms.DocFreqComparator()); - List topTerms = - Arrays.stream(stats).map(TermStats::of).collect(Collectors.toList()); + List topTerms = Arrays.stream(stats).map(TermStats::of).toList(); // cache computed statistics for later uses topTermsCache.put(field, topTerms); diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchImpl.java b/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchImpl.java index cd2fb8769747..56a942ba6aad 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchImpl.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/search/SearchImpl.java @@ -107,7 +107,7 @@ public Collection getSortableFieldNames() { .map(f -> IndexUtils.getFieldInfo(reader, f)) .filter(info -> !info.getDocValuesType().equals(DocValuesType.NONE)) .map(info -> info.name) - .collect(Collectors.toList()); + .toList(); } @Override @@ -116,7 +116,7 @@ public Collection getSearchableFieldNames() { .map(f -> IndexUtils.getFieldInfo(reader, f)) .filter(info -> !info.getIndexOptions().equals(IndexOptions.NONE)) .map(info -> info.name) - .collect(Collectors.toList()); + .toList(); } @Override @@ -155,7 +155,7 @@ public Query parseQuery( query = query.rewrite(searcher); } catch (IOException e) { throw new LukeException( - String.format(Locale.ENGLISH, "Failed to rewrite query: %s", query.toString()), e); + String.format(Locale.ENGLISH, "Failed to rewrite query: %s", query), e); } } @@ -428,7 +428,7 @@ public List guessSortTypes(String name) { new SortField(name, SortField.Type.FLOAT), new SortField(name, SortField.Type.DOUBLE) }) - .collect(Collectors.toList()); + .toList(); case SORTED_NUMERIC: return Arrays.stream( @@ -438,7 +438,7 @@ public List guessSortTypes(String name) { new SortedNumericSortField(name, SortField.Type.FLOAT), new SortedNumericSortField(name, SortField.Type.DOUBLE) }) - .collect(Collectors.toList()); + .toList(); case SORTED: return Arrays.stream( @@ -446,7 +446,7 @@ public List guessSortTypes(String name) { new SortField(name, SortField.Type.STRING), new SortField(name, SortField.Type.STRING_VAL) }) - .collect(Collectors.toList()); + .toList(); case SORTED_SET: return Collections.singletonList(new SortedSetSortField(name, false)); diff --git a/lucene/luke/src/java/org/apache/lucene/luke/models/util/IndexUtils.java b/lucene/luke/src/java/org/apache/lucene/luke/models/util/IndexUtils.java index cab0bf5c04e6..95dbea6cafba 100644 --- a/lucene/luke/src/java/org/apache/lucene/luke/models/util/IndexUtils.java +++ b/lucene/luke/src/java/org/apache/lucene/luke/models/util/IndexUtils.java @@ -36,7 +36,6 @@ import java.util.Objects; import java.util.logging.Level; import java.util.logging.Logger; -import java.util.stream.Collectors; import java.util.stream.StreamSupport; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.core.WhitespaceAnalyzer; @@ -98,7 +97,7 @@ public static IndexReader openIndex(String indexPath, String dirImpl) throws Exc // find all valid index directories in this directory Files.walkFileTree( root, - new SimpleFileVisitor() { + new SimpleFileVisitor<>() { @Override public FileVisitResult preVisitDirectory(Path path, BasicFileAttributes attrs) throws IOException { @@ -127,7 +126,7 @@ public FileVisitResult preVisitDirectory(Path path, BasicFileAttributes attrs) if (readers.size() == 
1) { return readers.get(0); } else { - return new MultiReader(readers.toArray(new IndexReader[readers.size()])); + return new MultiReader(readers.toArray(new IndexReader[0])); } } @@ -448,7 +447,7 @@ public static FieldInfo getFieldInfo(IndexReader reader, String fieldName) { public static Collection getFieldNames(IndexReader reader) { return StreamSupport.stream(getFieldInfos(reader).spliterator(), false) .map(f -> f.name) - .collect(Collectors.toList()); + .toList(); } /** diff --git a/lucene/luke/src/resources/org/apache/lucene/luke/app/desktop/messages/messages.properties b/lucene/luke/src/resources/org/apache/lucene/luke/app/desktop/messages/messages.properties index 6dbb6092a562..56becab17d10 100644 --- a/lucene/luke/src/resources/org/apache/lucene/luke/app/desktop/messages/messages.properties +++ b/lucene/luke/src/resources/org/apache/lucene/luke/app/desktop/messages/messages.properties @@ -184,7 +184,7 @@ analysis.label.show_chain=(Show analysis chain) analysis.radio.preset=Preset analysis.radio.custom=Custom analysis.button.browse=Browse -analysis.button.build_analyzser=Build Analyzer +analysis.button.build_analyzer=Build Analyzer analysis.button.test=Test Analyzer analysis.checkbox.step_by_step=Step By Step analysis.hyperlink.load_jars=Load external jars diff --git a/lucene/luke/src/test/org/apache/lucene/luke/app/desktop/util/inifile/TestSimpleIniFile.java b/lucene/luke/src/test/org/apache/lucene/luke/app/desktop/util/inifile/TestSimpleIniFile.java index 8db6c1c52c27..6952539013e3 100644 --- a/lucene/luke/src/test/org/apache/lucene/luke/app/desktop/util/inifile/TestSimpleIniFile.java +++ b/lucene/luke/src/test/org/apache/lucene/luke/app/desktop/util/inifile/TestSimpleIniFile.java @@ -24,7 +24,6 @@ import java.nio.file.Path; import java.util.List; import java.util.Map; -import java.util.stream.Collectors; import org.apache.lucene.tests.util.LuceneTestCase; import org.junit.Test; @@ -37,7 +36,7 @@ public void testStore() throws IOException { assertTrue(Files.isRegularFile(path)); try (BufferedReader br = Files.newBufferedReader(path, StandardCharsets.UTF_8)) { - List lines = br.lines().collect(Collectors.toList()); + List lines = br.lines().toList(); assertEquals(8, lines.size()); assertEquals("[section1]", lines.get(0)); assertEquals("s1 = aaa", lines.get(1)); diff --git a/lucene/monitor/src/java/org/apache/lucene/monitor/QueryTree.java b/lucene/monitor/src/java/org/apache/lucene/monitor/QueryTree.java index 0a827b3690a7..c910d7385933 100644 --- a/lucene/monitor/src/java/org/apache/lucene/monitor/QueryTree.java +++ b/lucene/monitor/src/java/org/apache/lucene/monitor/QueryTree.java @@ -24,7 +24,6 @@ import java.util.Optional; import java.util.function.BiConsumer; import java.util.function.Function; -import java.util.stream.Collectors; import org.apache.lucene.index.Term; import org.apache.lucene.util.BytesRef; @@ -67,11 +66,7 @@ public String toString() { /** Returns a string of {@code width} spaces */ protected String space(int width) { - StringBuilder sb = new StringBuilder(); - for (int i = 0; i < width; i++) { - sb.append(" "); - } - return sb.toString(); + return " ".repeat(width); } /** Returns a leaf node for a particular term */ @@ -149,16 +144,14 @@ public String toString(int depth) { /** Returns a conjunction of a set of child nodes */ public static QueryTree conjunction( List> children, TermWeightor weightor) { - if (children.size() == 0) { + if (children.isEmpty()) { throw new IllegalArgumentException("Cannot build a conjunction with no children"); } if 
(children.size() == 1) { return children.get(0).apply(weightor); } - List qt = children.stream().map(f -> f.apply(weightor)).collect(Collectors.toList()); - List restricted = - qt.stream().filter(t -> t.weight() > 0).collect(Collectors.toList()); - if (restricted.size() == 0) { + List qt = children.stream().map(f -> f.apply(weightor)).toList(); + if (qt.stream().noneMatch(t -> t.weight() > 0)) { // all children are ANY, so just return the first one return qt.get(0); } @@ -172,13 +165,13 @@ static QueryTree conjunction(QueryTree... children) { /** Returns a disjunction of a set of child nodes */ public static QueryTree disjunction( List> children, TermWeightor weightor) { - if (children.size() == 0) { + if (children.isEmpty()) { throw new IllegalArgumentException("Cannot build a disjunction with no children"); } if (children.size() == 1) { return children.get(0).apply(weightor); } - List qt = children.stream().map(f -> f.apply(weightor)).collect(Collectors.toList()); + List qt = children.stream().map(f -> f.apply(weightor)).toList(); Optional firstAnyChild = qt.stream().filter(q -> q.weight() == 0).findAny(); // if any of the children is an ANY node, just return that, otherwise build the disjunction return firstAnyChild.orElseGet(() -> new DisjunctionQueryTree(qt)); diff --git a/lucene/queries/src/java/org/apache/lucene/queries/intervals/ConjunctionIntervalsSource.java b/lucene/queries/src/java/org/apache/lucene/queries/intervals/ConjunctionIntervalsSource.java index 90e15a047756..73bc0d610d8b 100644 --- a/lucene/queries/src/java/org/apache/lucene/queries/intervals/ConjunctionIntervalsSource.java +++ b/lucene/queries/src/java/org/apache/lucene/queries/intervals/ConjunctionIntervalsSource.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.stream.Collectors; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; @@ -80,10 +79,7 @@ public final IntervalMatchesIterator matches(String field, LeafReaderContext ctx subs.add(mi); } IntervalIterator it = - combine( - subs.stream() - .map(m -> IntervalMatches.wrapMatches(m, doc)) - .collect(Collectors.toList())); + combine(subs.stream().map(m -> IntervalMatches.wrapMatches(m, doc)).toList()); if (it.advance(doc) != doc) { return null; } diff --git a/lucene/queries/src/java/org/apache/lucene/queries/intervals/DisjunctionIntervalsSource.java b/lucene/queries/src/java/org/apache/lucene/queries/intervals/DisjunctionIntervalsSource.java index 7e1a701d7393..6f683537c6a0 100644 --- a/lucene/queries/src/java/org/apache/lucene/queries/intervals/DisjunctionIntervalsSource.java +++ b/lucene/queries/src/java/org/apache/lucene/queries/intervals/DisjunctionIntervalsSource.java @@ -75,7 +75,7 @@ public IntervalIterator intervals(String field, LeafReaderContext ctx) throws IO subIterators.add(it); } } - if (subIterators.size() == 0) { + if (subIterators.isEmpty()) { return null; } return new DisjunctionIntervalIterator(subIterators); @@ -91,14 +91,12 @@ public IntervalMatchesIterator matches(String field, LeafReaderContext ctx, int subMatches.add(mi); } } - if (subMatches.size() == 0) { + if (subMatches.isEmpty()) { return null; } DisjunctionIntervalIterator it = new DisjunctionIntervalIterator( - subMatches.stream() - .map(m -> IntervalMatches.wrapMatches(m, doc)) - .collect(Collectors.toList())); + subMatches.stream().map(m -> IntervalMatches.wrapMatches(m, doc)).toList()); if (it.advance(doc) != doc) { return null; 
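// Aside on the collect(Collectors.toList()) -> Stream.toList() rewrites running
// through this patch: Stream.toList() (Java 16+) returns an unmodifiable list,
// whereas Collectors.toList() makes no such guarantee (in current JDKs it happens
// to return a mutable ArrayList). Unlike Collectors.toUnmodifiableList(),
// Stream.toList() also permits null elements. The converted call sites appear to
// only read the resulting lists, so the rewrite is behavior-preserving. A minimal,
// hypothetical sketch of the difference (names are illustrative only):
//
//   List<String> mutable = Stream.of("a", "b").collect(Collectors.toList());
//   mutable.add("c");                 // ok: an ArrayList in current JDKs
//
//   List<String> frozen = Stream.of("a", "b").toList();
//   frozen.add("c");                  // throws UnsupportedOperationException
//
// Any call site that appended to the collected list afterwards would have to keep
// the old collector, or copy into a new ArrayList first.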
} @@ -170,7 +168,7 @@ static class DisjunctionIntervalIterator extends IntervalIterator { this.approximation = new DisjunctionDISIApproximation(disiQueue); this.iterators = iterators; this.intervalQueue = - new PriorityQueue(iterators.size()) { + new PriorityQueue<>(iterators.size()) { @Override protected boolean lessThan(IntervalIterator a, IntervalIterator b) { return a.end() < b.end() || (a.end() == b.end() && a.start() >= b.start()); diff --git a/lucene/queries/src/java/org/apache/lucene/queries/intervals/Disjunctions.java b/lucene/queries/src/java/org/apache/lucene/queries/intervals/Disjunctions.java index 7d37c331dd0a..c50c7fcc49e4 100644 --- a/lucene/queries/src/java/org/apache/lucene/queries/intervals/Disjunctions.java +++ b/lucene/queries/src/java/org/apache/lucene/queries/intervals/Disjunctions.java @@ -21,7 +21,6 @@ import java.util.Collections; import java.util.List; import java.util.function.Function; -import java.util.stream.Collectors; import org.apache.lucene.search.IndexSearcher; final class Disjunctions { @@ -59,7 +58,7 @@ public static List pullUp( if (rewritten.size() == 1) { return Collections.singletonList(function.apply(rewritten.get(0))); } - return rewritten.stream().map(function).collect(Collectors.toList()); + return rewritten.stream().map(function).toList(); } // Given a source containing disjunctions, and a mapping function, @@ -70,7 +69,7 @@ public static List pullUp( if (disjuncts.size() == 1) { return Collections.singletonList(function.apply(disjuncts.get(0))); } - return disjuncts.stream().map(function).collect(Collectors.toList()); + return disjuncts.stream().map(function).toList(); } // Separate out disjunctions into individual sources diff --git a/lucene/queries/src/java/org/apache/lucene/queries/intervals/MinimizingConjunctionIntervalsSource.java b/lucene/queries/src/java/org/apache/lucene/queries/intervals/MinimizingConjunctionIntervalsSource.java index d00e1820682c..1cdb5ab8c347 100644 --- a/lucene/queries/src/java/org/apache/lucene/queries/intervals/MinimizingConjunctionIntervalsSource.java +++ b/lucene/queries/src/java/org/apache/lucene/queries/intervals/MinimizingConjunctionIntervalsSource.java @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; -import java.util.stream.Collectors; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; @@ -65,9 +64,7 @@ public IntervalMatchesIterator matches(String field, LeafReaderContext ctx, int } IntervalIterator it = combine( - subs.stream() - .map(m -> IntervalMatches.wrapMatches(m, doc)) - .collect(Collectors.toList()), + subs.stream().map(m -> IntervalMatches.wrapMatches(m, doc)).toList(), cacheIterators(subs)); if (it.advance(doc) != doc) { return null; diff --git a/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestIntervals.java b/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestIntervals.java index eebdbbac6f09..6d1413efabd9 100644 --- a/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestIntervals.java +++ b/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestIntervals.java @@ -63,7 +63,7 @@ public class TestIntervals extends LuceneTestCase { // 0 1 2 3 4 5 6 7 8 9 // // 012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 - private static String[] field1_docs = { + private static final String[] field1_docs = { "Nothing of interest to anyone here", "Pease 
porridge hot, pease porridge cold, pease porridge in the pot nine days old. Some like it hot, some like it cold, some like it in the pot nine days old", "Pease porridge cold, pease porridge hot, pease porridge in the pot twelve days old. Some like it cold, some like it hot, some like it in the fraggle", @@ -76,7 +76,7 @@ public class TestIntervals extends LuceneTestCase { // 0 1 2 3 4 5 6 7 8 9 // // 012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 - private static String[] field2_docs = { + private static final String[] field2_docs = { "In Xanadu did Kubla Khan a stately pleasure dome decree", "Where Alph the sacred river ran through caverns measureless to man", "a b a c b a b c", @@ -88,7 +88,7 @@ public class TestIntervals extends LuceneTestCase { private static Directory directory; private static IndexSearcher searcher; - private static Analyzer analyzer = new StandardAnalyzer(CharArraySet.EMPTY_SET); + private static final Analyzer analyzer = new StandardAnalyzer(CharArraySet.EMPTY_SET); private static final FieldType FIELD_TYPE = new FieldType(TextField.TYPE_STORED); @@ -202,7 +202,7 @@ private void checkVisits( @Override public void consumeTerms(Query query, Term... terms) { visitedSources[0]++; - actualTerms.addAll(Arrays.stream(terms).map(Term::text).collect(Collectors.toList())); + actualTerms.addAll(Arrays.stream(terms).map(Term::text).toList()); } @Override @@ -261,9 +261,9 @@ public void testIntervalsOnFieldWithNoPositions() throws IOException { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> { - Intervals.term("wibble").intervals("id", searcher.getIndexReader().leaves().get(0)); - }); + () -> + Intervals.term("wibble") + .intervals("id", searcher.getIndexReader().leaves().get(0))); assertEquals( "Cannot create an IntervalIterator over field id because it has no indexed positions", e.getMessage()); @@ -454,10 +454,7 @@ Sanity check that the subclauses of a disjunction are presented in sorted order Collections.shuffle(Arrays.asList(terms), random()); IntervalsSource source = - Intervals.or( - Arrays.stream(terms) - .map((term) -> Intervals.term(term)) - .toArray((sz) -> new IntervalsSource[sz])); + Intervals.or(Arrays.stream(terms).map(Intervals::term).toArray(IntervalsSource[]::new)); assertEquals(expected, source.toString()); } diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletion.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletion.java index 28d117d12791..fe520d675c5d 100644 --- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletion.java +++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/fst/FSTCompletion.java @@ -24,7 +24,6 @@ import java.util.List; import java.util.Spliterator; import java.util.function.Consumer; -import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.Stream; import java.util.stream.StreamSupport; @@ -93,12 +92,12 @@ public int compareTo(Completion o) { /** * @see #FSTCompletion(FST, boolean, boolean) */ - private boolean exactFirst; + private final boolean exactFirst; /** * @see #FSTCompletion(FST, boolean, boolean) */ - private boolean higherWeightsFirst; + private final boolean higherWeightsFirst; /** * Constructs an FSTCompletion, specifying higherWeightsFirst and exactFirst. @@ -146,7 +145,7 @@ private static Arc[] cacheRootArcs(FST automaton) { } Collections.reverse(rootArcs); // we want highest weights first. 
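// Aside on the following toArray(new Arc[size]) -> toArray(new Arc[0]) rewrite
// (and the matching ones in the Luke and test code above): when the supplied array
// is too small, Collection.toArray allocates a correctly sized array of the same
// component type itself, and measurements on modern JVMs (e.g. Aleksey Shipilev's
// "Arrays of Wisdom of the Ancients") have shown the zero-length form to be no
// slower, and often faster, than pre-sizing. A hypothetical sketch (list contents
// are illustrative only):
//
//   List<String> names = List.of("fst", "suggest");
//   String[] preSized = names.toArray(new String[names.size()]);  // old idiom
//   String[] zeroLen  = names.toArray(new String[0]);             // preferred idiom
//   // both results have length 2; in the second case toArray sizes the array itself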
- return rootArcs.toArray(new Arc[rootArcs.size()]); + return rootArcs.toArray(new Arc[0]); } catch (IOException e) { throw new RuntimeException(e); } @@ -196,19 +195,17 @@ private int getExactMatchStartingFromRootArc(int rootArcIndex, BytesRef utf8) { * then alphabetically (UTF-8 codepoint order). */ public List lookup(CharSequence key, int num) { - if (key.length() == 0 || automaton == null) { + if (key.isEmpty() || automaton == null) { return EMPTY_RESULT; } if (!higherWeightsFirst && rootArcs.length > 1) { - // We could emit a warning here (?). An optimal strategy for - // alphabetically sorted + // We could emit a warning here (?). An optimal strategy for alphabetically sorted // suggestions would be to add them with a constant weight -- this saves - // unnecessary - // traversals and sorting. - return lookup(key).sorted().limit(num).collect(Collectors.toList()); + // unnecessary traversals and sorting. + return lookup(key).sorted().limit(num).toList(); } else { - return lookup(key).limit(num).collect(Collectors.toList()); + return lookup(key).limit(num).toList(); } } @@ -221,7 +218,7 @@ public List lookup(CharSequence key, int num) { * @return Returns the suggestions */ public Stream lookup(CharSequence key) { - if (key.length() == 0 || automaton == null) { + if (key.isEmpty() || automaton == null) { return Stream.empty(); } @@ -292,8 +289,8 @@ private Stream completionStream( FST.BytesReader fstReader = automaton.getBytesReader(); class State { - Arc arc; - int outputLength; + final Arc arc; + final int outputLength; State(Arc arc, int outputLength) throws IOException { this.arc = automaton.readFirstTargetArc(arc, new Arc<>(), fstReader); diff --git a/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/TestFSTCompletion.java b/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/TestFSTCompletion.java index ff2e85998c5d..d4e6e173bc31 100644 --- a/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/TestFSTCompletion.java +++ b/lucene/suggest/src/test/org/apache/lucene/search/suggest/fst/TestFSTCompletion.java @@ -23,7 +23,6 @@ import java.util.List; import java.util.Locale; import java.util.Random; -import java.util.stream.Collectors; import org.apache.lucene.search.suggest.Input; import org.apache.lucene.search.suggest.InputArrayIterator; import org.apache.lucene.search.suggest.Lookup.LookupResult; @@ -98,7 +97,7 @@ public void testCompletionStream() throws Exception { .sorted( Comparator.comparing( completion -> completion.utf8.utf8ToString().toLowerCase(Locale.ROOT))) - .collect(Collectors.toList()); + .toList(); assertMatchEquals( completions, "foundation/1", "four/0", "fourblah/1", "fourier/0", "fourty/1.0"); @@ -231,8 +230,8 @@ public void testMultilingualInput() throws Exception { List result = lookup.lookup(stringToCharSequence("wit"), true, 5); assertEquals(5, result.size()); - assertTrue(result.get(0).key.toString().equals("wit")); // exact match. - assertTrue(result.get(1).key.toString().equals("with")); // highest count. + assertEquals("wit", result.get(0).key.toString()); // exact match. + assertEquals("with", result.get(1).key.toString()); // highest count. 
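// Aside on the assertTrue(x.equals(y)) -> assertEquals(y, x) rewrites just above:
// the semantics are identical, but on failure assertEquals reports both the expected
// and the actual value, while assertTrue can only report that the condition was
// false. A hypothetical sketch (values illustrative; static imports from
// org.junit.Assert assumed):
//
//   String actual = "with";
//   assertTrue(actual.equals("wit"));  // fails with a bare AssertionError
//   assertEquals("wit", actual);       // failure message shows both values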
tempDir.close(); } @@ -276,7 +275,7 @@ public void testRandom() throws Exception { Directory tempDir = getDirectory(); FSTCompletionLookup lookup = new FSTCompletionLookup(tempDir, "fst"); - lookup.build(new InputArrayIterator(freqs.toArray(new Input[freqs.size()]))); + lookup.build(new InputArrayIterator(freqs.toArray(new Input[0]))); for (Input tf : freqs) { final String term = tf.term.utf8ToString(); @@ -315,8 +314,8 @@ private void assertMatchEquals(List res, String... expected) { i < result.length ? result[i] : "--")); } - System.err.println(b.toString()); - fail("Expected different output:\n" + b.toString()); + System.err.println(b); + fail("Expected different output:\n" + b); } } diff --git a/lucene/test-framework/src/java/org/apache/lucene/tests/index/RandomPostingsTester.java b/lucene/test-framework/src/java/org/apache/lucene/tests/index/RandomPostingsTester.java index 870c4f5548c0..3aefda16cb6f 100644 --- a/lucene/test-framework/src/java/org/apache/lucene/tests/index/RandomPostingsTester.java +++ b/lucene/test-framework/src/java/org/apache/lucene/tests/index/RandomPostingsTester.java @@ -40,7 +40,6 @@ import java.util.SortedMap; import java.util.TreeMap; import java.util.function.IntToLongFunction; -import java.util.stream.Collectors; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.FieldsConsumer; import org.apache.lucene.codecs.FieldsProducer; @@ -121,9 +120,9 @@ public enum Option { private long totalPayloadBytes; // Holds all postings: - private Map> fields; + private final Map> fields; - private FieldInfos fieldInfos; + private final FieldInfos fieldInfos; List allTerms; private int maxDoc; @@ -1252,9 +1251,7 @@ private void verifyEnum( Impacts impacts = impactsEnum.getImpacts(); INDEX_PACKAGE_ACCESS.checkImpacts(impacts, doc); impactsCopy = - impacts.getImpacts(0).stream() - .map(i -> new Impact(i.freq, i.norm)) - .collect(Collectors.toList()); + impacts.getImpacts(0).stream().map(i -> new Impact(i.freq, i.norm)).toList(); } freq = impactsEnum.freq(); long norm = docToNorm.applyAsLong(doc); @@ -1301,9 +1298,7 @@ private void verifyEnum( for (int level = 0; level < impacts.numLevels(); ++level) { if (impacts.getDocIdUpTo(level) >= max) { impactsCopy = - impacts.getImpacts(level).stream() - .map(i -> new Impact(i.freq, i.norm)) - .collect(Collectors.toList()); + impacts.getImpacts(level).stream().map(i -> new Impact(i.freq, i.norm)).toList(); break; } } @@ -1342,9 +1337,7 @@ private void verifyEnum( for (int level = 0; level < impacts.numLevels(); ++level) { if (impacts.getDocIdUpTo(level) >= max) { impactsCopy = - impacts.getImpacts(level).stream() - .map(i -> new Impact(i.freq, i.norm)) - .collect(Collectors.toList()); + impacts.getImpacts(level).stream().map(i -> new Impact(i.freq, i.norm)).toList(); break; } } @@ -1370,12 +1363,12 @@ private void verifyEnum( private static class TestThread extends Thread { private Fields fieldsSource; - private EnumSet