
Commit

Merge branch 'main' into remove-duplicate-wildcard
Signed-off-by: Craig Perkins <[email protected]>
cwperks authored Nov 6, 2023
2 parents bbe2984 + c1962cc commit 713d165
Showing 19 changed files with 98 additions and 101 deletions.
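For context, the target branch of this merge, remove-duplicate-wildcard, corresponds to the CHANGELOG entry "Remove adjacent duplicate wildcard in Regex" ([#11060]) visible in the first file below: consecutive `*` wildcards in a pattern match exactly the same strings as a single `*`, so collapsing them before the pattern is processed avoids needless work. A minimal illustrative sketch of that idea (not the actual code from #11060):

// Minimal sketch only, not the OpenSearch implementation: collapse runs of '*'
// in a simple wildcard pattern, since "a**b" matches the same strings as "a*b".
static String collapseAdjacentWildcards(String pattern) {
    StringBuilder sb = new StringBuilder(pattern.length());
    char prev = 0;
    for (int i = 0; i < pattern.length(); i++) {
        char c = pattern.charAt(i);
        if (c == '*' && prev == '*') {
            continue; // drop the duplicate wildcard
        }
        sb.append(c);
        prev = c;
    }
    return sb.toString();
}

For example, collapseAdjacentWildcards("foo***bar") returns "foo*bar", which matches the same inputs while being cheaper to process.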
3 changes: 2 additions & 1 deletion CHANGELOG.md
@@ -98,8 +98,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Introduce ConcurrentQueryProfiler to profile query using concurrent segment search path and support concurrency during rewrite and create weight ([10352](https://github.com/opensearch-project/OpenSearch/pull/10352))
- Update the indexRandom function to create more segments for concurrent search tests ([10247](https://github.com/opensearch-project/OpenSearch/pull/10247))
- [Remote cluster state] Make index and global metadata upload timeout dynamic cluster settings ([#10814](https://github.com/opensearch-project/OpenSearch/pull/10814))
- Added cluster setting cluster.restrict.index.replication_type to restrict setting of index setting replication type ([#10866](https://github.com/opensearch-project/OpenSearch/pull/10866))
- Add cluster state stats ([#10670](https://github.com/opensearch-project/OpenSearch/pull/10670))
- Adding slf4j license header to LoggerMessageFormat.java ([#11069](https://github.com/opensearch-project/OpenSearch/pull/11069))

### Dependencies
- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298))
@@ -125,6 +125,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Add instrumentation for indexing in transport bulk action and transport shard bulk action. ([#10273](https://github.com/opensearch-project/OpenSearch/pull/10273))
- [BUG] Disable sort optimization for HALF_FLOAT ([#10999](https://github.com/opensearch-project/OpenSearch/pull/10999))
- Performance improvement for MultiTerm Queries on Keyword fields ([#7057](https://github.com/opensearch-project/OpenSearch/issues/7057))
- Disable concurrent aggs for Diversified Sampler and Sampler aggs ([#11087](https://github.com/opensearch-project/OpenSearch/issues/11087))
- Remove adjacent duplicate wildcard in Regex ([#11060](https://github.com/opensearch-project/OpenSearch/pull/11060))

### Deprecated
@@ -30,6 +30,13 @@
* GitHub history for details.
*/

/*
* This code is based on code from SLF4J 1.5.11
* Copyright (c) 2004-2007 QOS.ch
* All rights reserved.
* SPDX-License-Identifier: MIT
*/

package org.opensearch.core.common.logging;

import java.util.HashSet;
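For reference, the class gaining this attribution header, LoggerMessageFormat (in org.opensearch.core.common.logging, as the package line above shows), performs SLF4J-style "{}" placeholder substitution. A hedged usage sketch; the exact overloads are assumed from common usage rather than taken from this diff:

import org.opensearch.core.common.logging.LoggerMessageFormat;

public class LoggerMessageFormatExample {
    public static void main(String[] args) {
        // Assumed API: each "{}" in the pattern is replaced by the next argument, in order.
        String msg = LoggerMessageFormat.format("failed to parse field [{}] in shard [{}]", "price", 3);
        System.out.println(msg); // expected: failed to parse field [price] in shard [3]
    }
}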
@@ -19,7 +19,6 @@
import org.opensearch.test.OpenSearchIntegTestCase;

import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE;
import static org.opensearch.indices.IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING;
import static org.opensearch.indices.IndicesService.CLUSTER_SETTING_REPLICATION_TYPE;

@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
@@ -124,30 +123,4 @@ public void testIndexReplicationSettingOverridesDocRepClusterSetting() throws Ex
assertEquals(indicesService.indexService(anotherIndex).getIndexSettings().isSegRepEnabled(), false);
}

public void testIndexReplicationTypeWhenRestrictSettingTrue() {
testRestrictIndexReplicationTypeSetting(true, randomFrom(ReplicationType.values()));
}

public void testIndexReplicationTypeWhenRestrictSettingFalse() {
testRestrictIndexReplicationTypeSetting(false, randomFrom(ReplicationType.values()));
}

private void testRestrictIndexReplicationTypeSetting(boolean setRestrict, ReplicationType replicationType) {
String expectedExceptionMsg =
"Validation Failed: 1: index setting [index.replication.type] is not allowed to be set as [cluster.restrict.index.replication_type=true];";
String clusterManagerName = internalCluster().startNode(
Settings.builder().put(CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING.getKey(), setRestrict).build()
);
internalCluster().startDataOnlyNodes(1);

// Test create index fails
Settings indexSettings = Settings.builder().put(indexSettings()).put(SETTING_REPLICATION_TYPE, replicationType).build();
if (setRestrict) {
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> createIndex(INDEX_NAME, indexSettings));
assertEquals(expectedExceptionMsg, exception.getMessage());
} else {
createIndex(INDEX_NAME, indexSettings);
}
}

}
@@ -1066,9 +1066,14 @@ public void testScrollCreatedOnReplica() throws Exception {

client(replica).prepareClearScroll().addScrollId(searchResponse.getScrollId()).get();

currentFiles = List.of(replicaShard.store().directory().listAll());
assertFalse("Files should be cleaned up post scroll clear request", currentFiles.containsAll(snapshottedSegments));
assertBusy(
() -> assertFalse(
"Files should be cleaned up post scroll clear request",
List.of(replicaShard.store().directory().listAll()).containsAll(snapshottedSegments)
)
);
assertEquals(100, scrollHits);

}

/**
@@ -1327,9 +1332,12 @@ public void testPitCreatedOnReplica() throws Exception {
// delete the PIT
DeletePitRequest deletePITRequest = new DeletePitRequest(pitResponse.getId());
client().execute(DeletePitAction.INSTANCE, deletePITRequest).actionGet();

currentFiles = List.of(replicaShard.store().directory().listAll());
assertFalse("Files should be cleaned up", currentFiles.containsAll(snapshottedSegments));
assertBusy(
() -> assertFalse(
"Files should be cleaned up",
List.of(replicaShard.store().directory().listAll()).containsAll(snapshottedSegments)
)
);
}
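Both hunks above replace an immediate check of the replica's on-disk files with assertBusy: clearing a scroll or deleting a PIT only triggers segment-file cleanup asynchronously, so asserting right away races against that cleanup. assertBusy retries the assertion until it passes or a timeout elapses. A sketch of the pattern, assuming the usual explicit-timeout overload of the test-framework helper (the 30-second value is illustrative):

// Requires java.util.concurrent.TimeUnit; replicaShard and snapshottedSegments are the
// identifiers used in the tests above. The explicit-timeout overload is an assumption.
assertBusy(() -> {
    List<String> currentFiles = List.of(replicaShard.store().directory().listAll());
    assertFalse("Files should be cleaned up", currentFiles.containsAll(snapshottedSegments));
}, 30, TimeUnit.SECONDS);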

/**
@@ -33,9 +33,9 @@

import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

import org.opensearch.action.admin.indices.refresh.RefreshRequest;
import org.opensearch.action.search.SearchResponse;
import org.opensearch.action.search.SearchType;
import org.opensearch.action.support.WriteRequest;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.FeatureFlags;
import org.opensearch.index.query.TermQueryBuilder;
@@ -132,13 +132,14 @@ public void setupSuiteScopeCluster() throws Exception {
client().prepareIndex("test")
.setId("" + i)
.setSource("author", parts[5], "name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3]))
.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
.get();
client().prepareIndex("idx_unmapped_author")
.setId("" + i)
.setSource("name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3]))
.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
.get();
}
client().admin().indices().refresh(new RefreshRequest("test")).get();
}
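Note the indexing change in the setup above: each document is now indexed with RefreshPolicy.IMMEDIATE instead of the whole index being refreshed once after the loop, which (presumably intentionally, given the sampler concurrent-search work tracked in #11087) tends to spread the documents across more, smaller segments. A sketch of the two approaches side by side, with illustrative values:

// New approach: refresh per request, so each book is searchable immediately and
// typically ends up in its own small segment.
client().prepareIndex("test")
    .setId("42")
    .setSource("author", "tolkien", "genre", "fantasy", "price", 9.99f)
    .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
    .get();

// Old approach: index everything first, then refresh once, producing fewer, larger segments.
// client().admin().indices().refresh(new RefreshRequest("test")).get();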

public void testIssue10719() throws Exception {
@@ -221,10 +222,6 @@ public void testNestedDiversity() throws Exception {
}

public void testNestedSamples() throws Exception {
assumeFalse(
"Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10046",
internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING)
);
// Test samples nested under samples
int MAX_DOCS_PER_AUTHOR = 1;
int MAX_DOCS_PER_GENRE = 2;
@@ -34,9 +34,9 @@

import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

import org.opensearch.action.admin.indices.refresh.RefreshRequest;
import org.opensearch.action.search.SearchResponse;
import org.opensearch.action.search.SearchType;
import org.opensearch.action.support.WriteRequest;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.FeatureFlags;
import org.opensearch.index.query.TermQueryBuilder;
@@ -132,13 +132,14 @@ public void setupSuiteScopeCluster() throws Exception {
client().prepareIndex("test")
.setId("" + i)
.setSource("author", parts[5], "name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3]))
.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
.get();
client().prepareIndex("idx_unmapped_author")
.setId("" + i)
.setSource("name", parts[2], "genre", parts[8], "price", Float.parseFloat(parts[3]))
.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
.get();
}
client().admin().indices().refresh(new RefreshRequest("test")).get();
}

public void testIssue10719() throws Exception {
@@ -195,6 +196,23 @@ public void testSimpleSampler() throws Exception {
assertThat(maxBooksPerAuthor, equalTo(3L));
}

public void testSimpleSamplerShardSize() throws Exception {
final int SHARD_SIZE = randomIntBetween(1, 3);
SamplerAggregationBuilder sampleAgg = sampler("sample").shardSize(SHARD_SIZE);
sampleAgg.subAggregation(terms("authors").field("author"));
SearchResponse response = client().prepareSearch("test")
.setSearchType(SearchType.QUERY_THEN_FETCH)
.setQuery(new TermQueryBuilder("genre", "fantasy"))
.setFrom(0)
.setSize(60)
.addAggregation(sampleAgg)
.get();
assertSearchResponse(response);
Sampler sample = response.getAggregations().get("sample");
Terms authors = sample.getAggregations().get("authors");
assertEquals(SHARD_SIZE * NUM_SHARDS, sample.getDocCount());
}
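The new testSimpleSamplerShardSize assertion leans on the sampler aggregation collecting at most shard_size matching documents from each shard, and it implicitly assumes every shard holds at least SHARD_SIZE "fantasy" books: with SHARD_SIZE = 2 and NUM_SHARDS = 3, for instance, each shard contributes 2 sampled documents and sample.getDocCount() comes out to 2 * 3 = 6.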

public void testUnmappedChildAggNoDiversity() throws Exception {
SamplerAggregationBuilder sampleAgg = sampler("sample").shardSize(100);
sampleAgg.subAggregation(terms("authors").field("author"));
@@ -293,6 +293,7 @@ public void testMultiNested() throws Exception {
refresh();
// check the numDocs
assertDocumentCount("test", 7);
indexRandomForConcurrentSearch("test");

// do some multi nested queries
SearchResponse searchResponse = client().prepareSearch("test")
@@ -485,6 +486,7 @@ public void testExplain() throws Exception {
)
.setRefreshPolicy(IMMEDIATE)
.get();
indexRandomForConcurrentSearch("test");

SearchResponse searchResponse = client().prepareSearch("test")
.setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1"), ScoreMode.Total))
@@ -968,6 +970,10 @@ public void testNestedSortWithMultiLevelFiltering() throws Exception {

// https://github.com/elastic/elasticsearch/issues/31554
public void testLeakingSortValues() throws Exception {
assumeFalse(
"Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/11065",
internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING)
);
assertAcked(
prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 1))
.setMapping(
@@ -1035,6 +1041,7 @@ public void testLeakingSortValues() throws Exception {
.get();

refresh();
indexRandomForConcurrentSearch("test");

SearchResponse searchResponse = client().prepareSearch()
.setQuery(termQuery("_id", 2))
@@ -1627,6 +1634,7 @@ public void testCheckFixedBitSetCache() throws Exception {
client().prepareIndex("test").setId("1").setSource("field", "value").get();
refresh();
ensureSearchable("test");
indexRandomForConcurrentSearch("test");

// No nested mapping yet, there shouldn't be anything in the fixed bit set cache
ClusterStatsResponse clusterStatsResponse = client().admin().cluster().prepareClusterStats().get();
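A recurring addition throughout these test diffs is indexRandomForConcurrentSearch(index), called after the usual refresh(). The helper itself is not part of this diff; presumably it indexes a few extra documents with intermediate refreshes so that each shard ends up with several segments, since concurrent segment search only has interesting work to do when a shard has more than one segment. A purely hypothetical sketch of such a helper (names and behavior are assumptions, not the real test-framework code):

// Hypothetical sketch only. The real helper in the test framework may differ substantially.
protected void indexRandomForConcurrentSearchSketch(String index) throws Exception {
    int extraDocs = randomIntBetween(2, 5);
    for (int i = 0; i < extraDocs; i++) {
        // Index a throwaway document and refresh so it can land in its own segment.
        client().prepareIndex(index).setId("concurrent-search-dummy-" + i).setSource("dummy", true).get();
        refresh(index);
    }
    for (int i = 0; i < extraDocs; i++) {
        // Delete the throwaway documents again so the test's own queries are unaffected.
        client().prepareDelete(index, "concurrent-search-dummy-" + i).get();
    }
    refresh(index);
}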
@@ -100,6 +100,7 @@ public void clearIndex() {
public void testPit() throws Exception {
CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true);
request.setIndices(new String[] { "index" });
indexRandomForConcurrentSearch("index");
ActionFuture<CreatePitResponse> execute = client().execute(CreatePitAction.INSTANCE, request);
CreatePitResponse pitResponse = execute.get();
SearchResponse searchResponse = client().prepareSearch("index")
@@ -52,7 +52,6 @@
import org.opensearch.test.OpenSearchIntegTestCase;
import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
@@ -99,7 +98,7 @@ public Settings nodeSettings(int nodeOrdinal) {
}

// see #2896
public void testStopOneNodePreferenceWithRedState() throws IOException {
public void testStopOneNodePreferenceWithRedState() throws Exception {
assertAcked(
prepareCreate("test").setSettings(
Settings.builder().put("index.number_of_shards", cluster().numDataNodes() + 2).put("index.number_of_replicas", 0)
@@ -110,6 +109,7 @@ public void testStopOneNodePreferenceWithRedState() throws IOException {
client().prepareIndex("test").setId("" + i).setSource("field1", "value1").get();
}
refresh();
indexRandomForConcurrentSearch("test");
internalCluster().stopRandomDataNode();
client().admin().cluster().prepareHealth().setWaitForStatus(ClusterHealthStatus.RED).get();
String[] preferences = new String[] {
@@ -138,7 +138,7 @@ public void testStopOneNodePreferenceWithRedState() throws IOException {
assertThat("_only_local", searchResponse.getFailedShards(), greaterThanOrEqualTo(0));
}
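The preference strings exercised by this test ("_only_local" appears above; the full array is truncated in this view) steer a search toward particular shard copies. A hedged sketch of the basic usage, reusing the client and matchers from the surrounding test:

// Route the search to shard copies on the coordinating node, if any are local.
SearchResponse localResponse = client.prepareSearch("test")
    .setPreference("_local")
    .setQuery(matchAllQuery())
    .get();
assertThat(localResponse.getFailedShards(), greaterThanOrEqualTo(0));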

public void testNoPreferenceRandom() {
public void testNoPreferenceRandom() throws Exception {
assertAcked(
prepareCreate("test").setSettings(
// this test needs at least a replica to make sure two consecutive searches go to two different copies of the same data
@@ -149,6 +149,7 @@ public void testNoPreferenceRandom() {

client().prepareIndex("test").setSource("field1", "value1").get();
refresh();
indexRandomForConcurrentSearch("test");

final Client client = internalCluster().smartClient();
SearchResponse searchResponse = client.prepareSearch("test").setQuery(matchAllQuery()).get();
@@ -201,7 +202,7 @@ public void testThatSpecifyingNonExistingNodesReturnsUsefulError() {
}
}

public void testNodesOnlyRandom() {
public void testNodesOnlyRandom() throws Exception {
assertAcked(
prepareCreate("test").setSettings(
// this test needs at least a replica to make sure two consecutive searches go to two different copies of the same data
@@ -211,6 +212,7 @@ public void testNodesOnlyRandom() {
ensureGreen();
client().prepareIndex("test").setSource("field1", "value1").get();
refresh();
indexRandomForConcurrentSearch("test");

final Client client = internalCluster().smartClient();
// multiple wildcards to cover the multi-param use case
@@ -186,6 +186,7 @@ public void testDocWithAllTypes() throws Exception {
String docBody = copyToStringFromClasspath("/org/opensearch/search/query/all-example-document.json");
reqs.add(client().prepareIndex("test").setId("1").setSource(docBody, MediaTypeRegistry.JSON));
indexRandom(true, false, reqs);
indexRandomForConcurrentSearch("test");

SearchResponse resp = client().prepareSearch("test").setQuery(queryStringQuery("foo")).get();
assertHits(resp.getHits(), "1");
@@ -225,6 +226,7 @@ public void testKeywordWithWhitespace() throws Exception {
reqs.add(client().prepareIndex("test").setId("2").setSource("f1", "bar"));
reqs.add(client().prepareIndex("test").setId("3").setSource("f1", "foo bar"));
indexRandom(true, false, reqs);
indexRandomForConcurrentSearch("test");

SearchResponse resp = client().prepareSearch("test").setQuery(queryStringQuery("foo")).get();
assertHits(resp.getHits(), "3");
@@ -245,6 +247,7 @@ public void testRegexCaseInsensitivity() throws Exception {
indexRequests.add(client().prepareIndex("messages").setId("1").setSource("message", "message: this is a TLS handshake"));
indexRequests.add(client().prepareIndex("messages").setId("2").setSource("message", "message: this is a tcp handshake"));
indexRandom(true, false, indexRequests);
indexRandomForConcurrentSearch("messages");

SearchResponse response = client().prepareSearch("messages").setQuery(queryStringQuery("/TLS/").defaultField("message")).get();
assertNoFailures(response);
@@ -282,6 +285,7 @@ public void testAllFields() throws Exception {
List<IndexRequestBuilder> reqs = new ArrayList<>();
reqs.add(client().prepareIndex("test_1").setId("1").setSource("f1", "foo", "f2", "eggplant"));
indexRandom(true, false, reqs);
indexRandomForConcurrentSearch("test_1");

SearchResponse resp = client().prepareSearch("test_1")
.setQuery(queryStringQuery("foo eggplant").defaultOperator(Operator.AND))
@@ -374,6 +378,7 @@ public void testLimitOnExpandedFields() throws Exception {

client().prepareIndex("testindex").setId("1").setSource("field_A0", "foo bar baz").get();
refresh();
indexRandomForConcurrentSearch("testindex");

// single field shouldn't trigger the limit
doAssertOneHitForQueryString("field_A0:foo");
@@ -465,6 +470,7 @@ public void testFieldAliasOnDisallowedFieldType() throws Exception {
List<IndexRequestBuilder> indexRequests = new ArrayList<>();
indexRequests.add(client().prepareIndex("test").setId("1").setSource("f3", "text", "f2", "one"));
indexRandom(true, false, indexRequests);
indexRandomForConcurrentSearch("test");

// The wildcard field matches aliases for both a text and geo_point field.
// By default, the geo_point field should be ignored when building the query.