
Commit

Merge branch '2.x' into rr-backport
Signed-off-by: Ankit Kala <[email protected]>
ankitkala authored Mar 14, 2024
2 parents 7a07d28 + a5f335f commit 94185b2
Showing 57 changed files with 1,241 additions and 302 deletions.
5 changes: 5 additions & 0 deletions CHANGELOG.md
@@ -13,10 +13,12 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Fix error in RemoteSegmentStoreDirectory when debug logging is enabled ([#12328](https://github.com/opensearch-project/OpenSearch/pull/12328))
- Support for returning scores in matched queries ([#11626](https://github.com/opensearch-project/OpenSearch/pull/11626))
- Add shard id property to SearchLookup for use in field types provided by plugins ([#1063](https://github.com/opensearch-project/OpenSearch/pull/1063))
- Force merge API supports performing on primary shards only ([#11269](https://github.com/opensearch-project/OpenSearch/pull/11269))
- Add kuromoji_completion analyzer and filter ([#4835](https://github.com/opensearch-project/OpenSearch/issues/4835))
- [Admission Control] Integrate IO Usage Tracker to the Resource Usage Collector Service and Emit IO Usage Stats ([#11880](https://github.com/opensearch-project/OpenSearch/pull/11880))
- The org.opensearch.bootstrap.Security should support codebase for JAR files with classifiers ([#12586](https://github.com/opensearch-project/OpenSearch/issues/12586))
- Remote reindex: Add support for configurable retry mechanism ([#12561](https://github.com/opensearch-project/OpenSearch/pull/12561))
- Tracing for deep search path ([#12103](https://github.com/opensearch-project/OpenSearch/pull/12103))

### Dependencies
- Bump `com.squareup.okio:okio` from 3.7.0 to 3.8.0 ([#12290](https://github.com/opensearch-project/OpenSearch/pull/12290))
@@ -38,9 +40,12 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Bump `org.apache.zookeeper:zookeeper` from 3.9.1 to 3.9.2 ([#12580](https://github.com/opensearch-project/OpenSearch/pull/12580))
- Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#12579](https://github.com/opensearch-project/OpenSearch/pull/12579))
- Bump Jackson version from 2.16.1 to 2.16.2 ([#12611](https://github.com/opensearch-project/OpenSearch/pull/12611))
- Bump `reactor-netty` from 1.1.15 to 1.1.17 ([#12633](https://github.com/opensearch-project/OpenSearch/pull/12633))
- Bump `reactor` from 3.5.14 to 3.5.15 ([#12633](https://github.com/opensearch-project/OpenSearch/pull/12633))

### Changed
- Allow composite aggregation to run under a parent filter aggregation ([#11499](https://github.com/opensearch-project/OpenSearch/pull/11499))
- Quickly compute terms aggregations when the top-level query is functionally match-all for a segment ([#11643](https://github.com/opensearch-project/OpenSearch/pull/11643))

### Deprecated

4 changes: 2 additions & 2 deletions buildSrc/version.properties
@@ -30,8 +30,8 @@ netty = 4.1.107.Final
joda = 2.12.2

# project reactor
-reactor_netty = 1.1.15
-reactor = 3.5.14
+reactor_netty = 1.1.17
+reactor = 3.5.15

# client dependencies
httpclient = 4.5.14
SpanContext.java
@@ -31,4 +31,19 @@ public SpanContext(Span span) {
    Span getSpan() {
        return span;
    }

    /**
     * Sets the error for the current span behind this context
     * @param cause error
     */
    public void setError(final Exception cause) {
        span.setError(cause);
    }

    /**
     * Ends current span
     */
    public void endSpan() {
        span.endSpan();
    }
}
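The two new methods let a caller that only holds a SpanContext handle record a failure and close the underlying span, which is the typical shape of an asynchronous completion callback. A minimal sketch of that call pattern, assuming a SpanContext obtained from the tracer (the callback signature itself is illustrative, not an OpenSearch interface):

// Hypothetical async completion hook; only the SpanContext handle is in scope here.
void onOperationComplete(SpanContext spanContext, Exception failure) {
    if (failure != null) {
        spanContext.setError(failure); // record the failure on the wrapped span
    }
    spanContext.endSpan(); // close the span on both success and failure paths
}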
SpanCreationContext.java
@@ -79,8 +79,8 @@ public SpanCreationContext attributes(Attributes attributes) {
    }

    /**
-     * Sets the parent for spann
-     * @param parent parent
+     * Sets the parent for span
+     * @param parent parent span context
     * @return spanCreationContext
     */
    public SpanCreationContext parent(SpanContext parent) {
@@ -116,7 +116,7 @@ public void onPhaseStart(SearchPhaseContext context) {}
    public void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {}

    @Override
-    public void onPhaseFailure(SearchPhaseContext context) {}
+    public void onPhaseFailure(SearchPhaseContext context, Throwable cause) {}

    @Override
    public void onRequestStart(SearchRequestContext searchRequestContext) {}

This file was deleted.

@@ -0,0 +1 @@
319b1d41f28e92b31b7ca0f19183337f5539bb44

This file was deleted.

@@ -0,0 +1 @@
9ed949dcd050ef30d9eeedd53d95d1dce20ce832

This file was deleted.

@@ -0,0 +1 @@
319b1d41f28e92b31b7ca0f19183337f5539bb44

This file was deleted.

@@ -0,0 +1 @@
9ed949dcd050ef30d9eeedd53d95d1dce20ce832
indices.forcemerge.json
@@ -63,6 +63,10 @@
      "wait_for_completion": {
        "type" : "boolean",
        "description" : "If false, the request will return a task immediately and the operation will run in background. Defaults to true."
      },
+      "primary_only": {
+        "type" : "boolean",
+        "description" : "Specify whether the operation should be performed only on primary shards. Defaults to false."
+      }
    }
  }
@@ -27,3 +27,23 @@
        index: test
        max_num_segments: 10
        only_expunge_deletes: true

---
"Test primary_only parameter":
  - skip:
      version: " - 2.12.99"
      reason: "primary_only is available in 2.13.0+"

  - do:
      indices.create:
        index: test
        body:
          settings:
            index.number_of_shards: 2
            index.number_of_replicas: 1

  - do:
      indices.forcemerge:
        index: test
        primary_only: true
  # 2 primaries + 2 replicas exist, but only the 2 primaries are merged.
  - match: { _shards.total: 2 }
@@ -4,15 +4,17 @@
# will return a task immediately and the merge process will run in background.

  - skip:
-      version: " - 2.6.99"
-      reason: "only available in 2.7+"
-      features: allowed_warnings
+      version: " - 2.6.99, 2.13.0 - "
+      reason: "wait_for_completion was introduced in 2.7.0 and task description was changed in 2.13.0"
+      features: allowed_warnings, node_selector

  - do:
      indices.create:
        index: test_index

  - do:
+      node_selector:
+        version: " 2.7.0 - 2.12.99"
      indices.forcemerge:
        index: test_index
        wait_for_completion: false
@@ -27,6 +29,30 @@
  - match: { task.action: "indices:admin/forcemerge" }
  - match: { task.description: "Force-merge indices [test_index], maxSegments[1], onlyExpungeDeletes[false], flush[true]" }

---
"Force merge index with wait_for_completion after task description changed":
  - skip:
      version: " - 2.12.99 "
      reason: "task description was changed in 2.13.0"
      features: allowed_warnings, node_selector

  - do:
      node_selector:
        version: " 2.13.0 - "
      indices.forcemerge:
        index: test_index
        wait_for_completion: false
        max_num_segments: 1
  - match: { task: /^.+$/ }
  - set: { task: taskId }

  - do:
      tasks.get:
        wait_for_completion: true
        task_id: $taskId
  - match: { task.action: "indices:admin/forcemerge" }
  - match: { task.description: "Force-merge indices [test_index], maxSegments[1], onlyExpungeDeletes[false], flush[true], primaryOnly[false]" }

# The .tasks index is created when the force-merge operation completes, so we should delete the .tasks index at the end;
# otherwise, the .tasks index may introduce unexpected warnings and cause other test cases to fail.
# Deleting the .tasks index directly will also introduce a warning, but currently we don't have such APIs which can delete one
1 change: 0 additions & 1 deletion server/licenses/reactor-core-3.5.14.jar.sha1

This file was deleted.

1 change: 1 addition & 0 deletions server/licenses/reactor-core-3.5.15.jar.sha1
@@ -0,0 +1 @@
4e07a24c671235a2a806e75e9b8ff23d7d1db3d4
ForceMergeIT.java
@@ -100,6 +100,24 @@ public void testForceMergeUUIDConsistent() throws IOException {
        assertThat(primaryForceMergeUUID, is(replicaForceMergeUUID));
    }

    public void testForceMergeOnlyOnPrimaryShards() throws IOException {
        internalCluster().ensureAtLeastNumDataNodes(2);
        final String index = "test-index";
        createIndex(
            index,
            Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build()
        );
        ensureGreen(index);
        final ForceMergeResponse forceMergeResponse = client().admin()
            .indices()
            .prepareForceMerge(index)
            .setMaxNumSegments(1)
            .setPrimaryOnly(true)
            .get();
        assertThat(forceMergeResponse.getFailedShards(), is(0));
        assertThat(forceMergeResponse.getSuccessfulShards(), is(1));
    }

    private static String getForceMergeUUID(IndexShard indexShard) throws IOException {
        try (GatedCloseable<IndexCommit> wrappedIndexCommit = indexShard.acquireLastIndexCommit(true)) {
            return wrappedIndexCommit.get().getUserData().get(Engine.FORCE_MERGE_UUID_KEY);
@@ -298,7 +298,6 @@ public void testGatewayRecovery() throws Exception {
        logger.info("--> request recoveries");
        RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet();
        assertThat(response.shardRecoveryStates().size(), equalTo(SHARD_COUNT));
-        assertThat(response.shardRecoveryStates().get(INDEX_NAME).size(), equalTo(1));

        List<RecoveryState> recoveryStates = response.shardRecoveryStates().get(INDEX_NAME);
        assertThat(recoveryStates.size(), equalTo(1));
SegmentReplicationIT.java
@@ -25,6 +25,7 @@
import org.opensearch.action.admin.cluster.stats.ClusterStatsResponse;
import org.opensearch.action.admin.indices.alias.Alias;
import org.opensearch.action.admin.indices.flush.FlushRequest;
+import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.opensearch.action.admin.indices.stats.IndicesStatsRequest;
import org.opensearch.action.admin.indices.stats.IndicesStatsResponse;
import org.opensearch.action.get.GetResponse;
@@ -400,6 +401,14 @@ public void testMultipleShards() throws Exception {
    }

    public void testReplicationAfterForceMerge() throws Exception {
        performReplicationAfterForceMerge(false, SHARD_COUNT * (1 + REPLICA_COUNT));
    }

    public void testReplicationAfterForceMergeOnPrimaryShardsOnly() throws Exception {
        performReplicationAfterForceMerge(true, SHARD_COUNT);
    }

    private void performReplicationAfterForceMerge(boolean primaryOnly, int expectedSuccessfulShards) throws Exception {
        final String nodeA = internalCluster().startDataOnlyNode();
        final String nodeB = internalCluster().startDataOnlyNode();
        createIndex(INDEX_NAME);
@@ -430,8 +439,16 @@ public void testReplicationAfterForceMerge() throws Exception {
        waitForDocs(expectedHitCount, indexer);
        waitForSearchableDocs(expectedHitCount, nodeA, nodeB);

-        // Force a merge here so that the in memory SegmentInfos does not reference old segments on disk.
-        client().admin().indices().prepareForceMerge(INDEX_NAME).setMaxNumSegments(1).setFlush(false).get();
+        // Perform force merge only on the primary shards.
+        final ForceMergeResponse forceMergeResponse = client().admin()
+            .indices()
+            .prepareForceMerge(INDEX_NAME)
+            .setPrimaryOnly(primaryOnly)
+            .setMaxNumSegments(1)
+            .setFlush(false)
+            .get();
+        assertThat(forceMergeResponse.getFailedShards(), is(0));
+        assertThat(forceMergeResponse.getSuccessfulShards(), is(expectedSuccessfulShards));
        refresh(INDEX_NAME);
        verifyStoreContent();
    }
ForceMergeRequest.java
@@ -70,11 +70,13 @@ public static final class Defaults {
        public static final int MAX_NUM_SEGMENTS = -1;
        public static final boolean ONLY_EXPUNGE_DELETES = false;
        public static final boolean FLUSH = true;
+        public static final boolean PRIMARY_ONLY = false;
    }

    private int maxNumSegments = Defaults.MAX_NUM_SEGMENTS;
    private boolean onlyExpungeDeletes = Defaults.ONLY_EXPUNGE_DELETES;
    private boolean flush = Defaults.FLUSH;
+    private boolean primaryOnly = Defaults.PRIMARY_ONLY;

    private static final Version FORCE_MERGE_UUID_VERSION = LegacyESVersion.V_7_7_0;

@@ -102,6 +104,9 @@ public ForceMergeRequest(StreamInput in) throws IOException {
        maxNumSegments = in.readInt();
        onlyExpungeDeletes = in.readBoolean();
        flush = in.readBoolean();
+        if (in.getVersion().onOrAfter(Version.V_2_13_0)) {
+            primaryOnly = in.readBoolean();
+        }
        if (in.getVersion().onOrAfter(FORCE_MERGE_UUID_VERSION)) {
            forceMergeUUID = in.readOptionalString();
        } else {
@@ -167,6 +172,21 @@ public ForceMergeRequest flush(boolean flush) {
        return this;
    }

    /**
     * Should the force merge be performed only on primary shards? Defaults to {@code false}.
     */
    public boolean primaryOnly() {
        return primaryOnly;
    }

    /**
     * Sets whether the force merge should be performed only on primary shards. Defaults to {@code false}.
     */
    public ForceMergeRequest primaryOnly(boolean primaryOnly) {
        this.primaryOnly = primaryOnly;
        return this;
    }

    /**
     * Should this task store its result after it has finished?
     */
@@ -189,6 +209,8 @@ public String getDescription() {
            + onlyExpungeDeletes
            + "], flush["
            + flush
+            + "], primaryOnly["
+            + primaryOnly
            + "]";
    }

@@ -198,6 +220,9 @@ public void writeTo(StreamOutput out) throws IOException {
        out.writeInt(maxNumSegments);
        out.writeBoolean(onlyExpungeDeletes);
        out.writeBoolean(flush);
+        if (out.getVersion().onOrAfter(Version.V_2_13_0)) {
+            out.writeBoolean(primaryOnly);
+        }
        if (out.getVersion().onOrAfter(FORCE_MERGE_UUID_VERSION)) {
            out.writeOptionalString(forceMergeUUID);
        }

@@ -212,6 +237,8 @@ public String toString() {
            + onlyExpungeDeletes
            + ", flush="
            + flush
+            + ", primaryOnly="
+            + primaryOnly
            + '}';
    }
}
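The Version.V_2_13_0 checks above keep the request wire-compatible in mixed-version clusters: the extra boolean is written and read only when the stream is negotiated at 2.13.0 or later, so older peers never see it. A round-trip sketch of that behavior in the unit-test style used elsewhere in OpenSearch (the BytesStreamOutput/StreamInput setup and package locations are assumptions, as they vary across 2.x):

public void testPrimaryOnlyDroppedOnOldWireFormat() throws IOException {
    ForceMergeRequest request = new ForceMergeRequest("my-index").primaryOnly(true);
    try (BytesStreamOutput out = new BytesStreamOutput()) {
        out.setVersion(Version.V_2_12_0); // pretend the receiving node is pre-2.13
        request.writeTo(out);             // the primaryOnly boolean is skipped
        try (StreamInput in = out.bytes().streamInput()) {
            in.setVersion(Version.V_2_12_0);
            ForceMergeRequest received = new ForceMergeRequest(in);
            // On the old wire format the flag falls back to its default, false.
            assertFalse(received.primaryOnly());
        }
    }
}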
ForceMergeRequestBuilder.java
@@ -81,4 +81,12 @@ public ForceMergeRequestBuilder setFlush(boolean flush) {
        request.flush(flush);
        return this;
    }

    /**
     * Sets whether the force merge should be performed only on primary shards. Defaults to {@code false}.
     */
    public ForceMergeRequestBuilder setPrimaryOnly(boolean primaryOnly) {
        request.primaryOnly(primaryOnly);
        return this;
    }
}
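For reference, a minimal usage sketch of the new builder method, mirroring the transport-client calls in the integration tests earlier in this commit (the index name is illustrative):

// Merge only the primary shards of "my-index" down to a single segment.
ForceMergeResponse response = client().admin()
    .indices()
    .prepareForceMerge("my-index")
    .setPrimaryOnly(true) // new in 2.13.0; defaults to false
    .setMaxNumSegments(1)
    .get();
// Only primary copies are targeted, so successfulShards equals the primary count.
assertThat(response.getFailedShards(), is(0));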
TransportForceMergeAction.java
@@ -115,11 +115,16 @@ protected EmptyResult shardOperation(ForceMergeRequest request, ShardRouting sha
    }

    /**
-     * The refresh request works against *all* shards.
+     * The force merge request works against *all* shards by default, but it can work against all primary shards only
+     * by setting primary_only to true.
     */
    @Override
    protected ShardsIterator shards(ClusterState clusterState, ForceMergeRequest request, String[] concreteIndices) {
-        return clusterState.routingTable().allShards(concreteIndices);
+        if (request.primaryOnly()) {
+            return clusterState.routingTable().allShardsSatisfyingPredicate(concreteIndices, ShardRouting::primary);
+        } else {
+            return clusterState.routingTable().allShards(concreteIndices);
+        }
    }
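To make the effect of the predicate concrete: for a hypothetical index with 2 primaries and 1 replica each, the two branches produce differently sized shard iterators (the index name and counts here are for illustration only):

// All shard copies versus primary copies only.
ShardsIterator all = clusterState.routingTable().allShards(new String[] { "test" });
ShardsIterator primaries = clusterState.routingTable()
    .allShardsSatisfyingPredicate(new String[] { "test" }, ShardRouting::primary);
assert all.size() == 4;       // 2 primaries + 2 replicas
assert primaries.size() == 2; // the 2 primaries only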

