Merge branch 'main' of github.com:opensearch-project/OpenSearch into star_tree_codec
bharath-techie committed Jul 8, 2024
2 parents 8430db2 + f14b5c8 commit d10acbb
Showing 20 changed files with 342 additions and 73 deletions.
3 changes: 3 additions & 0 deletions CHANGELOG.md
@@ -27,6 +27,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `opentelemetry` from 1.36.0 to 1.39.0 ([#14457](https://github.com/opensearch-project/OpenSearch/pull/14457))
 - Bump `azure-identity` from 1.11.4 to 1.13.0, Bump `msal4j` from 1.14.3 to 1.15.1, Bump `msal4j-persistence-extension` from 1.2.0 to 1.3.0 ([#14506](https://github.com/opensearch-project/OpenSearch/pull/14506))
 - Bump `com.azure:azure-storage-common` from 12.21.2 to 12.25.1 ([#14517](https://github.com/opensearch-project/OpenSearch/pull/14517))
+- Bump `com.microsoft.azure:msal4j` from 1.15.1 to 1.16.0 ([#14610](https://github.com/opensearch-project/OpenSearch/pull/14610))
 
 ### Changed
 - [Tiered Caching] Move query recomputation logic outside write lock ([#14187](https://github.com/opensearch-project/OpenSearch/pull/14187))
@@ -35,6 +36,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Make the class CommunityIdProcessor final ([#14448](https://github.com/opensearch-project/OpenSearch/pull/14448))
 - Allow @InternalApi annotation on classes not meant to be constructed outside of the OpenSearch core ([#14575](https://github.com/opensearch-project/OpenSearch/pull/14575))
 - Add @InternalApi annotation to japicmp exclusions ([#14597](https://github.com/opensearch-project/OpenSearch/pull/14597))
+- Allow system index warning in OpenSearchRestTestCase.refreshAllIndices ([#14635](https://github.com/opensearch-project/OpenSearch/pull/14635))
 
 ### Deprecated
 
@@ -53,6 +55,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Fix FuzzyQuery in keyword field will use IndexOrDocValuesQuery when both of index and doc_value are true ([#14378](https://github.com/opensearch-project/OpenSearch/pull/14378))
 - Fix file cache initialization ([#14004](https://github.com/opensearch-project/OpenSearch/pull/14004))
 - Handle NPE in GetResult if "found" field is missing ([#14552](https://github.com/opensearch-project/OpenSearch/pull/14552))
+- Refactoring FilterPath.parse by using an iterative approach ([#14200](https://github.com/opensearch-project/OpenSearch/pull/14200))
 
 ### Security
 
@@ -46,7 +46,6 @@
 public class FilterPath {
 
     static final FilterPath EMPTY = new FilterPath();
-
     private final String filter;
     private final String segment;
     private final FilterPath next;
@@ -99,32 +98,29 @@ public static FilterPath[] compile(Set<String> filters) {
 
         List<FilterPath> paths = new ArrayList<>();
         for (String filter : filters) {
-            if (filter != null) {
+            if (filter != null && !filter.isEmpty()) {
                 filter = filter.trim();
                 if (filter.length() > 0) {
-                    paths.add(parse(filter, filter));
+                    paths.add(parse(filter));
                 }
             }
         }
         return paths.toArray(new FilterPath[0]);
     }
 
-    private static FilterPath parse(final String filter, final String segment) {
-        int end = segment.length();
-
-        for (int i = 0; i < end;) {
-            char c = segment.charAt(i);
-
-            if (c == '.') {
-                String current = segment.substring(0, i).replaceAll("\\\\.", ".");
-                return new FilterPath(filter, current, parse(filter, segment.substring(i + 1)));
-            }
-            ++i;
-            if ((c == '\\') && (i < end) && (segment.charAt(i) == '.')) {
-                ++i;
-            }
-        }
-        return new FilterPath(filter, segment.replaceAll("\\\\.", "."), EMPTY);
+    private static FilterPath parse(final String filter) {
+        // Split the filter into segments using a regex
+        // that avoids splitting escaped dots.
+        String[] segments = filter.split("(?<!\\\\)\\.");
+        FilterPath next = EMPTY;
+
+        for (int i = segments.length - 1; i >= 0; i--) {
+            // Replace escaped dots with actual dots in the current segment.
+            String segment = segments[i].replaceAll("\\\\.", ".");
+            next = new FilterPath(filter, segment, next);
+        }
+
+        return next;
     }
 
     @Override
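For illustration, a standalone sketch (not part of this commit) of how the negative-lookbehind split in the new parse() handles escaped dots; the regex and the unescaping call are the ones used above:

    String filter = "foo\\.bar.baz";                  // the filter string foo\.bar.baz
    String[] segments = filter.split("(?<!\\\\)\\."); // ["foo\\.bar", "baz"], the escaped dot is not a separator
    for (String segment : segments) {
        System.out.println(segment.replaceAll("\\\\.", "."));  // prints foo.bar, then baz
    }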
@@ -35,6 +35,7 @@
 import org.opensearch.common.util.set.Sets;
 import org.opensearch.test.OpenSearchTestCase;
 
+import java.util.HashSet;
 import java.util.Set;
 
 import static java.util.Collections.singleton;
@@ -369,4 +370,20 @@ public void testMultipleFilterPaths() {
         assertThat(filterPath.getSegment(), is(emptyString()));
         assertSame(filterPath, FilterPath.EMPTY);
     }
+
+    public void testCompileWithEmptyString() {
+        Set<String> filters = new HashSet<>();
+        filters.add("");
+        FilterPath[] filterPaths = FilterPath.compile(filters);
+        assertNotNull(filterPaths);
+        assertEquals(0, filterPaths.length);
+    }
+
+    public void testCompileWithNull() {
+        Set<String> filters = new HashSet<>();
+        filters.add(null);
+        FilterPath[] filterPaths = FilterPath.compile(filters);
+        assertNotNull(filterPaths);
+        assertEquals(0, filterPaths.length);
+    }
 }
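As a usage sketch (hypothetical; it assumes only the getSegment() accessor already used above), compile() ties the new null/empty guards to the escaped-dot parsing:

    Set<String> filters = new HashSet<>(Arrays.asList("foo\\.bar.baz", "", null));
    FilterPath[] paths = FilterPath.compile(filters);
    assertEquals(1, paths.length);                   // the null and "" entries are skipped
    assertEquals("foo.bar", paths[0].getSegment());  // head segment, with the escaped dot unescaped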
2 changes: 1 addition & 1 deletion plugins/repository-azure/build.gradle
@@ -61,7 +61,7 @@ dependencies {
     // Start of transitive dependencies for azure-identity
     api 'com.microsoft.azure:msal4j-persistence-extension:1.3.0'
     api "net.java.dev.jna:jna-platform:${versions.jna}"
-    api 'com.microsoft.azure:msal4j:1.15.1'
+    api 'com.microsoft.azure:msal4j:1.16.0'
     api 'com.nimbusds:oauth2-oidc-sdk:11.9.1'
     api 'com.nimbusds:nimbus-jose-jwt:9.40'
     api 'com.nimbusds:content-type:2.3'
1 change: 0 additions & 1 deletion plugins/repository-azure/licenses/msal4j-1.15.1.jar.sha1

This file was deleted.

1 change: 1 addition & 0 deletions plugins/repository-azure/licenses/msal4j-1.16.0.jar.sha1
@@ -0,0 +1 @@
+708a0a986ed091054f1c08866712e5b41aec6700
@@ -275,7 +275,6 @@ initalMetadataVersion < internalCluster().client()
      * After shard relocation completes, shuts down the docrep nodes and asserts remote
      * index settings are applied even when the index is in YELLOW state
      */
-    @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13737")
     public void testIndexSettingsUpdatedEvenForMisconfiguredReplicas() throws Exception {
         internalCluster().startClusterManagerOnlyNode();
 
@@ -332,7 +331,6 @@ public void testIndexSettingsUpdatedEvenForMisconfiguredReplicas() throws Except
      * After shard relocation completes, restarts the docrep node holding extra replica shard copy
      * and asserts remote index settings are applied as soon as the docrep replica copy is unassigned
      */
-    @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13871")
    public void testIndexSettingsUpdatedWhenDocrepNodeIsRestarted() throws Exception {
         internalCluster().startClusterManagerOnlyNode();
 
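(The second removed @AwaitsFix cites issue 13871, the same failure referenced by the shard-lock change in IndexService.java further down; re-enabling these tests rides on that fix.)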
@@ -65,7 +65,6 @@
 import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA;
 import static org.opensearch.index.shard.IndexShardTestCase.getTranslog;
 import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING;
-import static org.opensearch.test.OpenSearchTestCase.getShardLevelBlobPath;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
 import static org.hamcrest.Matchers.comparesEqualTo;
@@ -133,6 +132,21 @@ private void testPeerRecovery(int numberOfIterations, boolean invokeFlush) throw
         );
     }
 
+    public void testRemoteStoreIndexCreationAndDeletionWithReferencedStore() throws InterruptedException, ExecutionException {
+        String dataNode = internalCluster().startNodes(1).get(0);
+        createIndex(INDEX_NAME, remoteStoreIndexSettings(0));
+        ensureYellowAndNoInitializingShards(INDEX_NAME);
+        ensureGreen(INDEX_NAME);
+
+        IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME);
+
+        // Simulating a condition where store is already in use by increasing ref count, this helps in testing index
+        // deletion when refresh is in-progress.
+        indexShard.store().incRef();
+        assertAcked(client().admin().indices().prepareDelete(INDEX_NAME));
+        indexShard.store().decRef();
+    }
+
     public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogNoDataFlush() throws Exception {
         testPeerRecovery(1, true);
     }
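The incRef()/decRef() pair in the new test leans on Store being ref-counted. As a hedged sketch of the usual guard pattern (hypothetical caller code, not part of the commit):

    indexShard.store().incRef();      // keep the store open while it is in use
    try {
        // ... read segment files, compute checksums, etc. ...
    } finally {
        indexShard.store().decRef();  // let the store close once the last reference is released
    }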
@@ -1287,6 +1287,7 @@ public Builder templates(Map<String, IndexTemplateMetadata> templates) {
     }
 
     public Builder templates(TemplatesMetadata templatesMetadata) {
+        this.templates.clear();
         this.templates.putAll(templatesMetadata.getTemplates());
         return this;
     }
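For context, a minimal sketch of why the clear() matters, using the same builder calls as the new testTemplatesMetadata test further down: Metadata.builder(metadata1) pre-populates the builder's templates, so without the clear() a later templates(...) call merged the old and new sets instead of replacing them.

    Metadata metadata1 = Metadata.builder().templates(templatesMetadata1).build();
    Metadata metadata2 = Metadata.builder(metadata1).templates(templatesMetadata2).build();
    // With the fix, metadata2 carries only templatesMetadata2's templates;
    // previously, template entries from metadata1 leaked into metadata2 as well.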
@@ -32,7 +32,6 @@
 import org.opensearch.gateway.PriorityComparator;
 
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
@@ -41,7 +40,6 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import java.util.stream.StreamSupport;

@@ -779,15 +777,16 @@ void allocateUnassigned() {
          * if we allocate for instance (0, R, IDX1) we move the second replica to the secondary array and proceed with
          * the next replica. If we could not find a node to allocate (0,R,IDX1) we move all it's replicas to ignoreUnassigned.
          */
-        ShardRouting[] unassignedShards = unassigned.drain();
-        List<ShardRouting> allUnassignedShards = Arrays.stream(unassignedShards).collect(Collectors.toList());
-        List<ShardRouting> localUnassignedShards = allUnassignedShards.stream()
-            .filter(shard -> RoutingPool.LOCAL_ONLY.equals(RoutingPool.getShardPool(shard, allocation)))
-            .collect(Collectors.toList());
-        allUnassignedShards.removeAll(localUnassignedShards);
-        allUnassignedShards.forEach(shard -> routingNodes.unassigned().add(shard));
-        unassignedShards = localUnassignedShards.toArray(new ShardRouting[0]);
-        ShardRouting[] primary = unassignedShards;
+        List<ShardRouting> primaryList = new ArrayList<>();
+        for (ShardRouting shard : unassigned.drain()) {
+            if (RoutingPool.LOCAL_ONLY.equals(RoutingPool.getShardPool(shard, allocation))) {
+                primaryList.add(shard);
+            } else {
+                routingNodes.unassigned().add(shard);
+            }
+        }
+
+        ShardRouting[] primary = primaryList.toArray(new ShardRouting[0]);
         ShardRouting[] secondary = new ShardRouting[primary.length];
         int secondaryLength = 0;
         int primaryLength = primary.length;
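The rewrite folds what was three passes over the drained shards (collect all, filter the LOCAL_ONLY ones into a second list, then removeAll plus a forEach to re-queue the rest) into a single loop that routes each shard either into primaryList or back to the unassigned queue. Behavior is unchanged; the list-against-list removeAll, whose cost grows with the product of the two list sizes, disappears.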
@@ -44,8 +44,6 @@
 import org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode;
 import org.opensearch.node.remotestore.RemoteStoreNodeService.Direction;
 
-import java.util.Locale;
-
 /**
  * A new allocation decider for migration of document replication clusters to remote store backed clusters:
  * - For STRICT compatibility mode, the decision is always YES
@@ -101,7 +99,7 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing
         if (migrationDirection.equals(Direction.NONE)) {
             // remote backed indices on docrep nodes and non remote backed indices on remote nodes are not allowed
             boolean isNoDecision = remoteSettingsBackedIndex ^ targetNode.isRemoteStoreNode();
-            String reason = String.format(Locale.ROOT, " for %sremote store backed index", remoteSettingsBackedIndex ? "" : "non ");
+            String reason = " for " + (remoteSettingsBackedIndex ? "" : "non ") + "remote store backed index";
             return allocation.decision(
                 isNoDecision ? Decision.NO : Decision.YES,
                 NAME,
@@ -114,11 +112,9 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing
         // check for remote store backed indices
         if (remoteSettingsBackedIndex && targetNode.isRemoteStoreNode() == false) {
             // allocations and relocations must be to a remote node
-            String reason = String.format(
-                Locale.ROOT,
-                " because a remote store backed index's shard copy can only be %s to a remote node",
-                ((shardRouting.assignedToNode() == false) ? "allocated" : "relocated")
-            );
+            String reason = new StringBuilder(" because a remote store backed index's shard copy can only be ").append(
+                (shardRouting.assignedToNode() == false) ? "allocated" : "relocated"
+            ).append(" to a remote node").toString();
             return allocation.decision(Decision.NO, NAME, getDecisionDetails(false, shardRouting, targetNode, reason));
         }
 
@@ -168,16 +164,18 @@ private Decision replicaShardDecision(ShardRouting replicaShardRouting, Discover
 
     // get detailed reason for the decision
     private String getDecisionDetails(boolean isYes, ShardRouting shardRouting, DiscoveryNode targetNode, String reason) {
-        return String.format(
-            Locale.ROOT,
-            "[%s migration_direction]: %s shard copy %s be %s to a %s node%s",
-            migrationDirection.direction,
-            (shardRouting.primary() ? "primary" : "replica"),
-            (isYes ? "can" : "can not"),
-            ((shardRouting.assignedToNode() == false) ? "allocated" : "relocated"),
-            (targetNode.isRemoteStoreNode() ? "remote" : "non-remote"),
-            reason
-        );
+        return new StringBuilder("[").append(migrationDirection.direction)
+            .append(" migration_direction]: ")
+            .append(shardRouting.primary() ? "primary" : "replica")
+            .append(" shard copy ")
+            .append(isYes ? "can" : "can not")
+            .append(" be ")
+            .append((shardRouting.assignedToNode() == false) ? "allocated" : "relocated")
+            .append(" to a ")
+            .append(targetNode.isRemoteStoreNode() ? "remote" : "non-remote")
+            .append(" node")
+            .append(reason)
+            .toString();
     }
 
 }
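Both replaced String.format(Locale.ROOT, ...) calls sat on the allocation decision path, which runs per shard per candidate node; plain concatenation and an explicit StringBuilder produce the same messages without re-parsing a format string on every decision.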
@@ -356,7 +356,7 @@ public RemoteClusterStateManifestInfo writeIncrementalMetadata(
             && clusterState.getNodes().delta(previousClusterState.getNodes()).hasChanges();
         final boolean updateClusterBlocks = isPublicationEnabled && !clusterState.blocks().equals(previousClusterState.blocks());
         final boolean updateHashesOfConsistentSettings = isPublicationEnabled
-            || Metadata.isHashesOfConsistentSettingsEqual(previousClusterState.metadata(), clusterState.metadata()) == false;
+            && Metadata.isHashesOfConsistentSettingsEqual(previousClusterState.metadata(), clusterState.metadata()) == false;
 
         uploadedMetadataResults = writeMetadataInParallel(
             clusterState,
@@ -476,7 +476,8 @@ public RemoteClusterStateManifestInfo writeIncrementalMetadata(
         return manifestDetails;
     }
 
-    private UploadedMetadataResults writeMetadataInParallel(
+    // package private for testing
+    UploadedMetadataResults writeMetadataInParallel(
         ClusterState clusterState,
         List<IndexMetadata> indexToUpload,
         Map<String, IndexMetadata> prevIndexMetadataByName,
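Two things to note here: the || to && fix means the hashes of consistent settings are now uploaded only when remote publication is enabled and the hashes actually changed (previously they were re-uploaded on every incremental write once publication was enabled, and even with it disabled whenever they differed); and writeMetadataInParallel is widened to package-private purely so tests can invoke it directly.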
@@ -12,6 +12,7 @@
 import org.opensearch.common.io.Streams;
 import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity;
 import org.opensearch.common.remote.BlobPathParameters;
+import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput;
 import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.compress.Compressor;
@@ -122,6 +123,8 @@ public UploadedMetadata getUploadedMetadata() {
 
     public static Custom readFrom(StreamInput streamInput, NamedWriteableRegistry namedWriteableRegistry, String customType)
         throws IOException {
-        return namedWriteableRegistry.getReader(Custom.class, customType).read(streamInput);
+        try (StreamInput in = new NamedWriteableAwareStreamInput(streamInput, namedWriteableRegistry)) {
+            return namedWriteableRegistry.getReader(Custom.class, customType).read(in);
+        }
     }
 }
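The wrapper matters because a bare StreamInput does not carry a NamedWriteableRegistry, so a Custom whose reader itself calls readNamedWriteable(...) for a nested type cannot resolve it; NamedWriteableAwareStreamInput threads the registry through with the stream. A hypothetical reader the old code would have broken on (illustration only, not an API from this commit):

    // Hypothetical Custom implementation
    MyCustom(StreamInput in) throws IOException {
        // Resolving the nested named writeable needs the registry, so this
        // only works once `in` is a NamedWriteableAwareStreamInput.
        this.nested = in.readNamedWriteable(ClusterState.Custom.class);
    }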
17 changes: 15 additions & 2 deletions server/src/main/java/org/opensearch/index/IndexService.java
@@ -602,7 +602,21 @@ public synchronized IndexShard createShard(
                     this.indexSettings.getRemoteStorePathStrategy()
                 );
             }
-            remoteStore = new Store(shardId, this.indexSettings, remoteDirectory, lock, Store.OnClose.EMPTY, path);
+            // When an instance of Store is created, a shardlock is created which is released on closing the instance of store.
+            // Currently, we create 2 instances of store for remote store backed indices: store and remoteStore.
+            // As there can be only one shardlock acquired for a given shard, the lock is shared between store and remoteStore.
+            // This creates an issue when we are deleting the index as it results in closing both store and remoteStore.
+            // Sample test failure: https://github.com/opensearch-project/OpenSearch/issues/13871
+            // The following method provides ShardLock that is not maintained by NodeEnvironment.
+            // As part of https://github.com/opensearch-project/OpenSearch/issues/13075, we want to move away from keeping 2
+            // store instances.
+            ShardLock remoteStoreLock = new ShardLock(shardId) {
+                @Override
+                protected void closeInternal() {
+                    // Do nothing for shard lock on remote store
+                }
+            };
+            remoteStore = new Store(shardId, this.indexSettings, remoteDirectory, remoteStoreLock, Store.OnClose.EMPTY, path);
         } else {
             // Disallow shards with remote store based settings to be created on non-remote store enabled nodes
             // Even though we have `RemoteStoreMigrationAllocationDecider` in place to prevent something like this from happening at the
@@ -625,7 +639,6 @@ public synchronized IndexShard createShard(
             } else {
                 directory = directoryFactory.newDirectory(this.indexSettings, path);
             }
-
             store = new Store(
                 shardId,
                 this.indexSettings,
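Net effect: store keeps the real NodeEnvironment shard lock, while remoteStore holds a stand-in whose closeInternal() does nothing, so closing one Store instance during index deletion no longer releases the lock out from under the other.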
@@ -1482,6 +1482,33 @@ public void testIsSegmentReplicationDisabled() {
         assertFalse(metadata.isSegmentReplicationEnabled(indexName));
     }
 
+    public void testTemplatesMetadata() {
+        TemplatesMetadata templatesMetadata1 = TemplatesMetadata.builder()
+            .put(
+                IndexTemplateMetadata.builder("template_1")
+                    .patterns(Arrays.asList("bar-*", "foo-*"))
+                    .settings(Settings.builder().put("random_index_setting_" + randomAlphaOfLength(3), randomAlphaOfLength(5)).build())
+                    .build()
+            )
+            .build();
+        Metadata metadata1 = Metadata.builder().templates(templatesMetadata1).build();
+        assertThat(metadata1.templates(), is(templatesMetadata1.getTemplates()));
+
+        TemplatesMetadata templatesMetadata2 = TemplatesMetadata.builder()
+            .put(
+                IndexTemplateMetadata.builder("template_2")
+                    .patterns(Arrays.asList("bar-*", "foo-*"))
+                    .settings(Settings.builder().put("random_index_setting_" + randomAlphaOfLength(3), randomAlphaOfLength(5)).build())
+                    .build()
+            )
+            .build();
+
+        Metadata metadata2 = Metadata.builder(metadata1).templates(templatesMetadata2).build();
+
+        assertThat(metadata2.templates(), is(templatesMetadata2.getTemplates()));
+
+    }

public static Metadata randomMetadata() {
Metadata.Builder md = Metadata.builder()
.put(buildIndexMetadata("index", "alias", randomBoolean() ? null : randomBoolean()).build(), randomBoolean())