Merge branch 'main' into dk_stream_chain

Signed-off-by: Daniel Widdis <[email protected]>

dbwiddis authored Oct 16, 2024
2 parents 12d35f1 + ec7b652 commit 3208bc2
Showing 25 changed files with 403 additions and 29 deletions.
3 changes: 3 additions & 0 deletions CHANGELOG.md
@@ -83,6 +83,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Fix protobuf-java leak through client library dependencies ([#16254](https://github.com/opensearch-project/OpenSearch/pull/16254))
- Fix multi-search with template doesn't return status code ([#16265](https://github.com/opensearch-project/OpenSearch/pull/16265))
- Fix inefficient Stream API call chains ending with count() ([#15386](https://github.com/opensearch-project/OpenSearch/pull/15386))
- [Streaming Indexing] Fix intermittent 'The bulk request must be terminated by a newline [\n]' failures ([#16337](https://github.com/opensearch-project/OpenSearch/pull/16337))
- Fix wrong default value when setting `index.number_of_routing_shards` to null on index creation ([#16331](https://github.com/opensearch-project/OpenSearch/pull/16331))
- Fix cluster failing to spin up when disk usage exceeds threshold ([#15258](https://github.com/opensearch-project/OpenSearch/pull/15258))

### Security

2 changes: 1 addition & 1 deletion buildSrc/version.properties
@@ -34,7 +34,7 @@ netty = 4.1.114.Final
joda = 2.12.7

# project reactor
reactor_netty = 1.1.22
reactor_netty = 1.1.23
reactor = 3.5.20

# client dependencies
4 changes: 4 additions & 0 deletions distribution/src/config/jvm.options
@@ -85,3 +85,7 @@ ${error.file}

# HDFS ForkJoinPool.common() support by SecurityManager
-Djava.util.concurrent.ForkJoinPool.common.threadFactory=org.opensearch.secure_sm.SecuredForkJoinWorkerThreadFactory

# See https://bugs.openjdk.org/browse/JDK-8341127 (openjdk/jdk#21283)
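# Note: the '23:' prefix applies an option only when running on JDK 23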
23:-XX:CompileCommand=dontinline,java/lang/invoke/MethodHandle.setAsTypeCache
23:-XX:CompileCommand=dontinline,java/lang/invoke/MethodHandle.asTypeUncached
@@ -31,13 +31,15 @@

package org.opensearch.bootstrap;

import org.opensearch.common.logging.LogConfigurator;
import org.opensearch.common.settings.KeyStoreCommandTestCase;
import org.opensearch.common.settings.KeyStoreWrapper;
import org.opensearch.common.settings.SecureSettings;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.io.IOUtils;
import org.opensearch.core.common.settings.SecureString;
import org.opensearch.env.Environment;
import org.opensearch.node.Node;
import org.opensearch.test.OpenSearchTestCase;
import org.junit.After;
import org.junit.Before;
@@ -51,8 +53,14 @@
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

import static org.hamcrest.Matchers.equalTo;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

public class BootstrapTests extends OpenSearchTestCase {
Environment env;
@@ -131,4 +139,38 @@ private void assertPassphraseRead(String source, String expected) {
}
}

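// Verifies startup ordering: the keep-alive thread must already be running
// before Node#start() is invoked. The mocked Node blocks on a latch that the
// thread counts down, proving the thread ran first.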
public void testInitExecutionOrder() throws Exception {
AtomicInteger order = new AtomicInteger(0);
CountDownLatch countDownLatch = new CountDownLatch(1);
Thread mockThread = new Thread(() -> {
assertEquals(0, order.getAndIncrement());
countDownLatch.countDown();
});

Node mockNode = mock(Node.class);
doAnswer(invocation -> {
try {
boolean threadStarted = countDownLatch.await(1000, TimeUnit.MILLISECONDS);
assertTrue(
"Waited for one second but the keepAliveThread isn't started, please check the execution order of"
+ "keepAliveThread.start and node.start",
threadStarted
);
} catch (InterruptedException e) {
fail("Thread interrupted");
}
assertEquals(1, order.getAndIncrement());
return null;
}).when(mockNode).start();

LogConfigurator.registerErrorListener();
Bootstrap testBootstrap = new Bootstrap(mockThread, mockNode);
Bootstrap.setInstance(testBootstrap);

Bootstrap.startInstance(testBootstrap);

verify(mockNode).start();
assertEquals(2, order.get());
}

}

This file was deleted.

@@ -0,0 +1 @@
a7059b0c18ab7aa0fa9e08b48cb6a20b15c11478

This file was deleted.

@@ -0,0 +1 @@
94b294fa90aee2e88ad4337251e278aaac21362c

This file was deleted.

@@ -0,0 +1 @@
a7059b0c18ab7aa0fa9e08b48cb6a20b15c11478

This file was deleted.

@@ -0,0 +1 @@
94b294fa90aee2e88ad4337251e278aaac21362c
@@ -304,11 +304,36 @@ public void testStreamingLargeDocument() throws IOException {
String.format(
Locale.getDefault(),
"{ \"index\": { \"_index\": \"test-streaming\", \"_id\": \"1\" } }\n{ \"name\": \"%s\" }\n",
randomAlphaOfLength(5000)
randomAlphaOfLength(7000)
)
);

final StreamingRequest<ByteBuffer> streamingRequest = new StreamingRequest<>(
"POST",
"/_bulk/stream",
Flux.fromStream(stream).map(s -> ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8)))
);

final StreamingResponse<ByteBuffer> streamingResponse = client().streamRequest(streamingRequest);

StepVerifier.create(Flux.from(streamingResponse.getBody()).map(b -> new String(b.array(), StandardCharsets.UTF_8)))
.expectNextMatches(s -> s.contains("\"result\":\"created\"") && s.contains("\"_id\":\"1\""))
.expectComplete()
.verify();

assertThat(streamingResponse.getStatusLine().getStatusCode(), equalTo(200));
assertThat(streamingResponse.getWarnings(), empty());
}

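// Same flow as above, but the 9000-character field pushes the request body
// past the default 8k chunk size, so the bulk payload necessarily arrives
// split across multiple HTTP chunks.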
public void testStreamingLargeDocumentThatExceedsChunkSize() throws IOException {
final Stream<String> stream = Stream.of(
String.format(
Locale.getDefault(),
"{ \"index\": { \"_index\": \"test-streaming\", \"_id\": \"1\" } }\n{ \"name\": \"%s\" }\n",
randomAlphaOfLength(9000) /* the default chunk size limit is 8k */
)
);

final Duration delay = Duration.ofMillis(1);
final StreamingRequest<ByteBuffer> streamingRequest = new StreamingRequest<>(
"POST",
"/_bulk/stream",
@@ -230,6 +230,7 @@ protected HttpServerChannel bind(InetSocketAddress socketAddress) throws Exception {
spec -> spec.maxChunkSize(maxChunkSize.bytesAsInt())
.maxHeaderSize(maxHeaderSize.bytesAsInt())
.maxInitialLineLength(maxInitialLineLength.bytesAsInt())
.allowPartialChunks(false)
)
.handle((req, res) -> incomingRequest(req, res))
);
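The one-line `.allowPartialChunks(false)` addition above stops the decoder from emitting incomplete HTTP chunks, which is what intermittently split streaming bulk bodies in the middle of a newline-terminated action line; the option is presumably why this commit bumps reactor-netty to 1.1.23, as it is a recent addition to the decoder spec. A minimal sketch of the same configuration in isolation (the class name and the 8192 cap are illustrative, not from this commit):

import reactor.netty.http.server.HttpServer;

public final class DecoderConfigSketch {
    // Configure the HTTP request decoder to emit only complete chunks, so a
    // streaming bulk body is never handed to the request handler split
    // mid-line.
    public static HttpServer configure(final HttpServer server) {
        return server.httpRequestDecoder(
            spec -> spec.maxChunkSize(8192)   // illustrative chunk cap
                .allowPartialChunks(false)    // deliver whole chunks only
        );
    }
}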
Expand Down
@@ -112,3 +112,33 @@
properties:
"":
type: keyword

---
"Create index with setting index.number_of_routing_shards to null":
- skip:
version: " - 2.99.99"
reason: "fixed in 3.0.0"
- do:
indices.create:
index: test_index
body:
settings:
number_of_routing_shards: null
- do:
cluster.state:
metric: [ metadata ]
index: test_index
- match: { metadata.indices.test_index.routing_num_shards: 1024 }

- do:
indices.create:
index: test_index1
body:
settings:
number_of_routing_shards: null
number_of_shards: 3
- do:
cluster.state:
metric: [ metadata ]
index: test_index1
- match: { metadata.indices.test_index1.routing_num_shards: 768 }
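The two expected values follow from how the default is derived when `index.number_of_routing_shards` is unset: the shard count is scaled by a power of two so that at least one split is always possible. A hedged sketch of that computation (names are illustrative; the real logic lives in the server's index-creation code):

public final class RoutingShardsSketch {
    // Default routing shards: multiply the shard count by a power of two,
    // keeping the result at or below 1024 while guaranteeing >= 1 split.
    static int defaultRoutingShards(final int numShards) {
        final int log2MaxNumShards = 10; // log2(1024)
        final int log2NumShards = 32 - Integer.numberOfLeadingZeros(numShards - 1); // ceil(log2(n))
        final int numSplits = Math.max(1, log2MaxNumShards - log2NumShards);
        return numShards << numSplits;
    }

    public static void main(final String[] args) {
        System.out.println(defaultRoutingShards(1)); // 1024, matches test_index
        System.out.println(defaultRoutingShards(3)); // 768, matches test_index1
    }
}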
@@ -421,8 +421,8 @@ public void testIndexDeletionNoPinnedTimestamps() throws Exception {
client().admin().indices().prepareDelete(INDEX_NAME).get();

assertBusy(() -> {
assertEquals(0, Files.list(translogMetadataPath).collect(Collectors.toList()).size());
assertEquals(0, Files.list(translogDataPath).collect(Collectors.toList()).size());
assertEquals(1, Files.list(translogMetadataPath).collect(Collectors.toList()).size());
assertEquals(4, Files.list(translogDataPath).collect(Collectors.toList()).size());
});
}

@@ -490,7 +490,7 @@ public void testIndexDeletionWithPinnedTimestamps() throws Exception {

assertBusy(() -> {
List<Path> metadataFiles = Files.list(translogMetadataPath).collect(Collectors.toList());
assertEquals(1, metadataFiles.size());
assertEquals(2, metadataFiles.size());

verifyTranslogDataFileCount(metadataFiles, translogDataPath);
});
@@ -27,22 +27,26 @@
import org.opensearch.common.blobstore.BlobPath;
import org.opensearch.common.io.PathUtils;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.io.IOUtils;
import org.opensearch.core.index.Index;
import org.opensearch.core.rest.RestStatus;
import org.opensearch.index.IndexService;
import org.opensearch.index.IndexSettings;
import org.opensearch.index.mapper.MapperService;
import org.opensearch.index.remote.RemoteStoreEnums;
import org.opensearch.index.shard.IndexShard;
import org.opensearch.indices.IndicesService;
import org.opensearch.indices.RemoteStoreSettings;
import org.opensearch.indices.recovery.RecoveryState;
import org.opensearch.indices.replication.common.ReplicationType;
import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService;
import org.opensearch.repositories.blobstore.BlobStoreRepository;
import org.opensearch.snapshots.AbstractSnapshotIntegTestCase;
import org.opensearch.snapshots.SnapshotInfo;
import org.opensearch.snapshots.SnapshotRestoreException;
import org.opensearch.snapshots.SnapshotState;
import org.opensearch.test.BackgroundIndexer;
import org.opensearch.test.InternalTestCluster;
import org.opensearch.test.OpenSearchIntegTestCase;
import org.junit.After;
@@ -53,15 +57,18 @@
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED;
import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS;
import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG;
import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA;
@@ -885,4 +892,88 @@ public void testRestoreOperationsUsingDifferentRepos() throws Exception {
ensureGreen(indexName1);
assertDocsPresentInIndex(client, indexName1, 3 * numDocsInIndex1);
}

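// Indexes continuously in the background, takes five snapshots at known doc
// counts, then restores each snapshot in turn and verifies the restored doc
// count matches what had been indexed when that snapshot was taken.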
public void testContinuousIndexing() throws Exception {
internalCluster().startClusterManagerOnlyNode();
internalCluster().startDataOnlyNode();
String index = "test-index";
String snapshotRepo = "test-restore-snapshot-repo";
String baseSnapshotName = "snapshot_";
Path absolutePath1 = randomRepoPath().toAbsolutePath();
logger.info("Snapshot Path [{}]", absolutePath1);

createRepository(snapshotRepo, "fs", getRepositorySettings(absolutePath1, true));

Client client = client();
Settings indexSettings = Settings.builder()
.put(super.indexSettings())
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.build();

createIndex(index, indexSettings);
ensureGreen(index);

RemoteStorePinnedTimestampService remoteStorePinnedTimestampService = internalCluster().getInstance(
RemoteStorePinnedTimestampService.class,
primaryNodeName(index)
);

remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueSeconds(randomIntBetween(1, 5)));
RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.timeValueSeconds(randomIntBetween(1, 5)));

long totalDocs = 0;
Map<String, Long> snapshots = new HashMap<>();
int numDocs = randomIntBetween(200, 300);
totalDocs += numDocs;
try (BackgroundIndexer indexer = new BackgroundIndexer(index, MapperService.SINGLE_MAPPING_NAME, client(), numDocs)) {
int numberOfSnapshots = 5;
for (int i = 0; i < numberOfSnapshots; i++) {
logger.info("--> waiting for {} docs to be indexed ...", numDocs);
long finalTotalDocs1 = totalDocs;
assertBusy(() -> assertEquals(finalTotalDocs1, indexer.totalIndexedDocs()), 120, TimeUnit.SECONDS);
logger.info("--> {} total docs indexed", totalDocs);
String snapshotName = baseSnapshotName + i;
createSnapshot(snapshotRepo, snapshotName, new ArrayList<>());
snapshots.put(snapshotName, totalDocs);
if (i < numberOfSnapshots - 1) {
numDocs = randomIntBetween(200, 300);
indexer.continueIndexing(numDocs);
totalDocs += numDocs;
}
}
}

logger.info("Snapshots Status: " + snapshots);

for (String snapshot : snapshots.keySet()) {
logger.info("Restoring snapshot: {}", snapshot);
assertAcked(client().admin().indices().delete(new DeleteIndexRequest(index)).get());

RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin()
.cluster()
.prepareRestoreSnapshot(snapshotRepo, snapshot)
.setWaitForCompletion(true)
.setIndices()
.get();

assertEquals(RestStatus.OK, restoreSnapshotResponse1.status());

// Verify restored index's stats
ensureGreen(TimeValue.timeValueSeconds(60), index);
long finalTotalDocs = totalDocs;
assertBusy(() -> {
Long hits = client().prepareSearch(index)
.setQuery(matchAllQuery())
.setSize((int) finalTotalDocs)
.storedFields()
.execute()
.actionGet()
.getHits()
.getTotalHits().value;

assertEquals(snapshots.get(snapshot), hits);
});
}
}
}